Mon, 20 Feb 2017 13:00:42 +0800
[C2] Use gsswx in storeF_imm0 for Loongson CPUs.
1 //
2 // Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved.
3 // Copyright (c) 2015, 2016, Loongson Technology. All rights reserved.
4 // DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5 //
6 // This code is free software; you can redistribute it and/or modify it
7 // under the terms of the GNU General Public License version 2 only, as
8 // published by the Free Software Foundation.
9 //
10 // This code is distributed in the hope that it will be useful, but WITHOUT
11 // ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 // FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
13 // version 2 for more details (a copy is included in the LICENSE file that
14 // accompanied this code).
15 //
16 // You should have received a copy of the GNU General Public License version
17 // 2 along with this work; if not, write to the Free Software Foundation,
18 // Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
19 //
20 // Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
21 // or visit www.oracle.com if you need additional information or have any
22 // questions.
23 //
24 //
26 // GodSon3 Architecture Description File
28 //----------REGISTER DEFINITION BLOCK------------------------------------------
29 // This information is used by the matcher and the register allocator to
30 // describe individual registers and classes of registers within the target
31 // architecture.
33 // format:
34 // reg_def name (call convention, c-call convention, ideal type, encoding);
35 // call convention :
36 // NS = No-Save
37 // SOC = Save-On-Call
38 // SOE = Save-On-Entry
39 // AS = Always-Save
40 // ideal type :
41 // see opto/opcodes.hpp for more info
42 // reg_class name (reg, ...);
43 // alloc_class name (reg, ...);
44 register %{
46 // General Registers
47 // Integer Registers
48 reg_def R0 ( NS, NS, Op_RegI, 0, VMRegImpl::Bad());
49 reg_def AT ( NS, NS, Op_RegI, 1, AT->as_VMReg());
50 reg_def AT_H ( NS, NS, Op_RegI, 1, AT->as_VMReg()->next());
51 reg_def V0 (SOC, SOC, Op_RegI, 2, V0->as_VMReg());
52 reg_def V0_H (SOC, SOC, Op_RegI, 2, V0->as_VMReg()->next());
53 reg_def V1 (SOC, SOC, Op_RegI, 3, V1->as_VMReg());
54 reg_def V1_H (SOC, SOC, Op_RegI, 3, V1->as_VMReg()->next());
55 reg_def A0 (SOC, SOC, Op_RegI, 4, A0->as_VMReg());
56 reg_def A0_H (SOC, SOC, Op_RegI, 4, A0->as_VMReg()->next());
57 reg_def A1 (SOC, SOC, Op_RegI, 5, A1->as_VMReg());
58 reg_def A1_H (SOC, SOC, Op_RegI, 5, A1->as_VMReg()->next());
59 reg_def A2 (SOC, SOC, Op_RegI, 6, A2->as_VMReg());
60 reg_def A2_H (SOC, SOC, Op_RegI, 6, A2->as_VMReg()->next());
61 reg_def A3 (SOC, SOC, Op_RegI, 7, A3->as_VMReg());
62 reg_def A3_H (SOC, SOC, Op_RegI, 7, A3->as_VMReg()->next());
63 reg_def A4 (SOC, SOC, Op_RegI, 8, A4->as_VMReg());
64 reg_def A4_H (SOC, SOC, Op_RegI, 8, A4->as_VMReg()->next());
65 reg_def A5 (SOC, SOC, Op_RegI, 9, A5->as_VMReg());
66 reg_def A5_H (SOC, SOC, Op_RegI, 9, A5->as_VMReg()->next());
67 reg_def A6 (SOC, SOC, Op_RegI, 10, A6->as_VMReg());
68 reg_def A6_H (SOC, SOC, Op_RegI, 10, A6->as_VMReg()->next());
69 reg_def A7 (SOC, SOC, Op_RegI, 11, A7->as_VMReg());
70 reg_def A7_H (SOC, SOC, Op_RegI, 11, A7->as_VMReg()->next());
71 reg_def T0 (SOC, SOC, Op_RegI, 12, T0->as_VMReg());
72 reg_def T0_H (SOC, SOC, Op_RegI, 12, T0->as_VMReg()->next());
73 reg_def T1 (SOC, SOC, Op_RegI, 13, T1->as_VMReg());
74 reg_def T1_H (SOC, SOC, Op_RegI, 13, T1->as_VMReg()->next());
75 reg_def T2 (SOC, SOC, Op_RegI, 14, T2->as_VMReg());
76 reg_def T2_H (SOC, SOC, Op_RegI, 14, T2->as_VMReg()->next());
77 reg_def T3 (SOC, SOC, Op_RegI, 15, T3->as_VMReg());
78 reg_def T3_H (SOC, SOC, Op_RegI, 15, T3->as_VMReg()->next());
79 reg_def S0 (SOC, SOE, Op_RegI, 16, S0->as_VMReg());
80 reg_def S0_H (SOC, SOE, Op_RegI, 16, S0->as_VMReg()->next());
81 reg_def S1 (SOC, SOE, Op_RegI, 17, S1->as_VMReg());
82 reg_def S1_H (SOC, SOE, Op_RegI, 17, S1->as_VMReg()->next());
83 reg_def S2 (SOC, SOE, Op_RegI, 18, S2->as_VMReg());
84 reg_def S2_H (SOC, SOE, Op_RegI, 18, S2->as_VMReg()->next());
85 reg_def S3 (SOC, SOE, Op_RegI, 19, S3->as_VMReg());
86 reg_def S3_H (SOC, SOE, Op_RegI, 19, S3->as_VMReg()->next());
87 reg_def S4 (SOC, SOE, Op_RegI, 20, S4->as_VMReg());
88 reg_def S4_H (SOC, SOE, Op_RegI, 20, S4->as_VMReg()->next());
89 reg_def S5 (SOC, SOE, Op_RegI, 21, S5->as_VMReg());
90 reg_def S5_H (SOC, SOE, Op_RegI, 21, S5->as_VMReg()->next());
91 reg_def S6 (SOC, SOE, Op_RegI, 22, S6->as_VMReg());
92 reg_def S6_H (SOC, SOE, Op_RegI, 22, S6->as_VMReg()->next());
93 reg_def S7 (SOC, SOE, Op_RegI, 23, S7->as_VMReg());
94 reg_def S7_H (SOC, SOE, Op_RegI, 23, S7->as_VMReg()->next());
95 reg_def T8 (SOC, SOC, Op_RegI, 24, T8->as_VMReg());
96 reg_def T8_H (SOC, SOC, Op_RegI, 24, T8->as_VMReg()->next());
97 reg_def T9 (SOC, SOC, Op_RegI, 25, T9->as_VMReg());
98 reg_def T9_H (SOC, SOC, Op_RegI, 25, T9->as_VMReg()->next());
100 // Special Registers
101 reg_def K0 ( NS, NS, Op_RegI, 26, K0->as_VMReg());
102 reg_def K1 ( NS, NS, Op_RegI, 27, K1->as_VMReg());
103 reg_def GP ( NS, NS, Op_RegI, 28, GP->as_VMReg());
104 reg_def GP_H ( NS, NS, Op_RegI, 28, GP->as_VMReg()->next());
105 reg_def SP ( NS, NS, Op_RegI, 29, SP->as_VMReg());
106 reg_def SP_H ( NS, NS, Op_RegI, 29, SP->as_VMReg()->next());
107 reg_def FP ( NS, NS, Op_RegI, 30, FP->as_VMReg());
108 reg_def FP_H ( NS, NS, Op_RegI, 30, FP->as_VMReg()->next());
109 reg_def RA ( NS, NS, Op_RegI, 31, RA->as_VMReg());
110 reg_def RA_H ( NS, NS, Op_RegI, 31, RA->as_VMReg()->next());
112 // Floating registers.
113 reg_def F0 ( SOC, SOC, Op_RegF, 0, F0->as_VMReg());
114 reg_def F0_H ( SOC, SOC, Op_RegF, 0, F0->as_VMReg()->next());
115 reg_def F1 ( SOC, SOC, Op_RegF, 1, F1->as_VMReg());
116 reg_def F1_H ( SOC, SOC, Op_RegF, 1, F1->as_VMReg()->next());
117 reg_def F2 ( SOC, SOC, Op_RegF, 2, F2->as_VMReg());
118 reg_def F2_H ( SOC, SOC, Op_RegF, 2, F2->as_VMReg()->next());
119 reg_def F3 ( SOC, SOC, Op_RegF, 3, F3->as_VMReg());
120 reg_def F3_H ( SOC, SOC, Op_RegF, 3, F3->as_VMReg()->next());
121 reg_def F4 ( SOC, SOC, Op_RegF, 4, F4->as_VMReg());
122 reg_def F4_H ( SOC, SOC, Op_RegF, 4, F4->as_VMReg()->next());
123 reg_def F5 ( SOC, SOC, Op_RegF, 5, F5->as_VMReg());
124 reg_def F5_H ( SOC, SOC, Op_RegF, 5, F5->as_VMReg()->next());
125 reg_def F6 ( SOC, SOC, Op_RegF, 6, F6->as_VMReg());
126 reg_def F6_H ( SOC, SOC, Op_RegF, 6, F6->as_VMReg()->next());
127 reg_def F7 ( SOC, SOC, Op_RegF, 7, F7->as_VMReg());
128 reg_def F7_H ( SOC, SOC, Op_RegF, 7, F7->as_VMReg()->next());
129 reg_def F8 ( SOC, SOC, Op_RegF, 8, F8->as_VMReg());
130 reg_def F8_H ( SOC, SOC, Op_RegF, 8, F8->as_VMReg()->next());
131 reg_def F9 ( SOC, SOC, Op_RegF, 9, F9->as_VMReg());
132 reg_def F9_H ( SOC, SOC, Op_RegF, 9, F9->as_VMReg()->next());
133 reg_def F10 ( SOC, SOC, Op_RegF, 10, F10->as_VMReg());
134 reg_def F10_H ( SOC, SOC, Op_RegF, 10, F10->as_VMReg()->next());
135 reg_def F11 ( SOC, SOC, Op_RegF, 11, F11->as_VMReg());
136 reg_def F11_H ( SOC, SOC, Op_RegF, 11, F11->as_VMReg()->next());
137 reg_def F12 ( SOC, SOC, Op_RegF, 12, F12->as_VMReg());
138 reg_def F12_H ( SOC, SOC, Op_RegF, 12, F12->as_VMReg()->next());
139 reg_def F13 ( SOC, SOC, Op_RegF, 13, F13->as_VMReg());
140 reg_def F13_H ( SOC, SOC, Op_RegF, 13, F13->as_VMReg()->next());
141 reg_def F14 ( SOC, SOC, Op_RegF, 14, F14->as_VMReg());
142 reg_def F14_H ( SOC, SOC, Op_RegF, 14, F14->as_VMReg()->next());
143 reg_def F15 ( SOC, SOC, Op_RegF, 15, F15->as_VMReg());
144 reg_def F15_H ( SOC, SOC, Op_RegF, 15, F15->as_VMReg()->next());
145 reg_def F16 ( SOC, SOC, Op_RegF, 16, F16->as_VMReg());
146 reg_def F16_H ( SOC, SOC, Op_RegF, 16, F16->as_VMReg()->next());
147 reg_def F17 ( SOC, SOC, Op_RegF, 17, F17->as_VMReg());
148 reg_def F17_H ( SOC, SOC, Op_RegF, 17, F17->as_VMReg()->next());
149 reg_def F18 ( SOC, SOC, Op_RegF, 18, F18->as_VMReg());
150 reg_def F18_H ( SOC, SOC, Op_RegF, 18, F18->as_VMReg()->next());
151 reg_def F19 ( SOC, SOC, Op_RegF, 19, F19->as_VMReg());
152 reg_def F19_H ( SOC, SOC, Op_RegF, 19, F19->as_VMReg()->next());
153 reg_def F20 ( SOC, SOC, Op_RegF, 20, F20->as_VMReg());
154 reg_def F20_H ( SOC, SOC, Op_RegF, 20, F20->as_VMReg()->next());
155 reg_def F21 ( SOC, SOC, Op_RegF, 21, F21->as_VMReg());
156 reg_def F21_H ( SOC, SOC, Op_RegF, 21, F21->as_VMReg()->next());
157 reg_def F22 ( SOC, SOC, Op_RegF, 22, F22->as_VMReg());
158 reg_def F22_H ( SOC, SOC, Op_RegF, 22, F22->as_VMReg()->next());
159 reg_def F23 ( SOC, SOC, Op_RegF, 23, F23->as_VMReg());
160 reg_def F23_H ( SOC, SOC, Op_RegF, 23, F23->as_VMReg()->next());
161 reg_def F24 ( SOC, SOC, Op_RegF, 24, F24->as_VMReg());
162 reg_def F24_H ( SOC, SOC, Op_RegF, 24, F24->as_VMReg()->next());
163 reg_def F25 ( SOC, SOC, Op_RegF, 25, F25->as_VMReg());
164 reg_def F25_H ( SOC, SOC, Op_RegF, 25, F25->as_VMReg()->next());
165 reg_def F26 ( SOC, SOC, Op_RegF, 26, F26->as_VMReg());
166 reg_def F26_H ( SOC, SOC, Op_RegF, 26, F26->as_VMReg()->next());
167 reg_def F27 ( SOC, SOC, Op_RegF, 27, F27->as_VMReg());
168 reg_def F27_H ( SOC, SOC, Op_RegF, 27, F27->as_VMReg()->next());
169 reg_def F28 ( SOC, SOC, Op_RegF, 28, F28->as_VMReg());
170 reg_def F28_H ( SOC, SOC, Op_RegF, 28, F28->as_VMReg()->next());
171 reg_def F29 ( SOC, SOC, Op_RegF, 29, F29->as_VMReg());
172 reg_def F29_H ( SOC, SOC, Op_RegF, 29, F29->as_VMReg()->next());
173 reg_def F30 ( SOC, SOC, Op_RegF, 30, F30->as_VMReg());
174 reg_def F30_H ( SOC, SOC, Op_RegF, 30, F30->as_VMReg()->next());
175 reg_def F31 ( SOC, SOC, Op_RegF, 31, F31->as_VMReg());
176 reg_def F31_H ( SOC, SOC, Op_RegF, 31, F31->as_VMReg()->next());
179 // ----------------------------
180 // Special Registers
181 // Condition Codes Flag Registers
182 reg_def MIPS_FLAG (SOC, SOC, Op_RegFlags, 1, as_Register(1)->as_VMReg());
183 //S6 is used for get_thread(S6)
184 //S5 is used for heapbase of compressed oop
185 alloc_class chunk0(
186 S7, S7_H,
187 S0, S0_H,
188 S1, S1_H,
189 S2, S2_H,
190 S4, S4_H,
191 S5, S5_H,
192 S6, S6_H,
193 S3, S3_H,
194 T2, T2_H,
195 T3, T3_H,
196 T8, T8_H,
197 T9, T9_H,
198 T1, T1_H, // inline_cache_reg
199 V1, V1_H,
200 A7, A7_H,
201 A6, A6_H,
202 A5, A5_H,
203 A4, A4_H,
204 V0, V0_H,
205 A3, A3_H,
206 A2, A2_H,
207 A1, A1_H,
208 A0, A0_H,
209 T0, T0_H,
210 GP, GP_H, // fixed: comma was missing between GP_H and RA
211 RA, RA_H,
212 SP, SP_H, // stack_pointer
213 FP, FP_H // frame_pointer
214 );
216 alloc_class chunk1( F0, F0_H,
217 F1, F1_H,
218 F2, F2_H,
219 F3, F3_H,
220 F4, F4_H,
221 F5, F5_H,
222 F6, F6_H,
223 F7, F7_H,
224 F8, F8_H,
225 F9, F9_H,
226 F10, F10_H,
227 F11, F11_H,
228 F20, F20_H,
229 F21, F21_H,
230 F22, F22_H,
231 F23, F23_H,
232 F24, F24_H,
233 F25, F25_H,
234 F26, F26_H,
235 F27, F27_H,
236 F28, F28_H,
237 F19, F19_H,
238 F18, F18_H,
239 F17, F17_H,
240 F16, F16_H,
241 F15, F15_H,
242 F14, F14_H,
243 F13, F13_H,
244 F12, F12_H,
245 F29, F29_H,
246 F30, F30_H,
247 F31, F31_H);
249 alloc_class chunk2(MIPS_FLAG);
251 reg_class s_reg( S0, S1, S2, S3, S4, S5, S6, S7 );
252 reg_class s0_reg( S0 );
253 reg_class s1_reg( S1 );
254 reg_class s2_reg( S2 );
255 reg_class s3_reg( S3 );
256 reg_class s4_reg( S4 );
257 reg_class s5_reg( S5 );
258 reg_class s6_reg( S6 );
259 reg_class s7_reg( S7 );
261 reg_class t_reg( T0, T1, T2, T3, T8, T9 );
262 reg_class t0_reg( T0 );
263 reg_class t1_reg( T1 );
264 reg_class t2_reg( T2 );
265 reg_class t3_reg( T3 );
266 reg_class t8_reg( T8 );
267 reg_class t9_reg( T9 );
269 reg_class a_reg( A0, A1, A2, A3, A4, A5, A6, A7 );
270 reg_class a0_reg( A0 );
271 reg_class a1_reg( A1 );
272 reg_class a2_reg( A2 );
273 reg_class a3_reg( A3 );
274 reg_class a4_reg( A4 );
275 reg_class a5_reg( A5 );
276 reg_class a6_reg( A6 );
277 reg_class a7_reg( A7 );
279 reg_class v0_reg( V0 );
280 reg_class v1_reg( V1 );
282 reg_class sp_reg( SP, SP_H );
283 reg_class fp_reg( FP, FP_H );
285 reg_class mips_flags(MIPS_FLAG);
287 reg_class v0_long_reg( V0, V0_H );
288 reg_class v1_long_reg( V1, V1_H );
289 reg_class a0_long_reg( A0, A0_H );
290 reg_class a1_long_reg( A1, A1_H );
291 reg_class a2_long_reg( A2, A2_H );
292 reg_class a3_long_reg( A3, A3_H );
293 reg_class a4_long_reg( A4, A4_H );
294 reg_class a5_long_reg( A5, A5_H );
295 reg_class a6_long_reg( A6, A6_H );
296 reg_class a7_long_reg( A7, A7_H );
297 reg_class t0_long_reg( T0, T0_H );
298 reg_class t1_long_reg( T1, T1_H );
299 reg_class t2_long_reg( T2, T2_H );
300 reg_class t3_long_reg( T3, T3_H );
301 reg_class t8_long_reg( T8, T8_H );
302 reg_class t9_long_reg( T9, T9_H );
303 reg_class s0_long_reg( S0, S0_H );
304 reg_class s1_long_reg( S1, S1_H );
305 reg_class s2_long_reg( S2, S2_H );
306 reg_class s3_long_reg( S3, S3_H );
307 reg_class s4_long_reg( S4, S4_H );
308 reg_class s5_long_reg( S5, S5_H );
309 reg_class s6_long_reg( S6, S6_H );
310 reg_class s7_long_reg( S7, S7_H );
312 reg_class int_reg( S7, S0, S1, S2, S4, S3, T8, T2, T3, T1, V1, A7, A6, A5, A4, V0, A3, A2, A1, A0, T0 );
314 reg_class no_Ax_int_reg( S7, S0, S1, S2, S4, S3, T8, T2, T3, T1, V1, V0, T0 );
316 reg_class p_reg(
317 S7, S7_H,
318 S0, S0_H,
319 S1, S1_H,
320 S2, S2_H,
321 S4, S4_H,
322 S3, S3_H,
323 T8, T8_H,
324 T2, T2_H,
325 T3, T3_H,
326 T1, T1_H,
327 A7, A7_H,
328 A6, A6_H,
329 A5, A5_H,
330 A4, A4_H,
331 A3, A3_H,
332 A2, A2_H,
333 A1, A1_H,
334 A0, A0_H,
335 T0, T0_H
336 );
338 reg_class no_T8_p_reg(
339 S7, S7_H,
340 S0, S0_H,
341 S1, S1_H,
342 S2, S2_H,
343 S4, S4_H,
344 S3, S3_H,
345 T2, T2_H,
346 T3, T3_H,
347 T1, T1_H,
348 A7, A7_H,
349 A6, A6_H,
350 A5, A5_H,
351 A4, A4_H,
352 A3, A3_H,
353 A2, A2_H,
354 A1, A1_H,
355 A0, A0_H,
356 T0, T0_H
357 );
359 reg_class long_reg(
360 S7, S7_H,
361 S0, S0_H,
362 S1, S1_H,
363 S2, S2_H,
364 S4, S4_H,
365 S3, S3_H,
366 T8, T8_H,
367 T2, T2_H,
368 T3, T3_H,
369 T1, T1_H,
370 A7, A7_H,
371 A6, A6_H,
372 A5, A5_H,
373 A4, A4_H,
374 A3, A3_H,
375 A2, A2_H,
376 A1, A1_H,
377 A0, A0_H,
378 T0, T0_H
379 );
382 // Floating point registers.
383 // 2012/8/23 Fu: F30/F31 are used as temporary registers in D2I
384 // 2016/12/1 aoqi: F31 are not used as temporary registers in D2I
// Allocatable single-precision FP registers. F30 is deliberately excluded
// (used as a temporary in D2I, see notes above); F31 is allocatable.
// Fixed: comma was missing between F17 and F18.
385 reg_class flt_reg( F0, F1, F2, F3, F4, F5, F6, F7, F8, F9, F10, F11, F12, F13, F14, F15, F16, F17, F18, F19, F20, F21, F22, F23, F24, F25, F26, F27, F28, F29, F31);
386 reg_class dbl_reg( F0, F0_H,
387 F1, F1_H,
388 F2, F2_H,
389 F3, F3_H,
390 F4, F4_H,
391 F5, F5_H,
392 F6, F6_H,
393 F7, F7_H,
394 F8, F8_H,
395 F9, F9_H,
396 F10, F10_H,
397 F11, F11_H,
398 F12, F12_H,
399 F13, F13_H,
400 F14, F14_H,
401 F15, F15_H,
402 F16, F16_H,
403 F17, F17_H,
404 F18, F18_H,
405 F19, F19_H,
406 F20, F20_H,
407 F21, F21_H,
408 F22, F22_H,
409 F23, F23_H,
410 F24, F24_H,
411 F25, F25_H,
412 F26, F26_H,
413 F27, F27_H,
414 F28, F28_H,
415 F29, F29_H,
416 F31, F31_H);
418 reg_class flt_arg0( F12 );
419 reg_class dbl_arg0( F12, F12_H );
420 reg_class dbl_arg1( F14, F14_H );
422 %}
424 //----------DEFINITION BLOCK---------------------------------------------------
425 // Define name --> value mappings to inform the ADLC of an integer valued name
426 // Current support includes integer values in the range [0, 0x7FFFFFFF]
427 // Format:
428 // int_def <name> ( <int_value>, <expression>);
429 // Generated Code in ad_<arch>.hpp
430 // #define <name> (<expression>)
431 // // value == <int_value>
432 // Generated code in ad_<arch>.cpp adlc_verification()
433 // assert( <name> == <int_value>, "Expect (<expression>) to equal <int_value>");
434 //
435 definitions %{
436 int_def DEFAULT_COST ( 100, 100);
437 int_def HUGE_COST (1000000, 1000000);
439 // Memory refs are twice as expensive as run-of-the-mill.
440 int_def MEMORY_REF_COST ( 200, DEFAULT_COST * 2);
442 // Branches are even more expensive.
443 int_def BRANCH_COST ( 300, DEFAULT_COST * 3);
444 // we use jr instruction to construct call, so more expensive
445 // by yjl 2/28/2006
446 int_def CALL_COST ( 500, DEFAULT_COST * 5);
447 /*
448 int_def EQUAL ( 1, 1 );
449 int_def NOT_EQUAL ( 2, 2 );
450 int_def GREATER ( 3, 3 );
451 int_def GREATER_EQUAL ( 4, 4 );
452 int_def LESS ( 5, 5 );
453 int_def LESS_EQUAL ( 6, 6 );
454 */
455 %}
459 //----------SOURCE BLOCK-------------------------------------------------------
460 // This is a block of C++ code which provides values, functions, and
461 // definitions necessary in the rest of the architecture description
463 source_hpp %{
464 // Header information of the source block.
465 // Method declarations/definitions which are used outside
466 // the ad-scope can conveniently be defined here.
467 //
468 // To keep related declarations/definitions/uses close together,
469 // we switch between source %{ }% and source_hpp %{ }% freely as needed.
471 class CallStubImpl {
473 //--------------------------------------------------------------
474 //---< Used for optimization in Compile::shorten_branches >---
475 //--------------------------------------------------------------
477 public:
478 // Size (in bytes) of a call trampoline stub.
479 static uint size_call_trampoline() {
480 return 0; // no call trampolines on this platform
481 }
483 // Number of relocation entries needed by a call trampoline stub.
484 static uint reloc_call_trampoline() {
485 return 0; // no call trampolines on this platform
486 }
487 };
489 class HandlerImpl {
491 public:
493 static int emit_exception_handler(CodeBuffer &cbuf);
494 static int emit_deopt_handler(CodeBuffer& cbuf);
496 static uint size_exception_handler() {
497 // NativeCall instruction size is the same as NativeJump.
498 // The exception handler starts out as a jump and can be patched to
499 // a call by deoptimization. (4932387)
500 // Note that this value is also credited (in output.cpp) to
501 // the size of the code section.
502 // return NativeJump::instruction_size;
503 int size = NativeCall::instruction_size;
504 return round_to(size, 16); // the handler is emitted 16-byte aligned (see emit_exception_handler)
505 }
507 #ifdef _LP64
508 static uint size_deopt_handler() {
509 int size = NativeCall::instruction_size;
510 return round_to(size, 16); // the handler is emitted 16-byte aligned (see emit_deopt_handler)
511 }
512 #else
513 static uint size_deopt_handler() {
514 // NativeCall instruction size is the same as NativeJump.
515 // The exception handler starts out as a jump and can be patched to
516 // a call by deoptimization. (4932387)
517 // Note that this value is also credited (in output.cpp) to
518 // the size of the code section.
519 return 5 + NativeJump::instruction_size; // pushl(); jmp;
520 }
521 #endif
522 };
524 %} // end source_hpp
526 source %{
528 #define NO_INDEX 0
529 #define RELOC_IMM64 Assembler::imm_operand
530 #define RELOC_DISP32 Assembler::disp32_operand
533 #define __ _masm.
536 // Emit exception handler code.
537 // Stuff framesize into a register and call a VM stub routine.
538 int HandlerImpl::emit_exception_handler(CodeBuffer& cbuf) {
539 /*
540 // Note that the code buffer's insts_mark is always relative to insts.
541 // That's why we must use the macroassembler to generate a handler.
542 MacroAssembler _masm(&cbuf);
543 address base = __ start_a_stub(size_exception_handler());
544 if (base == NULL) return 0; // CodeBuffer::expand failed
545 int offset = __ offset();
546 __ jump(RuntimeAddress(OptoRuntime::exception_blob()->entry_point()));
547 assert(__ offset() - offset <= (int) size_exception_handler(), "overflow");
548 __ end_a_stub();
549 return offset;
550 */
551 // Note that the code buffer's insts_mark is always relative to insts.
552 // That's why we must use the macroassembler to generate a handler.
553 MacroAssembler _masm(&cbuf);
554 address base =
555 __ start_a_stub(size_exception_handler());
556 if (base == NULL) return 0; // CodeBuffer::expand failed
557 int offset = __ offset();
559 __ block_comment("; emit_exception_handler");
561 /* 2012/9/25 FIXME Jin: According to X86, we should use a direct jump.
562 * * However, this will trigger an assert after the 40th method:
563 * *
564 * * 39 b java.lang.Throwable::<init> (25 bytes)
565 * * --- ns java.lang.Throwable::fillInStackTrace
566 * * 40 !b java.net.URLClassLoader::findClass (29 bytes)
567 * * /vm/opto/runtime.cpp, 900 , assert(caller.is_compiled_frame(),"must be")
568 * * 40 made not entrant (2) java.net.URLClassLoader::findClass (29 bytes)
569 * *
570 * * If we change from JR to JALR, the assert will disappear, but WebClient will
571 * * fail after the 403th method with unknown reason.
572 * */
573 __ li48(T9, (long)OptoRuntime::exception_blob()->entry_point());
574 __ jr(T9); // indirect jump (jr, not jalr -- see the FIXME above)
575 __ delayed()->nop(); // branch delay slot
576 __ align(16); // pad to the size promised by size_exception_handler()
577 assert(__ offset() - offset <= (int) size_exception_handler(), "overflow");
578 __ end_a_stub();
579 return offset;
580 }
582 // Emit deopt handler code.
583 int HandlerImpl::emit_deopt_handler(CodeBuffer& cbuf) {
584 // Note that the code buffer's insts_mark is always relative to insts.
585 // That's why we must use the macroassembler to generate a handler.
586 MacroAssembler _masm(&cbuf);
587 address base =
588 __ start_a_stub(size_deopt_handler());
590 // FIXME
591 if (base == NULL) return 0; // CodeBuffer::expand failed
592 int offset = __ offset();
594 __ block_comment("; emit_deopt_handler");
596 cbuf.set_insts_mark();
597 __ relocate(relocInfo::runtime_call_type);
599 __ li48(T9, (long)SharedRuntime::deopt_blob()->unpack());
600 __ jalr(T9); // jalr (unlike jr in the exception handler): records the return address
601 __ delayed()->nop(); // branch delay slot
602 __ align(16); // pad to the size promised by size_deopt_handler()
603 assert(__ offset() - offset <= (int) size_deopt_handler(), "overflow");
604 __ end_a_stub();
605 return offset;
606 }
// Report whether an ideal opcode has a usable match rule on this platform.
// A rule may exist in the AD file yet still be disabled by a runtime flag.
609 const bool Matcher::match_rule_supported(int opcode) {
610 if (!has_match_rule(opcode))
611 return false;
613 switch (opcode) {
614 //Op_CountLeadingZerosI Op_CountLeadingZerosL can be deleted, all MIPS CPUs support clz & dclz.
615 case Op_CountLeadingZerosI:
616 case Op_CountLeadingZerosL:
617 if (!UseCountLeadingZerosInstruction)
618 return false;
619 break;
620 case Op_CountTrailingZerosI:
621 case Op_CountTrailingZerosL:
622 if (!UseCountTrailingZerosInstruction)
623 return false;
624 break;
625 }
627 return true; // Per default match rules are supported.
628 }
630 //FIXME
631 // emit call stub, compiled java to interpreter
632 void emit_java_to_interp(CodeBuffer &cbuf ) {
633 // Stub is fixed up when the corresponding call is converted from calling
634 // compiled code to calling interpreted code.
635 // mov rbx,0
636 // jmp -1
638 address mark = cbuf.insts_mark(); // get mark within main instrs section
640 // Note that the code buffer's insts_mark is always relative to insts.
641 // That's why we must use the macroassembler to generate a stub.
642 MacroAssembler _masm(&cbuf);
644 address base =
645 __ start_a_stub(Compile::MAX_stubs_size);
646 if (base == NULL) return; // CodeBuffer::expand failed
647 // static stub relocation stores the instruction address of the call
649 __ relocate(static_stub_Relocation::spec(mark), 0);
651 /* 2012/10/29 Jin: Rmethod contains methodOop, it should be relocated for GC */
652 /*
653 int oop_index = __ oop_recorder()->allocate_index(NULL);
654 RelocationHolder rspec = oop_Relocation::spec(oop_index);
655 __ relocate(rspec);
656 */
658 // static stub relocation also tags the methodOop in the code-stream.
659 __ li48(S3, (long)0); // placeholder; fixed up later (see comment at top of stub)
660 // This is recognized as unresolved by relocs/nativeInst/ic code
662 __ relocate(relocInfo::runtime_call_type);
664 cbuf.set_insts_mark();
665 address call_pc = (address)-1;
666 __ li48(AT, (long)call_pc); // placeholder target address, patched on resolution
667 __ jr(AT);
668 __ nop(); // branch delay slot
669 __ align(16); // keep the stub 16-byte aligned, matching size_java_to_interp()
670 __ end_a_stub();
671 // Update current stubs pointer and restore code_end.
672 }
674 // Size of the call stub, compiled java to interpreter.
675 uint size_java_to_interp() {
676 int size = 4 * 4 + NativeCall::instruction_size; // sizeof(li48) + NativeCall::instruction_size
677 return round_to(size, 16); // stub is emitted 16-byte aligned (see emit_java_to_interp)
678 }
680 // Number of relocation entries for the call stub, compiled java to interpreter.
681 uint reloc_java_to_interp() {
682 return 16; // in emit_java_to_interp + in Java_Static_Call
683 }
// Return true if a branch of br_size bytes can reach the given offset with a
// short-branch encoding. On MIPS the short form covers any signed 16-bit
// (word) displacement; larger offsets are not implemented yet.
685 bool Matcher::is_short_branch_offset(int rule, int br_size, int offset) {
686 if( Assembler::is_simm16(offset) ) return true;
687 else
688 {
689 assert(false, "Not implemented yet !" );
690 Unimplemented();
691 }
return false; // fix: not reached (Unimplemented() aborts), but all paths must return a value
692 }
695 // No additional cost for CMOVL.
696 const int Matcher::long_cmove_cost() { return 0; }
698 // No CMOVF/CMOVD with SSE2
699 const int Matcher::float_cmove_cost() { return ConditionalMoveLimit; }
701 // Does the CPU require late expand (see block.cpp for description of late expand)?
702 const bool Matcher::require_postalloc_expand = false;
704 // Should the Matcher clone shifts on addressing modes, expecting them
705 // to be subsumed into complex addressing expressions or compute them
706 // into registers? True for Intel but false for most RISCs
707 const bool Matcher::clone_shift_expressions = false;
709 // Do we need to mask the count passed to shift instructions or does
710 // the cpu only look at the lower 5/6 bits anyway?
711 const bool Matcher::need_masked_shift_count = false;
713 bool Matcher::narrow_oop_use_complex_address() {
714 NOT_LP64(ShouldNotCallThis());
715 assert(UseCompressedOops, "only for compressed oops code");
716 return false;
717 }
719 bool Matcher::narrow_klass_use_complex_address() {
720 NOT_LP64(ShouldNotCallThis());
721 assert(UseCompressedClassPointers, "only for compressed klass code");
722 return false;
723 }
725 // This is UltraSparc specific, true just means we have fast l2f conversion
726 const bool Matcher::convL2FSupported(void) {
727 return true;
728 }
730 // Max vector size in bytes. 0 if not supported.
731 const int Matcher::vector_width_in_bytes(BasicType bt) {
732 assert(MaxVectorSize == 8, "");
733 return 8;
734 }
736 // Vector ideal reg
737 const int Matcher::vector_ideal_reg(int size) {
738 assert(MaxVectorSize == 8, "");
739 switch(size) {
740 case 8: return Op_VecD;
741 }
742 ShouldNotReachHere();
743 return 0;
744 }
746 // Only lowest bits of xmm reg are used for vector shift count.
747 const int Matcher::vector_shift_count_ideal_reg(int size) {
748 fatal("vector shift is not supported");
749 return Node::NotAMachineReg;
750 }
752 // Limits on vector size (number of elements) loaded into vector.
753 const int Matcher::max_vector_size(const BasicType bt) {
754 assert(is_java_primitive(bt), "only primitive type vectors");
755 return vector_width_in_bytes(bt)/type2aelembytes(bt);
756 }
758 const int Matcher::min_vector_size(const BasicType bt) {
759 return max_vector_size(bt); // Same as max.
760 }
762 // MIPS supports misaligned vectors store/load? FIXME
763 const bool Matcher::misaligned_vectors_ok() {
764 return false;
765 //return !AlignVector; // can be changed by flag
766 }
768 // Register for DIVI projection of divmodI
769 RegMask Matcher::divI_proj_mask() {
770 ShouldNotReachHere();
771 return RegMask();
772 }
774 // Register for MODI projection of divmodI
775 RegMask Matcher::modI_proj_mask() {
776 ShouldNotReachHere();
777 return RegMask();
778 }
780 // Register for DIVL projection of divmodL
781 RegMask Matcher::divL_proj_mask() {
782 ShouldNotReachHere();
783 return RegMask();
784 }
786 int Matcher::regnum_to_fpu_offset(int regnum) {
787 return regnum - 32; // The FP registers are in the second chunk
788 }
791 const bool Matcher::isSimpleConstant64(jlong value) {
792 // Will one (StoreL ConL) be cheaper than two (StoreI ConI)?.
793 return true;
794 }
797 // Return whether or not this register is ever used as an argument. This
798 // function is used on startup to build the trampoline stubs in generateOptoStub.
799 // Registers not mentioned will be killed by the VM call in the trampoline, and
800 // arguments in those registers not be available to the callee.
801 bool Matcher::can_be_java_arg( int reg ) {
802 /* Refer to: [sharedRuntime_mips_64.cpp] SharedRuntime::java_calling_convention() */
803 if ( reg == T0_num || reg == T0_H_num
804 || reg == A0_num || reg == A0_H_num
805 || reg == A1_num || reg == A1_H_num
806 || reg == A2_num || reg == A2_H_num
807 || reg == A3_num || reg == A3_H_num
808 || reg == A4_num || reg == A4_H_num
809 || reg == A5_num || reg == A5_H_num
810 || reg == A6_num || reg == A6_H_num
811 || reg == A7_num || reg == A7_H_num )
812 return true;
814 if ( reg == F12_num || reg == F12_H_num
815 || reg == F13_num || reg == F13_H_num
816 || reg == F14_num || reg == F14_H_num
817 || reg == F15_num || reg == F15_H_num
818 || reg == F16_num || reg == F16_H_num
819 || reg == F17_num || reg == F17_H_num
820 || reg == F18_num || reg == F18_H_num
821 || reg == F19_num || reg == F19_H_num )
822 return true;
824 return false;
825 }
827 bool Matcher::is_spillable_arg( int reg ) {
828 return can_be_java_arg(reg);
829 }
831 bool Matcher::use_asm_for_ldiv_by_con( jlong divisor ) {
832 return false;
833 }
835 // Register for MODL projection of divmodL
836 RegMask Matcher::modL_proj_mask() {
837 ShouldNotReachHere();
838 return RegMask();
839 }
841 const RegMask Matcher::method_handle_invoke_SP_save_mask() {
842 return FP_REG_mask();
843 }
845 // MIPS doesn't support AES intrinsics
846 const bool Matcher::pass_original_key_for_aes() {
847 return false;
848 }
850 // The address of the call instruction needs to be 16-byte aligned to
851 // ensure that it does not span a cache line so that it can be patched.
853 int CallStaticJavaDirectNode::compute_padding(int current_offset) const {
854 //lui
855 //ori
856 //dsll
857 //ori
859 //jalr
860 //nop
862 return round_to(current_offset, alignment_required()) - current_offset;
863 }
865 // The address of the call instruction needs to be 16-byte aligned to
866 // ensure that it does not span a cache line so that it can be patched.
867 int CallDynamicJavaDirectNode::compute_padding(int current_offset) const {
868 //li64 <--- skip
870 //lui
871 //ori
872 //dsll
873 //ori
875 //jalr
876 //nop
878 current_offset += 4 * 6; // skip li64
879 return round_to(current_offset, alignment_required()) - current_offset;
880 }
882 int CallLeafNoFPDirectNode::compute_padding(int current_offset) const {
883 //lui
884 //ori
885 //dsll
886 //ori
888 //jalr
889 //nop
891 return round_to(current_offset, alignment_required()) - current_offset;
892 }
894 int CallLeafDirectNode::compute_padding(int current_offset) const {
895 //lui
896 //ori
897 //dsll
898 //ori
900 //jalr
901 //nop
903 return round_to(current_offset, alignment_required()) - current_offset;
904 }
906 int CallRuntimeDirectNode::compute_padding(int current_offset) const {
907 //lui
908 //ori
909 //dsll
910 //ori
912 //jalr
913 //nop
915 return round_to(current_offset, alignment_required()) - current_offset;
916 }
918 // If CPU can load and store mis-aligned doubles directly then no fixup is
919 // needed. Else we split the double into 2 integer pieces and move it
920 // piece-by-piece. Only happens when passing doubles into C code as the
921 // Java calling convention forces doubles to be aligned.
922 const bool Matcher::misaligned_doubles_ok = false;
923 // Do floats take an entire double register or just half?
924 //const bool Matcher::float_in_double = true;
925 bool Matcher::float_in_double() { return false; }
926 // Threshold size for cleararray.
927 const int Matcher::init_array_short_size = 8 * BytesPerLong;
928 // Do ints take an entire long register or just half?
929 const bool Matcher::int_in_long = true;
930 // Is it better to copy float constants, or load them directly from memory?
931 // Intel can load a float constant from a direct address, requiring no
932 // extra registers. Most RISCs will have to materialize an address into a
933 // register first, so they would do better to copy the constant from stack.
934 const bool Matcher::rematerialize_float_constants = false;
935 // Advertise here if the CPU requires explicit rounding operations
936 // to implement the UseStrictFP mode.
937 const bool Matcher::strict_fp_requires_explicit_rounding = false;
938 // The ecx parameter to rep stos for the ClearArray node is in dwords.
939 const bool Matcher::init_array_count_is_in_bytes = false;
942 // Indicate if the safepoint node needs the polling page as an input.
943 // Since MIPS doesn't have absolute addressing, it needs.
944 bool SafePointNode::needs_polling_address_input() {
945 return true;
946 }
// !!!!! Special hack to get all type of calls to specify the byte offset
// from the start of the call to the point where the return address
// will point.
int MachCallStaticJavaNode::ret_addr_offset() {
  assert(NativeCall::instruction_size == 24, "in MachCallStaticJavaNode::ret_addr_offset");
  // The call is a fixed six-instruction sequence:
  //   lui / ori / dsll / ori / jalr / nop
  // so the return address lies NativeCall::instruction_size (24) bytes in.
  return NativeCall::instruction_size;
}
int MachCallDynamicJavaNode::ret_addr_offset() {
  // Must be kept in sync with Java_Dynamic_Call.
  assert(NativeCall::instruction_size == 24, "in MachCallDynamicJavaNode::ret_addr_offset");
  // The inline-cache klass (IC_Klass) is materialized first
  // (lui / ori / dsll / ori), then the call sequence itself
  // (lui T9 / ori T9 / dsll T9 / ori T9 / jalr T9 / nop).
  // NOTE(review): the prologue comment listed only 4 IC_Klass instructions
  // (16 bytes) but the code reserves 6 * 4 = 24 bytes for it -- confirm
  // against the Java_Dynamic_Call encoding.
  return 6 * 4 + NativeCall::instruction_size;
}
983 //=============================================================================
985 // Figure out which register class each belongs in: rc_int, rc_float, rc_stack
986 enum RC { rc_bad, rc_int, rc_float, rc_stack };
987 static enum RC rc_class( OptoReg::Name reg ) {
988 if( !OptoReg::is_valid(reg) ) return rc_bad;
989 if (OptoReg::is_stack(reg)) return rc_stack;
990 VMReg r = OptoReg::as_VMReg(reg);
991 if (r->is_Register()) return rc_int;
992 assert(r->is_FloatRegister(), "must be");
993 return rc_float;
994 }
// Emit, size, or pretty-print the code for one register-allocator spill
// copy.  Exactly one mode is active per call:
//   cbuf != NULL            -> emit machine code into cbuf
//   cbuf == NULL, !do_size  -> print assembly text to st (non-PRODUCT)
//   do_size                 -> only accumulate and return the byte size
// Each side of the copy is classified via rc_class() as GPR (rc_int),
// FPR (rc_float) or stack slot (rc_stack).  A 64-bit move is recognized
// when the first half is even-numbered and the second half is the
// adjacent register/slot; otherwise a 32-bit move is emitted.
// AT and T9 serve as scratch registers for the memory cases.
uint MachSpillCopyNode::implementation( CodeBuffer *cbuf, PhaseRegAlloc *ra_, bool do_size, outputStream* st ) const {
  // Get registers to move
  OptoReg::Name src_second = ra_->get_reg_second(in(1));
  OptoReg::Name src_first = ra_->get_reg_first(in(1));
  OptoReg::Name dst_second = ra_->get_reg_second(this );
  OptoReg::Name dst_first = ra_->get_reg_first(this );

  enum RC src_second_rc = rc_class(src_second);
  enum RC src_first_rc = rc_class(src_first);
  enum RC dst_second_rc = rc_class(dst_second);
  enum RC dst_first_rc = rc_class(dst_first);

  assert(OptoReg::is_valid(src_first) && OptoReg::is_valid(dst_first), "must move at least 1 register" );

  // Generate spill code!
  int size = 0;

  if( src_first == dst_first && src_second == dst_second )
    return 0; // Self copy, no move

  if (src_first_rc == rc_stack) {
    // mem ->
    if (dst_first_rc == rc_stack) {
      // mem -> mem (bounced through scratch register AT)
      assert(src_second != dst_first, "overlap");
      if ((src_first & 1) == 0 && src_first + 1 == src_second &&
          (dst_first & 1) == 0 && dst_first + 1 == dst_second) {
        // 64-bit: one ld + one sd = two 4-byte instructions
        int src_offset = ra_->reg2offset(src_first);
        int dst_offset = ra_->reg2offset(dst_first);
        if (cbuf) {
          MacroAssembler _masm(cbuf);
          __ ld(AT, Address(SP, src_offset));
          __ sd(AT, Address(SP, dst_offset));
#ifndef PRODUCT
        } else {
          if(!do_size){
            if (size != 0) st->print("\n\t");
            st->print("ld AT, [SP + #%d]\t# 64-bit mem-mem spill 1\n\t"
                      "sd AT, [SP + #%d]",
                      src_offset, dst_offset);
          }
#endif
        }
        size += 8;
      } else {
        // 32-bit
        assert(!((src_first & 1) == 0 && src_first + 1 == src_second), "no transform");
        assert(!((dst_first & 1) == 0 && dst_first + 1 == dst_second), "no transform");
        // No pushl/popl, so:
        int src_offset = ra_->reg2offset(src_first);
        int dst_offset = ra_->reg2offset(dst_first);
        if (cbuf) {
          MacroAssembler _masm(cbuf);
          __ lw(AT, Address(SP, src_offset));
          __ sw(AT, Address(SP, dst_offset));
#ifndef PRODUCT
        } else {
          if(!do_size){
            if (size != 0) st->print("\n\t");
            st->print("lw AT, [SP + #%d] spill 2\n\t"
                      "sw AT, [SP + #%d]\n\t",
                      src_offset, dst_offset);
          }
#endif
        }
        size += 8;
      }
      return size;
    } else if (dst_first_rc == rc_int) {
      // mem -> gpr
      if ((src_first & 1) == 0 && src_first + 1 == src_second &&
          (dst_first & 1) == 0 && dst_first + 1 == dst_second) {
        // 64-bit
        int offset = ra_->reg2offset(src_first);
        if (cbuf) {
          MacroAssembler _masm(cbuf);
          __ ld(as_Register(Matcher::_regEncode[dst_first]), Address(SP, offset));
#ifndef PRODUCT
        } else {
          if(!do_size){
            if (size != 0) st->print("\n\t");
            st->print("ld %s, [SP + #%d]\t# spill 3",
                      Matcher::regName[dst_first],
                      offset);
          }
#endif
        }
        size += 4;
      } else {
        // 32-bit
        assert(!((src_first & 1) == 0 && src_first + 1 == src_second), "no transform");
        assert(!((dst_first & 1) == 0 && dst_first + 1 == dst_second), "no transform");
        int offset = ra_->reg2offset(src_first);
        if (cbuf) {
          MacroAssembler _masm(cbuf);
          // lw sign-extends (correct for ints); lwu zero-extends for the
          // other 32-bit ideal types.
          if (this->ideal_reg() == Op_RegI)
            __ lw(as_Register(Matcher::_regEncode[dst_first]), Address(SP, offset));
          else
            __ lwu(as_Register(Matcher::_regEncode[dst_first]), Address(SP, offset));
#ifndef PRODUCT
        } else {
          if(!do_size){
            if (size != 0) st->print("\n\t");
            if (this->ideal_reg() == Op_RegI)
              st->print("lw %s, [SP + #%d]\t# spill 4",
                        Matcher::regName[dst_first],
                        offset);
            else
              st->print("lwu %s, [SP + #%d]\t# spill 5",
                        Matcher::regName[dst_first],
                        offset);
          }
#endif
        }
        size += 4;
      }
      return size;
    } else if (dst_first_rc == rc_float) {
      // mem-> xmm  (FPR; "xmm" naming inherited from the x86 ad file)
      if ((src_first & 1) == 0 && src_first + 1 == src_second &&
          (dst_first & 1) == 0 && dst_first + 1 == dst_second) {
        // 64-bit
        int offset = ra_->reg2offset(src_first);
        if (cbuf) {
          MacroAssembler _masm(cbuf);
          __ ldc1( as_FloatRegister(Matcher::_regEncode[dst_first]), Address(SP, offset));
#ifndef PRODUCT
        } else {
          if(!do_size){
            if (size != 0) st->print("\n\t");
            st->print("ldc1 %s, [SP + #%d]\t# spill 6",
                      Matcher::regName[dst_first],
                      offset);
          }
#endif
        }
        size += 4;
      } else {
        // 32-bit
        assert(!((src_first & 1) == 0 && src_first + 1 == src_second), "no transform");
        assert(!((dst_first & 1) == 0 && dst_first + 1 == dst_second), "no transform");
        int offset = ra_->reg2offset(src_first);
        if (cbuf) {
          MacroAssembler _masm(cbuf);
          __ lwc1( as_FloatRegister(Matcher::_regEncode[dst_first]), Address(SP, offset));
#ifndef PRODUCT
        } else {
          if(!do_size){
            if (size != 0) st->print("\n\t");
            st->print("lwc1 %s, [SP + #%d]\t# spill 7",
                      Matcher::regName[dst_first],
                      offset);
          }
#endif
        }
        size += 4;
      }
      return size;
    }
  } else if (src_first_rc == rc_int) {
    // gpr ->
    if (dst_first_rc == rc_stack) {
      // gpr -> mem
      if ((src_first & 1) == 0 && src_first + 1 == src_second &&
          (dst_first & 1) == 0 && dst_first + 1 == dst_second) {
        // 64-bit
        int offset = ra_->reg2offset(dst_first);
        if (cbuf) {
          MacroAssembler _masm(cbuf);
          __ sd(as_Register(Matcher::_regEncode[src_first]), Address(SP, offset));
#ifndef PRODUCT
        } else {
          if(!do_size){
            if (size != 0) st->print("\n\t");
            st->print("sd %s, [SP + #%d] # spill 8",
                      Matcher::regName[src_first],
                      offset);
          }
#endif
        }
        size += 4;
      } else {
        // 32-bit
        assert(!((src_first & 1) == 0 && src_first + 1 == src_second), "no transform");
        assert(!((dst_first & 1) == 0 && dst_first + 1 == dst_second), "no transform");
        int offset = ra_->reg2offset(dst_first);
        if (cbuf) {
          MacroAssembler _masm(cbuf);
          __ sw(as_Register(Matcher::_regEncode[src_first]), Address(SP, offset));
#ifndef PRODUCT
        } else {
          if(!do_size){
            if (size != 0) st->print("\n\t");
            st->print("sw %s, [SP + #%d]\t# spill 9",
                      Matcher::regName[src_first], offset);
          }
#endif
        }
        size += 4;
      }
      return size;
    } else if (dst_first_rc == rc_int) {
      // gpr -> gpr
      if ((src_first & 1) == 0 && src_first + 1 == src_second &&
          (dst_first & 1) == 0 && dst_first + 1 == dst_second) {
        // 64-bit
        if (cbuf) {
          MacroAssembler _masm(cbuf);
          __ move(as_Register(Matcher::_regEncode[dst_first]),
                  as_Register(Matcher::_regEncode[src_first]));
#ifndef PRODUCT
        } else {
          if(!do_size){
            if (size != 0) st->print("\n\t");
            st->print("move(64bit) %s <-- %s\t# spill 10",
                      Matcher::regName[dst_first],
                      Matcher::regName[src_first]);
          }
#endif
        }
        size += 4;
        return size;
      } else {
        // 32-bit
        assert(!((src_first & 1) == 0 && src_first + 1 == src_second), "no transform");
        assert(!((dst_first & 1) == 0 && dst_first + 1 == dst_second), "no transform");
        if (cbuf) {
          MacroAssembler _masm(cbuf);
          // NOTE(review): ints get a 32-bit move (move_u32) while other
          // 32-bit ideal types are copied with a full 64-bit daddu -- the
          // distinction presumably preserves the required extension of the
          // upper 32 bits; confirm.
          if (this->ideal_reg() == Op_RegI)
            __ move_u32(as_Register(Matcher::_regEncode[dst_first]), as_Register(Matcher::_regEncode[src_first]));
          else
            __ daddu(as_Register(Matcher::_regEncode[dst_first]), as_Register(Matcher::_regEncode[src_first]), R0);
#ifndef PRODUCT
        } else {
          if(!do_size){
            if (size != 0) st->print("\n\t");
            st->print("move(32-bit) %s <-- %s\t# spill 11",
                      Matcher::regName[dst_first],
                      Matcher::regName[src_first]);
          }
#endif
        }
        size += 4;
        return size;
      }
    } else if (dst_first_rc == rc_float) {
      // gpr -> xmm
      if ((src_first & 1) == 0 && src_first + 1 == src_second &&
          (dst_first & 1) == 0 && dst_first + 1 == dst_second) {
        // 64-bit
        if (cbuf) {
          MacroAssembler _masm(cbuf);
          __ dmtc1(as_Register(Matcher::_regEncode[src_first]), as_FloatRegister(Matcher::_regEncode[dst_first]));
#ifndef PRODUCT
        } else {
          // NOTE(review): the actual instruction takes (src GPR, dst FPR);
          // this listing prints dst before src -- confirm the intended
          // operand order of the debug text.
          if(!do_size){
            if (size != 0) st->print("\n\t");
            st->print("dmtc1 %s, %s\t# spill 12",
                      Matcher::regName[dst_first],
                      Matcher::regName[src_first]);
          }
#endif
        }
        size += 4;
      } else {
        // 32-bit
        assert(!((src_first & 1) == 0 && src_first + 1 == src_second), "no transform");
        assert(!((dst_first & 1) == 0 && dst_first + 1 == dst_second), "no transform");
        if (cbuf) {
          MacroAssembler _masm(cbuf);
          __ mtc1( as_Register(Matcher::_regEncode[src_first]), as_FloatRegister(Matcher::_regEncode[dst_first]) );
#ifndef PRODUCT
        } else {
          if(!do_size){
            if (size != 0) st->print("\n\t");
            st->print("mtc1 %s, %s\t# spill 13",
                      Matcher::regName[dst_first],
                      Matcher::regName[src_first]);
          }
#endif
        }
        size += 4;
      }
      return size;
    }
  } else if (src_first_rc == rc_float) {
    // xmm ->
    if (dst_first_rc == rc_stack) {
      // xmm -> mem
      if ((src_first & 1) == 0 && src_first + 1 == src_second &&
          (dst_first & 1) == 0 && dst_first + 1 == dst_second) {
        // 64-bit
        int offset = ra_->reg2offset(dst_first);
        if (cbuf) {
          MacroAssembler _masm(cbuf);
          __ sdc1( as_FloatRegister(Matcher::_regEncode[src_first]), Address(SP, offset) );
#ifndef PRODUCT
        } else {
          if(!do_size){
            if (size != 0) st->print("\n\t");
            st->print("sdc1 %s, [SP + #%d]\t# spill 14",
                      Matcher::regName[src_first],
                      offset);
          }
#endif
        }
        size += 4;
      } else {
        // 32-bit
        assert(!((src_first & 1) == 0 && src_first + 1 == src_second), "no transform");
        assert(!((dst_first & 1) == 0 && dst_first + 1 == dst_second), "no transform");
        int offset = ra_->reg2offset(dst_first);
        if (cbuf) {
          MacroAssembler _masm(cbuf);
          __ swc1(as_FloatRegister(Matcher::_regEncode[src_first]), Address(SP, offset));
#ifndef PRODUCT
        } else {
          if(!do_size){
            if (size != 0) st->print("\n\t");
            st->print("swc1 %s, [SP + #%d]\t# spill 15",
                      Matcher::regName[src_first],
                      offset);
          }
#endif
        }
        size += 4;
      }
      return size;
    } else if (dst_first_rc == rc_int) {
      // xmm -> gpr
      if ((src_first & 1) == 0 && src_first + 1 == src_second &&
          (dst_first & 1) == 0 && dst_first + 1 == dst_second) {
        // 64-bit
        if (cbuf) {
          MacroAssembler _masm(cbuf);
          __ dmfc1( as_Register(Matcher::_regEncode[dst_first]), as_FloatRegister(Matcher::_regEncode[src_first]));
#ifndef PRODUCT
        } else {
          if(!do_size){
            if (size != 0) st->print("\n\t");
            st->print("dmfc1 %s, %s\t# spill 16",
                      Matcher::regName[dst_first],
                      Matcher::regName[src_first]);
          }
#endif
        }
        size += 4;
      } else {
        // 32-bit
        assert(!((src_first & 1) == 0 && src_first + 1 == src_second), "no transform");
        assert(!((dst_first & 1) == 0 && dst_first + 1 == dst_second), "no transform");
        if (cbuf) {
          MacroAssembler _masm(cbuf);
          __ mfc1( as_Register(Matcher::_regEncode[dst_first]), as_FloatRegister(Matcher::_regEncode[src_first]));
#ifndef PRODUCT
        } else {
          if(!do_size){
            if (size != 0) st->print("\n\t");
            st->print("mfc1 %s, %s\t# spill 17",
                      Matcher::regName[dst_first],
                      Matcher::regName[src_first]);
          }
#endif
        }
        size += 4;
      }
      return size;
    } else if (dst_first_rc == rc_float) {
      // xmm -> xmm
      if ((src_first & 1) == 0 && src_first + 1 == src_second &&
          (dst_first & 1) == 0 && dst_first + 1 == dst_second) {
        // 64-bit
        if (cbuf) {
          MacroAssembler _masm(cbuf);
          __ mov_d( as_FloatRegister(Matcher::_regEncode[dst_first]), as_FloatRegister(Matcher::_regEncode[src_first]));
#ifndef PRODUCT
        } else {
          if(!do_size){
            if (size != 0) st->print("\n\t");
            st->print("mov_d %s <-- %s\t# spill 18",
                      Matcher::regName[dst_first],
                      Matcher::regName[src_first]);
          }
#endif
        }
        size += 4;
      } else {
        // 32-bit
        assert(!((src_first & 1) == 0 && src_first + 1 == src_second), "no transform");
        assert(!((dst_first & 1) == 0 && dst_first + 1 == dst_second), "no transform");
        if (cbuf) {
          MacroAssembler _masm(cbuf);
          __ mov_s( as_FloatRegister(Matcher::_regEncode[dst_first]), as_FloatRegister(Matcher::_regEncode[src_first]));
#ifndef PRODUCT
        } else {
          if(!do_size){
            if (size != 0) st->print("\n\t");
            st->print("mov_s %s <-- %s\t# spill 19",
                      Matcher::regName[dst_first],
                      Matcher::regName[src_first]);
          }
#endif
        }
        size += 4;
      }
      return size;
    }
  }

  // All valid class combinations return above; reaching here is a bug.
  assert(0," foo ");
  Unimplemented();
  return size;

}
#ifndef PRODUCT
// Debug-only pretty printer: run implementation() in formatting mode.
void MachSpillCopyNode::format( PhaseRegAlloc *ra_, outputStream* st ) const {
  implementation( NULL, ra_, false, st );
}
#endif
// Emit the spill-copy code into the code buffer.
void MachSpillCopyNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
  implementation( &cbuf, ra_, false, NULL );
}
// Size of the spill copy in bytes: run implementation() in sizing mode.
uint MachSpillCopyNode::size(PhaseRegAlloc *ra_) const {
  return implementation( NULL, ra_, true, NULL );
}
1427 //=============================================================================
1428 #
#ifndef PRODUCT
// Debug-only listing for a breakpoint node.  "INT3" is the label carried
// over from the x86 ad file; the emitter below issues __ int3().
void MachBreakpointNode::format( PhaseRegAlloc *, outputStream* st ) const {
  st->print("INT3");
}
#endif
// Emit a single breakpoint instruction.
void MachBreakpointNode::emit(CodeBuffer &cbuf, PhaseRegAlloc* ra_) const {
  MacroAssembler _masm(&cbuf);
  __ int3();
}
// Compute the node's size generically from the emitted code.
uint MachBreakpointNode::size(PhaseRegAlloc* ra_) const {
  return MachNode::size(ra_);
}
1446 //=============================================================================
1447 #ifndef PRODUCT
1448 void MachEpilogNode::format( PhaseRegAlloc *ra_, outputStream* st ) const {
1449 Compile *C = ra_->C;
1450 int framesize = C->frame_size_in_bytes();
1452 assert((framesize & (StackAlignmentInBytes-1)) == 0, "frame size not aligned");
1454 st->print("daddiu SP, SP, %d # Rlease stack @ MachEpilogNode",framesize);
1455 st->cr(); st->print("\t");
1456 if (UseLoongsonISA) {
1457 st->print("gslq RA, FP, SP, %d # Restore FP & RA @ MachEpilogNode", -wordSize*2);
1458 } else {
1459 st->print("ld RA, SP, %d # Restore RA @ MachEpilogNode", -wordSize);
1460 st->cr(); st->print("\t");
1461 st->print("ld FP, SP, %d # Restore FP @ MachEpilogNode", -wordSize*2);
1462 }
1464 if( do_polling() && C->is_method_compilation() ) {
1465 st->print("Poll Safepoint # MachEpilogNode");
1466 }
1467 }
1468 #endif
// Emit the method epilog: pop the frame, restore RA/FP, and (for normal
// method compilations) perform the return safepoint poll.
void MachEpilogNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
  Compile *C = ra_->C;
  MacroAssembler _masm(&cbuf);
  int framesize = C->frame_size_in_bytes();

  assert((framesize & (StackAlignmentInBytes-1)) == 0, "frame size not aligned");

  // Release the frame.
  __ daddiu(SP, SP, framesize);

  // Restore RA and FP from their canonical slots just above the new SP:
  // one quad-word load on Loongson, two doubleword loads otherwise.
  if (UseLoongsonISA) {
    __ gslq(RA, FP, SP, -wordSize*2);
  } else {
    __ ld(RA, SP, -wordSize );
    __ ld(FP, SP, -wordSize*2 );
  }

  // The epilog in a RuntimeStub should not contain a safepoint, hence the
  // is_method_compilation() guard.
  if( do_polling() && C->is_method_compilation() ) {
#ifndef OPT_SAFEPOINT
    // Load from the polling page; the relocation lets the VM recognize
    // (and arm) the return poll.
    __ set64(AT, (long)os::get_polling_page());
    __ relocate(relocInfo::poll_return_type);
    __ lw(AT, AT, 0);
#else
    __ lui(AT, Assembler::split_high((intptr_t)os::get_polling_page()));
    __ relocate(relocInfo::poll_return_type);
    __ lw(AT, AT, Assembler::split_low((intptr_t)os::get_polling_page()));
#endif
  }
}
// Epilog length varies (Loongson vs plain MIPS, polling); measure the
// emitted code rather than predicting it.
uint MachEpilogNode::size(PhaseRegAlloc *ra_) const {
  return MachNode::size(ra_);
}
// Estimate of relocation entries the epilog may need.
int MachEpilogNode::reloc() const {
  return 0; // a large enough number
}
// Use the generic pipeline description for the epilog.
const Pipeline * MachEpilogNode::pipeline() const {
  return MachNode::pipeline_class();
}
// Offset of the safepoint poll within the epilog; presumably unused on
// this port since it always returns 0 -- confirm against callers.
int MachEpilogNode::safepoint_offset() const { return 0; }
1514 //=============================================================================
1516 #ifndef PRODUCT
1517 void BoxLockNode::format( PhaseRegAlloc *ra_, outputStream* st ) const {
1518 int offset = ra_->reg2offset(in_RegMask(0).find_first_elem());
1519 int reg = ra_->get_reg_first(this);
1520 st->print("ADDI %s, SP, %d @BoxLockNode",Matcher::regName[reg],offset);
1521 }
1522 #endif
// A BoxLock emits exactly one 4-byte addi instruction (see emit() below).
uint BoxLockNode::size(PhaseRegAlloc *ra_) const {
  return 4;
}
1529 void BoxLockNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
1530 MacroAssembler _masm(&cbuf);
1531 int offset = ra_->reg2offset(in_RegMask(0).find_first_elem());
1532 int reg = ra_->get_encode(this);
1534 __ addi(as_Register(reg), SP, offset);
1535 /*
1536 if( offset >= 128 ) {
1537 emit_opcode(cbuf, 0x8D); // LEA reg,[SP+offset]
1538 emit_rm(cbuf, 0x2, reg, 0x04);
1539 emit_rm(cbuf, 0x0, 0x04, SP_enc);
1540 emit_d32(cbuf, offset);
1541 }
1542 else {
1543 emit_opcode(cbuf, 0x8D); // LEA reg,[SP+offset]
1544 emit_rm(cbuf, 0x1, reg, 0x04);
1545 emit_rm(cbuf, 0x0, 0x04, SP_enc);
1546 emit_d8(cbuf, offset);
1547 }
1548 */
1549 }
1552 //static int sizeof_FFree_Float_Stack_All = -1;
// Return-address offset for a runtime call: the full six-instruction
// sequence lui / ori / dsll / ori / jalr / nop (24 bytes).
int MachCallRuntimeNode::ret_addr_offset() {
  assert(NativeCall::instruction_size == 24, "in MachCallRuntimeNode::ret_addr_offset()");
  return NativeCall::instruction_size;
}
1570 //=============================================================================
#ifndef PRODUCT
// Debug-only listing: _count nops, 4 bytes each.
void MachNopNode::format( PhaseRegAlloc *, outputStream* st ) const {
  st->print("NOP \t# %d bytes pad for loops and calls", 4 * _count);
}
#endif
1577 void MachNopNode::emit(CodeBuffer &cbuf, PhaseRegAlloc * ) const {
1578 MacroAssembler _masm(&cbuf);
1579 int i = 0;
1580 for(i = 0; i < _count; i++)
1581 __ nop();
1582 }
// _count nops, 4 bytes each (matches emit() above).
uint MachNopNode::size(PhaseRegAlloc *) const {
  return 4 * _count;
}
// Use the generic pipeline description for padding nops.
const Pipeline* MachNopNode::pipeline() const {
  return MachNode::pipeline_class();
}
1591 //=============================================================================
1593 //=============================================================================
#ifndef PRODUCT
// Debug-only listing of the unverified entry point (inline-cache check);
// mirrors MachUEPNode::emit().
void MachUEPNode::format( PhaseRegAlloc *ra_, outputStream* st ) const {
  st->print_cr("load_klass(AT, T0)");
  st->print_cr("\tbeq(AT, iCache, L)");
  st->print_cr("\tnop");
  st->print_cr("\tjmp(SharedRuntime::get_ic_miss_stub(), relocInfo::runtime_call_type)");
  st->print_cr("\tnop");
  st->print_cr("\tnop");
  st->print_cr(" L:");
}
#endif
// Unverified entry point: check the receiver's klass against the inline
// cache; on mismatch, jump to the IC miss stub.
void MachUEPNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
  MacroAssembler _masm(&cbuf);
#ifdef ASSERT
  //uint code_size = cbuf.code_size();
#endif
  int ic_reg = Matcher::inline_cache_reg_encode();
  Label L;
  Register receiver = T0;
  Register iCache = as_Register(ic_reg);
  // Hit: receiver klass matches the cached klass -> fall through to L.
  __ load_klass(AT, receiver);
  __ beq(AT, iCache, L);
  __ nop(); // branch delay slot

  // Miss: tail-jump to the IC miss stub via T9.
  __ relocate(relocInfo::runtime_call_type);
  __ li48(T9, (long)SharedRuntime::get_ic_miss_stub());
  __ jr(T9);
  __ nop();

  /* WARNING these NOPs are critical so that verified entry point is properly
   * 8 bytes aligned for patching by NativeJump::patch_verified_entry() */
  __ align(CodeEntryAlignment);
  __ bind(L);
}
// Length varies with the alignment padding; measure the emitted code.
uint MachUEPNode::size(PhaseRegAlloc *ra_) const {
  return MachNode::size(ra_);
}
1637 //=============================================================================
// The constant-table base is allocated from the P (pointer) register class.
const RegMask& MachConstantBaseNode::_out_RegMask = P_REG_mask();
// The table base is addressed absolutely (li48 of its address), so no
// bias offset is needed.
int Compile::ConstantTable::calculate_table_base_offset() const {
  return 0; // absolute addressing, no offset
}
// This port emits the constant base directly; no post-register-allocation
// expansion is used, so postalloc_expand() must never be reached.
bool MachConstantBaseNode::requires_postalloc_expand() const { return false; }
void MachConstantBaseNode::postalloc_expand(GrowableArray <Node *> *nodes, PhaseRegAlloc *ra_) {
  ShouldNotReachHere();
}
1650 void MachConstantBaseNode::emit(CodeBuffer& cbuf, PhaseRegAlloc* ra_) const {
1651 Compile* C = ra_->C;
1652 Compile::ConstantTable& constant_table = C->constant_table();
1653 MacroAssembler _masm(&cbuf);
1655 Register Rtoc = as_Register(ra_->get_encode(this));
1656 CodeSection* consts_section = __ code()->consts();
1657 int consts_size = consts_section->align_at_start(consts_section->size());
1658 assert(constant_table.size() == consts_size, "must be equal");
1660 if (consts_section->size()) {
1661 // Materialize the constant table base.
1662 address baseaddr = consts_section->start() + -(constant_table.table_base_offset());
1663 // RelocationHolder rspec = internal_word_Relocation::spec(baseaddr);
1664 __ relocate(relocInfo::internal_pc_type);
1665 __ li48(Rtoc, (long)baseaddr);
1666 }
1667 }
// li48 expands to a fixed four-instruction (16-byte) sequence.
uint MachConstantBaseNode::size(PhaseRegAlloc* ra_) const {
  // li48 (4 insts)
  return 4 * 4;
}
#ifndef PRODUCT
// Debug-only listing for the constant-table base materialization.
void MachConstantBaseNode::format(PhaseRegAlloc* ra_, outputStream* st) const {
  Register r = as_Register(ra_->get_encode(this));
  st->print("li48 %s, &constanttable (constant table base) @ MachConstantBaseNode", r->name());
}
#endif
1682 //=============================================================================
#ifndef PRODUCT
// Debug-only listing of the method prolog: optional stack bang, save
// RA/FP (single gssq on Loongson), establish FP, allocate the frame.
void MachPrologNode::format( PhaseRegAlloc *ra_, outputStream* st ) const {
  Compile* C = ra_->C;

  int framesize = C->frame_size_in_bytes();
  int bangsize = C->bang_size_in_bytes();
  assert((framesize & (StackAlignmentInBytes-1)) == 0, "frame size not aligned");

  // Calls to C2R adapters often do not accept exceptional returns.
  // We require that their callers must bang for them. But be careful, because
  // some VM calls (such as call site linkage) can use several kilobytes of
  // stack. But the stack safety zone should account for that.
  // See bugs 4446381, 4468289, 4497237.
  if (C->need_stack_bang(bangsize)) {
    st->print_cr("# stack bang"); st->print("\t");
  }
  if (UseLoongsonISA) {
    st->print("gssq RA, FP, %d(SP) @ MachPrologNode\n\t", -wordSize*2);
  } else {
    st->print("sd RA, %d(SP) @ MachPrologNode\n\t", -wordSize);
    st->print("sd FP, %d(SP) @ MachPrologNode\n\t", -wordSize*2);
  }
  st->print("daddiu FP, SP, -%d \n\t", wordSize*2);
  st->print("daddiu SP, SP, -%d \t",framesize);
}
#endif
1711 void MachPrologNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
1712 Compile* C = ra_->C;
1713 MacroAssembler _masm(&cbuf);
1715 int framesize = C->frame_size_in_bytes();
1716 int bangsize = C->bang_size_in_bytes();
1718 // __ verified_entry(framesize, C->need_stack_bang(bangsize)?bangsize:0, false);
1720 assert((framesize & (StackAlignmentInBytes-1)) == 0, "frame size not aligned");
1722 if (C->need_stack_bang(framesize)) {
1723 __ generate_stack_overflow_check(framesize);
1724 }
1726 if (UseLoongsonISA) {
1727 __ gssq(RA, FP, SP, -wordSize*2);
1728 } else {
1729 __ sd(RA, SP, -wordSize);
1730 __ sd(FP, SP, -wordSize*2);
1731 }
1732 __ daddiu(FP, SP, -wordSize*2);
1733 __ daddiu(SP, SP, -framesize);
1734 __ nop(); /* 2013.10.22 Jin: Make enough room for patch_verified_entry() */
1735 __ nop();
1737 C->set_frame_complete(cbuf.insts_size());
1738 if (C->has_mach_constant_base_node()) {
1739 // NOTE: We set the table base offset here because users might be
1740 // emitted before MachConstantBaseNode.
1741 Compile::ConstantTable& constant_table = C->constant_table();
1742 constant_table.set_table_base_offset(constant_table.calculate_table_base_offset());
1743 }
1745 }
// Prolog length varies (bang, Loongson vs plain MIPS); measure the
// emitted code rather than predicting it.
uint MachPrologNode::size(PhaseRegAlloc *ra_) const {
  return MachNode::size(ra_); // too many variables; just compute it the hard way
}
// Estimate of relocation entries the prolog may need.
int MachPrologNode::reloc() const {
  return 0; // a large enough number
}
1757 %}
1759 //----------ENCODING BLOCK-----------------------------------------------------
1760 // This block specifies the encoding classes used by the compiler to output
1761 // byte streams. Encoding classes generate functions which are called by
1762 // Machine Instruction Nodes in order to generate the bit encoding of the
1763 // instruction. Operands specify their base encoding interface with the
// interface keyword.  Four interfaces are currently supported:
// REG_INTER, CONST_INTER, MEMORY_INTER, and COND_INTER.  REG_INTER causes an
1766 // operand to generate a function which returns its register number when
1767 // queried. CONST_INTER causes an operand to generate a function which
1768 // returns the value of the constant when queried. MEMORY_INTER causes an
1769 // operand to generate four functions which return the Base Register, the
1770 // Index Register, the Scale Value, and the Offset Value of the operand when
1771 // queried. COND_INTER causes an operand to generate six functions which
1772 // return the encoding code (ie - encoding bits for the instruction)
1773 // associated with each basic boolean condition for a conditional instruction.
1774 // Instructions specify two basic values for encoding. They use the
1775 // ins_encode keyword to specify their encoding class (which must be one of
1776 // the class names specified in the encoding block), and they use the
1777 // opcode keyword to specify, in order, their primary, secondary, and
1778 // tertiary opcode. Only the opcode sections which a particular instruction
1779 // needs for encoding need to be specified.
1780 encode %{
1781 /*
1782 Alias:
1783 1044 b java.io.ObjectInputStream::readHandle (130 bytes)
1784 118 B14: # B19 B15 <- B13 Freq: 0.899955
1785 118 add S1, S2, V0 #@addP_reg_reg
1786 11c lb S0, [S1 + #-8257524] #@loadB
1787 120 BReq S0, #3, B19 #@branchConI_reg_imm P=0.100000 C=-1.000000
1788 */
1789 //Load byte signed
  // Load byte, sign-extended (lb, or Loongson gslbx which folds
  // base+index+disp addressing into one instruction).
  enc_class load_B_enc (mRegI dst, memory mem) %{
    MacroAssembler _masm(&cbuf);
    int dst = $dst$$reg;
    int base = $mem$$base;
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;

    if( index != 0 ) {
      if( Assembler::is_simm16(disp) ) {
        if( UseLoongsonISA ) {
          // gslbx adds base + index + disp itself; only a scaled index
          // needs a separate shift.
          if (scale == 0) {
            __ gslbx(as_Register(dst), as_Register(base), as_Register(index), disp);
          } else {
            __ dsll(AT, as_Register(index), scale);
            __ gslbx(as_Register(dst), as_Register(base), AT, disp);
          }
        } else {
          // Plain MIPS: form the address in scratch AT, then lb with disp.
          // NOTE(review): addu is a 32-bit add; the sibling load_UB_enc
          // uses daddu for the same address arithmetic -- confirm which
          // is intended here.
          if (scale == 0) {
            __ addu(AT, as_Register(base), as_Register(index));
          } else {
            __ dsll(AT, as_Register(index), scale);
            __ addu(AT, as_Register(base), AT);
          }
          __ lb(as_Register(dst), AT, disp);
        }
      } else {
        // Displacement does not fit in 16 bits: materialize it in T9.
        if (scale == 0) {
          __ addu(AT, as_Register(base), as_Register(index));
        } else {
          __ dsll(AT, as_Register(index), scale);
          __ addu(AT, as_Register(base), AT);
        }
        __ move(T9, disp);
        if( UseLoongsonISA ) {
          __ gslbx(as_Register(dst), AT, T9, 0);
        } else {
          __ addu(AT, AT, T9);
          __ lb(as_Register(dst), AT, 0);
        }
      }
    } else {
      // No index register.
      if( Assembler::is_simm16(disp) ) {
        __ lb(as_Register(dst), as_Register(base), disp);
      } else {
        __ move(T9, disp);
        if( UseLoongsonISA ) {
          __ gslbx(as_Register(dst), as_Register(base), T9, 0);
        } else {
          __ addu(AT, as_Register(base), T9);
          __ lb(as_Register(dst), AT, 0);
        }
      }
    }
  %}
1846 //Load byte unsigned
  // Load byte, zero-extended (lbu).  The effective address is formed in
  // scratch AT; displacements that do not fit 16 bits go through T9.
  enc_class load_UB_enc (mRegI dst, memory mem) %{
    MacroAssembler _masm(&cbuf);
    int dst = $dst$$reg;
    int base = $mem$$base;
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;

    if( index != 0 ) {
      // AT = base + (index << scale)
      if (scale == 0) {
        __ daddu(AT, as_Register(base), as_Register(index));
      } else {
        __ dsll(AT, as_Register(index), scale);
        __ daddu(AT, as_Register(base), AT);
      }
      if( Assembler::is_simm16(disp) ) {
        __ lbu(as_Register(dst), AT, disp);
      } else {
        __ move(T9, disp);
        __ daddu(AT, AT, T9);
        __ lbu(as_Register(dst), AT, 0);
      }
    } else {
      if( Assembler::is_simm16(disp) ) {
        __ lbu(as_Register(dst), as_Register(base), disp);
      } else {
        __ move(T9, disp);
        __ daddu(AT, as_Register(base), T9);
        __ lbu(as_Register(dst), AT, 0);
      }
    }
  %}
  // Store byte from a register (sb, or Loongson gssbx whose immediate
  // field is only 8 bits -- hence the is_simm(disp, 8) checks).
  enc_class store_B_reg_enc (memory mem, mRegI src) %{
    MacroAssembler _masm(&cbuf);
    int src = $src$$reg;
    int base = $mem$$base;
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;

    if( index != 0 ) {
      if (scale == 0) {
        if( Assembler::is_simm(disp, 8) ) {
          if (UseLoongsonISA) {
            __ gssbx(as_Register(src), as_Register(base), as_Register(index), disp);
          } else {
            __ addu(AT, as_Register(base), as_Register(index));
            __ sb(as_Register(src), AT, disp);
          }
        } else if( Assembler::is_simm16(disp) ) {
          __ addu(AT, as_Register(base), as_Register(index));
          __ sb(as_Register(src), AT, disp);
        } else {
          // Displacement too large: materialize it in T9.
          __ addu(AT, as_Register(base), as_Register(index));
          __ move(T9, disp);
          if (UseLoongsonISA) {
            __ gssbx(as_Register(src), AT, T9, 0);
          } else {
            __ addu(AT, AT, T9);
            __ sb(as_Register(src), AT, 0);
          }
        }
      } else {
        // Scaled index: AT = index << scale.
        __ dsll(AT, as_Register(index), scale);
        if( Assembler::is_simm(disp, 8) ) {
          if (UseLoongsonISA) {
            // gssbx adds its two register operands, so (AT, base) here is
            // equivalent to (base, AT).
            __ gssbx(as_Register(src), AT, as_Register(base), disp);
          } else {
            __ addu(AT, as_Register(base), AT);
            __ sb(as_Register(src), AT, disp);
          }
        } else if( Assembler::is_simm16(disp) ) {
          __ addu(AT, as_Register(base), AT);
          __ sb(as_Register(src), AT, disp);
        } else {
          __ addu(AT, as_Register(base), AT);
          __ move(T9, disp);
          if (UseLoongsonISA) {
            __ gssbx(as_Register(src), AT, T9, 0);
          } else {
            __ addu(AT, AT, T9);
            __ sb(as_Register(src), AT, 0);
          }
        }
      }
    } else {
      // No index register.
      if( Assembler::is_simm16(disp) ) {
        __ sb(as_Register(src), as_Register(base), disp);
      } else {
        __ move(T9, disp);
        if (UseLoongsonISA) {
          __ gssbx(as_Register(src), as_Register(base), T9, 0);
        } else {
          __ addu(AT, as_Register(base), T9);
          __ sb(as_Register(src), AT, 0);
        }
      }
    }
  %}
// Store byte immediate: mem[base + (index << scale) + disp] = (int8_t)value.
// Scratch registers: AT and T9 (both freely clobbered by encodings in this file).
// The Loongson EXT indexed store (gssbx rd, rs, rt, imm) only has an 8-bit signed
// immediate offset, hence the Assembler::is_simm(disp, 8) checks; plain MIPS sb
// takes a 16-bit signed displacement. A zero value is stored straight from R0,
// avoiding the materializing move.
enc_class store_B_immI_enc (memory mem, immI8 src) %{
  MacroAssembler _masm(&cbuf);
  int base = $mem$$base;
  int index = $mem$$index;
  int scale = $mem$$scale;
  int disp = $mem$$disp;
  int value = $src$$constant;

  if( index != 0 ) {
    if (!UseLoongsonISA) {
      // Plain MIPS: form base + (index << scale) in AT, then sb with disp
      // folded into the instruction when it fits in 16 bits.
      if (scale == 0) {
        __ daddu(AT, as_Register(base), as_Register(index));
      } else {
        __ dsll(AT, as_Register(index), scale);
        __ daddu(AT, as_Register(base), AT);
      }
      if( Assembler::is_simm16(disp) ) {
        if (value == 0) {
          __ sb(R0, AT, disp);
        } else {
          __ move(T9, value);
          __ sb(T9, AT, disp);
        }
      } else {
        // disp too large for sb's immediate: add it into AT first.
        if (value == 0) {
          __ move(T9, disp);
          __ daddu(AT, AT, T9);
          __ sb(R0, AT, 0);
        } else {
          __ move(T9, disp);
          __ daddu(AT, AT, T9);
          __ move(T9, value);
          __ sb(T9, AT, 0);
        }
      }
    } else {
      // Loongson: prefer the fused indexed store when disp fits in 8 bits.
      if (scale == 0) {
        if( Assembler::is_simm(disp, 8) ) {
          if (value == 0) {
            __ gssbx(R0, as_Register(base), as_Register(index), disp);
          } else {
            __ move(T9, value);
            __ gssbx(T9, as_Register(base), as_Register(index), disp);
          }
        } else if( Assembler::is_simm16(disp) ) {
          __ daddu(AT, as_Register(base), as_Register(index));
          if (value == 0) {
            __ sb(R0, AT, disp);
          } else {
            __ move(T9, value);
            __ sb(T9, AT, disp);
          }
        } else {
          // Large disp: the two arms fold the three-term address differently,
          // but both compute base + index + disp (scale == 0 here).
          if (value == 0) {
            __ daddu(AT, as_Register(base), as_Register(index));
            __ move(T9, disp);
            __ gssbx(R0, AT, T9, 0);
          } else {
            __ move(AT, disp);
            __ move(T9, value);
            __ daddu(AT, as_Register(base), AT);
            __ gssbx(T9, AT, as_Register(index), 0);
          }
        }
      } else {
        if( Assembler::is_simm(disp, 8) ) {
          __ dsll(AT, as_Register(index), scale);
          if (value == 0) {
            __ gssbx(R0, as_Register(base), AT, disp);
          } else {
            __ move(T9, value);
            __ gssbx(T9, as_Register(base), AT, disp);
          }
        } else if( Assembler::is_simm16(disp) ) {
          __ dsll(AT, as_Register(index), scale);
          __ daddu(AT, as_Register(base), AT);
          if (value == 0) {
            __ sb(R0, AT, disp);
          } else {
            __ move(T9, value);
            __ sb(T9, AT, disp);
          }
        } else {
          __ dsll(AT, as_Register(index), scale);
          if (value == 0) {
            __ daddu(AT, as_Register(base), AT);
            __ move(T9, disp);
            __ gssbx(R0, AT, T9, 0);
          } else {
            // AT = (index << scale) + disp; store at base + AT.
            __ move(T9, disp);
            __ daddu(AT, AT, T9);
            __ move(T9, value);
            __ gssbx(T9, as_Register(base), AT, 0);
          }
        }
      }
    }
  } else {
    // No index register: simple base + disp addressing.
    if( Assembler::is_simm16(disp) ) {
      if (value == 0) {
        __ sb(R0, as_Register(base), disp);
      } else {
        __ move(AT, value);
        __ sb(AT, as_Register(base), disp);
      }
    } else {
      if (value == 0) {
        __ move(T9, disp);
        if (UseLoongsonISA) {
          __ gssbx(R0, as_Register(base), T9, 0);
        } else {
          __ daddu(AT, as_Register(base), T9);
          __ sb(R0, AT, 0);
        }
      } else {
        __ move(T9, disp);
        if (UseLoongsonISA) {
          __ move(AT, value);
          __ gssbx(AT, as_Register(base), T9, 0);
        } else {
          __ daddu(AT, as_Register(base), T9);
          __ move(T9, value);
          __ sb(T9, AT, 0);
        }
      }
    }
  }
%}
// Store byte immediate with a trailing memory barrier:
//   mem[base + (index << scale) + disp] = (int8_t)value; sync
// Used for ordered/volatile byte stores; identical addressing strategy to
// store_B_immI_enc (gssbx needs an 8-bit signed offset, sb a 16-bit one),
// followed by __ sync() to order the store against later memory operations.
// Scratch registers: AT and T9.
enc_class store_B_immI_enc_sync (memory mem, immI8 src) %{
  MacroAssembler _masm(&cbuf);
  int base = $mem$$base;
  int index = $mem$$index;
  int scale = $mem$$scale;
  int disp = $mem$$disp;
  int value = $src$$constant;

  if( index != 0 ) {
    if ( UseLoongsonISA ) {
      if ( Assembler::is_simm(disp,8) ) {
        // disp fits the gssbx immediate: use the fused indexed store.
        if ( scale == 0 ) {
          if ( value == 0 ) {
            __ gssbx(R0, as_Register(base), as_Register(index), disp);
          } else {
            __ move(AT, value);
            __ gssbx(AT, as_Register(base), as_Register(index), disp);
          }
        } else {
          __ dsll(AT, as_Register(index), scale);
          if ( value == 0 ) {
            __ gssbx(R0, as_Register(base), AT, disp);
          } else {
            __ move(T9, value);
            __ gssbx(T9, as_Register(base), AT, disp);
          }
        }
      } else if ( Assembler::is_simm16(disp) ) {
        // 16-bit disp: form base + scaled index in AT, plain sb with disp.
        if ( scale == 0 ) {
          __ daddu(AT, as_Register(base), as_Register(index));
          if ( value == 0 ){
            __ sb(R0, AT, disp);
          } else {
            __ move(T9, value);
            __ sb(T9, AT, disp);
          }
        } else {
          __ dsll(AT, as_Register(index), scale);
          __ daddu(AT, as_Register(base), AT);
          if ( value == 0 ) {
            __ sb(R0, AT, disp);
          } else {
            __ move(T9, value);
            __ sb(T9, AT, disp);
          }
        }
      } else {
        // Large disp: AT = (index << scale) + disp, store at base + AT.
        if ( scale == 0 ) {
          __ move(AT, disp);
          __ daddu(AT, as_Register(index), AT);
          if ( value == 0 ) {
            __ gssbx(R0, as_Register(base), AT, 0);
          } else {
            __ move(T9, value);
            __ gssbx(T9, as_Register(base), AT, 0);
          }
        } else {
          __ dsll(AT, as_Register(index), scale);
          __ move(T9, disp);
          __ daddu(AT, AT, T9);
          if ( value == 0 ) {
            __ gssbx(R0, as_Register(base), AT, 0);
          } else {
            __ move(T9, value);
            __ gssbx(T9, as_Register(base), AT, 0);
          }
        }
      }
    } else { //not use loongson isa
      if (scale == 0) {
        __ daddu(AT, as_Register(base), as_Register(index));
      } else {
        __ dsll(AT, as_Register(index), scale);
        __ daddu(AT, as_Register(base), AT);
      }
      if( Assembler::is_simm16(disp) ) {
        if (value == 0) {
          __ sb(R0, AT, disp);
        } else {
          __ move(T9, value);
          __ sb(T9, AT, disp);
        }
      } else {
        if (value == 0) {
          __ move(T9, disp);
          __ daddu(AT, AT, T9);
          __ sb(R0, AT, 0);
        } else {
          __ move(T9, disp);
          __ daddu(AT, AT, T9);
          __ move(T9, value);
          __ sb(T9, AT, 0);
        }
      }
    }
  } else {
    // No index register: base + disp only.
    if ( UseLoongsonISA ){
      if ( Assembler::is_simm16(disp) ){
        if ( value == 0 ) {
          __ sb(R0, as_Register(base), disp);
        } else {
          __ move(AT, value);
          __ sb(AT, as_Register(base), disp);
        }
      } else {
        __ move(AT, disp);
        if ( value == 0 ) {
          __ gssbx(R0, as_Register(base), AT, 0);
        } else {
          __ move(T9, value);
          __ gssbx(T9, as_Register(base), AT, 0);
        }
      }
    } else {
      if( Assembler::is_simm16(disp) ) {
        if (value == 0) {
          __ sb(R0, as_Register(base), disp);
        } else {
          __ move(AT, value);
          __ sb(AT, as_Register(base), disp);
        }
      } else {
        if (value == 0) {
          __ move(T9, disp);
          __ daddu(AT, as_Register(base), T9);
          __ sb(R0, AT, 0);
        } else {
          __ move(T9, disp);
          __ daddu(AT, as_Register(base), T9);
          __ move(T9, value);
          __ sb(T9, AT, 0);
        }
      }
    }
  }

  // Barrier: order this store before subsequent memory accesses.
  __ sync();
%}
// Load Short (16-bit signed): dst = (int16_t)mem[base + (index << scale) + disp],
// sign-extended by lh / gslhx. Scratch registers: AT and T9.
// gslhx (Loongson EXT indexed load-halfword) carries only an 8-bit signed
// immediate offset; lh carries a 16-bit signed displacement.
enc_class load_S_enc (mRegI dst, memory mem) %{
  MacroAssembler _masm(&cbuf);
  int dst = $dst$$reg;
  int base = $mem$$base;
  int index = $mem$$index;
  int scale = $mem$$scale;
  int disp = $mem$$disp;

  if( index != 0 ) {
    if ( UseLoongsonISA ) {
      if ( Assembler::is_simm(disp, 8) ) {
        // Fused indexed load when disp fits gslhx's 8-bit immediate.
        if (scale == 0) {
          __ gslhx(as_Register(dst), as_Register(base), as_Register(index), disp);
        } else {
          __ dsll(AT, as_Register(index), scale);
          __ gslhx(as_Register(dst), as_Register(base), AT, disp);
        }
      } else if ( Assembler::is_simm16(disp) ) {
        if (scale == 0) {
          __ daddu(AT, as_Register(base), as_Register(index));
          __ lh(as_Register(dst), AT, disp);
        } else {
          __ dsll(AT, as_Register(index), scale);
          __ daddu(AT, as_Register(base), AT);
          __ lh(as_Register(dst), AT, disp);
        }
      } else {
        // Large disp: AT = (index << scale) + disp, load from base + AT.
        if (scale == 0) {
          __ move(AT, disp);
          __ daddu(AT, as_Register(index), AT);
          __ gslhx(as_Register(dst), as_Register(base), AT, 0);
        } else {
          __ dsll(AT, as_Register(index), scale);
          __ move(T9, disp);
          __ daddu(AT, AT, T9);
          __ gslhx(as_Register(dst), as_Register(base), AT, 0);
        }
      }
    } else { // not use loongson isa
      if (scale == 0) {
        __ daddu(AT, as_Register(base), as_Register(index));
      } else {
        __ dsll(AT, as_Register(index), scale);
        __ daddu(AT, as_Register(base), AT);
      }
      if( Assembler::is_simm16(disp) ) {
        __ lh(as_Register(dst), AT, disp);
      } else {
        __ move(T9, disp);
        // NOTE(review): 32-bit addu used for 64-bit address arithmetic here
        // (cf. daddu elsewhere in this block) — presumably safe only if the
        // address fits a sign-extended 32-bit value; verify.
        __ addu(AT, AT, T9);
        __ lh(as_Register(dst), AT, 0);
      }
    }
  } else { // index is 0
    if ( UseLoongsonISA ) {
      if ( Assembler::is_simm16(disp) ) {
        __ lh(as_Register(dst), as_Register(base), disp);
      } else {
        __ move(T9, disp);
        __ gslhx(as_Register(dst), as_Register(base), T9, 0);
      }
    } else { //not use loongson isa
      if( Assembler::is_simm16(disp) ) {
        __ lh(as_Register(dst), as_Register(base), disp);
      } else {
        __ move(T9, disp);
        __ addu(AT, as_Register(base), T9);
        __ lh(as_Register(dst), AT, 0);
      }
    }
  }
%}
// Load Char (16-bit unsigned): dst = (uint16_t)mem[base + (index << scale) + disp],
// zero-extended by lhu. No Loongson fast path here (unlike load_S_enc).
// Scratch registers: AT and T9.
enc_class load_C_enc (mRegI dst, memory mem) %{
  MacroAssembler _masm(&cbuf);
  int dst = $dst$$reg;
  int base = $mem$$base;
  int index = $mem$$index;
  int scale = $mem$$scale;
  int disp = $mem$$disp;

  if( index != 0 ) {
    // AT = base + (index << scale)
    if (scale == 0) {
      __ daddu(AT, as_Register(base), as_Register(index));
    } else {
      __ dsll(AT, as_Register(index), scale);
      __ daddu(AT, as_Register(base), AT);
    }
    if( Assembler::is_simm16(disp) ) {
      __ lhu(as_Register(dst), AT, disp);
    } else {
      __ move(T9, disp);
      // NOTE(review): 32-bit addu for address arithmetic; the index==0 arm
      // below uses daddu — confirm the inconsistency is intentional.
      __ addu(AT, AT, T9);
      __ lhu(as_Register(dst), AT, 0);
    }
  } else {
    if( Assembler::is_simm16(disp) ) {
      __ lhu(as_Register(dst), as_Register(base), disp);
    } else {
      __ move(T9, disp);
      __ daddu(AT, as_Register(base), T9);
      __ lhu(as_Register(dst), AT, 0);
    }
  }
%}
// Store Char (16-bit): mem[base + (index << scale) + disp] = (uint16_t)src.
// Uses Loongson gsshx (8-bit signed immediate) when available and disp fits,
// otherwise plain sh (16-bit displacement). Scratch registers: AT and T9.
enc_class store_C_reg_enc (memory mem, mRegI src) %{
  MacroAssembler _masm(&cbuf);
  int src = $src$$reg;
  int base = $mem$$base;
  int index = $mem$$index;
  int scale = $mem$$scale;
  int disp = $mem$$disp;

  if( index != 0 ) {
    if( Assembler::is_simm16(disp) ) {
      if( UseLoongsonISA && Assembler::is_simm(disp, 8) ) {
        // Fused indexed store.
        if (scale == 0) {
          __ gsshx(as_Register(src), as_Register(base), as_Register(index), disp);
        } else {
          __ dsll(AT, as_Register(index), scale);
          __ gsshx(as_Register(src), as_Register(base), AT, disp);
        }
      } else {
        if (scale == 0) {
          __ addu(AT, as_Register(base), as_Register(index));
        } else {
          __ dsll(AT, as_Register(index), scale);
          __ addu(AT, as_Register(base), AT);
        }
        __ sh(as_Register(src), AT, disp);
      }
    } else {
      // disp does not fit sh's 16-bit immediate: materialize it in T9.
      if (scale == 0) {
        __ addu(AT, as_Register(base), as_Register(index));
      } else {
        __ dsll(AT, as_Register(index), scale);
        __ addu(AT, as_Register(base), AT);
      }
      __ move(T9, disp);
      if( UseLoongsonISA ) {
        __ gsshx(as_Register(src), AT, T9, 0);
      } else {
        __ addu(AT, AT, T9);
        __ sh(as_Register(src), AT, 0);
      }
    }
  } else {
    if( Assembler::is_simm16(disp) ) {
      __ sh(as_Register(src), as_Register(base), disp);
    } else {
      __ move(T9, disp);
      if( UseLoongsonISA ) {
        __ gsshx(as_Register(src), as_Register(base), T9, 0);
      } else {
        __ addu(AT, as_Register(base), T9);
        __ sh(as_Register(src), AT, 0);
      }
    }
  }
%}
// Store zero Char (16-bit): mem[base + (index << scale) + disp] = 0.
// Same addressing strategy as store_C_reg_enc, but the value comes from the
// hardwired zero register R0, so no value move is ever needed.
// Scratch registers: AT and T9.
enc_class store_C0_enc (memory mem) %{
  MacroAssembler _masm(&cbuf);
  int base = $mem$$base;
  int index = $mem$$index;
  int scale = $mem$$scale;
  int disp = $mem$$disp;

  if( index != 0 ) {
    if( Assembler::is_simm16(disp) ) {
      if( UseLoongsonISA && Assembler::is_simm(disp, 8) ) {
        // Fused indexed store of zero.
        if (scale == 0) {
          __ gsshx(R0, as_Register(base), as_Register(index), disp);
        } else {
          __ dsll(AT, as_Register(index), scale);
          __ gsshx(R0, as_Register(base), AT, disp);
        }
      } else {
        if (scale == 0) {
          __ addu(AT, as_Register(base), as_Register(index));
        } else {
          __ dsll(AT, as_Register(index), scale);
          __ addu(AT, as_Register(base), AT);
        }
        __ sh(R0, AT, disp);
      }
    } else {
      if (scale == 0) {
        __ addu(AT, as_Register(base), as_Register(index));
      } else {
        __ dsll(AT, as_Register(index), scale);
        __ addu(AT, as_Register(base), AT);
      }
      __ move(T9, disp);
      if( UseLoongsonISA ) {
        __ gsshx(R0, AT, T9, 0);
      } else {
        __ addu(AT, AT, T9);
        __ sh(R0, AT, 0);
      }
    }
  } else {
    if( Assembler::is_simm16(disp) ) {
      __ sh(R0, as_Register(base), disp);
    } else {
      __ move(T9, disp);
      if( UseLoongsonISA ) {
        __ gsshx(R0, as_Register(base), T9, 0);
      } else {
        __ addu(AT, as_Register(base), T9);
        __ sh(R0, AT, 0);
      }
    }
  }
%}
// Load Int (32-bit signed): dst = (int32_t)mem[base + (index << scale) + disp],
// sign-extended by lw / gslwx (Loongson EXT indexed load-word, 8-bit signed
// immediate offset). Scratch registers: AT and T9.
enc_class load_I_enc (mRegI dst, memory mem) %{
  MacroAssembler _masm(&cbuf);
  int dst = $dst$$reg;
  int base = $mem$$base;
  int index = $mem$$index;
  int scale = $mem$$scale;
  int disp = $mem$$disp;

  if( index != 0 ) {
    if( Assembler::is_simm16(disp) ) {
      if( UseLoongsonISA && Assembler::is_simm(disp, 8) ) {
        // Fused indexed load.
        if (scale == 0) {
          __ gslwx(as_Register(dst), as_Register(base), as_Register(index), disp);
        } else {
          __ dsll(AT, as_Register(index), scale);
          __ gslwx(as_Register(dst), as_Register(base), AT, disp);
        }
      } else {
        if (scale == 0) {
          __ addu(AT, as_Register(base), as_Register(index));
        } else {
          __ dsll(AT, as_Register(index), scale);
          __ addu(AT, as_Register(base), AT);
        }
        __ lw(as_Register(dst), AT, disp);
      }
    } else {
      // Large disp: materialize it in T9.
      if (scale == 0) {
        __ addu(AT, as_Register(base), as_Register(index));
      } else {
        __ dsll(AT, as_Register(index), scale);
        __ addu(AT, as_Register(base), AT);
      }
      __ move(T9, disp);
      if( UseLoongsonISA ) {
        __ gslwx(as_Register(dst), AT, T9, 0);
      } else {
        __ addu(AT, AT, T9);
        __ lw(as_Register(dst), AT, 0);
      }
    }
  } else {
    if( Assembler::is_simm16(disp) ) {
      __ lw(as_Register(dst), as_Register(base), disp);
    } else {
      __ move(T9, disp);
      if( UseLoongsonISA ) {
        __ gslwx(as_Register(dst), as_Register(base), T9, 0);
      } else {
        __ addu(AT, as_Register(base), T9);
        __ lw(as_Register(dst), AT, 0);
      }
    }
  }
%}
// Store Int (32-bit): mem[base + (index << scale) + disp] = (int32_t)src.
// Uses Loongson gsswx (8-bit signed immediate offset) when available and disp
// fits, otherwise plain sw (16-bit displacement). Scratch registers: AT and T9.
enc_class store_I_reg_enc (memory mem, mRegI src) %{
  MacroAssembler _masm(&cbuf);
  int src = $src$$reg;
  int base = $mem$$base;
  int index = $mem$$index;
  int scale = $mem$$scale;
  int disp = $mem$$disp;

  if( index != 0 ) {
    if( Assembler::is_simm16(disp) ) {
      if( UseLoongsonISA && Assembler::is_simm(disp, 8) ) {
        // Fused indexed store.
        if (scale == 0) {
          __ gsswx(as_Register(src), as_Register(base), as_Register(index), disp);
        } else {
          __ dsll(AT, as_Register(index), scale);
          __ gsswx(as_Register(src), as_Register(base), AT, disp);
        }
      } else {
        if (scale == 0) {
          __ addu(AT, as_Register(base), as_Register(index));
        } else {
          __ dsll(AT, as_Register(index), scale);
          __ addu(AT, as_Register(base), AT);
        }
        __ sw(as_Register(src), AT, disp);
      }
    } else {
      // Large disp: materialize it in T9.
      if (scale == 0) {
        __ addu(AT, as_Register(base), as_Register(index));
      } else {
        __ dsll(AT, as_Register(index), scale);
        __ addu(AT, as_Register(base), AT);
      }
      __ move(T9, disp);
      if( UseLoongsonISA ) {
        __ gsswx(as_Register(src), AT, T9, 0);
      } else {
        __ addu(AT, AT, T9);
        __ sw(as_Register(src), AT, 0);
      }
    }
  } else {
    if( Assembler::is_simm16(disp) ) {
      __ sw(as_Register(src), as_Register(base), disp);
    } else {
      __ move(T9, disp);
      if( UseLoongsonISA ) {
        __ gsswx(as_Register(src), as_Register(base), T9, 0);
      } else {
        __ addu(AT, as_Register(base), T9);
        __ sw(as_Register(src), AT, 0);
      }
    }
  }
%}
// Store Int immediate: mem[base + (index << scale) + disp] = (int32_t)value.
// A zero value is stored directly from R0; otherwise the constant is first
// materialized into T9 (or AT). Uses Loongson gsswx when its 8-bit signed
// immediate offset can hold disp. Scratch registers: AT and T9.
enc_class store_I_immI_enc (memory mem, immI src) %{
  MacroAssembler _masm(&cbuf);
  int base = $mem$$base;
  int index = $mem$$index;
  int scale = $mem$$scale;
  int disp = $mem$$disp;
  int value = $src$$constant;

  if( index != 0 ) {
    if ( UseLoongsonISA ) {
      if ( Assembler::is_simm(disp, 8) ) {
        // disp fits gsswx's immediate: fused indexed store.
        if ( scale == 0 ) {
          if ( value == 0 ) {
            __ gsswx(R0, as_Register(base), as_Register(index), disp);
          } else {
            __ move(T9, value);
            __ gsswx(T9, as_Register(base), as_Register(index), disp);
          }
        } else {
          __ dsll(AT, as_Register(index), scale);
          if ( value == 0 ) {
            __ gsswx(R0, as_Register(base), AT, disp);
          } else {
            __ move(T9, value);
            __ gsswx(T9, as_Register(base), AT, disp);
          }
        }
      } else if ( Assembler::is_simm16(disp) ) {
        // 16-bit disp: form base + scaled index, plain sw with disp.
        if ( scale == 0 ) {
          __ daddu(AT, as_Register(base), as_Register(index));
          if ( value == 0 ) {
            __ sw(R0, AT, disp);
          } else {
            __ move(T9, value);
            __ sw(T9, AT, disp);
          }
        } else {
          __ dsll(AT, as_Register(index), scale);
          __ daddu(AT, as_Register(base), AT);
          if ( value == 0 ) {
            __ sw(R0, AT, disp);
          } else {
            __ move(T9, value);
            __ sw(T9, AT, disp);
          }
        }
      } else {
        // Large disp: AT = (index << scale) + disp, store at base + AT.
        if ( scale == 0 ) {
          __ move(T9, disp);
          __ daddu(AT, as_Register(index), T9);
          if ( value ==0 ) {
            __ gsswx(R0, as_Register(base), AT, 0);
          } else {
            __ move(T9, value);
            __ gsswx(T9, as_Register(base), AT, 0);
          }
        } else {
          __ dsll(AT, as_Register(index), scale);
          __ move(T9, disp);
          __ daddu(AT, AT, T9);
          if ( value == 0 ) {
            __ gsswx(R0, as_Register(base), AT, 0);
          } else {
            __ move(T9, value);
            __ gsswx(T9, as_Register(base), AT, 0);
          }
        }
      }
    } else { //not use loongson isa
      if (scale == 0) {
        __ daddu(AT, as_Register(base), as_Register(index));
      } else {
        __ dsll(AT, as_Register(index), scale);
        __ daddu(AT, as_Register(base), AT);
      }
      if( Assembler::is_simm16(disp) ) {
        if (value == 0) {
          __ sw(R0, AT, disp);
        } else {
          __ move(T9, value);
          __ sw(T9, AT, disp);
        }
      } else {
        // NOTE(review): 32-bit addu for 64-bit address arithmetic below,
        // while the address base above was formed with daddu — verify.
        if (value == 0) {
          __ move(T9, disp);
          __ addu(AT, AT, T9);
          __ sw(R0, AT, 0);
        } else {
          __ move(T9, disp);
          __ addu(AT, AT, T9);
          __ move(T9, value);
          __ sw(T9, AT, 0);
        }
      }
    }
  } else {
    // No index register: base + disp only.
    if ( UseLoongsonISA ) {
      if ( Assembler::is_simm16(disp) ) {
        if ( value == 0 ) {
          __ sw(R0, as_Register(base), disp);
        } else {
          __ move(AT, value);
          __ sw(AT, as_Register(base), disp);
        }
      } else {
        __ move(T9, disp);
        if ( value == 0 ) {
          __ gsswx(R0, as_Register(base), T9, 0);
        } else {
          __ move(AT, value);
          __ gsswx(AT, as_Register(base), T9, 0);
        }
      }
    } else {
      if( Assembler::is_simm16(disp) ) {
        if (value == 0) {
          __ sw(R0, as_Register(base), disp);
        } else {
          __ move(AT, value);
          __ sw(AT, as_Register(base), disp);
        }
      } else {
        if (value == 0) {
          __ move(T9, disp);
          __ addu(AT, as_Register(base), T9);
          __ sw(R0, AT, 0);
        } else {
          __ move(T9, disp);
          __ addu(AT, as_Register(base), T9);
          __ move(T9, value);
          __ sw(T9, AT, 0);
        }
      }
    }
  }
%}
// Load narrow oop (compressed pointer, 32-bit zero-extended via lwu):
//   dst = (uint32_t)mem[base + (index << scale) + disp].
// Narrow-oop memory operands must not carry a displacement relocation,
// enforced by the assert. Scratch registers: AT and T9.
enc_class load_N_enc (mRegN dst, memory mem) %{
  MacroAssembler _masm(&cbuf);
  int dst = $dst$$reg;
  int base = $mem$$base;
  int index = $mem$$index;
  int scale = $mem$$scale;
  int disp = $mem$$disp;
  relocInfo::relocType disp_reloc = $mem->disp_reloc();
  assert(disp_reloc == relocInfo::none, "cannot have disp");

  if( index != 0 ) {
    // AT = base + (index << scale)
    if (scale == 0) {
      __ daddu(AT, as_Register(base), as_Register(index));
    } else {
      __ dsll(AT, as_Register(index), scale);
      __ daddu(AT, as_Register(base), AT);
    }
    if( Assembler::is_simm16(disp) ) {
      __ lwu(as_Register(dst), AT, disp);
    } else {
      __ li(T9, disp);
      __ daddu(AT, AT, T9);
      __ lwu(as_Register(dst), AT, 0);
    }
  } else {
    if( Assembler::is_simm16(disp) ) {
      __ lwu(as_Register(dst), as_Register(base), disp);
    } else {
      __ li(T9, disp);
      __ daddu(AT, as_Register(base), T9);
      __ lwu(as_Register(dst), AT, 0);
    }
  }
%}
// Load pointer (64-bit): dst = mem[base + (index << scale) + disp] via ld /
// gsldx (Loongson EXT indexed load-doubleword, 8-bit signed immediate offset).
// Memory operand must not carry a displacement relocation (asserted).
// Scratch registers: AT and T9.
enc_class load_P_enc (mRegP dst, memory mem) %{
  MacroAssembler _masm(&cbuf);
  int dst = $dst$$reg;
  int base = $mem$$base;
  int index = $mem$$index;
  int scale = $mem$$scale;
  int disp = $mem$$disp;
  relocInfo::relocType disp_reloc = $mem->disp_reloc();
  assert(disp_reloc == relocInfo::none, "cannot have disp");

  if( index != 0 ) {
    if ( UseLoongsonISA ) {
      if ( Assembler::is_simm(disp, 8) ) {
        // Fused indexed load.
        if ( scale != 0 ) {
          __ dsll(AT, as_Register(index), scale);
          __ gsldx(as_Register(dst), as_Register(base), AT, disp);
        } else {
          __ gsldx(as_Register(dst), as_Register(base), as_Register(index), disp);
        }
      } else if ( Assembler::is_simm16(disp) ){
        if ( scale != 0 ) {
          __ dsll(AT, as_Register(index), scale);
          __ daddu(AT, AT, as_Register(base));
        } else {
          __ daddu(AT, as_Register(index), as_Register(base));
        }
        __ ld(as_Register(dst), AT, disp);
      } else {
        // Large disp: AT = (index << scale) + disp, load from base + AT.
        if ( scale != 0 ) {
          __ dsll(AT, as_Register(index), scale);
          __ move(T9, disp);
          __ daddu(AT, AT, T9);
        } else {
          __ move(T9, disp);
          __ daddu(AT, as_Register(index), T9);
        }
        __ gsldx(as_Register(dst), as_Register(base), AT, 0);
      }
    } else { //not use loongson isa
      if (scale == 0) {
        __ daddu(AT, as_Register(base), as_Register(index));
      } else {
        __ dsll(AT, as_Register(index), scale);
        __ daddu(AT, as_Register(base), AT);
      }
      if( Assembler::is_simm16(disp) ) {
        __ ld(as_Register(dst), AT, disp);
      } else {
        __ li(T9, disp);
        __ daddu(AT, AT, T9);
        __ ld(as_Register(dst), AT, 0);
      }
    }
  } else {
    if ( UseLoongsonISA ) {
      if ( Assembler::is_simm16(disp) ){
        __ ld(as_Register(dst), as_Register(base), disp);
      } else {
        __ li(T9, disp);
        __ gsldx(as_Register(dst), as_Register(base), T9, 0);
      }
    } else { //not use loongson isa
      if( Assembler::is_simm16(disp) ) {
        __ ld(as_Register(dst), as_Register(base), disp);
      } else {
        __ li(T9, disp);
        __ daddu(AT, as_Register(base), T9);
        __ ld(as_Register(dst), AT, 0);
      }
    }
  }
  // if( disp_reloc != relocInfo::none) __ ld(as_Register(dst), as_Register(dst), 0);
%}
// Store pointer (64-bit): mem[base + (index << scale) + disp] = src via sd /
// gssdx (Loongson EXT indexed store-doubleword, 8-bit signed immediate offset).
// Scratch registers: AT and T9.
enc_class store_P_reg_enc (memory mem, mRegP src) %{
  MacroAssembler _masm(&cbuf);
  int src = $src$$reg;
  int base = $mem$$base;
  int index = $mem$$index;
  int scale = $mem$$scale;
  int disp = $mem$$disp;

  if( index != 0 ) {
    if ( UseLoongsonISA ){
      if ( Assembler::is_simm(disp, 8) ) {
        // Fused indexed store.
        if ( scale == 0 ) {
          __ gssdx(as_Register(src), as_Register(base), as_Register(index), disp);
        } else {
          __ dsll(AT, as_Register(index), scale);
          __ gssdx(as_Register(src), as_Register(base), AT, disp);
        }
      } else if ( Assembler::is_simm16(disp) ) {
        if ( scale == 0 ) {
          __ daddu(AT, as_Register(base), as_Register(index));
        } else {
          __ dsll(AT, as_Register(index), scale);
          __ daddu(AT, as_Register(base), AT);
        }
        __ sd(as_Register(src), AT, disp);
      } else {
        // Large disp: AT = (index << scale) + disp, store at base + AT.
        if ( scale == 0 ) {
          __ move(T9, disp);
          __ daddu(AT, as_Register(index), T9);
        } else {
          __ dsll(AT, as_Register(index), scale);
          __ move(T9, disp);
          __ daddu(AT, AT, T9);
        }
        __ gssdx(as_Register(src), as_Register(base), AT, 0);
      }
    } else { //not use loongson isa
      if (scale == 0) {
        __ daddu(AT, as_Register(base), as_Register(index));
      } else {
        __ dsll(AT, as_Register(index), scale);
        __ daddu(AT, as_Register(base), AT);
      }
      if( Assembler::is_simm16(disp) ) {
        __ sd(as_Register(src), AT, disp);
      } else {
        __ move(T9, disp);
        __ daddu(AT, AT, T9);
        __ sd(as_Register(src), AT, 0);
      }
    }
  } else {
    if ( UseLoongsonISA ) {
      if ( Assembler::is_simm16(disp) ) {
        __ sd(as_Register(src), as_Register(base), disp);
      } else {
        __ move(T9, disp);
        __ gssdx(as_Register(src), as_Register(base), T9, 0);
      }
    } else {
      if( Assembler::is_simm16(disp) ) {
        __ sd(as_Register(src), as_Register(base), disp);
      } else {
        __ move(T9, disp);
        __ daddu(AT, as_Register(base), T9);
        __ sd(as_Register(src), AT, 0);
      }
    }
  }
%}
// Store narrow oop (compressed pointer, 32-bit): mem[base + (index << scale)
// + disp] = (uint32_t)src via sw / gsswx (8-bit signed immediate offset).
// Mirrors store_P_reg_enc with word-sized stores. Scratch registers: AT and T9.
enc_class store_N_reg_enc (memory mem, mRegN src) %{
  MacroAssembler _masm(&cbuf);
  int src = $src$$reg;
  int base = $mem$$base;
  int index = $mem$$index;
  int scale = $mem$$scale;
  int disp = $mem$$disp;

  if( index != 0 ) {
    if ( UseLoongsonISA ){
      if ( Assembler::is_simm(disp, 8) ) {
        // Fused indexed store.
        if ( scale == 0 ) {
          __ gsswx(as_Register(src), as_Register(base), as_Register(index), disp);
        } else {
          __ dsll(AT, as_Register(index), scale);
          __ gsswx(as_Register(src), as_Register(base), AT, disp);
        }
      } else if ( Assembler::is_simm16(disp) ) {
        if ( scale == 0 ) {
          __ daddu(AT, as_Register(base), as_Register(index));
        } else {
          __ dsll(AT, as_Register(index), scale);
          __ daddu(AT, as_Register(base), AT);
        }
        __ sw(as_Register(src), AT, disp);
      } else {
        // Large disp: AT = (index << scale) + disp, store at base + AT.
        if ( scale == 0 ) {
          __ move(T9, disp);
          __ daddu(AT, as_Register(index), T9);
        } else {
          __ dsll(AT, as_Register(index), scale);
          __ move(T9, disp);
          __ daddu(AT, AT, T9);
        }
        __ gsswx(as_Register(src), as_Register(base), AT, 0);
      }
    } else { //not use loongson isa
      if (scale == 0) {
        __ daddu(AT, as_Register(base), as_Register(index));
      } else {
        __ dsll(AT, as_Register(index), scale);
        __ daddu(AT, as_Register(base), AT);
      }
      if( Assembler::is_simm16(disp) ) {
        __ sw(as_Register(src), AT, disp);
      } else {
        __ move(T9, disp);
        // NOTE(review): 32-bit addu for 64-bit address arithmetic — verify.
        __ addu(AT, AT, T9);
        __ sw(as_Register(src), AT, 0);
      }
    }
  } else {
    if ( UseLoongsonISA ) {
      if ( Assembler::is_simm16(disp) ) {
        __ sw(as_Register(src), as_Register(base), disp);
      } else {
        __ move(T9, disp);
        __ gsswx(as_Register(src), as_Register(base), T9, 0);
      }
    } else {
      if( Assembler::is_simm16(disp) ) {
        __ sw(as_Register(src), as_Register(base), disp);
      } else {
        __ move(T9, disp);
        __ addu(AT, as_Register(base), T9);
        __ sw(as_Register(src), AT, 0);
      }
    }
  }
%}
// Store null pointer (64-bit zero): mem[base + (index << scale) + disp] = 0.
// The zero comes straight from R0; uses Loongson gssdx when its 8-bit signed
// immediate offset can hold disp. Scratch registers: AT and T9.
enc_class store_P_immP0_enc (memory mem) %{
  MacroAssembler _masm(&cbuf);
  int base = $mem$$base;
  int index = $mem$$index;
  int scale = $mem$$scale;
  int disp = $mem$$disp;

  if( index != 0 ) {
    if (scale == 0) {
      if( Assembler::is_simm16(disp) ) {
        if (UseLoongsonISA && Assembler::is_simm(disp, 8)) {
          __ gssdx(R0, as_Register(base), as_Register(index), disp);
        } else {
          __ daddu(AT, as_Register(base), as_Register(index));
          __ sd(R0, AT, disp);
        }
      } else {
        __ daddu(AT, as_Register(base), as_Register(index));
        __ move(T9, disp);
        if(UseLoongsonISA) {
          __ gssdx(R0, AT, T9, 0);
        } else {
          __ daddu(AT, AT, T9);
          __ sd(R0, AT, 0);
        }
      }
    } else {
      // Scaled index: AT = index << scale first.
      __ dsll(AT, as_Register(index), scale);
      if( Assembler::is_simm16(disp) ) {
        if (UseLoongsonISA && Assembler::is_simm(disp, 8)) {
          __ gssdx(R0, as_Register(base), AT, disp);
        } else {
          __ daddu(AT, as_Register(base), AT);
          __ sd(R0, AT, disp);
        }
      } else {
        __ daddu(AT, as_Register(base), AT);
        __ move(T9, disp);
        if (UseLoongsonISA) {
          __ gssdx(R0, AT, T9, 0);
        } else {
          __ daddu(AT, AT, T9);
          __ sd(R0, AT, 0);
        }
      }
    }
  } else {
    if( Assembler::is_simm16(disp) ) {
      __ sd(R0, as_Register(base), disp);
    } else {
      __ move(T9, disp);
      if (UseLoongsonISA) {
        __ gssdx(R0, as_Register(base), T9, 0);
      } else {
        __ daddu(AT, as_Register(base), T9);
        __ sd(R0, AT, 0);
      }
    }
  }
%}
// Store zero narrow oop (compressed null, 32-bit): mem[base + (index << scale)
// + disp] = 0 via sw from R0. No Loongson fast path here (unlike the other
// zero-store encodings). Scratch registers: AT and T9.
enc_class storeImmN0_enc(memory mem, ImmN0 src) %{
  MacroAssembler _masm(&cbuf);
  int base = $mem$$base;
  int index = $mem$$index;
  int scale = $mem$$scale;
  int disp = $mem$$disp;

  if(index!=0){
    // AT = base + (index << scale)
    if (scale == 0) {
      __ daddu(AT, as_Register(base), as_Register(index));
    } else {
      __ dsll(AT, as_Register(index), scale);
      __ daddu(AT, as_Register(base), AT);
    }

    if( Assembler::is_simm16(disp) ) {
      __ sw(R0, AT, disp);
    } else {
      __ move(T9, disp);
      __ daddu(AT, AT, T9);
      __ sw(R0, AT, 0);
    }
  }
  else {
    if( Assembler::is_simm16(disp) ) {
      __ sw(R0, as_Register(base), disp);
    } else {
      __ move(T9, disp);
      __ daddu(AT, as_Register(base), T9);
      __ sw(R0, AT, 0);
    }
  }
%}
// Load Long (64-bit): dst = mem[base + (index << scale) + disp] via ld.
// Scratch registers: AT and T9.
enc_class load_L_enc (mRegL dst, memory mem) %{
  MacroAssembler _masm(&cbuf);
  int base = $mem$$base;
  int index = $mem$$index;
  int scale = $mem$$scale;
  int disp = $mem$$disp;
  Register dst_reg = as_Register($dst$$reg);

  // Explicit null check (2013/03/27): $base may hold a null object, and the
  // server compiler forces the implicit-exception offset to the position of
  // the FIRST instruction of this access — so a probing load from [base + 0]
  // is emitted up front to take the fault at the expected pc.
  __ lw(AT, as_Register(base), 0);

  // History (2012/10/04): a bug found in SortTest (java.util.Arrays::sort1)
  // ruled out the old pair of 32-bit loads
  //   lw(dst_lo, base, disp); lw(dst_hi, base, disp + 4);
  // in favor of the single 64-bit ld sequences below.
  if( index != 0 ) {
    if (scale == 0) {
      __ daddu(AT, as_Register(base), as_Register(index));
    } else {
      __ dsll(AT, as_Register(index), scale);
      __ daddu(AT, as_Register(base), AT);
    }
    if( Assembler::is_simm16(disp) ) {
      __ ld(dst_reg, AT, disp);
    } else {
      __ move(T9, disp);
      __ daddu(AT, AT, T9);
      __ ld(dst_reg, AT, 0);
    }
  } else {
    if( Assembler::is_simm16(disp) ) {
      // NOTE(review): the move through AT looks redundant (ld could use base
      // directly) but is kept — possibly pc-layout related; confirm before
      // changing.
      __ move(AT, as_Register(base));
      __ ld(dst_reg, AT, disp);
    } else {
      __ move(T9, disp);
      __ daddu(AT, as_Register(base), T9);
      __ ld(dst_reg, AT, 0);
    }
  }
%}
// Store Long (64-bit): mem[base + (index << scale) + disp] = src via sd.
// No Loongson fast path in this encoding. Scratch registers: AT and T9.
enc_class store_L_reg_enc (memory mem, mRegL src) %{
  MacroAssembler _masm(&cbuf);
  int base = $mem$$base;
  int index = $mem$$index;
  int scale = $mem$$scale;
  int disp = $mem$$disp;
  Register src_reg = as_Register($src$$reg);

  if( index != 0 ) {
    // AT = base + (index << scale)
    if (scale == 0) {
      __ daddu(AT, as_Register(base), as_Register(index));
    } else {
      __ dsll(AT, as_Register(index), scale);
      __ daddu(AT, as_Register(base), AT);
    }
    if( Assembler::is_simm16(disp) ) {
      __ sd(src_reg, AT, disp);
    } else {
      __ move(T9, disp);
      __ daddu(AT, AT, T9);
      __ sd(src_reg, AT, 0);
    }
  } else {
    if( Assembler::is_simm16(disp) ) {
      // NOTE(review): move through AT looks redundant (sd could address off
      // base directly); kept as-is — confirm before changing.
      __ move(AT, as_Register(base));
      __ sd(src_reg, AT, disp);
    } else {
      __ move(T9, disp);
      __ daddu(AT, as_Register(base), T9);
      __ sd(src_reg, AT, 0);
    }
  }
%}
// Store zero Long (64-bit): mem[base + (index << scale) + disp] = 0 via sd
// from R0. Scratch registers: AT and T9.
enc_class store_L_immL0_enc (memory mem, immL0 src) %{
  MacroAssembler _masm(&cbuf);
  int base = $mem$$base;
  int index = $mem$$index;
  int scale = $mem$$scale;
  int disp = $mem$$disp;

  if( index != 0 ) {
    // AT = base + (index << scale)
    if (scale == 0) {
      __ daddu(AT, as_Register(base), as_Register(index));
    } else {
      __ dsll(AT, as_Register(index), scale);
      __ daddu(AT, as_Register(base), AT);
    }
    if( Assembler::is_simm16(disp) ) {
      __ sd(R0, AT, disp);
    } else {
      __ move(T9, disp);
      // NOTE(review): 32-bit addu used for 64-bit address arithmetic here
      // (daddu used above) — verify this is intentional.
      __ addu(AT, AT, T9);
      __ sd(R0, AT, 0);
    }
  } else {
    if( Assembler::is_simm16(disp) ) {
      // NOTE(review): move through AT looks redundant; kept as-is.
      __ move(AT, as_Register(base));
      __ sd(R0, AT, disp);
    } else {
      __ move(T9, disp);
      __ addu(AT, as_Register(base), T9);
      __ sd(R0, AT, 0);
    }
  }
%}
  // Load a single-precision float: dst = mem[base + (index << scale) + disp].
  // When UseLoongsonISA is set, the fused indexed load gslwxc1 (which takes
  // a base register, an index register and an 8-bit signed offset) is used
  // to avoid a separate address add.  AT and T9 are clobbered as scratch.
  enc_class load_F_enc (regF dst, memory mem) %{
    MacroAssembler _masm(&cbuf);
    int base = $mem$$base;
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;
    FloatRegister dst = $dst$$FloatRegister;

    if( index != 0 ) {
      if( Assembler::is_simm16(disp) ) {
        // gslwxc1's offset field is only 8 bits, hence the extra check.
        if( UseLoongsonISA && Assembler::is_simm(disp, 8) ) {
          if (scale == 0) {
            __ gslwxc1(dst, as_Register(base), as_Register(index), disp);
          } else {
            __ dsll(AT, as_Register(index), scale);
            __ gslwxc1(dst, as_Register(base), AT, disp);
          }
        } else {
          if (scale == 0) {
            __ daddu(AT, as_Register(base), as_Register(index));
          } else {
            __ dsll(AT, as_Register(index), scale);
            __ daddu(AT, as_Register(base), AT);
          }
          __ lwc1(dst, AT, disp);
        }
      } else {
        // Large displacement: AT = base + (index << scale), T9 = disp.
        if (scale == 0) {
          __ daddu(AT, as_Register(base), as_Register(index));
        } else {
          __ dsll(AT, as_Register(index), scale);
          __ daddu(AT, as_Register(base), AT);
        }
        __ move(T9, disp);
        if( UseLoongsonISA ) {
          // Indexed load folds the final add into one instruction.
          __ gslwxc1(dst, AT, T9, 0);
        } else {
          __ daddu(AT, AT, T9);
          __ lwc1(dst, AT, 0);
        }
      }
    } else {
      if( Assembler::is_simm16(disp) ) {
        __ lwc1(dst, as_Register(base), disp);
      } else {
        __ move(T9, disp);
        if( UseLoongsonISA ) {
          __ gslwxc1(dst, as_Register(base), T9, 0);
        } else {
          __ daddu(AT, as_Register(base), T9);
          __ lwc1(dst, AT, 0);
        }
      }
    }
  %}
3216 enc_class store_F_reg_enc (memory mem, regF src) %{
3217 MacroAssembler _masm(&cbuf);
3218 int base = $mem$$base;
3219 int index = $mem$$index;
3220 int scale = $mem$$scale;
3221 int disp = $mem$$disp;
3222 FloatRegister src = $src$$FloatRegister;
3224 if( index != 0 ) {
3225 if( Assembler::is_simm16(disp) ) {
3226 if( UseLoongsonISA && Assembler::is_simm(disp, 8) ) {
3227 if (scale == 0) {
3228 __ gsswxc1(src, as_Register(base), as_Register(index), disp);
3229 } else {
3230 __ dsll(AT, as_Register(index), scale);
3231 __ gsswxc1(src, as_Register(base), AT, disp);
3232 }
3233 } else {
3234 if (scale == 0) {
3235 __ daddu(AT, as_Register(base), as_Register(index));
3236 } else {
3237 __ dsll(AT, as_Register(index), scale);
3238 __ daddu(AT, as_Register(base), AT);
3239 }
3240 __ swc1(src, AT, disp);
3241 }
3242 } else {
3243 if (scale == 0) {
3244 __ daddu(AT, as_Register(base), as_Register(index));
3245 } else {
3246 __ dsll(AT, as_Register(index), scale);
3247 __ daddu(AT, as_Register(base), AT);
3248 }
3249 __ move(T9, disp);
3250 if( UseLoongsonISA ) {
3251 __ gsswxc1(src, AT, T9, 0);
3252 } else {
3253 __ daddu(AT, AT, T9);
3254 __ swc1(src, AT, 0);
3255 }
3256 }
3257 } else {
3258 if( Assembler::is_simm16(disp) ) {
3259 __ swc1(src, as_Register(base), disp);
3260 } else {
3261 __ move(T9, disp);
3262 if( UseLoongsonISA ) {
3263 __ gslwxc1(src, as_Register(base), T9, 0);
3264 } else {
3265 __ daddu(AT, as_Register(base), T9);
3266 __ swc1(src, AT, 0);
3267 }
3268 }
3269 }
3270 %}
3272 enc_class load_D_enc (regD dst, memory mem) %{
3273 MacroAssembler _masm(&cbuf);
3274 int base = $mem$$base;
3275 int index = $mem$$index;
3276 int scale = $mem$$scale;
3277 int disp = $mem$$disp;
3278 FloatRegister dst_reg = as_FloatRegister($dst$$reg);
3280 if( index != 0 ) {
3281 if( Assembler::is_simm16(disp) ) {
3282 if( UseLoongsonISA && Assembler::is_simm(disp, 8) ) {
3283 if (scale == 0) {
3284 __ gsldxc1(dst_reg, as_Register(base), as_Register(index), disp);
3285 } else {
3286 __ dsll(AT, as_Register(index), scale);
3287 __ gsldxc1(dst_reg, as_Register(base), AT, disp);
3288 }
3289 } else {
3290 if (scale == 0) {
3291 __ daddu(AT, as_Register(base), as_Register(index));
3292 } else {
3293 __ dsll(AT, as_Register(index), scale);
3294 __ daddu(AT, as_Register(base), AT);
3295 }
3296 __ ldc1(dst_reg, AT, disp);
3297 }
3298 } else {
3299 if (scale == 0) {
3300 __ daddu(AT, as_Register(base), as_Register(index));
3301 } else {
3302 __ dsll(AT, as_Register(index), scale);
3303 __ daddu(AT, as_Register(base), AT);
3304 }
3305 __ move(T9, disp);
3306 if( UseLoongsonISA ) {
3307 __ gsldxc1(dst_reg, AT, T9, 0);
3308 } else {
3309 __ addu(AT, AT, T9);
3310 __ ldc1(dst_reg, AT, 0);
3311 }
3312 }
3313 } else {
3314 if( Assembler::is_simm16(disp) ) {
3315 __ ldc1(dst_reg, as_Register(base), disp);
3316 } else {
3317 __ move(T9, disp);
3318 if( UseLoongsonISA ) {
3319 __ gsldxc1(dst_reg, as_Register(base), T9, 0);
3320 } else {
3321 __ addu(AT, as_Register(base), T9);
3322 __ ldc1(dst_reg, AT, 0);
3323 }
3324 }
3325 }
3326 %}
3328 enc_class store_D_reg_enc (memory mem, regD src) %{
3329 MacroAssembler _masm(&cbuf);
3330 int base = $mem$$base;
3331 int index = $mem$$index;
3332 int scale = $mem$$scale;
3333 int disp = $mem$$disp;
3334 FloatRegister src_reg = as_FloatRegister($src$$reg);
3336 if( index != 0 ) {
3337 if( Assembler::is_simm16(disp) ) {
3338 if( UseLoongsonISA && Assembler::is_simm(disp, 8) ) {
3339 if (scale == 0) {
3340 __ gssdxc1(src_reg, as_Register(base), as_Register(index), disp);
3341 } else {
3342 __ dsll(AT, as_Register(index), scale);
3343 __ gssdxc1(src_reg, as_Register(base), AT, disp);
3344 }
3345 } else {
3346 if (scale == 0) {
3347 __ daddu(AT, as_Register(base), as_Register(index));
3348 } else {
3349 __ dsll(AT, as_Register(index), scale);
3350 __ daddu(AT, as_Register(base), AT);
3351 }
3352 __ sdc1(src_reg, AT, disp);
3353 }
3354 } else {
3355 if (scale == 0) {
3356 __ daddu(AT, as_Register(base), as_Register(index));
3357 } else {
3358 __ dsll(AT, as_Register(index), scale);
3359 __ daddu(AT, as_Register(base), AT);
3360 }
3361 __ move(T9, disp);
3362 if( UseLoongsonISA ) {
3363 __ gssdxc1(src_reg, AT, T9, 0);
3364 } else {
3365 __ addu(AT, AT, T9);
3366 __ sdc1(src_reg, AT, 0);
3367 }
3368 }
3369 } else {
3370 if( Assembler::is_simm16(disp) ) {
3371 __ sdc1(src_reg, as_Register(base), disp);
3372 } else {
3373 __ move(T9, disp);
3374 if( UseLoongsonISA ) {
3375 __ gssdxc1(src_reg, as_Register(base), T9, 0);
3376 } else {
3377 __ addu(AT, as_Register(base), T9);
3378 __ sdc1(src_reg, AT, 0);
3379 }
3380 }
3381 }
3382 %}
  // Call into the VM runtime: load the absolute target into T9 with the
  // fixed-length li48 sequence (patchable by the relocation machinery),
  // then call through the register.
  enc_class Java_To_Runtime (method meth) %{ // CALL Java_To_Runtime, Java_To_Runtime_Leaf
    MacroAssembler _masm(&cbuf);
    // This is the instruction starting address for relocation info.
    __ block_comment("Java_To_Runtime");
    cbuf.set_insts_mark();
    __ relocate(relocInfo::runtime_call_type);

    __ li48(T9, (long)$meth$$method);
    __ jalr(T9);
    __ nop(); // branch delay slot
  %}
  enc_class Java_Static_Call (method meth) %{ // JAVA STATIC CALL
    // CALL to fixup routine. Fixup routine uses ScopeDesc info to determine
    // who we intended to call.
    MacroAssembler _masm(&cbuf);
    cbuf.set_insts_mark();

    // Pick the relocation type: runtime stub when there is no resolved
    // _method, otherwise optimized-virtual or plain static call.
    if ( !_method ) {
      __ relocate(relocInfo::runtime_call_type);
    } else if(_optimized_virtual) {
      __ relocate(relocInfo::opt_virtual_call_type);
    } else {
      __ relocate(relocInfo::static_call_type);
    }

    __ li(T9, $meth$$method);
    __ jalr(T9);
    __ nop(); // branch delay slot
    if( _method ) { // Emit stub for static call
      emit_java_to_interp(cbuf);
    }
  %}
  /*
   * Inline-cache (virtual) call; emission is delegated to
   * MacroAssembler::ic_call.  [Ref: LIR_Assembler::ic_call() ]
   */
  enc_class Java_Dynamic_Call (method meth) %{ // JAVA DYNAMIC CALL
    MacroAssembler _masm(&cbuf);
    __ block_comment("Java_Dynamic_Call");
    __ ic_call((address)$meth$$method);
  %}
  // Materialize a flags value from the result of the preceding fast
  // lock/unlock code: flags = (AT == 0) ? 0 : 0xFFFFFFFF.
  // NOTE(review): assumes AT holds the lock/unlock outcome when this
  // enc_class runs — confirm against the instructs that use it.
  enc_class Set_Flags_After_Fast_Lock_Unlock(FlagsReg cr) %{
    Register flags = $cr$$Register;
    Label L;

    MacroAssembler _masm(&cbuf);

    __ addu(flags, R0, R0); // flags = 0
    __ beq(AT, R0, L);
    __ delayed()->nop();    // branch delay slot
    __ move(flags, 0xFFFFFFFF); // AT != 0: set all flag bits
    __ bind(L);
  %}
  // Slow-path subtype check: result = 0 if sub is a subtype of super,
  // 1 otherwise.  Delegates to check_klass_subtype_slow_path; tmp (T9)
  // and length are clobbered.
  enc_class enc_PartialSubtypeCheck(mRegP result, mRegP sub, mRegP super, mRegI tmp) %{
    Register result = $result$$Register;
    Register sub = $sub$$Register;
    Register super = $super$$Register;
    Register length = $tmp$$Register;
    Register tmp = T9;
    Label miss;

    /* 2012/9/28 Jin: result may be the same as sub
     *  47c   B40: #	B21 B41 <- B20  Freq: 0.155379
     *  47c   	partialSubtypeCheck result=S1, sub=S1, super=S3, length=S0
     *  4bc   	mov   S2, NULL #@loadConP
     *  4c0   	beq   S1, S2, B21 #@branchConP  P=0.999999 C=-1.000000
     */
    MacroAssembler _masm(&cbuf);
    Label done;
    __ check_klass_subtype_slow_path(sub, super, length, tmp,
                                     NULL, &miss,
                                     /*set_cond_codes:*/ true);
    /* 2013/7/22 Jin: Refer to X86_64's RDI */
    __ move(result, 0); // hit: result = 0
    __ b(done);
    __ nop();

    __ bind(miss);
    __ move(result, 1); // miss: result = 1
    __ bind(done);
  %}
3471 %}
3474 //---------MIPS FRAME--------------------------------------------------------------
3475 // Definition of frame structure and management information.
3476 //
3477 // S T A C K L A Y O U T Allocators stack-slot number
3478 // | (to get allocators register number
3479 // G Owned by | | v add SharedInfo::stack0)
3480 // r CALLER | |
3481 // o | +--------+ pad to even-align allocators stack-slot
3482 // w V | pad0 | numbers; owned by CALLER
3483 // t -----------+--------+----> Matcher::_in_arg_limit, unaligned
3484 // h ^ | in | 5
3485 // | | args | 4 Holes in incoming args owned by SELF
3486 // | | old | | 3
3487 // | | SP-+--------+----> Matcher::_old_SP, even aligned
3488 // v | | ret | 3 return address
3489 // Owned by +--------+
3490 // Self | pad2 | 2 pad to align old SP
3491 // | +--------+ 1
3492 // | | locks | 0
3493 // | +--------+----> SharedInfo::stack0, even aligned
3494 // | | pad1 | 11 pad to align new SP
3495 // | +--------+
3496 // | | | 10
3497 // | | spills | 9 spills
3498 // V | | 8 (pad0 slot for callee)
3499 // -----------+--------+----> Matcher::_out_arg_limit, unaligned
3500 // ^ | out | 7
3501 // | | args | 6 Holes in outgoing args owned by CALLEE
3502 // Owned by new | |
3503 // Callee SP-+--------+----> Matcher::_new_SP, even aligned
3504 // | |
3505 //
3506 // Note 1: Only region 8-11 is determined by the allocator. Region 0-5 is
3507 // known from SELF's arguments and the Java calling convention.
3508 // Region 6-7 is determined per call site.
3509 // Note 2: If the calling convention leaves holes in the incoming argument
3510 // area, those holes are owned by SELF. Holes in the outgoing area
// are owned by the CALLEE.  Holes should not be necessary in the
3512 // incoming area, as the Java calling convention is completely under
3513 // the control of the AD file. Doubles can be sorted and packed to
// avoid holes.  Holes in the outgoing arguments may be necessary for
3515 // varargs C calling conventions.
3516 // Note 3: Region 0-3 is even aligned, with pad2 as needed. Region 3-5 is
3517 // even aligned with pad0 as needed.
3518 // Region 6 is even aligned. Region 6-7 is NOT even aligned;
3519 // region 6-11 is even aligned; it may be padded out more so that
3520 // the region from SP to FP meets the minimum stack alignment.
3521 // Note 4: For I2C adapters, the incoming FP may not meet the minimum stack
3522 // alignment. Region 11, pad1, may be dynamically extended so that
3523 // SP meets the minimum alignment.
// Frame definition: stack layout, calling conventions and return-value
// locations for this MIPS64 port.  See the layout diagram above.
frame %{

  stack_direction(TOWARDS_LOW);

  // These two registers define part of the calling convention
  // between compiled code and the interpreter.
  // SEE StartI2CNode::calling_convention & StartC2INode::calling_convention & StartOSRNode::calling_convention
  // for more information. by yjl 3/16/2006

  inline_cache_reg(T1); // Inline Cache Register
  interpreter_method_oop_reg(S3); // Method Oop Register when calling interpreter
  /*
  inline_cache_reg(T1); // Inline Cache Register or methodOop for I2C
  interpreter_arg_ptr_reg(A0); // Argument pointer for I2C adapters
  */

  // Optional: name the operand used by cisc-spilling to access [stack_pointer + offset]
  cisc_spilling_operand_name(indOffset32);

  // Number of stack slots consumed by locking an object
  // generate Compile::sync_stack_slots
#ifdef _LP64
  sync_stack_slots(2);
#else
  sync_stack_slots(1);
#endif

  frame_pointer(SP);

  // Interpreter stores its frame pointer in a register which is
  // stored to the stack by I2CAdaptors.
  // I2CAdaptors convert from interpreted java to compiled java.

  interpreter_frame_pointer(FP);

  // generate Matcher::stack_alignment
  stack_alignment(StackAlignmentInBytes); //wordSize = sizeof(char*);

  // Number of stack slots between incoming argument block and the start of
  // a new frame.  The PROLOG must add this many slots to the stack.  The
  // EPILOG must remove this many slots.  Intel needs one slot for
  // return address.
  // generate Matcher::in_preserve_stack_slots
  //in_preserve_stack_slots(VerifyStackAtCalls + 2); //Now VerifyStackAtCalls is defined as false ! Leave one stack slot for ra and fp
  in_preserve_stack_slots(4); //Now VerifyStackAtCalls is defined as false ! Leave two stack slots for ra and fp

  // Number of outgoing stack slots killed above the out_preserve_stack_slots
  // for calls to C.  Supports the var-args backing area for register parms.
  varargs_C_out_slots_killed(0);

  // The after-PROLOG location of the return address.  Location of
  // return address specifies a type (REG or STACK) and a number
  // representing the register number (i.e. - use a register name) or
  // stack slot.
  // Ret Addr is on stack in slot 0 if no locks or verification or alignment.
  // Otherwise, it is above the locks and verification slot and alignment word
  //return_addr(STACK -1+ round_to(1+VerifyStackAtCalls+Compile::current()->sync()*Compile::current()->sync_stack_slots(),WordsPerLong));
  return_addr(REG RA);

  // Body of function which returns an integer array locating
  // arguments either in registers or in stack slots.  Passed an array
  // of ideal registers called "sig" and a "length" count.  Stack-slot
  // offsets are based on outgoing arguments, i.e. a CALLER setting up
  // arguments for a CALLEE.  Incoming stack arguments are
  // automatically biased by the preserve_stack_slots field above.

  // generated as Matcher::calling_convention(OptoRegPair *sig, uint length, bool is_outgoing)
  // StartNode::calling_convention calls this. by yjl 3/16/2006
  calling_convention %{
    SharedRuntime::java_calling_convention(sig_bt, regs, length, false);
  %}

  // Body of function which returns an integer array locating
  // arguments either in registers or in stack slots.  Passed an array
  // of ideal registers called "sig" and a "length" count.  Stack-slot
  // offsets are based on outgoing arguments, i.e. a CALLER setting up
  // arguments for a CALLEE.  Incoming stack arguments are
  // automatically biased by the preserve_stack_slots field above.

  // SEE CallRuntimeNode::calling_convention for more information. by yjl 3/16/2006
  c_calling_convention %{
    (void) SharedRuntime::c_calling_convention(sig_bt, regs, /*regs2=*/NULL, length);
  %}

  // Location of C & interpreter return values
  // register(s) contain(s) return value for Op_StartI2C and Op_StartOSR.
  // SEE Matcher::match. by yjl 3/16/2006
  c_return_value %{
    assert( ideal_reg >= Op_RegI && ideal_reg <= Op_RegL, "only return normal values" );
    /* -- , -- , Op_RegN, Op_RegI, Op_RegP, Op_RegF, Op_RegD, Op_RegL */
    static int lo[Op_RegL+1] = { 0, 0, V0_num, V0_num, V0_num, F0_num, F0_num, V0_num };
    static int hi[Op_RegL+1] = { 0, 0, OptoReg::Bad, OptoReg::Bad, V0_H_num, OptoReg::Bad, F0_H_num, V0_H_num };
    return OptoRegPair(hi[ideal_reg],lo[ideal_reg]);
  %}

  // Location of return values
  // register(s) contain(s) return value for Op_StartC2I and Op_Start.
  // SEE Matcher::match. by yjl 3/16/2006

  return_value %{
    assert( ideal_reg >= Op_RegI && ideal_reg <= Op_RegL, "only return normal values" );
    /* -- , -- , Op_RegN, Op_RegI, Op_RegP, Op_RegF, Op_RegD, Op_RegL */
    static int lo[Op_RegL+1] = { 0, 0, V0_num, V0_num, V0_num, F0_num, F0_num, V0_num };
    static int hi[Op_RegL+1] = { 0, 0, OptoReg::Bad, OptoReg::Bad, V0_H_num, OptoReg::Bad, F0_H_num, V0_H_num};
    return OptoRegPair(hi[ideal_reg],lo[ideal_reg]);
  %}

%}
//----------ATTRIBUTES---------------------------------------------------------
//----------Operand Attributes-------------------------------------------------
op_attrib op_cost(0); // Required cost attribute

//----------Instruction Attributes---------------------------------------------
ins_attrib ins_cost(100); // Required cost attribute
ins_attrib ins_size(32); // Required size attribute (in bits)
ins_attrib ins_pc_relative(0); // Required PC Relative flag
ins_attrib ins_short_branch(0); // Required flag: is this instruction a
                                // non-matching short branch variant of some
                                // long branch?
ins_attrib ins_alignment(4); // Required alignment attribute (must be a power of 2)
                             // specifies the alignment that some part of the instruction (not
                             // necessarily the start) requires.  If > 1, a compute_padding()
                             // function must be provided for the instruction
3657 //----------OPERANDS-----------------------------------------------------------
3658 // Operand definitions must precede instruction definitions for correct parsing
3659 // in the ADLC because operands constitute user defined types which are used in
3660 // instruction definitions.
// Vectors
// 64-bit vector operand, allocated in the double-FP register class.
operand vecD() %{
  constraint(ALLOC_IN_RC(dbl_reg));
  match(VecD);

  format %{ %}
  interface(REG_INTER);
%}

// Flags register, used as output of compare instructions
operand FlagsReg() %{
  constraint(ALLOC_IN_RC(mips_flags));
  match(RegFlags);

  format %{ "EFLAGS" %}
  interface(REG_INTER);
%}
//----------Simple Operands----------------------------------------------------
// TODO: do we need to define more special immediate operands?
// Immediate Operands
// Integer Immediate: any 32-bit constant.
operand immI() %{
  match(ConI);
  //TODO: should not match immI8 here LEE
  match(immI8);

  op_cost(20);
  format %{ %}
  interface(CONST_INTER);
%}
// Long Immediate 8-bit
operand immL8()
%{
  predicate(-0x80L <= n->get_long() && n->get_long() < 0x80L);
  match(ConL);

  op_cost(5);
  format %{ %}
  interface(CONST_INTER);
%}

// Constant for test vs zero
operand immI0() %{
  predicate(n->get_int() == 0);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Constant for increment
operand immI1() %{
  predicate(n->get_int() == 1);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Constant for decrement
operand immI_M1() %{
  predicate(n->get_int() == -1);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Integer maximum (2^31 - 1).
operand immI_MaxI() %{
  predicate(n->get_int() == 2147483647);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Valid scale values for addressing modes
operand immI2() %{
  predicate(0 <= n->get_int() && (n->get_int() <= 3));
  match(ConI);

  format %{ %}
  interface(CONST_INTER);
%}

// Signed 8-bit integer immediate.
operand immI8() %{
  predicate((-128 <= n->get_int()) && (n->get_int() <= 127));
  match(ConI);

  op_cost(5);
  format %{ %}
  interface(CONST_INTER);
%}

// Signed 16-bit integer immediate (fits MIPS I-type immediate field).
operand immI16() %{
  predicate((-32768 <= n->get_int()) && (n->get_int() <= 32767));
  match(ConI);

  op_cost(10);
  format %{ %}
  interface(CONST_INTER);
%}

// Constant for long shifts
operand immI_32() %{
  predicate( n->get_int() == 32 );
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

operand immI_63() %{
  predicate( n->get_int() == 63 );
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
// Valid int shift amounts.
operand immI_0_31() %{
  predicate( n->get_int() >= 0 && n->get_int() <= 31 );
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Operand for non-negative integer mask
operand immI_nonneg_mask() %{
  predicate( (n->get_int() >= 0) && (Assembler::is_int_mask(n->get_int()) != -1) );
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Valid long shift amounts above 31.
operand immI_32_63() %{
  predicate( n->get_int() >= 32 && n->get_int() <= 63 );
  match(ConI);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}

// Negatable 16-bit range: (sub x imm) can become (add x -imm).
operand immI16_sub() %{
  predicate((-32767 <= n->get_int()) && (n->get_int() <= 32768));
  match(ConI);

  op_cost(10);
  format %{ %}
  interface(CONST_INTER);
%}

operand immI_0_32767() %{
  predicate( n->get_int() >= 0 && n->get_int() <= 32767 );
  match(ConI);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}

operand immI_0_65535() %{
  predicate( n->get_int() >= 0 && n->get_int() <= 65535 );
  match(ConI);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}

operand immI_1() %{
  predicate( n->get_int() == 1 );
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

operand immI_2() %{
  predicate( n->get_int() == 2 );
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

operand immI_3() %{
  predicate( n->get_int() == 3 );
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

operand immI_7() %{
  predicate( n->get_int() == 7 );
  match(ConI);

  format %{ %}
  interface(CONST_INTER);
%}

// Immediates for special shifts (sign extend)

// Constants for increment
operand immI_16() %{
  predicate( n->get_int() == 16 );
  match(ConI);

  format %{ %}
  interface(CONST_INTER);
%}

operand immI_24() %{
  predicate( n->get_int() == 24 );
  match(ConI);

  format %{ %}
  interface(CONST_INTER);
%}

// Constant for byte-wide masking
operand immI_255() %{
  predicate( n->get_int() == 255 );
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Constant for halfword-wide masking.
operand immI_65535() %{
  predicate( n->get_int() == 65535 );
  match(ConI);

  op_cost(5);
  format %{ %}
  interface(CONST_INTER);
%}

operand immI_65536() %{
  predicate( n->get_int() == 65536 );
  match(ConI);

  op_cost(5);
  format %{ %}
  interface(CONST_INTER);
%}

operand immI_M65536() %{
  predicate( n->get_int() == -65536 );
  match(ConI);

  op_cost(5);
  format %{ %}
  interface(CONST_INTER);
%}
// Pointer Immediate
operand immP() %{
  match(ConP);

  op_cost(10);
  format %{ %}
  interface(CONST_INTER);
%}

// NULL Pointer Immediate
operand immP0() %{
  predicate( n->get_ptr() == 0 );
  match(ConP);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}

// Pointer Immediate: 64-bit
operand immP_set() %{
  match(ConP);

  op_cost(5);
  // formats are generated automatically for constants and base registers
  format %{ %}
  interface(CONST_INTER);
%}

// Pointer Immediate: 64-bit, loaded from the constant table
// (oops, or constants expensive to synthesize inline).
operand immP_load() %{
  predicate(n->bottom_type()->isa_oop_ptr() || (MacroAssembler::insts_for_set64(n->get_ptr()) > 3));
  match(ConP);

  op_cost(5);
  // formats are generated automatically for constants and base registers
  format %{ %}
  interface(CONST_INTER);
%}

// Pointer Immediate: 64-bit, non-oop and cheap to synthesize inline.
operand immP_no_oop_cheap() %{
  predicate(!n->bottom_type()->isa_oop_ptr() && (MacroAssembler::insts_for_set64(n->get_ptr()) <= 3));
  match(ConP);

  op_cost(5);
  // formats are generated automatically for constants and base registers
  format %{ %}
  interface(CONST_INTER);
%}

// Pointer for polling page
operand immP_poll() %{
  predicate(n->get_ptr() != 0 && n->get_ptr() == (intptr_t)os::get_polling_page());
  match(ConP);
  op_cost(5);

  format %{ %}
  interface(CONST_INTER);
%}

// Narrow pointer (compressed oop) immediate.
operand immN() %{
  match(ConN);

  op_cost(10);
  format %{ %}
  interface(CONST_INTER);
%}

// Narrow klass immediate.
operand immNKlass() %{
  match(ConNKlass);

  op_cost(10);
  format %{ %}
  interface(CONST_INTER);
%}

// NULL Pointer Immediate (narrow)
operand immN0() %{
  predicate(n->get_narrowcon() == 0);
  match(ConN);

  op_cost(5);
  format %{ %}
  interface(CONST_INTER);
%}
// Long Immediate
operand immL() %{
  match(ConL);

  op_cost(20);
  format %{ %}
  interface(CONST_INTER);
%}

// Long Immediate zero
operand immL0() %{
  predicate( n->get_long() == 0L );
  match(ConL);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}

operand immL7() %{
  predicate( n->get_long() == 7L );
  match(ConL);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}

operand immL_M1() %{
  predicate( n->get_long() == -1L );
  match(ConL);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}

// bit 0..2 zero
operand immL_M8() %{
  predicate( n->get_long() == -8L );
  match(ConL);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}

// bit 2 zero
operand immL_M5() %{
  predicate( n->get_long() == -5L );
  match(ConL);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}

// bit 1..2 zero
operand immL_M7() %{
  predicate( n->get_long() == -7L );
  match(ConL);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}

// bit 0..1 zero
operand immL_M4() %{
  predicate( n->get_long() == -4L );
  match(ConL);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}

// bit 3..6 zero
operand immL_M121() %{
  predicate( n->get_long() == -121L );
  match(ConL);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}

// Long immediate from 0 to 127.
// Used for a shorter form of long mul by 10.
operand immL_127() %{
  predicate((0 <= n->get_long()) && (n->get_long() <= 127));
  match(ConL);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}

// Operand for non-negative long mask
operand immL_nonneg_mask() %{
  predicate( (n->get_long() >= 0) && (Assembler::is_jlong_mask(n->get_long()) != -1) );
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

operand immL_0_65535() %{
  predicate( n->get_long() >= 0 && n->get_long() <= 65535 );
  match(ConL);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}

// Long Immediate: cheap (materialize in <= 3 instructions)
operand immL_cheap() %{
  predicate(MacroAssembler::insts_for_set64(n->get_long()) <= 3);
  match(ConL);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}

// Long Immediate: expensive (materialize in > 3 instructions)
operand immL_expensive() %{
  predicate(MacroAssembler::insts_for_set64(n->get_long()) > 3);
  match(ConL);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}

// Signed 16-bit long immediate.
operand immL16() %{
  predicate((-32768 <= n->get_long()) && (n->get_long() <= 32767));
  match(ConL);

  op_cost(10);
  format %{ %}
  interface(CONST_INTER);
%}

// Negatable 16-bit range: (sub x imm) can become (add x -imm).
operand immL16_sub() %{
  predicate((-32767 <= n->get_long()) && (n->get_long() <= 32768));
  match(ConL);

  op_cost(10);
  format %{ %}
  interface(CONST_INTER);
%}

// Long Immediate: low 32-bit mask
operand immL_32bits() %{
  predicate(n->get_long() == 0xFFFFFFFFL);
  match(ConL);
  op_cost(20);

  format %{ %}
  interface(CONST_INTER);
%}

// Long Immediate 32-bit signed
operand immL32()
%{
  predicate(n->get_long() == (int) (n->get_long()));
  match(ConL);

  op_cost(15);
  format %{ %}
  interface(CONST_INTER);
%}
// single-precision floating-point zero (bit pattern compare, not ==0.0f,
// so -0.0f does NOT match)
operand immF0() %{
  predicate(jint_cast(n->getf()) == 0);
  match(ConF);

  op_cost(5);
  format %{ %}
  interface(CONST_INTER);
%}

// single-precision floating-point immediate
operand immF() %{
  match(ConF);

  op_cost(20);
  format %{ %}
  interface(CONST_INTER);
%}

// double-precision floating-point zero (bit pattern compare, not ==0.0,
// so -0.0 does NOT match)
operand immD0() %{
  predicate(jlong_cast(n->getd()) == 0);
  match(ConD);

  op_cost(5);
  format %{ %}
  interface(CONST_INTER);
%}

// double-precision floating-point immediate
operand immD() %{
  match(ConD);

  op_cost(20);
  format %{ %}
  interface(CONST_INTER);
%}
4239 // Register Operands
4240 // Integer Register
4241 operand mRegI() %{
4242 constraint(ALLOC_IN_RC(int_reg));
4243 match(RegI);
4245 format %{ %}
4246 interface(REG_INTER);
4247 %}
4249 operand no_Ax_mRegI() %{
4250 constraint(ALLOC_IN_RC(no_Ax_int_reg));
4251 match(RegI);
4252 match(mRegI);
4254 format %{ %}
4255 interface(REG_INTER);
4256 %}
4258 operand mS0RegI() %{
4259 constraint(ALLOC_IN_RC(s0_reg));
4260 match(RegI);
4261 match(mRegI);
4263 format %{ "S0" %}
4264 interface(REG_INTER);
4265 %}
4267 operand mS1RegI() %{
4268 constraint(ALLOC_IN_RC(s1_reg));
4269 match(RegI);
4270 match(mRegI);
4272 format %{ "S1" %}
4273 interface(REG_INTER);
4274 %}
// ---------------------------------------------------------------------------
// Fixed-register integer (RegI) operands.
// Each operand pins allocation to a single-register class (sN_reg, tN_reg,
// aN_reg, vN_reg) so an instruct rule can demand one specific MIPS register
// while still matching the generic RegI / mRegI ideal operands.
// S2..S7 are callee-saved registers.
// ---------------------------------------------------------------------------
4276 operand mS2RegI() %{
4277 constraint(ALLOC_IN_RC(s2_reg));
4278 match(RegI);
4279 match(mRegI);
4281 format %{ "S2" %}
4282 interface(REG_INTER);
4283 %}
4285 operand mS3RegI() %{
4286 constraint(ALLOC_IN_RC(s3_reg));
4287 match(RegI);
4288 match(mRegI);
4290 format %{ "S3" %}
4291 interface(REG_INTER);
4292 %}
4294 operand mS4RegI() %{
4295 constraint(ALLOC_IN_RC(s4_reg));
4296 match(RegI);
4297 match(mRegI);
4299 format %{ "S4" %}
4300 interface(REG_INTER);
4301 %}
4303 operand mS5RegI() %{
4304 constraint(ALLOC_IN_RC(s5_reg));
4305 match(RegI);
4306 match(mRegI);
4308 format %{ "S5" %}
4309 interface(REG_INTER);
4310 %}
4312 operand mS6RegI() %{
4313 constraint(ALLOC_IN_RC(s6_reg));
4314 match(RegI);
4315 match(mRegI);
4317 format %{ "S6" %}
4318 interface(REG_INTER);
4319 %}
4321 operand mS7RegI() %{
4322 constraint(ALLOC_IN_RC(s7_reg));
4323 match(RegI);
4324 match(mRegI);
4326 format %{ "S7" %}
4327 interface(REG_INTER);
4328 %}
// T0..T3, T8, T9: caller-saved temporaries.
4331 operand mT0RegI() %{
4332 constraint(ALLOC_IN_RC(t0_reg));
4333 match(RegI);
4334 match(mRegI);
4336 format %{ "T0" %}
4337 interface(REG_INTER);
4338 %}
4340 operand mT1RegI() %{
4341 constraint(ALLOC_IN_RC(t1_reg));
4342 match(RegI);
4343 match(mRegI);
4345 format %{ "T1" %}
4346 interface(REG_INTER);
4347 %}
4349 operand mT2RegI() %{
4350 constraint(ALLOC_IN_RC(t2_reg));
4351 match(RegI);
4352 match(mRegI);
4354 format %{ "T2" %}
4355 interface(REG_INTER);
4356 %}
4358 operand mT3RegI() %{
4359 constraint(ALLOC_IN_RC(t3_reg));
4360 match(RegI);
4361 match(mRegI);
4363 format %{ "T3" %}
4364 interface(REG_INTER);
4365 %}
4367 operand mT8RegI() %{
4368 constraint(ALLOC_IN_RC(t8_reg));
4369 match(RegI);
4370 match(mRegI);
4372 format %{ "T8" %}
4373 interface(REG_INTER);
4374 %}
4376 operand mT9RegI() %{
4377 constraint(ALLOC_IN_RC(t9_reg));
4378 match(RegI);
4379 match(mRegI);
4381 format %{ "T9" %}
4382 interface(REG_INTER);
4383 %}
// A0..A7: argument-passing registers (N64 ABI).
4385 operand mA0RegI() %{
4386 constraint(ALLOC_IN_RC(a0_reg));
4387 match(RegI);
4388 match(mRegI);
4390 format %{ "A0" %}
4391 interface(REG_INTER);
4392 %}
4394 operand mA1RegI() %{
4395 constraint(ALLOC_IN_RC(a1_reg));
4396 match(RegI);
4397 match(mRegI);
4399 format %{ "A1" %}
4400 interface(REG_INTER);
4401 %}
4403 operand mA2RegI() %{
4404 constraint(ALLOC_IN_RC(a2_reg));
4405 match(RegI);
4406 match(mRegI);
4408 format %{ "A2" %}
4409 interface(REG_INTER);
4410 %}
4412 operand mA3RegI() %{
4413 constraint(ALLOC_IN_RC(a3_reg));
4414 match(RegI);
4415 match(mRegI);
4417 format %{ "A3" %}
4418 interface(REG_INTER);
4419 %}
4421 operand mA4RegI() %{
4422 constraint(ALLOC_IN_RC(a4_reg));
4423 match(RegI);
4424 match(mRegI);
4426 format %{ "A4" %}
4427 interface(REG_INTER);
4428 %}
4430 operand mA5RegI() %{
4431 constraint(ALLOC_IN_RC(a5_reg));
4432 match(RegI);
4433 match(mRegI);
4435 format %{ "A5" %}
4436 interface(REG_INTER);
4437 %}
4439 operand mA6RegI() %{
4440 constraint(ALLOC_IN_RC(a6_reg));
4441 match(RegI);
4442 match(mRegI);
4444 format %{ "A6" %}
4445 interface(REG_INTER);
4446 %}
4448 operand mA7RegI() %{
4449 constraint(ALLOC_IN_RC(a7_reg));
4450 match(RegI);
4451 match(mRegI);
4453 format %{ "A7" %}
4454 interface(REG_INTER);
4455 %}
// V0/V1: return-value registers.
4457 operand mV0RegI() %{
4458 constraint(ALLOC_IN_RC(v0_reg));
4459 match(RegI);
4460 match(mRegI);
4462 format %{ "V0" %}
4463 interface(REG_INTER);
4464 %}
4466 operand mV1RegI() %{
4467 constraint(ALLOC_IN_RC(v1_reg));
4468 match(RegI);
4469 match(mRegI);
4471 format %{ "V1" %}
4472 interface(REG_INTER);
4473 %}
// ---------------------------------------------------------------------------
// Narrow-oop (RegN) operands, used for compressed oops/klass pointers.
// mRegN is the generic form (allocates from the int register class, since a
// narrow oop is a 32-bit value); the tN_/aN_/sN_/vN_RegN variants pin the
// value to one specific register, mirroring the RegI operands above.
// ---------------------------------------------------------------------------
4475 operand mRegN() %{
4476 constraint(ALLOC_IN_RC(int_reg));
4477 match(RegN);
4479 format %{ %}
4480 interface(REG_INTER);
4481 %}
4483 operand t0_RegN() %{
4484 constraint(ALLOC_IN_RC(t0_reg));
4485 match(RegN);
4486 match(mRegN);
4488 format %{ %}
4489 interface(REG_INTER);
4490 %}
4492 operand t1_RegN() %{
4493 constraint(ALLOC_IN_RC(t1_reg));
4494 match(RegN);
4495 match(mRegN);
4497 format %{ %}
4498 interface(REG_INTER);
4499 %}
4501 operand t2_RegN() %{
4502 constraint(ALLOC_IN_RC(t2_reg));
4503 match(RegN);
4504 match(mRegN);
4506 format %{ %}
4507 interface(REG_INTER);
4508 %}
4510 operand t3_RegN() %{
4511 constraint(ALLOC_IN_RC(t3_reg));
4512 match(RegN);
4513 match(mRegN);
4515 format %{ %}
4516 interface(REG_INTER);
4517 %}
4519 operand t8_RegN() %{
4520 constraint(ALLOC_IN_RC(t8_reg));
4521 match(RegN);
4522 match(mRegN);
4524 format %{ %}
4525 interface(REG_INTER);
4526 %}
4528 operand t9_RegN() %{
4529 constraint(ALLOC_IN_RC(t9_reg));
4530 match(RegN);
4531 match(mRegN);
4533 format %{ %}
4534 interface(REG_INTER);
4535 %}
4537 operand a0_RegN() %{
4538 constraint(ALLOC_IN_RC(a0_reg));
4539 match(RegN);
4540 match(mRegN);
4542 format %{ %}
4543 interface(REG_INTER);
4544 %}
4546 operand a1_RegN() %{
4547 constraint(ALLOC_IN_RC(a1_reg));
4548 match(RegN);
4549 match(mRegN);
4551 format %{ %}
4552 interface(REG_INTER);
4553 %}
4555 operand a2_RegN() %{
4556 constraint(ALLOC_IN_RC(a2_reg));
4557 match(RegN);
4558 match(mRegN);
4560 format %{ %}
4561 interface(REG_INTER);
4562 %}
4564 operand a3_RegN() %{
4565 constraint(ALLOC_IN_RC(a3_reg));
4566 match(RegN);
4567 match(mRegN);
4569 format %{ %}
4570 interface(REG_INTER);
4571 %}
4573 operand a4_RegN() %{
4574 constraint(ALLOC_IN_RC(a4_reg));
4575 match(RegN);
4576 match(mRegN);
4578 format %{ %}
4579 interface(REG_INTER);
4580 %}
4582 operand a5_RegN() %{
4583 constraint(ALLOC_IN_RC(a5_reg));
4584 match(RegN);
4585 match(mRegN);
4587 format %{ %}
4588 interface(REG_INTER);
4589 %}
4591 operand a6_RegN() %{
4592 constraint(ALLOC_IN_RC(a6_reg));
4593 match(RegN);
4594 match(mRegN);
4596 format %{ %}
4597 interface(REG_INTER);
4598 %}
4600 operand a7_RegN() %{
4601 constraint(ALLOC_IN_RC(a7_reg));
4602 match(RegN);
4603 match(mRegN);
4605 format %{ %}
4606 interface(REG_INTER);
4607 %}
4609 operand s0_RegN() %{
4610 constraint(ALLOC_IN_RC(s0_reg));
4611 match(RegN);
4612 match(mRegN);
4614 format %{ %}
4615 interface(REG_INTER);
4616 %}
4618 operand s1_RegN() %{
4619 constraint(ALLOC_IN_RC(s1_reg));
4620 match(RegN);
4621 match(mRegN);
4623 format %{ %}
4624 interface(REG_INTER);
4625 %}
4627 operand s2_RegN() %{
4628 constraint(ALLOC_IN_RC(s2_reg));
4629 match(RegN);
4630 match(mRegN);
4632 format %{ %}
4633 interface(REG_INTER);
4634 %}
4636 operand s3_RegN() %{
4637 constraint(ALLOC_IN_RC(s3_reg));
4638 match(RegN);
4639 match(mRegN);
4641 format %{ %}
4642 interface(REG_INTER);
4643 %}
4645 operand s4_RegN() %{
4646 constraint(ALLOC_IN_RC(s4_reg));
4647 match(RegN);
4648 match(mRegN);
4650 format %{ %}
4651 interface(REG_INTER);
4652 %}
4654 operand s5_RegN() %{
4655 constraint(ALLOC_IN_RC(s5_reg));
4656 match(RegN);
4657 match(mRegN);
4659 format %{ %}
4660 interface(REG_INTER);
4661 %}
4663 operand s6_RegN() %{
4664 constraint(ALLOC_IN_RC(s6_reg));
4665 match(RegN);
4666 match(mRegN);
4668 format %{ %}
4669 interface(REG_INTER);
4670 %}
4672 operand s7_RegN() %{
4673 constraint(ALLOC_IN_RC(s7_reg));
4674 match(RegN);
4675 match(mRegN);
4677 format %{ %}
4678 interface(REG_INTER);
4679 %}
4681 operand v0_RegN() %{
4682 constraint(ALLOC_IN_RC(v0_reg));
4683 match(RegN);
4684 match(mRegN);
4686 format %{ %}
4687 interface(REG_INTER);
4688 %}
4690 operand v1_RegN() %{
4691 constraint(ALLOC_IN_RC(v1_reg));
4692 match(RegN);
4693 match(mRegN);
4695 format %{ %}
4696 interface(REG_INTER);
4697 %}
4699 // Pointer Register
// mRegP matches any allocatable pointer register; no_T8_mRegP is the same
// set minus T8, for instructs that clobber T8 internally and therefore must
// not receive it as an input/output register.
// The per-register xx_RegP operands below pin allocation to one register
// (via the *_long_reg single-register classes, since pointers are 64-bit).
4700 operand mRegP() %{
4701 constraint(ALLOC_IN_RC(p_reg));
4702 match(RegP);
4704 format %{ %}
4705 interface(REG_INTER);
4706 %}
4708 operand no_T8_mRegP() %{
4709 constraint(ALLOC_IN_RC(no_T8_p_reg));
4710 match(RegP);
4711 match(mRegP);
4713 format %{ %}
4714 interface(REG_INTER);
4715 %}
4717 operand s0_RegP()
4718 %{
4719 constraint(ALLOC_IN_RC(s0_long_reg));
4720 match(RegP);
4721 match(mRegP);
4722 match(no_T8_mRegP);
4724 format %{ %}
4725 interface(REG_INTER);
4726 %}
4728 operand s1_RegP()
4729 %{
4730 constraint(ALLOC_IN_RC(s1_long_reg));
4731 match(RegP);
4732 match(mRegP);
4733 match(no_T8_mRegP);
4735 format %{ %}
4736 interface(REG_INTER);
4737 %}
4739 operand s2_RegP()
4740 %{
4741 constraint(ALLOC_IN_RC(s2_long_reg));
4742 match(RegP);
4743 match(mRegP);
4744 match(no_T8_mRegP);
4746 format %{ %}
4747 interface(REG_INTER);
4748 %}
4750 operand s3_RegP()
4751 %{
4752 constraint(ALLOC_IN_RC(s3_long_reg));
4753 match(RegP);
4754 match(mRegP);
4755 match(no_T8_mRegP);
4757 format %{ %}
4758 interface(REG_INTER);
4759 %}
4761 operand s4_RegP()
4762 %{
4763 constraint(ALLOC_IN_RC(s4_long_reg));
4764 match(RegP);
4765 match(mRegP);
4766 match(no_T8_mRegP);
4768 format %{ %}
4769 interface(REG_INTER);
4770 %}
4772 operand s5_RegP()
4773 %{
4774 constraint(ALLOC_IN_RC(s5_long_reg));
4775 match(RegP);
4776 match(mRegP);
4777 match(no_T8_mRegP);
4779 format %{ %}
4780 interface(REG_INTER);
4781 %}
4783 operand s6_RegP()
4784 %{
4785 constraint(ALLOC_IN_RC(s6_long_reg));
4786 match(RegP);
4787 match(mRegP);
4788 match(no_T8_mRegP);
4790 format %{ %}
4791 interface(REG_INTER);
4792 %}
4794 operand s7_RegP()
4795 %{
4796 constraint(ALLOC_IN_RC(s7_long_reg));
4797 match(RegP);
4798 match(mRegP);
4799 match(no_T8_mRegP);
4801 format %{ %}
4802 interface(REG_INTER);
4803 %}
4805 operand t0_RegP()
4806 %{
4807 constraint(ALLOC_IN_RC(t0_long_reg));
4808 match(RegP);
4809 match(mRegP);
4810 match(no_T8_mRegP);
4812 format %{ %}
4813 interface(REG_INTER);
4814 %}
4816 operand t1_RegP()
4817 %{
4818 constraint(ALLOC_IN_RC(t1_long_reg));
4819 match(RegP);
4820 match(mRegP);
4821 match(no_T8_mRegP);
4823 format %{ %}
4824 interface(REG_INTER);
4825 %}
4827 operand t2_RegP()
4828 %{
4829 constraint(ALLOC_IN_RC(t2_long_reg));
4830 match(RegP);
4831 match(mRegP);
4832 match(no_T8_mRegP);
4834 format %{ %}
4835 interface(REG_INTER);
4836 %}
4838 operand t3_RegP()
4839 %{
4840 constraint(ALLOC_IN_RC(t3_long_reg));
4841 match(RegP);
4842 match(mRegP);
4843 match(no_T8_mRegP);
4845 format %{ %}
4846 interface(REG_INTER);
4847 %}
// t8_RegP deliberately has no match(no_T8_mRegP): T8 is, by definition,
// not a member of the no_T8 pointer class.
4849 operand t8_RegP()
4850 %{
4851 constraint(ALLOC_IN_RC(t8_long_reg));
4852 match(RegP);
4853 match(mRegP);
4855 format %{ %}
4856 interface(REG_INTER);
4857 %}
4859 operand t9_RegP()
4860 %{
4861 constraint(ALLOC_IN_RC(t9_long_reg));
4862 match(RegP);
4863 match(mRegP);
4864 match(no_T8_mRegP);
4866 format %{ %}
4867 interface(REG_INTER);
4868 %}
4870 operand a0_RegP()
4871 %{
4872 constraint(ALLOC_IN_RC(a0_long_reg));
4873 match(RegP);
4874 match(mRegP);
4875 match(no_T8_mRegP);
4877 format %{ %}
4878 interface(REG_INTER);
4879 %}
4881 operand a1_RegP()
4882 %{
4883 constraint(ALLOC_IN_RC(a1_long_reg));
4884 match(RegP);
4885 match(mRegP);
4886 match(no_T8_mRegP);
4888 format %{ %}
4889 interface(REG_INTER);
4890 %}
4892 operand a2_RegP()
4893 %{
4894 constraint(ALLOC_IN_RC(a2_long_reg));
4895 match(RegP);
4896 match(mRegP);
4897 match(no_T8_mRegP);
4899 format %{ %}
4900 interface(REG_INTER);
4901 %}
4903 operand a3_RegP()
4904 %{
4905 constraint(ALLOC_IN_RC(a3_long_reg));
4906 match(RegP);
4907 match(mRegP);
4908 match(no_T8_mRegP);
4910 format %{ %}
4911 interface(REG_INTER);
4912 %}
4914 operand a4_RegP()
4915 %{
4916 constraint(ALLOC_IN_RC(a4_long_reg));
4917 match(RegP);
4918 match(mRegP);
4919 match(no_T8_mRegP);
4921 format %{ %}
4922 interface(REG_INTER);
4923 %}
4926 operand a5_RegP()
4927 %{
4928 constraint(ALLOC_IN_RC(a5_long_reg));
4929 match(RegP);
4930 match(mRegP);
4931 match(no_T8_mRegP);
4933 format %{ %}
4934 interface(REG_INTER);
4935 %}
4937 operand a6_RegP()
4938 %{
4939 constraint(ALLOC_IN_RC(a6_long_reg));
4940 match(RegP);
4941 match(mRegP);
4942 match(no_T8_mRegP);
4944 format %{ %}
4945 interface(REG_INTER);
4946 %}
4948 operand a7_RegP()
4949 %{
4950 constraint(ALLOC_IN_RC(a7_long_reg));
4951 match(RegP);
4952 match(mRegP);
4953 match(no_T8_mRegP);
4955 format %{ %}
4956 interface(REG_INTER);
4957 %}
4959 operand v0_RegP()
4960 %{
4961 constraint(ALLOC_IN_RC(v0_long_reg));
4962 match(RegP);
4963 match(mRegP);
4964 match(no_T8_mRegP);
4966 format %{ %}
4967 interface(REG_INTER);
4968 %}
4970 operand v1_RegP()
4971 %{
4972 constraint(ALLOC_IN_RC(v1_long_reg));
4973 match(RegP);
4974 match(mRegP);
4975 match(no_T8_mRegP);
4977 format %{ %}
4978 interface(REG_INTER);
4979 %}
// SP/FP pinned-pointer operands: kept for reference but disabled.
4981 /*
4982 operand mSPRegP(mRegP reg) %{
4983 constraint(ALLOC_IN_RC(sp_reg));
4984 match(reg);
4986 format %{ "SP" %}
4987 interface(REG_INTER);
4988 %}
4990 operand mFPRegP(mRegP reg) %{
4991 constraint(ALLOC_IN_RC(fp_reg));
4992 match(reg);
4994 format %{ "FP" %}
4995 interface(REG_INTER);
4996 %}
4997 */
// ---------------------------------------------------------------------------
// Long (RegL, 64-bit integer) operands.  mRegL is the generic form; the
// per-register variants pin allocation to a single register, mirroring the
// RegI/RegP operand sets above.
// ---------------------------------------------------------------------------
4999 operand mRegL() %{
5000 constraint(ALLOC_IN_RC(long_reg));
5001 match(RegL);
5003 format %{ %}
5004 interface(REG_INTER);
5005 %}
5007 operand v0RegL() %{
5008 constraint(ALLOC_IN_RC(v0_long_reg));
5009 match(RegL);
5010 match(mRegL);
5012 format %{ %}
5013 interface(REG_INTER);
5014 %}
5016 operand v1RegL() %{
5017 constraint(ALLOC_IN_RC(v1_long_reg));
5018 match(RegL);
5019 match(mRegL);
5021 format %{ %}
5022 interface(REG_INTER);
5023 %}
// NOTE(review): only a0RegL carries a named format string ("A0"); its
// siblings print nothing.  Harmless, but inconsistent — confirm intent.
5025 operand a0RegL() %{
5026 constraint(ALLOC_IN_RC(a0_long_reg));
5027 match(RegL);
5028 match(mRegL);
5030 format %{ "A0" %}
5031 interface(REG_INTER);
5032 %}
5034 operand a1RegL() %{
5035 constraint(ALLOC_IN_RC(a1_long_reg));
5036 match(RegL);
5037 match(mRegL);
5039 format %{ %}
5040 interface(REG_INTER);
5041 %}
5043 operand a2RegL() %{
5044 constraint(ALLOC_IN_RC(a2_long_reg));
5045 match(RegL);
5046 match(mRegL);
5048 format %{ %}
5049 interface(REG_INTER);
5050 %}
5052 operand a3RegL() %{
5053 constraint(ALLOC_IN_RC(a3_long_reg));
5054 match(RegL);
5055 match(mRegL);
5057 format %{ %}
5058 interface(REG_INTER);
5059 %}
5061 operand t0RegL() %{
5062 constraint(ALLOC_IN_RC(t0_long_reg));
5063 match(RegL);
5064 match(mRegL);
5066 format %{ %}
5067 interface(REG_INTER);
5068 %}
5070 operand t1RegL() %{
5071 constraint(ALLOC_IN_RC(t1_long_reg));
5072 match(RegL);
5073 match(mRegL);
5075 format %{ %}
5076 interface(REG_INTER);
5077 %}
5079 operand t2RegL() %{
5080 constraint(ALLOC_IN_RC(t2_long_reg));
5081 match(RegL);
5082 match(mRegL);
5084 format %{ %}
5085 interface(REG_INTER);
5086 %}
5088 operand t3RegL() %{
5089 constraint(ALLOC_IN_RC(t3_long_reg));
5090 match(RegL);
5091 match(mRegL);
5093 format %{ %}
5094 interface(REG_INTER);
5095 %}
5097 operand t8RegL() %{
5098 constraint(ALLOC_IN_RC(t8_long_reg));
5099 match(RegL);
5100 match(mRegL);
5102 format %{ %}
5103 interface(REG_INTER);
5104 %}
5106 operand a4RegL() %{
5107 constraint(ALLOC_IN_RC(a4_long_reg));
5108 match(RegL);
5109 match(mRegL);
5111 format %{ %}
5112 interface(REG_INTER);
5113 %}
5115 operand a5RegL() %{
5116 constraint(ALLOC_IN_RC(a5_long_reg));
5117 match(RegL);
5118 match(mRegL);
5120 format %{ %}
5121 interface(REG_INTER);
5122 %}
5124 operand a6RegL() %{
5125 constraint(ALLOC_IN_RC(a6_long_reg));
5126 match(RegL);
5127 match(mRegL);
5129 format %{ %}
5130 interface(REG_INTER);
5131 %}
5133 operand a7RegL() %{
5134 constraint(ALLOC_IN_RC(a7_long_reg));
5135 match(RegL);
5136 match(mRegL);
5138 format %{ %}
5139 interface(REG_INTER);
5140 %}
5142 operand s0RegL() %{
5143 constraint(ALLOC_IN_RC(s0_long_reg));
5144 match(RegL);
5145 match(mRegL);
5147 format %{ %}
5148 interface(REG_INTER);
5149 %}
5151 operand s1RegL() %{
5152 constraint(ALLOC_IN_RC(s1_long_reg));
5153 match(RegL);
5154 match(mRegL);
5156 format %{ %}
5157 interface(REG_INTER);
5158 %}
5160 operand s2RegL() %{
5161 constraint(ALLOC_IN_RC(s2_long_reg));
5162 match(RegL);
5163 match(mRegL);
5165 format %{ %}
5166 interface(REG_INTER);
5167 %}
5169 operand s3RegL() %{
5170 constraint(ALLOC_IN_RC(s3_long_reg));
5171 match(RegL);
5172 match(mRegL);
5174 format %{ %}
5175 interface(REG_INTER);
5176 %}
5178 operand s4RegL() %{
5179 constraint(ALLOC_IN_RC(s4_long_reg));
5180 match(RegL);
5181 match(mRegL);
5183 format %{ %}
5184 interface(REG_INTER);
5185 %}
// NOTE(review): no s5RegL / s6RegL operands are defined (s7RegL follows
// s4RegL directly) — presumably no instruct needs them; verify.
5187 operand s7RegL() %{
5188 constraint(ALLOC_IN_RC(s7_long_reg));
5189 match(RegL);
5190 match(mRegL);
5192 format %{ %}
5193 interface(REG_INTER);
5194 %}
5196 // Floating register operands
// Single-precision float value in any FP register.
5197 operand regF() %{
5198 constraint(ALLOC_IN_RC(flt_reg));
5199 match(RegF);
5201 format %{ %}
5202 interface(REG_INTER);
5203 %}
5205 //Double Precision Floating register operands
5206 operand regD() %{
5207 constraint(ALLOC_IN_RC(dbl_reg));
5208 match(RegD);
5210 format %{ %}
5211 interface(REG_INTER);
5212 %}
5214 //----------Memory Operands----------------------------------------------------
// Each memory operand describes one addressing-mode shape the matcher may
// fold into a load/store: a base register, optional index register, scale
// (shift amount), and displacement.  The *Narrow / *NarrowKlass variants
// accept a compressed oop / klass base and are guarded by a predicate that
// requires the corresponding narrow shift to be zero (so the decoded
// address is just base + heap offset).
5215 // Indirect Memory Operand
5216 operand indirect(mRegP reg) %{
5217 constraint(ALLOC_IN_RC(p_reg));
5218 match(reg);
5220 format %{ "[$reg] @ indirect" %}
5221 interface(MEMORY_INTER) %{
5222 base($reg);
5223 index(0x0); /* NO_INDEX */
5224 scale(0x0);
5225 disp(0x0);
5226 %}
5227 %}
5229 // Indirect Memory Plus Short Offset Operand
5230 operand indOffset8(mRegP reg, immL8 off)
5231 %{
5232 constraint(ALLOC_IN_RC(p_reg));
5233 match(AddP reg off);
5235 format %{ "[$reg + $off (8-bit)] @ indOffset8" %}
5236 interface(MEMORY_INTER) %{
5237 base($reg);
5238 index(0x0); /* NO_INDEX */
5239 scale(0x0);
5240 disp($off);
5241 %}
5242 %}
5244 // Indirect Memory Times Scale Plus Index Register
5245 operand indIndexScale(mRegP reg, mRegL lreg, immI2 scale)
5246 %{
5247 constraint(ALLOC_IN_RC(p_reg));
5248 match(AddP reg (LShiftL lreg scale));
5250 op_cost(10);
5251 format %{"[$reg + $lreg << $scale] @ indIndexScale" %}
5252 interface(MEMORY_INTER) %{
5253 base($reg);
5254 index($lreg);
5255 scale($scale);
5256 disp(0x0);
5257 %}
5258 %}
5261 // [base + index + offset]
5262 operand baseIndexOffset8(mRegP base, mRegL index, immL8 off)
5263 %{
5264 constraint(ALLOC_IN_RC(p_reg));
5265 op_cost(5);
5266 match(AddP (AddP base index) off);
5268 format %{ "[$base + $index + $off (8-bit)] @ baseIndexOffset8" %}
5269 interface(MEMORY_INTER) %{
5270 base($base);
5271 index($index);
5272 scale(0x0);
5273 disp($off);
5274 %}
5275 %}
5277 // [base + index + offset]
// Same as baseIndexOffset8 but the index arrives as an int widened with
// ConvI2L (typical for array indexing with an int loop variable).
5278 operand baseIndexOffset8_convI2L(mRegP base, mRegI index, immL8 off)
5279 %{
5280 constraint(ALLOC_IN_RC(p_reg));
5281 op_cost(5);
5282 match(AddP (AddP base (ConvI2L index)) off);
5284 format %{ "[$base + $index + $off (8-bit)] @ baseIndexOffset8_convI2L" %}
5285 interface(MEMORY_INTER) %{
5286 base($base);
5287 index($index);
5288 scale(0x0);
5289 disp($off);
5290 %}
5291 %}
5293 // Indirect Memory Times Scale Plus Index Register Plus Offset Operand
5294 operand indIndexScaleOffset8(mRegP reg, immL8 off, mRegL lreg, immI2 scale)
5295 %{
5296 constraint(ALLOC_IN_RC(p_reg));
5297 match(AddP (AddP reg (LShiftL lreg scale)) off);
5299 op_cost(10);
5300 format %{"[$reg + $off + $lreg << $scale] @ indIndexScaleOffset8" %}
5301 interface(MEMORY_INTER) %{
5302 base($reg);
5303 index($lreg);
5304 scale($scale);
5305 disp($off);
5306 %}
5307 %}
5309 operand indIndexScaleOffset8_convI2L(mRegP reg, immL8 off, mRegI ireg, immI2 scale)
5310 %{
5311 constraint(ALLOC_IN_RC(p_reg));
5312 match(AddP (AddP reg (LShiftL (ConvI2L ireg) scale)) off);
5314 op_cost(10);
5315 format %{"[$reg + $off + $ireg << $scale] @ indIndexScaleOffset8_convI2L" %}
5316 interface(MEMORY_INTER) %{
5317 base($reg);
5318 index($ireg);
5319 scale($scale);
5320 disp($off);
5321 %}
5322 %}
5324 // [base + index<<scale + offset]
5325 operand basePosIndexScaleOffset8(mRegP base, mRegI index, immL8 off, immI_0_31 scale)
5326 %{
5327 constraint(ALLOC_IN_RC(p_reg));
5328 //predicate(n->in(2)->in(3)->in(1)->as_Type()->type()->is_long()->_lo >= 0);
5329 op_cost(10);
5330 match(AddP (AddP base (LShiftL (ConvI2L index) scale)) off);
5332 format %{ "[$base + $index << $scale + $off (8-bit)] @ basePosIndexScaleOffset8" %}
5333 interface(MEMORY_INTER) %{
5334 base($base);
5335 index($index);
5336 scale($scale);
5337 disp($off);
5338 %}
5339 %}
5341 // Indirect Memory Times Scale Plus Index Register Plus Offset Operand
5342 operand indIndexScaleOffsetNarrow(mRegN reg, immL8 off, mRegL lreg, immI2 scale)
5343 %{
5344 predicate(Universe::narrow_oop_shift() == 0);
5345 constraint(ALLOC_IN_RC(p_reg));
5346 match(AddP (AddP (DecodeN reg) (LShiftL lreg scale)) off);
5348 op_cost(10);
5349 format %{"[$reg + $off + $lreg << $scale] @ indIndexScaleOffsetNarrow" %}
5350 interface(MEMORY_INTER) %{
5351 base($reg);
5352 index($lreg);
5353 scale($scale);
5354 disp($off);
5355 %}
5356 %}
5358 // [base + index<<scale + offset] for compressed Oops
5359 operand indPosIndexI2LScaleOffset8Narrow(mRegN base, mRegI index, immL8 off, immI_0_31 scale)
5360 %{
5361 constraint(ALLOC_IN_RC(p_reg));
5362 //predicate(Universe::narrow_oop_shift() == 0 && n->in(2)->in(3)->in(1)->as_Type()->type()->is_long()->_lo >= 0);
5363 predicate(Universe::narrow_oop_shift() == 0);
5364 op_cost(10);
5365 match(AddP (AddP (DecodeN base) (LShiftL (ConvI2L index) scale)) off);
5367 format %{ "[$base + $index << $scale + $off (8-bit)] @ indPosIndexI2LScaleOffset8Narrow" %}
5368 interface(MEMORY_INTER) %{
5369 base($base);
5370 index($index);
5371 scale($scale);
5372 disp($off);
5373 %}
5374 %}
5376 //FIXME: I think it's better to limit the immI to be 16-bit at most!
5377 // Indirect Memory Plus Long Offset Operand
5378 operand indOffset32(mRegP reg, immL32 off) %{
5379 constraint(ALLOC_IN_RC(p_reg));
5380 op_cost(20);
5381 match(AddP reg off);
5383 format %{ "[$reg + $off (32-bit)] @ indOffset32" %}
5384 interface(MEMORY_INTER) %{
5385 base($reg);
5386 index(0x0); /* NO_INDEX */
5387 scale(0x0);
5388 disp($off);
5389 %}
5390 %}
5392 // Indirect Memory Plus Index Register
5393 operand indIndex(mRegP addr, mRegL index) %{
5394 constraint(ALLOC_IN_RC(p_reg));
5395 match(AddP addr index);
5397 op_cost(20);
5398 format %{"[$addr + $index] @ indIndex" %}
5399 interface(MEMORY_INTER) %{
5400 base($addr);
5401 index($index);
5402 scale(0x0);
5403 disp(0x0);
5404 %}
5405 %}
// Compressed-klass addressing modes; valid only when the klass decode is a
// plain identity/base-add (narrow_klass_shift() == 0).
5407 operand indirectNarrowKlass(mRegN reg)
5408 %{
5409 predicate(Universe::narrow_klass_shift() == 0);
5410 constraint(ALLOC_IN_RC(p_reg));
5411 op_cost(10);
5412 match(DecodeNKlass reg);
5414 format %{ "[$reg] @ indirectNarrowKlass" %}
5415 interface(MEMORY_INTER) %{
5416 base($reg);
5417 index(0x0);
5418 scale(0x0);
5419 disp(0x0);
5420 %}
5421 %}
5423 operand indOffset8NarrowKlass(mRegN reg, immL8 off)
5424 %{
5425 predicate(Universe::narrow_klass_shift() == 0);
5426 constraint(ALLOC_IN_RC(p_reg));
5427 op_cost(10);
5428 match(AddP (DecodeNKlass reg) off);
5430 format %{ "[$reg + $off (8-bit)] @ indOffset8NarrowKlass" %}
5431 interface(MEMORY_INTER) %{
5432 base($reg);
5433 index(0x0);
5434 scale(0x0);
5435 disp($off);
5436 %}
5437 %}
5439 operand indOffset32NarrowKlass(mRegN reg, immL32 off)
5440 %{
5441 predicate(Universe::narrow_klass_shift() == 0);
5442 constraint(ALLOC_IN_RC(p_reg));
5443 op_cost(10);
5444 match(AddP (DecodeNKlass reg) off);
5446 format %{ "[$reg + $off (32-bit)] @ indOffset32NarrowKlass" %}
5447 interface(MEMORY_INTER) %{
5448 base($reg);
5449 index(0x0);
5450 scale(0x0);
5451 disp($off);
5452 %}
5453 %}
5455 operand indIndexOffsetNarrowKlass(mRegN reg, mRegL lreg, immL32 off)
5456 %{
5457 predicate(Universe::narrow_klass_shift() == 0);
5458 constraint(ALLOC_IN_RC(p_reg));
5459 match(AddP (AddP (DecodeNKlass reg) lreg) off);
5461 op_cost(10);
5462 format %{"[$reg + $off + $lreg] @ indIndexOffsetNarrowKlass" %}
5463 interface(MEMORY_INTER) %{
5464 base($reg);
5465 index($lreg);
5466 scale(0x0);
5467 disp($off);
5468 %}
5469 %}
5471 operand indIndexNarrowKlass(mRegN reg, mRegL lreg)
5472 %{
5473 predicate(Universe::narrow_klass_shift() == 0);
5474 constraint(ALLOC_IN_RC(p_reg));
5475 match(AddP (DecodeNKlass reg) lreg);
5477 op_cost(10);
5478 format %{"[$reg + $lreg] @ indIndexNarrowKlass" %}
5479 interface(MEMORY_INTER) %{
5480 base($reg);
5481 index($lreg);
5482 scale(0x0);
5483 disp(0x0);
5484 %}
5485 %}
5487 // Indirect Memory Operand
// Compressed-oop addressing modes; valid only when narrow_oop_shift() == 0.
5488 operand indirectNarrow(mRegN reg)
5489 %{
5490 predicate(Universe::narrow_oop_shift() == 0);
5491 constraint(ALLOC_IN_RC(p_reg));
5492 op_cost(10);
5493 match(DecodeN reg);
5495 format %{ "[$reg] @ indirectNarrow" %}
5496 interface(MEMORY_INTER) %{
5497 base($reg);
5498 index(0x0);
5499 scale(0x0);
5500 disp(0x0);
5501 %}
5502 %}
5504 // Indirect Memory Plus Short Offset Operand
5505 operand indOffset8Narrow(mRegN reg, immL8 off)
5506 %{
5507 predicate(Universe::narrow_oop_shift() == 0);
5508 constraint(ALLOC_IN_RC(p_reg));
5509 op_cost(10);
5510 match(AddP (DecodeN reg) off);
5512 format %{ "[$reg + $off (8-bit)] @ indOffset8Narrow" %}
5513 interface(MEMORY_INTER) %{
5514 base($reg);
5515 index(0x0);
5516 scale(0x0);
5517 disp($off);
5518 %}
5519 %}
5521 // Indirect Memory Plus Index Register Plus Offset Operand
5522 operand indIndexOffset8Narrow(mRegN reg, mRegL lreg, immL8 off)
5523 %{
5524 predicate(Universe::narrow_oop_shift() == 0);
5525 constraint(ALLOC_IN_RC(p_reg));
5526 match(AddP (AddP (DecodeN reg) lreg) off);
5528 op_cost(10);
5529 format %{"[$reg + $off + $lreg] @ indIndexOffset8Narrow" %}
5530 interface(MEMORY_INTER) %{
5531 base($reg);
5532 index($lreg);
5533 scale(0x0);
5534 disp($off);
5535 %}
5536 %}
5538 //----------Load Long Memory Operands------------------------------------------
5539 // The load-long idiom will use its address expression again after loading
5540 // the first word of the long. If the load-long destination overlaps with
5541 // registers used in the addressing expression, the 2nd half will be loaded
5542 // from a clobbered address. Fix this by requiring that load-long use
5543 // address registers that do not overlap with the load-long target.
5545 // load-long support
// High op_cost discourages the allocator from choosing these operands
// unless a load-long actually requires them.
5546 operand load_long_RegP() %{
5547 constraint(ALLOC_IN_RC(p_reg));
5548 match(RegP);
5549 match(mRegP);
5550 op_cost(100);
5551 format %{ %}
5552 interface(REG_INTER);
5553 %}
5555 // Indirect Memory Operand Long
5556 operand load_long_indirect(load_long_RegP reg) %{
5557 constraint(ALLOC_IN_RC(p_reg));
5558 match(reg);
5560 format %{ "[$reg]" %}
5561 interface(MEMORY_INTER) %{
5562 base($reg);
5563 index(0x0);
5564 scale(0x0);
5565 disp(0x0);
5566 %}
5567 %}
5569 // Indirect Memory Plus Long Offset Operand
5570 operand load_long_indOffset32(load_long_RegP reg, immL32 off) %{
5571 match(AddP reg off);
5573 format %{ "[$reg + $off]" %}
5574 interface(MEMORY_INTER) %{
5575 base($reg);
5576 index(0x0);
5577 scale(0x0);
5578 disp($off);
5579 %}
5580 %}
5582 //----------Conditional Branch Operands----------------------------------------
5583 // Comparison Op - This is the operation of the comparison, and is limited to
5584 // the following set of codes:
5585 // L (<), LE (<=), G (>), GE (>=), E (==), NE (!=)
5586 //
5587 // Other attributes of the comparison, such as unsignedness, are specified
5588 // by the comparison instruction that sets a condition code flags register.
5589 // That result is represented by a flags operand whose subtype is appropriate
5590 // to the unsignedness (etc.) of the comparison.
5591 //
5592 // Later, the instruction which matches both the Comparison Op (a Bool) and
5593 // the flags (produced by the Cmp) specifies the coding of the comparison op
5594 // by matching a specific subtype of Bool operand below, such as cmpOpU.
5596 // Comparison Code
5597 operand cmpOp() %{
5598 match(Bool);
5600 format %{ "" %}
5601 interface(COND_INTER) %{
5602 equal(0x01);
5603 not_equal(0x02);
5604 greater(0x03);
5605 greater_equal(0x04);
5606 less(0x05);
5607 less_equal(0x06);
5608 overflow(0x7);
5609 no_overflow(0x8);
5610 %}
5611 %}
5614 // Comparison Code
5615 // Comparison Code, unsigned compare. Used by FP also, with
5616 // C2 (unordered) turned into GT or LT already. The other bits
5617 // C0 and C3 are turned into Carry & Zero flags.
// NOTE: encodings are identical to cmpOp; instructs that match cmpOpU are
// responsible for emitting the unsigned form of the compare itself.
5618 operand cmpOpU() %{
5619 match(Bool);
5621 format %{ "" %}
5622 interface(COND_INTER) %{
5623 equal(0x01);
5624 not_equal(0x02);
5625 greater(0x03);
5626 greater_equal(0x04);
5627 less(0x05);
5628 less_equal(0x06);
5629 overflow(0x7);
5630 no_overflow(0x8);
5631 %}
5632 %}
// Legacy x86-style condition operands, kept disabled for reference.
5634 /*
5635 // Comparison Code, unsigned compare. Used by FP also, with
5636 // C2 (unordered) turned into GT or LT already. The other bits
5637 // C0 and C3 are turned into Carry & Zero flags.
5638 operand cmpOpU() %{
5639 match(Bool);
5641 format %{ "" %}
5642 interface(COND_INTER) %{
5643 equal(0x4);
5644 not_equal(0x5);
5645 less(0x2);
5646 greater_equal(0x3);
5647 less_equal(0x6);
5648 greater(0x7);
5649 %}
5650 %}
5651 */
5652 /*
5653 // Comparison Code for FP conditional move
5654 operand cmpOp_fcmov() %{
5655 match(Bool);
5657 format %{ "" %}
5658 interface(COND_INTER) %{
5659 equal (0x01);
5660 not_equal (0x02);
5661 greater (0x03);
5662 greater_equal(0x04);
5663 less (0x05);
5664 less_equal (0x06);
5665 %}
5666 %}
5668 // Comparison Code used in long compares
5669 operand cmpOp_commute() %{
5670 match(Bool);
5672 format %{ "" %}
5673 interface(COND_INTER) %{
5674 equal(0x4);
5675 not_equal(0x5);
5676 less(0xF);
5677 greater_equal(0xE);
5678 less_equal(0xD);
5679 greater(0xC);
5680 %}
5681 %}
5682 */
5684 //----------Special Memory Operands--------------------------------------------
5685 // Stack Slot Operand - This operand is used for loading and storing temporary
5686 // values on the stack where a match requires a value to
5687 // flow through memory.
// All stack-slot operands address [SP + slot-offset]; 0x1d is the register
// encoding of SP in this description.
5688 operand stackSlotP(sRegP reg) %{
5689 constraint(ALLOC_IN_RC(stack_slots));
5690 // No match rule because this operand is only generated in matching
5691 op_cost(50);
5692 format %{ "[$reg]" %}
5693 interface(MEMORY_INTER) %{
5694 base(0x1d); // SP
5695 index(0x0); // No Index
5696 scale(0x0); // No Scale
5697 disp($reg); // Stack Offset
5698 %}
5699 %}
5701 operand stackSlotI(sRegI reg) %{
5702 constraint(ALLOC_IN_RC(stack_slots));
5703 // No match rule because this operand is only generated in matching
5704 op_cost(50);
5705 format %{ "[$reg]" %}
5706 interface(MEMORY_INTER) %{
5707 base(0x1d); // SP
5708 index(0x0); // No Index
5709 scale(0x0); // No Scale
5710 disp($reg); // Stack Offset
5711 %}
5712 %}
5714 operand stackSlotF(sRegF reg) %{
5715 constraint(ALLOC_IN_RC(stack_slots));
5716 // No match rule because this operand is only generated in matching
5717 op_cost(50);
5718 format %{ "[$reg]" %}
5719 interface(MEMORY_INTER) %{
5720 base(0x1d); // SP
5721 index(0x0); // No Index
5722 scale(0x0); // No Scale
5723 disp($reg); // Stack Offset
5724 %}
5725 %}
5727 operand stackSlotD(sRegD reg) %{
5728 constraint(ALLOC_IN_RC(stack_slots));
5729 // No match rule because this operand is only generated in matching
5730 op_cost(50);
5731 format %{ "[$reg]" %}
5732 interface(MEMORY_INTER) %{
5733 base(0x1d); // SP
5734 index(0x0); // No Index
5735 scale(0x0); // No Scale
5736 disp($reg); // Stack Offset
5737 %}
5738 %}
5740 operand stackSlotL(sRegL reg) %{
5741 constraint(ALLOC_IN_RC(stack_slots));
5742 // No match rule because this operand is only generated in matching
5743 op_cost(50);
5744 format %{ "[$reg]" %}
5745 interface(MEMORY_INTER) %{
5746 base(0x1d); // SP
5747 index(0x0); // No Index
5748 scale(0x0); // No Scale
5749 disp($reg); // Stack Offset
5750 %}
5751 %}
5754 //------------------------OPERAND CLASSES--------------------------------------
5755 //opclass memory( direct, indirect, indOffset16, indOffset32, indOffset32X, indIndexOffset );
// "memory" groups every addressing-mode operand accepted by generic
// load/store instructs; any new memory operand must be added here too.
5756 opclass memory( indirect, indirectNarrow, indOffset8, indOffset32, indIndex, indIndexScale, load_long_indirect, load_long_indOffset32, baseIndexOffset8, baseIndexOffset8_convI2L, indIndexScaleOffset8, indIndexScaleOffset8_convI2L, basePosIndexScaleOffset8, indIndexScaleOffsetNarrow, indPosIndexI2LScaleOffset8Narrow, indOffset8Narrow, indIndexOffset8Narrow);
5759 //----------PIPELINE-----------------------------------------------------------
5760 // Rules which define the behavior of the target architectures pipeline.
5762 pipeline %{
5764 //----------ATTRIBUTES---------------------------------------------------------
5765 attributes %{
5766 fixed_size_instructions; // Fixed size instructions
5767 branch_has_delay_slot; // branch have delay slot in gs2
5768 max_instructions_per_bundle = 1; // 1 instruction per bundle
5769 max_bundles_per_cycle = 4; // Up to 4 bundles per cycle
5770 bundle_unit_size=4;
5771 instruction_unit_size = 4; // An instruction is 4 bytes long
5772 instruction_fetch_unit_size = 16; // The processor fetches one line
5773 instruction_fetch_units = 1; // of 16 bytes
5775 // List of nop instructions
5776 nops( MachNop );
5777 %}
5779 //----------RESOURCES----------------------------------------------------------
5780 // Resources are the functional units available to the machine
5782 resources(D1, D2, D3, D4, DECODE = D1 | D2 | D3| D4, ALU1, ALU2, ALU = ALU1 | ALU2, FPU1, FPU2, FPU = FPU1 | FPU2, MEM, BR);
5784 //----------PIPELINE DESCRIPTION-----------------------------------------------
5785 // Pipeline Description specifies the stages in the machine's pipeline
5787 // IF: fetch
5788 // ID: decode
5789 // RD: read
5790 // CA: caculate
5791 // WB: write back
5792 // CM: commit
5794 pipe_desc(IF, ID, RD, CA, WB, CM);
//----------PIPELINE CLASSES---------------------------------------------------
// Pipeline Classes describe the stages in which input and output are
// referenced by the hardware pipeline.

//No.1 Integer ALU reg-reg operation : dst <-- reg1 op reg2
// Single-cycle ALU op; result usable one stage after writeback.
pipe_class ialu_regI_regI(mRegI dst, mRegI src1, mRegI src2) %{
  single_instruction;
  src1 : RD(read);
  src2 : RD(read);
  dst : WB(write)+1;
  DECODE : ID;
  ALU : CA;
%}

//No.19 Integer mult operation : dst <-- reg1 mult reg2
// Multiply issues only on ALU2; result latency is WB+5 cycles.
pipe_class ialu_mult(mRegI dst, mRegI src1, mRegI src2) %{
  src1 : RD(read);
  src2 : RD(read);
  dst : WB(write)+5;
  DECODE : ID;
  ALU2 : CA;
%}

// Long multiply: ALU2 only, WB+10 result latency.
pipe_class mulL_reg_reg(mRegL dst, mRegL src1, mRegL src2) %{
  src1 : RD(read);
  src2 : RD(read);
  dst : WB(write)+10;
  DECODE : ID;
  ALU2 : CA;
%}

//No.19 Integer div operation : dst <-- reg1 div reg2
// Divide issues only on ALU2; WB+10 result latency.
pipe_class ialu_div(mRegI dst, mRegI src1, mRegI src2) %{
  src1 : RD(read);
  src2 : RD(read);
  dst : WB(write)+10;
  DECODE : ID;
  ALU2 : CA;
%}

//No.19 Integer mod operation : dst <-- reg1 mod reg2
// Modeled as two instructions (divide plus a move of the remainder).
pipe_class ialu_mod(mRegI dst, mRegI src1, mRegI src2) %{
  instruction_count(2);
  src1 : RD(read);
  src2 : RD(read);
  dst : WB(write)+10;
  DECODE : ID;
  ALU2 : CA;
%}

//No.15 Long ALU reg-reg operation : dst <-- reg1 op reg2
pipe_class ialu_regL_regL(mRegL dst, mRegL src1, mRegL src2) %{
  instruction_count(2);
  src1 : RD(read);
  src2 : RD(read);
  dst : WB(write);
  DECODE : ID;
  ALU : CA;
%}

//No.18 Long ALU reg-imm16 operation : dst <-- reg1 op imm16
pipe_class ialu_regL_imm16(mRegL dst, mRegL src) %{
  instruction_count(2);
  src : RD(read);
  dst : WB(write);
  DECODE : ID;
  ALU : CA;
%}

//no.16 load Long from memory :
// Long load on the memory port; WB+5 load-to-use latency.
pipe_class ialu_loadL(mRegL dst, memory mem) %{
  instruction_count(2);
  mem : RD(read);
  dst : WB(write)+5;
  DECODE : ID;
  MEM : RD;
%}

//No.17 Store Long to Memory :
// Store produces no register result, so no WB entry is modeled.
pipe_class ialu_storeL(mRegL src, memory mem) %{
  instruction_count(2);
  mem : RD(read);
  src : RD(read);
  DECODE : ID;
  MEM : RD;
%}
//No.2 Integer ALU reg-imm16 operation : dst <-- reg1 op imm16
pipe_class ialu_regI_imm16(mRegI dst, mRegI src) %{
  single_instruction;
  src : RD(read);
  dst : WB(write);
  DECODE : ID;
  ALU : CA;
%}

//No.3 Integer move operation : dst <-- reg
pipe_class ialu_regI_mov(mRegI dst, mRegI src) %{
  src : RD(read);
  dst : WB(write);
  DECODE : ID;
  ALU : CA;
%}

//No.4 No instructions : do nothing
// Used by pseudo-instructions that emit no code.
pipe_class empty( ) %{
  instruction_count(0);
%}

//No.5 UnConditional branch :
// Branches end the current bundle (multiple_bundles) and use the BR unit.
pipe_class pipe_jump( label labl ) %{
  multiple_bundles;
  DECODE : ID;
  BR : RD;
%}

//No.6 ALU Conditional branch :
pipe_class pipe_alu_branch(mRegI src1, mRegI src2, label labl ) %{
  multiple_bundles;
  src1 : RD(read);
  src2 : RD(read);
  DECODE : ID;
  BR : RD;
%}

//no.7 load integer from memory :
// Integer load; WB+3 load-to-use latency.
pipe_class ialu_loadI(mRegI dst, memory mem) %{
  mem : RD(read);
  dst : WB(write)+3;
  DECODE : ID;
  MEM : RD;
%}

//No.8 Store Integer to Memory :
pipe_class ialu_storeI(mRegI src, memory mem) %{
  mem : RD(read);
  src : RD(read);
  DECODE : ID;
  MEM : RD;
%}
//No.10 Floating FPU reg-reg operation : dst <-- reg1 op reg2
pipe_class fpu_regF_regF(regF dst, regF src1, regF src2) %{
  src1 : RD(read);
  src2 : RD(read);
  dst : WB(write);
  DECODE : ID;
  FPU : CA;
%}

//No.22 Floating div operation : dst <-- reg1 div reg2
// FP divide issues only on FPU2.
pipe_class fpu_div(regF dst, regF src1, regF src2) %{
  src1 : RD(read);
  src2 : RD(read);
  dst : WB(write);
  DECODE : ID;
  FPU2 : CA;
%}

// Int-to-double conversion; issues on FPU1 only.
pipe_class fcvt_I2D(regD dst, mRegI src) %{
  src : RD(read);
  dst : WB(write);
  DECODE : ID;
  FPU1 : CA;
%}

// Double-to-int conversion; issues on FPU1 only.
pipe_class fcvt_D2I(mRegI dst, regD src) %{
  src : RD(read);
  dst : WB(write);
  DECODE : ID;
  FPU1 : CA;
%}

// Move FPR -> GPR (mfc1); modeled on the memory port.
pipe_class pipe_mfc1(mRegI dst, regD src) %{
  src : RD(read);
  dst : WB(write);
  DECODE : ID;
  MEM : RD;
%}

// Move GPR -> FPR (mtc1); occupies the memory port for 5 cycles.
pipe_class pipe_mtc1(regD dst, mRegI src) %{
  src : RD(read);
  dst : WB(write);
  DECODE : ID;
  MEM : RD(5);
%}

//No.23 Floating sqrt operation : dst <-- reg1 sqrt reg2
pipe_class fpu_sqrt(regF dst, regF src1, regF src2) %{
  multiple_bundles;
  src1 : RD(read);
  src2 : RD(read);
  dst : WB(write);
  DECODE : ID;
  FPU2 : CA;
%}
//No.11 Load Floating from Memory :
// FP load; WB+3 load-to-use latency.
pipe_class fpu_loadF(regF dst, memory mem) %{
  instruction_count(1);
  mem : RD(read);
  dst : WB(write)+3;
  DECODE : ID;
  MEM : RD;
%}

//No.12 Store Floating to Memory :
pipe_class fpu_storeF(regF src, memory mem) %{
  instruction_count(1);
  mem : RD(read);
  src : RD(read);
  DECODE : ID;
  MEM : RD;
%}

//No.13 FPU Conditional branch :
pipe_class pipe_fpu_branch(regF src1, regF src2, label labl ) %{
  multiple_bundles;
  src1 : RD(read);
  src2 : RD(read);
  DECODE : ID;
  BR : RD;
%}

//No.14 Floating FPU reg operation : dst <-- op reg
pipe_class fpu1_regF(regF dst, regF src) %{
  src : RD(read);
  dst : WB(write);
  DECODE : ID;
  FPU : CA;
%}

// Multi-instruction memory sequence (e.g. atomics); fully serialized.
pipe_class long_memory_op() %{
  instruction_count(10); multiple_bundles; force_serialization;
  fixed_latency(30);
%}

// Call through the branch unit; large fixed latency, serialized.
pipe_class simple_call() %{
  instruction_count(10); multiple_bundles; force_serialization;
  fixed_latency(200);
  BR : RD;
%}

pipe_class call() %{
  instruction_count(10); multiple_bundles; force_serialization;
  fixed_latency(200);
%}

//FIXME:
//No.9 Pipe slow : for multi-instructions
// Catch-all class for long expansion sequences; pessimistic latency.
pipe_class pipe_slow( ) %{
  instruction_count(20);
  force_serialization;
  multiple_bundles;
  fixed_latency(50);
%}
6055 %}
6059 //----------INSTRUCTIONS-------------------------------------------------------
6060 //
6061 // match -- States which machine-independent subtree may be replaced
6062 // by this instruction.
6063 // ins_cost -- The estimated cost of this instruction is used by instruction
6064 // selection to identify a minimum cost tree of machine
6065 // instructions that matches a tree of machine-independent
6066 // instructions.
6067 // format -- A string providing the disassembly for this instruction.
6068 // The value of an instruction's operand may be inserted
6069 // by referring to it with a '$' prefix.
6070 // opcode -- Three instruction opcodes may be provided. These are referred
6071 // to within an encode class as $primary, $secondary, and $tertiary
6072 // respectively. The primary opcode is commonly used to
6073 // indicate the type of machine instruction, while secondary
6074 // and tertiary are often used for prefix options or addressing
6075 // modes.
6076 // ins_encode -- A list of encode classes with parameters. The encode class
6077 // name must have been defined in an 'enc_class' specification
6078 // in the encode section of the architecture description.
// Load Integer
instruct loadI(mRegI dst, memory mem) %{
  match(Set dst (LoadI mem));

  ins_cost(125);
  format %{ "lw $dst, $mem #@loadI" %}
  ins_encode (load_I_enc(dst, mem));
  ins_pipe( ialu_loadI );
%}

// Sign-extending int load folded with ConvI2L: lw already sign-extends
// to 64 bits on MIPS64, so a single load suffices.
instruct loadI_convI2L(mRegL dst, memory mem) %{
  match(Set dst (ConvI2L (LoadI mem)));

  ins_cost(125);
  format %{ "lw $dst, $mem #@loadI_convI2L" %}
  ins_encode (load_I_enc(dst, mem));
  ins_pipe( ialu_loadI );
%}

// Load Integer (32 bit signed) to Byte (8 bit signed)
// (x << 24) >> 24 of a loaded int is just a sign-extending byte load.
instruct loadI2B(mRegI dst, memory mem, immI_24 twentyfour) %{
  match(Set dst (RShiftI (LShiftI (LoadI mem) twentyfour) twentyfour));

  ins_cost(125);
  format %{ "lb $dst, $mem\t# int -> byte #@loadI2B" %}
  ins_encode(load_B_enc(dst, mem));
  ins_pipe(ialu_loadI);
%}

// Load Integer (32 bit signed) to Unsigned Byte (8 bit UNsigned)
// (LoadI & 0xFF) becomes an unsigned byte load.
instruct loadI2UB(mRegI dst, memory mem, immI_255 mask) %{
  match(Set dst (AndI (LoadI mem) mask));

  ins_cost(125);
  format %{ "lbu $dst, $mem\t# int -> ubyte #@loadI2UB" %}
  ins_encode(load_UB_enc(dst, mem));
  ins_pipe(ialu_loadI);
%}

// Load Integer (32 bit signed) to Short (16 bit signed)
// (x << 16) >> 16 of a loaded int is just a sign-extending halfword load.
instruct loadI2S(mRegI dst, memory mem, immI_16 sixteen) %{
  match(Set dst (RShiftI (LShiftI (LoadI mem) sixteen) sixteen));

  ins_cost(125);
  format %{ "lh $dst, $mem\t# int -> short #@loadI2S" %}
  ins_encode(load_S_enc(dst, mem));
  ins_pipe(ialu_loadI);
%}

// Load Integer (32 bit signed) to Unsigned Short/Char (16 bit UNsigned)
// (LoadI & 0xFFFF) becomes an unsigned halfword load.
instruct loadI2US(mRegI dst, memory mem, immI_65535 mask) %{
  match(Set dst (AndI (LoadI mem) mask));

  ins_cost(125);
  format %{ "lhu $dst, $mem\t# int -> ushort/char #@loadI2US" %}
  ins_encode(load_C_enc(dst, mem));
  ins_pipe(ialu_loadI);
%}
// Load Long.
instruct loadL(mRegL dst, memory mem) %{
// predicate(!((LoadLNode*)n)->require_atomic_access());
  match(Set dst (LoadL mem));

  ins_cost(250);
  format %{ "ld $dst, $mem #@loadL" %}
  ins_encode(load_L_enc(dst, mem));
  ins_pipe( ialu_loadL );
%}

// Load Long - UNaligned
// NOTE(review): emits a plain ld, same as loadL; relies on the platform
// tolerating (or the matcher never producing truly) unaligned accesses.
instruct loadL_unaligned(mRegL dst, memory mem) %{
  match(Set dst (LoadL_unaligned mem));

  // FIXME: Jin: Need more effective ldl/ldr
  ins_cost(450);
  format %{ "ld $dst, $mem #@loadL_unaligned\n\t" %}
  ins_encode(load_L_enc(dst, mem));
  ins_pipe( ialu_loadL );
%}

// Store Long
instruct storeL_reg(memory mem, mRegL src) %{
  match(Set mem (StoreL mem src));

  ins_cost(200);
  format %{ "sd $mem, $src #@storeL_reg\n" %}
  ins_encode(store_L_reg_enc(mem, src));
  ins_pipe( ialu_storeL );
%}

// Store long zero directly from R0; cheaper than materializing 0.
instruct storeL_immL0(memory mem, immL0 zero) %{
  match(Set mem (StoreL mem zero));

  ins_cost(180);
  format %{ "sd $mem, zero #@storeL_immL0" %}
  ins_encode(store_L_immL0_enc(mem, zero));
  ins_pipe( ialu_storeL );
%}
// Load Compressed Pointer
// Compressed oops are 32-bit zero-extended values, hence lwu.
instruct loadN(mRegN dst, memory mem)
%{
  match(Set dst (LoadN mem));

  ins_cost(125); // XXX
  format %{ "lwu $dst, $mem\t# compressed ptr @ loadN" %}
  ins_encode (load_N_enc(dst, mem));
  ins_pipe( ialu_loadI ); // XXX
%}

// Load Pointer
instruct loadP(mRegP dst, memory mem) %{
  match(Set dst (LoadP mem));

  ins_cost(125);
  format %{ "ld $dst, $mem #@loadP" %}
  ins_encode (load_P_enc(dst, mem));
  ins_pipe( ialu_loadI );
%}

// Load Klass Pointer
// Same encoding as loadP; a klass pointer is a full-width pointer here.
instruct loadKlass(mRegP dst, memory mem) %{
  match(Set dst (LoadKlass mem));

  ins_cost(125);
  format %{ "MOV $dst,$mem @ loadKlass" %}
  ins_encode (load_P_enc(dst, mem));
  ins_pipe( ialu_loadI );
%}

// Load narrow Klass Pointer
instruct loadNKlass(mRegN dst, memory mem)
%{
  match(Set dst (LoadNKlass mem));

  ins_cost(125); // XXX
  format %{ "lwu $dst, $mem\t# compressed klass ptr @ loadNKlass" %}
  ins_encode (load_N_enc(dst, mem));
  ins_pipe( ialu_loadI ); // XXX
%}
// Load Constant
instruct loadConI(mRegI dst, immI src) %{
  match(Set dst src);

  ins_cost(150);
  format %{ "mov $dst, $src #@loadConI" %}
  ins_encode %{
    // move() picks the shortest sequence for the 32-bit constant.
    Register dst = $dst$$Register;
    int value = $src$$constant;
    __ move(dst, value);
  %}
  ins_pipe( ialu_regI_regI );
%}

// Materialize an arbitrary 64-bit constant with set64 (variable-length
// shortest-sequence encoding).
instruct loadConL_set64(mRegL dst, immL src) %{
  match(Set dst src);
  ins_cost(120);
  format %{ "li $dst, $src @ loadConL_set64" %}
  ins_encode %{
    __ set64($dst$$Register, $src$$constant);
  %}
  ins_pipe(ialu_regL_regL);
%}
6249 /*
6250 // Load long value from constant table (predicated by immL_expensive).
6251 instruct loadConL_load(mRegL dst, immL_expensive src) %{
6252 match(Set dst src);
6253 ins_cost(150);
6254 format %{ "ld $dst, $constantoffset[$constanttablebase] # load long $src from table @ loadConL_ldx" %}
6255 ins_encode %{
6256 int con_offset = $constantoffset($src);
6258 if (Assembler::is_simm16(con_offset)) {
6259 __ ld($dst$$Register, $constanttablebase, con_offset);
6260 } else {
6261 __ set64(AT, con_offset);
6262 if (UseLoongsonISA) {
6263 __ gsldx($dst$$Register, $constanttablebase, AT, 0);
6264 } else {
6265 __ daddu(AT, $constanttablebase, AT);
6266 __ ld($dst$$Register, AT, 0);
6267 }
6268 }
6269 %}
6270 ins_pipe(ialu_loadI);
6271 %}
6272 */
// 16-bit-immediate long constant: a single daddiu from R0 suffices.
instruct loadConL16(mRegL dst, immL16 src) %{
  match(Set dst src);
  ins_cost(105);
  format %{ "mov $dst, $src #@loadConL16" %}
  ins_encode %{
    Register dst_reg = as_Register($dst$$reg);
    int value = $src$$constant;
    __ daddiu(dst_reg, R0, value);
  %}
  ins_pipe( ialu_regL_regL );
%}

// Long zero: cheapest of all, copy R0.
instruct loadConL0(mRegL dst, immL0 src) %{
  match(Set dst src);
  ins_cost(100);
  format %{ "mov $dst, zero #@loadConL0" %}
  ins_encode %{
    Register dst_reg = as_Register($dst$$reg);
    __ daddu(dst_reg, R0, R0);
  %}
  ins_pipe( ialu_regL_regL );
%}

// Load Range
// Array length load; same encoding as an int load.
instruct loadRange(mRegI dst, memory mem) %{
  match(Set dst (LoadRange mem));

  ins_cost(125);
  format %{ "MOV $dst,$mem @ loadRange" %}
  ins_encode(load_I_enc(dst, mem));
  ins_pipe( ialu_loadI );
%}
// Store a full-width pointer.
instruct storeP(memory mem, mRegP src ) %{
  match(Set mem (StoreP mem src));

  ins_cost(125);
  format %{ "sd $src, $mem #@storeP" %}
  ins_encode(store_P_reg_enc(mem, src));
  ins_pipe( ialu_storeI );
%}

// Store NULL Pointer, mark word, or other simple pointer constant.
instruct storeImmP0(memory mem, immP0 zero) %{
  match(Set mem (StoreP mem zero));

  ins_cost(125);
  format %{ "mov $mem, $zero #@storeImmP0" %}
  ins_encode(store_P_immP0_enc(mem));
  ins_pipe( ialu_storeI );
%}

// Store Byte Immediate
instruct storeImmB(memory mem, immI8 src) %{
  match(Set mem (StoreB mem src));

  ins_cost(150);
  format %{ "movb $mem, $src #@storeImmB" %}
  ins_encode(store_B_immI_enc(mem, src));
  ins_pipe( ialu_storeI );
%}

// Store Compressed Pointer
instruct storeN(memory mem, mRegN src)
%{
  match(Set mem (StoreN mem src));

  ins_cost(125); // XXX
  format %{ "sw $mem, $src\t# compressed ptr @ storeN" %}
  ins_encode(store_N_reg_enc(mem, src));
  ins_pipe( ialu_storeI );
%}

instruct storeNKlass(memory mem, mRegN src)
%{
  match(Set mem (StoreNKlass mem src));

  ins_cost(125); // XXX
  format %{ "sw $mem, $src\t# compressed klass ptr @ storeNKlass" %}
  ins_encode(store_N_reg_enc(mem, src));
  ins_pipe( ialu_storeI );
%}

// Store compressed NULL; only valid when both heap and klass bases are
// NULL, so the encoded form of NULL is the 32-bit value 0.
instruct storeImmN0(memory mem, immN0 zero)
%{
  predicate(Universe::narrow_oop_base() == NULL && Universe::narrow_klass_base() == NULL);
  match(Set mem (StoreN mem zero));

  ins_cost(125); // XXX
  format %{ "storeN0 zero, $mem\t# compressed ptr" %}
  ins_encode(storeImmN0_enc(mem, zero));
  ins_pipe( ialu_storeI );
%}
// Store Byte
instruct storeB(memory mem, mRegI src) %{
  match(Set mem (StoreB mem src));

  ins_cost(125);
  format %{ "sb $src, $mem #@storeB" %}
  ins_encode(store_B_reg_enc(mem, src));
  ins_pipe( ialu_storeI );
%}

// Byte store of a truncated long: sb only writes the low 8 bits, so the
// ConvL2I can be folded away.
instruct storeB_convL2I(memory mem, mRegL src) %{
  match(Set mem (StoreB mem (ConvL2I src)));

  ins_cost(125);
  format %{ "sb $src, $mem #@storeB_convL2I" %}
  ins_encode(store_B_reg_enc(mem, src));
  ins_pipe( ialu_storeI );
%}

// Load Byte (8bit signed)
instruct loadB(mRegI dst, memory mem) %{
  match(Set dst (LoadB mem));

  ins_cost(125);
  format %{ "lb $dst, $mem #@loadB" %}
  ins_encode(load_B_enc(dst, mem));
  ins_pipe( ialu_loadI );
%}

// lb sign-extends to 64 bits, so ConvI2L folds into the load.
instruct loadB_convI2L(mRegL dst, memory mem) %{
  match(Set dst (ConvI2L (LoadB mem)));

  ins_cost(125);
  format %{ "lb $dst, $mem #@loadB_convI2L" %}
  ins_encode(load_B_enc(dst, mem));
  ins_pipe( ialu_loadI );
%}

// Load Byte (8bit UNsigned)
instruct loadUB(mRegI dst, memory mem) %{
  match(Set dst (LoadUB mem));

  ins_cost(125);
  format %{ "lbu $dst, $mem #@loadUB" %}
  ins_encode(load_UB_enc(dst, mem));
  ins_pipe( ialu_loadI );
%}

// lbu zero-extends, so ConvI2L of an unsigned byte folds into the load.
instruct loadUB_convI2L(mRegL dst, memory mem) %{
  match(Set dst (ConvI2L (LoadUB mem)));

  ins_cost(125);
  format %{ "lbu $dst, $mem #@loadUB_convI2L" %}
  ins_encode(load_UB_enc(dst, mem));
  ins_pipe( ialu_loadI );
%}
// Load Short (16bit signed)
instruct loadS(mRegI dst, memory mem) %{
  match(Set dst (LoadS mem));

  ins_cost(125);
  format %{ "lh $dst, $mem #@loadS" %}
  ins_encode(load_S_enc(dst, mem));
  ins_pipe( ialu_loadI );
%}

// Load Short (16 bit signed) to Byte (8 bit signed)
// (x << 24) >> 24 of a loaded short is just a sign-extending byte load.
instruct loadS2B(mRegI dst, memory mem, immI_24 twentyfour) %{
  match(Set dst (RShiftI (LShiftI (LoadS mem) twentyfour) twentyfour));

  ins_cost(125);
  format %{ "lb $dst, $mem\t# short -> byte #@loadS2B" %}
  ins_encode(load_B_enc(dst, mem));
  ins_pipe(ialu_loadI);
%}

// lh sign-extends to 64 bits, so ConvI2L folds into the load.
instruct loadS_convI2L(mRegL dst, memory mem) %{
  match(Set dst (ConvI2L (LoadS mem)));

  ins_cost(125);
  format %{ "lh $dst, $mem #@loadS_convI2L" %}
  ins_encode(load_S_enc(dst, mem));
  ins_pipe( ialu_loadI );
%}

// Store Integer Immediate
instruct storeImmI(memory mem, immI src) %{
  match(Set mem (StoreI mem src));

  ins_cost(150);
  format %{ "mov $mem, $src #@storeImmI" %}
  ins_encode(store_I_immI_enc(mem, src));
  ins_pipe( ialu_storeI );
%}

// Store Integer
instruct storeI(memory mem, mRegI src) %{
  match(Set mem (StoreI mem src));

  ins_cost(125);
  format %{ "sw $mem, $src #@storeI" %}
  ins_encode(store_I_reg_enc(mem, src));
  ins_pipe( ialu_storeI );
%}

// Int store of a truncated long: sw writes the low 32 bits, so the
// ConvL2I folds away.
instruct storeI_convL2I(memory mem, mRegL src) %{
  match(Set mem (StoreI mem (ConvL2I src)));

  ins_cost(125);
  format %{ "sw $mem, $src #@storeI_convL2I" %}
  ins_encode(store_I_reg_enc(mem, src));
  ins_pipe( ialu_storeI );
%}
// Load Float
instruct loadF(regF dst, memory mem) %{
  match(Set dst (LoadF mem));

  ins_cost(150);
  format %{ "loadF $dst, $mem #@loadF" %}
  ins_encode(load_F_enc(dst, mem));
  ins_pipe( ialu_loadI );
%}
// Materialize a general pointer constant.
// Oop and metadata constants must use the fixed-length li48 sequence so
// the GC / class-redefinition machinery can patch the embedded value via
// the recorded relocation; plain (non-relocated) constants may use the
// shortest set64 sequence.
instruct loadConP_general(mRegP dst, immP src) %{
  match(Set dst src);

  ins_cost(120);
  format %{ "li $dst, $src #@loadConP_general" %}

  ins_encode %{
    Register dst = $dst$$Register;
    long* value = (long*)$src$$constant;

    if ($src->constant_reloc() == relocInfo::metadata_type) {
      int klass_index = __ oop_recorder()->find_index((Klass*)value);
      RelocationHolder rspec = metadata_Relocation::spec(klass_index);

      __ relocate(rspec);
      __ li48(dst, (long)value);
    } else if ($src->constant_reloc() == relocInfo::oop_type) {
      int oop_index = __ oop_recorder()->find_index((jobject)value);
      RelocationHolder rspec = oop_Relocation::spec(oop_index);

      __ relocate(rspec);
      __ li48(dst, (long)value);
    } else if ($src->constant_reloc() == relocInfo::none) {
      __ set64(dst, (long)value);
    } else {
      // Previously an unexpected relocation kind fell through and left
      // $dst unset, silently miscompiling; fail loudly instead.
      ShouldNotReachHere();
    }
  %}

  ins_pipe( ialu_regI_regI );
%}
6525 /*
6526 instruct loadConP_load(mRegP dst, immP_load src) %{
6527 match(Set dst src);
6529 ins_cost(100);
6530 format %{ "ld $dst, [$constanttablebase + $constantoffset] load from constant table: ptr=$src @ loadConP_load" %}
6532 ins_encode %{
6534 int con_offset = $constantoffset($src);
6536 if (Assembler::is_simm16(con_offset)) {
6537 __ ld($dst$$Register, $constanttablebase, con_offset);
6538 } else {
6539 __ set64(AT, con_offset);
6540 if (UseLoongsonISA) {
6541 __ gsldx($dst$$Register, $constanttablebase, AT, 0);
6542 } else {
6543 __ daddu(AT, $constanttablebase, AT);
6544 __ ld($dst$$Register, AT, 0);
6545 }
6546 }
6547 %}
6549 ins_pipe(ialu_loadI);
6550 %}
6551 */
// Non-oop pointer constant that needs no relocation; shortest sequence.
instruct loadConP_no_oop_cheap(mRegP dst, immP_no_oop_cheap src) %{
  match(Set dst src);

  ins_cost(80);
  format %{ "li $dst, $src @ loadConP_no_oop_cheap" %}

  ins_encode %{
    __ set64($dst$$Register, $src$$constant);
  %}

  ins_pipe(ialu_regI_regI);
%}

// Safepoint polling-page address.
instruct loadConP_poll(mRegP dst, immP_poll src) %{
  match(Set dst src);

  ins_cost(50);
  format %{ "li $dst, $src #@loadConP_poll" %}

  ins_encode %{
    Register dst = $dst$$Register;
    intptr_t value = (intptr_t)$src$$constant;

    __ set64(dst, (jlong)value);
  %}

  ins_pipe( ialu_regI_regI );
%}

// NULL pointer constant: copy R0.
instruct loadConP0(mRegP dst, immP0 src)
%{
  match(Set dst src);

  ins_cost(50);
  format %{ "mov $dst, R0\t# ptr" %}
  ins_encode %{
    Register dst_reg = $dst$$Register;
    __ daddu(dst_reg, R0, R0);
  %}
  ins_pipe( ialu_regI_regI );
%}

// Compressed NULL: encoded form of NULL is 0.
instruct loadConN0(mRegN dst, immN0 src) %{
  match(Set dst src);
  format %{ "move $dst, R0\t# compressed NULL ptr" %}
  ins_encode %{
    __ move($dst$$Register, R0);
  %}
  ins_pipe( ialu_regI_regI );
%}

// Compressed oop constant; set_narrow_oop records the oop relocation.
instruct loadConN(mRegN dst, immN src) %{
  match(Set dst src);

  ins_cost(125);
  format %{ "li $dst, $src\t# compressed ptr @ loadConN" %}
  ins_encode %{
    Register dst = $dst$$Register;
    __ set_narrow_oop(dst, (jobject)$src$$constant);
  %}
  ins_pipe( ialu_regI_regI ); // XXX
%}

// Compressed klass constant; set_narrow_klass records the relocation.
instruct loadConNKlass(mRegN dst, immNKlass src) %{
  match(Set dst src);

  ins_cost(125);
  format %{ "li $dst, $src\t# compressed klass ptr @ loadConNKlass" %}
  ins_encode %{
    Register dst = $dst$$Register;
    __ set_narrow_klass(dst, (Klass*)$src$$constant);
  %}
  ins_pipe( ialu_regI_regI ); // XXX
%}
//FIXME
// Tail Call; Jump from runtime stub to Java code.
// Also known as an 'interprocedural jump'.
// Target of jump will eventually return to caller.
// TailJump below removes the return address.
instruct TailCalljmpInd(mRegP jump_target, mRegP method_oop) %{
  match(TailCall jump_target method_oop );
  ins_cost(300);
  format %{ "JMP $jump_target \t# @TailCalljmpInd" %}

  ins_encode %{
    Register target = $jump_target$$Register;
    Register oop = $method_oop$$Register;

    // 2012/10/12 Jin: RA will be used in generate_forward_exception()
    __ push(RA);

    // S3 carries the method oop to the callee; then an indirect jump
    // with the mandatory branch-delay-slot nop.
    __ move(S3, oop);
    __ jr(target);
    __ nop();
  %}

  ins_pipe( pipe_jump );
%}

// Create exception oop: created by stack-crawling runtime code.
// Created exception is now available to this handler, and is setup
// just prior to jumping to this handler. No code emitted.
instruct CreateException( a0_RegP ex_oop )
%{
  match(Set ex_oop (CreateEx));

  // use the following format syntax
  format %{ "# exception oop is in A0; no code emitted @CreateException" %}
  ins_encode %{
    // Jin: X86 leaves this function empty
    __ block_comment("CreateException is empty in X86/MIPS");
  %}
  ins_pipe( empty );
// ins_pipe( pipe_jump );
%}
6672 /* 2012/9/14 Jin: The mechanism of exception handling is clear now.
6674 - Common try/catch:
6675 2012/9/14 Jin: [stubGenerator_mips.cpp] generate_forward_exception()
6676 |- V0, V1 are created
6677 |- T9 <= SharedRuntime::exception_handler_for_return_address
6678 `- jr T9
6679 `- the caller's exception_handler
6680 `- jr OptoRuntime::exception_blob
6681 `- here
6682 - Rethrow(e.g. 'unwind'):
6683 * The callee:
6684 |- an exception is triggered during execution
6685 `- exits the callee method through RethrowException node
6686 |- The callee pushes exception_oop(T0) and exception_pc(RA)
6687 `- The callee jumps to OptoRuntime::rethrow_stub()
6688 * In OptoRuntime::rethrow_stub:
6689 |- The VM calls _rethrow_Java to determine the return address in the caller method
6690 `- exits the stub with tailjmpInd
6691 |- pops exception_oop(V0) and exception_pc(V1)
6692 `- jumps to the return address(usually an exception_handler)
6693 * The caller:
6694 `- continues processing the exception_blob with V0/V1
6695 */
6697 /*
6698 Disassembling OptoRuntime::rethrow_stub()
6700 ; locals
6701 0x2d3bf320: addiu sp, sp, 0xfffffff8
6702 0x2d3bf324: sw ra, 0x4(sp)
6703 0x2d3bf328: sw fp, 0x0(sp)
6704 0x2d3bf32c: addu fp, sp, zero
6705 0x2d3bf330: addiu sp, sp, 0xfffffff0
6706 0x2d3bf334: sw ra, 0x8(sp)
6707 0x2d3bf338: sw t0, 0x4(sp)
6708 0x2d3bf33c: sw sp, 0x0(sp)
6710 ; get_thread(S2)
6711 0x2d3bf340: addu s2, sp, zero
6712 0x2d3bf344: srl s2, s2, 12
6713 0x2d3bf348: sll s2, s2, 2
6714 0x2d3bf34c: lui at, 0x2c85
6715 0x2d3bf350: addu at, at, s2
6716 0x2d3bf354: lw s2, 0xffffcc80(at)
6718 0x2d3bf358: lw s0, 0x0(sp)
6719 0x2d3bf35c: sw s0, 0x118(s2) // last_sp -> threa
6720 0x2d3bf360: sw s2, 0xc(sp)
6722 ; OptoRuntime::rethrow_C(oopDesc* exception, JavaThread* thread, address ret_pc)
6723 0x2d3bf364: lw a0, 0x4(sp)
6724 0x2d3bf368: lw a1, 0xc(sp)
6725 0x2d3bf36c: lw a2, 0x8(sp)
6726 ;; Java_To_Runtime
6727 0x2d3bf370: lui t9, 0x2c34
6728 0x2d3bf374: addiu t9, t9, 0xffff8a48
6729 0x2d3bf378: jalr t9
6730 0x2d3bf37c: nop
6732 0x2d3bf380: addu s3, v0, zero ; S3: SharedRuntime::raw_exception_handler_for_return_address()
6734 0x2d3bf384: lw s0, 0xc(sp)
6735 0x2d3bf388: sw zero, 0x118(s0)
6736 0x2d3bf38c: sw zero, 0x11c(s0)
6737 0x2d3bf390: lw s1, 0x144(s0) ; ex_oop: S1
6738 0x2d3bf394: addu s2, s0, zero
6739 0x2d3bf398: sw zero, 0x144(s2)
6740 0x2d3bf39c: lw s0, 0x4(s2)
6741 0x2d3bf3a0: addiu s4, zero, 0x0
6742 0x2d3bf3a4: bne s0, s4, 0x2d3bf3d4
6743 0x2d3bf3a8: nop
6744 0x2d3bf3ac: addiu sp, sp, 0x10
6745 0x2d3bf3b0: addiu sp, sp, 0x8
6746 0x2d3bf3b4: lw ra, 0xfffffffc(sp)
6747 0x2d3bf3b8: lw fp, 0xfffffff8(sp)
6748 0x2d3bf3bc: lui at, 0x2b48
6749 0x2d3bf3c0: lw at, 0x100(at)
6751 ; tailjmpInd: Restores exception_oop & exception_pc
6752 0x2d3bf3c4: addu v1, ra, zero
6753 0x2d3bf3c8: addu v0, s1, zero
6754 0x2d3bf3cc: jr s3
6755 0x2d3bf3d0: nop
6756 ; Exception:
6757 0x2d3bf3d4: lui s1, 0x2cc8 ; generate_forward_exception()
6758 0x2d3bf3d8: addiu s1, s1, 0x40
6759 0x2d3bf3dc: addiu s2, zero, 0x0
6760 0x2d3bf3e0: addiu sp, sp, 0x10
6761 0x2d3bf3e4: addiu sp, sp, 0x8
6762 0x2d3bf3e8: lw ra, 0xfffffffc(sp)
6763 0x2d3bf3ec: lw fp, 0xfffffff8(sp)
6764 0x2d3bf3f0: lui at, 0x2b48
6765 0x2d3bf3f4: lw at, 0x100(at)
6766 ; TailCalljmpInd
6767 __ push(RA); ; to be used in generate_forward_exception()
6768 0x2d3bf3f8: addu t7, s2, zero
6769 0x2d3bf3fc: jr s1
6770 0x2d3bf400: nop
6771 */
// Rethrow exception:
// The exception oop will come in the first argument position.
// Then JUMP (not call) to the rethrow stub code.
instruct RethrowException()
%{
  match(Rethrow);

  // use the following format syntax
  format %{ "JMP rethrow_stub #@RethrowException" %}
  ins_encode %{
    __ block_comment("@ RethrowException");

    // Record a runtime-call relocation at the jump site so the stub
    // address can be patched if code is relocated.
    cbuf.set_insts_mark();
    cbuf.relocate(cbuf.insts_mark(), runtime_call_Relocation::spec());

    // call OptoRuntime::rethrow_stub to get the exception handler in parent method
    __ li(T9, OptoRuntime::rethrow_stub());
    __ jr(T9);
    __ nop();   // branch delay slot
  %}
  ins_pipe( pipe_jump );
%}
// Branch on pointer compared against the NULL constant.
// Only EQ/NE are meaningful for a pointer-vs-NULL test; the unsigned
// ordered cases were already dead (commented out) in the original.
instruct branchConP_zero(cmpOpU cmp, mRegP op1, immP0 zero, label labl) %{
  match(If cmp (CmpP op1 zero));
  effect(USE labl);

  ins_cost(180);
  format %{ "b$cmp $op1, R0, $labl #@branchConP_zero" %}

  ins_encode %{
    Register op1 = $op1$$Register;
    Register op2 = R0;
    // The label pointer is always non-NULL here: binding the reference
    // already dereferences it, so the old `if (&L)` guard (address of a
    // reference - always true, UB if it were not) was dead code.
    Label &L = *($labl$$label);
    int flag = $cmp$$cmpcode;

    switch(flag) {
      case 0x01: //equal
        __ beq(op1, op2, L);
        break;
      case 0x02: //not_equal
        __ bne(op1, op2, L);
        break;
      default:
        Unimplemented();
    }
    __ nop();   // branch delay slot
  %}

  ins_pc_relative(1);
  ins_pipe( pipe_alu_branch );
%}
// Branch on unsigned pointer comparison of two registers.
// MIPS has no condition codes: EQ/NE branch directly, while the ordered
// cases first materialize the comparison into AT with sltu.
instruct branchConP(cmpOpU cmp, mRegP op1, mRegP op2, label labl) %{
  match(If cmp (CmpP op1 op2));
// predicate(can_branch_register(_kids[0]->_leaf, _kids[1]->_leaf));
  effect(USE labl);

  ins_cost(200);
  format %{ "b$cmp $op1, $op2, $labl #@branchConP" %}

  ins_encode %{
    Register op1 = $op1$$Register;
    Register op2 = $op2$$Register;
    // Always non-NULL: binding the reference already dereferenced the
    // pointer, so the former `if (&L)` guards were dead code (removed).
    Label &L = *($labl$$label);
    int flag = $cmp$$cmpcode;

    switch(flag) {
      case 0x01: //equal
        __ beq(op1, op2, L);
        break;
      case 0x02: //not_equal
        __ bne(op1, op2, L);
        break;
      case 0x03: //above
        __ sltu(AT, op2, op1);
        __ bne(R0, AT, L);
        break;
      case 0x04: //above_equal
        __ sltu(AT, op1, op2);
        __ beq(AT, R0, L);
        break;
      case 0x05: //below
        __ sltu(AT, op1, op2);
        __ bne(R0, AT, L);
        break;
      case 0x06: //below_equal
        __ sltu(AT, op2, op1);
        __ beq(AT, R0, L);
        break;
      default:
        Unimplemented();
    }
    __ nop();   // branch delay slot
  %}

  ins_pc_relative(1);
  ins_pipe( pipe_alu_branch );
%}
// Branch on compressed pointer compared against compressed NULL (0).
// Only EQ/NE make sense against NULL.
instruct cmpN_null_branch(cmpOp cmp, mRegN op1, immN0 null, label labl) %{
  match(If cmp (CmpN op1 null));
  effect(USE labl);

  ins_cost(180);
  format %{ "CMP $op1,0\t! compressed ptr\n\t"
            "BP$cmp $labl @ cmpN_null_branch" %}
  ins_encode %{
    Register op1 = $op1$$Register;
    Register op2 = R0;
    // Always non-NULL: binding the reference already dereferenced the
    // pointer, so the former `if (&L)` guards were dead code (removed).
    Label &L = *($labl$$label);
    int flag = $cmp$$cmpcode;

    switch(flag) {
      case 0x01: //equal
        __ beq(op1, op2, L);
        break;
      case 0x02: //not_equal
        __ bne(op1, op2, L);
        break;
      default:
        Unimplemented();
    }
    __ nop();   // branch delay slot
  %}
//TODO: pipe_branchP or create pipe_branchN LEE
  ins_pc_relative(1);
  ins_pipe( pipe_alu_branch );
%}
// Branch on comparison of two compressed pointers.
// Ordered cases materialize the unsigned comparison into AT with sltu.
instruct cmpN_reg_branch(cmpOp cmp, mRegN op1, mRegN op2, label labl) %{
  match(If cmp (CmpN op1 op2));
  effect(USE labl);

  ins_cost(180);
  format %{ "CMP $op1,$op2\t! compressed ptr\n\t"
            "BP$cmp $labl" %}
  ins_encode %{
    Register op1_reg = $op1$$Register;
    Register op2_reg = $op2$$Register;
    // Always non-NULL: binding the reference already dereferenced the
    // pointer, so the former `if (&L)` guards were dead code (removed).
    Label &L = *($labl$$label);
    int flag = $cmp$$cmpcode;

    switch(flag) {
      case 0x01: //equal
        __ beq(op1_reg, op2_reg, L);
        break;
      case 0x02: //not_equal
        __ bne(op1_reg, op2_reg, L);
        break;
      case 0x03: //above
        __ sltu(AT, op2_reg, op1_reg);
        __ bne(R0, AT, L);
        break;
      case 0x04: //above_equal
        __ sltu(AT, op1_reg, op2_reg);
        __ beq(AT, R0, L);
        break;
      case 0x05: //below
        __ sltu(AT, op1_reg, op2_reg);
        __ bne(R0, AT, L);
        break;
      case 0x06: //below_equal
        __ sltu(AT, op2_reg, op1_reg);
        __ beq(AT, R0, L);
        break;
      default:
        Unimplemented();
    }
    __ nop();   // branch delay slot
  %}
  ins_pc_relative(1);
  ins_pipe( pipe_alu_branch );
%}
// Branch on unsigned int comparison of two registers.
// Ordered cases materialize the unsigned comparison into AT with sltu.
instruct branchConIU_reg_reg(cmpOpU cmp, mRegI src1, mRegI src2, label labl) %{
  match( If cmp (CmpU src1 src2) );
  effect(USE labl);
  format %{ "BR$cmp $src1, $src2, $labl #@branchConIU_reg_reg" %}

  ins_encode %{
    Register op1 = $src1$$Register;
    Register op2 = $src2$$Register;
    // Always non-NULL: binding the reference already dereferenced the
    // pointer, so the former `if (&L)` guards were dead code (removed).
    Label &L = *($labl$$label);
    int flag = $cmp$$cmpcode;

    switch(flag) {
      case 0x01: //equal
        __ beq(op1, op2, L);
        break;
      case 0x02: //not_equal
        __ bne(op1, op2, L);
        break;
      case 0x03: //above
        __ sltu(AT, op2, op1);
        __ bne(AT, R0, L);
        break;
      case 0x04: //above_equal
        __ sltu(AT, op1, op2);
        __ beq(AT, R0, L);
        break;
      case 0x05: //below
        __ sltu(AT, op1, op2);
        __ bne(AT, R0, L);
        break;
      case 0x06: //below_equal
        __ sltu(AT, op2, op1);
        __ beq(AT, R0, L);
        break;
      default:
        Unimplemented();
    }
    __ nop();   // branch delay slot
  %}

  ins_pc_relative(1);
  ins_pipe( pipe_alu_branch );
%}
// Unsigned int compare-and-branch, register vs. arbitrary immediate.
// The immediate is first materialized into AT with move; AT is then
// reused as the sltu result for the ordered cases. Trailing nop fills
// the branch delay slot.
7094 instruct branchConIU_reg_imm(cmpOpU cmp, mRegI src1, immI src2, label labl) %{
7095 match( If cmp (CmpU src1 src2) );
7096 effect(USE labl);
7097 format %{ "BR$cmp $src1, $src2, $labl #@branchConIU_reg_imm" %}
7099 ins_encode %{
7100 Register op1 = $src1$$Register;
7101 int val = $src2$$constant;
// '&L' non-null => label known; otherwise emit offset-0 branch for later patching.
7102 Label &L = *($labl$$label);
7103 int flag = $cmp$$cmpcode;
// Load the immediate operand into AT.
7105 __ move(AT, val);
7106 switch(flag)
7107 {
7108 case 0x01: //equal
7109 if (&L)
7110 __ beq(op1, AT, L);
7111 else
7112 __ beq(op1, AT, (int)0);
7113 break;
7114 case 0x02: //not_equal
7115 if (&L)
7116 __ bne(op1, AT, L);
7117 else
7118 __ bne(op1, AT, (int)0);
7119 break;
7120 case 0x03: //above
7121 __ sltu(AT, AT, op1);
7122 if(&L)
7123 __ bne(R0, AT, L);
7124 else
7125 __ bne(R0, AT, (int)0);
7126 break;
7127 case 0x04: //above_equal
7128 __ sltu(AT, op1, AT);
7129 if(&L)
7130 __ beq(AT, R0, L);
7131 else
7132 __ beq(AT, R0, (int)0);
7133 break;
7134 case 0x05: //below
7135 __ sltu(AT, op1, AT);
7136 if(&L)
7137 __ bne(R0, AT, L);
7138 else
7139 __ bne(R0, AT, (int)0);
7140 break;
7141 case 0x06: //below_equal
7142 __ sltu(AT, AT, op1);
7143 if(&L)
7144 __ beq(AT, R0, L);
7145 else
7146 __ beq(AT, R0, (int)0);
7147 break;
7148 default:
7149 Unimplemented();
7150 }
// Branch delay slot.
7151 __ nop();
7152 %}
7154 ins_pc_relative(1);
7155 ins_pipe( pipe_alu_branch );
7156 %}
// Signed int compare-and-branch, register vs. register. eq/ne branch
// directly; ordered cases use slt (signed set-on-less-than) into AT.
// Trailing nop fills the branch delay slot.
7158 instruct branchConI_reg_reg(cmpOp cmp, mRegI src1, mRegI src2, label labl) %{
7159 match( If cmp (CmpI src1 src2) );
7160 effect(USE labl);
7161 format %{ "BR$cmp $src1, $src2, $labl #@branchConI_reg_reg" %}
7163 ins_encode %{
7164 Register op1 = $src1$$Register;
7165 Register op2 = $src2$$Register;
// '&L' non-null => label known; otherwise emit offset-0 branch for later patching.
7166 Label &L = *($labl$$label);
7167 int flag = $cmp$$cmpcode;
7169 switch(flag)
7170 {
7171 case 0x01: //equal
7172 if (&L)
7173 __ beq(op1, op2, L);
7174 else
7175 __ beq(op1, op2, (int)0);
7176 break;
7177 case 0x02: //not_equal
7178 if (&L)
7179 __ bne(op1, op2, L);
7180 else
7181 __ bne(op1, op2, (int)0);
7182 break;
7183 case 0x03: //above
7184 __ slt(AT, op2, op1);
7185 if(&L)
7186 __ bne(R0, AT, L);
7187 else
7188 __ bne(R0, AT, (int)0);
7189 break;
7190 case 0x04: //above_equal
7191 __ slt(AT, op1, op2);
7192 if(&L)
7193 __ beq(AT, R0, L);
7194 else
7195 __ beq(AT, R0, (int)0);
7196 break;
7197 case 0x05: //below
7198 __ slt(AT, op1, op2);
7199 if(&L)
7200 __ bne(R0, AT, L);
7201 else
7202 __ bne(R0, AT, (int)0);
7203 break;
7204 case 0x06: //below_equal
7205 __ slt(AT, op2, op1);
7206 if(&L)
7207 __ beq(AT, R0, L);
7208 else
7209 __ beq(AT, R0, (int)0);
7210 break;
7211 default:
7212 Unimplemented();
7213 }
// Branch delay slot.
7214 __ nop();
7215 %}
7217 ins_pc_relative(1);
7218 ins_pipe( pipe_alu_branch );
7219 %}
// Signed int compare against zero and branch. Uses the dedicated MIPS
// compare-with-zero branches (bgtz/bgez/bltz/blez) so no scratch register
// is needed; cheaper than the general immediate form (ins_cost 170 vs 200).
7221 instruct branchConI_reg_imm0(cmpOp cmp, mRegI src1, immI0 src2, label labl) %{
7222 match( If cmp (CmpI src1 src2) );
7223 effect(USE labl);
7224 ins_cost(170);
7225 format %{ "BR$cmp $src1, $src2, $labl #@branchConI_reg_imm0" %}
7227 ins_encode %{
7228 Register op1 = $src1$$Register;
7229 // int val = $src2$$constant;
// '&L' non-null => label known; otherwise emit offset-0 branch for later patching.
7230 Label &L = *($labl$$label);
7231 int flag = $cmp$$cmpcode;
7233 //__ move(AT, val);
7234 switch(flag)
7235 {
7236 case 0x01: //equal
7237 if (&L)
7238 __ beq(op1, R0, L);
7239 else
7240 __ beq(op1, R0, (int)0);
7241 break;
7242 case 0x02: //not_equal
7243 if (&L)
7244 __ bne(op1, R0, L);
7245 else
7246 __ bne(op1, R0, (int)0);
7247 break;
7248 case 0x03: //greater
7249 if(&L)
7250 __ bgtz(op1, L);
7251 else
7252 __ bgtz(op1, (int)0);
7253 break;
7254 case 0x04: //greater_equal
7255 if(&L)
7256 __ bgez(op1, L);
7257 else
7258 __ bgez(op1, (int)0);
7259 break;
7260 case 0x05: //less
7261 if(&L)
7262 __ bltz(op1, L);
7263 else
7264 __ bltz(op1, (int)0);
7265 break;
7266 case 0x06: //less_equal
7267 if(&L)
7268 __ blez(op1, L);
7269 else
7270 __ blez(op1, (int)0);
7271 break;
7272 default:
7273 Unimplemented();
7274 }
// Branch delay slot.
7275 __ nop();
7276 %}
7278 ins_pc_relative(1);
7279 ins_pipe( pipe_alu_branch );
7280 %}
// Signed int compare-and-branch, register vs. arbitrary immediate.
// The immediate is materialized into AT, which is then reused as the slt
// result for the ordered cases. Trailing nop fills the branch delay slot.
7283 instruct branchConI_reg_imm(cmpOp cmp, mRegI src1, immI src2, label labl) %{
7284 match( If cmp (CmpI src1 src2) );
7285 effect(USE labl);
7286 ins_cost(200);
7287 format %{ "BR$cmp $src1, $src2, $labl #@branchConI_reg_imm" %}
7289 ins_encode %{
7290 Register op1 = $src1$$Register;
7291 int val = $src2$$constant;
// '&L' non-null => label known; otherwise emit offset-0 branch for later patching.
7292 Label &L = *($labl$$label);
7293 int flag = $cmp$$cmpcode;
// Load the immediate operand into AT.
7295 __ move(AT, val);
7296 switch(flag)
7297 {
7298 case 0x01: //equal
7299 if (&L)
7300 __ beq(op1, AT, L);
7301 else
7302 __ beq(op1, AT, (int)0);
7303 break;
7304 case 0x02: //not_equal
7305 if (&L)
7306 __ bne(op1, AT, L);
7307 else
7308 __ bne(op1, AT, (int)0);
7309 break;
7310 case 0x03: //greater
7311 __ slt(AT, AT, op1);
7312 if(&L)
7313 __ bne(R0, AT, L);
7314 else
7315 __ bne(R0, AT, (int)0);
7316 break;
7317 case 0x04: //greater_equal
7318 __ slt(AT, op1, AT);
7319 if(&L)
7320 __ beq(AT, R0, L);
7321 else
7322 __ beq(AT, R0, (int)0);
7323 break;
7324 case 0x05: //less
7325 __ slt(AT, op1, AT);
7326 if(&L)
7327 __ bne(R0, AT, L);
7328 else
7329 __ bne(R0, AT, (int)0);
7330 break;
7331 case 0x06: //less_equal
7332 __ slt(AT, AT, op1);
7333 if(&L)
7334 __ beq(AT, R0, L);
7335 else
7336 __ beq(AT, R0, (int)0);
7337 break;
7338 default:
7339 Unimplemented();
7340 }
// Branch delay slot.
7341 __ nop();
7342 %}
7344 ins_pc_relative(1);
7345 ins_pipe( pipe_alu_branch );
7346 %}
// Unsigned int compare against zero and branch. Exploits unsigned
// identities: x >= 0u is always true (unconditional branch), x < 0u is
// never true (no code emitted), x <= 0u is x == 0.
7348 instruct branchConIU_reg_imm0(cmpOpU cmp, mRegI src1, immI0 zero, label labl) %{
7349 match( If cmp (CmpU src1 zero) );
7350 effect(USE labl);
7351 format %{ "BR$cmp $src1, zero, $labl #@branchConIU_reg_imm0" %}
7353 ins_encode %{
7354 Register op1 = $src1$$Register;
// '&L' non-null => label known; otherwise emit offset-0 branch for later patching.
7355 Label &L = *($labl$$label);
7356 int flag = $cmp$$cmpcode;
7358 switch(flag)
7359 {
7360 case 0x01: //equal
7361 if (&L)
7362 __ beq(op1, R0, L);
7363 else
7364 __ beq(op1, R0, (int)0);
7365 break;
7366 case 0x02: //not_equal
7367 if (&L)
7368 __ bne(op1, R0, L);
7369 else
7370 __ bne(op1, R0, (int)0);
7371 break;
7372 case 0x03: //above
7373 if(&L)
7374 __ bne(R0, op1, L);
7375 else
7376 __ bne(R0, op1, (int)0);
7377 break;
7378 case 0x04: //above_equal
// Unsigned x >= 0 is always true: beq(R0, R0, ...) is an unconditional branch.
7379 if(&L)
7380 __ beq(R0, R0, L);
7381 else
7382 __ beq(R0, R0, (int)0);
7383 break;
7384 case 0x05: //below
// Unsigned x < 0 is never true: emit nothing (early return also skips the
// trailing delay-slot nop, which is fine as no branch was emitted).
7385 return;
7386 break;
7387 case 0x06: //below_equal
7388 if(&L)
7389 __ beq(op1, R0, L);
7390 else
7391 __ beq(op1, R0, (int)0);
7392 break;
7393 default:
7394 Unimplemented();
7395 }
// Branch delay slot.
7396 __ nop();
7397 %}
7399 ins_pc_relative(1);
7400 ins_pipe( pipe_alu_branch );
7401 %}
// Unsigned int compare-and-branch, register vs. 16-bit immediate.
// above_equal/below use sltiu directly with the immediate; the other
// cases materialize the constant into AT first. Trailing nop fills the
// branch delay slot.
7404 instruct branchConIU_reg_immI16(cmpOpU cmp, mRegI src1, immI16 src2, label labl) %{
7405 match( If cmp (CmpU src1 src2) );
7406 effect(USE labl);
7407 ins_cost(180);
7408 format %{ "BR$cmp $src1, $src2, $labl #@branchConIU_reg_immI16" %}
7410 ins_encode %{
7411 Register op1 = $src1$$Register;
7412 int val = $src2$$constant;
// '&L' non-null => label known; otherwise emit offset-0 branch for later patching.
7413 Label &L = *($labl$$label);
7414 int flag = $cmp$$cmpcode;
7416 switch(flag)
7417 {
7418 case 0x01: //equal
7419 __ move(AT, val);
7420 if (&L)
7421 __ beq(op1, AT, L);
7422 else
7423 __ beq(op1, AT, (int)0);
7424 break;
7425 case 0x02: //not_equal
7426 __ move(AT, val);
7427 if (&L)
7428 __ bne(op1, AT, L);
7429 else
7430 __ bne(op1, AT, (int)0);
7431 break;
7432 case 0x03: //above
7433 __ move(AT, val);
7434 __ sltu(AT, AT, op1);
7435 if(&L)
7436 __ bne(R0, AT, L);
7437 else
7438 __ bne(R0, AT, (int)0);
7439 break;
7440 case 0x04: //above_equal
// sltiu encodes the 16-bit immediate directly, saving the move.
7441 __ sltiu(AT, op1, val);
7442 if(&L)
7443 __ beq(AT, R0, L);
7444 else
7445 __ beq(AT, R0, (int)0);
7446 break;
7447 case 0x05: //below
7448 __ sltiu(AT, op1, val);
7449 if(&L)
7450 __ bne(R0, AT, L);
7451 else
7452 __ bne(R0, AT, (int)0);
7453 break;
7454 case 0x06: //below_equal
7455 __ move(AT, val);
7456 __ sltu(AT, AT, op1);
7457 if(&L)
7458 __ beq(AT, R0, L);
7459 else
7460 __ beq(AT, R0, (int)0);
7461 break;
7462 default:
7463 Unimplemented();
7464 }
// Branch delay slot.
7465 __ nop();
7466 %}
7468 ins_pc_relative(1);
7469 ins_pipe( pipe_alu_branch );
7470 %}
// Signed long compare-and-branch, register vs. register. Unlike the int
// variants, each case emits its own delay-slot nop via __ delayed()->nop()
// instead of one shared trailing nop after the switch.
7473 instruct branchConL_regL_regL(cmpOp cmp, mRegL src1, mRegL src2, label labl) %{
7474 match( If cmp (CmpL src1 src2) );
7475 effect(USE labl);
7476 format %{ "BR$cmp $src1, $src2, $labl #@branchConL_regL_regL" %}
7477 ins_cost(250);
7479 ins_encode %{
7480 Register opr1_reg = as_Register($src1$$reg);
7481 Register opr2_reg = as_Register($src2$$reg);
// '&target' non-null => label known; otherwise emit offset-0 branch for later patching.
7483 Label &target = *($labl$$label);
7484 int flag = $cmp$$cmpcode;
7486 switch(flag)
7487 {
7488 case 0x01: //equal
7489 if (&target)
7490 __ beq(opr1_reg, opr2_reg, target);
7491 else
7492 __ beq(opr1_reg, opr2_reg, (int)0);
7493 __ delayed()->nop();
7494 break;
7496 case 0x02: //not_equal
7497 if(&target)
7498 __ bne(opr1_reg, opr2_reg, target);
7499 else
7500 __ bne(opr1_reg, opr2_reg, (int)0);
7501 __ delayed()->nop();
7502 break;
7504 case 0x03: //greater
7505 __ slt(AT, opr2_reg, opr1_reg);
7506 if(&target)
7507 __ bne(AT, R0, target);
7508 else
7509 __ bne(AT, R0, (int)0);
7510 __ delayed()->nop();
7511 break;
7513 case 0x04: //greater_equal
7514 __ slt(AT, opr1_reg, opr2_reg);
7515 if(&target)
7516 __ beq(AT, R0, target);
7517 else
7518 __ beq(AT, R0, (int)0);
7519 __ delayed()->nop();
7521 break;
7523 case 0x05: //less
7524 __ slt(AT, opr1_reg, opr2_reg);
7525 if(&target)
7526 __ bne(AT, R0, target);
7527 else
7528 __ bne(AT, R0, (int)0);
7529 __ delayed()->nop();
7531 break;
7533 case 0x06: //less_equal
7534 __ slt(AT, opr2_reg, opr1_reg);
7536 if(&target)
7537 __ beq(AT, R0, target);
7538 else
7539 __ beq(AT, R0, (int)0);
7540 __ delayed()->nop();
7542 break;
7544 default:
7545 Unimplemented();
7546 }
7547 %}
7550 ins_pc_relative(1);
7551 ins_pipe( pipe_alu_branch );
7552 %}
// Signed long compare-and-branch against a 16-bit immediate, implemented
// as AT = src1 - val (daddiu with negated immediate) followed by a
// compare-with-zero branch on AT.
// NOTE(review): the subtract-then-test trick is only correct when
// src1 - val cannot overflow -- presumably guaranteed by the immL16_sub
// operand's range predicate; TODO confirm.
7554 instruct branchConL_reg_immL16_sub(cmpOp cmp, mRegL src1, immL16_sub src2, label labl) %{
7555 match( If cmp (CmpL src1 src2) );
7556 effect(USE labl);
7557 ins_cost(180);
7558 format %{ "BR$cmp $src1, $src2, $labl #@branchConL_reg_immL16_sub" %}
7560 ins_encode %{
7561 Register op1 = $src1$$Register;
7562 int val = $src2$$constant;
// '&L' non-null => label known; otherwise emit offset-0 branch for later patching.
7563 Label &L = *($labl$$label);
7564 int flag = $cmp$$cmpcode;
// AT = src1 - val; the sign/zero of AT now encodes the comparison result.
7566 __ daddiu(AT, op1, -1 * val);
7567 switch(flag)
7568 {
7569 case 0x01: //equal
7570 if (&L)
7571 __ beq(R0, AT, L);
7572 else
7573 __ beq(R0, AT, (int)0);
7574 break;
7575 case 0x02: //not_equal
7576 if (&L)
7577 __ bne(R0, AT, L);
7578 else
7579 __ bne(R0, AT, (int)0);
7580 break;
7581 case 0x03: //greater
7582 if(&L)
7583 __ bgtz(AT, L);
7584 else
7585 __ bgtz(AT, (int)0);
7586 break;
7587 case 0x04: //greater_equal
7588 if(&L)
7589 __ bgez(AT, L);
7590 else
7591 __ bgez(AT, (int)0);
7592 break;
7593 case 0x05: //less
7594 if(&L)
7595 __ bltz(AT, L);
7596 else
7597 __ bltz(AT, (int)0);
7598 break;
7599 case 0x06: //less_equal
7600 if(&L)
7601 __ blez(AT, L);
7602 else
7603 __ blez(AT, (int)0);
7604 break;
7605 default:
7606 Unimplemented();
7607 }
// Branch delay slot.
7608 __ nop();
7609 %}
7611 ins_pc_relative(1);
7612 ins_pipe( pipe_alu_branch );
7613 %}
// Signed int compare-and-branch against a 16-bit immediate, implemented
// as AT = src1 - val (32-bit addiu32 with negated immediate) followed by
// a compare-with-zero branch on AT.
// NOTE(review): correct only when src1 - val cannot overflow 32 bits --
// presumably guaranteed by the immI16_sub operand's range; TODO confirm.
7616 instruct branchConI_reg_imm16_sub(cmpOp cmp, mRegI src1, immI16_sub src2, label labl) %{
7617 match( If cmp (CmpI src1 src2) );
7618 effect(USE labl);
7619 ins_cost(180);
7620 format %{ "BR$cmp $src1, $src2, $labl #@branchConI_reg_imm16_sub" %}
7622 ins_encode %{
7623 Register op1 = $src1$$Register;
7624 int val = $src2$$constant;
// '&L' non-null => label known; otherwise emit offset-0 branch for later patching.
7625 Label &L = *($labl$$label);
7626 int flag = $cmp$$cmpcode;
// AT = src1 - val (32-bit); the sign/zero of AT encodes the comparison result.
7628 __ addiu32(AT, op1, -1 * val);
7629 switch(flag)
7630 {
7631 case 0x01: //equal
7632 if (&L)
7633 __ beq(R0, AT, L);
7634 else
7635 __ beq(R0, AT, (int)0);
7636 break;
7637 case 0x02: //not_equal
7638 if (&L)
7639 __ bne(R0, AT, L);
7640 else
7641 __ bne(R0, AT, (int)0);
7642 break;
7643 case 0x03: //greater
7644 if(&L)
7645 __ bgtz(AT, L);
7646 else
7647 __ bgtz(AT, (int)0);
7648 break;
7649 case 0x04: //greater_equal
7650 if(&L)
7651 __ bgez(AT, L);
7652 else
7653 __ bgez(AT, (int)0);
7654 break;
7655 case 0x05: //less
7656 if(&L)
7657 __ bltz(AT, L);
7658 else
7659 __ bltz(AT, (int)0);
7660 break;
7661 case 0x06: //less_equal
7662 if(&L)
7663 __ blez(AT, L);
7664 else
7665 __ blez(AT, (int)0);
7666 break;
7667 default:
7668 Unimplemented();
7669 }
// Branch delay slot.
7670 __ nop();
7671 %}
7673 ins_pc_relative(1);
7674 ins_pipe( pipe_alu_branch );
7675 %}
// Signed long compare against zero and branch. Mostly uses the dedicated
// compare-with-zero branches (bgtz/bgez/blez); the 'less' case goes
// through slt+bne instead of bltz. A single shared delay-slot nop is
// emitted after the switch via __ delayed()->nop().
7677 instruct branchConL_regL_immL0(cmpOp cmp, mRegL src1, immL0 zero, label labl) %{
7678 match( If cmp (CmpL src1 zero) );
7679 effect(USE labl);
7680 format %{ "BR$cmp $src1, zero, $labl #@branchConL_regL_immL0" %}
7681 ins_cost(150);
7683 ins_encode %{
7684 Register opr1_reg = as_Register($src1$$reg);
// '&target' non-null => label known; otherwise emit offset-0 branch for later patching.
7685 Label &target = *($labl$$label);
7686 int flag = $cmp$$cmpcode;
7688 switch(flag)
7689 {
7690 case 0x01: //equal
7691 if (&target)
7692 __ beq(opr1_reg, R0, target);
7693 else
7694 __ beq(opr1_reg, R0, int(0));
7695 break;
7697 case 0x02: //not_equal
7698 if(&target)
7699 __ bne(opr1_reg, R0, target);
7700 else
7701 __ bne(opr1_reg, R0, (int)0);
7702 break;
7704 case 0x03: //greater
7705 if(&target)
7706 __ bgtz(opr1_reg, target);
7707 else
7708 __ bgtz(opr1_reg, (int)0);
7709 break;
7711 case 0x04: //greater_equal
7712 if(&target)
7713 __ bgez(opr1_reg, target);
7714 else
7715 __ bgez(opr1_reg, (int)0);
7716 break;
7718 case 0x05: //less
7719 __ slt(AT, opr1_reg, R0);
7720 if(&target)
7721 __ bne(AT, R0, target);
7722 else
7723 __ bne(AT, R0, (int)0);
7724 break;
7726 case 0x06: //less_equal
7727 if (&target)
7728 __ blez(opr1_reg, target);
7729 else
7730 __ blez(opr1_reg, int(0));
7731 break;
7733 default:
7734 Unimplemented();
7735 }
// Fill the branch delay slot of whichever branch was emitted above.
7736 __ delayed()->nop();
7737 %}
7740 ins_pc_relative(1);
7741 ins_pipe( pipe_alu_branch );
7742 %}
7745 //FIXME: verify that NaN (unordered) operands are handled consistently by the float/double compare-and-branch instructs below.
// Single-float compare-and-branch via the FPU condition flag (c.cond.s
// then bc1t/bc1f). Ordered relations are built from unordered predicates
// so that NaN operands fall through (branch not taken) for gt/ge, and the
// eq case uses c_eq_s which is false on NaN. Trailing nop fills the
// branch delay slot.
7746 instruct branchConF_reg_reg(cmpOp cmp, regF src1, regF src2, label labl) %{
7747 match( If cmp (CmpF src1 src2) );
7748 effect(USE labl);
7749 format %{ "BR$cmp $src1, $src2, $labl #@branchConF_reg_reg" %}
7751 ins_encode %{
7752 FloatRegister reg_op1 = $src1$$FloatRegister;
7753 FloatRegister reg_op2 = $src2$$FloatRegister;
// '&L' non-null => label known; otherwise emit offset-0 branch for later patching.
7754 Label &L = *($labl$$label);
7755 int flag = $cmp$$cmpcode;
7757 switch(flag)
7758 {
7759 case 0x01: //equal
7760 __ c_eq_s(reg_op1, reg_op2);
7761 if (&L)
7762 __ bc1t(L);
7763 else
7764 __ bc1t((int)0);
7765 break;
7766 case 0x02: //not_equal
7767 __ c_eq_s(reg_op1, reg_op2);
7768 if (&L)
7769 __ bc1f(L);
7770 else
7771 __ bc1f((int)0);
7772 break;
7773 case 0x03: //greater
// !(unordered || op1 <= op2)  <=>  ordered && op1 > op2
7774 __ c_ule_s(reg_op1, reg_op2);
7775 if(&L)
7776 __ bc1f(L);
7777 else
7778 __ bc1f((int)0);
7779 break;
7780 case 0x04: //greater_equal
7781 __ c_ult_s(reg_op1, reg_op2);
7782 if(&L)
7783 __ bc1f(L);
7784 else
7785 __ bc1f((int)0);
7786 break;
7787 case 0x05: //less
7788 __ c_ult_s(reg_op1, reg_op2);
7789 if(&L)
7790 __ bc1t(L);
7791 else
7792 __ bc1t((int)0);
7793 break;
7794 case 0x06: //less_equal
7795 __ c_ule_s(reg_op1, reg_op2);
7796 if(&L)
7797 __ bc1t(L);
7798 else
7799 __ bc1t((int)0);
7800 break;
7801 default:
7802 Unimplemented();
7803 }
// Branch delay slot.
7804 __ nop();
7805 %}
7807 ins_pc_relative(1);
7808 ins_pipe(pipe_slow);
7809 %}
// Double compare-and-branch via the FPU condition flag (c.cond.d then
// bc1t/bc1f). Mirrors branchConF_reg_reg; see the 2016/4/19 note below
// for why not_equal must use c_eq_d rather than c_ueq_d (NaN != NaN must
// branch). Trailing nop fills the branch delay slot.
7811 instruct branchConD_reg_reg(cmpOp cmp, regD src1, regD src2, label labl) %{
7812 match( If cmp (CmpD src1 src2) );
7813 effect(USE labl);
7814 format %{ "BR$cmp $src1, $src2, $labl #@branchConD_reg_reg" %}
7816 ins_encode %{
7817 FloatRegister reg_op1 = $src1$$FloatRegister;
7818 FloatRegister reg_op2 = $src2$$FloatRegister;
// '&L' non-null => label known; otherwise emit offset-0 branch for later patching.
7819 Label &L = *($labl$$label);
7820 int flag = $cmp$$cmpcode;
7822 switch(flag)
7823 {
7824 case 0x01: //equal
7825 __ c_eq_d(reg_op1, reg_op2);
7826 if (&L)
7827 __ bc1t(L);
7828 else
7829 __ bc1t((int)0);
7830 break;
7831 case 0x02: //not_equal
7832 //2016/4/19 aoqi: c_ueq_d cannot distinguish NaN from equal. Double.isNaN(Double) is implemented by 'f != f', so the use of c_ueq_d causes bugs.
7833 __ c_eq_d(reg_op1, reg_op2);
7834 if (&L)
7835 __ bc1f(L);
7836 else
7837 __ bc1f((int)0);
7838 break;
7839 case 0x03: //greater
7840 __ c_ule_d(reg_op1, reg_op2);
7841 if(&L)
7842 __ bc1f(L);
7843 else
7844 __ bc1f((int)0);
7845 break;
7846 case 0x04: //greater_equal
7847 __ c_ult_d(reg_op1, reg_op2);
7848 if(&L)
7849 __ bc1f(L);
7850 else
7851 __ bc1f((int)0);
7852 break;
7853 case 0x05: //less
7854 __ c_ult_d(reg_op1, reg_op2);
7855 if(&L)
7856 __ bc1t(L);
7857 else
7858 __ bc1t((int)0);
7859 break;
7860 case 0x06: //less_equal
7861 __ c_ule_d(reg_op1, reg_op2);
7862 if(&L)
7863 __ bc1t(L);
7864 else
7865 __ bc1t((int)0);
7866 break;
7867 default:
7868 Unimplemented();
7869 }
// Branch delay slot.
7870 __ nop();
7871 %}
7873 ins_pc_relative(1);
7874 ins_pipe(pipe_slow);
7875 %}
7878 // Call Runtime Instruction
// Direct call into the runtime; delegates emission to the shared
// Java_To_Runtime encoding. Aligned to 16 bytes per ins_alignment.
7879 instruct CallRuntimeDirect(method meth) %{
7880 match(CallRuntime );
7881 effect(USE meth);
7883 ins_cost(300);
7884 format %{ "CALL,runtime #@CallRuntimeDirect" %}
7885 ins_encode( Java_To_Runtime( meth ) );
7886 ins_pipe( pipe_slow );
7887 ins_alignment(16);
7888 %}
7892 //------------------------MemBar Instructions-------------------------------
7893 //Memory barrier flavors
// Acquire barrier: no instruction needed (size 0); ordering is presumed
// provided by the preceding load/CAS sequence on this port.
7895 instruct membar_acquire() %{
7896 match(MemBarAcquire);
7897 ins_cost(0);
7899 size(0);
7900 format %{ "MEMBAR-acquire (empty) @ membar_acquire" %}
7901 ins_encode();
7902 ins_pipe(empty);
7903 %}
// LoadFence: emit a full MIPS sync instruction.
7905 instruct load_fence() %{
7906 match(LoadFence);
7907 ins_cost(400);
7909 format %{ "MEMBAR @ load_fence" %}
7910 ins_encode %{
7911 __ sync();
7912 %}
7913 ins_pipe(pipe_slow);
7914 %}
// Acquire-on-lock barrier: empty encoding — the CAS in the preceding
// FastLock already provides acquire semantics.
7916 instruct membar_acquire_lock()
7917 %{
7918 match(MemBarAcquireLock);
7919 ins_cost(0);
7921 size(0);
7922 format %{ "MEMBAR-acquire (acquire as part of CAS in prior FastLock so empty encoding) @ membar_acquire_lock" %}
7923 ins_encode();
7924 ins_pipe(empty);
7925 %}
// Release barrier: no instruction needed (size 0) on this port.
7927 instruct membar_release() %{
7928 match(MemBarRelease);
7929 ins_cost(0);
7931 size(0);
7932 format %{ "MEMBAR-release (empty) @ membar_release" %}
7933 ins_encode();
7934 ins_pipe(empty);
7935 %}
// StoreFence: emit a full MIPS sync instruction.
7937 instruct store_fence() %{
7938 match(StoreFence);
7939 ins_cost(400);
7941 format %{ "MEMBAR @ store_fence" %}
7943 ins_encode %{
7944 __ sync();
7945 %}
7947 ins_pipe(pipe_slow);
7948 %}
// Release-on-unlock barrier: empty encoding — release is performed inside
// FastUnlock itself.
7950 instruct membar_release_lock()
7951 %{
7952 match(MemBarReleaseLock);
7953 ins_cost(0);
7955 size(0);
7956 format %{ "MEMBAR-release-lock (release in FastUnlock so empty) @ membar_release_lock" %}
7957 ins_encode();
7958 ins_pipe(empty);
7959 %}
// Full volatile barrier: sync, skipped entirely on uniprocessor systems
// where no cross-CPU ordering is required.
7962 instruct membar_volatile() %{
7963 match(MemBarVolatile);
7964 ins_cost(400);
7966 format %{ "MEMBAR-volatile" %}
7967 ins_encode %{
7968 if( !os::is_MP() ) return; // Not needed on single CPU
7969 __ sync();
7971 %}
7972 ins_pipe(pipe_slow);
7973 %}
// Volatile barrier elided (size 0) when the matcher proves a following
// store-load barrier already covers it (post_store_load_barrier).
7975 instruct unnecessary_membar_volatile() %{
7976 match(MemBarVolatile);
7977 predicate(Matcher::post_store_load_barrier(n));
7978 ins_cost(0);
7980 size(0);
7981 format %{ "MEMBAR-volatile (unnecessary so empty encoding) @ unnecessary_membar_volatile" %}
7982 ins_encode( );
7983 ins_pipe(empty);
7984 %}
// StoreStore barrier: empty encoding (size 0) on this port.
7986 instruct membar_storestore() %{
7987 match(MemBarStoreStore);
7989 ins_cost(0);
7990 size(0);
7991 format %{ "MEMBAR-storestore (empty encoding) @ membar_storestore" %}
7992 ins_encode( );
7993 ins_pipe(empty);
7994 %}
7996 //----------Move Instructions--------------------------------------------------
// CastX2P: reinterpret a long as a pointer — just a register move,
// elided when source and destination already coincide.
7997 instruct castX2P(mRegP dst, mRegL src) %{
7998 match(Set dst (CastX2P src));
7999 format %{ "castX2P $dst, $src @ castX2P" %}
8000 ins_encode %{
8001 Register src = $src$$Register;
8002 Register dst = $dst$$Register;
8004 if(src != dst)
8005 __ move(dst, src);
8006 %}
8007 ins_cost(10);
8008 ins_pipe( ialu_regI_mov );
8009 %}
// CastP2X: reinterpret a pointer as a long — just a register move,
// elided when source and destination already coincide.
8011 instruct castP2X(mRegL dst, mRegP src ) %{
8012 match(Set dst (CastP2X src));
8014 format %{ "mov $dst, $src\t #@castP2X" %}
8015 ins_encode %{
8016 Register src = $src$$Register;
8017 Register dst = $dst$$Register;
8019 if(src != dst)
8020 __ move(dst, src);
8021 %}
8022 ins_pipe( ialu_regI_mov );
8023 %}
// MoveF2I: raw bit move of a float into a GPR via mfc1 (no conversion).
8025 instruct MoveF2I_reg_reg(mRegI dst, regF src) %{
8026 match(Set dst (MoveF2I src));
8027 effect(DEF dst, USE src);
8028 ins_cost(85);
8029 format %{ "MoveF2I $dst, $src @ MoveF2I_reg_reg" %}
8030 ins_encode %{
8031 Register dst = as_Register($dst$$reg);
8032 FloatRegister src = as_FloatRegister($src$$reg);
8034 __ mfc1(dst, src);
8035 %}
8036 ins_pipe( pipe_slow );
8037 %}
// MoveI2F: raw bit move of a GPR into a float register via mtc1.
8039 instruct MoveI2F_reg_reg(regF dst, mRegI src) %{
8040 match(Set dst (MoveI2F src));
8041 effect(DEF dst, USE src);
8042 ins_cost(85);
8043 format %{ "MoveI2F $dst, $src @ MoveI2F_reg_reg" %}
8044 ins_encode %{
8045 Register src = as_Register($src$$reg);
8046 FloatRegister dst = as_FloatRegister($dst$$reg);
8048 __ mtc1(src, dst);
8049 %}
8050 ins_pipe( pipe_slow );
8051 %}
// MoveD2L: raw 64-bit bit move of a double into a GPR via dmfc1.
8053 instruct MoveD2L_reg_reg(mRegL dst, regD src) %{
8054 match(Set dst (MoveD2L src));
8055 effect(DEF dst, USE src);
8056 ins_cost(85);
8057 format %{ "MoveD2L $dst, $src @ MoveD2L_reg_reg" %}
8058 ins_encode %{
8059 Register dst = as_Register($dst$$reg);
8060 FloatRegister src = as_FloatRegister($src$$reg);
8062 __ dmfc1(dst, src);
8063 %}
8064 ins_pipe( pipe_slow );
8065 %}
// MoveL2D: raw 64-bit bit move of a GPR into a double register via dmtc1.
8067 instruct MoveL2D_reg_reg(regD dst, mRegL src) %{
8068 match(Set dst (MoveL2D src));
8069 effect(DEF dst, USE src);
8070 ins_cost(85);
8071 format %{ "MoveL2D $dst, $src @ MoveL2D_reg_reg" %}
8072 ins_encode %{
8073 FloatRegister dst = as_FloatRegister($dst$$reg);
8074 Register src = as_Register($src$$reg);
8076 __ dmtc1(src, dst);
8077 %}
8078 ins_pipe( pipe_slow );
8079 %}
8081 //----------Conditional Move---------------------------------------------------
8082 // Conditional move
// Branchless conditional move for CMoveI guarded by a signed int compare:
// the condition is computed into AT (subu32 difference for eq/ne, slt for
// ordered cases), then movz/movn (move-if-AT-zero / move-if-AT-nonzero)
// conditionally copies src into dst.
8083 instruct cmovI_cmpI_reg_reg(mRegI dst, mRegI src, mRegI tmp1, mRegI tmp2, cmpOp cop ) %{
8084 match(Set dst (CMoveI (Binary cop (CmpI tmp1 tmp2)) (Binary dst src)));
8085 ins_cost(80);
8086 format %{
8087 "CMP$cop $tmp1, $tmp2\t @cmovI_cmpI_reg_reg\n"
8088 "\tCMOV $dst,$src \t @cmovI_cmpI_reg_reg"
8089 %}
8091 ins_encode %{
8092 Register op1 = $tmp1$$Register;
8093 Register op2 = $tmp2$$Register;
8094 Register dst = $dst$$Register;
8095 Register src = $src$$Register;
8096 int flag = $cop$$cmpcode;
8098 switch(flag)
8099 {
8100 case 0x01: //equal
8101 __ subu32(AT, op1, op2);
8102 __ movz(dst, src, AT);
8103 break;
8105 case 0x02: //not_equal
8106 __ subu32(AT, op1, op2);
8107 __ movn(dst, src, AT);
8108 break;
8110 case 0x03: //great
8111 __ slt(AT, op2, op1);
8112 __ movn(dst, src, AT);
8113 break;
8115 case 0x04: //great_equal
8116 __ slt(AT, op1, op2);
8117 __ movz(dst, src, AT);
8118 break;
8120 case 0x05: //less
8121 __ slt(AT, op1, op2);
8122 __ movn(dst, src, AT);
8123 break;
8125 case 0x06: //less_equal
8126 __ slt(AT, op2, op1);
8127 __ movz(dst, src, AT);
8128 break;
8130 default:
8131 Unimplemented();
8132 }
8133 %}
8135 ins_pipe( pipe_slow );
8136 %}
// Branchless conditional move for CMoveI guarded by an unsigned pointer
// compare: condition into AT (subu difference for eq/ne, sltu for the
// above/below cases), then movz/movn copies src into dst.
8138 instruct cmovI_cmpP_reg_reg(mRegI dst, mRegI src, mRegP tmp1, mRegP tmp2, cmpOpU cop ) %{
8139 match(Set dst (CMoveI (Binary cop (CmpP tmp1 tmp2)) (Binary dst src)));
8140 ins_cost(80);
8141 format %{
8142 "CMPU$cop $tmp1,$tmp2\t @cmovI_cmpP_reg_reg\n\t"
8143 "CMOV $dst,$src\t @cmovI_cmpP_reg_reg"
8144 %}
8145 ins_encode %{
8146 Register op1 = $tmp1$$Register;
8147 Register op2 = $tmp2$$Register;
8148 Register dst = $dst$$Register;
8149 Register src = $src$$Register;
8150 int flag = $cop$$cmpcode;
8152 switch(flag)
8153 {
8154 case 0x01: //equal
// NOTE(review): full-width subu (vs. subu32 used for narrow oops) --
// presumably the 64-bit subtract in this macro assembler; TODO confirm.
8155 __ subu(AT, op1, op2);
8156 __ movz(dst, src, AT);
8157 break;
8159 case 0x02: //not_equal
8160 __ subu(AT, op1, op2);
8161 __ movn(dst, src, AT);
8162 break;
8164 case 0x03: //above
8165 __ sltu(AT, op2, op1);
8166 __ movn(dst, src, AT);
8167 break;
8169 case 0x04: //above_equal
8170 __ sltu(AT, op1, op2);
8171 __ movz(dst, src, AT);
8172 break;
8174 case 0x05: //below
8175 __ sltu(AT, op1, op2);
8176 __ movn(dst, src, AT);
8177 break;
8179 case 0x06: //below_equal
8180 __ sltu(AT, op2, op1);
8181 __ movz(dst, src, AT);
8182 break;
8184 default:
8185 Unimplemented();
8186 }
8187 %}
8189 ins_pipe( pipe_slow );
8190 %}
// Branchless conditional move for CMoveI guarded by a compressed-oop
// compare: 32-bit subu32 difference for eq/ne, sltu for above/below,
// then movz/movn copies src into dst.
8192 instruct cmovI_cmpN_reg_reg(mRegI dst, mRegI src, mRegN tmp1, mRegN tmp2, cmpOpU cop ) %{
8193 match(Set dst (CMoveI (Binary cop (CmpN tmp1 tmp2)) (Binary dst src)));
8194 ins_cost(80);
8195 format %{
8196 "CMPU$cop $tmp1,$tmp2\t @cmovI_cmpN_reg_reg\n\t"
8197 "CMOV $dst,$src\t @cmovI_cmpN_reg_reg"
8198 %}
8199 ins_encode %{
8200 Register op1 = $tmp1$$Register;
8201 Register op2 = $tmp2$$Register;
8202 Register dst = $dst$$Register;
8203 Register src = $src$$Register;
8204 int flag = $cop$$cmpcode;
8206 switch(flag)
8207 {
8208 case 0x01: //equal
8209 __ subu32(AT, op1, op2);
8210 __ movz(dst, src, AT);
8211 break;
8213 case 0x02: //not_equal
8214 __ subu32(AT, op1, op2);
8215 __ movn(dst, src, AT);
8216 break;
8218 case 0x03: //above
8219 __ sltu(AT, op2, op1);
8220 __ movn(dst, src, AT);
8221 break;
8223 case 0x04: //above_equal
8224 __ sltu(AT, op1, op2);
8225 __ movz(dst, src, AT);
8226 break;
8228 case 0x05: //below
8229 __ sltu(AT, op1, op2);
8230 __ movn(dst, src, AT);
8231 break;
8233 case 0x06: //below_equal
8234 __ sltu(AT, op2, op1);
8235 __ movz(dst, src, AT);
8236 break;
8238 default:
8239 Unimplemented();
8240 }
8241 %}
8243 ins_pipe( pipe_slow );
8244 %}
// Branchless conditional move for CMoveP guarded by a compressed-oop
// compare: 32-bit subu32 difference for eq/ne, sltu for above/below,
// then movz/movn copies src into dst.
8246 instruct cmovP_cmpN_reg_reg(mRegP dst, mRegP src, mRegN tmp1, mRegN tmp2, cmpOpU cop ) %{
8247 match(Set dst (CMoveP (Binary cop (CmpN tmp1 tmp2)) (Binary dst src)));
8248 ins_cost(80);
8249 format %{
8250 "CMPU$cop $tmp1,$tmp2\t @cmovP_cmpN_reg_reg\n\t"
8251 "CMOV $dst,$src\t @cmovP_cmpN_reg_reg"
8252 %}
8253 ins_encode %{
8254 Register op1 = $tmp1$$Register;
8255 Register op2 = $tmp2$$Register;
8256 Register dst = $dst$$Register;
8257 Register src = $src$$Register;
8258 int flag = $cop$$cmpcode;
8260 switch(flag)
8261 {
8262 case 0x01: //equal
8263 __ subu32(AT, op1, op2);
8264 __ movz(dst, src, AT);
8265 break;
8267 case 0x02: //not_equal
8268 __ subu32(AT, op1, op2);
8269 __ movn(dst, src, AT);
8270 break;
8272 case 0x03: //above
8273 __ sltu(AT, op2, op1);
8274 __ movn(dst, src, AT);
8275 break;
8277 case 0x04: //above_equal
8278 __ sltu(AT, op1, op2);
8279 __ movz(dst, src, AT);
8280 break;
8282 case 0x05: //below
8283 __ sltu(AT, op1, op2);
8284 __ movn(dst, src, AT);
8285 break;
8287 case 0x06: //below_equal
8288 __ sltu(AT, op2, op1);
8289 __ movz(dst, src, AT);
8290 break;
8292 default:
8293 Unimplemented();
8294 }
8295 %}
8297 ins_pipe( pipe_slow );
8298 %}
// Branchless conditional move for CMoveN guarded by an unsigned pointer
// compare: full-width subu difference for eq/ne, sltu for above/below,
// then movz/movn copies src into dst.
8300 instruct cmovN_cmpP_reg_reg(mRegN dst, mRegN src, mRegP tmp1, mRegP tmp2, cmpOpU cop ) %{
8301 match(Set dst (CMoveN (Binary cop (CmpP tmp1 tmp2)) (Binary dst src)));
8302 ins_cost(80);
8303 format %{
8304 "CMPU$cop $tmp1,$tmp2\t @cmovN_cmpP_reg_reg\n\t"
8305 "CMOV $dst,$src\t @cmovN_cmpP_reg_reg"
8306 %}
8307 ins_encode %{
8308 Register op1 = $tmp1$$Register;
8309 Register op2 = $tmp2$$Register;
8310 Register dst = $dst$$Register;
8311 Register src = $src$$Register;
8312 int flag = $cop$$cmpcode;
8314 switch(flag)
8315 {
8316 case 0x01: //equal
8317 __ subu(AT, op1, op2);
8318 __ movz(dst, src, AT);
8319 break;
8321 case 0x02: //not_equal
8322 __ subu(AT, op1, op2);
8323 __ movn(dst, src, AT);
8324 break;
8326 case 0x03: //above
8327 __ sltu(AT, op2, op1);
8328 __ movn(dst, src, AT);
8329 break;
8331 case 0x04: //above_equal
8332 __ sltu(AT, op1, op2);
8333 __ movz(dst, src, AT);
8334 break;
8336 case 0x05: //below
8337 __ sltu(AT, op1, op2);
8338 __ movn(dst, src, AT);
8339 break;
8341 case 0x06: //below_equal
8342 __ sltu(AT, op2, op1);
8343 __ movz(dst, src, AT);
8344 break;
8346 default:
8347 Unimplemented();
8348 }
8349 %}
8351 ins_pipe( pipe_slow );
8352 %}
// Branchless conditional move for CMoveP guarded by a double compare:
// sets the FPU condition flag with c.cond.d, then movt/movf (move on FP
// flag true/false) conditionally copies src into dst.
// NOTE(review): greater/greater_equal use the ordered predicates
// (c_ole_d/c_olt_d) while less/less_equal use the unordered ones
// (c_ult_d/c_ule_d); the branch version (branchConD_reg_reg) uses
// c_ule_d/c_ult_d for greater/greater_equal instead. Whether NaN operands
// are handled identically in both forms should be verified -- TODO.
8354 instruct cmovP_cmpD_reg_reg(mRegP dst, mRegP src, regD tmp1, regD tmp2, cmpOp cop ) %{
8355 match(Set dst (CMoveP (Binary cop (CmpD tmp1 tmp2)) (Binary dst src)));
8356 ins_cost(80);
8357 format %{
8358 "CMP$cop $tmp1, $tmp2\t @cmovP_cmpD_reg_reg\n"
8359 "\tCMOV $dst,$src \t @cmovP_cmpD_reg_reg"
8360 %}
8361 ins_encode %{
8362 FloatRegister reg_op1 = as_FloatRegister($tmp1$$reg);
8363 FloatRegister reg_op2 = as_FloatRegister($tmp2$$reg);
8364 Register dst = as_Register($dst$$reg);
8365 Register src = as_Register($src$$reg);
8367 int flag = $cop$$cmpcode;
8369 switch(flag)
8370 {
8371 case 0x01: //equal
8372 __ c_eq_d(reg_op1, reg_op2);
8373 __ movt(dst, src);
8374 break;
8375 case 0x02: //not_equal
8376 __ c_eq_d(reg_op1, reg_op2);
8377 __ movf(dst, src);
8378 break;
8379 case 0x03: //greater
8380 __ c_ole_d(reg_op1, reg_op2);
8381 __ movf(dst, src);
8382 break;
8383 case 0x04: //greater_equal
8384 __ c_olt_d(reg_op1, reg_op2);
8385 __ movf(dst, src);
8386 break;
8387 case 0x05: //less
8388 __ c_ult_d(reg_op1, reg_op2);
8389 __ movt(dst, src);
8390 break;
8391 case 0x06: //less_equal
8392 __ c_ule_d(reg_op1, reg_op2);
8393 __ movt(dst, src);
8394 break;
8395 default:
8396 Unimplemented();
8397 }
8398 %}
8400 ins_pipe( pipe_slow );
8401 %}
// Branchless conditional move for CMoveN guarded by a compressed-oop
// compare: 32-bit subu32 difference for eq/ne, sltu for above/below,
// then movz/movn copies src into dst.
8404 instruct cmovN_cmpN_reg_reg(mRegN dst, mRegN src, mRegN tmp1, mRegN tmp2, cmpOpU cop ) %{
8405 match(Set dst (CMoveN (Binary cop (CmpN tmp1 tmp2)) (Binary dst src)));
8406 ins_cost(80);
8407 format %{
8408 "CMPU$cop $tmp1,$tmp2\t @cmovN_cmpN_reg_reg\n\t"
8409 "CMOV $dst,$src\t @cmovN_cmpN_reg_reg"
8410 %}
8411 ins_encode %{
8412 Register op1 = $tmp1$$Register;
8413 Register op2 = $tmp2$$Register;
8414 Register dst = $dst$$Register;
8415 Register src = $src$$Register;
8416 int flag = $cop$$cmpcode;
8418 switch(flag)
8419 {
8420 case 0x01: //equal
8421 __ subu32(AT, op1, op2);
8422 __ movz(dst, src, AT);
8423 break;
8425 case 0x02: //not_equal
8426 __ subu32(AT, op1, op2);
8427 __ movn(dst, src, AT);
8428 break;
8430 case 0x03: //above
8431 __ sltu(AT, op2, op1);
8432 __ movn(dst, src, AT);
8433 break;
8435 case 0x04: //above_equal
8436 __ sltu(AT, op1, op2);
8437 __ movz(dst, src, AT);
8438 break;
8440 case 0x05: //below
8441 __ sltu(AT, op1, op2);
8442 __ movn(dst, src, AT);
8443 break;
8445 case 0x06: //below_equal
8446 __ sltu(AT, op2, op1);
8447 __ movz(dst, src, AT);
8448 break;
8450 default:
8451 Unimplemented();
8452 }
8453 %}
8455 ins_pipe( pipe_slow );
8456 %}
// Branchless conditional move for CMoveI guarded by an unsigned int
// compare: subu difference for eq/ne, sltu for above/below, then
// movz/movn copies src into dst.
8459 instruct cmovI_cmpU_reg_reg(mRegI dst, mRegI src, mRegI tmp1, mRegI tmp2, cmpOpU cop ) %{
8460 match(Set dst (CMoveI (Binary cop (CmpU tmp1 tmp2)) (Binary dst src)));
8461 ins_cost(80);
8462 format %{
8463 "CMPU$cop $tmp1,$tmp2\t @cmovI_cmpU_reg_reg\n\t"
8464 "CMOV $dst,$src\t @cmovI_cmpU_reg_reg"
8465 %}
8466 ins_encode %{
8467 Register op1 = $tmp1$$Register;
8468 Register op2 = $tmp2$$Register;
8469 Register dst = $dst$$Register;
8470 Register src = $src$$Register;
8471 int flag = $cop$$cmpcode;
8473 switch(flag)
8474 {
8475 case 0x01: //equal
8476 __ subu(AT, op1, op2);
8477 __ movz(dst, src, AT);
8478 break;
8480 case 0x02: //not_equal
8481 __ subu(AT, op1, op2);
8482 __ movn(dst, src, AT);
8483 break;
8485 case 0x03: //above
8486 __ sltu(AT, op2, op1);
8487 __ movn(dst, src, AT);
8488 break;
8490 case 0x04: //above_equal
8491 __ sltu(AT, op1, op2);
8492 __ movz(dst, src, AT);
8493 break;
8495 case 0x05: //below
8496 __ sltu(AT, op1, op2);
8497 __ movn(dst, src, AT);
8498 break;
8500 case 0x06: //below_equal
8501 __ sltu(AT, op2, op1);
8502 __ movz(dst, src, AT);
8503 break;
8505 default:
8506 Unimplemented();
8507 }
8508 %}
8510 ins_pipe( pipe_slow );
8511 %}
// Branchless conditional move for CMoveI guarded by a signed long
// compare: subu difference for eq/ne, slt for the ordered cases, then
// movz/movn copies src into dst.
// NOTE(review): eq/ne rely on 'subu' being the full 64-bit subtract in
// this macro assembler (as opposed to subu32 used for 32-bit operands);
// if subu truncated to 32 bits, longs differing only in the high word
// would compare equal -- TODO confirm against the assembler definitions.
8513 instruct cmovI_cmpL_reg_reg(mRegI dst, mRegI src, mRegL tmp1, mRegL tmp2, cmpOp cop ) %{
8514 match(Set dst (CMoveI (Binary cop (CmpL tmp1 tmp2)) (Binary dst src)));
8515 ins_cost(80);
8516 format %{
8517 "CMP$cop $tmp1, $tmp2\t @cmovI_cmpL_reg_reg\n"
8518 "\tCMOV $dst,$src \t @cmovI_cmpL_reg_reg"
8519 %}
8520 ins_encode %{
8521 Register opr1 = as_Register($tmp1$$reg);
8522 Register opr2 = as_Register($tmp2$$reg);
8523 Register dst = $dst$$Register;
8524 Register src = $src$$Register;
8525 int flag = $cop$$cmpcode;
8527 switch(flag)
8528 {
8529 case 0x01: //equal
8530 __ subu(AT, opr1, opr2);
8531 __ movz(dst, src, AT);
8532 break;
8534 case 0x02: //not_equal
8535 __ subu(AT, opr1, opr2);
8536 __ movn(dst, src, AT);
8537 break;
8539 case 0x03: //greater
8540 __ slt(AT, opr2, opr1);
8541 __ movn(dst, src, AT);
8542 break;
8544 case 0x04: //greater_equal
8545 __ slt(AT, opr1, opr2);
8546 __ movz(dst, src, AT);
8547 break;
8549 case 0x05: //less
8550 __ slt(AT, opr1, opr2);
8551 __ movn(dst, src, AT);
8552 break;
8554 case 0x06: //less_equal
8555 __ slt(AT, opr2, opr1);
8556 __ movz(dst, src, AT);
8557 break;
8559 default:
8560 Unimplemented();
8561 }
8562 %}
8564 ins_pipe( pipe_slow );
8565 %}
// CMoveP guarded by a signed long compare; identical condition logic to
// cmovI_cmpL_reg_reg but moves a pointer register.  AT is clobbered.
instruct cmovP_cmpL_reg_reg(mRegP dst, mRegP src, mRegL tmp1, mRegL tmp2, cmpOp cop ) %{
  match(Set dst (CMoveP (Binary cop (CmpL tmp1 tmp2)) (Binary dst src)));
  ins_cost(80);
  format %{
             "CMP$cop $tmp1, $tmp2\t @cmovP_cmpL_reg_reg\n"
             "\tCMOV $dst,$src \t @cmovP_cmpL_reg_reg"
         %}
  ins_encode %{
    Register opr1 = as_Register($tmp1$$reg);
    Register opr2 = as_Register($tmp2$$reg);
    Register dst  = $dst$$Register;
    Register src  = $src$$Register;
    int flag = $cop$$cmpcode;

    switch(flag)
    {
      case 0x01: //equal
        __ subu(AT, opr1, opr2);   // AT == 0 iff equal
        __ movz(dst, src, AT);
        break;

      case 0x02: //not_equal
        __ subu(AT, opr1, opr2);
        __ movn(dst, src, AT);
        break;

      case 0x03: //greater
        __ slt(AT, opr2, opr1);
        __ movn(dst, src, AT);
        break;

      case 0x04: //greater_equal
        __ slt(AT, opr1, opr2);
        __ movz(dst, src, AT);
        break;

      case 0x05: //less
        __ slt(AT, opr1, opr2);
        __ movn(dst, src, AT);
        break;

      case 0x06: //less_equal
        __ slt(AT, opr2, opr1);
        __ movz(dst, src, AT);
        break;

      default:
        Unimplemented();
    }
  %}

  ins_pipe( pipe_slow );
%}
// CMoveI guarded by a double compare.  c.cond.d sets the FPU condition flag;
// movt/movf then conditionally move the GPR on that flag (branchless).
// NOTE(review): for the ordered conditions, an unordered (NaN) comparison makes
// the c_ole/c_olt flag false, so movf fires — confirm this matches the ideal
// CMove semantics C2 expects here.
instruct cmovI_cmpD_reg_reg(mRegI dst, mRegI src, regD tmp1, regD tmp2, cmpOp cop ) %{
  match(Set dst (CMoveI (Binary cop (CmpD tmp1 tmp2)) (Binary dst src)));
  ins_cost(80);
  format %{
             "CMP$cop $tmp1, $tmp2\t @cmovI_cmpD_reg_reg\n"
             "\tCMOV $dst,$src \t @cmovI_cmpD_reg_reg"
         %}
  ins_encode %{
    FloatRegister reg_op1 = as_FloatRegister($tmp1$$reg);
    FloatRegister reg_op2 = as_FloatRegister($tmp2$$reg);
    Register dst = as_Register($dst$$reg);
    Register src = as_Register($src$$reg);

    int flag = $cop$$cmpcode;

    switch(flag)
    {
      case 0x01: //equal
        __ c_eq_d(reg_op1, reg_op2);
        __ movt(dst, src);            // move if flag set (equal)
        break;
      case 0x02: //not_equal
        // 2016/4/19 aoqi: See instruct branchConD_reg_reg. The change in
        // branchConD_reg_reg fixed a bug. It seems similar here, so I made
        // the same change: test c_eq and move on flag-false.
        __ c_eq_d(reg_op1, reg_op2);
        __ movf(dst, src);            // move if NOT equal (incl. unordered)
        break;
      case 0x03: //greater
        __ c_ole_d(reg_op1, reg_op2); // flag = (op1 <= op2), ordered
        __ movf(dst, src);            // move if !(op1 <= op2)
        break;
      case 0x04: //greater_equal
        __ c_olt_d(reg_op1, reg_op2); // flag = (op1 < op2), ordered
        __ movf(dst, src);
        break;
      case 0x05: //less
        __ c_ult_d(reg_op1, reg_op2); // flag = (op1 < op2) or unordered
        __ movt(dst, src);
        break;
      case 0x06: //less_equal
        __ c_ule_d(reg_op1, reg_op2); // flag = (op1 <= op2) or unordered
        __ movt(dst, src);
        break;
      default:
        Unimplemented();
    }
  %}

  ins_pipe( pipe_slow );
%}
// CMoveP guarded by an unsigned pointer compare (CmpP): dst := src iff
// ($tmp1 <cop> $tmp2) holds as an unsigned 64-bit comparison.  AT is clobbered.
instruct cmovP_cmpP_reg_reg(mRegP dst, mRegP src, mRegP tmp1, mRegP tmp2, cmpOpU cop ) %{
  match(Set dst (CMoveP (Binary cop (CmpP tmp1 tmp2)) (Binary dst src)));
  ins_cost(80);
  format %{
             "CMPU$cop $tmp1,$tmp2\t @cmovP_cmpP_reg_reg\n\t"
             "CMOV $dst,$src\t @cmovP_cmpP_reg_reg"
         %}
  ins_encode %{
    Register op1 = $tmp1$$Register;
    Register op2 = $tmp2$$Register;
    Register dst = $dst$$Register;
    Register src = $src$$Register;
    int flag = $cop$$cmpcode;

    switch(flag)
    {
      case 0x01: //equal
        __ subu(AT, op1, op2);   // AT == 0 iff same pointer value
        __ movz(dst, src, AT);
        break;

      case 0x02: //not_equal
        __ subu(AT, op1, op2);
        __ movn(dst, src, AT);
        break;

      case 0x03: //above
        __ sltu(AT, op2, op1);   // unsigned pointer order
        __ movn(dst, src, AT);
        break;

      case 0x04: //above_equal
        __ sltu(AT, op1, op2);
        __ movz(dst, src, AT);
        break;

      case 0x05: //below
        __ sltu(AT, op1, op2);
        __ movn(dst, src, AT);
        break;

      case 0x06: //below_equal
        __ sltu(AT, op2, op1);
        __ movz(dst, src, AT);
        break;

      default:
        Unimplemented();
    }
  %}

  ins_pipe( pipe_slow );
%}
// CMoveP guarded by a SIGNED int compare (CmpI + cmpOp): dst := src iff
// ($tmp1 <cop> $tmp2) holds.  AT is clobbered.
// Note: slt is a signed compare, so the condition names are greater/less
// (the old "above/below" labels were misleading for a signed cmpOp).
instruct cmovP_cmpI_reg_reg(mRegP dst, mRegP src, mRegI tmp1, mRegI tmp2, cmpOp cop ) %{
  match(Set dst (CMoveP (Binary cop (CmpI tmp1 tmp2)) (Binary dst src)));
  ins_cost(80);
  format %{
             "CMP$cop $tmp1,$tmp2\t @cmovP_cmpI_reg_reg\n\t"
             "CMOV $dst,$src\t @cmovP_cmpI_reg_reg"
         %}
  ins_encode %{
    Register op1 = $tmp1$$Register;
    Register op2 = $tmp2$$Register;
    Register dst = $dst$$Register;
    Register src = $src$$Register;
    int flag = $cop$$cmpcode;

    switch(flag)
    {
      case 0x01: //equal
        __ subu32(AT, op1, op2);  // 32-bit subtract: AT == 0 iff equal
        __ movz(dst, src, AT);
        break;

      case 0x02: //not_equal
        __ subu32(AT, op1, op2);
        __ movn(dst, src, AT);
        break;

      case 0x03: //greater (signed)
        __ slt(AT, op2, op1);
        __ movn(dst, src, AT);
        break;

      case 0x04: //greater_equal (signed)
        __ slt(AT, op1, op2);
        __ movz(dst, src, AT);
        break;

      case 0x05: //less (signed)
        __ slt(AT, op1, op2);
        __ movn(dst, src, AT);
        break;

      case 0x06: //less_equal (signed)
        __ slt(AT, op2, op1);
        __ movz(dst, src, AT);
        break;

      default:
        Unimplemented();
    }
  %}

  ins_pipe( pipe_slow );
%}
// CMoveN (narrow oop) guarded by a SIGNED int compare.  AT is clobbered.
// slt is signed, so the conditions are labelled greater/less rather than
// the old "above/below" (which would imply an unsigned compare).
instruct cmovN_cmpI_reg_reg(mRegN dst, mRegN src, mRegI tmp1, mRegI tmp2, cmpOp cop ) %{
  match(Set dst (CMoveN (Binary cop (CmpI tmp1 tmp2)) (Binary dst src)));
  ins_cost(80);
  format %{
             "CMP$cop $tmp1,$tmp2\t @cmovN_cmpI_reg_reg\n\t"
             "CMOV $dst,$src\t @cmovN_cmpI_reg_reg"
         %}
  ins_encode %{
    Register op1 = $tmp1$$Register;
    Register op2 = $tmp2$$Register;
    Register dst = $dst$$Register;
    Register src = $src$$Register;
    int flag = $cop$$cmpcode;

    switch(flag)
    {
      case 0x01: //equal
        __ subu32(AT, op1, op2);
        __ movz(dst, src, AT);
        break;

      case 0x02: //not_equal
        __ subu32(AT, op1, op2);
        __ movn(dst, src, AT);
        break;

      case 0x03: //greater (signed)
        __ slt(AT, op2, op1);
        __ movn(dst, src, AT);
        break;

      case 0x04: //greater_equal (signed)
        __ slt(AT, op1, op2);
        __ movz(dst, src, AT);
        break;

      case 0x05: //less (signed)
        __ slt(AT, op1, op2);
        __ movn(dst, src, AT);
        break;

      case 0x06: //less_equal (signed)
        __ slt(AT, op2, op1);
        __ movz(dst, src, AT);
        break;

      default:
        Unimplemented();
    }
  %}

  ins_pipe( pipe_slow );
%}
// CMoveL guarded by a signed int compare: dst := src iff ($tmp1 <cop> $tmp2).
// AT is clobbered as the condition scratch register.
instruct cmovL_cmpI_reg_reg(mRegL dst, mRegL src, mRegI tmp1, mRegI tmp2, cmpOp cop ) %{
  match(Set dst (CMoveL (Binary cop (CmpI tmp1 tmp2)) (Binary dst src)));
  ins_cost(80);
  format %{
             "CMP$cop $tmp1, $tmp2\t @cmovL_cmpI_reg_reg\n"
             "\tCMOV $dst,$src \t @cmovL_cmpI_reg_reg"
         %}

  ins_encode %{
    Register op1 = $tmp1$$Register;
    Register op2 = $tmp2$$Register;
    Register dst = as_Register($dst$$reg);
    Register src = as_Register($src$$reg);
    int flag = $cop$$cmpcode;

    switch(flag)
    {
      case 0x01: //equal
        __ subu32(AT, op1, op2);  // 32-bit subtract: AT == 0 iff equal
        __ movz(dst, src, AT);
        break;

      case 0x02: //not_equal
        __ subu32(AT, op1, op2);
        __ movn(dst, src, AT);
        break;

      case 0x03: //greater
        __ slt(AT, op2, op1);
        __ movn(dst, src, AT);
        break;

      case 0x04: //greater_equal
        __ slt(AT, op1, op2);
        __ movz(dst, src, AT);
        break;

      case 0x05: //less
        __ slt(AT, op1, op2);
        __ movn(dst, src, AT);
        break;

      case 0x06: //less_equal
        __ slt(AT, op2, op1);
        __ movz(dst, src, AT);
        break;

      default:
        Unimplemented();
    }
  %}

  ins_pipe( pipe_slow );
%}
// CMoveL guarded by a signed long compare.  AT is clobbered.
instruct cmovL_cmpL_reg_reg(mRegL dst, mRegL src, mRegL tmp1, mRegL tmp2, cmpOp cop ) %{
  match(Set dst (CMoveL (Binary cop (CmpL tmp1 tmp2)) (Binary dst src)));
  ins_cost(80);
  format %{
             "CMP$cop $tmp1, $tmp2\t @cmovL_cmpL_reg_reg\n"
             "\tCMOV $dst,$src \t @cmovL_cmpL_reg_reg"
         %}
  ins_encode %{
    Register opr1 = as_Register($tmp1$$reg);
    Register opr2 = as_Register($tmp2$$reg);
    Register dst  = as_Register($dst$$reg);
    Register src  = as_Register($src$$reg);
    int flag = $cop$$cmpcode;

    switch(flag)
    {
      case 0x01: //equal
        __ subu(AT, opr1, opr2);  // AT == 0 iff equal (wrap-safe for equality)
        __ movz(dst, src, AT);
        break;

      case 0x02: //not_equal
        __ subu(AT, opr1, opr2);
        __ movn(dst, src, AT);
        break;

      case 0x03: //greater
        __ slt(AT, opr2, opr1);   // signed 64-bit compare
        __ movn(dst, src, AT);
        break;

      case 0x04: //greater_equal
        __ slt(AT, opr1, opr2);
        __ movz(dst, src, AT);
        break;

      case 0x05: //less
        __ slt(AT, opr1, opr2);
        __ movn(dst, src, AT);
        break;

      case 0x06: //less_equal
        __ slt(AT, opr2, opr1);
        __ movz(dst, src, AT);
        break;

      default:
        Unimplemented();
    }
  %}

  ins_pipe( pipe_slow );
%}
// CMoveL guarded by an unsigned narrow-oop compare (CmpN).  AT is clobbered.
instruct cmovL_cmpN_reg_reg(mRegL dst, mRegL src, mRegN tmp1, mRegN tmp2, cmpOpU cop ) %{
  match(Set dst (CMoveL (Binary cop (CmpN tmp1 tmp2)) (Binary dst src)));
  ins_cost(80);
  format %{
             "CMPU$cop $tmp1,$tmp2\t @cmovL_cmpN_reg_reg\n\t"
             "CMOV $dst,$src\t @cmovL_cmpN_reg_reg"
         %}
  ins_encode %{
    Register op1 = $tmp1$$Register;
    Register op2 = $tmp2$$Register;
    Register dst = $dst$$Register;
    Register src = $src$$Register;
    int flag = $cop$$cmpcode;

    switch(flag)
    {
      case 0x01: //equal
        __ subu32(AT, op1, op2);  // narrow oops compare equal iff 32-bit diff is 0
        __ movz(dst, src, AT);
        break;

      case 0x02: //not_equal
        __ subu32(AT, op1, op2);
        __ movn(dst, src, AT);
        break;

      case 0x03: //above
        __ sltu(AT, op2, op1);    // unsigned compare
        __ movn(dst, src, AT);
        break;

      case 0x04: //above_equal
        __ sltu(AT, op1, op2);
        __ movz(dst, src, AT);
        break;

      case 0x05: //below
        __ sltu(AT, op1, op2);
        __ movn(dst, src, AT);
        break;

      case 0x06: //below_equal
        __ sltu(AT, op2, op1);
        __ movz(dst, src, AT);
        break;

      default:
        Unimplemented();
    }
  %}

  ins_pipe( pipe_slow );
%}
// CMoveL guarded by a double compare; same flag/movt/movf scheme as
// cmovI_cmpD_reg_reg but the moved value is a long register.
instruct cmovL_cmpD_reg_reg(mRegL dst, mRegL src, regD tmp1, regD tmp2, cmpOp cop ) %{
  match(Set dst (CMoveL (Binary cop (CmpD tmp1 tmp2)) (Binary dst src)));
  ins_cost(80);
  format %{
             "CMP$cop $tmp1, $tmp2\t @cmovL_cmpD_reg_reg\n"
             "\tCMOV $dst,$src \t @cmovL_cmpD_reg_reg"
         %}
  ins_encode %{
    FloatRegister reg_op1 = as_FloatRegister($tmp1$$reg);
    FloatRegister reg_op2 = as_FloatRegister($tmp2$$reg);
    Register dst = as_Register($dst$$reg);
    Register src = as_Register($src$$reg);

    int flag = $cop$$cmpcode;

    switch(flag)
    {
      case 0x01: //equal
        __ c_eq_d(reg_op1, reg_op2);
        __ movt(dst, src);            // move if equal
        break;
      case 0x02: //not_equal
        __ c_eq_d(reg_op1, reg_op2);
        __ movf(dst, src);            // move if NOT equal (incl. unordered)
        break;
      case 0x03: //greater
        __ c_ole_d(reg_op1, reg_op2); // flag = (op1 <= op2), ordered
        __ movf(dst, src);
        break;
      case 0x04: //greater_equal
        __ c_olt_d(reg_op1, reg_op2); // flag = (op1 < op2), ordered
        __ movf(dst, src);
        break;
      case 0x05: //less
        __ c_ult_d(reg_op1, reg_op2); // flag = (op1 < op2) or unordered
        __ movt(dst, src);
        break;
      case 0x06: //less_equal
        __ c_ule_d(reg_op1, reg_op2); // flag = (op1 <= op2) or unordered
        __ movt(dst, src);
        break;
      default:
        Unimplemented();
    }
  %}

  ins_pipe( pipe_slow );
%}
// CMoveD guarded by a double compare.  There is no FPR<-FPR conditional move
// here; instead the condition flag is tested with bc1f/bc1t and the mov_d is
// branched over when the condition does NOT hold.  Each branch carries an
// explicit nop delay slot.
instruct cmovD_cmpD_reg_reg(regD dst, regD src, regD tmp1, regD tmp2, cmpOp cop ) %{
  match(Set dst (CMoveD (Binary cop (CmpD tmp1 tmp2)) (Binary dst src)));
  ins_cost(200);
  format %{
             "CMP$cop $tmp1, $tmp2\t @cmovD_cmpD_reg_reg\n"
             "\tCMOV $dst,$src \t @cmovD_cmpD_reg_reg"
         %}
  ins_encode %{
    FloatRegister reg_op1 = as_FloatRegister($tmp1$$reg);
    FloatRegister reg_op2 = as_FloatRegister($tmp2$$reg);
    FloatRegister dst = as_FloatRegister($dst$$reg);
    FloatRegister src = as_FloatRegister($src$$reg);

    int flag = $cop$$cmpcode;

    Label L;

    switch(flag)
    {
      case 0x01: //equal
        __ c_eq_d(reg_op1, reg_op2);
        __ bc1f(L);                   // skip the move when NOT equal
        __ nop();
        __ mov_d(dst, src);
        __ bind(L);
        break;
      case 0x02: //not_equal
        // 2016/4/19 aoqi: See instruct branchConD_reg_reg. The change in
        // branchConD_reg_reg fixed a bug. It seems similar here, so I made
        // the same change: test equality and skip the move when equal.
        __ c_eq_d(reg_op1, reg_op2);
        __ bc1t(L);
        __ nop();
        __ mov_d(dst, src);
        __ bind(L);
        break;
      case 0x03: //greater
        __ c_ole_d(reg_op1, reg_op2); // flag = (op1 <= op2); skip when true
        __ bc1t(L);
        __ nop();
        __ mov_d(dst, src);
        __ bind(L);
        break;
      case 0x04: //greater_equal
        __ c_olt_d(reg_op1, reg_op2); // flag = (op1 < op2); skip when true
        __ bc1t(L);
        __ nop();
        __ mov_d(dst, src);
        __ bind(L);
        break;
      case 0x05: //less
        __ c_ult_d(reg_op1, reg_op2); // flag = (op1 < op2) or unordered
        __ bc1f(L);
        __ nop();
        __ mov_d(dst, src);
        __ bind(L);
        break;
      case 0x06: //less_equal
        __ c_ule_d(reg_op1, reg_op2); // flag = (op1 <= op2) or unordered
        __ bc1f(L);
        __ nop();
        __ mov_d(dst, src);
        __ bind(L);
        break;
      default:
        Unimplemented();
    }
  %}

  ins_pipe( pipe_slow );
%}
// CMoveF guarded by a signed int compare.  The move target is an FPR, so the
// conditional move is done with a branch around mov_s (explicit nop delay
// slot).  AT is clobbered for the slt-based conditions.
instruct cmovF_cmpI_reg_reg(regF dst, regF src, mRegI tmp1, mRegI tmp2, cmpOp cop ) %{
  match(Set dst (CMoveF (Binary cop (CmpI tmp1 tmp2)) (Binary dst src)));
  ins_cost(200);
  format %{
             "CMP$cop $tmp1, $tmp2\t @cmovF_cmpI_reg_reg\n"
             "\tCMOV $dst, $src \t @cmovF_cmpI_reg_reg"
         %}

  ins_encode %{
    Register op1 = $tmp1$$Register;
    Register op2 = $tmp2$$Register;
    FloatRegister dst = as_FloatRegister($dst$$reg);
    FloatRegister src = as_FloatRegister($src$$reg);
    int flag = $cop$$cmpcode;
    Label L;

    switch(flag)
    {
      case 0x01: //equal
        __ bne(op1, op2, L);      // skip the move when NOT equal
        __ nop();
        __ mov_s(dst, src);
        __ bind(L);
        break;
      case 0x02: //not_equal
        __ beq(op1, op2, L);      // skip the move when equal
        __ nop();
        __ mov_s(dst, src);
        __ bind(L);
        break;
      case 0x03: //greater (signed)
        __ slt(AT, op2, op1);
        __ beq(AT, R0, L);        // skip unless op1 > op2
        __ nop();
        __ mov_s(dst, src);
        __ bind(L);
        break;
      case 0x04: //greater_equal (signed)
        __ slt(AT, op1, op2);
        __ bne(AT, R0, L);        // skip when op1 < op2
        __ nop();
        __ mov_s(dst, src);
        __ bind(L);
        break;
      case 0x05: //less (signed)
        __ slt(AT, op1, op2);
        __ beq(AT, R0, L);
        __ nop();
        __ mov_s(dst, src);
        __ bind(L);
        break;
      case 0x06: //less_equal (signed)
        __ slt(AT, op2, op1);
        __ bne(AT, R0, L);
        __ nop();
        __ mov_s(dst, src);
        __ bind(L);
        break;
      default:
        Unimplemented();
    }
  %}

  ins_pipe( pipe_slow );
%}
// CMoveD guarded by a signed int compare; branch-around mov_d with explicit
// nop delay slots.  AT is clobbered for the slt-based conditions.
instruct cmovD_cmpI_reg_reg(regD dst, regD src, mRegI tmp1, mRegI tmp2, cmpOp cop ) %{
  match(Set dst (CMoveD (Binary cop (CmpI tmp1 tmp2)) (Binary dst src)));
  ins_cost(200);
  format %{
             "CMP$cop $tmp1, $tmp2\t @cmovD_cmpI_reg_reg\n"
             "\tCMOV $dst, $src \t @cmovD_cmpI_reg_reg"
         %}

  ins_encode %{
    Register op1 = $tmp1$$Register;
    Register op2 = $tmp2$$Register;
    FloatRegister dst = as_FloatRegister($dst$$reg);
    FloatRegister src = as_FloatRegister($src$$reg);
    int flag = $cop$$cmpcode;
    Label L;

    switch(flag)
    {
      case 0x01: //equal
        __ bne(op1, op2, L);      // skip the move when NOT equal
        __ nop();
        __ mov_d(dst, src);
        __ bind(L);
        break;
      case 0x02: //not_equal
        __ beq(op1, op2, L);      // skip the move when equal
        __ nop();
        __ mov_d(dst, src);
        __ bind(L);
        break;
      case 0x03: //greater (signed)
        __ slt(AT, op2, op1);
        __ beq(AT, R0, L);
        __ nop();
        __ mov_d(dst, src);
        __ bind(L);
        break;
      case 0x04: //greater_equal (signed)
        __ slt(AT, op1, op2);
        __ bne(AT, R0, L);
        __ nop();
        __ mov_d(dst, src);
        __ bind(L);
        break;
      case 0x05: //less (signed)
        __ slt(AT, op1, op2);
        __ beq(AT, R0, L);
        __ nop();
        __ mov_d(dst, src);
        __ bind(L);
        break;
      case 0x06: //less_equal (signed)
        __ slt(AT, op2, op1);
        __ bne(AT, R0, L);
        __ nop();
        __ mov_d(dst, src);
        __ bind(L);
        break;
      default:
        Unimplemented();
    }
  %}

  ins_pipe( pipe_slow );
%}
// CMoveD guarded by a pointer compare; branch-around mov_d with explicit nop
// delay slots.  AT is clobbered for the slt-based conditions.
// NOTE(review): slt here is a SIGNED compare although the operands are
// pointers; for the equality/inequality cases (the ones C2 emits for CmpP
// with a plain cmpOp) this is irrelevant — confirm the ordered cases are
// never selected with a signed cop on pointers.
instruct cmovD_cmpP_reg_reg(regD dst, regD src, mRegP tmp1, mRegP tmp2, cmpOp cop ) %{
  match(Set dst (CMoveD (Binary cop (CmpP tmp1 tmp2)) (Binary dst src)));
  ins_cost(200);
  format %{
             "CMP$cop $tmp1, $tmp2\t @cmovD_cmpP_reg_reg\n"
             "\tCMOV $dst, $src \t @cmovD_cmpP_reg_reg"
         %}

  ins_encode %{
    Register op1 = $tmp1$$Register;
    Register op2 = $tmp2$$Register;
    FloatRegister dst = as_FloatRegister($dst$$reg);
    FloatRegister src = as_FloatRegister($src$$reg);
    int flag = $cop$$cmpcode;
    Label L;

    switch(flag)
    {
      case 0x01: //equal
        __ bne(op1, op2, L);      // skip the move when NOT equal
        __ nop();
        __ mov_d(dst, src);
        __ bind(L);
        break;
      case 0x02: //not_equal
        __ beq(op1, op2, L);      // skip the move when equal
        __ nop();
        __ mov_d(dst, src);
        __ bind(L);
        break;
      case 0x03: //greater
        __ slt(AT, op2, op1);
        __ beq(AT, R0, L);
        __ nop();
        __ mov_d(dst, src);
        __ bind(L);
        break;
      case 0x04: //greater_equal
        __ slt(AT, op1, op2);
        __ bne(AT, R0, L);
        __ nop();
        __ mov_d(dst, src);
        __ bind(L);
        break;
      case 0x05: //less
        __ slt(AT, op1, op2);
        __ beq(AT, R0, L);
        __ nop();
        __ mov_d(dst, src);
        __ bind(L);
        break;
      case 0x06: //less_equal
        __ slt(AT, op2, op1);
        __ bne(AT, R0, L);
        __ nop();
        __ mov_d(dst, src);
        __ bind(L);
        break;
      default:
        Unimplemented();
    }
  %}

  ins_pipe( pipe_slow );
%}
9316 //FIXME
// CMoveI guarded by a float compare: c.cond.s sets the FPU condition flag,
// movt/movf conditionally move the GPR (branchless).  Single-precision
// counterpart of cmovI_cmpD_reg_reg.
instruct cmovI_cmpF_reg_reg(mRegI dst, mRegI src, regF tmp1, regF tmp2, cmpOp cop ) %{
  match(Set dst (CMoveI (Binary cop (CmpF tmp1 tmp2)) (Binary dst src)));
  ins_cost(80);
  format %{
             "CMP$cop $tmp1, $tmp2\t @cmovI_cmpF_reg_reg\n"
             "\tCMOV $dst,$src \t @cmovI_cmpF_reg_reg"
         %}

  ins_encode %{
    FloatRegister reg_op1 = $tmp1$$FloatRegister;
    FloatRegister reg_op2 = $tmp2$$FloatRegister;
    Register dst = $dst$$Register;
    Register src = $src$$Register;
    int flag = $cop$$cmpcode;

    switch(flag)
    {
      case 0x01: //equal
        __ c_eq_s(reg_op1, reg_op2);
        __ movt(dst, src);            // move if equal
        break;
      case 0x02: //not_equal
        __ c_eq_s(reg_op1, reg_op2);
        __ movf(dst, src);            // move if NOT equal (incl. unordered)
        break;
      case 0x03: //greater
        __ c_ole_s(reg_op1, reg_op2); // flag = (op1 <= op2), ordered
        __ movf(dst, src);
        break;
      case 0x04: //greater_equal
        __ c_olt_s(reg_op1, reg_op2); // flag = (op1 < op2), ordered
        __ movf(dst, src);
        break;
      case 0x05: //less
        __ c_ult_s(reg_op1, reg_op2); // flag = (op1 < op2) or unordered
        __ movt(dst, src);
        break;
      case 0x06: //less_equal
        __ c_ule_s(reg_op1, reg_op2); // flag = (op1 <= op2) or unordered
        __ movt(dst, src);
        break;
      default:
        Unimplemented();
    }
  %}
  ins_pipe( pipe_slow );
%}
// CMoveF guarded by a float compare.  Target is an FPR, so the conditional
// move is a bc1f/bc1t branch around mov_s (explicit nop delay slot).
instruct cmovF_cmpF_reg_reg(regF dst, regF src, regF tmp1, regF tmp2, cmpOp cop ) %{
  match(Set dst (CMoveF (Binary cop (CmpF tmp1 tmp2)) (Binary dst src)));
  ins_cost(200);
  format %{
             "CMP$cop $tmp1, $tmp2\t @cmovF_cmpF_reg_reg\n"
             "\tCMOV $dst,$src \t @cmovF_cmpF_reg_reg"
         %}

  ins_encode %{
    FloatRegister reg_op1 = $tmp1$$FloatRegister;
    FloatRegister reg_op2 = $tmp2$$FloatRegister;
    FloatRegister dst = $dst$$FloatRegister;
    FloatRegister src = $src$$FloatRegister;
    Label L;
    int flag = $cop$$cmpcode;

    switch(flag)
    {
      case 0x01: //equal
        __ c_eq_s(reg_op1, reg_op2);
        __ bc1f(L);                   // skip the move when NOT equal
        __ nop();
        __ mov_s(dst, src);
        __ bind(L);
        break;
      case 0x02: //not_equal
        __ c_eq_s(reg_op1, reg_op2);
        __ bc1t(L);                   // skip the move when equal
        __ nop();
        __ mov_s(dst, src);
        __ bind(L);
        break;
      case 0x03: //greater
        __ c_ole_s(reg_op1, reg_op2); // flag = (op1 <= op2); skip when true
        __ bc1t(L);
        __ nop();
        __ mov_s(dst, src);
        __ bind(L);
        break;
      case 0x04: //greater_equal
        __ c_olt_s(reg_op1, reg_op2); // flag = (op1 < op2); skip when true
        __ bc1t(L);
        __ nop();
        __ mov_s(dst, src);
        __ bind(L);
        break;
      case 0x05: //less
        __ c_ult_s(reg_op1, reg_op2); // flag = (op1 < op2) or unordered
        __ bc1f(L);
        __ nop();
        __ mov_s(dst, src);
        __ bind(L);
        break;
      case 0x06: //less_equal
        __ c_ule_s(reg_op1, reg_op2); // flag = (op1 <= op2) or unordered
        __ bc1f(L);
        __ nop();
        __ mov_s(dst, src);
        __ bind(L);
        break;
      default:
        Unimplemented();
    }
  %}
  ins_pipe( pipe_slow );
%}
9432 // Manifest a CmpL result in an integer register. Very painful.
9433 // This is the test to avoid.
9434 instruct cmpL3_reg_reg(mRegI dst, mRegL src1, mRegL src2) %{
9435 match(Set dst (CmpL3 src1 src2));
9436 ins_cost(1000);
9437 format %{ "cmpL3 $dst, $src1, $src2 @ cmpL3_reg_reg" %}
9438 ins_encode %{
9439 Register opr1 = as_Register($src1$$reg);
9440 Register opr2 = as_Register($src2$$reg);
9441 Register dst = as_Register($dst$$reg);
9443 Label Done;
9445 __ subu(AT, opr1, opr2);
9446 __ bltz(AT, Done);
9447 __ delayed()->daddiu(dst, R0, -1);
9449 __ move(dst, 1);
9450 __ movz(dst, R0, AT);
9452 __ bind(Done);
9453 %}
9454 ins_pipe( pipe_slow );
9455 %}
9457 //
// less_result = -1
9459 // greater_result = 1
9460 // equal_result = 0
9461 // nan_result = -1
9462 //
// Manifest a CmpF3 result in an integer register:
//   -1 if less or unordered (NaN), 0 if equal, 1 if greater
// (matches the Java fcmpl semantics: NaN compares as -1).  AT is not used;
// the FPU condition flag carries both tests.
instruct cmpF3_reg_reg(mRegI dst, regF src1, regF src2) %{
  match(Set dst (CmpF3 src1 src2));
  ins_cost(1000);
  format %{ "cmpF3 $dst, $src1, $src2 @ cmpF3_reg_reg" %}
  ins_encode %{
    FloatRegister src1 = as_FloatRegister($src1$$reg);
    FloatRegister src2 = as_FloatRegister($src2$$reg);
    Register dst = as_Register($dst$$reg);

    Label Done;

    // c_ult is true when (src1 < src2) OR unordered -> result -1.
    __ c_ult_s(src1, src2);
    __ bc1t(Done);
    __ delayed()->daddiu(dst, R0, -1);  // delay slot: tentatively dst = -1

    // Not less and not NaN: distinguish equal (0) from greater (1).
    __ c_eq_s(src1, src2);
    __ move(dst, 1);
    __ movt(dst, R0);                   // equal -> 0

    __ bind(Done);
  %}
  ins_pipe( pipe_slow );
%}
// Manifest a CmpD3 result in an integer register:
//   -1 if less or unordered (NaN), 0 if equal, 1 if greater
// (matches the Java dcmpl semantics).  Double-precision twin of cmpF3_reg_reg.
instruct cmpD3_reg_reg(mRegI dst, regD src1, regD src2) %{
  match(Set dst (CmpD3 src1 src2));
  ins_cost(1000);
  format %{ "cmpD3 $dst, $src1, $src2 @ cmpD3_reg_reg" %}
  ins_encode %{
    FloatRegister src1 = as_FloatRegister($src1$$reg);
    FloatRegister src2 = as_FloatRegister($src2$$reg);
    Register dst = as_Register($dst$$reg);

    Label Done;

    // c_ult is true when (src1 < src2) OR unordered -> result -1.
    __ c_ult_d(src1, src2);
    __ bc1t(Done);
    __ delayed()->daddiu(dst, R0, -1);  // delay slot: tentatively dst = -1

    // Not less and not NaN: equal -> 0, otherwise greater -> 1.
    __ c_eq_d(src1, src2);
    __ move(dst, 1);
    __ movt(dst, R0);

    __ bind(Done);
  %}
  ins_pipe( pipe_slow );
%}
// Zero an array region.  $cnt is the number of DOUBLEWORDS (8-byte units) to
// clear — not bytes; see the 2012 note below and the wordSize stride — and
// $base points at the first doubleword.  Clobbers T9 and AT.
instruct clear_array(mRegL cnt, mRegP base, Universe dummy) %{
  match(Set dummy (ClearArray cnt base));
  format %{ "CLEAR_ARRAY base = $base, cnt = $cnt # Clear doublewords" %}
  ins_encode %{
    // $cnt counts doublewords and $base points to the start of the region.
    Register base = $base$$Register;
    Register num  = $cnt$$Register;
    Label Loop, done;

    /* 2012/9/21 Jin: according to X86, $cnt is caculated by doublewords(8 bytes) */
    __ move(T9, num);   /* T9 = number of doublewords left */
    __ beq(T9, R0, done);  // nothing to do for a zero count
    __ nop();
    __ move(AT, base);     // AT = running store pointer

    __ bind(Loop);
    __ sd(R0, Address(AT, 0));      // store one zero doubleword
    __ daddi(AT, AT, wordSize);     // advance by 8 bytes
    __ daddi(T9, T9, -1);
    __ bne(T9, R0, Loop);
    __ delayed()->nop();
    __ bind(done);
  %}
  ins_pipe( pipe_slow );
%}
// Lexicographic UTF-16 string compare (StrComp intrinsic).
//   result < 0 / == 0 / > 0  like String.compareTo.
// Strategy: result starts as (cnt1 - cnt2); if all characters of the shorter
// string match, that length difference IS the answer.  On the first character
// mismatch, result becomes (char1 - char2) instead.
// Kills str1/str2/cnt1/cnt2 (used as cursors/temps) and clobbers AT.
instruct string_compare(a4_RegP str1, mA5RegI cnt1, a6_RegP str2, mA7RegI cnt2, no_Ax_mRegI result) %{
  match(Set result (StrComp (Binary str1 cnt1) (Binary str2 cnt2)));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2);

  format %{ "String Compare $str1[len: $cnt1], $str2[len: $cnt2] -> $result @ string_compare" %}
  ins_encode %{
    // Get the first character position in both strings
    // [8] char array, [12] offset, [16] count
    Register str1   = $str1$$Register;
    Register str2   = $str2$$Register;
    Register cnt1   = $cnt1$$Register;
    Register cnt2   = $cnt2$$Register;
    Register result = $result$$Register;

    Label L, Loop, haveResult, done;

    // compute the difference of lengths (in result)
    __ subu(result, cnt1, cnt2); // result holds the difference of two lengths

    // compute the shorter length (in cnt1)
    __ slt(AT, cnt2, cnt1);
    __ movn(cnt1, cnt2, AT);

    // Now the shorter length is in cnt1 and cnt2 can be used as a tmp register
    __ bind(Loop);                 // Loop begin
    __ beq(cnt1, R0, done);
    // Delay slot: speculatively load the next char from str1 even on the exit
    // branch.  NOTE(review): this reads 2 bytes past the compared region on
    // exit — presumed safe for Java char arrays (header padding); confirm.
    __ delayed()->lhu(AT, str1, 0);;

    // compare current character
    __ lhu(cnt2, str2, 0);
    __ bne(AT, cnt2, haveResult);
    __ delayed()->addi(str1, str1, 2);   // delay slot: advance str1 either way
    __ addi(str2, str2, 2);
    __ b(Loop);
    __ delayed()->addi(cnt1, cnt1, -1);  // Loop end

    __ bind(haveResult);
    __ subu(result, AT, cnt2);     // first differing chars decide the result

    __ bind(done);
  %}

  ins_pipe( pipe_slow );
%}
9583 // intrinsic optimization
// UTF-16 string equality (StrEquals intrinsic): result = 1 if the first $cnt
// characters of $str1 and $str2 match (or the two point at the same array),
// else 0.  Kills str1/str2/cnt, uses $temp as a char scratch, clobbers AT.
instruct string_equals(a4_RegP str1, a5_RegP str2, mA6RegI cnt, mA7RegI temp, no_Ax_mRegI result) %{
  match(Set result (StrEquals (Binary str1 str2) cnt));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt, KILL temp);

  format %{ "String Equal $str1, $str2, len:$cnt tmp:$temp -> $result @ string_equals" %}
  ins_encode %{
    // Get the first character position in both strings
    // [8] char array, [12] offset, [16] count
    Register str1   = $str1$$Register;
    Register str2   = $str2$$Register;
    Register cnt    = $cnt$$Register;
    Register tmp    = $temp$$Register;
    Register result = $result$$Register;

    Label Loop, done;

    __ beq(str1, str2, done);    // same char[] ?
    __ daddiu(result, R0, 1);    // delay slot: tentatively result = 1

    __ bind(Loop);               // Loop begin
    __ beq(cnt, R0, done);
    __ daddiu(result, R0, 1);    // count == 0 -> equal (delay slot)

    // compare current character
    __ lhu(AT, str1, 0);;
    __ lhu(tmp, str2, 0);
    __ bne(AT, tmp, done);
    __ delayed()->daddi(result, R0, 0);  // mismatch -> result = 0 (delay slot)
    __ addi(str1, str1, 2);
    __ addi(str2, str2, 2);
    __ b(Loop);
    __ delayed()->addi(cnt, cnt, -1);    // Loop end

    __ bind(done);
  %}

  ins_pipe( pipe_slow );
%}
9624 //----------Arithmetic Instructions-------------------------------------------
9625 //----------Addition Instructions---------------------------------------------
9626 instruct addI_Reg_Reg(mRegI dst, mRegI src1, mRegI src2) %{
9627 match(Set dst (AddI src1 src2));
9629 format %{ "add $dst, $src1, $src2 #@addI_Reg_Reg" %}
9630 ins_encode %{
9631 Register dst = $dst$$Register;
9632 Register src1 = $src1$$Register;
9633 Register src2 = $src2$$Register;
9634 __ addu32(dst, src1, src2);
9635 %}
9636 ins_pipe( ialu_regI_regI );
9637 %}
9639 instruct addI_Reg_imm(mRegI dst, mRegI src1, immI src2) %{
9640 match(Set dst (AddI src1 src2));
9642 format %{ "add $dst, $src1, $src2 #@addI_Reg_imm" %}
9643 ins_encode %{
9644 Register dst = $dst$$Register;
9645 Register src1 = $src1$$Register;
9646 int imm = $src2$$constant;
9648 if(Assembler::is_simm16(imm)) {
9649 __ addiu32(dst, src1, imm);
9650 } else {
9651 __ move(AT, imm);
9652 __ addu32(dst, src1, AT);
9653 }
9654 %}
9655 ins_pipe( ialu_regI_regI );
9656 %}
9658 instruct addP_reg_reg(mRegP dst, mRegP src1, mRegL src2) %{
9659 match(Set dst (AddP src1 src2));
9661 format %{ "dadd $dst, $src1, $src2 #@addP_reg_reg" %}
9663 ins_encode %{
9664 Register dst = $dst$$Register;
9665 Register src1 = $src1$$Register;
9666 Register src2 = $src2$$Register;
9667 __ daddu(dst, src1, src2);
9668 %}
9670 ins_pipe( ialu_regI_regI );
9671 %}
9673 instruct addP_reg_reg_convI2L(mRegP dst, mRegP src1, mRegI src2) %{
9674 match(Set dst (AddP src1 (ConvI2L src2)));
9676 format %{ "dadd $dst, $src1, $src2 #@addP_reg_reg_convI2L" %}
9678 ins_encode %{
9679 Register dst = $dst$$Register;
9680 Register src1 = $src1$$Register;
9681 Register src2 = $src2$$Register;
9682 __ daddu(dst, src1, src2);
9683 %}
9685 ins_pipe( ialu_regI_regI );
9686 %}
9688 instruct addP_reg_imm(mRegP dst, mRegP src1, immL src2) %{
9689 match(Set dst (AddP src1 src2));
9691 format %{ "daddi $dst, $src1, $src2 #@addP_reg_imm" %}
9692 ins_encode %{
9693 Register src1 = $src1$$Register;
9694 long src2 = $src2$$constant;
9695 Register dst = $dst$$Register;
9697 if(Assembler::is_simm16(src2)) {
9698 __ daddiu(dst, src1, src2);
9699 } else {
9700 __ set64(AT, src2);
9701 __ daddu(dst, src1, AT);
9702 }
9703 %}
9704 ins_pipe( ialu_regI_imm16 );
9705 %}
9707 // Add Long Register with Register
9708 instruct addL_Reg_Reg(mRegL dst, mRegL src1, mRegL src2) %{
9709 match(Set dst (AddL src1 src2));
9710 ins_cost(200);
9711 format %{ "ADD $dst, $src1, $src2 #@addL_Reg_Reg\t" %}
9713 ins_encode %{
9714 Register dst_reg = as_Register($dst$$reg);
9715 Register src1_reg = as_Register($src1$$reg);
9716 Register src2_reg = as_Register($src2$$reg);
9718 __ daddu(dst_reg, src1_reg, src2_reg);
9719 %}
9721 ins_pipe( ialu_regL_regL );
9722 %}
9724 instruct addL_Reg_imm(mRegL dst, mRegL src1, immL16 src2)
9725 %{
9726 match(Set dst (AddL src1 src2));
9728 format %{ "ADD $dst, $src1, $src2 #@addL_Reg_imm " %}
9729 ins_encode %{
9730 Register dst_reg = as_Register($dst$$reg);
9731 Register src1_reg = as_Register($src1$$reg);
9732 int src2_imm = $src2$$constant;
9734 __ daddiu(dst_reg, src1_reg, src2_imm);
9735 %}
9737 ins_pipe( ialu_regL_regL );
9738 %}
9740 instruct addL_RegI2L_imm(mRegL dst, mRegI src1, immL16 src2)
9741 %{
9742 match(Set dst (AddL (ConvI2L src1) src2));
9744 format %{ "ADD $dst, $src1, $src2 #@addL_RegI2L_imm " %}
9745 ins_encode %{
9746 Register dst_reg = as_Register($dst$$reg);
9747 Register src1_reg = as_Register($src1$$reg);
9748 int src2_imm = $src2$$constant;
9750 __ daddiu(dst_reg, src1_reg, src2_imm);
9751 %}
9753 ins_pipe( ialu_regL_regL );
9754 %}
9756 instruct addL_RegI2L_Reg(mRegL dst, mRegI src1, mRegL src2) %{
9757 match(Set dst (AddL (ConvI2L src1) src2));
9758 ins_cost(200);
9759 format %{ "ADD $dst, $src1, $src2 #@addL_RegI2L_Reg\t" %}
9761 ins_encode %{
9762 Register dst_reg = as_Register($dst$$reg);
9763 Register src1_reg = as_Register($src1$$reg);
9764 Register src2_reg = as_Register($src2$$reg);
9766 __ daddu(dst_reg, src1_reg, src2_reg);
9767 %}
9769 ins_pipe( ialu_regL_regL );
9770 %}
9772 instruct addL_RegI2L_RegI2L(mRegL dst, mRegI src1, mRegI src2) %{
9773 match(Set dst (AddL (ConvI2L src1) (ConvI2L src2)));
9774 ins_cost(200);
9775 format %{ "ADD $dst, $src1, $src2 #@addL_RegI2L_RegI2L\t" %}
9777 ins_encode %{
9778 Register dst_reg = as_Register($dst$$reg);
9779 Register src1_reg = as_Register($src1$$reg);
9780 Register src2_reg = as_Register($src2$$reg);
9782 __ daddu(dst_reg, src1_reg, src2_reg);
9783 %}
9785 ins_pipe( ialu_regL_regL );
9786 %}
9788 instruct addL_Reg_RegI2L(mRegL dst, mRegL src1, mRegI src2) %{
9789 match(Set dst (AddL src1 (ConvI2L src2)));
9790 ins_cost(200);
9791 format %{ "ADD $dst, $src1, $src2 #@addL_Reg_RegI2L\t" %}
9793 ins_encode %{
9794 Register dst_reg = as_Register($dst$$reg);
9795 Register src1_reg = as_Register($src1$$reg);
9796 Register src2_reg = as_Register($src2$$reg);
9798 __ daddu(dst_reg, src1_reg, src2_reg);
9799 %}
9801 ins_pipe( ialu_regL_regL );
9802 %}
9804 //----------Subtraction Instructions-------------------------------------------
9805 // Integer Subtraction Instructions
// 32-bit integer subtract, register - register.
instruct subI_Reg_Reg(mRegI dst, mRegI src1, mRegI src2) %{
  match(Set dst (SubI src1 src2));
  ins_cost(100);
  format %{ "sub $dst, $src1, $src2 #@subI_Reg_Reg" %}
  ins_encode %{
    Register dst = $dst$$Register;
    Register src1 = $src1$$Register;
    Register src2 = $src2$$Register;
    // subu32 keeps the 32-bit result sign-extended per MIPS64 convention.
    __ subu32(dst, src1, src2);
  %}
  ins_pipe( ialu_regI_regI );
%}
// 32-bit subtract of an immediate, implemented as an add of the negated
// constant.  The immI16_sub operand type guarantees that -constant fits in
// the 16-bit signed immediate field of addiu.
instruct subI_Reg_immI16_sub(mRegI dst, mRegI src1, immI16_sub src2) %{
  match(Set dst (SubI src1 src2));
  ins_cost(80);
  format %{ "sub $dst, $src1, $src2 #@subI_Reg_immI16_sub" %}
  ins_encode %{
    Register dst = $dst$$Register;
    Register src1 = $src1$$Register;
    __ addiu32(dst, src1, -1 * $src2$$constant);
  %}
  ins_pipe( ialu_regI_regI );
%}
// Integer negate: 0 - src, using the hardwired zero register R0.
instruct negI_Reg(mRegI dst, immI0 zero, mRegI src) %{
  match(Set dst (SubI zero src));
  ins_cost(80);
  format %{ "neg $dst, $src #@negI_Reg" %}
  ins_encode %{
    Register dst = $dst$$Register;
    Register src = $src$$Register;
    __ subu32(dst, R0, src);
  %}
  ins_pipe( ialu_regI_regI );
%}
// Long negate: 0 - src (64-bit subu against R0).
instruct negL_Reg(mRegL dst, immL0 zero, mRegL src) %{
  match(Set dst (SubL zero src));
  ins_cost(80);
  format %{ "neg $dst, $src #@negL_Reg" %}
  ins_encode %{
    Register dst = $dst$$Register;
    Register src = $src$$Register;
    __ subu(dst, R0, src);
  %}
  ins_pipe( ialu_regI_regI );
%}
// Long subtract of an immediate via daddiu with the negated constant;
// immL16_sub guarantees -constant fits in a signed 16-bit field.
instruct subL_Reg_immL16_sub(mRegL dst, mRegL src1, immL16_sub src2) %{
  match(Set dst (SubL src1 src2));
  ins_cost(80);
  format %{ "sub $dst, $src1, $src2 #@subL_Reg_immL16_sub" %}
  ins_encode %{
    Register dst = $dst$$Register;
    Register src1 = $src1$$Register;
    __ daddiu(dst, src1, -1 * $src2$$constant);
  %}
  ins_pipe( ialu_regI_regI );
%}
9872 // Subtract Long Register with Register.
// 64-bit subtract, register - register.
instruct subL_Reg_Reg(mRegL dst, mRegL src1, mRegL src2) %{
  match(Set dst (SubL src1 src2));
  ins_cost(100);
  format %{ "SubL $dst, $src1, $src2 @ subL_Reg_Reg" %}
  ins_encode %{
    Register dst = $dst$$Register;
    Register op1 = $src1$$Register;
    Register op2 = $src2$$Register;
    __ subu(dst, op1, op2);
  %}
  ins_pipe( ialu_regL_regL );
%}
// Long subtract of a sign-extended int; ConvI2L is free on MIPS64.
instruct subL_Reg_RegI2L(mRegL dst, mRegL src1, mRegI src2) %{
  match(Set dst (SubL src1 (ConvI2L src2)));
  ins_cost(100);
  format %{ "SubL $dst, $src1, $src2 @ subL_Reg_RegI2L" %}
  ins_encode %{
    Register dst = as_Register($dst$$reg);
    Register src1 = as_Register($src1$$reg);
    Register src2 = as_Register($src2$$reg);
    __ subu(dst, src1, src2);
  %}
  ins_pipe( ialu_regL_regL );
%}
// Long subtract with a sign-extended int on the left-hand side.
instruct subL_RegI2L_Reg(mRegL dst, mRegI src1, mRegL src2) %{
  match(Set dst (SubL (ConvI2L src1) src2));
  ins_cost(200);
  format %{ "SubL $dst, $src1, $src2 @ subL_RegI2L_Reg" %}
  ins_encode %{
    Register dst = as_Register($dst$$reg);
    Register src1 = as_Register($src1$$reg);
    Register src2 = as_Register($src2$$reg);
    __ subu(dst, src1, src2);
  %}
  ins_pipe( ialu_regL_regL );
%}
// Long subtract of two sign-extended ints.
instruct subL_RegI2L_RegI2L(mRegL dst, mRegI src1, mRegI src2) %{
  match(Set dst (SubL (ConvI2L src1) (ConvI2L src2)));
  ins_cost(200);
  format %{ "SubL $dst, $src1, $src2 @ subL_RegI2L_RegI2L" %}
  ins_encode %{
    Register dst = as_Register($dst$$reg);
    Register src1 = as_Register($src1$$reg);
    Register src2 = as_Register($src2$$reg);
    __ subu(dst, src1, src2);
  %}
  ins_pipe( ialu_regL_regL );
%}
9929 // Integer MOD with Register
// Integer remainder.  The remainder ends up in HI after a div, hence mfhi.
// Note: no explicit zero-divisor trap here — presumably ModI is guarded
// upstream (divI_Reg_Reg does emit a teq) — TODO confirm.
instruct modI_Reg_Reg(mRegI dst, mRegI src1, mRegI src2) %{
  match(Set dst (ModI src1 src2));
  ins_cost(300);
  format %{ "modi $dst, $src1, $src2 @ modI_Reg_Reg" %}
  ins_encode %{
    Register dst = $dst$$Register;
    Register src1 = $src1$$Register;
    Register src2 = $src2$$Register;
    //if (UseLoongsonISA) {
    // The Loongson gsmod path is intentionally disabled (kept for reference):
    if (0) {
      // 2016.08.10
      // Experiments show that gsmod is slower that div+mfhi.
      // So I just disable it here.
      __ gsmod(dst, src1, src2);
    } else {
      __ div(src1, src2);
      __ mfhi(dst);
    }
  %}
  //ins_pipe( ialu_mod );
  ins_pipe( ialu_regI_regI );
%}
// Long remainder: Loongson's fused gsdmod when available, otherwise
// classic ddiv + mfhi (remainder is delivered in HI).
instruct modL_reg_reg(mRegL dst, mRegL src1, mRegL src2) %{
  match(Set dst (ModL src1 src2));
  format %{ "modL $dst, $src1, $src2 @modL_reg_reg" %}
  ins_encode %{
    Register dst = as_Register($dst$$reg);
    Register op1 = as_Register($src1$$reg);
    Register op2 = as_Register($src2$$reg);
    if (UseLoongsonISA) {
      __ gsdmod(dst, op1, op2);
    } else {
      __ ddiv(op1, op2);
      __ mfhi(dst);
    }
  %}
  ins_pipe( pipe_slow );
%}
// 32-bit integer multiply (three-operand mul, low 32 bits of the product).
instruct mulI_Reg_Reg(mRegI dst, mRegI src1, mRegI src2) %{
  match(Set dst (MulI src1 src2));
  ins_cost(300);
  format %{ "mul $dst, $src1, $src2 @ mulI_Reg_Reg" %}
  ins_encode %{
    Register src1 = $src1$$Register;
    Register src2 = $src2$$Register;
    Register dst = $dst$$Register;
    __ mul(dst, src1, src2);
  %}
  ins_pipe( ialu_mult );
%}
// Fused multiply-add on the HI/LO accumulator: LO is seeded with src3,
// madd accumulates src1*src2 into HI/LO, and the low word is the result.
// Clobbers HI/LO.
instruct maddI_Reg_Reg(mRegI dst, mRegI src1, mRegI src2, mRegI src3) %{
  match(Set dst (AddI (MulI src1 src2) src3));
  ins_cost(999);
  format %{ "madd $dst, $src1 * $src2 + $src3 #@maddI_Reg_Reg" %}
  ins_encode %{
    Register src1 = $src1$$Register;
    Register src2 = $src2$$Register;
    Register src3 = $src3$$Register;
    Register dst = $dst$$Register;
    __ mtlo(src3);
    __ madd(src1, src2);
    __ mflo(dst);
  %}
  ins_pipe( ialu_mult );
%}
// 32-bit integer divide.  MIPS div does not fault on a zero divisor, so a
// conditional trap (teq against R0, trap code 0x7) raises the exception
// explicitly before the divide.
instruct divI_Reg_Reg(mRegI dst, mRegI src1, mRegI src2) %{
  match(Set dst (DivI src1 src2));
  ins_cost(300);
  format %{ "div $dst, $src1, $src2 @ divI_Reg_Reg" %}
  ins_encode %{
    Register src1 = $src1$$Register;
    Register src2 = $src2$$Register;
    Register dst = $dst$$Register;
    /* 2012/4/21 Jin: In MIPS, div does not cause exception.
       We must trap an exception manually. */
    __ teq(R0, src2, 0x7);
    if (UseLoongsonISA) {
      // Loongson three-operand divide, no HI/LO round trip.
      __ gsdiv(dst, src1, src2);
    } else {
      __ div(src1, src2);
      // nops cover the HI/LO read-after-write hazard before mflo.
      __ nop();
      __ nop();
      __ mflo(dst);
    }
  %}
  ins_pipe( ialu_mod );
%}
// Single-precision FP divide.  IEEE semantics: x/0.0 yields Inf/NaN, so no
// explicit trap is needed (the question in the inline comment is historic).
instruct divF_Reg_Reg(regF dst, regF src1, regF src2) %{
  match(Set dst (DivF src1 src2));
  ins_cost(300);
  format %{ "divF $dst, $src1, $src2 @ divF_Reg_Reg" %}
  ins_encode %{
    FloatRegister src1 = $src1$$FloatRegister;
    FloatRegister src2 = $src2$$FloatRegister;
    FloatRegister dst = $dst$$FloatRegister;
    /* Here do we need to trap an exception manually ? */
    __ div_s(dst, src1, src2);
  %}
  ins_pipe( pipe_slow );
%}
// Double-precision FP divide (see divF_Reg_Reg about the trap question).
instruct divD_Reg_Reg(regD dst, regD src1, regD src2) %{
  match(Set dst (DivD src1 src2));
  ins_cost(300);
  format %{ "divD $dst, $src1, $src2 @ divD_Reg_Reg" %}
  ins_encode %{
    FloatRegister src1 = $src1$$FloatRegister;
    FloatRegister src2 = $src2$$FloatRegister;
    FloatRegister dst = $dst$$FloatRegister;
    /* Here do we need to trap an exception manually ? */
    __ div_d(dst, src1, src2);
  %}
  ins_pipe( pipe_slow );
%}
// Long multiply: Loongson gsdmult produces the low 64 bits directly;
// the generic path goes through HI/LO (dmult + mflo).
instruct mulL_reg_reg(mRegL dst, mRegL src1, mRegL src2) %{
  match(Set dst (MulL src1 src2));
  format %{ "mulL $dst, $src1, $src2 @mulL_reg_reg" %}
  ins_encode %{
    Register dst = as_Register($dst$$reg);
    Register op1 = as_Register($src1$$reg);
    Register op2 = as_Register($src2$$reg);
    if (UseLoongsonISA) {
      __ gsdmult(dst, op1, op2);
    } else {
      __ dmult(op1, op2);
      __ mflo(dst);
    }
  %}
  ins_pipe( pipe_slow );
%}
// Long multiply with a sign-extended int operand (same emission as
// mulL_reg_reg; ConvI2L is free on MIPS64).
instruct mulL_reg_regI2L(mRegL dst, mRegL src1, mRegI src2) %{
  match(Set dst (MulL src1 (ConvI2L src2)));
  format %{ "mulL $dst, $src1, $src2 @mulL_reg_regI2L" %}
  ins_encode %{
    Register dst = as_Register($dst$$reg);
    Register op1 = as_Register($src1$$reg);
    Register op2 = as_Register($src2$$reg);
    if (UseLoongsonISA) {
      __ gsdmult(dst, op1, op2);
    } else {
      __ dmult(op1, op2);
      __ mflo(dst);
    }
  %}
  ins_pipe( pipe_slow );
%}
// Long divide: gsddiv on Loongson, else ddiv + mflo (quotient is in LO).
// NOTE(review): unlike divI_Reg_Reg there is no teq zero-divisor trap here
// — presumably DivL is guarded elsewhere; confirm against the runtime.
instruct divL_reg_reg(mRegL dst, mRegL src1, mRegL src2) %{
  match(Set dst (DivL src1 src2));
  format %{ "divL $dst, $src1, $src2 @divL_reg_reg" %}
  ins_encode %{
    Register dst = as_Register($dst$$reg);
    Register op1 = as_Register($src1$$reg);
    Register op2 = as_Register($src2$$reg);
    if (UseLoongsonISA) {
      __ gsddiv(dst, op1, op2);
    } else {
      __ ddiv(op1, op2);
      __ mflo(dst);
    }
  %}
  ins_pipe( pipe_slow );
%}
// Single-precision FP add.
instruct addF_reg_reg(regF dst, regF src1, regF src2) %{
  match(Set dst (AddF src1 src2));
  format %{ "AddF $dst, $src1, $src2 @addF_reg_reg" %}
  ins_encode %{
    FloatRegister lhs = $src1$$FloatRegister;
    FloatRegister rhs = $src2$$FloatRegister;
    FloatRegister res = $dst$$FloatRegister;
    __ add_s(res, lhs, rhs);
  %}
  ins_pipe( fpu_regF_regF );
%}
// Single-precision FP subtract.
instruct subF_reg_reg(regF dst, regF src1, regF src2) %{
  match(Set dst (SubF src1 src2));
  format %{ "SubF $dst, $src1, $src2 @subF_reg_reg" %}
  ins_encode %{
    FloatRegister src1 = as_FloatRegister($src1$$reg);
    FloatRegister src2 = as_FloatRegister($src2$$reg);
    FloatRegister dst = as_FloatRegister($dst$$reg);
    __ sub_s(dst, src1, src2);
  %}
  ins_pipe( fpu_regF_regF );
%}
// Double-precision FP add.
instruct addD_reg_reg(regD dst, regD src1, regD src2) %{
  match(Set dst (AddD src1 src2));
  format %{ "AddD $dst, $src1, $src2 @addD_reg_reg" %}
  ins_encode %{
    FloatRegister src1 = as_FloatRegister($src1$$reg);
    FloatRegister src2 = as_FloatRegister($src2$$reg);
    FloatRegister dst = as_FloatRegister($dst$$reg);
    __ add_d(dst, src1, src2);
  %}
  ins_pipe( fpu_regF_regF );
%}
// Double-precision FP subtract.
instruct subD_reg_reg(regD dst, regD src1, regD src2) %{
  match(Set dst (SubD src1 src2));
  format %{ "SubD $dst, $src1, $src2 @subD_reg_reg" %}
  ins_encode %{
    FloatRegister src1 = as_FloatRegister($src1$$reg);
    FloatRegister src2 = as_FloatRegister($src2$$reg);
    FloatRegister dst = as_FloatRegister($dst$$reg);
    __ sub_d(dst, src1, src2);
  %}
  ins_pipe( fpu_regF_regF );
%}
// Single-precision FP negate (flips the sign bit via neg.s).
instruct negF_reg(regF dst, regF src) %{
  match(Set dst (NegF src));
  format %{ "negF $dst, $src @negF_reg" %}
  ins_encode %{
    FloatRegister src = as_FloatRegister($src$$reg);
    FloatRegister dst = as_FloatRegister($dst$$reg);
    __ neg_s(dst, src);
  %}
  ins_pipe( fpu_regF_regF );
%}
// Double-precision FP negate.
instruct negD_reg(regD dst, regD src) %{
  match(Set dst (NegD src));
  format %{ "negD $dst, $src @negD_reg" %}
  ins_encode %{
    FloatRegister src = as_FloatRegister($src$$reg);
    FloatRegister dst = as_FloatRegister($dst$$reg);
    __ neg_d(dst, src);
  %}
  ins_pipe( fpu_regF_regF );
%}
// Single-precision FP multiply.
instruct mulF_reg_reg(regF dst, regF src1, regF src2) %{
  match(Set dst (MulF src1 src2));
  format %{ "MULF $dst, $src1, $src2 @mulF_reg_reg" %}
  ins_encode %{
    FloatRegister src1 = $src1$$FloatRegister;
    FloatRegister src2 = $src2$$FloatRegister;
    FloatRegister dst = $dst$$FloatRegister;
    __ mul_s(dst, src1, src2);
  %}
  ins_pipe( fpu_regF_regF );
%}
// Fused single-precision multiply-add.  Effectively disabled through the
// huge ins_cost: madd.s rounds once, which differs from the separate
// mul+add that Java's strict FP semantics require on this platform.
instruct maddF_reg_reg(regF dst, regF src1, regF src2, regF src3) %{
  match(Set dst (AddF (MulF src1 src2) src3));
  // For compatibility reason (e.g. on the Loongson platform), disable this guy.
  ins_cost(44444);
  format %{ "maddF $dst, $src1, $src2, $src3 @maddF_reg_reg" %}
  ins_encode %{
    FloatRegister src1 = $src1$$FloatRegister;
    FloatRegister src2 = $src2$$FloatRegister;
    FloatRegister src3 = $src3$$FloatRegister;
    FloatRegister dst = $dst$$FloatRegister;
    __ madd_s(dst, src1, src2, src3);
  %}
  ins_pipe( fpu_regF_regF );
%}
10226 // Mul two double precision floating piont number
// Double-precision FP multiply.
instruct mulD_reg_reg(regD dst, regD src1, regD src2) %{
  match(Set dst (MulD src1 src2));
  format %{ "MULD $dst, $src1, $src2 @mulD_reg_reg" %}
  ins_encode %{
    FloatRegister src1 = $src1$$FloatRegister;
    FloatRegister src2 = $src2$$FloatRegister;
    FloatRegister dst = $dst$$FloatRegister;
    __ mul_d(dst, src1, src2);
  %}
  ins_pipe( fpu_regF_regF );
%}
// Fused double-precision multiply-add; disabled via ins_cost for the same
// single-rounding compatibility reason as maddF_reg_reg.
instruct maddD_reg_reg(regD dst, regD src1, regD src2, regD src3) %{
  match(Set dst (AddD (MulD src1 src2) src3));
  // For compatibility reason (e.g. on the Loongson platform), disable this guy.
  ins_cost(44444);
  format %{ "maddD $dst, $src1, $src2, $src3 @maddD_reg_reg" %}
  ins_encode %{
    FloatRegister src1 = $src1$$FloatRegister;
    FloatRegister src2 = $src2$$FloatRegister;
    FloatRegister src3 = $src3$$FloatRegister;
    FloatRegister dst = $dst$$FloatRegister;
    __ madd_d(dst, src1, src2, src3);
  %}
  ins_pipe( fpu_regF_regF );
%}
// Single-precision FP absolute value.
instruct absF_reg(regF dst, regF src) %{
  match(Set dst (AbsF src));
  ins_cost(100);
  format %{ "absF $dst, $src @absF_reg" %}
  ins_encode %{
    FloatRegister src = as_FloatRegister($src$$reg);
    FloatRegister dst = as_FloatRegister($dst$$reg);
    __ abs_s(dst, src);
  %}
  ins_pipe( fpu_regF_regF );
%}
10270 // intrinsics for math_native.
10271 // AbsD SqrtD CosD SinD TanD LogD Log10D
// Double-precision FP absolute value (Math.abs intrinsic).
instruct absD_reg(regD dst, regD src) %{
  match(Set dst (AbsD src));
  ins_cost(100);
  format %{ "absD $dst, $src @absD_reg" %}
  ins_encode %{
    FloatRegister src = as_FloatRegister($src$$reg);
    FloatRegister dst = as_FloatRegister($dst$$reg);
    __ abs_d(dst, src);
  %}
  ins_pipe( fpu_regF_regF );
%}
// Double-precision square root (Math.sqrt intrinsic).
instruct sqrtD_reg(regD dst, regD src) %{
  match(Set dst (SqrtD src));
  ins_cost(100);
  format %{ "SqrtD $dst, $src @sqrtD_reg" %}
  ins_encode %{
    FloatRegister src = as_FloatRegister($src$$reg);
    FloatRegister dst = as_FloatRegister($dst$$reg);
    __ sqrt_d(dst, src);
  %}
  ins_pipe( fpu_regF_regF );
%}
// Single-precision square root, matching the float->double->sqrt->float
// idiom the compiler emits for (float)Math.sqrt(f) and folding it to sqrt.s.
instruct sqrtF_reg(regF dst, regF src) %{
  match(Set dst (ConvD2F (SqrtD (ConvF2D src))));
  ins_cost(100);
  format %{ "SqrtF $dst, $src @sqrtF_reg" %}
  ins_encode %{
    FloatRegister src = as_FloatRegister($src$$reg);
    FloatRegister dst = as_FloatRegister($dst$$reg);
    __ sqrt_s(dst, src);
  %}
  ins_pipe( fpu_regF_regF );
%}
10311 //----------------------------------Logical Instructions----------------------
10312 //__________________________________Integer Logical Instructions-------------
10314 //And Instuctions
10315 // And Register with Immediate
// AND with an arbitrary 32-bit immediate: materialize the constant into the
// assembler scratch register AT, then and.  Clobbers AT.
instruct andI_Reg_immI(mRegI dst, mRegI src1, immI src2) %{
  match(Set dst (AndI src1 src2));
  format %{ "and $dst, $src1, $src2 #@andI_Reg_immI" %}
  ins_encode %{
    Register dst = $dst$$Register;
    Register src = $src1$$Register;
    int val = $src2$$constant;
    __ move(AT, val);
    __ andr(dst, src, AT);
  %}
  ins_pipe( ialu_regI_regI );
%}
// AND with an immediate that fits the 16-bit zero-extended andi field;
// cheaper than andI_Reg_immI (no scratch register needed).
instruct andI_Reg_imm_0_65535(mRegI dst, mRegI src1, immI_0_65535 src2) %{
  match(Set dst (AndI src1 src2));
  ins_cost(60);
  format %{ "and $dst, $src1, $src2 #@andI_Reg_imm_0_65535" %}
  ins_encode %{
    Register dst = $dst$$Register;
    Register src = $src1$$Register;
    int val = $src2$$constant;
    __ andi(dst, src, val);
  %}
  ins_pipe( ialu_regI_regI );
%}
// AND with a contiguous low-bit mask (2^k - 1): use the ext bit-field
// extract instead of materializing the mask.  is_int_mask returns the
// mask width k.
instruct andI_Reg_immI_nonneg_mask(mRegI dst, mRegI src1, immI_nonneg_mask mask) %{
  match(Set dst (AndI src1 mask));
  ins_cost(60);
  format %{ "and $dst, $src1, $mask #@andI_Reg_immI_nonneg_mask" %}
  ins_encode %{
    Register dst = $dst$$Register;
    Register src = $src1$$Register;
    int size = Assembler::is_int_mask($mask$$constant);
    __ ext(dst, src, 0, size);
  %}
  ins_pipe( ialu_regI_regI );
%}
// 64-bit variant of the low-bit-mask AND, via dext bit-field extract.
instruct andL_Reg_immL_nonneg_mask(mRegL dst, mRegL src1, immL_nonneg_mask mask) %{
  match(Set dst (AndL src1 mask));
  ins_cost(60);
  format %{ "and $dst, $src1, $mask #@andL_Reg_immL_nonneg_mask" %}
  ins_encode %{
    Register dst = $dst$$Register;
    Register src = $src1$$Register;
    int size = Assembler::is_jlong_mask($mask$$constant);
    __ dext(dst, src, 0, size);
  %}
  ins_pipe( ialu_regI_regI );
%}
// XOR with an immediate that fits the 16-bit zero-extended xori field.
instruct xorI_Reg_imm_0_65535(mRegI dst, mRegI src1, immI_0_65535 src2) %{
  match(Set dst (XorI src1 src2));
  ins_cost(60);
  format %{ "xori $dst, $src1, $src2 #@xorI_Reg_imm_0_65535" %}
  ins_encode %{
    Register dst = $dst$$Register;
    Register src = $src1$$Register;
    int val = $src2$$constant;
    __ xori(dst, src, val);
  %}
  ins_pipe( ialu_regI_regI );
%}
// Bitwise NOT (x ^ -1) using Loongson's orn: dst = R0 | ~src.
instruct xorI_Reg_immI_M1(mRegI dst, mRegI src1, immI_M1 M1) %{
  match(Set dst (XorI src1 M1));
  predicate(UseLoongsonISA);
  ins_cost(60);
  format %{ "xor $dst, $src1, $M1 #@xorI_Reg_immI_M1" %}
  ins_encode %{
    Register dst = $dst$$Register;
    Register src = $src1$$Register;
    __ gsorn(dst, R0, src);
  %}
  ins_pipe( ialu_regI_regI );
%}
// NOT of a narrowed long: ~(int)src via Loongson orn.  The ConvL2I
// truncation is folded into the 32-bit view of the register.
instruct xorL2I_Reg_immI_M1(mRegI dst, mRegL src1, immI_M1 M1) %{
  match(Set dst (XorI (ConvL2I src1) M1));
  predicate(UseLoongsonISA);
  ins_cost(60);
  format %{ "xor $dst, $src1, $M1 #@xorL2I_Reg_immI_M1" %}
  ins_encode %{
    Register dst = $dst$$Register;
    Register src = $src1$$Register;
    __ gsorn(dst, R0, src);
  %}
  ins_pipe( ialu_regI_regI );
%}
// 64-bit XOR with a 16-bit zero-extended immediate.
instruct xorL_Reg_imm_0_65535(mRegL dst, mRegL src1, immL_0_65535 src2) %{
  match(Set dst (XorL src1 src2));
  ins_cost(60);
  format %{ "xori $dst, $src1, $src2 #@xorL_Reg_imm_0_65535" %}
  ins_encode %{
    Register dst = $dst$$Register;
    Register src = $src1$$Register;
    int val = $src2$$constant;
    __ xori(dst, src, val);
  %}
  ins_pipe( ialu_regI_regI );
%}
10436 /*
10437 instruct xorL_Reg_immL_M1(mRegL dst, mRegL src1, immL_M1 M1) %{
10438 match(Set dst (XorL src1 M1));
10439 predicate(UseLoongsonISA);
10440 ins_cost(60);
10442 format %{ "xor $dst, $src1, $M1 #@xorL_Reg_immL_M1" %}
10443 ins_encode %{
10444 Register dst = $dst$$Register;
10445 Register src = $src1$$Register;
10447 __ gsorn(dst, R0, src);
10448 %}
10449 ins_pipe( ialu_regI_regI );
10450 %}
10451 */
// Fold (0xFF & LoadB) into a single unsigned byte load.
// Fix: the format string said "lhu" (halfword) but the encoding emits an
// unsigned BYTE load (load_UB_enc) — display "lbu" so -XX:+PrintOptoAssembly
// output matches the generated code.
instruct lbu_and_lmask(mRegI dst, memory mem, immI_255 mask) %{
  match(Set dst (AndI mask (LoadB mem)));
  ins_cost(60);
  format %{ "lbu $dst, $mem #@lbu_and_lmask" %}
  ins_encode(load_UB_enc(dst, mem));
  ins_pipe( ialu_loadI );
%}
// Fold (LoadB & 0xFF) into a single unsigned byte load (mask on the right).
// Fix: format string corrected from "lhu" to "lbu" to match load_UB_enc.
instruct lbu_and_rmask(mRegI dst, memory mem, immI_255 mask) %{
  match(Set dst (AndI (LoadB mem) mask));
  ins_cost(60);
  format %{ "lbu $dst, $mem #@lbu_and_rmask" %}
  ins_encode(load_UB_enc(dst, mem));
  ins_pipe( ialu_loadI );
%}
// 32-bit AND, register-register.
instruct andI_Reg_Reg(mRegI dst, mRegI src1, mRegI src2) %{
  match(Set dst (AndI src1 src2));
  format %{ "and $dst, $src1, $src2 #@andI_Reg_Reg" %}
  ins_encode %{
    Register res = $dst$$Register;
    Register lhs = $src1$$Register;
    Register rhs = $src2$$Register;
    __ andr(res, lhs, rhs);
  %}
  ins_pipe( ialu_regI_regI );
%}
// Fold src1 & ~src2 into Loongson's single andn instruction.
instruct andnI_Reg_nReg(mRegI dst, mRegI src1, mRegI src2, immI_M1 M1) %{
  match(Set dst (AndI src1 (XorI src2 M1)));
  predicate(UseLoongsonISA);
  format %{ "andn $dst, $src1, $src2 #@andnI_Reg_nReg" %}
  ins_encode %{
    Register dst = $dst$$Register;
    Register src1 = $src1$$Register;
    Register src2 = $src2$$Register;
    __ gsandn(dst, src1, src2);
  %}
  ins_pipe( ialu_regI_regI );
%}
// Fold src1 | ~src2 into Loongson's single orn instruction.
instruct ornI_Reg_nReg(mRegI dst, mRegI src1, mRegI src2, immI_M1 M1) %{
  match(Set dst (OrI src1 (XorI src2 M1)));
  predicate(UseLoongsonISA);
  format %{ "orn $dst, $src1, $src2 #@ornI_Reg_nReg" %}
  ins_encode %{
    Register dst = $dst$$Register;
    Register src1 = $src1$$Register;
    Register src2 = $src2$$Register;
    __ gsorn(dst, src1, src2);
  %}
  ins_pipe( ialu_regI_regI );
%}
// Commuted form: ~src1 & src2 == src2 & ~src1, so operands are swapped
// when emitting andn.
instruct andnI_nReg_Reg(mRegI dst, mRegI src1, mRegI src2, immI_M1 M1) %{
  match(Set dst (AndI (XorI src1 M1) src2));
  predicate(UseLoongsonISA);
  format %{ "andn $dst, $src2, $src1 #@andnI_nReg_Reg" %}
  ins_encode %{
    Register dst = $dst$$Register;
    Register src1 = $src1$$Register;
    Register src2 = $src2$$Register;
    __ gsandn(dst, src2, src1);
  %}
  ins_pipe( ialu_regI_regI );
%}
// Commuted form: ~src1 | src2 == src2 | ~src1, emitted as orn with
// swapped operands.
instruct ornI_nReg_Reg(mRegI dst, mRegI src1, mRegI src2, immI_M1 M1) %{
  match(Set dst (OrI (XorI src1 M1) src2));
  predicate(UseLoongsonISA);
  format %{ "orn $dst, $src2, $src1 #@ornI_nReg_Reg" %}
  ins_encode %{
    Register dst = $dst$$Register;
    Register src1 = $src1$$Register;
    Register src2 = $src2$$Register;
    __ gsorn(dst, src2, src1);
  %}
  ins_pipe( ialu_regI_regI );
%}
10544 // And Long Register with Register
// 64-bit AND, register-register.
instruct andL_Reg_Reg(mRegL dst, mRegL src1, mRegL src2) %{
  match(Set dst (AndL src1 src2));
  format %{ "AND $dst, $src1, $src2 @ andL_Reg_Reg\n\t" %}
  ins_encode %{
    Register dst_reg = as_Register($dst$$reg);
    Register src1_reg = as_Register($src1$$reg);
    Register src2_reg = as_Register($src2$$reg);
    __ andr(dst_reg, src1_reg, src2_reg);
  %}
  ins_pipe( ialu_regL_regL );
%}
// 64-bit AND with a sign-extended int operand (ConvI2L free on MIPS64).
instruct andL_Reg_Reg_convI2L(mRegL dst, mRegL src1, mRegI src2) %{
  match(Set dst (AndL src1 (ConvI2L src2)));
  format %{ "AND $dst, $src1, $src2 @ andL_Reg_Reg_convI2L\n\t" %}
  ins_encode %{
    Register dst_reg = as_Register($dst$$reg);
    Register src1_reg = as_Register($src1$$reg);
    Register src2_reg = as_Register($src2$$reg);
    __ andr(dst_reg, src1_reg, src2_reg);
  %}
  ins_pipe( ialu_regL_regL );
%}
// 64-bit AND with a 16-bit zero-extended immediate.
instruct andL_Reg_imm_0_65535(mRegL dst, mRegL src1, immL_0_65535 src2) %{
  match(Set dst (AndL src1 src2));
  ins_cost(60);
  format %{ "and $dst, $src1, $src2 #@andL_Reg_imm_0_65535" %}
  ins_encode %{
    Register dst = $dst$$Register;
    Register src = $src1$$Register;
    long val = $src2$$constant;
    __ andi(dst, src, val);
  %}
  ins_pipe( ialu_regI_regI );
%}
// (int)(srcL & imm16): the andi result already fits in 32 bits, so the
// ConvL2I narrowing costs nothing extra.
instruct andL2I_Reg_imm_0_65535(mRegI dst, mRegL src1, immL_0_65535 src2) %{
  match(Set dst (ConvL2I (AndL src1 src2)));
  ins_cost(60);
  format %{ "and $dst, $src1, $src2 #@andL2I_Reg_imm_0_65535" %}
  ins_encode %{
    Register dst = $dst$$Register;
    Register src = $src1$$Register;
    long val = $src2$$constant;
    __ andi(dst, src, val);
  %}
  ins_pipe( ialu_regI_regI );
%}
10601 /*
10602 instruct andnL_Reg_nReg(mRegL dst, mRegL src1, mRegL src2, immL_M1 M1) %{
10603 match(Set dst (AndL src1 (XorL src2 M1)));
10604 predicate(UseLoongsonISA);
10606 format %{ "andn $dst, $src1, $src2 #@andnL_Reg_nReg" %}
10607 ins_encode %{
10608 Register dst = $dst$$Register;
10609 Register src1 = $src1$$Register;
10610 Register src2 = $src2$$Register;
10612 __ gsandn(dst, src1, src2);
10613 %}
10614 ins_pipe( ialu_regI_regI );
10615 %}
10616 */
10618 /*
10619 instruct ornL_Reg_nReg(mRegL dst, mRegL src1, mRegL src2, immL_M1 M1) %{
10620 match(Set dst (OrL src1 (XorL src2 M1)));
10621 predicate(UseLoongsonISA);
10623 format %{ "orn $dst, $src1, $src2 #@ornL_Reg_nReg" %}
10624 ins_encode %{
10625 Register dst = $dst$$Register;
10626 Register src1 = $src1$$Register;
10627 Register src2 = $src2$$Register;
10629 __ gsorn(dst, src1, src2);
10630 %}
10631 ins_pipe( ialu_regI_regI );
10632 %}
10633 */
10635 /*
10636 instruct andnL_nReg_Reg(mRegL dst, mRegL src1, mRegL src2, immL_M1 M1) %{
10637 match(Set dst (AndL (XorL src1 M1) src2));
10638 predicate(UseLoongsonISA);
10640 format %{ "andn $dst, $src2, $src1 #@andnL_nReg_Reg" %}
10641 ins_encode %{
10642 Register dst = $dst$$Register;
10643 Register src1 = $src1$$Register;
10644 Register src2 = $src2$$Register;
10646 __ gsandn(dst, src2, src1);
10647 %}
10648 ins_pipe( ialu_regI_regI );
10649 %}
10650 */
10652 /*
10653 instruct ornL_nReg_Reg(mRegL dst, mRegL src1, mRegL src2, immL_M1 M1) %{
10654 match(Set dst (OrL (XorL src1 M1) src2));
10655 predicate(UseLoongsonISA);
10657 format %{ "orn $dst, $src2, $src1 #@ornL_nReg_Reg" %}
10658 ins_encode %{
10659 Register dst = $dst$$Register;
10660 Register src1 = $src1$$Register;
10661 Register src2 = $src2$$Register;
10663 __ gsorn(dst, src2, src1);
10664 %}
10665 ins_pipe( ialu_regI_regI );
10666 %}
10667 */
// dst &= -8: clear bits 0..2 by inserting 3 zero bits from R0 at position 0
// (dins pos=0, size=3).  Read-modify-write, so dst is both input and output.
instruct andL_Reg_immL_M8(mRegL dst, immL_M8 M8) %{
  match(Set dst (AndL dst M8));
  ins_cost(60);
  format %{ "and $dst, $dst, $M8 #@andL_Reg_immL_M8" %}
  ins_encode %{
    Register dst = $dst$$Register;
    __ dins(dst, R0, 0, 3);
  %}
  ins_pipe( ialu_regI_regI );
%}
// dst &= -5 (mask ...11111011): clear just bit 2 (dins pos=2, size=1).
instruct andL_Reg_immL_M5(mRegL dst, immL_M5 M5) %{
  match(Set dst (AndL dst M5));
  ins_cost(60);
  format %{ "and $dst, $dst, $M5 #@andL_Reg_immL_M5" %}
  ins_encode %{
    Register dst = $dst$$Register;
    __ dins(dst, R0, 2, 1);
  %}
  ins_pipe( ialu_regI_regI );
%}
// dst &= -7 (mask ...11111001): clear bits 1..2 (dins pos=1, size=2).
instruct andL_Reg_immL_M7(mRegL dst, immL_M7 M7) %{
  match(Set dst (AndL dst M7));
  ins_cost(60);
  format %{ "and $dst, $dst, $M7 #@andL_Reg_immL_M7" %}
  ins_encode %{
    Register dst = $dst$$Register;
    __ dins(dst, R0, 1, 2);
  %}
  ins_pipe( ialu_regI_regI );
%}
// dst &= -4: clear bits 0..1 (dins pos=0, size=2).
instruct andL_Reg_immL_M4(mRegL dst, immL_M4 M4) %{
  match(Set dst (AndL dst M4));
  ins_cost(60);
  format %{ "and $dst, $dst, $M4 #@andL_Reg_immL_M4" %}
  ins_encode %{
    Register dst = $dst$$Register;
    __ dins(dst, R0, 0, 2);
  %}
  ins_pipe( ialu_regI_regI );
%}
// dst &= -121 (mask ...10000111): clear bits 3..6 (dins pos=3, size=4).
instruct andL_Reg_immL_M121(mRegL dst, immL_M121 M121) %{
  match(Set dst (AndL dst M121));
  ins_cost(60);
  format %{ "and $dst, $dst, $M121 #@andL_Reg_immL_M121" %}
  ins_encode %{
    Register dst = $dst$$Register;
    __ dins(dst, R0, 3, 4);
  %}
  ins_pipe( ialu_regI_regI );
%}
10734 // Or Long Register with Register
// 64-bit OR, register-register.
instruct orL_Reg_Reg(mRegL dst, mRegL src1, mRegL src2) %{
  match(Set dst (OrL src1 src2));
  format %{ "OR $dst, $src1, $src2 @ orL_Reg_Reg\t" %}
  ins_encode %{
    Register res = $dst$$Register;
    Register lhs = $src1$$Register;
    Register rhs = $src2$$Register;
    __ orr(res, lhs, rhs);
  %}
  ins_pipe( ialu_regL_regL );
%}
// OR of a pointer (reinterpreted as a long via CastP2X) with a long;
// the cast is a no-op at the machine level.
instruct orL_Reg_P2XReg(mRegL dst, mRegP src1, mRegL src2) %{
  match(Set dst (OrL (CastP2X src1) src2));
  format %{ "OR $dst, $src1, $src2 @ orL_Reg_P2XReg\t" %}
  ins_encode %{
    Register dst_reg = $dst$$Register;
    Register src1_reg = $src1$$Register;
    Register src2_reg = $src2$$Register;
    __ orr(dst_reg, src1_reg, src2_reg);
  %}
  ins_pipe( ialu_regL_regL );
%}
10761 // Xor Long Register with Register
// 64-bit XOR, register-register.
instruct xorL_Reg_Reg(mRegL dst, mRegL src1, mRegL src2) %{
  match(Set dst (XorL src1 src2));
  format %{ "XOR $dst, $src1, $src2 @ xorL_Reg_Reg\t" %}
  ins_encode %{
    Register res = $dst$$Register;
    Register lhs = $src1$$Register;
    Register rhs = $src2$$Register;
    __ xorr(res, lhs, rhs);
  %}
  ins_pipe( ialu_regL_regL );
%}
10775 // Shift Left by 8-bit immediate
// 32-bit shift left by immediate (sll masks the shift amount to 5 bits,
// matching Java's shift-mod-32 semantics).
instruct salI_Reg_imm(mRegI dst, mRegI src, immI8 shift) %{
  match(Set dst (LShiftI src shift));
  format %{ "SHL $dst, $src, $shift #@salI_Reg_imm" %}
  ins_encode %{
    Register src = $src$$Register;
    Register dst = $dst$$Register;
    int shamt = $shift$$constant;
    __ sll(dst, src, shamt);
  %}
  ins_pipe( ialu_regI_regI );
%}
// 32-bit shift left of a narrowed long: sll itself re-sign-extends the
// 32-bit result, so the ConvL2I needs no separate instruction.
instruct salL2I_Reg_imm(mRegI dst, mRegL src, immI8 shift) %{
  match(Set dst (LShiftI (ConvL2I src) shift));
  format %{ "SHL $dst, $src, $shift #@salL2I_Reg_imm" %}
  ins_encode %{
    Register src = $src$$Register;
    Register dst = $dst$$Register;
    int shamt = $shift$$constant;
    __ sll(dst, src, shamt);
  %}
  ins_pipe( ialu_regI_regI );
%}
// (src << 16) & 0xFFFF0000: the shift already zeroes the low 16 bits, so
// the AND with -65536 is redundant and only the sll is emitted.
instruct salI_Reg_imm_and_M65536(mRegI dst, mRegI src, immI_16 shift, immI_M65536 mask) %{
  match(Set dst (AndI (LShiftI src shift) mask));
  format %{ "SHL $dst, $src, $shift #@salI_Reg_imm_and_M65536" %}
  ins_encode %{
    Register src = $src$$Register;
    Register dst = $dst$$Register;
    __ sll(dst, src, 16);
  %}
  ins_pipe( ialu_regI_regI );
%}
// ((int)(src & 7) << 16) >> 16: the masked value is 0..7, which survives
// short sign-extension unchanged, so only the andi is needed.
instruct land7_2_s(mRegI dst, mRegL src, immL7 seven, immI_16 sixteen)
%{
  match(Set dst (RShiftI (LShiftI (ConvL2I (AndL src seven)) sixteen) sixteen));
  format %{ "andi $dst, $src, 7\t# @land7_2_s" %}
  ins_encode %{
    Register src = $src$$Register;
    Register dst = $dst$$Register;
    __ andi(dst, src, 7);
  %}
  ins_pipe(ialu_regI_regI);
%}
// ((src1 | imm) << 16) >> 16 collapsed to a bare ori.
// NOTE(review): this is only equivalent when src1 | imm already equals its
// own short sign-extension (e.g. src1 produced by a short load and imm in
// 0..32767) — presumably guaranteed by the shapes C2 generates; confirm.
instruct ori2s(mRegI dst, mRegI src1, immI_0_32767 src2, immI_16 sixteen)
%{
  match(Set dst (RShiftI (LShiftI (OrI src1 src2) sixteen) sixteen));
  format %{ "ori $dst, $src1, $src2\t# @ori2s" %}
  ins_encode %{
    Register src = $src1$$Register;
    int val = $src2$$constant;
    Register dst = $dst$$Register;
    __ ori(dst, src, val);
  %}
  ins_pipe(ialu_regI_regI);
%}
10846 // Logical Shift Right by 16, followed by Arithmetic Shift Left by 16.
10847 // This idiom is used by the compiler the i2s bytecode.
// i2s bytecode idiom (<<16 then >>16): use the seh sign-extend-halfword
// instruction instead of the two shifts.
instruct i2s(mRegI dst, mRegI src, immI_16 sixteen)
%{
  match(Set dst (RShiftI (LShiftI src sixteen) sixteen));
  format %{ "i2s $dst, $src\t# @i2s" %}
  ins_encode %{
    Register src = $src$$Register;
    Register dst = $dst$$Register;
    __ seh(dst, src);
  %}
  ins_pipe(ialu_regI_regI);
%}
10862 // Logical Shift Right by 24, followed by Arithmetic Shift Left by 24.
10863 // This idiom is used by the compiler for the i2b bytecode.
// i2b bytecode idiom (<<24 then >>24): use the seb sign-extend-byte
// instruction instead of the two shifts.
instruct i2b(mRegI dst, mRegI src, immI_24 twentyfour)
%{
  match(Set dst (RShiftI (LShiftI src twentyfour) twentyfour));
  format %{ "i2b $dst, $src\t# @i2b" %}
  ins_encode %{
    Register src = $src$$Register;
    Register dst = $dst$$Register;
    __ seb(dst, src);
  %}
  ins_pipe(ialu_regI_regI);
%}
// Duplicate of salL2I_Reg_imm (same match rule and emission); kept as-is.
instruct salI_RegL2I_imm(mRegI dst, mRegL src, immI8 shift) %{
  match(Set dst (LShiftI (ConvL2I src) shift));
  format %{ "SHL $dst, $src, $shift #@salI_RegL2I_imm" %}
  ins_encode %{
    Register src = $src$$Register;
    Register dst = $dst$$Register;
    int shamt = $shift$$constant;
    __ sll(dst, src, shamt);
  %}
  ins_pipe( ialu_regI_regI );
%}
10893 // Shift Left by 8-bit immediate
// 32-bit shift left by a register amount (sllv uses the low 5 bits of the
// shift register, matching Java semantics).
instruct salI_Reg_Reg(mRegI dst, mRegI src, mRegI shift) %{
  match(Set dst (LShiftI src shift));
  format %{ "SHL $dst, $src, $shift #@salI_Reg_Reg" %}
  ins_encode %{
    Register src = $src$$Register;
    Register dst = $dst$$Register;
    Register shamt = $shift$$Register;
    __ sllv(dst, src, shamt);
  %}
  ins_pipe( ialu_regI_regI );
%}
10908 // Shift Left Long
// 64-bit shift left by immediate.  dsll encodes shift amounts 0..31;
// larger amounts need dsll32 (which shifts by sa+32).  The shift amount is
// first reduced mod 64 (low(shamt, 6)) per Java long-shift semantics.
// Note: the is_simm(shamt, 5) fast path (shamt in -16..15) is subsumed by
// the general branch; it is kept for the common small-shift case.
instruct salL_Reg_imm(mRegL dst, mRegL src, immI8 shift) %{
  //predicate(UseNewLongLShift);
  match(Set dst (LShiftL src shift));
  ins_cost(100);
  format %{ "salL $dst, $src, $shift @ salL_Reg_imm" %}
  ins_encode %{
    Register src_reg = as_Register($src$$reg);
    Register dst_reg = as_Register($dst$$reg);
    int shamt = $shift$$constant;
    if (__ is_simm(shamt, 5))
      __ dsll(dst_reg, src_reg, shamt);
    else
    {
      int sa = Assembler::low(shamt, 6);
      if (sa < 32) {
        __ dsll(dst_reg, src_reg, sa);
      } else {
        __ dsll32(dst_reg, src_reg, sa - 32);
      }
    }
  %}
  ins_pipe( ialu_regL_regL );
%}
// Shift Left Long by constant where the value is a widened int.
// The I2L widening is implicit: the int is already sign-extended in its
// 64-bit register, so the same dsll/dsll32 split as salL_Reg_imm applies.
instruct salL_RegI2L_imm(mRegL dst, mRegI src, immI8 shift) %{
  //predicate(UseNewLongLShift);
  match(Set dst (LShiftL (ConvI2L src) shift));
  ins_cost(100);
  format %{ "salL $dst, $src, $shift @ salL_RegI2L_imm" %}
  ins_encode %{
    Register src_reg = as_Register($src$$reg);
    Register dst_reg = as_Register($dst$$reg);
    int      shamt = $shift$$constant;

    if (__ is_simm(shamt, 5))
      __ dsll(dst_reg, src_reg, shamt);
    else
    {
      int sa = Assembler::low(shamt, 6);  // shift count mod 64
      if (sa < 32) {
        __ dsll(dst_reg, src_reg, sa);
      } else {
        __ dsll32(dst_reg, src_reg, sa - 32);
      }
    }
  %}
  ins_pipe( ialu_regL_regL );
%}
// Shift Left Long by a register amount; 'dsllv' uses the low 6 bits of
// the shift register, matching Java's (shift & 0x3f) semantics.
instruct salL_Reg_Reg(mRegL dst, mRegL src, mRegI shift) %{
  //predicate(UseNewLongLShift);
  match(Set dst (LShiftL src shift));
  ins_cost(100);
  format %{ "salL $dst, $src, $shift @ salL_Reg_Reg" %}
  ins_encode %{
    Register src_reg = as_Register($src$$reg);
    Register dst_reg = as_Register($dst$$reg);
    __ dsllv(dst_reg, src_reg, $shift$$Register);
  %}
  ins_pipe( ialu_regL_regL );
%}
// Shift Left Long (widened int) by constant.
// NOTE(review): this rule matches the exact same tree at the same cost as
// salL_RegI2L_imm above — looks like a redundant duplicate; confirm whether
// one of the two can be retired.
instruct salL_convI2L_Reg_imm(mRegL dst, mRegI src, immI8 shift) %{
  match(Set dst (LShiftL (ConvI2L src) shift));
  ins_cost(100);
  format %{ "salL $dst, $src, $shift @ salL_convI2L_Reg_imm" %}
  ins_encode %{
    Register src_reg = as_Register($src$$reg);
    Register dst_reg = as_Register($dst$$reg);
    int      shamt = $shift$$constant;

    if (__ is_simm(shamt, 5)) {
      __ dsll(dst_reg, src_reg, shamt);
    } else {
      int sa = Assembler::low(shamt, 6);  // shift count mod 64
      if (sa < 32) {
        __ dsll(dst_reg, src_reg, sa);
      } else {
        __ dsll32(dst_reg, src_reg, sa - 32);
      }
    }
  %}
  ins_pipe( ialu_regL_regL );
%}
// Arithmetic Shift Right Long by constant; count is masked to 6 bits up
// front, then routed to dsra (0-31) or dsra32 (32-63).
instruct sarL_Reg_imm(mRegL dst, mRegL src, immI8 shift) %{
  match(Set dst (RShiftL src shift));
  ins_cost(100);
  format %{ "sarL $dst, $src, $shift @ sarL_Reg_imm" %}
  ins_encode %{
    Register src_reg = as_Register($src$$reg);
    Register dst_reg = as_Register($dst$$reg);
    int      shamt = ($shift$$constant & 0x3f);  // Java: shift & 0x3f
    if (__ is_simm(shamt, 5))
      __ dsra(dst_reg, src_reg, shamt);
    else {
      int sa = Assembler::low(shamt, 6);
      if (sa < 32) {
        __ dsra(dst_reg, src_reg, sa);
      } else {
        __ dsra32(dst_reg, src_reg, sa - 32);
      }
    }
  %}
  ins_pipe( ialu_regL_regL );
%}
// (long >> [32..63]) narrowed to int: a single dsra32 both shifts and
// leaves a properly sign-extended 32-bit result, so the ConvL2I is free.
instruct sarL2I_Reg_immI_32_63(mRegI dst, mRegL src, immI_32_63 shift) %{
  match(Set dst (ConvL2I (RShiftL src shift)));
  ins_cost(100);
  format %{ "sarL $dst, $src, $shift @ sarL2I_Reg_immI_32_63" %}
  ins_encode %{
    Register src_reg = as_Register($src$$reg);
    Register dst_reg = as_Register($dst$$reg);
    int      shamt = $shift$$constant;
    __ dsra32(dst_reg, src_reg, shamt - 32);
  %}
  ins_pipe( ialu_regL_regL );
%}
// Arithmetic Shift Right Long by register amount ('dsrav' uses low 6 bits).
instruct sarL_Reg_Reg(mRegL dst, mRegL src, mRegI shift) %{
  //predicate(UseNewLongLShift);
  match(Set dst (RShiftL src shift));
  ins_cost(100);
  format %{ "sarL $dst, $src, $shift @ sarL_Reg_Reg" %}
  ins_encode %{
    Register src_reg = as_Register($src$$reg);
    Register dst_reg = as_Register($dst$$reg);
    __ dsrav(dst_reg, src_reg, $shift$$Register);
  %}
  ins_pipe( ialu_regL_regL );
%}
// Logical Shift Right Long by register amount ('dsrlv' uses low 6 bits).
instruct slrL_Reg_Reg(mRegL dst, mRegL src, mRegI shift) %{
  match(Set dst (URShiftL src shift));
  ins_cost(100);
  format %{ "slrL $dst, $src, $shift @ slrL_Reg_Reg" %}
  ins_encode %{
    Register src_reg = as_Register($src$$reg);
    Register dst_reg = as_Register($dst$$reg);
    __ dsrlv(dst_reg, src_reg, $shift$$Register);
  %}
  ins_pipe( ialu_regL_regL );
%}
// Logical Shift Right Long by a constant in [0,31]: the operand guarantees
// the dsll/dsll32 split is unnecessary, so emit dsrl directly (cost 80,
// cheaper than the generic long-shift rules).
instruct slrL_Reg_immI_0_31(mRegL dst, mRegL src, immI_0_31 shift) %{
  match(Set dst (URShiftL src shift));
  ins_cost(80);
  format %{ "slrL $dst, $src, $shift @ slrL_Reg_immI_0_31" %}
  ins_encode %{
    Register src_reg = as_Register($src$$reg);
    Register dst_reg = as_Register($dst$$reg);
    int      shamt = $shift$$constant;
    __ dsrl(dst_reg, src_reg, shamt);
  %}
  ins_pipe( ialu_regL_regL );
%}
// ((int)(long >>> shift)) & 0x7fffffff collapses to a single bit-field
// extract: dext takes 31 bits of src starting at bit 'shift'.
instruct slrL_Reg_immI_0_31_and_max_int(mRegI dst, mRegL src, immI_0_31 shift, immI_MaxI max_int) %{
  match(Set dst (AndI (ConvL2I (URShiftL src shift)) max_int));
  ins_cost(80);
  format %{ "dext $dst, $src, $shift, 31 @ slrL_Reg_immI_0_31_and_max_int" %}
  ins_encode %{
    Register src_reg = as_Register($src$$reg);
    Register dst_reg = as_Register($dst$$reg);
    int      shamt = $shift$$constant;
    __ dext(dst_reg, src_reg, shamt, 31);
  %}
  ins_pipe( ialu_regL_regL );
%}
// Logical Shift Right of a pointer reinterpreted as raw bits (CastP2X) by
// a constant in [0,31]; CastP2X is a no-op at machine level.
instruct slrL_P2XReg_immI_0_31(mRegL dst, mRegP src, immI_0_31 shift) %{
  match(Set dst (URShiftL (CastP2X src) shift));
  ins_cost(80);
  format %{ "slrL $dst, $src, $shift @ slrL_P2XReg_immI_0_31" %}
  ins_encode %{
    Register src_reg = as_Register($src$$reg);
    Register dst_reg = as_Register($dst$$reg);
    int      shamt = $shift$$constant;
    __ dsrl(dst_reg, src_reg, shamt);
  %}
  ins_pipe( ialu_regL_regL );
%}
// Logical Shift Right Long by a constant in [32,63]: dsrl32 encodes the
// count as (shamt - 32).
instruct slrL_Reg_immI_32_63(mRegL dst, mRegL src, immI_32_63 shift) %{
  match(Set dst (URShiftL src shift));
  ins_cost(80);
  format %{ "slrL $dst, $src, $shift @ slrL_Reg_immI_32_63" %}
  ins_encode %{
    Register src_reg = as_Register($src$$reg);
    Register dst_reg = as_Register($dst$$reg);
    int      shamt = $shift$$constant;
    __ dsrl32(dst_reg, src_reg, shamt - 32);
  %}
  ins_pipe( ialu_regL_regL );
%}
// (int)(long >>> shift) for shift in (32,63]: the predicate requires
// shift > 32 so the dsrl32 result already fits in (and is zero-extended
// into) 32 bits, making the ConvL2I free.
instruct slrL_Reg_immI_convL2I(mRegI dst, mRegL src, immI_32_63 shift) %{
  match(Set dst (ConvL2I (URShiftL src shift)));
  predicate(n->in(1)->in(2)->get_int() > 32);
  ins_cost(80);
  format %{ "slrL $dst, $src, $shift @ slrL_Reg_immI_convL2I" %}
  ins_encode %{
    Register src_reg = as_Register($src$$reg);
    Register dst_reg = as_Register($dst$$reg);
    int      shamt = $shift$$constant;
    __ dsrl32(dst_reg, src_reg, shamt - 32);
  %}
  ins_pipe( ialu_regL_regL );
%}
// Logical Shift Right of pointer bits (CastP2X) by a constant in [32,63].
instruct slrL_P2XReg_immI_32_63(mRegL dst, mRegP src, immI_32_63 shift) %{
  match(Set dst (URShiftL (CastP2X src) shift));
  ins_cost(80);
  format %{ "slrL $dst, $src, $shift @ slrL_P2XReg_immI_32_63" %}
  ins_encode %{
    Register src_reg = as_Register($src$$reg);
    Register dst_reg = as_Register($dst$$reg);
    int      shamt = $shift$$constant;
    __ dsrl32(dst_reg, src_reg, shamt - 32);
  %}
  ins_pipe( ialu_regL_regL );
%}
11148 // Xor Instructions
11149 // Xor Register with Register
// Xor int registers.  The trailing sll-by-0 re-sign-extends the low 32
// bits into the 64-bit register, keeping the canonical int representation.
instruct xorI_Reg_Reg(mRegI dst, mRegI src1, mRegI src2) %{
  match(Set dst (XorI src1 src2));

  format %{ "XOR $dst, $src1, $src2 #@xorI_Reg_Reg" %}

  ins_encode %{
    Register dst = $dst$$Register;
    Register src1 = $src1$$Register;
    Register src2 = $src2$$Register;
    __ xorr(dst, src1, src2);
    __ sll(dst, dst, 0); /* long -> int */
  %}

  ins_pipe( ialu_regI_regI );
%}
11166 // Or Instructions
11167 // Or Register with Register
// Or int registers.  Both inputs are canonical (sign-extended) ints, so
// the or'd result is canonical too — no re-extension needed.
instruct orI_Reg_Reg(mRegI dst, mRegI src1, mRegI src2) %{
  match(Set dst (OrI src1 src2));

  format %{ "OR $dst, $src1, $src2 #@orI_Reg_Reg" %}
  ins_encode %{
    Register dst = $dst$$Register;
    Register src1 = $src1$$Register;
    Register src2 = $src2$$Register;
    __ orr(dst, src1, src2);
  %}

  ins_pipe( ialu_regI_regI );
%}
// (src >>> rshift) | ((src & 1) << lshift) with rshift + lshift == 32:
// rotate the low bit to the top with rotr-by-1, then shift the remaining
// (rshift - 1) positions.  Recognizes a C2 idiom for bit rotation.
instruct rotI_shr_logical_Reg(mRegI dst, mRegI src, immI_0_31 rshift, immI_0_31 lshift, immI_1 one) %{
  match(Set dst (OrI (URShiftI src rshift) (LShiftI (AndI src one) lshift)));
  predicate(32 == ((n->in(1)->in(2)->get_int() + n->in(2)->in(2)->get_int())));

  format %{ "rotr $dst, $src, 1 ...\n\t"
            "srl $dst, $dst, ($rshift-1) @ rotI_shr_logical_Reg" %}
  ins_encode %{
    Register dst = $dst$$Register;
    Register src = $src$$Register;
    int rshift = $rshift$$constant;

    __ rotr(dst, src, 1);
    if (rshift - 1) {     // srl-by-0 would be a no-op; skip it
      __ srl(dst, dst, rshift - 1);
    }
  %}

  ins_pipe( ialu_regI_regI );
%}
// Or a long with raw pointer bits (CastP2X).
// NOTE(review): the rule is named orI/matches OrI yet operates on mRegL
// operands — presumably inherited as-is from the original port; confirm
// the intended ideal opcode before renaming.
instruct orI_Reg_castP2X(mRegL dst, mRegL src1, mRegP src2) %{
  match(Set dst (OrI src1 (CastP2X src2)));

  format %{ "OR $dst, $src1, $src2 #@orI_Reg_castP2X" %}
  ins_encode %{
    Register dst = $dst$$Register;
    Register src1 = $src1$$Register;
    Register src2 = $src2$$Register;
    __ orr(dst, src1, src2);
  %}

  ins_pipe( ialu_regI_regI );
%}
11216 // Logical Shift Right by 8-bit immediate
// Logical Shift Right int by constant ('srl' uses the count mod 32).
instruct shr_logical_Reg_imm(mRegI dst, mRegI src, immI8 shift) %{
  match(Set dst (URShiftI src shift));
  // effect(KILL cr);

  format %{ "SRL $dst, $src, $shift #@shr_logical_Reg_imm" %}
  ins_encode %{
    Register src = $src$$Register;
    Register dst = $dst$$Register;
    int    shift = $shift$$constant;

    __ srl(dst, src, shift);
  %}
  ins_pipe( ialu_regI_regI );
%}
// (src >>> shift) & mask where mask is a contiguous low-bit mask collapses
// to a single MIPS32r2 'ext' bit-field extract; is_int_mask() returns the
// number of one-bits in the mask (the field width).
instruct shr_logical_Reg_imm_nonneg_mask(mRegI dst, mRegI src, immI_0_31 shift, immI_nonneg_mask mask) %{
  match(Set dst (AndI (URShiftI src shift) mask));

  format %{ "ext $dst, $src, $shift, one-bits($mask) #@shr_logical_Reg_imm_nonneg_mask" %}
  ins_encode %{
    Register src = $src$$Register;
    Register dst = $dst$$Register;
    int      pos = $shift$$constant;
    int     size = Assembler::is_int_mask($mask$$constant);

    __ ext(dst, src, pos, size);
  %}
  ins_pipe( ialu_regI_regI );
%}
// Rotate int left: (dst << lshift) | (dst >>> rshift) with
// (lshift + rshift) % 32 == 0 is a right-rotate by rshift.
instruct rolI_Reg_immI_0_31(mRegI dst, immI_0_31 lshift, immI_0_31 rshift)
%{
  predicate(0 == ((n->in(1)->in(2)->get_int() + n->in(2)->in(2)->get_int()) & 0x1f));
  match(Set dst (OrI (LShiftI dst lshift) (URShiftI dst rshift)));

  ins_cost(100);
  format %{ "rotr $dst, $dst, $rshift #@rolI_Reg_immI_0_31" %}
  ins_encode %{
    Register dst = $dst$$Register;
    int      sa  = $rshift$$constant;

    __ rotr(dst, dst, sa);
  %}
  ins_pipe( ialu_regI_regI );
%}
// Rotate long left with a right-shift component in [0,31]: lowered to
// drotr by rshift (since (lshift + rshift) % 64 == 0).
instruct rolL_Reg_immI_0_31(mRegL dst, immI_32_63 lshift, immI_0_31 rshift)
%{
  predicate(0 == ((n->in(1)->in(2)->get_int() + n->in(2)->in(2)->get_int()) & 0x3f));
  match(Set dst (OrL (LShiftL dst lshift) (URShiftL dst rshift)));

  ins_cost(100);
  format %{ "rotr $dst, $dst, $rshift #@rolL_Reg_immI_0_31" %}
  ins_encode %{
    Register dst = $dst$$Register;
    int      sa  = $rshift$$constant;

    __ drotr(dst, dst, sa);
  %}
  ins_pipe( ialu_regI_regI );
%}
// Rotate long left with a right-shift component in [32,63]: drotr32
// encodes the rotate amount as (rshift - 32).
instruct rolL_Reg_immI_32_63(mRegL dst, immI_0_31 lshift, immI_32_63 rshift)
%{
  predicate(0 == ((n->in(1)->in(2)->get_int() + n->in(2)->in(2)->get_int()) & 0x3f));
  match(Set dst (OrL (LShiftL dst lshift) (URShiftL dst rshift)));

  ins_cost(100);
  format %{ "rotr $dst, $dst, $rshift #@rolL_Reg_immI_32_63" %}
  ins_encode %{
    Register dst = $dst$$Register;
    int      sa  = $rshift$$constant;

    __ drotr32(dst, dst, sa - 32);
  %}
  ins_pipe( ialu_regI_regI );
%}
// Rotate int right: same rotr lowering as rolI, but matching the
// (URShift, LShift) operand order that C2 may produce.
instruct rorI_Reg_immI_0_31(mRegI dst, immI_0_31 rshift, immI_0_31 lshift)
%{
  predicate(0 == ((n->in(1)->in(2)->get_int() + n->in(2)->in(2)->get_int()) & 0x1f));
  match(Set dst (OrI (URShiftI dst rshift) (LShiftI dst lshift)));

  ins_cost(100);
  format %{ "rotr $dst, $dst, $rshift #@rorI_Reg_immI_0_31" %}
  ins_encode %{
    Register dst = $dst$$Register;
    int      sa  = $rshift$$constant;

    __ rotr(dst, dst, sa);
  %}
  ins_pipe( ialu_regI_regI );
%}
// Rotate long right by an amount in [0,31] (URShift-first operand order).
instruct rorL_Reg_immI_0_31(mRegL dst, immI_0_31 rshift, immI_32_63 lshift)
%{
  predicate(0 == ((n->in(1)->in(2)->get_int() + n->in(2)->in(2)->get_int()) & 0x3f));
  match(Set dst (OrL (URShiftL dst rshift) (LShiftL dst lshift)));

  ins_cost(100);
  format %{ "rotr $dst, $dst, $rshift #@rorL_Reg_immI_0_31" %}
  ins_encode %{
    Register dst = $dst$$Register;
    int      sa  = $rshift$$constant;

    __ drotr(dst, dst, sa);
  %}
  ins_pipe( ialu_regI_regI );
%}
// Rotate long right by an amount in [32,63]; drotr32 takes (rshift - 32).
instruct rorL_Reg_immI_32_63(mRegL dst, immI_32_63 rshift, immI_0_31 lshift)
%{
  predicate(0 == ((n->in(1)->in(2)->get_int() + n->in(2)->in(2)->get_int()) & 0x3f));
  match(Set dst (OrL (URShiftL dst rshift) (LShiftL dst lshift)));

  ins_cost(100);
  format %{ "rotr $dst, $dst, $rshift #@rorL_Reg_immI_32_63" %}
  ins_encode %{
    Register dst = $dst$$Register;
    int      sa  = $rshift$$constant;

    __ drotr32(dst, dst, sa - 32);
  %}
  ins_pipe( ialu_regI_regI );
%}
11343 // Logical Shift Right
// Logical Shift Right int by register amount ('srlv' uses low 5 bits).
instruct shr_logical_Reg_Reg(mRegI dst, mRegI src, mRegI shift) %{
  match(Set dst (URShiftI src shift));

  format %{ "SRL $dst, $src, $shift #@shr_logical_Reg_Reg" %}
  ins_encode %{
    Register src = $src$$Register;
    Register dst = $dst$$Register;
    Register shift = $shift$$Register;
    __ srlv(dst, src, shift);
  %}
  ins_pipe( ialu_regI_regI );
%}
// Arithmetic Shift Right int by constant.
instruct shr_arith_Reg_imm(mRegI dst, mRegI src, immI8 shift) %{
  match(Set dst (RShiftI src shift));
  // effect(KILL cr);

  format %{ "SRA $dst, $src, $shift #@shr_arith_Reg_imm" %}
  ins_encode %{
    Register src = $src$$Register;
    Register dst = $dst$$Register;
    int    shift = $shift$$constant;
    __ sra(dst, src, shift);
  %}
  ins_pipe( ialu_regI_regI );
%}
// Arithmetic Shift Right int by register amount ('srav' uses low 5 bits).
instruct shr_arith_Reg_Reg(mRegI dst, mRegI src, mRegI shift) %{
  match(Set dst (RShiftI src shift));
  // effect(KILL cr);

  format %{ "SRA $dst, $src, $shift #@shr_arith_Reg_Reg" %}
  ins_encode %{
    Register src = $src$$Register;
    Register dst = $dst$$Register;
    Register shift = $shift$$Register;
    __ srav(dst, src, shift);
  %}
  ins_pipe( ialu_regI_regI );
%}
11386 //----------Convert Int to Boolean---------------------------------------------
// Convert int to boolean: dst = (src != 0) ? 1 : 0, done branch-free by
// setting dst to 1 and conditionally zeroing it with movz (move-if-zero).
// When dst aliases src, src must be copied to AT first since writing dst
// would clobber the value movz still has to test.
instruct convI2B(mRegI dst, mRegI src) %{
  match(Set dst (Conv2B src));

  ins_cost(100);
  format %{ "convI2B $dst, $src @ convI2B" %}
  ins_encode %{
    Register dst = as_Register($dst$$reg);
    Register src = as_Register($src$$reg);

    if (dst != src) {
      __ daddiu(dst, R0, 1);
      __ movz(dst, R0, src);     // dst = 0 if src == 0
    } else {
      __ move(AT, src);          // preserve the value being tested
      __ daddiu(dst, R0, 1);
      __ movz(dst, R0, AT);
    }
  %}

  ins_pipe( ialu_regL_regL );
%}
// Convert int to long: sll-by-0 sign-extends the low 32 bits; skipped when
// dst == src because canonical ints are already sign-extended in-register.
instruct convI2L_reg( mRegL dst, mRegI src) %{
  match(Set dst (ConvI2L src));

  ins_cost(100);
  format %{ "SLL $dst, $src @ convI2L_reg\t" %}
  ins_encode %{
    Register dst = as_Register($dst$$reg);
    Register src = as_Register($src$$reg);

    if(dst != src) __ sll(dst, src, 0);
  %}
  ins_pipe( ialu_regL_regL );
%}
// Convert long to int: sll-by-0 truncates to and sign-extends the low
// 32 bits, producing the canonical in-register int form.
instruct convL2I_reg( mRegI dst, mRegL src ) %{
  match(Set dst (ConvL2I src));

  format %{ "MOV $dst, $src @ convL2I_reg" %}

  ins_encode %{
    Register dst = as_Register($dst$$reg);
    Register src = as_Register($src$$reg);

    __ sll(dst, src, 0);
  %}

  ins_pipe( ialu_regI_regI );
%}
// (long)(int)long round-trip: one sll-by-0 performs both the truncation
// and the sign-extension back to 64 bits.
instruct convL2I2L_reg( mRegL dst, mRegL src ) %{
  match(Set dst (ConvI2L (ConvL2I src)));

  format %{ "sll $dst, $src, 0 @ convL2I2L_reg" %}

  ins_encode %{
    Register dst = as_Register($dst$$reg);
    Register src = as_Register($src$$reg);

    __ sll(dst, src, 0);
  %}

  ins_pipe( ialu_regI_regI );
%}
// Convert long to double: move the 64-bit GPR into an FPR (dmtc1) and
// convert long-to-double in place (cvt.d.l).
instruct convL2D_reg( regD dst, mRegL src ) %{
  match(Set dst (ConvL2D src));
  format %{ "convL2D $dst, $src @ convL2D_reg" %}
  ins_encode %{
    Register src = as_Register($src$$reg);
    FloatRegister dst = as_FloatRegister($dst$$reg);

    __ dmtc1(src, dst);
    __ cvt_d_l(dst, dst);
  %}

  ins_pipe( pipe_slow );
%}
// Convert double to long, fast path.
// trunc.l.d saturates out-of-range/NaN inputs to max_long; when the raw
// result != max_long it is taken as-is.  Otherwise the slow tail decides
// between real overflow (positive -> max_long stands, negative -> produce
// min_long = -1 - max_long) and NaN (Java requires 0, done via movt on the
// FP condition set by c.un.d).  F30, AT and T9 are used as scratch.
instruct convD2L_reg_fast( mRegL dst, regD src ) %{
  match(Set dst (ConvD2L src));
  ins_cost(150);
  format %{ "convD2L $dst, $src @ convD2L_reg_fast" %}
  ins_encode %{
    Register dst = as_Register($dst$$reg);
    FloatRegister src = as_FloatRegister($src$$reg);

    Label Done;

    __ trunc_l_d(F30, src);
    // max_long: 0x7fffffffffffffff
    // __ set64(AT, 0x7fffffffffffffff);
    __ daddiu(AT, R0, -1);
    __ dsrl(AT, AT, 1);          // AT = max_long
    __ dmfc1(dst, F30);

    __ bne(dst, AT, Done);       // not saturated -> result is exact
    __ delayed()->mtc1(R0, F30);

    __ cvt_d_w(F30, F30);        // F30 = 0.0
    __ c_ult_d(src, F30);        // src < 0.0 (unordered-or-less)?
    __ bc1f(Done);               // non-negative overflow keeps max_long
    __ delayed()->daddiu(T9, R0, -1);

    __ c_un_d(src, src);         // NaN?
    __ subu(dst, T9, AT);        // dst = -1 - max_long = min_long
    __ movt(dst, R0);            // NaN -> 0
    __ bind(Done);
  %}

  ins_pipe( pipe_slow );
%}
// Convert double to long, slow path: short-circuit NaN to 0, try the
// hardware truncation, and if the FCSR invalid-operation flag (bit 16,
// read via cfc1 $31) fired, fall back to the SharedRuntime::d2l stub.
instruct convD2L_reg_slow( mRegL dst, regD src ) %{
  match(Set dst (ConvD2L src));
  ins_cost(250);
  format %{ "convD2L $dst, $src @ convD2L_reg_slow" %}
  ins_encode %{
    Register dst = as_Register($dst$$reg);
    FloatRegister src = as_FloatRegister($src$$reg);

    Label L;

    __ c_un_d(src, src);         // NaN?
    __ bc1t(L);
    __ delayed();
    __ move(dst, R0);            // NaN -> 0

    __ trunc_l_d(F30, src);
    __ cfc1(AT, 31);             // read FCSR
    __ li(T9, 0x10000);          // invalid-operation cause bit
    __ andr(AT, AT, T9);
    __ beq(AT, R0, L);           // conversion was valid -> keep dmfc1 result
    __ delayed()->dmfc1(dst, F30);

    __ mov_d(F12, src);          // F12 = first FP argument register
    __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::d2l), 1);
    __ move(dst, V0);
    __ bind(L);
  %}

  ins_pipe( pipe_slow );
%}
// Convert float to int, fast path: hardware truncation, then zero the
// result if the input was NaN (c.un.s sets the FP condition; movt applies).
instruct convF2I_reg_fast( mRegI dst, regF src ) %{
  match(Set dst (ConvF2I src));
  ins_cost(150);
  format %{ "convf2i $dst, $src @ convF2I_reg_fast" %}
  ins_encode %{
    Register      dreg = $dst$$Register;
    FloatRegister fval = $src$$FloatRegister;

    __ trunc_w_s(F30, fval);
    __ mfc1(dreg, F30);
    __ c_un_s(fval, fval);       // NaN?
    __ movt(dreg, R0);           // NaN -> 0
  %}

  ins_pipe( pipe_slow );
%}
// Convert float to int, slow path: NaN -> 0, hardware truncation, and a
// SharedRuntime::f2i fallback when FCSR reports invalid-operation.  V0 is
// preserved around the runtime call unless it is the destination (see the
// 2014/01/08 bug note below).
instruct convF2I_reg_slow( mRegI dst, regF src ) %{
  match(Set dst (ConvF2I src));
  ins_cost(250);
  format %{ "convf2i $dst, $src @ convF2I_reg_slow" %}
  ins_encode %{
    Register      dreg = $dst$$Register;
    FloatRegister fval = $src$$FloatRegister;
    Label L;

    __ c_un_s(fval, fval);       // NaN?
    __ bc1t(L);
    __ delayed();
    __ move(dreg, R0);           // NaN -> 0

    __ trunc_w_s(F30, fval);

    /* Call SharedRuntime:f2i() to do valid convention */
    __ cfc1(AT, 31);             // read FCSR
    __ li(T9, 0x10000);          // invalid-operation cause bit
    __ andr(AT, AT, T9);
    __ beq(AT, R0, L);           // valid -> keep mfc1 result
    __ delayed()->mfc1(dreg, F30);

    __ mov_s(F12, fval);         // F12 = first FP argument register

    /* 2014/01/08 Fu : This bug was found when running ezDS's control-panel.
     *  J 982 C2 javax.swing.text.BoxView.layoutMajorAxis(II[I[I)V (283 bytes) @ 0x000000555c46aa74
     *
     * An interger array index has been assigned to V0, and then changed from 1 to Integer.MAX_VALUE.
     *  V0 is corrupted during call_VM_leaf(), and should be preserved.
     */
    if(dreg != V0) {
      __ push(V0);
    }
    __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::f2i), 1);
    if(dreg != V0) {
      __ move(dreg, V0);
      __ pop(V0);
    }
    __ bind(L);
  %}

  ins_pipe( pipe_slow );
%}
// Convert float to long, fast path: hardware truncation to a 64-bit
// result, then zero on NaN via the FP condition flag.
instruct convF2L_reg_fast( mRegL dst, regF src ) %{
  match(Set dst (ConvF2L src));
  ins_cost(150);
  format %{ "convf2l $dst, $src @ convF2L_reg_fast" %}
  ins_encode %{
    Register      dreg = $dst$$Register;
    FloatRegister fval = $src$$FloatRegister;

    __ trunc_l_s(F30, fval);
    __ dmfc1(dreg, F30);
    __ c_un_s(fval, fval);       // NaN?
    __ movt(dreg, R0);           // NaN -> 0
  %}

  ins_pipe( pipe_slow );
%}
// Convert float to long, slow path: NaN -> 0, hardware truncation, and a
// SharedRuntime::f2l fallback when FCSR reports invalid-operation.
instruct convF2L_reg_slow( mRegL dst, regF src ) %{
  match(Set dst (ConvF2L src));
  ins_cost(250);
  format %{ "convf2l $dst, $src @ convF2L_reg_slow" %}
  ins_encode %{
    Register dst = as_Register($dst$$reg);
    FloatRegister fval = $src$$FloatRegister;
    Label L;

    __ c_un_s(fval, fval);       // NaN?
    __ bc1t(L);
    __ delayed();
    __ move(dst, R0);            // NaN -> 0

    __ trunc_l_s(F30, fval);
    __ cfc1(AT, 31);             // read FCSR
    __ li(T9, 0x10000);          // invalid-operation cause bit
    __ andr(AT, AT, T9);
    __ beq(AT, R0, L);           // valid -> keep dmfc1 result
    __ delayed()->dmfc1(dst, F30);

    __ mov_s(F12, fval);         // F12 = first FP argument register
    __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::f2l), 1);
    __ move(dst, V0);
    __ bind(L);
  %}

  ins_pipe( pipe_slow );
%}
// Convert long to float: move the 64-bit GPR into an FPR (dmtc1) and
// convert long-to-single in place (cvt.s.l).  The conversion is
// branch-free, so no label is needed (an unused 'Label L;' was removed).
instruct convL2F_reg( regF dst, mRegL src ) %{
  match(Set dst (ConvL2F src));
  format %{ "convl2f $dst, $src @ convL2F_reg" %}
  ins_encode %{
    FloatRegister dst = $dst$$FloatRegister;
    Register src = as_Register($src$$reg);

    __ dmtc1(src, dst);
    __ cvt_s_l(dst, dst);
  %}

  ins_pipe( pipe_slow );
%}
// Convert int to float: mtc1 moves the 32-bit GPR into an FPR, then
// cvt.s.w converts word-to-single in place.
instruct convI2F_reg( regF dst, mRegI src ) %{
  match(Set dst (ConvI2F src));
  format %{ "convi2f $dst, $src @ convI2F_reg" %}
  ins_encode %{
    Register      src = $src$$Register;
    FloatRegister dst = $dst$$FloatRegister;

    __ mtc1(src, dst);
    __ cvt_s_w(dst, dst);
  %}

  ins_pipe( fpu_regF_regF );
%}
// CmpLTMask against zero: (p < 0) ? -1 : 0 is just the sign bit smeared
// across the word, i.e. an arithmetic shift right by 31.
instruct cmpLTMask_immI0( mRegI dst, mRegI p, immI0 zero ) %{
  match(Set dst (CmpLTMask p zero));
  ins_cost(100);

  format %{ "sra $dst, $p, 31 @ cmpLTMask_immI0" %}
  ins_encode %{
    Register src = $p$$Register;
    Register dst = $dst$$Register;

    __ sra(dst, src, 31);
  %}
  ins_pipe( pipe_slow );
%}
// CmpLTMask: dst = (p < q) ? -1 : 0.  slt yields 0/1; negating with
// subu(R0, dst) turns 1 into the all-ones mask.
instruct cmpLTMask( mRegI dst, mRegI p, mRegI q ) %{
  match(Set dst (CmpLTMask p q));
  ins_cost(400);

  format %{ "cmpLTMask $dst, $p, $q @ cmpLTMask" %}
  ins_encode %{
    Register p   = $p$$Register;
    Register q   = $q$$Register;
    Register dst = $dst$$Register;

    __ slt(dst, p, q);
    __ subu(dst, R0, dst);       // 0/1 -> 0/-1
  %}
  ins_pipe( pipe_slow );
%}
// Convert pointer to boolean: dst = (src != NULL) ? 1 : 0, same
// branch-free movz scheme as convI2B (AT preserves src when dst aliases it).
instruct convP2B(mRegI dst, mRegP src) %{
  match(Set dst (Conv2B src));

  ins_cost(100);
  format %{ "convP2B $dst, $src @ convP2B" %}
  ins_encode %{
    Register dst = as_Register($dst$$reg);
    Register src = as_Register($src$$reg);

    if (dst != src) {
      __ daddiu(dst, R0, 1);
      __ movz(dst, R0, src);     // dst = 0 if src == NULL
    } else {
      __ move(AT, src);          // preserve the value being tested
      __ daddiu(dst, R0, 1);
      __ movz(dst, R0, AT);
    }
  %}

  ins_pipe( ialu_regL_regL );
%}
// Convert int to double: mtc1 then cvt.d.w (word-to-double) in place.
instruct convI2D_reg_reg(regD dst, mRegI src) %{
  match(Set dst (ConvI2D src));
  format %{ "conI2D $dst, $src @convI2D_reg" %}
  ins_encode %{
    Register      src = $src$$Register;
    FloatRegister dst = $dst$$FloatRegister;
    __ mtc1(src, dst);
    __ cvt_d_w(dst, dst);
  %}
  ins_pipe( fpu_regF_regF );
%}
// Widen float to double (cvt.d.s); always exact.
instruct convF2D_reg_reg(regD dst, regF src) %{
  match(Set dst (ConvF2D src));
  format %{ "convF2D $dst, $src\t# @convF2D_reg_reg" %}
  ins_encode %{
    FloatRegister dst = $dst$$FloatRegister;
    FloatRegister src = $src$$FloatRegister;

    __ cvt_d_s(dst, src);
  %}
  ins_pipe( fpu_regF_regF );
%}
// Narrow double to float (cvt.s.d), rounding per the current FP mode.
instruct convD2F_reg_reg(regF dst, regD src) %{
  match(Set dst (ConvD2F src));
  format %{ "convD2F $dst, $src\t# @convD2F_reg_reg" %}
  ins_encode %{
    FloatRegister dst = $dst$$FloatRegister;
    FloatRegister src = $src$$FloatRegister;

    __ cvt_s_d(dst, src);
  %}
  ins_pipe( fpu_regF_regF );
%}
11761 // Convert a double to an int. If the double is a NAN, stuff a zero in instead.
// Convert double to int, fast path (32-bit analogue of convD2L_reg_fast).
// trunc.w.d saturates overflow/NaN to max_int; if the raw result equals
// max_int, distinguish real positive overflow (keep max_int), negative
// overflow (produce min_int = -1 - max_int) and NaN (0 via movt).
instruct convD2I_reg_reg_fast( mRegI dst, regD src ) %{
  match(Set dst (ConvD2I src));

  ins_cost(150);
  format %{ "convD2I $dst, $src\t# @ convD2I_reg_reg_fast" %}

  ins_encode %{
    FloatRegister src = $src$$FloatRegister;
    Register      dst = $dst$$Register;

    Label Done;

    __ trunc_w_d(F30, src);
    // max_int: 2147483647
    __ move(AT, 0x7fffffff);
    __ mfc1(dst, F30);

    __ bne(dst, AT, Done);       // not saturated -> result is exact
    __ delayed()->mtc1(R0, F30);

    __ cvt_d_w(F30, F30);        // F30 = 0.0
    __ c_ult_d(src, F30);        // src < 0.0 (unordered-or-less)?
    __ bc1f(Done);               // non-negative overflow keeps max_int
    __ delayed()->addiu(T9, R0, -1);

    __ c_un_d(src, src);         // NaN?
    __ subu32(dst, T9, AT);      // dst = -1 - max_int = min_int
    __ movt(dst, R0);            // NaN -> 0

    __ bind(Done);
  %}
  ins_pipe( pipe_slow );
%}
// Convert double to int, slow path: hardware truncation, then fall back to
// SharedRuntime::d2i when FCSR (read via cfc1 $31) reports
// invalid-operation (bit 16).
instruct convD2I_reg_reg_slow( mRegI dst, regD src ) %{
  match(Set dst (ConvD2I src));

  ins_cost(250);
  format %{ "convD2I $dst, $src\t# @ convD2I_reg_reg_slow" %}

  ins_encode %{
    FloatRegister src = $src$$FloatRegister;
    Register      dst = $dst$$Register;
    Label L;

    __ trunc_w_d(F30, src);
    __ cfc1(AT, 31);             // read FCSR
    __ li(T9, 0x10000);          // invalid-operation cause bit
    __ andr(AT, AT, T9);
    __ beq(AT, R0, L);           // valid -> keep mfc1 result
    __ delayed()->mfc1(dst, F30);

    __ mov_d(F12, src);          // F12 = first FP argument register
    __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::d2i), 1);
    __ move(dst, V0);
    __ bind(L);

  %}
  ins_pipe( pipe_slow );
%}
11823 // Convert oop pointer into compressed form
// Compress an oop that may be NULL (predicate excludes the NotNull case,
// which has its own cheaper rule below).
instruct encodeHeapOop(mRegN dst, mRegP src) %{
  predicate(n->bottom_type()->make_ptr()->ptr() != TypePtr::NotNull);
  match(Set dst (EncodeP src));
  format %{ "encode_heap_oop $dst,$src" %}
  ins_encode %{
    Register src = $src$$Register;
    Register dst = $dst$$Register;
    if (src != dst) {
      __ move(dst, src);         // encode_heap_oop works in place on dst
    }
    __ encode_heap_oop(dst);
  %}
  ins_pipe( ialu_regL_regL );
%}
// Compress an oop known to be non-NULL; skips the NULL check the general
// encoder would need.
instruct encodeHeapOop_not_null(mRegN dst, mRegP src) %{
  predicate(n->bottom_type()->make_ptr()->ptr() == TypePtr::NotNull);
  match(Set dst (EncodeP src));
  format %{ "encode_heap_oop_not_null $dst,$src @ encodeHeapOop_not_null" %}
  ins_encode %{
    __ encode_heap_oop_not_null($dst$$Register, $src$$Register);
  %}
  ins_pipe( ialu_regL_regL );
%}
// Decompress a narrow oop that may be NULL (NotNull/Constant cases are
// handled by the specialized rule below).
instruct decodeHeapOop(mRegP dst, mRegN src) %{
  predicate(n->bottom_type()->is_ptr()->ptr() != TypePtr::NotNull &&
            n->bottom_type()->is_ptr()->ptr() != TypePtr::Constant);
  match(Set dst (DecodeN src));
  format %{ "decode_heap_oop $dst,$src @ decodeHeapOop" %}
  ins_encode %{
    Register s = $src$$Register;
    Register d = $dst$$Register;
    if (s != d) {
      __ move(d, s);             // decode_heap_oop works in place on d
    }
    __ decode_heap_oop(d);
  %}
  ins_pipe( ialu_regL_regL );
%}
// Decompress a narrow oop known to be non-NULL; uses the two-register
// form when src and dst differ, in-place form otherwise.
instruct decodeHeapOop_not_null(mRegP dst, mRegN src) %{
  predicate(n->bottom_type()->is_ptr()->ptr() == TypePtr::NotNull ||
            n->bottom_type()->is_ptr()->ptr() == TypePtr::Constant);
  match(Set dst (DecodeN src));
  format %{ "decode_heap_oop_not_null $dst,$src @ decodeHeapOop_not_null" %}
  ins_encode %{
    Register s = $src$$Register;
    Register d = $dst$$Register;
    if (s != d) {
      __ decode_heap_oop_not_null(d, s);
    } else {
      __ decode_heap_oop_not_null(d);
    }
  %}
  ins_pipe( ialu_regL_regL );
%}
// Compress a klass pointer (always non-NULL).
// The format string previously said "encode_heap_oop_not_null", which is a
// different operation — corrected to match the emitted code.
instruct encodeKlass_not_null(mRegN dst, mRegP src) %{
  match(Set dst (EncodePKlass src));
  format %{ "encode_klass_not_null $dst,$src @ encodeKlass_not_null" %}
  ins_encode %{
    __ encode_klass_not_null($dst$$Register, $src$$Register);
  %}
  ins_pipe( ialu_regL_regL );
%}
// Decompress a narrow klass pointer (always non-NULL); two-register form
// when src and dst differ, in-place form otherwise.
instruct decodeKlass_not_null(mRegP dst, mRegN src) %{
  match(Set dst (DecodeNKlass src));
  format %{ "decode_heap_klass_not_null $dst,$src" %}
  ins_encode %{
    Register s = $src$$Register;
    Register d = $dst$$Register;
    if (s != d) {
      __ decode_klass_not_null(d, s);
    } else {
      __ decode_klass_not_null(d);
    }
  %}
  ins_pipe( ialu_regL_regL );
%}
11906 //FIXME
// Load the current JavaThread pointer.  With OPT_THREAD the thread lives
// permanently in the TREG register; otherwise fall back to get_thread().
//FIXME
instruct tlsLoadP(mRegP dst) %{
  match(Set dst (ThreadLocal));

  ins_cost(0);
  format %{ " get_thread in $dst #@tlsLoadP" %}
  ins_encode %{
    Register dst = $dst$$Register;
#ifdef OPT_THREAD
    __ move(dst, TREG);
#else
    __ get_thread(dst);
#endif
  %}

  ins_pipe( ialu_loadI );
%}
// CheckCastPP is a type-system-only node: no code is emitted.
// (Fixed the "#@chekCastPP" typo in the format tag to match the
// instruct name.)
instruct checkCastPP( mRegP dst ) %{
  match(Set dst (CheckCastPP dst));

  format %{ "#checkcastPP of $dst (empty encoding) #@checkCastPP" %}
  ins_encode( /*empty encoding*/ );
  ins_pipe( empty );
%}
// CastPP is a type-system-only node: zero size, no code emitted.
instruct castPP(mRegP dst)
%{
  match(Set dst (CastPP dst));

  size(0);
  format %{ "# castPP of $dst" %}
  ins_encode(/* empty encoding */);
  ins_pipe(empty);
%}
// CastII is a type-system-only node: no code emitted, zero cost.
instruct castII( mRegI dst ) %{
  match(Set dst (CastII dst));
  format %{ "#castII of $dst empty encoding" %}
  ins_encode( /*empty encoding*/ );
  ins_cost(0);
  ins_pipe( empty );
%}
11951 // Return Instruction
11952 // Remove the return address & jump to it.
// Return Instruction: jump through RA; the nop fills the branch delay slot.
instruct Ret() %{
  match(Return);
  format %{ "RET #@Ret" %}

  ins_encode %{
    __ jr(RA);
    __ nop();
  %}

  ins_pipe( pipe_jump );
%}
11965 /*
11966 // For Loongson CPUs, jr seems too slow, so this rule shouldn't be imported.
11967 instruct jumpXtnd(mRegL switch_val) %{
11968 match(Jump switch_val);
11970 ins_cost(350);
11972 format %{ "load T9 <-- [$constanttablebase, $switch_val, $constantoffset] @ jumpXtnd\n\t"
11973 "jr T9\n\t"
11974 "nop" %}
11975 ins_encode %{
11976 Register table_base = $constanttablebase;
11977 int con_offset = $constantoffset;
11978 Register switch_reg = $switch_val$$Register;
11980 if (UseLoongsonISA) {
11981 if (Assembler::is_simm(con_offset, 8)) {
11982 __ gsldx(T9, table_base, switch_reg, con_offset);
11983 } else if (Assembler::is_simm16(con_offset)) {
11984 __ daddu(T9, table_base, switch_reg);
11985 __ ld(T9, T9, con_offset);
11986 } else {
11987 __ move(T9, con_offset);
11988 __ daddu(AT, table_base, switch_reg);
11989 __ gsldx(T9, AT, T9, 0);
11990 }
11991 } else {
11992 if (Assembler::is_simm16(con_offset)) {
11993 __ daddu(T9, table_base, switch_reg);
11994 __ ld(T9, T9, con_offset);
11995 } else {
11996 __ move(T9, con_offset);
11997 __ daddu(AT, table_base, switch_reg);
11998 __ daddu(AT, T9, AT);
11999 __ ld(T9, AT, 0);
12000 }
12001 }
12003 __ jr(T9);
12004 __ nop();
12006 %}
12007 ins_pipe(pipe_jump);
12008 %}
12009 */
12011 // Jump Direct - Label defines a relative address from JMP
// Jump Direct - Label defines a relative address from JMP.
// If the label pointer was bound ('if(&L)' tests the pointer the reference
// was created from), branch to it; otherwise emit a zero-offset branch as
// a placeholder to be patched.  nop fills the delay slot.
instruct jmpDir(label labl) %{
  match(Goto);
  effect(USE labl);

  ins_cost(300);
  format %{ "JMP $labl #@jmpDir" %}

  ins_encode %{
    Label &L = *($labl$$label);
    if(&L)
      __ b(L);
    else
      __ b(int(0));
    __ nop();
  %}

  ins_pipe( pipe_jump );
  ins_pc_relative(1);
%}
12034 // Tail Jump; remove the return address; jump to target.
12035 // TailCall above leaves the return address around.
12036 // TailJump is used in only one place, the rethrow_Java stub (fancy_jump=2).
12037 // ex_oop (Exception Oop) is needed in %o0 at the jump. As there would be a
12038 // "restore" before this instruction (in Epilogue), we need to materialize it
12039 // in %i0.
12040 //FIXME
// Tail Jump (rethrow_Java stub, fancy_jump=2): put the exception oop in V0
// and the exception pc (the caller's RA) in V1 — the registers the
// forward-exception/exception-blob code expects — then jump to the target.
instruct tailjmpInd(mRegP jump_target,mRegP ex_oop) %{
  match( TailJump jump_target ex_oop );
  ins_cost(200);
  format %{ "Jmp $jump_target ; ex_oop = $ex_oop #@tailjmpInd" %}
  ins_encode %{
    Register target = $jump_target$$Register;

    /* 2012/9/14 Jin: V0, V1 are indicated in:
     *  [stubGenerator_mips.cpp] generate_forward_exception()
     *  [runtime_mips.cpp] OptoRuntime::generate_exception_blob()
     */
    Register oop  = $ex_oop$$Register;
    Register exception_oop = V0;
    Register exception_pc = V1;

    __ move(exception_pc, RA);
    __ move(exception_oop, oop);

    __ jr(target);               // delay slot filled by nop below
    __ nop();
  %}
  ins_pipe( pipe_jump );
%}
// ============================================================================
// Procedure Call/Return Instructions
// Call Java Static Instruction
// Note: If this code changes, the corresponding ret_addr_offset() and
//       compute_padding() functions will have to be adjusted.
instruct CallStaticJavaDirect(method meth) %{
  match(CallStaticJava);
  effect(USE meth);

  ins_cost(300);
  format %{ "CALL,static #@CallStaticJavaDirect " %}
  // Emission is shared via the Java_Static_Call encoding class.
  ins_encode( Java_Static_Call( meth ) );
  ins_pipe( pipe_slow );
  ins_pc_relative(1);
  ins_alignment(16);
%}

// Call Java Dynamic Instruction
// Note: If this code changes, the corresponding ret_addr_offset() and
//       compute_padding() functions will have to be adjusted.
instruct CallDynamicJavaDirect(method meth) %{
  match(CallDynamicJava);
  effect(USE meth);

  ins_cost(300);
  format %{"MOV IC_Klass, (oop)-1 @ CallDynamicJavaDirect\n\t"
           "CallDynamic @ CallDynamicJavaDirect" %}
  ins_encode( Java_Dynamic_Call( meth ) );
  ins_pipe( pipe_slow );
  ins_pc_relative(1);
  ins_alignment(16);
%}

// Leaf call into the runtime: no FP arguments, no safepoint.
instruct CallLeafNoFPDirect(method meth) %{
  match(CallLeafNoFP);
  effect(USE meth);

  ins_cost(300);
  format %{ "CALL_LEAF_NOFP,runtime " %}
  ins_encode(Java_To_Runtime(meth));
  ins_pipe( pipe_slow );
  ins_pc_relative(1);
  ins_alignment(16);
%}
12110 // Prefetch instructions.
instruct prefetchrNTA( memory mem ) %{
  match(PrefetchRead mem);
  ins_cost(125);

  format %{ "pref $mem\t# Prefetch into non-temporal cache for read @ prefetchrNTA" %}
  ins_encode %{
    int base  = $mem$$base;
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp  = $mem$$disp;

    // Form base + (index << scale) in AT ...
    if( index != 0 ) {
      if (scale == 0) {
        __ daddu(AT, as_Register(base), as_Register(index));
      } else {
        __ dsll(AT, as_Register(index), scale);
        __ daddu(AT, as_Register(base), AT);
      }
    } else {
      __ move(AT, as_Register(base));
    }
    // ... then add the displacement exactly once.
    // BUGFIX: the previous code restarted from `base` here (discarding the
    // index term just computed) and, in the simm16 case, added `disp` twice.
    // `pref` is only a hint, so this could not fault, but it prefetched the
    // wrong cache line whenever index or disp was non-zero.
    if( Assembler::is_simm16(disp) ) {
      __ daddiu(AT, AT, disp);
    } else {
      __ move(T9, disp);
      __ daddu(AT, AT, T9);
    }
    __ pref(0, AT, 0); //hint: 0:load
  %}
  ins_pipe(pipe_slow);
%}
instruct prefetchwNTA( memory mem ) %{
  match(PrefetchWrite mem);
  ins_cost(125);
  format %{ "pref $mem\t# Prefetch to non-temporal cache for write @ prefetchwNTA" %}
  ins_encode %{
    int base  = $mem$$base;
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp  = $mem$$disp;

    // Form base + (index << scale) in AT ...
    if( index != 0 ) {
      if (scale == 0) {
        __ daddu(AT, as_Register(base), as_Register(index));
      } else {
        __ dsll(AT, as_Register(index), scale);
        __ daddu(AT, as_Register(base), AT);
      }
    } else {
      __ move(AT, as_Register(base));
    }
    // ... then add the displacement exactly once.
    // BUGFIX: same defect as prefetchrNTA — the old code dropped the index
    // term and added `disp` twice in the simm16 case, so the hint targeted
    // the wrong line.
    if( Assembler::is_simm16(disp) ) {
      __ daddiu(AT, AT, disp);
    } else {
      __ move(T9, disp);
      __ daddu(AT, AT, T9);
    }
    __ pref(1, AT, 0); //hint: 1:store
  %}
  ins_pipe(pipe_slow);
%}
// Prefetch instructions for allocation.

instruct prefetchAllocNTA( memory mem ) %{
  match(PrefetchAllocation mem);
  ins_cost(125);
  format %{ "pref $mem\t# Prefetch allocation @ prefetchAllocNTA" %}
  ins_encode %{
    int base = $mem$$base;
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;

    // The "prefetch" is a byte load into R0 (the hard-wired zero register):
    // the value is discarded but the line is pulled into the cache.
    Register dst = R0;

    if( index != 0 ) {
      if( Assembler::is_simm16(disp) ) {
        if( UseLoongsonISA ) {
          // gslbx performs base + index + imm addressing in one instruction.
          // NOTE(review): storeF_imm0 below guards the gs*x immediate with
          // is_simm(disp, 8), while this uses is_simm16 — confirm gslbx
          // really accepts a 16-bit displacement here.
          if (scale == 0) {
            __ gslbx(dst, as_Register(base), as_Register(index), disp);
          } else {
            __ dsll(AT, as_Register(index), scale);
            __ gslbx(dst, as_Register(base), AT, disp);
          }
        } else {
          if (scale == 0) {
            __ addu(AT, as_Register(base), as_Register(index));
          } else {
            __ dsll(AT, as_Register(index), scale);
            __ addu(AT, as_Register(base), AT);
          }
          __ lb(dst, AT, disp);
        }
      } else {
        // Displacement does not fit in 16 bits: materialize it in T9.
        if (scale == 0) {
          __ addu(AT, as_Register(base), as_Register(index));
        } else {
          __ dsll(AT, as_Register(index), scale);
          __ addu(AT, as_Register(base), AT);
        }
        __ move(T9, disp);
        if( UseLoongsonISA ) {
          __ gslbx(dst, AT, T9, 0);
        } else {
          __ addu(AT, AT, T9);
          __ lb(dst, AT, 0);
        }
      }
    } else {
      // No index register: address is base + disp.
      if( Assembler::is_simm16(disp) ) {
        __ lb(dst, as_Register(base), disp);
      } else {
        __ move(T9, disp);
        if( UseLoongsonISA ) {
          __ gslbx(dst, as_Register(base), T9, 0);
        } else {
          __ addu(AT, as_Register(base), T9);
          __ lb(dst, AT, 0);
        }
      }
    }
  %}
  ins_pipe(pipe_slow);
%}
// Call runtime without safepoint
instruct CallLeafDirect(method meth) %{
  match(CallLeaf);
  effect(USE meth);

  ins_cost(300);
  format %{ "CALL_LEAF,runtime #@CallLeafDirect " %}
  ins_encode(Java_To_Runtime(meth));
  ins_pipe( pipe_slow );
  ins_pc_relative(1);
  ins_alignment(16);
%}
// Load Char (16bit unsigned)
instruct loadUS(mRegI dst, memory mem) %{
  match(Set dst (LoadUS mem));

  ins_cost(125);
  format %{ "loadUS $dst,$mem @ loadC" %}
  ins_encode(load_C_enc(dst, mem));
  ins_pipe( ialu_loadI );
%}

// Load Char and implicitly zero-extend to long (same encoding: the
// unsigned 16-bit load already leaves the upper bits clear).
instruct loadUS_convI2L(mRegL dst, memory mem) %{
  match(Set dst (ConvI2L (LoadUS mem)));

  ins_cost(125);
  format %{ "loadUS $dst,$mem @ loadUS_convI2L" %}
  ins_encode(load_C_enc(dst, mem));
  ins_pipe( ialu_loadI );
%}

// Store Char (16bit unsigned)
instruct storeC(memory mem, mRegI src) %{
  match(Set mem (StoreC mem src));

  ins_cost(125);
  format %{ "storeC $src, $mem @ storeC" %}
  ins_encode(store_C_reg_enc(mem, src));
  ins_pipe( ialu_loadI );
%}

// Store Char zero (uses R0 directly, no source register needed).
instruct storeC0(memory mem, immI0 zero) %{
  match(Set mem (StoreC mem zero));

  ins_cost(125);
  format %{ "storeC $zero, $mem @ storeC0" %}
  ins_encode(store_C0_enc(mem));
  ins_pipe( ialu_loadI );
%}
// Load float constant +0.0f by moving the integer zero register into an FPR.
instruct loadConF0(regF dst, immF0 zero) %{
  match(Set dst zero);
  ins_cost(100);

  format %{ "mov $dst, zero @ loadConF0\n"%}
  ins_encode %{
    FloatRegister dst = $dst$$FloatRegister;

    __ mtc1(R0, dst);
  %}
  ins_pipe( fpu_loadF );
%}

// Load a float constant from the constant table.
instruct loadConF(regF dst, immF src) %{
  match(Set dst src);
  ins_cost(125);

  format %{ "lwc1 $dst, $constantoffset[$constanttablebase] # load FLOAT $src from table @ loadConF" %}
  ins_encode %{
    int con_offset = $constantoffset($src);

    if (Assembler::is_simm16(con_offset)) {
      __ lwc1($dst$$FloatRegister, $constanttablebase, con_offset);
    } else {
      // Offset too large for the 16-bit immediate: materialize it in AT.
      __ set64(AT, con_offset);
      if (UseLoongsonISA) {
        // Loongson indexed FP load: base + index in one instruction.
        __ gslwxc1($dst$$FloatRegister, $constanttablebase, AT, 0);
      } else {
        __ daddu(AT, $constanttablebase, AT);
        __ lwc1($dst$$FloatRegister, AT, 0);
      }
    }
  %}
  ins_pipe( fpu_loadF );
%}

// Load double constant +0.0 via a 64-bit move from the zero register.
instruct loadConD0(regD dst, immD0 zero) %{
  match(Set dst zero);
  ins_cost(100);

  format %{ "mov $dst, zero @ loadConD0"%}
  ins_encode %{
    FloatRegister dst = as_FloatRegister($dst$$reg);

    __ dmtc1(R0, dst);
  %}
  ins_pipe( fpu_loadF );
%}

// Load a double constant from the constant table (mirrors loadConF).
instruct loadConD(regD dst, immD src) %{
  match(Set dst src);
  ins_cost(125);

  format %{ "ldc1 $dst, $constantoffset[$constanttablebase] # load DOUBLE $src from table @ loadConD" %}
  ins_encode %{
    int con_offset = $constantoffset($src);

    if (Assembler::is_simm16(con_offset)) {
      __ ldc1($dst$$FloatRegister, $constanttablebase, con_offset);
    } else {
      __ set64(AT, con_offset);
      if (UseLoongsonISA) {
        __ gsldxc1($dst$$FloatRegister, $constanttablebase, AT, 0);
      } else {
        __ daddu(AT, $constanttablebase, AT);
        __ ldc1($dst$$FloatRegister, AT, 0);
      }
    }
  %}
  ins_pipe( fpu_loadF );
%}
// Store register Float value (it is faster than store from FPU register)
instruct storeF_reg( memory mem, regF src) %{
  match(Set mem (StoreF mem src));

  ins_cost(50);
  format %{ "store $mem, $src\t# store float @ storeF_reg" %}
  ins_encode(store_F_reg_enc(mem, src));
  ins_pipe( fpu_storeF );
%}

// Store float +0.0f as a 32-bit integer store of R0 — avoids touching the
// FPU entirely.  On Loongson CPUs the indexed gsswx form is used where the
// displacement fits its 8-bit immediate.
instruct storeF_imm0( memory mem, immF0 zero) %{
  match(Set mem (StoreF mem zero));

  ins_cost(40);
  format %{ "store $mem, zero\t# store float @ storeF_imm0" %}
  ins_encode %{
    int base = $mem$$base;
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;

    if( index != 0 ) {
      if ( UseLoongsonISA ) {
        // gsswx takes only an 8-bit signed displacement, hence the
        // three-way split on the size of disp.
        if ( Assembler::is_simm(disp, 8) ) {
          if ( scale == 0 ) {
            __ gsswx(R0, as_Register(base), as_Register(index), disp);
          } else {
            __ dsll(T9, as_Register(index), scale);
            __ gsswx(R0, as_Register(base), T9, disp);
          }
        } else if ( Assembler::is_simm16(disp) ) {
          // disp fits the plain sw immediate: compute base+index first.
          if ( scale == 0 ) {
            __ daddu(AT, as_Register(base), as_Register(index));
          } else {
            __ dsll(T9, as_Register(index), scale);
            __ daddu(AT, as_Register(base), T9);
          }
          __ sw(R0, AT, disp);
        } else {
          // Large disp: fold disp into the index side, then use gsswx.
          if ( scale == 0 ) {
            __ move(T9, disp);
            __ daddu(AT, as_Register(index), T9);
            __ gsswx(R0, as_Register(base), AT, 0);
          } else {
            __ dsll(T9, as_Register(index), scale);
            __ move(AT, disp);
            __ daddu(AT, AT, T9);
            __ gsswx(R0, as_Register(base), AT, 0);
          }
        }
      } else { //not use loongson isa
        if(scale != 0) {
          __ dsll(T9, as_Register(index), scale);
          __ daddu(AT, as_Register(base), T9);
        } else {
          __ daddu(AT, as_Register(base), as_Register(index));
        }
        if( Assembler::is_simm16(disp) ) {
          __ sw(R0, AT, disp);
        } else {
          __ move(T9, disp);
          __ daddu(AT, AT, T9);
          __ sw(R0, AT, 0);
        }
      }
    } else { //index is 0
      if ( UseLoongsonISA ) {
        if ( Assembler::is_simm16(disp) ) {
          __ sw(R0, as_Register(base), disp);
        } else {
          // Use disp as the index register for gsswx.
          __ move(T9, disp);
          __ gsswx(R0, as_Register(base), T9, 0);
        }
      } else {
        if( Assembler::is_simm16(disp) ) {
          __ sw(R0, as_Register(base), disp);
        } else {
          __ move(T9, disp);
          __ daddu(AT, as_Register(base), T9);
          __ sw(R0, AT, 0);
        }
      }
    }
  %}
  ins_pipe( ialu_storeI );
%}
// Load Double
instruct loadD(regD dst, memory mem) %{
  match(Set dst (LoadD mem));

  ins_cost(150);
  format %{ "loadD $dst, $mem #@loadD" %}
  ins_encode(load_D_enc(dst, mem));
  ins_pipe( ialu_loadI );
%}

// Load Double - UNaligned
instruct loadD_unaligned(regD dst, memory mem ) %{
  match(Set dst (LoadD_unaligned mem));
  ins_cost(250);
  // FIXME: Jin: Need more effective ldl/ldr
  format %{ "loadD_unaligned $dst, $mem #@loadD_unaligned" %}
  ins_encode(load_D_enc(dst, mem));
  ins_pipe( ialu_loadI );
%}

// Store a double from an FPU register.
instruct storeD_reg( memory mem, regD src) %{
  match(Set mem (StoreD mem src));

  ins_cost(50);
  format %{ "store $mem, $src\t# store float @ storeD_reg" %}
  ins_encode(store_D_reg_enc(mem, src));
  ins_pipe( fpu_storeF );
%}
// Store double +0.0: materialize it in F30 (scratch FPR) and sdc1 it out.
instruct storeD_imm0( memory mem, immD0 zero) %{
  match(Set mem (StoreD mem zero));

  ins_cost(40);
  format %{ "store $mem, zero\t# store float @ storeD_imm0" %}
  ins_encode %{
    int base = $mem$$base;
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;

    // Build +0.0 (double) in F30: move integer 0 in, convert word->double.
    __ mtc1(R0, F30);
    __ cvt_d_w(F30, F30);

    if( index != 0 ) {
      if(scale != 0) {
        __ dsll(T9, as_Register(index), scale);
        // FIX: was addu — 32-bit add sign-extends and truncates 64-bit
        // addresses; sibling branch and storeF_imm0 already use daddu.
        __ daddu(AT, as_Register(base), T9);
      } else {
        __ daddu(AT, as_Register(base), as_Register(index));
      }
      if( Assembler::is_simm16(disp) ) {
        __ sdc1(F30, AT, disp);
      } else {
        __ move(T9, disp);
        __ daddu(AT, AT, T9);   // FIX: was addu (see above)
        __ sdc1(F30, AT, 0);
      }
    } else {
      if( Assembler::is_simm16(disp) ) {
        __ sdc1(F30, as_Register(base), disp);
      } else {
        __ move(T9, disp);
        __ daddu(AT, as_Register(base), T9);   // FIX: was addu (see above)
        __ sdc1(F30, AT, 0);
      }
    }
  %}
  ins_pipe( ialu_storeI );
%}
// Stack-slot accessors.  All of these address the slot relative to SP; the
// guarantee() asserts the displacement fits the 16-bit load/store immediate.
instruct loadSSI(mRegI dst, stackSlotI src)
%{
  match(Set dst src);

  ins_cost(125);
  format %{ "lw $dst, $src\t# int stk @ loadSSI" %}
  ins_encode %{
    guarantee( Assembler::is_simm16($src$$disp), "disp too long (loadSSI) !");
    __ lw($dst$$Register, SP, $src$$disp);
  %}
  ins_pipe(ialu_loadI);
%}

instruct storeSSI(stackSlotI dst, mRegI src)
%{
  match(Set dst src);

  ins_cost(100);
  format %{ "sw $dst, $src\t# int stk @ storeSSI" %}
  ins_encode %{
    guarantee( Assembler::is_simm16($dst$$disp), "disp too long (storeSSI) !");
    __ sw($src$$Register, SP, $dst$$disp);
  %}
  ins_pipe(ialu_storeI);
%}

instruct loadSSL(mRegL dst, stackSlotL src)
%{
  match(Set dst src);

  ins_cost(125);
  format %{ "ld $dst, $src\t# long stk @ loadSSL" %}
  ins_encode %{
    guarantee( Assembler::is_simm16($src$$disp), "disp too long (loadSSL) !");
    __ ld($dst$$Register, SP, $src$$disp);
  %}
  ins_pipe(ialu_loadI);
%}

instruct storeSSL(stackSlotL dst, mRegL src)
%{
  match(Set dst src);

  ins_cost(100);
  format %{ "sd $dst, $src\t# long stk @ storeSSL" %}
  ins_encode %{
    guarantee( Assembler::is_simm16($dst$$disp), "disp too long (storeSSL) !");
    __ sd($src$$Register, SP, $dst$$disp);
  %}
  ins_pipe(ialu_storeI);
%}

instruct loadSSP(mRegP dst, stackSlotP src)
%{
  match(Set dst src);

  ins_cost(125);
  format %{ "ld $dst, $src\t# ptr stk @ loadSSP" %}
  ins_encode %{
    guarantee( Assembler::is_simm16($src$$disp), "disp too long (loadSSP) !");
    __ ld($dst$$Register, SP, $src$$disp);
  %}
  ins_pipe(ialu_loadI);
%}

instruct storeSSP(stackSlotP dst, mRegP src)
%{
  match(Set dst src);

  ins_cost(100);
  format %{ "sd $dst, $src\t# ptr stk @ storeSSP" %}
  ins_encode %{
    guarantee( Assembler::is_simm16($dst$$disp), "disp too long (storeSSP) !");
    __ sd($src$$Register, SP, $dst$$disp);
  %}
  ins_pipe(ialu_storeI);
%}

instruct loadSSF(regF dst, stackSlotF src)
%{
  match(Set dst src);

  ins_cost(125);
  format %{ "lwc1 $dst, $src\t# float stk @ loadSSF" %}
  ins_encode %{
    guarantee( Assembler::is_simm16($src$$disp), "disp too long (loadSSF) !");
    __ lwc1($dst$$FloatRegister, SP, $src$$disp);
  %}
  ins_pipe(ialu_loadI);
%}

instruct storeSSF(stackSlotF dst, regF src)
%{
  match(Set dst src);

  ins_cost(100);
  format %{ "swc1 $dst, $src\t# float stk @ storeSSF" %}
  ins_encode %{
    guarantee( Assembler::is_simm16($dst$$disp), "disp too long (storeSSF) !");
    __ swc1($src$$FloatRegister, SP, $dst$$disp);
  %}
  ins_pipe(fpu_storeF);
%}

// Use the same format since predicate() can not be used here.
instruct loadSSD(regD dst, stackSlotD src)
%{
  match(Set dst src);

  ins_cost(125);
  format %{ "ldc1 $dst, $src\t# double stk @ loadSSD" %}
  ins_encode %{
    guarantee( Assembler::is_simm16($src$$disp), "disp too long (loadSSD) !");
    __ ldc1($dst$$FloatRegister, SP, $src$$disp);
  %}
  ins_pipe(ialu_loadI);
%}

instruct storeSSD(stackSlotD dst, regD src)
%{
  match(Set dst src);

  ins_cost(100);
  format %{ "sdc1 $dst, $src\t# double stk @ storeSSD" %}
  ins_encode %{
    guarantee( Assembler::is_simm16($dst$$disp), "disp too long (storeSSD) !");
    __ sdc1($src$$FloatRegister, SP, $dst$$disp);
  %}
  ins_pipe(fpu_storeF);
%}
// Fast-path monitor enter; the heavy lifting is in MacroAssembler::fast_lock.
instruct cmpFastLock( FlagsReg cr, mRegP object, s0_RegP box, mRegI tmp, mRegP scr) %{
  match( Set cr (FastLock object box) );
  effect( TEMP tmp, TEMP scr, USE_KILL box );
  ins_cost(300);
  format %{ "FASTLOCK $cr $object, $box, $tmp #@ cmpFastLock" %}
  ins_encode %{
    __ fast_lock($object$$Register, $box$$Register, $tmp$$Register, $scr$$Register);
  %}

  ins_pipe( pipe_slow );
  ins_pc_relative(1);
%}

// Fast-path monitor exit; see MacroAssembler::fast_unlock.
instruct cmpFastUnlock( FlagsReg cr, mRegP object, s0_RegP box, mRegP tmp ) %{
  match( Set cr (FastUnlock object box) );
  effect( TEMP tmp, USE_KILL box );
  ins_cost(300);
  format %{ "FASTUNLOCK $object, $box, $tmp #@cmpFastUnlock" %}
  ins_encode %{
    __ fast_unlock($object$$Register, $box$$Register, $tmp$$Register);
  %}

  ins_pipe( pipe_slow );
  ins_pc_relative(1);
%}
// Store CMS card-mark Immediate
instruct storeImmCM(memory mem, immI8 src) %{
  match(Set mem (StoreCM mem src));

  ins_cost(150);
  format %{ "MOV8 $mem,$src\t! CMS card-mark imm0" %}
  // opcode(0xC6);
  // Byte store with the synchronization the card-table protocol needs.
  ins_encode(store_B_immI_enc_sync(mem, src));
  ins_pipe( ialu_storeI );
%}
// Die now
instruct ShouldNotReachHere( )
%{
  match(Halt);
  ins_cost(300);

  // Use the following format syntax
  format %{ "ILLTRAP ;#@ShouldNotReachHere" %}
  ins_encode %{
    // Here we should emit illtrap !
    // FIX: message previously read "ShoudNotReachHere" — misleading when it
    // shows up in hs_err logs / grep.
    __ stop("in ShouldNotReachHere");
  %}
  ins_pipe( pipe_jump );
%}
// Load effective address: base + small displacement, only valid when the
// narrow-oop shift is zero (address arithmetic equals oop arithmetic).
instruct leaP8Narrow(mRegP dst, indOffset8Narrow mem)
%{
  predicate(Universe::narrow_oop_shift() == 0);
  match(Set dst mem);

  ins_cost(110);
  format %{ "leaq $dst, $mem\t# ptr off8narrow @ leaP8Narrow" %}
  ins_encode %{
    Register dst = $dst$$Register;
    Register base = as_Register($mem$$base);
    int disp = $mem$$disp;

    __ daddiu(dst, base, disp);
  %}
  ins_pipe( ialu_regI_imm16 );
%}

// Load effective address: base + (index << scale) + disp8.
instruct leaPPosIdxScaleOff8(mRegP dst, basePosIndexScaleOffset8 mem)
%{
  match(Set dst mem);

  ins_cost(110);
  format %{ "leaq $dst, $mem\t# @ PosIdxScaleOff8" %}
  ins_encode %{
    Register dst = $dst$$Register;
    Register base = as_Register($mem$$base);
    Register index = as_Register($mem$$index);
    int scale = $mem$$scale;
    int disp = $mem$$disp;

    if (scale == 0) {
      __ daddu(AT, base, index);
      __ daddiu(dst, AT, disp);
    } else {
      __ dsll(AT, index, scale);
      __ daddu(AT, base, AT);
      __ daddiu(dst, AT, disp);
    }
  %}

  ins_pipe( ialu_regI_imm16 );
%}

// Load effective address: base + (index << scale), no displacement.
instruct leaPIdxScale(mRegP dst, indIndexScale mem)
%{
  match(Set dst mem);

  ins_cost(110);
  format %{ "leaq $dst, $mem\t# @ leaPIdxScale" %}
  ins_encode %{
    Register dst = $dst$$Register;
    Register base = as_Register($mem$$base);
    Register index = as_Register($mem$$index);
    int scale = $mem$$scale;

    if (scale == 0) {
      __ daddu(dst, base, index);
    } else {
      __ dsll(AT, index, scale);
      __ daddu(dst, base, AT);
    }
  %}

  ins_pipe( ialu_regI_imm16 );
%}
// Jump Direct Conditional - Label defines a relative address from Jcc+1
instruct jmpLoopEnd(cmpOp cop, mRegI src1, mRegI src2, label labl) %{
  match(CountedLoopEnd cop (CmpI src1 src2));
  effect(USE labl);

  ins_cost(300);
  format %{ "J$cop $src1, $src2, $labl\t# Loop end @ jmpLoopEnd" %}
  ins_encode %{
    Register op1 = $src1$$Register;
    Register op2 = $src2$$Register;
    Label     &L = *($labl$$label);
    int     flag = $cop$$cmpcode;

    // Dispatch on the condition code; eq/ne map directly to beq/bne, the
    // ordered comparisons go through slt + branch-on-AT.  An unbound label
    // (&L == NULL) emits a zero-offset branch to be patched later.
    switch(flag)
    {
      case 0x01: //equal
        if (&L)
          __ beq(op1, op2, L);
        else
          __ beq(op1, op2, (int)0);
        break;
      case 0x02: //not_equal
        if (&L)
          __ bne(op1, op2, L);
        else
          __ bne(op1, op2, (int)0);
        break;
      case 0x03: //above
        __ slt(AT, op2, op1);
        if(&L)
          __ bne(AT, R0, L);
        else
          __ bne(AT, R0, (int)0);
        break;
      case 0x04: //above_equal
        __ slt(AT, op1, op2);
        if(&L)
          __ beq(AT, R0, L);
        else
          __ beq(AT, R0, (int)0);
        break;
      case 0x05: //below
        __ slt(AT, op1, op2);
        if(&L)
          __ bne(AT, R0, L);
        else
          __ bne(AT, R0, (int)0);
        break;
      case 0x06: //below_equal
        __ slt(AT, op2, op1);
        if(&L)
          __ beq(AT, R0, L);
        else
          __ beq(AT, R0, (int)0);
        break;
      default:
        Unimplemented();
    }
    __ nop();  // branch delay slot
  %}
  ins_pipe( pipe_jump );
  ins_pc_relative(1);
%}
// Loop-end branch against a small immediate: compute op1 - op2 into AT once,
// then branch on AT's sign/zero instead of doing a two-register compare.
instruct jmpLoopEnd_reg_imm16_sub(cmpOp cop, mRegI src1, immI16_sub src2, label labl) %{
  match(CountedLoopEnd cop (CmpI src1 src2));
  effect(USE labl);

  ins_cost(250);
  format %{ "J$cop $src1, $src2, $labl\t# Loop end @ jmpLoopEnd_reg_imm16_sub" %}
  ins_encode %{
    Register op1 = $src1$$Register;
    int      op2 = $src2$$constant;
    Label     &L = *($labl$$label);
    int     flag = $cop$$cmpcode;

    // AT = op1 - op2 (immI16_sub guarantees -op2 fits the 16-bit immediate).
    __ addiu32(AT, op1, -1 * op2);

    switch(flag)
    {
      case 0x01: //equal
        if (&L)
          __ beq(AT, R0, L);
        else
          __ beq(AT, R0, (int)0);
        break;
      case 0x02: //not_equal
        if (&L)
          __ bne(AT, R0, L);
        else
          __ bne(AT, R0, (int)0);
        break;
      case 0x03: //above
        if(&L)
          __ bgtz(AT, L);
        else
          __ bgtz(AT, (int)0);
        break;
      case 0x04: //above_equal
        if(&L)
          __ bgez(AT, L);
        else
          __ bgez(AT,(int)0);
        break;
      case 0x05: //below
        if(&L)
          __ bltz(AT, L);
        else
          __ bltz(AT, (int)0);
        break;
      case 0x06: //below_equal
        if(&L)
          __ blez(AT, L);
        else
          __ blez(AT, (int)0);
        break;
      default:
        Unimplemented();
    }
    __ nop();  // branch delay slot
  %}
  ins_pipe( pipe_jump );
  ins_pc_relative(1);
%}
12904 /*
12905 // Jump Direct Conditional - Label defines a relative address from Jcc+1
12906 instruct jmpLoopEndU(cmpOpU cop, eFlagsRegU cmp, label labl) %{
12907 match(CountedLoopEnd cop cmp);
12908 effect(USE labl);
12910 ins_cost(300);
12911 format %{ "J$cop,u $labl\t# Loop end" %}
12912 size(6);
12913 opcode(0x0F, 0x80);
12914 ins_encode( Jcc( cop, labl) );
12915 ins_pipe( pipe_jump );
12916 ins_pc_relative(1);
12917 %}
12919 instruct jmpLoopEndUCF(cmpOpUCF cop, eFlagsRegUCF cmp, label labl) %{
12920 match(CountedLoopEnd cop cmp);
12921 effect(USE labl);
12923 ins_cost(200);
12924 format %{ "J$cop,u $labl\t# Loop end" %}
12925 opcode(0x0F, 0x80);
12926 ins_encode( Jcc( cop, labl) );
12927 ins_pipe( pipe_jump );
12928 ins_pc_relative(1);
12929 %}
12930 */
// This match pattern is created for StoreIConditional since I cannot match IfNode without a RegFlags! fujie 2012/07/17
instruct jmpCon_flags(cmpOp cop, FlagsReg cr, label labl) %{
  match(If cop cr);
  effect(USE labl);

  ins_cost(300);
  format %{ "J$cop $labl #mips uses AT as eflag @jmpCon_flags" %}

  ins_encode %{
    Label &L = *($labl$$label);
    // AT carries the boolean outcome of the flag-producing instruction
    // (e.g. storeIConditional leaves AT non-zero on success), so "equal"
    // branches on AT != 0 and "not equal" on AT == 0 — the apparent
    // inversion is intentional.
    switch($cop$$cmpcode)
    {
      case 0x01: //equal
        if (&L)
          __ bne(AT, R0, L);
        else
          __ bne(AT, R0, (int)0);
        break;
      case 0x02: //not equal
        if (&L)
          __ beq(AT, R0, L);
        else
          __ beq(AT, R0, (int)0);
        break;
      default:
        Unimplemented();
    }
    __ nop();  // branch delay slot
  %}

  ins_pipe( pipe_jump );
  ins_pc_relative(1);
%}
// ============================================================================
// The 2nd slow-half of a subtype check. Scan the subklass's 2ndary superklass
// array for an instance of the superklass. Set a hidden internal cache on a
// hit (cache is checked with exposed code in gen_subtype_check()). Return
// NZ for a miss or zero for a hit. The encoding ALSO sets flags.
instruct partialSubtypeCheck( mRegP result, no_T8_mRegP sub, no_T8_mRegP super, mT8RegI tmp ) %{
  match(Set result (PartialSubtypeCheck sub super));
  effect(KILL tmp);
  ins_cost(1100);  // slightly larger than the next version
  format %{ "partialSubtypeCheck result=$result, sub=$sub, super=$super, tmp=$tmp " %}

  ins_encode( enc_PartialSubtypeCheck(result, sub, super, tmp) );
  ins_pipe( pipe_slow );
%}
// Conditional-store of an int value.
// ZF flag is set on success, reset otherwise. Implemented with a CMPXCHG on Intel.
instruct storeIConditional( memory mem, mRegI oldval, mRegI newval, FlagsReg cr ) %{
  match(Set cr (StoreIConditional mem (Binary oldval newval)));
  // effect(KILL oldval);
  format %{ "CMPXCHG $newval, $mem, $oldval \t# @storeIConditional" %}

  ins_encode %{
    Register oldval = $oldval$$Register;
    Register newval = $newval$$Register;
    Address  addr(as_Register($mem$$base), $mem$$disp);
    Label    again, failure;

    // int base = $mem$$base;
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;

    guarantee(Assembler::is_simm16(disp), "");

    // ll/sc only supports base+disp addressing; the matcher should never
    // hand us an indexed operand here.
    if( index != 0 ) {
      __ stop("in storeIConditional: index != 0");
    } else {
      // Classic ll/sc retry loop.  AT ends up 0 on failure (delay-slot
      // addu below) and 0xFF on success — jmpCon_flags reads this.
      __ bind(again);
      if(UseSyncLevel <= 1000) __ sync();
      __ ll(AT, addr);
      __ bne(AT, oldval, failure);
      __ delayed()->addu(AT, R0, R0);

      __ addu(AT, newval, R0);
      __ sc(AT, addr);
      __ beq(AT, R0, again);       // sc failed: retry
      __ delayed()->addiu(AT, R0, 0xFF);
      __ bind(failure);
      __ sync();
    }
  %}

  ins_pipe( long_memory_op );
%}
// Conditional-store of a long value.
// ZF flag is set on success, reset otherwise. Implemented with a CMPXCHG.
instruct storeLConditional(memory mem, t2RegL oldval, mRegL newval, FlagsReg cr )
%{
  match(Set cr (StoreLConditional mem (Binary oldval newval)));
  effect(KILL oldval);

  format %{ "cmpxchg $mem, $newval\t# If $oldval == $mem then store $newval into $mem" %}
  ins_encode%{
    Register oldval = $oldval$$Register;
    Register newval = $newval$$Register;
    Address  addr((Register)$mem$$base, $mem$$disp);

    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;

    guarantee(Assembler::is_simm16(disp), "");

    // lld/scd only supports base+disp addressing; indexed operands must
    // never reach this instruct.
    if( index != 0 ) {
      // FIX: message previously said "storeIConditional" (copy/paste from
      // the int variant), which would misattribute the crash.
      __ stop("in storeLConditional: index != 0");
    } else {
      __ cmpxchg(newval, addr, oldval);
    }
  %}
  ins_pipe( long_memory_op );
%}
// CAS of a 32-bit int.  cmpxchg32 leaves its success flag in AT, which is
// copied into the result register.
instruct compareAndSwapI( mRegI res, mRegP mem_ptr, mS2RegI oldval, mRegI newval) %{
  match(Set res (CompareAndSwapI mem_ptr (Binary oldval newval)));
  effect(KILL oldval);
  // match(CompareAndSwapI mem_ptr (Binary oldval newval));
  format %{ "CMPXCHG $newval, [$mem_ptr], $oldval @ compareAndSwapI\n\t"
            "MOV $res, 1 @ compareAndSwapI\n\t"
            "BNE AT, R0 @ compareAndSwapI\n\t"
            "MOV $res, 0 @ compareAndSwapI\n"
            "L:" %}
  ins_encode %{
    Register newval = $newval$$Register;
    Register oldval = $oldval$$Register;
    Register res    = $res$$Register;
    Address  addr($mem_ptr$$Register, 0);
    Label L;

    __ cmpxchg32(newval, addr, oldval);
    __ move(res, AT);
  %}
  ins_pipe( long_memory_op );
%}

//FIXME:
// CAS of a 64-bit pointer.
instruct compareAndSwapP( mRegI res, mRegP mem_ptr, s2_RegP oldval, mRegP newval) %{
  match(Set res (CompareAndSwapP mem_ptr (Binary oldval newval)));
  effect(KILL oldval);
  format %{ "CMPXCHG $newval, [$mem_ptr], $oldval @ compareAndSwapP\n\t"
            "MOV $res, AT @ compareAndSwapP\n\t"
            "L:" %}
  ins_encode %{
    Register newval = $newval$$Register;
    Register oldval = $oldval$$Register;
    Register res    = $res$$Register;
    Address  addr($mem_ptr$$Register, 0);
    Label L;

    __ cmpxchg(newval, addr, oldval);
    __ move(res, AT);
  %}
  ins_pipe( long_memory_op );
%}

// CAS of a compressed (narrow) oop.
instruct compareAndSwapN( mRegI res, mRegP mem_ptr, t2_RegN oldval, mRegN newval) %{
  match(Set res (CompareAndSwapN mem_ptr (Binary oldval newval)));
  effect(KILL oldval);
  format %{ "CMPXCHG $newval, [$mem_ptr], $oldval @ compareAndSwapN\n\t"
            "MOV $res, AT @ compareAndSwapN\n\t"
            "L:" %}
  ins_encode %{
    Register newval = $newval$$Register;
    Register oldval = $oldval$$Register;
    Register res    = $res$$Register;
    Address  addr($mem_ptr$$Register, 0);
    Label L;

    /* 2013/7/19 Jin: cmpxchg32 is implemented with ll/sc, which will do sign extension.
     *      Thus, we should extend oldval's sign for correct comparison.
     */
    __ sll(oldval, oldval, 0);  // sign-extend the narrow oop to 32 bits

    __ cmpxchg32(newval, addr, oldval);
    __ move(res, AT);
  %}
  ins_pipe( long_memory_op );
%}
13119 //----------Max and Min--------------------------------------------------------
13120 // Min Instructions
13121 ////
13122 // *** Min and Max using the conditional move are slower than the
13123 // *** branch version on a Pentium III.
13124 // // Conditional move for min
13125 //instruct cmovI_reg_lt( eRegI op2, eRegI op1, eFlagsReg cr ) %{
13126 // effect( USE_DEF op2, USE op1, USE cr );
13127 // format %{ "CMOVlt $op2,$op1\t! min" %}
13128 // opcode(0x4C,0x0F);
13129 // ins_encode( OpcS, OpcP, RegReg( op2, op1 ) );
13130 // ins_pipe( pipe_cmov_reg );
13131 //%}
13132 //
13133 //// Min Register with Register (P6 version)
13134 //instruct minI_eReg_p6( eRegI op1, eRegI op2 ) %{
13135 // predicate(VM_Version::supports_cmov() );
13136 // match(Set op2 (MinI op1 op2));
13137 // ins_cost(200);
13138 // expand %{
13139 // eFlagsReg cr;
13140 // compI_eReg(cr,op1,op2);
13141 // cmovI_reg_lt(op2,op1,cr);
13142 // %}
13143 //%}
// Min Register with Register (generic version)
instruct minI_Reg_Reg(mRegI dst, mRegI src) %{
  match(Set dst (MinI dst src));
  //effect(KILL flags);
  ins_cost(80);

  format %{ "MIN $dst, $src @minI_Reg_Reg" %}
  ins_encode %{
    Register dst   = $dst$$Register;
    Register src   = $src$$Register;

    // Branchless min: AT = (src < dst), then conditionally move src in.
    __ slt(AT, src, dst);
    __ movn(dst, src, AT);

  %}

  ins_pipe( pipe_slow );
%}
13164 // Max Register with Register
13165 // *** Min and Max using the conditional move are slower than the
13166 // *** branch version on a Pentium III.
13167 // // Conditional move for max
13168 //instruct cmovI_reg_gt( eRegI op2, eRegI op1, eFlagsReg cr ) %{
13169 // effect( USE_DEF op2, USE op1, USE cr );
13170 // format %{ "CMOVgt $op2,$op1\t! max" %}
13171 // opcode(0x4F,0x0F);
13172 // ins_encode( OpcS, OpcP, RegReg( op2, op1 ) );
13173 // ins_pipe( pipe_cmov_reg );
13174 //%}
13175 //
13176 // // Max Register with Register (P6 version)
13177 //instruct maxI_eReg_p6( eRegI op1, eRegI op2 ) %{
13178 // predicate(VM_Version::supports_cmov() );
13179 // match(Set op2 (MaxI op1 op2));
13180 // ins_cost(200);
13181 // expand %{
13182 // eFlagsReg cr;
13183 // compI_eReg(cr,op1,op2);
13184 // cmovI_reg_gt(op2,op1,cr);
13185 // %}
13186 //%}
// Max Register with Register (generic version)
// dst = max(dst, src) using a signed compare plus conditional move,
// avoiding a branch.
instruct maxI_Reg_Reg(mRegI dst, mRegI src) %{
  match(Set dst (MaxI dst src));
  ins_cost(80);

  format %{ "MAX $dst, $src @maxI_Reg_Reg" %}

  ins_encode %{
    Register dst = $dst$$Register;
    Register src = $src$$Register;

    // AT = (dst < src) ? 1 : 0 (signed compare).
    __ slt(AT, dst, src);
    // If src is the larger value, take it: dst = max(dst, src).
    __ movn(dst, src, AT);

  %}

  ins_pipe( pipe_slow );
%}
// Max with zero: dst = max(dst, 0). Cheaper than the generic form since
// the second operand is the hard-wired zero register.
instruct maxI_Reg_zero(mRegI dst, immI0 zero) %{
  match(Set dst (MaxI dst zero));
  ins_cost(50);

  format %{ "MAX $dst, 0 @maxI_Reg_zero" %}

  ins_encode %{
    Register dst = $dst$$Register;

    // AT = (dst < 0) ? 1 : 0; if dst is negative, clamp it to zero.
    __ slt(AT, dst, R0);
    __ movn(dst, R0, AT);

  %}

  ins_pipe( pipe_slow );
%}
// Zero-extend the low 32 bits of a long: AndL with the 0xFFFFFFFF mask is
// implemented as a single dext (extract bit field [0,32) into dst).
instruct zerox_long_reg_reg(mRegL dst, mRegL src, immL_32bits mask)
%{
  match(Set dst (AndL src mask));

  format %{ "movl $dst, $src\t# zero-extend long @ zerox_long_reg_reg" %}
  ins_encode %{
    Register dst = $dst$$Register;
    Register src = $src$$Register;

    __ dext(dst, src, 0, 32);
  %}
  ins_pipe(ialu_regI_regI);
%}
// Combine two ints into one long: dst = (src2 << 32) | zero_extend(src1).
// Matches the OrL/AndL/LShiftL idiom the parser produces for this pattern.
instruct combine_i2l(mRegL dst, mRegI src1, immL_32bits mask, mRegI src2, immI_32 shift32)
%{
  match(Set dst (OrL (AndL (ConvI2L src1) mask) (LShiftL (ConvI2L src2) shift32)));

  format %{ "combine_i2l $dst, $src2(H), $src1(L) @ combine_i2l" %}
  ins_encode %{
    Register dst = $dst$$Register;
    Register src1 = $src1$$Register;
    Register src2 = $src2$$Register;

    if (src1 == dst) {
      // Low half is already in place; insert src2 into bits [32,64).
      __ dinsu(dst, src2, 32, 32);
    } else if (src2 == dst) {
      // dst aliases the high half: shift it up first, then insert the low half.
      __ dsll32(dst, dst, 0);
      __ dins(dst, src1, 0, 32);
    } else {
      // Distinct registers: build the low half, then insert the high half.
      __ dext(dst, src1, 0, 32);
      __ dinsu(dst, src2, 32, 32);
    }
  %}
  ins_pipe(ialu_regI_regI);
%}
// Zero-extend convert int to long
// ConvI2L masked with 0xFFFFFFFF collapses to a single dext of bits [0,32).
instruct convI2L_reg_reg_zex(mRegL dst, mRegI src, immL_32bits mask)
%{
  match(Set dst (AndL (ConvI2L src) mask));

  format %{ "movl $dst, $src\t# i2l zero-extend @ convI2L_reg_reg_zex" %}
  ins_encode %{
    Register dst = $dst$$Register;
    Register src = $src$$Register;

    __ dext(dst, src, 0, 32);
  %}
  ins_pipe(ialu_regI_regI);
%}
// L2I followed by I2L under a 0xFFFFFFFF mask is just a zero-extension of
// the low 32 bits: one dext, no intermediate truncation needed.
instruct convL2I2L_reg_reg_zex(mRegL dst, mRegL src, immL_32bits mask)
%{
  match(Set dst (AndL (ConvI2L (ConvL2I src)) mask));

  format %{ "movl $dst, $src\t# i2l zero-extend @ convL2I2L_reg_reg_zex" %}
  ins_encode %{
    Register dst = $dst$$Register;
    Register src = $src$$Register;

    __ dext(dst, src, 0, 32);
  %}
  ins_pipe(ialu_regI_regI);
%}
// Match loading integer and casting it to unsigned int in long register.
// LoadI + ConvI2L + AndL 0xffffffff.
// Reuses load_N_enc — presumably an lwu (zero-extending 32-bit load), as
// the format string indicates; see load_N_enc's definition earlier in file.
instruct loadUI2L_rmask(mRegL dst, memory mem, immL_32bits mask) %{
  match(Set dst (AndL (ConvI2L (LoadI mem)) mask));

  format %{ "lwu $dst, $mem \t// zero-extend to long @ loadUI2L_rmask" %}
  ins_encode (load_N_enc(dst, mem));
  ins_pipe(ialu_loadI);
%}
// Same as loadUI2L_rmask but with the mask as the left AndL operand
// (the matcher does not canonicalize operand order here).
instruct loadUI2L_lmask(mRegL dst, memory mem, immL_32bits mask) %{
  match(Set dst (AndL mask (ConvI2L (LoadI mem))));

  format %{ "lwu $dst, $mem \t// zero-extend to long @ loadUI2L_lmask" %}
  ins_encode (load_N_enc(dst, mem));
  ins_pipe(ialu_loadI);
%}
13309 // ============================================================================
13310 // Safepoint Instruction
// Safepoint poll: read the polling page through $poll. A poll_type
// relocation is recorded so the VM can identify the poll site; when the
// VM arms the polling page the load faults and the thread is brought to
// a safepoint.
instruct safePoint_poll(mRegP poll) %{
  match(SafePoint poll);
  effect(USE poll);

  ins_cost(125);
  format %{ "Safepoint @ [$poll] : poll for GC @ safePoint_poll" %}

  ins_encode %{
    Register poll_reg = $poll$$Register;

    __ block_comment("Safepoint:");
    __ relocate(relocInfo::poll_type);
    // The loaded value is discarded (AT); only the access matters.
    __ lw(AT, poll_reg, 0);
  %}

  ins_pipe( ialu_storeI );
%}
13329 //----------Arithmetic Conversion Instructions---------------------------------
// RoundFloat is a no-op on this target: FP registers already hold IEEE
// single-precision values (no extended-precision intermediate to round).
instruct roundFloat_nop(regF dst)
%{
  match(Set dst (RoundFloat dst));

  ins_cost(0);
  ins_encode();
  ins_pipe(empty);
%}
// RoundDouble is a no-op on this target: FP registers already hold IEEE
// double-precision values (no extended-precision intermediate to round).
instruct roundDouble_nop(regD dst)
%{
  match(Set dst (RoundDouble dst));

  ins_cost(0);
  ins_encode();
  ins_pipe(empty);
%}
13349 //---------- Zeros Count Instructions ------------------------------------------
13350 // CountLeadingZerosINode CountTrailingZerosINode
// Count leading zeros (int) with the MIPS clz instruction; guarded by
// UseCountLeadingZerosInstruction.
instruct countLeadingZerosI(mRegI dst, mRegI src) %{
  predicate(UseCountLeadingZerosInstruction);
  match(Set dst (CountLeadingZerosI src));

  format %{ "clz $dst, $src\t# count leading zeros (int)" %}
  ins_encode %{
    __ clz($dst$$Register, $src$$Register);
  %}
  ins_pipe( ialu_regL_regL );
%}
// Count leading zeros (long) with the MIPS64 dclz instruction; guarded by
// UseCountLeadingZerosInstruction.
instruct countLeadingZerosL(mRegI dst, mRegL src) %{
  predicate(UseCountLeadingZerosInstruction);
  match(Set dst (CountLeadingZerosL src));

  format %{ "dclz $dst, $src\t# count leading zeros (long)" %}
  ins_encode %{
    __ dclz($dst$$Register, $src$$Register);
  %}
  ins_pipe( ialu_regL_regL );
%}
// Count trailing zeros (int); guarded by UseCountTrailingZerosInstruction.
instruct countTrailingZerosI(mRegI dst, mRegI src) %{
  predicate(UseCountTrailingZerosInstruction);
  match(Set dst (CountTrailingZerosI src));

  format %{ "ctz $dst, $src\t# count trailing zeros (int)" %}
  ins_encode %{
    // ctz and dctz are Loongson (gs) extension instructions.
    __ ctz($dst$$Register, $src$$Register);
  %}
  ins_pipe( ialu_regL_regL );
%}
// Count trailing zeros (long); guarded by UseCountTrailingZerosInstruction.
// dctz is a Loongson (gs) extension instruction (see countTrailingZerosI).
instruct countTrailingZerosL(mRegI dst, mRegL src) %{
  predicate(UseCountTrailingZerosInstruction);
  match(Set dst (CountTrailingZerosL src));

  // Fixed format-string typo: read "dcto", but the encoding emits dctz.
  format %{ "dctz $dst, $src\t# count trailing zeros (long)" %}
  ins_encode %{
    __ dctz($dst$$Register, $src$$Register);
  %}
  ins_pipe( ialu_regL_regL );
%}
13396 // ====================VECTOR INSTRUCTIONS=====================================
// Load vectors (8 bytes long)
// A vecD lives in a double FP register, so an 8-byte vector load reuses
// the double-load encoding.
instruct loadV8(vecD dst, memory mem) %{
  predicate(n->as_LoadVector()->memory_size() == 8);
  match(Set dst (LoadVector mem));
  ins_cost(125);
  format %{ "load $dst, $mem\t! load vector (8 bytes)" %}
  ins_encode(load_D_enc(dst, mem));
  ins_pipe( fpu_loadF );
%}
// Store vectors (8 bytes long)
// Mirrors loadV8: the 8-byte vector store reuses the double-store encoding.
instruct storeV8(memory mem, vecD src) %{
  predicate(n->as_StoreVector()->memory_size() == 8);
  match(Set mem (StoreVector mem src));
  ins_cost(145);
  format %{ "store $mem, $src\t! store vector (8 bytes)" %}
  ins_encode(store_D_reg_enc(mem, src));
  ins_pipe( fpu_storeF );
%}
// Replicate a byte scalar into all 8 lanes of a vecD.
instruct Repl8B(vecD dst, mRegI src) %{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (ReplicateB src));
  format %{ "replv_ob AT, $src\n\t"
            "dmtc1 AT, $dst\t! replicate8B" %}
  ins_encode %{
    // replv_ob (Loongson extension) broadcasts the low byte of $src across
    // AT; the 64-bit result is then moved into the FP/vector register.
    __ replv_ob(AT, $src$$Register);
    __ dmtc1(AT, $dst$$FloatRegister);
  %}
  ins_pipe( pipe_mtc1 );
%}
// Replicate an immediate byte into all 8 lanes of a vecD.
instruct Repl8B_imm(vecD dst, immI con) %{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (ReplicateB con));
  // Fixed: dropped the stray ",0x00" operand from the dmtc1 format line —
  // the encoding emits a plain two-operand dmtc1, and sibling rules
  // (Repl4S_imm etc.) use the two-operand form. Debug output only.
  format %{ "repl_ob AT, [$con]\n\t"
            "dmtc1 AT, $dst\t! replicate8B($con)" %}
  ins_encode %{
    int val = $con$$constant;
    // repl_ob (Loongson extension) broadcasts the immediate byte across AT.
    __ repl_ob(AT, val);
    __ dmtc1(AT, $dst$$FloatRegister);
  %}
  ins_pipe( pipe_mtc1 );
%}
// Replicate byte zero: all lanes zero == the 64-bit zero register moved
// straight into the FP/vector register.
instruct Repl8B_zero(vecD dst, immI0 zero) %{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (ReplicateB zero));
  format %{ "dmtc1 R0, $dst\t! replicate8B zero" %}
  ins_encode %{
    __ dmtc1(R0, $dst$$FloatRegister);
  %}
  ins_pipe( pipe_mtc1 );
%}
// Replicate byte -1: every lane 0xFF == an all-ones 64-bit word.
instruct Repl8B_M1(vecD dst, immI_M1 M1) %{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (ReplicateB M1));
  format %{ "dmtc1 -1, $dst\t! replicate8B -1" %}
  ins_encode %{
    // nor(AT, R0, R0) = ~0, i.e. 0xFFFFFFFFFFFFFFFF.
    __ nor(AT, R0, R0);
    __ dmtc1(AT, $dst$$FloatRegister);
  %}
  ins_pipe( pipe_mtc1 );
%}
// Replicate a short scalar into all 4 lanes of a vecD.
instruct Repl4S(vecD dst, mRegI src) %{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (ReplicateS src));
  format %{ "replv_qh AT, $src\n\t"
            "dmtc1 AT, $dst\t! replicate4S" %}
  ins_encode %{
    // replv_qh (Loongson extension) broadcasts the low halfword of $src
    // across AT; the result is moved into the FP/vector register.
    __ replv_qh(AT, $src$$Register);
    __ dmtc1(AT, $dst$$FloatRegister);
  %}
  ins_pipe( pipe_mtc1 );
%}
// Replicate an immediate short into all 4 lanes of a vecD.
// NOTE(review): the format string names only replv_qh, but small
// immediates are encoded with repl_qh instead — debug output only.
instruct Repl4S_imm(vecD dst, immI con) %{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (ReplicateS con));
  format %{ "replv_qh AT, [$con]\n\t"
            "dmtc1 AT, $dst\t! replicate4S($con)" %}
  ins_encode %{
    int val = $con$$constant;
    if ( Assembler::is_simm(val, 10)) {
      // repl_qh supports a 10-bit signed immediate.
      __ repl_qh(AT, val);
    } else {
      // Larger constants: materialize in AT, then broadcast from register.
      __ li32(AT, val);
      __ replv_qh(AT, AT);
    }
    __ dmtc1(AT, $dst$$FloatRegister);
  %}
  ins_pipe( pipe_mtc1 );
%}
// Replicate short zero: move the zero register into the FP/vector register.
instruct Repl4S_zero(vecD dst, immI0 zero) %{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (ReplicateS zero));
  format %{ "dmtc1 R0, $dst\t! replicate4S zero" %}
  ins_encode %{
    __ dmtc1(R0, $dst$$FloatRegister);
  %}
  ins_pipe( pipe_mtc1 );
%}
// Replicate short -1: every lane 0xFFFF == an all-ones 64-bit word.
instruct Repl4S_M1(vecD dst, immI_M1 M1) %{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (ReplicateS M1));
  format %{ "dmtc1 -1, $dst\t! replicate4S -1" %}
  ins_encode %{
    // nor(AT, R0, R0) = ~0, i.e. 0xFFFFFFFFFFFFFFFF.
    __ nor(AT, R0, R0);
    __ dmtc1(AT, $dst$$FloatRegister);
  %}
  ins_pipe( pipe_mtc1 );
%}
// Replicate integer (4 byte) scalar to be vector
// Builds AT = src:src by inserting src into both 32-bit halves; dins and
// dinsu together define all 64 bits, so AT's prior value is irrelevant.
instruct Repl2I(vecD dst, mRegI src) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (ReplicateI src));
  format %{ "dins AT, $src, 0, 32\n\t"
            "dinsu AT, $src, 32, 32\n\t"
            "dmtc1 AT, $dst\t! replicate2I" %}
  ins_encode %{
    __ dins(AT, $src$$Register, 0, 32);   // low 32 bits
    __ dinsu(AT, $src$$Register, 32, 32); // high 32 bits
    __ dmtc1(AT, $dst$$FloatRegister);
  %}
  ins_pipe( pipe_mtc1 );
%}
// Replicate integer (4 byte) scalar immediate to be vector by loading from const table.
// NOTE(review): despite the header above, the encoding materializes the
// constant with li32 + replv_pw rather than a constant-table load, and the
// KILLed tmp (A7) is never referenced in the encoding — presumably a
// leftover from an earlier const-table implementation; confirm before
// removing the effect.
instruct Repl2I_imm(vecD dst, immI con, mA7RegI tmp) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (ReplicateI con));
  effect(KILL tmp);
  format %{ "li32 AT, [$con], 32\n\t"
            "replv_pw AT, AT\n\t"
            "dmtc1 AT, $dst\t! replicate2I($con)" %}
  ins_encode %{
    int val = $con$$constant;
    __ li32(AT, val);
    // replv_pw (Loongson extension) broadcasts the low word across AT.
    __ replv_pw(AT, AT);
    __ dmtc1(AT, $dst$$FloatRegister);
  %}
  ins_pipe( pipe_mtc1 );
%}
// Replicate integer (4 byte) scalar zero to be vector
instruct Repl2I_zero(vecD dst, immI0 zero) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (ReplicateI zero));
  format %{ "dmtc1 R0, $dst\t! replicate2I zero" %}
  ins_encode %{
    __ dmtc1(R0, $dst$$FloatRegister);
  %}
  ins_pipe( pipe_mtc1 );
%}
// Replicate integer (4 byte) scalar -1 to be vector
instruct Repl2I_M1(vecD dst, immI_M1 M1) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (ReplicateI M1));
  format %{ "dmtc1 -1, $dst\t! replicate2I -1, use AT" %}
  ins_encode %{
    // nor(AT, R0, R0) = ~0, i.e. 0xFFFFFFFFFFFFFFFF.
    __ nor(AT, R0, R0);
    __ dmtc1(AT, $dst$$FloatRegister);
  %}
  ins_pipe( pipe_mtc1 );
%}
// Replicate float (4 byte) scalar to be vector
// cvt.ps.s packs the two single-precision operands into one paired-single
// register; using $src for both halves replicates it.
instruct Repl2F(vecD dst, regF src) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (ReplicateF src));
  format %{ "cvt.ps $dst, $src, $src\t! replicate2F" %}
  ins_encode %{
    __ cvt_ps_s($dst$$FloatRegister, $src$$FloatRegister, $src$$FloatRegister);
  %}
  ins_pipe( pipe_slow );
%}
// Replicate float (4 byte) scalar zero to be vector
// +0.0f has an all-zero bit pattern, so moving R0 in zeroes both lanes.
instruct Repl2F_zero(vecD dst, immF0 zero) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (ReplicateF zero));
  format %{ "dmtc1 R0, $dst\t! replicate2F zero" %}
  ins_encode %{
    __ dmtc1(R0, $dst$$FloatRegister);
  %}
  ins_pipe( pipe_mtc1 );
%}
13594 // ====================VECTOR ARITHMETIC=======================================
13596 // --------------------------------- ADD --------------------------------------
13598 // Floats vector add
// Packed 2-float add, two-operand form (dst += src) via paired-single add.
instruct vadd2F(vecD dst, vecD src) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (AddVF dst src));
  format %{ "add.ps $dst,$src\t! add packed2F" %}
  ins_encode %{
    __ add_ps($dst$$FloatRegister, $dst$$FloatRegister, $src$$FloatRegister);
  %}
  ins_pipe( pipe_slow );
%}
// Packed 2-float add, three-operand form (dst = src1 + src2).
instruct vadd2F3(vecD dst, vecD src1, vecD src2) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (AddVF src1 src2));
  format %{ "add.ps $dst,$src1,$src2\t! add packed2F" %}
  ins_encode %{
    __ add_ps($dst$$FloatRegister, $src1$$FloatRegister, $src2$$FloatRegister);
  %}
  ins_pipe( fpu_regF_regF );
%}
13619 // --------------------------------- SUB --------------------------------------
13621 // Floats vector sub
// Packed 2-float subtract (dst -= src) via paired-single subtract.
instruct vsub2F(vecD dst, vecD src) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (SubVF dst src));
  format %{ "sub.ps $dst,$src\t! sub packed2F" %}
  ins_encode %{
    __ sub_ps($dst$$FloatRegister, $dst$$FloatRegister, $src$$FloatRegister);
  %}
  ins_pipe( fpu_regF_regF );
%}
13632 // --------------------------------- MUL --------------------------------------
13634 // Floats vector mul
// Packed 2-float multiply, two-operand form (dst *= src).
instruct vmul2F(vecD dst, vecD src) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (MulVF dst src));
  format %{ "mul.ps $dst, $src\t! mul packed2F" %}
  ins_encode %{
    __ mul_ps($dst$$FloatRegister, $dst$$FloatRegister, $src$$FloatRegister);
  %}
  ins_pipe( fpu_regF_regF );
%}
// Packed 2-float multiply, three-operand form (dst = src1 * src2).
instruct vmul2F3(vecD dst, vecD src1, vecD src2) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (MulVF src1 src2));
  format %{ "mul.ps $dst, $src1, $src2\t! mul packed2F" %}
  ins_encode %{
    __ mul_ps($dst$$FloatRegister, $src1$$FloatRegister, $src2$$FloatRegister);
  %}
  ins_pipe( fpu_regF_regF );
%}
13655 // --------------------------------- DIV --------------------------------------
// MIPS does not have div.ps
13659 //----------PEEPHOLE RULES-----------------------------------------------------
13660 // These must follow all instruction definitions as they use the names
13661 // defined in the instructions definitions.
13662 //
// peepmatch ( root_instr_name [preceding_instruction]* );
13664 //
13665 // peepconstraint %{
13666 // (instruction_number.operand_name relational_op instruction_number.operand_name
13667 // [, ...] );
13668 // // instruction numbers are zero-based using left to right order in peepmatch
13669 //
13670 // peepreplace ( instr_name ( [instruction_number.operand_name]* ) );
13671 // // provide an instruction_number.operand_name for each operand that appears
13672 // // in the replacement instruction's match rule
13673 //
13674 // ---------VM FLAGS---------------------------------------------------------
13675 //
13676 // All peephole optimizations can be turned off using -XX:-OptoPeephole
13677 //
13678 // Each peephole rule is given an identifying number starting with zero and
13679 // increasing by one in the order seen by the parser. An individual peephole
13680 // can be enabled, and all others disabled, by using -XX:OptoPeepholeAt=#
13681 // on the command-line.
13682 //
13683 // ---------CURRENT LIMITATIONS----------------------------------------------
13684 //
13685 // Only match adjacent instructions in same basic block
13686 // Only equality constraints
13687 // Only constraints between operands, not (0.dest_reg == EAX_enc)
13688 // Only one replacement instruction
13689 //
13690 // ---------EXAMPLE----------------------------------------------------------
13691 //
13692 // // pertinent parts of existing instructions in architecture description
13693 // instruct movI(eRegI dst, eRegI src) %{
13694 // match(Set dst (CopyI src));
13695 // %}
13696 //
13697 // instruct incI_eReg(eRegI dst, immI1 src, eFlagsReg cr) %{
13698 // match(Set dst (AddI dst src));
13699 // effect(KILL cr);
13700 // %}
13701 //
13702 // // Change (inc mov) to lea
13703 // peephole %{
// // increment preceded by register-register move
13705 // peepmatch ( incI_eReg movI );
13706 // // require that the destination register of the increment
13707 // // match the destination register of the move
13708 // peepconstraint ( 0.dst == 1.dst );
13709 // // construct a replacement instruction that sets
13710 // // the destination to ( move's source register + one )
13711 // peepreplace ( leaI_eReg_immI( 0.dst 1.src 0.src ) );
13712 // %}
13713 //
13714 // Implementation no longer uses movX instructions since
13715 // machine-independent system no longer uses CopyX nodes.
13716 //
13717 // peephole %{
13718 // peepmatch ( incI_eReg movI );
13719 // peepconstraint ( 0.dst == 1.dst );
13720 // peepreplace ( leaI_eReg_immI( 0.dst 1.src 0.src ) );
13721 // %}
13722 //
13723 // peephole %{
13724 // peepmatch ( decI_eReg movI );
13725 // peepconstraint ( 0.dst == 1.dst );
13726 // peepreplace ( leaI_eReg_immI( 0.dst 1.src 0.src ) );
13727 // %}
13728 //
13729 // peephole %{
13730 // peepmatch ( addI_eReg_imm movI );
13731 // peepconstraint ( 0.dst == 1.dst );
13732 // peepreplace ( leaI_eReg_immI( 0.dst 1.src 0.src ) );
13733 // %}
13734 //
13735 // peephole %{
13736 // peepmatch ( addP_eReg_imm movP );
13737 // peepconstraint ( 0.dst == 1.dst );
13738 // peepreplace ( leaP_eReg_immI( 0.dst 1.src 0.src ) );
13739 // %}
13741 // // Change load of spilled value to only a spill
13742 // instruct storeI(memory mem, eRegI src) %{
13743 // match(Set mem (StoreI mem src));
13744 // %}
13745 //
13746 // instruct loadI(eRegI dst, memory mem) %{
13747 // match(Set dst (LoadI mem));
13748 // %}
13749 //
13750 //peephole %{
13751 // peepmatch ( loadI storeI );
13752 // peepconstraint ( 1.src == 0.dst, 1.mem == 0.mem );
13753 // peepreplace ( storeI( 1.mem 1.mem 1.src ) );
13754 //%}
13756 //----------SMARTSPILL RULES---------------------------------------------------
13757 // These must follow all instruction definitions as they use the names
13758 // defined in the instructions definitions.