Fri, 17 Feb 2017 17:22:14 +0800
[C2] Use gsldx in load_P_enc for Loongson CPUs.
1 //
2 // Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved.
3 // Copyright (c) 2015, 2016, Loongson Technology. All rights reserved.
4 // DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5 //
6 // This code is free software; you can redistribute it and/or modify it
7 // under the terms of the GNU General Public License version 2 only, as
8 // published by the Free Software Foundation.
9 //
10 // This code is distributed in the hope that it will be useful, but WITHOUT
11 // ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 // FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
13 // version 2 for more details (a copy is included in the LICENSE file that
14 // accompanied this code).
15 //
16 // You should have received a copy of the GNU General Public License version
17 // 2 along with this work; if not, write to the Free Software Foundation,
18 // Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
19 //
20 // Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
21 // or visit www.oracle.com if you need additional information or have any
22 // questions.
23 //
24 //
26 // GodSon3 Architecture Description File
28 //----------REGISTER DEFINITION BLOCK------------------------------------------
29 // This information is used by the matcher and the register allocator to
30 // describe individual registers and classes of registers within the target
31 // architecture.
33 // format:
34 // reg_def name (call convention, c-call convention, ideal type, encoding);
35 // call convention :
36 // NS = No-Save
37 // SOC = Save-On-Call
38 // SOE = Save-On-Entry
39 // AS = Always-Save
40 // ideal type :
41 // see opto/opcodes.hpp for more info
42 // reg_class name (reg, ...);
43 // alloc_class name (reg, ...);
44 register %{
46 // General Registers
47 // Integer Registers
48 reg_def R0 ( NS, NS, Op_RegI, 0, VMRegImpl::Bad());
49 reg_def AT ( NS, NS, Op_RegI, 1, AT->as_VMReg());
50 reg_def AT_H ( NS, NS, Op_RegI, 1, AT->as_VMReg()->next());
51 reg_def V0 (SOC, SOC, Op_RegI, 2, V0->as_VMReg());
52 reg_def V0_H (SOC, SOC, Op_RegI, 2, V0->as_VMReg()->next());
53 reg_def V1 (SOC, SOC, Op_RegI, 3, V1->as_VMReg());
54 reg_def V1_H (SOC, SOC, Op_RegI, 3, V1->as_VMReg()->next());
55 reg_def A0 (SOC, SOC, Op_RegI, 4, A0->as_VMReg());
56 reg_def A0_H (SOC, SOC, Op_RegI, 4, A0->as_VMReg()->next());
57 reg_def A1 (SOC, SOC, Op_RegI, 5, A1->as_VMReg());
58 reg_def A1_H (SOC, SOC, Op_RegI, 5, A1->as_VMReg()->next());
59 reg_def A2 (SOC, SOC, Op_RegI, 6, A2->as_VMReg());
60 reg_def A2_H (SOC, SOC, Op_RegI, 6, A2->as_VMReg()->next());
61 reg_def A3 (SOC, SOC, Op_RegI, 7, A3->as_VMReg());
62 reg_def A3_H (SOC, SOC, Op_RegI, 7, A3->as_VMReg()->next());
63 reg_def A4 (SOC, SOC, Op_RegI, 8, A4->as_VMReg());
64 reg_def A4_H (SOC, SOC, Op_RegI, 8, A4->as_VMReg()->next());
65 reg_def A5 (SOC, SOC, Op_RegI, 9, A5->as_VMReg());
66 reg_def A5_H (SOC, SOC, Op_RegI, 9, A5->as_VMReg()->next());
67 reg_def A6 (SOC, SOC, Op_RegI, 10, A6->as_VMReg());
68 reg_def A6_H (SOC, SOC, Op_RegI, 10, A6->as_VMReg()->next());
69 reg_def A7 (SOC, SOC, Op_RegI, 11, A7->as_VMReg());
70 reg_def A7_H (SOC, SOC, Op_RegI, 11, A7->as_VMReg()->next());
71 reg_def T0 (SOC, SOC, Op_RegI, 12, T0->as_VMReg());
72 reg_def T0_H (SOC, SOC, Op_RegI, 12, T0->as_VMReg()->next());
73 reg_def T1 (SOC, SOC, Op_RegI, 13, T1->as_VMReg());
74 reg_def T1_H (SOC, SOC, Op_RegI, 13, T1->as_VMReg()->next());
75 reg_def T2 (SOC, SOC, Op_RegI, 14, T2->as_VMReg());
76 reg_def T2_H (SOC, SOC, Op_RegI, 14, T2->as_VMReg()->next());
77 reg_def T3 (SOC, SOC, Op_RegI, 15, T3->as_VMReg());
78 reg_def T3_H (SOC, SOC, Op_RegI, 15, T3->as_VMReg()->next());
79 reg_def S0 (SOC, SOE, Op_RegI, 16, S0->as_VMReg());
80 reg_def S0_H (SOC, SOE, Op_RegI, 16, S0->as_VMReg()->next());
81 reg_def S1 (SOC, SOE, Op_RegI, 17, S1->as_VMReg());
82 reg_def S1_H (SOC, SOE, Op_RegI, 17, S1->as_VMReg()->next());
83 reg_def S2 (SOC, SOE, Op_RegI, 18, S2->as_VMReg());
84 reg_def S2_H (SOC, SOE, Op_RegI, 18, S2->as_VMReg()->next());
85 reg_def S3 (SOC, SOE, Op_RegI, 19, S3->as_VMReg());
86 reg_def S3_H (SOC, SOE, Op_RegI, 19, S3->as_VMReg()->next());
87 reg_def S4 (SOC, SOE, Op_RegI, 20, S4->as_VMReg());
88 reg_def S4_H (SOC, SOE, Op_RegI, 20, S4->as_VMReg()->next());
89 reg_def S5 (SOC, SOE, Op_RegI, 21, S5->as_VMReg());
90 reg_def S5_H (SOC, SOE, Op_RegI, 21, S5->as_VMReg()->next());
91 reg_def S6 (SOC, SOE, Op_RegI, 22, S6->as_VMReg());
92 reg_def S6_H (SOC, SOE, Op_RegI, 22, S6->as_VMReg()->next());
93 reg_def S7 (SOC, SOE, Op_RegI, 23, S7->as_VMReg());
94 reg_def S7_H (SOC, SOE, Op_RegI, 23, S7->as_VMReg()->next());
95 reg_def T8 (SOC, SOC, Op_RegI, 24, T8->as_VMReg());
96 reg_def T8_H (SOC, SOC, Op_RegI, 24, T8->as_VMReg()->next());
97 reg_def T9 (SOC, SOC, Op_RegI, 25, T9->as_VMReg());
98 reg_def T9_H (SOC, SOC, Op_RegI, 25, T9->as_VMReg()->next());
100 // Special Registers
101 reg_def K0 ( NS, NS, Op_RegI, 26, K0->as_VMReg());
102 reg_def K1 ( NS, NS, Op_RegI, 27, K1->as_VMReg());
103 reg_def GP ( NS, NS, Op_RegI, 28, GP->as_VMReg());
104 reg_def GP_H ( NS, NS, Op_RegI, 28, GP->as_VMReg()->next());
105 reg_def SP ( NS, NS, Op_RegI, 29, SP->as_VMReg());
106 reg_def SP_H ( NS, NS, Op_RegI, 29, SP->as_VMReg()->next());
107 reg_def FP ( NS, NS, Op_RegI, 30, FP->as_VMReg());
108 reg_def FP_H ( NS, NS, Op_RegI, 30, FP->as_VMReg()->next());
109 reg_def RA ( NS, NS, Op_RegI, 31, RA->as_VMReg());
110 reg_def RA_H ( NS, NS, Op_RegI, 31, RA->as_VMReg()->next());
112 // Floating registers.
113 reg_def F0 ( SOC, SOC, Op_RegF, 0, F0->as_VMReg());
114 reg_def F0_H ( SOC, SOC, Op_RegF, 0, F0->as_VMReg()->next());
115 reg_def F1 ( SOC, SOC, Op_RegF, 1, F1->as_VMReg());
116 reg_def F1_H ( SOC, SOC, Op_RegF, 1, F1->as_VMReg()->next());
117 reg_def F2 ( SOC, SOC, Op_RegF, 2, F2->as_VMReg());
118 reg_def F2_H ( SOC, SOC, Op_RegF, 2, F2->as_VMReg()->next());
119 reg_def F3 ( SOC, SOC, Op_RegF, 3, F3->as_VMReg());
120 reg_def F3_H ( SOC, SOC, Op_RegF, 3, F3->as_VMReg()->next());
121 reg_def F4 ( SOC, SOC, Op_RegF, 4, F4->as_VMReg());
122 reg_def F4_H ( SOC, SOC, Op_RegF, 4, F4->as_VMReg()->next());
123 reg_def F5 ( SOC, SOC, Op_RegF, 5, F5->as_VMReg());
124 reg_def F5_H ( SOC, SOC, Op_RegF, 5, F5->as_VMReg()->next());
125 reg_def F6 ( SOC, SOC, Op_RegF, 6, F6->as_VMReg());
126 reg_def F6_H ( SOC, SOC, Op_RegF, 6, F6->as_VMReg()->next());
127 reg_def F7 ( SOC, SOC, Op_RegF, 7, F7->as_VMReg());
128 reg_def F7_H ( SOC, SOC, Op_RegF, 7, F7->as_VMReg()->next());
129 reg_def F8 ( SOC, SOC, Op_RegF, 8, F8->as_VMReg());
130 reg_def F8_H ( SOC, SOC, Op_RegF, 8, F8->as_VMReg()->next());
131 reg_def F9 ( SOC, SOC, Op_RegF, 9, F9->as_VMReg());
132 reg_def F9_H ( SOC, SOC, Op_RegF, 9, F9->as_VMReg()->next());
133 reg_def F10 ( SOC, SOC, Op_RegF, 10, F10->as_VMReg());
134 reg_def F10_H ( SOC, SOC, Op_RegF, 10, F10->as_VMReg()->next());
135 reg_def F11 ( SOC, SOC, Op_RegF, 11, F11->as_VMReg());
136 reg_def F11_H ( SOC, SOC, Op_RegF, 11, F11->as_VMReg()->next());
137 reg_def F12 ( SOC, SOC, Op_RegF, 12, F12->as_VMReg());
138 reg_def F12_H ( SOC, SOC, Op_RegF, 12, F12->as_VMReg()->next());
139 reg_def F13 ( SOC, SOC, Op_RegF, 13, F13->as_VMReg());
140 reg_def F13_H ( SOC, SOC, Op_RegF, 13, F13->as_VMReg()->next());
141 reg_def F14 ( SOC, SOC, Op_RegF, 14, F14->as_VMReg());
142 reg_def F14_H ( SOC, SOC, Op_RegF, 14, F14->as_VMReg()->next());
143 reg_def F15 ( SOC, SOC, Op_RegF, 15, F15->as_VMReg());
144 reg_def F15_H ( SOC, SOC, Op_RegF, 15, F15->as_VMReg()->next());
145 reg_def F16 ( SOC, SOC, Op_RegF, 16, F16->as_VMReg());
146 reg_def F16_H ( SOC, SOC, Op_RegF, 16, F16->as_VMReg()->next());
147 reg_def F17 ( SOC, SOC, Op_RegF, 17, F17->as_VMReg());
148 reg_def F17_H ( SOC, SOC, Op_RegF, 17, F17->as_VMReg()->next());
149 reg_def F18 ( SOC, SOC, Op_RegF, 18, F18->as_VMReg());
150 reg_def F18_H ( SOC, SOC, Op_RegF, 18, F18->as_VMReg()->next());
151 reg_def F19 ( SOC, SOC, Op_RegF, 19, F19->as_VMReg());
152 reg_def F19_H ( SOC, SOC, Op_RegF, 19, F19->as_VMReg()->next());
153 reg_def F20 ( SOC, SOC, Op_RegF, 20, F20->as_VMReg());
154 reg_def F20_H ( SOC, SOC, Op_RegF, 20, F20->as_VMReg()->next());
155 reg_def F21 ( SOC, SOC, Op_RegF, 21, F21->as_VMReg());
156 reg_def F21_H ( SOC, SOC, Op_RegF, 21, F21->as_VMReg()->next());
157 reg_def F22 ( SOC, SOC, Op_RegF, 22, F22->as_VMReg());
158 reg_def F22_H ( SOC, SOC, Op_RegF, 22, F22->as_VMReg()->next());
159 reg_def F23 ( SOC, SOC, Op_RegF, 23, F23->as_VMReg());
160 reg_def F23_H ( SOC, SOC, Op_RegF, 23, F23->as_VMReg()->next());
161 reg_def F24 ( SOC, SOC, Op_RegF, 24, F24->as_VMReg());
162 reg_def F24_H ( SOC, SOC, Op_RegF, 24, F24->as_VMReg()->next());
163 reg_def F25 ( SOC, SOC, Op_RegF, 25, F25->as_VMReg());
164 reg_def F25_H ( SOC, SOC, Op_RegF, 25, F25->as_VMReg()->next());
165 reg_def F26 ( SOC, SOC, Op_RegF, 26, F26->as_VMReg());
166 reg_def F26_H ( SOC, SOC, Op_RegF, 26, F26->as_VMReg()->next());
167 reg_def F27 ( SOC, SOC, Op_RegF, 27, F27->as_VMReg());
168 reg_def F27_H ( SOC, SOC, Op_RegF, 27, F27->as_VMReg()->next());
169 reg_def F28 ( SOC, SOC, Op_RegF, 28, F28->as_VMReg());
170 reg_def F28_H ( SOC, SOC, Op_RegF, 28, F28->as_VMReg()->next());
171 reg_def F29 ( SOC, SOC, Op_RegF, 29, F29->as_VMReg());
172 reg_def F29_H ( SOC, SOC, Op_RegF, 29, F29->as_VMReg()->next());
173 reg_def F30 ( SOC, SOC, Op_RegF, 30, F30->as_VMReg());
174 reg_def F30_H ( SOC, SOC, Op_RegF, 30, F30->as_VMReg()->next());
175 reg_def F31 ( SOC, SOC, Op_RegF, 31, F31->as_VMReg());
176 reg_def F31_H ( SOC, SOC, Op_RegF, 31, F31->as_VMReg()->next());
179 // ----------------------------
180 // Special Registers
181 // Condition Codes Flag Registers
182 reg_def MIPS_FLAG (SOC, SOC, Op_RegFlags, 1, as_Register(1)->as_VMReg());
183 //S6 is used for get_thread(S6)
184 //S5 is uesd for heapbase of compressed oop
185 alloc_class chunk0(
186 S7, S7_H,
187 S0, S0_H,
188 S1, S1_H,
189 S2, S2_H,
190 S4, S4_H,
191 S5, S5_H,
192 S6, S6_H,
193 S3, S3_H,
194 T2, T2_H,
195 T3, T3_H,
196 T8, T8_H,
197 T9, T9_H,
198 T1, T1_H, // inline_cache_reg
199 V1, V1_H,
200 A7, A7_H,
201 A6, A6_H,
202 A5, A5_H,
203 A4, A4_H,
204 V0, V0_H,
205 A3, A3_H,
206 A2, A2_H,
207 A1, A1_H,
208 A0, A0_H,
209 T0, T0_H,
210 GP, GP_H
211 RA, RA_H,
212 SP, SP_H, // stack_pointer
213 FP, FP_H // frame_pointer
214 );
216 alloc_class chunk1( F0, F0_H,
217 F1, F1_H,
218 F2, F2_H,
219 F3, F3_H,
220 F4, F4_H,
221 F5, F5_H,
222 F6, F6_H,
223 F7, F7_H,
224 F8, F8_H,
225 F9, F9_H,
226 F10, F10_H,
227 F11, F11_H,
228 F20, F20_H,
229 F21, F21_H,
230 F22, F22_H,
231 F23, F23_H,
232 F24, F24_H,
233 F25, F25_H,
234 F26, F26_H,
235 F27, F27_H,
236 F28, F28_H,
237 F19, F19_H,
238 F18, F18_H,
239 F17, F17_H,
240 F16, F16_H,
241 F15, F15_H,
242 F14, F14_H,
243 F13, F13_H,
244 F12, F12_H,
245 F29, F29_H,
246 F30, F30_H,
247 F31, F31_H);
249 alloc_class chunk2(MIPS_FLAG);
251 reg_class s_reg( S0, S1, S2, S3, S4, S5, S6, S7 );
252 reg_class s0_reg( S0 );
253 reg_class s1_reg( S1 );
254 reg_class s2_reg( S2 );
255 reg_class s3_reg( S3 );
256 reg_class s4_reg( S4 );
257 reg_class s5_reg( S5 );
258 reg_class s6_reg( S6 );
259 reg_class s7_reg( S7 );
261 reg_class t_reg( T0, T1, T2, T3, T8, T9 );
262 reg_class t0_reg( T0 );
263 reg_class t1_reg( T1 );
264 reg_class t2_reg( T2 );
265 reg_class t3_reg( T3 );
266 reg_class t8_reg( T8 );
267 reg_class t9_reg( T9 );
269 reg_class a_reg( A0, A1, A2, A3, A4, A5, A6, A7 );
270 reg_class a0_reg( A0 );
271 reg_class a1_reg( A1 );
272 reg_class a2_reg( A2 );
273 reg_class a3_reg( A3 );
274 reg_class a4_reg( A4 );
275 reg_class a5_reg( A5 );
276 reg_class a6_reg( A6 );
277 reg_class a7_reg( A7 );
279 reg_class v0_reg( V0 );
280 reg_class v1_reg( V1 );
282 reg_class sp_reg( SP, SP_H );
283 reg_class fp_reg( FP, FP_H );
285 reg_class mips_flags(MIPS_FLAG);
287 reg_class v0_long_reg( V0, V0_H );
288 reg_class v1_long_reg( V1, V1_H );
289 reg_class a0_long_reg( A0, A0_H );
290 reg_class a1_long_reg( A1, A1_H );
291 reg_class a2_long_reg( A2, A2_H );
292 reg_class a3_long_reg( A3, A3_H );
293 reg_class a4_long_reg( A4, A4_H );
294 reg_class a5_long_reg( A5, A5_H );
295 reg_class a6_long_reg( A6, A6_H );
296 reg_class a7_long_reg( A7, A7_H );
297 reg_class t0_long_reg( T0, T0_H );
298 reg_class t1_long_reg( T1, T1_H );
299 reg_class t2_long_reg( T2, T2_H );
300 reg_class t3_long_reg( T3, T3_H );
301 reg_class t8_long_reg( T8, T8_H );
302 reg_class t9_long_reg( T9, T9_H );
303 reg_class s0_long_reg( S0, S0_H );
304 reg_class s1_long_reg( S1, S1_H );
305 reg_class s2_long_reg( S2, S2_H );
306 reg_class s3_long_reg( S3, S3_H );
307 reg_class s4_long_reg( S4, S4_H );
308 reg_class s5_long_reg( S5, S5_H );
309 reg_class s6_long_reg( S6, S6_H );
310 reg_class s7_long_reg( S7, S7_H );
312 reg_class int_reg( S7, S0, S1, S2, S4, S3, T8, T2, T3, T1, V1, A7, A6, A5, A4, V0, A3, A2, A1, A0, T0 );
314 reg_class no_Ax_int_reg( S7, S0, S1, S2, S4, S3, T8, T2, T3, T1, V1, V0, T0 );
316 reg_class p_reg(
317 S7, S7_H,
318 S0, S0_H,
319 S1, S1_H,
320 S2, S2_H,
321 S4, S4_H,
322 S3, S3_H,
323 T8, T8_H,
324 T2, T2_H,
325 T3, T3_H,
326 T1, T1_H,
327 A7, A7_H,
328 A6, A6_H,
329 A5, A5_H,
330 A4, A4_H,
331 A3, A3_H,
332 A2, A2_H,
333 A1, A1_H,
334 A0, A0_H,
335 T0, T0_H
336 );
338 reg_class no_T8_p_reg(
339 S7, S7_H,
340 S0, S0_H,
341 S1, S1_H,
342 S2, S2_H,
343 S4, S4_H,
344 S3, S3_H,
345 T2, T2_H,
346 T3, T3_H,
347 T1, T1_H,
348 A7, A7_H,
349 A6, A6_H,
350 A5, A5_H,
351 A4, A4_H,
352 A3, A3_H,
353 A2, A2_H,
354 A1, A1_H,
355 A0, A0_H,
356 T0, T0_H
357 );
359 reg_class long_reg(
360 S7, S7_H,
361 S0, S0_H,
362 S1, S1_H,
363 S2, S2_H,
364 S4, S4_H,
365 S3, S3_H,
366 T8, T8_H,
367 T2, T2_H,
368 T3, T3_H,
369 T1, T1_H,
370 A7, A7_H,
371 A6, A6_H,
372 A5, A5_H,
373 A4, A4_H,
374 A3, A3_H,
375 A2, A2_H,
376 A1, A1_H,
377 A0, A0_H,
378 T0, T0_H
379 );
382 // Floating point registers.
383 // 2012/8/23 Fu: F30/F31 are used as temporary registers in D2I
384 // 2016/12/1 aoqi: F31 are not used as temporary registers in D2I
385 reg_class flt_reg( F0, F1, F2, F3, F4, F5, F6, F7, F8, F9, F10, F11, F12, F13, F14, F15, F16, F17 F18, F19, F20, F21, F22, F23, F24, F25, F26, F27, F28, F29, F31);
386 reg_class dbl_reg( F0, F0_H,
387 F1, F1_H,
388 F2, F2_H,
389 F3, F3_H,
390 F4, F4_H,
391 F5, F5_H,
392 F6, F6_H,
393 F7, F7_H,
394 F8, F8_H,
395 F9, F9_H,
396 F10, F10_H,
397 F11, F11_H,
398 F12, F12_H,
399 F13, F13_H,
400 F14, F14_H,
401 F15, F15_H,
402 F16, F16_H,
403 F17, F17_H,
404 F18, F18_H,
405 F19, F19_H,
406 F20, F20_H,
407 F21, F21_H,
408 F22, F22_H,
409 F23, F23_H,
410 F24, F24_H,
411 F25, F25_H,
412 F26, F26_H,
413 F27, F27_H,
414 F28, F28_H,
415 F29, F29_H,
416 F31, F31_H);
418 reg_class flt_arg0( F12 );
419 reg_class dbl_arg0( F12, F12_H );
420 reg_class dbl_arg1( F14, F14_H );
422 %}
424 //----------DEFINITION BLOCK---------------------------------------------------
425 // Define name --> value mappings to inform the ADLC of an integer valued name
426 // Current support includes integer values in the range [0, 0x7FFFFFFF]
427 // Format:
428 // int_def <name> ( <int_value>, <expression>);
429 // Generated Code in ad_<arch>.hpp
430 // #define <name> (<expression>)
431 // // value == <int_value>
432 // Generated code in ad_<arch>.cpp adlc_verification()
433 // assert( <name> == <int_value>, "Expect (<expression>) to equal <int_value>");
434 //
435 definitions %{
436 int_def DEFAULT_COST ( 100, 100);
437 int_def HUGE_COST (1000000, 1000000);
439 // Memory refs are twice as expensive as run-of-the-mill.
440 int_def MEMORY_REF_COST ( 200, DEFAULT_COST * 2);
442 // Branches are even more expensive.
443 int_def BRANCH_COST ( 300, DEFAULT_COST * 3);
444 // we use jr instruction to construct call, so more expensive
445 // by yjl 2/28/2006
446 int_def CALL_COST ( 500, DEFAULT_COST * 5);
447 /*
448 int_def EQUAL ( 1, 1 );
449 int_def NOT_EQUAL ( 2, 2 );
450 int_def GREATER ( 3, 3 );
451 int_def GREATER_EQUAL ( 4, 4 );
452 int_def LESS ( 5, 5 );
453 int_def LESS_EQUAL ( 6, 6 );
454 */
455 %}
459 //----------SOURCE BLOCK-------------------------------------------------------
460 // This is a block of C++ code which provides values, functions, and
461 // definitions necessary in the rest of the architecture description
463 source_hpp %{
464 // Header information of the source block.
465 // Method declarations/definitions which are used outside
466 // the ad-scope can conveniently be defined here.
467 //
468 // To keep related declarations/definitions/uses close together,
469 // we switch between source %{ }% and source_hpp %{ }% freely as needed.
471 class CallStubImpl {
473 //--------------------------------------------------------------
474 //---< Used for optimization in Compile::shorten_branches >---
475 //--------------------------------------------------------------
477 public:
478 // Size of call trampoline stub.
479 static uint size_call_trampoline() {
480 return 0; // no call trampolines on this platform
481 }
483 // number of relocations needed by a call trampoline stub
484 static uint reloc_call_trampoline() {
485 return 0; // no call trampolines on this platform
486 }
487 };
489 class HandlerImpl {
491 public:
493 static int emit_exception_handler(CodeBuffer &cbuf);
494 static int emit_deopt_handler(CodeBuffer& cbuf);
496 static uint size_exception_handler() {
497 // NativeCall instruction size is the same as NativeJump.
498 // exception handler starts out as jump and can be patched to
499 // a call be deoptimization. (4932387)
500 // Note that this value is also credited (in output.cpp) to
501 // the size of the code section.
502 // return NativeJump::instruction_size;
503 int size = NativeCall::instruction_size;
504 return round_to(size, 16);
505 }
507 #ifdef _LP64
508 static uint size_deopt_handler() {
509 int size = NativeCall::instruction_size;
510 return round_to(size, 16);
511 }
512 #else
513 static uint size_deopt_handler() {
514 // NativeCall instruction size is the same as NativeJump.
515 // exception handler starts out as jump and can be patched to
516 // a call be deoptimization. (4932387)
517 // Note that this value is also credited (in output.cpp) to
518 // the size of the code section.
519 return 5 + NativeJump::instruction_size; // pushl(); jmp;
520 }
521 #endif
522 };
524 %} // end source_hpp
526 source %{
528 #define NO_INDEX 0
529 #define RELOC_IMM64 Assembler::imm_operand
530 #define RELOC_DISP32 Assembler::disp32_operand
533 #define __ _masm.
536 // Emit exception handler code.
537 // Stuff framesize into a register and call a VM stub routine.
538 int HandlerImpl::emit_exception_handler(CodeBuffer& cbuf) {
539 /*
540 // Note that the code buffer's insts_mark is always relative to insts.
541 // That's why we must use the macroassembler to generate a handler.
542 MacroAssembler _masm(&cbuf);
543 address base = __ start_a_stub(size_exception_handler());
544 if (base == NULL) return 0; // CodeBuffer::expand failed
545 int offset = __ offset();
546 __ jump(RuntimeAddress(OptoRuntime::exception_blob()->entry_point()));
547 assert(__ offset() - offset <= (int) size_exception_handler(), "overflow");
548 __ end_a_stub();
549 return offset;
550 */
551 // Note that the code buffer's insts_mark is always relative to insts.
552 // That's why we must use the macroassembler to generate a handler.
553 MacroAssembler _masm(&cbuf);
554 address base =
555 __ start_a_stub(size_exception_handler());
556 if (base == NULL) return 0; // CodeBuffer::expand failed
557 int offset = __ offset();
559 __ block_comment("; emit_exception_handler");
561 /* 2012/9/25 FIXME Jin: According to X86, we should use direct jumpt.
562 * * However, this will trigger an assert after the 40th method:
563 * *
564 * * 39 b java.lang.Throwable::<init> (25 bytes)
565 * * --- ns java.lang.Throwable::fillInStackTrace
566 * * 40 !b java.net.URLClassLoader::findClass (29 bytes)
567 * * /vm/opto/runtime.cpp, 900 , assert(caller.is_compiled_frame(),"must be")
568 * * 40 made not entrant (2) java.net.URLClassLoader::findClass (29 bytes)
569 * *
570 * * If we change from JR to JALR, the assert will disappear, but WebClient will
571 * * fail after the 403th method with unknown reason.
572 * */
573 __ li48(T9, (long)OptoRuntime::exception_blob()->entry_point());
574 __ jr(T9);
575 __ delayed()->nop();
576 __ align(16);
577 assert(__ offset() - offset <= (int) size_exception_handler(), "overflow");
578 __ end_a_stub();
579 return offset;
580 }
582 // Emit deopt handler code.
583 int HandlerImpl::emit_deopt_handler(CodeBuffer& cbuf) {
584 // Note that the code buffer's insts_mark is always relative to insts.
585 // That's why we must use the macroassembler to generate a handler.
586 MacroAssembler _masm(&cbuf);
587 address base =
588 __ start_a_stub(size_deopt_handler());
590 // FIXME
591 if (base == NULL) return 0; // CodeBuffer::expand failed
592 int offset = __ offset();
594 __ block_comment("; emit_deopt_handler");
596 cbuf.set_insts_mark();
597 __ relocate(relocInfo::runtime_call_type);
599 __ li48(T9, (long)SharedRuntime::deopt_blob()->unpack());
600 __ jalr(T9);
601 __ delayed()->nop();
602 __ align(16);
603 assert(__ offset() - offset <= (int) size_deopt_handler(), "overflow");
604 __ end_a_stub();
605 return offset;
606 }
609 const bool Matcher::match_rule_supported(int opcode) {
610 if (!has_match_rule(opcode))
611 return false;
613 switch (opcode) {
614 //Op_CountLeadingZerosI Op_CountLeadingZerosL can be deleted, all MIPS CPUs support clz & dclz.
615 case Op_CountLeadingZerosI:
616 case Op_CountLeadingZerosL:
617 if (!UseCountLeadingZerosInstruction)
618 return false;
619 break;
620 case Op_CountTrailingZerosI:
621 case Op_CountTrailingZerosL:
622 if (!UseCountTrailingZerosInstruction)
623 return false;
624 break;
625 }
627 return true; // Per default match rules are supported.
628 }
630 //FIXME
631 // emit call stub, compiled java to interpreter
632 void emit_java_to_interp(CodeBuffer &cbuf ) {
633 // Stub is fixed up when the corresponding call is converted from calling
634 // compiled code to calling interpreted code.
635 // mov rbx,0
636 // jmp -1
638 address mark = cbuf.insts_mark(); // get mark within main instrs section
640 // Note that the code buffer's insts_mark is always relative to insts.
641 // That's why we must use the macroassembler to generate a stub.
642 MacroAssembler _masm(&cbuf);
644 address base =
645 __ start_a_stub(Compile::MAX_stubs_size);
646 if (base == NULL) return; // CodeBuffer::expand failed
647 // static stub relocation stores the instruction address of the call
649 __ relocate(static_stub_Relocation::spec(mark), 0);
651 /* 2012/10/29 Jin: Rmethod contains methodOop, it should be relocated for GC */
652 /*
653 int oop_index = __ oop_recorder()->allocate_index(NULL);
654 RelocationHolder rspec = oop_Relocation::spec(oop_index);
655 __ relocate(rspec);
656 */
658 // static stub relocation also tags the methodOop in the code-stream.
659 __ li48(S3, (long)0);
660 // This is recognized as unresolved by relocs/nativeInst/ic code
662 __ relocate(relocInfo::runtime_call_type);
664 cbuf.set_insts_mark();
665 address call_pc = (address)-1;
666 __ li48(AT, (long)call_pc);
667 __ jr(AT);
668 __ nop();
669 __ align(16);
670 __ end_a_stub();
671 // Update current stubs pointer and restore code_end.
672 }
674 // size of call stub, compiled java to interpretor
675 uint size_java_to_interp() {
676 int size = 4 * 4 + NativeCall::instruction_size; // sizeof(li48) + NativeCall::instruction_size
677 return round_to(size, 16);
678 }
680 // relocation entries for call stub, compiled java to interpreter
681 uint reloc_java_to_interp() {
682 return 16; // in emit_java_to_interp + in Java_Static_Call
683 }
685 bool Matcher::is_short_branch_offset(int rule, int br_size, int offset) {
686 if( Assembler::is_simm16(offset) ) return true;
687 else
688 {
689 assert(false, "Not implemented yet !" );
690 Unimplemented();
691 }
692 }
695 // No additional cost for CMOVL.
696 const int Matcher::long_cmove_cost() { return 0; }
698 // No CMOVF/CMOVD with SSE2
699 const int Matcher::float_cmove_cost() { return ConditionalMoveLimit; }
701 // Does the CPU require late expand (see block.cpp for description of late expand)?
702 const bool Matcher::require_postalloc_expand = false;
704 // Should the Matcher clone shifts on addressing modes, expecting them
705 // to be subsumed into complex addressing expressions or compute them
706 // into registers? True for Intel but false for most RISCs
707 const bool Matcher::clone_shift_expressions = false;
709 // Do we need to mask the count passed to shift instructions or does
710 // the cpu only look at the lower 5/6 bits anyway?
711 const bool Matcher::need_masked_shift_count = false;
713 bool Matcher::narrow_oop_use_complex_address() {
714 NOT_LP64(ShouldNotCallThis());
715 assert(UseCompressedOops, "only for compressed oops code");
716 return false;
717 }
719 bool Matcher::narrow_klass_use_complex_address() {
720 NOT_LP64(ShouldNotCallThis());
721 assert(UseCompressedClassPointers, "only for compressed klass code");
722 return false;
723 }
725 // This is UltraSparc specific, true just means we have fast l2f conversion
726 const bool Matcher::convL2FSupported(void) {
727 return true;
728 }
730 // Max vector size in bytes. 0 if not supported.
731 const int Matcher::vector_width_in_bytes(BasicType bt) {
732 assert(MaxVectorSize == 8, "");
733 return 8;
734 }
736 // Vector ideal reg
737 const int Matcher::vector_ideal_reg(int size) {
738 assert(MaxVectorSize == 8, "");
739 switch(size) {
740 case 8: return Op_VecD;
741 }
742 ShouldNotReachHere();
743 return 0;
744 }
746 // Only lowest bits of xmm reg are used for vector shift count.
747 const int Matcher::vector_shift_count_ideal_reg(int size) {
748 fatal("vector shift is not supported");
749 return Node::NotAMachineReg;
750 }
752 // Limits on vector size (number of elements) loaded into vector.
753 const int Matcher::max_vector_size(const BasicType bt) {
754 assert(is_java_primitive(bt), "only primitive type vectors");
755 return vector_width_in_bytes(bt)/type2aelembytes(bt);
756 }
758 const int Matcher::min_vector_size(const BasicType bt) {
759 return max_vector_size(bt); // Same as max.
760 }
762 // MIPS supports misaligned vectors store/load? FIXME
763 const bool Matcher::misaligned_vectors_ok() {
764 return false;
765 //return !AlignVector; // can be changed by flag
766 }
768 // Register for DIVI projection of divmodI
769 RegMask Matcher::divI_proj_mask() {
770 ShouldNotReachHere();
771 return RegMask();
772 }
774 // Register for MODI projection of divmodI
775 RegMask Matcher::modI_proj_mask() {
776 ShouldNotReachHere();
777 return RegMask();
778 }
780 // Register for DIVL projection of divmodL
781 RegMask Matcher::divL_proj_mask() {
782 ShouldNotReachHere();
783 return RegMask();
784 }
786 int Matcher::regnum_to_fpu_offset(int regnum) {
787 return regnum - 32; // The FP registers are in the second chunk
788 }
791 const bool Matcher::isSimpleConstant64(jlong value) {
792 // Will one (StoreL ConL) be cheaper than two (StoreI ConI)?.
793 return true;
794 }
797 // Return whether or not this register is ever used as an argument. This
798 // function is used on startup to build the trampoline stubs in generateOptoStub.
799 // Registers not mentioned will be killed by the VM call in the trampoline, and
800 // arguments in those registers not be available to the callee.
801 bool Matcher::can_be_java_arg( int reg ) {
802 /* Refer to: [sharedRuntime_mips_64.cpp] SharedRuntime::java_calling_convention() */
803 if ( reg == T0_num || reg == T0_H_num
804 || reg == A0_num || reg == A0_H_num
805 || reg == A1_num || reg == A1_H_num
806 || reg == A2_num || reg == A2_H_num
807 || reg == A3_num || reg == A3_H_num
808 || reg == A4_num || reg == A4_H_num
809 || reg == A5_num || reg == A5_H_num
810 || reg == A6_num || reg == A6_H_num
811 || reg == A7_num || reg == A7_H_num )
812 return true;
814 if ( reg == F12_num || reg == F12_H_num
815 || reg == F13_num || reg == F13_H_num
816 || reg == F14_num || reg == F14_H_num
817 || reg == F15_num || reg == F15_H_num
818 || reg == F16_num || reg == F16_H_num
819 || reg == F17_num || reg == F17_H_num
820 || reg == F18_num || reg == F18_H_num
821 || reg == F19_num || reg == F19_H_num )
822 return true;
824 return false;
825 }
827 bool Matcher::is_spillable_arg( int reg ) {
828 return can_be_java_arg(reg);
829 }
831 bool Matcher::use_asm_for_ldiv_by_con( jlong divisor ) {
832 return false;
833 }
835 // Register for MODL projection of divmodL
836 RegMask Matcher::modL_proj_mask() {
837 ShouldNotReachHere();
838 return RegMask();
839 }
841 const RegMask Matcher::method_handle_invoke_SP_save_mask() {
842 return FP_REG_mask();
843 }
845 // MIPS doesn't support AES intrinsics
846 const bool Matcher::pass_original_key_for_aes() {
847 return false;
848 }
850 // The address of the call instruction needs to be 16-byte aligned to
851 // ensure that it does not span a cache line so that it can be patched.
853 int CallStaticJavaDirectNode::compute_padding(int current_offset) const {
854 //lui
855 //ori
856 //dsll
857 //ori
859 //jalr
860 //nop
862 return round_to(current_offset, alignment_required()) - current_offset;
863 }
865 // The address of the call instruction needs to be 16-byte aligned to
866 // ensure that it does not span a cache line so that it can be patched.
867 int CallDynamicJavaDirectNode::compute_padding(int current_offset) const {
868 //li64 <--- skip
870 //lui
871 //ori
872 //dsll
873 //ori
875 //jalr
876 //nop
878 current_offset += 4 * 6; // skip li64
879 return round_to(current_offset, alignment_required()) - current_offset;
880 }
882 int CallLeafNoFPDirectNode::compute_padding(int current_offset) const {
883 //lui
884 //ori
885 //dsll
886 //ori
888 //jalr
889 //nop
891 return round_to(current_offset, alignment_required()) - current_offset;
892 }
894 int CallLeafDirectNode::compute_padding(int current_offset) const {
895 //lui
896 //ori
897 //dsll
898 //ori
900 //jalr
901 //nop
903 return round_to(current_offset, alignment_required()) - current_offset;
904 }
906 int CallRuntimeDirectNode::compute_padding(int current_offset) const {
907 //lui
908 //ori
909 //dsll
910 //ori
912 //jalr
913 //nop
915 return round_to(current_offset, alignment_required()) - current_offset;
916 }
918 // If CPU can load and store mis-aligned doubles directly then no fixup is
919 // needed. Else we split the double into 2 integer pieces and move it
920 // piece-by-piece. Only happens when passing doubles into C code as the
921 // Java calling convention forces doubles to be aligned.
922 const bool Matcher::misaligned_doubles_ok = false;
923 // Do floats take an entire double register or just half?
924 //const bool Matcher::float_in_double = true;
925 bool Matcher::float_in_double() { return false; }
926 // Threshold size for cleararray.
927 const int Matcher::init_array_short_size = 8 * BytesPerLong;
928 // Do ints take an entire long register or just half?
929 const bool Matcher::int_in_long = true;
930 // Is it better to copy float constants, or load them directly from memory?
931 // Intel can load a float constant from a direct address, requiring no
932 // extra registers. Most RISCs will have to materialize an address into a
933 // register first, so they would do better to copy the constant from stack.
934 const bool Matcher::rematerialize_float_constants = false;
935 // Advertise here if the CPU requires explicit rounding operations
936 // to implement the UseStrictFP mode.
937 const bool Matcher::strict_fp_requires_explicit_rounding = false;
938 // The ecx parameter to rep stos for the ClearArray node is in dwords.
939 const bool Matcher::init_array_count_is_in_bytes = false;
942 // Indicate if the safepoint node needs the polling page as an input.
943 // Since MIPS doesn't have absolute addressing, it needs.
944 bool SafePointNode::needs_polling_address_input() {
945 return true;
946 }
948 // !!!!! Special hack to get all type of calls to specify the byte offset
949 // from the start of the call to the point where the return address
950 // will point.
// Returns the distance (bytes) from the first instruction of the call
// sequence to the return address, i.e. the size of the full NativeCall
// sequence listed below (6 instructions * 4 bytes = 24).
951 int MachCallStaticJavaNode::ret_addr_offset() {
952 assert(NativeCall::instruction_size == 24, "in MachCallStaticJavaNode::ret_addr_offset");
// NOTE(review): the "16 bytes" comment below looks stale — the asserted
// NativeCall::instruction_size is 24 and six instructions are listed.
953 //The value ought to be 16 bytes.
954 //lui
955 //ori
956 //dsll
957 //ori
958 //jalr
959 //nop
960 return NativeCall::instruction_size;
961 }
// Return-address offset for a dynamic (inline-cache) Java call: the
// 4-instruction IC_Klass load followed by the NativeCall sequence.
// Result: 6*4 + NativeCall::instruction_size = 24 + 24 = 48 bytes.
963 int MachCallDynamicJavaNode::ret_addr_offset() {
964 /* 2012/9/10 Jin: must be kept in sync with Java_Dynamic_Call */
966 // return NativeCall::instruction_size;
967 assert(NativeCall::instruction_size == 24, "in MachCallDynamicJavaNode::ret_addr_offset");
// NOTE(review): the "4 + 16 bytes" comment and the 10-instruction listing
// below do not obviously add up to the returned 48 bytes — confirm against
// the actual Java_Dynamic_Call emission before relying on the comments.
968 //The value ought to be 4 + 16 bytes.
969 //lui IC_Klass,
970 //ori IC_Klass,
971 //dsll IC_Klass
972 //ori IC_Klass
973 //lui T9
974 //ori T9
975 //dsll T9
976 //ori T9
977 //jalr T9
978 //nop
979 return 6 * 4 + NativeCall::instruction_size;
981 }
983 //=============================================================================
985 // Figure out which register class each belongs in: rc_int, rc_float, rc_stack
986 enum RC { rc_bad, rc_int, rc_float, rc_stack };
// Classify an allocator register name: invalid -> rc_bad, spill slot ->
// rc_stack, GPR -> rc_int, otherwise must be an FPR -> rc_float.
987 static enum RC rc_class( OptoReg::Name reg ) {
988 if( !OptoReg::is_valid(reg) ) return rc_bad;
989 if (OptoReg::is_stack(reg)) return rc_stack;
990 VMReg r = OptoReg::as_VMReg(reg);
991 if (r->is_Register()) return rc_int;
992 assert(r->is_FloatRegister(), "must be");
993 return rc_float;
994 }
// Emit (cbuf != NULL), pretty-print (cbuf == NULL, !do_size) or size
// (do_size) a register-allocator spill copy.  Dispatches on the
// {stack, GPR, FPR} class of source and destination, and within each pair
// on 64-bit (an aligned even/odd register pair) vs 32-bit moves.  AT is
// used as the scratch register for memory-memory copies.  The returned
// value is the emitted size in bytes and must match exactly what the
// cbuf branch emits (4 bytes per instruction).
996 uint MachSpillCopyNode::implementation( CodeBuffer *cbuf, PhaseRegAlloc *ra_, bool do_size, outputStream* st ) const {
997 // Get registers to move
998 OptoReg::Name src_second = ra_->get_reg_second(in(1));
999 OptoReg::Name src_first = ra_->get_reg_first(in(1));
1000 OptoReg::Name dst_second = ra_->get_reg_second(this );
1001 OptoReg::Name dst_first = ra_->get_reg_first(this );
1003 enum RC src_second_rc = rc_class(src_second);
1004 enum RC src_first_rc = rc_class(src_first);
1005 enum RC dst_second_rc = rc_class(dst_second);
1006 enum RC dst_first_rc = rc_class(dst_first);
1008 assert(OptoReg::is_valid(src_first) && OptoReg::is_valid(dst_first), "must move at least 1 register" );
1010 // Generate spill code!
1011 int size = 0;
1013 if( src_first == dst_first && src_second == dst_second )
1014 return 0; // Self copy, no move
1016 if (src_first_rc == rc_stack) {
1017 // mem ->
1018 if (dst_first_rc == rc_stack) {
1019 // mem -> mem
1020 assert(src_second != dst_first, "overlap");
1021 if ((src_first & 1) == 0 && src_first + 1 == src_second &&
1022 (dst_first & 1) == 0 && dst_first + 1 == dst_second) {
1023 // 64-bit
1024 int src_offset = ra_->reg2offset(src_first);
1025 int dst_offset = ra_->reg2offset(dst_first);
1026 if (cbuf) {
1027 MacroAssembler _masm(cbuf);
1028 __ ld(AT, Address(SP, src_offset));
1029 __ sd(AT, Address(SP, dst_offset));
1030 #ifndef PRODUCT
1031 } else {
1032 if(!do_size){
1033 if (size != 0) st->print("\n\t");
1034 st->print("ld AT, [SP + #%d]\t# 64-bit mem-mem spill 1\n\t"
1035 "sd AT, [SP + #%d]",
1036 src_offset, dst_offset);
1037 }
1038 #endif
1039 }
1040 size += 8;
1041 } else {
1042 // 32-bit
1043 assert(!((src_first & 1) == 0 && src_first + 1 == src_second), "no transform");
1044 assert(!((dst_first & 1) == 0 && dst_first + 1 == dst_second), "no transform");
1045 // No pushl/popl, so:
1046 int src_offset = ra_->reg2offset(src_first);
1047 int dst_offset = ra_->reg2offset(dst_first);
1048 if (cbuf) {
1049 MacroAssembler _masm(cbuf);
1050 __ lw(AT, Address(SP, src_offset));
1051 __ sw(AT, Address(SP, dst_offset));
1052 #ifndef PRODUCT
1053 } else {
1054 if(!do_size){
1055 if (size != 0) st->print("\n\t");
1056 st->print("lw AT, [SP + #%d] spill 2\n\t"
1057 "sw AT, [SP + #%d]\n\t",
1058 src_offset, dst_offset);
1059 }
1060 #endif
1061 }
1062 size += 8;
1063 }
1064 return size;
1065 } else if (dst_first_rc == rc_int) {
1066 // mem -> gpr
1067 if ((src_first & 1) == 0 && src_first + 1 == src_second &&
1068 (dst_first & 1) == 0 && dst_first + 1 == dst_second) {
1069 // 64-bit
1070 int offset = ra_->reg2offset(src_first);
1071 if (cbuf) {
1072 MacroAssembler _masm(cbuf);
1073 __ ld(as_Register(Matcher::_regEncode[dst_first]), Address(SP, offset));
1074 #ifndef PRODUCT
1075 } else {
1076 if(!do_size){
1077 if (size != 0) st->print("\n\t");
1078 st->print("ld %s, [SP + #%d]\t# spill 3",
1079 Matcher::regName[dst_first],
1080 offset);
1081 }
1082 #endif
1083 }
1084 size += 4;
1085 } else {
1086 // 32-bit
1087 assert(!((src_first & 1) == 0 && src_first + 1 == src_second), "no transform");
1088 assert(!((dst_first & 1) == 0 && dst_first + 1 == dst_second), "no transform");
1089 int offset = ra_->reg2offset(src_first);
1090 if (cbuf) {
1091 MacroAssembler _masm(cbuf);
// Sign-extend for an int value (lw), zero-extend otherwise (lwu).
1092 if (this->ideal_reg() == Op_RegI)
1093 __ lw(as_Register(Matcher::_regEncode[dst_first]), Address(SP, offset));
1094 else
1095 __ lwu(as_Register(Matcher::_regEncode[dst_first]), Address(SP, offset));
1096 #ifndef PRODUCT
1097 } else {
1098 if(!do_size){
1099 if (size != 0) st->print("\n\t");
1100 if (this->ideal_reg() == Op_RegI)
1101 st->print("lw %s, [SP + #%d]\t# spill 4",
1102 Matcher::regName[dst_first],
1103 offset);
1104 else
1105 st->print("lwu %s, [SP + #%d]\t# spill 5",
1106 Matcher::regName[dst_first],
1107 offset);
1108 }
1109 #endif
1110 }
1111 size += 4;
1112 }
1113 return size;
1114 } else if (dst_first_rc == rc_float) {
1115 // mem-> xmm
1116 if ((src_first & 1) == 0 && src_first + 1 == src_second &&
1117 (dst_first & 1) == 0 && dst_first + 1 == dst_second) {
1118 // 64-bit
1119 int offset = ra_->reg2offset(src_first);
1120 if (cbuf) {
1121 MacroAssembler _masm(cbuf);
1122 __ ldc1( as_FloatRegister(Matcher::_regEncode[dst_first]), Address(SP, offset));
1123 #ifndef PRODUCT
1124 } else {
1125 if(!do_size){
1126 if (size != 0) st->print("\n\t");
1127 st->print("ldc1 %s, [SP + #%d]\t# spill 6",
1128 Matcher::regName[dst_first],
1129 offset);
1130 }
1131 #endif
1132 }
1133 size += 4;
1134 } else {
1135 // 32-bit
1136 assert(!((src_first & 1) == 0 && src_first + 1 == src_second), "no transform");
1137 assert(!((dst_first & 1) == 0 && dst_first + 1 == dst_second), "no transform");
1138 int offset = ra_->reg2offset(src_first);
1139 if (cbuf) {
1140 MacroAssembler _masm(cbuf);
1141 __ lwc1( as_FloatRegister(Matcher::_regEncode[dst_first]), Address(SP, offset));
1142 #ifndef PRODUCT
1143 } else {
1144 if(!do_size){
1145 if (size != 0) st->print("\n\t");
1146 st->print("lwc1 %s, [SP + #%d]\t# spill 7",
1147 Matcher::regName[dst_first],
1148 offset);
1149 }
1150 #endif
1151 }
1152 size += 4;
1153 }
1154 return size;
1155 }
1156 } else if (src_first_rc == rc_int) {
1157 // gpr ->
1158 if (dst_first_rc == rc_stack) {
1159 // gpr -> mem
1160 if ((src_first & 1) == 0 && src_first + 1 == src_second &&
1161 (dst_first & 1) == 0 && dst_first + 1 == dst_second) {
1162 // 64-bit
1163 int offset = ra_->reg2offset(dst_first);
1164 if (cbuf) {
1165 MacroAssembler _masm(cbuf);
1166 __ sd(as_Register(Matcher::_regEncode[src_first]), Address(SP, offset));
1167 #ifndef PRODUCT
1168 } else {
1169 if(!do_size){
1170 if (size != 0) st->print("\n\t");
1171 st->print("sd %s, [SP + #%d] # spill 8",
1172 Matcher::regName[src_first],
1173 offset);
1174 }
1175 #endif
1176 }
1177 size += 4;
1178 } else {
1179 // 32-bit
1180 assert(!((src_first & 1) == 0 && src_first + 1 == src_second), "no transform");
1181 assert(!((dst_first & 1) == 0 && dst_first + 1 == dst_second), "no transform");
1182 int offset = ra_->reg2offset(dst_first);
1183 if (cbuf) {
1184 MacroAssembler _masm(cbuf);
1185 __ sw(as_Register(Matcher::_regEncode[src_first]), Address(SP, offset));
1186 #ifndef PRODUCT
1187 } else {
1188 if(!do_size){
1189 if (size != 0) st->print("\n\t");
1190 st->print("sw %s, [SP + #%d]\t# spill 9",
1191 Matcher::regName[src_first], offset);
1192 }
1193 #endif
1194 }
1195 size += 4;
1196 }
1197 return size;
1198 } else if (dst_first_rc == rc_int) {
1199 // gpr -> gpr
1200 if ((src_first & 1) == 0 && src_first + 1 == src_second &&
1201 (dst_first & 1) == 0 && dst_first + 1 == dst_second) {
1202 // 64-bit
1203 if (cbuf) {
1204 MacroAssembler _masm(cbuf);
1205 __ move(as_Register(Matcher::_regEncode[dst_first]),
1206 as_Register(Matcher::_regEncode[src_first]));
1207 #ifndef PRODUCT
1208 } else {
1209 if(!do_size){
1210 if (size != 0) st->print("\n\t");
1211 st->print("move(64bit) %s <-- %s\t# spill 10",
1212 Matcher::regName[dst_first],
1213 Matcher::regName[src_first]);
1214 }
1215 #endif
1216 }
1217 size += 4;
1218 return size;
1219 } else {
1220 // 32-bit
1221 assert(!((src_first & 1) == 0 && src_first + 1 == src_second), "no transform");
1222 assert(!((dst_first & 1) == 0 && dst_first + 1 == dst_second), "no transform");
1223 if (cbuf) {
1224 MacroAssembler _masm(cbuf);
1225 if (this->ideal_reg() == Op_RegI)
1226 __ move_u32(as_Register(Matcher::_regEncode[dst_first]), as_Register(Matcher::_regEncode[src_first]));
1227 else
1228 __ daddu(as_Register(Matcher::_regEncode[dst_first]), as_Register(Matcher::_regEncode[src_first]), R0);
1230 #ifndef PRODUCT
1231 } else {
1232 if(!do_size){
1233 if (size != 0) st->print("\n\t");
1234 st->print("move(32-bit) %s <-- %s\t# spill 11",
1235 Matcher::regName[dst_first],
1236 Matcher::regName[src_first]);
1237 }
1238 #endif
1239 }
1240 size += 4;
1241 return size;
1242 }
1243 } else if (dst_first_rc == rc_float) {
1244 // gpr -> xmm
1245 if ((src_first & 1) == 0 && src_first + 1 == src_second &&
1246 (dst_first & 1) == 0 && dst_first + 1 == dst_second) {
1247 // 64-bit
1248 if (cbuf) {
1249 MacroAssembler _masm(cbuf);
1250 __ dmtc1(as_Register(Matcher::_regEncode[src_first]), as_FloatRegister(Matcher::_regEncode[dst_first]));
1251 #ifndef PRODUCT
1252 } else {
1253 if(!do_size){
1254 if (size != 0) st->print("\n\t");
// NOTE(review): the debug text prints dst then src, while the dmtc1
// assembly operand order is (gpr src, fpr dst) — confirm intended order.
1255 st->print("dmtc1 %s, %s\t# spill 12",
1256 Matcher::regName[dst_first],
1257 Matcher::regName[src_first]);
1258 }
1259 #endif
1260 }
1261 size += 4;
1262 } else {
1263 // 32-bit
1264 assert(!((src_first & 1) == 0 && src_first + 1 == src_second), "no transform");
1265 assert(!((dst_first & 1) == 0 && dst_first + 1 == dst_second), "no transform");
1266 if (cbuf) {
1267 MacroAssembler _masm(cbuf);
1268 __ mtc1( as_Register(Matcher::_regEncode[src_first]), as_FloatRegister(Matcher::_regEncode[dst_first]) );
1269 #ifndef PRODUCT
1270 } else {
1271 if(!do_size){
1272 if (size != 0) st->print("\n\t");
1273 st->print("mtc1 %s, %s\t# spill 13",
1274 Matcher::regName[dst_first],
1275 Matcher::regName[src_first]);
1276 }
1277 #endif
1278 }
1279 size += 4;
1280 }
1281 return size;
1282 }
1283 } else if (src_first_rc == rc_float) {
1284 // xmm ->
1285 if (dst_first_rc == rc_stack) {
1286 // xmm -> mem
1287 if ((src_first & 1) == 0 && src_first + 1 == src_second &&
1288 (dst_first & 1) == 0 && dst_first + 1 == dst_second) {
1289 // 64-bit
1290 int offset = ra_->reg2offset(dst_first);
1291 if (cbuf) {
1292 MacroAssembler _masm(cbuf);
1293 __ sdc1( as_FloatRegister(Matcher::_regEncode[src_first]), Address(SP, offset) );
1294 #ifndef PRODUCT
1295 } else {
1296 if(!do_size){
1297 if (size != 0) st->print("\n\t");
1298 st->print("sdc1 %s, [SP + #%d]\t# spill 14",
1299 Matcher::regName[src_first],
1300 offset);
1301 }
1302 #endif
1303 }
1304 size += 4;
1305 } else {
1306 // 32-bit
1307 assert(!((src_first & 1) == 0 && src_first + 1 == src_second), "no transform");
1308 assert(!((dst_first & 1) == 0 && dst_first + 1 == dst_second), "no transform");
1309 int offset = ra_->reg2offset(dst_first);
1310 if (cbuf) {
1311 MacroAssembler _masm(cbuf);
1312 __ swc1(as_FloatRegister(Matcher::_regEncode[src_first]), Address(SP, offset));
1313 #ifndef PRODUCT
1314 } else {
1315 if(!do_size){
1316 if (size != 0) st->print("\n\t");
1317 st->print("swc1 %s, [SP + #%d]\t# spill 15",
1318 Matcher::regName[src_first],
1319 offset);
1320 }
1321 #endif
1322 }
1323 size += 4;
1324 }
1325 return size;
1326 } else if (dst_first_rc == rc_int) {
1327 // xmm -> gpr
1328 if ((src_first & 1) == 0 && src_first + 1 == src_second &&
1329 (dst_first & 1) == 0 && dst_first + 1 == dst_second) {
1330 // 64-bit
1331 if (cbuf) {
1332 MacroAssembler _masm(cbuf);
1333 __ dmfc1( as_Register(Matcher::_regEncode[dst_first]), as_FloatRegister(Matcher::_regEncode[src_first]));
1334 #ifndef PRODUCT
1335 } else {
1336 if(!do_size){
1337 if (size != 0) st->print("\n\t");
1338 st->print("dmfc1 %s, %s\t# spill 16",
1339 Matcher::regName[dst_first],
1340 Matcher::regName[src_first]);
1341 }
1342 #endif
1343 }
1344 size += 4;
1345 } else {
1346 // 32-bit
1347 assert(!((src_first & 1) == 0 && src_first + 1 == src_second), "no transform");
1348 assert(!((dst_first & 1) == 0 && dst_first + 1 == dst_second), "no transform");
1349 if (cbuf) {
1350 MacroAssembler _masm(cbuf);
1351 __ mfc1( as_Register(Matcher::_regEncode[dst_first]), as_FloatRegister(Matcher::_regEncode[src_first]));
1352 #ifndef PRODUCT
1353 } else {
1354 if(!do_size){
1355 if (size != 0) st->print("\n\t");
1356 st->print("mfc1 %s, %s\t# spill 17",
1357 Matcher::regName[dst_first],
1358 Matcher::regName[src_first]);
1359 }
1360 #endif
1361 }
1362 size += 4;
1363 }
1364 return size;
1365 } else if (dst_first_rc == rc_float) {
1366 // xmm -> xmm
1367 if ((src_first & 1) == 0 && src_first + 1 == src_second &&
1368 (dst_first & 1) == 0 && dst_first + 1 == dst_second) {
1369 // 64-bit
1370 if (cbuf) {
1371 MacroAssembler _masm(cbuf);
1372 __ mov_d( as_FloatRegister(Matcher::_regEncode[dst_first]), as_FloatRegister(Matcher::_regEncode[src_first]));
1373 #ifndef PRODUCT
1374 } else {
1375 if(!do_size){
1376 if (size != 0) st->print("\n\t");
1377 st->print("mov_d %s <-- %s\t# spill 18",
1378 Matcher::regName[dst_first],
1379 Matcher::regName[src_first]);
1380 }
1381 #endif
1382 }
1383 size += 4;
1384 } else {
1385 // 32-bit
1386 assert(!((src_first & 1) == 0 && src_first + 1 == src_second), "no transform");
1387 assert(!((dst_first & 1) == 0 && dst_first + 1 == dst_second), "no transform");
1388 if (cbuf) {
1389 MacroAssembler _masm(cbuf);
1390 __ mov_s( as_FloatRegister(Matcher::_regEncode[dst_first]), as_FloatRegister(Matcher::_regEncode[src_first]));
1391 #ifndef PRODUCT
1392 } else {
1393 if(!do_size){
1394 if (size != 0) st->print("\n\t");
1395 st->print("mov_s %s <-- %s\t# spill 19",
1396 Matcher::regName[dst_first],
1397 Matcher::regName[src_first]);
1398 }
1399 #endif
1400 }
1401 size += 4;
1402 }
1403 return size;
1404 }
1405 }
// Unreachable for any valid src/dst class combination.
1407 assert(0," foo ");
1408 Unimplemented();
1409 return size;
1411 }
1413 #ifndef PRODUCT
// Pretty-print the spill copy (no emission, no sizing).
1414 void MachSpillCopyNode::format( PhaseRegAlloc *ra_, outputStream* st ) const {
1415 implementation( NULL, ra_, false, st );
1416 }
1417 #endif
// Emit the spill copy into the code buffer.
1419 void MachSpillCopyNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
1420 implementation( &cbuf, ra_, false, NULL );
1421 }
// Size in bytes of the spill copy, computed without emitting.
1423 uint MachSpillCopyNode::size(PhaseRegAlloc *ra_) const {
1424 return implementation( NULL, ra_, true, NULL );
1425 }
1427 //=============================================================================
1428 #
1430 #ifndef PRODUCT
// Debug-only printer for the breakpoint node.
1431 void MachBreakpointNode::format( PhaseRegAlloc *, outputStream* st ) const {
1432 st->print("INT3");
1433 }
1434 #endif
// Emit the platform breakpoint (MacroAssembler::int3()).
1436 void MachBreakpointNode::emit(CodeBuffer &cbuf, PhaseRegAlloc* ra_) const {
1437 MacroAssembler _masm(&cbuf);
1438 __ int3();
1439 }
1441 uint MachBreakpointNode::size(PhaseRegAlloc* ra_) const {
1442 return MachNode::size(ra_);
1443 }
1446 //=============================================================================
1447 #ifndef PRODUCT
// Debug-only pretty-printer for the method epilog.  Mirrors emit():
// release the frame, restore FP/RA (a single gslq on Loongson, two ld's
// otherwise), then show the return-poll safepoint for method compiles.
1448 void MachEpilogNode::format( PhaseRegAlloc *ra_, outputStream* st ) const {
1449 Compile *C = ra_->C;
1450 int framesize = C->frame_size_in_bytes();
1452 assert((framesize & (StackAlignmentInBytes-1)) == 0, "frame size not aligned");
// Fixed typo in the debug text: "Rlease" -> "Release".
1454 st->print("daddiu SP, SP, %d # Release stack @ MachEpilogNode",framesize);
1455 st->cr(); st->print("\t");
1456 if (UseLoongsonISA) {
1457 st->print("gslq RA, FP, SP, %d # Restore FP & RA @ MachEpilogNode", -wordSize*2);
1458 } else {
1459 st->print("ld RA, SP, %d # Restore RA @ MachEpilogNode", -wordSize);
1460 st->cr(); st->print("\t");
1461 st->print("ld FP, SP, %d # Restore FP @ MachEpilogNode", -wordSize*2);
1462 }
1464 if( do_polling() && C->is_method_compilation() ) {
1465 st->print("Poll Safepoint # MachEpilogNode");
1466 }
1467 }
1468 #endif
// Emit the method epilog: pop the frame, restore RA/FP from below the new
// SP (gslq pairs the two loads on Loongson), then, for method compiles,
// load from the polling page with a poll_return relocation so the VM can
// trap returning threads at a safepoint.
1470 void MachEpilogNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
1471 Compile *C = ra_->C;
1472 MacroAssembler _masm(&cbuf);
1473 int framesize = C->frame_size_in_bytes();
1475 assert((framesize & (StackAlignmentInBytes-1)) == 0, "frame size not aligned");
1477 __ daddiu(SP, SP, framesize);
1479 if (UseLoongsonISA) {
1480 __ gslq(RA, FP, SP, -wordSize*2);
1481 } else {
1482 __ ld(RA, SP, -wordSize );
1483 __ ld(FP, SP, -wordSize*2 );
1484 }
1486 /* 2012/11/19 Jin: The epilog in a RuntimeStub should not contain a safepoint */
1487 if( do_polling() && C->is_method_compilation() ) {
1488 #ifndef OPT_SAFEPOINT
// Materialize the polling-page address, then do the relocated poll load.
1489 __ set64(AT, (long)os::get_polling_page());
1490 __ relocate(relocInfo::poll_return_type);
1491 __ lw(AT, AT, 0);
1492 #else
1493 __ lui(AT, Assembler::split_high((intptr_t)os::get_polling_page()));
1494 __ relocate(relocInfo::poll_return_type);
1495 __ lw(AT, AT, Assembler::split_low((intptr_t)os::get_polling_page()));
1496 #endif
1497 }
1498 }
// Epilog size varies (Loongson vs plain, polling vs not); fall back to the
// generic emit-and-measure path.
1500 uint MachEpilogNode::size(PhaseRegAlloc *ra_) const {
1501 return MachNode::size(ra_); // too many variables; just compute it the hard way fujie debug
1502 }
1504 int MachEpilogNode::reloc() const {
1505 return 0; // a large enough number
1506 }
1508 const Pipeline * MachEpilogNode::pipeline() const {
1509 return MachNode::pipeline_class();
1510 }
// Offset of the safepoint poll within the epilog (poll is at the start here).
1512 int MachEpilogNode::safepoint_offset() const { return 0; }
1514 //=============================================================================
1516 #ifndef PRODUCT
// Debug-only printer: a BoxLock materializes the address of its stack
// monitor slot into a register (reg = SP + offset).
1517 void BoxLockNode::format( PhaseRegAlloc *ra_, outputStream* st ) const {
1518 int offset = ra_->reg2offset(in_RegMask(0).find_first_elem());
1519 int reg = ra_->get_reg_first(this);
1520 st->print("ADDI %s, SP, %d @BoxLockNode",Matcher::regName[reg],offset);
1521 }
1522 #endif
// One 4-byte instruction (the addi in emit()).
1525 uint BoxLockNode::size(PhaseRegAlloc *ra_) const {
1526 return 4;
1527 }
// Compute the address of this node's stack lock slot: reg = SP + offset.
// NOTE(review): addi takes a 16-bit signed immediate — this assumes the
// monitor slot offset always fits in simm16; confirm for very large frames.
1529 void BoxLockNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
1530 MacroAssembler _masm(&cbuf);
1531 int offset = ra_->reg2offset(in_RegMask(0).find_first_elem());
1532 int reg = ra_->get_encode(this);
1534 __ addi(as_Register(reg), SP, offset);
1535 /*
1536 if( offset >= 128 ) {
1537 emit_opcode(cbuf, 0x8D); // LEA reg,[SP+offset]
1538 emit_rm(cbuf, 0x2, reg, 0x04);
1539 emit_rm(cbuf, 0x0, 0x04, SP_enc);
1540 emit_d32(cbuf, offset);
1541 }
1542 else {
1543 emit_opcode(cbuf, 0x8D); // LEA reg,[SP+offset]
1544 emit_rm(cbuf, 0x1, reg, 0x04);
1545 emit_rm(cbuf, 0x0, 0x04, SP_enc);
1546 emit_d8(cbuf, offset);
1547 }
1548 */
1549 }
1552 //static int sizeof_FFree_Float_Stack_All = -1;
// Return-address offset for a runtime call: the full 6-instruction
// NativeCall sequence (li48 expansion + jalr + delay-slot nop) = 24 bytes.
1554 int MachCallRuntimeNode::ret_addr_offset() {
1555 //lui
1556 //ori
1557 //dsll
1558 //ori
1559 //jalr
1560 //nop
1561 assert(NativeCall::instruction_size == 24, "in MachCallRuntimeNode::ret_addr_offset()");
1562 return NativeCall::instruction_size;
1563 // return 16;
1564 }
1570 //=============================================================================
1571 #ifndef PRODUCT
// Debug-only printer: _count nops = 4*_count bytes of padding.
1572 void MachNopNode::format( PhaseRegAlloc *, outputStream* st ) const {
1573 st->print("NOP \t# %d bytes pad for loops and calls", 4 * _count);
1574 }
1575 #endif
// Emit _count nop instructions.
1577 void MachNopNode::emit(CodeBuffer &cbuf, PhaseRegAlloc * ) const {
1578 MacroAssembler _masm(&cbuf);
1579 int i = 0;
1580 for(i = 0; i < _count; i++)
1581 __ nop();
1582 }
// Size must agree exactly with emit(): 4 bytes per nop.
1584 uint MachNopNode::size(PhaseRegAlloc *) const {
1585 return 4 * _count;
1586 }
1587 const Pipeline* MachNopNode::pipeline() const {
1588 return MachNode::pipeline_class();
1589 }
1591 //=============================================================================
1593 //=============================================================================
1594 #ifndef PRODUCT
// Debug-only printer for the unverified entry point (inline-cache check):
// load the receiver's klass, compare with the cached klass, and jump to
// the IC-miss stub on mismatch.
1595 void MachUEPNode::format( PhaseRegAlloc *ra_, outputStream* st ) const {
1596 st->print_cr("load_klass(AT, T0)");
1597 st->print_cr("\tbeq(AT, iCache, L)");
1598 st->print_cr("\tnop");
1599 st->print_cr("\tjmp(SharedRuntime::get_ic_miss_stub(), relocInfo::runtime_call_type)");
1600 st->print_cr("\tnop");
1601 st->print_cr("\tnop");
1602 st->print_cr(" L:");
1603 }
1604 #endif
1604 #endif
// Unverified entry point: check that the receiver's (T0) klass matches the
// inline-cache klass register; on mismatch jump to the shared IC-miss stub.
// Falls through to the verified entry on a hit.
1607 void MachUEPNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
1608 MacroAssembler _masm(&cbuf);
1609 #ifdef ASSERT
1610 //uint code_size = cbuf.code_size();
1611 #endif
1612 int ic_reg = Matcher::inline_cache_reg_encode();
1613 Label L;
1614 Register receiver = T0;
1615 Register iCache = as_Register(ic_reg);
1616 __ load_klass(AT, receiver);
1617 __ beq(AT, iCache, L);
1618 __ nop();
// Miss: tail-jump (li48 + jr, relocated as a runtime call) to the IC-miss stub.
1620 __ relocate(relocInfo::runtime_call_type);
1621 __ li48(T9, (long)SharedRuntime::get_ic_miss_stub());
1622 __ jr(T9);
1623 __ nop();
1625 /* WARNING these NOPs are critical so that verified entry point is properly
1626 * 8 bytes aligned for patching by NativeJump::patch_verified_entry() */
1627 __ align(CodeEntryAlignment);
1628 __ bind(L);
1629 }
1631 uint MachUEPNode::size(PhaseRegAlloc *ra_) const {
1632 return MachNode::size(ra_);
1633 }
1637 //=============================================================================
// The constant-table base lives in a pointer register.
1639 const RegMask& MachConstantBaseNode::_out_RegMask = P_REG_mask();
// The table base is materialized as an absolute address (li48), so no
// extra offset is folded into constant references.
1641 int Compile::ConstantTable::calculate_table_base_offset() const {
1642 return 0; // absolute addressing, no offset
1643 }
1645 bool MachConstantBaseNode::requires_postalloc_expand() const { return false; }
1646 void MachConstantBaseNode::postalloc_expand(GrowableArray <Node *> *nodes, PhaseRegAlloc *ra_) {
1647 ShouldNotReachHere();
1648 }
// Materialize the constant-table base address into this node's register
// with a li48 sequence, relocated as internal_pc so it survives code moves.
// Emits nothing when the method has no constants.
1650 void MachConstantBaseNode::emit(CodeBuffer& cbuf, PhaseRegAlloc* ra_) const {
1651 Compile* C = ra_->C;
1652 Compile::ConstantTable& constant_table = C->constant_table();
1653 MacroAssembler _masm(&cbuf);
1655 Register Rtoc = as_Register(ra_->get_encode(this));
1656 CodeSection* consts_section = __ code()->consts();
1657 int consts_size = consts_section->align_at_start(consts_section->size());
1658 assert(constant_table.size() == consts_size, "must be equal");
1660 if (consts_section->size()) {
1661 // Materialize the constant table base.
1662 address baseaddr = consts_section->start() + -(constant_table.table_base_offset());
1663 // RelocationHolder rspec = internal_word_Relocation::spec(baseaddr);
1664 __ relocate(relocInfo::internal_pc_type);
1665 __ li48(Rtoc, (long)baseaddr);
1666 }
1667 }
// NOTE(review): size() reports 16 bytes unconditionally, while emit()
// produces 0 bytes when the consts section is empty — confirm callers
// tolerate the over-estimate.
1669 uint MachConstantBaseNode::size(PhaseRegAlloc* ra_) const {
1670 // li48 (4 insts)
1671 return 4 * 4;
1672 }
1674 #ifndef PRODUCT
1675 void MachConstantBaseNode::format(PhaseRegAlloc* ra_, outputStream* st) const {
1676 Register r = as_Register(ra_->get_encode(this));
1677 st->print("li48 %s, &constanttable (constant table base) @ MachConstantBaseNode", r->name());
1678 }
1679 #endif
1682 //=============================================================================
1683 #ifndef PRODUCT
// Debug-only pretty-printer for the method prolog: optional stack bang,
// save RA/FP below SP (single gssq on Loongson, two sd's otherwise), set
// up FP, then allocate the frame.
1684 void MachPrologNode::format( PhaseRegAlloc *ra_, outputStream* st ) const {
1685 Compile* C = ra_->C;
1687 int framesize = C->frame_size_in_bytes();
1688 int bangsize = C->bang_size_in_bytes();
1689 assert((framesize & (StackAlignmentInBytes-1)) == 0, "frame size not aligned");
1691 // Calls to C2R adapters often do not accept exceptional returns.
1692 // We require that their callers must bang for them. But be careful, because
1693 // some VM calls (such as call site linkage) can use several kilobytes of
1694 // stack. But the stack safety zone should account for that.
1695 // See bugs 4446381, 4468289, 4497237.
1696 if (C->need_stack_bang(bangsize)) {
1697 st->print_cr("# stack bang"); st->print("\t");
1698 }
1699 if (UseLoongsonISA) {
1700 st->print("gssq RA, FP, %d(SP) @ MachPrologNode\n\t", -wordSize*2);
1701 } else {
1702 st->print("sd RA, %d(SP) @ MachPrologNode\n\t", -wordSize);
1703 st->print("sd FP, %d(SP) @ MachPrologNode\n\t", -wordSize*2);
1704 }
1705 st->print("daddiu FP, SP, -%d \n\t", wordSize*2);
1706 st->print("daddiu SP, SP, -%d \t",framesize);
1707 }
1708 #endif
// Emit the method prolog: stack-overflow bang (when required), save RA/FP
// below the incoming SP (gssq pairs the stores on Loongson), establish FP,
// allocate the frame, and leave two nops so patch_verified_entry() has
// room.  Also records the frame-complete offset and the constant-table
// base offset.
1711 void MachPrologNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
1712 Compile* C = ra_->C;
1713 MacroAssembler _masm(&cbuf);
1715 int framesize = C->frame_size_in_bytes();
1716 int bangsize = C->bang_size_in_bytes();
1718 // __ verified_entry(framesize, C->need_stack_bang(bangsize)?bangsize:0, false);
1720 assert((framesize & (StackAlignmentInBytes-1)) == 0, "frame size not aligned");
// Fix: bang using bang_size_in_bytes(), not frame_size_in_bytes().
// 'bangsize' was computed above but never used, and format() already
// tests need_stack_bang(bangsize) — this makes emit() agree with it
// (and with the other C2 ports).
1722 if (C->need_stack_bang(bangsize)) {
1723 __ generate_stack_overflow_check(bangsize);
1724 }
1726 if (UseLoongsonISA) {
1727 __ gssq(RA, FP, SP, -wordSize*2);
1728 } else {
1729 __ sd(RA, SP, -wordSize);
1730 __ sd(FP, SP, -wordSize*2);
1731 }
1732 __ daddiu(FP, SP, -wordSize*2);
1733 __ daddiu(SP, SP, -framesize);
1734 __ nop(); /* 2013.10.22 Jin: Make enough room for patch_verified_entry() */
1735 __ nop();
1737 C->set_frame_complete(cbuf.insts_size());
1738 if (C->has_mach_constant_base_node()) {
1739 // NOTE: We set the table base offset here because users might be
1740 // emitted before MachConstantBaseNode.
1741 Compile::ConstantTable& constant_table = C->constant_table();
1742 constant_table.set_table_base_offset(constant_table.calculate_table_base_offset());
1743 }
1745 }
// Prolog size varies (bang, Loongson pairing); use the generic
// emit-and-measure path.
1748 uint MachPrologNode::size(PhaseRegAlloc *ra_) const {
1749 //fprintf(stderr, "\nPrologNode::size(ra_)= %d \n", MachNode::size(ra_));//fujie debug
1750 return MachNode::size(ra_); // too many variables; just compute it the hard way
1751 }
1753 int MachPrologNode::reloc() const {
1754 return 0; // a large enough number
1755 }
1757 %}
1759 //----------ENCODING BLOCK-----------------------------------------------------
1760 // This block specifies the encoding classes used by the compiler to output
1761 // byte streams. Encoding classes generate functions which are called by
1762 // Machine Instruction Nodes in order to generate the bit encoding of the
1763 // instruction. Operands specify their base encoding interface with the
1764 // interface keyword. There are currently four supported interfaces:
1765 // REG_INTER, CONST_INTER, MEMORY_INTER, & COND_INTER. REG_INTER causes an
1766 // operand to generate a function which returns its register number when
1767 // queried. CONST_INTER causes an operand to generate a function which
1768 // returns the value of the constant when queried. MEMORY_INTER causes an
1769 // operand to generate four functions which return the Base Register, the
1770 // Index Register, the Scale Value, and the Offset Value of the operand when
1771 // queried. COND_INTER causes an operand to generate six functions which
1772 // return the encoding code (ie - encoding bits for the instruction)
1773 // associated with each basic boolean condition for a conditional instruction.
1774 // Instructions specify two basic values for encoding. They use the
1775 // ins_encode keyword to specify their encoding class (which must be one of
1776 // the class names specified in the encoding block), and they use the
1777 // opcode keyword to specify, in order, their primary, secondary, and
1778 // tertiary opcode. Only the opcode sections which a particular instruction
1779 // needs for encoding need to be specified.
1780 encode %{
1781 /*
1782 Alias:
1783 1044 b java.io.ObjectInputStream::readHandle (130 bytes)
1784 118 B14: # B19 B15 <- B13 Freq: 0.899955
1785 118 add S1, S2, V0 #@addP_reg_reg
1786 11c lb S0, [S1 + #-8257524] #@loadB
1787 120 BReq S0, #3, B19 #@branchConI_reg_imm P=0.100000 C=-1.000000
1788 */
1789 //Load byte signed
1789 //Load byte signed
// Encoding for a sign-extending byte load from a [base + index<<scale + disp]
// address.  Dispatches on: indexed vs base-only, simm16 vs large disp, and
// Loongson (fused gslbx load) vs plain MIPS (add into AT, then lb).  AT and
// T9 are the scratch registers.
// NOTE(review): the plain-MIPS paths use addu (32-bit add) for address
// arithmetic while other encodings use daddu — confirm addresses here are
// guaranteed to be proper sign-extended 32-bit values on MIPS64.
1790 enc_class load_B_enc (mRegI dst, memory mem) %{
1791 MacroAssembler _masm(&cbuf);
1792 int dst = $dst$$reg;
1793 int base = $mem$$base;
1794 int index = $mem$$index;
1795 int scale = $mem$$scale;
1796 int disp = $mem$$disp;
1798 if( index != 0 ) {
1799 if( Assembler::is_simm16(disp) ) {
1800 if( UseLoongsonISA ) {
1801 if (scale == 0) {
1802 __ gslbx(as_Register(dst), as_Register(base), as_Register(index), disp);
1803 } else {
1804 __ dsll(AT, as_Register(index), scale);
1805 __ gslbx(as_Register(dst), as_Register(base), AT, disp);
1806 }
1807 } else {
1808 if (scale == 0) {
1809 __ addu(AT, as_Register(base), as_Register(index));
1810 } else {
1811 __ dsll(AT, as_Register(index), scale);
1812 __ addu(AT, as_Register(base), AT);
1813 }
1814 __ lb(as_Register(dst), AT, disp);
1815 }
1816 } else {
// Displacement does not fit in simm16: materialize it in T9.
1817 if (scale == 0) {
1818 __ addu(AT, as_Register(base), as_Register(index));
1819 } else {
1820 __ dsll(AT, as_Register(index), scale);
1821 __ addu(AT, as_Register(base), AT);
1822 }
1823 __ move(T9, disp);
1824 if( UseLoongsonISA ) {
1825 __ gslbx(as_Register(dst), AT, T9, 0);
1826 } else {
1827 __ addu(AT, AT, T9);
1828 __ lb(as_Register(dst), AT, 0);
1829 }
1830 }
1831 } else {
1832 if( Assembler::is_simm16(disp) ) {
1833 __ lb(as_Register(dst), as_Register(base), disp);
1834 } else {
1835 __ move(T9, disp);
1836 if( UseLoongsonISA ) {
1837 __ gslbx(as_Register(dst), as_Register(base), T9, 0);
1838 } else {
1839 __ addu(AT, as_Register(base), T9);
1840 __ lb(as_Register(dst), AT, 0);
1841 }
1842 }
1843 }
1844 %}
1846 //Load byte unsigned
1846 //Load byte unsigned
// Encoding for a zero-extending byte load from [base + index<<scale + disp].
// Unlike load_B_enc, this has no Loongson fused-load fast path: the address
// is always formed in AT (daddu), with T9 holding a displacement that does
// not fit in simm16.
1847 enc_class load_UB_enc (mRegI dst, memory mem) %{
1848 MacroAssembler _masm(&cbuf);
1849 int dst = $dst$$reg;
1850 int base = $mem$$base;
1851 int index = $mem$$index;
1852 int scale = $mem$$scale;
1853 int disp = $mem$$disp;
1855 if( index != 0 ) {
1856 if (scale == 0) {
1857 __ daddu(AT, as_Register(base), as_Register(index));
1858 } else {
1859 __ dsll(AT, as_Register(index), scale);
1860 __ daddu(AT, as_Register(base), AT);
1861 }
1862 if( Assembler::is_simm16(disp) ) {
1863 __ lbu(as_Register(dst), AT, disp);
1864 } else {
1865 __ move(T9, disp);
1866 __ daddu(AT, AT, T9);
1867 __ lbu(as_Register(dst), AT, 0);
1868 }
1869 } else {
1870 if( Assembler::is_simm16(disp) ) {
1871 __ lbu(as_Register(dst), as_Register(base), disp);
1872 } else {
1873 __ move(T9, disp);
1874 __ daddu(AT, as_Register(base), T9);
1875 __ lbu(as_Register(dst), AT, 0);
1876 }
1877 }
1878 %}
// Encoding for a byte store of a register to [base + index<<scale + disp].
// Loongson's fused gssbx is used when the displacement fits its 8-bit
// signed immediate (is_simm(disp, 8)) or after materializing the
// displacement in T9; otherwise the address is formed in AT and a plain
// sb is emitted.
1880 enc_class store_B_reg_enc (memory mem, mRegI src) %{
1881 MacroAssembler _masm(&cbuf);
1882 int src = $src$$reg;
1883 int base = $mem$$base;
1884 int index = $mem$$index;
1885 int scale = $mem$$scale;
1886 int disp = $mem$$disp;
1888 if( index != 0 ) {
1889 if (scale == 0) {
1890 if( Assembler::is_simm(disp, 8) ) {
1891 if (UseLoongsonISA) {
1892 __ gssbx(as_Register(src), as_Register(base), as_Register(index), disp);
1893 } else {
1894 __ addu(AT, as_Register(base), as_Register(index));
1895 __ sb(as_Register(src), AT, disp);
1896 }
1897 } else if( Assembler::is_simm16(disp) ) {
1898 __ addu(AT, as_Register(base), as_Register(index));
1899 __ sb(as_Register(src), AT, disp);
1900 } else {
1901 __ addu(AT, as_Register(base), as_Register(index));
1902 __ move(T9, disp);
1903 if (UseLoongsonISA) {
1904 __ gssbx(as_Register(src), AT, T9, 0);
1905 } else {
1906 __ addu(AT, AT, T9);
1907 __ sb(as_Register(src), AT, 0);
1908 }
1909 }
1910 } else {
// Scaled index: shift into AT first, then the same three disp cases.
1911 __ dsll(AT, as_Register(index), scale);
1912 if( Assembler::is_simm(disp, 8) ) {
1913 if (UseLoongsonISA) {
1914 __ gssbx(as_Register(src), AT, as_Register(base), disp);
1915 } else {
1916 __ addu(AT, as_Register(base), AT);
1917 __ sb(as_Register(src), AT, disp);
1918 }
1919 } else if( Assembler::is_simm16(disp) ) {
1920 __ addu(AT, as_Register(base), AT);
1921 __ sb(as_Register(src), AT, disp);
1922 } else {
1923 __ addu(AT, as_Register(base), AT);
1924 __ move(T9, disp);
1925 if (UseLoongsonISA) {
1926 __ gssbx(as_Register(src), AT, T9, 0);
1927 } else {
1928 __ addu(AT, AT, T9);
1929 __ sb(as_Register(src), AT, 0);
1930 }
1931 }
1932 }
1933 } else {
1934 if( Assembler::is_simm16(disp) ) {
1935 __ sb(as_Register(src), as_Register(base), disp);
1936 } else {
1937 __ move(T9, disp);
1938 if (UseLoongsonISA) {
1939 __ gssbx(as_Register(src), as_Register(base), T9, 0);
1940 } else {
1941 __ addu(AT, as_Register(base), T9);
1942 __ sb(as_Register(src), AT, 0);
1943 }
1944 }
1945 }
1946 %}
  // Store Byte (8-bit) immediate.
  // Emits "store constant byte to [base + (index << scale) + disp]".
  // A zero constant is stored straight from the hard-wired zero register R0;
  // any other constant is materialized into a scratch register (T9 or AT)
  // first.  On Loongson CPUs the gssbx (store-byte-indexed) instruction is
  // preferred because it folds the base+index add into the store when the
  // displacement fits in a signed 8-bit immediate.
  enc_class store_B_immI_enc (memory mem, immI8 src) %{
    MacroAssembler _masm(&cbuf);
    int base = $mem$$base;
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;
    int value = $src$$constant;

    if( index != 0 ) {
      if (!UseLoongsonISA) {
        // Plain MIPS: compute the full effective address into AT, then sb.
        if (scale == 0) {
          __ daddu(AT, as_Register(base), as_Register(index));
        } else {
          __ dsll(AT, as_Register(index), scale);
          __ daddu(AT, as_Register(base), AT);
        }
        if( Assembler::is_simm16(disp) ) {
          if (value == 0) {
            __ sb(R0, AT, disp);
          } else {
            __ move(T9, value);
            __ sb(T9, AT, disp);
          }
        } else {
          // Displacement too large for the sb immediate: add it in first.
          if (value == 0) {
            __ move(T9, disp);
            __ daddu(AT, AT, T9);
            __ sb(R0, AT, 0);
          } else {
            __ move(T9, disp);
            __ daddu(AT, AT, T9);
            __ move(T9, value);
            __ sb(T9, AT, 0);
          }
        }
      } else {
        // Loongson: use gssbx so the reg+reg add happens inside the store.
        if (scale == 0) {
          if( Assembler::is_simm(disp, 8) ) {
            if (value == 0) {
              __ gssbx(R0, as_Register(base), as_Register(index), disp);
            } else {
              __ move(T9, value);
              __ gssbx(T9, as_Register(base), as_Register(index), disp);
            }
          } else if( Assembler::is_simm16(disp) ) {
            __ daddu(AT, as_Register(base), as_Register(index));
            if (value == 0) {
              __ sb(R0, AT, disp);
            } else {
              __ move(T9, value);
              __ sb(T9, AT, disp);
            }
          } else {
            if (value == 0) {
              __ daddu(AT, as_Register(base), as_Register(index));
              __ move(T9, disp);
              __ gssbx(R0, AT, T9, 0);
            } else {
              // Both scratch registers are live: AT accumulates base+disp,
              // T9 holds the value; index supplies gssbx's third operand.
              __ move(AT, disp);
              __ move(T9, value);
              __ daddu(AT, as_Register(base), AT);
              __ gssbx(T9, AT, as_Register(index), 0);
            }
          }

        } else {

          if( Assembler::is_simm(disp, 8) ) {
            __ dsll(AT, as_Register(index), scale);
            if (value == 0) {
              __ gssbx(R0, as_Register(base), AT, disp);
            } else {
              __ move(T9, value);
              __ gssbx(T9, as_Register(base), AT, disp);
            }
          } else if( Assembler::is_simm16(disp) ) {
            __ dsll(AT, as_Register(index), scale);
            __ daddu(AT, as_Register(base), AT);
            if (value == 0) {
              __ sb(R0, AT, disp);
            } else {
              __ move(T9, value);
              __ sb(T9, AT, disp);
            }
          } else {
            __ dsll(AT, as_Register(index), scale);
            if (value == 0) {
              __ daddu(AT, as_Register(base), AT);
              __ move(T9, disp);
              __ gssbx(R0, AT, T9, 0);
            } else {
              // AT = (index << scale) + disp; gssbx adds base at store time.
              __ move(T9, disp);
              __ daddu(AT, AT, T9);
              __ move(T9, value);
              __ gssbx(T9, as_Register(base), AT, 0);
            }
          }
        }
      }
    } else {
      // No index register: effective address is base + disp.
      if( Assembler::is_simm16(disp) ) {
        if (value == 0) {
          __ sb(R0, as_Register(base), disp);
        } else {
          __ move(AT, value);
          __ sb(AT, as_Register(base), disp);
        }
      } else {
        if (value == 0) {
          __ move(T9, disp);
          if (UseLoongsonISA) {
            __ gssbx(R0, as_Register(base), T9, 0);
          } else {
            __ daddu(AT, as_Register(base), T9);
            __ sb(R0, AT, 0);
          }
        } else {
          __ move(T9, disp);
          if (UseLoongsonISA) {
            __ move(AT, value);
            __ gssbx(AT, as_Register(base), T9, 0);
          } else {
            __ daddu(AT, as_Register(base), T9);
            __ move(T9, value);
            __ sb(T9, AT, 0);
          }
        }
      }
    }
  %}
  // Store Byte (8-bit) immediate, followed by a full memory barrier.
  // Same addressing scheme as store_B_immI_enc (without the Loongson gssbx
  // shortcuts) plus a trailing sync, for volatile/ordered byte stores.
  enc_class store_B_immI_enc_sync (memory mem, immI8 src) %{
    MacroAssembler _masm(&cbuf);
    int base = $mem$$base;
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;
    int value = $src$$constant;

    if( index != 0 ) {
      // Fold index (optionally scaled) into AT.
      if (scale == 0) {
        __ daddu(AT, as_Register(base), as_Register(index));
      } else {
        __ dsll(AT, as_Register(index), scale);
        __ daddu(AT, as_Register(base), AT);
      }
      if( Assembler::is_simm16(disp) ) {
        if (value == 0) {
          __ sb(R0, AT, disp);              // zero comes straight from R0
        } else {
          __ move(T9, value);
          __ sb(T9, AT, disp);
        }
      } else {
        // Displacement too large for the sb immediate: add it in first.
        if (value == 0) {
          __ move(T9, disp);
          __ daddu(AT, AT, T9);
          __ sb(R0, AT, 0);
        } else {
          __ move(T9, disp);
          __ daddu(AT, AT, T9);
          __ move(T9, value);
          __ sb(T9, AT, 0);
        }
      }
    } else {
      // No index register: effective address is base + disp.
      if( Assembler::is_simm16(disp) ) {
        if (value == 0) {
          __ sb(R0, as_Register(base), disp);
        } else {
          __ move(AT, value);
          __ sb(AT, as_Register(base), disp);
        }
      } else {
        if (value == 0) {
          __ move(T9, disp);
          __ daddu(AT, as_Register(base), T9);
          __ sb(R0, AT, 0);
        } else {
          __ move(T9, disp);
          __ daddu(AT, as_Register(base), T9);
          __ move(T9, value);
          __ sb(T9, AT, 0);
        }
      }
    }

    // Order the store against subsequent memory operations.
    __ sync();
  %}
2140 // Load Short (16bit signed)
2141 enc_class load_S_enc (mRegI dst, memory mem) %{
2142 MacroAssembler _masm(&cbuf);
2143 int dst = $dst$$reg;
2144 int base = $mem$$base;
2145 int index = $mem$$index;
2146 int scale = $mem$$scale;
2147 int disp = $mem$$disp;
2149 if( index != 0 ) {
2150 if (scale == 0) {
2151 __ daddu(AT, as_Register(base), as_Register(index));
2152 } else {
2153 __ dsll(AT, as_Register(index), scale);
2154 __ daddu(AT, as_Register(base), AT);
2155 }
2156 if( Assembler::is_simm16(disp) ) {
2157 __ lh(as_Register(dst), AT, disp);
2158 } else {
2159 __ move(T9, disp);
2160 __ addu(AT, AT, T9);
2161 __ lh(as_Register(dst), AT, 0);
2162 }
2163 } else {
2164 if( Assembler::is_simm16(disp) ) {
2165 __ lh(as_Register(dst), as_Register(base), disp);
2166 } else {
2167 __ move(T9, disp);
2168 __ addu(AT, as_Register(base), T9);
2169 __ lh(as_Register(dst), AT, 0);
2170 }
2171 }
2172 %}
2174 // Load Char (16bit unsigned)
2175 enc_class load_C_enc (mRegI dst, memory mem) %{
2176 MacroAssembler _masm(&cbuf);
2177 int dst = $dst$$reg;
2178 int base = $mem$$base;
2179 int index = $mem$$index;
2180 int scale = $mem$$scale;
2181 int disp = $mem$$disp;
2183 if( index != 0 ) {
2184 if (scale == 0) {
2185 __ daddu(AT, as_Register(base), as_Register(index));
2186 } else {
2187 __ dsll(AT, as_Register(index), scale);
2188 __ daddu(AT, as_Register(base), AT);
2189 }
2190 if( Assembler::is_simm16(disp) ) {
2191 __ lhu(as_Register(dst), AT, disp);
2192 } else {
2193 __ move(T9, disp);
2194 __ addu(AT, AT, T9);
2195 __ lhu(as_Register(dst), AT, 0);
2196 }
2197 } else {
2198 if( Assembler::is_simm16(disp) ) {
2199 __ lhu(as_Register(dst), as_Register(base), disp);
2200 } else {
2201 __ move(T9, disp);
2202 __ daddu(AT, as_Register(base), T9);
2203 __ lhu(as_Register(dst), AT, 0);
2204 }
2205 }
2206 %}
  // Store Char (16bit unsigned)
  // Stores the low halfword of src to [base + (index << scale) + disp].
  // On Loongson, gsshx (store-halfword-indexed) folds the reg+reg add into
  // the store when the displacement fits in simm8.
  // NOTE(review): address arithmetic here uses addu (32-bit add that
  // sign-extends its result) on 64-bit pointers, unlike the ld/sd
  // encodings which use daddu; this is only safe while computed addresses
  // fit in the low 31 bits -- confirm against the port's heap placement.
  enc_class store_C_reg_enc (memory mem, mRegI src) %{
    MacroAssembler _masm(&cbuf);
    int src = $src$$reg;
    int base = $mem$$base;
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;

    if( index != 0 ) {
      if( Assembler::is_simm16(disp) ) {
        if( UseLoongsonISA && Assembler::is_simm(disp, 8) ) {
          // Loongson fused reg+reg addressing.
          if (scale == 0) {
            __ gsshx(as_Register(src), as_Register(base), as_Register(index), disp);
          } else {
            __ dsll(AT, as_Register(index), scale);
            __ gsshx(as_Register(src), as_Register(base), AT, disp);
          }
        } else {
          if (scale == 0) {
            __ addu(AT, as_Register(base), as_Register(index));
          } else {
            __ dsll(AT, as_Register(index), scale);
            __ addu(AT, as_Register(base), AT);
          }
          __ sh(as_Register(src), AT, disp);
        }
      } else {
        // Displacement too large for the sh immediate.
        if (scale == 0) {
          __ addu(AT, as_Register(base), as_Register(index));
        } else {
          __ dsll(AT, as_Register(index), scale);
          __ addu(AT, as_Register(base), AT);
        }
        __ move(T9, disp);
        if( UseLoongsonISA ) {
          __ gsshx(as_Register(src), AT, T9, 0);
        } else {
          __ addu(AT, AT, T9);
          __ sh(as_Register(src), AT, 0);
        }
      }
    } else {
      // No index register: effective address is base + disp.
      if( Assembler::is_simm16(disp) ) {
        __ sh(as_Register(src), as_Register(base), disp);
      } else {
        __ move(T9, disp);
        if( UseLoongsonISA ) {
          __ gsshx(as_Register(src), as_Register(base), T9, 0);
        } else {
          __ addu(AT, as_Register(base), T9);
          __ sh(as_Register(src), AT, 0);
        }
      }
    }
  %}
  // Store zero halfword (char 0) to [base + (index << scale) + disp].
  // The zero comes straight from R0; Loongson gsshx folds the reg+reg add
  // when the displacement fits in simm8.
  // NOTE(review): uses addu for 64-bit address arithmetic (see the note on
  // store_C_reg_enc) -- safe only while addresses fit in the low 31 bits.
  enc_class store_C0_enc (memory mem) %{
    MacroAssembler _masm(&cbuf);
    int base = $mem$$base;
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;

    if( index != 0 ) {
      if( Assembler::is_simm16(disp) ) {
        if( UseLoongsonISA && Assembler::is_simm(disp, 8) ) {
          if (scale == 0) {
            __ gsshx(R0, as_Register(base), as_Register(index), disp);
          } else {
            __ dsll(AT, as_Register(index), scale);
            __ gsshx(R0, as_Register(base), AT, disp);
          }
        } else {
          if (scale == 0) {
            __ addu(AT, as_Register(base), as_Register(index));
          } else {
            __ dsll(AT, as_Register(index), scale);
            __ addu(AT, as_Register(base), AT);
          }
          __ sh(R0, AT, disp);
        }
      } else {
        // Displacement too large for the sh immediate.
        if (scale == 0) {
          __ addu(AT, as_Register(base), as_Register(index));
        } else {
          __ dsll(AT, as_Register(index), scale);
          __ addu(AT, as_Register(base), AT);
        }
        __ move(T9, disp);
        if( UseLoongsonISA ) {
          __ gsshx(R0, AT, T9, 0);
        } else {
          __ addu(AT, AT, T9);
          __ sh(R0, AT, 0);
        }
      }
    } else {
      // No index register: effective address is base + disp.
      if( Assembler::is_simm16(disp) ) {
        __ sh(R0, as_Register(base), disp);
      } else {
        __ move(T9, disp);
        if( UseLoongsonISA ) {
          __ gsshx(R0, as_Register(base), T9, 0);
        } else {
          __ addu(AT, as_Register(base), T9);
          __ sh(R0, AT, 0);
        }
      }
    }
  %}
  // Load Int (32-bit, sign-extended) from [base + (index << scale) + disp].
  // Loongson gslwx (load-word-indexed) folds the reg+reg add into the load
  // when the displacement fits in simm8.
  // NOTE(review): uses addu for 64-bit address arithmetic (see the note on
  // store_C_reg_enc) -- safe only while addresses fit in the low 31 bits.
  enc_class load_I_enc (mRegI dst, memory mem) %{
    MacroAssembler _masm(&cbuf);
    int dst = $dst$$reg;
    int base = $mem$$base;
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;

    if( index != 0 ) {
      if( Assembler::is_simm16(disp) ) {
        if( UseLoongsonISA && Assembler::is_simm(disp, 8) ) {
          // Loongson fused reg+reg addressing.
          if (scale == 0) {
            __ gslwx(as_Register(dst), as_Register(base), as_Register(index), disp);
          } else {
            __ dsll(AT, as_Register(index), scale);
            __ gslwx(as_Register(dst), as_Register(base), AT, disp);
          }
        } else {
          if (scale == 0) {
            __ addu(AT, as_Register(base), as_Register(index));
          } else {
            __ dsll(AT, as_Register(index), scale);
            __ addu(AT, as_Register(base), AT);
          }
          __ lw(as_Register(dst), AT, disp);
        }
      } else {
        // Displacement too large for the lw immediate.
        if (scale == 0) {
          __ addu(AT, as_Register(base), as_Register(index));
        } else {
          __ dsll(AT, as_Register(index), scale);
          __ addu(AT, as_Register(base), AT);
        }
        __ move(T9, disp);
        if( UseLoongsonISA ) {
          __ gslwx(as_Register(dst), AT, T9, 0);
        } else {
          __ addu(AT, AT, T9);
          __ lw(as_Register(dst), AT, 0);
        }
      }
    } else {
      // No index register: effective address is base + disp.
      if( Assembler::is_simm16(disp) ) {
        __ lw(as_Register(dst), as_Register(base), disp);
      } else {
        __ move(T9, disp);
        if( UseLoongsonISA ) {
          __ gslwx(as_Register(dst), as_Register(base), T9, 0);
        } else {
          __ addu(AT, as_Register(base), T9);
          __ lw(as_Register(dst), AT, 0);
        }
      }
    }
  %}
  // Store Int (32-bit) from register to [base + (index << scale) + disp].
  // Loongson gsswx (store-word-indexed) folds the reg+reg add into the
  // store when the displacement fits in simm8.
  // NOTE(review): uses addu for 64-bit address arithmetic (see the note on
  // store_C_reg_enc) -- safe only while addresses fit in the low 31 bits.
  enc_class store_I_reg_enc (memory mem, mRegI src) %{
    MacroAssembler _masm(&cbuf);
    int src = $src$$reg;
    int base = $mem$$base;
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;

    if( index != 0 ) {
      if( Assembler::is_simm16(disp) ) {
        if( UseLoongsonISA && Assembler::is_simm(disp, 8) ) {
          // Loongson fused reg+reg addressing.
          if (scale == 0) {
            __ gsswx(as_Register(src), as_Register(base), as_Register(index), disp);
          } else {
            __ dsll(AT, as_Register(index), scale);
            __ gsswx(as_Register(src), as_Register(base), AT, disp);
          }
        } else {
          if (scale == 0) {
            __ addu(AT, as_Register(base), as_Register(index));
          } else {
            __ dsll(AT, as_Register(index), scale);
            __ addu(AT, as_Register(base), AT);
          }
          __ sw(as_Register(src), AT, disp);
        }
      } else {
        // Displacement too large for the sw immediate.
        if (scale == 0) {
          __ addu(AT, as_Register(base), as_Register(index));
        } else {
          __ dsll(AT, as_Register(index), scale);
          __ addu(AT, as_Register(base), AT);
        }
        __ move(T9, disp);
        if( UseLoongsonISA ) {
          __ gsswx(as_Register(src), AT, T9, 0);
        } else {
          __ addu(AT, AT, T9);
          __ sw(as_Register(src), AT, 0);
        }
      }
    } else {
      // No index register: effective address is base + disp.
      if( Assembler::is_simm16(disp) ) {
        __ sw(as_Register(src), as_Register(base), disp);
      } else {
        __ move(T9, disp);
        if( UseLoongsonISA ) {
          __ gsswx(as_Register(src), as_Register(base), T9, 0);
        } else {
          __ addu(AT, as_Register(base), T9);
          __ sw(as_Register(src), AT, 0);
        }
      }
    }
  %}
2432 enc_class store_I_immI_enc (memory mem, immI src) %{
2433 MacroAssembler _masm(&cbuf);
2434 int base = $mem$$base;
2435 int index = $mem$$index;
2436 int scale = $mem$$scale;
2437 int disp = $mem$$disp;
2438 int value = $src$$constant;
2440 if( index != 0 ) {
2441 if (scale == 0) {
2442 __ daddu(AT, as_Register(base), as_Register(index));
2443 } else {
2444 __ dsll(AT, as_Register(index), scale);
2445 __ daddu(AT, as_Register(base), AT);
2446 }
2447 if( Assembler::is_simm16(disp) ) {
2448 if (value == 0) {
2449 __ sw(R0, AT, disp);
2450 } else {
2451 __ move(T9, value);
2452 __ sw(T9, AT, disp);
2453 }
2454 } else {
2455 if (value == 0) {
2456 __ move(T9, disp);
2457 __ addu(AT, AT, T9);
2458 __ sw(R0, AT, 0);
2459 } else {
2460 __ move(T9, disp);
2461 __ addu(AT, AT, T9);
2462 __ move(T9, value);
2463 __ sw(T9, AT, 0);
2464 }
2465 }
2466 } else {
2467 if( Assembler::is_simm16(disp) ) {
2468 if (value == 0) {
2469 __ sw(R0, as_Register(base), disp);
2470 } else {
2471 __ move(AT, value);
2472 __ sw(AT, as_Register(base), disp);
2473 }
2474 } else {
2475 if (value == 0) {
2476 __ move(T9, disp);
2477 __ addu(AT, as_Register(base), T9);
2478 __ sw(R0, AT, 0);
2479 } else {
2480 __ move(T9, disp);
2481 __ addu(AT, as_Register(base), T9);
2482 __ move(T9, value);
2483 __ sw(T9, AT, 0);
2484 }
2485 }
2486 }
2487 %}
  // Load narrow oop (compressed pointer, 32-bit zero-extended via lwu)
  // from [base + (index << scale) + disp].  The memory operand must not
  // carry a relocation on the displacement (asserted below).
  enc_class load_N_enc (mRegN dst, memory mem) %{
    MacroAssembler _masm(&cbuf);
    int dst = $dst$$reg;
    int base = $mem$$base;
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;
    relocInfo::relocType disp_reloc = $mem->disp_reloc();
    assert(disp_reloc == relocInfo::none, "cannot have disp");

    if( index != 0 ) {
      // Fold index (optionally scaled) into AT.
      if (scale == 0) {
        __ daddu(AT, as_Register(base), as_Register(index));
      } else {
        __ dsll(AT, as_Register(index), scale);
        __ daddu(AT, as_Register(base), AT);
      }
      if( Assembler::is_simm16(disp) ) {
        __ lwu(as_Register(dst), AT, disp);
      } else {
        // Displacement too large for the lwu immediate.
        __ li(T9, disp);
        __ daddu(AT, AT, T9);
        __ lwu(as_Register(dst), AT, 0);
      }
    } else {
      if( Assembler::is_simm16(disp) ) {
        __ lwu(as_Register(dst), as_Register(base), disp);
      } else {
        __ li(T9, disp);
        __ daddu(AT, as_Register(base), T9);
        __ lwu(as_Register(dst), AT, 0);
      }
    }

  %}
  // Load Pointer (64-bit) from [base + (index << scale) + disp].
  // On Loongson CPUs the gsldx (load-doubleword-indexed) instruction is
  // used so the reg+reg add is folded into the load; a zero index is
  // expressed as R0.  The memory operand must not carry a relocation on
  // the displacement (asserted below).
  enc_class load_P_enc (mRegP dst, memory mem) %{
    MacroAssembler _masm(&cbuf);
    int dst = $dst$$reg;
    int base = $mem$$base;
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;
    relocInfo::relocType disp_reloc = $mem->disp_reloc();
    assert(disp_reloc == relocInfo::none, "cannot have disp");

    if( index != 0 ) {
      if ( UseLoongsonISA ) {
        if ( Assembler::is_simm(disp, 8) ) {
          // Small displacement fits gsldx's 8-bit immediate; put the
          // (possibly scaled) index into AT.
          if ( scale != 0 ) {
            __ dsll(AT, as_Register(index), scale);
          } else {
            __ move(AT, as_Register(index));
          }
          __ gsldx(as_Register(dst), as_Register(base), AT, disp);
        } else {
          // Large displacement: fold disp into the index term, then let
          // gsldx add the base.
          if ( scale != 0 ) {
            __ dsll(AT, as_Register(index), scale);
            __ move(T9, disp);
            __ daddu(AT, AT, T9);
          } else {
            __ move(T9, disp);
            __ daddu(AT, as_Register(index), T9);
          }
          __ gsldx(as_Register(dst), as_Register(base), AT, 0);
        }
      } else { //not use loongson isa
        if (scale == 0) {
          __ daddu(AT, as_Register(base), as_Register(index));
        } else {
          __ dsll(AT, as_Register(index), scale);
          __ daddu(AT, as_Register(base), AT);
        }
        if( Assembler::is_simm16(disp) ) {
          __ ld(as_Register(dst), AT, disp);
        } else {
          __ li(T9, disp);
          __ daddu(AT, AT, T9);
          __ ld(as_Register(dst), AT, 0);
        }
      }
    } else {
      // No index register: effective address is base + disp.
      if ( UseLoongsonISA ) {
        if( Assembler::is_simm(disp, 8) ) {
          __ gsldx(as_Register(dst), as_Register(base), R0, disp);
        } else if ( Assembler::is_simm16(disp) ){
          __ ld(as_Register(dst), as_Register(base), disp);
        } else {
          __ li(T9, disp);
          __ gsldx(as_Register(dst), as_Register(base), T9, 0);
        }
      } else { //not use loongson isa
        if( Assembler::is_simm16(disp) ) {
          __ ld(as_Register(dst), as_Register(base), disp);
        } else {
          __ li(T9, disp);
          __ daddu(AT, as_Register(base), T9);
          __ ld(as_Register(dst), AT, 0);
        }
      }
    }
    // if( disp_reloc != relocInfo::none) __ ld(as_Register(dst), as_Register(dst), 0);
  %}
  // Store Pointer (64-bit) from register to [base + (index << scale) + disp].
  enc_class store_P_reg_enc (memory mem, mRegP src) %{
    MacroAssembler _masm(&cbuf);
    int src = $src$$reg;
    int base = $mem$$base;
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;

    if( index != 0 ) {
      // Fold index (optionally scaled) into AT.
      if (scale == 0) {
        __ daddu(AT, as_Register(base), as_Register(index));
      } else {
        __ dsll(AT, as_Register(index), scale);
        __ daddu(AT, as_Register(base), AT);
      }
      if( Assembler::is_simm16(disp) ) {
        __ sd(as_Register(src), AT, disp);
      } else {
        // Displacement too large for the sd immediate.
        __ move(T9, disp);
        __ daddu(AT, AT, T9);
        __ sd(as_Register(src), AT, 0);
      }
    } else {
      if( Assembler::is_simm16(disp) ) {
        __ sd(as_Register(src), as_Register(base), disp);
      } else {
        __ move(T9, disp);
        __ daddu(AT, as_Register(base), T9);
        __ sd(as_Register(src), AT, 0);
      }
    }
  %}
2627 enc_class store_N_reg_enc (memory mem, mRegN src) %{
2628 MacroAssembler _masm(&cbuf);
2629 int src = $src$$reg;
2630 int base = $mem$$base;
2631 int index = $mem$$index;
2632 int scale = $mem$$scale;
2633 int disp = $mem$$disp;
2635 if( index != 0 ) {
2636 if (scale == 0) {
2637 __ daddu(AT, as_Register(base), as_Register(index));
2638 } else {
2639 __ dsll(AT, as_Register(index), scale);
2640 __ daddu(AT, as_Register(base), AT);
2641 }
2642 if( Assembler::is_simm16(disp) ) {
2643 __ sw(as_Register(src), AT, disp);
2644 } else {
2645 __ move(T9, disp);
2646 __ addu(AT, AT, T9);
2647 __ sw(as_Register(src), AT, 0);
2648 }
2649 } else {
2650 if( Assembler::is_simm16(disp) ) {
2651 __ sw(as_Register(src), as_Register(base), disp);
2652 } else {
2653 __ move(T9, disp);
2654 __ addu(AT, as_Register(base), T9);
2655 __ sw(as_Register(src), AT, 0);
2656 }
2657 }
2658 %}
  // Store NULL pointer (64-bit zero from R0) to
  // [base + (index << scale) + disp].  On Loongson, gssdx
  // (store-doubleword-indexed) folds the reg+reg add into the store when
  // the displacement fits in simm8.
  enc_class store_P_immP0_enc (memory mem) %{
    MacroAssembler _masm(&cbuf);
    int base = $mem$$base;
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;

    if( index != 0 ) {
      if (scale == 0) {
        if( Assembler::is_simm16(disp) ) {
          if (UseLoongsonISA && Assembler::is_simm(disp, 8)) {
            __ gssdx(R0, as_Register(base), as_Register(index), disp);
          } else {
            __ daddu(AT, as_Register(base), as_Register(index));
            __ sd(R0, AT, disp);
          }
        } else {
          // Displacement too large for the sd immediate.
          __ daddu(AT, as_Register(base), as_Register(index));
          __ move(T9, disp);
          if(UseLoongsonISA) {
            __ gssdx(R0, AT, T9, 0);
          } else {
            __ daddu(AT, AT, T9);
            __ sd(R0, AT, 0);
          }
        }
      } else {
        // Scaled index: shift first, then combine with base/disp.
        __ dsll(AT, as_Register(index), scale);
        if( Assembler::is_simm16(disp) ) {
          if (UseLoongsonISA && Assembler::is_simm(disp, 8)) {
            __ gssdx(R0, as_Register(base), AT, disp);
          } else {
            __ daddu(AT, as_Register(base), AT);
            __ sd(R0, AT, disp);
          }
        } else {
          __ daddu(AT, as_Register(base), AT);
          __ move(T9, disp);
          if (UseLoongsonISA) {
            __ gssdx(R0, AT, T9, 0);
          } else {
            __ daddu(AT, AT, T9);
            __ sd(R0, AT, 0);
          }
        }
      }
    } else {
      // No index register: effective address is base + disp.
      if( Assembler::is_simm16(disp) ) {
        __ sd(R0, as_Register(base), disp);
      } else {
        __ move(T9, disp);
        if (UseLoongsonISA) {
          __ gssdx(R0, as_Register(base), T9, 0);
        } else {
          __ daddu(AT, as_Register(base), T9);
          __ sd(R0, AT, 0);
        }
      }
    }
  %}
  // Store zero narrow oop (compressed NULL, 32-bit sw from R0) to
  // [base + (index << scale) + disp].
  enc_class storeImmN0_enc(memory mem, ImmN0 src) %{
    MacroAssembler _masm(&cbuf);
    int base = $mem$$base;
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;

    if(index!=0){
      // Fold index (optionally scaled) into AT.
      if (scale == 0) {
        __ daddu(AT, as_Register(base), as_Register(index));
      } else {
        __ dsll(AT, as_Register(index), scale);
        __ daddu(AT, as_Register(base), AT);
      }

      if( Assembler::is_simm16(disp) ) {
        __ sw(R0, AT, disp);
      } else {
        // Displacement too large for the sw immediate.
        __ move(T9, disp);
        __ daddu(AT, AT, T9);
        __ sw(R0, AT, 0);
      }
    }
    else {
      if( Assembler::is_simm16(disp) ) {
        __ sw(R0, as_Register(base), disp);
      } else {
        __ move(T9, disp);
        __ daddu(AT, as_Register(base), T9);
        __ sw(R0, AT, 0);
      }
    }
  %}
  // Load Long (64-bit) from [base + (index << scale) + disp].
  enc_class load_L_enc (mRegL dst, memory mem) %{
    MacroAssembler _masm(&cbuf);
    int base = $mem$$base;
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;
    Register dst_reg = as_Register($dst$$reg);

    // 2013/03/27 Jin: $base may contain a null object, and the server JIT
    // forces the implicit-null-check exception offset to the position of
    // the FIRST instruction of this node, so an explicit probing load of
    // [base + 0] is inserted here at the start of the encoding.
    __ lw(AT, as_Register(base), 0);

    // 2012/10/04: historical note -- this used to be emitted as two 32-bit
    // loads (lw dst.lo / lw dst.hi), which mis-compiled
    // java.util.Arrays::sort1 when dst overlapped base (the first lw
    // clobbered the base register).  It is now a single 64-bit ld.
    if( index != 0 ) {
      // Fold index (optionally scaled) into AT.
      if (scale == 0) {
        __ daddu(AT, as_Register(base), as_Register(index));
      } else {
        __ dsll(AT, as_Register(index), scale);
        __ daddu(AT, as_Register(base), AT);
      }
      if( Assembler::is_simm16(disp) ) {
        __ ld(dst_reg, AT, disp);
      } else {
        // Displacement too large for the ld immediate.
        __ move(T9, disp);
        __ daddu(AT, AT, T9);
        __ ld(dst_reg, AT, 0);
      }
    } else {
      if( Assembler::is_simm16(disp) ) {
        __ move(AT, as_Register(base));
        __ ld(dst_reg, AT, disp);
      } else {
        __ move(T9, disp);
        __ daddu(AT, as_Register(base), T9);
        __ ld(dst_reg, AT, 0);
      }
    }
  %}
  // Store Long (64-bit) from register to [base + (index << scale) + disp].
  enc_class store_L_reg_enc (memory mem, mRegL src) %{
    MacroAssembler _masm(&cbuf);
    int base = $mem$$base;
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;
    Register src_reg = as_Register($src$$reg);

    if( index != 0 ) {
      // Fold index (optionally scaled) into AT.
      if (scale == 0) {
        __ daddu(AT, as_Register(base), as_Register(index));
      } else {
        __ dsll(AT, as_Register(index), scale);
        __ daddu(AT, as_Register(base), AT);
      }
      if( Assembler::is_simm16(disp) ) {
        __ sd(src_reg, AT, disp);
      } else {
        // Displacement too large for the sd immediate.
        __ move(T9, disp);
        __ daddu(AT, AT, T9);
        __ sd(src_reg, AT, 0);
      }
    } else {
      if( Assembler::is_simm16(disp) ) {
        __ move(AT, as_Register(base));
        __ sd(src_reg, AT, disp);
      } else {
        __ move(T9, disp);
        __ daddu(AT, as_Register(base), T9);
        __ sd(src_reg, AT, 0);
      }
    }
  %}
2845 enc_class store_L_immL0_enc (memory mem, immL0 src) %{
2846 MacroAssembler _masm(&cbuf);
2847 int base = $mem$$base;
2848 int index = $mem$$index;
2849 int scale = $mem$$scale;
2850 int disp = $mem$$disp;
2852 if( index != 0 ) {
2853 if (scale == 0) {
2854 __ daddu(AT, as_Register(base), as_Register(index));
2855 } else {
2856 __ dsll(AT, as_Register(index), scale);
2857 __ daddu(AT, as_Register(base), AT);
2858 }
2859 if( Assembler::is_simm16(disp) ) {
2860 __ sd(R0, AT, disp);
2861 } else {
2862 __ move(T9, disp);
2863 __ addu(AT, AT, T9);
2864 __ sd(R0, AT, 0);
2865 }
2866 } else {
2867 if( Assembler::is_simm16(disp) ) {
2868 __ move(AT, as_Register(base));
2869 __ sd(R0, AT, disp);
2870 } else {
2871 __ move(T9, disp);
2872 __ addu(AT, as_Register(base), T9);
2873 __ sd(R0, AT, 0);
2874 }
2875 }
2876 %}
  // Load Float (32-bit) into an FP register from
  // [base + (index << scale) + disp].  On Loongson, gslwxc1
  // (load-word-indexed-to-coprocessor-1) folds the reg+reg add into the
  // load when the displacement fits in simm8.
  enc_class load_F_enc (regF dst, memory mem) %{
    MacroAssembler _masm(&cbuf);
    int base = $mem$$base;
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;
    FloatRegister dst = $dst$$FloatRegister;

    if( index != 0 ) {
      if( Assembler::is_simm16(disp) ) {
        if( UseLoongsonISA && Assembler::is_simm(disp, 8) ) {
          // Loongson fused reg+reg addressing.
          if (scale == 0) {
            __ gslwxc1(dst, as_Register(base), as_Register(index), disp);
          } else {
            __ dsll(AT, as_Register(index), scale);
            __ gslwxc1(dst, as_Register(base), AT, disp);
          }
        } else {
          if (scale == 0) {
            __ daddu(AT, as_Register(base), as_Register(index));
          } else {
            __ dsll(AT, as_Register(index), scale);
            __ daddu(AT, as_Register(base), AT);
          }
          __ lwc1(dst, AT, disp);
        }
      } else {
        // Displacement too large for the lwc1 immediate.
        if (scale == 0) {
          __ daddu(AT, as_Register(base), as_Register(index));
        } else {
          __ dsll(AT, as_Register(index), scale);
          __ daddu(AT, as_Register(base), AT);
        }
        __ move(T9, disp);
        if( UseLoongsonISA ) {
          __ gslwxc1(dst, AT, T9, 0);
        } else {
          __ daddu(AT, AT, T9);
          __ lwc1(dst, AT, 0);
        }
      }
    } else {
      // No index register: effective address is base + disp.
      if( Assembler::is_simm16(disp) ) {
        __ lwc1(dst, as_Register(base), disp);
      } else {
        __ move(T9, disp);
        if( UseLoongsonISA ) {
          __ gslwxc1(dst, as_Register(base), T9, 0);
        } else {
          __ daddu(AT, as_Register(base), T9);
          __ lwc1(dst, AT, 0);
        }
      }
    }
  %}
2934 enc_class store_F_reg_enc (memory mem, regF src) %{
2935 MacroAssembler _masm(&cbuf);
2936 int base = $mem$$base;
2937 int index = $mem$$index;
2938 int scale = $mem$$scale;
2939 int disp = $mem$$disp;
2940 FloatRegister src = $src$$FloatRegister;
2942 if( index != 0 ) {
2943 if( Assembler::is_simm16(disp) ) {
2944 if( UseLoongsonISA && Assembler::is_simm(disp, 8) ) {
2945 if (scale == 0) {
2946 __ gsswxc1(src, as_Register(base), as_Register(index), disp);
2947 } else {
2948 __ dsll(AT, as_Register(index), scale);
2949 __ gsswxc1(src, as_Register(base), AT, disp);
2950 }
2951 } else {
2952 if (scale == 0) {
2953 __ daddu(AT, as_Register(base), as_Register(index));
2954 } else {
2955 __ dsll(AT, as_Register(index), scale);
2956 __ daddu(AT, as_Register(base), AT);
2957 }
2958 __ swc1(src, AT, disp);
2959 }
2960 } else {
2961 if (scale == 0) {
2962 __ daddu(AT, as_Register(base), as_Register(index));
2963 } else {
2964 __ dsll(AT, as_Register(index), scale);
2965 __ daddu(AT, as_Register(base), AT);
2966 }
2967 __ move(T9, disp);
2968 if( UseLoongsonISA ) {
2969 __ gsswxc1(src, AT, T9, 0);
2970 } else {
2971 __ daddu(AT, AT, T9);
2972 __ swc1(src, AT, 0);
2973 }
2974 }
2975 } else {
2976 if( Assembler::is_simm16(disp) ) {
2977 __ swc1(src, as_Register(base), disp);
2978 } else {
2979 __ move(T9, disp);
2980 if( UseLoongsonISA ) {
2981 __ gslwxc1(src, as_Register(base), T9, 0);
2982 } else {
2983 __ daddu(AT, as_Register(base), T9);
2984 __ swc1(src, AT, 0);
2985 }
2986 }
2987 }
2988 %}
2990 enc_class load_D_enc (regD dst, memory mem) %{
2991 MacroAssembler _masm(&cbuf);
2992 int base = $mem$$base;
2993 int index = $mem$$index;
2994 int scale = $mem$$scale;
2995 int disp = $mem$$disp;
2996 FloatRegister dst_reg = as_FloatRegister($dst$$reg);
2998 if( index != 0 ) {
2999 if( Assembler::is_simm16(disp) ) {
3000 if( UseLoongsonISA && Assembler::is_simm(disp, 8) ) {
3001 if (scale == 0) {
3002 __ gsldxc1(dst_reg, as_Register(base), as_Register(index), disp);
3003 } else {
3004 __ dsll(AT, as_Register(index), scale);
3005 __ gsldxc1(dst_reg, as_Register(base), AT, disp);
3006 }
3007 } else {
3008 if (scale == 0) {
3009 __ daddu(AT, as_Register(base), as_Register(index));
3010 } else {
3011 __ dsll(AT, as_Register(index), scale);
3012 __ daddu(AT, as_Register(base), AT);
3013 }
3014 __ ldc1(dst_reg, AT, disp);
3015 }
3016 } else {
3017 if (scale == 0) {
3018 __ daddu(AT, as_Register(base), as_Register(index));
3019 } else {
3020 __ dsll(AT, as_Register(index), scale);
3021 __ daddu(AT, as_Register(base), AT);
3022 }
3023 __ move(T9, disp);
3024 if( UseLoongsonISA ) {
3025 __ gsldxc1(dst_reg, AT, T9, 0);
3026 } else {
3027 __ addu(AT, AT, T9);
3028 __ ldc1(dst_reg, AT, 0);
3029 }
3030 }
3031 } else {
3032 if( Assembler::is_simm16(disp) ) {
3033 __ ldc1(dst_reg, as_Register(base), disp);
3034 } else {
3035 __ move(T9, disp);
3036 if( UseLoongsonISA ) {
3037 __ gsldxc1(dst_reg, as_Register(base), T9, 0);
3038 } else {
3039 __ addu(AT, as_Register(base), T9);
3040 __ ldc1(dst_reg, AT, 0);
3041 }
3042 }
3043 }
3044 %}
3046 enc_class store_D_reg_enc (memory mem, regD src) %{
3047 MacroAssembler _masm(&cbuf);
3048 int base = $mem$$base;
3049 int index = $mem$$index;
3050 int scale = $mem$$scale;
3051 int disp = $mem$$disp;
3052 FloatRegister src_reg = as_FloatRegister($src$$reg);
3054 if( index != 0 ) {
3055 if( Assembler::is_simm16(disp) ) {
3056 if( UseLoongsonISA && Assembler::is_simm(disp, 8) ) {
3057 if (scale == 0) {
3058 __ gssdxc1(src_reg, as_Register(base), as_Register(index), disp);
3059 } else {
3060 __ dsll(AT, as_Register(index), scale);
3061 __ gssdxc1(src_reg, as_Register(base), AT, disp);
3062 }
3063 } else {
3064 if (scale == 0) {
3065 __ daddu(AT, as_Register(base), as_Register(index));
3066 } else {
3067 __ dsll(AT, as_Register(index), scale);
3068 __ daddu(AT, as_Register(base), AT);
3069 }
3070 __ sdc1(src_reg, AT, disp);
3071 }
3072 } else {
3073 if (scale == 0) {
3074 __ daddu(AT, as_Register(base), as_Register(index));
3075 } else {
3076 __ dsll(AT, as_Register(index), scale);
3077 __ daddu(AT, as_Register(base), AT);
3078 }
3079 __ move(T9, disp);
3080 if( UseLoongsonISA ) {
3081 __ gssdxc1(src_reg, AT, T9, 0);
3082 } else {
3083 __ addu(AT, AT, T9);
3084 __ sdc1(src_reg, AT, 0);
3085 }
3086 }
3087 } else {
3088 if( Assembler::is_simm16(disp) ) {
3089 __ sdc1(src_reg, as_Register(base), disp);
3090 } else {
3091 __ move(T9, disp);
3092 if( UseLoongsonISA ) {
3093 __ gssdxc1(src_reg, as_Register(base), T9, 0);
3094 } else {
3095 __ addu(AT, as_Register(base), T9);
3096 __ sdc1(src_reg, AT, 0);
3097 }
3098 }
3099 }
3100 %}
  // Call from compiled Java code into the VM runtime (leaf or non-leaf).
  // Marks the call site for relocation, materializes the absolute target
  // address with a fixed-length li48 sequence (patchable), and calls
  // through T9 as the MIPS ABI requires for PIC callees.
  enc_class Java_To_Runtime (method meth) %{ // CALL Java_To_Runtime, Java_To_Runtime_Leaf
    MacroAssembler _masm(&cbuf);
    // This is the instruction starting address for relocation info.
    __ block_comment("Java_To_Runtime");
    cbuf.set_insts_mark();
    __ relocate(relocInfo::runtime_call_type);

    __ li48(T9, (long)$meth$$method);
    __ jalr(T9);
    __ nop();   // branch-delay slot
  %}
  enc_class Java_Static_Call (method meth) %{ // JAVA STATIC CALL
    // CALL to fixup routine.  Fixup routine uses ScopeDesc info to determine
    // who we intended to call.
    MacroAssembler _masm(&cbuf);
    cbuf.set_insts_mark();

    // Choose the relocation type: runtime stub (no resolved method),
    // optimized virtual call, or plain static call.
    if ( !_method ) {
      __ relocate(relocInfo::runtime_call_type);
    } else if(_optimized_virtual) {
      __ relocate(relocInfo::opt_virtual_call_type);
    } else {
      __ relocate(relocInfo::static_call_type);
    }

    __ li(T9, $meth$$method);
    __ jalr(T9);
    __ nop();   // branch-delay slot
    if( _method ) {  // Emit stub for static call
      emit_java_to_interp(cbuf);
    }
  %}
  /*
   * Virtual (inline-cache) call dispatch.
   * [Ref: LIR_Assembler::ic_call() ]
   */
  enc_class Java_Dynamic_Call (method meth) %{ // JAVA DYNAMIC CALL
    MacroAssembler _masm(&cbuf);
    __ block_comment("Java_Dynamic_Call");
    // ic_call emits the inline-cache sequence and the relocated call.
    __ ic_call((address)$meth$$method);
  %}
  // Materialize a boolean flags value after a fast lock/unlock sequence:
  // flags = 0 when AT == 0, otherwise flags = 0xFFFFFFFF.
  // NOTE(review): this reads AT without setting it -- it presumably relies
  // on the preceding fast_lock/fast_unlock encoding leaving its success
  // indication in AT; confirm against those encodings before changing.
  enc_class Set_Flags_After_Fast_Lock_Unlock(FlagsReg cr) %{
    Register flags = $cr$$Register;
    Label L;

    MacroAssembler _masm(&cbuf);

    __ addu(flags, R0, R0);        // flags = 0
    __ beq(AT, R0, L);
    __ delayed()->nop();           // branch-delay slot
    __ move(flags, 0xFFFFFFFF);    // AT != 0: flags = all ones
    __ bind(L);
  %}
3160 enc_class enc_PartialSubtypeCheck(mRegP result, mRegP sub, mRegP super, mRegI tmp) %{
// Slow-path subtype check. On success (sub is a subtype of super) the
// helper falls through (NULL success label) and result is set to 0; on
// failure it branches to `miss` and result is set to 1. Clobbers $tmp
// and T9 as scratch registers.
3161 Register result = $result$$Register;
3162 Register sub = $sub$$Register;
3163 Register super = $super$$Register;
3164 Register length = $tmp$$Register;
3165 Register tmp = T9;
3166 Label miss;
3168 /* 2012/9/28 Jin: result may be the same as sub
3169 * 47c B40: # B21 B41 <- B20 Freq: 0.155379
3170 * 47c partialSubtypeCheck result=S1, sub=S1, super=S3, length=S0
3171 * 4bc mov S2, NULL #@loadConP
3172 * 4c0 beq S1, S2, B21 #@branchConP P=0.999999 C=-1.000000
3173 */
3174 MacroAssembler _masm(&cbuf);
3175 Label done;
3176 __ check_klass_subtype_slow_path(sub, super, length, tmp,
3177 NULL, &miss,
3178 /*set_cond_codes:*/ true);
3179 /* 2013/7/22 Jin: Refer to X86_64's RDI */
// Hit: result := 0 (matches the x86_64 convention referenced above).
3180 __ move(result, 0);
3181 __ b(done);
// Branch-delay slot of b.
3182 __ nop();
3184 __ bind(miss);
// Miss: result := 1 (non-zero means "not a subtype").
3185 __ move(result, 1);
3186 __ bind(done);
3187 %}
3189 %}
3192 //---------MIPS FRAME--------------------------------------------------------------
3193 // Definition of frame structure and management information.
3194 //
3195 // S T A C K L A Y O U T Allocators stack-slot number
3196 // | (to get allocators register number
3197 // G Owned by | | v add SharedInfo::stack0)
3198 // r CALLER | |
3199 // o | +--------+ pad to even-align allocators stack-slot
3200 // w V | pad0 | numbers; owned by CALLER
3201 // t -----------+--------+----> Matcher::_in_arg_limit, unaligned
3202 // h ^ | in | 5
3203 // | | args | 4 Holes in incoming args owned by SELF
3204 // | | old | | 3
3205 // | | SP-+--------+----> Matcher::_old_SP, even aligned
3206 // v | | ret | 3 return address
3207 // Owned by +--------+
3208 // Self | pad2 | 2 pad to align old SP
3209 // | +--------+ 1
3210 // | | locks | 0
3211 // | +--------+----> SharedInfo::stack0, even aligned
3212 // | | pad1 | 11 pad to align new SP
3213 // | +--------+
3214 // | | | 10
3215 // | | spills | 9 spills
3216 // V | | 8 (pad0 slot for callee)
3217 // -----------+--------+----> Matcher::_out_arg_limit, unaligned
3218 // ^ | out | 7
3219 // | | args | 6 Holes in outgoing args owned by CALLEE
3220 // Owned by new | |
3221 // Callee SP-+--------+----> Matcher::_new_SP, even aligned
3222 // | |
3223 //
3224 // Note 1: Only region 8-11 is determined by the allocator. Region 0-5 is
3225 // known from SELF's arguments and the Java calling convention.
3226 // Region 6-7 is determined per call site.
3227 // Note 2: If the calling convention leaves holes in the incoming argument
3228 // area, those holes are owned by SELF. Holes in the outgoing area
3229 // are owned by the CALLEE. Holes should not be necessary in the
3230 // incoming area, as the Java calling convention is completely under
3231 // the control of the AD file. Doubles can be sorted and packed to
3232 // avoid holes. Holes in the outgoing arguments may be necessary for
3233 // varargs C calling conventions.
3234 // Note 3: Region 0-3 is even aligned, with pad2 as needed. Region 3-5 is
3235 // even aligned with pad0 as needed.
3236 // Region 6 is even aligned. Region 6-7 is NOT even aligned;
3237 // region 6-11 is even aligned; it may be padded out more so that
3238 // the region from SP to FP meets the minimum stack alignment.
3239 // Note 4: For I2C adapters, the incoming FP may not meet the minimum stack
3240 // alignment. Region 11, pad1, may be dynamically extended so that
3241 // SP meets the minimum alignment.
3244 frame %{
// Frame layout and calling-convention description consumed by adlc to
// generate Matcher/Compile support code. See the stack-layout diagram in
// the comment block above.
3246 stack_direction(TOWARDS_LOW);
3248 // These two registers define part of the calling convention
3249 // between compiled code and the interpreter.
3250 // SEE StartI2CNode::calling_convention & StartC2INode::calling_convention & StartOSRNode::calling_convention
3251 // for more information. by yjl 3/16/2006
3253 inline_cache_reg(T1); // Inline Cache Register
3254 interpreter_method_oop_reg(S3); // Method Oop Register when calling interpreter
3255 /*
3256 inline_cache_reg(T1); // Inline Cache Register or methodOop for I2C
3257 interpreter_arg_ptr_reg(A0); // Argument pointer for I2C adapters
3258 */
3260 // Optional: name the operand used by cisc-spilling to access [stack_pointer + offset]
3261 cisc_spilling_operand_name(indOffset32);
3263 // Number of stack slots consumed by locking an object
3264 // generate Compile::sync_stack_slots
// LP64 uses two 32-bit slots per lock word; ILP32 uses one.
3265 #ifdef _LP64
3266 sync_stack_slots(2);
3267 #else
3268 sync_stack_slots(1);
3269 #endif
3271 frame_pointer(SP);
3273 // Interpreter stores its frame pointer in a register which is
3274 // stored to the stack by I2CAdaptors.
3275 // I2CAdaptors convert from interpreted java to compiled java.
3277 interpreter_frame_pointer(FP);
3279 // generate Matcher::stack_alignment
3280 stack_alignment(StackAlignmentInBytes); //wordSize = sizeof(char*);
3282 // Number of stack slots between incoming argument block and the start of
3283 // a new frame. The PROLOG must add this many slots to the stack. The
3284 // EPILOG must remove this many slots. Intel needs one slot for
3285 // return address.
3286 // generate Matcher::in_preserve_stack_slots
3287 //in_preserve_stack_slots(VerifyStackAtCalls + 2); //Now VerifyStackAtCalls is defined as false ! Leave one stack slot for ra and fp
3288 in_preserve_stack_slots(4); //Now VerifyStackAtCalls is defined as false ! Leave two stack slots for ra and fp
3290 // Number of outgoing stack slots killed above the out_preserve_stack_slots
3291 // for calls to C. Supports the var-args backing area for register parms.
3292 varargs_C_out_slots_killed(0);
3294 // The after-PROLOG location of the return address. Location of
3295 // return address specifies a type (REG or STACK) and a number
3296 // representing the register number (i.e. - use a register name) or
3297 // stack slot.
3298 // Ret Addr is on stack in slot 0 if no locks or verification or alignment.
3299 // Otherwise, it is above the locks and verification slot and alignment word
3300 //return_addr(STACK -1+ round_to(1+VerifyStackAtCalls+Compile::current()->sync()*Compile::current()->sync_stack_slots(),WordsPerLong));
3301 return_addr(REG RA);
3303 // Body of function which returns an integer array locating
3304 // arguments either in registers or in stack slots. Passed an array
3305 // of ideal registers called "sig" and a "length" count. Stack-slot
3306 // offsets are based on outgoing arguments, i.e. a CALLER setting up
3307 // arguments for a CALLEE. Incoming stack arguments are
3308 // automatically biased by the preserve_stack_slots field above.
3311 // will generated to Matcher::calling_convention(OptoRegPair *sig, uint length, bool is_outgoing)
3312 // StartNode::calling_convention call this. by yjl 3/16/2006
3313 calling_convention %{
3314 SharedRuntime::java_calling_convention(sig_bt, regs, length, false);
3315 %}
3320 // Body of function which returns an integer array locating
3321 // arguments either in registers or in stack slots. Passed an array
3322 // of ideal registers called "sig" and a "length" count. Stack-slot
3323 // offsets are based on outgoing arguments, i.e. a CALLER setting up
3324 // arguments for a CALLEE. Incoming stack arguments are
3325 // automatically biased by the preserve_stack_slots field above.
3328 // SEE CallRuntimeNode::calling_convention for more information. by yjl 3/16/2006
3329 c_calling_convention %{
3330 (void) SharedRuntime::c_calling_convention(sig_bt, regs, /*regs2=*/NULL, length);
3331 %}
3334 // Location of C & interpreter return values
3335 // register(s) contain(s) return value for Op_StartI2C and Op_StartOSR.
3336 // SEE Matcher::match. by yjl 3/16/2006
// Integer/pointer results in V0 (with V0_H as the high half for P/L),
// float/double results in F0 (with F0_H for double).
3337 c_return_value %{
3338 assert( ideal_reg >= Op_RegI && ideal_reg <= Op_RegL, "only return normal values" );
3339 /* -- , -- , Op_RegN, Op_RegI, Op_RegP, Op_RegF, Op_RegD, Op_RegL */
3340 static int lo[Op_RegL+1] = { 0, 0, V0_num, V0_num, V0_num, F0_num, F0_num, V0_num };
3341 static int hi[Op_RegL+1] = { 0, 0, OptoReg::Bad, OptoReg::Bad, V0_H_num, OptoReg::Bad, F0_H_num, V0_H_num };
3342 return OptoRegPair(hi[ideal_reg],lo[ideal_reg]);
3343 %}
3345 // Location of return values
3346 // register(s) contain(s) return value for Op_StartC2I and Op_Start.
3347 // SEE Matcher::match. by yjl 3/16/2006
// Same register assignment as the C convention above.
3349 return_value %{
3350 assert( ideal_reg >= Op_RegI && ideal_reg <= Op_RegL, "only return normal values" );
3351 /* -- , -- , Op_RegN, Op_RegI, Op_RegP, Op_RegF, Op_RegD, Op_RegL */
3352 static int lo[Op_RegL+1] = { 0, 0, V0_num, V0_num, V0_num, F0_num, F0_num, V0_num };
3353 static int hi[Op_RegL+1] = { 0, 0, OptoReg::Bad, OptoReg::Bad, V0_H_num, OptoReg::Bad, F0_H_num, V0_H_num};
3354 return OptoRegPair(hi[ideal_reg],lo[ideal_reg]);
3355 %}
3357 %}
3359 //----------ATTRIBUTES---------------------------------------------------------
3360 //----------Operand Attributes-------------------------------------------------
3361 op_attrib op_cost(0); // Required cost attribute
3363 //----------Instruction Attributes---------------------------------------------
3364 ins_attrib ins_cost(100); // Required cost attribute
3365 ins_attrib ins_size(32); // Required size attribute (in bits)
3366 ins_attrib ins_pc_relative(0); // Required PC Relative flag
3367 ins_attrib ins_short_branch(0); // Required flag: is this instruction a
3368 // non-matching short branch variant of some
3369 // long branch?
3370 ins_attrib ins_alignment(4); // Required alignment attribute (must be a power of 2)
3371 // specifies the alignment that some part of the instruction (not
3372 // necessarily the start) requires. If > 1, a compute_padding()
3373 // function must be provided for the instruction
3375 //----------OPERANDS-----------------------------------------------------------
3376 // Operand definitions must precede instruction definitions for correct parsing
3377 // in the ADLC because operands constitute user defined types which are used in
3378 // instruction definitions.
3380 // Vectors
3381 operand vecD() %{
// 64-bit (double-width) vector operand, allocated in the FP double
// register class.
3382 constraint(ALLOC_IN_RC(dbl_reg));
3383 match(VecD);
3385 format %{ %}
3386 interface(REG_INTER);
3387 %}
3389 // Flags register, used as output of compare instructions
3390 operand FlagsReg() %{
// Condition-flags operand; MIPS has no architectural flags register, so
// this maps to the synthetic mips_flags register class.
3391 constraint(ALLOC_IN_RC(mips_flags));
3392 match(RegFlags);
3394 format %{ "EFLAGS" %}
3395 interface(REG_INTER);
3396 %}
3398 //----------Simple Operands----------------------------------------------------
3399 //TODO: Should we need to define some more special immediate number ?
3400 // Immediate Operands
3401 // Integer Immediate
3402 operand immI() %{
// Generic 32-bit integer immediate (any ConI value).
3403 match(ConI);
3404 //TODO: should not match immI8 here LEE
3405 match(immI8);
3407 op_cost(20);
3408 format %{ %}
3409 interface(CONST_INTER);
3410 %}
3412 // Long Immediate 8-bit
3413 operand immL8()
3414 %{
3415 predicate(-0x80L <= n->get_long() && n->get_long() < 0x80L);
3416 match(ConL);
3418 op_cost(5);
3419 format %{ %}
3420 interface(CONST_INTER);
3421 %}
3423 // Constant for test vs zero
3424 operand immI0() %{
3425 predicate(n->get_int() == 0);
3426 match(ConI);
3428 op_cost(0);
3429 format %{ %}
3430 interface(CONST_INTER);
3431 %}
3433 // Constant for increment
3434 operand immI1() %{
3435 predicate(n->get_int() == 1);
3436 match(ConI);
3438 op_cost(0);
3439 format %{ %}
3440 interface(CONST_INTER);
3441 %}
3443 // Constant for decrement
3444 operand immI_M1() %{
3445 predicate(n->get_int() == -1);
3446 match(ConI);
3448 op_cost(0);
3449 format %{ %}
3450 interface(CONST_INTER);
3451 %}
3453 operand immI_MaxI() %{
3454 predicate(n->get_int() == 2147483647);
3455 match(ConI);
3457 op_cost(0);
3458 format %{ %}
3459 interface(CONST_INTER);
3460 %}
3462 // Valid scale values for addressing modes
3463 operand immI2() %{
3464 predicate(0 <= n->get_int() && (n->get_int() <= 3));
3465 match(ConI);
3467 format %{ %}
3468 interface(CONST_INTER);
3469 %}
3471 operand immI8() %{
3472 predicate((-128 <= n->get_int()) && (n->get_int() <= 127));
3473 match(ConI);
3475 op_cost(5);
3476 format %{ %}
3477 interface(CONST_INTER);
3478 %}
3480 operand immI16() %{
3481 predicate((-32768 <= n->get_int()) && (n->get_int() <= 32767));
3482 match(ConI);
3484 op_cost(10);
3485 format %{ %}
3486 interface(CONST_INTER);
3487 %}
3489 // Constant for long shifts
3490 operand immI_32() %{
3491 predicate( n->get_int() == 32 );
3492 match(ConI);
3494 op_cost(0);
3495 format %{ %}
3496 interface(CONST_INTER);
3497 %}
3499 operand immI_63() %{
3500 predicate( n->get_int() == 63 );
3501 match(ConI);
3503 op_cost(0);
3504 format %{ %}
3505 interface(CONST_INTER);
3506 %}
3508 operand immI_0_31() %{
3509 predicate( n->get_int() >= 0 && n->get_int() <= 31 );
3510 match(ConI);
3512 op_cost(0);
3513 format %{ %}
3514 interface(CONST_INTER);
3515 %}
3517 // Operand for non-negative integer mask
3518 operand immI_nonneg_mask() %{
// Non-negative int constant of the form (1<<k)-1; is_int_mask() returns
// the mask width, or -1 when the value is not a contiguous low-bit mask.
3519 predicate( (n->get_int() >= 0) && (Assembler::is_int_mask(n->get_int()) != -1) );
3520 match(ConI);
3522 op_cost(0);
3523 format %{ %}
3524 interface(CONST_INTER);
3525 %}
3527 operand immI_32_63() %{
3528 predicate( n->get_int() >= 32 && n->get_int() <= 63 );
3529 match(ConI);
3530 op_cost(0);
3532 format %{ %}
3533 interface(CONST_INTER);
3534 %}
3536 operand immI16_sub() %{
// Int immediate whose NEGATION fits in a signed 16-bit field
// (range [-32767, 32768]), so a subtract can be emitted as addiu(-imm).
3537 predicate((-32767 <= n->get_int()) && (n->get_int() <= 32768));
3538 match(ConI);
3540 op_cost(10);
3541 format %{ %}
3542 interface(CONST_INTER);
3543 %}
3545 operand immI_0_32767() %{
3546 predicate( n->get_int() >= 0 && n->get_int() <= 32767 );
3547 match(ConI);
3548 op_cost(0);
3550 format %{ %}
3551 interface(CONST_INTER);
3552 %}
3554 operand immI_0_65535() %{
3555 predicate( n->get_int() >= 0 && n->get_int() <= 65535 );
3556 match(ConI);
3557 op_cost(0);
3559 format %{ %}
3560 interface(CONST_INTER);
3561 %}
3563 operand immI_1() %{
3564 predicate( n->get_int() == 1 );
3565 match(ConI);
3567 op_cost(0);
3568 format %{ %}
3569 interface(CONST_INTER);
3570 %}
3572 operand immI_2() %{
3573 predicate( n->get_int() == 2 );
3574 match(ConI);
3576 op_cost(0);
3577 format %{ %}
3578 interface(CONST_INTER);
3579 %}
3581 operand immI_3() %{
3582 predicate( n->get_int() == 3 );
3583 match(ConI);
3585 op_cost(0);
3586 format %{ %}
3587 interface(CONST_INTER);
3588 %}
3590 operand immI_7() %{
3591 predicate( n->get_int() == 7 );
3592 match(ConI);
3594 format %{ %}
3595 interface(CONST_INTER);
3596 %}
3598 // Immediates for special shifts (sign extend)
3600 // Constants for increment
3601 operand immI_16() %{
3602 predicate( n->get_int() == 16 );
3603 match(ConI);
3605 format %{ %}
3606 interface(CONST_INTER);
3607 %}
3609 operand immI_24() %{
3610 predicate( n->get_int() == 24 );
3611 match(ConI);
3613 format %{ %}
3614 interface(CONST_INTER);
3615 %}
3617 // Constant for byte-wide masking
3618 operand immI_255() %{
3619 predicate( n->get_int() == 255 );
3620 match(ConI);
3622 op_cost(0);
3623 format %{ %}
3624 interface(CONST_INTER);
3625 %}
3627 operand immI_65535() %{
3628 predicate( n->get_int() == 65535 );
3629 match(ConI);
3631 op_cost(5);
3632 format %{ %}
3633 interface(CONST_INTER);
3634 %}
3636 operand immI_65536() %{
3637 predicate( n->get_int() == 65536 );
3638 match(ConI);
3640 op_cost(5);
3641 format %{ %}
3642 interface(CONST_INTER);
3643 %}
3645 operand immI_M65536() %{
3646 predicate( n->get_int() == -65536 );
3647 match(ConI);
3649 op_cost(5);
3650 format %{ %}
3651 interface(CONST_INTER);
3652 %}
3654 // Pointer Immediate
3655 operand immP() %{
3656 match(ConP);
3658 op_cost(10);
3659 format %{ %}
3660 interface(CONST_INTER);
3661 %}
3663 // NULL Pointer Immediate
3664 operand immP0() %{
3665 predicate( n->get_ptr() == 0 );
3666 match(ConP);
3667 op_cost(0);
3669 format %{ %}
3670 interface(CONST_INTER);
3671 %}
3673 // Pointer Immediate: 64-bit
3674 operand immP_set() %{
3675 match(ConP);
3677 op_cost(5);
3678 // formats are generated automatically for constants and base registers
3679 format %{ %}
3680 interface(CONST_INTER);
3681 %}
3683 // Pointer Immediate: 64-bit
3684 operand immP_load() %{
// 64-bit pointer constant that is either an oop (must be relocatable) or
// expensive to synthesize inline (> 3 instructions), so it is loaded
// from the constant table instead of materialized with set64.
3685 predicate(n->bottom_type()->isa_oop_ptr() || (MacroAssembler::insts_for_set64(n->get_ptr()) > 3));
3686 match(ConP);
3688 op_cost(5);
3689 // formats are generated automatically for constants and base registers
3690 format %{ %}
3691 interface(CONST_INTER);
3692 %}
3694 // Pointer Immediate: 64-bit
3695 operand immP_no_oop_cheap() %{
3696 predicate(!n->bottom_type()->isa_oop_ptr() && (MacroAssembler::insts_for_set64(n->get_ptr()) <= 3));
3697 match(ConP);
3699 op_cost(5);
3700 // formats are generated automatically for constants and base registers
3701 format %{ %}
3702 interface(CONST_INTER);
3703 %}
3705 // Pointer for polling page
3706 operand immP_poll() %{
// Non-null pointer constant equal to the safepoint polling page address,
// matched specially so safepoint polls can use a dedicated encoding.
3707 predicate(n->get_ptr() != 0 && n->get_ptr() == (intptr_t)os::get_polling_page());
3708 match(ConP);
3709 op_cost(5);
3711 format %{ %}
3712 interface(CONST_INTER);
3713 %}
3715 // Pointer Immediate
3716 operand immN() %{
3717 match(ConN);
3719 op_cost(10);
3720 format %{ %}
3721 interface(CONST_INTER);
3722 %}
3724 operand immNKlass() %{
3725 match(ConNKlass);
3727 op_cost(10);
3728 format %{ %}
3729 interface(CONST_INTER);
3730 %}
3732 // NULL Pointer Immediate
3733 operand immN0() %{
3734 predicate(n->get_narrowcon() == 0);
3735 match(ConN);
3737 op_cost(5);
3738 format %{ %}
3739 interface(CONST_INTER);
3740 %}
3742 // Long Immediate
3743 operand immL() %{
3744 match(ConL);
3746 op_cost(20);
3747 format %{ %}
3748 interface(CONST_INTER);
3749 %}
3751 // Long Immediate zero
3752 operand immL0() %{
3753 predicate( n->get_long() == 0L );
3754 match(ConL);
3755 op_cost(0);
3757 format %{ %}
3758 interface(CONST_INTER);
3759 %}
3761 operand immL7() %{
3762 predicate( n->get_long() == 7L );
3763 match(ConL);
3764 op_cost(0);
3766 format %{ %}
3767 interface(CONST_INTER);
3768 %}
3770 operand immL_M1() %{
3771 predicate( n->get_long() == -1L );
3772 match(ConL);
3773 op_cost(0);
3775 format %{ %}
3776 interface(CONST_INTER);
3777 %}
3779 // bit 0..2 zero
3780 operand immL_M8() %{
3781 predicate( n->get_long() == -8L );
3782 match(ConL);
3783 op_cost(0);
3785 format %{ %}
3786 interface(CONST_INTER);
3787 %}
3789 // bit 2 zero
3790 operand immL_M5() %{
3791 predicate( n->get_long() == -5L );
3792 match(ConL);
3793 op_cost(0);
3795 format %{ %}
3796 interface(CONST_INTER);
3797 %}
3799 // bit 1..2 zero
3800 operand immL_M7() %{
3801 predicate( n->get_long() == -7L );
3802 match(ConL);
3803 op_cost(0);
3805 format %{ %}
3806 interface(CONST_INTER);
3807 %}
3809 // bit 0..1 zero
3810 operand immL_M4() %{
3811 predicate( n->get_long() == -4L );
3812 match(ConL);
3813 op_cost(0);
3815 format %{ %}
3816 interface(CONST_INTER);
3817 %}
3819 // bit 3..6 zero
3820 operand immL_M121() %{
3821 predicate( n->get_long() == -121L );
3822 match(ConL);
3823 op_cost(0);
3825 format %{ %}
3826 interface(CONST_INTER);
3827 %}
3829 // Long immediate from 0 to 127.
3830 // Used for a shorter form of long mul by 10.
3831 operand immL_127() %{
3832 predicate((0 <= n->get_long()) && (n->get_long() <= 127));
3833 match(ConL);
3834 op_cost(0);
3836 format %{ %}
3837 interface(CONST_INTER);
3838 %}
3840 // Operand for non-negative long mask
3841 operand immL_nonneg_mask() %{
// Non-negative long constant of the form (1<<k)-1; is_jlong_mask()
// returns the mask width, or -1 when not a contiguous low-bit mask.
3842 predicate( (n->get_long() >= 0) && (Assembler::is_jlong_mask(n->get_long()) != -1) );
3843 match(ConL);
3845 op_cost(0);
3846 format %{ %}
3847 interface(CONST_INTER);
3848 %}
3850 operand immL_0_65535() %{
3851 predicate( n->get_long() >= 0 && n->get_long() <= 65535 );
3852 match(ConL);
3853 op_cost(0);
3855 format %{ %}
3856 interface(CONST_INTER);
3857 %}
3859 // Long Immediate: cheap (materialize in <= 3 instructions)
3860 operand immL_cheap() %{
// Long constant that set64 can materialize in at most 3 instructions;
// the complement of immL_expensive below.
3861 predicate(MacroAssembler::insts_for_set64(n->get_long()) <= 3);
3862 match(ConL);
3863 op_cost(0);
3865 format %{ %}
3866 interface(CONST_INTER);
3867 %}
3869 // Long Immediate: expensive (materialize in > 3 instructions)
3870 operand immL_expensive() %{
3871 predicate(MacroAssembler::insts_for_set64(n->get_long()) > 3);
3872 match(ConL);
3873 op_cost(0);
3875 format %{ %}
3876 interface(CONST_INTER);
3877 %}
3879 operand immL16() %{
3880 predicate((-32768 <= n->get_long()) && (n->get_long() <= 32767));
3881 match(ConL);
3883 op_cost(10);
3884 format %{ %}
3885 interface(CONST_INTER);
3886 %}
3888 operand immL16_sub() %{
// Long immediate whose NEGATION fits in a signed 16-bit field
// (range [-32767, 32768]), so a subtract can be emitted as daddiu(-imm).
3889 predicate((-32767 <= n->get_long()) && (n->get_long() <= 32768));
3890 match(ConL);
3892 op_cost(10);
3893 format %{ %}
3894 interface(CONST_INTER);
3895 %}
3897 // Long Immediate: low 32-bit mask
3898 operand immL_32bits() %{
3899 predicate(n->get_long() == 0xFFFFFFFFL);
3900 match(ConL);
3901 op_cost(20);
3903 format %{ %}
3904 interface(CONST_INTER);
3905 %}
3907 // Long Immediate 32-bit signed
3908 operand immL32()
3909 %{
// Long constant representable as a sign-extended 32-bit value
// (round-trips through a cast to int unchanged).
3910 predicate(n->get_long() == (int) (n->get_long()));
3911 match(ConL);
3913 op_cost(15);
3914 format %{ %}
3915 interface(CONST_INTER);
3916 %}
3919 //single-precision floating-point zero
3920 operand immF0() %{
3921 predicate(jint_cast(n->getf()) == 0);
3922 match(ConF);
3924 op_cost(5);
3925 format %{ %}
3926 interface(CONST_INTER);
3927 %}
3929 //single-precision floating-point immediate
3930 operand immF() %{
3931 match(ConF);
3933 op_cost(20);
3934 format %{ %}
3935 interface(CONST_INTER);
3936 %}
3938 //double-precision floating-point zero
3939 operand immD0() %{
3940 predicate(jlong_cast(n->getd()) == 0);
3941 match(ConD);
3943 op_cost(5);
3944 format %{ %}
3945 interface(CONST_INTER);
3946 %}
3948 //double-precision floating-point immediate
3949 operand immD() %{
3950 match(ConD);
3952 op_cost(20);
3953 format %{ %}
3954 interface(CONST_INTER);
3955 %}
3957 // Register Operands
3958 // Integer Register
3959 operand mRegI() %{
// Generic 32-bit integer register operand (any register in int_reg).
3960 constraint(ALLOC_IN_RC(int_reg));
3961 match(RegI);
3963 format %{ %}
3964 interface(REG_INTER);
3965 %}
3967 operand no_Ax_mRegI() %{
3968 constraint(ALLOC_IN_RC(no_Ax_int_reg));
3969 match(RegI);
3970 match(mRegI);
3972 format %{ %}
3973 interface(REG_INTER);
3974 %}
3976 operand mS0RegI() %{
3977 constraint(ALLOC_IN_RC(s0_reg));
3978 match(RegI);
3979 match(mRegI);
3981 format %{ "S0" %}
3982 interface(REG_INTER);
3983 %}
3985 operand mS1RegI() %{
3986 constraint(ALLOC_IN_RC(s1_reg));
3987 match(RegI);
3988 match(mRegI);
3990 format %{ "S1" %}
3991 interface(REG_INTER);
3992 %}
3994 operand mS2RegI() %{
3995 constraint(ALLOC_IN_RC(s2_reg));
3996 match(RegI);
3997 match(mRegI);
3999 format %{ "S2" %}
4000 interface(REG_INTER);
4001 %}
4003 operand mS3RegI() %{
4004 constraint(ALLOC_IN_RC(s3_reg));
4005 match(RegI);
4006 match(mRegI);
4008 format %{ "S3" %}
4009 interface(REG_INTER);
4010 %}
4012 operand mS4RegI() %{
4013 constraint(ALLOC_IN_RC(s4_reg));
4014 match(RegI);
4015 match(mRegI);
4017 format %{ "S4" %}
4018 interface(REG_INTER);
4019 %}
4021 operand mS5RegI() %{
4022 constraint(ALLOC_IN_RC(s5_reg));
4023 match(RegI);
4024 match(mRegI);
4026 format %{ "S5" %}
4027 interface(REG_INTER);
4028 %}
4030 operand mS6RegI() %{
4031 constraint(ALLOC_IN_RC(s6_reg));
4032 match(RegI);
4033 match(mRegI);
4035 format %{ "S6" %}
4036 interface(REG_INTER);
4037 %}
4039 operand mS7RegI() %{
4040 constraint(ALLOC_IN_RC(s7_reg));
4041 match(RegI);
4042 match(mRegI);
4044 format %{ "S7" %}
4045 interface(REG_INTER);
4046 %}
4049 operand mT0RegI() %{
4050 constraint(ALLOC_IN_RC(t0_reg));
4051 match(RegI);
4052 match(mRegI);
4054 format %{ "T0" %}
4055 interface(REG_INTER);
4056 %}
4058 operand mT1RegI() %{
4059 constraint(ALLOC_IN_RC(t1_reg));
4060 match(RegI);
4061 match(mRegI);
4063 format %{ "T1" %}
4064 interface(REG_INTER);
4065 %}
4067 operand mT2RegI() %{
4068 constraint(ALLOC_IN_RC(t2_reg));
4069 match(RegI);
4070 match(mRegI);
4072 format %{ "T2" %}
4073 interface(REG_INTER);
4074 %}
4076 operand mT3RegI() %{
4077 constraint(ALLOC_IN_RC(t3_reg));
4078 match(RegI);
4079 match(mRegI);
4081 format %{ "T3" %}
4082 interface(REG_INTER);
4083 %}
4085 operand mT8RegI() %{
4086 constraint(ALLOC_IN_RC(t8_reg));
4087 match(RegI);
4088 match(mRegI);
4090 format %{ "T8" %}
4091 interface(REG_INTER);
4092 %}
4094 operand mT9RegI() %{
4095 constraint(ALLOC_IN_RC(t9_reg));
4096 match(RegI);
4097 match(mRegI);
4099 format %{ "T9" %}
4100 interface(REG_INTER);
4101 %}
4103 operand mA0RegI() %{
4104 constraint(ALLOC_IN_RC(a0_reg));
4105 match(RegI);
4106 match(mRegI);
4108 format %{ "A0" %}
4109 interface(REG_INTER);
4110 %}
4112 operand mA1RegI() %{
4113 constraint(ALLOC_IN_RC(a1_reg));
4114 match(RegI);
4115 match(mRegI);
4117 format %{ "A1" %}
4118 interface(REG_INTER);
4119 %}
4121 operand mA2RegI() %{
4122 constraint(ALLOC_IN_RC(a2_reg));
4123 match(RegI);
4124 match(mRegI);
4126 format %{ "A2" %}
4127 interface(REG_INTER);
4128 %}
4130 operand mA3RegI() %{
4131 constraint(ALLOC_IN_RC(a3_reg));
4132 match(RegI);
4133 match(mRegI);
4135 format %{ "A3" %}
4136 interface(REG_INTER);
4137 %}
4139 operand mA4RegI() %{
4140 constraint(ALLOC_IN_RC(a4_reg));
4141 match(RegI);
4142 match(mRegI);
4144 format %{ "A4" %}
4145 interface(REG_INTER);
4146 %}
4148 operand mA5RegI() %{
4149 constraint(ALLOC_IN_RC(a5_reg));
4150 match(RegI);
4151 match(mRegI);
4153 format %{ "A5" %}
4154 interface(REG_INTER);
4155 %}
4157 operand mA6RegI() %{
4158 constraint(ALLOC_IN_RC(a6_reg));
4159 match(RegI);
4160 match(mRegI);
4162 format %{ "A6" %}
4163 interface(REG_INTER);
4164 %}
4166 operand mA7RegI() %{
4167 constraint(ALLOC_IN_RC(a7_reg));
4168 match(RegI);
4169 match(mRegI);
4171 format %{ "A7" %}
4172 interface(REG_INTER);
4173 %}
4175 operand mV0RegI() %{
4176 constraint(ALLOC_IN_RC(v0_reg));
4177 match(RegI);
4178 match(mRegI);
4180 format %{ "V0" %}
4181 interface(REG_INTER);
4182 %}
4184 operand mV1RegI() %{
4185 constraint(ALLOC_IN_RC(v1_reg));
4186 match(RegI);
4187 match(mRegI);
4189 format %{ "V1" %}
4190 interface(REG_INTER);
4191 %}
4193 operand mRegN() %{
// Generic narrow-oop (compressed pointer) register operand; shares the
// int_reg class since a narrow oop fits in 32 bits.
4194 constraint(ALLOC_IN_RC(int_reg));
4195 match(RegN);
4197 format %{ %}
4198 interface(REG_INTER);
4199 %}
4201 operand t0_RegN() %{
4202 constraint(ALLOC_IN_RC(t0_reg));
4203 match(RegN);
4204 match(mRegN);
4206 format %{ %}
4207 interface(REG_INTER);
4208 %}
4210 operand t1_RegN() %{
4211 constraint(ALLOC_IN_RC(t1_reg));
4212 match(RegN);
4213 match(mRegN);
4215 format %{ %}
4216 interface(REG_INTER);
4217 %}
4219 operand t2_RegN() %{
4220 constraint(ALLOC_IN_RC(t2_reg));
4221 match(RegN);
4222 match(mRegN);
4224 format %{ %}
4225 interface(REG_INTER);
4226 %}
4228 operand t3_RegN() %{
4229 constraint(ALLOC_IN_RC(t3_reg));
4230 match(RegN);
4231 match(mRegN);
4233 format %{ %}
4234 interface(REG_INTER);
4235 %}
4237 operand t8_RegN() %{
4238 constraint(ALLOC_IN_RC(t8_reg));
4239 match(RegN);
4240 match(mRegN);
4242 format %{ %}
4243 interface(REG_INTER);
4244 %}
4246 operand t9_RegN() %{
4247 constraint(ALLOC_IN_RC(t9_reg));
4248 match(RegN);
4249 match(mRegN);
4251 format %{ %}
4252 interface(REG_INTER);
4253 %}
4255 operand a0_RegN() %{
4256 constraint(ALLOC_IN_RC(a0_reg));
4257 match(RegN);
4258 match(mRegN);
4260 format %{ %}
4261 interface(REG_INTER);
4262 %}
4264 operand a1_RegN() %{
4265 constraint(ALLOC_IN_RC(a1_reg));
4266 match(RegN);
4267 match(mRegN);
4269 format %{ %}
4270 interface(REG_INTER);
4271 %}
4273 operand a2_RegN() %{
4274 constraint(ALLOC_IN_RC(a2_reg));
4275 match(RegN);
4276 match(mRegN);
4278 format %{ %}
4279 interface(REG_INTER);
4280 %}
4282 operand a3_RegN() %{
4283 constraint(ALLOC_IN_RC(a3_reg));
4284 match(RegN);
4285 match(mRegN);
4287 format %{ %}
4288 interface(REG_INTER);
4289 %}
4291 operand a4_RegN() %{
4292 constraint(ALLOC_IN_RC(a4_reg));
4293 match(RegN);
4294 match(mRegN);
4296 format %{ %}
4297 interface(REG_INTER);
4298 %}
4300 operand a5_RegN() %{
4301 constraint(ALLOC_IN_RC(a5_reg));
4302 match(RegN);
4303 match(mRegN);
4305 format %{ %}
4306 interface(REG_INTER);
4307 %}
4309 operand a6_RegN() %{
4310 constraint(ALLOC_IN_RC(a6_reg));
4311 match(RegN);
4312 match(mRegN);
4314 format %{ %}
4315 interface(REG_INTER);
4316 %}
4318 operand a7_RegN() %{
4319 constraint(ALLOC_IN_RC(a7_reg));
4320 match(RegN);
4321 match(mRegN);
4323 format %{ %}
4324 interface(REG_INTER);
4325 %}
4327 operand s0_RegN() %{
4328 constraint(ALLOC_IN_RC(s0_reg));
4329 match(RegN);
4330 match(mRegN);
4332 format %{ %}
4333 interface(REG_INTER);
4334 %}
4336 operand s1_RegN() %{
4337 constraint(ALLOC_IN_RC(s1_reg));
4338 match(RegN);
4339 match(mRegN);
4341 format %{ %}
4342 interface(REG_INTER);
4343 %}
4345 operand s2_RegN() %{
4346 constraint(ALLOC_IN_RC(s2_reg));
4347 match(RegN);
4348 match(mRegN);
4350 format %{ %}
4351 interface(REG_INTER);
4352 %}
4354 operand s3_RegN() %{
4355 constraint(ALLOC_IN_RC(s3_reg));
4356 match(RegN);
4357 match(mRegN);
4359 format %{ %}
4360 interface(REG_INTER);
4361 %}
4363 operand s4_RegN() %{
4364 constraint(ALLOC_IN_RC(s4_reg));
4365 match(RegN);
4366 match(mRegN);
4368 format %{ %}
4369 interface(REG_INTER);
4370 %}
4372 operand s5_RegN() %{
4373 constraint(ALLOC_IN_RC(s5_reg));
4374 match(RegN);
4375 match(mRegN);
4377 format %{ %}
4378 interface(REG_INTER);
4379 %}
4381 operand s6_RegN() %{
4382 constraint(ALLOC_IN_RC(s6_reg));
4383 match(RegN);
4384 match(mRegN);
4386 format %{ %}
4387 interface(REG_INTER);
4388 %}
4390 operand s7_RegN() %{
4391 constraint(ALLOC_IN_RC(s7_reg));
4392 match(RegN);
4393 match(mRegN);
4395 format %{ %}
4396 interface(REG_INTER);
4397 %}
4399 operand v0_RegN() %{
4400 constraint(ALLOC_IN_RC(v0_reg));
4401 match(RegN);
4402 match(mRegN);
4404 format %{ %}
4405 interface(REG_INTER);
4406 %}
4408 operand v1_RegN() %{
4409 constraint(ALLOC_IN_RC(v1_reg));
4410 match(RegN);
4411 match(mRegN);
4413 format %{ %}
4414 interface(REG_INTER);
4415 %}
4417 // Pointer Register
4418 operand mRegP() %{
// Generic pointer register operand (any register in p_reg).
4419 constraint(ALLOC_IN_RC(p_reg));
4420 match(RegP);
4422 format %{ %}
4423 interface(REG_INTER);
4424 %}
4426 operand no_T8_mRegP() %{
4427 constraint(ALLOC_IN_RC(no_T8_p_reg));
4428 match(RegP);
4429 match(mRegP);
4431 format %{ %}
4432 interface(REG_INTER);
4433 %}
4435 operand s0_RegP()
4436 %{
4437 constraint(ALLOC_IN_RC(s0_long_reg));
4438 match(RegP);
4439 match(mRegP);
4440 match(no_T8_mRegP);
4442 format %{ %}
4443 interface(REG_INTER);
4444 %}
// Pointer (RegP) operands pinned to a single general-purpose register.
// Each operand constrains allocation to a one-register class so that an
// instruction can demand its pointer value in that specific register.
// All of them except t8_RegP also match no_T8_mRegP, so they remain usable
// where a T8-excluded pointer register is required.
operand s1_RegP()
%{
  constraint(ALLOC_IN_RC(s1_long_reg));
  match(RegP);
  match(mRegP);
  match(no_T8_mRegP);

  format %{ %}
  interface(REG_INTER);
%}

operand s2_RegP()
%{
  constraint(ALLOC_IN_RC(s2_long_reg));
  match(RegP);
  match(mRegP);
  match(no_T8_mRegP);

  format %{ %}
  interface(REG_INTER);
%}

operand s3_RegP()
%{
  constraint(ALLOC_IN_RC(s3_long_reg));
  match(RegP);
  match(mRegP);
  match(no_T8_mRegP);

  format %{ %}
  interface(REG_INTER);
%}

operand s4_RegP()
%{
  constraint(ALLOC_IN_RC(s4_long_reg));
  match(RegP);
  match(mRegP);
  match(no_T8_mRegP);

  format %{ %}
  interface(REG_INTER);
%}

operand s5_RegP()
%{
  constraint(ALLOC_IN_RC(s5_long_reg));
  match(RegP);
  match(mRegP);
  match(no_T8_mRegP);

  format %{ %}
  interface(REG_INTER);
%}

operand s6_RegP()
%{
  constraint(ALLOC_IN_RC(s6_long_reg));
  match(RegP);
  match(mRegP);
  match(no_T8_mRegP);

  format %{ %}
  interface(REG_INTER);
%}

operand s7_RegP()
%{
  constraint(ALLOC_IN_RC(s7_long_reg));
  match(RegP);
  match(mRegP);
  match(no_T8_mRegP);

  format %{ %}
  interface(REG_INTER);
%}

operand t0_RegP()
%{
  constraint(ALLOC_IN_RC(t0_long_reg));
  match(RegP);
  match(mRegP);
  match(no_T8_mRegP);

  format %{ %}
  interface(REG_INTER);
%}

operand t1_RegP()
%{
  constraint(ALLOC_IN_RC(t1_long_reg));
  match(RegP);
  match(mRegP);
  match(no_T8_mRegP);

  format %{ %}
  interface(REG_INTER);
%}

operand t2_RegP()
%{
  constraint(ALLOC_IN_RC(t2_long_reg));
  match(RegP);
  match(mRegP);
  match(no_T8_mRegP);

  format %{ %}
  interface(REG_INTER);
%}

operand t3_RegP()
%{
  constraint(ALLOC_IN_RC(t3_long_reg));
  match(RegP);
  match(mRegP);
  match(no_T8_mRegP);

  format %{ %}
  interface(REG_INTER);
%}

// T8 itself: deliberately does NOT match no_T8_mRegP.
operand t8_RegP()
%{
  constraint(ALLOC_IN_RC(t8_long_reg));
  match(RegP);
  match(mRegP);

  format %{ %}
  interface(REG_INTER);
%}

operand t9_RegP()
%{
  constraint(ALLOC_IN_RC(t9_long_reg));
  match(RegP);
  match(mRegP);
  match(no_T8_mRegP);

  format %{ %}
  interface(REG_INTER);
%}

operand a0_RegP()
%{
  constraint(ALLOC_IN_RC(a0_long_reg));
  match(RegP);
  match(mRegP);
  match(no_T8_mRegP);

  format %{ %}
  interface(REG_INTER);
%}

operand a1_RegP()
%{
  constraint(ALLOC_IN_RC(a1_long_reg));
  match(RegP);
  match(mRegP);
  match(no_T8_mRegP);

  format %{ %}
  interface(REG_INTER);
%}

operand a2_RegP()
%{
  constraint(ALLOC_IN_RC(a2_long_reg));
  match(RegP);
  match(mRegP);
  match(no_T8_mRegP);

  format %{ %}
  interface(REG_INTER);
%}

operand a3_RegP()
%{
  constraint(ALLOC_IN_RC(a3_long_reg));
  match(RegP);
  match(mRegP);
  match(no_T8_mRegP);

  format %{ %}
  interface(REG_INTER);
%}

operand a4_RegP()
%{
  constraint(ALLOC_IN_RC(a4_long_reg));
  match(RegP);
  match(mRegP);
  match(no_T8_mRegP);

  format %{ %}
  interface(REG_INTER);
%}

operand a5_RegP()
%{
  constraint(ALLOC_IN_RC(a5_long_reg));
  match(RegP);
  match(mRegP);
  match(no_T8_mRegP);

  format %{ %}
  interface(REG_INTER);
%}

operand a6_RegP()
%{
  constraint(ALLOC_IN_RC(a6_long_reg));
  match(RegP);
  match(mRegP);
  match(no_T8_mRegP);

  format %{ %}
  interface(REG_INTER);
%}

operand a7_RegP()
%{
  constraint(ALLOC_IN_RC(a7_long_reg));
  match(RegP);
  match(mRegP);
  match(no_T8_mRegP);

  format %{ %}
  interface(REG_INTER);
%}

operand v0_RegP()
%{
  constraint(ALLOC_IN_RC(v0_long_reg));
  match(RegP);
  match(mRegP);
  match(no_T8_mRegP);

  format %{ %}
  interface(REG_INTER);
%}

operand v1_RegP()
%{
  constraint(ALLOC_IN_RC(v1_long_reg));
  match(RegP);
  match(mRegP);
  match(no_T8_mRegP);

  format %{ %}
  interface(REG_INTER);
%}
4699 /*
4700 operand mSPRegP(mRegP reg) %{
4701 constraint(ALLOC_IN_RC(sp_reg));
4702 match(reg);
4704 format %{ "SP" %}
4705 interface(REG_INTER);
4706 %}
4708 operand mFPRegP(mRegP reg) %{
4709 constraint(ALLOC_IN_RC(fp_reg));
4710 match(reg);
4712 format %{ "FP" %}
4713 interface(REG_INTER);
4714 %}
4715 */
// General long (64-bit integer) register operand: any register in long_reg.
operand mRegL() %{
  constraint(ALLOC_IN_RC(long_reg));
  match(RegL);

  format %{ %}
  interface(REG_INTER);
%}

// Long operands pinned to a single register (see the fixed RegP operands
// above for the same pattern applied to pointers).
operand v0RegL() %{
  constraint(ALLOC_IN_RC(v0_long_reg));
  match(RegL);
  match(mRegL);

  format %{ %}
  interface(REG_INTER);
%}

operand v1RegL() %{
  constraint(ALLOC_IN_RC(v1_long_reg));
  match(RegL);
  match(mRegL);

  format %{ %}
  interface(REG_INTER);
%}
// Long operand pinned to register A0.
operand a0RegL() %{
  constraint(ALLOC_IN_RC(a0_long_reg));
  match(RegL);
  match(mRegL);

  // Empty format, consistent with every other fixed-register long operand
  // in this file: the register name is printed from the allocated register.
  // (The original hard-coded "A0" here, inconsistent with its siblings.)
  format %{ %}
  interface(REG_INTER);
%}
// Remaining fixed-register long operands.
// NOTE(review): there are no s5RegL/s6RegL operands — S5/S6 appear to be
// reserved for other purposes (e.g. VM registers); confirm against the
// register definitions at the top of this file.
operand a1RegL() %{
  constraint(ALLOC_IN_RC(a1_long_reg));
  match(RegL);
  match(mRegL);

  format %{ %}
  interface(REG_INTER);
%}

operand a2RegL() %{
  constraint(ALLOC_IN_RC(a2_long_reg));
  match(RegL);
  match(mRegL);

  format %{ %}
  interface(REG_INTER);
%}

operand a3RegL() %{
  constraint(ALLOC_IN_RC(a3_long_reg));
  match(RegL);
  match(mRegL);

  format %{ %}
  interface(REG_INTER);
%}

operand t0RegL() %{
  constraint(ALLOC_IN_RC(t0_long_reg));
  match(RegL);
  match(mRegL);

  format %{ %}
  interface(REG_INTER);
%}

operand t1RegL() %{
  constraint(ALLOC_IN_RC(t1_long_reg));
  match(RegL);
  match(mRegL);

  format %{ %}
  interface(REG_INTER);
%}

operand t2RegL() %{
  constraint(ALLOC_IN_RC(t2_long_reg));
  match(RegL);
  match(mRegL);

  format %{ %}
  interface(REG_INTER);
%}

operand t3RegL() %{
  constraint(ALLOC_IN_RC(t3_long_reg));
  match(RegL);
  match(mRegL);

  format %{ %}
  interface(REG_INTER);
%}

operand t8RegL() %{
  constraint(ALLOC_IN_RC(t8_long_reg));
  match(RegL);
  match(mRegL);

  format %{ %}
  interface(REG_INTER);
%}

operand a4RegL() %{
  constraint(ALLOC_IN_RC(a4_long_reg));
  match(RegL);
  match(mRegL);

  format %{ %}
  interface(REG_INTER);
%}

operand a5RegL() %{
  constraint(ALLOC_IN_RC(a5_long_reg));
  match(RegL);
  match(mRegL);

  format %{ %}
  interface(REG_INTER);
%}

operand a6RegL() %{
  constraint(ALLOC_IN_RC(a6_long_reg));
  match(RegL);
  match(mRegL);

  format %{ %}
  interface(REG_INTER);
%}

operand a7RegL() %{
  constraint(ALLOC_IN_RC(a7_long_reg));
  match(RegL);
  match(mRegL);

  format %{ %}
  interface(REG_INTER);
%}

operand s0RegL() %{
  constraint(ALLOC_IN_RC(s0_long_reg));
  match(RegL);
  match(mRegL);

  format %{ %}
  interface(REG_INTER);
%}

operand s1RegL() %{
  constraint(ALLOC_IN_RC(s1_long_reg));
  match(RegL);
  match(mRegL);

  format %{ %}
  interface(REG_INTER);
%}

operand s2RegL() %{
  constraint(ALLOC_IN_RC(s2_long_reg));
  match(RegL);
  match(mRegL);

  format %{ %}
  interface(REG_INTER);
%}

operand s3RegL() %{
  constraint(ALLOC_IN_RC(s3_long_reg));
  match(RegL);
  match(mRegL);

  format %{ %}
  interface(REG_INTER);
%}

operand s4RegL() %{
  constraint(ALLOC_IN_RC(s4_long_reg));
  match(RegL);
  match(mRegL);

  format %{ %}
  interface(REG_INTER);
%}

operand s7RegL() %{
  constraint(ALLOC_IN_RC(s7_long_reg));
  match(RegL);
  match(mRegL);

  format %{ %}
  interface(REG_INTER);
%}
// Floating register operands
// Single-precision float register operand (any register in flt_reg).
operand regF() %{
  constraint(ALLOC_IN_RC(flt_reg));
  match(RegF);

  format %{ %}
  interface(REG_INTER);
%}

//Double Precision Floating register operands
// Double-precision float register operand (any register in dbl_reg).
operand regD() %{
  constraint(ALLOC_IN_RC(dbl_reg));
  match(RegD);

  format %{ %}
  interface(REG_INTER);
%}
//----------Memory Operands----------------------------------------------------
// Indirect Memory Operand: [reg], no index, no displacement.
operand indirect(mRegP reg) %{
  constraint(ALLOC_IN_RC(p_reg));
  match(reg);

  format %{ "[$reg] @ indirect" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0x0); /* NO_INDEX */
    scale(0x0);
    disp(0x0);
  %}
%}

// Indirect Memory Plus Short Offset Operand: [reg + imm8].
operand indOffset8(mRegP reg, immL8 off)
%{
  constraint(ALLOC_IN_RC(p_reg));
  match(AddP reg off);

  format %{ "[$reg + $off (8-bit)] @ indOffset8" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0x0); /* NO_INDEX */
    scale(0x0);
    disp($off);
  %}
%}

// Indirect Memory Times Scale Plus Index Register: [reg + lreg << scale].
operand indIndexScale(mRegP reg, mRegL lreg, immI2 scale)
%{
  constraint(ALLOC_IN_RC(p_reg));
  match(AddP reg (LShiftL lreg scale));

  op_cost(10);
  format %{"[$reg + $lreg << $scale] @ indIndexScale" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($lreg);
    scale($scale);
    disp(0x0);
  %}
%}
// [base + index + offset]
operand baseIndexOffset8(mRegP base, mRegL index, immL8 off)
%{
  constraint(ALLOC_IN_RC(p_reg));
  op_cost(5);
  match(AddP (AddP base index) off);

  format %{ "[$base + $index + $off (8-bit)] @ baseIndexOffset8" %}
  interface(MEMORY_INTER) %{
    base($base);
    index($index);
    scale(0x0);
    disp($off);
  %}
%}

// [base + index + offset], where the index is a 32-bit int widened to long.
operand baseIndexOffset8_convI2L(mRegP base, mRegI index, immL8 off)
%{
  constraint(ALLOC_IN_RC(p_reg));
  op_cost(5);
  match(AddP (AddP base (ConvI2L index)) off);

  format %{ "[$base + $index + $off (8-bit)] @ baseIndexOffset8_convI2L" %}
  interface(MEMORY_INTER) %{
    base($base);
    index($index);
    scale(0x0);
    disp($off);
  %}
%}

// Indirect Memory Times Scale Plus Index Register Plus Offset Operand
operand indIndexScaleOffset8(mRegP reg, immL8 off, mRegL lreg, immI2 scale)
%{
  constraint(ALLOC_IN_RC(p_reg));
  match(AddP (AddP reg (LShiftL lreg scale)) off);

  op_cost(10);
  format %{"[$reg + $off + $lreg << $scale] @ indIndexScaleOffset8" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($lreg);
    scale($scale);
    disp($off);
  %}
%}

// Same addressing mode with an int index widened to long.
operand indIndexScaleOffset8_convI2L(mRegP reg, immL8 off, mRegI ireg, immI2 scale)
%{
  constraint(ALLOC_IN_RC(p_reg));
  match(AddP (AddP reg (LShiftL (ConvI2L ireg) scale)) off);

  op_cost(10);
  format %{"[$reg + $off + $ireg << $scale] @ indIndexScaleOffset8_convI2L" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($ireg);
    scale($scale);
    disp($off);
  %}
%}

// [base + index<<scale + offset]
operand basePosIndexScaleOffset8(mRegP base, mRegI index, immL8 off, immI_0_31 scale)
%{
  constraint(ALLOC_IN_RC(p_reg));
  //predicate(n->in(2)->in(3)->in(1)->as_Type()->type()->is_long()->_lo >= 0);
  op_cost(10);
  match(AddP (AddP base (LShiftL (ConvI2L index) scale)) off);

  format %{ "[$base + $index << $scale + $off (8-bit)] @ basePosIndexScaleOffset8" %}
  interface(MEMORY_INTER) %{
    base($base);
    index($index);
    scale($scale);
    disp($off);
  %}
%}
// Indirect Memory Times Scale Plus Index Register Plus Offset Operand,
// addressing through a compressed oop base (only valid when the narrow
// oop shift is zero, i.e. DecodeN is a no-op besides the heap base).
operand indIndexScaleOffsetNarrow(mRegN reg, immL8 off, mRegL lreg, immI2 scale)
%{
  predicate(Universe::narrow_oop_shift() == 0);
  constraint(ALLOC_IN_RC(p_reg));
  match(AddP (AddP (DecodeN reg) (LShiftL lreg scale)) off);

  op_cost(10);
  format %{"[$reg + $off + $lreg << $scale] @ indIndexScaleOffsetNarrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($lreg);
    scale($scale);
    disp($off);
  %}
%}

// [base + index<<scale + offset] for compressed Oops
operand indPosIndexI2LScaleOffset8Narrow(mRegN base, mRegI index, immL8 off, immI_0_31 scale)
%{
  constraint(ALLOC_IN_RC(p_reg));
  //predicate(Universe::narrow_oop_shift() == 0 && n->in(2)->in(3)->in(1)->as_Type()->type()->is_long()->_lo >= 0);
  predicate(Universe::narrow_oop_shift() == 0);
  op_cost(10);
  match(AddP (AddP (DecodeN base) (LShiftL (ConvI2L index) scale)) off);

  format %{ "[$base + $index << $scale + $off (8-bit)] @ indPosIndexI2LScaleOffset8Narrow" %}
  interface(MEMORY_INTER) %{
    base($base);
    index($index);
    scale($scale);
    disp($off);
  %}
%}

//FIXME: I think it's better to limit the immI to be 16-bit at most!
// Indirect Memory Plus Long Offset Operand: [reg + imm32].
operand indOffset32(mRegP reg, immL32 off) %{
  constraint(ALLOC_IN_RC(p_reg));
  op_cost(20);
  match(AddP reg off);

  format %{ "[$reg + $off (32-bit)] @ indOffset32" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0x0); /* NO_INDEX */
    scale(0x0);
    disp($off);
  %}
%}
// Indirect Memory Plus Index Register: [addr + index].
operand indIndex(mRegP addr, mRegL index) %{
  constraint(ALLOC_IN_RC(p_reg));
  match(AddP addr index);

  op_cost(20);
  format %{"[$addr + $index] @ indIndex" %}
  interface(MEMORY_INTER) %{
    base($addr);
    index($index);
    scale(0x0);
    disp(0x0);
  %}
%}

// The following operands address through a compressed klass pointer; they
// are only valid when the narrow klass shift is zero.
operand indirectNarrowKlass(mRegN reg)
%{
  predicate(Universe::narrow_klass_shift() == 0);
  constraint(ALLOC_IN_RC(p_reg));
  op_cost(10);
  match(DecodeNKlass reg);

  format %{ "[$reg] @ indirectNarrowKlass" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0x0);
    scale(0x0);
    disp(0x0);
  %}
%}

operand indOffset8NarrowKlass(mRegN reg, immL8 off)
%{
  predicate(Universe::narrow_klass_shift() == 0);
  constraint(ALLOC_IN_RC(p_reg));
  op_cost(10);
  match(AddP (DecodeNKlass reg) off);

  format %{ "[$reg + $off (8-bit)] @ indOffset8NarrowKlass" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0x0);
    scale(0x0);
    disp($off);
  %}
%}

operand indOffset32NarrowKlass(mRegN reg, immL32 off)
%{
  predicate(Universe::narrow_klass_shift() == 0);
  constraint(ALLOC_IN_RC(p_reg));
  op_cost(10);
  match(AddP (DecodeNKlass reg) off);

  format %{ "[$reg + $off (32-bit)] @ indOffset32NarrowKlass" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0x0);
    scale(0x0);
    disp($off);
  %}
%}

operand indIndexOffsetNarrowKlass(mRegN reg, mRegL lreg, immL32 off)
%{
  predicate(Universe::narrow_klass_shift() == 0);
  constraint(ALLOC_IN_RC(p_reg));
  match(AddP (AddP (DecodeNKlass reg) lreg) off);

  op_cost(10);
  format %{"[$reg + $off + $lreg] @ indIndexOffsetNarrowKlass" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($lreg);
    scale(0x0);
    disp($off);
  %}
%}

operand indIndexNarrowKlass(mRegN reg, mRegL lreg)
%{
  predicate(Universe::narrow_klass_shift() == 0);
  constraint(ALLOC_IN_RC(p_reg));
  match(AddP (DecodeNKlass reg) lreg);

  op_cost(10);
  format %{"[$reg + $lreg] @ indIndexNarrowKlass" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($lreg);
    scale(0x0);
    disp(0x0);
  %}
%}
// Indirect Memory Operand through a compressed oop
// (valid only when the narrow oop shift is zero).
operand indirectNarrow(mRegN reg)
%{
  predicate(Universe::narrow_oop_shift() == 0);
  constraint(ALLOC_IN_RC(p_reg));
  op_cost(10);
  match(DecodeN reg);

  format %{ "[$reg] @ indirectNarrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0x0);
    scale(0x0);
    disp(0x0);
  %}
%}

// Indirect Memory Plus Short Offset Operand (compressed oop base).
operand indOffset8Narrow(mRegN reg, immL8 off)
%{
  predicate(Universe::narrow_oop_shift() == 0);
  constraint(ALLOC_IN_RC(p_reg));
  op_cost(10);
  match(AddP (DecodeN reg) off);

  format %{ "[$reg + $off (8-bit)] @ indOffset8Narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0x0);
    scale(0x0);
    disp($off);
  %}
%}

// Indirect Memory Plus Index Register Plus Offset Operand (compressed oop base).
operand indIndexOffset8Narrow(mRegN reg, mRegL lreg, immL8 off)
%{
  predicate(Universe::narrow_oop_shift() == 0);
  constraint(ALLOC_IN_RC(p_reg));
  match(AddP (AddP (DecodeN reg) lreg) off);

  op_cost(10);
  format %{"[$reg + $off + $lreg] @ indIndexOffset8Narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($lreg);
    scale(0x0);
    disp($off);
  %}
%}
//----------Load Long Memory Operands------------------------------------------
// The load-long idiom will use its address expression again after loading
// the first word of the long. If the load-long destination overlaps with
// registers used in the addressing expression, the 2nd half will be loaded
// from a clobbered address. Fix this by requiring that load-long use
// address registers that do not overlap with the load-long target.

// load-long support: pointer register usable as a load-long address base.
// The high op_cost discourages its use outside the load-long patterns.
operand load_long_RegP() %{
  constraint(ALLOC_IN_RC(p_reg));
  match(RegP);
  match(mRegP);
  op_cost(100);
  format %{ %}
  interface(REG_INTER);
%}

// Indirect Memory Operand Long: [reg].
operand load_long_indirect(load_long_RegP reg) %{
  constraint(ALLOC_IN_RC(p_reg));
  match(reg);

  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0x0);
    scale(0x0);
    disp(0x0);
  %}
%}

// Indirect Memory Plus Long Offset Operand: [reg + imm32].
// NOTE(review): unlike the other load-long operands this one declares no
// ALLOC_IN_RC constraint — confirm that is intentional.
operand load_long_indOffset32(load_long_RegP reg, immL32 off) %{
  match(AddP reg off);

  format %{ "[$reg + $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0x0);
    scale(0x0);
    disp($off);
  %}
%}
5300 //----------Conditional Branch Operands----------------------------------------
5301 // Comparison Op - This is the operation of the comparison, and is limited to
5302 // the following set of codes:
5303 // L (<), LE (<=), G (>), GE (>=), E (==), NE (!=)
5304 //
5305 // Other attributes of the comparison, such as unsignedness, are specified
5306 // by the comparison instruction that sets a condition code flags register.
5307 // That result is represented by a flags operand whose subtype is appropriate
5308 // to the unsignedness (etc.) of the comparison.
5309 //
5310 // Later, the instruction which matches both the Comparison Op (a Bool) and
5311 // the flags (produced by the Cmp) specifies the coding of the comparison op
5312 // by matching a specific subtype of Bool operand below, such as cmpOpU.
// Comparison Code: condition selector produced by a Bool node.
operand cmpOp() %{
  match(Bool);

  format %{ "" %}
  interface(COND_INTER) %{
    equal(0x01);
    not_equal(0x02);
    greater(0x03);
    greater_equal(0x04);
    less(0x05);
    less_equal(0x06);
    overflow(0x7);
    no_overflow(0x8);
  %}
%}


// Comparison Code
// Comparison Code, unsigned compare. Used by FP also, with
// C2 (unordered) turned into GT or LT already. The other bits
// C0 and C3 are turned into Carry & Zero flags.
// NOTE(review): the encoding values are identical to cmpOp above; the
// signed/unsigned distinction is made by the matching instruction.
operand cmpOpU() %{
  match(Bool);

  format %{ "" %}
  interface(COND_INTER) %{
    equal(0x01);
    not_equal(0x02);
    greater(0x03);
    greater_equal(0x04);
    less(0x05);
    less_equal(0x06);
    overflow(0x7);
    no_overflow(0x8);
  %}
%}
5352 /*
5353 // Comparison Code, unsigned compare. Used by FP also, with
5354 // C2 (unordered) turned into GT or LT already. The other bits
5355 // C0 and C3 are turned into Carry & Zero flags.
5356 operand cmpOpU() %{
5357 match(Bool);
5359 format %{ "" %}
5360 interface(COND_INTER) %{
5361 equal(0x4);
5362 not_equal(0x5);
5363 less(0x2);
5364 greater_equal(0x3);
5365 less_equal(0x6);
5366 greater(0x7);
5367 %}
5368 %}
5369 */
5370 /*
5371 // Comparison Code for FP conditional move
5372 operand cmpOp_fcmov() %{
5373 match(Bool);
5375 format %{ "" %}
5376 interface(COND_INTER) %{
5377 equal (0x01);
5378 not_equal (0x02);
5379 greater (0x03);
5380 greater_equal(0x04);
5381 less (0x05);
5382 less_equal (0x06);
5383 %}
5384 %}
// Comparison Code used in long compares
5387 operand cmpOp_commute() %{
5388 match(Bool);
5390 format %{ "" %}
5391 interface(COND_INTER) %{
5392 equal(0x4);
5393 not_equal(0x5);
5394 less(0xF);
5395 greater_equal(0xE);
5396 less_equal(0xD);
5397 greater(0xC);
5398 %}
5399 %}
5400 */
//----------Special Memory Operands--------------------------------------------
// Stack Slot Operand - This operand is used for loading and storing temporary
//                      values on the stack where a match requires a value to
//                      flow through memory.
// All five variants address [SP + slot-offset]; base 0x1d is the SP register
// encoding. They differ only in the ideal type of the spilled value.
operand stackSlotP(sRegP reg) %{
  constraint(ALLOC_IN_RC(stack_slots));
  // No match rule because this operand is only generated in matching
  op_cost(50);
  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base(0x1d);  // SP
    index(0x0);  // No Index
    scale(0x0);  // No Scale
    disp($reg);  // Stack Offset
  %}
%}

operand stackSlotI(sRegI reg) %{
  constraint(ALLOC_IN_RC(stack_slots));
  // No match rule because this operand is only generated in matching
  op_cost(50);
  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base(0x1d);  // SP
    index(0x0);  // No Index
    scale(0x0);  // No Scale
    disp($reg);  // Stack Offset
  %}
%}

operand stackSlotF(sRegF reg) %{
  constraint(ALLOC_IN_RC(stack_slots));
  // No match rule because this operand is only generated in matching
  op_cost(50);
  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base(0x1d);  // SP
    index(0x0);  // No Index
    scale(0x0);  // No Scale
    disp($reg);  // Stack Offset
  %}
%}

operand stackSlotD(sRegD reg) %{
  constraint(ALLOC_IN_RC(stack_slots));
  // No match rule because this operand is only generated in matching
  op_cost(50);
  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base(0x1d);  // SP
    index(0x0);  // No Index
    scale(0x0);  // No Scale
    disp($reg);  // Stack Offset
  %}
%}

operand stackSlotL(sRegL reg) %{
  constraint(ALLOC_IN_RC(stack_slots));
  // No match rule because this operand is only generated in matching
  op_cost(50);
  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base(0x1d);  // SP
    index(0x0);  // No Index
    scale(0x0);  // No Scale
    disp($reg);  // Stack Offset
  %}
%}
//------------------------OPERAND CLASSES--------------------------------------
// 'memory' groups every memory addressing-mode operand defined above so that
// instructions can match any of them with a single operand class.
//opclass memory( direct, indirect, indOffset16, indOffset32, indOffset32X, indIndexOffset );
opclass memory( indirect, indirectNarrow, indOffset8, indOffset32, indIndex, indIndexScale, load_long_indirect, load_long_indOffset32, baseIndexOffset8, baseIndexOffset8_convI2L, indIndexScaleOffset8, indIndexScaleOffset8_convI2L, basePosIndexScaleOffset8, indIndexScaleOffsetNarrow, indPosIndexI2LScaleOffset8Narrow, indOffset8Narrow, indIndexOffset8Narrow);
5477 //----------PIPELINE-----------------------------------------------------------
5478 // Rules which define the behavior of the target architectures pipeline.
pipeline %{

//----------ATTRIBUTES---------------------------------------------------------
attributes %{
  fixed_size_instructions;          // Fixed size instructions
  branch_has_delay_slot;            // branch have delay slot in gs2
  max_instructions_per_bundle = 1;  // 1 instruction per bundle
  max_bundles_per_cycle = 4;        // Up to 4 bundles per cycle
  bundle_unit_size=4;
  instruction_unit_size = 4;        // An instruction is 4 bytes long
  instruction_fetch_unit_size = 16; // The processor fetches one line
  instruction_fetch_units = 1;      // of 16 bytes

  // List of nop instructions
  nops( MachNop );
%}

//----------RESOURCES----------------------------------------------------------
// Resources are the functional units available to the machine:
// four decoders, two integer ALUs, two FPUs, one memory port and one
// branch unit.
resources(D1, D2, D3, D4, DECODE = D1 | D2 | D3| D4, ALU1, ALU2, ALU = ALU1 | ALU2, FPU1, FPU2, FPU = FPU1 | FPU2, MEM, BR);

//----------PIPELINE DESCRIPTION-----------------------------------------------
// Pipeline Description specifies the stages in the machine's pipeline

// IF: fetch
// ID: decode
// RD: read
// CA: calculate
// WB: write back
// CM: commit

pipe_desc(IF, ID, RD, CA, WB, CM);


//----------PIPELINE CLASSES---------------------------------------------------
// Pipeline Classes describe the stages in which input and output are
// referenced by the hardware pipeline.

//No.1 Integer ALU reg-reg operation : dst <-- reg1 op reg2
pipe_class ialu_regI_regI(mRegI dst, mRegI src1, mRegI src2) %{
  single_instruction;
  src1 : RD(read);
  src2 : RD(read);
  dst  : WB(write)+1;
  DECODE : ID;
  ALU    : CA;
%}

//No.19 Integer mult operation : dst <-- reg1 mult reg2
pipe_class ialu_mult(mRegI dst, mRegI src1, mRegI src2) %{
  src1 : RD(read);
  src2 : RD(read);
  dst  : WB(write)+5;
  DECODE : ID;
  ALU2   : CA;
%}

pipe_class mulL_reg_reg(mRegL dst, mRegL src1, mRegL src2) %{
  src1 : RD(read);
  src2 : RD(read);
  dst  : WB(write)+10;
  DECODE : ID;
  ALU2   : CA;
%}

//No.19 Integer div operation : dst <-- reg1 div reg2
pipe_class ialu_div(mRegI dst, mRegI src1, mRegI src2) %{
  src1 : RD(read);
  src2 : RD(read);
  dst  : WB(write)+10;
  DECODE : ID;
  ALU2   : CA;
%}

//No.19 Integer mod operation : dst <-- reg1 mod reg2
pipe_class ialu_mod(mRegI dst, mRegI src1, mRegI src2) %{
  instruction_count(2);
  src1 : RD(read);
  src2 : RD(read);
  dst  : WB(write)+10;
  DECODE : ID;
  ALU2   : CA;
%}

//No.15 Long ALU reg-reg operation : dst <-- reg1 op reg2
pipe_class ialu_regL_regL(mRegL dst, mRegL src1, mRegL src2) %{
  instruction_count(2);
  src1 : RD(read);
  src2 : RD(read);
  dst  : WB(write);
  DECODE : ID;
  ALU    : CA;
%}

//No.18 Long ALU reg-imm16 operation : dst <-- reg1 op imm16
pipe_class ialu_regL_imm16(mRegL dst, mRegL src) %{
  instruction_count(2);
  src : RD(read);
  dst : WB(write);
  DECODE : ID;
  ALU    : CA;
%}

//no.16 load Long from memory :
pipe_class ialu_loadL(mRegL dst, memory mem) %{
  instruction_count(2);
  mem : RD(read);
  dst : WB(write)+5;
  DECODE : ID;
  MEM    : RD;
%}

//No.17 Store Long to Memory :
pipe_class ialu_storeL(mRegL src, memory mem) %{
  instruction_count(2);
  mem : RD(read);
  src : RD(read);
  DECODE : ID;
  MEM    : RD;
%}

//No.2 Integer ALU reg-imm16 operation : dst <-- reg1 op imm16
pipe_class ialu_regI_imm16(mRegI dst, mRegI src) %{
  single_instruction;
  src : RD(read);
  dst : WB(write);
  DECODE : ID;
  ALU    : CA;
%}

//No.3 Integer move operation : dst <-- reg
pipe_class ialu_regI_mov(mRegI dst, mRegI src) %{
  src : RD(read);
  dst : WB(write);
  DECODE : ID;
  ALU    : CA;
%}

//No.4 No instructions : do nothing
pipe_class empty( ) %{
  instruction_count(0);
%}

//No.5 UnConditional branch :
pipe_class pipe_jump( label labl ) %{
  multiple_bundles;
  DECODE : ID;
  BR     : RD;
%}

//No.6 ALU Conditional branch :
pipe_class pipe_alu_branch(mRegI src1, mRegI src2, label labl ) %{
  multiple_bundles;
  src1 : RD(read);
  src2 : RD(read);
  DECODE : ID;
  BR     : RD;
%}

//no.7 load integer from memory :
pipe_class ialu_loadI(mRegI dst, memory mem) %{
  mem : RD(read);
  dst : WB(write)+3;
  DECODE : ID;
  MEM    : RD;
%}

//No.8 Store Integer to Memory :
pipe_class ialu_storeI(mRegI src, memory mem) %{
  mem : RD(read);
  src : RD(read);
  DECODE : ID;
  MEM    : RD;
%}


//No.10 Floating FPU reg-reg operation : dst <-- reg1 op reg2
pipe_class fpu_regF_regF(regF dst, regF src1, regF src2) %{
  src1 : RD(read);
  src2 : RD(read);
  dst  : WB(write);
  DECODE : ID;
  FPU    : CA;
%}

//No.22 Floating div operation : dst <-- reg1 div reg2
pipe_class fpu_div(regF dst, regF src1, regF src2) %{
  src1 : RD(read);
  src2 : RD(read);
  dst  : WB(write);
  DECODE : ID;
  FPU2   : CA;
%}

pipe_class fcvt_I2D(regD dst, mRegI src) %{
  src : RD(read);
  dst : WB(write);
  DECODE : ID;
  FPU1   : CA;
%}

pipe_class fcvt_D2I(mRegI dst, regD src) %{
  src : RD(read);
  dst : WB(write);
  DECODE : ID;
  FPU1   : CA;
%}

pipe_class pipe_mfc1(mRegI dst, regD src) %{
  src : RD(read);
  dst : WB(write);
  DECODE : ID;
  MEM    : RD;
%}

pipe_class pipe_mtc1(regD dst, mRegI src) %{
  src : RD(read);
  dst : WB(write);
  DECODE : ID;
  MEM    : RD(5);
%}

//No.23 Floating sqrt operation : dst <-- reg1 sqrt reg2
pipe_class fpu_sqrt(regF dst, regF src1, regF src2) %{
  multiple_bundles;
  src1 : RD(read);
  src2 : RD(read);
  dst  : WB(write);
  DECODE : ID;
  FPU2   : CA;
%}

//No.11 Load Floating from Memory :
pipe_class fpu_loadF(regF dst, memory mem) %{
  instruction_count(1);
  mem : RD(read);
  dst : WB(write)+3;
  DECODE : ID;
  MEM    : RD;
%}

//No.12 Store Floating to Memory :
pipe_class fpu_storeF(regF src, memory mem) %{
  instruction_count(1);
  mem : RD(read);
  src : RD(read);
  DECODE : ID;
  MEM    : RD;
%}

//No.13 FPU Conditional branch :
pipe_class pipe_fpu_branch(regF src1, regF src2, label labl ) %{
  multiple_bundles;
  src1 : RD(read);
  src2 : RD(read);
  DECODE : ID;
  BR     : RD;
%}

//No.14 Floating FPU reg operation : dst <-- op reg
pipe_class fpu1_regF(regF dst, regF src) %{
  src : RD(read);
  dst : WB(write);
  DECODE : ID;
  FPU    : CA;
%}

pipe_class long_memory_op() %{
  instruction_count(10); multiple_bundles; force_serialization;
  fixed_latency(30);
%}

pipe_class simple_call() %{
  instruction_count(10); multiple_bundles; force_serialization;
  fixed_latency(200);
  BR : RD;
%}

pipe_class call() %{
  instruction_count(10); multiple_bundles; force_serialization;
  fixed_latency(200);
%}

//FIXME:
//No.9 Pipe slow : for multi-instructions
pipe_class pipe_slow( ) %{
  instruction_count(20);
  force_serialization;
  multiple_bundles;
  fixed_latency(50);
%}

%}
5777 //----------INSTRUCTIONS-------------------------------------------------------
5778 //
5779 // match -- States which machine-independent subtree may be replaced
5780 // by this instruction.
5781 // ins_cost -- The estimated cost of this instruction is used by instruction
5782 // selection to identify a minimum cost tree of machine
5783 // instructions that matches a tree of machine-independent
5784 // instructions.
5785 // format -- A string providing the disassembly for this instruction.
5786 // The value of an instruction's operand may be inserted
5787 // by referring to it with a '$' prefix.
5788 // opcode -- Three instruction opcodes may be provided. These are referred
5789 // to within an encode class as $primary, $secondary, and $tertiary
5790 // respectively. The primary opcode is commonly used to
5791 // indicate the type of machine instruction, while secondary
5792 // and tertiary are often used for prefix options or addressing
5793 // modes.
5794 // ins_encode -- A list of encode classes with parameters. The encode class
5795 // name must have been defined in an 'enc_class' specification
5796 // in the encode section of the architecture description.
5799 // Load Integer
5800 instruct loadI(mRegI dst, memory mem) %{
5801 match(Set dst (LoadI mem));
5803 ins_cost(125);
5804 format %{ "lw $dst, $mem #@loadI" %}
5805 ins_encode (load_I_enc(dst, mem));
5806 ins_pipe( ialu_loadI );
5807 %}
5809 instruct loadI_convI2L(mRegL dst, memory mem) %{
5810 match(Set dst (ConvI2L (LoadI mem)));
5812 ins_cost(125);
5813 format %{ "lw $dst, $mem #@loadI_convI2L" %}
5814 ins_encode (load_I_enc(dst, mem));
5815 ins_pipe( ialu_loadI );
5816 %}
5818 // Load Integer (32 bit signed) to Byte (8 bit signed)
// (LoadI << 24) >> 24 is an int-to-byte narrowing idiom; match it and emit a
// single sign-extending byte load ("lb") instead of load + two shifts.
5819 instruct loadI2B(mRegI dst, memory mem, immI_24 twentyfour) %{
5820 match(Set dst (RShiftI (LShiftI (LoadI mem) twentyfour) twentyfour));
5822 ins_cost(125);
5823 format %{ "lb $dst, $mem\t# int -> byte #@loadI2B" %}
5824 ins_encode(load_B_enc(dst, mem));
5825 ins_pipe(ialu_loadI);
5826 %}
5828 // Load Integer (32 bit signed) to Unsigned Byte (8 bit UNsigned)
// (LoadI & 0xFF) narrows to an unsigned byte; emit a zero-extending "lbu".
5829 instruct loadI2UB(mRegI dst, memory mem, immI_255 mask) %{
5830 match(Set dst (AndI (LoadI mem) mask));
5832 ins_cost(125);
5833 format %{ "lbu $dst, $mem\t# int -> ubyte #@loadI2UB" %}
5834 ins_encode(load_UB_enc(dst, mem));
5835 ins_pipe(ialu_loadI);
5836 %}
5838 // Load Integer (32 bit signed) to Short (16 bit signed)
// (LoadI << 16) >> 16 narrows to a signed short; emit a single "lh".
5839 instruct loadI2S(mRegI dst, memory mem, immI_16 sixteen) %{
5840 match(Set dst (RShiftI (LShiftI (LoadI mem) sixteen) sixteen));
5842 ins_cost(125);
5843 format %{ "lh $dst, $mem\t# int -> short #@loadI2S" %}
5844 ins_encode(load_S_enc(dst, mem));
5845 ins_pipe(ialu_loadI);
5846 %}
5848 // Load Integer (32 bit signed) to Unsigned Short/Char (16 bit UNsigned)
// (LoadI & 0xFFFF) narrows to an unsigned short/char; emit "lhu".
5849 instruct loadI2US(mRegI dst, memory mem, immI_65535 mask) %{
5850 match(Set dst (AndI (LoadI mem) mask));
5852 ins_cost(125);
5853 format %{ "lhu $dst, $mem\t# int -> ushort/char #@loadI2US" %}
5854 ins_encode(load_C_enc(dst, mem));
5855 ins_pipe(ialu_loadI);
5856 %}
5858 // Load Long.
// 64-bit load via "ld".  The atomic-access predicate is disabled: a single
// "ld" is naturally atomic on this 64-bit target.
5859 instruct loadL(mRegL dst, memory mem) %{
5860 // predicate(!((LoadLNode*)n)->require_atomic_access());
5861 match(Set dst (LoadL mem));
5863 ins_cost(250);
5864 format %{ "ld $dst, $mem #@loadL" %}
5865 ins_encode(load_L_enc(dst, mem));
5866 ins_pipe( ialu_loadL );
5867 %}
5869 // Load Long - UNaligned
// Currently encoded with the same aligned "ld" as loadL (higher cost keeps it
// from being preferred); the FIXME below tracks an ldl/ldr-based alternative.
5870 instruct loadL_unaligned(mRegL dst, memory mem) %{
5871 match(Set dst (LoadL_unaligned mem));
5873 // FIXME: Jin: Need more effective ldl/ldr
5874 ins_cost(450);
5875 format %{ "ld $dst, $mem #@loadL_unaligned\n\t" %}
5876 ins_encode(load_L_enc(dst, mem));
5877 ins_pipe( ialu_loadL );
5878 %}
5880 // Store Long
// 64-bit store of a register value via "sd".
5881 instruct storeL_reg(memory mem, mRegL src) %{
5882 match(Set mem (StoreL mem src));
5884 ins_cost(200);
5885 format %{ "sd $mem, $src #@storeL_reg\n" %}
5886 ins_encode(store_L_reg_enc(mem, src));
5887 ins_pipe( ialu_storeL );
5888 %}
// Store of the long constant 0: cheaper than storeL_reg (lower ins_cost)
// because the zero register can be stored directly, no materialization.
5891 instruct storeL_immL0(memory mem, immL0 zero) %{
5892 match(Set mem (StoreL mem zero));
5894 ins_cost(180);
5895 format %{ "sd $mem, zero #@storeL_immL0" %}
5896 ins_encode(store_L_immL0_enc(mem, zero));
5897 ins_pipe( ialu_storeL );
5898 %}
5900 // Load Compressed Pointer
// 32-bit zero-extending load ("lwu") of a compressed oop.
5901 instruct loadN(mRegN dst, memory mem)
5902 %{
5903 match(Set dst (LoadN mem));
5905 ins_cost(125); // XXX
5906 format %{ "lwu $dst, $mem\t# compressed ptr @ loadN" %}
5907 ins_encode (load_N_enc(dst, mem));
5908 ins_pipe( ialu_loadI ); // XXX
5909 %}
5911 // Load Pointer
// Full-width (64-bit) pointer load via load_P_enc.
5912 instruct loadP(mRegP dst, memory mem) %{
5913 match(Set dst (LoadP mem));
5915 ins_cost(125);
5916 format %{ "ld $dst, $mem #@loadP" %}
5917 ins_encode (load_P_enc(dst, mem));
5918 ins_pipe( ialu_loadI );
5919 %}
5921 // Load Klass Pointer
// Same encoding as loadP; the "MOV" in the format string is cosmetic only —
// the emitted code is a 64-bit load via load_P_enc.
5922 instruct loadKlass(mRegP dst, memory mem) %{
5923 match(Set dst (LoadKlass mem));
5925 ins_cost(125);
5926 format %{ "MOV $dst,$mem @ loadKlass" %}
5927 ins_encode (load_P_enc(dst, mem));
5928 ins_pipe( ialu_loadI );
5929 %}
5931 // Load narrow Klass Pointer
// Compressed klass pointer: same 32-bit zero-extending load as loadN.
5932 instruct loadNKlass(mRegN dst, memory mem)
5933 %{
5934 match(Set dst (LoadNKlass mem));
5936 ins_cost(125); // XXX
5937 format %{ "lwu $dst, $mem\t# compressed klass ptr @ loadNKlass" %}
5938 ins_encode (load_N_enc(dst, mem));
5939 ins_pipe( ialu_loadI ); // XXX
5940 %}
5942 // Load Constant
// Materialize an arbitrary 32-bit int constant into a register.
5943 instruct loadConI(mRegI dst, immI src) %{
5944 match(Set dst src);
5946 ins_cost(150);
5947 format %{ "mov $dst, $src #@loadConI" %}
5948 ins_encode %{
5949 Register dst = $dst$$Register;
5950 int value = $src$$constant;
5951 __ move(dst, value);
5952 %}
5953 ins_pipe( ialu_regI_regI );
5954 %}
// Materialize an arbitrary 64-bit long constant; set64 picks the shortest
// instruction sequence for the value.
5957 instruct loadConL_set64(mRegL dst, immL src) %{
5958 match(Set dst src);
5959 ins_cost(120);
5960 format %{ "li $dst, $src @ loadConL_set64" %}
5961 ins_encode %{
5962 __ set64($dst$$Register, $src$$constant);
5963 %}
5964 ins_pipe(ialu_regL_regL);
5965 %}
5967 /*
5968 // Load long value from constant table (predicated by immL_expensive).
5969 instruct loadConL_load(mRegL dst, immL_expensive src) %{
5970 match(Set dst src);
5971 ins_cost(150);
5972 format %{ "ld $dst, $constantoffset[$constanttablebase] # load long $src from table @ loadConL_ldx" %}
5973 ins_encode %{
5974 int con_offset = $constantoffset($src);
5976 if (Assembler::is_simm16(con_offset)) {
5977 __ ld($dst$$Register, $constanttablebase, con_offset);
5978 } else {
5979 __ set64(AT, con_offset);
5980 if (UseLoongsonISA) {
5981 __ gsldx($dst$$Register, $constanttablebase, AT, 0);
5982 } else {
5983 __ daddu(AT, $constanttablebase, AT);
5984 __ ld($dst$$Register, AT, 0);
5985 }
5986 }
5987 %}
5988 ins_pipe(ialu_loadI);
5989 %}
5990 */
// Long constant that fits in a signed 16-bit immediate: a single
// "daddiu dst, zero, imm" suffices (cheaper than loadConL_set64).
5992 instruct loadConL16(mRegL dst, immL16 src) %{
5993 match(Set dst src);
5994 ins_cost(105);
5995 format %{ "mov $dst, $src #@loadConL16" %}
5996 ins_encode %{
5997 Register dst_reg = as_Register($dst$$reg);
5998 int value = $src$$constant;
5999 __ daddiu(dst_reg, R0, value);
6000 %}
6001 ins_pipe( ialu_regL_regL );
6002 %}
// Long constant 0: cheapest of the loadConL* family, copies the zero register.
6005 instruct loadConL0(mRegL dst, immL0 src) %{
6006 match(Set dst src);
6007 ins_cost(100);
6008 format %{ "mov $dst, zero #@loadConL0" %}
6009 ins_encode %{
6010 Register dst_reg = as_Register($dst$$reg);
6011 __ daddu(dst_reg, R0, R0);
6012 %}
6013 ins_pipe( ialu_regL_regL );
6014 %}
6016 // Load Range
// Load an array length (ideal LoadRange); same 32-bit load as loadI.
6017 instruct loadRange(mRegI dst, memory mem) %{
6018 match(Set dst (LoadRange mem));
6020 ins_cost(125);
6021 format %{ "MOV $dst,$mem @ loadRange" %}
6022 ins_encode(load_I_enc(dst, mem));
6023 ins_pipe( ialu_loadI );
6024 %}
// Store Pointer: 64-bit "sd" of a pointer register.
6027 instruct storeP(memory mem, mRegP src ) %{
6028 match(Set mem (StoreP mem src));
6030 ins_cost(125);
6031 format %{ "sd $src, $mem #@storeP" %}
6032 ins_encode(store_P_reg_enc(mem, src));
6033 ins_pipe( ialu_storeI );
6034 %}
6036 // Store NULL Pointer, mark word, or other simple pointer constant.
// Stores the zero register; the immP0 operand is matched but not passed to
// the encoding (nothing to materialize).
6037 instruct storeImmP0(memory mem, immP0 zero) %{
6038 match(Set mem (StoreP mem zero));
6040 ins_cost(125);
6041 format %{ "mov $mem, $zero #@storeImmP0" %}
6042 ins_encode(store_P_immP0_enc(mem));
6043 ins_pipe( ialu_storeI );
6044 %}
6046 // Store Byte Immediate
// Byte store of an 8-bit immediate; slightly dearer than storeB (register
// source) because the immediate must be materialized first.
6047 instruct storeImmB(memory mem, immI8 src) %{
6048 match(Set mem (StoreB mem src));
6050 ins_cost(150);
6051 format %{ "movb $mem, $src #@storeImmB" %}
6052 ins_encode(store_B_immI_enc(mem, src));
6053 ins_pipe( ialu_storeI );
6054 %}
6056 // Store Compressed Pointer
// 32-bit "sw" of a compressed oop register.
6057 instruct storeN(memory mem, mRegN src)
6058 %{
6059 match(Set mem (StoreN mem src));
6061 ins_cost(125); // XXX
6062 format %{ "sw $mem, $src\t# compressed ptr @ storeN" %}
6063 ins_encode(store_N_reg_enc(mem, src));
6064 ins_pipe( ialu_storeI );
6065 %}
// Compressed klass pointer store; identical encoding to storeN.
6067 instruct storeNKlass(memory mem, mRegN src)
6068 %{
6069 match(Set mem (StoreNKlass mem src));
6071 ins_cost(125); // XXX
6072 format %{ "sw $mem, $src\t# compressed klass ptr @ storeNKlass" %}
6073 ins_encode(store_N_reg_enc(mem, src));
6074 ins_pipe( ialu_storeI );
6075 %}
// Store compressed NULL: only legal when both narrow-oop and narrow-klass
// bases are NULL, so the encoded narrow value of NULL is exactly 0.
// NOTE(review): the format string mentions R12 but the encoding takes
// (mem, zero) — the text looks stale; confirm against storeImmN0_enc.
6077 instruct storeImmN0(memory mem, immN0 zero)
6078 %{
6079 predicate(Universe::narrow_oop_base() == NULL && Universe::narrow_klass_base() == NULL);
6080 match(Set mem (StoreN mem zero));
6082 ins_cost(125); // XXX
6083 format %{ "storeN0 $mem, R12\t# compressed ptr" %}
6084 ins_encode(storeImmN0_enc(mem, zero));
6085 ins_pipe( ialu_storeI );
6086 %}
6088 // Store Byte
// Byte store from a 32-bit register ("sb").
6089 instruct storeB(memory mem, mRegI src) %{
6090 match(Set mem (StoreB mem src));
6092 ins_cost(125);
6093 format %{ "sb $src, $mem #@storeB" %}
6094 ins_encode(store_B_reg_enc(mem, src));
6095 ins_pipe( ialu_storeI );
6096 %}
// StoreB of ConvL2I(src): "sb" only writes the low byte, so the long->int
// truncation is free and the same byte-store encoding is reused.
6098 instruct storeB_convL2I(memory mem, mRegL src) %{
6099 match(Set mem (StoreB mem (ConvL2I src)));
6101 ins_cost(125);
6102 format %{ "sb $src, $mem #@storeB_convL2I" %}
6103 ins_encode(store_B_reg_enc(mem, src));
6104 ins_pipe( ialu_storeI );
6105 %}
6107 // Load Byte (8bit signed)
// Sign-extending byte load ("lb").
6108 instruct loadB(mRegI dst, memory mem) %{
6109 match(Set dst (LoadB mem));
6111 ins_cost(125);
6112 format %{ "lb $dst, $mem #@loadB" %}
6113 ins_encode(load_B_enc(dst, mem));
6114 ins_pipe( ialu_loadI );
6115 %}
// LoadB + ConvI2L folded into one "lb" (same encoding as loadB).
6117 instruct loadB_convI2L(mRegL dst, memory mem) %{
6118 match(Set dst (ConvI2L (LoadB mem)));
6120 ins_cost(125);
6121 format %{ "lb $dst, $mem #@loadB_convI2L" %}
6122 ins_encode(load_B_enc(dst, mem));
6123 ins_pipe( ialu_loadI );
6124 %}
6126 // Load Byte (8bit UNsigned)
// Zero-extending byte load ("lbu").
6127 instruct loadUB(mRegI dst, memory mem) %{
6128 match(Set dst (LoadUB mem));
6130 ins_cost(125);
6131 format %{ "lbu $dst, $mem #@loadUB" %}
6132 ins_encode(load_UB_enc(dst, mem));
6133 ins_pipe( ialu_loadI );
6134 %}
// LoadUB + ConvI2L folded into one "lbu".
6136 instruct loadUB_convI2L(mRegL dst, memory mem) %{
6137 match(Set dst (ConvI2L (LoadUB mem)));
6139 ins_cost(125);
6140 format %{ "lbu $dst, $mem #@loadUB_convI2L" %}
6141 ins_encode(load_UB_enc(dst, mem));
6142 ins_pipe( ialu_loadI );
6143 %}
6145 // Load Short (16bit signed)
// Sign-extending halfword load ("lh").
6146 instruct loadS(mRegI dst, memory mem) %{
6147 match(Set dst (LoadS mem));
6149 ins_cost(125);
6150 format %{ "lh $dst, $mem #@loadS" %}
6151 ins_encode(load_S_enc(dst, mem));
6152 ins_pipe( ialu_loadI );
6153 %}
6155 // Load Short (16 bit signed) to Byte (8 bit signed)
// (LoadS << 24) >> 24 narrows a short load to a signed byte; emit "lb".
6156 instruct loadS2B(mRegI dst, memory mem, immI_24 twentyfour) %{
6157 match(Set dst (RShiftI (LShiftI (LoadS mem) twentyfour) twentyfour));
6159 ins_cost(125);
6160 format %{ "lb $dst, $mem\t# short -> byte #@loadS2B" %}
6161 ins_encode(load_B_enc(dst, mem));
6162 ins_pipe(ialu_loadI);
6163 %}
// LoadS + ConvI2L folded into one "lh".
6165 instruct loadS_convI2L(mRegL dst, memory mem) %{
6166 match(Set dst (ConvI2L (LoadS mem)));
6168 ins_cost(125);
6169 format %{ "lh $dst, $mem #@loadS_convI2L" %}
6170 ins_encode(load_S_enc(dst, mem));
6171 ins_pipe( ialu_loadI );
6172 %}
6174 // Store Integer Immediate
// Store an arbitrary int constant; dearer than storeI because the immediate
// must be materialized into a scratch register first.
6175 instruct storeImmI(memory mem, immI src) %{
6176 match(Set mem (StoreI mem src));
6178 ins_cost(150);
6179 format %{ "mov $mem, $src #@storeImmI" %}
6180 ins_encode(store_I_immI_enc(mem, src));
6181 ins_pipe( ialu_storeI );
6182 %}
6184 // Store Integer
// 32-bit "sw" from a register.
6185 instruct storeI(memory mem, mRegI src) %{
6186 match(Set mem (StoreI mem src));
6188 ins_cost(125);
6189 format %{ "sw $mem, $src #@storeI" %}
6190 ins_encode(store_I_reg_enc(mem, src));
6191 ins_pipe( ialu_storeI );
6192 %}
// StoreI of ConvL2I(src): "sw" writes only the low 32 bits, so the
// truncation is implicit and the plain int-store encoding is reused.
6194 instruct storeI_convL2I(memory mem, mRegL src) %{
6195 match(Set mem (StoreI mem (ConvL2I src)));
6197 ins_cost(125);
6198 format %{ "sw $mem, $src #@storeI_convL2I" %}
6199 ins_encode(store_I_reg_enc(mem, src));
6200 ins_pipe( ialu_storeI );
6201 %}
6203 // Load Float
// Single-precision FP load into an FP register.
6204 instruct loadF(regF dst, memory mem) %{
6205 match(Set dst (LoadF mem));
6207 ins_cost(150);
6208 format %{ "loadF $dst, $mem #@loadF" %}
6209 ins_encode(load_F_enc(dst, mem));
6210 ins_pipe( ialu_loadI );
6211 %}
// Materialize a pointer constant, with relocation records when the constant
// is a metadata (Klass*) or oop pointer so the GC/class-redefinition code can
// patch it.  li48 emits a fixed-length sequence that the relocation expects;
// plain (non-relocated) constants use the shortest set64 form instead.
6213 instruct loadConP_general(mRegP dst, immP src) %{
6214 match(Set dst src);
6216 ins_cost(120);
6217 format %{ "li $dst, $src #@loadConP_general" %}
6219 ins_encode %{
6220 Register dst = $dst$$Register;
6221 long* value = (long*)$src$$constant;
// Metadata constant: record it in the oop recorder and emit a relocated li48.
6223 if($src->constant_reloc() == relocInfo::metadata_type){
6224 int klass_index = __ oop_recorder()->find_index((Klass*)value);
6225 RelocationHolder rspec = metadata_Relocation::spec(klass_index);
6227 __ relocate(rspec);
6228 __ li48(dst, (long)value);
// Oop constant: same pattern with an oop relocation.
6229 }else if($src->constant_reloc() == relocInfo::oop_type){
6230 int oop_index = __ oop_recorder()->find_index((jobject)value);
6231 RelocationHolder rspec = oop_Relocation::spec(oop_index);
6233 __ relocate(rspec);
6234 __ li48(dst, (long)value);
6235 } else if ($src->constant_reloc() == relocInfo::none) {
6236 __ set64(dst, (long)value);
6237 }
// NOTE(review): any other reloc type falls through without writing dst —
// presumably unreachable for immP constants, but worth confirming.
6238 %}
6240 ins_pipe( ialu_regI_regI );
6241 %}
6243 /*
6244 instruct loadConP_load(mRegP dst, immP_load src) %{
6245 match(Set dst src);
6247 ins_cost(100);
6248 format %{ "ld $dst, [$constanttablebase + $constantoffset] load from constant table: ptr=$src @ loadConP_load" %}
6250 ins_encode %{
6252 int con_offset = $constantoffset($src);
6254 if (Assembler::is_simm16(con_offset)) {
6255 __ ld($dst$$Register, $constanttablebase, con_offset);
6256 } else {
6257 __ set64(AT, con_offset);
6258 if (UseLoongsonISA) {
6259 __ gsldx($dst$$Register, $constanttablebase, AT, 0);
6260 } else {
6261 __ daddu(AT, $constanttablebase, AT);
6262 __ ld($dst$$Register, AT, 0);
6263 }
6264 }
6265 %}
6267 ins_pipe(ialu_loadI);
6268 %}
6269 */
// Non-oop pointer constant that needs no relocation: cheapest general form,
// materialized with set64.
6271 instruct loadConP_no_oop_cheap(mRegP dst, immP_no_oop_cheap src) %{
6272 match(Set dst src);
6274 ins_cost(80);
6275 format %{ "li $dst, $src @ loadConP_no_oop_cheap" %}
6277 ins_encode %{
6278 __ set64($dst$$Register, $src$$constant);
6279 %}
6281 ins_pipe(ialu_regI_regI);
6282 %}
// Safepoint-polling page address constant.  Lowest cost of the loadConP
// family so the matcher prefers it for the poll address.
6285 instruct loadConP_poll(mRegP dst, immP_poll src) %{
6286 match(Set dst src);
6288 ins_cost(50);
6289 format %{ "li $dst, $src #@loadConP_poll" %}
6291 ins_encode %{
6292 Register dst = $dst$$Register;
6293 intptr_t value = (intptr_t)$src$$constant;
6295 __ set64(dst, (jlong)value);
6296 %}
6298 ins_pipe( ialu_regI_regI );
6299 %}
// NULL pointer constant: copy the hard-wired zero register.
6301 instruct loadConP0(mRegP dst, immP0 src)
6302 %{
6303 match(Set dst src);
6305 ins_cost(50);
6306 format %{ "mov $dst, R0\t# ptr" %}
6307 ins_encode %{
6308 Register dst_reg = $dst$$Register;
6309 __ daddu(dst_reg, R0, R0);
6310 %}
6311 ins_pipe( ialu_regI_regI );
6312 %}
// Compressed NULL constant: the narrow encoding of NULL is 0, so a move
// from R0 suffices.
6314 instruct loadConN0(mRegN dst, immN0 src) %{
6315 match(Set dst src);
6316 format %{ "move $dst, R0\t# compressed NULL ptr" %}
6317 ins_encode %{
6318 __ move($dst$$Register, R0);
6319 %}
6320 ins_pipe( ialu_regI_regI );
6321 %}
// Non-NULL compressed oop constant; set_narrow_oop records the oop and emits
// a patchable sequence.
6323 instruct loadConN(mRegN dst, immN src) %{
6324 match(Set dst src);
6326 ins_cost(125);
6327 format %{ "li $dst, $src\t# compressed ptr @ loadConN" %}
6328 ins_encode %{
6329 Register dst = $dst$$Register;
6330 __ set_narrow_oop(dst, (jobject)$src$$constant);
6331 %}
6332 ins_pipe( ialu_regI_regI ); // XXX
6333 %}
// Compressed klass pointer constant; patchable via set_narrow_klass.
6335 instruct loadConNKlass(mRegN dst, immNKlass src) %{
6336 match(Set dst src);
6338 ins_cost(125);
6339 format %{ "li $dst, $src\t# compressed klass ptr @ loadConNKlass" %}
6340 ins_encode %{
6341 Register dst = $dst$$Register;
6342 __ set_narrow_klass(dst, (Klass*)$src$$constant);
6343 %}
6344 ins_pipe( ialu_regI_regI ); // XXX
6345 %}
6347 //FIXME
6348 // Tail Call; Jump from runtime stub to Java code.
6349 // Also known as an 'interprocedural jump'.
6350 // Target of jump will eventually return to caller.
6351 // TailJump below removes the return address.
6352 instruct TailCalljmpInd(mRegP jump_target, mRegP method_oop) %{
6353 match(TailCall jump_target method_oop );
6354 ins_cost(300);
6355 format %{ "JMP $jump_target \t# @TailCalljmpInd" %}
6357 ins_encode %{
6358 Register target = $jump_target$$Register;
6359 Register oop = $method_oop$$Register;
6361 /* 2012/10/12 Jin: RA will be used in generate_forward_exception() */
6362 __ push(RA);
// Method oop is handed to the callee in S3; jr has a delay slot, filled
// with the trailing nop.
6364 __ move(S3, oop);
6365 __ jr(target);
6366 __ nop();
6367 %}
6369 ins_pipe( pipe_jump );
6370 %}
6372 // Create exception oop: created by stack-crawling runtime code.
6373 // Created exception is now available to this handler, and is setup
6374 // just prior to jumping to this handler. No code emitted.
6375 instruct CreateException( a0_RegP ex_oop )
6376 %{
6377 match(Set ex_oop (CreateEx));
6379 // use the following format syntax
6380 format %{ "# exception oop is in A0; no code emitted @CreateException" %}
6381 ins_encode %{
6382 /* Jin: X86 leaves this function empty */
// Only a block comment is emitted; the exception oop is already in A0.
6383 __ block_comment("CreateException is empty in X86/MIPS");
6384 %}
6385 ins_pipe( empty );
6386 // ins_pipe( pipe_jump );
6387 %}
6390 /* 2012/9/14 Jin: The mechanism of exception handling is clear now.
6392 - Common try/catch:
6393 2012/9/14 Jin: [stubGenerator_mips.cpp] generate_forward_exception()
6394 |- V0, V1 are created
6395 |- T9 <= SharedRuntime::exception_handler_for_return_address
6396 `- jr T9
6397 `- the caller's exception_handler
6398 `- jr OptoRuntime::exception_blob
6399 `- here
6400 - Rethrow(e.g. 'unwind'):
6401 * The callee:
6402 |- an exception is triggered during execution
6403 `- exits the callee method through RethrowException node
6404 |- The callee pushes exception_oop(T0) and exception_pc(RA)
6405 `- The callee jumps to OptoRuntime::rethrow_stub()
6406 * In OptoRuntime::rethrow_stub:
6407 |- The VM calls _rethrow_Java to determine the return address in the caller method
6408 `- exits the stub with tailjmpInd
6409 |- pops exception_oop(V0) and exception_pc(V1)
6410 `- jumps to the return address(usually an exception_handler)
6411 * The caller:
6412 `- continues processing the exception_blob with V0/V1
6413 */
6415 /*
6416 Disassembling OptoRuntime::rethrow_stub()
6418 ; locals
6419 0x2d3bf320: addiu sp, sp, 0xfffffff8
6420 0x2d3bf324: sw ra, 0x4(sp)
6421 0x2d3bf328: sw fp, 0x0(sp)
6422 0x2d3bf32c: addu fp, sp, zero
6423 0x2d3bf330: addiu sp, sp, 0xfffffff0
6424 0x2d3bf334: sw ra, 0x8(sp)
6425 0x2d3bf338: sw t0, 0x4(sp)
6426 0x2d3bf33c: sw sp, 0x0(sp)
6428 ; get_thread(S2)
6429 0x2d3bf340: addu s2, sp, zero
6430 0x2d3bf344: srl s2, s2, 12
6431 0x2d3bf348: sll s2, s2, 2
6432 0x2d3bf34c: lui at, 0x2c85
6433 0x2d3bf350: addu at, at, s2
6434 0x2d3bf354: lw s2, 0xffffcc80(at)
6436 0x2d3bf358: lw s0, 0x0(sp)
6437 0x2d3bf35c: sw s0, 0x118(s2) // last_sp -> thread
6438 0x2d3bf360: sw s2, 0xc(sp)
6440 ; OptoRuntime::rethrow_C(oopDesc* exception, JavaThread* thread, address ret_pc)
6441 0x2d3bf364: lw a0, 0x4(sp)
6442 0x2d3bf368: lw a1, 0xc(sp)
6443 0x2d3bf36c: lw a2, 0x8(sp)
6444 ;; Java_To_Runtime
6445 0x2d3bf370: lui t9, 0x2c34
6446 0x2d3bf374: addiu t9, t9, 0xffff8a48
6447 0x2d3bf378: jalr t9
6448 0x2d3bf37c: nop
6450 0x2d3bf380: addu s3, v0, zero ; S3: SharedRuntime::raw_exception_handler_for_return_address()
6452 0x2d3bf384: lw s0, 0xc(sp)
6453 0x2d3bf388: sw zero, 0x118(s0)
6454 0x2d3bf38c: sw zero, 0x11c(s0)
6455 0x2d3bf390: lw s1, 0x144(s0) ; ex_oop: S1
6456 0x2d3bf394: addu s2, s0, zero
6457 0x2d3bf398: sw zero, 0x144(s2)
6458 0x2d3bf39c: lw s0, 0x4(s2)
6459 0x2d3bf3a0: addiu s4, zero, 0x0
6460 0x2d3bf3a4: bne s0, s4, 0x2d3bf3d4
6461 0x2d3bf3a8: nop
6462 0x2d3bf3ac: addiu sp, sp, 0x10
6463 0x2d3bf3b0: addiu sp, sp, 0x8
6464 0x2d3bf3b4: lw ra, 0xfffffffc(sp)
6465 0x2d3bf3b8: lw fp, 0xfffffff8(sp)
6466 0x2d3bf3bc: lui at, 0x2b48
6467 0x2d3bf3c0: lw at, 0x100(at)
6469 ; tailjmpInd: Restores exception_oop & exception_pc
6470 0x2d3bf3c4: addu v1, ra, zero
6471 0x2d3bf3c8: addu v0, s1, zero
6472 0x2d3bf3cc: jr s3
6473 0x2d3bf3d0: nop
6474 ; Exception:
6475 0x2d3bf3d4: lui s1, 0x2cc8 ; generate_forward_exception()
6476 0x2d3bf3d8: addiu s1, s1, 0x40
6477 0x2d3bf3dc: addiu s2, zero, 0x0
6478 0x2d3bf3e0: addiu sp, sp, 0x10
6479 0x2d3bf3e4: addiu sp, sp, 0x8
6480 0x2d3bf3e8: lw ra, 0xfffffffc(sp)
6481 0x2d3bf3ec: lw fp, 0xfffffff8(sp)
6482 0x2d3bf3f0: lui at, 0x2b48
6483 0x2d3bf3f4: lw at, 0x100(at)
6484 ; TailCalljmpInd
6485 __ push(RA); ; to be used in generate_forward_exception()
6486 0x2d3bf3f8: addu t7, s2, zero
6487 0x2d3bf3fc: jr s1
6488 0x2d3bf400: nop
6489 */
6490 // Rethrow exception:
6491 // The exception oop will come in the first argument position.
6492 // Then JUMP (not call) to the rethrow stub code.
6493 instruct RethrowException()
6494 %{
6495 match(Rethrow);
6497 // use the following format syntax
6498 format %{ "JMP rethrow_stub #@RethrowException" %}
6499 ins_encode %{
6500 __ block_comment("@ RethrowException");
// Mark the instruction start and attach a runtime-call relocation so the
// jump target can be found/patched by the code cache machinery.
6502 cbuf.set_insts_mark();
6503 cbuf.relocate(cbuf.insts_mark(), runtime_call_Relocation::spec());
6505 // call OptoRuntime::rethrow_stub to get the exception handler in parent method
6506 __ li(T9, OptoRuntime::rethrow_stub());
6507 __ jr(T9);
6508 __ nop();
6509 %}
6510 ins_pipe( pipe_jump );
6511 %}
// Conditional branch comparing a pointer against NULL.  Only eq/ne are
// reachable (the unsigned above/below cases are commented out); the branch
// compares directly against the zero register, avoiding a constant load.
// cmpcode values: 0x01 eq, 0x02 ne, 0x03 above, 0x04 above_equal,
// 0x05 below, 0x06 below_equal.
// NOTE(review): L is bound from *($labl$$label); "if (&L)" distinguishes a
// NULL label pointer, but binding a reference through NULL is UB — testing
// the pointer before dereferencing would be cleaner.
6513 instruct branchConP_zero(cmpOpU cmp, mRegP op1, immP0 zero, label labl) %{
6514 match(If cmp (CmpP op1 zero));
6515 effect(USE labl);
6517 ins_cost(180);
6518 format %{ "b$cmp $op1, R0, $labl #@branchConP_zero" %}
6520 ins_encode %{
6521 Register op1 = $op1$$Register;
6522 Register op2 = R0;
6523 Label &L = *($labl$$label);
6524 int flag = $cmp$$cmpcode;
6526 switch(flag)
6527 {
6528 case 0x01: //equal
6529 if (&L)
6530 __ beq(op1, op2, L);
6531 else
6532 __ beq(op1, op2, (int)0);
6533 break;
6534 case 0x02: //not_equal
6535 if (&L)
6536 __ bne(op1, op2, L);
6537 else
6538 __ bne(op1, op2, (int)0);
6539 break;
6540 /*
6541 case 0x03: //above
6542 __ sltu(AT, op2, op1);
6543 if(&L)
6544 __ bne(R0, AT, L);
6545 else
6546 __ bne(R0, AT, (int)0);
6547 break;
6548 case 0x04: //above_equal
6549 __ sltu(AT, op1, op2);
6550 if(&L)
6551 __ beq(AT, R0, L);
6552 else
6553 __ beq(AT, R0, (int)0);
6554 break;
6555 case 0x05: //below
6556 __ sltu(AT, op1, op2);
6557 if(&L)
6558 __ bne(R0, AT, L);
6559 else
6560 __ bne(R0, AT, (int)0);
6561 break;
6562 case 0x06: //below_equal
6563 __ sltu(AT, op2, op1);
6564 if(&L)
6565 __ beq(AT, R0, L);
6566 else
6567 __ beq(AT, R0, (int)0);
6568 break;
6569 */
6570 default:
6571 Unimplemented();
6572 }
// Fill the branch delay slot.
6573 __ nop();
6574 %}
6576 ins_pc_relative(1);
6577 ins_pipe( pipe_alu_branch );
6578 %}
// Conditional branch on an unsigned pointer comparison of two registers.
// eq/ne branch directly; the ordered cases synthesize the condition into AT
// with sltu and branch on AT.  Final nop fills the branch delay slot.
6581 instruct branchConP(cmpOpU cmp, mRegP op1, mRegP op2, label labl) %{
6582 match(If cmp (CmpP op1 op2));
6583 // predicate(can_branch_register(_kids[0]->_leaf, _kids[1]->_leaf));
6584 effect(USE labl);
6586 ins_cost(200);
6587 format %{ "b$cmp $op1, $op2, $labl #@branchConP" %}
6589 ins_encode %{
6590 Register op1 = $op1$$Register;
6591 Register op2 = $op2$$Register;
6592 Label &L = *($labl$$label);
6593 int flag = $cmp$$cmpcode;
6595 switch(flag)
6596 {
6597 case 0x01: //equal
6598 if (&L)
6599 __ beq(op1, op2, L);
6600 else
6601 __ beq(op1, op2, (int)0);
6602 break;
6603 case 0x02: //not_equal
6604 if (&L)
6605 __ bne(op1, op2, L);
6606 else
6607 __ bne(op1, op2, (int)0);
6608 break;
6609 case 0x03: //above
6610 __ sltu(AT, op2, op1);
6611 if(&L)
6612 __ bne(R0, AT, L);
6613 else
6614 __ bne(R0, AT, (int)0);
6615 break;
6616 case 0x04: //above_equal
6617 __ sltu(AT, op1, op2);
6618 if(&L)
6619 __ beq(AT, R0, L);
6620 else
6621 __ beq(AT, R0, (int)0);
6622 break;
6623 case 0x05: //below
6624 __ sltu(AT, op1, op2);
6625 if(&L)
6626 __ bne(R0, AT, L);
6627 else
6628 __ bne(R0, AT, (int)0);
6629 break;
6630 case 0x06: //below_equal
6631 __ sltu(AT, op2, op1);
6632 if(&L)
6633 __ beq(AT, R0, L);
6634 else
6635 __ beq(AT, R0, (int)0);
6636 break;
6637 default:
6638 Unimplemented();
6639 }
// Fill the branch delay slot.
6640 __ nop();
6641 %}
6643 ins_pc_relative(1);
6644 ins_pipe( pipe_alu_branch );
6645 %}
// Branch on a compressed-oop comparison against NULL.  Narrow NULL encodes
// as 0, so the branch compares directly with the zero register; only eq/ne
// are implemented.
6647 instruct cmpN_null_branch(cmpOp cmp, mRegN op1, immN0 null, label labl) %{
6648 match(If cmp (CmpN op1 null));
6649 effect(USE labl);
6651 ins_cost(180);
6652 format %{ "CMP $op1,0\t! compressed ptr\n\t"
6653 "BP$cmp $labl @ cmpN_null_branch" %}
6654 ins_encode %{
6655 Register op1 = $op1$$Register;
6656 Register op2 = R0;
6657 Label &L = *($labl$$label);
6658 int flag = $cmp$$cmpcode;
6660 switch(flag)
6661 {
6662 case 0x01: //equal
6663 if (&L)
6664 __ beq(op1, op2, L);
6665 else
6666 __ beq(op1, op2, (int)0);
6667 break;
6668 case 0x02: //not_equal
6669 if (&L)
6670 __ bne(op1, op2, L);
6671 else
6672 __ bne(op1, op2, (int)0);
6673 break;
6674 default:
6675 Unimplemented();
6676 }
// Fill the branch delay slot.
6677 __ nop();
6678 %}
6679 //TODO: pipe_branchP or create pipe_branchN LEE
6680 ins_pc_relative(1);
6681 ins_pipe( pipe_alu_branch );
6682 %}
// Branch on an unsigned comparison of two compressed-oop registers.
// Ordered cases synthesize the condition into AT via sltu.
6684 instruct cmpN_reg_branch(cmpOp cmp, mRegN op1, mRegN op2, label labl) %{
6685 match(If cmp (CmpN op1 op2));
6686 effect(USE labl);
6688 ins_cost(180);
6689 format %{ "CMP $op1,$op2\t! compressed ptr\n\t"
6690 "BP$cmp $labl" %}
6691 ins_encode %{
6692 Register op1_reg = $op1$$Register;
6693 Register op2_reg = $op2$$Register;
6694 Label &L = *($labl$$label);
6695 int flag = $cmp$$cmpcode;
6697 switch(flag)
6698 {
6699 case 0x01: //equal
6700 if (&L)
6701 __ beq(op1_reg, op2_reg, L);
6702 else
6703 __ beq(op1_reg, op2_reg, (int)0);
6704 break;
6705 case 0x02: //not_equal
6706 if (&L)
6707 __ bne(op1_reg, op2_reg, L);
6708 else
6709 __ bne(op1_reg, op2_reg, (int)0);
6710 break;
6711 case 0x03: //above
6712 __ sltu(AT, op2_reg, op1_reg);
6713 if(&L)
6714 __ bne(R0, AT, L);
6715 else
6716 __ bne(R0, AT, (int)0);
6717 break;
6718 case 0x04: //above_equal
6719 __ sltu(AT, op1_reg, op2_reg);
6720 if(&L)
6721 __ beq(AT, R0, L);
6722 else
6723 __ beq(AT, R0, (int)0);
6724 break;
6725 case 0x05: //below
6726 __ sltu(AT, op1_reg, op2_reg);
6727 if(&L)
6728 __ bne(R0, AT, L);
6729 else
6730 __ bne(R0, AT, (int)0);
6731 break;
6732 case 0x06: //below_equal
6733 __ sltu(AT, op2_reg, op1_reg);
6734 if(&L)
6735 __ beq(AT, R0, L);
6736 else
6737 __ beq(AT, R0, (int)0);
6738 break;
6739 default:
6740 Unimplemented();
6741 }
// Fill the branch delay slot.
6742 __ nop();
6743 %}
6744 ins_pc_relative(1);
6745 ins_pipe( pipe_alu_branch );
6746 %}
// Unsigned int compare-and-branch, register vs register (CmpU).
// Ordered cases use sltu into AT; trailing nop fills the delay slot.
6748 instruct branchConIU_reg_reg(cmpOpU cmp, mRegI src1, mRegI src2, label labl) %{
6749 match( If cmp (CmpU src1 src2) );
6750 effect(USE labl);
6751 format %{ "BR$cmp $src1, $src2, $labl #@branchConIU_reg_reg" %}
6753 ins_encode %{
6754 Register op1 = $src1$$Register;
6755 Register op2 = $src2$$Register;
6756 Label &L = *($labl$$label);
6757 int flag = $cmp$$cmpcode;
6759 switch(flag)
6760 {
6761 case 0x01: //equal
6762 if (&L)
6763 __ beq(op1, op2, L);
6764 else
6765 __ beq(op1, op2, (int)0);
6766 break;
6767 case 0x02: //not_equal
6768 if (&L)
6769 __ bne(op1, op2, L);
6770 else
6771 __ bne(op1, op2, (int)0);
6772 break;
6773 case 0x03: //above
6774 __ sltu(AT, op2, op1);
6775 if(&L)
6776 __ bne(AT, R0, L);
6777 else
6778 __ bne(AT, R0, (int)0);
6779 break;
6780 case 0x04: //above_equal
6781 __ sltu(AT, op1, op2);
6782 if(&L)
6783 __ beq(AT, R0, L);
6784 else
6785 __ beq(AT, R0, (int)0);
6786 break;
6787 case 0x05: //below
6788 __ sltu(AT, op1, op2);
6789 if(&L)
6790 __ bne(AT, R0, L);
6791 else
6792 __ bne(AT, R0, (int)0);
6793 break;
6794 case 0x06: //below_equal
6795 __ sltu(AT, op2, op1);
6796 if(&L)
6797 __ beq(AT, R0, L);
6798 else
6799 __ beq(AT, R0, (int)0);
6800 break;
6801 default:
6802 Unimplemented();
6803 }
// Fill the branch delay slot.
6804 __ nop();
6805 %}
6807 ins_pc_relative(1);
6808 ins_pipe( pipe_alu_branch );
6809 %}
// Unsigned int compare-and-branch, register vs arbitrary immediate.
// The immediate is first materialized into AT; ordered cases then reuse AT
// for the sltu result (safe because the move happens before the compare).
6812 instruct branchConIU_reg_imm(cmpOpU cmp, mRegI src1, immI src2, label labl) %{
6813 match( If cmp (CmpU src1 src2) );
6814 effect(USE labl);
6815 format %{ "BR$cmp $src1, $src2, $labl #@branchConIU_reg_imm" %}
6817 ins_encode %{
6818 Register op1 = $src1$$Register;
6819 int val = $src2$$constant;
6820 Label &L = *($labl$$label);
6821 int flag = $cmp$$cmpcode;
6823 __ move(AT, val);
6824 switch(flag)
6825 {
6826 case 0x01: //equal
6827 if (&L)
6828 __ beq(op1, AT, L);
6829 else
6830 __ beq(op1, AT, (int)0);
6831 break;
6832 case 0x02: //not_equal
6833 if (&L)
6834 __ bne(op1, AT, L);
6835 else
6836 __ bne(op1, AT, (int)0);
6837 break;
6838 case 0x03: //above
6839 __ sltu(AT, AT, op1);
6840 if(&L)
6841 __ bne(R0, AT, L);
6842 else
6843 __ bne(R0, AT, (int)0);
6844 break;
6845 case 0x04: //above_equal
6846 __ sltu(AT, op1, AT);
6847 if(&L)
6848 __ beq(AT, R0, L);
6849 else
6850 __ beq(AT, R0, (int)0);
6851 break;
6852 case 0x05: //below
6853 __ sltu(AT, op1, AT);
6854 if(&L)
6855 __ bne(R0, AT, L);
6856 else
6857 __ bne(R0, AT, (int)0);
6858 break;
6859 case 0x06: //below_equal
6860 __ sltu(AT, AT, op1);
6861 if(&L)
6862 __ beq(AT, R0, L);
6863 else
6864 __ beq(AT, R0, (int)0);
6865 break;
6866 default:
6867 Unimplemented();
6868 }
// Fill the branch delay slot.
6869 __ nop();
6870 %}
6872 ins_pc_relative(1);
6873 ins_pipe( pipe_alu_branch );
6874 %}
// Signed int compare-and-branch, register vs register (CmpI).
// Identical structure to branchConIU_reg_reg but uses signed slt.
// cmpcode 0x03..0x06 here mean greater/greater_equal/less/less_equal.
6876 instruct branchConI_reg_reg(cmpOp cmp, mRegI src1, mRegI src2, label labl) %{
6877 match( If cmp (CmpI src1 src2) );
6878 effect(USE labl);
6879 format %{ "BR$cmp $src1, $src2, $labl #@branchConI_reg_reg" %}
6881 ins_encode %{
6882 Register op1 = $src1$$Register;
6883 Register op2 = $src2$$Register;
6884 Label &L = *($labl$$label);
6885 int flag = $cmp$$cmpcode;
6887 switch(flag)
6888 {
6889 case 0x01: //equal
6890 if (&L)
6891 __ beq(op1, op2, L);
6892 else
6893 __ beq(op1, op2, (int)0);
6894 break;
6895 case 0x02: //not_equal
6896 if (&L)
6897 __ bne(op1, op2, L);
6898 else
6899 __ bne(op1, op2, (int)0);
6900 break;
6901 case 0x03: //above
6902 __ slt(AT, op2, op1);
6903 if(&L)
6904 __ bne(R0, AT, L);
6905 else
6906 __ bne(R0, AT, (int)0);
6907 break;
6908 case 0x04: //above_equal
6909 __ slt(AT, op1, op2);
6910 if(&L)
6911 __ beq(AT, R0, L);
6912 else
6913 __ beq(AT, R0, (int)0);
6914 break;
6915 case 0x05: //below
6916 __ slt(AT, op1, op2);
6917 if(&L)
6918 __ bne(R0, AT, L);
6919 else
6920 __ bne(R0, AT, (int)0);
6921 break;
6922 case 0x06: //below_equal
6923 __ slt(AT, op2, op1);
6924 if(&L)
6925 __ beq(AT, R0, L);
6926 else
6927 __ beq(AT, R0, (int)0);
6928 break;
6929 default:
6930 Unimplemented();
6931 }
// Fill the branch delay slot.
6932 __ nop();
6933 %}
6935 ins_pc_relative(1);
6936 ins_pipe( pipe_alu_branch );
6937 %}
// Signed int compare-and-branch against the constant 0.  Uses the MIPS
// compare-with-zero branch forms (bgtz/bgez/bltz/blez) directly, so no
// scratch register or constant load is needed — hence the lower cost than
// branchConI_reg_imm.
6939 instruct branchConI_reg_imm0(cmpOp cmp, mRegI src1, immI0 src2, label labl) %{
6940 match( If cmp (CmpI src1 src2) );
6941 effect(USE labl);
6942 ins_cost(170);
6943 format %{ "BR$cmp $src1, $src2, $labl #@branchConI_reg_imm0" %}
6945 ins_encode %{
6946 Register op1 = $src1$$Register;
6947 // int val = $src2$$constant;
6948 Label &L = *($labl$$label);
6949 int flag = $cmp$$cmpcode;
6951 //__ move(AT, val);
6952 switch(flag)
6953 {
6954 case 0x01: //equal
6955 if (&L)
6956 __ beq(op1, R0, L);
6957 else
6958 __ beq(op1, R0, (int)0);
6959 break;
6960 case 0x02: //not_equal
6961 if (&L)
6962 __ bne(op1, R0, L);
6963 else
6964 __ bne(op1, R0, (int)0);
6965 break;
6966 case 0x03: //greater
6967 if(&L)
6968 __ bgtz(op1, L);
6969 else
6970 __ bgtz(op1, (int)0);
6971 break;
6972 case 0x04: //greater_equal
6973 if(&L)
6974 __ bgez(op1, L);
6975 else
6976 __ bgez(op1, (int)0);
6977 break;
6978 case 0x05: //less
6979 if(&L)
6980 __ bltz(op1, L);
6981 else
6982 __ bltz(op1, (int)0);
6983 break;
6984 case 0x06: //less_equal
6985 if(&L)
6986 __ blez(op1, L);
6987 else
6988 __ blez(op1, (int)0);
6989 break;
6990 default:
6991 Unimplemented();
6992 }
// Fill the branch delay slot.
6993 __ nop();
6994 %}
6996 ins_pc_relative(1);
6997 ins_pipe( pipe_alu_branch );
6998 %}
// Signed int compare-and-branch, register vs arbitrary immediate.  The
// immediate is materialized into AT up front; the signed slt forms then
// overwrite AT with the condition.
7001 instruct branchConI_reg_imm(cmpOp cmp, mRegI src1, immI src2, label labl) %{
7002 match( If cmp (CmpI src1 src2) );
7003 effect(USE labl);
7004 ins_cost(200);
7005 format %{ "BR$cmp $src1, $src2, $labl #@branchConI_reg_imm" %}
7007 ins_encode %{
7008 Register op1 = $src1$$Register;
7009 int val = $src2$$constant;
7010 Label &L = *($labl$$label);
7011 int flag = $cmp$$cmpcode;
7013 __ move(AT, val);
7014 switch(flag)
7015 {
7016 case 0x01: //equal
7017 if (&L)
7018 __ beq(op1, AT, L);
7019 else
7020 __ beq(op1, AT, (int)0);
7021 break;
7022 case 0x02: //not_equal
7023 if (&L)
7024 __ bne(op1, AT, L);
7025 else
7026 __ bne(op1, AT, (int)0);
7027 break;
7028 case 0x03: //greater
7029 __ slt(AT, AT, op1);
7030 if(&L)
7031 __ bne(R0, AT, L);
7032 else
7033 __ bne(R0, AT, (int)0);
7034 break;
7035 case 0x04: //greater_equal
7036 __ slt(AT, op1, AT);
7037 if(&L)
7038 __ beq(AT, R0, L);
7039 else
7040 __ beq(AT, R0, (int)0);
7041 break;
7042 case 0x05: //less
7043 __ slt(AT, op1, AT);
7044 if(&L)
7045 __ bne(R0, AT, L);
7046 else
7047 __ bne(R0, AT, (int)0);
7048 break;
7049 case 0x06: //less_equal
7050 __ slt(AT, AT, op1);
7051 if(&L)
7052 __ beq(AT, R0, L);
7053 else
7054 __ beq(AT, R0, (int)0);
7055 break;
7056 default:
7057 Unimplemented();
7058 }
// Fill the branch delay slot.
7059 __ nop();
7060 %}
7062 ins_pc_relative(1);
7063 ins_pipe( pipe_alu_branch );
7064 %}
// Unsigned int compare-and-branch against 0.  The unsigned lattice collapses
// several cases: above(>0u) is just "!= 0"; above_equal(>=0u) is always
// true, hence the unconditional beq(R0,R0); below(<0u) can never be taken,
// so no branch is emitted and the encoder returns early (skipping the
// delay-slot nop, which is only needed after an emitted branch);
// below_equal(<=0u) degenerates to "== 0".
7066 instruct branchConIU_reg_imm0(cmpOpU cmp, mRegI src1, immI0 zero, label labl) %{
7067 match( If cmp (CmpU src1 zero) );
7068 effect(USE labl);
7069 format %{ "BR$cmp $src1, zero, $labl #@branchConIU_reg_imm0" %}
7071 ins_encode %{
7072 Register op1 = $src1$$Register;
7073 Label &L = *($labl$$label);
7074 int flag = $cmp$$cmpcode;
7076 switch(flag)
7077 {
7078 case 0x01: //equal
7079 if (&L)
7080 __ beq(op1, R0, L);
7081 else
7082 __ beq(op1, R0, (int)0);
7083 break;
7084 case 0x02: //not_equal
7085 if (&L)
7086 __ bne(op1, R0, L);
7087 else
7088 __ bne(op1, R0, (int)0);
7089 break;
7090 case 0x03: //above
7091 if(&L)
7092 __ bne(R0, op1, L);
7093 else
7094 __ bne(R0, op1, (int)0);
7095 break;
7096 case 0x04: //above_equal
7097 if(&L)
7098 __ beq(R0, R0, L);
7099 else
7100 __ beq(R0, R0, (int)0);
7101 break;
7102 case 0x05: //below
7103 return;
7104 break;
7105 case 0x06: //below_equal
7106 if(&L)
7107 __ beq(op1, R0, L);
7108 else
7109 __ beq(op1, R0, (int)0);
7110 break;
7111 default:
7112 Unimplemented();
7113 }
// Fill the branch delay slot.
7114 __ nop();
7115 %}
7117 ins_pc_relative(1);
7118 ins_pipe( pipe_alu_branch );
7119 %}
// Unsigned int compare-and-branch against a 16-bit immediate.  Cheaper than
// branchConIU_reg_imm (cost 180): above_equal/below use sltiu with the
// immediate encoded directly, and the other cases materialize it into AT
// only in the branches that need it.
7122 instruct branchConIU_reg_immI16(cmpOpU cmp, mRegI src1, immI16 src2, label labl) %{
7123 match( If cmp (CmpU src1 src2) );
7124 effect(USE labl);
7125 ins_cost(180);
7126 format %{ "BR$cmp $src1, $src2, $labl #@branchConIU_reg_immI16" %}
7128 ins_encode %{
7129 Register op1 = $src1$$Register;
7130 int val = $src2$$constant;
7131 Label &L = *($labl$$label);
7132 int flag = $cmp$$cmpcode;
7134 switch(flag)
7135 {
7136 case 0x01: //equal
7137 __ move(AT, val);
7138 if (&L)
7139 __ beq(op1, AT, L);
7140 else
7141 __ beq(op1, AT, (int)0);
7142 break;
7143 case 0x02: //not_equal
7144 __ move(AT, val);
7145 if (&L)
7146 __ bne(op1, AT, L);
7147 else
7148 __ bne(op1, AT, (int)0);
7149 break;
7150 case 0x03: //above
7151 __ move(AT, val);
7152 __ sltu(AT, AT, op1);
7153 if(&L)
7154 __ bne(R0, AT, L);
7155 else
7156 __ bne(R0, AT, (int)0);
7157 break;
7158 case 0x04: //above_equal
7159 __ sltiu(AT, op1, val);
7160 if(&L)
7161 __ beq(AT, R0, L);
7162 else
7163 __ beq(AT, R0, (int)0);
7164 break;
7165 case 0x05: //below
7166 __ sltiu(AT, op1, val);
7167 if(&L)
7168 __ bne(R0, AT, L);
7169 else
7170 __ bne(R0, AT, (int)0);
7171 break;
7172 case 0x06: //below_equal
7173 __ move(AT, val);
7174 __ sltu(AT, AT, op1);
7175 if(&L)
7176 __ beq(AT, R0, L);
7177 else
7178 __ beq(AT, R0, (int)0);
7179 break;
7180 default:
7181 Unimplemented();
7182 }
// Fill the branch delay slot.
7183 __ nop();
7184 %}
7186 ins_pc_relative(1);
7187 ins_pipe( pipe_alu_branch );
7188 %}
// Branch on a SIGNED long register-register compare. eq/ne map directly to
// beq/bne; the ordered relations go through slt into AT. Unlike the other
// branch instructs in this file, the delay slot is filled per-case with
// __ delayed()->nop() instead of a single trailing nop.
instruct branchConL_regL_regL(cmpOp cmp, mRegL src1, mRegL src2, label labl) %{
  match( If cmp (CmpL src1 src2) );
  effect(USE labl);
  format %{ "BR$cmp $src1, $src2, $labl #@branchConL_regL_regL" %}
  ins_cost(250);

  ins_encode %{
    Register opr1_reg = as_Register($src1$$reg);
    Register opr2_reg = as_Register($src2$$reg);

    Label   &target = *($labl$$label);
    int     flag    = $cmp$$cmpcode;

    switch(flag)
    {
      case 0x01: //equal
        if (&target)
          __ beq(opr1_reg, opr2_reg, target);
        else
          __ beq(opr1_reg, opr2_reg, (int)0);
        __ delayed()->nop();
        break;

      case 0x02: //not_equal
        if(&target)
          __ bne(opr1_reg, opr2_reg, target);
        else
          __ bne(opr1_reg, opr2_reg, (int)0);
        __ delayed()->nop();
        break;

      case 0x03: //greater
        // AT = (src2 < src1); taken when set.
        __ slt(AT, opr2_reg, opr1_reg);
        if(&target)
          __ bne(AT, R0, target);
        else
          __ bne(AT, R0, (int)0);
        __ delayed()->nop();
        break;

      case 0x04: //greater_equal
        // AT = (src1 < src2); taken when clear.
        __ slt(AT, opr1_reg, opr2_reg);
        if(&target)
          __ beq(AT, R0, target);
        else
          __ beq(AT, R0, (int)0);
        __ delayed()->nop();

        break;

      case 0x05: //less
        __ slt(AT, opr1_reg, opr2_reg);
        if(&target)
          __ bne(AT, R0, target);
        else
          __ bne(AT, R0, (int)0);
        __ delayed()->nop();

        break;

      case 0x06: //less_equal
        // src1 <= src2  <=>  !(src2 < src1).
        __ slt(AT, opr2_reg, opr1_reg);

        if(&target)
          __ beq(AT, R0, target);
        else
          __ beq(AT, R0, (int)0);
        __ delayed()->nop();

        break;

      default:
        Unimplemented();
    }
  %}

  ins_pc_relative(1);
  ins_pipe( pipe_alu_branch );
%}
// Branch on a SIGNED long compare against an immediate whose negation fits
// a 16-bit daddiu operand (immL16_sub). The compare is done by subtracting:
// AT = src1 - val, after which the relation against val becomes a relation
// against zero and maps onto the single-operand branches (bgtz/bgez/...).
// NOTE(review): relies on src1 - val not overflowing for the ordered cases;
// presumably the immL16_sub operand range guarantees this — confirm against
// the operand definition.
instruct branchConL_reg_immL16_sub(cmpOp cmp, mRegL src1, immL16_sub src2, label labl) %{
  match( If cmp (CmpL src1 src2) );
  effect(USE labl);
  ins_cost(180);
  format %{ "BR$cmp $src1, $src2, $labl #@branchConL_reg_immL16_sub" %}

  ins_encode %{
    Register op1 = $src1$$Register;
    int      val = $src2$$constant;
    Label    &L  = *($labl$$label);
    int     flag = $cmp$$cmpcode;

    __ daddiu(AT, op1, -1 * val);  // AT = src1 - val
    switch(flag)
    {
      case 0x01: //equal
        if (&L)
          __ beq(R0, AT, L);
        else
          __ beq(R0, AT, (int)0);
        break;
      case 0x02: //not_equal
        if (&L)
          __ bne(R0, AT, L);
        else
          __ bne(R0, AT, (int)0);
        break;
      case 0x03: //greater
        if(&L)
          __ bgtz(AT, L);
        else
          __ bgtz(AT, (int)0);
        break;
      case 0x04: //greater_equal
        if(&L)
          __ bgez(AT, L);
        else
          __ bgez(AT, (int)0);
        break;
      case 0x05: //less
        if(&L)
          __ bltz(AT, L);
        else
          __ bltz(AT, (int)0);
        break;
      case 0x06: //less_equal
        if(&L)
          __ blez(AT, L);
        else
          __ blez(AT, (int)0);
        break;
      default:
        Unimplemented();
    }
    __ nop();  // fill the branch delay slot
  %}

  ins_pc_relative(1);
  ins_pipe( pipe_alu_branch );
%}
// Branch on a SIGNED int compare against an immediate whose negation fits
// a 16-bit addiu32 operand (immI16_sub). Same subtract-then-compare-to-zero
// technique as branchConL_reg_immL16_sub, using the 32-bit add.
instruct branchConI_reg_imm16_sub(cmpOp cmp, mRegI src1, immI16_sub src2, label labl) %{
  match( If cmp (CmpI src1 src2) );
  effect(USE labl);
  ins_cost(180);
  format %{ "BR$cmp $src1, $src2, $labl #@branchConI_reg_imm16_sub" %}

  ins_encode %{
    Register op1 = $src1$$Register;
    int      val = $src2$$constant;
    Label    &L  = *($labl$$label);
    int     flag = $cmp$$cmpcode;

    __ addiu32(AT, op1, -1 * val);  // AT = src1 - val (32-bit)
    switch(flag)
    {
      case 0x01: //equal
        if (&L)
          __ beq(R0, AT, L);
        else
          __ beq(R0, AT, (int)0);
        break;
      case 0x02: //not_equal
        if (&L)
          __ bne(R0, AT, L);
        else
          __ bne(R0, AT, (int)0);
        break;
      case 0x03: //greater
        if(&L)
          __ bgtz(AT, L);
        else
          __ bgtz(AT, (int)0);
        break;
      case 0x04: //greater_equal
        if(&L)
          __ bgez(AT, L);
        else
          __ bgez(AT, (int)0);
        break;
      case 0x05: //less
        if(&L)
          __ bltz(AT, L);
        else
          __ bltz(AT, (int)0);
        break;
      case 0x06: //less_equal
        if(&L)
          __ blez(AT, L);
        else
          __ blez(AT, (int)0);
        break;
      default:
        Unimplemented();
    }
    __ nop();  // fill the branch delay slot
  %}

  ins_pc_relative(1);
  ins_pipe( pipe_alu_branch );
%}
// Branch on a SIGNED long compare against zero. Most relations use the
// dedicated compare-with-zero branches (bgtz/bgez/blez); 'less' goes through
// slt into AT instead of using bltz directly.
instruct branchConL_regL_immL0(cmpOp cmp, mRegL src1, immL0 zero, label labl) %{
  match( If cmp (CmpL src1 zero) );
  effect(USE labl);
  format %{ "BR$cmp $src1, zero, $labl #@branchConL_regL_immL0" %}
  ins_cost(150);

  ins_encode %{
    Register opr1_reg = as_Register($src1$$reg);
    Label   &target   = *($labl$$label);
    int     flag      = $cmp$$cmpcode;

    switch(flag)
    {
      case 0x01: //equal
        if (&target)
          __ beq(opr1_reg, R0, target);
        else
          __ beq(opr1_reg, R0, int(0));
        break;

      case 0x02: //not_equal
        if(&target)
          __ bne(opr1_reg, R0, target);
        else
          __ bne(opr1_reg, R0, (int)0);
        break;

      case 0x03: //greater
        if(&target)
          __ bgtz(opr1_reg, target);
        else
          __ bgtz(opr1_reg, (int)0);
        break;

      case 0x04: //greater_equal
        if(&target)
          __ bgez(opr1_reg, target);
        else
          __ bgez(opr1_reg, (int)0);
        break;

      case 0x05: //less
        // AT = (src1 < 0); taken when set.
        __ slt(AT, opr1_reg, R0);
        if(&target)
          __ bne(AT, R0, target);
        else
          __ bne(AT, R0, (int)0);
        break;

      case 0x06: //less_equal
        if (&target)
          __ blez(opr1_reg, target);
        else
          __ blez(opr1_reg, int(0));
        break;

      default:
        Unimplemented();
    }
    __ delayed()->nop();  // fill the branch delay slot
  %}

  ins_pc_relative(1);
  ins_pipe( pipe_alu_branch );
%}
7463 //FIXME
// Branch on a single-precision float compare. The FP condition bit is set
// with c.cond.s and tested with bc1t/bc1f. Unordered ("u") predicates are
// chosen so that any comparison involving NaN takes the not-taken path of
// the ordered relation, matching Java float semantics.
instruct branchConF_reg_reg(cmpOp cmp, regF src1, regF src2, label labl) %{
  match( If cmp (CmpF src1 src2) );
  effect(USE labl);
  format %{ "BR$cmp $src1, $src2, $labl #@branchConF_reg_reg" %}

  ins_encode %{
    FloatRegister reg_op1 = $src1$$FloatRegister;
    FloatRegister reg_op2 = $src2$$FloatRegister;
    Label     &L = *($labl$$label);
    int     flag = $cmp$$cmpcode;

    switch(flag)
    {
      case 0x01: //equal
        __ c_eq_s(reg_op1, reg_op2);
        if (&L)
          __ bc1t(L);
        else
          __ bc1t((int)0);
        break;
      case 0x02: //not_equal
        // eq is false for NaN operands, so bc1f correctly treats NaN != x.
        __ c_eq_s(reg_op1, reg_op2);
        if (&L)
          __ bc1f(L);
        else
          __ bc1f((int)0);
        break;
      case 0x03: //greater
        // a > b  <=>  !(a <= b || unordered)
        __ c_ule_s(reg_op1, reg_op2);
        if(&L)
          __ bc1f(L);
        else
          __ bc1f((int)0);
        break;
      case 0x04: //greater_equal
        // a >= b  <=>  !(a < b || unordered)
        __ c_ult_s(reg_op1, reg_op2);
        if(&L)
          __ bc1f(L);
        else
          __ bc1f((int)0);
        break;
      case 0x05: //less
        __ c_ult_s(reg_op1, reg_op2);
        if(&L)
          __ bc1t(L);
        else
          __ bc1t((int)0);
        break;
      case 0x06: //less_equal
        __ c_ule_s(reg_op1, reg_op2);
        if(&L)
          __ bc1t(L);
        else
          __ bc1t((int)0);
        break;
      default:
        Unimplemented();
    }
    __ nop();  // fill the branch delay slot
  %}

  ins_pc_relative(1);
  ins_pipe(pipe_slow);
%}
// Branch on a double-precision float compare; mirrors branchConF_reg_reg
// with the .d compare forms.
instruct branchConD_reg_reg(cmpOp cmp, regD src1, regD src2, label labl) %{
  match( If cmp (CmpD src1 src2) );
  effect(USE labl);
  format %{ "BR$cmp $src1, $src2, $labl #@branchConD_reg_reg" %}

  ins_encode %{
    FloatRegister reg_op1 = $src1$$FloatRegister;
    FloatRegister reg_op2 = $src2$$FloatRegister;
    Label     &L = *($labl$$label);
    int     flag = $cmp$$cmpcode;

    switch(flag)
    {
      case 0x01: //equal
        __ c_eq_d(reg_op1, reg_op2);
        if (&L)
          __ bc1t(L);
        else
          __ bc1t((int)0);
        break;
      case 0x02: //not_equal
        // 2016/4/19 aoqi: c_ueq_d cannot distinguish NaN from equal.
        // Double.isNaN(double) is implemented as 'f != f', so using c_ueq_d
        // here caused bugs; c_eq_d + bc1f gives the correct NaN behavior.
        __ c_eq_d(reg_op1, reg_op2);
        if (&L)
          __ bc1f(L);
        else
          __ bc1f((int)0);
        break;
      case 0x03: //greater
        __ c_ule_d(reg_op1, reg_op2);
        if(&L)
          __ bc1f(L);
        else
          __ bc1f((int)0);
        break;
      case 0x04: //greater_equal
        __ c_ult_d(reg_op1, reg_op2);
        if(&L)
          __ bc1f(L);
        else
          __ bc1f((int)0);
        break;
      case 0x05: //less
        __ c_ult_d(reg_op1, reg_op2);
        if(&L)
          __ bc1t(L);
        else
          __ bc1t((int)0);
        break;
      case 0x06: //less_equal
        __ c_ule_d(reg_op1, reg_op2);
        if(&L)
          __ bc1t(L);
        else
          __ bc1t((int)0);
        break;
      default:
        Unimplemented();
    }
    __ nop();  // fill the branch delay slot
  %}

  ins_pc_relative(1);
  ins_pipe(pipe_slow);
%}
7596 // Call Runtime Instruction
// Direct call into the VM runtime; encoding is delegated to the shared
// Java_To_Runtime encode class.
instruct CallRuntimeDirect(method meth) %{
  match(CallRuntime );
  effect(USE meth);

  ins_cost(300);
  format %{ "CALL,runtime #@CallRuntimeDirect" %}
  ins_encode( Java_To_Runtime( meth ) );
  ins_pipe( pipe_slow );
  ins_alignment(16);
%}
7610 //------------------------MemBar Instructions-------------------------------
7611 //Memory barrier flavors
// Acquire barrier: emits no code (size 0); ordering is presumably provided
// by the preceding load/CAS on this target — see membar_acquire_lock.
instruct membar_acquire() %{
  match(MemBarAcquire);
  ins_cost(0);

  size(0);
  format %{ "MEMBAR-acquire (empty) @ membar_acquire" %}
  ins_encode();
  ins_pipe(empty);
%}
// LoadFence: emitted as a full MIPS 'sync'.
instruct load_fence() %{
  match(LoadFence);
  ins_cost(400);

  format %{ "MEMBAR @ load_fence" %}
  ins_encode %{
    __ sync();
  %}
  ins_pipe(pipe_slow);
%}
// Acquire as part of monitor enter: the CAS in the preceding FastLock
// already provides the ordering, so the encoding is empty.
instruct membar_acquire_lock()
%{
  match(MemBarAcquireLock);
  ins_cost(0);

  size(0);
  format %{ "MEMBAR-acquire (acquire as part of CAS in prior FastLock so empty encoding) @ membar_acquire_lock" %}
  ins_encode();
  ins_pipe(empty);
%}
// Release barrier: emits no code (size 0) on this target.
instruct membar_release() %{
  match(MemBarRelease);
  ins_cost(0);

  size(0);
  format %{ "MEMBAR-release (empty) @ membar_release" %}
  ins_encode();
  ins_pipe(empty);
%}
// StoreFence: emitted as a full MIPS 'sync'.
instruct store_fence() %{
  match(StoreFence);
  ins_cost(400);

  format %{ "MEMBAR @ store_fence" %}

  ins_encode %{
    __ sync();
  %}

  ins_pipe(pipe_slow);
%}
// Release as part of monitor exit: FastUnlock provides the ordering, so the
// encoding is empty.
instruct membar_release_lock()
%{
  match(MemBarReleaseLock);
  ins_cost(0);

  size(0);
  format %{ "MEMBAR-release-lock (release in FastUnlock so empty) @ membar_release_lock" %}
  ins_encode();
  ins_pipe(empty);
%}
// Full volatile barrier: 'sync', elided entirely on uniprocessors.
instruct membar_volatile() %{
  match(MemBarVolatile);
  ins_cost(400);

  format %{ "MEMBAR-volatile" %}
  ins_encode %{
    if( !os::is_MP() ) return;     // Not needed on single CPU
    __ sync();

  %}
  ins_pipe(pipe_slow);
%}
// Volatile barrier that the matcher has proven redundant (a store-load
// barrier already follows); emits nothing.
instruct unnecessary_membar_volatile() %{
  match(MemBarVolatile);
  predicate(Matcher::post_store_load_barrier(n));
  ins_cost(0);

  size(0);
  format %{ "MEMBAR-volatile (unnecessary so empty encoding) @ unnecessary_membar_volatile" %}
  ins_encode( );
  ins_pipe(empty);
%}
// StoreStore barrier: empty encoding on this target.
instruct membar_storestore() %{
  match(MemBarStoreStore);

  ins_cost(0);
  size(0);
  format %{ "MEMBAR-storestore (empty encoding) @ membar_storestore" %}
  ins_encode( );
  ins_pipe(empty);
%}
7714 //----------Move Instructions--------------------------------------------------
// Reinterpret a long as a pointer: plain register move (no-op when the
// allocator assigns the same register).
instruct castX2P(mRegP dst, mRegL src) %{
  match(Set dst (CastX2P src));
  format %{ "castX2P  $dst, $src @ castX2P" %}
  ins_encode %{
    Register src = $src$$Register;
    Register dst = $dst$$Register;

    if(src != dst)
      __ move(dst, src);
  %}
  ins_cost(10);
  ins_pipe( ialu_regI_mov );
%}
// Reinterpret a pointer as a long: plain register move.
instruct castP2X(mRegL dst, mRegP src ) %{
  match(Set dst (CastP2X src));

  format %{ "mov    $dst, $src\t  #@castP2X" %}
  ins_encode %{
    Register src = $src$$Register;
    Register dst = $dst$$Register;

    if(src != dst)
      __ move(dst, src);
  %}
  ins_pipe( ialu_regI_mov );
%}
// Bit-copy a float's raw 32 bits into an int GPR (mfc1).
instruct MoveF2I_reg_reg(mRegI dst, regF src) %{
  match(Set dst (MoveF2I src));
  effect(DEF dst, USE src);
  ins_cost(85);
  format %{ "MoveF2I   $dst, $src @ MoveF2I_reg_reg" %}
  ins_encode %{
    Register dst = as_Register($dst$$reg);
    FloatRegister src = as_FloatRegister($src$$reg);

    __ mfc1(dst, src);
  %}
  ins_pipe( pipe_slow );
%}
// Bit-copy an int's raw 32 bits into an FPR (mtc1).
instruct MoveI2F_reg_reg(regF dst, mRegI src) %{
  match(Set dst (MoveI2F src));
  effect(DEF dst, USE src);
  ins_cost(85);
  format %{ "MoveI2F   $dst, $src @ MoveI2F_reg_reg" %}
  ins_encode %{
    Register src = as_Register($src$$reg);
    FloatRegister dst = as_FloatRegister($dst$$reg);

    __ mtc1(src, dst);
  %}
  ins_pipe( pipe_slow );
%}
// Bit-copy a double's raw 64 bits into a long GPR (dmfc1).
instruct MoveD2L_reg_reg(mRegL dst, regD src) %{
  match(Set dst (MoveD2L src));
  effect(DEF dst, USE src);
  ins_cost(85);
  format %{ "MoveD2L   $dst, $src @ MoveD2L_reg_reg" %}
  ins_encode %{
    Register dst = as_Register($dst$$reg);
    FloatRegister src = as_FloatRegister($src$$reg);

    __ dmfc1(dst, src);
  %}
  ins_pipe( pipe_slow );
%}
// Bit-copy a long's raw 64 bits into an FPR (dmtc1).
instruct MoveL2D_reg_reg(regD dst, mRegL src) %{
  match(Set dst (MoveL2D src));
  effect(DEF dst, USE src);
  ins_cost(85);
  format %{ "MoveL2D   $dst, $src @ MoveL2D_reg_reg" %}
  ins_encode %{
    FloatRegister dst = as_FloatRegister($dst$$reg);
    Register src = as_Register($src$$reg);

    __ dmtc1(src, dst);
  %}
  ins_pipe( pipe_slow );
%}
7799 //----------Conditional Move---------------------------------------------------
7800 // Conditional move
// Conditional move of int on a SIGNED int compare: dst = src iff
// (tmp1 $cop tmp2). MIPS has no flags, so the relation is materialized in
// AT (subu32 for eq/ne, slt for ordered) and applied with movz (move when
// AT == 0) or movn (move when AT != 0).
instruct cmovI_cmpI_reg_reg(mRegI dst, mRegI src, mRegI tmp1, mRegI tmp2, cmpOp cop ) %{
  match(Set dst (CMoveI (Binary cop (CmpI tmp1 tmp2)) (Binary dst src)));
  ins_cost(80);
  format %{
    "CMP$cop  $tmp1, $tmp2\t  @cmovI_cmpI_reg_reg\n"
    "\tCMOV  $dst,$src \t @cmovI_cmpI_reg_reg"
  %}

  ins_encode %{
    Register op1 = $tmp1$$Register;
    Register op2 = $tmp2$$Register;
    Register dst = $dst$$Register;
    Register src = $src$$Register;
    int flag = $cop$$cmpcode;

    switch(flag)
    {
      case 0x01: //equal
        __ subu32(AT, op1, op2);
        __ movz(dst, src, AT);
        break;

      case 0x02: //not_equal
        __ subu32(AT, op1, op2);
        __ movn(dst, src, AT);
        break;

      case 0x03: //greater
        __ slt(AT, op2, op1);
        __ movn(dst, src, AT);
        break;

      case 0x04: //greater_equal
        __ slt(AT, op1, op2);
        __ movz(dst, src, AT);
        break;

      case 0x05: //less
        __ slt(AT, op1, op2);
        __ movn(dst, src, AT);
        break;

      case 0x06: //less_equal
        __ slt(AT, op2, op1);
        __ movz(dst, src, AT);
        break;

      default:
        Unimplemented();
    }
  %}

  ins_pipe( pipe_slow );
%}
// Conditional move of int on an UNSIGNED pointer compare:
// subu for eq/ne, sltu for the ordered relations, applied via movz/movn.
instruct cmovI_cmpP_reg_reg(mRegI dst, mRegI src, mRegP tmp1, mRegP tmp2, cmpOpU cop ) %{
  match(Set dst (CMoveI (Binary cop (CmpP tmp1 tmp2)) (Binary dst src)));
  ins_cost(80);
  format %{
    "CMPU$cop $tmp1,$tmp2\t @cmovI_cmpP_reg_reg\n\t"
    "CMOV $dst,$src\t @cmovI_cmpP_reg_reg"
  %}
  ins_encode %{
    Register op1 = $tmp1$$Register;
    Register op2 = $tmp2$$Register;
    Register dst = $dst$$Register;
    Register src = $src$$Register;
    int flag = $cop$$cmpcode;

    switch(flag)
    {
      case 0x01: //equal
        __ subu(AT, op1, op2);
        __ movz(dst, src, AT);
        break;

      case 0x02: //not_equal
        __ subu(AT, op1, op2);
        __ movn(dst, src, AT);
        break;

      case 0x03: //above
        __ sltu(AT, op2, op1);
        __ movn(dst, src, AT);
        break;

      case 0x04: //above_equal
        __ sltu(AT, op1, op2);
        __ movz(dst, src, AT);
        break;

      case 0x05: //below
        __ sltu(AT, op1, op2);
        __ movn(dst, src, AT);
        break;

      case 0x06: //below_equal
        __ sltu(AT, op2, op1);
        __ movz(dst, src, AT);
        break;

      default:
        Unimplemented();
    }
  %}

  ins_pipe( pipe_slow );
%}
// Conditional move of int on an UNSIGNED narrow-oop compare:
// 32-bit subu32 for eq/ne, sltu for the ordered relations.
instruct cmovI_cmpN_reg_reg(mRegI dst, mRegI src, mRegN tmp1, mRegN tmp2, cmpOpU cop ) %{
  match(Set dst (CMoveI (Binary cop (CmpN tmp1 tmp2)) (Binary dst src)));
  ins_cost(80);
  format %{
    "CMPU$cop $tmp1,$tmp2\t @cmovI_cmpN_reg_reg\n\t"
    "CMOV $dst,$src\t @cmovI_cmpN_reg_reg"
  %}
  ins_encode %{
    Register op1 = $tmp1$$Register;
    Register op2 = $tmp2$$Register;
    Register dst = $dst$$Register;
    Register src = $src$$Register;
    int flag = $cop$$cmpcode;

    switch(flag)
    {
      case 0x01: //equal
        __ subu32(AT, op1, op2);
        __ movz(dst, src, AT);
        break;

      case 0x02: //not_equal
        __ subu32(AT, op1, op2);
        __ movn(dst, src, AT);
        break;

      case 0x03: //above
        __ sltu(AT, op2, op1);
        __ movn(dst, src, AT);
        break;

      case 0x04: //above_equal
        __ sltu(AT, op1, op2);
        __ movz(dst, src, AT);
        break;

      case 0x05: //below
        __ sltu(AT, op1, op2);
        __ movn(dst, src, AT);
        break;

      case 0x06: //below_equal
        __ sltu(AT, op2, op1);
        __ movz(dst, src, AT);
        break;

      default:
        Unimplemented();
    }
  %}

  ins_pipe( pipe_slow );
%}
// Conditional move of pointer on an UNSIGNED narrow-oop compare.
instruct cmovP_cmpN_reg_reg(mRegP dst, mRegP src, mRegN tmp1, mRegN tmp2, cmpOpU cop ) %{
  match(Set dst (CMoveP (Binary cop (CmpN tmp1 tmp2)) (Binary dst src)));
  ins_cost(80);
  format %{
    "CMPU$cop $tmp1,$tmp2\t @cmovP_cmpN_reg_reg\n\t"
    "CMOV $dst,$src\t @cmovP_cmpN_reg_reg"
  %}
  ins_encode %{
    Register op1 = $tmp1$$Register;
    Register op2 = $tmp2$$Register;
    Register dst = $dst$$Register;
    Register src = $src$$Register;
    int flag = $cop$$cmpcode;

    switch(flag)
    {
      case 0x01: //equal
        __ subu32(AT, op1, op2);
        __ movz(dst, src, AT);
        break;

      case 0x02: //not_equal
        __ subu32(AT, op1, op2);
        __ movn(dst, src, AT);
        break;

      case 0x03: //above
        __ sltu(AT, op2, op1);
        __ movn(dst, src, AT);
        break;

      case 0x04: //above_equal
        __ sltu(AT, op1, op2);
        __ movz(dst, src, AT);
        break;

      case 0x05: //below
        __ sltu(AT, op1, op2);
        __ movn(dst, src, AT);
        break;

      case 0x06: //below_equal
        __ sltu(AT, op2, op1);
        __ movz(dst, src, AT);
        break;

      default:
        Unimplemented();
    }
  %}

  ins_pipe( pipe_slow );
%}
// Conditional move of narrow oop on an UNSIGNED pointer compare.
instruct cmovN_cmpP_reg_reg(mRegN dst, mRegN src, mRegP tmp1, mRegP tmp2, cmpOpU cop ) %{
  match(Set dst (CMoveN (Binary cop (CmpP tmp1 tmp2)) (Binary dst src)));
  ins_cost(80);
  format %{
    "CMPU$cop $tmp1,$tmp2\t @cmovN_cmpP_reg_reg\n\t"
    "CMOV $dst,$src\t @cmovN_cmpP_reg_reg"
  %}
  ins_encode %{
    Register op1 = $tmp1$$Register;
    Register op2 = $tmp2$$Register;
    Register dst = $dst$$Register;
    Register src = $src$$Register;
    int flag = $cop$$cmpcode;

    switch(flag)
    {
      case 0x01: //equal
        __ subu(AT, op1, op2);
        __ movz(dst, src, AT);
        break;

      case 0x02: //not_equal
        __ subu(AT, op1, op2);
        __ movn(dst, src, AT);
        break;

      case 0x03: //above
        __ sltu(AT, op2, op1);
        __ movn(dst, src, AT);
        break;

      case 0x04: //above_equal
        __ sltu(AT, op1, op2);
        __ movz(dst, src, AT);
        break;

      case 0x05: //below
        __ sltu(AT, op1, op2);
        __ movn(dst, src, AT);
        break;

      case 0x06: //below_equal
        __ sltu(AT, op2, op1);
        __ movz(dst, src, AT);
        break;

      default:
        Unimplemented();
    }
  %}

  ins_pipe( pipe_slow );
%}
// Conditional move of pointer on a double compare. The FP condition bit is
// set with c.cond.d and applied with movt (move if true) / movf (move if
// false); ordered ("o") predicates are negated via movf for >,>= so NaN
// comparisons do not move.
instruct cmovP_cmpD_reg_reg(mRegP dst, mRegP src, regD tmp1, regD tmp2, cmpOp cop ) %{
  match(Set dst (CMoveP (Binary cop (CmpD tmp1 tmp2)) (Binary dst src)));
  ins_cost(80);
  format %{
    "CMP$cop  $tmp1, $tmp2\t  @cmovP_cmpD_reg_reg\n"
    "\tCMOV  $dst,$src \t @cmovP_cmpD_reg_reg"
  %}
  ins_encode %{
    FloatRegister reg_op1 = as_FloatRegister($tmp1$$reg);
    FloatRegister reg_op2 = as_FloatRegister($tmp2$$reg);
    Register dst = as_Register($dst$$reg);
    Register src = as_Register($src$$reg);

    int flag = $cop$$cmpcode;

    switch(flag)
    {
      case 0x01: //equal
        __ c_eq_d(reg_op1, reg_op2);
        __ movt(dst, src);
        break;
      case 0x02: //not_equal
        __ c_eq_d(reg_op1, reg_op2);
        __ movf(dst, src);
        break;
      case 0x03: //greater
        __ c_ole_d(reg_op1, reg_op2);
        __ movf(dst, src);
        break;
      case 0x04: //greater_equal
        __ c_olt_d(reg_op1, reg_op2);
        __ movf(dst, src);
        break;
      case 0x05: //less
        __ c_ult_d(reg_op1, reg_op2);
        __ movt(dst, src);
        break;
      case 0x06: //less_equal
        __ c_ule_d(reg_op1, reg_op2);
        __ movt(dst, src);
        break;
      default:
        Unimplemented();
    }
  %}

  ins_pipe( pipe_slow );
%}
// Conditional move of narrow oop on an UNSIGNED narrow-oop compare.
instruct cmovN_cmpN_reg_reg(mRegN dst, mRegN src, mRegN tmp1, mRegN tmp2, cmpOpU cop ) %{
  match(Set dst (CMoveN (Binary cop (CmpN tmp1 tmp2)) (Binary dst src)));
  ins_cost(80);
  format %{
    "CMPU$cop $tmp1,$tmp2\t @cmovN_cmpN_reg_reg\n\t"
    "CMOV $dst,$src\t @cmovN_cmpN_reg_reg"
  %}
  ins_encode %{
    Register op1 = $tmp1$$Register;
    Register op2 = $tmp2$$Register;
    Register dst = $dst$$Register;
    Register src = $src$$Register;
    int flag = $cop$$cmpcode;

    switch(flag)
    {
      case 0x01: //equal
        __ subu32(AT, op1, op2);
        __ movz(dst, src, AT);
        break;

      case 0x02: //not_equal
        __ subu32(AT, op1, op2);
        __ movn(dst, src, AT);
        break;

      case 0x03: //above
        __ sltu(AT, op2, op1);
        __ movn(dst, src, AT);
        break;

      case 0x04: //above_equal
        __ sltu(AT, op1, op2);
        __ movz(dst, src, AT);
        break;

      case 0x05: //below
        __ sltu(AT, op1, op2);
        __ movn(dst, src, AT);
        break;

      case 0x06: //below_equal
        __ sltu(AT, op2, op1);
        __ movz(dst, src, AT);
        break;

      default:
        Unimplemented();
    }
  %}

  ins_pipe( pipe_slow );
%}
// Conditional move of int on an UNSIGNED int compare (CmpU).
// NOTE(review): eq/ne use subu rather than the subu32 used by the other
// int-compare cmov instructs in this file — presumably equivalent for
// equality testing of sign-extended 32-bit values; confirm.
instruct cmovI_cmpU_reg_reg(mRegI dst, mRegI src, mRegI tmp1, mRegI tmp2, cmpOpU cop ) %{
  match(Set dst (CMoveI (Binary cop (CmpU tmp1 tmp2)) (Binary dst src)));
  ins_cost(80);
  format %{
    "CMPU$cop $tmp1,$tmp2\t @cmovI_cmpU_reg_reg\n\t"
    "CMOV $dst,$src\t @cmovI_cmpU_reg_reg"
  %}
  ins_encode %{
    Register op1 = $tmp1$$Register;
    Register op2 = $tmp2$$Register;
    Register dst = $dst$$Register;
    Register src = $src$$Register;
    int flag = $cop$$cmpcode;

    switch(flag)
    {
      case 0x01: //equal
        __ subu(AT, op1, op2);
        __ movz(dst, src, AT);
        break;

      case 0x02: //not_equal
        __ subu(AT, op1, op2);
        __ movn(dst, src, AT);
        break;

      case 0x03: //above
        __ sltu(AT, op2, op1);
        __ movn(dst, src, AT);
        break;

      case 0x04: //above_equal
        __ sltu(AT, op1, op2);
        __ movz(dst, src, AT);
        break;

      case 0x05: //below
        __ sltu(AT, op1, op2);
        __ movn(dst, src, AT);
        break;

      case 0x06: //below_equal
        __ sltu(AT, op2, op1);
        __ movz(dst, src, AT);
        break;

      default:
        Unimplemented();
    }
  %}

  ins_pipe( pipe_slow );
%}
// Conditional move of int on a SIGNED long compare.
instruct cmovI_cmpL_reg_reg(mRegI dst, mRegI src, mRegL tmp1, mRegL tmp2, cmpOp cop ) %{
  match(Set dst (CMoveI (Binary cop (CmpL tmp1 tmp2)) (Binary dst src)));
  ins_cost(80);
  format %{
    "CMP$cop  $tmp1, $tmp2\t  @cmovI_cmpL_reg_reg\n"
    "\tCMOV  $dst,$src \t @cmovI_cmpL_reg_reg"
  %}
  ins_encode %{
    Register opr1 = as_Register($tmp1$$reg);
    Register opr2 = as_Register($tmp2$$reg);
    Register dst  = $dst$$Register;
    Register src  = $src$$Register;
    int flag = $cop$$cmpcode;

    switch(flag)
    {
      case 0x01: //equal
        __ subu(AT, opr1, opr2);
        __ movz(dst, src, AT);
        break;

      case 0x02: //not_equal
        __ subu(AT, opr1, opr2);
        __ movn(dst, src, AT);
        break;

      case 0x03: //greater
        __ slt(AT, opr2, opr1);
        __ movn(dst, src, AT);
        break;

      case 0x04: //greater_equal
        __ slt(AT, opr1, opr2);
        __ movz(dst, src, AT);
        break;

      case 0x05: //less
        __ slt(AT, opr1, opr2);
        __ movn(dst, src, AT);
        break;

      case 0x06: //less_equal
        __ slt(AT, opr2, opr1);
        __ movz(dst, src, AT);
        break;

      default:
        Unimplemented();
    }
  %}

  ins_pipe( pipe_slow );
%}
// Conditional move of pointer on a SIGNED long compare.
instruct cmovP_cmpL_reg_reg(mRegP dst, mRegP src, mRegL tmp1, mRegL tmp2, cmpOp cop ) %{
  match(Set dst (CMoveP (Binary cop (CmpL tmp1 tmp2)) (Binary dst src)));
  ins_cost(80);
  format %{
    "CMP$cop  $tmp1, $tmp2\t  @cmovP_cmpL_reg_reg\n"
    "\tCMOV  $dst,$src \t @cmovP_cmpL_reg_reg"
  %}
  ins_encode %{
    Register opr1 = as_Register($tmp1$$reg);
    Register opr2 = as_Register($tmp2$$reg);
    Register dst  = $dst$$Register;
    Register src  = $src$$Register;
    int flag = $cop$$cmpcode;

    switch(flag)
    {
      case 0x01: //equal
        __ subu(AT, opr1, opr2);
        __ movz(dst, src, AT);
        break;

      case 0x02: //not_equal
        __ subu(AT, opr1, opr2);
        __ movn(dst, src, AT);
        break;

      case 0x03: //greater
        __ slt(AT, opr2, opr1);
        __ movn(dst, src, AT);
        break;

      case 0x04: //greater_equal
        __ slt(AT, opr1, opr2);
        __ movz(dst, src, AT);
        break;

      case 0x05: //less
        __ slt(AT, opr1, opr2);
        __ movn(dst, src, AT);
        break;

      case 0x06: //less_equal
        __ slt(AT, opr2, opr1);
        __ movz(dst, src, AT);
        break;

      default:
        Unimplemented();
    }
  %}

  ins_pipe( pipe_slow );
%}
// Conditional move of int on a double compare; mirrors cmovP_cmpD_reg_reg.
instruct cmovI_cmpD_reg_reg(mRegI dst, mRegI src, regD tmp1, regD tmp2, cmpOp cop ) %{
  match(Set dst (CMoveI (Binary cop (CmpD tmp1 tmp2)) (Binary dst src)));
  ins_cost(80);
  format %{
    "CMP$cop  $tmp1, $tmp2\t  @cmovI_cmpD_reg_reg\n"
    "\tCMOV  $dst,$src \t @cmovI_cmpD_reg_reg"
  %}
  ins_encode %{
    FloatRegister reg_op1 = as_FloatRegister($tmp1$$reg);
    FloatRegister reg_op2 = as_FloatRegister($tmp2$$reg);
    Register dst = as_Register($dst$$reg);
    Register src = as_Register($src$$reg);

    int flag = $cop$$cmpcode;

    switch(flag)
    {
      case 0x01: //equal
        __ c_eq_d(reg_op1, reg_op2);
        __ movt(dst, src);
        break;
      case 0x02: //not_equal
        // 2016/4/19 aoqi: See instruct branchConD_reg_reg. The change in
        // branchConD_reg_reg fixed a NaN bug; it seems similar here, so the
        // same change (c_eq_d + movf instead of c_ueq_d) was made.
        __ c_eq_d(reg_op1, reg_op2);
        __ movf(dst, src);
        break;
      case 0x03: //greater
        __ c_ole_d(reg_op1, reg_op2);
        __ movf(dst, src);
        break;
      case 0x04: //greater_equal
        __ c_olt_d(reg_op1, reg_op2);
        __ movf(dst, src);
        break;
      case 0x05: //less
        __ c_ult_d(reg_op1, reg_op2);
        __ movt(dst, src);
        break;
      case 0x06: //less_equal
        __ c_ule_d(reg_op1, reg_op2);
        __ movt(dst, src);
        break;
      default:
        Unimplemented();
    }
  %}

  ins_pipe( pipe_slow );
%}
// Conditional move of pointer on an UNSIGNED pointer compare.
instruct cmovP_cmpP_reg_reg(mRegP dst, mRegP src, mRegP tmp1, mRegP tmp2, cmpOpU cop ) %{
  match(Set dst (CMoveP (Binary cop (CmpP tmp1 tmp2)) (Binary dst src)));
  ins_cost(80);
  format %{
    "CMPU$cop $tmp1,$tmp2\t @cmovP_cmpP_reg_reg\n\t"
    "CMOV $dst,$src\t @cmovP_cmpP_reg_reg"
  %}
  ins_encode %{
    Register op1 = $tmp1$$Register;
    Register op2 = $tmp2$$Register;
    Register dst = $dst$$Register;
    Register src = $src$$Register;
    int flag = $cop$$cmpcode;

    switch(flag)
    {
      case 0x01: //equal
        __ subu(AT, op1, op2);
        __ movz(dst, src, AT);
        break;

      case 0x02: //not_equal
        __ subu(AT, op1, op2);
        __ movn(dst, src, AT);
        break;

      case 0x03: //above
        __ sltu(AT, op2, op1);
        __ movn(dst, src, AT);
        break;

      case 0x04: //above_equal
        __ sltu(AT, op1, op2);
        __ movz(dst, src, AT);
        break;

      case 0x05: //below
        __ sltu(AT, op1, op2);
        __ movn(dst, src, AT);
        break;

      case 0x06: //below_equal
        __ sltu(AT, op2, op1);
        __ movz(dst, src, AT);
        break;

      default:
        Unimplemented();
    }
  %}

  ins_pipe( pipe_slow );
%}
// Conditional move of pointer on a SIGNED int compare (cmpOp + slt).
// NOTE(review): the original case comments said above/below etc., but the
// operand is the signed cmpOp class and the code emits signed slt, so the
// relations are greater/less; comments corrected accordingly.
instruct cmovP_cmpI_reg_reg(mRegP dst, mRegP src, mRegI tmp1, mRegI tmp2, cmpOp cop ) %{
  match(Set dst (CMoveP (Binary cop (CmpI tmp1 tmp2)) (Binary dst src)));
  ins_cost(80);
  format %{
    "CMP$cop $tmp1,$tmp2\t @cmovP_cmpI_reg_reg\n\t"
    "CMOV $dst,$src\t @cmovP_cmpI_reg_reg"
  %}
  ins_encode %{
    Register op1 = $tmp1$$Register;
    Register op2 = $tmp2$$Register;
    Register dst = $dst$$Register;
    Register src = $src$$Register;
    int flag = $cop$$cmpcode;

    switch(flag)
    {
      case 0x01: //equal
        __ subu32(AT, op1, op2);
        __ movz(dst, src, AT);
        break;

      case 0x02: //not_equal
        __ subu32(AT, op1, op2);
        __ movn(dst, src, AT);
        break;

      case 0x03: //greater
        __ slt(AT, op2, op1);
        __ movn(dst, src, AT);
        break;

      case 0x04: //greater_equal
        __ slt(AT, op1, op2);
        __ movz(dst, src, AT);
        break;

      case 0x05: //less
        __ slt(AT, op1, op2);
        __ movn(dst, src, AT);
        break;

      case 0x06: //less_equal
        __ slt(AT, op2, op1);
        __ movz(dst, src, AT);
        break;

      default:
        Unimplemented();
    }
  %}

  ins_pipe( pipe_slow );
%}
// Conditional move of narrow oop on a SIGNED int compare (cmpOp + slt).
// NOTE(review): the original case comments said above/below etc., but the
// operand is the signed cmpOp class and the code emits signed slt, so the
// relations are greater/less; comments corrected accordingly.
instruct cmovN_cmpI_reg_reg(mRegN dst, mRegN src, mRegI tmp1, mRegI tmp2, cmpOp cop ) %{
  match(Set dst (CMoveN (Binary cop (CmpI tmp1 tmp2)) (Binary dst src)));
  ins_cost(80);
  format %{
    "CMP$cop $tmp1,$tmp2\t @cmovN_cmpI_reg_reg\n\t"
    "CMOV $dst,$src\t @cmovN_cmpI_reg_reg"
  %}
  ins_encode %{
    Register op1 = $tmp1$$Register;
    Register op2 = $tmp2$$Register;
    Register dst = $dst$$Register;
    Register src = $src$$Register;
    int flag = $cop$$cmpcode;

    switch(flag)
    {
      case 0x01: //equal
        __ subu32(AT, op1, op2);
        __ movz(dst, src, AT);
        break;

      case 0x02: //not_equal
        __ subu32(AT, op1, op2);
        __ movn(dst, src, AT);
        break;

      case 0x03: //greater
        __ slt(AT, op2, op1);
        __ movn(dst, src, AT);
        break;

      case 0x04: //greater_equal
        __ slt(AT, op1, op2);
        __ movz(dst, src, AT);
        break;

      case 0x05: //less
        __ slt(AT, op1, op2);
        __ movn(dst, src, AT);
        break;

      case 0x06: //less_equal
        __ slt(AT, op2, op1);
        __ movz(dst, src, AT);
        break;

      default:
        Unimplemented();
    }
  %}

  ins_pipe( pipe_slow );
%}
// Conditional move of long on a SIGNED int compare.
instruct cmovL_cmpI_reg_reg(mRegL dst, mRegL src, mRegI tmp1, mRegI tmp2, cmpOp cop ) %{
  match(Set dst (CMoveL (Binary cop (CmpI tmp1 tmp2)) (Binary dst src)));
  ins_cost(80);
  format %{
    "CMP$cop  $tmp1, $tmp2\t  @cmovL_cmpI_reg_reg\n"
    "\tCMOV  $dst,$src \t @cmovL_cmpI_reg_reg"
  %}

  ins_encode %{
    Register op1 = $tmp1$$Register;
    Register op2 = $tmp2$$Register;
    Register dst = as_Register($dst$$reg);
    Register src = as_Register($src$$reg);
    int flag = $cop$$cmpcode;

    switch(flag)
    {
      case 0x01: //equal
        __ subu32(AT, op1, op2);
        __ movz(dst, src, AT);
        break;

      case 0x02: //not_equal
        __ subu32(AT, op1, op2);
        __ movn(dst, src, AT);
        break;

      case 0x03: //greater
        __ slt(AT, op2, op1);
        __ movn(dst, src, AT);
        break;

      case 0x04: //greater_equal
        __ slt(AT, op1, op2);
        __ movz(dst, src, AT);
        break;

      case 0x05: //less
        __ slt(AT, op1, op2);
        __ movn(dst, src, AT);
        break;

      case 0x06: //less_equal
        __ slt(AT, op2, op1);
        __ movz(dst, src, AT);
        break;

      default:
        Unimplemented();
    }
  %}

  ins_pipe( pipe_slow );
%}
// cmovL_cmpL_reg_reg: branchless conditional move of a long keyed on a signed
// 64-bit long compare: dst = (tmp1 <cop> tmp2) ? src : dst.
// Same movz/movn pattern as cmovL_cmpI_reg_reg, but with the full-width
// subu/slt (64-bit) forms since the operands are longs.
8608 instruct cmovL_cmpL_reg_reg(mRegL dst, mRegL src, mRegL tmp1, mRegL tmp2, cmpOp cop ) %{
8609 match(Set dst (CMoveL (Binary cop (CmpL tmp1 tmp2)) (Binary dst src)));
8610 ins_cost(80);
8611 format %{
8612 "CMP$cop $tmp1, $tmp2\t @cmovL_cmpL_reg_reg\n"
8613 "\tCMOV $dst,$src \t @cmovL_cmpL_reg_reg"
8614 %}
8615 ins_encode %{
8616 Register opr1 = as_Register($tmp1$$reg);
8617 Register opr2 = as_Register($tmp2$$reg);
8618 Register dst = as_Register($dst$$reg);
8619 Register src = as_Register($src$$reg);
8620 int flag = $cop$$cmpcode;
8622 switch(flag)
8623 {
8624 case 0x01: //equal
// Subtraction only feeds a zero/non-zero test here, so wrap-around is harmless.
8625 __ subu(AT, opr1, opr2);
8626 __ movz(dst, src, AT);
8627 break;
8629 case 0x02: //not_equal
8630 __ subu(AT, opr1, opr2);
8631 __ movn(dst, src, AT);
8632 break;
8634 case 0x03: //greater
8635 __ slt(AT, opr2, opr1);
8636 __ movn(dst, src, AT);
8637 break;
8639 case 0x04: //greater_equal
8640 __ slt(AT, opr1, opr2);
8641 __ movz(dst, src, AT);
8642 break;
8644 case 0x05: //less
8645 __ slt(AT, opr1, opr2);
8646 __ movn(dst, src, AT);
8647 break;
8649 case 0x06: //less_equal
8650 __ slt(AT, opr2, opr1);
8651 __ movz(dst, src, AT);
8652 break;
8654 default:
8655 Unimplemented();
8656 }
8657 %}
8659 ins_pipe( pipe_slow );
8660 %}
// cmovL_cmpN_reg_reg: conditional move of a long keyed on a compressed-oop
// (narrow pointer) compare.  Narrow oops are unsigned 32-bit values, hence
// the cmpOpU operand and the unsigned sltu for the ordering cases.
8662 instruct cmovL_cmpN_reg_reg(mRegL dst, mRegL src, mRegN tmp1, mRegN tmp2, cmpOpU cop ) %{
8663 match(Set dst (CMoveL (Binary cop (CmpN tmp1 tmp2)) (Binary dst src)));
8664 ins_cost(80);
8665 format %{
8666 "CMPU$cop $tmp1,$tmp2\t @cmovL_cmpN_reg_reg\n\t"
8667 "CMOV $dst,$src\t @cmovL_cmpN_reg_reg"
8668 %}
8669 ins_encode %{
8670 Register op1 = $tmp1$$Register;
8671 Register op2 = $tmp2$$Register;
8672 Register dst = $dst$$Register;
8673 Register src = $src$$Register;
8674 int flag = $cop$$cmpcode;
8676 switch(flag)
8677 {
8678 case 0x01: //equal
8679 __ subu32(AT, op1, op2);
8680 __ movz(dst, src, AT);
8681 break;
8683 case 0x02: //not_equal
8684 __ subu32(AT, op1, op2);
8685 __ movn(dst, src, AT);
8686 break;
8688 case 0x03: //above
// Unsigned compares (sltu) for the ordering cases, matching cmpOpU.
8689 __ sltu(AT, op2, op1);
8690 __ movn(dst, src, AT);
8691 break;
8693 case 0x04: //above_equal
8694 __ sltu(AT, op1, op2);
8695 __ movz(dst, src, AT);
8696 break;
8698 case 0x05: //below
8699 __ sltu(AT, op1, op2);
8700 __ movn(dst, src, AT);
8701 break;
8703 case 0x06: //below_equal
8704 __ sltu(AT, op2, op1);
8705 __ movz(dst, src, AT);
8706 break;
8708 default:
8709 Unimplemented();
8710 }
8711 %}
8713 ins_pipe( pipe_slow );
8714 %}
// cmovL_cmpD_reg_reg: conditional move of a long keyed on a double compare.
// The FPU compare (c.cond.d) sets the FP condition bit, then movt/movf move
// the GPR when the bit is true/false.
// NOTE(review): c_ole/c_olt are ordered predicates (false on NaN) while
// c_ult/c_ule are unordered (true on NaN); the mix chosen per case fixes the
// NaN behavior of each cmpOp — assumed to match C2's CMove semantics; verify
// against the other cmov*_cmpD instructs, which use the same pattern.
8717 instruct cmovL_cmpD_reg_reg(mRegL dst, mRegL src, regD tmp1, regD tmp2, cmpOp cop ) %{
8718 match(Set dst (CMoveL (Binary cop (CmpD tmp1 tmp2)) (Binary dst src)));
8719 ins_cost(80);
8720 format %{
8721 "CMP$cop $tmp1, $tmp2\t @cmovL_cmpD_reg_reg\n"
8722 "\tCMOV $dst,$src \t @cmovL_cmpD_reg_reg"
8723 %}
8724 ins_encode %{
8725 FloatRegister reg_op1 = as_FloatRegister($tmp1$$reg);
8726 FloatRegister reg_op2 = as_FloatRegister($tmp2$$reg);
8727 Register dst = as_Register($dst$$reg);
8728 Register src = as_Register($src$$reg);
8730 int flag = $cop$$cmpcode;
8732 switch(flag)
8733 {
8734 case 0x01: //equal
8735 __ c_eq_d(reg_op1, reg_op2);
8736 __ movt(dst, src);
8737 break;
8738 case 0x02: //not_equal
// Same compare as 'equal' but move on the false condition bit.
8739 __ c_eq_d(reg_op1, reg_op2);
8740 __ movf(dst, src);
8741 break;
8742 case 0x03: //greater
// greater  <=>  !(op1 <= op2), via the inverted ordered compare.
8743 __ c_ole_d(reg_op1, reg_op2);
8744 __ movf(dst, src);
8745 break;
8746 case 0x04: //greater_equal
8747 __ c_olt_d(reg_op1, reg_op2);
8748 __ movf(dst, src);
8749 break;
8750 case 0x05: //less
8751 __ c_ult_d(reg_op1, reg_op2);
8752 __ movt(dst, src);
8753 break;
8754 case 0x06: //less_equal
8755 __ c_ule_d(reg_op1, reg_op2);
8756 __ movt(dst, src);
8757 break;
8758 default:
8759 Unimplemented();
8760 }
8761 %}
8763 ins_pipe( pipe_slow );
8764 %}
// cmovD_cmpD_reg_reg: conditional move of a double keyed on a double compare.
// No FPU conditional-move is used here; instead a short branch (bc1f/bc1t on
// the FP condition bit) skips over the mov_d when the condition fails.  The
// nop after each branch fills the branch delay slot.
8766 instruct cmovD_cmpD_reg_reg(regD dst, regD src, regD tmp1, regD tmp2, cmpOp cop ) %{
8767 match(Set dst (CMoveD (Binary cop (CmpD tmp1 tmp2)) (Binary dst src)));
8768 ins_cost(200);
8769 format %{
8770 "CMP$cop $tmp1, $tmp2\t @cmovD_cmpD_reg_reg\n"
8771 "\tCMOV $dst,$src \t @cmovD_cmpD_reg_reg"
8772 %}
8773 ins_encode %{
8774 FloatRegister reg_op1 = as_FloatRegister($tmp1$$reg);
8775 FloatRegister reg_op2 = as_FloatRegister($tmp2$$reg);
8776 FloatRegister dst = as_FloatRegister($dst$$reg);
8777 FloatRegister src = as_FloatRegister($src$$reg);
8779 int flag = $cop$$cmpcode;
8781 Label L;
8783 switch(flag)
8784 {
8785 case 0x01: //equal
8786 __ c_eq_d(reg_op1, reg_op2);
8787 __ bc1f(L);
8788 __ nop();
8789 __ mov_d(dst, src);
8790 __ bind(L);
8791 break;
8792 case 0x02: //not_equal
8793 //2016/4/19 aoqi: See instruct branchConD_reg_reg. The change in branchConD_reg_reg fixed a bug. It seems similar here, so I made the same change.
8794 __ c_eq_d(reg_op1, reg_op2);
8795 __ bc1t(L);
8796 __ nop();
8797 __ mov_d(dst, src);
8798 __ bind(L);
8799 break;
8800 case 0x03: //greater
8801 __ c_ole_d(reg_op1, reg_op2);
8802 __ bc1t(L);
8803 __ nop();
8804 __ mov_d(dst, src);
8805 __ bind(L);
8806 break;
8807 case 0x04: //greater_equal
8808 __ c_olt_d(reg_op1, reg_op2);
8809 __ bc1t(L);
8810 __ nop();
8811 __ mov_d(dst, src);
8812 __ bind(L);
8813 break;
8814 case 0x05: //less
8815 __ c_ult_d(reg_op1, reg_op2);
8816 __ bc1f(L);
8817 __ nop();
8818 __ mov_d(dst, src);
8819 __ bind(L);
8820 break;
8821 case 0x06: //less_equal
8822 __ c_ule_d(reg_op1, reg_op2);
8823 __ bc1f(L);
8824 __ nop();
8825 __ mov_d(dst, src);
8826 __ bind(L);
8827 break;
8828 default:
8829 Unimplemented();
8830 }
8831 %}
8833 ins_pipe( pipe_slow );
8834 %}
// cmovF_cmpI_reg_reg: conditional move of a float keyed on a signed 32-bit
// integer compare.  GPRs cannot conditionally move into an FPR, so the code
// branches around the mov_s instead; the nop fills the branch delay slot.
8836 instruct cmovF_cmpI_reg_reg(regF dst, regF src, mRegI tmp1, mRegI tmp2, cmpOp cop ) %{
8837 match(Set dst (CMoveF (Binary cop (CmpI tmp1 tmp2)) (Binary dst src)));
8838 ins_cost(200);
8839 format %{
8840 "CMP$cop $tmp1, $tmp2\t @cmovF_cmpI_reg_reg\n"
8841 "\tCMOV $dst, $src \t @cmovF_cmpI_reg_reg"
8842 %}
8844 ins_encode %{
8845 Register op1 = $tmp1$$Register;
8846 Register op2 = $tmp2$$Register;
8847 FloatRegister dst = as_FloatRegister($dst$$reg);
8848 FloatRegister src = as_FloatRegister($src$$reg);
8849 int flag = $cop$$cmpcode;
8850 Label L;
8852 switch(flag)
8853 {
8854 case 0x01: //equal
// Branch over the move on the negated condition (here: skip if op1 != op2).
8855 __ bne(op1, op2, L);
8856 __ nop();
8857 __ mov_s(dst, src);
8858 __ bind(L);
8859 break;
8860 case 0x02: //not_equal
8861 __ beq(op1, op2, L);
8862 __ nop();
8863 __ mov_s(dst, src);
8864 __ bind(L);
8865 break;
8866 case 0x03: //greater
8867 __ slt(AT, op2, op1);
8868 __ beq(AT, R0, L);
8869 __ nop();
8870 __ mov_s(dst, src);
8871 __ bind(L);
8872 break;
8873 case 0x04: //greater_equal
8874 __ slt(AT, op1, op2);
8875 __ bne(AT, R0, L);
8876 __ nop();
8877 __ mov_s(dst, src);
8878 __ bind(L);
8879 break;
8880 case 0x05: //less
8881 __ slt(AT, op1, op2);
8882 __ beq(AT, R0, L);
8883 __ nop();
8884 __ mov_s(dst, src);
8885 __ bind(L);
8886 break;
8887 case 0x06: //less_equal
8888 __ slt(AT, op2, op1);
8889 __ bne(AT, R0, L);
8890 __ nop();
8891 __ mov_s(dst, src);
8892 __ bind(L);
8893 break;
8894 default:
8895 Unimplemented();
8896 }
8897 %}
8899 ins_pipe( pipe_slow );
8900 %}
// cmovD_cmpI_reg_reg: conditional move of a double keyed on a signed 32-bit
// integer compare.  Same branch-around-the-move pattern as
// cmovF_cmpI_reg_reg, with mov_d instead of mov_s.
8902 instruct cmovD_cmpI_reg_reg(regD dst, regD src, mRegI tmp1, mRegI tmp2, cmpOp cop ) %{
8903 match(Set dst (CMoveD (Binary cop (CmpI tmp1 tmp2)) (Binary dst src)));
8904 ins_cost(200);
8905 format %{
8906 "CMP$cop $tmp1, $tmp2\t @cmovD_cmpI_reg_reg\n"
8907 "\tCMOV $dst, $src \t @cmovD_cmpI_reg_reg"
8908 %}
8910 ins_encode %{
8911 Register op1 = $tmp1$$Register;
8912 Register op2 = $tmp2$$Register;
8913 FloatRegister dst = as_FloatRegister($dst$$reg);
8914 FloatRegister src = as_FloatRegister($src$$reg);
8915 int flag = $cop$$cmpcode;
8916 Label L;
8918 switch(flag)
8919 {
8920 case 0x01: //equal
8921 __ bne(op1, op2, L);
8922 __ nop();
8923 __ mov_d(dst, src);
8924 __ bind(L);
8925 break;
8926 case 0x02: //not_equal
8927 __ beq(op1, op2, L);
8928 __ nop();
8929 __ mov_d(dst, src);
8930 __ bind(L);
8931 break;
8932 case 0x03: //greater
8933 __ slt(AT, op2, op1);
8934 __ beq(AT, R0, L);
8935 __ nop();
8936 __ mov_d(dst, src);
8937 __ bind(L);
8938 break;
8939 case 0x04: //greater_equal
8940 __ slt(AT, op1, op2);
8941 __ bne(AT, R0, L);
8942 __ nop();
8943 __ mov_d(dst, src);
8944 __ bind(L);
8945 break;
8946 case 0x05: //less
8947 __ slt(AT, op1, op2);
8948 __ beq(AT, R0, L);
8949 __ nop();
8950 __ mov_d(dst, src);
8951 __ bind(L);
8952 break;
8953 case 0x06: //less_equal
8954 __ slt(AT, op2, op1);
8955 __ bne(AT, R0, L);
8956 __ nop();
8957 __ mov_d(dst, src);
8958 __ bind(L);
8959 break;
8960 default:
8961 Unimplemented();
8962 }
8963 %}
8965 ins_pipe( pipe_slow );
8966 %}
// cmovD_cmpP_reg_reg: conditional move of a double keyed on a pointer compare.
// NOTE(review): cases 0x03-0x06 use the signed slt on pointer operands, where
// an unsigned sltu would be expected for pointer ordering.  Presumably C2 only
// ever generates eq/ne for CmpP-driven CMoves, making the ordering cases dead
// — verify before relying on them.
8968 instruct cmovD_cmpP_reg_reg(regD dst, regD src, mRegP tmp1, mRegP tmp2, cmpOp cop ) %{
8969 match(Set dst (CMoveD (Binary cop (CmpP tmp1 tmp2)) (Binary dst src)));
8970 ins_cost(200);
8971 format %{
8972 "CMP$cop $tmp1, $tmp2\t @cmovD_cmpP_reg_reg\n"
8973 "\tCMOV $dst, $src \t @cmovD_cmpP_reg_reg"
8974 %}
8976 ins_encode %{
8977 Register op1 = $tmp1$$Register;
8978 Register op2 = $tmp2$$Register;
8979 FloatRegister dst = as_FloatRegister($dst$$reg);
8980 FloatRegister src = as_FloatRegister($src$$reg);
8981 int flag = $cop$$cmpcode;
8982 Label L;
8984 switch(flag)
8985 {
8986 case 0x01: //equal
8987 __ bne(op1, op2, L);
8988 __ nop();
8989 __ mov_d(dst, src);
8990 __ bind(L);
8991 break;
8992 case 0x02: //not_equal
8993 __ beq(op1, op2, L);
8994 __ nop();
8995 __ mov_d(dst, src);
8996 __ bind(L);
8997 break;
8998 case 0x03: //greater
8999 __ slt(AT, op2, op1);
9000 __ beq(AT, R0, L);
9001 __ nop();
9002 __ mov_d(dst, src);
9003 __ bind(L);
9004 break;
9005 case 0x04: //greater_equal
9006 __ slt(AT, op1, op2);
9007 __ bne(AT, R0, L);
9008 __ nop();
9009 __ mov_d(dst, src);
9010 __ bind(L);
9011 break;
9012 case 0x05: //less
9013 __ slt(AT, op1, op2);
9014 __ beq(AT, R0, L);
9015 __ nop();
9016 __ mov_d(dst, src);
9017 __ bind(L);
9018 break;
9019 case 0x06: //less_equal
9020 __ slt(AT, op2, op1);
9021 __ bne(AT, R0, L);
9022 __ nop();
9023 __ mov_d(dst, src);
9024 __ bind(L);
9025 break;
9026 default:
9027 Unimplemented();
9028 }
9029 %}
9031 ins_pipe( pipe_slow );
9032 %}
9034 //FIXME
// cmovI_cmpF_reg_reg: conditional move of an int keyed on a float compare.
// The FPU compare sets the FP condition bit; movt/movf then conditionally
// move the GPR.  Ordered (c_ole/c_olt) vs unordered (c_ult/c_ule) predicates
// fix the NaN behavior per case, mirroring cmovL_cmpD_reg_reg.
9035 instruct cmovI_cmpF_reg_reg(mRegI dst, mRegI src, regF tmp1, regF tmp2, cmpOp cop ) %{
9036 match(Set dst (CMoveI (Binary cop (CmpF tmp1 tmp2)) (Binary dst src)));
9037 ins_cost(80);
9038 format %{
9039 "CMP$cop $tmp1, $tmp2\t @cmovI_cmpF_reg_reg\n"
9040 "\tCMOV $dst,$src \t @cmovI_cmpF_reg_reg"
9041 %}
9043 ins_encode %{
9044 FloatRegister reg_op1 = $tmp1$$FloatRegister;
9045 FloatRegister reg_op2 = $tmp2$$FloatRegister;
9046 Register dst = $dst$$Register;
9047 Register src = $src$$Register;
9048 int flag = $cop$$cmpcode;
9050 switch(flag)
9051 {
9052 case 0x01: //equal
9053 __ c_eq_s(reg_op1, reg_op2);
9054 __ movt(dst, src);
9055 break;
9056 case 0x02: //not_equal
9057 __ c_eq_s(reg_op1, reg_op2);
9058 __ movf(dst, src);
9059 break;
9060 case 0x03: //greater
9061 __ c_ole_s(reg_op1, reg_op2);
9062 __ movf(dst, src);
9063 break;
9064 case 0x04: //greater_equal
9065 __ c_olt_s(reg_op1, reg_op2);
9066 __ movf(dst, src);
9067 break;
9068 case 0x05: //less
9069 __ c_ult_s(reg_op1, reg_op2);
9070 __ movt(dst, src);
9071 break;
9072 case 0x06: //less_equal
9073 __ c_ule_s(reg_op1, reg_op2);
9074 __ movt(dst, src);
9075 break;
9076 default:
9077 Unimplemented();
9078 }
9079 %}
9080 ins_pipe( pipe_slow );
9081 %}
// cmovF_cmpF_reg_reg: conditional move of a float keyed on a float compare.
// Like cmovD_cmpD_reg_reg, a bc1f/bc1t branch on the FP condition bit skips
// the mov_s when the condition fails; the nop fills the branch delay slot.
9083 instruct cmovF_cmpF_reg_reg(regF dst, regF src, regF tmp1, regF tmp2, cmpOp cop ) %{
9084 match(Set dst (CMoveF (Binary cop (CmpF tmp1 tmp2)) (Binary dst src)));
9085 ins_cost(200);
9086 format %{
9087 "CMP$cop $tmp1, $tmp2\t @cmovF_cmpF_reg_reg\n"
9088 "\tCMOV $dst,$src \t @cmovF_cmpF_reg_reg"
9089 %}
9091 ins_encode %{
9092 FloatRegister reg_op1 = $tmp1$$FloatRegister;
9093 FloatRegister reg_op2 = $tmp2$$FloatRegister;
9094 FloatRegister dst = $dst$$FloatRegister;
9095 FloatRegister src = $src$$FloatRegister;
9096 Label L;
9097 int flag = $cop$$cmpcode;
9099 switch(flag)
9100 {
9101 case 0x01: //equal
9102 __ c_eq_s(reg_op1, reg_op2);
9103 __ bc1f(L);
9104 __ nop();
9105 __ mov_s(dst, src);
9106 __ bind(L);
9107 break;
9108 case 0x02: //not_equal
9109 __ c_eq_s(reg_op1, reg_op2);
9110 __ bc1t(L);
9111 __ nop();
9112 __ mov_s(dst, src);
9113 __ bind(L);
9114 break;
9115 case 0x03: //greater
9116 __ c_ole_s(reg_op1, reg_op2);
9117 __ bc1t(L);
9118 __ nop();
9119 __ mov_s(dst, src);
9120 __ bind(L);
9121 break;
9122 case 0x04: //greater_equal
9123 __ c_olt_s(reg_op1, reg_op2);
9124 __ bc1t(L);
9125 __ nop();
9126 __ mov_s(dst, src);
9127 __ bind(L);
9128 break;
9129 case 0x05: //less
9130 __ c_ult_s(reg_op1, reg_op2);
9131 __ bc1f(L);
9132 __ nop();
9133 __ mov_s(dst, src);
9134 __ bind(L);
9135 break;
9136 case 0x06: //less_equal
9137 __ c_ule_s(reg_op1, reg_op2);
9138 __ bc1f(L);
9139 __ nop();
9140 __ mov_s(dst, src);
9141 __ bind(L);
9142 break;
9143 default:
9144 Unimplemented();
9145 }
9146 %}
9147 ins_pipe( pipe_slow );
9148 %}
9150 // Manifest a CmpL result in an integer register. Very painful.
9151 // This is the test to avoid.
// Produces the signum of (src1 - src2): -1 if src1 < src2, 0 if equal,
// 1 if src1 > src2.  Clobbers AT.
9152 instruct cmpL3_reg_reg(mRegI dst, mRegL src1, mRegL src2) %{
9153 match(Set dst (CmpL3 src1 src2));
9154 ins_cost(1000);
9155 format %{ "cmpL3 $dst, $src1, $src2 @ cmpL3_reg_reg" %}
9156 ins_encode %{
9157 Register opr1 = as_Register($src1$$reg);
9158 Register opr2 = as_Register($src2$$reg);
9159 Register dst = as_Register($dst$$reg);
// The previous implementation derived the sign from a 64-bit subtraction
// (subu + bltz), which gives the wrong answer when the subtraction
// overflows (e.g. src1 = min_jlong, src2 = 1 wraps to a positive value).
// Compute the signum with two overflow-free signed compares instead:
//   dst = (src1 > src2) - (src1 < src2)
// This is also branchless, so no delay-slot bookkeeping is needed.
// Read both inputs before writing dst, in case dst aliases an input.
__ slt(AT, opr1, opr2);   // AT  = (src1 < src2) ? 1 : 0
__ slt(dst, opr2, opr1);  // dst = (src1 > src2) ? 1 : 0
__ subu(dst, dst, AT);    // 1, 0, or -1
9171 %}
9172 ins_pipe( pipe_slow );
9173 %}
9175 //
9176 // less_result = -1
9177 // greater_result = 1
9178 // equal_result = 0
9179 // nan_result = -1
9180 //
// cmpF3_reg_reg: manifest a float compare as -1/0/1 in a GPR, with NaN
// mapping to -1 (see table above).  c_ult is an unordered compare, so it is
// true for NaN and the early-out branch delivers -1 via its delay slot.
9181 instruct cmpF3_reg_reg(mRegI dst, regF src1, regF src2) %{
9182 match(Set dst (CmpF3 src1 src2));
9183 ins_cost(1000);
9184 format %{ "cmpF3 $dst, $src1, $src2 @ cmpF3_reg_reg" %}
9185 ins_encode %{
9186 FloatRegister src1 = as_FloatRegister($src1$$reg);
9187 FloatRegister src2 = as_FloatRegister($src2$$reg);
9188 Register dst = as_Register($dst$$reg);
9190 Label Done;
// dst = -1 (placed in the branch delay slot) when src1 < src2 or unordered.
9192 __ c_ult_s(src1, src2);
9193 __ bc1t(Done);
9194 __ delayed()->daddiu(dst, R0, -1);
// Otherwise dst = 1, overwritten with 0 (movt) when the operands are equal.
9196 __ c_eq_s(src1, src2);
9197 __ move(dst, 1);
9198 __ movt(dst, R0);
9200 __ bind(Done);
9201 %}
9202 ins_pipe( pipe_slow );
9203 %}
// cmpD3_reg_reg: double-precision twin of cmpF3_reg_reg.  Manifests a double
// compare as -1/0/1 in a GPR; NaN (unordered) yields -1 via the c_ult_d
// early-out, whose delay slot preloads dst with -1.
9205 instruct cmpD3_reg_reg(mRegI dst, regD src1, regD src2) %{
9206 match(Set dst (CmpD3 src1 src2));
9207 ins_cost(1000);
9208 format %{ "cmpD3 $dst, $src1, $src2 @ cmpD3_reg_reg" %}
9209 ins_encode %{
9210 FloatRegister src1 = as_FloatRegister($src1$$reg);
9211 FloatRegister src2 = as_FloatRegister($src2$$reg);
9212 Register dst = as_Register($dst$$reg);
9214 Label Done;
9216 __ c_ult_d(src1, src2);
9217 __ bc1t(Done);
9218 __ delayed()->daddiu(dst, R0, -1);
// Not less/unordered: dst = 1, replaced by 0 (movt) when equal.
9220 __ c_eq_d(src1, src2);
9221 __ move(dst, 1);
9222 __ movt(dst, R0);
9224 __ bind(Done);
9225 %}
9226 ins_pipe( pipe_slow );
9227 %}
// clear_array: zero-fill an array.  $cnt is the number of 8-byte doublewords
// to clear (matching the x86 convention noted below), NOT bytes; each loop
// iteration stores one zero doubleword and advances by wordSize.
// Clobbers AT and T9.
9229 instruct clear_array(mRegL cnt, mRegP base, Universe dummy) %{
9230 match(Set dummy (ClearArray cnt base));
9231 format %{ "CLEAR_ARRAY base = $base, cnt = $cnt # Clear doublewords" %}
9232 ins_encode %{
9233 //cnt is the number of doublewords (8-byte words) to be cleared,
9234 //and base points to the starting address of the array.
9235 Register base = $base$$Register;
9236 Register num = $cnt$$Register;
9237 Label Loop, done;
9239 /* 2012/9/21 Jin: according to X86, $cnt is calculated by doublewords(8 bytes) */
9240 __ move(T9, num); /* T9 = words */
9241 __ beq(T9, R0, done);
9242 __ nop();
9243 __ move(AT, base);
9245 __ bind(Loop);
// Store one zero doubleword, bump the address, decrement the count.
9246 __ sd(R0, Address(AT, 0));
9247 __ daddi(AT, AT, wordSize);
9248 __ daddi(T9, T9, -1);
9249 __ bne(T9, R0, Loop);
9250 __ delayed()->nop();
9251 __ bind(done);
9252 %}
9253 ins_pipe( pipe_slow );
9254 %}
// string_compare: lexicographic comparison of two UTF-16 char sequences.
// Result: the difference of the first mismatching characters, or, when one
// string is a prefix of the other, the difference of the lengths (cnt1-cnt2).
// All four inputs are USE_KILLed; AT is used as a scratch register.
9256 instruct string_compare(a4_RegP str1, mA5RegI cnt1, a6_RegP str2, mA7RegI cnt2, no_Ax_mRegI result) %{
9257 match(Set result (StrComp (Binary str1 cnt1) (Binary str2 cnt2)));
9258 effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2);
9260 format %{ "String Compare $str1[len: $cnt1], $str2[len: $cnt2] -> $result @ string_compare" %}
9261 ins_encode %{
9262 // Get the first character position in both strings
9263 // [8] char array, [12] offset, [16] count
9264 Register str1 = $str1$$Register;
9265 Register str2 = $str2$$Register;
9266 Register cnt1 = $cnt1$$Register;
9267 Register cnt2 = $cnt2$$Register;
9268 Register result = $result$$Register;
9270 Label L, Loop, haveResult, done;
9272 // compute the and difference of lengths (in result)
9273 __ subu(result, cnt1, cnt2); // result holds the difference of two lengths
9275 // compute the shorter length (in cnt1)
9276 __ slt(AT, cnt2, cnt1);
9277 __ movn(cnt1, cnt2, AT);
9279 // Now the shorter length is in cnt1 and cnt2 can be used as a tmp register
9280 __ bind(Loop); // Loop begin
// The delay slot of the exit test preloads the next char of str1 into AT;
// it executes whether or not the branch is taken, which is harmless on exit.
9281 __ beq(cnt1, R0, done);
9282 __ delayed()->lhu(AT, str1, 0);;
9284 // compare current character
9285 __ lhu(cnt2, str2, 0);
9286 __ bne(AT, cnt2, haveResult);
9287 __ delayed()->addi(str1, str1, 2);
9288 __ addi(str2, str2, 2);
9289 __ b(Loop);
9290 __ delayed()->addi(cnt1, cnt1, -1); // Loop end
9292 __ bind(haveResult);
// First mismatch: result = char(str1) - char(str2).
9293 __ subu(result, AT, cnt2);
9295 __ bind(done);
9296 %}
9298 ins_pipe( pipe_slow );
9299 %}
9301 // intrinsic optimization
// string_equals: char-by-char equality of two UTF-16 sequences of length
// $cnt.  result = 1 when equal (or when both refer to the same array),
// 0 at the first mismatch.
// NOTE(review): a single count is taken, so callers presumably guarantee the
// two sequences have equal length before reaching this node — verify against
// the C2 StrEquals contract.  Clobbers AT; inputs are USE_KILLed.
9302 instruct string_equals(a4_RegP str1, a5_RegP str2, mA6RegI cnt, mA7RegI temp, no_Ax_mRegI result) %{
9303 match(Set result (StrEquals (Binary str1 str2) cnt));
9304 effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt, KILL temp);
9306 format %{ "String Equal $str1, $str2, len:$cnt tmp:$temp -> $result @ string_equals" %}
9307 ins_encode %{
9308 // Get the first character position in both strings
9309 // [8] char array, [12] offset, [16] count
9310 Register str1 = $str1$$Register;
9311 Register str2 = $str2$$Register;
9312 Register cnt = $cnt$$Register;
9313 Register tmp = $temp$$Register;
9314 Register result = $result$$Register;
9316 Label Loop, done;
// Identical array reference: equal.  The daddiu below sits in the branch
// delay slot and preloads result = 1 on every path.
9319 __ beq(str1, str2, done); // same char[] ?
9320 __ daddiu(result, R0, 1);
9322 __ bind(Loop); // Loop begin
9323 __ beq(cnt, R0, done);
9324 __ daddiu(result, R0, 1); // count == 0
9326 // compare current character
9327 __ lhu(AT, str1, 0);;
9328 __ lhu(tmp, str2, 0);
// Mismatch: delay slot sets result = 0 before falling out to done.
9329 __ bne(AT, tmp, done);
9330 __ delayed()->daddi(result, R0, 0);
9331 __ addi(str1, str1, 2);
9332 __ addi(str2, str2, 2);
9333 __ b(Loop);
9334 __ delayed()->addi(cnt, cnt, -1); // Loop end
9336 __ bind(done);
9337 %}
9339 ins_pipe( pipe_slow );
9340 %}
9342 //----------Arithmetic Instructions-------------------------------------------
9343 //----------Addition Instructions---------------------------------------------
// addI_Reg_Reg: 32-bit integer add (addu32 sign-extends the 32-bit result,
// as required for ints on MIPS64).
9344 instruct addI_Reg_Reg(mRegI dst, mRegI src1, mRegI src2) %{
9345 match(Set dst (AddI src1 src2));
9347 format %{ "add $dst, $src1, $src2 #@addI_Reg_Reg" %}
9348 ins_encode %{
9349 Register dst = $dst$$Register;
9350 Register src1 = $src1$$Register;
9351 Register src2 = $src2$$Register;
9352 __ addu32(dst, src1, src2);
9353 %}
9354 ins_pipe( ialu_regI_regI );
9355 %}
// addI_Reg_imm: 32-bit add with an immediate.  Small immediates use the
// 16-bit-immediate addiu32 form; larger ones are materialized in AT first.
9357 instruct addI_Reg_imm(mRegI dst, mRegI src1, immI src2) %{
9358 match(Set dst (AddI src1 src2));
9360 format %{ "add $dst, $src1, $src2 #@addI_Reg_imm" %}
9361 ins_encode %{
9362 Register dst = $dst$$Register;
9363 Register src1 = $src1$$Register;
9364 int imm = $src2$$constant;
9366 if(Assembler::is_simm16(imm)) {
9367 __ addiu32(dst, src1, imm);
9368 } else {
9369 __ move(AT, imm);
9370 __ addu32(dst, src1, AT);
9371 }
9372 %}
9373 ins_pipe( ialu_regI_regI );
9374 %}
// Pointer addition instructs.  Pointers are 64-bit, so all forms use the
// doubleword add (daddu/daddiu).
9376 instruct addP_reg_reg(mRegP dst, mRegP src1, mRegL src2) %{
9377 match(Set dst (AddP src1 src2));
9379 format %{ "dadd $dst, $src1, $src2 #@addP_reg_reg" %}
9381 ins_encode %{
9382 Register dst = $dst$$Register;
9383 Register src1 = $src1$$Register;
9384 Register src2 = $src2$$Register;
9385 __ daddu(dst, src1, src2);
9386 %}
9388 ins_pipe( ialu_regI_regI );
9389 %}
// Pointer + (int widened to long).  The int offset register is assumed to be
// properly sign-extended, so a plain daddu suffices.
9391 instruct addP_reg_reg_convI2L(mRegP dst, mRegP src1, mRegI src2) %{
9392 match(Set dst (AddP src1 (ConvI2L src2)));
9394 format %{ "dadd $dst, $src1, $src2 #@addP_reg_reg_convI2L" %}
9396 ins_encode %{
9397 Register dst = $dst$$Register;
9398 Register src1 = $src1$$Register;
9399 Register src2 = $src2$$Register;
9400 __ daddu(dst, src1, src2);
9401 %}
9403 ins_pipe( ialu_regI_regI );
9404 %}
// Pointer + long immediate: 16-bit immediates use daddiu directly; larger
// constants are built in AT with set64 first.
9406 instruct addP_reg_imm(mRegP dst, mRegP src1, immL src2) %{
9407 match(Set dst (AddP src1 src2));
9409 format %{ "daddi $dst, $src1, $src2 #@addP_reg_imm" %}
9410 ins_encode %{
9411 Register src1 = $src1$$Register;
9412 long src2 = $src2$$constant;
9413 Register dst = $dst$$Register;
9415 if(Assembler::is_simm16(src2)) {
9416 __ daddiu(dst, src1, src2);
9417 } else {
9418 __ set64(AT, src2);
9419 __ daddu(dst, src1, AT);
9420 }
9421 %}
9422 ins_pipe( ialu_regI_imm16 );
9423 %}
9425 // Add Long Register with Register
// Long (64-bit) addition instructs.  The ConvI2L variants rely on int
// registers already being sign-extended, so the conversion folds away and a
// single daddu/daddiu does the work.
9426 instruct addL_Reg_Reg(mRegL dst, mRegL src1, mRegL src2) %{
9427 match(Set dst (AddL src1 src2));
9428 ins_cost(200);
9429 format %{ "ADD $dst, $src1, $src2 #@addL_Reg_Reg\t" %}
9431 ins_encode %{
9432 Register dst_reg = as_Register($dst$$reg);
9433 Register src1_reg = as_Register($src1$$reg);
9434 Register src2_reg = as_Register($src2$$reg);
9436 __ daddu(dst_reg, src1_reg, src2_reg);
9437 %}
9439 ins_pipe( ialu_regL_regL );
9440 %}
// Long + 16-bit immediate.
9442 instruct addL_Reg_imm(mRegL dst, mRegL src1, immL16 src2)
9443 %{
9444 match(Set dst (AddL src1 src2));
9446 format %{ "ADD $dst, $src1, $src2 #@addL_Reg_imm " %}
9447 ins_encode %{
9448 Register dst_reg = as_Register($dst$$reg);
9449 Register src1_reg = as_Register($src1$$reg);
9450 int src2_imm = $src2$$constant;
9452 __ daddiu(dst_reg, src1_reg, src2_imm);
9453 %}
9455 ins_pipe( ialu_regL_regL );
9456 %}
// (int widened to long) + 16-bit immediate.
9458 instruct addL_RegI2L_imm(mRegL dst, mRegI src1, immL16 src2)
9459 %{
9460 match(Set dst (AddL (ConvI2L src1) src2));
9462 format %{ "ADD $dst, $src1, $src2 #@addL_RegI2L_imm " %}
9463 ins_encode %{
9464 Register dst_reg = as_Register($dst$$reg);
9465 Register src1_reg = as_Register($src1$$reg);
9466 int src2_imm = $src2$$constant;
9468 __ daddiu(dst_reg, src1_reg, src2_imm);
9469 %}
9471 ins_pipe( ialu_regL_regL );
9472 %}
// (int widened to long) + long register.
9474 instruct addL_RegI2L_Reg(mRegL dst, mRegI src1, mRegL src2) %{
9475 match(Set dst (AddL (ConvI2L src1) src2));
9476 ins_cost(200);
9477 format %{ "ADD $dst, $src1, $src2 #@addL_RegI2L_Reg\t" %}
9479 ins_encode %{
9480 Register dst_reg = as_Register($dst$$reg);
9481 Register src1_reg = as_Register($src1$$reg);
9482 Register src2_reg = as_Register($src2$$reg);
9484 __ daddu(dst_reg, src1_reg, src2_reg);
9485 %}
9487 ins_pipe( ialu_regL_regL );
9488 %}
// Both operands are ints widened to long.
9490 instruct addL_RegI2L_RegI2L(mRegL dst, mRegI src1, mRegI src2) %{
9491 match(Set dst (AddL (ConvI2L src1) (ConvI2L src2)));
9492 ins_cost(200);
9493 format %{ "ADD $dst, $src1, $src2 #@addL_RegI2L_RegI2L\t" %}
9495 ins_encode %{
9496 Register dst_reg = as_Register($dst$$reg);
9497 Register src1_reg = as_Register($src1$$reg);
9498 Register src2_reg = as_Register($src2$$reg);
9500 __ daddu(dst_reg, src1_reg, src2_reg);
9501 %}
9503 ins_pipe( ialu_regL_regL );
9504 %}
// Long register + (int widened to long).
9506 instruct addL_Reg_RegI2L(mRegL dst, mRegL src1, mRegI src2) %{
9507 match(Set dst (AddL src1 (ConvI2L src2)));
9508 ins_cost(200);
9509 format %{ "ADD $dst, $src1, $src2 #@addL_Reg_RegI2L\t" %}
9511 ins_encode %{
9512 Register dst_reg = as_Register($dst$$reg);
9513 Register src1_reg = as_Register($src1$$reg);
9514 Register src2_reg = as_Register($src2$$reg);
9516 __ daddu(dst_reg, src1_reg, src2_reg);
9517 %}
9519 ins_pipe( ialu_regL_regL );
9520 %}
9522 //----------Subtraction Instructions-------------------------------------------
9523 // Integer Subtraction Instructions
// subI_Reg_Reg: 32-bit integer subtract (subu32 sign-extends the result).
9524 instruct subI_Reg_Reg(mRegI dst, mRegI src1, mRegI src2) %{
9525 match(Set dst (SubI src1 src2));
9526 ins_cost(100);
9528 format %{ "sub $dst, $src1, $src2 #@subI_Reg_Reg" %}
9529 ins_encode %{
9530 Register dst = $dst$$Register;
9531 Register src1 = $src1$$Register;
9532 Register src2 = $src2$$Register;
9533 __ subu32(dst, src1, src2);
9534 %}
9535 ins_pipe( ialu_regI_regI );
9536 %}
// Subtract-immediate folded into an add of the negated constant; the
// immI16_sub operand guarantees the negation still fits in 16 bits.
9538 instruct subI_Reg_immI16_sub(mRegI dst, mRegI src1, immI16_sub src2) %{
9539 match(Set dst (SubI src1 src2));
9540 ins_cost(80);
9542 format %{ "sub $dst, $src1, $src2 #@subI_Reg_immI16_sub" %}
9543 ins_encode %{
9544 Register dst = $dst$$Register;
9545 Register src1 = $src1$$Register;
9546 __ addiu32(dst, src1, -1 * $src2$$constant);
9547 %}
9548 ins_pipe( ialu_regI_regI );
9549 %}
// Integer negation: 0 - src.
9551 instruct negI_Reg(mRegI dst, immI0 zero, mRegI src) %{
9552 match(Set dst (SubI zero src));
9553 ins_cost(80);
9555 format %{ "neg $dst, $src #@negI_Reg" %}
9556 ins_encode %{
9557 Register dst = $dst$$Register;
9558 Register src = $src$$Register;
9559 __ subu32(dst, R0, src);
9560 %}
9561 ins_pipe( ialu_regI_regI );
9562 %}
// Long negation: 0 - src (64-bit).
9564 instruct negL_Reg(mRegL dst, immL0 zero, mRegL src) %{
9565 match(Set dst (SubL zero src));
9566 ins_cost(80);
9568 format %{ "neg $dst, $src #@negL_Reg" %}
9569 ins_encode %{
9570 Register dst = $dst$$Register;
9571 Register src = $src$$Register;
9572 __ subu(dst, R0, src);
9573 %}
9574 ins_pipe( ialu_regI_regI );
9575 %}
// Long subtract-immediate via daddiu of the negated constant.
9577 instruct subL_Reg_immL16_sub(mRegL dst, mRegL src1, immL16_sub src2) %{
9578 match(Set dst (SubL src1 src2));
9579 ins_cost(80);
9581 format %{ "sub $dst, $src1, $src2 #@subL_Reg_immL16_sub" %}
9582 ins_encode %{
9583 Register dst = $dst$$Register;
9584 Register src1 = $src1$$Register;
9585 __ daddiu(dst, src1, -1 * $src2$$constant);
9586 %}
9587 ins_pipe( ialu_regI_regI );
9588 %}
9590 // Subtract Long Register with Register.
// 64-bit long subtraction instructs.  As with the addL family, the ConvI2L
// variants assume int registers are already sign-extended, so a plain subu
// (64-bit subtract) is sufficient.
9591 instruct subL_Reg_Reg(mRegL dst, mRegL src1, mRegL src2) %{
9592 match(Set dst (SubL src1 src2));
9593 ins_cost(100);
9594 format %{ "SubL $dst, $src1, $src2 @ subL_Reg_Reg" %}
9595 ins_encode %{
9596 Register dst = as_Register($dst$$reg);
9597 Register src1 = as_Register($src1$$reg);
9598 Register src2 = as_Register($src2$$reg);
9600 __ subu(dst, src1, src2);
9601 %}
9602 ins_pipe( ialu_regL_regL );
9603 %}
// Long - (int widened to long).
9605 instruct subL_Reg_RegI2L(mRegL dst, mRegL src1, mRegI src2) %{
9606 match(Set dst (SubL src1 (ConvI2L src2)));
9607 ins_cost(100);
9608 format %{ "SubL $dst, $src1, $src2 @ subL_Reg_RegI2L" %}
9609 ins_encode %{
9610 Register dst = as_Register($dst$$reg);
9611 Register src1 = as_Register($src1$$reg);
9612 Register src2 = as_Register($src2$$reg);
9614 __ subu(dst, src1, src2);
9615 %}
9616 ins_pipe( ialu_regL_regL );
9617 %}
// (int widened to long) - long.
9619 instruct subL_RegI2L_Reg(mRegL dst, mRegI src1, mRegL src2) %{
9620 match(Set dst (SubL (ConvI2L src1) src2));
9621 ins_cost(200);
9622 format %{ "SubL $dst, $src1, $src2 @ subL_RegI2L_Reg" %}
9623 ins_encode %{
9624 Register dst = as_Register($dst$$reg);
9625 Register src1 = as_Register($src1$$reg);
9626 Register src2 = as_Register($src2$$reg);
9628 __ subu(dst, src1, src2);
9629 %}
9630 ins_pipe( ialu_regL_regL );
9631 %}
// Both operands are ints widened to long.
9633 instruct subL_RegI2L_RegI2L(mRegL dst, mRegI src1, mRegI src2) %{
9634 match(Set dst (SubL (ConvI2L src1) (ConvI2L src2)));
9635 ins_cost(200);
9636 format %{ "SubL $dst, $src1, $src2 @ subL_RegI2L_RegI2L" %}
9637 ins_encode %{
9638 Register dst = as_Register($dst$$reg);
9639 Register src1 = as_Register($src1$$reg);
9640 Register src2 = as_Register($src2$$reg);
9642 __ subu(dst, src1, src2);
9643 %}
9644 ins_pipe( ialu_regL_regL );
9645 %}
9647 // Integer MOD with Register
// modI_Reg_Reg: 32-bit signed remainder.  div places the remainder in HI,
// retrieved with mfhi.  The Loongson gsmod path is deliberately disabled
// (benchmarked slower than div+mfhi — see the 2016.08.10 note below).
9648 instruct modI_Reg_Reg(mRegI dst, mRegI src1, mRegI src2) %{
9649 match(Set dst (ModI src1 src2));
9650 ins_cost(300);
9651 format %{ "modi $dst, $src1, $src2 @ modI_Reg_Reg" %}
9652 ins_encode %{
9653 Register dst = $dst$$Register;
9654 Register src1 = $src1$$Register;
9655 Register src2 = $src2$$Register;
9657 //if (UseLoongsonISA) {
9658 if (0) {
9659 // 2016.08.10
9660 // Experiments show that gsmod is slower that div+mfhi.
9661 // So I just disable it here.
9662 __ gsmod(dst, src1, src2);
9663 } else {
9664 __ div(src1, src2);
9665 __ mfhi(dst);
9666 }
9667 %}
9669 //ins_pipe( ialu_mod );
9670 ins_pipe( ialu_regI_regI );
9671 %}
// modL_reg_reg: 64-bit signed remainder.  Uses the single-instruction
// Loongson gsdmod when available, otherwise ddiv + mfhi.
9673 instruct modL_reg_reg(mRegL dst, mRegL src1, mRegL src2) %{
9674 match(Set dst (ModL src1 src2));
9675 format %{ "modL $dst, $src1, $src2 @modL_reg_reg" %}
9677 ins_encode %{
9678 Register dst = as_Register($dst$$reg);
9679 Register op1 = as_Register($src1$$reg);
9680 Register op2 = as_Register($src2$$reg);
9682 if (UseLoongsonISA) {
9683 __ gsdmod(dst, op1, op2);
9684 } else {
9685 __ ddiv(op1, op2);
9686 __ mfhi(dst);
9687 }
9688 %}
9689 ins_pipe( pipe_slow );
9690 %}
// mulI_Reg_Reg: 32-bit integer multiply with a direct-destination mul.
9692 instruct mulI_Reg_Reg(mRegI dst, mRegI src1, mRegI src2) %{
9693 match(Set dst (MulI src1 src2));
9695 ins_cost(300);
9696 format %{ "mul $dst, $src1, $src2 @ mulI_Reg_Reg" %}
9697 ins_encode %{
9698 Register src1 = $src1$$Register;
9699 Register src2 = $src2$$Register;
9700 Register dst = $dst$$Register;
9702 __ mul(dst, src1, src2);
9703 %}
9704 ins_pipe( ialu_mult );
9705 %}
// maddI_Reg_Reg: fused multiply-add, dst = src1 * src2 + src3, via the HI/LO
// accumulator: mtlo seeds LO with the addend, madd accumulates the product
// into HI:LO, mflo reads back the low word.  HI is left stale by mtlo, but
// the low 32 bits of the accumulated sum do not depend on HI, so the mflo
// result is still correct.  Clobbers HI/LO.
9707 instruct maddI_Reg_Reg(mRegI dst, mRegI src1, mRegI src2, mRegI src3) %{
9708 match(Set dst (AddI (MulI src1 src2) src3));
9710 ins_cost(999);
9711 format %{ "madd $dst, $src1 * $src2 + $src3 #@maddI_Reg_Reg" %}
9712 ins_encode %{
9713 Register src1 = $src1$$Register;
9714 Register src2 = $src2$$Register;
9715 Register src3 = $src3$$Register;
9716 Register dst = $dst$$Register;
9718 __ mtlo(src3);
9719 __ madd(src1, src2);
9720 __ mflo(dst);
9721 %}
9722 ins_pipe( ialu_mult );
9723 %}
// divI_Reg_Reg: 32-bit signed division.  MIPS div does not trap on a zero
// divisor, so teq(R0, src2, 0x7) raises a trap (code 0x7) when src2 == 0;
// the resulting signal is turned into the Java ArithmeticException by the
// runtime's implicit-exception machinery.
9725 instruct divI_Reg_Reg(mRegI dst, mRegI src1, mRegI src2) %{
9726 match(Set dst (DivI src1 src2));
9728 ins_cost(300);
9729 format %{ "div $dst, $src1, $src2 @ divI_Reg_Reg" %}
9730 ins_encode %{
9731 Register src1 = $src1$$Register;
9732 Register src2 = $src2$$Register;
9733 Register dst = $dst$$Register;
9735 /* 2012/4/21 Jin: In MIPS, div does not cause exception.
9736 We must trap an exception manually. */
9737 __ teq(R0, src2, 0x7);
9739 if (UseLoongsonISA) {
// Loongson has a three-operand divide that writes dst directly.
9740 __ gsdiv(dst, src1, src2);
9741 } else {
9742 __ div(src1, src2);
// NOTE(review): the two nops look like a pipeline/hazard gap between div
// and mflo on some cores — confirm against the target core's manual.
9744 __ nop();
9745 __ nop();
9746 __ mflo(dst);
9747 }
9748 %}
9749 ins_pipe( ialu_mod );
9750 %}
// divF_Reg_Reg: single-precision FP divide.  FP divide-by-zero produces an
// IEEE infinity/NaN rather than a Java exception, so no manual trap is needed.
9752 instruct divF_Reg_Reg(regF dst, regF src1, regF src2) %{
9753 match(Set dst (DivF src1 src2));
9755 ins_cost(300);
9756 format %{ "divF $dst, $src1, $src2 @ divF_Reg_Reg" %}
9757 ins_encode %{
9758 FloatRegister src1 = $src1$$FloatRegister;
9759 FloatRegister src2 = $src2$$FloatRegister;
9760 FloatRegister dst = $dst$$FloatRegister;
9762 /* Here do we need to trap an exception manually ? */
9763 __ div_s(dst, src1, src2);
9764 %}
9765 ins_pipe( pipe_slow );
9766 %}
// divD_Reg_Reg: double-precision FP divide (see divF note above).
9768 instruct divD_Reg_Reg(regD dst, regD src1, regD src2) %{
9769 match(Set dst (DivD src1 src2));
9771 ins_cost(300);
9772 format %{ "divD $dst, $src1, $src2 @ divD_Reg_Reg" %}
9773 ins_encode %{
9774 FloatRegister src1 = $src1$$FloatRegister;
9775 FloatRegister src2 = $src2$$FloatRegister;
9776 FloatRegister dst = $dst$$FloatRegister;
9778 /* Here do we need to trap an exception manually ? */
9779 __ div_d(dst, src1, src2);
9780 %}
9781 ins_pipe( pipe_slow );
9782 %}
// mulL_reg_reg: 64-bit long multiply.  Loongson's gsdmult writes dst
// directly; the generic MIPS path is dmult into HI/LO followed by mflo.
9784 instruct mulL_reg_reg(mRegL dst, mRegL src1, mRegL src2) %{
9785 match(Set dst (MulL src1 src2));
9786 format %{ "mulL $dst, $src1, $src2 @mulL_reg_reg" %}
9787 ins_encode %{
9788 Register dst = as_Register($dst$$reg);
9789 Register op1 = as_Register($src1$$reg);
9790 Register op2 = as_Register($src2$$reg);
9792 if (UseLoongsonISA) {
9793 __ gsdmult(dst, op1, op2);
9794 } else {
9795 __ dmult(op1, op2);
9796 __ mflo(dst);
9797 }
9798 %}
9799 ins_pipe( pipe_slow );
9800 %}
// mulL_reg_regI2L: long * (int widened to long); the int register is assumed
// sign-extended, so the same 64-bit multiply applies.
9802 instruct mulL_reg_regI2L(mRegL dst, mRegL src1, mRegI src2) %{
9803 match(Set dst (MulL src1 (ConvI2L src2)));
9804 format %{ "mulL $dst, $src1, $src2 @mulL_reg_regI2L" %}
9805 ins_encode %{
9806 Register dst = as_Register($dst$$reg);
9807 Register op1 = as_Register($src1$$reg);
9808 Register op2 = as_Register($src2$$reg);
9810 if (UseLoongsonISA) {
9811 __ gsdmult(dst, op1, op2);
9812 } else {
9813 __ dmult(op1, op2);
9814 __ mflo(dst);
9815 }
9816 %}
9817 ins_pipe( pipe_slow );
9818 %}
// divL_reg_reg: 64-bit signed long division (gsddiv on Loongson, else
// ddiv + mflo).  NOTE(review): unlike divI_Reg_Reg there is no teq
// zero-divisor trap here — presumably the zero check is matched elsewhere
// for DivL; verify.
9820 instruct divL_reg_reg(mRegL dst, mRegL src1, mRegL src2) %{
9821 match(Set dst (DivL src1 src2));
9822 format %{ "divL $dst, $src1, $src2 @divL_reg_reg" %}
9824 ins_encode %{
9825 Register dst = as_Register($dst$$reg);
9826 Register op1 = as_Register($src1$$reg);
9827 Register op2 = as_Register($src2$$reg);
9829 if (UseLoongsonISA) {
9830 __ gsddiv(dst, op1, op2);
9831 } else {
9832 __ ddiv(op1, op2);
9833 __ mflo(dst);
9834 }
9835 %}
9836 ins_pipe( pipe_slow );
9837 %}
// Single-precision FP add.
instruct addF_reg_reg(regF dst, regF src1, regF src2) %{
  match(Set dst (AddF src1 src2));
  format %{ "AddF $dst, $src1, $src2 @addF_reg_reg" %}
  ins_encode %{
    __ add_s($dst$$FloatRegister, $src1$$FloatRegister, $src2$$FloatRegister);
  %}
  ins_pipe( fpu_regF_regF );
%}

// Single-precision FP subtract.
instruct subF_reg_reg(regF dst, regF src1, regF src2) %{
  match(Set dst (SubF src1 src2));
  format %{ "SubF $dst, $src1, $src2 @subF_reg_reg" %}
  ins_encode %{
    __ sub_s($dst$$FloatRegister, $src1$$FloatRegister, $src2$$FloatRegister);
  %}
  ins_pipe( fpu_regF_regF );
%}

// Double-precision FP add.
instruct addD_reg_reg(regD dst, regD src1, regD src2) %{
  match(Set dst (AddD src1 src2));
  format %{ "AddD $dst, $src1, $src2 @addD_reg_reg" %}
  ins_encode %{
    __ add_d($dst$$FloatRegister, $src1$$FloatRegister, $src2$$FloatRegister);
  %}
  ins_pipe( fpu_regF_regF );
%}

// Double-precision FP subtract.
instruct subD_reg_reg(regD dst, regD src1, regD src2) %{
  match(Set dst (SubD src1 src2));
  format %{ "SubD $dst, $src1, $src2 @subD_reg_reg" %}
  ins_encode %{
    __ sub_d($dst$$FloatRegister, $src1$$FloatRegister, $src2$$FloatRegister);
  %}
  ins_pipe( fpu_regF_regF );
%}
// Single-precision FP negate.
instruct negF_reg(regF dst, regF src) %{
  match(Set dst (NegF src));
  format %{ "negF $dst, $src @negF_reg" %}
  ins_encode %{
    __ neg_s($dst$$FloatRegister, $src$$FloatRegister);
  %}
  ins_pipe( fpu_regF_regF );
%}

// Double-precision FP negate.
instruct negD_reg(regD dst, regD src) %{
  match(Set dst (NegD src));
  format %{ "negD $dst, $src @negD_reg" %}
  ins_encode %{
    __ neg_d($dst$$FloatRegister, $src$$FloatRegister);
  %}
  ins_pipe( fpu_regF_regF );
%}
// Single-precision FP multiply.
instruct mulF_reg_reg(regF dst, regF src1, regF src2) %{
  match(Set dst (MulF src1 src2));
  format %{ "MULF $dst, $src1, $src2 @mulF_reg_reg" %}
  ins_encode %{
    __ mul_s($dst$$FloatRegister, $src1$$FloatRegister, $src2$$FloatRegister);
  %}
  ins_pipe( fpu_regF_regF );
%}

// Fused single-precision multiply-add.
instruct maddF_reg_reg(regF dst, regF src1, regF src2, regF src3) %{
  match(Set dst (AddF (MulF src1 src2) src3));
  // For compatibility reasons (e.g. on the Loongson platform) this rule is
  // effectively disabled via a prohibitive cost.
  ins_cost(44444);
  format %{ "maddF $dst, $src1, $src2, $src3 @maddF_reg_reg" %}
  ins_encode %{
    FloatRegister fa = $src1$$FloatRegister;
    FloatRegister fb = $src2$$FloatRegister;
    FloatRegister fc = $src3$$FloatRegister;
    FloatRegister fd = $dst$$FloatRegister;

    __ madd_s(fd, fa, fb, fc);
  %}
  ins_pipe( fpu_regF_regF );
%}
// Multiply two double-precision floating-point numbers.
instruct mulD_reg_reg(regD dst, regD src1, regD src2) %{
  match(Set dst (MulD src1 src2));
  format %{ "MULD $dst, $src1, $src2 @mulD_reg_reg" %}
  ins_encode %{
    __ mul_d($dst$$FloatRegister, $src1$$FloatRegister, $src2$$FloatRegister);
  %}
  ins_pipe( fpu_regF_regF );
%}

// Fused double-precision multiply-add.
instruct maddD_reg_reg(regD dst, regD src1, regD src2, regD src3) %{
  match(Set dst (AddD (MulD src1 src2) src3));
  // For compatibility reasons (e.g. on the Loongson platform) this rule is
  // effectively disabled via a prohibitive cost.
  ins_cost(44444);
  format %{ "maddD $dst, $src1, $src2, $src3 @maddD_reg_reg" %}
  ins_encode %{
    FloatRegister fa = $src1$$FloatRegister;
    FloatRegister fb = $src2$$FloatRegister;
    FloatRegister fc = $src3$$FloatRegister;
    FloatRegister fd = $dst$$FloatRegister;

    __ madd_d(fd, fa, fb, fc);
  %}
  ins_pipe( fpu_regF_regF );
%}
// Single-precision FP absolute value.
instruct absF_reg(regF dst, regF src) %{
  match(Set dst (AbsF src));
  ins_cost(100);
  format %{ "absF $dst, $src @absF_reg" %}
  ins_encode %{
    __ abs_s($dst$$FloatRegister, $src$$FloatRegister);
  %}
  ins_pipe( fpu_regF_regF );
%}
// Intrinsics for math natives:
// AbsD SqrtD CosD SinD TanD LogD Log10D

// Double-precision FP absolute value.
instruct absD_reg(regD dst, regD src) %{
  match(Set dst (AbsD src));
  ins_cost(100);
  format %{ "absD $dst, $src @absD_reg" %}
  ins_encode %{
    __ abs_d($dst$$FloatRegister, $src$$FloatRegister);
  %}
  ins_pipe( fpu_regF_regF );
%}
// Double-precision square root.
instruct sqrtD_reg(regD dst, regD src) %{
  match(Set dst (SqrtD src));
  ins_cost(100);
  format %{ "SqrtD $dst, $src @sqrtD_reg" %}
  ins_encode %{
    __ sqrt_d($dst$$FloatRegister, $src$$FloatRegister);
  %}
  ins_pipe( fpu_regF_regF );
%}

// Single-precision square root, matched from the float->double->float
// widening idiom the compiler emits for Math.sqrt on floats.
instruct sqrtF_reg(regF dst, regF src) %{
  match(Set dst (ConvD2F (SqrtD (ConvF2D src))));
  ins_cost(100);
  format %{ "SqrtF $dst, $src @sqrtF_reg" %}
  ins_encode %{
    __ sqrt_s($dst$$FloatRegister, $src$$FloatRegister);
  %}
  ins_pipe( fpu_regF_regF );
%}
//----------------------------------Logical Instructions----------------------
//__________________________________Integer Logical Instructions--------------

// And Instructions
// And Register with Immediate (general 32-bit immediate, materialized in AT).
instruct andI_Reg_immI(mRegI dst, mRegI src1, immI src2) %{
  match(Set dst (AndI src1 src2));

  format %{ "and $dst, $src1, $src2 #@andI_Reg_immI" %}
  ins_encode %{
    Register rd = $dst$$Register;
    Register rs = $src1$$Register;
    int imm = $src2$$constant;

    // Immediate does not fit andi's 16-bit field: load into AT first.
    __ move(AT, imm);
    __ andr(rd, rs, AT);
  %}
  ins_pipe( ialu_regI_regI );
%}
// And with an immediate that fits andi's 16-bit zero-extended field.
instruct andI_Reg_imm_0_65535(mRegI dst, mRegI src1, immI_0_65535 src2) %{
  match(Set dst (AndI src1 src2));
  ins_cost(60);

  format %{ "and $dst, $src1, $src2 #@andI_Reg_imm_0_65535" %}
  ins_encode %{
    __ andi($dst$$Register, $src1$$Register, $src2$$constant);
  %}
  ins_pipe( ialu_regI_regI );
%}
// And with a contiguous low-bit mask (2^k - 1): use a bit-field extract.
instruct andI_Reg_immI_nonneg_mask(mRegI dst, mRegI src1, immI_nonneg_mask mask) %{
  match(Set dst (AndI src1 mask));
  ins_cost(60);

  format %{ "and $dst, $src1, $mask #@andI_Reg_immI_nonneg_mask" %}
  ins_encode %{
    // is_int_mask returns the number of one-bits in the mask.
    int width = Assembler::is_int_mask($mask$$constant);

    __ ext($dst$$Register, $src1$$Register, 0, width);
  %}
  ins_pipe( ialu_regI_regI );
%}

// Long variant of the contiguous low-bit mask AND.
instruct andL_Reg_immL_nonneg_mask(mRegL dst, mRegL src1, immL_nonneg_mask mask) %{
  match(Set dst (AndL src1 mask));
  ins_cost(60);

  format %{ "and $dst, $src1, $mask #@andL_Reg_immL_nonneg_mask" %}
  ins_encode %{
    // is_jlong_mask returns the number of one-bits in the mask.
    int width = Assembler::is_jlong_mask($mask$$constant);

    __ dext($dst$$Register, $src1$$Register, 0, width);
  %}
  ins_pipe( ialu_regI_regI );
%}
// Xor with an immediate that fits xori's 16-bit zero-extended field.
instruct xorI_Reg_imm_0_65535(mRegI dst, mRegI src1, immI_0_65535 src2) %{
  match(Set dst (XorI src1 src2));
  ins_cost(60);

  format %{ "xori $dst, $src1, $src2 #@xorI_Reg_imm_0_65535" %}
  ins_encode %{
    __ xori($dst$$Register, $src1$$Register, $src2$$constant);
  %}
  ins_pipe( ialu_regI_regI );
%}
// x ^ -1 == ~x: emit a single Loongson or-not with the zero register
// (gsorn(d, R0, s) computes R0 | ~s == ~s).
instruct xorI_Reg_immI_M1(mRegI dst, mRegI src1, immI_M1 M1) %{
  match(Set dst (XorI src1 M1));
  predicate(UseLoongsonISA);
  ins_cost(60);

  format %{ "xor $dst, $src1, $M1 #@xorI_Reg_immI_M1" %}
  ins_encode %{
    __ gsorn($dst$$Register, R0, $src1$$Register);
  %}
  ins_pipe( ialu_regI_regI );
%}

// Same bitwise-not idiom applied to the low word of a long.
instruct xorL2I_Reg_immI_M1(mRegI dst, mRegL src1, immI_M1 M1) %{
  match(Set dst (XorI (ConvL2I src1) M1));
  predicate(UseLoongsonISA);
  ins_cost(60);

  format %{ "xor $dst, $src1, $M1 #@xorL2I_Reg_immI_M1" %}
  ins_encode %{
    __ gsorn($dst$$Register, R0, $src1$$Register);
  %}
  ins_pipe( ialu_regI_regI );
%}
// Long xor with an immediate that fits xori's 16-bit zero-extended field.
instruct xorL_Reg_imm_0_65535(mRegL dst, mRegL src1, immL_0_65535 src2) %{
  match(Set dst (XorL src1 src2));
  ins_cost(60);

  format %{ "xori $dst, $src1, $src2 #@xorL_Reg_imm_0_65535" %}
  ins_encode %{
    long imm = $src2$$constant;

    __ xori($dst$$Register, $src1$$Register, imm);
  %}
  ins_pipe( ialu_regI_regI );
%}
10154 /*
10155 instruct xorL_Reg_immL_M1(mRegL dst, mRegL src1, immL_M1 M1) %{
10156 match(Set dst (XorL src1 M1));
10157 predicate(UseLoongsonISA);
10158 ins_cost(60);
10160 format %{ "xor $dst, $src1, $M1 #@xorL_Reg_immL_M1" %}
10161 ins_encode %{
10162 Register dst = $dst$$Register;
10163 Register src = $src1$$Register;
10165 __ gsorn(dst, R0, src);
10166 %}
10167 ins_pipe( ialu_regI_regI );
10168 %}
10169 */
// Fold (0xFF & LoadB) into a single unsigned byte load.
// Fixed format text: the encoding (load_UB_enc) emits lbu, not lhu.
instruct lbu_and_lmask(mRegI dst, memory mem, immI_255 mask) %{
  match(Set dst (AndI mask (LoadB mem)));
  ins_cost(60);

  format %{ "lbu $dst, $mem #@lbu_and_lmask" %}
  ins_encode(load_UB_enc(dst, mem));
  ins_pipe( ialu_loadI );
%}
// Fold (LoadB & 0xFF) into a single unsigned byte load (mask on the right).
// Fixed format text: the encoding (load_UB_enc) emits lbu, not lhu.
instruct lbu_and_rmask(mRegI dst, memory mem, immI_255 mask) %{
  match(Set dst (AndI (LoadB mem) mask));
  ins_cost(60);

  format %{ "lbu $dst, $mem #@lbu_and_rmask" %}
  ins_encode(load_UB_enc(dst, mem));
  ins_pipe( ialu_loadI );
%}
// And, register with register.
instruct andI_Reg_Reg(mRegI dst, mRegI src1, mRegI src2) %{
  match(Set dst (AndI src1 src2));

  format %{ "and $dst, $src1, $src2 #@andI_Reg_Reg" %}
  ins_encode %{
    __ andr($dst$$Register, $src1$$Register, $src2$$Register);
  %}
  ins_pipe( ialu_regI_regI );
%}
// src1 & ~src2 via Loongson and-not (the XorI-with--1 is a bitwise not).
instruct andnI_Reg_nReg(mRegI dst, mRegI src1, mRegI src2, immI_M1 M1) %{
  match(Set dst (AndI src1 (XorI src2 M1)));
  predicate(UseLoongsonISA);

  format %{ "andn $dst, $src1, $src2 #@andnI_Reg_nReg" %}
  ins_encode %{
    __ gsandn($dst$$Register, $src1$$Register, $src2$$Register);
  %}
  ins_pipe( ialu_regI_regI );
%}

// src1 | ~src2 via Loongson or-not.
instruct ornI_Reg_nReg(mRegI dst, mRegI src1, mRegI src2, immI_M1 M1) %{
  match(Set dst (OrI src1 (XorI src2 M1)));
  predicate(UseLoongsonISA);

  format %{ "orn $dst, $src1, $src2 #@ornI_Reg_nReg" %}
  ins_encode %{
    __ gsorn($dst$$Register, $src1$$Register, $src2$$Register);
  %}
  ins_pipe( ialu_regI_regI );
%}

// ~src1 & src2: same and-not with operands swapped.
instruct andnI_nReg_Reg(mRegI dst, mRegI src1, mRegI src2, immI_M1 M1) %{
  match(Set dst (AndI (XorI src1 M1) src2));
  predicate(UseLoongsonISA);

  format %{ "andn $dst, $src2, $src1 #@andnI_nReg_Reg" %}
  ins_encode %{
    __ gsandn($dst$$Register, $src2$$Register, $src1$$Register);
  %}
  ins_pipe( ialu_regI_regI );
%}

// ~src1 | src2: same or-not with operands swapped.
instruct ornI_nReg_Reg(mRegI dst, mRegI src1, mRegI src2, immI_M1 M1) %{
  match(Set dst (OrI (XorI src1 M1) src2));
  predicate(UseLoongsonISA);

  format %{ "orn $dst, $src2, $src1 #@ornI_nReg_Reg" %}
  ins_encode %{
    __ gsorn($dst$$Register, $src2$$Register, $src1$$Register);
  %}
  ins_pipe( ialu_regI_regI );
%}
// And Long Register with Register.
instruct andL_Reg_Reg(mRegL dst, mRegL src1, mRegL src2) %{
  match(Set dst (AndL src1 src2));
  format %{ "AND $dst, $src1, $src2 @ andL_Reg_Reg\n\t" %}
  ins_encode %{
    __ andr($dst$$Register, $src1$$Register, $src2$$Register);
  %}
  ins_pipe( ialu_regL_regL );
%}

// Long AND where the second operand is a sign-extended int.
instruct andL_Reg_Reg_convI2L(mRegL dst, mRegL src1, mRegI src2) %{
  match(Set dst (AndL src1 (ConvI2L src2)));
  format %{ "AND $dst, $src1, $src2 @ andL_Reg_Reg_convI2L\n\t" %}
  ins_encode %{
    __ andr($dst$$Register, $src1$$Register, $src2$$Register);
  %}
  ins_pipe( ialu_regL_regL );
%}
// Long AND with an immediate that fits andi's 16-bit zero-extended field.
instruct andL_Reg_imm_0_65535(mRegL dst, mRegL src1, immL_0_65535 src2) %{
  match(Set dst (AndL src1 src2));
  ins_cost(60);

  format %{ "and $dst, $src1, $src2 #@andL_Reg_imm_0_65535" %}
  ins_encode %{
    long imm = $src2$$constant;

    __ andi($dst$$Register, $src1$$Register, imm);
  %}
  ins_pipe( ialu_regI_regI );
%}

// Narrowing variant: (int)(long & imm16). The andi result already fits
// in 16 bits, so no extra narrowing is needed.
instruct andL2I_Reg_imm_0_65535(mRegI dst, mRegL src1, immL_0_65535 src2) %{
  match(Set dst (ConvL2I (AndL src1 src2)));
  ins_cost(60);

  format %{ "and $dst, $src1, $src2 #@andL2I_Reg_imm_0_65535" %}
  ins_encode %{
    long imm = $src2$$constant;

    __ andi($dst$$Register, $src1$$Register, imm);
  %}
  ins_pipe( ialu_regI_regI );
%}
10319 /*
10320 instruct andnL_Reg_nReg(mRegL dst, mRegL src1, mRegL src2, immL_M1 M1) %{
10321 match(Set dst (AndL src1 (XorL src2 M1)));
10322 predicate(UseLoongsonISA);
10324 format %{ "andn $dst, $src1, $src2 #@andnL_Reg_nReg" %}
10325 ins_encode %{
10326 Register dst = $dst$$Register;
10327 Register src1 = $src1$$Register;
10328 Register src2 = $src2$$Register;
10330 __ gsandn(dst, src1, src2);
10331 %}
10332 ins_pipe( ialu_regI_regI );
10333 %}
10334 */
10336 /*
10337 instruct ornL_Reg_nReg(mRegL dst, mRegL src1, mRegL src2, immL_M1 M1) %{
10338 match(Set dst (OrL src1 (XorL src2 M1)));
10339 predicate(UseLoongsonISA);
10341 format %{ "orn $dst, $src1, $src2 #@ornL_Reg_nReg" %}
10342 ins_encode %{
10343 Register dst = $dst$$Register;
10344 Register src1 = $src1$$Register;
10345 Register src2 = $src2$$Register;
10347 __ gsorn(dst, src1, src2);
10348 %}
10349 ins_pipe( ialu_regI_regI );
10350 %}
10351 */
10353 /*
10354 instruct andnL_nReg_Reg(mRegL dst, mRegL src1, mRegL src2, immL_M1 M1) %{
10355 match(Set dst (AndL (XorL src1 M1) src2));
10356 predicate(UseLoongsonISA);
10358 format %{ "andn $dst, $src2, $src1 #@andnL_nReg_Reg" %}
10359 ins_encode %{
10360 Register dst = $dst$$Register;
10361 Register src1 = $src1$$Register;
10362 Register src2 = $src2$$Register;
10364 __ gsandn(dst, src2, src1);
10365 %}
10366 ins_pipe( ialu_regI_regI );
10367 %}
10368 */
10370 /*
10371 instruct ornL_nReg_Reg(mRegL dst, mRegL src1, mRegL src2, immL_M1 M1) %{
10372 match(Set dst (OrL (XorL src1 M1) src2));
10373 predicate(UseLoongsonISA);
10375 format %{ "orn $dst, $src2, $src1 #@ornL_nReg_Reg" %}
10376 ins_encode %{
10377 Register dst = $dst$$Register;
10378 Register src1 = $src1$$Register;
10379 Register src2 = $src2$$Register;
10381 __ gsorn(dst, src2, src1);
10382 %}
10383 ins_pipe( ialu_regI_regI );
10384 %}
10385 */
// dst &= -8 (~7): clear bits [2:0] by inserting zeros from R0.
instruct andL_Reg_immL_M8(mRegL dst, immL_M8 M8) %{
  match(Set dst (AndL dst M8));
  ins_cost(60);

  format %{ "and $dst, $dst, $M8 #@andL_Reg_immL_M8" %}
  ins_encode %{
    __ dins($dst$$Register, R0, 0, 3);
  %}
  ins_pipe( ialu_regI_regI );
%}

// dst &= -5 (~4): clear bit 2.
instruct andL_Reg_immL_M5(mRegL dst, immL_M5 M5) %{
  match(Set dst (AndL dst M5));
  ins_cost(60);

  format %{ "and $dst, $dst, $M5 #@andL_Reg_immL_M5" %}
  ins_encode %{
    __ dins($dst$$Register, R0, 2, 1);
  %}
  ins_pipe( ialu_regI_regI );
%}

// dst &= -7 (~6): clear bits [2:1].
instruct andL_Reg_immL_M7(mRegL dst, immL_M7 M7) %{
  match(Set dst (AndL dst M7));
  ins_cost(60);

  format %{ "and $dst, $dst, $M7 #@andL_Reg_immL_M7" %}
  ins_encode %{
    __ dins($dst$$Register, R0, 1, 2);
  %}
  ins_pipe( ialu_regI_regI );
%}

// dst &= -4 (~3): clear bits [1:0].
instruct andL_Reg_immL_M4(mRegL dst, immL_M4 M4) %{
  match(Set dst (AndL dst M4));
  ins_cost(60);

  format %{ "and $dst, $dst, $M4 #@andL_Reg_immL_M4" %}
  ins_encode %{
    __ dins($dst$$Register, R0, 0, 2);
  %}
  ins_pipe( ialu_regI_regI );
%}

// dst &= -121 (~120): clear bits [6:3].
instruct andL_Reg_immL_M121(mRegL dst, immL_M121 M121) %{
  match(Set dst (AndL dst M121));
  ins_cost(60);

  format %{ "and $dst, $dst, $M121 #@andL_Reg_immL_M121" %}
  ins_encode %{
    __ dins($dst$$Register, R0, 3, 4);
  %}
  ins_pipe( ialu_regI_regI );
%}
// Or Long Register with Register.
instruct orL_Reg_Reg(mRegL dst, mRegL src1, mRegL src2) %{
  match(Set dst (OrL src1 src2));
  format %{ "OR $dst, $src1, $src2 @ orL_Reg_Reg\t" %}
  ins_encode %{
    __ orr($dst$$Register, $src1$$Register, $src2$$Register);
  %}
  ins_pipe( ialu_regL_regL );
%}

// Long OR where the first operand is a pointer reinterpreted as bits.
instruct orL_Reg_P2XReg(mRegL dst, mRegP src1, mRegL src2) %{
  match(Set dst (OrL (CastP2X src1) src2));
  format %{ "OR $dst, $src1, $src2 @ orL_Reg_P2XReg\t" %}
  ins_encode %{
    __ orr($dst$$Register, $src1$$Register, $src2$$Register);
  %}
  ins_pipe( ialu_regL_regL );
%}
// Xor Long Register with Register.
instruct xorL_Reg_Reg(mRegL dst, mRegL src1, mRegL src2) %{
  match(Set dst (XorL src1 src2));
  format %{ "XOR $dst, $src1, $src2 @ xorL_Reg_Reg\t" %}
  ins_encode %{
    __ xorr($dst$$Register, $src1$$Register, $src2$$Register);
  %}
  ins_pipe( ialu_regL_regL );
%}
// Shift Left (int) by immediate.
instruct salI_Reg_imm(mRegI dst, mRegI src, immI8 shift) %{
  match(Set dst (LShiftI src shift));

  format %{ "SHL $dst, $src, $shift #@salI_Reg_imm" %}
  ins_encode %{
    int sa = $shift$$constant;

    __ sll($dst$$Register, $src$$Register, sa);
  %}
  ins_pipe( ialu_regI_regI );
%}
// Int shift-left of a narrowed long.
// NOTE(review): this matches the same tree as salI_RegL2I_imm below —
// apparently a duplicate rule; verify which one the matcher selects.
instruct salL2I_Reg_imm(mRegI dst, mRegL src, immI8 shift) %{
  match(Set dst (LShiftI (ConvL2I src) shift));

  format %{ "SHL $dst, $src, $shift #@salL2I_Reg_imm" %}
  ins_encode %{
    int sa = $shift$$constant;

    __ sll($dst$$Register, $src$$Register, sa);
  %}
  ins_pipe( ialu_regI_regI );
%}
// (src << 16) & 0xFFFF0000: the shift already zeroes the low 16 bits,
// so the mask is redundant and a single sll suffices.
instruct salI_Reg_imm_and_M65536(mRegI dst, mRegI src, immI_16 shift, immI_M65536 mask) %{
  match(Set dst (AndI (LShiftI src shift) mask));

  format %{ "SHL $dst, $src, $shift #@salI_Reg_imm_and_M65536" %}
  ins_encode %{
    __ sll($dst$$Register, $src$$Register, 16);
  %}
  ins_pipe( ialu_regI_regI );
%}
// ((int)(src & 7) << 16) >> 16: the masked value is 0..7, so the
// sign-extension round trip is a no-op and a plain andi suffices.
instruct land7_2_s(mRegI dst, mRegL src, immL7 seven, immI_16 sixteen)
%{
  match(Set dst (RShiftI (LShiftI (ConvL2I (AndL src seven)) sixteen) sixteen));

  format %{ "andi $dst, $src, 7\t# @land7_2_s" %}
  ins_encode %{
    __ andi($dst$$Register, $src$$Register, 7);
  %}
  ins_pipe(ialu_regI_regI);
%}
// ((src1 | imm) << 16) >> 16 collapsed to a single ori.
// NOTE(review): this drops the final sign-extension of the low 16 bits;
// it appears to assume the OR result fits in a short — verify against the
// contexts where the compiler emits this idiom.
instruct ori2s(mRegI dst, mRegI src1, immI_0_32767 src2, immI_16 sixteen)
%{
  match(Set dst (RShiftI (LShiftI (OrI src1 src2) sixteen) sixteen));

  format %{ "ori $dst, $src1, $src2\t# @ori2s" %}
  ins_encode %{
    int imm = $src2$$constant;

    __ ori($dst$$Register, $src1$$Register, imm);
  %}
  ins_pipe(ialu_regI_regI);
%}
// Shift left by 16, then arithmetic shift right by 16.
// This idiom is used by the compiler for the i2s bytecode; it maps
// directly to the MIPS sign-extend-halfword instruction.
instruct i2s(mRegI dst, mRegI src, immI_16 sixteen)
%{
  match(Set dst (RShiftI (LShiftI src sixteen) sixteen));

  format %{ "i2s $dst, $src\t# @i2s" %}
  ins_encode %{
    __ seh($dst$$Register, $src$$Register);
  %}
  ins_pipe(ialu_regI_regI);
%}
// Shift left by 24, then arithmetic shift right by 24.
// This idiom is used by the compiler for the i2b bytecode; it maps
// directly to the MIPS sign-extend-byte instruction.
instruct i2b(mRegI dst, mRegI src, immI_24 twentyfour)
%{
  match(Set dst (RShiftI (LShiftI src twentyfour) twentyfour));

  format %{ "i2b $dst, $src\t# @i2b" %}
  ins_encode %{
    __ seb($dst$$Register, $src$$Register);
  %}
  ins_pipe(ialu_regI_regI);
%}
// Int shift-left of a narrowed long.
// NOTE(review): duplicates the match tree of salL2I_Reg_imm above.
instruct salI_RegL2I_imm(mRegI dst, mRegL src, immI8 shift) %{
  match(Set dst (LShiftI (ConvL2I src) shift));

  format %{ "SHL $dst, $src, $shift #@salI_RegL2I_imm" %}
  ins_encode %{
    int sa = $shift$$constant;

    __ sll($dst$$Register, $src$$Register, sa);
  %}
  ins_pipe( ialu_regI_regI );
%}
// Shift Left (int) by a register-held amount.
instruct salI_Reg_Reg(mRegI dst, mRegI src, mRegI shift) %{
  match(Set dst (LShiftI src shift));

  format %{ "SHL $dst, $src, $shift #@salI_Reg_Reg" %}
  ins_encode %{
    __ sllv($dst$$Register, $src$$Register, $shift$$Register);
  %}
  ins_pipe( ialu_regI_regI );
%}
// Shift Left Long by immediate. Amounts >= 32 must use dsll32, which
// encodes shift-minus-32 in the 5-bit sa field.
instruct salL_Reg_imm(mRegL dst, mRegL src, immI8 shift) %{
  //predicate(UseNewLongLShift);
  match(Set dst (LShiftL src shift));
  ins_cost(100);
  format %{ "salL $dst, $src, $shift @ salL_Reg_imm" %}
  ins_encode %{
    Register rs = as_Register($src$$reg);
    Register rd = as_Register($dst$$reg);
    int shamt = $shift$$constant;

    if (__ is_simm(shamt, 5)) {
      __ dsll(rd, rs, shamt);
    } else {
      // Reduce the amount mod 64, then pick dsll vs dsll32.
      int sa = Assembler::low(shamt, 6);
      if (sa < 32) {
        __ dsll(rd, rs, sa);
      } else {
        __ dsll32(rd, rs, sa - 32);
      }
    }
  %}
  ins_pipe( ialu_regL_regL );
%}
// Long shift-left of a sign-extended int by immediate.
instruct salL_RegI2L_imm(mRegL dst, mRegI src, immI8 shift) %{
  //predicate(UseNewLongLShift);
  match(Set dst (LShiftL (ConvI2L src) shift));
  ins_cost(100);
  format %{ "salL $dst, $src, $shift @ salL_RegI2L_imm" %}
  ins_encode %{
    Register rs = as_Register($src$$reg);
    Register rd = as_Register($dst$$reg);
    int shamt = $shift$$constant;

    if (__ is_simm(shamt, 5)) {
      __ dsll(rd, rs, shamt);
    } else {
      // Reduce the amount mod 64, then pick dsll vs dsll32.
      int sa = Assembler::low(shamt, 6);
      if (sa < 32) {
        __ dsll(rd, rs, sa);
      } else {
        __ dsll32(rd, rs, sa - 32);
      }
    }
  %}
  ins_pipe( ialu_regL_regL );
%}
// Shift Left Long by a register-held amount.
instruct salL_Reg_Reg(mRegL dst, mRegL src, mRegI shift) %{
  //predicate(UseNewLongLShift);
  match(Set dst (LShiftL src shift));
  ins_cost(100);
  format %{ "salL $dst, $src, $shift @ salL_Reg_Reg" %}
  ins_encode %{
    __ dsllv($dst$$Register, $src$$Register, $shift$$Register);
  %}
  ins_pipe( ialu_regL_regL );
%}
// Long shift-left of a sign-extended int by immediate.
// NOTE(review): duplicates the match tree of salL_RegI2L_imm above.
instruct salL_convI2L_Reg_imm(mRegL dst, mRegI src, immI8 shift) %{
  match(Set dst (LShiftL (ConvI2L src) shift));
  ins_cost(100);
  format %{ "salL $dst, $src, $shift @ salL_convI2L_Reg_imm" %}
  ins_encode %{
    Register rs = as_Register($src$$reg);
    Register rd = as_Register($dst$$reg);
    int shamt = $shift$$constant;

    if (__ is_simm(shamt, 5)) {
      __ dsll(rd, rs, shamt);
    } else {
      int sa = Assembler::low(shamt, 6);
      if (sa < 32) {
        __ dsll(rd, rs, sa);
      } else {
        __ dsll32(rd, rs, sa - 32);
      }
    }
  %}
  ins_pipe( ialu_regL_regL );
%}
// Shift Right Long (arithmetic) by immediate; amount is taken mod 64.
instruct sarL_Reg_imm(mRegL dst, mRegL src, immI8 shift) %{
  match(Set dst (RShiftL src shift));
  ins_cost(100);
  format %{ "sarL $dst, $src, $shift @ sarL_Reg_imm" %}
  ins_encode %{
    Register rs = as_Register($src$$reg);
    Register rd = as_Register($dst$$reg);
    int shamt = ($shift$$constant & 0x3f);

    if (__ is_simm(shamt, 5)) {
      __ dsra(rd, rs, shamt);
    } else {
      // Amounts >= 32 use dsra32 with the excess over 32.
      int sa = Assembler::low(shamt, 6);
      if (sa < 32) {
        __ dsra(rd, rs, sa);
      } else {
        __ dsra32(rd, rs, sa - 32);
      }
    }
  %}
  ins_pipe( ialu_regL_regL );
%}
// (int)(long >> [32..63]): a single dsra32 produces the narrowed result.
instruct sarL2I_Reg_immI_32_63(mRegI dst, mRegL src, immI_32_63 shift) %{
  match(Set dst (ConvL2I (RShiftL src shift)));
  ins_cost(100);
  format %{ "sarL $dst, $src, $shift @ sarL2I_Reg_immI_32_63" %}
  ins_encode %{
    int sa = $shift$$constant;

    __ dsra32($dst$$Register, $src$$Register, sa - 32);
  %}
  ins_pipe( ialu_regL_regL );
%}
// Shift Right Long arithmetically, by a register-held amount.
instruct sarL_Reg_Reg(mRegL dst, mRegL src, mRegI shift) %{
  //predicate(UseNewLongLShift);
  match(Set dst (RShiftL src shift));
  ins_cost(100);
  format %{ "sarL $dst, $src, $shift @ sarL_Reg_Reg" %}
  ins_encode %{
    __ dsrav($dst$$Register, $src$$Register, $shift$$Register);
  %}
  ins_pipe( ialu_regL_regL );
%}
// Shift Right Long logically, by a register-held amount.
instruct slrL_Reg_Reg(mRegL dst, mRegL src, mRegI shift) %{
  match(Set dst (URShiftL src shift));
  ins_cost(100);
  format %{ "slrL $dst, $src, $shift @ slrL_Reg_Reg" %}
  ins_encode %{
    __ dsrlv($dst$$Register, $src$$Register, $shift$$Register);
  %}
  ins_pipe( ialu_regL_regL );
%}
// Logical right shift of a long by [0..31]: plain dsrl.
instruct slrL_Reg_immI_0_31(mRegL dst, mRegL src, immI_0_31 shift) %{
  match(Set dst (URShiftL src shift));
  ins_cost(80);
  format %{ "slrL $dst, $src, $shift @ slrL_Reg_immI_0_31" %}
  ins_encode %{
    int sa = $shift$$constant;

    __ dsrl($dst$$Register, $src$$Register, sa);
  %}
  ins_pipe( ialu_regL_regL );
%}
// (int)(long >>> shift) & 0x7FFFFFFF: one 31-bit field extract.
instruct slrL_Reg_immI_0_31_and_max_int(mRegI dst, mRegL src, immI_0_31 shift, immI_MaxI max_int) %{
  match(Set dst (AndI (ConvL2I (URShiftL src shift)) max_int));
  ins_cost(80);
  format %{ "dext $dst, $src, $shift, 31 @ slrL_Reg_immI_0_31_and_max_int" %}
  ins_encode %{
    int pos = $shift$$constant;

    __ dext($dst$$Register, $src$$Register, pos, 31);
  %}
  ins_pipe( ialu_regL_regL );
%}
// Logical right shift of a pointer's bit pattern by [0..31].
instruct slrL_P2XReg_immI_0_31(mRegL dst, mRegP src, immI_0_31 shift) %{
  match(Set dst (URShiftL (CastP2X src) shift));
  ins_cost(80);
  format %{ "slrL $dst, $src, $shift @ slrL_P2XReg_immI_0_31" %}
  ins_encode %{
    int sa = $shift$$constant;

    __ dsrl($dst$$Register, $src$$Register, sa);
  %}
  ins_pipe( ialu_regL_regL );
%}
// Logical right shift of a long by [32..63]: dsrl32 with the excess over 32.
instruct slrL_Reg_immI_32_63(mRegL dst, mRegL src, immI_32_63 shift) %{
  match(Set dst (URShiftL src shift));
  ins_cost(80);
  format %{ "slrL $dst, $src, $shift @ slrL_Reg_immI_32_63" %}
  ins_encode %{
    int sa = $shift$$constant;

    __ dsrl32($dst$$Register, $src$$Register, sa - 32);
  %}
  ins_pipe( ialu_regL_regL );
%}
// (int)(long >>> shift) for shift in (32..63]: the high word shifted
// down already fits in 32 bits, so dsrl32 alone is the narrowed result.
instruct slrL_Reg_immI_convL2I(mRegI dst, mRegL src, immI_32_63 shift) %{
  match(Set dst (ConvL2I (URShiftL src shift)));
  predicate(n->in(1)->in(2)->get_int() > 32);
  ins_cost(80);
  format %{ "slrL $dst, $src, $shift @ slrL_Reg_immI_convL2I" %}
  ins_encode %{
    int sa = $shift$$constant;

    __ dsrl32($dst$$Register, $src$$Register, sa - 32);
  %}
  ins_pipe( ialu_regL_regL );
%}
// Logical right shift of a pointer's bit pattern by [32..63].
instruct slrL_P2XReg_immI_32_63(mRegL dst, mRegP src, immI_32_63 shift) %{
  match(Set dst (URShiftL (CastP2X src) shift));
  ins_cost(80);
  format %{ "slrL $dst, $src, $shift @ slrL_P2XReg_immI_32_63" %}
  ins_encode %{
    int sa = $shift$$constant;

    __ dsrl32($dst$$Register, $src$$Register, sa - 32);
  %}
  ins_pipe( ialu_regL_regL );
%}
// Xor Instructions
// Xor Register with Register.
instruct xorI_Reg_Reg(mRegI dst, mRegI src1, mRegI src2) %{
  match(Set dst (XorI src1 src2));

  format %{ "XOR $dst, $src1, $src2 #@xorI_Reg_Reg" %}

  ins_encode %{
    Register rd = $dst$$Register;

    __ xorr(rd, $src1$$Register, $src2$$Register);
    // Re-sign-extend the 32-bit result (sll by 0 is the canonical
    // long -> int normalization on MIPS64).
    __ sll(rd, rd, 0);
  %}

  ins_pipe( ialu_regI_regI );
%}
// Or Instructions
// Or Register with Register.
instruct orI_Reg_Reg(mRegI dst, mRegI src1, mRegI src2) %{
  match(Set dst (OrI src1 src2));

  format %{ "OR $dst, $src1, $src2 #@orI_Reg_Reg" %}
  ins_encode %{
    __ orr($dst$$Register, $src1$$Register, $src2$$Register);
  %}

  ins_pipe( ialu_regI_regI );
%}
// Match (src >>> rshift) | ((src & 1) << lshift) where rshift + lshift == 32:
// rotate the low bit to the top, then shift the rest into place.
instruct rotI_shr_logical_Reg(mRegI dst, mRegI src, immI_0_31 rshift, immI_0_31 lshift, immI_1 one) %{
  match(Set dst (OrI (URShiftI src rshift) (LShiftI (AndI src one) lshift)));
  predicate(32 == ((n->in(1)->in(2)->get_int() + n->in(2)->in(2)->get_int())));

  format %{ "rotr $dst, $src, 1 ...\n\t"
            "srl $dst, $dst, ($rshift-1) @ rotI_shr_logical_Reg" %}
  ins_encode %{
    Register rd = $dst$$Register;
    int rshift = $rshift$$constant;

    __ rotr(rd, $src$$Register, 1);
    // For rshift == 1 the rotate alone is the answer.
    if (rshift != 1) {
      __ srl(rd, rd, rshift - 1);
    }
  %}

  ins_pipe( ialu_regI_regI );
%}
10920 instruct orI_Reg_castP2X(mRegL dst, mRegL src1, mRegP src2) %{
10921 match(Set dst (OrI src1 (CastP2X src2)));
10923 format %{ "OR $dst, $src1, $src2 #@orI_Reg_castP2X" %}
10924 ins_encode %{
10925 Register dst = $dst$$Register;
10926 Register src1 = $src1$$Register;
10927 Register src2 = $src2$$Register;
10928 __ orr(dst, src1, src2);
10929 %}
10931 ins_pipe( ialu_regI_regI );
10932 %}
10934 // Logical Shift Right by 8-bit immediate
10935 instruct shr_logical_Reg_imm(mRegI dst, mRegI src, immI8 shift) %{
10936 match(Set dst (URShiftI src shift));
10937 // effect(KILL cr);
10939 format %{ "SRL $dst, $src, $shift #@shr_logical_Reg_imm" %}
10940 ins_encode %{
10941 Register src = $src$$Register;
10942 Register dst = $dst$$Register;
10943 int shift = $shift$$constant;
10945 __ srl(dst, src, shift);
10946 %}
10947 ins_pipe( ialu_regI_regI );
10948 %}
// Fuses (src >>> shift) & mask into a single MIPS32r2 'ext' bit-field extract
// when the mask is a contiguous run of low-order one bits.
// Assumes Assembler::is_int_mask returns the one-bit count of such a mask
// (used as the field width) — matcher guarantees mask shape via immI_nonneg_mask.
10950 instruct shr_logical_Reg_imm_nonneg_mask(mRegI dst, mRegI src, immI_0_31 shift, immI_nonneg_mask mask) %{
10951 match(Set dst (AndI (URShiftI src shift) mask));
10953 format %{ "ext $dst, $src, $shift, one-bits($mask) #@shr_logical_Reg_imm_nonneg_mask" %}
10954 ins_encode %{
10955 Register src = $src$$Register;
10956 Register dst = $dst$$Register;
10957 int pos = $shift$$constant;
10958 int size = Assembler::is_int_mask($mask$$constant);
10960 __ ext(dst, src, pos, size);
10961 %}
10962 ins_pipe( ialu_regI_regI );
10963 %}
// Rotate family: rotate-left is emitted as rotate-right by the complementary
// amount.  All predicates require lshift + rshift == 32 (int) or 64 (long),
// so a single rotr/drotr/drotr32 by the right-shift amount is exact.
// drotr32 handles amounts 32..63 (hardware field encodes sa - 32).
10965 instruct rolI_Reg_immI_0_31(mRegI dst, immI_0_31 lshift, immI_0_31 rshift)
10966 %{
10967 predicate(0 == ((n->in(1)->in(2)->get_int() + n->in(2)->in(2)->get_int()) & 0x1f));
10968 match(Set dst (OrI (LShiftI dst lshift) (URShiftI dst rshift)));
10970 ins_cost(100);
10971 format %{ "rotr $dst, $dst, $rshift #@rolI_Reg_immI_0_31" %}
10972 ins_encode %{
10973 Register dst = $dst$$Register;
10974 int sa = $rshift$$constant;
10976 __ rotr(dst, dst, sa);
10977 %}
10978 ins_pipe( ialu_regI_regI );
10979 %}
10981 instruct rolL_Reg_immI_0_31(mRegL dst, immI_32_63 lshift, immI_0_31 rshift)
10982 %{
10983 predicate(0 == ((n->in(1)->in(2)->get_int() + n->in(2)->in(2)->get_int()) & 0x3f));
10984 match(Set dst (OrL (LShiftL dst lshift) (URShiftL dst rshift)));
10986 ins_cost(100);
10987 format %{ "rotr $dst, $dst, $rshift #@rolL_Reg_immI_0_31" %}
10988 ins_encode %{
10989 Register dst = $dst$$Register;
10990 int sa = $rshift$$constant;
10992 __ drotr(dst, dst, sa);
10993 %}
10994 ins_pipe( ialu_regI_regI );
10995 %}
10997 instruct rolL_Reg_immI_32_63(mRegL dst, immI_0_31 lshift, immI_32_63 rshift)
10998 %{
10999 predicate(0 == ((n->in(1)->in(2)->get_int() + n->in(2)->in(2)->get_int()) & 0x3f));
11000 match(Set dst (OrL (LShiftL dst lshift) (URShiftL dst rshift)));
11002 ins_cost(100);
11003 format %{ "rotr $dst, $dst, $rshift #@rolL_Reg_immI_32_63" %}
11004 ins_encode %{
11005 Register dst = $dst$$Register;
11006 int sa = $rshift$$constant;
11008 __ drotr32(dst, dst, sa - 32);
11009 %}
11010 ins_pipe( ialu_regI_regI );
11011 %}
// rorI/rorL mirror the rolI/rolL patterns with the Or operands swapped
// (URShift first); the emitted rotate is identical.
11013 instruct rorI_Reg_immI_0_31(mRegI dst, immI_0_31 rshift, immI_0_31 lshift)
11014 %{
11015 predicate(0 == ((n->in(1)->in(2)->get_int() + n->in(2)->in(2)->get_int()) & 0x1f));
11016 match(Set dst (OrI (URShiftI dst rshift) (LShiftI dst lshift)));
11018 ins_cost(100);
11019 format %{ "rotr $dst, $dst, $rshift #@rorI_Reg_immI_0_31" %}
11020 ins_encode %{
11021 Register dst = $dst$$Register;
11022 int sa = $rshift$$constant;
11024 __ rotr(dst, dst, sa);
11025 %}
11026 ins_pipe( ialu_regI_regI );
11027 %}
11029 instruct rorL_Reg_immI_0_31(mRegL dst, immI_0_31 rshift, immI_32_63 lshift)
11030 %{
11031 predicate(0 == ((n->in(1)->in(2)->get_int() + n->in(2)->in(2)->get_int()) & 0x3f));
11032 match(Set dst (OrL (URShiftL dst rshift) (LShiftL dst lshift)));
11034 ins_cost(100);
11035 format %{ "rotr $dst, $dst, $rshift #@rorL_Reg_immI_0_31" %}
11036 ins_encode %{
11037 Register dst = $dst$$Register;
11038 int sa = $rshift$$constant;
11040 __ drotr(dst, dst, sa);
11041 %}
11042 ins_pipe( ialu_regI_regI );
11043 %}
11045 instruct rorL_Reg_immI_32_63(mRegL dst, immI_32_63 rshift, immI_0_31 lshift)
11046 %{
11047 predicate(0 == ((n->in(1)->in(2)->get_int() + n->in(2)->in(2)->get_int()) & 0x3f));
11048 match(Set dst (OrL (URShiftL dst rshift) (LShiftL dst lshift)));
11050 ins_cost(100);
11051 format %{ "rotr $dst, $dst, $rshift #@rorL_Reg_immI_32_63" %}
11052 ins_encode %{
11053 Register dst = $dst$$Register;
11054 int sa = $rshift$$constant;
11056 __ drotr32(dst, dst, sa - 32);
11057 %}
11058 ins_pipe( ialu_regI_regI );
11059 %}
11061 // Logical Shift Right
// Variable-amount shifts: srlv/srav use only the low 5 bits of the shift
// register, matching Java's (shift & 0x1f) semantics for int shifts.
11062 instruct shr_logical_Reg_Reg(mRegI dst, mRegI src, mRegI shift) %{
11063 match(Set dst (URShiftI src shift));
11065 format %{ "SRL $dst, $src, $shift #@shr_logical_Reg_Reg" %}
11066 ins_encode %{
11067 Register src = $src$$Register;
11068 Register dst = $dst$$Register;
11069 Register shift = $shift$$Register;
11070 __ srlv(dst, src, shift);
11071 %}
11072 ins_pipe( ialu_regI_regI );
11073 %}
// Arithmetic (sign-propagating) shift right by constant.
11076 instruct shr_arith_Reg_imm(mRegI dst, mRegI src, immI8 shift) %{
11077 match(Set dst (RShiftI src shift));
11078 // effect(KILL cr);
11080 format %{ "SRA $dst, $src, $shift #@shr_arith_Reg_imm" %}
11081 ins_encode %{
11082 Register src = $src$$Register;
11083 Register dst = $dst$$Register;
11084 int shift = $shift$$constant;
11085 __ sra(dst, src, shift);
11086 %}
11087 ins_pipe( ialu_regI_regI );
11088 %}
// Arithmetic shift right by register amount.
11090 instruct shr_arith_Reg_Reg(mRegI dst, mRegI src, mRegI shift) %{
11091 match(Set dst (RShiftI src shift));
11092 // effect(KILL cr);
11094 format %{ "SRA $dst, $src, $shift #@shr_arith_Reg_Reg" %}
11095 ins_encode %{
11096 Register src = $src$$Register;
11097 Register dst = $dst$$Register;
11098 Register shift = $shift$$Register;
11099 __ srav(dst, src, shift);
11100 %}
11101 ins_pipe( ialu_regI_regI );
11102 %}
11104 //----------Convert Int to Boolean---------------------------------------------
// dst = (src != 0) ? 1 : 0.  Sets dst to 1, then movz clears it when src == 0.
// When dst aliases src, src must be saved in AT first, because writing 1 to
// dst would destroy the value movz needs to test.
11106 instruct convI2B(mRegI dst, mRegI src) %{
11107 match(Set dst (Conv2B src));
11109 ins_cost(100);
11110 format %{ "convI2B $dst, $src @ convI2B" %}
11111 ins_encode %{
11112 Register dst = as_Register($dst$$reg);
11113 Register src = as_Register($src$$reg);
11115 if (dst != src) {
11116 __ daddiu(dst, R0, 1);
11117 __ movz(dst, R0, src);
11118 } else {
11119 __ move(AT, src);
11120 __ daddiu(dst, R0, 1);
11121 __ movz(dst, R0, AT);
11122 }
11123 %}
11125 ins_pipe( ialu_regL_regL );
11126 %}
// int -> long: sll(.,.,0) sign-extends bits 31..0 into the full register.
// When dst == src nothing is emitted — relies on the MIPS64 convention that
// int values are already kept sign-extended in registers.
11128 instruct convI2L_reg( mRegL dst, mRegI src) %{
11129 match(Set dst (ConvI2L src));
11131 ins_cost(100);
11132 format %{ "SLL $dst, $src @ convI2L_reg\t" %}
11133 ins_encode %{
11134 Register dst = as_Register($dst$$reg);
11135 Register src = as_Register($src$$reg);
11137 if(dst != src) __ sll(dst, src, 0);
11138 %}
11139 ins_pipe( ialu_regL_regL );
11140 %}
// long -> int: truncate to the low 32 bits and sign-extend (canonical int).
11143 instruct convL2I_reg( mRegI dst, mRegL src ) %{
11144 match(Set dst (ConvL2I src));
11146 format %{ "MOV $dst, $src @ convL2I_reg" %}
11147 ins_encode %{
11148 Register dst = as_Register($dst$$reg);
11149 Register src = as_Register($src$$reg);
11151 __ sll(dst, src, 0);
11152 %}
11154 ins_pipe( ialu_regI_regI );
11155 %}
// long -> int -> long round trip collapses to a single sign-extension.
11157 instruct convL2I2L_reg( mRegL dst, mRegL src ) %{
11158 match(Set dst (ConvI2L (ConvL2I src)));
11160 format %{ "sll $dst, $src, 0 @ convL2I2L_reg" %}
11161 ins_encode %{
11162 Register dst = as_Register($dst$$reg);
11163 Register src = as_Register($src$$reg);
11165 __ sll(dst, src, 0);
11166 %}
11168 ins_pipe( ialu_regI_regI );
11169 %}
// long -> double: move the 64-bit GPR into an FPR, then convert (cvt.d.l).
11171 instruct convL2D_reg( regD dst, mRegL src ) %{
11172 match(Set dst (ConvL2D src));
11173 format %{ "convL2D $dst, $src @ convL2D_reg" %}
11174 ins_encode %{
11175 Register src = as_Register($src$$reg);
11176 FloatRegister dst = as_FloatRegister($dst$$reg);
11178 __ dmtc1(src, dst);
11179 __ cvt_d_l(dst, dst);
11180 %}
11182 ins_pipe( pipe_slow );
11183 %}
// double -> long, fast path (no runtime call).  trunc.l.d produces max_long
// on overflow/NaN; if the result differs from max_long it is correct and we
// are done.  Otherwise fix up: positive overflow keeps max_long, negative
// overflow yields min_long (T9 - AT = -1 - max_long), and NaN is forced to 0
// via the c.un.d condition + movt.  Clobbers AT, T9, F30.
11185 instruct convD2L_reg_fast( mRegL dst, regD src ) %{
11186 match(Set dst (ConvD2L src));
11187 ins_cost(150);
11188 format %{ "convD2L $dst, $src @ convD2L_reg_fast" %}
11189 ins_encode %{
11190 Register dst = as_Register($dst$$reg);
11191 FloatRegister src = as_FloatRegister($src$$reg);
11193 Label Done;
11195 __ trunc_l_d(F30, src);
11196 // max_long: 0x7fffffffffffffff
11197 // __ set64(AT, 0x7fffffffffffffff);
11198 __ daddiu(AT, R0, -1);
11199 __ dsrl(AT, AT, 1);
11200 __ dmfc1(dst, F30);
11202 __ bne(dst, AT, Done);
11203 __ delayed()->mtc1(R0, F30);
11205 __ cvt_d_w(F30, F30);
11206 __ c_ult_d(src, F30);
11207 __ bc1f(Done);
11208 __ delayed()->daddiu(T9, R0, -1);
11210 __ c_un_d(src, src); //NaN?
11211 __ subu(dst, T9, AT);
11212 __ movt(dst, R0);
11214 __ bind(Done);
11215 %}
11217 ins_pipe( pipe_slow );
11218 %}
// double -> long, slow path: NaN returns 0 immediately; if trunc.l.d raises
// the invalid-operation flag (FCSR bit 16) the result is recomputed by
// calling SharedRuntime::d2l.  Clobbers AT, T9, F12, F30, V0.
11220 instruct convD2L_reg_slow( mRegL dst, regD src ) %{
11221 match(Set dst (ConvD2L src));
11222 ins_cost(250);
11223 format %{ "convD2L $dst, $src @ convD2L_reg_slow" %}
11224 ins_encode %{
11225 Register dst = as_Register($dst$$reg);
11226 FloatRegister src = as_FloatRegister($src$$reg);
11228 Label L;
11230 __ c_un_d(src, src); //NaN?
11231 __ bc1t(L);
11232 __ delayed();
11233 __ move(dst, R0);
11235 __ trunc_l_d(F30, src);
11236 __ cfc1(AT, 31);
11237 __ li(T9, 0x10000);
11238 __ andr(AT, AT, T9);
11239 __ beq(AT, R0, L);
11240 __ delayed()->dmfc1(dst, F30);
11242 __ mov_d(F12, src);
11243 __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::d2l), 1);
11244 __ move(dst, V0);
11245 __ bind(L);
11246 %}
11248 ins_pipe( pipe_slow );
11249 %}
// float -> int, fast path: trunc.w.s then force the result to 0 when the
// input is NaN (c.un.s sets the FP condition, movt clears dst).
// Clobbers F30.
11251 instruct convF2I_reg_fast( mRegI dst, regF src ) %{
11252 match(Set dst (ConvF2I src));
11253 ins_cost(150);
11254 format %{ "convf2i $dst, $src @ convF2I_reg_fast" %}
11255 ins_encode %{
11256 Register dreg = $dst$$Register;
11257 FloatRegister fval = $src$$FloatRegister;
11259 __ trunc_w_s(F30, fval);
11260 __ mfc1(dreg, F30);
11261 __ c_un_s(fval, fval); //NaN?
11262 __ movt(dreg, R0);
11263 %}
11265 ins_pipe( pipe_slow );
11266 %}
// float -> int, slow path: NaN returns 0; if trunc.w.s raised the
// invalid-operation flag (FCSR bit 16), fall back to SharedRuntime::f2i.
// V0 is saved/restored around the leaf call when it is not the destination
// (historical bug: a live value in V0 was clobbered by call_VM_leaf).
11268 instruct convF2I_reg_slow( mRegI dst, regF src ) %{
11269 match(Set dst (ConvF2I src));
11270 ins_cost(250);
11271 format %{ "convf2i $dst, $src @ convF2I_reg_slow" %}
11272 ins_encode %{
11273 Register dreg = $dst$$Register;
11274 FloatRegister fval = $src$$FloatRegister;
11275 Label L;
11277 __ c_un_s(fval, fval); //NaN?
11278 __ bc1t(L);
11279 __ delayed();
11280 __ move(dreg, R0);
11282 __ trunc_w_s(F30, fval);
11284 /* Call SharedRuntime:f2i() to do valid convention */
11285 __ cfc1(AT, 31);
11286 __ li(T9, 0x10000);
11287 __ andr(AT, AT, T9);
11288 __ beq(AT, R0, L);
11289 __ delayed()->mfc1(dreg, F30);
11291 __ mov_s(F12, fval);
11293 /* 2014/01/08 Fu : This bug was found when running ezDS's control-panel.
11294 * J 982 C2 javax.swing.text.BoxView.layoutMajorAxis(II[I[I)V (283 bytes) @ 0x000000555c46aa74
11295 *
11296 * An interger array index has been assigned to V0, and then changed from 1 to Integer.MAX_VALUE.
11297 * V0 is corrupted during call_VM_leaf(), and should be preserved.
11298 */
11299 if(dreg != V0) {
11300 __ push(V0);
11301 }
11302 __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::f2i), 1);
11303 if(dreg != V0) {
11304 __ move(dreg, V0);
11305 __ pop(V0);
11306 }
11307 __ bind(L);
11308 %}
11310 ins_pipe( pipe_slow );
11311 %}
// float -> long, fast path: trunc.l.s, with NaN forced to 0 via c.un.s/movt.
// Clobbers F30.
11313 instruct convF2L_reg_fast( mRegL dst, regF src ) %{
11314 match(Set dst (ConvF2L src));
11315 ins_cost(150);
11316 format %{ "convf2l $dst, $src @ convF2L_reg_fast" %}
11317 ins_encode %{
11318 Register dreg = $dst$$Register;
11319 FloatRegister fval = $src$$FloatRegister;
11321 __ trunc_l_s(F30, fval);
11322 __ dmfc1(dreg, F30);
11323 __ c_un_s(fval, fval); //NaN?
11324 __ movt(dreg, R0);
11325 %}
11327 ins_pipe( pipe_slow );
11328 %}
// float -> long, slow path: NaN returns 0; an invalid-operation flag from
// trunc.l.s (FCSR bit 16) reroutes to SharedRuntime::f2l.
// Clobbers AT, T9, F12, F30, V0.
11330 instruct convF2L_reg_slow( mRegL dst, regF src ) %{
11331 match(Set dst (ConvF2L src));
11332 ins_cost(250);
11333 format %{ "convf2l $dst, $src @ convF2L_reg_slow" %}
11334 ins_encode %{
11335 Register dst = as_Register($dst$$reg);
11336 FloatRegister fval = $src$$FloatRegister;
11337 Label L;
11339 __ c_un_s(fval, fval); //NaN?
11340 __ bc1t(L);
11341 __ delayed();
11342 __ move(dst, R0);
11344 __ trunc_l_s(F30, fval);
11345 __ cfc1(AT, 31);
11346 __ li(T9, 0x10000);
11347 __ andr(AT, AT, T9);
11348 __ beq(AT, R0, L);
11349 __ delayed()->dmfc1(dst, F30);
11351 __ mov_s(F12, fval);
11352 __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::f2l), 1);
11353 __ move(dst, V0);
11354 __ bind(L);
11355 %}
11357 ins_pipe( pipe_slow );
11358 %}
// long -> float: move the 64-bit GPR into an FPR, then convert (cvt.s.l).
// (Removed an unused 'Label L;' local that was never bound or branched to.)
11360 instruct convL2F_reg( regF dst, mRegL src ) %{
11361 match(Set dst (ConvL2F src));
11362 format %{ "convl2f $dst, $src @ convL2F_reg" %}
11363 ins_encode %{
11364 FloatRegister dst = $dst$$FloatRegister;
11365 Register src = as_Register($src$$reg);
11368 __ dmtc1(src, dst);
11369 __ cvt_s_l(dst, dst);
11370 %}
11372 ins_pipe( pipe_slow );
11373 %}
// int -> float: move the 32-bit GPR into an FPR, then convert (cvt.s.w).
11375 instruct convI2F_reg( regF dst, mRegI src ) %{
11376 match(Set dst (ConvI2F src));
11377 format %{ "convi2f $dst, $src @ convI2F_reg" %}
11378 ins_encode %{
11379 Register src = $src$$Register;
11380 FloatRegister dst = $dst$$FloatRegister;
11382 __ mtc1(src, dst);
11383 __ cvt_s_w(dst, dst);
11384 %}
11386 ins_pipe( fpu_regF_regF );
11387 %}
// CmpLTMask with zero: dst = (p < 0) ? -1 : 0, i.e. replicate the sign bit.
11389 instruct cmpLTMask_immI0( mRegI dst, mRegI p, immI0 zero ) %{
11390 match(Set dst (CmpLTMask p zero));
11391 ins_cost(100);
11393 format %{ "sra $dst, $p, 31 @ cmpLTMask_immI0" %}
11394 ins_encode %{
11395 Register src = $p$$Register;
11396 Register dst = $dst$$Register;
11398 __ sra(dst, src, 31);
11399 %}
11400 ins_pipe( pipe_slow );
11401 %}
// General CmpLTMask: dst = (p < q) ? -1 : 0.
// slt yields 0/1; negating (0 - dst) turns 1 into an all-ones mask.
11404 instruct cmpLTMask( mRegI dst, mRegI p, mRegI q ) %{
11405 match(Set dst (CmpLTMask p q));
11406 ins_cost(400);
11408 format %{ "cmpLTMask $dst, $p, $q @ cmpLTMask" %}
11409 ins_encode %{
11410 Register p = $p$$Register;
11411 Register q = $q$$Register;
11412 Register dst = $dst$$Register;
11414 __ slt(dst, p, q);
11415 __ subu(dst, R0, dst);
11416 %}
11417 ins_pipe( pipe_slow );
11418 %}
// Pointer -> boolean: dst = (src != NULL) ? 1 : 0.  Same aliasing-safe
// movz sequence as convI2B (AT preserves src when dst == src).
11420 instruct convP2B(mRegI dst, mRegP src) %{
11421 match(Set dst (Conv2B src));
11423 ins_cost(100);
11424 format %{ "convP2B $dst, $src @ convP2B" %}
11425 ins_encode %{
11426 Register dst = as_Register($dst$$reg);
11427 Register src = as_Register($src$$reg);
11429 if (dst != src) {
11430 __ daddiu(dst, R0, 1);
11431 __ movz(dst, R0, src);
11432 } else {
11433 __ move(AT, src);
11434 __ daddiu(dst, R0, 1);
11435 __ movz(dst, R0, AT);
11436 }
11437 %}
11439 ins_pipe( ialu_regL_regL );
11440 %}
// int -> double: GPR to FPR move, then cvt.d.w.
11443 instruct convI2D_reg_reg(regD dst, mRegI src) %{
11444 match(Set dst (ConvI2D src));
11445 format %{ "conI2D $dst, $src @convI2D_reg" %}
11446 ins_encode %{
11447 Register src = $src$$Register;
11448 FloatRegister dst = $dst$$FloatRegister;
11449 __ mtc1(src, dst);
11450 __ cvt_d_w(dst, dst);
11451 %}
11452 ins_pipe( fpu_regF_regF );
11453 %}
// float -> double widening: exact, single cvt.d.s.
11455 instruct convF2D_reg_reg(regD dst, regF src) %{
11456 match(Set dst (ConvF2D src));
11457 format %{ "convF2D $dst, $src\t# @convF2D_reg_reg" %}
11458 ins_encode %{
11459 FloatRegister dst = $dst$$FloatRegister;
11460 FloatRegister src = $src$$FloatRegister;
11462 __ cvt_d_s(dst, src);
11463 %}
11464 ins_pipe( fpu_regF_regF );
11465 %}
// double -> float narrowing: single cvt.s.d (rounds per current FCSR mode).
11467 instruct convD2F_reg_reg(regF dst, regD src) %{
11468 match(Set dst (ConvD2F src));
11469 format %{ "convD2F $dst, $src\t# @convD2F_reg_reg" %}
11470 ins_encode %{
11471 FloatRegister dst = $dst$$FloatRegister;
11472 FloatRegister src = $src$$FloatRegister;
11474 __ cvt_s_d(dst, src);
11475 %}
11476 ins_pipe( fpu_regF_regF );
11477 %}
11479 // Convert a double to an int. If the double is a NAN, stuff a zero in instead.
// Fast path: trunc.w.d returns max_int on overflow/NaN.  If the result is not
// max_int it is exact.  Otherwise fix up: src >= 0.0 keeps max_int (positive
// overflow); src < 0.0 gets min_int (T9 - AT = -1 - max_int); NaN is zeroed
// by the c.un.d condition + movt.  Clobbers AT, T9, F30.
11480 instruct convD2I_reg_reg_fast( mRegI dst, regD src ) %{
11481 match(Set dst (ConvD2I src));
11483 ins_cost(150);
11484 format %{ "convD2I $dst, $src\t# @ convD2I_reg_reg_fast" %}
11486 ins_encode %{
11487 FloatRegister src = $src$$FloatRegister;
11488 Register dst = $dst$$Register;
11490 Label Done;
11492 __ trunc_w_d(F30, src);
11493 // max_int: 2147483647
11494 __ move(AT, 0x7fffffff);
11495 __ mfc1(dst, F30);
11497 __ bne(dst, AT, Done);
11498 __ delayed()->mtc1(R0, F30);
11500 __ cvt_d_w(F30, F30);
11501 __ c_ult_d(src, F30);
11502 __ bc1f(Done);
11503 __ delayed()->addiu(T9, R0, -1);
11505 __ c_un_d(src, src); //NaN?
11506 __ subu32(dst, T9, AT);
11507 __ movt(dst, R0);
11509 __ bind(Done);
11510 %}
11511 ins_pipe( pipe_slow );
11512 %}
// double -> int, slow path: trunc.w.d, then if the invalid-operation flag
// (FCSR bit 16) was raised, recompute via SharedRuntime::d2i.
// Clobbers AT, T9, F12, F30, V0.
11514 instruct convD2I_reg_reg_slow( mRegI dst, regD src ) %{
11515 match(Set dst (ConvD2I src));
11517 ins_cost(250);
11518 format %{ "convD2I $dst, $src\t# @ convD2I_reg_reg_slow" %}
11520 ins_encode %{
11521 FloatRegister src = $src$$FloatRegister;
11522 Register dst = $dst$$Register;
11523 Label L;
11525 __ trunc_w_d(F30, src);
11526 __ cfc1(AT, 31);
11527 __ li(T9, 0x10000);
11528 __ andr(AT, AT, T9);
11529 __ beq(AT, R0, L);
11530 __ delayed()->mfc1(dst, F30);
11532 __ mov_d(F12, src);
11533 __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::d2i), 1);
11534 __ move(dst, V0);
11535 __ bind(L);
11537 %}
11538 ins_pipe( pipe_slow );
11539 %}
11541 // Convert oop pointer into compressed form
// Maybe-null variant (predicate excludes NotNull); encode_heap_oop handles
// the NULL case.  Operates in place, so src is copied to dst first.
11542 instruct encodeHeapOop(mRegN dst, mRegP src) %{
11543 predicate(n->bottom_type()->make_ptr()->ptr() != TypePtr::NotNull);
11544 match(Set dst (EncodeP src));
11545 format %{ "encode_heap_oop $dst,$src" %}
11546 ins_encode %{
11547 Register src = $src$$Register;
11548 Register dst = $dst$$Register;
11549 if (src != dst) {
11550 __ move(dst, src);
11551 }
11552 __ encode_heap_oop(dst);
11553 %}
11554 ins_pipe( ialu_regL_regL );
11555 %}
// Known-non-null variant: the two-register encode_heap_oop_not_null can skip
// the NULL check.
11557 instruct encodeHeapOop_not_null(mRegN dst, mRegP src) %{
11558 predicate(n->bottom_type()->make_ptr()->ptr() == TypePtr::NotNull);
11559 match(Set dst (EncodeP src));
11560 format %{ "encode_heap_oop_not_null $dst,$src @ encodeHeapOop_not_null" %}
11561 ins_encode %{
11562 __ encode_heap_oop_not_null($dst$$Register, $src$$Register);
11563 %}
11564 ins_pipe( ialu_regL_regL );
11565 %}
// Decompress a narrow oop that may be null; decode_heap_oop preserves a
// zero (NULL) input.  Works in place after an optional register move.
11567 instruct decodeHeapOop(mRegP dst, mRegN src) %{
11568 predicate(n->bottom_type()->is_ptr()->ptr() != TypePtr::NotNull &&
11569 n->bottom_type()->is_ptr()->ptr() != TypePtr::Constant);
11570 match(Set dst (DecodeN src));
11571 format %{ "decode_heap_oop $dst,$src @ decodeHeapOop" %}
11572 ins_encode %{
11573 Register s = $src$$Register;
11574 Register d = $dst$$Register;
11575 if (s != d) {
11576 __ move(d, s);
11577 }
11578 __ decode_heap_oop(d);
11579 %}
11580 ins_pipe( ialu_regL_regL );
11581 %}
// Decompress a narrow oop known to be non-null (or constant); uses the
// two-register form when src and dst differ, in-place form otherwise.
11583 instruct decodeHeapOop_not_null(mRegP dst, mRegN src) %{
11584 predicate(n->bottom_type()->is_ptr()->ptr() == TypePtr::NotNull ||
11585 n->bottom_type()->is_ptr()->ptr() == TypePtr::Constant);
11586 match(Set dst (DecodeN src));
11587 format %{ "decode_heap_oop_not_null $dst,$src @ decodeHeapOop_not_null" %}
11588 ins_encode %{
11589 Register s = $src$$Register;
11590 Register d = $dst$$Register;
11591 if (s != d) {
11592 __ decode_heap_oop_not_null(d, s);
11593 } else {
11594 __ decode_heap_oop_not_null(d);
11595 }
11596 %}
11597 ins_pipe( ialu_regL_regL );
11598 %}
// Compress a klass pointer (always non-null).
// Fixed the format text: it previously read "encode_heap_oop_not_null"
// (copy/paste from the oop rule), which produced a misleading debug listing.
11600 instruct encodeKlass_not_null(mRegN dst, mRegP src) %{
11601 match(Set dst (EncodePKlass src));
11602 format %{ "encode_klass_not_null $dst,$src @ encodeKlass_not_null" %}
11603 ins_encode %{
11604 __ encode_klass_not_null($dst$$Register, $src$$Register);
11605 %}
11606 ins_pipe( ialu_regL_regL );
11607 %}
// Decompress a narrow klass pointer; two-register form when src != dst,
// in-place form otherwise.
11609 instruct decodeKlass_not_null(mRegP dst, mRegN src) %{
11610 match(Set dst (DecodeNKlass src));
11611 format %{ "decode_heap_klass_not_null $dst,$src" %}
11612 ins_encode %{
11613 Register s = $src$$Register;
11614 Register d = $dst$$Register;
11615 if (s != d) {
11616 __ decode_klass_not_null(d, s);
11617 } else {
11618 __ decode_klass_not_null(d);
11619 }
11620 %}
11621 ins_pipe( ialu_regL_regL );
11622 %}
11624 //FIXME
// Load the current JavaThread*.  With OPT_THREAD the thread lives in a
// dedicated register (TREG) and a move suffices; otherwise fall back to the
// get_thread() helper.
11625 instruct tlsLoadP(mRegP dst) %{
11626 match(Set dst (ThreadLocal));
11628 ins_cost(0);
11629 format %{ " get_thread in $dst #@tlsLoadP" %}
11630 ins_encode %{
11631 Register dst = $dst$$Register;
11632 #ifdef OPT_THREAD
11633 __ move(dst, TREG);
11634 #else
11635 __ get_thread(dst);
11636 #endif
11637 %}
11639 ins_pipe( ialu_loadI );
11640 %}
// CheckCastPP / CastPP / CastII are compiler-only type assertions:
// they constrain the value's type in the IR and emit no machine code.
11643 instruct checkCastPP( mRegP dst ) %{
11644 match(Set dst (CheckCastPP dst));
11646 format %{ "#checkcastPP of $dst (empty encoding) #@chekCastPP" %}
11647 ins_encode( /*empty encoding*/ );
11648 ins_pipe( empty );
11649 %}
11651 instruct castPP(mRegP dst)
11652 %{
11653 match(Set dst (CastPP dst));
11655 size(0);
11656 format %{ "# castPP of $dst" %}
11657 ins_encode(/* empty encoding */);
11658 ins_pipe(empty);
11659 %}
11661 instruct castII( mRegI dst ) %{
11662 match(Set dst (CastII dst));
11663 format %{ "#castII of $dst empty encoding" %}
11664 ins_encode( /*empty encoding*/ );
11665 ins_cost(0);
11666 ins_pipe( empty );
11667 %}
11669 // Return Instruction
11670 // Remove the return address & jump to it.
// jr RA with a nop in the branch delay slot.
11671 instruct Ret() %{
11672 match(Return);
11673 format %{ "RET #@Ret" %}
11675 ins_encode %{
11676 __ jr(RA);
11677 __ nop();
11678 %}
11680 ins_pipe( pipe_jump );
11681 %}
11683 /*
11684 // For Loongson CPUs, jr seems too slow, so this rule shouldn't be imported.
11685 instruct jumpXtnd(mRegL switch_val) %{
11686 match(Jump switch_val);
11688 ins_cost(350);
11690 format %{ "load T9 <-- [$constanttablebase, $switch_val, $constantoffset] @ jumpXtnd\n\t"
11691 "jr T9\n\t"
11692 "nop" %}
11693 ins_encode %{
11694 Register table_base = $constanttablebase;
11695 int con_offset = $constantoffset;
11696 Register switch_reg = $switch_val$$Register;
11698 if (UseLoongsonISA) {
11699 if (Assembler::is_simm(con_offset, 8)) {
11700 __ gsldx(T9, table_base, switch_reg, con_offset);
11701 } else if (Assembler::is_simm16(con_offset)) {
11702 __ daddu(T9, table_base, switch_reg);
11703 __ ld(T9, T9, con_offset);
11704 } else {
11705 __ move(T9, con_offset);
11706 __ daddu(AT, table_base, switch_reg);
11707 __ gsldx(T9, AT, T9, 0);
11708 }
11709 } else {
11710 if (Assembler::is_simm16(con_offset)) {
11711 __ daddu(T9, table_base, switch_reg);
11712 __ ld(T9, T9, con_offset);
11713 } else {
11714 __ move(T9, con_offset);
11715 __ daddu(AT, table_base, switch_reg);
11716 __ daddu(AT, T9, AT);
11717 __ ld(T9, AT, 0);
11718 }
11719 }
11721 __ jr(T9);
11722 __ nop();
11724 %}
11725 ins_pipe(pipe_jump);
11726 %}
11727 */
11729 // Jump Direct - Label defines a relative address from JMP
// Unconditional branch to a label, nop in the delay slot.  `&L` tests
// whether the matcher supplied a bound Label pointer; when it is null the
// branch is emitted with a 0 offset to be patched later — TODO confirm
// against the matcher's label-handling contract.
11730 instruct jmpDir(label labl) %{
11731 match(Goto);
11732 effect(USE labl);
11734 ins_cost(300);
11735 format %{ "JMP $labl #@jmpDir" %}
11737 ins_encode %{
11738 Label &L = *($labl$$label);
11739 if(&L)
11740 __ b(L);
11741 else
11742 __ b(int(0));
11743 __ nop();
11744 %}
11746 ins_pipe( pipe_jump );
11747 ins_pc_relative(1);
11748 %}
11752 // Tail Jump; remove the return address; jump to target.
11753 // TailCall above leaves the return address around.
11754 // TailJump is used in only one place, the rethrow_Java stub (fancy_jump=2).
11755 // ex_oop (Exception Oop) is needed in %o0 at the jump. As there would be a
11756 // "restore" before this instruction (in Epilogue), we need to materialize it
11757 // in %i0.
11758 //FIXME
// Publishes the exception oop in V0 and the throwing PC (current RA) in V1,
// then jumps to the handler; nop fills the delay slot.
11759 instruct tailjmpInd(mRegP jump_target,mRegP ex_oop) %{
11760 match( TailJump jump_target ex_oop );
11761 ins_cost(200);
11762 format %{ "Jmp $jump_target ; ex_oop = $ex_oop #@tailjmpInd" %}
11763 ins_encode %{
11764 Register target = $jump_target$$Register;
11766 /* 2012/9/14 Jin: V0, V1 are indicated in:
11767 * [stubGenerator_mips.cpp] generate_forward_exception()
11768 * [runtime_mips.cpp] OptoRuntime::generate_exception_blob()
11769 */
11770 Register oop = $ex_oop$$Register;
11771 Register exception_oop = V0;
11772 Register exception_pc = V1;
11774 __ move(exception_pc, RA);
11775 __ move(exception_oop, oop);
11777 __ jr(target);
11778 __ nop();
11779 %}
11780 ins_pipe( pipe_jump );
11781 %}
11783 // ============================================================================
11784 // Procedure Call/Return Instructions
11785 // Call Java Static Instruction
11786 // Note: If this code changes, the corresponding ret_addr_offset() and
11787 // compute_padding() functions will have to be adjusted.
// Encoding is delegated to the Java_Static_Call enc_class (defined earlier in
// this file); ins_alignment(16) keeps the call site padded for patching.
11788 instruct CallStaticJavaDirect(method meth) %{
11789 match(CallStaticJava);
11790 effect(USE meth);
11792 ins_cost(300);
11793 format %{ "CALL,static #@CallStaticJavaDirect " %}
11794 ins_encode( Java_Static_Call( meth ) );
11795 ins_pipe( pipe_slow );
11796 ins_pc_relative(1);
11797 ins_alignment(16);
11798 %}
11800 // Call Java Dynamic Instruction
11801 // Note: If this code changes, the corresponding ret_addr_offset() and
11802 // compute_padding() functions will have to be adjusted.
// Loads the inline-cache klass sentinel then calls; encoding delegated to
// the Java_Dynamic_Call enc_class defined earlier in this file.
11803 instruct CallDynamicJavaDirect(method meth) %{
11804 match(CallDynamicJava);
11805 effect(USE meth);
11807 ins_cost(300);
11808 format %{"MOV IC_Klass, (oop)-1 @ CallDynamicJavaDirect\n\t"
11809 "CallDynamic @ CallDynamicJavaDirect" %}
11810 ins_encode( Java_Dynamic_Call( meth ) );
11811 ins_pipe( pipe_slow );
11812 ins_pc_relative(1);
11813 ins_alignment(16);
11814 %}
// Leaf runtime call with no FP arguments; no safepoint, no oop map needed.
// Encoding delegated to the Java_To_Runtime enc_class.
11816 instruct CallLeafNoFPDirect(method meth) %{
11817 match(CallLeafNoFP);
11818 effect(USE meth);
11820 ins_cost(300);
11821 format %{ "CALL_LEAF_NOFP,runtime " %}
11822 ins_encode(Java_To_Runtime(meth));
11823 ins_pipe( pipe_slow );
11824 ins_pc_relative(1);
11825 ins_alignment(16);
11826 %}
11828 // Prefetch instructions.
// Prefetch-for-read.  Computes the effective address base + (index << scale)
// + disp into AT, then issues `pref 0` (load hint).  A wrong address here
// cannot cause incorrect execution — pref is advisory — but it wastes the
// prefetch.
//
// Fix: the displacement is now added onto the address accumulated in AT.
// The previous code re-based on `base` (discarding the index contribution
// computed just above) and, in the simm16 case, added `disp` twice.
11830 instruct prefetchrNTA( memory mem ) %{
11831 match(PrefetchRead mem);
11832 ins_cost(125);
11834 format %{ "pref $mem\t# Prefetch into non-temporal cache for read @ prefetchrNTA" %}
11835 ins_encode %{
11836 int base = $mem$$base;
11837 int index = $mem$$index;
11838 int scale = $mem$$scale;
11839 int disp = $mem$$disp;
// AT = base (+ index << scale)
11841 if( index != 0 ) {
11842 if (scale == 0) {
11843 __ daddu(AT, as_Register(base), as_Register(index));
11844 } else {
11845 __ dsll(AT, as_Register(index), scale);
11846 __ daddu(AT, as_Register(base), AT);
11847 }
11848 } else {
11849 __ move(AT, as_Register(base));
11850 }
// AT += disp (via T9 when disp does not fit in a 16-bit immediate)
11851 if( Assembler::is_simm16(disp) ) {
11853 __ daddiu(AT, AT, disp);
11854 } else {
11855 __ move(T9, disp);
11856 __ daddu(AT, AT, T9);
11857 }
11858 __ pref(0, AT, 0); //hint: 0:load
11859 %}
11860 ins_pipe(pipe_slow);
11861 %}
// Prefetch-for-write: identical addressing to prefetchrNTA but issues
// `pref 1` (store hint).
//
// Fix: the displacement is now added onto the address accumulated in AT.
// The previous code re-based on `base` (discarding the index contribution
// computed just above) and, in the simm16 case, added `disp` twice.
11863 instruct prefetchwNTA( memory mem ) %{
11864 match(PrefetchWrite mem);
11865 ins_cost(125);
11866 format %{ "pref $mem\t# Prefetch to non-temporal cache for write @ prefetchwNTA" %}
11867 ins_encode %{
11868 int base = $mem$$base;
11869 int index = $mem$$index;
11870 int scale = $mem$$scale;
11871 int disp = $mem$$disp;
// AT = base (+ index << scale)
11873 if( index != 0 ) {
11874 if (scale == 0) {
11875 __ daddu(AT, as_Register(base), as_Register(index));
11876 } else {
11877 __ dsll(AT, as_Register(index), scale);
11878 __ daddu(AT, as_Register(base), AT);
11879 }
11880 } else {
11881 __ move(AT, as_Register(base));
11882 }
// AT += disp (via T9 when disp does not fit in a 16-bit immediate)
11883 if( Assembler::is_simm16(disp) ) {
11885 __ daddiu(AT, AT, disp);
11886 } else {
11887 __ move(T9, disp);
11888 __ daddu(AT, AT, T9);
11889 }
11890 __ pref(1, AT, 0); //hint: 1:store
11891 %}
11892 ins_pipe(pipe_slow);
11893 %}
11895 // Prefetch instructions for allocation.
// Implements allocation prefetch as a byte load into R0 (result discarded):
// the load touches the cache line without needing the pref instruction.
// On Loongson the fused gslbx (load byte, base+index+offset) saves the
// explicit address arithmetic.
// NOTE(review): the non-Loongson paths use 32-bit addu for 64-bit address
// arithmetic (cf. daddu in prefetchrNTA) — confirm this is safe for heap
// addresses on this port.
11897 instruct prefetchAllocNTA( memory mem ) %{
11898 match(PrefetchAllocation mem);
11899 ins_cost(125);
11900 format %{ "pref $mem\t# Prefetch allocation @ prefetchAllocNTA" %}
11901 ins_encode %{
11902 int base = $mem$$base;
11903 int index = $mem$$index;
11904 int scale = $mem$$scale;
11905 int disp = $mem$$disp;
11907 Register dst = R0;
11909 if( index != 0 ) {
11910 if( Assembler::is_simm16(disp) ) {
11911 if( UseLoongsonISA ) {
11912 if (scale == 0) {
11913 __ gslbx(dst, as_Register(base), as_Register(index), disp);
11914 } else {
11915 __ dsll(AT, as_Register(index), scale);
11916 __ gslbx(dst, as_Register(base), AT, disp);
11917 }
11918 } else {
11919 if (scale == 0) {
11920 __ addu(AT, as_Register(base), as_Register(index));
11921 } else {
11922 __ dsll(AT, as_Register(index), scale);
11923 __ addu(AT, as_Register(base), AT);
11924 }
11925 __ lb(dst, AT, disp);
11926 }
11927 } else {
11928 if (scale == 0) {
11929 __ addu(AT, as_Register(base), as_Register(index));
11930 } else {
11931 __ dsll(AT, as_Register(index), scale);
11932 __ addu(AT, as_Register(base), AT);
11933 }
11934 __ move(T9, disp);
11935 if( UseLoongsonISA ) {
11936 __ gslbx(dst, AT, T9, 0);
11937 } else {
11938 __ addu(AT, AT, T9);
11939 __ lb(dst, AT, 0);
11940 }
11941 }
11942 } else {
11943 if( Assembler::is_simm16(disp) ) {
11944 __ lb(dst, as_Register(base), disp);
11945 } else {
11946 __ move(T9, disp);
11947 if( UseLoongsonISA ) {
11948 __ gslbx(dst, as_Register(base), T9, 0);
11949 } else {
11950 __ addu(AT, as_Register(base), T9);
11951 __ lb(dst, AT, 0);
11952 }
11953 }
11954 }
11955 %}
11956 ins_pipe(pipe_slow);
11957 %}
11960 // Call runtime without safepoint
// Leaf runtime call (FP args allowed); encoding delegated to Java_To_Runtime.
11961 instruct CallLeafDirect(method meth) %{
11962 match(CallLeaf);
11963 effect(USE meth);
11965 ins_cost(300);
11966 format %{ "CALL_LEAF,runtime #@CallLeafDirect " %}
11967 ins_encode(Java_To_Runtime(meth));
11968 ins_pipe( pipe_slow );
11969 ins_pc_relative(1);
11970 ins_alignment(16);
11971 %}
11973 // Load Char (16bit unsigned)
// Zero-extending 16-bit load; encoding shared via load_C_enc.
11974 instruct loadUS(mRegI dst, memory mem) %{
11975 match(Set dst (LoadUS mem));
11977 ins_cost(125);
11978 format %{ "loadUS $dst,$mem @ loadC" %}
11979 ins_encode(load_C_enc(dst, mem));
11980 ins_pipe( ialu_loadI );
11981 %}
// Fused LoadUS + ConvI2L: the zero-extended load already yields a valid
// long, so the same encoding is reused with no extra instruction.
11983 instruct loadUS_convI2L(mRegL dst, memory mem) %{
11984 match(Set dst (ConvI2L (LoadUS mem)));
11986 ins_cost(125);
11987 format %{ "loadUS $dst,$mem @ loadUS_convI2L" %}
11988 ins_encode(load_C_enc(dst, mem));
11989 ins_pipe( ialu_loadI );
11990 %}
11992 // Store Char (16bit unsigned)
// 16-bit store from register; encoding shared via store_C_reg_enc.
11993 instruct storeC(memory mem, mRegI src) %{
11994 match(Set mem (StoreC mem src));
11996 ins_cost(125);
11997 format %{ "storeC $src, $mem @ storeC" %}
11998 ins_encode(store_C_reg_enc(mem, src));
11999 ins_pipe( ialu_loadI );
12000 %}
// Store of constant zero uses R0 directly (store_C0_enc) — no scratch needed.
12002 instruct storeC0(memory mem, immI0 zero) %{
12003 match(Set mem (StoreC mem zero));
12005 ins_cost(125);
12006 format %{ "storeC $zero, $mem @ storeC0" %}
12007 ins_encode(store_C0_enc(mem));
12008 ins_pipe( ialu_loadI );
12009 %}
// Float constant 0.0f: move R0 into the FPR directly, no constant-table load.
12012 instruct loadConF0(regF dst, immF0 zero) %{
12013 match(Set dst zero);
12014 ins_cost(100);
12016 format %{ "mov $dst, zero @ loadConF0\n"%}
12017 ins_encode %{
12018 FloatRegister dst = $dst$$FloatRegister;
12020 __ mtc1(R0, dst);
12021 %}
12022 ins_pipe( fpu_loadF );
12023 %}
// General float constant: lwc1 from the constant table.  When the offset
// exceeds simm16, Loongson uses the fused gslwxc1 (base+index load);
// other CPUs materialize the address with daddu first.  Clobbers AT.
12026 instruct loadConF(regF dst, immF src) %{
12027 match(Set dst src);
12028 ins_cost(125);
12030 format %{ "lwc1 $dst, $constantoffset[$constanttablebase] # load FLOAT $src from table @ loadConF" %}
12031 ins_encode %{
12032 int con_offset = $constantoffset($src);
12034 if (Assembler::is_simm16(con_offset)) {
12035 __ lwc1($dst$$FloatRegister, $constanttablebase, con_offset);
12036 } else {
12037 __ set64(AT, con_offset);
12038 if (UseLoongsonISA) {
12039 __ gslwxc1($dst$$FloatRegister, $constanttablebase, AT, 0);
12040 } else {
12041 __ daddu(AT, $constanttablebase, AT);
12042 __ lwc1($dst$$FloatRegister, AT, 0);
12043 }
12044 }
12045 %}
12046 ins_pipe( fpu_loadF );
12047 %}
// Double constant 0.0: move R0 into the FPR (64-bit dmtc1), no table load.
12050 instruct loadConD0(regD dst, immD0 zero) %{
12051 match(Set dst zero);
12052 ins_cost(100);
12054 format %{ "mov $dst, zero @ loadConD0"%}
12055 ins_encode %{
12056 FloatRegister dst = as_FloatRegister($dst$$reg);
12058 __ dmtc1(R0, dst);
12059 %}
12060 ins_pipe( fpu_loadF );
12061 %}
// General double constant: ldc1 from the constant table; gsldxc1 on Loongson
// when the offset exceeds simm16 (mirrors loadConF).  Clobbers AT.
12063 instruct loadConD(regD dst, immD src) %{
12064 match(Set dst src);
12065 ins_cost(125);
12067 format %{ "ldc1 $dst, $constantoffset[$constanttablebase] # load DOUBLE $src from table @ loadConD" %}
12068 ins_encode %{
12069 int con_offset = $constantoffset($src);
12071 if (Assembler::is_simm16(con_offset)) {
12072 __ ldc1($dst$$FloatRegister, $constanttablebase, con_offset);
12073 } else {
12074 __ set64(AT, con_offset);
12075 if (UseLoongsonISA) {
12076 __ gsldxc1($dst$$FloatRegister, $constanttablebase, AT, 0);
12077 } else {
12078 __ daddu(AT, $constanttablebase, AT);
12079 __ ldc1($dst$$FloatRegister, AT, 0);
12080 }
12081 }
12082 %}
12083 ins_pipe( fpu_loadF );
12084 %}
// Store register Float value (it is faster than store from FPU register)
instruct storeF_reg( memory mem, regF src) %{
  match(Set mem (StoreF mem src));

  ins_cost(50);
  format %{ "store $mem, $src\t# store float @ storeF_reg" %}
  ins_encode(store_F_reg_enc(mem, src));
  ins_pipe( fpu_storeF );
%}
// Store float constant +0.0 to memory: since +0.0f is the all-zero bit
// pattern, an integer sw of R0 avoids touching the FPU entirely.
instruct storeF_imm0( memory mem, immF0 zero) %{
  match(Set mem (StoreF mem zero));

  ins_cost(40);
  format %{ "store $mem, zero\t# store float @ storeF_imm0" %}
  ins_encode %{
    int base = $mem$$base;
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;

    // Form the effective address in AT, then store R0.
    if( index != 0 ) {
      if(scale != 0) {
        __ dsll(T9, as_Register(index), scale);
        // NOTE(review): 32-bit addu used for 64-bit address arithmetic here,
        // where similar encodings use daddu — confirm addresses can't carry.
        __ addu(AT, as_Register(base), T9);
      } else {
        __ daddu(AT, as_Register(base), as_Register(index));
      }
      if( Assembler::is_simm16(disp) ) {
        __ sw(R0, AT, disp);
      } else {
        __ move(T9, disp);
        __ addu(AT, AT, T9);
        __ sw(R0, AT, 0);
      }
    } else {
      if( Assembler::is_simm16(disp) ) {
        __ sw(R0, as_Register(base), disp);
      } else {
        __ move(T9, disp);
        __ addu(AT, as_Register(base), T9);
        __ sw(R0, AT, 0);
      }
    }
  %}
  ins_pipe( ialu_storeI );
%}
// Load Double
instruct loadD(regD dst, memory mem) %{
  match(Set dst (LoadD mem));

  ins_cost(150);
  format %{ "loadD $dst, $mem #@loadD" %}
  ins_encode(load_D_enc(dst, mem));
  ins_pipe( ialu_loadI );
%}

// Load Double - UNaligned
// Currently emitted identically to loadD (load_D_enc); see FIXME below.
instruct loadD_unaligned(regD dst, memory mem ) %{
  match(Set dst (LoadD_unaligned mem));
  ins_cost(250);
  // FIXME: Jin: Need more effective ldl/ldr
  format %{ "loadD_unaligned $dst, $mem #@loadD_unaligned" %}
  ins_encode(load_D_enc(dst, mem));
  ins_pipe( ialu_loadI );
%}
12155 instruct storeD_reg( memory mem, regD src) %{
12156 match(Set mem (StoreD mem src));
12158 ins_cost(50);
12159 format %{ "store $mem, $src\t# store float @ storeD_reg" %}
12160 ins_encode(store_D_reg_enc(mem, src));
12161 ins_pipe( fpu_storeF );
12162 %}
12164 instruct storeD_imm0( memory mem, immD0 zero) %{
12165 match(Set mem (StoreD mem zero));
12167 ins_cost(40);
12168 format %{ "store $mem, zero\t# store float @ storeD_imm0" %}
12169 ins_encode %{
12170 int base = $mem$$base;
12171 int index = $mem$$index;
12172 int scale = $mem$$scale;
12173 int disp = $mem$$disp;
12175 __ mtc1(R0, F30);
12176 __ cvt_d_w(F30, F30);
12178 if( index != 0 ) {
12179 if(scale != 0) {
12180 __ dsll(T9, as_Register(index), scale);
12181 __ addu(AT, as_Register(base), T9);
12182 } else {
12183 __ daddu(AT, as_Register(base), as_Register(index));
12184 }
12185 if( Assembler::is_simm16(disp) ) {
12186 __ sdc1(F30, AT, disp);
12187 } else {
12188 __ move(T9, disp);
12189 __ addu(AT, AT, T9);
12190 __ sdc1(F30, AT, 0);
12191 }
12193 } else {
12194 if( Assembler::is_simm16(disp) ) {
12195 __ sdc1(F30, as_Register(base), disp);
12196 } else {
12197 __ move(T9, disp);
12198 __ addu(AT, as_Register(base), T9);
12199 __ sdc1(F30, AT, 0);
12200 }
12201 }
12202 %}
12203 ins_pipe( ialu_storeI );
12204 %}
// Load an int from a stack slot. Stack-slot displacements are expected to
// fit in the 16-bit signed immediate of the load/store; guarantee() checks.
instruct loadSSI(mRegI dst, stackSlotI src)
%{
  match(Set dst src);

  ins_cost(125);
  format %{ "lw $dst, $src\t# int stk @ loadSSI" %}
  ins_encode %{
    guarantee( Assembler::is_simm16($src$$disp), "disp too long (loadSSI) !");
    __ lw($dst$$Register, SP, $src$$disp);
  %}
  ins_pipe(ialu_loadI);
%}

// Store an int to a stack slot.
instruct storeSSI(stackSlotI dst, mRegI src)
%{
  match(Set dst src);

  ins_cost(100);
  format %{ "sw $dst, $src\t# int stk @ storeSSI" %}
  ins_encode %{
    guarantee( Assembler::is_simm16($dst$$disp), "disp too long (storeSSI) !");
    __ sw($src$$Register, SP, $dst$$disp);
  %}
  ins_pipe(ialu_storeI);
%}

// Load a long from a stack slot.
instruct loadSSL(mRegL dst, stackSlotL src)
%{
  match(Set dst src);

  ins_cost(125);
  format %{ "ld $dst, $src\t# long stk @ loadSSL" %}
  ins_encode %{
    guarantee( Assembler::is_simm16($src$$disp), "disp too long (loadSSL) !");
    __ ld($dst$$Register, SP, $src$$disp);
  %}
  ins_pipe(ialu_loadI);
%}

// Store a long to a stack slot.
instruct storeSSL(stackSlotL dst, mRegL src)
%{
  match(Set dst src);

  ins_cost(100);
  format %{ "sd $dst, $src\t# long stk @ storeSSL" %}
  ins_encode %{
    guarantee( Assembler::is_simm16($dst$$disp), "disp too long (storeSSL) !");
    __ sd($src$$Register, SP, $dst$$disp);
  %}
  ins_pipe(ialu_storeI);
%}

// Load a pointer from a stack slot (64-bit ld, same as long).
instruct loadSSP(mRegP dst, stackSlotP src)
%{
  match(Set dst src);

  ins_cost(125);
  format %{ "ld $dst, $src\t# ptr stk @ loadSSP" %}
  ins_encode %{
    guarantee( Assembler::is_simm16($src$$disp), "disp too long (loadSSP) !");
    __ ld($dst$$Register, SP, $src$$disp);
  %}
  ins_pipe(ialu_loadI);
%}

// Store a pointer to a stack slot.
instruct storeSSP(stackSlotP dst, mRegP src)
%{
  match(Set dst src);

  ins_cost(100);
  format %{ "sd $dst, $src\t# ptr stk @ storeSSP" %}
  ins_encode %{
    guarantee( Assembler::is_simm16($dst$$disp), "disp too long (storeSSP) !");
    __ sd($src$$Register, SP, $dst$$disp);
  %}
  ins_pipe(ialu_storeI);
%}

// Load a float from a stack slot.
instruct loadSSF(regF dst, stackSlotF src)
%{
  match(Set dst src);

  ins_cost(125);
  format %{ "lwc1 $dst, $src\t# float stk @ loadSSF" %}
  ins_encode %{
    guarantee( Assembler::is_simm16($src$$disp), "disp too long (loadSSF) !");
    __ lwc1($dst$$FloatRegister, SP, $src$$disp);
  %}
  ins_pipe(ialu_loadI);
%}

// Store a float to a stack slot.
instruct storeSSF(stackSlotF dst, regF src)
%{
  match(Set dst src);

  ins_cost(100);
  format %{ "swc1 $dst, $src\t# float stk @ storeSSF" %}
  ins_encode %{
    guarantee( Assembler::is_simm16($dst$$disp), "disp too long (storeSSF) !");
    __ swc1($src$$FloatRegister, SP, $dst$$disp);
  %}
  ins_pipe(fpu_storeF);
%}

// Use the same format since predicate() can not be used here.
// Load a double from a stack slot.
instruct loadSSD(regD dst, stackSlotD src)
%{
  match(Set dst src);

  ins_cost(125);
  format %{ "ldc1 $dst, $src\t# double stk @ loadSSD" %}
  ins_encode %{
    guarantee( Assembler::is_simm16($src$$disp), "disp too long (loadSSD) !");
    __ ldc1($dst$$FloatRegister, SP, $src$$disp);
  %}
  ins_pipe(ialu_loadI);
%}

// Store a double to a stack slot.
instruct storeSSD(stackSlotD dst, regD src)
%{
  match(Set dst src);

  ins_cost(100);
  format %{ "sdc1 $dst, $src\t# double stk @ storeSSD" %}
  ins_encode %{
    guarantee( Assembler::is_simm16($dst$$disp), "disp too long (storeSSD) !");
    __ sdc1($src$$FloatRegister, SP, $dst$$disp);
  %}
  ins_pipe(fpu_storeF);
%}
// Fast-path monitor enter: delegates to MacroAssembler::fast_lock.
// The box register is killed and two temps are required by the helper.
instruct cmpFastLock( FlagsReg cr, mRegP object, s0_RegP box, mRegI tmp, mRegP scr) %{
  match( Set cr (FastLock object box) );
  effect( TEMP tmp, TEMP scr, USE_KILL box );
  ins_cost(300);
  format %{ "FASTLOCK $cr $object, $box, $tmp #@ cmpFastLock" %}
  ins_encode %{
    __ fast_lock($object$$Register, $box$$Register, $tmp$$Register, $scr$$Register);
  %}

  ins_pipe( pipe_slow );
  ins_pc_relative(1);
%}

// Fast-path monitor exit: delegates to MacroAssembler::fast_unlock.
instruct cmpFastUnlock( FlagsReg cr, mRegP object, s0_RegP box, mRegP tmp ) %{
  match( Set cr (FastUnlock object box) );
  effect( TEMP tmp, USE_KILL box );
  ins_cost(300);
  format %{ "FASTUNLOCK $object, $box, $tmp #@cmpFastUnlock" %}
  ins_encode %{
    __ fast_unlock($object$$Register, $box$$Register, $tmp$$Register);
  %}

  ins_pipe( pipe_slow );
  ins_pc_relative(1);
%}
// Store CMS card-mark Immediate
// Byte store with the synchronization required by the CMS card table.
instruct storeImmCM(memory mem, immI8 src) %{
  match(Set mem (StoreCM mem src));

  ins_cost(150);
  format %{ "MOV8 $mem,$src\t! CMS card-mark imm0" %}
  // opcode(0xC6);
  ins_encode(store_B_immI_enc_sync(mem, src));
  ins_pipe( ialu_storeI );
%}
12374 // Die now
12375 instruct ShouldNotReachHere( )
12376 %{
12377 match(Halt);
12378 ins_cost(300);
12380 // Use the following format syntax
12381 format %{ "ILLTRAP ;#@ShouldNotReachHere" %}
12382 ins_encode %{
12383 // Here we should emit illtrap !
12385 __ stop("in ShoudNotReachHere");
12387 %}
12388 ins_pipe( pipe_jump );
12389 %}
// Address computation (lea) for base + 8-bit offset with a narrow oop base;
// only valid when the narrow-oop encoding uses no shift.
instruct leaP8Narrow(mRegP dst, indOffset8Narrow mem)
%{
  predicate(Universe::narrow_oop_shift() == 0);
  match(Set dst mem);

  ins_cost(110);
  format %{ "leaq $dst, $mem\t# ptr off8narrow @ leaP8Narrow" %}
  ins_encode %{
    Register dst = $dst$$Register;
    Register base = as_Register($mem$$base);
    int disp = $mem$$disp;

    __ daddiu(dst, base, disp);
  %}
  ins_pipe( ialu_regI_imm16 );
%}

// Address computation for base + (index << scale) + 8-bit offset.
instruct leaPPosIdxScaleOff8(mRegP dst, basePosIndexScaleOffset8 mem)
%{
  match(Set dst mem);

  ins_cost(110);
  format %{ "leaq $dst, $mem\t# @ PosIdxScaleOff8" %}
  ins_encode %{
    Register dst = $dst$$Register;
    Register base = as_Register($mem$$base);
    Register index = as_Register($mem$$index);
    int scale = $mem$$scale;
    int disp = $mem$$disp;

    if (scale == 0) {
      __ daddu(AT, base, index);
      __ daddiu(dst, AT, disp);
    } else {
      __ dsll(AT, index, scale);
      __ daddu(AT, base, AT);
      __ daddiu(dst, AT, disp);
    }
  %}

  ins_pipe( ialu_regI_imm16 );
%}

// Address computation for base + (index << scale), no displacement.
instruct leaPIdxScale(mRegP dst, indIndexScale mem)
%{
  match(Set dst mem);

  ins_cost(110);
  format %{ "leaq $dst, $mem\t# @ leaPIdxScale" %}
  ins_encode %{
    Register dst = $dst$$Register;
    Register base = as_Register($mem$$base);
    Register index = as_Register($mem$$index);
    int scale = $mem$$scale;

    if (scale == 0) {
      __ daddu(dst, base, index);
    } else {
      __ dsll(AT, index, scale);
      __ daddu(dst, base, AT);
    }
  %}

  ins_pipe( ialu_regI_imm16 );
%}
// Jump Direct Conditional - Label defines a relative address from Jcc+1
// Counted-loop back-branch on an int compare. Unsigned-looking case names
// (above/below) are emitted with the signed slt here, matching the
// signed CmpI semantics. Each branch is followed by a delay-slot nop.
instruct jmpLoopEnd(cmpOp cop, mRegI src1, mRegI src2, label labl) %{
  match(CountedLoopEnd cop (CmpI src1 src2));
  effect(USE labl);

  ins_cost(300);
  format %{ "J$cop $src1, $src2, $labl\t# Loop end @ jmpLoopEnd" %}
  ins_encode %{
    Register op1 = $src1$$Register;
    Register op2 = $src2$$Register;
    Label &L = *($labl$$label);
    int flag = $cop$$cmpcode;

    // NOTE(review): `&L` is the address of a bound reference and is always
    // non-null, so the else branches below are dead — confirm and simplify.
    switch(flag)
    {
      case 0x01: //equal
        if (&L)
          __ beq(op1, op2, L);
        else
          __ beq(op1, op2, (int)0);
        break;
      case 0x02: //not_equal
        if (&L)
          __ bne(op1, op2, L);
        else
          __ bne(op1, op2, (int)0);
        break;
      case 0x03: //above
        __ slt(AT, op2, op1);
        if(&L)
          __ bne(AT, R0, L);
        else
          __ bne(AT, R0, (int)0);
        break;
      case 0x04: //above_equal
        __ slt(AT, op1, op2);
        if(&L)
          __ beq(AT, R0, L);
        else
          __ beq(AT, R0, (int)0);
        break;
      case 0x05: //below
        __ slt(AT, op1, op2);
        if(&L)
          __ bne(AT, R0, L);
        else
          __ bne(AT, R0, (int)0);
        break;
      case 0x06: //below_equal
        __ slt(AT, op2, op1);
        if(&L)
          __ beq(AT, R0, L);
        else
          __ beq(AT, R0, (int)0);
        break;
      default:
        Unimplemented();
    }
    // Branch delay slot.
    __ nop();
  %}
  ins_pipe( pipe_jump );
  ins_pc_relative(1);
%}
// Counted-loop back-branch against a small immediate: compute
// AT = src1 - src2 with addiu32, then branch on AT's sign/zero so the
// compare and branch fold into two instructions.
instruct jmpLoopEnd_reg_imm16_sub(cmpOp cop, mRegI src1, immI16_sub src2, label labl) %{
  match(CountedLoopEnd cop (CmpI src1 src2));
  effect(USE labl);

  ins_cost(250);
  format %{ "J$cop $src1, $src2, $labl\t# Loop end @ jmpLoopEnd_reg_imm16_sub" %}
  ins_encode %{
    Register op1 = $src1$$Register;
    int op2 = $src2$$constant;
    Label &L = *($labl$$label);
    int flag = $cop$$cmpcode;

    // AT = op1 - op2 (immI16_sub guarantees -op2 fits the immediate).
    __ addiu32(AT, op1, -1 * op2);

    // NOTE(review): `&L` is always non-null; the else branches are dead.
    switch(flag)
    {
      case 0x01: //equal
        if (&L)
          __ beq(AT, R0, L);
        else
          __ beq(AT, R0, (int)0);
        break;
      case 0x02: //not_equal
        if (&L)
          __ bne(AT, R0, L);
        else
          __ bne(AT, R0, (int)0);
        break;
      case 0x03: //above
        if(&L)
          __ bgtz(AT, L);
        else
          __ bgtz(AT, (int)0);
        break;
      case 0x04: //above_equal
        if(&L)
          __ bgez(AT, L);
        else
          __ bgez(AT,(int)0);
        break;
      case 0x05: //below
        if(&L)
          __ bltz(AT, L);
        else
          __ bltz(AT, (int)0);
        break;
      case 0x06: //below_equal
        if(&L)
          __ blez(AT, L);
        else
          __ blez(AT, (int)0);
        break;
      default:
        Unimplemented();
    }
    // Branch delay slot.
    __ nop();
  %}
  ins_pipe( pipe_jump );
  ins_pc_relative(1);
%}
12584 /*
12585 // Jump Direct Conditional - Label defines a relative address from Jcc+1
12586 instruct jmpLoopEndU(cmpOpU cop, eFlagsRegU cmp, label labl) %{
12587 match(CountedLoopEnd cop cmp);
12588 effect(USE labl);
12590 ins_cost(300);
12591 format %{ "J$cop,u $labl\t# Loop end" %}
12592 size(6);
12593 opcode(0x0F, 0x80);
12594 ins_encode( Jcc( cop, labl) );
12595 ins_pipe( pipe_jump );
12596 ins_pc_relative(1);
12597 %}
12599 instruct jmpLoopEndUCF(cmpOpUCF cop, eFlagsRegUCF cmp, label labl) %{
12600 match(CountedLoopEnd cop cmp);
12601 effect(USE labl);
12603 ins_cost(200);
12604 format %{ "J$cop,u $labl\t# Loop end" %}
12605 opcode(0x0F, 0x80);
12606 ins_encode( Jcc( cop, labl) );
12607 ins_pipe( pipe_jump );
12608 ins_pc_relative(1);
12609 %}
12610 */
// This match pattern is created for StoreIConditional since I cannot match IfNode without a RegFlags! fujie 2012/07/17
// Conditional jump on the flags produced in AT: the "equal/success" case
// branches when AT != 0 (AT is set non-zero on success by the producer),
// hence the apparently inverted bne/beq below.
instruct jmpCon_flags(cmpOp cop, FlagsReg cr, label labl) %{
  match(If cop cr);
  effect(USE labl);

  ins_cost(300);
  format %{ "J$cop $labl #mips uses AT as eflag @jmpCon_flags" %}

  ins_encode %{
    Label &L = *($labl$$label);
    // NOTE(review): `&L` is always non-null; else branches are dead.
    switch($cop$$cmpcode)
    {
      case 0x01: //equal
        if (&L)
          __ bne(AT, R0, L);
        else
          __ bne(AT, R0, (int)0);
        break;
      case 0x02: //not equal
        if (&L)
          __ beq(AT, R0, L);
        else
          __ beq(AT, R0, (int)0);
        break;
      default:
        Unimplemented();
    }
    // Branch delay slot.
    __ nop();
  %}

  ins_pipe( pipe_jump );
  ins_pc_relative(1);
%}
// ============================================================================
// The 2nd slow-half of a subtype check. Scan the subklass's 2ndary superklass
// array for an instance of the superklass. Set a hidden internal cache on a
// hit (cache is checked with exposed code in gen_subtype_check()). Return
// NZ for a miss or zero for a hit. The encoding ALSO sets flags.
instruct partialSubtypeCheck( mRegP result, no_T8_mRegP sub, no_T8_mRegP super, mT8RegI tmp ) %{
  match(Set result (PartialSubtypeCheck sub super));
  effect(KILL tmp);
  ins_cost(1100); // slightly larger than the next version
  format %{ "partialSubtypeCheck result=$result, sub=$sub, super=$super, tmp=$tmp " %}

  ins_encode( enc_PartialSubtypeCheck(result, sub, super, tmp) );
  ins_pipe( pipe_slow );
%}
// Conditional-store of an int value.
// ZF flag is set on success, reset otherwise. Implemented with a CMPXCHG on Intel.
// On MIPS this is an ll/sc retry loop; AT ends up 0 on failure and 0xFF on
// success (set in the taken sc's delay slot), consumed by jmpCon_flags.
instruct storeIConditional( memory mem, mRegI oldval, mRegI newval, FlagsReg cr ) %{
  match(Set cr (StoreIConditional mem (Binary oldval newval)));
 // effect(KILL oldval);
  format %{ "CMPXCHG $newval, $mem, $oldval \t# @storeIConditional" %}

  ins_encode %{
    Register oldval = $oldval$$Register;
    Register newval = $newval$$Register;
    Address   addr(as_Register($mem$$base), $mem$$disp);
    Label     again, failure;

//  int      base = $mem$$base;
    int     index = $mem$$index;
    int     scale = $mem$$scale;
    int      disp = $mem$$disp;

    // ll/sc immediates are 16-bit; indexed addressing is not supported.
    guarantee(Assembler::is_simm16(disp), "");

    if( index != 0 ) {
      __ stop("in storeIConditional: index != 0");
    } else {
      __ bind(again);
      // Memory barrier before the load-linked, gated by UseSyncLevel.
      if(UseSyncLevel <= 1000) __ sync();
      __ ll(AT, addr);
      __ bne(AT, oldval, failure);
      __ delayed()->addu(AT, R0, R0);   // delay slot: AT = 0 (failure flag)

      __ addu(AT, newval, R0);
      __ sc(AT, addr);
      __ beq(AT, R0, again);            // sc failed -> retry
      __ delayed()->addiu(AT, R0, 0xFF); // delay slot: AT = success flag
      __ bind(failure);
      __ sync();
    }
  %}

  ins_pipe( long_memory_op );
%}
12704 // Conditional-store of a long value.
12705 // ZF flag is set on success, reset otherwise. Implemented with a CMPXCHG.
12706 instruct storeLConditional(memory mem, t2RegL oldval, mRegL newval, FlagsReg cr )
12707 %{
12708 match(Set cr (StoreLConditional mem (Binary oldval newval)));
12709 effect(KILL oldval);
12711 format %{ "cmpxchg $mem, $newval\t# If $oldval == $mem then store $newval into $mem" %}
12712 ins_encode%{
12713 Register oldval = $oldval$$Register;
12714 Register newval = $newval$$Register;
12715 Address addr((Register)$mem$$base, $mem$$disp);
12717 int index = $mem$$index;
12718 int scale = $mem$$scale;
12719 int disp = $mem$$disp;
12721 guarantee(Assembler::is_simm16(disp), "");
12723 if( index != 0 ) {
12724 __ stop("in storeIConditional: index != 0");
12725 } else {
12726 __ cmpxchg(newval, addr, oldval);
12727 }
12728 %}
12729 ins_pipe( long_memory_op );
12730 %}
12733 instruct compareAndSwapI( mRegI res, mRegP mem_ptr, mS2RegI oldval, mRegI newval) %{
12734 match(Set res (CompareAndSwapI mem_ptr (Binary oldval newval)));
12735 effect(KILL oldval);
12736 // match(CompareAndSwapI mem_ptr (Binary oldval newval));
12737 format %{ "CMPXCHG $newval, [$mem_ptr], $oldval @ compareAndSwapI\n\t"
12738 "MOV $res, 1 @ compareAndSwapI\n\t"
12739 "BNE AT, R0 @ compareAndSwapI\n\t"
12740 "MOV $res, 0 @ compareAndSwapI\n"
12741 "L:" %}
12742 ins_encode %{
12743 Register newval = $newval$$Register;
12744 Register oldval = $oldval$$Register;
12745 Register res = $res$$Register;
12746 Address addr($mem_ptr$$Register, 0);
12747 Label L;
12749 __ cmpxchg32(newval, addr, oldval);
12750 __ move(res, AT);
12751 %}
12752 ins_pipe( long_memory_op );
12753 %}
12755 //FIXME:
12756 instruct compareAndSwapP( mRegI res, mRegP mem_ptr, s2_RegP oldval, mRegP newval) %{
12757 match(Set res (CompareAndSwapP mem_ptr (Binary oldval newval)));
12758 effect(KILL oldval);
12759 format %{ "CMPXCHG $newval, [$mem_ptr], $oldval @ compareAndSwapP\n\t"
12760 "MOV $res, AT @ compareAndSwapP\n\t"
12761 "L:" %}
12762 ins_encode %{
12763 Register newval = $newval$$Register;
12764 Register oldval = $oldval$$Register;
12765 Register res = $res$$Register;
12766 Address addr($mem_ptr$$Register, 0);
12767 Label L;
12769 __ cmpxchg(newval, addr, oldval);
12770 __ move(res, AT);
12771 %}
12772 ins_pipe( long_memory_op );
12773 %}
12775 instruct compareAndSwapN( mRegI res, mRegP mem_ptr, t2_RegN oldval, mRegN newval) %{
12776 match(Set res (CompareAndSwapN mem_ptr (Binary oldval newval)));
12777 effect(KILL oldval);
12778 format %{ "CMPXCHG $newval, [$mem_ptr], $oldval @ compareAndSwapN\n\t"
12779 "MOV $res, AT @ compareAndSwapN\n\t"
12780 "L:" %}
12781 ins_encode %{
12782 Register newval = $newval$$Register;
12783 Register oldval = $oldval$$Register;
12784 Register res = $res$$Register;
12785 Address addr($mem_ptr$$Register, 0);
12786 Label L;
12788 /* 2013/7/19 Jin: cmpxchg32 is implemented with ll/sc, which will do sign extension.
12789 * Thus, we should extend oldval's sign for correct comparision.
12790 */
12791 __ sll(oldval, oldval, 0);
12793 __ cmpxchg32(newval, addr, oldval);
12794 __ move(res, AT);
12795 %}
12796 ins_pipe( long_memory_op );
12797 %}
12799 //----------Max and Min--------------------------------------------------------
12800 // Min Instructions
12801 ////
12802 // *** Min and Max using the conditional move are slower than the
12803 // *** branch version on a Pentium III.
12804 // // Conditional move for min
12805 //instruct cmovI_reg_lt( eRegI op2, eRegI op1, eFlagsReg cr ) %{
12806 // effect( USE_DEF op2, USE op1, USE cr );
12807 // format %{ "CMOVlt $op2,$op1\t! min" %}
12808 // opcode(0x4C,0x0F);
12809 // ins_encode( OpcS, OpcP, RegReg( op2, op1 ) );
12810 // ins_pipe( pipe_cmov_reg );
12811 //%}
12812 //
12813 //// Min Register with Register (P6 version)
12814 //instruct minI_eReg_p6( eRegI op1, eRegI op2 ) %{
12815 // predicate(VM_Version::supports_cmov() );
12816 // match(Set op2 (MinI op1 op2));
12817 // ins_cost(200);
12818 // expand %{
12819 // eFlagsReg cr;
12820 // compI_eReg(cr,op1,op2);
12821 // cmovI_reg_lt(op2,op1,cr);
12822 // %}
12823 //%}
// Min Register with Register (generic version)
// dst = min(dst, src): slt sets AT when src < dst, movn then conditionally
// moves src into dst — branch-free min.
instruct minI_Reg_Reg(mRegI dst, mRegI src) %{
  match(Set dst (MinI dst src));
  //effect(KILL flags);
  ins_cost(80);

  format %{ "MIN    $dst, $src @minI_Reg_Reg" %}
  ins_encode %{
    Register dst   = $dst$$Register;
    Register src   = $src$$Register;

    __ slt(AT, src, dst);
    __ movn(dst, src, AT);

  %}

  ins_pipe( pipe_slow );
%}
12844 // Max Register with Register
12845 // *** Min and Max using the conditional move are slower than the
12846 // *** branch version on a Pentium III.
12847 // // Conditional move for max
12848 //instruct cmovI_reg_gt( eRegI op2, eRegI op1, eFlagsReg cr ) %{
12849 // effect( USE_DEF op2, USE op1, USE cr );
12850 // format %{ "CMOVgt $op2,$op1\t! max" %}
12851 // opcode(0x4F,0x0F);
12852 // ins_encode( OpcS, OpcP, RegReg( op2, op1 ) );
12853 // ins_pipe( pipe_cmov_reg );
12854 //%}
12855 //
12856 // // Max Register with Register (P6 version)
12857 //instruct maxI_eReg_p6( eRegI op1, eRegI op2 ) %{
12858 // predicate(VM_Version::supports_cmov() );
12859 // match(Set op2 (MaxI op1 op2));
12860 // ins_cost(200);
12861 // expand %{
12862 // eFlagsReg cr;
12863 // compI_eReg(cr,op1,op2);
12864 // cmovI_reg_gt(op2,op1,cr);
12865 // %}
12866 //%}
// Max Register with Register (generic version)
// dst = max(dst, src): branch-free via slt + movn (mirror of minI_Reg_Reg).
instruct maxI_Reg_Reg(mRegI dst, mRegI src) %{
  match(Set dst (MaxI dst src));
  ins_cost(80);

  format %{ "MAX    $dst, $src @maxI_Reg_Reg" %}

  ins_encode %{
    Register dst   = $dst$$Register;
    Register src   = $src$$Register;

    __ slt(AT, dst, src);
    __ movn(dst, src, AT);

  %}

  ins_pipe( pipe_slow );
%}

// dst = max(dst, 0): clamp negative values to zero.
instruct maxI_Reg_zero(mRegI dst, immI0 zero) %{
  match(Set dst (MaxI dst zero));
  ins_cost(50);

  format %{ "MAX    $dst, 0 @maxI_Reg_zero" %}

  ins_encode %{
    Register dst   = $dst$$Register;

    __ slt(AT, dst, R0);
    __ movn(dst, R0, AT);

  %}

  ins_pipe( pipe_slow );
%}
// Zero-extend the low 32 bits of a long (AndL with 0xFFFFFFFF) using a
// single dext bit-field extract.
instruct zerox_long_reg_reg(mRegL dst, mRegL src, immL_32bits mask)
%{
  match(Set dst (AndL src mask));

  format %{ "movl    $dst, $src\t# zero-extend long @ zerox_long_reg_reg" %}
  ins_encode %{
    Register dst = $dst$$Register;
    Register src = $src$$Register;

    __ dext(dst, src, 0, 32);
  %}
  ins_pipe(ialu_regI_regI);
%}

// Combine two ints into one long: src2 in the high 32 bits, src1 in the
// low 32. Aliasing of dst with either source is handled separately so no
// input is clobbered before it is read.
instruct combine_i2l(mRegL dst, mRegI src1, immL_32bits mask, mRegI src2, immI_32 shift32)
%{
  match(Set dst (OrL (AndL (ConvI2L src1) mask) (LShiftL (ConvI2L src2) shift32)));

  format %{ "combine_i2l    $dst, $src2(H), $src1(L) @ combine_i2l" %}
  ins_encode %{
    Register dst  = $dst$$Register;
    Register src1 = $src1$$Register;
    Register src2 = $src2$$Register;

    if (src1 == dst) {
       __ dinsu(dst, src2, 32, 32);
    } else if (src2 == dst) {
       __ dsll32(dst, dst, 0);
       __ dins(dst, src1, 0, 32);
    } else {
       __ dext(dst, src1, 0, 32);
       __ dinsu(dst, src2, 32, 32);
    }
  %}
  ins_pipe(ialu_regI_regI);
%}

// Zero-extend convert int to long
instruct convI2L_reg_reg_zex(mRegL dst, mRegI src, immL_32bits mask)
%{
  match(Set dst (AndL (ConvI2L src) mask));

  format %{ "movl    $dst, $src\t# i2l zero-extend @ convI2L_reg_reg_zex" %}
  ins_encode %{
    Register dst = $dst$$Register;
    Register src = $src$$Register;

    __ dext(dst, src, 0, 32);
  %}
  ins_pipe(ialu_regI_regI);
%}

// Zero-extend an int that came from truncating a long (L2I then I2L & mask).
instruct convL2I2L_reg_reg_zex(mRegL dst, mRegL src, immL_32bits mask)
%{
  match(Set dst (AndL (ConvI2L (ConvL2I src)) mask));

  format %{ "movl    $dst, $src\t# i2l zero-extend @ convL2I2L_reg_reg_zex" %}
  ins_encode %{
    Register dst = $dst$$Register;
    Register src = $src$$Register;

    __ dext(dst, src, 0, 32);
  %}
  ins_pipe(ialu_regI_regI);
%}

// Match loading integer and casting it to unsigned int in long register.
// LoadI + ConvI2L + AndL 0xffffffff. Folded into a single lwu.
instruct loadUI2L_rmask(mRegL dst, memory mem, immL_32bits mask) %{
  match(Set dst (AndL (ConvI2L (LoadI mem)) mask));

  format %{ "lwu     $dst, $mem \t// zero-extend to long @ loadUI2L_rmask" %}
  ins_encode (load_N_enc(dst, mem));
  ins_pipe(ialu_loadI);
%}

// Same as loadUI2L_rmask with the mask on the left of the AndL.
instruct loadUI2L_lmask(mRegL dst, memory mem, immL_32bits mask) %{
  match(Set dst (AndL mask (ConvI2L (LoadI mem))));

  format %{ "lwu     $dst, $mem \t// zero-extend to long @ loadUI2L_lmask" %}
  ins_encode (load_N_enc(dst, mem));
  ins_pipe(ialu_loadI);
%}
// ============================================================================
// Safepoint Instruction
// Poll for GC: a relocated lw from the polling page; the VM unmaps the page
// to trap threads at safepoints.
instruct safePoint_poll(mRegP poll) %{
  match(SafePoint poll);
  effect(USE poll);

  ins_cost(125);
  format %{ "Safepoint @ [$poll] : poll for GC @ safePoint_poll" %}

  ins_encode %{
    Register poll_reg = $poll$$Register;

    __ block_comment("Safepoint:");
    __ relocate(relocInfo::poll_type);
    __ lw(AT, poll_reg, 0);
  %}

  ins_pipe( ialu_storeI );
%}
//----------Arithmetic Conversion Instructions---------------------------------

// RoundFloat is a no-op on this target (FPU already produces rounded floats).
instruct roundFloat_nop(regF dst)
%{
  match(Set dst (RoundFloat dst));

  ins_cost(0);
  ins_encode();
  ins_pipe(empty);
%}

// RoundDouble is likewise a no-op.
instruct roundDouble_nop(regD dst)
%{
  match(Set dst (RoundDouble dst));

  ins_cost(0);
  ins_encode();
  ins_pipe(empty);
%}
//---------- Zeros Count Instructions ------------------------------------------
// CountLeadingZerosINode CountTrailingZerosINode

// Count leading zeros of an int (MIPS clz).
instruct countLeadingZerosI(mRegI dst, mRegI src) %{
  predicate(UseCountLeadingZerosInstruction);
  match(Set dst (CountLeadingZerosI src));

  format %{ "clz  $dst, $src\t# count leading zeros (int)" %}
  ins_encode %{
    __ clz($dst$$Register, $src$$Register);
  %}
  ins_pipe( ialu_regL_regL );
%}

// Count leading zeros of a long (MIPS dclz).
instruct countLeadingZerosL(mRegI dst, mRegL src) %{
  predicate(UseCountLeadingZerosInstruction);
  match(Set dst (CountLeadingZerosL src));

  format %{ "dclz $dst, $src\t# count leading zeros (long)" %}
  ins_encode %{
    __ dclz($dst$$Register, $src$$Register);
  %}
  ins_pipe( ialu_regL_regL );
%}

// Count trailing zeros of an int.
instruct countTrailingZerosI(mRegI dst, mRegI src) %{
  predicate(UseCountTrailingZerosInstruction);
  match(Set dst (CountTrailingZerosI src));

  format %{ "ctz    $dst, $src\t# count trailing zeros (int)" %}
  ins_encode %{
    // ctz and dctz is gs instructions.
    __ ctz($dst$$Register, $src$$Register);
  %}
  ins_pipe( ialu_regL_regL );
%}
13065 instruct countTrailingZerosL(mRegI dst, mRegL src) %{
13066 predicate(UseCountTrailingZerosInstruction);
13067 match(Set dst (CountTrailingZerosL src));
13069 format %{ "dcto $dst, $src\t# count trailing zeros (long)" %}
13070 ins_encode %{
13071 __ dctz($dst$$Register, $src$$Register);
13072 %}
13073 ins_pipe( ialu_regL_regL );
13074 %}
// ====================VECTOR INSTRUCTIONS=====================================

// Load vectors (8 bytes long)
// An 8-byte vector lives in a double FPU register; reuse the double load.
instruct loadV8(vecD dst, memory mem) %{
  predicate(n->as_LoadVector()->memory_size() == 8);
  match(Set dst (LoadVector mem));
  ins_cost(125);
  format %{ "load    $dst, $mem\t! load vector (8 bytes)" %}
  ins_encode(load_D_enc(dst, mem));
  ins_pipe( fpu_loadF );
%}

// Store vectors (8 bytes long)
// Reuses the double store encoding.
instruct storeV8(memory mem, vecD src) %{
  predicate(n->as_StoreVector()->memory_size() == 8);
  match(Set mem (StoreVector mem src));
  ins_cost(145);
  format %{ "store    $mem, $src\t! store vector (8 bytes)" %}
  ins_encode(store_D_reg_enc(mem, src));
  ins_pipe( fpu_storeF );
%}
// Replicate a byte scalar into all 8 lanes of a vecD (Loongson replv_ob),
// then move the 64-bit result into the FPU register with dmtc1.
instruct Repl8B(vecD dst, mRegI src) %{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (ReplicateB src));
  format %{ "replv_ob    AT, $src\n\t"
            "dmtc1 AT, $dst\t! replicate8B" %}
  ins_encode %{
    __ replv_ob(AT, $src$$Register);
    __ dmtc1(AT, $dst$$FloatRegister);
  %}
  ins_pipe( pipe_mtc1 );
%}

// Replicate a byte immediate into 8 lanes (repl_ob takes the immediate).
instruct Repl8B_imm(vecD dst, immI con) %{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (ReplicateB con));
  format %{ "repl_ob    AT, [$con]\n\t"
            "dmtc1 AT, $dst,0x00\t! replicate8B($con)" %}
  ins_encode %{
    int      val = $con$$constant;
    __ repl_ob(AT, val);
    __ dmtc1(AT, $dst$$FloatRegister);
  %}
  ins_pipe( pipe_mtc1 );
%}

// All-zero byte vector: a single dmtc1 from R0.
instruct Repl8B_zero(vecD dst, immI0 zero) %{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (ReplicateB zero));
  format %{ "dmtc1    R0, $dst\t! replicate8B zero" %}
  ins_encode %{
    __ dmtc1(R0, $dst$$FloatRegister);
  %}
  ins_pipe( pipe_mtc1 );
%}

// All-ones byte vector: nor(AT, R0, R0) gives 0xFFFFFFFFFFFFFFFF.
instruct Repl8B_M1(vecD dst, immI_M1 M1) %{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (ReplicateB M1));
  format %{ "dmtc1    -1, $dst\t! replicate8B -1" %}
  ins_encode %{
    __ nor(AT, R0, R0);
    __ dmtc1(AT, $dst$$FloatRegister);
  %}
  ins_pipe( pipe_mtc1 );
%}

// Replicate a short scalar into all 4 lanes (Loongson replv_qh).
instruct Repl4S(vecD dst, mRegI src) %{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (ReplicateS src));
  format %{ "replv_qh    AT, $src\n\t"
            "dmtc1 AT, $dst\t! replicate4S" %}
  ins_encode %{
    __ replv_qh(AT, $src$$Register);
    __ dmtc1(AT, $dst$$FloatRegister);
  %}
  ins_pipe( pipe_mtc1 );
%}

// Replicate a short immediate into 4 lanes. Small immediates use repl_qh
// directly; larger ones are materialized with li32 then replicated.
instruct Repl4S_imm(vecD dst, immI con) %{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (ReplicateS con));
  format %{ "replv_qh    AT, [$con]\n\t"
            "dmtc1 AT, $dst\t! replicate4S($con)" %}
  ins_encode %{
    int      val = $con$$constant;
    if ( Assembler::is_simm(val, 10)) {
      //repl_qh supports 10 bits immediate
      __ repl_qh(AT, val);
    } else {
      __ li32(AT, val);
      __ replv_qh(AT, AT);
    }
    __ dmtc1(AT, $dst$$FloatRegister);
  %}
  ins_pipe( pipe_mtc1 );
%}

// All-zero short vector.
instruct Repl4S_zero(vecD dst, immI0 zero) %{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (ReplicateS zero));
  format %{ "dmtc1    R0, $dst\t! replicate4S zero" %}
  ins_encode %{
    __ dmtc1(R0, $dst$$FloatRegister);
  %}
  ins_pipe( pipe_mtc1 );
%}

// All-ones short vector.
instruct Repl4S_M1(vecD dst, immI_M1 M1) %{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (ReplicateS M1));
  format %{ "dmtc1    -1, $dst\t! replicate4S -1" %}
  ins_encode %{
    __ nor(AT, R0, R0);
    __ dmtc1(AT, $dst$$FloatRegister);
  %}
  ins_pipe( pipe_mtc1 );
%}
// Replicate integer (4 byte) scalar to be a 2-lane vector.
instruct Repl2I(vecD dst, mRegI src) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (ReplicateI src));
  format %{ "dins AT, $src, 0, 32\n\t"
            "dinsu AT, $src, 32, 32\n\t"
            "dmtc1 AT, $dst\t! replicate2I" %}
  ins_encode %{
    // dins places the low 32 bits of $src into bits 0..31 of AT;
    // dinsu places the same 32 bits into bits 32..63, so both halves of
    // AT end up fully defined even though AT's prior contents were garbage.
    __ dins(AT, $src$$Register, 0, 32);
    __ dinsu(AT, $src$$Register, 32, 32);
    __ dmtc1(AT, $dst$$FloatRegister);
  %}
  ins_pipe( pipe_mtc1 );
%}
// Replicate an integer (4 byte) scalar immediate to be a 2-lane vector.
// The immediate is materialized with li32 and replicated with replv_pw
// (no constant-table load is involved, contrary to the old comment).
instruct Repl2I_imm(vecD dst, immI con, mA7RegI tmp) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (ReplicateI con));
  // NOTE(review): tmp is declared KILLed but never referenced in the
  // encoding — AT is the scratch actually used. Confirm whether the KILL
  // is still required before removing it.
  effect(KILL tmp);
  format %{ "li32 AT, [$con]\n\t"
            "replv_pw AT, AT\n\t"
            "dmtc1 AT, $dst\t! replicate2I($con)" %}
  ins_encode %{
    int val = $con$$constant;
    // Materialize the 32-bit immediate in AT, then copy its low word into
    // both 32-bit lanes before moving the result into the FP register.
    __ li32(AT, val);
    __ replv_pw(AT, AT);
    __ dmtc1(AT, $dst$$FloatRegister);
  %}
  ins_pipe( pipe_mtc1 );
%}
// Replicate integer (4 byte) scalar zero to be a 2-lane vector.
instruct Repl2I_zero(vecD dst, immI0 zero) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (ReplicateI zero));
  format %{ "dmtc1 R0, $dst\t! replicate2I zero" %}
  ins_encode %{
    // A single move of the hard-wired zero register clears both lanes.
    __ dmtc1(R0, $dst$$FloatRegister);
  %}
  ins_pipe( pipe_mtc1 );
%}
// Replicate integer (4 byte) scalar -1 to be a 2-lane vector.
instruct Repl2I_M1(vecD dst, immI_M1 M1) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (ReplicateI M1));
  format %{ "dmtc1 -1, $dst\t! replicate2I -1, use AT" %}
  ins_encode %{
    // nor(AT, R0, R0) yields all-ones; both 32-bit lanes become -1.
    __ nor(AT, R0, R0);
    __ dmtc1(AT, $dst$$FloatRegister);
  %}
  ins_pipe( pipe_mtc1 );
%}
// Replicate float (4 byte) scalar to be a 2-lane vector.
instruct Repl2F(vecD dst, regF src) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (ReplicateF src));
  format %{ "cvt.ps $dst, $src, $src\t! replicate2F" %}
  ins_encode %{
    // cvt.ps.s packs two single-precision values into one paired-single
    // register; passing $src twice duplicates it into both halves.
    __ cvt_ps_s($dst$$FloatRegister, $src$$FloatRegister, $src$$FloatRegister);
  %}
  ins_pipe( pipe_slow );
%}
// Replicate float (4 byte) scalar zero to be a 2-lane vector.
instruct Repl2F_zero(vecD dst, immF0 zero) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (ReplicateF zero));
  format %{ "dmtc1 R0, $dst\t! replicate2F zero" %}
  ins_encode %{
    // An all-zero bit pattern is +0.0f in both paired-single lanes.
    __ dmtc1(R0, $dst$$FloatRegister);
  %}
  ins_pipe( pipe_mtc1 );
%}
13274 // ====================VECTOR ARITHMETIC=======================================
13276 // --------------------------------- ADD --------------------------------------
// Floats vector add: two-address form, dst += src (paired-single).
instruct vadd2F(vecD dst, vecD src) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (AddVF dst src));
  format %{ "add.ps $dst,$src\t! add packed2F" %}
  ins_encode %{
    __ add_ps($dst$$FloatRegister, $dst$$FloatRegister, $src$$FloatRegister);
  %}
  // Use the FP register-register pipeline class, consistent with the
  // sibling paired-single ops (vadd2F3, vsub2F, vmul2F); pipe_slow here
  // looked like a leftover and needlessly pessimized scheduling.
  ins_pipe( fpu_regF_regF );
%}
// Floats vector add: three-address form, dst = src1 + src2 (paired-single).
instruct vadd2F3(vecD dst, vecD src1, vecD src2) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (AddVF src1 src2));
  format %{ "add.ps $dst,$src1,$src2\t! add packed2F" %}
  ins_encode %{
    __ add_ps($dst$$FloatRegister, $src1$$FloatRegister, $src2$$FloatRegister);
  %}
  ins_pipe( fpu_regF_regF );
%}
13299 // --------------------------------- SUB --------------------------------------
// Floats vector sub: two-address form, dst -= src (paired-single).
instruct vsub2F(vecD dst, vecD src) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (SubVF dst src));
  format %{ "sub.ps $dst,$src\t! sub packed2F" %}
  ins_encode %{
    __ sub_ps($dst$$FloatRegister, $dst$$FloatRegister, $src$$FloatRegister);
  %}
  ins_pipe( fpu_regF_regF );
%}
13312 // --------------------------------- MUL --------------------------------------
// Floats vector mul: two-address form, dst *= src (paired-single).
instruct vmul2F(vecD dst, vecD src) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (MulVF dst src));
  format %{ "mul.ps $dst, $src\t! mul packed2F" %}
  ins_encode %{
    __ mul_ps($dst$$FloatRegister, $dst$$FloatRegister, $src$$FloatRegister);
  %}
  ins_pipe( fpu_regF_regF );
%}
// Floats vector mul: three-address form, dst = src1 * src2 (paired-single).
instruct vmul2F3(vecD dst, vecD src1, vecD src2) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (MulVF src1 src2));
  format %{ "mul.ps $dst, $src1, $src2\t! mul packed2F" %}
  ins_encode %{
    __ mul_ps($dst$$FloatRegister, $src1$$FloatRegister, $src2$$FloatRegister);
  %}
  ins_pipe( fpu_regF_regF );
%}
13335 // --------------------------------- DIV --------------------------------------
13336 // MIPS do not have div.ps
13339 //----------PEEPHOLE RULES-----------------------------------------------------
13340 // These must follow all instruction definitions as they use the names
13341 // defined in the instructions definitions.
13342 //
// peepmatch ( root_instr_name [preceding_instruction]* );
13344 //
13345 // peepconstraint %{
13346 // (instruction_number.operand_name relational_op instruction_number.operand_name
13347 // [, ...] );
13348 // // instruction numbers are zero-based using left to right order in peepmatch
13349 //
13350 // peepreplace ( instr_name ( [instruction_number.operand_name]* ) );
13351 // // provide an instruction_number.operand_name for each operand that appears
13352 // // in the replacement instruction's match rule
13353 //
13354 // ---------VM FLAGS---------------------------------------------------------
13355 //
13356 // All peephole optimizations can be turned off using -XX:-OptoPeephole
13357 //
13358 // Each peephole rule is given an identifying number starting with zero and
13359 // increasing by one in the order seen by the parser. An individual peephole
13360 // can be enabled, and all others disabled, by using -XX:OptoPeepholeAt=#
13361 // on the command-line.
13362 //
13363 // ---------CURRENT LIMITATIONS----------------------------------------------
13364 //
13365 // Only match adjacent instructions in same basic block
13366 // Only equality constraints
13367 // Only constraints between operands, not (0.dest_reg == EAX_enc)
13368 // Only one replacement instruction
13369 //
13370 // ---------EXAMPLE----------------------------------------------------------
13371 //
13372 // // pertinent parts of existing instructions in architecture description
13373 // instruct movI(eRegI dst, eRegI src) %{
13374 // match(Set dst (CopyI src));
13375 // %}
13376 //
13377 // instruct incI_eReg(eRegI dst, immI1 src, eFlagsReg cr) %{
13378 // match(Set dst (AddI dst src));
13379 // effect(KILL cr);
13380 // %}
13381 //
13382 // // Change (inc mov) to lea
13383 // peephole %{
// // increment preceded by register-register move
13385 // peepmatch ( incI_eReg movI );
13386 // // require that the destination register of the increment
13387 // // match the destination register of the move
13388 // peepconstraint ( 0.dst == 1.dst );
13389 // // construct a replacement instruction that sets
13390 // // the destination to ( move's source register + one )
13391 // peepreplace ( leaI_eReg_immI( 0.dst 1.src 0.src ) );
13392 // %}
13393 //
13394 // Implementation no longer uses movX instructions since
13395 // machine-independent system no longer uses CopyX nodes.
13396 //
13397 // peephole %{
13398 // peepmatch ( incI_eReg movI );
13399 // peepconstraint ( 0.dst == 1.dst );
13400 // peepreplace ( leaI_eReg_immI( 0.dst 1.src 0.src ) );
13401 // %}
13402 //
13403 // peephole %{
13404 // peepmatch ( decI_eReg movI );
13405 // peepconstraint ( 0.dst == 1.dst );
13406 // peepreplace ( leaI_eReg_immI( 0.dst 1.src 0.src ) );
13407 // %}
13408 //
13409 // peephole %{
13410 // peepmatch ( addI_eReg_imm movI );
13411 // peepconstraint ( 0.dst == 1.dst );
13412 // peepreplace ( leaI_eReg_immI( 0.dst 1.src 0.src ) );
13413 // %}
13414 //
13415 // peephole %{
13416 // peepmatch ( addP_eReg_imm movP );
13417 // peepconstraint ( 0.dst == 1.dst );
13418 // peepreplace ( leaP_eReg_immI( 0.dst 1.src 0.src ) );
13419 // %}
13421 // // Change load of spilled value to only a spill
13422 // instruct storeI(memory mem, eRegI src) %{
13423 // match(Set mem (StoreI mem src));
13424 // %}
13425 //
13426 // instruct loadI(eRegI dst, memory mem) %{
13427 // match(Set dst (LoadI mem));
13428 // %}
13429 //
13430 //peephole %{
13431 // peepmatch ( loadI storeI );
13432 // peepconstraint ( 1.src == 0.dst, 1.mem == 0.mem );
13433 // peepreplace ( storeI( 1.mem 1.mem 1.src ) );
13434 //%}
13436 //----------SMARTSPILL RULES---------------------------------------------------
13437 // These must follow all instruction definitions as they use the names
13438 // defined in the instructions definitions.