Sat, 18 Feb 2017 08:51:49 +0800
[C2] Use gssdx in store_P_reg_enc for Loongson CPUs.
1 //
2 // Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved.
3 // Copyright (c) 2015, 2016, Loongson Technology. All rights reserved.
4 // DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5 //
6 // This code is free software; you can redistribute it and/or modify it
7 // under the terms of the GNU General Public License version 2 only, as
8 // published by the Free Software Foundation.
9 //
10 // This code is distributed in the hope that it will be useful, but WITHOUT
11 // ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 // FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
13 // version 2 for more details (a copy is included in the LICENSE file that
14 // accompanied this code).
15 //
16 // You should have received a copy of the GNU General Public License version
17 // 2 along with this work; if not, write to the Free Software Foundation,
18 // Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
19 //
20 // Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
21 // or visit www.oracle.com if you need additional information or have any
22 // questions.
23 //
24 //
26 // GodSon3 Architecture Description File
28 //----------REGISTER DEFINITION BLOCK------------------------------------------
29 // This information is used by the matcher and the register allocator to
30 // describe individual registers and classes of registers within the target
31 // archtecture.
33 // format:
34 // reg_def name (call convention, c-call convention, ideal type, encoding);
35 // call convention :
36 // NS = No-Save
37 // SOC = Save-On-Call
38 // SOE = Save-On-Entry
39 // AS = Always-Save
40 // ideal type :
41 // see opto/opcodes.hpp for more info
42 // reg_class name (reg, ...);
43 // alloc_class name (reg, ...);
44 register %{
46 // General Registers
47 // Integer Registers
48 reg_def R0 ( NS, NS, Op_RegI, 0, VMRegImpl::Bad());
49 reg_def AT ( NS, NS, Op_RegI, 1, AT->as_VMReg());
50 reg_def AT_H ( NS, NS, Op_RegI, 1, AT->as_VMReg()->next());
51 reg_def V0 (SOC, SOC, Op_RegI, 2, V0->as_VMReg());
52 reg_def V0_H (SOC, SOC, Op_RegI, 2, V0->as_VMReg()->next());
53 reg_def V1 (SOC, SOC, Op_RegI, 3, V1->as_VMReg());
54 reg_def V1_H (SOC, SOC, Op_RegI, 3, V1->as_VMReg()->next());
55 reg_def A0 (SOC, SOC, Op_RegI, 4, A0->as_VMReg());
56 reg_def A0_H (SOC, SOC, Op_RegI, 4, A0->as_VMReg()->next());
57 reg_def A1 (SOC, SOC, Op_RegI, 5, A1->as_VMReg());
58 reg_def A1_H (SOC, SOC, Op_RegI, 5, A1->as_VMReg()->next());
59 reg_def A2 (SOC, SOC, Op_RegI, 6, A2->as_VMReg());
60 reg_def A2_H (SOC, SOC, Op_RegI, 6, A2->as_VMReg()->next());
61 reg_def A3 (SOC, SOC, Op_RegI, 7, A3->as_VMReg());
62 reg_def A3_H (SOC, SOC, Op_RegI, 7, A3->as_VMReg()->next());
63 reg_def A4 (SOC, SOC, Op_RegI, 8, A4->as_VMReg());
64 reg_def A4_H (SOC, SOC, Op_RegI, 8, A4->as_VMReg()->next());
65 reg_def A5 (SOC, SOC, Op_RegI, 9, A5->as_VMReg());
66 reg_def A5_H (SOC, SOC, Op_RegI, 9, A5->as_VMReg()->next());
67 reg_def A6 (SOC, SOC, Op_RegI, 10, A6->as_VMReg());
68 reg_def A6_H (SOC, SOC, Op_RegI, 10, A6->as_VMReg()->next());
69 reg_def A7 (SOC, SOC, Op_RegI, 11, A7->as_VMReg());
70 reg_def A7_H (SOC, SOC, Op_RegI, 11, A7->as_VMReg()->next());
71 reg_def T0 (SOC, SOC, Op_RegI, 12, T0->as_VMReg());
72 reg_def T0_H (SOC, SOC, Op_RegI, 12, T0->as_VMReg()->next());
73 reg_def T1 (SOC, SOC, Op_RegI, 13, T1->as_VMReg());
74 reg_def T1_H (SOC, SOC, Op_RegI, 13, T1->as_VMReg()->next());
75 reg_def T2 (SOC, SOC, Op_RegI, 14, T2->as_VMReg());
76 reg_def T2_H (SOC, SOC, Op_RegI, 14, T2->as_VMReg()->next());
77 reg_def T3 (SOC, SOC, Op_RegI, 15, T3->as_VMReg());
78 reg_def T3_H (SOC, SOC, Op_RegI, 15, T3->as_VMReg()->next());
79 reg_def S0 (SOC, SOE, Op_RegI, 16, S0->as_VMReg());
80 reg_def S0_H (SOC, SOE, Op_RegI, 16, S0->as_VMReg()->next());
81 reg_def S1 (SOC, SOE, Op_RegI, 17, S1->as_VMReg());
82 reg_def S1_H (SOC, SOE, Op_RegI, 17, S1->as_VMReg()->next());
83 reg_def S2 (SOC, SOE, Op_RegI, 18, S2->as_VMReg());
84 reg_def S2_H (SOC, SOE, Op_RegI, 18, S2->as_VMReg()->next());
85 reg_def S3 (SOC, SOE, Op_RegI, 19, S3->as_VMReg());
86 reg_def S3_H (SOC, SOE, Op_RegI, 19, S3->as_VMReg()->next());
87 reg_def S4 (SOC, SOE, Op_RegI, 20, S4->as_VMReg());
88 reg_def S4_H (SOC, SOE, Op_RegI, 20, S4->as_VMReg()->next());
89 reg_def S5 (SOC, SOE, Op_RegI, 21, S5->as_VMReg());
90 reg_def S5_H (SOC, SOE, Op_RegI, 21, S5->as_VMReg()->next());
91 reg_def S6 (SOC, SOE, Op_RegI, 22, S6->as_VMReg());
92 reg_def S6_H (SOC, SOE, Op_RegI, 22, S6->as_VMReg()->next());
93 reg_def S7 (SOC, SOE, Op_RegI, 23, S7->as_VMReg());
94 reg_def S7_H (SOC, SOE, Op_RegI, 23, S7->as_VMReg()->next());
95 reg_def T8 (SOC, SOC, Op_RegI, 24, T8->as_VMReg());
96 reg_def T8_H (SOC, SOC, Op_RegI, 24, T8->as_VMReg()->next());
97 reg_def T9 (SOC, SOC, Op_RegI, 25, T9->as_VMReg());
98 reg_def T9_H (SOC, SOC, Op_RegI, 25, T9->as_VMReg()->next());
100 // Special Registers
101 reg_def K0 ( NS, NS, Op_RegI, 26, K0->as_VMReg());
102 reg_def K1 ( NS, NS, Op_RegI, 27, K1->as_VMReg());
103 reg_def GP ( NS, NS, Op_RegI, 28, GP->as_VMReg());
104 reg_def GP_H ( NS, NS, Op_RegI, 28, GP->as_VMReg()->next());
105 reg_def SP ( NS, NS, Op_RegI, 29, SP->as_VMReg());
106 reg_def SP_H ( NS, NS, Op_RegI, 29, SP->as_VMReg()->next());
107 reg_def FP ( NS, NS, Op_RegI, 30, FP->as_VMReg());
108 reg_def FP_H ( NS, NS, Op_RegI, 30, FP->as_VMReg()->next());
109 reg_def RA ( NS, NS, Op_RegI, 31, RA->as_VMReg());
110 reg_def RA_H ( NS, NS, Op_RegI, 31, RA->as_VMReg()->next());
112 // Floating registers.
113 reg_def F0 ( SOC, SOC, Op_RegF, 0, F0->as_VMReg());
114 reg_def F0_H ( SOC, SOC, Op_RegF, 0, F0->as_VMReg()->next());
115 reg_def F1 ( SOC, SOC, Op_RegF, 1, F1->as_VMReg());
116 reg_def F1_H ( SOC, SOC, Op_RegF, 1, F1->as_VMReg()->next());
117 reg_def F2 ( SOC, SOC, Op_RegF, 2, F2->as_VMReg());
118 reg_def F2_H ( SOC, SOC, Op_RegF, 2, F2->as_VMReg()->next());
119 reg_def F3 ( SOC, SOC, Op_RegF, 3, F3->as_VMReg());
120 reg_def F3_H ( SOC, SOC, Op_RegF, 3, F3->as_VMReg()->next());
121 reg_def F4 ( SOC, SOC, Op_RegF, 4, F4->as_VMReg());
122 reg_def F4_H ( SOC, SOC, Op_RegF, 4, F4->as_VMReg()->next());
123 reg_def F5 ( SOC, SOC, Op_RegF, 5, F5->as_VMReg());
124 reg_def F5_H ( SOC, SOC, Op_RegF, 5, F5->as_VMReg()->next());
125 reg_def F6 ( SOC, SOC, Op_RegF, 6, F6->as_VMReg());
126 reg_def F6_H ( SOC, SOC, Op_RegF, 6, F6->as_VMReg()->next());
127 reg_def F7 ( SOC, SOC, Op_RegF, 7, F7->as_VMReg());
128 reg_def F7_H ( SOC, SOC, Op_RegF, 7, F7->as_VMReg()->next());
129 reg_def F8 ( SOC, SOC, Op_RegF, 8, F8->as_VMReg());
130 reg_def F8_H ( SOC, SOC, Op_RegF, 8, F8->as_VMReg()->next());
131 reg_def F9 ( SOC, SOC, Op_RegF, 9, F9->as_VMReg());
132 reg_def F9_H ( SOC, SOC, Op_RegF, 9, F9->as_VMReg()->next());
133 reg_def F10 ( SOC, SOC, Op_RegF, 10, F10->as_VMReg());
134 reg_def F10_H ( SOC, SOC, Op_RegF, 10, F10->as_VMReg()->next());
135 reg_def F11 ( SOC, SOC, Op_RegF, 11, F11->as_VMReg());
136 reg_def F11_H ( SOC, SOC, Op_RegF, 11, F11->as_VMReg()->next());
137 reg_def F12 ( SOC, SOC, Op_RegF, 12, F12->as_VMReg());
138 reg_def F12_H ( SOC, SOC, Op_RegF, 12, F12->as_VMReg()->next());
139 reg_def F13 ( SOC, SOC, Op_RegF, 13, F13->as_VMReg());
140 reg_def F13_H ( SOC, SOC, Op_RegF, 13, F13->as_VMReg()->next());
141 reg_def F14 ( SOC, SOC, Op_RegF, 14, F14->as_VMReg());
142 reg_def F14_H ( SOC, SOC, Op_RegF, 14, F14->as_VMReg()->next());
143 reg_def F15 ( SOC, SOC, Op_RegF, 15, F15->as_VMReg());
144 reg_def F15_H ( SOC, SOC, Op_RegF, 15, F15->as_VMReg()->next());
145 reg_def F16 ( SOC, SOC, Op_RegF, 16, F16->as_VMReg());
146 reg_def F16_H ( SOC, SOC, Op_RegF, 16, F16->as_VMReg()->next());
147 reg_def F17 ( SOC, SOC, Op_RegF, 17, F17->as_VMReg());
148 reg_def F17_H ( SOC, SOC, Op_RegF, 17, F17->as_VMReg()->next());
149 reg_def F18 ( SOC, SOC, Op_RegF, 18, F18->as_VMReg());
150 reg_def F18_H ( SOC, SOC, Op_RegF, 18, F18->as_VMReg()->next());
151 reg_def F19 ( SOC, SOC, Op_RegF, 19, F19->as_VMReg());
152 reg_def F19_H ( SOC, SOC, Op_RegF, 19, F19->as_VMReg()->next());
153 reg_def F20 ( SOC, SOC, Op_RegF, 20, F20->as_VMReg());
154 reg_def F20_H ( SOC, SOC, Op_RegF, 20, F20->as_VMReg()->next());
155 reg_def F21 ( SOC, SOC, Op_RegF, 21, F21->as_VMReg());
156 reg_def F21_H ( SOC, SOC, Op_RegF, 21, F21->as_VMReg()->next());
157 reg_def F22 ( SOC, SOC, Op_RegF, 22, F22->as_VMReg());
158 reg_def F22_H ( SOC, SOC, Op_RegF, 22, F22->as_VMReg()->next());
159 reg_def F23 ( SOC, SOC, Op_RegF, 23, F23->as_VMReg());
160 reg_def F23_H ( SOC, SOC, Op_RegF, 23, F23->as_VMReg()->next());
161 reg_def F24 ( SOC, SOC, Op_RegF, 24, F24->as_VMReg());
162 reg_def F24_H ( SOC, SOC, Op_RegF, 24, F24->as_VMReg()->next());
163 reg_def F25 ( SOC, SOC, Op_RegF, 25, F25->as_VMReg());
164 reg_def F25_H ( SOC, SOC, Op_RegF, 25, F25->as_VMReg()->next());
165 reg_def F26 ( SOC, SOC, Op_RegF, 26, F26->as_VMReg());
166 reg_def F26_H ( SOC, SOC, Op_RegF, 26, F26->as_VMReg()->next());
167 reg_def F27 ( SOC, SOC, Op_RegF, 27, F27->as_VMReg());
168 reg_def F27_H ( SOC, SOC, Op_RegF, 27, F27->as_VMReg()->next());
169 reg_def F28 ( SOC, SOC, Op_RegF, 28, F28->as_VMReg());
170 reg_def F28_H ( SOC, SOC, Op_RegF, 28, F28->as_VMReg()->next());
171 reg_def F29 ( SOC, SOC, Op_RegF, 29, F29->as_VMReg());
172 reg_def F29_H ( SOC, SOC, Op_RegF, 29, F29->as_VMReg()->next());
173 reg_def F30 ( SOC, SOC, Op_RegF, 30, F30->as_VMReg());
174 reg_def F30_H ( SOC, SOC, Op_RegF, 30, F30->as_VMReg()->next());
175 reg_def F31 ( SOC, SOC, Op_RegF, 31, F31->as_VMReg());
176 reg_def F31_H ( SOC, SOC, Op_RegF, 31, F31->as_VMReg()->next());
179 // ----------------------------
180 // Special Registers
181 // Condition Codes Flag Registers
182 reg_def MIPS_FLAG (SOC, SOC, Op_RegFlags, 1, as_Register(1)->as_VMReg());
183 //S6 is used for get_thread(S6)
184 //S5 is uesd for heapbase of compressed oop
185 alloc_class chunk0(
186 S7, S7_H,
187 S0, S0_H,
188 S1, S1_H,
189 S2, S2_H,
190 S4, S4_H,
191 S5, S5_H,
192 S6, S6_H,
193 S3, S3_H,
194 T2, T2_H,
195 T3, T3_H,
196 T8, T8_H,
197 T9, T9_H,
198 T1, T1_H, // inline_cache_reg
199 V1, V1_H,
200 A7, A7_H,
201 A6, A6_H,
202 A5, A5_H,
203 A4, A4_H,
204 V0, V0_H,
205 A3, A3_H,
206 A2, A2_H,
207 A1, A1_H,
208 A0, A0_H,
209 T0, T0_H,
210 GP, GP_H
211 RA, RA_H,
212 SP, SP_H, // stack_pointer
213 FP, FP_H // frame_pointer
214 );
216 alloc_class chunk1( F0, F0_H,
217 F1, F1_H,
218 F2, F2_H,
219 F3, F3_H,
220 F4, F4_H,
221 F5, F5_H,
222 F6, F6_H,
223 F7, F7_H,
224 F8, F8_H,
225 F9, F9_H,
226 F10, F10_H,
227 F11, F11_H,
228 F20, F20_H,
229 F21, F21_H,
230 F22, F22_H,
231 F23, F23_H,
232 F24, F24_H,
233 F25, F25_H,
234 F26, F26_H,
235 F27, F27_H,
236 F28, F28_H,
237 F19, F19_H,
238 F18, F18_H,
239 F17, F17_H,
240 F16, F16_H,
241 F15, F15_H,
242 F14, F14_H,
243 F13, F13_H,
244 F12, F12_H,
245 F29, F29_H,
246 F30, F30_H,
247 F31, F31_H);
249 alloc_class chunk2(MIPS_FLAG);
251 reg_class s_reg( S0, S1, S2, S3, S4, S5, S6, S7 );
252 reg_class s0_reg( S0 );
253 reg_class s1_reg( S1 );
254 reg_class s2_reg( S2 );
255 reg_class s3_reg( S3 );
256 reg_class s4_reg( S4 );
257 reg_class s5_reg( S5 );
258 reg_class s6_reg( S6 );
259 reg_class s7_reg( S7 );
261 reg_class t_reg( T0, T1, T2, T3, T8, T9 );
262 reg_class t0_reg( T0 );
263 reg_class t1_reg( T1 );
264 reg_class t2_reg( T2 );
265 reg_class t3_reg( T3 );
266 reg_class t8_reg( T8 );
267 reg_class t9_reg( T9 );
269 reg_class a_reg( A0, A1, A2, A3, A4, A5, A6, A7 );
270 reg_class a0_reg( A0 );
271 reg_class a1_reg( A1 );
272 reg_class a2_reg( A2 );
273 reg_class a3_reg( A3 );
274 reg_class a4_reg( A4 );
275 reg_class a5_reg( A5 );
276 reg_class a6_reg( A6 );
277 reg_class a7_reg( A7 );
279 reg_class v0_reg( V0 );
280 reg_class v1_reg( V1 );
282 reg_class sp_reg( SP, SP_H );
283 reg_class fp_reg( FP, FP_H );
285 reg_class mips_flags(MIPS_FLAG);
287 reg_class v0_long_reg( V0, V0_H );
288 reg_class v1_long_reg( V1, V1_H );
289 reg_class a0_long_reg( A0, A0_H );
290 reg_class a1_long_reg( A1, A1_H );
291 reg_class a2_long_reg( A2, A2_H );
292 reg_class a3_long_reg( A3, A3_H );
293 reg_class a4_long_reg( A4, A4_H );
294 reg_class a5_long_reg( A5, A5_H );
295 reg_class a6_long_reg( A6, A6_H );
296 reg_class a7_long_reg( A7, A7_H );
297 reg_class t0_long_reg( T0, T0_H );
298 reg_class t1_long_reg( T1, T1_H );
299 reg_class t2_long_reg( T2, T2_H );
300 reg_class t3_long_reg( T3, T3_H );
301 reg_class t8_long_reg( T8, T8_H );
302 reg_class t9_long_reg( T9, T9_H );
303 reg_class s0_long_reg( S0, S0_H );
304 reg_class s1_long_reg( S1, S1_H );
305 reg_class s2_long_reg( S2, S2_H );
306 reg_class s3_long_reg( S3, S3_H );
307 reg_class s4_long_reg( S4, S4_H );
308 reg_class s5_long_reg( S5, S5_H );
309 reg_class s6_long_reg( S6, S6_H );
310 reg_class s7_long_reg( S7, S7_H );
312 reg_class int_reg( S7, S0, S1, S2, S4, S3, T8, T2, T3, T1, V1, A7, A6, A5, A4, V0, A3, A2, A1, A0, T0 );
314 reg_class no_Ax_int_reg( S7, S0, S1, S2, S4, S3, T8, T2, T3, T1, V1, V0, T0 );
316 reg_class p_reg(
317 S7, S7_H,
318 S0, S0_H,
319 S1, S1_H,
320 S2, S2_H,
321 S4, S4_H,
322 S3, S3_H,
323 T8, T8_H,
324 T2, T2_H,
325 T3, T3_H,
326 T1, T1_H,
327 A7, A7_H,
328 A6, A6_H,
329 A5, A5_H,
330 A4, A4_H,
331 A3, A3_H,
332 A2, A2_H,
333 A1, A1_H,
334 A0, A0_H,
335 T0, T0_H
336 );
338 reg_class no_T8_p_reg(
339 S7, S7_H,
340 S0, S0_H,
341 S1, S1_H,
342 S2, S2_H,
343 S4, S4_H,
344 S3, S3_H,
345 T2, T2_H,
346 T3, T3_H,
347 T1, T1_H,
348 A7, A7_H,
349 A6, A6_H,
350 A5, A5_H,
351 A4, A4_H,
352 A3, A3_H,
353 A2, A2_H,
354 A1, A1_H,
355 A0, A0_H,
356 T0, T0_H
357 );
359 reg_class long_reg(
360 S7, S7_H,
361 S0, S0_H,
362 S1, S1_H,
363 S2, S2_H,
364 S4, S4_H,
365 S3, S3_H,
366 T8, T8_H,
367 T2, T2_H,
368 T3, T3_H,
369 T1, T1_H,
370 A7, A7_H,
371 A6, A6_H,
372 A5, A5_H,
373 A4, A4_H,
374 A3, A3_H,
375 A2, A2_H,
376 A1, A1_H,
377 A0, A0_H,
378 T0, T0_H
379 );
382 // Floating point registers.
383 // 2012/8/23 Fu: F30/F31 are used as temporary registers in D2I
384 // 2016/12/1 aoqi: F31 are not used as temporary registers in D2I
385 reg_class flt_reg( F0, F1, F2, F3, F4, F5, F6, F7, F8, F9, F10, F11, F12, F13, F14, F15, F16, F17 F18, F19, F20, F21, F22, F23, F24, F25, F26, F27, F28, F29, F31);
386 reg_class dbl_reg( F0, F0_H,
387 F1, F1_H,
388 F2, F2_H,
389 F3, F3_H,
390 F4, F4_H,
391 F5, F5_H,
392 F6, F6_H,
393 F7, F7_H,
394 F8, F8_H,
395 F9, F9_H,
396 F10, F10_H,
397 F11, F11_H,
398 F12, F12_H,
399 F13, F13_H,
400 F14, F14_H,
401 F15, F15_H,
402 F16, F16_H,
403 F17, F17_H,
404 F18, F18_H,
405 F19, F19_H,
406 F20, F20_H,
407 F21, F21_H,
408 F22, F22_H,
409 F23, F23_H,
410 F24, F24_H,
411 F25, F25_H,
412 F26, F26_H,
413 F27, F27_H,
414 F28, F28_H,
415 F29, F29_H,
416 F31, F31_H);
418 reg_class flt_arg0( F12 );
419 reg_class dbl_arg0( F12, F12_H );
420 reg_class dbl_arg1( F14, F14_H );
422 %}
424 //----------DEFINITION BLOCK---------------------------------------------------
425 // Define name --> value mappings to inform the ADLC of an integer valued name
426 // Current support includes integer values in the range [0, 0x7FFFFFFF]
427 // Format:
428 // int_def <name> ( <int_value>, <expression>);
429 // Generated Code in ad_<arch>.hpp
430 // #define <name> (<expression>)
431 // // value == <int_value>
432 // Generated code in ad_<arch>.cpp adlc_verification()
433 // assert( <name> == <int_value>, "Expect (<expression>) to equal <int_value>");
434 //
435 definitions %{
436 int_def DEFAULT_COST ( 100, 100);
437 int_def HUGE_COST (1000000, 1000000);
439 // Memory refs are twice as expensive as run-of-the-mill.
440 int_def MEMORY_REF_COST ( 200, DEFAULT_COST * 2);
442 // Branches are even more expensive.
443 int_def BRANCH_COST ( 300, DEFAULT_COST * 3);
444 // we use jr instruction to construct call, so more expensive
445 // by yjl 2/28/2006
446 int_def CALL_COST ( 500, DEFAULT_COST * 5);
447 /*
448 int_def EQUAL ( 1, 1 );
449 int_def NOT_EQUAL ( 2, 2 );
450 int_def GREATER ( 3, 3 );
451 int_def GREATER_EQUAL ( 4, 4 );
452 int_def LESS ( 5, 5 );
453 int_def LESS_EQUAL ( 6, 6 );
454 */
455 %}
459 //----------SOURCE BLOCK-------------------------------------------------------
460 // This is a block of C++ code which provides values, functions, and
461 // definitions necessary in the rest of the architecture description
463 source_hpp %{
464 // Header information of the source block.
465 // Method declarations/definitions which are used outside
466 // the ad-scope can conveniently be defined here.
467 //
468 // To keep related declarations/definitions/uses close together,
469 // we switch between source %{ }% and source_hpp %{ }% freely as needed.
471 class CallStubImpl {
473 //--------------------------------------------------------------
474 //---< Used for optimization in Compile::shorten_branches >---
475 //--------------------------------------------------------------
477 public:
478 // Size of call trampoline stub.
479 static uint size_call_trampoline() {
480 return 0; // no call trampolines on this platform
481 }
483 // number of relocations needed by a call trampoline stub
484 static uint reloc_call_trampoline() {
485 return 0; // no call trampolines on this platform
486 }
487 };
489 class HandlerImpl {
491 public:
493 static int emit_exception_handler(CodeBuffer &cbuf);
494 static int emit_deopt_handler(CodeBuffer& cbuf);
496 static uint size_exception_handler() {
497 // NativeCall instruction size is the same as NativeJump.
498 // exception handler starts out as jump and can be patched to
499 // a call be deoptimization. (4932387)
500 // Note that this value is also credited (in output.cpp) to
501 // the size of the code section.
502 // return NativeJump::instruction_size;
503 int size = NativeCall::instruction_size;
504 return round_to(size, 16);
505 }
507 #ifdef _LP64
508 static uint size_deopt_handler() {
509 int size = NativeCall::instruction_size;
510 return round_to(size, 16);
511 }
512 #else
513 static uint size_deopt_handler() {
514 // NativeCall instruction size is the same as NativeJump.
515 // exception handler starts out as jump and can be patched to
516 // a call be deoptimization. (4932387)
517 // Note that this value is also credited (in output.cpp) to
518 // the size of the code section.
519 return 5 + NativeJump::instruction_size; // pushl(); jmp;
520 }
521 #endif
522 };
524 %} // end source_hpp
526 source %{
528 #define NO_INDEX 0
529 #define RELOC_IMM64 Assembler::imm_operand
530 #define RELOC_DISP32 Assembler::disp32_operand
533 #define __ _masm.
536 // Emit exception handler code.
537 // Stuff framesize into a register and call a VM stub routine.
538 int HandlerImpl::emit_exception_handler(CodeBuffer& cbuf) {
539 /*
540 // Note that the code buffer's insts_mark is always relative to insts.
541 // That's why we must use the macroassembler to generate a handler.
542 MacroAssembler _masm(&cbuf);
543 address base = __ start_a_stub(size_exception_handler());
544 if (base == NULL) return 0; // CodeBuffer::expand failed
545 int offset = __ offset();
546 __ jump(RuntimeAddress(OptoRuntime::exception_blob()->entry_point()));
547 assert(__ offset() - offset <= (int) size_exception_handler(), "overflow");
548 __ end_a_stub();
549 return offset;
550 */
551 // Note that the code buffer's insts_mark is always relative to insts.
552 // That's why we must use the macroassembler to generate a handler.
553 MacroAssembler _masm(&cbuf);
554 address base =
555 __ start_a_stub(size_exception_handler());
556 if (base == NULL) return 0; // CodeBuffer::expand failed
557 int offset = __ offset();
559 __ block_comment("; emit_exception_handler");
561 /* 2012/9/25 FIXME Jin: According to X86, we should use direct jumpt.
562 * * However, this will trigger an assert after the 40th method:
563 * *
564 * * 39 b java.lang.Throwable::<init> (25 bytes)
565 * * --- ns java.lang.Throwable::fillInStackTrace
566 * * 40 !b java.net.URLClassLoader::findClass (29 bytes)
567 * * /vm/opto/runtime.cpp, 900 , assert(caller.is_compiled_frame(),"must be")
568 * * 40 made not entrant (2) java.net.URLClassLoader::findClass (29 bytes)
569 * *
570 * * If we change from JR to JALR, the assert will disappear, but WebClient will
571 * * fail after the 403th method with unknown reason.
572 * */
573 __ li48(T9, (long)OptoRuntime::exception_blob()->entry_point());
574 __ jr(T9);
575 __ delayed()->nop();
576 __ align(16);
577 assert(__ offset() - offset <= (int) size_exception_handler(), "overflow");
578 __ end_a_stub();
579 return offset;
580 }
582 // Emit deopt handler code.
583 int HandlerImpl::emit_deopt_handler(CodeBuffer& cbuf) {
584 // Note that the code buffer's insts_mark is always relative to insts.
585 // That's why we must use the macroassembler to generate a handler.
586 MacroAssembler _masm(&cbuf);
587 address base =
588 __ start_a_stub(size_deopt_handler());
590 // FIXME
591 if (base == NULL) return 0; // CodeBuffer::expand failed
592 int offset = __ offset();
594 __ block_comment("; emit_deopt_handler");
596 cbuf.set_insts_mark();
597 __ relocate(relocInfo::runtime_call_type);
599 __ li48(T9, (long)SharedRuntime::deopt_blob()->unpack());
600 __ jalr(T9);
601 __ delayed()->nop();
602 __ align(16);
603 assert(__ offset() - offset <= (int) size_deopt_handler(), "overflow");
604 __ end_a_stub();
605 return offset;
606 }
609 const bool Matcher::match_rule_supported(int opcode) {
610 if (!has_match_rule(opcode))
611 return false;
613 switch (opcode) {
614 //Op_CountLeadingZerosI Op_CountLeadingZerosL can be deleted, all MIPS CPUs support clz & dclz.
615 case Op_CountLeadingZerosI:
616 case Op_CountLeadingZerosL:
617 if (!UseCountLeadingZerosInstruction)
618 return false;
619 break;
620 case Op_CountTrailingZerosI:
621 case Op_CountTrailingZerosL:
622 if (!UseCountTrailingZerosInstruction)
623 return false;
624 break;
625 }
627 return true; // Per default match rules are supported.
628 }
630 //FIXME
631 // emit call stub, compiled java to interpreter
632 void emit_java_to_interp(CodeBuffer &cbuf ) {
633 // Stub is fixed up when the corresponding call is converted from calling
634 // compiled code to calling interpreted code.
635 // mov rbx,0
636 // jmp -1
638 address mark = cbuf.insts_mark(); // get mark within main instrs section
640 // Note that the code buffer's insts_mark is always relative to insts.
641 // That's why we must use the macroassembler to generate a stub.
642 MacroAssembler _masm(&cbuf);
644 address base =
645 __ start_a_stub(Compile::MAX_stubs_size);
646 if (base == NULL) return; // CodeBuffer::expand failed
647 // static stub relocation stores the instruction address of the call
649 __ relocate(static_stub_Relocation::spec(mark), 0);
651 /* 2012/10/29 Jin: Rmethod contains methodOop, it should be relocated for GC */
652 /*
653 int oop_index = __ oop_recorder()->allocate_index(NULL);
654 RelocationHolder rspec = oop_Relocation::spec(oop_index);
655 __ relocate(rspec);
656 */
658 // static stub relocation also tags the methodOop in the code-stream.
659 __ li48(S3, (long)0);
660 // This is recognized as unresolved by relocs/nativeInst/ic code
662 __ relocate(relocInfo::runtime_call_type);
664 cbuf.set_insts_mark();
665 address call_pc = (address)-1;
666 __ li48(AT, (long)call_pc);
667 __ jr(AT);
668 __ nop();
669 __ align(16);
670 __ end_a_stub();
671 // Update current stubs pointer and restore code_end.
672 }
674 // size of call stub, compiled java to interpretor
675 uint size_java_to_interp() {
676 int size = 4 * 4 + NativeCall::instruction_size; // sizeof(li48) + NativeCall::instruction_size
677 return round_to(size, 16);
678 }
680 // relocation entries for call stub, compiled java to interpreter
681 uint reloc_java_to_interp() {
682 return 16; // in emit_java_to_interp + in Java_Static_Call
683 }
685 bool Matcher::is_short_branch_offset(int rule, int br_size, int offset) {
686 if( Assembler::is_simm16(offset) ) return true;
687 else
688 {
689 assert(false, "Not implemented yet !" );
690 Unimplemented();
691 }
692 }
695 // No additional cost for CMOVL.
696 const int Matcher::long_cmove_cost() { return 0; }
698 // No CMOVF/CMOVD with SSE2
699 const int Matcher::float_cmove_cost() { return ConditionalMoveLimit; }
701 // Does the CPU require late expand (see block.cpp for description of late expand)?
702 const bool Matcher::require_postalloc_expand = false;
704 // Should the Matcher clone shifts on addressing modes, expecting them
705 // to be subsumed into complex addressing expressions or compute them
706 // into registers? True for Intel but false for most RISCs
707 const bool Matcher::clone_shift_expressions = false;
709 // Do we need to mask the count passed to shift instructions or does
710 // the cpu only look at the lower 5/6 bits anyway?
711 const bool Matcher::need_masked_shift_count = false;
713 bool Matcher::narrow_oop_use_complex_address() {
714 NOT_LP64(ShouldNotCallThis());
715 assert(UseCompressedOops, "only for compressed oops code");
716 return false;
717 }
719 bool Matcher::narrow_klass_use_complex_address() {
720 NOT_LP64(ShouldNotCallThis());
721 assert(UseCompressedClassPointers, "only for compressed klass code");
722 return false;
723 }
725 // This is UltraSparc specific, true just means we have fast l2f conversion
726 const bool Matcher::convL2FSupported(void) {
727 return true;
728 }
730 // Max vector size in bytes. 0 if not supported.
731 const int Matcher::vector_width_in_bytes(BasicType bt) {
732 assert(MaxVectorSize == 8, "");
733 return 8;
734 }
736 // Vector ideal reg
737 const int Matcher::vector_ideal_reg(int size) {
738 assert(MaxVectorSize == 8, "");
739 switch(size) {
740 case 8: return Op_VecD;
741 }
742 ShouldNotReachHere();
743 return 0;
744 }
746 // Only lowest bits of xmm reg are used for vector shift count.
747 const int Matcher::vector_shift_count_ideal_reg(int size) {
748 fatal("vector shift is not supported");
749 return Node::NotAMachineReg;
750 }
752 // Limits on vector size (number of elements) loaded into vector.
753 const int Matcher::max_vector_size(const BasicType bt) {
754 assert(is_java_primitive(bt), "only primitive type vectors");
755 return vector_width_in_bytes(bt)/type2aelembytes(bt);
756 }
758 const int Matcher::min_vector_size(const BasicType bt) {
759 return max_vector_size(bt); // Same as max.
760 }
762 // MIPS supports misaligned vectors store/load? FIXME
763 const bool Matcher::misaligned_vectors_ok() {
764 return false;
765 //return !AlignVector; // can be changed by flag
766 }
768 // Register for DIVI projection of divmodI
769 RegMask Matcher::divI_proj_mask() {
770 ShouldNotReachHere();
771 return RegMask();
772 }
774 // Register for MODI projection of divmodI
775 RegMask Matcher::modI_proj_mask() {
776 ShouldNotReachHere();
777 return RegMask();
778 }
780 // Register for DIVL projection of divmodL
781 RegMask Matcher::divL_proj_mask() {
782 ShouldNotReachHere();
783 return RegMask();
784 }
786 int Matcher::regnum_to_fpu_offset(int regnum) {
787 return regnum - 32; // The FP registers are in the second chunk
788 }
791 const bool Matcher::isSimpleConstant64(jlong value) {
792 // Will one (StoreL ConL) be cheaper than two (StoreI ConI)?.
793 return true;
794 }
797 // Return whether or not this register is ever used as an argument. This
798 // function is used on startup to build the trampoline stubs in generateOptoStub.
799 // Registers not mentioned will be killed by the VM call in the trampoline, and
800 // arguments in those registers not be available to the callee.
801 bool Matcher::can_be_java_arg( int reg ) {
802 /* Refer to: [sharedRuntime_mips_64.cpp] SharedRuntime::java_calling_convention() */
803 if ( reg == T0_num || reg == T0_H_num
804 || reg == A0_num || reg == A0_H_num
805 || reg == A1_num || reg == A1_H_num
806 || reg == A2_num || reg == A2_H_num
807 || reg == A3_num || reg == A3_H_num
808 || reg == A4_num || reg == A4_H_num
809 || reg == A5_num || reg == A5_H_num
810 || reg == A6_num || reg == A6_H_num
811 || reg == A7_num || reg == A7_H_num )
812 return true;
814 if ( reg == F12_num || reg == F12_H_num
815 || reg == F13_num || reg == F13_H_num
816 || reg == F14_num || reg == F14_H_num
817 || reg == F15_num || reg == F15_H_num
818 || reg == F16_num || reg == F16_H_num
819 || reg == F17_num || reg == F17_H_num
820 || reg == F18_num || reg == F18_H_num
821 || reg == F19_num || reg == F19_H_num )
822 return true;
824 return false;
825 }
827 bool Matcher::is_spillable_arg( int reg ) {
828 return can_be_java_arg(reg);
829 }
831 bool Matcher::use_asm_for_ldiv_by_con( jlong divisor ) {
832 return false;
833 }
835 // Register for MODL projection of divmodL
836 RegMask Matcher::modL_proj_mask() {
837 ShouldNotReachHere();
838 return RegMask();
839 }
841 const RegMask Matcher::method_handle_invoke_SP_save_mask() {
842 return FP_REG_mask();
843 }
845 // MIPS doesn't support AES intrinsics
846 const bool Matcher::pass_original_key_for_aes() {
847 return false;
848 }
850 // The address of the call instruction needs to be 16-byte aligned to
851 // ensure that it does not span a cache line so that it can be patched.
853 int CallStaticJavaDirectNode::compute_padding(int current_offset) const {
854 //lui
855 //ori
856 //dsll
857 //ori
859 //jalr
860 //nop
862 return round_to(current_offset, alignment_required()) - current_offset;
863 }
865 // The address of the call instruction needs to be 16-byte aligned to
866 // ensure that it does not span a cache line so that it can be patched.
867 int CallDynamicJavaDirectNode::compute_padding(int current_offset) const {
868 //li64 <--- skip
870 //lui
871 //ori
872 //dsll
873 //ori
875 //jalr
876 //nop
878 current_offset += 4 * 6; // skip li64
879 return round_to(current_offset, alignment_required()) - current_offset;
880 }
882 int CallLeafNoFPDirectNode::compute_padding(int current_offset) const {
883 //lui
884 //ori
885 //dsll
886 //ori
888 //jalr
889 //nop
891 return round_to(current_offset, alignment_required()) - current_offset;
892 }
894 int CallLeafDirectNode::compute_padding(int current_offset) const {
895 //lui
896 //ori
897 //dsll
898 //ori
900 //jalr
901 //nop
903 return round_to(current_offset, alignment_required()) - current_offset;
904 }
906 int CallRuntimeDirectNode::compute_padding(int current_offset) const {
907 //lui
908 //ori
909 //dsll
910 //ori
912 //jalr
913 //nop
915 return round_to(current_offset, alignment_required()) - current_offset;
916 }
918 // If CPU can load and store mis-aligned doubles directly then no fixup is
919 // needed. Else we split the double into 2 integer pieces and move it
920 // piece-by-piece. Only happens when passing doubles into C code as the
921 // Java calling convention forces doubles to be aligned.
922 const bool Matcher::misaligned_doubles_ok = false;
923 // Do floats take an entire double register or just half?
924 //const bool Matcher::float_in_double = true;
925 bool Matcher::float_in_double() { return false; }
926 // Threshold size for cleararray.
927 const int Matcher::init_array_short_size = 8 * BytesPerLong;
928 // Do ints take an entire long register or just half?
929 const bool Matcher::int_in_long = true;
930 // Is it better to copy float constants, or load them directly from memory?
931 // Intel can load a float constant from a direct address, requiring no
932 // extra registers. Most RISCs will have to materialize an address into a
933 // register first, so they would do better to copy the constant from stack.
934 const bool Matcher::rematerialize_float_constants = false;
935 // Advertise here if the CPU requires explicit rounding operations
936 // to implement the UseStrictFP mode.
937 const bool Matcher::strict_fp_requires_explicit_rounding = false;
938 // The ecx parameter to rep stos for the ClearArray node is in dwords.
939 const bool Matcher::init_array_count_is_in_bytes = false;
942 // Indicate if the safepoint node needs the polling page as an input.
943 // Since MIPS doesn't have absolute addressing, it needs.
944 bool SafePointNode::needs_polling_address_input() {
945 return true;
946 }
948 // !!!!! Special hack to get all type of calls to specify the byte offset
949 // from the start of the call to the point where the return address
950 // will point.
951 int MachCallStaticJavaNode::ret_addr_offset() {
952 assert(NativeCall::instruction_size == 24, "in MachCallStaticJavaNode::ret_addr_offset");
953 //The value ought to be 16 bytes.
954 //lui
955 //ori
956 //dsll
957 //ori
958 //jalr
959 //nop
960 return NativeCall::instruction_size;
961 }
963 int MachCallDynamicJavaNode::ret_addr_offset() {
964 /* 2012/9/10 Jin: must be kept in sync with Java_Dynamic_Call */
966 // return NativeCall::instruction_size;
967 assert(NativeCall::instruction_size == 24, "in MachCallDynamicJavaNode::ret_addr_offset");
968 //The value ought to be 4 + 16 bytes.
969 //lui IC_Klass,
970 //ori IC_Klass,
971 //dsll IC_Klass
972 //ori IC_Klass
973 //lui T9
974 //ori T9
975 //dsll T9
976 //ori T9
977 //jalr T9
978 //nop
979 return 6 * 4 + NativeCall::instruction_size;
981 }
983 //=============================================================================
985 // Figure out which register class each belongs in: rc_int, rc_float, rc_stack
986 enum RC { rc_bad, rc_int, rc_float, rc_stack };
987 static enum RC rc_class( OptoReg::Name reg ) {
988 if( !OptoReg::is_valid(reg) ) return rc_bad;
989 if (OptoReg::is_stack(reg)) return rc_stack;
990 VMReg r = OptoReg::as_VMReg(reg);
991 if (r->is_Register()) return rc_int;
992 assert(r->is_FloatRegister(), "must be");
993 return rc_float;
994 }
996 uint MachSpillCopyNode::implementation( CodeBuffer *cbuf, PhaseRegAlloc *ra_, bool do_size, outputStream* st ) const {
997 // Get registers to move
998 OptoReg::Name src_second = ra_->get_reg_second(in(1));
999 OptoReg::Name src_first = ra_->get_reg_first(in(1));
1000 OptoReg::Name dst_second = ra_->get_reg_second(this );
1001 OptoReg::Name dst_first = ra_->get_reg_first(this );
1003 enum RC src_second_rc = rc_class(src_second);
1004 enum RC src_first_rc = rc_class(src_first);
1005 enum RC dst_second_rc = rc_class(dst_second);
1006 enum RC dst_first_rc = rc_class(dst_first);
1008 assert(OptoReg::is_valid(src_first) && OptoReg::is_valid(dst_first), "must move at least 1 register" );
1010 // Generate spill code!
1011 int size = 0;
1013 if( src_first == dst_first && src_second == dst_second )
1014 return 0; // Self copy, no move
1016 if (src_first_rc == rc_stack) {
1017 // mem ->
1018 if (dst_first_rc == rc_stack) {
1019 // mem -> mem
1020 assert(src_second != dst_first, "overlap");
1021 if ((src_first & 1) == 0 && src_first + 1 == src_second &&
1022 (dst_first & 1) == 0 && dst_first + 1 == dst_second) {
1023 // 64-bit
1024 int src_offset = ra_->reg2offset(src_first);
1025 int dst_offset = ra_->reg2offset(dst_first);
1026 if (cbuf) {
1027 MacroAssembler _masm(cbuf);
1028 __ ld(AT, Address(SP, src_offset));
1029 __ sd(AT, Address(SP, dst_offset));
1030 #ifndef PRODUCT
1031 } else {
1032 if(!do_size){
1033 if (size != 0) st->print("\n\t");
1034 st->print("ld AT, [SP + #%d]\t# 64-bit mem-mem spill 1\n\t"
1035 "sd AT, [SP + #%d]",
1036 src_offset, dst_offset);
1037 }
1038 #endif
1039 }
1040 size += 8;
1041 } else {
1042 // 32-bit
1043 assert(!((src_first & 1) == 0 && src_first + 1 == src_second), "no transform");
1044 assert(!((dst_first & 1) == 0 && dst_first + 1 == dst_second), "no transform");
1045 // No pushl/popl, so:
1046 int src_offset = ra_->reg2offset(src_first);
1047 int dst_offset = ra_->reg2offset(dst_first);
1048 if (cbuf) {
1049 MacroAssembler _masm(cbuf);
1050 __ lw(AT, Address(SP, src_offset));
1051 __ sw(AT, Address(SP, dst_offset));
1052 #ifndef PRODUCT
1053 } else {
1054 if(!do_size){
1055 if (size != 0) st->print("\n\t");
1056 st->print("lw AT, [SP + #%d] spill 2\n\t"
1057 "sw AT, [SP + #%d]\n\t",
1058 src_offset, dst_offset);
1059 }
1060 #endif
1061 }
1062 size += 8;
1063 }
1064 return size;
1065 } else if (dst_first_rc == rc_int) {
1066 // mem -> gpr
1067 if ((src_first & 1) == 0 && src_first + 1 == src_second &&
1068 (dst_first & 1) == 0 && dst_first + 1 == dst_second) {
1069 // 64-bit
1070 int offset = ra_->reg2offset(src_first);
1071 if (cbuf) {
1072 MacroAssembler _masm(cbuf);
1073 __ ld(as_Register(Matcher::_regEncode[dst_first]), Address(SP, offset));
1074 #ifndef PRODUCT
1075 } else {
1076 if(!do_size){
1077 if (size != 0) st->print("\n\t");
1078 st->print("ld %s, [SP + #%d]\t# spill 3",
1079 Matcher::regName[dst_first],
1080 offset);
1081 }
1082 #endif
1083 }
1084 size += 4;
1085 } else {
1086 // 32-bit
1087 assert(!((src_first & 1) == 0 && src_first + 1 == src_second), "no transform");
1088 assert(!((dst_first & 1) == 0 && dst_first + 1 == dst_second), "no transform");
1089 int offset = ra_->reg2offset(src_first);
1090 if (cbuf) {
1091 MacroAssembler _masm(cbuf);
1092 if (this->ideal_reg() == Op_RegI)
1093 __ lw(as_Register(Matcher::_regEncode[dst_first]), Address(SP, offset));
1094 else
1095 __ lwu(as_Register(Matcher::_regEncode[dst_first]), Address(SP, offset));
1096 #ifndef PRODUCT
1097 } else {
1098 if(!do_size){
1099 if (size != 0) st->print("\n\t");
1100 if (this->ideal_reg() == Op_RegI)
1101 st->print("lw %s, [SP + #%d]\t# spill 4",
1102 Matcher::regName[dst_first],
1103 offset);
1104 else
1105 st->print("lwu %s, [SP + #%d]\t# spill 5",
1106 Matcher::regName[dst_first],
1107 offset);
1108 }
1109 #endif
1110 }
1111 size += 4;
1112 }
1113 return size;
1114 } else if (dst_first_rc == rc_float) {
1115 // mem-> xmm
1116 if ((src_first & 1) == 0 && src_first + 1 == src_second &&
1117 (dst_first & 1) == 0 && dst_first + 1 == dst_second) {
1118 // 64-bit
1119 int offset = ra_->reg2offset(src_first);
1120 if (cbuf) {
1121 MacroAssembler _masm(cbuf);
1122 __ ldc1( as_FloatRegister(Matcher::_regEncode[dst_first]), Address(SP, offset));
1123 #ifndef PRODUCT
1124 } else {
1125 if(!do_size){
1126 if (size != 0) st->print("\n\t");
1127 st->print("ldc1 %s, [SP + #%d]\t# spill 6",
1128 Matcher::regName[dst_first],
1129 offset);
1130 }
1131 #endif
1132 }
1133 size += 4;
1134 } else {
1135 // 32-bit
1136 assert(!((src_first & 1) == 0 && src_first + 1 == src_second), "no transform");
1137 assert(!((dst_first & 1) == 0 && dst_first + 1 == dst_second), "no transform");
1138 int offset = ra_->reg2offset(src_first);
1139 if (cbuf) {
1140 MacroAssembler _masm(cbuf);
1141 __ lwc1( as_FloatRegister(Matcher::_regEncode[dst_first]), Address(SP, offset));
1142 #ifndef PRODUCT
1143 } else {
1144 if(!do_size){
1145 if (size != 0) st->print("\n\t");
1146 st->print("lwc1 %s, [SP + #%d]\t# spill 7",
1147 Matcher::regName[dst_first],
1148 offset);
1149 }
1150 #endif
1151 }
1152 size += 4;
1153 }
1154 return size;
1155 }
1156 } else if (src_first_rc == rc_int) {
1157 // gpr ->
1158 if (dst_first_rc == rc_stack) {
1159 // gpr -> mem
1160 if ((src_first & 1) == 0 && src_first + 1 == src_second &&
1161 (dst_first & 1) == 0 && dst_first + 1 == dst_second) {
1162 // 64-bit
1163 int offset = ra_->reg2offset(dst_first);
1164 if (cbuf) {
1165 MacroAssembler _masm(cbuf);
1166 __ sd(as_Register(Matcher::_regEncode[src_first]), Address(SP, offset));
1167 #ifndef PRODUCT
1168 } else {
1169 if(!do_size){
1170 if (size != 0) st->print("\n\t");
1171 st->print("sd %s, [SP + #%d] # spill 8",
1172 Matcher::regName[src_first],
1173 offset);
1174 }
1175 #endif
1176 }
1177 size += 4;
1178 } else {
1179 // 32-bit
1180 assert(!((src_first & 1) == 0 && src_first + 1 == src_second), "no transform");
1181 assert(!((dst_first & 1) == 0 && dst_first + 1 == dst_second), "no transform");
1182 int offset = ra_->reg2offset(dst_first);
1183 if (cbuf) {
1184 MacroAssembler _masm(cbuf);
1185 __ sw(as_Register(Matcher::_regEncode[src_first]), Address(SP, offset));
1186 #ifndef PRODUCT
1187 } else {
1188 if(!do_size){
1189 if (size != 0) st->print("\n\t");
1190 st->print("sw %s, [SP + #%d]\t# spill 9",
1191 Matcher::regName[src_first], offset);
1192 }
1193 #endif
1194 }
1195 size += 4;
1196 }
1197 return size;
1198 } else if (dst_first_rc == rc_int) {
1199 // gpr -> gpr
1200 if ((src_first & 1) == 0 && src_first + 1 == src_second &&
1201 (dst_first & 1) == 0 && dst_first + 1 == dst_second) {
1202 // 64-bit
1203 if (cbuf) {
1204 MacroAssembler _masm(cbuf);
1205 __ move(as_Register(Matcher::_regEncode[dst_first]),
1206 as_Register(Matcher::_regEncode[src_first]));
1207 #ifndef PRODUCT
1208 } else {
1209 if(!do_size){
1210 if (size != 0) st->print("\n\t");
1211 st->print("move(64bit) %s <-- %s\t# spill 10",
1212 Matcher::regName[dst_first],
1213 Matcher::regName[src_first]);
1214 }
1215 #endif
1216 }
1217 size += 4;
1218 return size;
1219 } else {
1220 // 32-bit
1221 assert(!((src_first & 1) == 0 && src_first + 1 == src_second), "no transform");
1222 assert(!((dst_first & 1) == 0 && dst_first + 1 == dst_second), "no transform");
1223 if (cbuf) {
1224 MacroAssembler _masm(cbuf);
1225 if (this->ideal_reg() == Op_RegI)
1226 __ move_u32(as_Register(Matcher::_regEncode[dst_first]), as_Register(Matcher::_regEncode[src_first]));
1227 else
1228 __ daddu(as_Register(Matcher::_regEncode[dst_first]), as_Register(Matcher::_regEncode[src_first]), R0);
1230 #ifndef PRODUCT
1231 } else {
1232 if(!do_size){
1233 if (size != 0) st->print("\n\t");
1234 st->print("move(32-bit) %s <-- %s\t# spill 11",
1235 Matcher::regName[dst_first],
1236 Matcher::regName[src_first]);
1237 }
1238 #endif
1239 }
1240 size += 4;
1241 return size;
1242 }
1243 } else if (dst_first_rc == rc_float) {
1244 // gpr -> xmm
1245 if ((src_first & 1) == 0 && src_first + 1 == src_second &&
1246 (dst_first & 1) == 0 && dst_first + 1 == dst_second) {
1247 // 64-bit
1248 if (cbuf) {
1249 MacroAssembler _masm(cbuf);
1250 __ dmtc1(as_Register(Matcher::_regEncode[src_first]), as_FloatRegister(Matcher::_regEncode[dst_first]));
1251 #ifndef PRODUCT
1252 } else {
1253 if(!do_size){
1254 if (size != 0) st->print("\n\t");
1255 st->print("dmtc1 %s, %s\t# spill 12",
1256 Matcher::regName[dst_first],
1257 Matcher::regName[src_first]);
1258 }
1259 #endif
1260 }
1261 size += 4;
1262 } else {
1263 // 32-bit
1264 assert(!((src_first & 1) == 0 && src_first + 1 == src_second), "no transform");
1265 assert(!((dst_first & 1) == 0 && dst_first + 1 == dst_second), "no transform");
1266 if (cbuf) {
1267 MacroAssembler _masm(cbuf);
1268 __ mtc1( as_Register(Matcher::_regEncode[src_first]), as_FloatRegister(Matcher::_regEncode[dst_first]) );
1269 #ifndef PRODUCT
1270 } else {
1271 if(!do_size){
1272 if (size != 0) st->print("\n\t");
1273 st->print("mtc1 %s, %s\t# spill 13",
1274 Matcher::regName[dst_first],
1275 Matcher::regName[src_first]);
1276 }
1277 #endif
1278 }
1279 size += 4;
1280 }
1281 return size;
1282 }
1283 } else if (src_first_rc == rc_float) {
1284 // xmm ->
1285 if (dst_first_rc == rc_stack) {
1286 // xmm -> mem
1287 if ((src_first & 1) == 0 && src_first + 1 == src_second &&
1288 (dst_first & 1) == 0 && dst_first + 1 == dst_second) {
1289 // 64-bit
1290 int offset = ra_->reg2offset(dst_first);
1291 if (cbuf) {
1292 MacroAssembler _masm(cbuf);
1293 __ sdc1( as_FloatRegister(Matcher::_regEncode[src_first]), Address(SP, offset) );
1294 #ifndef PRODUCT
1295 } else {
1296 if(!do_size){
1297 if (size != 0) st->print("\n\t");
1298 st->print("sdc1 %s, [SP + #%d]\t# spill 14",
1299 Matcher::regName[src_first],
1300 offset);
1301 }
1302 #endif
1303 }
1304 size += 4;
1305 } else {
1306 // 32-bit
1307 assert(!((src_first & 1) == 0 && src_first + 1 == src_second), "no transform");
1308 assert(!((dst_first & 1) == 0 && dst_first + 1 == dst_second), "no transform");
1309 int offset = ra_->reg2offset(dst_first);
1310 if (cbuf) {
1311 MacroAssembler _masm(cbuf);
1312 __ swc1(as_FloatRegister(Matcher::_regEncode[src_first]), Address(SP, offset));
1313 #ifndef PRODUCT
1314 } else {
1315 if(!do_size){
1316 if (size != 0) st->print("\n\t");
1317 st->print("swc1 %s, [SP + #%d]\t# spill 15",
1318 Matcher::regName[src_first],
1319 offset);
1320 }
1321 #endif
1322 }
1323 size += 4;
1324 }
1325 return size;
1326 } else if (dst_first_rc == rc_int) {
1327 // xmm -> gpr
1328 if ((src_first & 1) == 0 && src_first + 1 == src_second &&
1329 (dst_first & 1) == 0 && dst_first + 1 == dst_second) {
1330 // 64-bit
1331 if (cbuf) {
1332 MacroAssembler _masm(cbuf);
1333 __ dmfc1( as_Register(Matcher::_regEncode[dst_first]), as_FloatRegister(Matcher::_regEncode[src_first]));
1334 #ifndef PRODUCT
1335 } else {
1336 if(!do_size){
1337 if (size != 0) st->print("\n\t");
1338 st->print("dmfc1 %s, %s\t# spill 16",
1339 Matcher::regName[dst_first],
1340 Matcher::regName[src_first]);
1341 }
1342 #endif
1343 }
1344 size += 4;
1345 } else {
1346 // 32-bit
1347 assert(!((src_first & 1) == 0 && src_first + 1 == src_second), "no transform");
1348 assert(!((dst_first & 1) == 0 && dst_first + 1 == dst_second), "no transform");
1349 if (cbuf) {
1350 MacroAssembler _masm(cbuf);
1351 __ mfc1( as_Register(Matcher::_regEncode[dst_first]), as_FloatRegister(Matcher::_regEncode[src_first]));
1352 #ifndef PRODUCT
1353 } else {
1354 if(!do_size){
1355 if (size != 0) st->print("\n\t");
1356 st->print("mfc1 %s, %s\t# spill 17",
1357 Matcher::regName[dst_first],
1358 Matcher::regName[src_first]);
1359 }
1360 #endif
1361 }
1362 size += 4;
1363 }
1364 return size;
1365 } else if (dst_first_rc == rc_float) {
1366 // xmm -> xmm
1367 if ((src_first & 1) == 0 && src_first + 1 == src_second &&
1368 (dst_first & 1) == 0 && dst_first + 1 == dst_second) {
1369 // 64-bit
1370 if (cbuf) {
1371 MacroAssembler _masm(cbuf);
1372 __ mov_d( as_FloatRegister(Matcher::_regEncode[dst_first]), as_FloatRegister(Matcher::_regEncode[src_first]));
1373 #ifndef PRODUCT
1374 } else {
1375 if(!do_size){
1376 if (size != 0) st->print("\n\t");
1377 st->print("mov_d %s <-- %s\t# spill 18",
1378 Matcher::regName[dst_first],
1379 Matcher::regName[src_first]);
1380 }
1381 #endif
1382 }
1383 size += 4;
1384 } else {
1385 // 32-bit
1386 assert(!((src_first & 1) == 0 && src_first + 1 == src_second), "no transform");
1387 assert(!((dst_first & 1) == 0 && dst_first + 1 == dst_second), "no transform");
1388 if (cbuf) {
1389 MacroAssembler _masm(cbuf);
1390 __ mov_s( as_FloatRegister(Matcher::_regEncode[dst_first]), as_FloatRegister(Matcher::_regEncode[src_first]));
1391 #ifndef PRODUCT
1392 } else {
1393 if(!do_size){
1394 if (size != 0) st->print("\n\t");
1395 st->print("mov_s %s <-- %s\t# spill 19",
1396 Matcher::regName[dst_first],
1397 Matcher::regName[src_first]);
1398 }
1399 #endif
1400 }
1401 size += 4;
1402 }
1403 return size;
1404 }
1405 }
1407 assert(0," foo ");
1408 Unimplemented();
1409 return size;
1411 }
1413 #ifndef PRODUCT
1414 void MachSpillCopyNode::format( PhaseRegAlloc *ra_, outputStream* st ) const {
1415 implementation( NULL, ra_, false, st );
1416 }
1417 #endif
1419 void MachSpillCopyNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
1420 implementation( &cbuf, ra_, false, NULL );
1421 }
1423 uint MachSpillCopyNode::size(PhaseRegAlloc *ra_) const {
1424 return implementation( NULL, ra_, true, NULL );
1425 }
1427 //=============================================================================
1428 #
1430 #ifndef PRODUCT
1431 void MachBreakpointNode::format( PhaseRegAlloc *, outputStream* st ) const {
1432 st->print("INT3");
1433 }
1434 #endif
1436 void MachBreakpointNode::emit(CodeBuffer &cbuf, PhaseRegAlloc* ra_) const {
1437 MacroAssembler _masm(&cbuf);
1438 __ int3();
1439 }
1441 uint MachBreakpointNode::size(PhaseRegAlloc* ra_) const {
1442 return MachNode::size(ra_);
1443 }
1446 //=============================================================================
1447 #ifndef PRODUCT
1448 void MachEpilogNode::format( PhaseRegAlloc *ra_, outputStream* st ) const {
1449 Compile *C = ra_->C;
1450 int framesize = C->frame_size_in_bytes();
1452 assert((framesize & (StackAlignmentInBytes-1)) == 0, "frame size not aligned");
1454 st->print("daddiu SP, SP, %d # Rlease stack @ MachEpilogNode",framesize);
1455 st->cr(); st->print("\t");
1456 if (UseLoongsonISA) {
1457 st->print("gslq RA, FP, SP, %d # Restore FP & RA @ MachEpilogNode", -wordSize*2);
1458 } else {
1459 st->print("ld RA, SP, %d # Restore RA @ MachEpilogNode", -wordSize);
1460 st->cr(); st->print("\t");
1461 st->print("ld FP, SP, %d # Restore FP @ MachEpilogNode", -wordSize*2);
1462 }
1464 if( do_polling() && C->is_method_compilation() ) {
1465 st->print("Poll Safepoint # MachEpilogNode");
1466 }
1467 }
1468 #endif
1470 void MachEpilogNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
1471 Compile *C = ra_->C;
1472 MacroAssembler _masm(&cbuf);
1473 int framesize = C->frame_size_in_bytes();
1475 assert((framesize & (StackAlignmentInBytes-1)) == 0, "frame size not aligned");
1477 __ daddiu(SP, SP, framesize);
1479 if (UseLoongsonISA) {
1480 __ gslq(RA, FP, SP, -wordSize*2);
1481 } else {
1482 __ ld(RA, SP, -wordSize );
1483 __ ld(FP, SP, -wordSize*2 );
1484 }
1486 /* 2012/11/19 Jin: The epilog in a RuntimeStub should not contain a safepoint */
1487 if( do_polling() && C->is_method_compilation() ) {
1488 #ifndef OPT_SAFEPOINT
1489 __ set64(AT, (long)os::get_polling_page());
1490 __ relocate(relocInfo::poll_return_type);
1491 __ lw(AT, AT, 0);
1492 #else
1493 __ lui(AT, Assembler::split_high((intptr_t)os::get_polling_page()));
1494 __ relocate(relocInfo::poll_return_type);
1495 __ lw(AT, AT, Assembler::split_low((intptr_t)os::get_polling_page()));
1496 #endif
1497 }
1498 }
1500 uint MachEpilogNode::size(PhaseRegAlloc *ra_) const {
1501 return MachNode::size(ra_); // too many variables; just compute it the hard way fujie debug
1502 }
1504 int MachEpilogNode::reloc() const {
1505 return 0; // a large enough number
1506 }
1508 const Pipeline * MachEpilogNode::pipeline() const {
1509 return MachNode::pipeline_class();
1510 }
1512 int MachEpilogNode::safepoint_offset() const { return 0; }
1514 //=============================================================================
1516 #ifndef PRODUCT
1517 void BoxLockNode::format( PhaseRegAlloc *ra_, outputStream* st ) const {
1518 int offset = ra_->reg2offset(in_RegMask(0).find_first_elem());
1519 int reg = ra_->get_reg_first(this);
1520 st->print("ADDI %s, SP, %d @BoxLockNode",Matcher::regName[reg],offset);
1521 }
1522 #endif
1525 uint BoxLockNode::size(PhaseRegAlloc *ra_) const {
1526 return 4;
1527 }
1529 void BoxLockNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
1530 MacroAssembler _masm(&cbuf);
1531 int offset = ra_->reg2offset(in_RegMask(0).find_first_elem());
1532 int reg = ra_->get_encode(this);
1534 __ addi(as_Register(reg), SP, offset);
1535 /*
1536 if( offset >= 128 ) {
1537 emit_opcode(cbuf, 0x8D); // LEA reg,[SP+offset]
1538 emit_rm(cbuf, 0x2, reg, 0x04);
1539 emit_rm(cbuf, 0x0, 0x04, SP_enc);
1540 emit_d32(cbuf, offset);
1541 }
1542 else {
1543 emit_opcode(cbuf, 0x8D); // LEA reg,[SP+offset]
1544 emit_rm(cbuf, 0x1, reg, 0x04);
1545 emit_rm(cbuf, 0x0, 0x04, SP_enc);
1546 emit_d8(cbuf, offset);
1547 }
1548 */
1549 }
1552 //static int sizeof_FFree_Float_Stack_All = -1;
1554 int MachCallRuntimeNode::ret_addr_offset() {
1555 //lui
1556 //ori
1557 //dsll
1558 //ori
1559 //jalr
1560 //nop
1561 assert(NativeCall::instruction_size == 24, "in MachCallRuntimeNode::ret_addr_offset()");
1562 return NativeCall::instruction_size;
1563 // return 16;
1564 }
1570 //=============================================================================
1571 #ifndef PRODUCT
1572 void MachNopNode::format( PhaseRegAlloc *, outputStream* st ) const {
1573 st->print("NOP \t# %d bytes pad for loops and calls", 4 * _count);
1574 }
1575 #endif
1577 void MachNopNode::emit(CodeBuffer &cbuf, PhaseRegAlloc * ) const {
1578 MacroAssembler _masm(&cbuf);
1579 int i = 0;
1580 for(i = 0; i < _count; i++)
1581 __ nop();
1582 }
1584 uint MachNopNode::size(PhaseRegAlloc *) const {
1585 return 4 * _count;
1586 }
1587 const Pipeline* MachNopNode::pipeline() const {
1588 return MachNode::pipeline_class();
1589 }
1591 //=============================================================================
1593 //=============================================================================
1594 #ifndef PRODUCT
1595 void MachUEPNode::format( PhaseRegAlloc *ra_, outputStream* st ) const {
1596 st->print_cr("load_klass(AT, T0)");
1597 st->print_cr("\tbeq(AT, iCache, L)");
1598 st->print_cr("\tnop");
1599 st->print_cr("\tjmp(SharedRuntime::get_ic_miss_stub(), relocInfo::runtime_call_type)");
1600 st->print_cr("\tnop");
1601 st->print_cr("\tnop");
1602 st->print_cr(" L:");
1603 }
1604 #endif
1607 void MachUEPNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
1608 MacroAssembler _masm(&cbuf);
1609 #ifdef ASSERT
1610 //uint code_size = cbuf.code_size();
1611 #endif
1612 int ic_reg = Matcher::inline_cache_reg_encode();
1613 Label L;
1614 Register receiver = T0;
1615 Register iCache = as_Register(ic_reg);
1616 __ load_klass(AT, receiver);
1617 __ beq(AT, iCache, L);
1618 __ nop();
1620 __ relocate(relocInfo::runtime_call_type);
1621 __ li48(T9, (long)SharedRuntime::get_ic_miss_stub());
1622 __ jr(T9);
1623 __ nop();
1625 /* WARNING these NOPs are critical so that verified entry point is properly
1626 * 8 bytes aligned for patching by NativeJump::patch_verified_entry() */
1627 __ align(CodeEntryAlignment);
1628 __ bind(L);
1629 }
1631 uint MachUEPNode::size(PhaseRegAlloc *ra_) const {
1632 return MachNode::size(ra_);
1633 }
1637 //=============================================================================
1639 const RegMask& MachConstantBaseNode::_out_RegMask = P_REG_mask();
1641 int Compile::ConstantTable::calculate_table_base_offset() const {
1642 return 0; // absolute addressing, no offset
1643 }
1645 bool MachConstantBaseNode::requires_postalloc_expand() const { return false; }
1646 void MachConstantBaseNode::postalloc_expand(GrowableArray <Node *> *nodes, PhaseRegAlloc *ra_) {
1647 ShouldNotReachHere();
1648 }
1650 void MachConstantBaseNode::emit(CodeBuffer& cbuf, PhaseRegAlloc* ra_) const {
1651 Compile* C = ra_->C;
1652 Compile::ConstantTable& constant_table = C->constant_table();
1653 MacroAssembler _masm(&cbuf);
1655 Register Rtoc = as_Register(ra_->get_encode(this));
1656 CodeSection* consts_section = __ code()->consts();
1657 int consts_size = consts_section->align_at_start(consts_section->size());
1658 assert(constant_table.size() == consts_size, "must be equal");
1660 if (consts_section->size()) {
1661 // Materialize the constant table base.
1662 address baseaddr = consts_section->start() + -(constant_table.table_base_offset());
1663 // RelocationHolder rspec = internal_word_Relocation::spec(baseaddr);
1664 __ relocate(relocInfo::internal_pc_type);
1665 __ li48(Rtoc, (long)baseaddr);
1666 }
1667 }
1669 uint MachConstantBaseNode::size(PhaseRegAlloc* ra_) const {
1670 // li48 (4 insts)
1671 return 4 * 4;
1672 }
1674 #ifndef PRODUCT
1675 void MachConstantBaseNode::format(PhaseRegAlloc* ra_, outputStream* st) const {
1676 Register r = as_Register(ra_->get_encode(this));
1677 st->print("li48 %s, &constanttable (constant table base) @ MachConstantBaseNode", r->name());
1678 }
1679 #endif
1682 //=============================================================================
1683 #ifndef PRODUCT
1684 void MachPrologNode::format( PhaseRegAlloc *ra_, outputStream* st ) const {
1685 Compile* C = ra_->C;
1687 int framesize = C->frame_size_in_bytes();
1688 int bangsize = C->bang_size_in_bytes();
1689 assert((framesize & (StackAlignmentInBytes-1)) == 0, "frame size not aligned");
1691 // Calls to C2R adapters often do not accept exceptional returns.
1692 // We require that their callers must bang for them. But be careful, because
1693 // some VM calls (such as call site linkage) can use several kilobytes of
1694 // stack. But the stack safety zone should account for that.
1695 // See bugs 4446381, 4468289, 4497237.
1696 if (C->need_stack_bang(bangsize)) {
1697 st->print_cr("# stack bang"); st->print("\t");
1698 }
1699 if (UseLoongsonISA) {
1700 st->print("gssq RA, FP, %d(SP) @ MachPrologNode\n\t", -wordSize*2);
1701 } else {
1702 st->print("sd RA, %d(SP) @ MachPrologNode\n\t", -wordSize);
1703 st->print("sd FP, %d(SP) @ MachPrologNode\n\t", -wordSize*2);
1704 }
1705 st->print("daddiu FP, SP, -%d \n\t", wordSize*2);
1706 st->print("daddiu SP, SP, -%d \t",framesize);
1707 }
1708 #endif
1711 void MachPrologNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
1712 Compile* C = ra_->C;
1713 MacroAssembler _masm(&cbuf);
1715 int framesize = C->frame_size_in_bytes();
1716 int bangsize = C->bang_size_in_bytes();
1718 // __ verified_entry(framesize, C->need_stack_bang(bangsize)?bangsize:0, false);
1720 assert((framesize & (StackAlignmentInBytes-1)) == 0, "frame size not aligned");
1722 if (C->need_stack_bang(framesize)) {
1723 __ generate_stack_overflow_check(framesize);
1724 }
1726 if (UseLoongsonISA) {
1727 __ gssq(RA, FP, SP, -wordSize*2);
1728 } else {
1729 __ sd(RA, SP, -wordSize);
1730 __ sd(FP, SP, -wordSize*2);
1731 }
1732 __ daddiu(FP, SP, -wordSize*2);
1733 __ daddiu(SP, SP, -framesize);
1734 __ nop(); /* 2013.10.22 Jin: Make enough room for patch_verified_entry() */
1735 __ nop();
1737 C->set_frame_complete(cbuf.insts_size());
1738 if (C->has_mach_constant_base_node()) {
1739 // NOTE: We set the table base offset here because users might be
1740 // emitted before MachConstantBaseNode.
1741 Compile::ConstantTable& constant_table = C->constant_table();
1742 constant_table.set_table_base_offset(constant_table.calculate_table_base_offset());
1743 }
1745 }
1748 uint MachPrologNode::size(PhaseRegAlloc *ra_) const {
1749 //fprintf(stderr, "\nPrologNode::size(ra_)= %d \n", MachNode::size(ra_));//fujie debug
1750 return MachNode::size(ra_); // too many variables; just compute it the hard way
1751 }
1753 int MachPrologNode::reloc() const {
1754 return 0; // a large enough number
1755 }
1757 %}
1759 //----------ENCODING BLOCK-----------------------------------------------------
1760 // This block specifies the encoding classes used by the compiler to output
1761 // byte streams. Encoding classes generate functions which are called by
1762 // Machine Instruction Nodes in order to generate the bit encoding of the
1763 // instruction. Operands specify their base encoding interface with the
1764 // interface keyword. There are currently supported four interfaces,
1765 // REG_INTER, CONST_INTER, MEMORY_INTER, & COND_INTER. REG_INTER causes an
1766 // operand to generate a function which returns its register number when
1767 // queried. CONST_INTER causes an operand to generate a function which
1768 // returns the value of the constant when queried. MEMORY_INTER causes an
1769 // operand to generate four functions which return the Base Register, the
1770 // Index Register, the Scale Value, and the Offset Value of the operand when
1771 // queried. COND_INTER causes an operand to generate six functions which
1772 // return the encoding code (ie - encoding bits for the instruction)
1773 // associated with each basic boolean condition for a conditional instruction.
1774 // Instructions specify two basic values for encoding. They use the
1775 // ins_encode keyword to specify their encoding class (which must be one of
1776 // the class names specified in the encoding block), and they use the
1777 // opcode keyword to specify, in order, their primary, secondary, and
1778 // tertiary opcode. Only the opcode sections which a particular instruction
1779 // needs for encoding need to be specified.
1780 encode %{
1781 /*
1782 Alias:
1783 1044 b java.io.ObjectInputStream::readHandle (130 bytes)
1784 118 B14: # B19 B15 <- B13 Freq: 0.899955
1785 118 add S1, S2, V0 #@addP_reg_reg
1786 11c lb S0, [S1 + #-8257524] #@loadB
1787 120 BReq S0, #3, B19 #@branchConI_reg_imm P=0.100000 C=-1.000000
1788 */
1789 //Load byte signed
1790 enc_class load_B_enc (mRegI dst, memory mem) %{
1791 MacroAssembler _masm(&cbuf);
1792 int dst = $dst$$reg;
1793 int base = $mem$$base;
1794 int index = $mem$$index;
1795 int scale = $mem$$scale;
1796 int disp = $mem$$disp;
1798 if( index != 0 ) {
1799 if( Assembler::is_simm16(disp) ) {
1800 if( UseLoongsonISA ) {
1801 if (scale == 0) {
1802 __ gslbx(as_Register(dst), as_Register(base), as_Register(index), disp);
1803 } else {
1804 __ dsll(AT, as_Register(index), scale);
1805 __ gslbx(as_Register(dst), as_Register(base), AT, disp);
1806 }
1807 } else {
1808 if (scale == 0) {
1809 __ addu(AT, as_Register(base), as_Register(index));
1810 } else {
1811 __ dsll(AT, as_Register(index), scale);
1812 __ addu(AT, as_Register(base), AT);
1813 }
1814 __ lb(as_Register(dst), AT, disp);
1815 }
1816 } else {
1817 if (scale == 0) {
1818 __ addu(AT, as_Register(base), as_Register(index));
1819 } else {
1820 __ dsll(AT, as_Register(index), scale);
1821 __ addu(AT, as_Register(base), AT);
1822 }
1823 __ move(T9, disp);
1824 if( UseLoongsonISA ) {
1825 __ gslbx(as_Register(dst), AT, T9, 0);
1826 } else {
1827 __ addu(AT, AT, T9);
1828 __ lb(as_Register(dst), AT, 0);
1829 }
1830 }
1831 } else {
1832 if( Assembler::is_simm16(disp) ) {
1833 __ lb(as_Register(dst), as_Register(base), disp);
1834 } else {
1835 __ move(T9, disp);
1836 if( UseLoongsonISA ) {
1837 __ gslbx(as_Register(dst), as_Register(base), T9, 0);
1838 } else {
1839 __ addu(AT, as_Register(base), T9);
1840 __ lb(as_Register(dst), AT, 0);
1841 }
1842 }
1843 }
1844 %}
1846 //Load byte unsigned
1847 enc_class load_UB_enc (mRegI dst, memory mem) %{
1848 MacroAssembler _masm(&cbuf);
1849 int dst = $dst$$reg;
1850 int base = $mem$$base;
1851 int index = $mem$$index;
1852 int scale = $mem$$scale;
1853 int disp = $mem$$disp;
1855 if( index != 0 ) {
1856 if (scale == 0) {
1857 __ daddu(AT, as_Register(base), as_Register(index));
1858 } else {
1859 __ dsll(AT, as_Register(index), scale);
1860 __ daddu(AT, as_Register(base), AT);
1861 }
1862 if( Assembler::is_simm16(disp) ) {
1863 __ lbu(as_Register(dst), AT, disp);
1864 } else {
1865 __ move(T9, disp);
1866 __ daddu(AT, AT, T9);
1867 __ lbu(as_Register(dst), AT, 0);
1868 }
1869 } else {
1870 if( Assembler::is_simm16(disp) ) {
1871 __ lbu(as_Register(dst), as_Register(base), disp);
1872 } else {
1873 __ move(T9, disp);
1874 __ daddu(AT, as_Register(base), T9);
1875 __ lbu(as_Register(dst), AT, 0);
1876 }
1877 }
1878 %}
1880 enc_class store_B_reg_enc (memory mem, mRegI src) %{
1881 MacroAssembler _masm(&cbuf);
1882 int src = $src$$reg;
1883 int base = $mem$$base;
1884 int index = $mem$$index;
1885 int scale = $mem$$scale;
1886 int disp = $mem$$disp;
1888 if( index != 0 ) {
1889 if (scale == 0) {
1890 if( Assembler::is_simm(disp, 8) ) {
1891 if (UseLoongsonISA) {
1892 __ gssbx(as_Register(src), as_Register(base), as_Register(index), disp);
1893 } else {
1894 __ addu(AT, as_Register(base), as_Register(index));
1895 __ sb(as_Register(src), AT, disp);
1896 }
1897 } else if( Assembler::is_simm16(disp) ) {
1898 __ addu(AT, as_Register(base), as_Register(index));
1899 __ sb(as_Register(src), AT, disp);
1900 } else {
1901 __ addu(AT, as_Register(base), as_Register(index));
1902 __ move(T9, disp);
1903 if (UseLoongsonISA) {
1904 __ gssbx(as_Register(src), AT, T9, 0);
1905 } else {
1906 __ addu(AT, AT, T9);
1907 __ sb(as_Register(src), AT, 0);
1908 }
1909 }
1910 } else {
1911 __ dsll(AT, as_Register(index), scale);
1912 if( Assembler::is_simm(disp, 8) ) {
1913 if (UseLoongsonISA) {
1914 __ gssbx(as_Register(src), AT, as_Register(base), disp);
1915 } else {
1916 __ addu(AT, as_Register(base), AT);
1917 __ sb(as_Register(src), AT, disp);
1918 }
1919 } else if( Assembler::is_simm16(disp) ) {
1920 __ addu(AT, as_Register(base), AT);
1921 __ sb(as_Register(src), AT, disp);
1922 } else {
1923 __ addu(AT, as_Register(base), AT);
1924 __ move(T9, disp);
1925 if (UseLoongsonISA) {
1926 __ gssbx(as_Register(src), AT, T9, 0);
1927 } else {
1928 __ addu(AT, AT, T9);
1929 __ sb(as_Register(src), AT, 0);
1930 }
1931 }
1932 }
1933 } else {
1934 if( Assembler::is_simm16(disp) ) {
1935 __ sb(as_Register(src), as_Register(base), disp);
1936 } else {
1937 __ move(T9, disp);
1938 if (UseLoongsonISA) {
1939 __ gssbx(as_Register(src), as_Register(base), T9, 0);
1940 } else {
1941 __ addu(AT, as_Register(base), T9);
1942 __ sb(as_Register(src), AT, 0);
1943 }
1944 }
1945 }
1946 %}
1948 enc_class store_B_immI_enc (memory mem, immI8 src) %{
1949 MacroAssembler _masm(&cbuf);
1950 int base = $mem$$base;
1951 int index = $mem$$index;
1952 int scale = $mem$$scale;
1953 int disp = $mem$$disp;
1954 int value = $src$$constant;
1956 if( index != 0 ) {
1957 if (!UseLoongsonISA) {
1958 if (scale == 0) {
1959 __ daddu(AT, as_Register(base), as_Register(index));
1960 } else {
1961 __ dsll(AT, as_Register(index), scale);
1962 __ daddu(AT, as_Register(base), AT);
1963 }
1964 if( Assembler::is_simm16(disp) ) {
1965 if (value == 0) {
1966 __ sb(R0, AT, disp);
1967 } else {
1968 __ move(T9, value);
1969 __ sb(T9, AT, disp);
1970 }
1971 } else {
1972 if (value == 0) {
1973 __ move(T9, disp);
1974 __ daddu(AT, AT, T9);
1975 __ sb(R0, AT, 0);
1976 } else {
1977 __ move(T9, disp);
1978 __ daddu(AT, AT, T9);
1979 __ move(T9, value);
1980 __ sb(T9, AT, 0);
1981 }
1982 }
1983 } else {
1985 if (scale == 0) {
1986 if( Assembler::is_simm(disp, 8) ) {
1987 if (value == 0) {
1988 __ gssbx(R0, as_Register(base), as_Register(index), disp);
1989 } else {
1990 __ move(T9, value);
1991 __ gssbx(T9, as_Register(base), as_Register(index), disp);
1992 }
1993 } else if( Assembler::is_simm16(disp) ) {
1994 __ daddu(AT, as_Register(base), as_Register(index));
1995 if (value == 0) {
1996 __ sb(R0, AT, disp);
1997 } else {
1998 __ move(T9, value);
1999 __ sb(T9, AT, disp);
2000 }
2001 } else {
2002 if (value == 0) {
2003 __ daddu(AT, as_Register(base), as_Register(index));
2004 __ move(T9, disp);
2005 __ gssbx(R0, AT, T9, 0);
2006 } else {
2007 __ move(AT, disp);
2008 __ move(T9, value);
2009 __ daddu(AT, as_Register(base), AT);
2010 __ gssbx(T9, AT, as_Register(index), 0);
2011 }
2012 }
2014 } else {
2016 if( Assembler::is_simm(disp, 8) ) {
2017 __ dsll(AT, as_Register(index), scale);
2018 if (value == 0) {
2019 __ gssbx(R0, as_Register(base), AT, disp);
2020 } else {
2021 __ move(T9, value);
2022 __ gssbx(T9, as_Register(base), AT, disp);
2023 }
2024 } else if( Assembler::is_simm16(disp) ) {
2025 __ dsll(AT, as_Register(index), scale);
2026 __ daddu(AT, as_Register(base), AT);
2027 if (value == 0) {
2028 __ sb(R0, AT, disp);
2029 } else {
2030 __ move(T9, value);
2031 __ sb(T9, AT, disp);
2032 }
2033 } else {
2034 __ dsll(AT, as_Register(index), scale);
2035 if (value == 0) {
2036 __ daddu(AT, as_Register(base), AT);
2037 __ move(T9, disp);
2038 __ gssbx(R0, AT, T9, 0);
2039 } else {
2040 __ move(T9, disp);
2041 __ daddu(AT, AT, T9);
2042 __ move(T9, value);
2043 __ gssbx(T9, as_Register(base), AT, 0);
2044 }
2045 }
2046 }
2047 }
2048 } else {
2049 if( Assembler::is_simm16(disp) ) {
2050 if (value == 0) {
2051 __ sb(R0, as_Register(base), disp);
2052 } else {
2053 __ move(AT, value);
2054 __ sb(AT, as_Register(base), disp);
2055 }
2056 } else {
2057 if (value == 0) {
2058 __ move(T9, disp);
2059 if (UseLoongsonISA) {
2060 __ gssbx(R0, as_Register(base), T9, 0);
2061 } else {
2062 __ daddu(AT, as_Register(base), T9);
2063 __ sb(R0, AT, 0);
2064 }
2065 } else {
2066 __ move(T9, disp);
2067 if (UseLoongsonISA) {
2068 __ move(AT, value);
2069 __ gssbx(AT, as_Register(base), T9, 0);
2070 } else {
2071 __ daddu(AT, as_Register(base), T9);
2072 __ move(T9, value);
2073 __ sb(T9, AT, 0);
2074 }
2075 }
2076 }
2077 }
2078 %}
2081 enc_class store_B_immI_enc_sync (memory mem, immI8 src) %{
2082 MacroAssembler _masm(&cbuf);
2083 int base = $mem$$base;
2084 int index = $mem$$index;
2085 int scale = $mem$$scale;
2086 int disp = $mem$$disp;
2087 int value = $src$$constant;
2089 if( index != 0 ) {
2090 if (scale == 0) {
2091 __ daddu(AT, as_Register(base), as_Register(index));
2092 } else {
2093 __ dsll(AT, as_Register(index), scale);
2094 __ daddu(AT, as_Register(base), AT);
2095 }
2096 if( Assembler::is_simm16(disp) ) {
2097 if (value == 0) {
2098 __ sb(R0, AT, disp);
2099 } else {
2100 __ move(T9, value);
2101 __ sb(T9, AT, disp);
2102 }
2103 } else {
2104 if (value == 0) {
2105 __ move(T9, disp);
2106 __ daddu(AT, AT, T9);
2107 __ sb(R0, AT, 0);
2108 } else {
2109 __ move(T9, disp);
2110 __ daddu(AT, AT, T9);
2111 __ move(T9, value);
2112 __ sb(T9, AT, 0);
2113 }
2114 }
2115 } else {
2116 if( Assembler::is_simm16(disp) ) {
2117 if (value == 0) {
2118 __ sb(R0, as_Register(base), disp);
2119 } else {
2120 __ move(AT, value);
2121 __ sb(AT, as_Register(base), disp);
2122 }
2123 } else {
2124 if (value == 0) {
2125 __ move(T9, disp);
2126 __ daddu(AT, as_Register(base), T9);
2127 __ sb(R0, AT, 0);
2128 } else {
2129 __ move(T9, disp);
2130 __ daddu(AT, as_Register(base), T9);
2131 __ move(T9, value);
2132 __ sb(T9, AT, 0);
2133 }
2134 }
2135 }
2137 __ sync();
2138 %}
2140 // Load Short (16bit signed)
2141 enc_class load_S_enc (mRegI dst, memory mem) %{
2142 MacroAssembler _masm(&cbuf);
2143 int dst = $dst$$reg;
2144 int base = $mem$$base;
2145 int index = $mem$$index;
2146 int scale = $mem$$scale;
2147 int disp = $mem$$disp;
2149 if( index != 0 ) {
2150 if (scale == 0) {
2151 __ daddu(AT, as_Register(base), as_Register(index));
2152 } else {
2153 __ dsll(AT, as_Register(index), scale);
2154 __ daddu(AT, as_Register(base), AT);
2155 }
2156 if( Assembler::is_simm16(disp) ) {
2157 __ lh(as_Register(dst), AT, disp);
2158 } else {
2159 __ move(T9, disp);
2160 __ addu(AT, AT, T9);
2161 __ lh(as_Register(dst), AT, 0);
2162 }
2163 } else {
2164 if( Assembler::is_simm16(disp) ) {
2165 __ lh(as_Register(dst), as_Register(base), disp);
2166 } else {
2167 __ move(T9, disp);
2168 __ addu(AT, as_Register(base), T9);
2169 __ lh(as_Register(dst), AT, 0);
2170 }
2171 }
2172 %}
2174 // Load Char (16bit unsigned)
2175 enc_class load_C_enc (mRegI dst, memory mem) %{
2176 MacroAssembler _masm(&cbuf);
2177 int dst = $dst$$reg;
2178 int base = $mem$$base;
2179 int index = $mem$$index;
2180 int scale = $mem$$scale;
2181 int disp = $mem$$disp;
2183 if( index != 0 ) {
2184 if (scale == 0) {
2185 __ daddu(AT, as_Register(base), as_Register(index));
2186 } else {
2187 __ dsll(AT, as_Register(index), scale);
2188 __ daddu(AT, as_Register(base), AT);
2189 }
2190 if( Assembler::is_simm16(disp) ) {
2191 __ lhu(as_Register(dst), AT, disp);
2192 } else {
2193 __ move(T9, disp);
2194 __ addu(AT, AT, T9);
2195 __ lhu(as_Register(dst), AT, 0);
2196 }
2197 } else {
2198 if( Assembler::is_simm16(disp) ) {
2199 __ lhu(as_Register(dst), as_Register(base), disp);
2200 } else {
2201 __ move(T9, disp);
2202 __ daddu(AT, as_Register(base), T9);
2203 __ lhu(as_Register(dst), AT, 0);
2204 }
2205 }
2206 %}
2208 // Store Char (16bit unsigned)
2209 enc_class store_C_reg_enc (memory mem, mRegI src) %{
2210 MacroAssembler _masm(&cbuf);
2211 int src = $src$$reg;
2212 int base = $mem$$base;
2213 int index = $mem$$index;
2214 int scale = $mem$$scale;
2215 int disp = $mem$$disp;
2217 if( index != 0 ) {
2218 if( Assembler::is_simm16(disp) ) {
2219 if( UseLoongsonISA && Assembler::is_simm(disp, 8) ) {
2220 if (scale == 0) {
2221 __ gsshx(as_Register(src), as_Register(base), as_Register(index), disp);
2222 } else {
2223 __ dsll(AT, as_Register(index), scale);
2224 __ gsshx(as_Register(src), as_Register(base), AT, disp);
2225 }
2226 } else {
2227 if (scale == 0) {
2228 __ addu(AT, as_Register(base), as_Register(index));
2229 } else {
2230 __ dsll(AT, as_Register(index), scale);
2231 __ addu(AT, as_Register(base), AT);
2232 }
2233 __ sh(as_Register(src), AT, disp);
2234 }
2235 } else {
2236 if (scale == 0) {
2237 __ addu(AT, as_Register(base), as_Register(index));
2238 } else {
2239 __ dsll(AT, as_Register(index), scale);
2240 __ addu(AT, as_Register(base), AT);
2241 }
2242 __ move(T9, disp);
2243 if( UseLoongsonISA ) {
2244 __ gsshx(as_Register(src), AT, T9, 0);
2245 } else {
2246 __ addu(AT, AT, T9);
2247 __ sh(as_Register(src), AT, 0);
2248 }
2249 }
2250 } else {
2251 if( Assembler::is_simm16(disp) ) {
2252 __ sh(as_Register(src), as_Register(base), disp);
2253 } else {
2254 __ move(T9, disp);
2255 if( UseLoongsonISA ) {
2256 __ gsshx(as_Register(src), as_Register(base), T9, 0);
2257 } else {
2258 __ addu(AT, as_Register(base), T9);
2259 __ sh(as_Register(src), AT, 0);
2260 }
2261 }
2262 }
2263 %}
2265 enc_class store_C0_enc (memory mem) %{
2266 MacroAssembler _masm(&cbuf);
2267 int base = $mem$$base;
2268 int index = $mem$$index;
2269 int scale = $mem$$scale;
2270 int disp = $mem$$disp;
2272 if( index != 0 ) {
2273 if( Assembler::is_simm16(disp) ) {
2274 if( UseLoongsonISA && Assembler::is_simm(disp, 8) ) {
2275 if (scale == 0) {
2276 __ gsshx(R0, as_Register(base), as_Register(index), disp);
2277 } else {
2278 __ dsll(AT, as_Register(index), scale);
2279 __ gsshx(R0, as_Register(base), AT, disp);
2280 }
2281 } else {
2282 if (scale == 0) {
2283 __ addu(AT, as_Register(base), as_Register(index));
2284 } else {
2285 __ dsll(AT, as_Register(index), scale);
2286 __ addu(AT, as_Register(base), AT);
2287 }
2288 __ sh(R0, AT, disp);
2289 }
2290 } else {
2291 if (scale == 0) {
2292 __ addu(AT, as_Register(base), as_Register(index));
2293 } else {
2294 __ dsll(AT, as_Register(index), scale);
2295 __ addu(AT, as_Register(base), AT);
2296 }
2297 __ move(T9, disp);
2298 if( UseLoongsonISA ) {
2299 __ gsshx(R0, AT, T9, 0);
2300 } else {
2301 __ addu(AT, AT, T9);
2302 __ sh(R0, AT, 0);
2303 }
2304 }
2305 } else {
2306 if( Assembler::is_simm16(disp) ) {
2307 __ sh(R0, as_Register(base), disp);
2308 } else {
2309 __ move(T9, disp);
2310 if( UseLoongsonISA ) {
2311 __ gsshx(R0, as_Register(base), T9, 0);
2312 } else {
2313 __ addu(AT, as_Register(base), T9);
2314 __ sh(R0, AT, 0);
2315 }
2316 }
2317 }
2318 %}
2320 enc_class load_I_enc (mRegI dst, memory mem) %{
2321 MacroAssembler _masm(&cbuf);
2322 int dst = $dst$$reg;
2323 int base = $mem$$base;
2324 int index = $mem$$index;
2325 int scale = $mem$$scale;
2326 int disp = $mem$$disp;
2328 if( index != 0 ) {
2329 if( Assembler::is_simm16(disp) ) {
2330 if( UseLoongsonISA && Assembler::is_simm(disp, 8) ) {
2331 if (scale == 0) {
2332 __ gslwx(as_Register(dst), as_Register(base), as_Register(index), disp);
2333 } else {
2334 __ dsll(AT, as_Register(index), scale);
2335 __ gslwx(as_Register(dst), as_Register(base), AT, disp);
2336 }
2337 } else {
2338 if (scale == 0) {
2339 __ addu(AT, as_Register(base), as_Register(index));
2340 } else {
2341 __ dsll(AT, as_Register(index), scale);
2342 __ addu(AT, as_Register(base), AT);
2343 }
2344 __ lw(as_Register(dst), AT, disp);
2345 }
2346 } else {
2347 if (scale == 0) {
2348 __ addu(AT, as_Register(base), as_Register(index));
2349 } else {
2350 __ dsll(AT, as_Register(index), scale);
2351 __ addu(AT, as_Register(base), AT);
2352 }
2353 __ move(T9, disp);
2354 if( UseLoongsonISA ) {
2355 __ gslwx(as_Register(dst), AT, T9, 0);
2356 } else {
2357 __ addu(AT, AT, T9);
2358 __ lw(as_Register(dst), AT, 0);
2359 }
2360 }
2361 } else {
2362 if( Assembler::is_simm16(disp) ) {
2363 __ lw(as_Register(dst), as_Register(base), disp);
2364 } else {
2365 __ move(T9, disp);
2366 if( UseLoongsonISA ) {
2367 __ gslwx(as_Register(dst), as_Register(base), T9, 0);
2368 } else {
2369 __ addu(AT, as_Register(base), T9);
2370 __ lw(as_Register(dst), AT, 0);
2371 }
2372 }
2373 }
2374 %}
2376 enc_class store_I_reg_enc (memory mem, mRegI src) %{
2377 MacroAssembler _masm(&cbuf);
2378 int src = $src$$reg;
2379 int base = $mem$$base;
2380 int index = $mem$$index;
2381 int scale = $mem$$scale;
2382 int disp = $mem$$disp;
2384 if( index != 0 ) {
2385 if( Assembler::is_simm16(disp) ) {
2386 if( UseLoongsonISA && Assembler::is_simm(disp, 8) ) {
2387 if (scale == 0) {
2388 __ gsswx(as_Register(src), as_Register(base), as_Register(index), disp);
2389 } else {
2390 __ dsll(AT, as_Register(index), scale);
2391 __ gsswx(as_Register(src), as_Register(base), AT, disp);
2392 }
2393 } else {
2394 if (scale == 0) {
2395 __ addu(AT, as_Register(base), as_Register(index));
2396 } else {
2397 __ dsll(AT, as_Register(index), scale);
2398 __ addu(AT, as_Register(base), AT);
2399 }
2400 __ sw(as_Register(src), AT, disp);
2401 }
2402 } else {
2403 if (scale == 0) {
2404 __ addu(AT, as_Register(base), as_Register(index));
2405 } else {
2406 __ dsll(AT, as_Register(index), scale);
2407 __ addu(AT, as_Register(base), AT);
2408 }
2409 __ move(T9, disp);
2410 if( UseLoongsonISA ) {
2411 __ gsswx(as_Register(src), AT, T9, 0);
2412 } else {
2413 __ addu(AT, AT, T9);
2414 __ sw(as_Register(src), AT, 0);
2415 }
2416 }
2417 } else {
2418 if( Assembler::is_simm16(disp) ) {
2419 __ sw(as_Register(src), as_Register(base), disp);
2420 } else {
2421 __ move(T9, disp);
2422 if( UseLoongsonISA ) {
2423 __ gsswx(as_Register(src), as_Register(base), T9, 0);
2424 } else {
2425 __ addu(AT, as_Register(base), T9);
2426 __ sw(as_Register(src), AT, 0);
2427 }
2428 }
2429 }
2430 %}
2432 enc_class store_I_immI_enc (memory mem, immI src) %{
2433 MacroAssembler _masm(&cbuf);
2434 int base = $mem$$base;
2435 int index = $mem$$index;
2436 int scale = $mem$$scale;
2437 int disp = $mem$$disp;
2438 int value = $src$$constant;
2440 if( index != 0 ) {
2441 if (scale == 0) {
2442 __ daddu(AT, as_Register(base), as_Register(index));
2443 } else {
2444 __ dsll(AT, as_Register(index), scale);
2445 __ daddu(AT, as_Register(base), AT);
2446 }
2447 if( Assembler::is_simm16(disp) ) {
2448 if (value == 0) {
2449 __ sw(R0, AT, disp);
2450 } else {
2451 __ move(T9, value);
2452 __ sw(T9, AT, disp);
2453 }
2454 } else {
2455 if (value == 0) {
2456 __ move(T9, disp);
2457 __ addu(AT, AT, T9);
2458 __ sw(R0, AT, 0);
2459 } else {
2460 __ move(T9, disp);
2461 __ addu(AT, AT, T9);
2462 __ move(T9, value);
2463 __ sw(T9, AT, 0);
2464 }
2465 }
2466 } else {
2467 if( Assembler::is_simm16(disp) ) {
2468 if (value == 0) {
2469 __ sw(R0, as_Register(base), disp);
2470 } else {
2471 __ move(AT, value);
2472 __ sw(AT, as_Register(base), disp);
2473 }
2474 } else {
2475 if (value == 0) {
2476 __ move(T9, disp);
2477 __ addu(AT, as_Register(base), T9);
2478 __ sw(R0, AT, 0);
2479 } else {
2480 __ move(T9, disp);
2481 __ addu(AT, as_Register(base), T9);
2482 __ move(T9, value);
2483 __ sw(T9, AT, 0);
2484 }
2485 }
2486 }
2487 %}
2489 enc_class load_N_enc (mRegN dst, memory mem) %{
2490 MacroAssembler _masm(&cbuf);
2491 int dst = $dst$$reg;
2492 int base = $mem$$base;
2493 int index = $mem$$index;
2494 int scale = $mem$$scale;
2495 int disp = $mem$$disp;
2496 relocInfo::relocType disp_reloc = $mem->disp_reloc();
2497 assert(disp_reloc == relocInfo::none, "cannot have disp");
2499 if( index != 0 ) {
2500 if (scale == 0) {
2501 __ daddu(AT, as_Register(base), as_Register(index));
2502 } else {
2503 __ dsll(AT, as_Register(index), scale);
2504 __ daddu(AT, as_Register(base), AT);
2505 }
2506 if( Assembler::is_simm16(disp) ) {
2507 __ lwu(as_Register(dst), AT, disp);
2508 } else {
2509 __ li(T9, disp);
2510 __ daddu(AT, AT, T9);
2511 __ lwu(as_Register(dst), AT, 0);
2512 }
2513 } else {
2514 if( Assembler::is_simm16(disp) ) {
2515 __ lwu(as_Register(dst), as_Register(base), disp);
2516 } else {
2517 __ li(T9, disp);
2518 __ daddu(AT, as_Register(base), T9);
2519 __ lwu(as_Register(dst), AT, 0);
2520 }
2521 }
2523 %}
2526 enc_class load_P_enc (mRegP dst, memory mem) %{
2527 MacroAssembler _masm(&cbuf);
2528 int dst = $dst$$reg;
2529 int base = $mem$$base;
2530 int index = $mem$$index;
2531 int scale = $mem$$scale;
2532 int disp = $mem$$disp;
2533 relocInfo::relocType disp_reloc = $mem->disp_reloc();
2534 assert(disp_reloc == relocInfo::none, "cannot have disp");
2536 if( index != 0 ) {
2537 if ( UseLoongsonISA ) {
2538 if ( Assembler::is_simm(disp, 8) ) {
2539 if ( scale != 0 ) {
2540 __ dsll(AT, as_Register(index), scale);
2541 __ gsldx(as_Register(dst), as_Register(base), AT, disp);
2542 } else {
2543 __ gsldx(as_Register(dst), as_Register(base), as_Register(index), disp);
2544 }
2545 } else if ( Assembler::is_simm16(disp) ){
2546 if ( scale != 0 ) {
2547 __ dsll(AT, as_Register(index), scale);
2548 __ daddu(AT, AT, as_Register(base));
2549 } else {
2550 __ daddu(AT, as_Register(index), as_Register(base));
2551 }
2552 __ ld(as_Register(dst), AT, disp);
2553 } else {
2554 if ( scale != 0 ) {
2555 __ dsll(AT, as_Register(index), scale);
2556 __ move(T9, disp);
2557 __ daddu(AT, AT, T9);
2558 } else {
2559 __ move(T9, disp);
2560 __ daddu(AT, as_Register(index), T9);
2561 }
2562 __ gsldx(as_Register(dst), as_Register(base), AT, 0);
2563 }
2564 } else { //not use loongson isa
2565 if (scale == 0) {
2566 __ daddu(AT, as_Register(base), as_Register(index));
2567 } else {
2568 __ dsll(AT, as_Register(index), scale);
2569 __ daddu(AT, as_Register(base), AT);
2570 }
2571 if( Assembler::is_simm16(disp) ) {
2572 __ ld(as_Register(dst), AT, disp);
2573 } else {
2574 __ li(T9, disp);
2575 __ daddu(AT, AT, T9);
2576 __ ld(as_Register(dst), AT, 0);
2577 }
2578 }
2579 } else {
2580 if ( UseLoongsonISA ) {
2581 if ( Assembler::is_simm16(disp) ){
2582 __ ld(as_Register(dst), as_Register(base), disp);
2583 } else {
2584 __ li(T9, disp);
2585 __ gsldx(as_Register(dst), as_Register(base), T9, 0);
2586 }
2587 } else { //not use loongson isa
2588 if( Assembler::is_simm16(disp) ) {
2589 __ ld(as_Register(dst), as_Register(base), disp);
2590 } else {
2591 __ li(T9, disp);
2592 __ daddu(AT, as_Register(base), T9);
2593 __ ld(as_Register(dst), AT, 0);
2594 }
2595 }
2596 }
2597 // if( disp_reloc != relocInfo::none) __ ld(as_Register(dst), as_Register(dst), 0);
2598 %}
2600 enc_class store_P_reg_enc (memory mem, mRegP src) %{
2601 MacroAssembler _masm(&cbuf);
2602 int src = $src$$reg;
2603 int base = $mem$$base;
2604 int index = $mem$$index;
2605 int scale = $mem$$scale;
2606 int disp = $mem$$disp;
2608 if( index != 0 ) {
2609 if ( UseLoongsonISA ){
2610 if ( Assembler::is_simm(disp, 8) ) {
2611 if ( scale == 0 ) {
2612 __ gssdx(as_Register(src), as_Register(base), as_Register(index), disp);
2613 } else {
2614 __ dsll(AT, as_Register(index), scale);
2615 __ gssdx(as_Register(src), as_Register(base), AT, disp);
2616 }
2617 } else if ( Assembler::is_simm16(disp) ) {
2618 if ( scale == 0 ) {
2619 __ daddu(AT, as_Register(base), as_Register(index));
2620 } else {
2621 __ dsll(AT, as_Register(index), scale);
2622 __ daddu(AT, as_Register(base), AT);
2623 }
2624 __ sd(as_Register(src), AT, disp);
2625 } else {
2626 if ( scale == 0 ) {
2627 __ move(T9, disp);
2628 __ daddu(AT, as_Register(index), T9);
2629 } else {
2630 __ dsll(AT, as_Register(index), scale);
2631 __ move(T9, disp);
2632 __ daddu(AT, AT, T9);
2633 }
2634 __ gssdx(as_Register(src), as_Register(base), AT, 0);
2635 }
2636 } else { //not use loongson isa
2637 if (scale == 0) {
2638 __ daddu(AT, as_Register(base), as_Register(index));
2639 } else {
2640 __ dsll(AT, as_Register(index), scale);
2641 __ daddu(AT, as_Register(base), AT);
2642 }
2643 if( Assembler::is_simm16(disp) ) {
2644 __ sd(as_Register(src), AT, disp);
2645 } else {
2646 __ move(T9, disp);
2647 __ daddu(AT, AT, T9);
2648 __ sd(as_Register(src), AT, 0);
2649 }
2650 }
2651 } else {
2652 if ( UseLoongsonISA ) {
2653 if ( Assembler::is_simm16(disp) ) {
2654 __ sd(as_Register(src), as_Register(base), disp);
2655 } else {
2656 __ move(T9, disp);
2657 __ gssdx(as_Register(src), as_Register(base), T9, 0);
2658 }
2659 } else {
2660 if( Assembler::is_simm16(disp) ) {
2661 __ sd(as_Register(src), as_Register(base), disp);
2662 } else {
2663 __ move(T9, disp);
2664 __ daddu(AT, as_Register(base), T9);
2665 __ sd(as_Register(src), AT, 0);
2666 }
2667 }
2668 }
2669 %}
2671 enc_class store_N_reg_enc (memory mem, mRegN src) %{
2672 MacroAssembler _masm(&cbuf);
2673 int src = $src$$reg;
2674 int base = $mem$$base;
2675 int index = $mem$$index;
2676 int scale = $mem$$scale;
2677 int disp = $mem$$disp;
2679 if( index != 0 ) {
2680 if ( UseLoongsonISA ){
2681 if ( Assembler::is_simm(disp, 8) ) {
2682 if ( scale == 0 ) {
2683 __ gsswx(as_Register(src), as_Register(base), as_Register(index), disp);
2684 } else {
2685 __ dsll(AT, as_Register(index), scale);
2686 __ gsswx(as_Register(src), as_Register(base), AT, disp);
2687 }
2688 } else if ( Assembler::is_simm16(disp) ) {
2689 if ( scale == 0 ) {
2690 __ daddu(AT, as_Register(base), as_Register(index));
2691 } else {
2692 __ dsll(AT, as_Register(index), scale);
2693 __ daddu(AT, as_Register(base), AT);
2694 }
2695 __ sw(as_Register(src), AT, disp);
2696 } else {
2697 if ( scale == 0 ) {
2698 __ move(T9, disp);
2699 __ daddu(AT, as_Register(index), T9);
2700 } else {
2701 __ dsll(AT, as_Register(index), scale);
2702 __ move(T9, disp);
2703 __ daddu(AT, AT, T9);
2704 }
2705 __ gsswx(as_Register(src), as_Register(base), AT, 0);
2706 }
2707 } else { //not use loongson isa
2708 if (scale == 0) {
2709 __ daddu(AT, as_Register(base), as_Register(index));
2710 } else {
2711 __ dsll(AT, as_Register(index), scale);
2712 __ daddu(AT, as_Register(base), AT);
2713 }
2714 if( Assembler::is_simm16(disp) ) {
2715 __ sw(as_Register(src), AT, disp);
2716 } else {
2717 __ move(T9, disp);
2718 __ addu(AT, AT, T9);
2719 __ sw(as_Register(src), AT, 0);
2720 }
2721 }
2722 } else {
2723 if ( UseLoongsonISA ) {
2724 if ( Assembler::is_simm16(disp) ) {
2725 __ sw(as_Register(src), as_Register(base), disp);
2726 } else {
2727 __ move(T9, disp);
2728 __ gsswx(as_Register(src), as_Register(base), T9, 0);
2729 }
2730 } else {
2731 if( Assembler::is_simm16(disp) ) {
2732 __ sw(as_Register(src), as_Register(base), disp);
2733 } else {
2734 __ move(T9, disp);
2735 __ addu(AT, as_Register(base), T9);
2736 __ sw(as_Register(src), AT, 0);
2737 }
2738 }
2739 }
2740 %}
2742 enc_class store_P_immP0_enc (memory mem) %{
2743 MacroAssembler _masm(&cbuf);
2744 int base = $mem$$base;
2745 int index = $mem$$index;
2746 int scale = $mem$$scale;
2747 int disp = $mem$$disp;
2749 if( index != 0 ) {
2750 if (scale == 0) {
2751 if( Assembler::is_simm16(disp) ) {
2752 if (UseLoongsonISA && Assembler::is_simm(disp, 8)) {
2753 __ gssdx(R0, as_Register(base), as_Register(index), disp);
2754 } else {
2755 __ daddu(AT, as_Register(base), as_Register(index));
2756 __ sd(R0, AT, disp);
2757 }
2758 } else {
2759 __ daddu(AT, as_Register(base), as_Register(index));
2760 __ move(T9, disp);
2761 if(UseLoongsonISA) {
2762 __ gssdx(R0, AT, T9, 0);
2763 } else {
2764 __ daddu(AT, AT, T9);
2765 __ sd(R0, AT, 0);
2766 }
2767 }
2768 } else {
2769 __ dsll(AT, as_Register(index), scale);
2770 if( Assembler::is_simm16(disp) ) {
2771 if (UseLoongsonISA && Assembler::is_simm(disp, 8)) {
2772 __ gssdx(R0, as_Register(base), AT, disp);
2773 } else {
2774 __ daddu(AT, as_Register(base), AT);
2775 __ sd(R0, AT, disp);
2776 }
2777 } else {
2778 __ daddu(AT, as_Register(base), AT);
2779 __ move(T9, disp);
2780 if (UseLoongsonISA) {
2781 __ gssdx(R0, AT, T9, 0);
2782 } else {
2783 __ daddu(AT, AT, T9);
2784 __ sd(R0, AT, 0);
2785 }
2786 }
2787 }
2788 } else {
2789 if( Assembler::is_simm16(disp) ) {
2790 __ sd(R0, as_Register(base), disp);
2791 } else {
2792 __ move(T9, disp);
2793 if (UseLoongsonISA) {
2794 __ gssdx(R0, as_Register(base), T9, 0);
2795 } else {
2796 __ daddu(AT, as_Register(base), T9);
2797 __ sd(R0, AT, 0);
2798 }
2799 }
2800 }
2801 %}
2804 enc_class storeImmN0_enc(memory mem, ImmN0 src) %{
2805 MacroAssembler _masm(&cbuf);
2806 int base = $mem$$base;
2807 int index = $mem$$index;
2808 int scale = $mem$$scale;
2809 int disp = $mem$$disp;
2811 if(index!=0){
2812 if (scale == 0) {
2813 __ daddu(AT, as_Register(base), as_Register(index));
2814 } else {
2815 __ dsll(AT, as_Register(index), scale);
2816 __ daddu(AT, as_Register(base), AT);
2817 }
2819 if( Assembler::is_simm16(disp) ) {
2820 __ sw(R0, AT, disp);
2821 } else {
2822 __ move(T9, disp);
2823 __ daddu(AT, AT, T9);
2824 __ sw(R0, AT, 0);
2825 }
2826 }
2827 else {
2828 if( Assembler::is_simm16(disp) ) {
2829 __ sw(R0, as_Register(base), disp);
2830 } else {
2831 __ move(T9, disp);
2832 __ daddu(AT, as_Register(base), T9);
2833 __ sw(R0, AT, 0);
2834 }
2835 }
2836 %}
2838 enc_class load_L_enc (mRegL dst, memory mem) %{
2839 MacroAssembler _masm(&cbuf);
2840 int base = $mem$$base;
2841 int index = $mem$$index;
2842 int scale = $mem$$scale;
2843 int disp = $mem$$disp;
2844 Register dst_reg = as_Register($dst$$reg);
2846 /*********************2013/03/27**************************
2847 * Jin: $base may contain a null object.
2848 * Server JIT force the exception_offset to be the pos of
2849 * the first instruction.
2850 * I insert such a 'null_check' at the beginning.
2851 *******************************************************/
2853 __ lw(AT, as_Register(base), 0);
2855 /*********************2012/10/04**************************
2856 * Error case found in SortTest
2857 * 337 b java.util.Arrays::sort1 (401 bytes)
2858 * B73:
2859 * d34 lw T4.lo, [T4 + #16] #@loadL-lo
2860 * lw T4.hi, [T4 + #16]+4 #@loadL-hi
2861 *
2862 * The original instructions generated here are :
2863 * __ lw(dst_lo, as_Register(base), disp);
2864 * __ lw(dst_hi, as_Register(base), disp + 4);
2865 *******************************************************/
2867 if( index != 0 ) {
2868 if (scale == 0) {
2869 __ daddu(AT, as_Register(base), as_Register(index));
2870 } else {
2871 __ dsll(AT, as_Register(index), scale);
2872 __ daddu(AT, as_Register(base), AT);
2873 }
2874 if( Assembler::is_simm16(disp) ) {
2875 __ ld(dst_reg, AT, disp);
2876 } else {
2877 __ move(T9, disp);
2878 __ daddu(AT, AT, T9);
2879 __ ld(dst_reg, AT, 0);
2880 }
2881 } else {
2882 if( Assembler::is_simm16(disp) ) {
2883 __ move(AT, as_Register(base));
2884 __ ld(dst_reg, AT, disp);
2885 } else {
2886 __ move(T9, disp);
2887 __ daddu(AT, as_Register(base), T9);
2888 __ ld(dst_reg, AT, 0);
2889 }
2890 }
2891 %}
2893 enc_class store_L_reg_enc (memory mem, mRegL src) %{
2894 MacroAssembler _masm(&cbuf);
2895 int base = $mem$$base;
2896 int index = $mem$$index;
2897 int scale = $mem$$scale;
2898 int disp = $mem$$disp;
2899 Register src_reg = as_Register($src$$reg);
2901 if( index != 0 ) {
2902 if (scale == 0) {
2903 __ daddu(AT, as_Register(base), as_Register(index));
2904 } else {
2905 __ dsll(AT, as_Register(index), scale);
2906 __ daddu(AT, as_Register(base), AT);
2907 }
2908 if( Assembler::is_simm16(disp) ) {
2909 __ sd(src_reg, AT, disp);
2910 } else {
2911 __ move(T9, disp);
2912 __ daddu(AT, AT, T9);
2913 __ sd(src_reg, AT, 0);
2914 }
2915 } else {
2916 if( Assembler::is_simm16(disp) ) {
2917 __ move(AT, as_Register(base));
2918 __ sd(src_reg, AT, disp);
2919 } else {
2920 __ move(T9, disp);
2921 __ daddu(AT, as_Register(base), T9);
2922 __ sd(src_reg, AT, 0);
2923 }
2924 }
2925 %}
2927 enc_class store_L_immL0_enc (memory mem, immL0 src) %{
2928 MacroAssembler _masm(&cbuf);
2929 int base = $mem$$base;
2930 int index = $mem$$index;
2931 int scale = $mem$$scale;
2932 int disp = $mem$$disp;
2934 if( index != 0 ) {
2935 if (scale == 0) {
2936 __ daddu(AT, as_Register(base), as_Register(index));
2937 } else {
2938 __ dsll(AT, as_Register(index), scale);
2939 __ daddu(AT, as_Register(base), AT);
2940 }
2941 if( Assembler::is_simm16(disp) ) {
2942 __ sd(R0, AT, disp);
2943 } else {
2944 __ move(T9, disp);
2945 __ addu(AT, AT, T9);
2946 __ sd(R0, AT, 0);
2947 }
2948 } else {
2949 if( Assembler::is_simm16(disp) ) {
2950 __ move(AT, as_Register(base));
2951 __ sd(R0, AT, disp);
2952 } else {
2953 __ move(T9, disp);
2954 __ addu(AT, as_Register(base), T9);
2955 __ sd(R0, AT, 0);
2956 }
2957 }
2958 %}
2960 enc_class load_F_enc (regF dst, memory mem) %{
2961 MacroAssembler _masm(&cbuf);
2962 int base = $mem$$base;
2963 int index = $mem$$index;
2964 int scale = $mem$$scale;
2965 int disp = $mem$$disp;
2966 FloatRegister dst = $dst$$FloatRegister;
2968 if( index != 0 ) {
2969 if( Assembler::is_simm16(disp) ) {
2970 if( UseLoongsonISA && Assembler::is_simm(disp, 8) ) {
2971 if (scale == 0) {
2972 __ gslwxc1(dst, as_Register(base), as_Register(index), disp);
2973 } else {
2974 __ dsll(AT, as_Register(index), scale);
2975 __ gslwxc1(dst, as_Register(base), AT, disp);
2976 }
2977 } else {
2978 if (scale == 0) {
2979 __ daddu(AT, as_Register(base), as_Register(index));
2980 } else {
2981 __ dsll(AT, as_Register(index), scale);
2982 __ daddu(AT, as_Register(base), AT);
2983 }
2984 __ lwc1(dst, AT, disp);
2985 }
2986 } else {
2987 if (scale == 0) {
2988 __ daddu(AT, as_Register(base), as_Register(index));
2989 } else {
2990 __ dsll(AT, as_Register(index), scale);
2991 __ daddu(AT, as_Register(base), AT);
2992 }
2993 __ move(T9, disp);
2994 if( UseLoongsonISA ) {
2995 __ gslwxc1(dst, AT, T9, 0);
2996 } else {
2997 __ daddu(AT, AT, T9);
2998 __ lwc1(dst, AT, 0);
2999 }
3000 }
3001 } else {
3002 if( Assembler::is_simm16(disp) ) {
3003 __ lwc1(dst, as_Register(base), disp);
3004 } else {
3005 __ move(T9, disp);
3006 if( UseLoongsonISA ) {
3007 __ gslwxc1(dst, as_Register(base), T9, 0);
3008 } else {
3009 __ daddu(AT, as_Register(base), T9);
3010 __ lwc1(dst, AT, 0);
3011 }
3012 }
3013 }
3014 %}
3016 enc_class store_F_reg_enc (memory mem, regF src) %{
3017 MacroAssembler _masm(&cbuf);
3018 int base = $mem$$base;
3019 int index = $mem$$index;
3020 int scale = $mem$$scale;
3021 int disp = $mem$$disp;
3022 FloatRegister src = $src$$FloatRegister;
3024 if( index != 0 ) {
3025 if( Assembler::is_simm16(disp) ) {
3026 if( UseLoongsonISA && Assembler::is_simm(disp, 8) ) {
3027 if (scale == 0) {
3028 __ gsswxc1(src, as_Register(base), as_Register(index), disp);
3029 } else {
3030 __ dsll(AT, as_Register(index), scale);
3031 __ gsswxc1(src, as_Register(base), AT, disp);
3032 }
3033 } else {
3034 if (scale == 0) {
3035 __ daddu(AT, as_Register(base), as_Register(index));
3036 } else {
3037 __ dsll(AT, as_Register(index), scale);
3038 __ daddu(AT, as_Register(base), AT);
3039 }
3040 __ swc1(src, AT, disp);
3041 }
3042 } else {
3043 if (scale == 0) {
3044 __ daddu(AT, as_Register(base), as_Register(index));
3045 } else {
3046 __ dsll(AT, as_Register(index), scale);
3047 __ daddu(AT, as_Register(base), AT);
3048 }
3049 __ move(T9, disp);
3050 if( UseLoongsonISA ) {
3051 __ gsswxc1(src, AT, T9, 0);
3052 } else {
3053 __ daddu(AT, AT, T9);
3054 __ swc1(src, AT, 0);
3055 }
3056 }
3057 } else {
3058 if( Assembler::is_simm16(disp) ) {
3059 __ swc1(src, as_Register(base), disp);
3060 } else {
3061 __ move(T9, disp);
3062 if( UseLoongsonISA ) {
3063 __ gslwxc1(src, as_Register(base), T9, 0);
3064 } else {
3065 __ daddu(AT, as_Register(base), T9);
3066 __ swc1(src, AT, 0);
3067 }
3068 }
3069 }
3070 %}
3072 enc_class load_D_enc (regD dst, memory mem) %{
3073 MacroAssembler _masm(&cbuf);
3074 int base = $mem$$base;
3075 int index = $mem$$index;
3076 int scale = $mem$$scale;
3077 int disp = $mem$$disp;
3078 FloatRegister dst_reg = as_FloatRegister($dst$$reg);
3080 if( index != 0 ) {
3081 if( Assembler::is_simm16(disp) ) {
3082 if( UseLoongsonISA && Assembler::is_simm(disp, 8) ) {
3083 if (scale == 0) {
3084 __ gsldxc1(dst_reg, as_Register(base), as_Register(index), disp);
3085 } else {
3086 __ dsll(AT, as_Register(index), scale);
3087 __ gsldxc1(dst_reg, as_Register(base), AT, disp);
3088 }
3089 } else {
3090 if (scale == 0) {
3091 __ daddu(AT, as_Register(base), as_Register(index));
3092 } else {
3093 __ dsll(AT, as_Register(index), scale);
3094 __ daddu(AT, as_Register(base), AT);
3095 }
3096 __ ldc1(dst_reg, AT, disp);
3097 }
3098 } else {
3099 if (scale == 0) {
3100 __ daddu(AT, as_Register(base), as_Register(index));
3101 } else {
3102 __ dsll(AT, as_Register(index), scale);
3103 __ daddu(AT, as_Register(base), AT);
3104 }
3105 __ move(T9, disp);
3106 if( UseLoongsonISA ) {
3107 __ gsldxc1(dst_reg, AT, T9, 0);
3108 } else {
3109 __ addu(AT, AT, T9);
3110 __ ldc1(dst_reg, AT, 0);
3111 }
3112 }
3113 } else {
3114 if( Assembler::is_simm16(disp) ) {
3115 __ ldc1(dst_reg, as_Register(base), disp);
3116 } else {
3117 __ move(T9, disp);
3118 if( UseLoongsonISA ) {
3119 __ gsldxc1(dst_reg, as_Register(base), T9, 0);
3120 } else {
3121 __ addu(AT, as_Register(base), T9);
3122 __ ldc1(dst_reg, AT, 0);
3123 }
3124 }
3125 }
3126 %}
3128 enc_class store_D_reg_enc (memory mem, regD src) %{
3129 MacroAssembler _masm(&cbuf);
3130 int base = $mem$$base;
3131 int index = $mem$$index;
3132 int scale = $mem$$scale;
3133 int disp = $mem$$disp;
3134 FloatRegister src_reg = as_FloatRegister($src$$reg);
3136 if( index != 0 ) {
3137 if( Assembler::is_simm16(disp) ) {
3138 if( UseLoongsonISA && Assembler::is_simm(disp, 8) ) {
3139 if (scale == 0) {
3140 __ gssdxc1(src_reg, as_Register(base), as_Register(index), disp);
3141 } else {
3142 __ dsll(AT, as_Register(index), scale);
3143 __ gssdxc1(src_reg, as_Register(base), AT, disp);
3144 }
3145 } else {
3146 if (scale == 0) {
3147 __ daddu(AT, as_Register(base), as_Register(index));
3148 } else {
3149 __ dsll(AT, as_Register(index), scale);
3150 __ daddu(AT, as_Register(base), AT);
3151 }
3152 __ sdc1(src_reg, AT, disp);
3153 }
3154 } else {
3155 if (scale == 0) {
3156 __ daddu(AT, as_Register(base), as_Register(index));
3157 } else {
3158 __ dsll(AT, as_Register(index), scale);
3159 __ daddu(AT, as_Register(base), AT);
3160 }
3161 __ move(T9, disp);
3162 if( UseLoongsonISA ) {
3163 __ gssdxc1(src_reg, AT, T9, 0);
3164 } else {
3165 __ addu(AT, AT, T9);
3166 __ sdc1(src_reg, AT, 0);
3167 }
3168 }
3169 } else {
3170 if( Assembler::is_simm16(disp) ) {
3171 __ sdc1(src_reg, as_Register(base), disp);
3172 } else {
3173 __ move(T9, disp);
3174 if( UseLoongsonISA ) {
3175 __ gssdxc1(src_reg, as_Register(base), T9, 0);
3176 } else {
3177 __ addu(AT, as_Register(base), T9);
3178 __ sdc1(src_reg, AT, 0);
3179 }
3180 }
3181 }
3182 %}
3184 enc_class Java_To_Runtime (method meth) %{ // CALL Java_To_Runtime, Java_To_Runtime_Leaf
3185 MacroAssembler _masm(&cbuf);
3186 // This is the instruction starting address for relocation info.
3187 __ block_comment("Java_To_Runtime");
3188 cbuf.set_insts_mark();
3189 __ relocate(relocInfo::runtime_call_type);
3191 __ li48(T9, (long)$meth$$method);
3192 __ jalr(T9);
3193 __ nop();
3194 %}
3196 enc_class Java_Static_Call (method meth) %{ // JAVA STATIC CALL
3197 // CALL to fixup routine. Fixup routine uses ScopeDesc info to determine
3198 // who we intended to call.
3199 MacroAssembler _masm(&cbuf);
3200 cbuf.set_insts_mark();
3202 if ( !_method ) {
3203 __ relocate(relocInfo::runtime_call_type);
3204 } else if(_optimized_virtual) {
3205 __ relocate(relocInfo::opt_virtual_call_type);
3206 } else {
3207 __ relocate(relocInfo::static_call_type);
3208 }
3210 __ li(T9, $meth$$method);
3211 __ jalr(T9);
3212 __ nop();
3213 if( _method ) { // Emit stub for static call
3214 emit_java_to_interp(cbuf);
3215 }
3216 %}
3219 /*
3220 * [Ref: LIR_Assembler::ic_call() ]
3221 */
3222 enc_class Java_Dynamic_Call (method meth) %{ // JAVA DYNAMIC CALL
3223 MacroAssembler _masm(&cbuf);
3224 __ block_comment("Java_Dynamic_Call");
3225 __ ic_call((address)$meth$$method);
3226 %}
3229 enc_class Set_Flags_After_Fast_Lock_Unlock(FlagsReg cr) %{
3230 Register flags = $cr$$Register;
3231 Label L;
3233 MacroAssembler _masm(&cbuf);
3235 __ addu(flags, R0, R0);
3236 __ beq(AT, R0, L);
3237 __ delayed()->nop();
3238 __ move(flags, 0xFFFFFFFF);
3239 __ bind(L);
3240 %}
3242 enc_class enc_PartialSubtypeCheck(mRegP result, mRegP sub, mRegP super, mRegI tmp) %{
3243 Register result = $result$$Register;
3244 Register sub = $sub$$Register;
3245 Register super = $super$$Register;
3246 Register length = $tmp$$Register;
3247 Register tmp = T9;
3248 Label miss;
3250 /* 2012/9/28 Jin: result may be the same as sub
3251 * 47c B40: # B21 B41 <- B20 Freq: 0.155379
3252 * 47c partialSubtypeCheck result=S1, sub=S1, super=S3, length=S0
3253 * 4bc mov S2, NULL #@loadConP
3254 * 4c0 beq S1, S2, B21 #@branchConP P=0.999999 C=-1.000000
3255 */
3256 MacroAssembler _masm(&cbuf);
3257 Label done;
3258 __ check_klass_subtype_slow_path(sub, super, length, tmp,
3259 NULL, &miss,
3260 /*set_cond_codes:*/ true);
3261 /* 2013/7/22 Jin: Refer to X86_64's RDI */
3262 __ move(result, 0);
3263 __ b(done);
3264 __ nop();
3266 __ bind(miss);
3267 __ move(result, 1);
3268 __ bind(done);
3269 %}
3271 %}
3274 //---------MIPS FRAME--------------------------------------------------------------
3275 // Definition of frame structure and management information.
3276 //
3277 // S T A C K L A Y O U T Allocators stack-slot number
3278 // | (to get allocators register number
3279 // G Owned by | | v add SharedInfo::stack0)
3280 // r CALLER | |
3281 // o | +--------+ pad to even-align allocators stack-slot
3282 // w V | pad0 | numbers; owned by CALLER
3283 // t -----------+--------+----> Matcher::_in_arg_limit, unaligned
3284 // h ^ | in | 5
3285 // | | args | 4 Holes in incoming args owned by SELF
3286 // | | old | | 3
3287 // | | SP-+--------+----> Matcher::_old_SP, even aligned
3288 // v | | ret | 3 return address
3289 // Owned by +--------+
3290 // Self | pad2 | 2 pad to align old SP
3291 // | +--------+ 1
3292 // | | locks | 0
3293 // | +--------+----> SharedInfo::stack0, even aligned
3294 // | | pad1 | 11 pad to align new SP
3295 // | +--------+
3296 // | | | 10
3297 // | | spills | 9 spills
3298 // V | | 8 (pad0 slot for callee)
3299 // -----------+--------+----> Matcher::_out_arg_limit, unaligned
3300 // ^ | out | 7
3301 // | | args | 6 Holes in outgoing args owned by CALLEE
3302 // Owned by new | |
3303 // Callee SP-+--------+----> Matcher::_new_SP, even aligned
3304 // | |
3305 //
3306 // Note 1: Only region 8-11 is determined by the allocator. Region 0-5 is
3307 // known from SELF's arguments and the Java calling convention.
3308 // Region 6-7 is determined per call site.
3309 // Note 2: If the calling convention leaves holes in the incoming argument
3310 // area, those holes are owned by SELF. Holes in the outgoing area
3311 // are owned by the CALLEE. Holes should not be nessecary in the
3312 // incoming area, as the Java calling convention is completely under
3313 // the control of the AD file. Doubles can be sorted and packed to
3314 // avoid holes. Holes in the outgoing arguments may be nessecary for
3315 // varargs C calling conventions.
3316 // Note 3: Region 0-3 is even aligned, with pad2 as needed. Region 3-5 is
3317 // even aligned with pad0 as needed.
3318 // Region 6 is even aligned. Region 6-7 is NOT even aligned;
3319 // region 6-11 is even aligned; it may be padded out more so that
3320 // the region from SP to FP meets the minimum stack alignment.
3321 // Note 4: For I2C adapters, the incoming FP may not meet the minimum stack
3322 // alignment. Region 11, pad1, may be dynamically extended so that
3323 // SP meets the minimum alignment.
3326 frame %{
3328 stack_direction(TOWARDS_LOW);
3330 // These two registers define part of the calling convention
3331 // between compiled code and the interpreter.
3332 // SEE StartI2CNode::calling_convention & StartC2INode::calling_convention & StartOSRNode::calling_convention
3333 // for more information. by yjl 3/16/2006
3335 inline_cache_reg(T1); // Inline Cache Register
3336 interpreter_method_oop_reg(S3); // Method Oop Register when calling interpreter
3337 /*
3338 inline_cache_reg(T1); // Inline Cache Register or methodOop for I2C
3339 interpreter_arg_ptr_reg(A0); // Argument pointer for I2C adapters
3340 */
3342 // Optional: name the operand used by cisc-spilling to access [stack_pointer + offset]
3343 cisc_spilling_operand_name(indOffset32);
3345 // Number of stack slots consumed by locking an object
3346 // generate Compile::sync_stack_slots
3347 #ifdef _LP64
3348 sync_stack_slots(2);
3349 #else
3350 sync_stack_slots(1);
3351 #endif
3353 frame_pointer(SP);
3355 // Interpreter stores its frame pointer in a register which is
3356 // stored to the stack by I2CAdaptors.
3357 // I2CAdaptors convert from interpreted java to compiled java.
3359 interpreter_frame_pointer(FP);
3361 // generate Matcher::stack_alignment
3362 stack_alignment(StackAlignmentInBytes); //wordSize = sizeof(char*);
3364 // Number of stack slots between incoming argument block and the start of
3365 // a new frame. The PROLOG must add this many slots to the stack. The
3366 // EPILOG must remove this many slots. Intel needs one slot for
3367 // return address.
3368 // generate Matcher::in_preserve_stack_slots
3369 //in_preserve_stack_slots(VerifyStackAtCalls + 2); //Now VerifyStackAtCalls is defined as false ! Leave one stack slot for ra and fp
3370 in_preserve_stack_slots(4); //Now VerifyStackAtCalls is defined as false ! Leave two stack slots for ra and fp
3372 // Number of outgoing stack slots killed above the out_preserve_stack_slots
3373 // for calls to C. Supports the var-args backing area for register parms.
3374 varargs_C_out_slots_killed(0);
3376 // The after-PROLOG location of the return address. Location of
3377 // return address specifies a type (REG or STACK) and a number
3378 // representing the register number (i.e. - use a register name) or
3379 // stack slot.
3380 // Ret Addr is on stack in slot 0 if no locks or verification or alignment.
3381 // Otherwise, it is above the locks and verification slot and alignment word
3382 //return_addr(STACK -1+ round_to(1+VerifyStackAtCalls+Compile::current()->sync()*Compile::current()->sync_stack_slots(),WordsPerLong));
3383 return_addr(REG RA);
3385 // Body of function which returns an integer array locating
3386 // arguments either in registers or in stack slots. Passed an array
3387 // of ideal registers called "sig" and a "length" count. Stack-slot
3388 // offsets are based on outgoing arguments, i.e. a CALLER setting up
3389 // arguments for a CALLEE. Incoming stack arguments are
3390 // automatically biased by the preserve_stack_slots field above.
3393 // will generated to Matcher::calling_convention(OptoRegPair *sig, uint length, bool is_outgoing)
3394 // StartNode::calling_convention call this. by yjl 3/16/2006
3395 calling_convention %{
3396 SharedRuntime::java_calling_convention(sig_bt, regs, length, false);
3397 %}
3402 // Body of function which returns an integer array locating
3403 // arguments either in registers or in stack slots. Passed an array
3404 // of ideal registers called "sig" and a "length" count. Stack-slot
3405 // offsets are based on outgoing arguments, i.e. a CALLER setting up
3406 // arguments for a CALLEE. Incoming stack arguments are
3407 // automatically biased by the preserve_stack_slots field above.
3410 // SEE CallRuntimeNode::calling_convention for more information. by yjl 3/16/2006
3411 c_calling_convention %{
3412 (void) SharedRuntime::c_calling_convention(sig_bt, regs, /*regs2=*/NULL, length);
3413 %}
3416 // Location of C & interpreter return values
3417 // register(s) contain(s) return value for Op_StartI2C and Op_StartOSR.
3418 // SEE Matcher::match. by yjl 3/16/2006
3419 c_return_value %{
3420 assert( ideal_reg >= Op_RegI && ideal_reg <= Op_RegL, "only return normal values" );
3421 /* -- , -- , Op_RegN, Op_RegI, Op_RegP, Op_RegF, Op_RegD, Op_RegL */
3422 static int lo[Op_RegL+1] = { 0, 0, V0_num, V0_num, V0_num, F0_num, F0_num, V0_num };
3423 static int hi[Op_RegL+1] = { 0, 0, OptoReg::Bad, OptoReg::Bad, V0_H_num, OptoReg::Bad, F0_H_num, V0_H_num };
3424 return OptoRegPair(hi[ideal_reg],lo[ideal_reg]);
3425 %}
3427 // Location of return values
3428 // register(s) contain(s) return value for Op_StartC2I and Op_Start.
3429 // SEE Matcher::match. by yjl 3/16/2006
3431 return_value %{
3432 assert( ideal_reg >= Op_RegI && ideal_reg <= Op_RegL, "only return normal values" );
3433 /* -- , -- , Op_RegN, Op_RegI, Op_RegP, Op_RegF, Op_RegD, Op_RegL */
3434 static int lo[Op_RegL+1] = { 0, 0, V0_num, V0_num, V0_num, F0_num, F0_num, V0_num };
3435 static int hi[Op_RegL+1] = { 0, 0, OptoReg::Bad, OptoReg::Bad, V0_H_num, OptoReg::Bad, F0_H_num, V0_H_num};
3436 return OptoRegPair(hi[ideal_reg],lo[ideal_reg]);
3437 %}
3439 %}
3441 //----------ATTRIBUTES---------------------------------------------------------
3442 //----------Operand Attributes-------------------------------------------------
3443 op_attrib op_cost(0); // Required cost attribute
3445 //----------Instruction Attributes---------------------------------------------
3446 ins_attrib ins_cost(100); // Required cost attribute
3447 ins_attrib ins_size(32); // Required size attribute (in bits)
3448 ins_attrib ins_pc_relative(0); // Required PC Relative flag
3449 ins_attrib ins_short_branch(0); // Required flag: is this instruction a
3450 // non-matching short branch variant of some
3451 // long branch?
3452 ins_attrib ins_alignment(4); // Required alignment attribute (must be a power of 2)
3453 // specifies the alignment that some part of the instruction (not
3454 // necessarily the start) requires. If > 1, a compute_padding()
3455 // function must be provided for the instruction
3457 //----------OPERANDS-----------------------------------------------------------
3458 // Operand definitions must precede instruction definitions for correct parsing
3459 // in the ADLC because operands constitute user defined types which are used in
3460 // instruction definitions.
3462 // Vectors
3463 operand vecD() %{
3464 constraint(ALLOC_IN_RC(dbl_reg));
3465 match(VecD);
3467 format %{ %}
3468 interface(REG_INTER);
3469 %}
3471 // Flags register, used as output of compare instructions
3472 operand FlagsReg() %{
3473 constraint(ALLOC_IN_RC(mips_flags));
3474 match(RegFlags);
3476 format %{ "EFLAGS" %}
3477 interface(REG_INTER);
3478 %}
3480 //----------Simple Operands----------------------------------------------------
3481 //TODO: Should we need to define some more special immediate number ?
3482 // Immediate Operands
3483 // Integer Immediate
3484 operand immI() %{
3485 match(ConI);
3486 //TODO: should not match immI8 here LEE
3487 match(immI8);
3489 op_cost(20);
3490 format %{ %}
3491 interface(CONST_INTER);
3492 %}
3494 // Long Immediate 8-bit
3495 operand immL8()
3496 %{
3497 predicate(-0x80L <= n->get_long() && n->get_long() < 0x80L);
3498 match(ConL);
3500 op_cost(5);
3501 format %{ %}
3502 interface(CONST_INTER);
3503 %}
3505 // Constant for test vs zero
3506 operand immI0() %{
3507 predicate(n->get_int() == 0);
3508 match(ConI);
3510 op_cost(0);
3511 format %{ %}
3512 interface(CONST_INTER);
3513 %}
3515 // Constant for increment
3516 operand immI1() %{
3517 predicate(n->get_int() == 1);
3518 match(ConI);
3520 op_cost(0);
3521 format %{ %}
3522 interface(CONST_INTER);
3523 %}
3525 // Constant for decrement
3526 operand immI_M1() %{
3527 predicate(n->get_int() == -1);
3528 match(ConI);
3530 op_cost(0);
3531 format %{ %}
3532 interface(CONST_INTER);
3533 %}
3535 operand immI_MaxI() %{
3536 predicate(n->get_int() == 2147483647);
3537 match(ConI);
3539 op_cost(0);
3540 format %{ %}
3541 interface(CONST_INTER);
3542 %}
3544 // Valid scale values for addressing modes
3545 operand immI2() %{
3546 predicate(0 <= n->get_int() && (n->get_int() <= 3));
3547 match(ConI);
3549 format %{ %}
3550 interface(CONST_INTER);
3551 %}
3553 operand immI8() %{
3554 predicate((-128 <= n->get_int()) && (n->get_int() <= 127));
3555 match(ConI);
3557 op_cost(5);
3558 format %{ %}
3559 interface(CONST_INTER);
3560 %}
3562 operand immI16() %{
3563 predicate((-32768 <= n->get_int()) && (n->get_int() <= 32767));
3564 match(ConI);
3566 op_cost(10);
3567 format %{ %}
3568 interface(CONST_INTER);
3569 %}
3571 // Constant for long shifts
3572 operand immI_32() %{
3573 predicate( n->get_int() == 32 );
3574 match(ConI);
3576 op_cost(0);
3577 format %{ %}
3578 interface(CONST_INTER);
3579 %}
3581 operand immI_63() %{
3582 predicate( n->get_int() == 63 );
3583 match(ConI);
3585 op_cost(0);
3586 format %{ %}
3587 interface(CONST_INTER);
3588 %}
3590 operand immI_0_31() %{
3591 predicate( n->get_int() >= 0 && n->get_int() <= 31 );
3592 match(ConI);
3594 op_cost(0);
3595 format %{ %}
3596 interface(CONST_INTER);
3597 %}
3599 // Operand for non-negtive integer mask
3600 operand immI_nonneg_mask() %{
3601 predicate( (n->get_int() >= 0) && (Assembler::is_int_mask(n->get_int()) != -1) );
3602 match(ConI);
3604 op_cost(0);
3605 format %{ %}
3606 interface(CONST_INTER);
3607 %}
3609 operand immI_32_63() %{
3610 predicate( n->get_int() >= 32 && n->get_int() <= 63 );
3611 match(ConI);
3612 op_cost(0);
3614 format %{ %}
3615 interface(CONST_INTER);
3616 %}
3618 operand immI16_sub() %{
3619 predicate((-32767 <= n->get_int()) && (n->get_int() <= 32768));
3620 match(ConI);
3622 op_cost(10);
3623 format %{ %}
3624 interface(CONST_INTER);
3625 %}
3627 operand immI_0_32767() %{
3628 predicate( n->get_int() >= 0 && n->get_int() <= 32767 );
3629 match(ConI);
3630 op_cost(0);
3632 format %{ %}
3633 interface(CONST_INTER);
3634 %}
3636 operand immI_0_65535() %{
3637 predicate( n->get_int() >= 0 && n->get_int() <= 65535 );
3638 match(ConI);
3639 op_cost(0);
3641 format %{ %}
3642 interface(CONST_INTER);
3643 %}
3645 operand immI_1() %{
3646 predicate( n->get_int() == 1 );
3647 match(ConI);
3649 op_cost(0);
3650 format %{ %}
3651 interface(CONST_INTER);
3652 %}
3654 operand immI_2() %{
3655 predicate( n->get_int() == 2 );
3656 match(ConI);
3658 op_cost(0);
3659 format %{ %}
3660 interface(CONST_INTER);
3661 %}
3663 operand immI_3() %{
3664 predicate( n->get_int() == 3 );
3665 match(ConI);
3667 op_cost(0);
3668 format %{ %}
3669 interface(CONST_INTER);
3670 %}
3672 operand immI_7() %{
3673 predicate( n->get_int() == 7 );
3674 match(ConI);
3676 format %{ %}
3677 interface(CONST_INTER);
3678 %}
3680 // Immediates for special shifts (sign extend)
3682 // Constants for increment
3683 operand immI_16() %{
3684 predicate( n->get_int() == 16 );
3685 match(ConI);
3687 format %{ %}
3688 interface(CONST_INTER);
3689 %}
3691 operand immI_24() %{
3692 predicate( n->get_int() == 24 );
3693 match(ConI);
3695 format %{ %}
3696 interface(CONST_INTER);
3697 %}
3699 // Constant for byte-wide masking
3700 operand immI_255() %{
3701 predicate( n->get_int() == 255 );
3702 match(ConI);
3704 op_cost(0);
3705 format %{ %}
3706 interface(CONST_INTER);
3707 %}
3709 operand immI_65535() %{
3710 predicate( n->get_int() == 65535 );
3711 match(ConI);
3713 op_cost(5);
3714 format %{ %}
3715 interface(CONST_INTER);
3716 %}
3718 operand immI_65536() %{
3719 predicate( n->get_int() == 65536 );
3720 match(ConI);
3722 op_cost(5);
3723 format %{ %}
3724 interface(CONST_INTER);
3725 %}
3727 operand immI_M65536() %{
3728 predicate( n->get_int() == -65536 );
3729 match(ConI);
3731 op_cost(5);
3732 format %{ %}
3733 interface(CONST_INTER);
3734 %}
3736 // Pointer Immediate
3737 operand immP() %{
3738 match(ConP);
3740 op_cost(10);
3741 format %{ %}
3742 interface(CONST_INTER);
3743 %}
3745 // NULL Pointer Immediate
3746 operand immP0() %{
3747 predicate( n->get_ptr() == 0 );
3748 match(ConP);
3749 op_cost(0);
3751 format %{ %}
3752 interface(CONST_INTER);
3753 %}
3755 // Pointer Immediate: 64-bit
3756 operand immP_set() %{
3757 match(ConP);
3759 op_cost(5);
3760 // formats are generated automatically for constants and base registers
3761 format %{ %}
3762 interface(CONST_INTER);
3763 %}
3765 // Pointer Immediate: 64-bit
3766 operand immP_load() %{
3767 predicate(n->bottom_type()->isa_oop_ptr() || (MacroAssembler::insts_for_set64(n->get_ptr()) > 3));
3768 match(ConP);
3770 op_cost(5);
3771 // formats are generated automatically for constants and base registers
3772 format %{ %}
3773 interface(CONST_INTER);
3774 %}
3776 // Pointer Immediate: 64-bit
3777 operand immP_no_oop_cheap() %{
3778 predicate(!n->bottom_type()->isa_oop_ptr() && (MacroAssembler::insts_for_set64(n->get_ptr()) <= 3));
3779 match(ConP);
3781 op_cost(5);
3782 // formats are generated automatically for constants and base registers
3783 format %{ %}
3784 interface(CONST_INTER);
3785 %}
3787 // Pointer for polling page
3788 operand immP_poll() %{
3789 predicate(n->get_ptr() != 0 && n->get_ptr() == (intptr_t)os::get_polling_page());
3790 match(ConP);
3791 op_cost(5);
3793 format %{ %}
3794 interface(CONST_INTER);
3795 %}
3797 // Pointer Immediate
3798 operand immN() %{
3799 match(ConN);
3801 op_cost(10);
3802 format %{ %}
3803 interface(CONST_INTER);
3804 %}
3806 operand immNKlass() %{
3807 match(ConNKlass);
3809 op_cost(10);
3810 format %{ %}
3811 interface(CONST_INTER);
3812 %}
3814 // NULL Pointer Immediate
3815 operand immN0() %{
3816 predicate(n->get_narrowcon() == 0);
3817 match(ConN);
3819 op_cost(5);
3820 format %{ %}
3821 interface(CONST_INTER);
3822 %}
3824 // Long Immediate
3825 operand immL() %{
3826 match(ConL);
3828 op_cost(20);
3829 format %{ %}
3830 interface(CONST_INTER);
3831 %}
3833 // Long Immediate zero
3834 operand immL0() %{
3835 predicate( n->get_long() == 0L );
3836 match(ConL);
3837 op_cost(0);
3839 format %{ %}
3840 interface(CONST_INTER);
3841 %}
3843 operand immL7() %{
3844 predicate( n->get_long() == 7L );
3845 match(ConL);
3846 op_cost(0);
3848 format %{ %}
3849 interface(CONST_INTER);
3850 %}
3852 operand immL_M1() %{
3853 predicate( n->get_long() == -1L );
3854 match(ConL);
3855 op_cost(0);
3857 format %{ %}
3858 interface(CONST_INTER);
3859 %}
3861 // bit 0..2 zero
3862 operand immL_M8() %{
3863 predicate( n->get_long() == -8L );
3864 match(ConL);
3865 op_cost(0);
3867 format %{ %}
3868 interface(CONST_INTER);
3869 %}
3871 // bit 2 zero
3872 operand immL_M5() %{
3873 predicate( n->get_long() == -5L );
3874 match(ConL);
3875 op_cost(0);
3877 format %{ %}
3878 interface(CONST_INTER);
3879 %}
3881 // bit 1..2 zero
3882 operand immL_M7() %{
3883 predicate( n->get_long() == -7L );
3884 match(ConL);
3885 op_cost(0);
3887 format %{ %}
3888 interface(CONST_INTER);
3889 %}
3891 // bit 0..1 zero
3892 operand immL_M4() %{
3893 predicate( n->get_long() == -4L );
3894 match(ConL);
3895 op_cost(0);
3897 format %{ %}
3898 interface(CONST_INTER);
3899 %}
3901 // bit 3..6 zero
3902 operand immL_M121() %{
3903 predicate( n->get_long() == -121L );
3904 match(ConL);
3905 op_cost(0);
3907 format %{ %}
3908 interface(CONST_INTER);
3909 %}
3911 // Long immediate from 0 to 127.
3912 // Used for a shorter form of long mul by 10.
3913 operand immL_127() %{
3914 predicate((0 <= n->get_long()) && (n->get_long() <= 127));
3915 match(ConL);
3916 op_cost(0);
3918 format %{ %}
3919 interface(CONST_INTER);
3920 %}
3922 // Operand for non-negtive long mask
3923 operand immL_nonneg_mask() %{
3924 predicate( (n->get_long() >= 0) && (Assembler::is_jlong_mask(n->get_long()) != -1) );
3925 match(ConL);
3927 op_cost(0);
3928 format %{ %}
3929 interface(CONST_INTER);
3930 %}
3932 operand immL_0_65535() %{
3933 predicate( n->get_long() >= 0 && n->get_long() <= 65535 );
3934 match(ConL);
3935 op_cost(0);
3937 format %{ %}
3938 interface(CONST_INTER);
3939 %}
3941 // Long Immediate: cheap (materialize in <= 3 instructions)
3942 operand immL_cheap() %{
3943 predicate(MacroAssembler::insts_for_set64(n->get_long()) <= 3);
3944 match(ConL);
3945 op_cost(0);
3947 format %{ %}
3948 interface(CONST_INTER);
3949 %}
3951 // Long Immediate: expensive (materialize in > 3 instructions)
3952 operand immL_expensive() %{
3953 predicate(MacroAssembler::insts_for_set64(n->get_long()) > 3);
3954 match(ConL);
3955 op_cost(0);
3957 format %{ %}
3958 interface(CONST_INTER);
3959 %}
3961 operand immL16() %{
3962 predicate((-32768 <= n->get_long()) && (n->get_long() <= 32767));
3963 match(ConL);
3965 op_cost(10);
3966 format %{ %}
3967 interface(CONST_INTER);
3968 %}
3970 operand immL16_sub() %{
3971 predicate((-32767 <= n->get_long()) && (n->get_long() <= 32768));
3972 match(ConL);
3974 op_cost(10);
3975 format %{ %}
3976 interface(CONST_INTER);
3977 %}
3979 // Long Immediate: low 32-bit mask
3980 operand immL_32bits() %{
3981 predicate(n->get_long() == 0xFFFFFFFFL);
3982 match(ConL);
3983 op_cost(20);
3985 format %{ %}
3986 interface(CONST_INTER);
3987 %}
3989 // Long Immediate 32-bit signed
3990 operand immL32()
3991 %{
3992 predicate(n->get_long() == (int) (n->get_long()));
3993 match(ConL);
3995 op_cost(15);
3996 format %{ %}
3997 interface(CONST_INTER);
3998 %}
4001 //single-precision floating-point zero
4002 operand immF0() %{
4003 predicate(jint_cast(n->getf()) == 0);
4004 match(ConF);
4006 op_cost(5);
4007 format %{ %}
4008 interface(CONST_INTER);
4009 %}
4011 //single-precision floating-point immediate
4012 operand immF() %{
4013 match(ConF);
4015 op_cost(20);
4016 format %{ %}
4017 interface(CONST_INTER);
4018 %}
4020 //double-precision floating-point zero
4021 operand immD0() %{
4022 predicate(jlong_cast(n->getd()) == 0);
4023 match(ConD);
4025 op_cost(5);
4026 format %{ %}
4027 interface(CONST_INTER);
4028 %}
4030 //double-precision floating-point immediate
4031 operand immD() %{
4032 match(ConD);
4034 op_cost(20);
4035 format %{ %}
4036 interface(CONST_INTER);
4037 %}
4039 // Register Operands
4040 // Integer Register
4041 operand mRegI() %{
4042 constraint(ALLOC_IN_RC(int_reg));
4043 match(RegI);
4045 format %{ %}
4046 interface(REG_INTER);
4047 %}
4049 operand no_Ax_mRegI() %{
4050 constraint(ALLOC_IN_RC(no_Ax_int_reg));
4051 match(RegI);
4052 match(mRegI);
4054 format %{ %}
4055 interface(REG_INTER);
4056 %}
4058 operand mS0RegI() %{
4059 constraint(ALLOC_IN_RC(s0_reg));
4060 match(RegI);
4061 match(mRegI);
4063 format %{ "S0" %}
4064 interface(REG_INTER);
4065 %}
4067 operand mS1RegI() %{
4068 constraint(ALLOC_IN_RC(s1_reg));
4069 match(RegI);
4070 match(mRegI);
4072 format %{ "S1" %}
4073 interface(REG_INTER);
4074 %}
4076 operand mS2RegI() %{
4077 constraint(ALLOC_IN_RC(s2_reg));
4078 match(RegI);
4079 match(mRegI);
4081 format %{ "S2" %}
4082 interface(REG_INTER);
4083 %}
4085 operand mS3RegI() %{
4086 constraint(ALLOC_IN_RC(s3_reg));
4087 match(RegI);
4088 match(mRegI);
4090 format %{ "S3" %}
4091 interface(REG_INTER);
4092 %}
4094 operand mS4RegI() %{
4095 constraint(ALLOC_IN_RC(s4_reg));
4096 match(RegI);
4097 match(mRegI);
4099 format %{ "S4" %}
4100 interface(REG_INTER);
4101 %}
4103 operand mS5RegI() %{
4104 constraint(ALLOC_IN_RC(s5_reg));
4105 match(RegI);
4106 match(mRegI);
4108 format %{ "S5" %}
4109 interface(REG_INTER);
4110 %}
4112 operand mS6RegI() %{
4113 constraint(ALLOC_IN_RC(s6_reg));
4114 match(RegI);
4115 match(mRegI);
4117 format %{ "S6" %}
4118 interface(REG_INTER);
4119 %}
4121 operand mS7RegI() %{
4122 constraint(ALLOC_IN_RC(s7_reg));
4123 match(RegI);
4124 match(mRegI);
4126 format %{ "S7" %}
4127 interface(REG_INTER);
4128 %}
4131 operand mT0RegI() %{
4132 constraint(ALLOC_IN_RC(t0_reg));
4133 match(RegI);
4134 match(mRegI);
4136 format %{ "T0" %}
4137 interface(REG_INTER);
4138 %}
4140 operand mT1RegI() %{
4141 constraint(ALLOC_IN_RC(t1_reg));
4142 match(RegI);
4143 match(mRegI);
4145 format %{ "T1" %}
4146 interface(REG_INTER);
4147 %}
4149 operand mT2RegI() %{
4150 constraint(ALLOC_IN_RC(t2_reg));
4151 match(RegI);
4152 match(mRegI);
4154 format %{ "T2" %}
4155 interface(REG_INTER);
4156 %}
4158 operand mT3RegI() %{
4159 constraint(ALLOC_IN_RC(t3_reg));
4160 match(RegI);
4161 match(mRegI);
4163 format %{ "T3" %}
4164 interface(REG_INTER);
4165 %}
4167 operand mT8RegI() %{
4168 constraint(ALLOC_IN_RC(t8_reg));
4169 match(RegI);
4170 match(mRegI);
4172 format %{ "T8" %}
4173 interface(REG_INTER);
4174 %}
4176 operand mT9RegI() %{
4177 constraint(ALLOC_IN_RC(t9_reg));
4178 match(RegI);
4179 match(mRegI);
4181 format %{ "T9" %}
4182 interface(REG_INTER);
4183 %}
4185 operand mA0RegI() %{
4186 constraint(ALLOC_IN_RC(a0_reg));
4187 match(RegI);
4188 match(mRegI);
4190 format %{ "A0" %}
4191 interface(REG_INTER);
4192 %}
4194 operand mA1RegI() %{
4195 constraint(ALLOC_IN_RC(a1_reg));
4196 match(RegI);
4197 match(mRegI);
4199 format %{ "A1" %}
4200 interface(REG_INTER);
4201 %}
4203 operand mA2RegI() %{
4204 constraint(ALLOC_IN_RC(a2_reg));
4205 match(RegI);
4206 match(mRegI);
4208 format %{ "A2" %}
4209 interface(REG_INTER);
4210 %}
4212 operand mA3RegI() %{
4213 constraint(ALLOC_IN_RC(a3_reg));
4214 match(RegI);
4215 match(mRegI);
4217 format %{ "A3" %}
4218 interface(REG_INTER);
4219 %}
4221 operand mA4RegI() %{
4222 constraint(ALLOC_IN_RC(a4_reg));
4223 match(RegI);
4224 match(mRegI);
4226 format %{ "A4" %}
4227 interface(REG_INTER);
4228 %}
4230 operand mA5RegI() %{
4231 constraint(ALLOC_IN_RC(a5_reg));
4232 match(RegI);
4233 match(mRegI);
4235 format %{ "A5" %}
4236 interface(REG_INTER);
4237 %}
4239 operand mA6RegI() %{
4240 constraint(ALLOC_IN_RC(a6_reg));
4241 match(RegI);
4242 match(mRegI);
4244 format %{ "A6" %}
4245 interface(REG_INTER);
4246 %}
4248 operand mA7RegI() %{
4249 constraint(ALLOC_IN_RC(a7_reg));
4250 match(RegI);
4251 match(mRegI);
4253 format %{ "A7" %}
4254 interface(REG_INTER);
4255 %}
4257 operand mV0RegI() %{
4258 constraint(ALLOC_IN_RC(v0_reg));
4259 match(RegI);
4260 match(mRegI);
4262 format %{ "V0" %}
4263 interface(REG_INTER);
4264 %}
4266 operand mV1RegI() %{
4267 constraint(ALLOC_IN_RC(v1_reg));
4268 match(RegI);
4269 match(mRegI);
4271 format %{ "V1" %}
4272 interface(REG_INTER);
4273 %}
4275 operand mRegN() %{
4276 constraint(ALLOC_IN_RC(int_reg));
4277 match(RegN);
4279 format %{ %}
4280 interface(REG_INTER);
4281 %}
4283 operand t0_RegN() %{
4284 constraint(ALLOC_IN_RC(t0_reg));
4285 match(RegN);
4286 match(mRegN);
4288 format %{ %}
4289 interface(REG_INTER);
4290 %}
4292 operand t1_RegN() %{
4293 constraint(ALLOC_IN_RC(t1_reg));
4294 match(RegN);
4295 match(mRegN);
4297 format %{ %}
4298 interface(REG_INTER);
4299 %}
4301 operand t2_RegN() %{
4302 constraint(ALLOC_IN_RC(t2_reg));
4303 match(RegN);
4304 match(mRegN);
4306 format %{ %}
4307 interface(REG_INTER);
4308 %}
4310 operand t3_RegN() %{
4311 constraint(ALLOC_IN_RC(t3_reg));
4312 match(RegN);
4313 match(mRegN);
4315 format %{ %}
4316 interface(REG_INTER);
4317 %}
4319 operand t8_RegN() %{
4320 constraint(ALLOC_IN_RC(t8_reg));
4321 match(RegN);
4322 match(mRegN);
4324 format %{ %}
4325 interface(REG_INTER);
4326 %}
4328 operand t9_RegN() %{
4329 constraint(ALLOC_IN_RC(t9_reg));
4330 match(RegN);
4331 match(mRegN);
4333 format %{ %}
4334 interface(REG_INTER);
4335 %}
4337 operand a0_RegN() %{
4338 constraint(ALLOC_IN_RC(a0_reg));
4339 match(RegN);
4340 match(mRegN);
4342 format %{ %}
4343 interface(REG_INTER);
4344 %}
4346 operand a1_RegN() %{
4347 constraint(ALLOC_IN_RC(a1_reg));
4348 match(RegN);
4349 match(mRegN);
4351 format %{ %}
4352 interface(REG_INTER);
4353 %}
4355 operand a2_RegN() %{
4356 constraint(ALLOC_IN_RC(a2_reg));
4357 match(RegN);
4358 match(mRegN);
4360 format %{ %}
4361 interface(REG_INTER);
4362 %}
4364 operand a3_RegN() %{
4365 constraint(ALLOC_IN_RC(a3_reg));
4366 match(RegN);
4367 match(mRegN);
4369 format %{ %}
4370 interface(REG_INTER);
4371 %}
4373 operand a4_RegN() %{
4374 constraint(ALLOC_IN_RC(a4_reg));
4375 match(RegN);
4376 match(mRegN);
4378 format %{ %}
4379 interface(REG_INTER);
4380 %}
4382 operand a5_RegN() %{
4383 constraint(ALLOC_IN_RC(a5_reg));
4384 match(RegN);
4385 match(mRegN);
4387 format %{ %}
4388 interface(REG_INTER);
4389 %}
4391 operand a6_RegN() %{
4392 constraint(ALLOC_IN_RC(a6_reg));
4393 match(RegN);
4394 match(mRegN);
4396 format %{ %}
4397 interface(REG_INTER);
4398 %}
4400 operand a7_RegN() %{
4401 constraint(ALLOC_IN_RC(a7_reg));
4402 match(RegN);
4403 match(mRegN);
4405 format %{ %}
4406 interface(REG_INTER);
4407 %}
4409 operand s0_RegN() %{
4410 constraint(ALLOC_IN_RC(s0_reg));
4411 match(RegN);
4412 match(mRegN);
4414 format %{ %}
4415 interface(REG_INTER);
4416 %}
4418 operand s1_RegN() %{
4419 constraint(ALLOC_IN_RC(s1_reg));
4420 match(RegN);
4421 match(mRegN);
4423 format %{ %}
4424 interface(REG_INTER);
4425 %}
4427 operand s2_RegN() %{
4428 constraint(ALLOC_IN_RC(s2_reg));
4429 match(RegN);
4430 match(mRegN);
4432 format %{ %}
4433 interface(REG_INTER);
4434 %}
4436 operand s3_RegN() %{
4437 constraint(ALLOC_IN_RC(s3_reg));
4438 match(RegN);
4439 match(mRegN);
4441 format %{ %}
4442 interface(REG_INTER);
4443 %}
4445 operand s4_RegN() %{
4446 constraint(ALLOC_IN_RC(s4_reg));
4447 match(RegN);
4448 match(mRegN);
4450 format %{ %}
4451 interface(REG_INTER);
4452 %}
4454 operand s5_RegN() %{
4455 constraint(ALLOC_IN_RC(s5_reg));
4456 match(RegN);
4457 match(mRegN);
4459 format %{ %}
4460 interface(REG_INTER);
4461 %}
4463 operand s6_RegN() %{
4464 constraint(ALLOC_IN_RC(s6_reg));
4465 match(RegN);
4466 match(mRegN);
4468 format %{ %}
4469 interface(REG_INTER);
4470 %}
4472 operand s7_RegN() %{
4473 constraint(ALLOC_IN_RC(s7_reg));
4474 match(RegN);
4475 match(mRegN);
4477 format %{ %}
4478 interface(REG_INTER);
4479 %}
4481 operand v0_RegN() %{
4482 constraint(ALLOC_IN_RC(v0_reg));
4483 match(RegN);
4484 match(mRegN);
4486 format %{ %}
4487 interface(REG_INTER);
4488 %}
4490 operand v1_RegN() %{
4491 constraint(ALLOC_IN_RC(v1_reg));
4492 match(RegN);
4493 match(mRegN);
4495 format %{ %}
4496 interface(REG_INTER);
4497 %}
4499 // Pointer Register
4500 operand mRegP() %{
4501 constraint(ALLOC_IN_RC(p_reg));
4502 match(RegP);
4504 format %{ %}
4505 interface(REG_INTER);
4506 %}
4508 operand no_T8_mRegP() %{
4509 constraint(ALLOC_IN_RC(no_T8_p_reg));
4510 match(RegP);
4511 match(mRegP);
4513 format %{ %}
4514 interface(REG_INTER);
4515 %}
4517 operand s0_RegP()
4518 %{
4519 constraint(ALLOC_IN_RC(s0_long_reg));
4520 match(RegP);
4521 match(mRegP);
4522 match(no_T8_mRegP);
4524 format %{ %}
4525 interface(REG_INTER);
4526 %}
4528 operand s1_RegP()
4529 %{
4530 constraint(ALLOC_IN_RC(s1_long_reg));
4531 match(RegP);
4532 match(mRegP);
4533 match(no_T8_mRegP);
4535 format %{ %}
4536 interface(REG_INTER);
4537 %}
4539 operand s2_RegP()
4540 %{
4541 constraint(ALLOC_IN_RC(s2_long_reg));
4542 match(RegP);
4543 match(mRegP);
4544 match(no_T8_mRegP);
4546 format %{ %}
4547 interface(REG_INTER);
4548 %}
4550 operand s3_RegP()
4551 %{
4552 constraint(ALLOC_IN_RC(s3_long_reg));
4553 match(RegP);
4554 match(mRegP);
4555 match(no_T8_mRegP);
4557 format %{ %}
4558 interface(REG_INTER);
4559 %}
4561 operand s4_RegP()
4562 %{
4563 constraint(ALLOC_IN_RC(s4_long_reg));
4564 match(RegP);
4565 match(mRegP);
4566 match(no_T8_mRegP);
4568 format %{ %}
4569 interface(REG_INTER);
4570 %}
4572 operand s5_RegP()
4573 %{
4574 constraint(ALLOC_IN_RC(s5_long_reg));
4575 match(RegP);
4576 match(mRegP);
4577 match(no_T8_mRegP);
4579 format %{ %}
4580 interface(REG_INTER);
4581 %}
4583 operand s6_RegP()
4584 %{
4585 constraint(ALLOC_IN_RC(s6_long_reg));
4586 match(RegP);
4587 match(mRegP);
4588 match(no_T8_mRegP);
4590 format %{ %}
4591 interface(REG_INTER);
4592 %}
4594 operand s7_RegP()
4595 %{
4596 constraint(ALLOC_IN_RC(s7_long_reg));
4597 match(RegP);
4598 match(mRegP);
4599 match(no_T8_mRegP);
4601 format %{ %}
4602 interface(REG_INTER);
4603 %}
4605 operand t0_RegP()
4606 %{
4607 constraint(ALLOC_IN_RC(t0_long_reg));
4608 match(RegP);
4609 match(mRegP);
4610 match(no_T8_mRegP);
4612 format %{ %}
4613 interface(REG_INTER);
4614 %}
4616 operand t1_RegP()
4617 %{
4618 constraint(ALLOC_IN_RC(t1_long_reg));
4619 match(RegP);
4620 match(mRegP);
4621 match(no_T8_mRegP);
4623 format %{ %}
4624 interface(REG_INTER);
4625 %}
4627 operand t2_RegP()
4628 %{
4629 constraint(ALLOC_IN_RC(t2_long_reg));
4630 match(RegP);
4631 match(mRegP);
4632 match(no_T8_mRegP);
4634 format %{ %}
4635 interface(REG_INTER);
4636 %}
4638 operand t3_RegP()
4639 %{
4640 constraint(ALLOC_IN_RC(t3_long_reg));
4641 match(RegP);
4642 match(mRegP);
4643 match(no_T8_mRegP);
4645 format %{ %}
4646 interface(REG_INTER);
4647 %}
4649 operand t8_RegP()
4650 %{
4651 constraint(ALLOC_IN_RC(t8_long_reg));
4652 match(RegP);
4653 match(mRegP);
4655 format %{ %}
4656 interface(REG_INTER);
4657 %}
4659 operand t9_RegP()
4660 %{
4661 constraint(ALLOC_IN_RC(t9_long_reg));
4662 match(RegP);
4663 match(mRegP);
4664 match(no_T8_mRegP);
4666 format %{ %}
4667 interface(REG_INTER);
4668 %}
4670 operand a0_RegP()
4671 %{
4672 constraint(ALLOC_IN_RC(a0_long_reg));
4673 match(RegP);
4674 match(mRegP);
4675 match(no_T8_mRegP);
4677 format %{ %}
4678 interface(REG_INTER);
4679 %}
4681 operand a1_RegP()
4682 %{
4683 constraint(ALLOC_IN_RC(a1_long_reg));
4684 match(RegP);
4685 match(mRegP);
4686 match(no_T8_mRegP);
4688 format %{ %}
4689 interface(REG_INTER);
4690 %}
4692 operand a2_RegP()
4693 %{
4694 constraint(ALLOC_IN_RC(a2_long_reg));
4695 match(RegP);
4696 match(mRegP);
4697 match(no_T8_mRegP);
4699 format %{ %}
4700 interface(REG_INTER);
4701 %}
4703 operand a3_RegP()
4704 %{
4705 constraint(ALLOC_IN_RC(a3_long_reg));
4706 match(RegP);
4707 match(mRegP);
4708 match(no_T8_mRegP);
4710 format %{ %}
4711 interface(REG_INTER);
4712 %}
4714 operand a4_RegP()
4715 %{
4716 constraint(ALLOC_IN_RC(a4_long_reg));
4717 match(RegP);
4718 match(mRegP);
4719 match(no_T8_mRegP);
4721 format %{ %}
4722 interface(REG_INTER);
4723 %}
4726 operand a5_RegP()
4727 %{
4728 constraint(ALLOC_IN_RC(a5_long_reg));
4729 match(RegP);
4730 match(mRegP);
4731 match(no_T8_mRegP);
4733 format %{ %}
4734 interface(REG_INTER);
4735 %}
4737 operand a6_RegP()
4738 %{
4739 constraint(ALLOC_IN_RC(a6_long_reg));
4740 match(RegP);
4741 match(mRegP);
4742 match(no_T8_mRegP);
4744 format %{ %}
4745 interface(REG_INTER);
4746 %}
4748 operand a7_RegP()
4749 %{
4750 constraint(ALLOC_IN_RC(a7_long_reg));
4751 match(RegP);
4752 match(mRegP);
4753 match(no_T8_mRegP);
4755 format %{ %}
4756 interface(REG_INTER);
4757 %}
4759 operand v0_RegP()
4760 %{
4761 constraint(ALLOC_IN_RC(v0_long_reg));
4762 match(RegP);
4763 match(mRegP);
4764 match(no_T8_mRegP);
4766 format %{ %}
4767 interface(REG_INTER);
4768 %}
4770 operand v1_RegP()
4771 %{
4772 constraint(ALLOC_IN_RC(v1_long_reg));
4773 match(RegP);
4774 match(mRegP);
4775 match(no_T8_mRegP);
4777 format %{ %}
4778 interface(REG_INTER);
4779 %}
4781 /*
4782 operand mSPRegP(mRegP reg) %{
4783 constraint(ALLOC_IN_RC(sp_reg));
4784 match(reg);
4786 format %{ "SP" %}
4787 interface(REG_INTER);
4788 %}
4790 operand mFPRegP(mRegP reg) %{
4791 constraint(ALLOC_IN_RC(fp_reg));
4792 match(reg);
4794 format %{ "FP" %}
4795 interface(REG_INTER);
4796 %}
4797 */
4799 operand mRegL() %{
4800 constraint(ALLOC_IN_RC(long_reg));
4801 match(RegL);
4803 format %{ %}
4804 interface(REG_INTER);
4805 %}
4807 operand v0RegL() %{
4808 constraint(ALLOC_IN_RC(v0_long_reg));
4809 match(RegL);
4810 match(mRegL);
4812 format %{ %}
4813 interface(REG_INTER);
4814 %}
4816 operand v1RegL() %{
4817 constraint(ALLOC_IN_RC(v1_long_reg));
4818 match(RegL);
4819 match(mRegL);
4821 format %{ %}
4822 interface(REG_INTER);
4823 %}
4825 operand a0RegL() %{
4826 constraint(ALLOC_IN_RC(a0_long_reg));
4827 match(RegL);
4828 match(mRegL);
4830 format %{ "A0" %}
4831 interface(REG_INTER);
4832 %}
4834 operand a1RegL() %{
4835 constraint(ALLOC_IN_RC(a1_long_reg));
4836 match(RegL);
4837 match(mRegL);
4839 format %{ %}
4840 interface(REG_INTER);
4841 %}
4843 operand a2RegL() %{
4844 constraint(ALLOC_IN_RC(a2_long_reg));
4845 match(RegL);
4846 match(mRegL);
4848 format %{ %}
4849 interface(REG_INTER);
4850 %}
4852 operand a3RegL() %{
4853 constraint(ALLOC_IN_RC(a3_long_reg));
4854 match(RegL);
4855 match(mRegL);
4857 format %{ %}
4858 interface(REG_INTER);
4859 %}
4861 operand t0RegL() %{
4862 constraint(ALLOC_IN_RC(t0_long_reg));
4863 match(RegL);
4864 match(mRegL);
4866 format %{ %}
4867 interface(REG_INTER);
4868 %}
4870 operand t1RegL() %{
4871 constraint(ALLOC_IN_RC(t1_long_reg));
4872 match(RegL);
4873 match(mRegL);
4875 format %{ %}
4876 interface(REG_INTER);
4877 %}
4879 operand t2RegL() %{
4880 constraint(ALLOC_IN_RC(t2_long_reg));
4881 match(RegL);
4882 match(mRegL);
4884 format %{ %}
4885 interface(REG_INTER);
4886 %}
4888 operand t3RegL() %{
4889 constraint(ALLOC_IN_RC(t3_long_reg));
4890 match(RegL);
4891 match(mRegL);
4893 format %{ %}
4894 interface(REG_INTER);
4895 %}
4897 operand t8RegL() %{
4898 constraint(ALLOC_IN_RC(t8_long_reg));
4899 match(RegL);
4900 match(mRegL);
4902 format %{ %}
4903 interface(REG_INTER);
4904 %}
4906 operand a4RegL() %{
4907 constraint(ALLOC_IN_RC(a4_long_reg));
4908 match(RegL);
4909 match(mRegL);
4911 format %{ %}
4912 interface(REG_INTER);
4913 %}
4915 operand a5RegL() %{
4916 constraint(ALLOC_IN_RC(a5_long_reg));
4917 match(RegL);
4918 match(mRegL);
4920 format %{ %}
4921 interface(REG_INTER);
4922 %}
4924 operand a6RegL() %{
4925 constraint(ALLOC_IN_RC(a6_long_reg));
4926 match(RegL);
4927 match(mRegL);
4929 format %{ %}
4930 interface(REG_INTER);
4931 %}
4933 operand a7RegL() %{
4934 constraint(ALLOC_IN_RC(a7_long_reg));
4935 match(RegL);
4936 match(mRegL);
4938 format %{ %}
4939 interface(REG_INTER);
4940 %}
4942 operand s0RegL() %{
4943 constraint(ALLOC_IN_RC(s0_long_reg));
4944 match(RegL);
4945 match(mRegL);
4947 format %{ %}
4948 interface(REG_INTER);
4949 %}
4951 operand s1RegL() %{
4952 constraint(ALLOC_IN_RC(s1_long_reg));
4953 match(RegL);
4954 match(mRegL);
4956 format %{ %}
4957 interface(REG_INTER);
4958 %}
4960 operand s2RegL() %{
4961 constraint(ALLOC_IN_RC(s2_long_reg));
4962 match(RegL);
4963 match(mRegL);
4965 format %{ %}
4966 interface(REG_INTER);
4967 %}
4969 operand s3RegL() %{
4970 constraint(ALLOC_IN_RC(s3_long_reg));
4971 match(RegL);
4972 match(mRegL);
4974 format %{ %}
4975 interface(REG_INTER);
4976 %}
4978 operand s4RegL() %{
4979 constraint(ALLOC_IN_RC(s4_long_reg));
4980 match(RegL);
4981 match(mRegL);
4983 format %{ %}
4984 interface(REG_INTER);
4985 %}
4987 operand s7RegL() %{
4988 constraint(ALLOC_IN_RC(s7_long_reg));
4989 match(RegL);
4990 match(mRegL);
4992 format %{ %}
4993 interface(REG_INTER);
4994 %}
4996 // Floating register operands
4997 operand regF() %{
4998 constraint(ALLOC_IN_RC(flt_reg));
4999 match(RegF);
5001 format %{ %}
5002 interface(REG_INTER);
5003 %}
5005 //Double Precision Floating register operands
5006 operand regD() %{
5007 constraint(ALLOC_IN_RC(dbl_reg));
5008 match(RegD);
5010 format %{ %}
5011 interface(REG_INTER);
5012 %}
5014 //----------Memory Operands----------------------------------------------------
5015 // Indirect Memory Operand
5016 operand indirect(mRegP reg) %{
5017 constraint(ALLOC_IN_RC(p_reg));
5018 match(reg);
5020 format %{ "[$reg] @ indirect" %}
5021 interface(MEMORY_INTER) %{
5022 base($reg);
5023 index(0x0); /* NO_INDEX */
5024 scale(0x0);
5025 disp(0x0);
5026 %}
5027 %}
5029 // Indirect Memory Plus Short Offset Operand
5030 operand indOffset8(mRegP reg, immL8 off)
5031 %{
5032 constraint(ALLOC_IN_RC(p_reg));
5033 match(AddP reg off);
5035 format %{ "[$reg + $off (8-bit)] @ indOffset8" %}
5036 interface(MEMORY_INTER) %{
5037 base($reg);
5038 index(0x0); /* NO_INDEX */
5039 scale(0x0);
5040 disp($off);
5041 %}
5042 %}
5044 // Indirect Memory Times Scale Plus Index Register
5045 operand indIndexScale(mRegP reg, mRegL lreg, immI2 scale)
5046 %{
5047 constraint(ALLOC_IN_RC(p_reg));
5048 match(AddP reg (LShiftL lreg scale));
5050 op_cost(10);
5051 format %{"[$reg + $lreg << $scale] @ indIndexScale" %}
5052 interface(MEMORY_INTER) %{
5053 base($reg);
5054 index($lreg);
5055 scale($scale);
5056 disp(0x0);
5057 %}
5058 %}
5061 // [base + index + offset]
5062 operand baseIndexOffset8(mRegP base, mRegL index, immL8 off)
5063 %{
5064 constraint(ALLOC_IN_RC(p_reg));
5065 op_cost(5);
5066 match(AddP (AddP base index) off);
5068 format %{ "[$base + $index + $off (8-bit)] @ baseIndexOffset8" %}
5069 interface(MEMORY_INTER) %{
5070 base($base);
5071 index($index);
5072 scale(0x0);
5073 disp($off);
5074 %}
5075 %}
5077 // [base + index + offset]
5078 operand baseIndexOffset8_convI2L(mRegP base, mRegI index, immL8 off)
5079 %{
5080 constraint(ALLOC_IN_RC(p_reg));
5081 op_cost(5);
5082 match(AddP (AddP base (ConvI2L index)) off);
5084 format %{ "[$base + $index + $off (8-bit)] @ baseIndexOffset8_convI2L" %}
5085 interface(MEMORY_INTER) %{
5086 base($base);
5087 index($index);
5088 scale(0x0);
5089 disp($off);
5090 %}
5091 %}
5093 // Indirect Memory Times Scale Plus Index Register Plus Offset Operand
5094 operand indIndexScaleOffset8(mRegP reg, immL8 off, mRegL lreg, immI2 scale)
5095 %{
5096 constraint(ALLOC_IN_RC(p_reg));
5097 match(AddP (AddP reg (LShiftL lreg scale)) off);
5099 op_cost(10);
5100 format %{"[$reg + $off + $lreg << $scale] @ indIndexScaleOffset8" %}
5101 interface(MEMORY_INTER) %{
5102 base($reg);
5103 index($lreg);
5104 scale($scale);
5105 disp($off);
5106 %}
5107 %}
5109 operand indIndexScaleOffset8_convI2L(mRegP reg, immL8 off, mRegI ireg, immI2 scale)
5110 %{
5111 constraint(ALLOC_IN_RC(p_reg));
5112 match(AddP (AddP reg (LShiftL (ConvI2L ireg) scale)) off);
5114 op_cost(10);
5115 format %{"[$reg + $off + $ireg << $scale] @ indIndexScaleOffset8_convI2L" %}
5116 interface(MEMORY_INTER) %{
5117 base($reg);
5118 index($ireg);
5119 scale($scale);
5120 disp($off);
5121 %}
5122 %}
5124 // [base + index<<scale + offset]
5125 operand basePosIndexScaleOffset8(mRegP base, mRegI index, immL8 off, immI_0_31 scale)
5126 %{
5127 constraint(ALLOC_IN_RC(p_reg));
5128 //predicate(n->in(2)->in(3)->in(1)->as_Type()->type()->is_long()->_lo >= 0);
5129 op_cost(10);
5130 match(AddP (AddP base (LShiftL (ConvI2L index) scale)) off);
5132 format %{ "[$base + $index << $scale + $off (8-bit)] @ basePosIndexScaleOffset8" %}
5133 interface(MEMORY_INTER) %{
5134 base($base);
5135 index($index);
5136 scale($scale);
5137 disp($off);
5138 %}
5139 %}
5141 // Indirect Memory Times Scale Plus Index Register Plus Offset Operand
5142 operand indIndexScaleOffsetNarrow(mRegN reg, immL8 off, mRegL lreg, immI2 scale)
5143 %{
5144 predicate(Universe::narrow_oop_shift() == 0);
5145 constraint(ALLOC_IN_RC(p_reg));
5146 match(AddP (AddP (DecodeN reg) (LShiftL lreg scale)) off);
5148 op_cost(10);
5149 format %{"[$reg + $off + $lreg << $scale] @ indIndexScaleOffsetNarrow" %}
5150 interface(MEMORY_INTER) %{
5151 base($reg);
5152 index($lreg);
5153 scale($scale);
5154 disp($off);
5155 %}
5156 %}
5158 // [base + index<<scale + offset] for compressd Oops
5159 operand indPosIndexI2LScaleOffset8Narrow(mRegN base, mRegI index, immL8 off, immI_0_31 scale)
5160 %{
5161 constraint(ALLOC_IN_RC(p_reg));
5162 //predicate(Universe::narrow_oop_shift() == 0 && n->in(2)->in(3)->in(1)->as_Type()->type()->is_long()->_lo >= 0);
5163 predicate(Universe::narrow_oop_shift() == 0);
5164 op_cost(10);
5165 match(AddP (AddP (DecodeN base) (LShiftL (ConvI2L index) scale)) off);
5167 format %{ "[$base + $index << $scale + $off (8-bit)] @ indPosIndexI2LScaleOffset8Narrow" %}
5168 interface(MEMORY_INTER) %{
5169 base($base);
5170 index($index);
5171 scale($scale);
5172 disp($off);
5173 %}
5174 %}
5176 //FIXME: I think it's better to limit the immI to be 16-bit at most!
5177 // Indirect Memory Plus Long Offset Operand
5178 operand indOffset32(mRegP reg, immL32 off) %{
5179 constraint(ALLOC_IN_RC(p_reg));
5180 op_cost(20);
5181 match(AddP reg off);
5183 format %{ "[$reg + $off (32-bit)] @ indOffset32" %}
5184 interface(MEMORY_INTER) %{
5185 base($reg);
5186 index(0x0); /* NO_INDEX */
5187 scale(0x0);
5188 disp($off);
5189 %}
5190 %}
5192 // Indirect Memory Plus Index Register
5193 operand indIndex(mRegP addr, mRegL index) %{
5194 constraint(ALLOC_IN_RC(p_reg));
5195 match(AddP addr index);
5197 op_cost(20);
5198 format %{"[$addr + $index] @ indIndex" %}
5199 interface(MEMORY_INTER) %{
5200 base($addr);
5201 index($index);
5202 scale(0x0);
5203 disp(0x0);
5204 %}
5205 %}
5207 operand indirectNarrowKlass(mRegN reg)
5208 %{
5209 predicate(Universe::narrow_klass_shift() == 0);
5210 constraint(ALLOC_IN_RC(p_reg));
5211 op_cost(10);
5212 match(DecodeNKlass reg);
5214 format %{ "[$reg] @ indirectNarrowKlass" %}
5215 interface(MEMORY_INTER) %{
5216 base($reg);
5217 index(0x0);
5218 scale(0x0);
5219 disp(0x0);
5220 %}
5221 %}
5223 operand indOffset8NarrowKlass(mRegN reg, immL8 off)
5224 %{
5225 predicate(Universe::narrow_klass_shift() == 0);
5226 constraint(ALLOC_IN_RC(p_reg));
5227 op_cost(10);
5228 match(AddP (DecodeNKlass reg) off);
5230 format %{ "[$reg + $off (8-bit)] @ indOffset8NarrowKlass" %}
5231 interface(MEMORY_INTER) %{
5232 base($reg);
5233 index(0x0);
5234 scale(0x0);
5235 disp($off);
5236 %}
5237 %}
5239 operand indOffset32NarrowKlass(mRegN reg, immL32 off)
5240 %{
5241 predicate(Universe::narrow_klass_shift() == 0);
5242 constraint(ALLOC_IN_RC(p_reg));
5243 op_cost(10);
5244 match(AddP (DecodeNKlass reg) off);
5246 format %{ "[$reg + $off (32-bit)] @ indOffset32NarrowKlass" %}
5247 interface(MEMORY_INTER) %{
5248 base($reg);
5249 index(0x0);
5250 scale(0x0);
5251 disp($off);
5252 %}
5253 %}
5255 operand indIndexOffsetNarrowKlass(mRegN reg, mRegL lreg, immL32 off)
5256 %{
5257 predicate(Universe::narrow_klass_shift() == 0);
5258 constraint(ALLOC_IN_RC(p_reg));
5259 match(AddP (AddP (DecodeNKlass reg) lreg) off);
5261 op_cost(10);
5262 format %{"[$reg + $off + $lreg] @ indIndexOffsetNarrowKlass" %}
5263 interface(MEMORY_INTER) %{
5264 base($reg);
5265 index($lreg);
5266 scale(0x0);
5267 disp($off);
5268 %}
5269 %}
5271 operand indIndexNarrowKlass(mRegN reg, mRegL lreg)
5272 %{
5273 predicate(Universe::narrow_klass_shift() == 0);
5274 constraint(ALLOC_IN_RC(p_reg));
5275 match(AddP (DecodeNKlass reg) lreg);
5277 op_cost(10);
5278 format %{"[$reg + $lreg] @ indIndexNarrowKlass" %}
5279 interface(MEMORY_INTER) %{
5280 base($reg);
5281 index($lreg);
5282 scale(0x0);
5283 disp(0x0);
5284 %}
5285 %}
5287 // Indirect Memory Operand
5288 operand indirectNarrow(mRegN reg)
5289 %{
5290 predicate(Universe::narrow_oop_shift() == 0);
5291 constraint(ALLOC_IN_RC(p_reg));
5292 op_cost(10);
5293 match(DecodeN reg);
5295 format %{ "[$reg] @ indirectNarrow" %}
5296 interface(MEMORY_INTER) %{
5297 base($reg);
5298 index(0x0);
5299 scale(0x0);
5300 disp(0x0);
5301 %}
5302 %}
5304 // Indirect Memory Plus Short Offset Operand
5305 operand indOffset8Narrow(mRegN reg, immL8 off)
5306 %{
5307 predicate(Universe::narrow_oop_shift() == 0);
5308 constraint(ALLOC_IN_RC(p_reg));
5309 op_cost(10);
5310 match(AddP (DecodeN reg) off);
5312 format %{ "[$reg + $off (8-bit)] @ indOffset8Narrow" %}
5313 interface(MEMORY_INTER) %{
5314 base($reg);
5315 index(0x0);
5316 scale(0x0);
5317 disp($off);
5318 %}
5319 %}
5321 // Indirect Memory Plus Index Register Plus Offset Operand
5322 operand indIndexOffset8Narrow(mRegN reg, mRegL lreg, immL8 off)
5323 %{
5324 predicate(Universe::narrow_oop_shift() == 0);
5325 constraint(ALLOC_IN_RC(p_reg));
5326 match(AddP (AddP (DecodeN reg) lreg) off);
5328 op_cost(10);
5329 format %{"[$reg + $off + $lreg] @ indIndexOffset8Narrow" %}
5330 interface(MEMORY_INTER) %{
5331 base($reg);
5332 index($lreg);
5333 scale(0x0);
5334 disp($off);
5335 %}
5336 %}
5338 //----------Load Long Memory Operands------------------------------------------
5339 // The load-long idiom will use it's address expression again after loading
5340 // the first word of the long. If the load-long destination overlaps with
5341 // registers used in the addressing expression, the 2nd half will be loaded
5342 // from a clobbered address. Fix this by requiring that load-long use
5343 // address registers that do not overlap with the load-long target.
5345 // load-long support
5346 operand load_long_RegP() %{
5347 constraint(ALLOC_IN_RC(p_reg));
5348 match(RegP);
5349 match(mRegP);
5350 op_cost(100);
5351 format %{ %}
5352 interface(REG_INTER);
5353 %}
5355 // Indirect Memory Operand Long
5356 operand load_long_indirect(load_long_RegP reg) %{
5357 constraint(ALLOC_IN_RC(p_reg));
5358 match(reg);
5360 format %{ "[$reg]" %}
5361 interface(MEMORY_INTER) %{
5362 base($reg);
5363 index(0x0);
5364 scale(0x0);
5365 disp(0x0);
5366 %}
5367 %}
5369 // Indirect Memory Plus Long Offset Operand
5370 operand load_long_indOffset32(load_long_RegP reg, immL32 off) %{
5371 match(AddP reg off);
5373 format %{ "[$reg + $off]" %}
5374 interface(MEMORY_INTER) %{
5375 base($reg);
5376 index(0x0);
5377 scale(0x0);
5378 disp($off);
5379 %}
5380 %}
5382 //----------Conditional Branch Operands----------------------------------------
5383 // Comparison Op - This is the operation of the comparison, and is limited to
5384 // the following set of codes:
5385 // L (<), LE (<=), G (>), GE (>=), E (==), NE (!=)
5386 //
5387 // Other attributes of the comparison, such as unsignedness, are specified
5388 // by the comparison instruction that sets a condition code flags register.
5389 // That result is represented by a flags operand whose subtype is appropriate
5390 // to the unsignedness (etc.) of the comparison.
5391 //
5392 // Later, the instruction which matches both the Comparison Op (a Bool) and
5393 // the flags (produced by the Cmp) specifies the coding of the comparison op
5394 // by matching a specific subtype of Bool operand below, such as cmpOpU.
5396 // Comparision Code
5397 operand cmpOp() %{
5398 match(Bool);
5400 format %{ "" %}
5401 interface(COND_INTER) %{
5402 equal(0x01);
5403 not_equal(0x02);
5404 greater(0x03);
5405 greater_equal(0x04);
5406 less(0x05);
5407 less_equal(0x06);
5408 overflow(0x7);
5409 no_overflow(0x8);
5410 %}
5411 %}
5414 // Comparision Code
5415 // Comparison Code, unsigned compare. Used by FP also, with
5416 // C2 (unordered) turned into GT or LT already. The other bits
5417 // C0 and C3 are turned into Carry & Zero flags.
5418 operand cmpOpU() %{
5419 match(Bool);
5421 format %{ "" %}
5422 interface(COND_INTER) %{
5423 equal(0x01);
5424 not_equal(0x02);
5425 greater(0x03);
5426 greater_equal(0x04);
5427 less(0x05);
5428 less_equal(0x06);
5429 overflow(0x7);
5430 no_overflow(0x8);
5431 %}
5432 %}
5434 /*
5435 // Comparison Code, unsigned compare. Used by FP also, with
5436 // C2 (unordered) turned into GT or LT already. The other bits
5437 // C0 and C3 are turned into Carry & Zero flags.
5438 operand cmpOpU() %{
5439 match(Bool);
5441 format %{ "" %}
5442 interface(COND_INTER) %{
5443 equal(0x4);
5444 not_equal(0x5);
5445 less(0x2);
5446 greater_equal(0x3);
5447 less_equal(0x6);
5448 greater(0x7);
5449 %}
5450 %}
5451 */
5452 /*
5453 // Comparison Code for FP conditional move
5454 operand cmpOp_fcmov() %{
5455 match(Bool);
5457 format %{ "" %}
5458 interface(COND_INTER) %{
5459 equal (0x01);
5460 not_equal (0x02);
5461 greater (0x03);
5462 greater_equal(0x04);
5463 less (0x05);
5464 less_equal (0x06);
5465 %}
5466 %}
5468 // Comparision Code used in long compares
5469 operand cmpOp_commute() %{
5470 match(Bool);
5472 format %{ "" %}
5473 interface(COND_INTER) %{
5474 equal(0x4);
5475 not_equal(0x5);
5476 less(0xF);
5477 greater_equal(0xE);
5478 less_equal(0xD);
5479 greater(0xC);
5480 %}
5481 %}
5482 */
5484 //----------Special Memory Operands--------------------------------------------
5485 // Stack Slot Operand - This operand is used for loading and storing temporary
5486 // values on the stack where a match requires a value to
5487 // flow through memory.
5488 operand stackSlotP(sRegP reg) %{
5489 constraint(ALLOC_IN_RC(stack_slots));
5490 // No match rule because this operand is only generated in matching
5491 op_cost(50);
5492 format %{ "[$reg]" %}
5493 interface(MEMORY_INTER) %{
5494 base(0x1d); // SP
5495 index(0x0); // No Index
5496 scale(0x0); // No Scale
5497 disp($reg); // Stack Offset
5498 %}
5499 %}
5501 operand stackSlotI(sRegI reg) %{
5502 constraint(ALLOC_IN_RC(stack_slots));
5503 // No match rule because this operand is only generated in matching
5504 op_cost(50);
5505 format %{ "[$reg]" %}
5506 interface(MEMORY_INTER) %{
5507 base(0x1d); // SP
5508 index(0x0); // No Index
5509 scale(0x0); // No Scale
5510 disp($reg); // Stack Offset
5511 %}
5512 %}
5514 operand stackSlotF(sRegF reg) %{
5515 constraint(ALLOC_IN_RC(stack_slots));
5516 // No match rule because this operand is only generated in matching
5517 op_cost(50);
5518 format %{ "[$reg]" %}
5519 interface(MEMORY_INTER) %{
5520 base(0x1d); // SP
5521 index(0x0); // No Index
5522 scale(0x0); // No Scale
5523 disp($reg); // Stack Offset
5524 %}
5525 %}
5527 operand stackSlotD(sRegD reg) %{
5528 constraint(ALLOC_IN_RC(stack_slots));
5529 // No match rule because this operand is only generated in matching
5530 op_cost(50);
5531 format %{ "[$reg]" %}
5532 interface(MEMORY_INTER) %{
5533 base(0x1d); // SP
5534 index(0x0); // No Index
5535 scale(0x0); // No Scale
5536 disp($reg); // Stack Offset
5537 %}
5538 %}
5540 operand stackSlotL(sRegL reg) %{
5541 constraint(ALLOC_IN_RC(stack_slots));
5542 // No match rule because this operand is only generated in matching
5543 op_cost(50);
5544 format %{ "[$reg]" %}
5545 interface(MEMORY_INTER) %{
5546 base(0x1d); // SP
5547 index(0x0); // No Index
5548 scale(0x0); // No Scale
5549 disp($reg); // Stack Offset
5550 %}
5551 %}
5554 //------------------------OPERAND CLASSES--------------------------------------
5555 //opclass memory( direct, indirect, indOffset16, indOffset32, indOffset32X, indIndexOffset );
5556 opclass memory( indirect, indirectNarrow, indOffset8, indOffset32, indIndex, indIndexScale, load_long_indirect, load_long_indOffset32, baseIndexOffset8, baseIndexOffset8_convI2L, indIndexScaleOffset8, indIndexScaleOffset8_convI2L, basePosIndexScaleOffset8, indIndexScaleOffsetNarrow, indPosIndexI2LScaleOffset8Narrow, indOffset8Narrow, indIndexOffset8Narrow);
5559 //----------PIPELINE-----------------------------------------------------------
5560 // Rules which define the behavior of the target architectures pipeline.
5562 pipeline %{
5564 //----------ATTRIBUTES---------------------------------------------------------
5565 attributes %{
5566 fixed_size_instructions; // Fixed size instructions
5567 branch_has_delay_slot; // branch have delay slot in gs2
5568 max_instructions_per_bundle = 1; // 1 instruction per bundle
5569 max_bundles_per_cycle = 4; // Up to 4 bundles per cycle
5570 bundle_unit_size=4;
5571 instruction_unit_size = 4; // An instruction is 4 bytes long
5572 instruction_fetch_unit_size = 16; // The processor fetches one line
5573 instruction_fetch_units = 1; // of 16 bytes
5575 // List of nop instructions
5576 nops( MachNop );
5577 %}
5579 //----------RESOURCES----------------------------------------------------------
5580 // Resources are the functional units available to the machine
5582 resources(D1, D2, D3, D4, DECODE = D1 | D2 | D3| D4, ALU1, ALU2, ALU = ALU1 | ALU2, FPU1, FPU2, FPU = FPU1 | FPU2, MEM, BR);
5584 //----------PIPELINE DESCRIPTION-----------------------------------------------
5585 // Pipeline Description specifies the stages in the machine's pipeline
5587 // IF: fetch
5588 // ID: decode
5589 // RD: read
5590 // CA: caculate
5591 // WB: write back
5592 // CM: commit
5594 pipe_desc(IF, ID, RD, CA, WB, CM);
5597 //----------PIPELINE CLASSES---------------------------------------------------
5598 // Pipeline Classes describe the stages in which input and output are
5599 // referenced by the hardware pipeline.
5601 //No.1 Integer ALU reg-reg operation : dst <-- reg1 op reg2
5602 pipe_class ialu_regI_regI(mRegI dst, mRegI src1, mRegI src2) %{
5603 single_instruction;
5604 src1 : RD(read);
5605 src2 : RD(read);
5606 dst : WB(write)+1;
5607 DECODE : ID;
5608 ALU : CA;
5609 %}
5611 //No.19 Integer mult operation : dst <-- reg1 mult reg2
5612 pipe_class ialu_mult(mRegI dst, mRegI src1, mRegI src2) %{
5613 src1 : RD(read);
5614 src2 : RD(read);
5615 dst : WB(write)+5;
5616 DECODE : ID;
5617 ALU2 : CA;
5618 %}
5620 pipe_class mulL_reg_reg(mRegL dst, mRegL src1, mRegL src2) %{
5621 src1 : RD(read);
5622 src2 : RD(read);
5623 dst : WB(write)+10;
5624 DECODE : ID;
5625 ALU2 : CA;
5626 %}
5628 //No.19 Integer div operation : dst <-- reg1 div reg2
5629 pipe_class ialu_div(mRegI dst, mRegI src1, mRegI src2) %{
5630 src1 : RD(read);
5631 src2 : RD(read);
5632 dst : WB(write)+10;
5633 DECODE : ID;
5634 ALU2 : CA;
5635 %}
5637 //No.19 Integer mod operation : dst <-- reg1 mod reg2
5638 pipe_class ialu_mod(mRegI dst, mRegI src1, mRegI src2) %{
5639 instruction_count(2);
5640 src1 : RD(read);
5641 src2 : RD(read);
5642 dst : WB(write)+10;
5643 DECODE : ID;
5644 ALU2 : CA;
5645 %}
5647 //No.15 Long ALU reg-reg operation : dst <-- reg1 op reg2
5648 pipe_class ialu_regL_regL(mRegL dst, mRegL src1, mRegL src2) %{
5649 instruction_count(2);
5650 src1 : RD(read);
5651 src2 : RD(read);
5652 dst : WB(write);
5653 DECODE : ID;
5654 ALU : CA;
5655 %}
5657 //No.18 Long ALU reg-imm16 operation : dst <-- reg1 op imm16
5658 pipe_class ialu_regL_imm16(mRegL dst, mRegL src) %{
5659 instruction_count(2);
5660 src : RD(read);
5661 dst : WB(write);
5662 DECODE : ID;
5663 ALU : CA;
5664 %}
5666 //no.16 load Long from memory :
5667 pipe_class ialu_loadL(mRegL dst, memory mem) %{
5668 instruction_count(2);
5669 mem : RD(read);
5670 dst : WB(write)+5;
5671 DECODE : ID;
5672 MEM : RD;
5673 %}
5675 //No.17 Store Long to Memory :
5676 pipe_class ialu_storeL(mRegL src, memory mem) %{
5677 instruction_count(2);
5678 mem : RD(read);
5679 src : RD(read);
5680 DECODE : ID;
5681 MEM : RD;
5682 %}
5684 //No.2 Integer ALU reg-imm16 operation : dst <-- reg1 op imm16
5685 pipe_class ialu_regI_imm16(mRegI dst, mRegI src) %{
5686 single_instruction;
5687 src : RD(read);
5688 dst : WB(write);
5689 DECODE : ID;
5690 ALU : CA;
5691 %}
5693 //No.3 Integer move operation : dst <-- reg
5694 pipe_class ialu_regI_mov(mRegI dst, mRegI src) %{
5695 src : RD(read);
5696 dst : WB(write);
5697 DECODE : ID;
5698 ALU : CA;
5699 %}
5701 //No.4 No instructions : do nothing
5702 pipe_class empty( ) %{
5703 instruction_count(0);
5704 %}
5706 //No.5 UnConditional branch :
5707 pipe_class pipe_jump( label labl ) %{
5708 multiple_bundles;
5709 DECODE : ID;
5710 BR : RD;
5711 %}
5713 //No.6 ALU Conditional branch :
5714 pipe_class pipe_alu_branch(mRegI src1, mRegI src2, label labl ) %{
5715 multiple_bundles;
5716 src1 : RD(read);
5717 src2 : RD(read);
5718 DECODE : ID;
5719 BR : RD;
5720 %}
5722 //no.7 load integer from memory :
5723 pipe_class ialu_loadI(mRegI dst, memory mem) %{
5724 mem : RD(read);
5725 dst : WB(write)+3;
5726 DECODE : ID;
5727 MEM : RD;
5728 %}
5730 //No.8 Store Integer to Memory :
5731 pipe_class ialu_storeI(mRegI src, memory mem) %{
5732 mem : RD(read);
5733 src : RD(read);
5734 DECODE : ID;
5735 MEM : RD;
5736 %}
5739 //No.10 Floating FPU reg-reg operation : dst <-- reg1 op reg2
5740 pipe_class fpu_regF_regF(regF dst, regF src1, regF src2) %{
5741 src1 : RD(read);
5742 src2 : RD(read);
5743 dst : WB(write);
5744 DECODE : ID;
5745 FPU : CA;
5746 %}
5748 //No.22 Floating div operation : dst <-- reg1 div reg2
5749 pipe_class fpu_div(regF dst, regF src1, regF src2) %{
5750 src1 : RD(read);
5751 src2 : RD(read);
5752 dst : WB(write);
5753 DECODE : ID;
5754 FPU2 : CA;
5755 %}
5757 pipe_class fcvt_I2D(regD dst, mRegI src) %{
5758 src : RD(read);
5759 dst : WB(write);
5760 DECODE : ID;
5761 FPU1 : CA;
5762 %}
5764 pipe_class fcvt_D2I(mRegI dst, regD src) %{
5765 src : RD(read);
5766 dst : WB(write);
5767 DECODE : ID;
5768 FPU1 : CA;
5769 %}
5771 pipe_class pipe_mfc1(mRegI dst, regD src) %{
5772 src : RD(read);
5773 dst : WB(write);
5774 DECODE : ID;
5775 MEM : RD;
5776 %}
5778 pipe_class pipe_mtc1(regD dst, mRegI src) %{
5779 src : RD(read);
5780 dst : WB(write);
5781 DECODE : ID;
5782 MEM : RD(5);
5783 %}
5785 //No.23 Floating sqrt operation : dst <-- reg1 sqrt reg2
5786 pipe_class fpu_sqrt(regF dst, regF src1, regF src2) %{
5787 multiple_bundles;
5788 src1 : RD(read);
5789 src2 : RD(read);
5790 dst : WB(write);
5791 DECODE : ID;
5792 FPU2 : CA;
5793 %}
5795 //No.11 Load Floating from Memory :
5796 pipe_class fpu_loadF(regF dst, memory mem) %{
5797 instruction_count(1);
5798 mem : RD(read);
5799 dst : WB(write)+3;
5800 DECODE : ID;
5801 MEM : RD;
5802 %}
5804 //No.12 Store Floating to Memory :
5805 pipe_class fpu_storeF(regF src, memory mem) %{
5806 instruction_count(1);
5807 mem : RD(read);
5808 src : RD(read);
5809 DECODE : ID;
5810 MEM : RD;
5811 %}
5813 //No.13 FPU Conditional branch :
5814 pipe_class pipe_fpu_branch(regF src1, regF src2, label labl ) %{
5815 multiple_bundles;
5816 src1 : RD(read);
5817 src2 : RD(read);
5818 DECODE : ID;
5819 BR : RD;
5820 %}
5822 //No.14 Floating FPU reg operation : dst <-- op reg
5823 pipe_class fpu1_regF(regF dst, regF src) %{
5824 src : RD(read);
5825 dst : WB(write);
5826 DECODE : ID;
5827 FPU : CA;
5828 %}
5830 pipe_class long_memory_op() %{
5831 instruction_count(10); multiple_bundles; force_serialization;
5832 fixed_latency(30);
5833 %}
5835 pipe_class simple_call() %{
5836 instruction_count(10); multiple_bundles; force_serialization;
5837 fixed_latency(200);
5838 BR : RD;
5839 %}
5841 pipe_class call() %{
5842 instruction_count(10); multiple_bundles; force_serialization;
5843 fixed_latency(200);
5844 %}
5846 //FIXME:
5847 //No.9 Piple slow : for multi-instructions
5848 pipe_class pipe_slow( ) %{
5849 instruction_count(20);
5850 force_serialization;
5851 multiple_bundles;
5852 fixed_latency(50);
5853 %}
5855 %}
5859 //----------INSTRUCTIONS-------------------------------------------------------
5860 //
5861 // match -- States which machine-independent subtree may be replaced
5862 // by this instruction.
5863 // ins_cost -- The estimated cost of this instruction is used by instruction
5864 // selection to identify a minimum cost tree of machine
5865 // instructions that matches a tree of machine-independent
5866 // instructions.
5867 // format -- A string providing the disassembly for this instruction.
5868 // The value of an instruction's operand may be inserted
5869 // by referring to it with a '$' prefix.
5870 // opcode -- Three instruction opcodes may be provided. These are referred
5871 // to within an encode class as $primary, $secondary, and $tertiary
5872 // respectively. The primary opcode is commonly used to
5873 // indicate the type of machine instruction, while secondary
5874 // and tertiary are often used for prefix options or addressing
5875 // modes.
5876 // ins_encode -- A list of encode classes with parameters. The encode class
5877 // name must have been defined in an 'enc_class' specification
5878 // in the encode section of the architecture description.
5881 // Load Integer
5882 instruct loadI(mRegI dst, memory mem) %{
5883 match(Set dst (LoadI mem));
5885 ins_cost(125);
5886 format %{ "lw $dst, $mem #@loadI" %}
5887 ins_encode (load_I_enc(dst, mem));
5888 ins_pipe( ialu_loadI );
5889 %}
5891 instruct loadI_convI2L(mRegL dst, memory mem) %{
5892 match(Set dst (ConvI2L (LoadI mem)));
5894 ins_cost(125);
5895 format %{ "lw $dst, $mem #@loadI_convI2L" %}
5896 ins_encode (load_I_enc(dst, mem));
5897 ins_pipe( ialu_loadI );
5898 %}
5900 // Load Integer (32 bit signed) to Byte (8 bit signed)
5901 instruct loadI2B(mRegI dst, memory mem, immI_24 twentyfour) %{
5902 match(Set dst (RShiftI (LShiftI (LoadI mem) twentyfour) twentyfour));
5904 ins_cost(125);
5905 format %{ "lb $dst, $mem\t# int -> byte #@loadI2B" %}
5906 ins_encode(load_B_enc(dst, mem));
5907 ins_pipe(ialu_loadI);
5908 %}
5910 // Load Integer (32 bit signed) to Unsigned Byte (8 bit UNsigned)
5911 instruct loadI2UB(mRegI dst, memory mem, immI_255 mask) %{
5912 match(Set dst (AndI (LoadI mem) mask));
5914 ins_cost(125);
5915 format %{ "lbu $dst, $mem\t# int -> ubyte #@loadI2UB" %}
5916 ins_encode(load_UB_enc(dst, mem));
5917 ins_pipe(ialu_loadI);
5918 %}
5920 // Load Integer (32 bit signed) to Short (16 bit signed)
5921 instruct loadI2S(mRegI dst, memory mem, immI_16 sixteen) %{
5922 match(Set dst (RShiftI (LShiftI (LoadI mem) sixteen) sixteen));
5924 ins_cost(125);
5925 format %{ "lh $dst, $mem\t# int -> short #@loadI2S" %}
5926 ins_encode(load_S_enc(dst, mem));
5927 ins_pipe(ialu_loadI);
5928 %}
5930 // Load Integer (32 bit signed) to Unsigned Short/Char (16 bit UNsigned)
5931 instruct loadI2US(mRegI dst, memory mem, immI_65535 mask) %{
5932 match(Set dst (AndI (LoadI mem) mask));
5934 ins_cost(125);
5935 format %{ "lhu $dst, $mem\t# int -> ushort/char #@loadI2US" %}
5936 ins_encode(load_C_enc(dst, mem));
5937 ins_pipe(ialu_loadI);
5938 %}
5940 // Load Long.
5941 instruct loadL(mRegL dst, memory mem) %{
5942 // predicate(!((LoadLNode*)n)->require_atomic_access());
5943 match(Set dst (LoadL mem));
5945 ins_cost(250);
5946 format %{ "ld $dst, $mem #@loadL" %}
5947 ins_encode(load_L_enc(dst, mem));
5948 ins_pipe( ialu_loadL );
5949 %}
5951 // Load Long - UNaligned
5952 instruct loadL_unaligned(mRegL dst, memory mem) %{
5953 match(Set dst (LoadL_unaligned mem));
5955 // FIXME: Jin: Need more effective ldl/ldr
5956 ins_cost(450);
5957 format %{ "ld $dst, $mem #@loadL_unaligned\n\t" %}
5958 ins_encode(load_L_enc(dst, mem));
5959 ins_pipe( ialu_loadL );
5960 %}
5962 // Store Long
5963 instruct storeL_reg(memory mem, mRegL src) %{
5964 match(Set mem (StoreL mem src));
5966 ins_cost(200);
5967 format %{ "sd $mem, $src #@storeL_reg\n" %}
5968 ins_encode(store_L_reg_enc(mem, src));
5969 ins_pipe( ialu_storeL );
5970 %}
5973 instruct storeL_immL0(memory mem, immL0 zero) %{
5974 match(Set mem (StoreL mem zero));
5976 ins_cost(180);
5977 format %{ "sd $mem, zero #@storeL_immL0" %}
5978 ins_encode(store_L_immL0_enc(mem, zero));
5979 ins_pipe( ialu_storeL );
5980 %}
5982 // Load Compressed Pointer
5983 instruct loadN(mRegN dst, memory mem)
5984 %{
5985 match(Set dst (LoadN mem));
5987 ins_cost(125); // XXX
5988 format %{ "lwu $dst, $mem\t# compressed ptr @ loadN" %}
5989 ins_encode (load_N_enc(dst, mem));
5990 ins_pipe( ialu_loadI ); // XXX
5991 %}
5993 // Load Pointer
5994 instruct loadP(mRegP dst, memory mem) %{
5995 match(Set dst (LoadP mem));
5997 ins_cost(125);
5998 format %{ "ld $dst, $mem #@loadP" %}
5999 ins_encode (load_P_enc(dst, mem));
6000 ins_pipe( ialu_loadI );
6001 %}
6003 // Load Klass Pointer
6004 instruct loadKlass(mRegP dst, memory mem) %{
6005 match(Set dst (LoadKlass mem));
6007 ins_cost(125);
6008 format %{ "MOV $dst,$mem @ loadKlass" %}
6009 ins_encode (load_P_enc(dst, mem));
6010 ins_pipe( ialu_loadI );
6011 %}
6013 // Load narrow Klass Pointer
6014 instruct loadNKlass(mRegN dst, memory mem)
6015 %{
6016 match(Set dst (LoadNKlass mem));
6018 ins_cost(125); // XXX
6019 format %{ "lwu $dst, $mem\t# compressed klass ptr @ loadNKlass" %}
6020 ins_encode (load_N_enc(dst, mem));
6021 ins_pipe( ialu_loadI ); // XXX
6022 %}
6024 // Load Constant
6025 instruct loadConI(mRegI dst, immI src) %{
6026 match(Set dst src);
6028 ins_cost(150);
6029 format %{ "mov $dst, $src #@loadConI" %}
6030 ins_encode %{
6031 Register dst = $dst$$Register;
6032 int value = $src$$constant;
6033 __ move(dst, value);
6034 %}
6035 ins_pipe( ialu_regI_regI );
6036 %}
6039 instruct loadConL_set64(mRegL dst, immL src) %{
6040 match(Set dst src);
6041 ins_cost(120);
6042 format %{ "li $dst, $src @ loadConL_set64" %}
6043 ins_encode %{
6044 __ set64($dst$$Register, $src$$constant);
6045 %}
6046 ins_pipe(ialu_regL_regL);
6047 %}
6049 /*
6050 // Load long value from constant table (predicated by immL_expensive).
6051 instruct loadConL_load(mRegL dst, immL_expensive src) %{
6052 match(Set dst src);
6053 ins_cost(150);
6054 format %{ "ld $dst, $constantoffset[$constanttablebase] # load long $src from table @ loadConL_ldx" %}
6055 ins_encode %{
6056 int con_offset = $constantoffset($src);
6058 if (Assembler::is_simm16(con_offset)) {
6059 __ ld($dst$$Register, $constanttablebase, con_offset);
6060 } else {
6061 __ set64(AT, con_offset);
6062 if (UseLoongsonISA) {
6063 __ gsldx($dst$$Register, $constanttablebase, AT, 0);
6064 } else {
6065 __ daddu(AT, $constanttablebase, AT);
6066 __ ld($dst$$Register, AT, 0);
6067 }
6068 }
6069 %}
6070 ins_pipe(ialu_loadI);
6071 %}
6072 */
6074 instruct loadConL16(mRegL dst, immL16 src) %{
6075 match(Set dst src);
6076 ins_cost(105);
6077 format %{ "mov $dst, $src #@loadConL16" %}
6078 ins_encode %{
6079 Register dst_reg = as_Register($dst$$reg);
6080 int value = $src$$constant;
6081 __ daddiu(dst_reg, R0, value);
6082 %}
6083 ins_pipe( ialu_regL_regL );
6084 %}
6087 instruct loadConL0(mRegL dst, immL0 src) %{
6088 match(Set dst src);
6089 ins_cost(100);
6090 format %{ "mov $dst, zero #@loadConL0" %}
6091 ins_encode %{
6092 Register dst_reg = as_Register($dst$$reg);
6093 __ daddu(dst_reg, R0, R0);
6094 %}
6095 ins_pipe( ialu_regL_regL );
6096 %}
6098 // Load Range
6099 instruct loadRange(mRegI dst, memory mem) %{
6100 match(Set dst (LoadRange mem));
6102 ins_cost(125);
6103 format %{ "MOV $dst,$mem @ loadRange" %}
6104 ins_encode(load_I_enc(dst, mem));
6105 ins_pipe( ialu_loadI );
6106 %}
6109 instruct storeP(memory mem, mRegP src ) %{
6110 match(Set mem (StoreP mem src));
6112 ins_cost(125);
6113 format %{ "sd $src, $mem #@storeP" %}
6114 ins_encode(store_P_reg_enc(mem, src));
6115 ins_pipe( ialu_storeI );
6116 %}
6118 // Store NULL Pointer, mark word, or other simple pointer constant.
6119 instruct storeImmP0(memory mem, immP0 zero) %{
6120 match(Set mem (StoreP mem zero));
6122 ins_cost(125);
6123 format %{ "mov $mem, $zero #@storeImmP0" %}
6124 ins_encode(store_P_immP0_enc(mem));
6125 ins_pipe( ialu_storeI );
6126 %}
6128 // Store Byte Immediate
6129 instruct storeImmB(memory mem, immI8 src) %{
6130 match(Set mem (StoreB mem src));
6132 ins_cost(150);
6133 format %{ "movb $mem, $src #@storeImmB" %}
6134 ins_encode(store_B_immI_enc(mem, src));
6135 ins_pipe( ialu_storeI );
6136 %}
6138 // Store Compressed Pointer
6139 instruct storeN(memory mem, mRegN src)
6140 %{
6141 match(Set mem (StoreN mem src));
6143 ins_cost(125); // XXX
6144 format %{ "sw $mem, $src\t# compressed ptr @ storeN" %}
6145 ins_encode(store_N_reg_enc(mem, src));
6146 ins_pipe( ialu_storeI );
6147 %}
6149 instruct storeNKlass(memory mem, mRegN src)
6150 %{
6151 match(Set mem (StoreNKlass mem src));
6153 ins_cost(125); // XXX
6154 format %{ "sw $mem, $src\t# compressed klass ptr @ storeNKlass" %}
6155 ins_encode(store_N_reg_enc(mem, src));
6156 ins_pipe( ialu_storeI );
6157 %}
6159 instruct storeImmN0(memory mem, immN0 zero)
6160 %{
6161 predicate(Universe::narrow_oop_base() == NULL && Universe::narrow_klass_base() == NULL);
6162 match(Set mem (StoreN mem zero));
6164 ins_cost(125); // XXX
6165 format %{ "storeN0 $mem, R12\t# compressed ptr" %}
6166 ins_encode(storeImmN0_enc(mem, zero));
6167 ins_pipe( ialu_storeI );
6168 %}
6170 // Store Byte
6171 instruct storeB(memory mem, mRegI src) %{
6172 match(Set mem (StoreB mem src));
6174 ins_cost(125);
6175 format %{ "sb $src, $mem #@storeB" %}
6176 ins_encode(store_B_reg_enc(mem, src));
6177 ins_pipe( ialu_storeI );
6178 %}
6180 instruct storeB_convL2I(memory mem, mRegL src) %{
6181 match(Set mem (StoreB mem (ConvL2I src)));
6183 ins_cost(125);
6184 format %{ "sb $src, $mem #@storeB_convL2I" %}
6185 ins_encode(store_B_reg_enc(mem, src));
6186 ins_pipe( ialu_storeI );
6187 %}
6189 // Load Byte (8bit signed)
6190 instruct loadB(mRegI dst, memory mem) %{
6191 match(Set dst (LoadB mem));
6193 ins_cost(125);
6194 format %{ "lb $dst, $mem #@loadB" %}
6195 ins_encode(load_B_enc(dst, mem));
6196 ins_pipe( ialu_loadI );
6197 %}
6199 instruct loadB_convI2L(mRegL dst, memory mem) %{
6200 match(Set dst (ConvI2L (LoadB mem)));
6202 ins_cost(125);
6203 format %{ "lb $dst, $mem #@loadB_convI2L" %}
6204 ins_encode(load_B_enc(dst, mem));
6205 ins_pipe( ialu_loadI );
6206 %}
6208 // Load Byte (8bit UNsigned)
6209 instruct loadUB(mRegI dst, memory mem) %{
6210 match(Set dst (LoadUB mem));
6212 ins_cost(125);
6213 format %{ "lbu $dst, $mem #@loadUB" %}
6214 ins_encode(load_UB_enc(dst, mem));
6215 ins_pipe( ialu_loadI );
6216 %}
6218 instruct loadUB_convI2L(mRegL dst, memory mem) %{
6219 match(Set dst (ConvI2L (LoadUB mem)));
6221 ins_cost(125);
6222 format %{ "lbu $dst, $mem #@loadUB_convI2L" %}
6223 ins_encode(load_UB_enc(dst, mem));
6224 ins_pipe( ialu_loadI );
6225 %}
6227 // Load Short (16bit signed)
6228 instruct loadS(mRegI dst, memory mem) %{
6229 match(Set dst (LoadS mem));
6231 ins_cost(125);
6232 format %{ "lh $dst, $mem #@loadS" %}
6233 ins_encode(load_S_enc(dst, mem));
6234 ins_pipe( ialu_loadI );
6235 %}
6237 // Load Short (16 bit signed) to Byte (8 bit signed)
6238 instruct loadS2B(mRegI dst, memory mem, immI_24 twentyfour) %{
6239 match(Set dst (RShiftI (LShiftI (LoadS mem) twentyfour) twentyfour));
6241 ins_cost(125);
6242 format %{ "lb $dst, $mem\t# short -> byte #@loadS2B" %}
6243 ins_encode(load_B_enc(dst, mem));
6244 ins_pipe(ialu_loadI);
6245 %}
6247 instruct loadS_convI2L(mRegL dst, memory mem) %{
6248 match(Set dst (ConvI2L (LoadS mem)));
6250 ins_cost(125);
6251 format %{ "lh $dst, $mem #@loadS_convI2L" %}
6252 ins_encode(load_S_enc(dst, mem));
6253 ins_pipe( ialu_loadI );
6254 %}
6256 // Store Integer Immediate
6257 instruct storeImmI(memory mem, immI src) %{
6258 match(Set mem (StoreI mem src));
6260 ins_cost(150);
6261 format %{ "mov $mem, $src #@storeImmI" %}
6262 ins_encode(store_I_immI_enc(mem, src));
6263 ins_pipe( ialu_storeI );
6264 %}
6266 // Store Integer
6267 instruct storeI(memory mem, mRegI src) %{
6268 match(Set mem (StoreI mem src));
6270 ins_cost(125);
6271 format %{ "sw $mem, $src #@storeI" %}
6272 ins_encode(store_I_reg_enc(mem, src));
6273 ins_pipe( ialu_storeI );
6274 %}
6276 instruct storeI_convL2I(memory mem, mRegL src) %{
6277 match(Set mem (StoreI mem (ConvL2I src)));
6279 ins_cost(125);
6280 format %{ "sw $mem, $src #@storeI_convL2I" %}
6281 ins_encode(store_I_reg_enc(mem, src));
6282 ins_pipe( ialu_storeI );
6283 %}
6285 // Load Float
6286 instruct loadF(regF dst, memory mem) %{
6287 match(Set dst (LoadF mem));
6289 ins_cost(150);
6290 format %{ "loadF $dst, $mem #@loadF" %}
6291 ins_encode(load_F_enc(dst, mem));
6292 ins_pipe( ialu_loadI );
6293 %}
6295 instruct loadConP_general(mRegP dst, immP src) %{
6296 match(Set dst src);
6298 ins_cost(120);
6299 format %{ "li $dst, $src #@loadConP_general" %}
6301 ins_encode %{
6302 Register dst = $dst$$Register;
6303 long* value = (long*)$src$$constant;
6305 if($src->constant_reloc() == relocInfo::metadata_type){
6306 int klass_index = __ oop_recorder()->find_index((Klass*)value);
6307 RelocationHolder rspec = metadata_Relocation::spec(klass_index);
6309 __ relocate(rspec);
6310 __ li48(dst, (long)value);
6311 }else if($src->constant_reloc() == relocInfo::oop_type){
6312 int oop_index = __ oop_recorder()->find_index((jobject)value);
6313 RelocationHolder rspec = oop_Relocation::spec(oop_index);
6315 __ relocate(rspec);
6316 __ li48(dst, (long)value);
6317 } else if ($src->constant_reloc() == relocInfo::none) {
6318 __ set64(dst, (long)value);
6319 }
6320 %}
6322 ins_pipe( ialu_regI_regI );
6323 %}
6325 /*
6326 instruct loadConP_load(mRegP dst, immP_load src) %{
6327 match(Set dst src);
6329 ins_cost(100);
6330 format %{ "ld $dst, [$constanttablebase + $constantoffset] load from constant table: ptr=$src @ loadConP_load" %}
6332 ins_encode %{
6334 int con_offset = $constantoffset($src);
6336 if (Assembler::is_simm16(con_offset)) {
6337 __ ld($dst$$Register, $constanttablebase, con_offset);
6338 } else {
6339 __ set64(AT, con_offset);
6340 if (UseLoongsonISA) {
6341 __ gsldx($dst$$Register, $constanttablebase, AT, 0);
6342 } else {
6343 __ daddu(AT, $constanttablebase, AT);
6344 __ ld($dst$$Register, AT, 0);
6345 }
6346 }
6347 %}
6349 ins_pipe(ialu_loadI);
6350 %}
6351 */
6353 instruct loadConP_no_oop_cheap(mRegP dst, immP_no_oop_cheap src) %{
6354 match(Set dst src);
6356 ins_cost(80);
6357 format %{ "li $dst, $src @ loadConP_no_oop_cheap" %}
6359 ins_encode %{
6360 __ set64($dst$$Register, $src$$constant);
6361 %}
6363 ins_pipe(ialu_regI_regI);
6364 %}
6367 instruct loadConP_poll(mRegP dst, immP_poll src) %{
6368 match(Set dst src);
6370 ins_cost(50);
6371 format %{ "li $dst, $src #@loadConP_poll" %}
6373 ins_encode %{
6374 Register dst = $dst$$Register;
6375 intptr_t value = (intptr_t)$src$$constant;
6377 __ set64(dst, (jlong)value);
6378 %}
6380 ins_pipe( ialu_regI_regI );
6381 %}
6383 instruct loadConP0(mRegP dst, immP0 src)
6384 %{
6385 match(Set dst src);
6387 ins_cost(50);
6388 format %{ "mov $dst, R0\t# ptr" %}
6389 ins_encode %{
6390 Register dst_reg = $dst$$Register;
6391 __ daddu(dst_reg, R0, R0);
6392 %}
6393 ins_pipe( ialu_regI_regI );
6394 %}
6396 instruct loadConN0(mRegN dst, immN0 src) %{
6397 match(Set dst src);
6398 format %{ "move $dst, R0\t# compressed NULL ptr" %}
6399 ins_encode %{
6400 __ move($dst$$Register, R0);
6401 %}
6402 ins_pipe( ialu_regI_regI );
6403 %}
6405 instruct loadConN(mRegN dst, immN src) %{
6406 match(Set dst src);
6408 ins_cost(125);
6409 format %{ "li $dst, $src\t# compressed ptr @ loadConN" %}
6410 ins_encode %{
6411 Register dst = $dst$$Register;
6412 __ set_narrow_oop(dst, (jobject)$src$$constant);
6413 %}
6414 ins_pipe( ialu_regI_regI ); // XXX
6415 %}
6417 instruct loadConNKlass(mRegN dst, immNKlass src) %{
6418 match(Set dst src);
6420 ins_cost(125);
6421 format %{ "li $dst, $src\t# compressed klass ptr @ loadConNKlass" %}
6422 ins_encode %{
6423 Register dst = $dst$$Register;
6424 __ set_narrow_klass(dst, (Klass*)$src$$constant);
6425 %}
6426 ins_pipe( ialu_regI_regI ); // XXX
6427 %}
6429 //FIXME
6430 // Tail Call; Jump from runtime stub to Java code.
6431 // Also known as an 'interprocedural jump'.
6432 // Target of jump will eventually return to caller.
6433 // TailJump below removes the return address.
6434 instruct TailCalljmpInd(mRegP jump_target, mRegP method_oop) %{
6435 match(TailCall jump_target method_oop );
6436 ins_cost(300);
6437 format %{ "JMP $jump_target \t# @TailCalljmpInd" %}
6439 ins_encode %{
6440 Register target = $jump_target$$Register;
6441 Register oop = $method_oop$$Register;
6443 /* 2012/10/12 Jin: RA will be used in generate_forward_exception() */
6444 __ push(RA);
6446 __ move(S3, oop);
6447 __ jr(target);
6448 __ nop();
6449 %}
6451 ins_pipe( pipe_jump );
6452 %}
6454 // Create exception oop: created by stack-crawling runtime code.
6455 // Created exception is now available to this handler, and is setup
6456 // just prior to jumping to this handler. No code emitted.
6457 instruct CreateException( a0_RegP ex_oop )
6458 %{
6459 match(Set ex_oop (CreateEx));
6461 // use the following format syntax
6462 format %{ "# exception oop is in A0; no code emitted @CreateException" %}
6463 ins_encode %{
6464 /* Jin: X86 leaves this function empty */
6465 __ block_comment("CreateException is empty in X86/MIPS");
6466 %}
6467 ins_pipe( empty );
6468 // ins_pipe( pipe_jump );
6469 %}
6472 /* 2012/9/14 Jin: The mechanism of exception handling is clear now.
6474 - Common try/catch:
6475 2012/9/14 Jin: [stubGenerator_mips.cpp] generate_forward_exception()
6476 |- V0, V1 are created
6477 |- T9 <= SharedRuntime::exception_handler_for_return_address
6478 `- jr T9
6479 `- the caller's exception_handler
6480 `- jr OptoRuntime::exception_blob
6481 `- here
6482 - Rethrow(e.g. 'unwind'):
6483 * The callee:
6484 |- an exception is triggered during execution
6485 `- exits the callee method through RethrowException node
6486 |- The callee pushes exception_oop(T0) and exception_pc(RA)
6487 `- The callee jumps to OptoRuntime::rethrow_stub()
6488 * In OptoRuntime::rethrow_stub:
6489 |- The VM calls _rethrow_Java to determine the return address in the caller method
6490 `- exits the stub with tailjmpInd
6491 |- pops exception_oop(V0) and exception_pc(V1)
6492 `- jumps to the return address(usually an exception_handler)
6493 * The caller:
6494 `- continues processing the exception_blob with V0/V1
6495 */
6497 /*
6498 Disassembling OptoRuntime::rethrow_stub()
6500 ; locals
6501 0x2d3bf320: addiu sp, sp, 0xfffffff8
6502 0x2d3bf324: sw ra, 0x4(sp)
6503 0x2d3bf328: sw fp, 0x0(sp)
6504 0x2d3bf32c: addu fp, sp, zero
6505 0x2d3bf330: addiu sp, sp, 0xfffffff0
6506 0x2d3bf334: sw ra, 0x8(sp)
6507 0x2d3bf338: sw t0, 0x4(sp)
6508 0x2d3bf33c: sw sp, 0x0(sp)
6510 ; get_thread(S2)
6511 0x2d3bf340: addu s2, sp, zero
6512 0x2d3bf344: srl s2, s2, 12
6513 0x2d3bf348: sll s2, s2, 2
6514 0x2d3bf34c: lui at, 0x2c85
6515 0x2d3bf350: addu at, at, s2
6516 0x2d3bf354: lw s2, 0xffffcc80(at)
6518 0x2d3bf358: lw s0, 0x0(sp)
6519 0x2d3bf35c: sw s0, 0x118(s2) // last_sp -> threa
6520 0x2d3bf360: sw s2, 0xc(sp)
6522 ; OptoRuntime::rethrow_C(oopDesc* exception, JavaThread* thread, address ret_pc)
6523 0x2d3bf364: lw a0, 0x4(sp)
6524 0x2d3bf368: lw a1, 0xc(sp)
6525 0x2d3bf36c: lw a2, 0x8(sp)
6526 ;; Java_To_Runtime
6527 0x2d3bf370: lui t9, 0x2c34
6528 0x2d3bf374: addiu t9, t9, 0xffff8a48
6529 0x2d3bf378: jalr t9
6530 0x2d3bf37c: nop
6532 0x2d3bf380: addu s3, v0, zero ; S3: SharedRuntime::raw_exception_handler_for_return_address()
6534 0x2d3bf384: lw s0, 0xc(sp)
6535 0x2d3bf388: sw zero, 0x118(s0)
6536 0x2d3bf38c: sw zero, 0x11c(s0)
6537 0x2d3bf390: lw s1, 0x144(s0) ; ex_oop: S1
6538 0x2d3bf394: addu s2, s0, zero
6539 0x2d3bf398: sw zero, 0x144(s2)
6540 0x2d3bf39c: lw s0, 0x4(s2)
6541 0x2d3bf3a0: addiu s4, zero, 0x0
6542 0x2d3bf3a4: bne s0, s4, 0x2d3bf3d4
6543 0x2d3bf3a8: nop
6544 0x2d3bf3ac: addiu sp, sp, 0x10
6545 0x2d3bf3b0: addiu sp, sp, 0x8
6546 0x2d3bf3b4: lw ra, 0xfffffffc(sp)
6547 0x2d3bf3b8: lw fp, 0xfffffff8(sp)
6548 0x2d3bf3bc: lui at, 0x2b48
6549 0x2d3bf3c0: lw at, 0x100(at)
6551 ; tailjmpInd: Restores exception_oop & exception_pc
6552 0x2d3bf3c4: addu v1, ra, zero
6553 0x2d3bf3c8: addu v0, s1, zero
6554 0x2d3bf3cc: jr s3
6555 0x2d3bf3d0: nop
6556 ; Exception:
6557 0x2d3bf3d4: lui s1, 0x2cc8 ; generate_forward_exception()
6558 0x2d3bf3d8: addiu s1, s1, 0x40
6559 0x2d3bf3dc: addiu s2, zero, 0x0
6560 0x2d3bf3e0: addiu sp, sp, 0x10
6561 0x2d3bf3e4: addiu sp, sp, 0x8
6562 0x2d3bf3e8: lw ra, 0xfffffffc(sp)
6563 0x2d3bf3ec: lw fp, 0xfffffff8(sp)
6564 0x2d3bf3f0: lui at, 0x2b48
6565 0x2d3bf3f4: lw at, 0x100(at)
6566 ; TailCalljmpInd
6567 __ push(RA); ; to be used in generate_forward_exception()
6568 0x2d3bf3f8: addu t7, s2, zero
6569 0x2d3bf3fc: jr s1
6570 0x2d3bf400: nop
6571 */
6572 // Rethrow exception:
6573 // The exception oop will come in the first argument position.
6574 // Then JUMP (not call) to the rethrow stub code.
6575 instruct RethrowException()
6576 %{
6577 match(Rethrow);
6579 // use the following format syntax
6580 format %{ "JMP rethrow_stub #@RethrowException" %}
6581 ins_encode %{
6582 __ block_comment("@ RethrowException");
6584 cbuf.set_insts_mark();
6585 cbuf.relocate(cbuf.insts_mark(), runtime_call_Relocation::spec());
6587 // call OptoRuntime::rethrow_stub to get the exception handler in parent method
6588 __ li(T9, OptoRuntime::rethrow_stub());
6589 __ jr(T9);
6590 __ nop();
6591 %}
6592 ins_pipe( pipe_jump );
6593 %}
6595 instruct branchConP_zero(cmpOpU cmp, mRegP op1, immP0 zero, label labl) %{
6596 match(If cmp (CmpP op1 zero));
6597 effect(USE labl);
6599 ins_cost(180);
6600 format %{ "b$cmp $op1, R0, $labl #@branchConP_zero" %}
6602 ins_encode %{
6603 Register op1 = $op1$$Register;
6604 Register op2 = R0;
6605 Label &L = *($labl$$label);
6606 int flag = $cmp$$cmpcode;
6608 switch(flag)
6609 {
6610 case 0x01: //equal
6611 if (&L)
6612 __ beq(op1, op2, L);
6613 else
6614 __ beq(op1, op2, (int)0);
6615 break;
6616 case 0x02: //not_equal
6617 if (&L)
6618 __ bne(op1, op2, L);
6619 else
6620 __ bne(op1, op2, (int)0);
6621 break;
6622 /*
6623 case 0x03: //above
6624 __ sltu(AT, op2, op1);
6625 if(&L)
6626 __ bne(R0, AT, L);
6627 else
6628 __ bne(R0, AT, (int)0);
6629 break;
6630 case 0x04: //above_equal
6631 __ sltu(AT, op1, op2);
6632 if(&L)
6633 __ beq(AT, R0, L);
6634 else
6635 __ beq(AT, R0, (int)0);
6636 break;
6637 case 0x05: //below
6638 __ sltu(AT, op1, op2);
6639 if(&L)
6640 __ bne(R0, AT, L);
6641 else
6642 __ bne(R0, AT, (int)0);
6643 break;
6644 case 0x06: //below_equal
6645 __ sltu(AT, op2, op1);
6646 if(&L)
6647 __ beq(AT, R0, L);
6648 else
6649 __ beq(AT, R0, (int)0);
6650 break;
6651 */
6652 default:
6653 Unimplemented();
6654 }
6655 __ nop();
6656 %}
6658 ins_pc_relative(1);
6659 ins_pipe( pipe_alu_branch );
6660 %}
6663 instruct branchConP(cmpOpU cmp, mRegP op1, mRegP op2, label labl) %{
6664 match(If cmp (CmpP op1 op2));
6665 // predicate(can_branch_register(_kids[0]->_leaf, _kids[1]->_leaf));
6666 effect(USE labl);
6668 ins_cost(200);
6669 format %{ "b$cmp $op1, $op2, $labl #@branchConP" %}
6671 ins_encode %{
6672 Register op1 = $op1$$Register;
6673 Register op2 = $op2$$Register;
6674 Label &L = *($labl$$label);
6675 int flag = $cmp$$cmpcode;
6677 switch(flag)
6678 {
6679 case 0x01: //equal
6680 if (&L)
6681 __ beq(op1, op2, L);
6682 else
6683 __ beq(op1, op2, (int)0);
6684 break;
6685 case 0x02: //not_equal
6686 if (&L)
6687 __ bne(op1, op2, L);
6688 else
6689 __ bne(op1, op2, (int)0);
6690 break;
6691 case 0x03: //above
6692 __ sltu(AT, op2, op1);
6693 if(&L)
6694 __ bne(R0, AT, L);
6695 else
6696 __ bne(R0, AT, (int)0);
6697 break;
6698 case 0x04: //above_equal
6699 __ sltu(AT, op1, op2);
6700 if(&L)
6701 __ beq(AT, R0, L);
6702 else
6703 __ beq(AT, R0, (int)0);
6704 break;
6705 case 0x05: //below
6706 __ sltu(AT, op1, op2);
6707 if(&L)
6708 __ bne(R0, AT, L);
6709 else
6710 __ bne(R0, AT, (int)0);
6711 break;
6712 case 0x06: //below_equal
6713 __ sltu(AT, op2, op1);
6714 if(&L)
6715 __ beq(AT, R0, L);
6716 else
6717 __ beq(AT, R0, (int)0);
6718 break;
6719 default:
6720 Unimplemented();
6721 }
6722 __ nop();
6723 %}
6725 ins_pc_relative(1);
6726 ins_pipe( pipe_alu_branch );
6727 %}
6729 instruct cmpN_null_branch(cmpOp cmp, mRegN op1, immN0 null, label labl) %{
6730 match(If cmp (CmpN op1 null));
6731 effect(USE labl);
6733 ins_cost(180);
6734 format %{ "CMP $op1,0\t! compressed ptr\n\t"
6735 "BP$cmp $labl @ cmpN_null_branch" %}
6736 ins_encode %{
6737 Register op1 = $op1$$Register;
6738 Register op2 = R0;
6739 Label &L = *($labl$$label);
6740 int flag = $cmp$$cmpcode;
6742 switch(flag)
6743 {
6744 case 0x01: //equal
6745 if (&L)
6746 __ beq(op1, op2, L);
6747 else
6748 __ beq(op1, op2, (int)0);
6749 break;
6750 case 0x02: //not_equal
6751 if (&L)
6752 __ bne(op1, op2, L);
6753 else
6754 __ bne(op1, op2, (int)0);
6755 break;
6756 default:
6757 Unimplemented();
6758 }
6759 __ nop();
6760 %}
6761 //TODO: pipe_branchP or create pipe_branchN LEE
6762 ins_pc_relative(1);
6763 ins_pipe( pipe_alu_branch );
6764 %}
6766 instruct cmpN_reg_branch(cmpOp cmp, mRegN op1, mRegN op2, label labl) %{
6767 match(If cmp (CmpN op1 op2));
6768 effect(USE labl);
6770 ins_cost(180);
6771 format %{ "CMP $op1,$op2\t! compressed ptr\n\t"
6772 "BP$cmp $labl" %}
6773 ins_encode %{
6774 Register op1_reg = $op1$$Register;
6775 Register op2_reg = $op2$$Register;
6776 Label &L = *($labl$$label);
6777 int flag = $cmp$$cmpcode;
6779 switch(flag)
6780 {
6781 case 0x01: //equal
6782 if (&L)
6783 __ beq(op1_reg, op2_reg, L);
6784 else
6785 __ beq(op1_reg, op2_reg, (int)0);
6786 break;
6787 case 0x02: //not_equal
6788 if (&L)
6789 __ bne(op1_reg, op2_reg, L);
6790 else
6791 __ bne(op1_reg, op2_reg, (int)0);
6792 break;
6793 case 0x03: //above
6794 __ sltu(AT, op2_reg, op1_reg);
6795 if(&L)
6796 __ bne(R0, AT, L);
6797 else
6798 __ bne(R0, AT, (int)0);
6799 break;
6800 case 0x04: //above_equal
6801 __ sltu(AT, op1_reg, op2_reg);
6802 if(&L)
6803 __ beq(AT, R0, L);
6804 else
6805 __ beq(AT, R0, (int)0);
6806 break;
6807 case 0x05: //below
6808 __ sltu(AT, op1_reg, op2_reg);
6809 if(&L)
6810 __ bne(R0, AT, L);
6811 else
6812 __ bne(R0, AT, (int)0);
6813 break;
6814 case 0x06: //below_equal
6815 __ sltu(AT, op2_reg, op1_reg);
6816 if(&L)
6817 __ beq(AT, R0, L);
6818 else
6819 __ beq(AT, R0, (int)0);
6820 break;
6821 default:
6822 Unimplemented();
6823 }
6824 __ nop();
6825 %}
6826 ins_pc_relative(1);
6827 ins_pipe( pipe_alu_branch );
6828 %}
6830 instruct branchConIU_reg_reg(cmpOpU cmp, mRegI src1, mRegI src2, label labl) %{
6831 match( If cmp (CmpU src1 src2) );
6832 effect(USE labl);
6833 format %{ "BR$cmp $src1, $src2, $labl #@branchConIU_reg_reg" %}
6835 ins_encode %{
6836 Register op1 = $src1$$Register;
6837 Register op2 = $src2$$Register;
6838 Label &L = *($labl$$label);
6839 int flag = $cmp$$cmpcode;
6841 switch(flag)
6842 {
6843 case 0x01: //equal
6844 if (&L)
6845 __ beq(op1, op2, L);
6846 else
6847 __ beq(op1, op2, (int)0);
6848 break;
6849 case 0x02: //not_equal
6850 if (&L)
6851 __ bne(op1, op2, L);
6852 else
6853 __ bne(op1, op2, (int)0);
6854 break;
6855 case 0x03: //above
6856 __ sltu(AT, op2, op1);
6857 if(&L)
6858 __ bne(AT, R0, L);
6859 else
6860 __ bne(AT, R0, (int)0);
6861 break;
6862 case 0x04: //above_equal
6863 __ sltu(AT, op1, op2);
6864 if(&L)
6865 __ beq(AT, R0, L);
6866 else
6867 __ beq(AT, R0, (int)0);
6868 break;
6869 case 0x05: //below
6870 __ sltu(AT, op1, op2);
6871 if(&L)
6872 __ bne(AT, R0, L);
6873 else
6874 __ bne(AT, R0, (int)0);
6875 break;
6876 case 0x06: //below_equal
6877 __ sltu(AT, op2, op1);
6878 if(&L)
6879 __ beq(AT, R0, L);
6880 else
6881 __ beq(AT, R0, (int)0);
6882 break;
6883 default:
6884 Unimplemented();
6885 }
6886 __ nop();
6887 %}
6889 ins_pc_relative(1);
6890 ins_pipe( pipe_alu_branch );
6891 %}
6894 instruct branchConIU_reg_imm(cmpOpU cmp, mRegI src1, immI src2, label labl) %{
6895 match( If cmp (CmpU src1 src2) );
6896 effect(USE labl);
6897 format %{ "BR$cmp $src1, $src2, $labl #@branchConIU_reg_imm" %}
6899 ins_encode %{
6900 Register op1 = $src1$$Register;
6901 int val = $src2$$constant;
6902 Label &L = *($labl$$label);
6903 int flag = $cmp$$cmpcode;
6905 __ move(AT, val);
6906 switch(flag)
6907 {
6908 case 0x01: //equal
6909 if (&L)
6910 __ beq(op1, AT, L);
6911 else
6912 __ beq(op1, AT, (int)0);
6913 break;
6914 case 0x02: //not_equal
6915 if (&L)
6916 __ bne(op1, AT, L);
6917 else
6918 __ bne(op1, AT, (int)0);
6919 break;
6920 case 0x03: //above
6921 __ sltu(AT, AT, op1);
6922 if(&L)
6923 __ bne(R0, AT, L);
6924 else
6925 __ bne(R0, AT, (int)0);
6926 break;
6927 case 0x04: //above_equal
6928 __ sltu(AT, op1, AT);
6929 if(&L)
6930 __ beq(AT, R0, L);
6931 else
6932 __ beq(AT, R0, (int)0);
6933 break;
6934 case 0x05: //below
6935 __ sltu(AT, op1, AT);
6936 if(&L)
6937 __ bne(R0, AT, L);
6938 else
6939 __ bne(R0, AT, (int)0);
6940 break;
6941 case 0x06: //below_equal
6942 __ sltu(AT, AT, op1);
6943 if(&L)
6944 __ beq(AT, R0, L);
6945 else
6946 __ beq(AT, R0, (int)0);
6947 break;
6948 default:
6949 Unimplemented();
6950 }
6951 __ nop();
6952 %}
6954 ins_pc_relative(1);
6955 ins_pipe( pipe_alu_branch );
6956 %}
6958 instruct branchConI_reg_reg(cmpOp cmp, mRegI src1, mRegI src2, label labl) %{
6959 match( If cmp (CmpI src1 src2) );
6960 effect(USE labl);
6961 format %{ "BR$cmp $src1, $src2, $labl #@branchConI_reg_reg" %}
6963 ins_encode %{
6964 Register op1 = $src1$$Register;
6965 Register op2 = $src2$$Register;
6966 Label &L = *($labl$$label);
6967 int flag = $cmp$$cmpcode;
6969 switch(flag)
6970 {
6971 case 0x01: //equal
6972 if (&L)
6973 __ beq(op1, op2, L);
6974 else
6975 __ beq(op1, op2, (int)0);
6976 break;
6977 case 0x02: //not_equal
6978 if (&L)
6979 __ bne(op1, op2, L);
6980 else
6981 __ bne(op1, op2, (int)0);
6982 break;
6983 case 0x03: //above
6984 __ slt(AT, op2, op1);
6985 if(&L)
6986 __ bne(R0, AT, L);
6987 else
6988 __ bne(R0, AT, (int)0);
6989 break;
6990 case 0x04: //above_equal
6991 __ slt(AT, op1, op2);
6992 if(&L)
6993 __ beq(AT, R0, L);
6994 else
6995 __ beq(AT, R0, (int)0);
6996 break;
6997 case 0x05: //below
6998 __ slt(AT, op1, op2);
6999 if(&L)
7000 __ bne(R0, AT, L);
7001 else
7002 __ bne(R0, AT, (int)0);
7003 break;
7004 case 0x06: //below_equal
7005 __ slt(AT, op2, op1);
7006 if(&L)
7007 __ beq(AT, R0, L);
7008 else
7009 __ beq(AT, R0, (int)0);
7010 break;
7011 default:
7012 Unimplemented();
7013 }
7014 __ nop();
7015 %}
7017 ins_pc_relative(1);
7018 ins_pipe( pipe_alu_branch );
7019 %}
7021 instruct branchConI_reg_imm0(cmpOp cmp, mRegI src1, immI0 src2, label labl) %{
7022 match( If cmp (CmpI src1 src2) );
7023 effect(USE labl);
7024 ins_cost(170);
7025 format %{ "BR$cmp $src1, $src2, $labl #@branchConI_reg_imm0" %}
7027 ins_encode %{
7028 Register op1 = $src1$$Register;
7029 // int val = $src2$$constant;
7030 Label &L = *($labl$$label);
7031 int flag = $cmp$$cmpcode;
7033 //__ move(AT, val);
7034 switch(flag)
7035 {
7036 case 0x01: //equal
7037 if (&L)
7038 __ beq(op1, R0, L);
7039 else
7040 __ beq(op1, R0, (int)0);
7041 break;
7042 case 0x02: //not_equal
7043 if (&L)
7044 __ bne(op1, R0, L);
7045 else
7046 __ bne(op1, R0, (int)0);
7047 break;
7048 case 0x03: //greater
7049 if(&L)
7050 __ bgtz(op1, L);
7051 else
7052 __ bgtz(op1, (int)0);
7053 break;
7054 case 0x04: //greater_equal
7055 if(&L)
7056 __ bgez(op1, L);
7057 else
7058 __ bgez(op1, (int)0);
7059 break;
7060 case 0x05: //less
7061 if(&L)
7062 __ bltz(op1, L);
7063 else
7064 __ bltz(op1, (int)0);
7065 break;
7066 case 0x06: //less_equal
7067 if(&L)
7068 __ blez(op1, L);
7069 else
7070 __ blez(op1, (int)0);
7071 break;
7072 default:
7073 Unimplemented();
7074 }
7075 __ nop();
7076 %}
7078 ins_pc_relative(1);
7079 ins_pipe( pipe_alu_branch );
7080 %}
7083 instruct branchConI_reg_imm(cmpOp cmp, mRegI src1, immI src2, label labl) %{
7084 match( If cmp (CmpI src1 src2) );
7085 effect(USE labl);
7086 ins_cost(200);
7087 format %{ "BR$cmp $src1, $src2, $labl #@branchConI_reg_imm" %}
7089 ins_encode %{
7090 Register op1 = $src1$$Register;
7091 int val = $src2$$constant;
7092 Label &L = *($labl$$label);
7093 int flag = $cmp$$cmpcode;
7095 __ move(AT, val);
7096 switch(flag)
7097 {
7098 case 0x01: //equal
7099 if (&L)
7100 __ beq(op1, AT, L);
7101 else
7102 __ beq(op1, AT, (int)0);
7103 break;
7104 case 0x02: //not_equal
7105 if (&L)
7106 __ bne(op1, AT, L);
7107 else
7108 __ bne(op1, AT, (int)0);
7109 break;
7110 case 0x03: //greater
7111 __ slt(AT, AT, op1);
7112 if(&L)
7113 __ bne(R0, AT, L);
7114 else
7115 __ bne(R0, AT, (int)0);
7116 break;
7117 case 0x04: //greater_equal
7118 __ slt(AT, op1, AT);
7119 if(&L)
7120 __ beq(AT, R0, L);
7121 else
7122 __ beq(AT, R0, (int)0);
7123 break;
7124 case 0x05: //less
7125 __ slt(AT, op1, AT);
7126 if(&L)
7127 __ bne(R0, AT, L);
7128 else
7129 __ bne(R0, AT, (int)0);
7130 break;
7131 case 0x06: //less_equal
7132 __ slt(AT, AT, op1);
7133 if(&L)
7134 __ beq(AT, R0, L);
7135 else
7136 __ beq(AT, R0, (int)0);
7137 break;
7138 default:
7139 Unimplemented();
7140 }
7141 __ nop();
7142 %}
7144 ins_pc_relative(1);
7145 ins_pipe( pipe_alu_branch );
7146 %}
7148 instruct branchConIU_reg_imm0(cmpOpU cmp, mRegI src1, immI0 zero, label labl) %{
7149 match( If cmp (CmpU src1 zero) );
7150 effect(USE labl);
7151 format %{ "BR$cmp $src1, zero, $labl #@branchConIU_reg_imm0" %}
7153 ins_encode %{
7154 Register op1 = $src1$$Register;
7155 Label &L = *($labl$$label);
7156 int flag = $cmp$$cmpcode;
7158 switch(flag)
7159 {
7160 case 0x01: //equal
7161 if (&L)
7162 __ beq(op1, R0, L);
7163 else
7164 __ beq(op1, R0, (int)0);
7165 break;
7166 case 0x02: //not_equal
7167 if (&L)
7168 __ bne(op1, R0, L);
7169 else
7170 __ bne(op1, R0, (int)0);
7171 break;
7172 case 0x03: //above
7173 if(&L)
7174 __ bne(R0, op1, L);
7175 else
7176 __ bne(R0, op1, (int)0);
7177 break;
7178 case 0x04: //above_equal
7179 if(&L)
7180 __ beq(R0, R0, L);
7181 else
7182 __ beq(R0, R0, (int)0);
7183 break;
7184 case 0x05: //below
7185 return;
7186 break;
7187 case 0x06: //below_equal
7188 if(&L)
7189 __ beq(op1, R0, L);
7190 else
7191 __ beq(op1, R0, (int)0);
7192 break;
7193 default:
7194 Unimplemented();
7195 }
7196 __ nop();
7197 %}
7199 ins_pc_relative(1);
7200 ins_pipe( pipe_alu_branch );
7201 %}
7204 instruct branchConIU_reg_immI16(cmpOpU cmp, mRegI src1, immI16 src2, label labl) %{
7205 match( If cmp (CmpU src1 src2) );
7206 effect(USE labl);
7207 ins_cost(180);
7208 format %{ "BR$cmp $src1, $src2, $labl #@branchConIU_reg_immI16" %}
7210 ins_encode %{
7211 Register op1 = $src1$$Register;
7212 int val = $src2$$constant;
7213 Label &L = *($labl$$label);
7214 int flag = $cmp$$cmpcode;
7216 switch(flag)
7217 {
7218 case 0x01: //equal
7219 __ move(AT, val);
7220 if (&L)
7221 __ beq(op1, AT, L);
7222 else
7223 __ beq(op1, AT, (int)0);
7224 break;
7225 case 0x02: //not_equal
7226 __ move(AT, val);
7227 if (&L)
7228 __ bne(op1, AT, L);
7229 else
7230 __ bne(op1, AT, (int)0);
7231 break;
7232 case 0x03: //above
7233 __ move(AT, val);
7234 __ sltu(AT, AT, op1);
7235 if(&L)
7236 __ bne(R0, AT, L);
7237 else
7238 __ bne(R0, AT, (int)0);
7239 break;
7240 case 0x04: //above_equal
7241 __ sltiu(AT, op1, val);
7242 if(&L)
7243 __ beq(AT, R0, L);
7244 else
7245 __ beq(AT, R0, (int)0);
7246 break;
7247 case 0x05: //below
7248 __ sltiu(AT, op1, val);
7249 if(&L)
7250 __ bne(R0, AT, L);
7251 else
7252 __ bne(R0, AT, (int)0);
7253 break;
7254 case 0x06: //below_equal
7255 __ move(AT, val);
7256 __ sltu(AT, AT, op1);
7257 if(&L)
7258 __ beq(AT, R0, L);
7259 else
7260 __ beq(AT, R0, (int)0);
7261 break;
7262 default:
7263 Unimplemented();
7264 }
7265 __ nop();
7266 %}
7268 ins_pc_relative(1);
7269 ins_pipe( pipe_alu_branch );
7270 %}
7273 instruct branchConL_regL_regL(cmpOp cmp, mRegL src1, mRegL src2, label labl) %{
7274 match( If cmp (CmpL src1 src2) );
7275 effect(USE labl);
7276 format %{ "BR$cmp $src1, $src2, $labl #@branchConL_regL_regL" %}
7277 ins_cost(250);
7279 ins_encode %{
7280 Register opr1_reg = as_Register($src1$$reg);
7281 Register opr2_reg = as_Register($src2$$reg);
7283 Label &target = *($labl$$label);
7284 int flag = $cmp$$cmpcode;
7286 switch(flag)
7287 {
7288 case 0x01: //equal
7289 if (&target)
7290 __ beq(opr1_reg, opr2_reg, target);
7291 else
7292 __ beq(opr1_reg, opr2_reg, (int)0);
7293 __ delayed()->nop();
7294 break;
7296 case 0x02: //not_equal
7297 if(&target)
7298 __ bne(opr1_reg, opr2_reg, target);
7299 else
7300 __ bne(opr1_reg, opr2_reg, (int)0);
7301 __ delayed()->nop();
7302 break;
7304 case 0x03: //greater
7305 __ slt(AT, opr2_reg, opr1_reg);
7306 if(&target)
7307 __ bne(AT, R0, target);
7308 else
7309 __ bne(AT, R0, (int)0);
7310 __ delayed()->nop();
7311 break;
7313 case 0x04: //greater_equal
7314 __ slt(AT, opr1_reg, opr2_reg);
7315 if(&target)
7316 __ beq(AT, R0, target);
7317 else
7318 __ beq(AT, R0, (int)0);
7319 __ delayed()->nop();
7321 break;
7323 case 0x05: //less
7324 __ slt(AT, opr1_reg, opr2_reg);
7325 if(&target)
7326 __ bne(AT, R0, target);
7327 else
7328 __ bne(AT, R0, (int)0);
7329 __ delayed()->nop();
7331 break;
7333 case 0x06: //less_equal
7334 __ slt(AT, opr2_reg, opr1_reg);
7336 if(&target)
7337 __ beq(AT, R0, target);
7338 else
7339 __ beq(AT, R0, (int)0);
7340 __ delayed()->nop();
7342 break;
7344 default:
7345 Unimplemented();
7346 }
7347 %}
7350 ins_pc_relative(1);
7351 ins_pipe( pipe_alu_branch );
7352 %}
7354 instruct branchConL_reg_immL16_sub(cmpOp cmp, mRegL src1, immL16_sub src2, label labl) %{
7355 match( If cmp (CmpL src1 src2) );
7356 effect(USE labl);
7357 ins_cost(180);
7358 format %{ "BR$cmp $src1, $src2, $labl #@branchConL_reg_immL16_sub" %}
7360 ins_encode %{
7361 Register op1 = $src1$$Register;
7362 int val = $src2$$constant;
7363 Label &L = *($labl$$label);
7364 int flag = $cmp$$cmpcode;
7366 __ daddiu(AT, op1, -1 * val);
7367 switch(flag)
7368 {
7369 case 0x01: //equal
7370 if (&L)
7371 __ beq(R0, AT, L);
7372 else
7373 __ beq(R0, AT, (int)0);
7374 break;
7375 case 0x02: //not_equal
7376 if (&L)
7377 __ bne(R0, AT, L);
7378 else
7379 __ bne(R0, AT, (int)0);
7380 break;
7381 case 0x03: //greater
7382 if(&L)
7383 __ bgtz(AT, L);
7384 else
7385 __ bgtz(AT, (int)0);
7386 break;
7387 case 0x04: //greater_equal
7388 if(&L)
7389 __ bgez(AT, L);
7390 else
7391 __ bgez(AT, (int)0);
7392 break;
7393 case 0x05: //less
7394 if(&L)
7395 __ bltz(AT, L);
7396 else
7397 __ bltz(AT, (int)0);
7398 break;
7399 case 0x06: //less_equal
7400 if(&L)
7401 __ blez(AT, L);
7402 else
7403 __ blez(AT, (int)0);
7404 break;
7405 default:
7406 Unimplemented();
7407 }
7408 __ nop();
7409 %}
7411 ins_pc_relative(1);
7412 ins_pipe( pipe_alu_branch );
7413 %}
7416 instruct branchConI_reg_imm16_sub(cmpOp cmp, mRegI src1, immI16_sub src2, label labl) %{
7417 match( If cmp (CmpI src1 src2) );
7418 effect(USE labl);
7419 ins_cost(180);
7420 format %{ "BR$cmp $src1, $src2, $labl #@branchConI_reg_imm16_sub" %}
7422 ins_encode %{
7423 Register op1 = $src1$$Register;
7424 int val = $src2$$constant;
7425 Label &L = *($labl$$label);
7426 int flag = $cmp$$cmpcode;
7428 __ addiu32(AT, op1, -1 * val);
7429 switch(flag)
7430 {
7431 case 0x01: //equal
7432 if (&L)
7433 __ beq(R0, AT, L);
7434 else
7435 __ beq(R0, AT, (int)0);
7436 break;
7437 case 0x02: //not_equal
7438 if (&L)
7439 __ bne(R0, AT, L);
7440 else
7441 __ bne(R0, AT, (int)0);
7442 break;
7443 case 0x03: //greater
7444 if(&L)
7445 __ bgtz(AT, L);
7446 else
7447 __ bgtz(AT, (int)0);
7448 break;
7449 case 0x04: //greater_equal
7450 if(&L)
7451 __ bgez(AT, L);
7452 else
7453 __ bgez(AT, (int)0);
7454 break;
7455 case 0x05: //less
7456 if(&L)
7457 __ bltz(AT, L);
7458 else
7459 __ bltz(AT, (int)0);
7460 break;
7461 case 0x06: //less_equal
7462 if(&L)
7463 __ blez(AT, L);
7464 else
7465 __ blez(AT, (int)0);
7466 break;
7467 default:
7468 Unimplemented();
7469 }
7470 __ nop();
7471 %}
7473 ins_pc_relative(1);
7474 ins_pipe( pipe_alu_branch );
7475 %}
7477 instruct branchConL_regL_immL0(cmpOp cmp, mRegL src1, immL0 zero, label labl) %{
7478 match( If cmp (CmpL src1 zero) );
7479 effect(USE labl);
7480 format %{ "BR$cmp $src1, zero, $labl #@branchConL_regL_immL0" %}
7481 ins_cost(150);
7483 ins_encode %{
7484 Register opr1_reg = as_Register($src1$$reg);
7485 Label &target = *($labl$$label);
7486 int flag = $cmp$$cmpcode;
7488 switch(flag)
7489 {
7490 case 0x01: //equal
7491 if (&target)
7492 __ beq(opr1_reg, R0, target);
7493 else
7494 __ beq(opr1_reg, R0, int(0));
7495 break;
7497 case 0x02: //not_equal
7498 if(&target)
7499 __ bne(opr1_reg, R0, target);
7500 else
7501 __ bne(opr1_reg, R0, (int)0);
7502 break;
7504 case 0x03: //greater
7505 if(&target)
7506 __ bgtz(opr1_reg, target);
7507 else
7508 __ bgtz(opr1_reg, (int)0);
7509 break;
7511 case 0x04: //greater_equal
7512 if(&target)
7513 __ bgez(opr1_reg, target);
7514 else
7515 __ bgez(opr1_reg, (int)0);
7516 break;
7518 case 0x05: //less
7519 __ slt(AT, opr1_reg, R0);
7520 if(&target)
7521 __ bne(AT, R0, target);
7522 else
7523 __ bne(AT, R0, (int)0);
7524 break;
7526 case 0x06: //less_equal
7527 if (&target)
7528 __ blez(opr1_reg, target);
7529 else
7530 __ blez(opr1_reg, int(0));
7531 break;
7533 default:
7534 Unimplemented();
7535 }
7536 __ delayed()->nop();
7537 %}
7540 ins_pc_relative(1);
7541 ins_pipe( pipe_alu_branch );
7542 %}
7545 //FIXME
7546 instruct branchConF_reg_reg(cmpOp cmp, regF src1, regF src2, label labl) %{
7547 match( If cmp (CmpF src1 src2) );
7548 effect(USE labl);
7549 format %{ "BR$cmp $src1, $src2, $labl #@branchConF_reg_reg" %}
7551 ins_encode %{
7552 FloatRegister reg_op1 = $src1$$FloatRegister;
7553 FloatRegister reg_op2 = $src2$$FloatRegister;
7554 Label &L = *($labl$$label);
7555 int flag = $cmp$$cmpcode;
7557 switch(flag)
7558 {
7559 case 0x01: //equal
7560 __ c_eq_s(reg_op1, reg_op2);
7561 if (&L)
7562 __ bc1t(L);
7563 else
7564 __ bc1t((int)0);
7565 break;
7566 case 0x02: //not_equal
7567 __ c_eq_s(reg_op1, reg_op2);
7568 if (&L)
7569 __ bc1f(L);
7570 else
7571 __ bc1f((int)0);
7572 break;
7573 case 0x03: //greater
7574 __ c_ule_s(reg_op1, reg_op2);
7575 if(&L)
7576 __ bc1f(L);
7577 else
7578 __ bc1f((int)0);
7579 break;
7580 case 0x04: //greater_equal
7581 __ c_ult_s(reg_op1, reg_op2);
7582 if(&L)
7583 __ bc1f(L);
7584 else
7585 __ bc1f((int)0);
7586 break;
7587 case 0x05: //less
7588 __ c_ult_s(reg_op1, reg_op2);
7589 if(&L)
7590 __ bc1t(L);
7591 else
7592 __ bc1t((int)0);
7593 break;
7594 case 0x06: //less_equal
7595 __ c_ule_s(reg_op1, reg_op2);
7596 if(&L)
7597 __ bc1t(L);
7598 else
7599 __ bc1t((int)0);
7600 break;
7601 default:
7602 Unimplemented();
7603 }
7604 __ nop();
7605 %}
7607 ins_pc_relative(1);
7608 ins_pipe(pipe_slow);
7609 %}
7611 instruct branchConD_reg_reg(cmpOp cmp, regD src1, regD src2, label labl) %{
7612 match( If cmp (CmpD src1 src2) );
7613 effect(USE labl);
7614 format %{ "BR$cmp $src1, $src2, $labl #@branchConD_reg_reg" %}
7616 ins_encode %{
7617 FloatRegister reg_op1 = $src1$$FloatRegister;
7618 FloatRegister reg_op2 = $src2$$FloatRegister;
7619 Label &L = *($labl$$label);
7620 int flag = $cmp$$cmpcode;
7622 switch(flag)
7623 {
7624 case 0x01: //equal
7625 __ c_eq_d(reg_op1, reg_op2);
7626 if (&L)
7627 __ bc1t(L);
7628 else
7629 __ bc1t((int)0);
7630 break;
7631 case 0x02: //not_equal
7632 //2016/4/19 aoqi: c_ueq_d cannot distinguish NaN from equal. Double.isNaN(Double) is implemented by 'f != f', so the use of c_ueq_d causes bugs.
7633 __ c_eq_d(reg_op1, reg_op2);
7634 if (&L)
7635 __ bc1f(L);
7636 else
7637 __ bc1f((int)0);
7638 break;
7639 case 0x03: //greater
7640 __ c_ule_d(reg_op1, reg_op2);
7641 if(&L)
7642 __ bc1f(L);
7643 else
7644 __ bc1f((int)0);
7645 break;
7646 case 0x04: //greater_equal
7647 __ c_ult_d(reg_op1, reg_op2);
7648 if(&L)
7649 __ bc1f(L);
7650 else
7651 __ bc1f((int)0);
7652 break;
7653 case 0x05: //less
7654 __ c_ult_d(reg_op1, reg_op2);
7655 if(&L)
7656 __ bc1t(L);
7657 else
7658 __ bc1t((int)0);
7659 break;
7660 case 0x06: //less_equal
7661 __ c_ule_d(reg_op1, reg_op2);
7662 if(&L)
7663 __ bc1t(L);
7664 else
7665 __ bc1t((int)0);
7666 break;
7667 default:
7668 Unimplemented();
7669 }
7670 __ nop();
7671 %}
7673 ins_pc_relative(1);
7674 ins_pipe(pipe_slow);
7675 %}
7678 // Call Runtime Instruction
7679 instruct CallRuntimeDirect(method meth) %{
7680 match(CallRuntime );
7681 effect(USE meth);
7683 ins_cost(300);
7684 format %{ "CALL,runtime #@CallRuntimeDirect" %}
7685 ins_encode( Java_To_Runtime( meth ) );
7686 ins_pipe( pipe_slow );
7687 ins_alignment(16);
7688 %}
7692 //------------------------MemBar Instructions-------------------------------
7693 //Memory barrier flavors
7695 instruct membar_acquire() %{
7696 match(MemBarAcquire);
7697 ins_cost(0);
7699 size(0);
7700 format %{ "MEMBAR-acquire (empty) @ membar_acquire" %}
7701 ins_encode();
7702 ins_pipe(empty);
7703 %}
7705 instruct load_fence() %{
7706 match(LoadFence);
7707 ins_cost(400);
7709 format %{ "MEMBAR @ load_fence" %}
7710 ins_encode %{
7711 __ sync();
7712 %}
7713 ins_pipe(pipe_slow);
7714 %}
7716 instruct membar_acquire_lock()
7717 %{
7718 match(MemBarAcquireLock);
7719 ins_cost(0);
7721 size(0);
7722 format %{ "MEMBAR-acquire (acquire as part of CAS in prior FastLock so empty encoding) @ membar_acquire_lock" %}
7723 ins_encode();
7724 ins_pipe(empty);
7725 %}
7727 instruct membar_release() %{
7728 match(MemBarRelease);
7729 ins_cost(0);
7731 size(0);
7732 format %{ "MEMBAR-release (empty) @ membar_release" %}
7733 ins_encode();
7734 ins_pipe(empty);
7735 %}
7737 instruct store_fence() %{
7738 match(StoreFence);
7739 ins_cost(400);
7741 format %{ "MEMBAR @ store_fence" %}
7743 ins_encode %{
7744 __ sync();
7745 %}
7747 ins_pipe(pipe_slow);
7748 %}
7750 instruct membar_release_lock()
7751 %{
7752 match(MemBarReleaseLock);
7753 ins_cost(0);
7755 size(0);
7756 format %{ "MEMBAR-release-lock (release in FastUnlock so empty) @ membar_release_lock" %}
7757 ins_encode();
7758 ins_pipe(empty);
7759 %}
7762 instruct membar_volatile() %{
7763 match(MemBarVolatile);
7764 ins_cost(400);
7766 format %{ "MEMBAR-volatile" %}
7767 ins_encode %{
7768 if( !os::is_MP() ) return; // Not needed on single CPU
7769 __ sync();
7771 %}
7772 ins_pipe(pipe_slow);
7773 %}
7775 instruct unnecessary_membar_volatile() %{
7776 match(MemBarVolatile);
7777 predicate(Matcher::post_store_load_barrier(n));
7778 ins_cost(0);
7780 size(0);
7781 format %{ "MEMBAR-volatile (unnecessary so empty encoding) @ unnecessary_membar_volatile" %}
7782 ins_encode( );
7783 ins_pipe(empty);
7784 %}
7786 instruct membar_storestore() %{
7787 match(MemBarStoreStore);
7789 ins_cost(0);
7790 size(0);
7791 format %{ "MEMBAR-storestore (empty encoding) @ membar_storestore" %}
7792 ins_encode( );
7793 ins_pipe(empty);
7794 %}
7796 //----------Move Instructions--------------------------------------------------
7797 instruct castX2P(mRegP dst, mRegL src) %{
7798 match(Set dst (CastX2P src));
7799 format %{ "castX2P $dst, $src @ castX2P" %}
7800 ins_encode %{
7801 Register src = $src$$Register;
7802 Register dst = $dst$$Register;
7804 if(src != dst)
7805 __ move(dst, src);
7806 %}
7807 ins_cost(10);
7808 ins_pipe( ialu_regI_mov );
7809 %}
7811 instruct castP2X(mRegL dst, mRegP src ) %{
7812 match(Set dst (CastP2X src));
7814 format %{ "mov $dst, $src\t #@castP2X" %}
7815 ins_encode %{
7816 Register src = $src$$Register;
7817 Register dst = $dst$$Register;
7819 if(src != dst)
7820 __ move(dst, src);
7821 %}
7822 ins_pipe( ialu_regI_mov );
7823 %}
7825 instruct MoveF2I_reg_reg(mRegI dst, regF src) %{
7826 match(Set dst (MoveF2I src));
7827 effect(DEF dst, USE src);
7828 ins_cost(85);
7829 format %{ "MoveF2I $dst, $src @ MoveF2I_reg_reg" %}
7830 ins_encode %{
7831 Register dst = as_Register($dst$$reg);
7832 FloatRegister src = as_FloatRegister($src$$reg);
7834 __ mfc1(dst, src);
7835 %}
7836 ins_pipe( pipe_slow );
7837 %}
7839 instruct MoveI2F_reg_reg(regF dst, mRegI src) %{
7840 match(Set dst (MoveI2F src));
7841 effect(DEF dst, USE src);
7842 ins_cost(85);
7843 format %{ "MoveI2F $dst, $src @ MoveI2F_reg_reg" %}
7844 ins_encode %{
7845 Register src = as_Register($src$$reg);
7846 FloatRegister dst = as_FloatRegister($dst$$reg);
7848 __ mtc1(src, dst);
7849 %}
7850 ins_pipe( pipe_slow );
7851 %}
7853 instruct MoveD2L_reg_reg(mRegL dst, regD src) %{
7854 match(Set dst (MoveD2L src));
7855 effect(DEF dst, USE src);
7856 ins_cost(85);
7857 format %{ "MoveD2L $dst, $src @ MoveD2L_reg_reg" %}
7858 ins_encode %{
7859 Register dst = as_Register($dst$$reg);
7860 FloatRegister src = as_FloatRegister($src$$reg);
7862 __ dmfc1(dst, src);
7863 %}
7864 ins_pipe( pipe_slow );
7865 %}
7867 instruct MoveL2D_reg_reg(regD dst, mRegL src) %{
7868 match(Set dst (MoveL2D src));
7869 effect(DEF dst, USE src);
7870 ins_cost(85);
7871 format %{ "MoveL2D $dst, $src @ MoveL2D_reg_reg" %}
7872 ins_encode %{
7873 FloatRegister dst = as_FloatRegister($dst$$reg);
7874 Register src = as_Register($src$$reg);
7876 __ dmtc1(src, dst);
7877 %}
7878 ins_pipe( pipe_slow );
7879 %}
7881 //----------Conditional Move---------------------------------------------------
7882 // Conditional move
7883 instruct cmovI_cmpI_reg_reg(mRegI dst, mRegI src, mRegI tmp1, mRegI tmp2, cmpOp cop ) %{
7884 match(Set dst (CMoveI (Binary cop (CmpI tmp1 tmp2)) (Binary dst src)));
7885 ins_cost(80);
7886 format %{
7887 "CMP$cop $tmp1, $tmp2\t @cmovI_cmpI_reg_reg\n"
7888 "\tCMOV $dst,$src \t @cmovI_cmpI_reg_reg"
7889 %}
7891 ins_encode %{
7892 Register op1 = $tmp1$$Register;
7893 Register op2 = $tmp2$$Register;
7894 Register dst = $dst$$Register;
7895 Register src = $src$$Register;
7896 int flag = $cop$$cmpcode;
7898 switch(flag)
7899 {
7900 case 0x01: //equal
7901 __ subu32(AT, op1, op2);
7902 __ movz(dst, src, AT);
7903 break;
7905 case 0x02: //not_equal
7906 __ subu32(AT, op1, op2);
7907 __ movn(dst, src, AT);
7908 break;
7910 case 0x03: //great
7911 __ slt(AT, op2, op1);
7912 __ movn(dst, src, AT);
7913 break;
7915 case 0x04: //great_equal
7916 __ slt(AT, op1, op2);
7917 __ movz(dst, src, AT);
7918 break;
7920 case 0x05: //less
7921 __ slt(AT, op1, op2);
7922 __ movn(dst, src, AT);
7923 break;
7925 case 0x06: //less_equal
7926 __ slt(AT, op2, op1);
7927 __ movz(dst, src, AT);
7928 break;
7930 default:
7931 Unimplemented();
7932 }
7933 %}
7935 ins_pipe( pipe_slow );
7936 %}
7938 instruct cmovI_cmpP_reg_reg(mRegI dst, mRegI src, mRegP tmp1, mRegP tmp2, cmpOpU cop ) %{
7939 match(Set dst (CMoveI (Binary cop (CmpP tmp1 tmp2)) (Binary dst src)));
7940 ins_cost(80);
7941 format %{
7942 "CMPU$cop $tmp1,$tmp2\t @cmovI_cmpP_reg_reg\n\t"
7943 "CMOV $dst,$src\t @cmovI_cmpP_reg_reg"
7944 %}
7945 ins_encode %{
7946 Register op1 = $tmp1$$Register;
7947 Register op2 = $tmp2$$Register;
7948 Register dst = $dst$$Register;
7949 Register src = $src$$Register;
7950 int flag = $cop$$cmpcode;
7952 switch(flag)
7953 {
7954 case 0x01: //equal
7955 __ subu(AT, op1, op2);
7956 __ movz(dst, src, AT);
7957 break;
7959 case 0x02: //not_equal
7960 __ subu(AT, op1, op2);
7961 __ movn(dst, src, AT);
7962 break;
7964 case 0x03: //above
7965 __ sltu(AT, op2, op1);
7966 __ movn(dst, src, AT);
7967 break;
7969 case 0x04: //above_equal
7970 __ sltu(AT, op1, op2);
7971 __ movz(dst, src, AT);
7972 break;
7974 case 0x05: //below
7975 __ sltu(AT, op1, op2);
7976 __ movn(dst, src, AT);
7977 break;
7979 case 0x06: //below_equal
7980 __ sltu(AT, op2, op1);
7981 __ movz(dst, src, AT);
7982 break;
7984 default:
7985 Unimplemented();
7986 }
7987 %}
7989 ins_pipe( pipe_slow );
7990 %}
7992 instruct cmovI_cmpN_reg_reg(mRegI dst, mRegI src, mRegN tmp1, mRegN tmp2, cmpOpU cop ) %{
7993 match(Set dst (CMoveI (Binary cop (CmpN tmp1 tmp2)) (Binary dst src)));
7994 ins_cost(80);
7995 format %{
7996 "CMPU$cop $tmp1,$tmp2\t @cmovI_cmpN_reg_reg\n\t"
7997 "CMOV $dst,$src\t @cmovI_cmpN_reg_reg"
7998 %}
7999 ins_encode %{
8000 Register op1 = $tmp1$$Register;
8001 Register op2 = $tmp2$$Register;
8002 Register dst = $dst$$Register;
8003 Register src = $src$$Register;
8004 int flag = $cop$$cmpcode;
8006 switch(flag)
8007 {
8008 case 0x01: //equal
8009 __ subu32(AT, op1, op2);
8010 __ movz(dst, src, AT);
8011 break;
8013 case 0x02: //not_equal
8014 __ subu32(AT, op1, op2);
8015 __ movn(dst, src, AT);
8016 break;
8018 case 0x03: //above
8019 __ sltu(AT, op2, op1);
8020 __ movn(dst, src, AT);
8021 break;
8023 case 0x04: //above_equal
8024 __ sltu(AT, op1, op2);
8025 __ movz(dst, src, AT);
8026 break;
8028 case 0x05: //below
8029 __ sltu(AT, op1, op2);
8030 __ movn(dst, src, AT);
8031 break;
8033 case 0x06: //below_equal
8034 __ sltu(AT, op2, op1);
8035 __ movz(dst, src, AT);
8036 break;
8038 default:
8039 Unimplemented();
8040 }
8041 %}
8043 ins_pipe( pipe_slow );
8044 %}
8046 instruct cmovP_cmpN_reg_reg(mRegP dst, mRegP src, mRegN tmp1, mRegN tmp2, cmpOpU cop ) %{
8047 match(Set dst (CMoveP (Binary cop (CmpN tmp1 tmp2)) (Binary dst src)));
8048 ins_cost(80);
8049 format %{
8050 "CMPU$cop $tmp1,$tmp2\t @cmovP_cmpN_reg_reg\n\t"
8051 "CMOV $dst,$src\t @cmovP_cmpN_reg_reg"
8052 %}
8053 ins_encode %{
8054 Register op1 = $tmp1$$Register;
8055 Register op2 = $tmp2$$Register;
8056 Register dst = $dst$$Register;
8057 Register src = $src$$Register;
8058 int flag = $cop$$cmpcode;
8060 switch(flag)
8061 {
8062 case 0x01: //equal
8063 __ subu32(AT, op1, op2);
8064 __ movz(dst, src, AT);
8065 break;
8067 case 0x02: //not_equal
8068 __ subu32(AT, op1, op2);
8069 __ movn(dst, src, AT);
8070 break;
8072 case 0x03: //above
8073 __ sltu(AT, op2, op1);
8074 __ movn(dst, src, AT);
8075 break;
8077 case 0x04: //above_equal
8078 __ sltu(AT, op1, op2);
8079 __ movz(dst, src, AT);
8080 break;
8082 case 0x05: //below
8083 __ sltu(AT, op1, op2);
8084 __ movn(dst, src, AT);
8085 break;
8087 case 0x06: //below_equal
8088 __ sltu(AT, op2, op1);
8089 __ movz(dst, src, AT);
8090 break;
8092 default:
8093 Unimplemented();
8094 }
8095 %}
8097 ins_pipe( pipe_slow );
8098 %}
8100 instruct cmovN_cmpP_reg_reg(mRegN dst, mRegN src, mRegP tmp1, mRegP tmp2, cmpOpU cop ) %{
8101 match(Set dst (CMoveN (Binary cop (CmpP tmp1 tmp2)) (Binary dst src)));
8102 ins_cost(80);
8103 format %{
8104 "CMPU$cop $tmp1,$tmp2\t @cmovN_cmpP_reg_reg\n\t"
8105 "CMOV $dst,$src\t @cmovN_cmpP_reg_reg"
8106 %}
8107 ins_encode %{
8108 Register op1 = $tmp1$$Register;
8109 Register op2 = $tmp2$$Register;
8110 Register dst = $dst$$Register;
8111 Register src = $src$$Register;
8112 int flag = $cop$$cmpcode;
8114 switch(flag)
8115 {
8116 case 0x01: //equal
8117 __ subu(AT, op1, op2);
8118 __ movz(dst, src, AT);
8119 break;
8121 case 0x02: //not_equal
8122 __ subu(AT, op1, op2);
8123 __ movn(dst, src, AT);
8124 break;
8126 case 0x03: //above
8127 __ sltu(AT, op2, op1);
8128 __ movn(dst, src, AT);
8129 break;
8131 case 0x04: //above_equal
8132 __ sltu(AT, op1, op2);
8133 __ movz(dst, src, AT);
8134 break;
8136 case 0x05: //below
8137 __ sltu(AT, op1, op2);
8138 __ movn(dst, src, AT);
8139 break;
8141 case 0x06: //below_equal
8142 __ sltu(AT, op2, op1);
8143 __ movz(dst, src, AT);
8144 break;
8146 default:
8147 Unimplemented();
8148 }
8149 %}
8151 ins_pipe( pipe_slow );
8152 %}
8154 instruct cmovP_cmpD_reg_reg(mRegP dst, mRegP src, regD tmp1, regD tmp2, cmpOp cop ) %{
8155 match(Set dst (CMoveP (Binary cop (CmpD tmp1 tmp2)) (Binary dst src)));
8156 ins_cost(80);
8157 format %{
8158 "CMP$cop $tmp1, $tmp2\t @cmovP_cmpD_reg_reg\n"
8159 "\tCMOV $dst,$src \t @cmovP_cmpD_reg_reg"
8160 %}
8161 ins_encode %{
8162 FloatRegister reg_op1 = as_FloatRegister($tmp1$$reg);
8163 FloatRegister reg_op2 = as_FloatRegister($tmp2$$reg);
8164 Register dst = as_Register($dst$$reg);
8165 Register src = as_Register($src$$reg);
8167 int flag = $cop$$cmpcode;
8169 switch(flag)
8170 {
8171 case 0x01: //equal
8172 __ c_eq_d(reg_op1, reg_op2);
8173 __ movt(dst, src);
8174 break;
8175 case 0x02: //not_equal
8176 __ c_eq_d(reg_op1, reg_op2);
8177 __ movf(dst, src);
8178 break;
8179 case 0x03: //greater
8180 __ c_ole_d(reg_op1, reg_op2);
8181 __ movf(dst, src);
8182 break;
8183 case 0x04: //greater_equal
8184 __ c_olt_d(reg_op1, reg_op2);
8185 __ movf(dst, src);
8186 break;
8187 case 0x05: //less
8188 __ c_ult_d(reg_op1, reg_op2);
8189 __ movt(dst, src);
8190 break;
8191 case 0x06: //less_equal
8192 __ c_ule_d(reg_op1, reg_op2);
8193 __ movt(dst, src);
8194 break;
8195 default:
8196 Unimplemented();
8197 }
8198 %}
8200 ins_pipe( pipe_slow );
8201 %}
8204 instruct cmovN_cmpN_reg_reg(mRegN dst, mRegN src, mRegN tmp1, mRegN tmp2, cmpOpU cop ) %{
8205 match(Set dst (CMoveN (Binary cop (CmpN tmp1 tmp2)) (Binary dst src)));
8206 ins_cost(80);
8207 format %{
8208 "CMPU$cop $tmp1,$tmp2\t @cmovN_cmpN_reg_reg\n\t"
8209 "CMOV $dst,$src\t @cmovN_cmpN_reg_reg"
8210 %}
8211 ins_encode %{
8212 Register op1 = $tmp1$$Register;
8213 Register op2 = $tmp2$$Register;
8214 Register dst = $dst$$Register;
8215 Register src = $src$$Register;
8216 int flag = $cop$$cmpcode;
8218 switch(flag)
8219 {
8220 case 0x01: //equal
8221 __ subu32(AT, op1, op2);
8222 __ movz(dst, src, AT);
8223 break;
8225 case 0x02: //not_equal
8226 __ subu32(AT, op1, op2);
8227 __ movn(dst, src, AT);
8228 break;
8230 case 0x03: //above
8231 __ sltu(AT, op2, op1);
8232 __ movn(dst, src, AT);
8233 break;
8235 case 0x04: //above_equal
8236 __ sltu(AT, op1, op2);
8237 __ movz(dst, src, AT);
8238 break;
8240 case 0x05: //below
8241 __ sltu(AT, op1, op2);
8242 __ movn(dst, src, AT);
8243 break;
8245 case 0x06: //below_equal
8246 __ sltu(AT, op2, op1);
8247 __ movz(dst, src, AT);
8248 break;
8250 default:
8251 Unimplemented();
8252 }
8253 %}
8255 ins_pipe( pipe_slow );
8256 %}
8259 instruct cmovI_cmpU_reg_reg(mRegI dst, mRegI src, mRegI tmp1, mRegI tmp2, cmpOpU cop ) %{
8260 match(Set dst (CMoveI (Binary cop (CmpU tmp1 tmp2)) (Binary dst src)));
8261 ins_cost(80);
8262 format %{
8263 "CMPU$cop $tmp1,$tmp2\t @cmovI_cmpU_reg_reg\n\t"
8264 "CMOV $dst,$src\t @cmovI_cmpU_reg_reg"
8265 %}
8266 ins_encode %{
8267 Register op1 = $tmp1$$Register;
8268 Register op2 = $tmp2$$Register;
8269 Register dst = $dst$$Register;
8270 Register src = $src$$Register;
8271 int flag = $cop$$cmpcode;
8273 switch(flag)
8274 {
8275 case 0x01: //equal
8276 __ subu(AT, op1, op2);
8277 __ movz(dst, src, AT);
8278 break;
8280 case 0x02: //not_equal
8281 __ subu(AT, op1, op2);
8282 __ movn(dst, src, AT);
8283 break;
8285 case 0x03: //above
8286 __ sltu(AT, op2, op1);
8287 __ movn(dst, src, AT);
8288 break;
8290 case 0x04: //above_equal
8291 __ sltu(AT, op1, op2);
8292 __ movz(dst, src, AT);
8293 break;
8295 case 0x05: //below
8296 __ sltu(AT, op1, op2);
8297 __ movn(dst, src, AT);
8298 break;
8300 case 0x06: //below_equal
8301 __ sltu(AT, op2, op1);
8302 __ movz(dst, src, AT);
8303 break;
8305 default:
8306 Unimplemented();
8307 }
8308 %}
8310 ins_pipe( pipe_slow );
8311 %}
8313 instruct cmovI_cmpL_reg_reg(mRegI dst, mRegI src, mRegL tmp1, mRegL tmp2, cmpOp cop ) %{
8314 match(Set dst (CMoveI (Binary cop (CmpL tmp1 tmp2)) (Binary dst src)));
8315 ins_cost(80);
8316 format %{
8317 "CMP$cop $tmp1, $tmp2\t @cmovI_cmpL_reg_reg\n"
8318 "\tCMOV $dst,$src \t @cmovI_cmpL_reg_reg"
8319 %}
8320 ins_encode %{
8321 Register opr1 = as_Register($tmp1$$reg);
8322 Register opr2 = as_Register($tmp2$$reg);
8323 Register dst = $dst$$Register;
8324 Register src = $src$$Register;
8325 int flag = $cop$$cmpcode;
8327 switch(flag)
8328 {
8329 case 0x01: //equal
8330 __ subu(AT, opr1, opr2);
8331 __ movz(dst, src, AT);
8332 break;
8334 case 0x02: //not_equal
8335 __ subu(AT, opr1, opr2);
8336 __ movn(dst, src, AT);
8337 break;
8339 case 0x03: //greater
8340 __ slt(AT, opr2, opr1);
8341 __ movn(dst, src, AT);
8342 break;
8344 case 0x04: //greater_equal
8345 __ slt(AT, opr1, opr2);
8346 __ movz(dst, src, AT);
8347 break;
8349 case 0x05: //less
8350 __ slt(AT, opr1, opr2);
8351 __ movn(dst, src, AT);
8352 break;
8354 case 0x06: //less_equal
8355 __ slt(AT, opr2, opr1);
8356 __ movz(dst, src, AT);
8357 break;
8359 default:
8360 Unimplemented();
8361 }
8362 %}
8364 ins_pipe( pipe_slow );
8365 %}
8367 instruct cmovP_cmpL_reg_reg(mRegP dst, mRegP src, mRegL tmp1, mRegL tmp2, cmpOp cop ) %{
8368 match(Set dst (CMoveP (Binary cop (CmpL tmp1 tmp2)) (Binary dst src)));
8369 ins_cost(80);
8370 format %{
8371 "CMP$cop $tmp1, $tmp2\t @cmovP_cmpL_reg_reg\n"
8372 "\tCMOV $dst,$src \t @cmovP_cmpL_reg_reg"
8373 %}
8374 ins_encode %{
8375 Register opr1 = as_Register($tmp1$$reg);
8376 Register opr2 = as_Register($tmp2$$reg);
8377 Register dst = $dst$$Register;
8378 Register src = $src$$Register;
8379 int flag = $cop$$cmpcode;
8381 switch(flag)
8382 {
8383 case 0x01: //equal
8384 __ subu(AT, opr1, opr2);
8385 __ movz(dst, src, AT);
8386 break;
8388 case 0x02: //not_equal
8389 __ subu(AT, opr1, opr2);
8390 __ movn(dst, src, AT);
8391 break;
8393 case 0x03: //greater
8394 __ slt(AT, opr2, opr1);
8395 __ movn(dst, src, AT);
8396 break;
8398 case 0x04: //greater_equal
8399 __ slt(AT, opr1, opr2);
8400 __ movz(dst, src, AT);
8401 break;
8403 case 0x05: //less
8404 __ slt(AT, opr1, opr2);
8405 __ movn(dst, src, AT);
8406 break;
8408 case 0x06: //less_equal
8409 __ slt(AT, opr2, opr1);
8410 __ movz(dst, src, AT);
8411 break;
8413 default:
8414 Unimplemented();
8415 }
8416 %}
8418 ins_pipe( pipe_slow );
8419 %}
8421 instruct cmovI_cmpD_reg_reg(mRegI dst, mRegI src, regD tmp1, regD tmp2, cmpOp cop ) %{
8422 match(Set dst (CMoveI (Binary cop (CmpD tmp1 tmp2)) (Binary dst src)));
8423 ins_cost(80);
8424 format %{
8425 "CMP$cop $tmp1, $tmp2\t @cmovI_cmpD_reg_reg\n"
8426 "\tCMOV $dst,$src \t @cmovI_cmpD_reg_reg"
8427 %}
8428 ins_encode %{
8429 FloatRegister reg_op1 = as_FloatRegister($tmp1$$reg);
8430 FloatRegister reg_op2 = as_FloatRegister($tmp2$$reg);
8431 Register dst = as_Register($dst$$reg);
8432 Register src = as_Register($src$$reg);
8434 int flag = $cop$$cmpcode;
8436 switch(flag)
8437 {
8438 case 0x01: //equal
8439 __ c_eq_d(reg_op1, reg_op2);
8440 __ movt(dst, src);
8441 break;
8442 case 0x02: //not_equal
8443 //2016/4/19 aoqi: See instruct branchConD_reg_reg. The change in branchConD_reg_reg fixed a bug. It seems similar here, so I made thesame change.
8444 __ c_eq_d(reg_op1, reg_op2);
8445 __ movf(dst, src);
8446 break;
8447 case 0x03: //greater
8448 __ c_ole_d(reg_op1, reg_op2);
8449 __ movf(dst, src);
8450 break;
8451 case 0x04: //greater_equal
8452 __ c_olt_d(reg_op1, reg_op2);
8453 __ movf(dst, src);
8454 break;
8455 case 0x05: //less
8456 __ c_ult_d(reg_op1, reg_op2);
8457 __ movt(dst, src);
8458 break;
8459 case 0x06: //less_equal
8460 __ c_ule_d(reg_op1, reg_op2);
8461 __ movt(dst, src);
8462 break;
8463 default:
8464 Unimplemented();
8465 }
8466 %}
8468 ins_pipe( pipe_slow );
8469 %}
8472 instruct cmovP_cmpP_reg_reg(mRegP dst, mRegP src, mRegP tmp1, mRegP tmp2, cmpOpU cop ) %{
8473 match(Set dst (CMoveP (Binary cop (CmpP tmp1 tmp2)) (Binary dst src)));
8474 ins_cost(80);
8475 format %{
8476 "CMPU$cop $tmp1,$tmp2\t @cmovP_cmpP_reg_reg\n\t"
8477 "CMOV $dst,$src\t @cmovP_cmpP_reg_reg"
8478 %}
8479 ins_encode %{
8480 Register op1 = $tmp1$$Register;
8481 Register op2 = $tmp2$$Register;
8482 Register dst = $dst$$Register;
8483 Register src = $src$$Register;
8484 int flag = $cop$$cmpcode;
8486 switch(flag)
8487 {
8488 case 0x01: //equal
8489 __ subu(AT, op1, op2);
8490 __ movz(dst, src, AT);
8491 break;
8493 case 0x02: //not_equal
8494 __ subu(AT, op1, op2);
8495 __ movn(dst, src, AT);
8496 break;
8498 case 0x03: //above
8499 __ sltu(AT, op2, op1);
8500 __ movn(dst, src, AT);
8501 break;
8503 case 0x04: //above_equal
8504 __ sltu(AT, op1, op2);
8505 __ movz(dst, src, AT);
8506 break;
8508 case 0x05: //below
8509 __ sltu(AT, op1, op2);
8510 __ movn(dst, src, AT);
8511 break;
8513 case 0x06: //below_equal
8514 __ sltu(AT, op2, op1);
8515 __ movz(dst, src, AT);
8516 break;
8518 default:
8519 Unimplemented();
8520 }
8521 %}
8523 ins_pipe( pipe_slow );
8524 %}
8526 instruct cmovP_cmpI_reg_reg(mRegP dst, mRegP src, mRegI tmp1, mRegI tmp2, cmpOp cop ) %{
8527 match(Set dst (CMoveP (Binary cop (CmpI tmp1 tmp2)) (Binary dst src)));
8528 ins_cost(80);
8529 format %{
8530 "CMP$cop $tmp1,$tmp2\t @cmovP_cmpI_reg_reg\n\t"
8531 "CMOV $dst,$src\t @cmovP_cmpI_reg_reg"
8532 %}
8533 ins_encode %{
8534 Register op1 = $tmp1$$Register;
8535 Register op2 = $tmp2$$Register;
8536 Register dst = $dst$$Register;
8537 Register src = $src$$Register;
8538 int flag = $cop$$cmpcode;
8540 switch(flag)
8541 {
8542 case 0x01: //equal
8543 __ subu32(AT, op1, op2);
8544 __ movz(dst, src, AT);
8545 break;
8547 case 0x02: //not_equal
8548 __ subu32(AT, op1, op2);
8549 __ movn(dst, src, AT);
8550 break;
8552 case 0x03: //above
8553 __ slt(AT, op2, op1);
8554 __ movn(dst, src, AT);
8555 break;
8557 case 0x04: //above_equal
8558 __ slt(AT, op1, op2);
8559 __ movz(dst, src, AT);
8560 break;
8562 case 0x05: //below
8563 __ slt(AT, op1, op2);
8564 __ movn(dst, src, AT);
8565 break;
8567 case 0x06: //below_equal
8568 __ slt(AT, op2, op1);
8569 __ movz(dst, src, AT);
8570 break;
8572 default:
8573 Unimplemented();
8574 }
8575 %}
8577 ins_pipe( pipe_slow );
8578 %}
8580 instruct cmovN_cmpI_reg_reg(mRegN dst, mRegN src, mRegI tmp1, mRegI tmp2, cmpOp cop ) %{
8581 match(Set dst (CMoveN (Binary cop (CmpI tmp1 tmp2)) (Binary dst src)));
8582 ins_cost(80);
8583 format %{
8584 "CMP$cop $tmp1,$tmp2\t @cmovN_cmpI_reg_reg\n\t"
8585 "CMOV $dst,$src\t @cmovN_cmpI_reg_reg"
8586 %}
8587 ins_encode %{
8588 Register op1 = $tmp1$$Register;
8589 Register op2 = $tmp2$$Register;
8590 Register dst = $dst$$Register;
8591 Register src = $src$$Register;
8592 int flag = $cop$$cmpcode;
8594 switch(flag)
8595 {
8596 case 0x01: //equal
8597 __ subu32(AT, op1, op2);
8598 __ movz(dst, src, AT);
8599 break;
8601 case 0x02: //not_equal
8602 __ subu32(AT, op1, op2);
8603 __ movn(dst, src, AT);
8604 break;
8606 case 0x03: //above
8607 __ slt(AT, op2, op1);
8608 __ movn(dst, src, AT);
8609 break;
8611 case 0x04: //above_equal
8612 __ slt(AT, op1, op2);
8613 __ movz(dst, src, AT);
8614 break;
8616 case 0x05: //below
8617 __ slt(AT, op1, op2);
8618 __ movn(dst, src, AT);
8619 break;
8621 case 0x06: //below_equal
8622 __ slt(AT, op2, op1);
8623 __ movz(dst, src, AT);
8624 break;
8626 default:
8627 Unimplemented();
8628 }
8629 %}
8631 ins_pipe( pipe_slow );
8632 %}
8635 instruct cmovL_cmpI_reg_reg(mRegL dst, mRegL src, mRegI tmp1, mRegI tmp2, cmpOp cop ) %{
8636 match(Set dst (CMoveL (Binary cop (CmpI tmp1 tmp2)) (Binary dst src)));
8637 ins_cost(80);
8638 format %{
8639 "CMP$cop $tmp1, $tmp2\t @cmovL_cmpI_reg_reg\n"
8640 "\tCMOV $dst,$src \t @cmovL_cmpI_reg_reg"
8641 %}
8643 ins_encode %{
8644 Register op1 = $tmp1$$Register;
8645 Register op2 = $tmp2$$Register;
8646 Register dst = as_Register($dst$$reg);
8647 Register src = as_Register($src$$reg);
8648 int flag = $cop$$cmpcode;
8650 switch(flag)
8651 {
8652 case 0x01: //equal
8653 __ subu32(AT, op1, op2);
8654 __ movz(dst, src, AT);
8655 break;
8657 case 0x02: //not_equal
8658 __ subu32(AT, op1, op2);
8659 __ movn(dst, src, AT);
8660 break;
8662 case 0x03: //great
8663 __ slt(AT, op2, op1);
8664 __ movn(dst, src, AT);
8665 break;
8667 case 0x04: //great_equal
8668 __ slt(AT, op1, op2);
8669 __ movz(dst, src, AT);
8670 break;
8672 case 0x05: //less
8673 __ slt(AT, op1, op2);
8674 __ movn(dst, src, AT);
8675 break;
8677 case 0x06: //less_equal
8678 __ slt(AT, op2, op1);
8679 __ movz(dst, src, AT);
8680 break;
8682 default:
8683 Unimplemented();
8684 }
8685 %}
8687 ins_pipe( pipe_slow );
8688 %}
8690 instruct cmovL_cmpL_reg_reg(mRegL dst, mRegL src, mRegL tmp1, mRegL tmp2, cmpOp cop ) %{
8691 match(Set dst (CMoveL (Binary cop (CmpL tmp1 tmp2)) (Binary dst src)));
8692 ins_cost(80);
8693 format %{
8694 "CMP$cop $tmp1, $tmp2\t @cmovL_cmpL_reg_reg\n"
8695 "\tCMOV $dst,$src \t @cmovL_cmpL_reg_reg"
8696 %}
8697 ins_encode %{
8698 Register opr1 = as_Register($tmp1$$reg);
8699 Register opr2 = as_Register($tmp2$$reg);
8700 Register dst = as_Register($dst$$reg);
8701 Register src = as_Register($src$$reg);
8702 int flag = $cop$$cmpcode;
8704 switch(flag)
8705 {
8706 case 0x01: //equal
8707 __ subu(AT, opr1, opr2);
8708 __ movz(dst, src, AT);
8709 break;
8711 case 0x02: //not_equal
8712 __ subu(AT, opr1, opr2);
8713 __ movn(dst, src, AT);
8714 break;
8716 case 0x03: //greater
8717 __ slt(AT, opr2, opr1);
8718 __ movn(dst, src, AT);
8719 break;
8721 case 0x04: //greater_equal
8722 __ slt(AT, opr1, opr2);
8723 __ movz(dst, src, AT);
8724 break;
8726 case 0x05: //less
8727 __ slt(AT, opr1, opr2);
8728 __ movn(dst, src, AT);
8729 break;
8731 case 0x06: //less_equal
8732 __ slt(AT, opr2, opr1);
8733 __ movz(dst, src, AT);
8734 break;
8736 default:
8737 Unimplemented();
8738 }
8739 %}
8741 ins_pipe( pipe_slow );
8742 %}
8744 instruct cmovL_cmpN_reg_reg(mRegL dst, mRegL src, mRegN tmp1, mRegN tmp2, cmpOpU cop ) %{
8745 match(Set dst (CMoveL (Binary cop (CmpN tmp1 tmp2)) (Binary dst src)));
8746 ins_cost(80);
8747 format %{
8748 "CMPU$cop $tmp1,$tmp2\t @cmovL_cmpN_reg_reg\n\t"
8749 "CMOV $dst,$src\t @cmovL_cmpN_reg_reg"
8750 %}
8751 ins_encode %{
8752 Register op1 = $tmp1$$Register;
8753 Register op2 = $tmp2$$Register;
8754 Register dst = $dst$$Register;
8755 Register src = $src$$Register;
8756 int flag = $cop$$cmpcode;
8758 switch(flag)
8759 {
8760 case 0x01: //equal
8761 __ subu32(AT, op1, op2);
8762 __ movz(dst, src, AT);
8763 break;
8765 case 0x02: //not_equal
8766 __ subu32(AT, op1, op2);
8767 __ movn(dst, src, AT);
8768 break;
8770 case 0x03: //above
8771 __ sltu(AT, op2, op1);
8772 __ movn(dst, src, AT);
8773 break;
8775 case 0x04: //above_equal
8776 __ sltu(AT, op1, op2);
8777 __ movz(dst, src, AT);
8778 break;
8780 case 0x05: //below
8781 __ sltu(AT, op1, op2);
8782 __ movn(dst, src, AT);
8783 break;
8785 case 0x06: //below_equal
8786 __ sltu(AT, op2, op1);
8787 __ movz(dst, src, AT);
8788 break;
8790 default:
8791 Unimplemented();
8792 }
8793 %}
8795 ins_pipe( pipe_slow );
8796 %}
8799 instruct cmovL_cmpD_reg_reg(mRegL dst, mRegL src, regD tmp1, regD tmp2, cmpOp cop ) %{
8800 match(Set dst (CMoveL (Binary cop (CmpD tmp1 tmp2)) (Binary dst src)));
8801 ins_cost(80);
8802 format %{
8803 "CMP$cop $tmp1, $tmp2\t @cmovL_cmpD_reg_reg\n"
8804 "\tCMOV $dst,$src \t @cmovL_cmpD_reg_reg"
8805 %}
8806 ins_encode %{
8807 FloatRegister reg_op1 = as_FloatRegister($tmp1$$reg);
8808 FloatRegister reg_op2 = as_FloatRegister($tmp2$$reg);
8809 Register dst = as_Register($dst$$reg);
8810 Register src = as_Register($src$$reg);
8812 int flag = $cop$$cmpcode;
8814 switch(flag)
8815 {
8816 case 0x01: //equal
8817 __ c_eq_d(reg_op1, reg_op2);
8818 __ movt(dst, src);
8819 break;
8820 case 0x02: //not_equal
8821 __ c_eq_d(reg_op1, reg_op2);
8822 __ movf(dst, src);
8823 break;
8824 case 0x03: //greater
8825 __ c_ole_d(reg_op1, reg_op2);
8826 __ movf(dst, src);
8827 break;
8828 case 0x04: //greater_equal
8829 __ c_olt_d(reg_op1, reg_op2);
8830 __ movf(dst, src);
8831 break;
8832 case 0x05: //less
8833 __ c_ult_d(reg_op1, reg_op2);
8834 __ movt(dst, src);
8835 break;
8836 case 0x06: //less_equal
8837 __ c_ule_d(reg_op1, reg_op2);
8838 __ movt(dst, src);
8839 break;
8840 default:
8841 Unimplemented();
8842 }
8843 %}
8845 ins_pipe( pipe_slow );
8846 %}
8848 instruct cmovD_cmpD_reg_reg(regD dst, regD src, regD tmp1, regD tmp2, cmpOp cop ) %{
8849 match(Set dst (CMoveD (Binary cop (CmpD tmp1 tmp2)) (Binary dst src)));
8850 ins_cost(200);
8851 format %{
8852 "CMP$cop $tmp1, $tmp2\t @cmovD_cmpD_reg_reg\n"
8853 "\tCMOV $dst,$src \t @cmovD_cmpD_reg_reg"
8854 %}
8855 ins_encode %{
8856 FloatRegister reg_op1 = as_FloatRegister($tmp1$$reg);
8857 FloatRegister reg_op2 = as_FloatRegister($tmp2$$reg);
8858 FloatRegister dst = as_FloatRegister($dst$$reg);
8859 FloatRegister src = as_FloatRegister($src$$reg);
8861 int flag = $cop$$cmpcode;
8863 Label L;
8865 switch(flag)
8866 {
8867 case 0x01: //equal
8868 __ c_eq_d(reg_op1, reg_op2);
8869 __ bc1f(L);
8870 __ nop();
8871 __ mov_d(dst, src);
8872 __ bind(L);
8873 break;
8874 case 0x02: //not_equal
8875 //2016/4/19 aoqi: See instruct branchConD_reg_reg. The change in branchConD_reg_reg fixed a bug. It seems similar here, so I made thesame change.
8876 __ c_eq_d(reg_op1, reg_op2);
8877 __ bc1t(L);
8878 __ nop();
8879 __ mov_d(dst, src);
8880 __ bind(L);
8881 break;
8882 case 0x03: //greater
8883 __ c_ole_d(reg_op1, reg_op2);
8884 __ bc1t(L);
8885 __ nop();
8886 __ mov_d(dst, src);
8887 __ bind(L);
8888 break;
8889 case 0x04: //greater_equal
8890 __ c_olt_d(reg_op1, reg_op2);
8891 __ bc1t(L);
8892 __ nop();
8893 __ mov_d(dst, src);
8894 __ bind(L);
8895 break;
8896 case 0x05: //less
8897 __ c_ult_d(reg_op1, reg_op2);
8898 __ bc1f(L);
8899 __ nop();
8900 __ mov_d(dst, src);
8901 __ bind(L);
8902 break;
8903 case 0x06: //less_equal
8904 __ c_ule_d(reg_op1, reg_op2);
8905 __ bc1f(L);
8906 __ nop();
8907 __ mov_d(dst, src);
8908 __ bind(L);
8909 break;
8910 default:
8911 Unimplemented();
8912 }
8913 %}
8915 ins_pipe( pipe_slow );
8916 %}
8918 instruct cmovF_cmpI_reg_reg(regF dst, regF src, mRegI tmp1, mRegI tmp2, cmpOp cop ) %{
8919 match(Set dst (CMoveF (Binary cop (CmpI tmp1 tmp2)) (Binary dst src)));
8920 ins_cost(200);
8921 format %{
8922 "CMP$cop $tmp1, $tmp2\t @cmovF_cmpI_reg_reg\n"
8923 "\tCMOV $dst, $src \t @cmovF_cmpI_reg_reg"
8924 %}
8926 ins_encode %{
8927 Register op1 = $tmp1$$Register;
8928 Register op2 = $tmp2$$Register;
8929 FloatRegister dst = as_FloatRegister($dst$$reg);
8930 FloatRegister src = as_FloatRegister($src$$reg);
8931 int flag = $cop$$cmpcode;
8932 Label L;
8934 switch(flag)
8935 {
8936 case 0x01: //equal
8937 __ bne(op1, op2, L);
8938 __ nop();
8939 __ mov_s(dst, src);
8940 __ bind(L);
8941 break;
8942 case 0x02: //not_equal
8943 __ beq(op1, op2, L);
8944 __ nop();
8945 __ mov_s(dst, src);
8946 __ bind(L);
8947 break;
8948 case 0x03: //great
8949 __ slt(AT, op2, op1);
8950 __ beq(AT, R0, L);
8951 __ nop();
8952 __ mov_s(dst, src);
8953 __ bind(L);
8954 break;
8955 case 0x04: //great_equal
8956 __ slt(AT, op1, op2);
8957 __ bne(AT, R0, L);
8958 __ nop();
8959 __ mov_s(dst, src);
8960 __ bind(L);
8961 break;
8962 case 0x05: //less
8963 __ slt(AT, op1, op2);
8964 __ beq(AT, R0, L);
8965 __ nop();
8966 __ mov_s(dst, src);
8967 __ bind(L);
8968 break;
8969 case 0x06: //less_equal
8970 __ slt(AT, op2, op1);
8971 __ bne(AT, R0, L);
8972 __ nop();
8973 __ mov_s(dst, src);
8974 __ bind(L);
8975 break;
8976 default:
8977 Unimplemented();
8978 }
8979 %}
8981 ins_pipe( pipe_slow );
8982 %}
8984 instruct cmovD_cmpI_reg_reg(regD dst, regD src, mRegI tmp1, mRegI tmp2, cmpOp cop ) %{
8985 match(Set dst (CMoveD (Binary cop (CmpI tmp1 tmp2)) (Binary dst src)));
8986 ins_cost(200);
8987 format %{
8988 "CMP$cop $tmp1, $tmp2\t @cmovD_cmpI_reg_reg\n"
8989 "\tCMOV $dst, $src \t @cmovD_cmpI_reg_reg"
8990 %}
8992 ins_encode %{
8993 Register op1 = $tmp1$$Register;
8994 Register op2 = $tmp2$$Register;
8995 FloatRegister dst = as_FloatRegister($dst$$reg);
8996 FloatRegister src = as_FloatRegister($src$$reg);
8997 int flag = $cop$$cmpcode;
8998 Label L;
9000 switch(flag)
9001 {
9002 case 0x01: //equal
9003 __ bne(op1, op2, L);
9004 __ nop();
9005 __ mov_d(dst, src);
9006 __ bind(L);
9007 break;
9008 case 0x02: //not_equal
9009 __ beq(op1, op2, L);
9010 __ nop();
9011 __ mov_d(dst, src);
9012 __ bind(L);
9013 break;
9014 case 0x03: //great
9015 __ slt(AT, op2, op1);
9016 __ beq(AT, R0, L);
9017 __ nop();
9018 __ mov_d(dst, src);
9019 __ bind(L);
9020 break;
9021 case 0x04: //great_equal
9022 __ slt(AT, op1, op2);
9023 __ bne(AT, R0, L);
9024 __ nop();
9025 __ mov_d(dst, src);
9026 __ bind(L);
9027 break;
9028 case 0x05: //less
9029 __ slt(AT, op1, op2);
9030 __ beq(AT, R0, L);
9031 __ nop();
9032 __ mov_d(dst, src);
9033 __ bind(L);
9034 break;
9035 case 0x06: //less_equal
9036 __ slt(AT, op2, op1);
9037 __ bne(AT, R0, L);
9038 __ nop();
9039 __ mov_d(dst, src);
9040 __ bind(L);
9041 break;
9042 default:
9043 Unimplemented();
9044 }
9045 %}
9047 ins_pipe( pipe_slow );
9048 %}
9050 instruct cmovD_cmpP_reg_reg(regD dst, regD src, mRegP tmp1, mRegP tmp2, cmpOp cop ) %{
9051 match(Set dst (CMoveD (Binary cop (CmpP tmp1 tmp2)) (Binary dst src)));
9052 ins_cost(200);
9053 format %{
9054 "CMP$cop $tmp1, $tmp2\t @cmovD_cmpP_reg_reg\n"
9055 "\tCMOV $dst, $src \t @cmovD_cmpP_reg_reg"
9056 %}
9058 ins_encode %{
9059 Register op1 = $tmp1$$Register;
9060 Register op2 = $tmp2$$Register;
9061 FloatRegister dst = as_FloatRegister($dst$$reg);
9062 FloatRegister src = as_FloatRegister($src$$reg);
9063 int flag = $cop$$cmpcode;
9064 Label L;
9066 switch(flag)
9067 {
9068 case 0x01: //equal
9069 __ bne(op1, op2, L);
9070 __ nop();
9071 __ mov_d(dst, src);
9072 __ bind(L);
9073 break;
9074 case 0x02: //not_equal
9075 __ beq(op1, op2, L);
9076 __ nop();
9077 __ mov_d(dst, src);
9078 __ bind(L);
9079 break;
9080 case 0x03: //great
9081 __ slt(AT, op2, op1);
9082 __ beq(AT, R0, L);
9083 __ nop();
9084 __ mov_d(dst, src);
9085 __ bind(L);
9086 break;
9087 case 0x04: //great_equal
9088 __ slt(AT, op1, op2);
9089 __ bne(AT, R0, L);
9090 __ nop();
9091 __ mov_d(dst, src);
9092 __ bind(L);
9093 break;
9094 case 0x05: //less
9095 __ slt(AT, op1, op2);
9096 __ beq(AT, R0, L);
9097 __ nop();
9098 __ mov_d(dst, src);
9099 __ bind(L);
9100 break;
9101 case 0x06: //less_equal
9102 __ slt(AT, op2, op1);
9103 __ bne(AT, R0, L);
9104 __ nop();
9105 __ mov_d(dst, src);
9106 __ bind(L);
9107 break;
9108 default:
9109 Unimplemented();
9110 }
9111 %}
9113 ins_pipe( pipe_slow );
9114 %}
9116 //FIXME
9117 instruct cmovI_cmpF_reg_reg(mRegI dst, mRegI src, regF tmp1, regF tmp2, cmpOp cop ) %{
9118 match(Set dst (CMoveI (Binary cop (CmpF tmp1 tmp2)) (Binary dst src)));
9119 ins_cost(80);
9120 format %{
9121 "CMP$cop $tmp1, $tmp2\t @cmovI_cmpF_reg_reg\n"
9122 "\tCMOV $dst,$src \t @cmovI_cmpF_reg_reg"
9123 %}
9125 ins_encode %{
9126 FloatRegister reg_op1 = $tmp1$$FloatRegister;
9127 FloatRegister reg_op2 = $tmp2$$FloatRegister;
9128 Register dst = $dst$$Register;
9129 Register src = $src$$Register;
9130 int flag = $cop$$cmpcode;
9132 switch(flag)
9133 {
9134 case 0x01: //equal
9135 __ c_eq_s(reg_op1, reg_op2);
9136 __ movt(dst, src);
9137 break;
9138 case 0x02: //not_equal
9139 __ c_eq_s(reg_op1, reg_op2);
9140 __ movf(dst, src);
9141 break;
9142 case 0x03: //greater
9143 __ c_ole_s(reg_op1, reg_op2);
9144 __ movf(dst, src);
9145 break;
9146 case 0x04: //greater_equal
9147 __ c_olt_s(reg_op1, reg_op2);
9148 __ movf(dst, src);
9149 break;
9150 case 0x05: //less
9151 __ c_ult_s(reg_op1, reg_op2);
9152 __ movt(dst, src);
9153 break;
9154 case 0x06: //less_equal
9155 __ c_ule_s(reg_op1, reg_op2);
9156 __ movt(dst, src);
9157 break;
9158 default:
9159 Unimplemented();
9160 }
9161 %}
9162 ins_pipe( pipe_slow );
9163 %}
9165 instruct cmovF_cmpF_reg_reg(regF dst, regF src, regF tmp1, regF tmp2, cmpOp cop ) %{
9166 match(Set dst (CMoveF (Binary cop (CmpF tmp1 tmp2)) (Binary dst src)));
9167 ins_cost(200);
9168 format %{
9169 "CMP$cop $tmp1, $tmp2\t @cmovF_cmpF_reg_reg\n"
9170 "\tCMOV $dst,$src \t @cmovF_cmpF_reg_reg"
9171 %}
9173 ins_encode %{
9174 FloatRegister reg_op1 = $tmp1$$FloatRegister;
9175 FloatRegister reg_op2 = $tmp2$$FloatRegister;
9176 FloatRegister dst = $dst$$FloatRegister;
9177 FloatRegister src = $src$$FloatRegister;
9178 Label L;
9179 int flag = $cop$$cmpcode;
9181 switch(flag)
9182 {
9183 case 0x01: //equal
9184 __ c_eq_s(reg_op1, reg_op2);
9185 __ bc1f(L);
9186 __ nop();
9187 __ mov_s(dst, src);
9188 __ bind(L);
9189 break;
9190 case 0x02: //not_equal
9191 __ c_eq_s(reg_op1, reg_op2);
9192 __ bc1t(L);
9193 __ nop();
9194 __ mov_s(dst, src);
9195 __ bind(L);
9196 break;
9197 case 0x03: //greater
9198 __ c_ole_s(reg_op1, reg_op2);
9199 __ bc1t(L);
9200 __ nop();
9201 __ mov_s(dst, src);
9202 __ bind(L);
9203 break;
9204 case 0x04: //greater_equal
9205 __ c_olt_s(reg_op1, reg_op2);
9206 __ bc1t(L);
9207 __ nop();
9208 __ mov_s(dst, src);
9209 __ bind(L);
9210 break;
9211 case 0x05: //less
9212 __ c_ult_s(reg_op1, reg_op2);
9213 __ bc1f(L);
9214 __ nop();
9215 __ mov_s(dst, src);
9216 __ bind(L);
9217 break;
9218 case 0x06: //less_equal
9219 __ c_ule_s(reg_op1, reg_op2);
9220 __ bc1f(L);
9221 __ nop();
9222 __ mov_s(dst, src);
9223 __ bind(L);
9224 break;
9225 default:
9226 Unimplemented();
9227 }
9228 %}
9229 ins_pipe( pipe_slow );
9230 %}
9232 // Manifest a CmpL result in an integer register. Very painful.
9233 // This is the test to avoid.
9234 instruct cmpL3_reg_reg(mRegI dst, mRegL src1, mRegL src2) %{
9235 match(Set dst (CmpL3 src1 src2));
9236 ins_cost(1000);
9237 format %{ "cmpL3 $dst, $src1, $src2 @ cmpL3_reg_reg" %}
9238 ins_encode %{
9239 Register opr1 = as_Register($src1$$reg);
9240 Register opr2 = as_Register($src2$$reg);
9241 Register dst = as_Register($dst$$reg);
9243 Label Done;
9245 __ subu(AT, opr1, opr2);
9246 __ bltz(AT, Done);
9247 __ delayed()->daddiu(dst, R0, -1);
9249 __ move(dst, 1);
9250 __ movz(dst, R0, AT);
9252 __ bind(Done);
9253 %}
9254 ins_pipe( pipe_slow );
9255 %}
9257 //
9258 // less_rsult = -1
9259 // greater_result = 1
9260 // equal_result = 0
9261 // nan_result = -1
9262 //
9263 instruct cmpF3_reg_reg(mRegI dst, regF src1, regF src2) %{
9264 match(Set dst (CmpF3 src1 src2));
9265 ins_cost(1000);
9266 format %{ "cmpF3 $dst, $src1, $src2 @ cmpF3_reg_reg" %}
9267 ins_encode %{
9268 FloatRegister src1 = as_FloatRegister($src1$$reg);
9269 FloatRegister src2 = as_FloatRegister($src2$$reg);
9270 Register dst = as_Register($dst$$reg);
9272 Label Done;
9274 __ c_ult_s(src1, src2);
9275 __ bc1t(Done);
9276 __ delayed()->daddiu(dst, R0, -1);
9278 __ c_eq_s(src1, src2);
9279 __ move(dst, 1);
9280 __ movt(dst, R0);
9282 __ bind(Done);
9283 %}
9284 ins_pipe( pipe_slow );
9285 %}
9287 instruct cmpD3_reg_reg(mRegI dst, regD src1, regD src2) %{
9288 match(Set dst (CmpD3 src1 src2));
9289 ins_cost(1000);
9290 format %{ "cmpD3 $dst, $src1, $src2 @ cmpD3_reg_reg" %}
9291 ins_encode %{
9292 FloatRegister src1 = as_FloatRegister($src1$$reg);
9293 FloatRegister src2 = as_FloatRegister($src2$$reg);
9294 Register dst = as_Register($dst$$reg);
9296 Label Done;
9298 __ c_ult_d(src1, src2);
9299 __ bc1t(Done);
9300 __ delayed()->daddiu(dst, R0, -1);
9302 __ c_eq_d(src1, src2);
9303 __ move(dst, 1);
9304 __ movt(dst, R0);
9306 __ bind(Done);
9307 %}
9308 ins_pipe( pipe_slow );
9309 %}
9311 instruct clear_array(mRegL cnt, mRegP base, Universe dummy) %{
9312 match(Set dummy (ClearArray cnt base));
9313 format %{ "CLEAR_ARRAY base = $base, cnt = $cnt # Clear doublewords" %}
9314 ins_encode %{
9315 //Assume cnt is the number of bytes in an array to be cleared,
9316 //and base points to the starting address of the array.
9317 Register base = $base$$Register;
9318 Register num = $cnt$$Register;
9319 Label Loop, done;
9321 /* 2012/9/21 Jin: according to X86, $cnt is caculated by doublewords(8 bytes) */
9322 __ move(T9, num); /* T9 = words */
9323 __ beq(T9, R0, done);
9324 __ nop();
9325 __ move(AT, base);
9327 __ bind(Loop);
9328 __ sd(R0, Address(AT, 0));
9329 __ daddi(AT, AT, wordSize);
9330 __ daddi(T9, T9, -1);
9331 __ bne(T9, R0, Loop);
9332 __ delayed()->nop();
9333 __ bind(done);
9334 %}
9335 ins_pipe( pipe_slow );
9336 %}
9338 instruct string_compare(a4_RegP str1, mA5RegI cnt1, a6_RegP str2, mA7RegI cnt2, no_Ax_mRegI result) %{
9339 match(Set result (StrComp (Binary str1 cnt1) (Binary str2 cnt2)));
9340 effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2);
9342 format %{ "String Compare $str1[len: $cnt1], $str2[len: $cnt2] -> $result @ string_compare" %}
9343 ins_encode %{
9344 // Get the first character position in both strings
9345 // [8] char array, [12] offset, [16] count
9346 Register str1 = $str1$$Register;
9347 Register str2 = $str2$$Register;
9348 Register cnt1 = $cnt1$$Register;
9349 Register cnt2 = $cnt2$$Register;
9350 Register result = $result$$Register;
9352 Label L, Loop, haveResult, done;
9354 // compute the and difference of lengths (in result)
9355 __ subu(result, cnt1, cnt2); // result holds the difference of two lengths
9357 // compute the shorter length (in cnt1)
9358 __ slt(AT, cnt2, cnt1);
9359 __ movn(cnt1, cnt2, AT);
9361 // Now the shorter length is in cnt1 and cnt2 can be used as a tmp register
9362 __ bind(Loop); // Loop begin
9363 __ beq(cnt1, R0, done);
9364 __ delayed()->lhu(AT, str1, 0);;
9366 // compare current character
9367 __ lhu(cnt2, str2, 0);
9368 __ bne(AT, cnt2, haveResult);
9369 __ delayed()->addi(str1, str1, 2);
9370 __ addi(str2, str2, 2);
9371 __ b(Loop);
9372 __ delayed()->addi(cnt1, cnt1, -1); // Loop end
9374 __ bind(haveResult);
9375 __ subu(result, AT, cnt2);
9377 __ bind(done);
9378 %}
9380 ins_pipe( pipe_slow );
9381 %}
9383 // intrinsic optimization
9384 instruct string_equals(a4_RegP str1, a5_RegP str2, mA6RegI cnt, mA7RegI temp, no_Ax_mRegI result) %{
9385 match(Set result (StrEquals (Binary str1 str2) cnt));
9386 effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt, KILL temp);
9388 format %{ "String Equal $str1, $str2, len:$cnt tmp:$temp -> $result @ string_equals" %}
9389 ins_encode %{
9390 // Get the first character position in both strings
9391 // [8] char array, [12] offset, [16] count
9392 Register str1 = $str1$$Register;
9393 Register str2 = $str2$$Register;
9394 Register cnt = $cnt$$Register;
9395 Register tmp = $temp$$Register;
9396 Register result = $result$$Register;
9398 Label Loop, done;
9401 __ beq(str1, str2, done); // same char[] ?
9402 __ daddiu(result, R0, 1);
9404 __ bind(Loop); // Loop begin
9405 __ beq(cnt, R0, done);
9406 __ daddiu(result, R0, 1); // count == 0
9408 // compare current character
9409 __ lhu(AT, str1, 0);;
9410 __ lhu(tmp, str2, 0);
9411 __ bne(AT, tmp, done);
9412 __ delayed()->daddi(result, R0, 0);
9413 __ addi(str1, str1, 2);
9414 __ addi(str2, str2, 2);
9415 __ b(Loop);
9416 __ delayed()->addi(cnt, cnt, -1); // Loop end
9418 __ bind(done);
9419 %}
9421 ins_pipe( pipe_slow );
9422 %}
9424 //----------Arithmetic Instructions-------------------------------------------
9425 //----------Addition Instructions---------------------------------------------
9426 instruct addI_Reg_Reg(mRegI dst, mRegI src1, mRegI src2) %{
9427 match(Set dst (AddI src1 src2));
9429 format %{ "add $dst, $src1, $src2 #@addI_Reg_Reg" %}
9430 ins_encode %{
9431 Register dst = $dst$$Register;
9432 Register src1 = $src1$$Register;
9433 Register src2 = $src2$$Register;
9434 __ addu32(dst, src1, src2);
9435 %}
9436 ins_pipe( ialu_regI_regI );
9437 %}
9439 instruct addI_Reg_imm(mRegI dst, mRegI src1, immI src2) %{
9440 match(Set dst (AddI src1 src2));
9442 format %{ "add $dst, $src1, $src2 #@addI_Reg_imm" %}
9443 ins_encode %{
9444 Register dst = $dst$$Register;
9445 Register src1 = $src1$$Register;
9446 int imm = $src2$$constant;
9448 if(Assembler::is_simm16(imm)) {
9449 __ addiu32(dst, src1, imm);
9450 } else {
9451 __ move(AT, imm);
9452 __ addu32(dst, src1, AT);
9453 }
9454 %}
9455 ins_pipe( ialu_regI_regI );
9456 %}
9458 instruct addP_reg_reg(mRegP dst, mRegP src1, mRegL src2) %{
9459 match(Set dst (AddP src1 src2));
9461 format %{ "dadd $dst, $src1, $src2 #@addP_reg_reg" %}
9463 ins_encode %{
9464 Register dst = $dst$$Register;
9465 Register src1 = $src1$$Register;
9466 Register src2 = $src2$$Register;
9467 __ daddu(dst, src1, src2);
9468 %}
9470 ins_pipe( ialu_regI_regI );
9471 %}
9473 instruct addP_reg_reg_convI2L(mRegP dst, mRegP src1, mRegI src2) %{
9474 match(Set dst (AddP src1 (ConvI2L src2)));
9476 format %{ "dadd $dst, $src1, $src2 #@addP_reg_reg_convI2L" %}
9478 ins_encode %{
9479 Register dst = $dst$$Register;
9480 Register src1 = $src1$$Register;
9481 Register src2 = $src2$$Register;
9482 __ daddu(dst, src1, src2);
9483 %}
9485 ins_pipe( ialu_regI_regI );
9486 %}
9488 instruct addP_reg_imm(mRegP dst, mRegP src1, immL src2) %{
9489 match(Set dst (AddP src1 src2));
9491 format %{ "daddi $dst, $src1, $src2 #@addP_reg_imm" %}
9492 ins_encode %{
9493 Register src1 = $src1$$Register;
9494 long src2 = $src2$$constant;
9495 Register dst = $dst$$Register;
9497 if(Assembler::is_simm16(src2)) {
9498 __ daddiu(dst, src1, src2);
9499 } else {
9500 __ set64(AT, src2);
9501 __ daddu(dst, src1, AT);
9502 }
9503 %}
9504 ins_pipe( ialu_regI_imm16 );
9505 %}
9507 // Add Long Register with Register
9508 instruct addL_Reg_Reg(mRegL dst, mRegL src1, mRegL src2) %{
9509 match(Set dst (AddL src1 src2));
9510 ins_cost(200);
9511 format %{ "ADD $dst, $src1, $src2 #@addL_Reg_Reg\t" %}
9513 ins_encode %{
9514 Register dst_reg = as_Register($dst$$reg);
9515 Register src1_reg = as_Register($src1$$reg);
9516 Register src2_reg = as_Register($src2$$reg);
9518 __ daddu(dst_reg, src1_reg, src2_reg);
9519 %}
9521 ins_pipe( ialu_regL_regL );
9522 %}
9524 instruct addL_Reg_imm(mRegL dst, mRegL src1, immL16 src2)
9525 %{
9526 match(Set dst (AddL src1 src2));
9528 format %{ "ADD $dst, $src1, $src2 #@addL_Reg_imm " %}
9529 ins_encode %{
9530 Register dst_reg = as_Register($dst$$reg);
9531 Register src1_reg = as_Register($src1$$reg);
9532 int src2_imm = $src2$$constant;
9534 __ daddiu(dst_reg, src1_reg, src2_imm);
9535 %}
9537 ins_pipe( ialu_regL_regL );
9538 %}
9540 instruct addL_RegI2L_imm(mRegL dst, mRegI src1, immL16 src2)
9541 %{
9542 match(Set dst (AddL (ConvI2L src1) src2));
9544 format %{ "ADD $dst, $src1, $src2 #@addL_RegI2L_imm " %}
9545 ins_encode %{
9546 Register dst_reg = as_Register($dst$$reg);
9547 Register src1_reg = as_Register($src1$$reg);
9548 int src2_imm = $src2$$constant;
9550 __ daddiu(dst_reg, src1_reg, src2_imm);
9551 %}
9553 ins_pipe( ialu_regL_regL );
9554 %}
9556 instruct addL_RegI2L_Reg(mRegL dst, mRegI src1, mRegL src2) %{
9557 match(Set dst (AddL (ConvI2L src1) src2));
9558 ins_cost(200);
9559 format %{ "ADD $dst, $src1, $src2 #@addL_RegI2L_Reg\t" %}
9561 ins_encode %{
9562 Register dst_reg = as_Register($dst$$reg);
9563 Register src1_reg = as_Register($src1$$reg);
9564 Register src2_reg = as_Register($src2$$reg);
9566 __ daddu(dst_reg, src1_reg, src2_reg);
9567 %}
9569 ins_pipe( ialu_regL_regL );
9570 %}
9572 instruct addL_RegI2L_RegI2L(mRegL dst, mRegI src1, mRegI src2) %{
9573 match(Set dst (AddL (ConvI2L src1) (ConvI2L src2)));
9574 ins_cost(200);
9575 format %{ "ADD $dst, $src1, $src2 #@addL_RegI2L_RegI2L\t" %}
9577 ins_encode %{
9578 Register dst_reg = as_Register($dst$$reg);
9579 Register src1_reg = as_Register($src1$$reg);
9580 Register src2_reg = as_Register($src2$$reg);
9582 __ daddu(dst_reg, src1_reg, src2_reg);
9583 %}
9585 ins_pipe( ialu_regL_regL );
9586 %}
9588 instruct addL_Reg_RegI2L(mRegL dst, mRegL src1, mRegI src2) %{
9589 match(Set dst (AddL src1 (ConvI2L src2)));
9590 ins_cost(200);
9591 format %{ "ADD $dst, $src1, $src2 #@addL_Reg_RegI2L\t" %}
9593 ins_encode %{
9594 Register dst_reg = as_Register($dst$$reg);
9595 Register src1_reg = as_Register($src1$$reg);
9596 Register src2_reg = as_Register($src2$$reg);
9598 __ daddu(dst_reg, src1_reg, src2_reg);
9599 %}
9601 ins_pipe( ialu_regL_regL );
9602 %}
9604 //----------Subtraction Instructions-------------------------------------------
9605 // Integer Subtraction Instructions
9606 instruct subI_Reg_Reg(mRegI dst, mRegI src1, mRegI src2) %{
9607 match(Set dst (SubI src1 src2));
9608 ins_cost(100);
9610 format %{ "sub $dst, $src1, $src2 #@subI_Reg_Reg" %}
9611 ins_encode %{
9612 Register dst = $dst$$Register;
9613 Register src1 = $src1$$Register;
9614 Register src2 = $src2$$Register;
9615 __ subu32(dst, src1, src2);
9616 %}
9617 ins_pipe( ialu_regI_regI );
9618 %}
9620 instruct subI_Reg_immI16_sub(mRegI dst, mRegI src1, immI16_sub src2) %{
9621 match(Set dst (SubI src1 src2));
9622 ins_cost(80);
9624 format %{ "sub $dst, $src1, $src2 #@subI_Reg_immI16_sub" %}
9625 ins_encode %{
9626 Register dst = $dst$$Register;
9627 Register src1 = $src1$$Register;
9628 __ addiu32(dst, src1, -1 * $src2$$constant);
9629 %}
9630 ins_pipe( ialu_regI_regI );
9631 %}
9633 instruct negI_Reg(mRegI dst, immI0 zero, mRegI src) %{
9634 match(Set dst (SubI zero src));
9635 ins_cost(80);
9637 format %{ "neg $dst, $src #@negI_Reg" %}
9638 ins_encode %{
9639 Register dst = $dst$$Register;
9640 Register src = $src$$Register;
9641 __ subu32(dst, R0, src);
9642 %}
9643 ins_pipe( ialu_regI_regI );
9644 %}
9646 instruct negL_Reg(mRegL dst, immL0 zero, mRegL src) %{
9647 match(Set dst (SubL zero src));
9648 ins_cost(80);
9650 format %{ "neg $dst, $src #@negL_Reg" %}
9651 ins_encode %{
9652 Register dst = $dst$$Register;
9653 Register src = $src$$Register;
9654 __ subu(dst, R0, src);
9655 %}
9656 ins_pipe( ialu_regI_regI );
9657 %}
9659 instruct subL_Reg_immL16_sub(mRegL dst, mRegL src1, immL16_sub src2) %{
9660 match(Set dst (SubL src1 src2));
9661 ins_cost(80);
9663 format %{ "sub $dst, $src1, $src2 #@subL_Reg_immL16_sub" %}
9664 ins_encode %{
9665 Register dst = $dst$$Register;
9666 Register src1 = $src1$$Register;
9667 __ daddiu(dst, src1, -1 * $src2$$constant);
9668 %}
9669 ins_pipe( ialu_regI_regI );
9670 %}
9672 // Subtract Long Register with Register.
9673 instruct subL_Reg_Reg(mRegL dst, mRegL src1, mRegL src2) %{
9674 match(Set dst (SubL src1 src2));
9675 ins_cost(100);
9676 format %{ "SubL $dst, $src1, $src2 @ subL_Reg_Reg" %}
9677 ins_encode %{
9678 Register dst = as_Register($dst$$reg);
9679 Register src1 = as_Register($src1$$reg);
9680 Register src2 = as_Register($src2$$reg);
9682 __ subu(dst, src1, src2);
9683 %}
9684 ins_pipe( ialu_regL_regL );
9685 %}
9687 instruct subL_Reg_RegI2L(mRegL dst, mRegL src1, mRegI src2) %{
9688 match(Set dst (SubL src1 (ConvI2L src2)));
9689 ins_cost(100);
9690 format %{ "SubL $dst, $src1, $src2 @ subL_Reg_RegI2L" %}
9691 ins_encode %{
9692 Register dst = as_Register($dst$$reg);
9693 Register src1 = as_Register($src1$$reg);
9694 Register src2 = as_Register($src2$$reg);
9696 __ subu(dst, src1, src2);
9697 %}
9698 ins_pipe( ialu_regL_regL );
9699 %}
9701 instruct subL_RegI2L_Reg(mRegL dst, mRegI src1, mRegL src2) %{
9702 match(Set dst (SubL (ConvI2L src1) src2));
9703 ins_cost(200);
9704 format %{ "SubL $dst, $src1, $src2 @ subL_RegI2L_Reg" %}
9705 ins_encode %{
9706 Register dst = as_Register($dst$$reg);
9707 Register src1 = as_Register($src1$$reg);
9708 Register src2 = as_Register($src2$$reg);
9710 __ subu(dst, src1, src2);
9711 %}
9712 ins_pipe( ialu_regL_regL );
9713 %}
9715 instruct subL_RegI2L_RegI2L(mRegL dst, mRegI src1, mRegI src2) %{
9716 match(Set dst (SubL (ConvI2L src1) (ConvI2L src2)));
9717 ins_cost(200);
9718 format %{ "SubL $dst, $src1, $src2 @ subL_RegI2L_RegI2L" %}
9719 ins_encode %{
9720 Register dst = as_Register($dst$$reg);
9721 Register src1 = as_Register($src1$$reg);
9722 Register src2 = as_Register($src2$$reg);
9724 __ subu(dst, src1, src2);
9725 %}
9726 ins_pipe( ialu_regL_regL );
9727 %}
9729 // Integer MOD with Register
9730 instruct modI_Reg_Reg(mRegI dst, mRegI src1, mRegI src2) %{
9731 match(Set dst (ModI src1 src2));
9732 ins_cost(300);
9733 format %{ "modi $dst, $src1, $src2 @ modI_Reg_Reg" %}
9734 ins_encode %{
9735 Register dst = $dst$$Register;
9736 Register src1 = $src1$$Register;
9737 Register src2 = $src2$$Register;
9739 //if (UseLoongsonISA) {
9740 if (0) {
9741 // 2016.08.10
9742 // Experiments show that gsmod is slower that div+mfhi.
9743 // So I just disable it here.
9744 __ gsmod(dst, src1, src2);
9745 } else {
9746 __ div(src1, src2);
9747 __ mfhi(dst);
9748 }
9749 %}
9751 //ins_pipe( ialu_mod );
9752 ins_pipe( ialu_regI_regI );
9753 %}
9755 instruct modL_reg_reg(mRegL dst, mRegL src1, mRegL src2) %{
9756 match(Set dst (ModL src1 src2));
9757 format %{ "modL $dst, $src1, $src2 @modL_reg_reg" %}
9759 ins_encode %{
9760 Register dst = as_Register($dst$$reg);
9761 Register op1 = as_Register($src1$$reg);
9762 Register op2 = as_Register($src2$$reg);
9764 if (UseLoongsonISA) {
9765 __ gsdmod(dst, op1, op2);
9766 } else {
9767 __ ddiv(op1, op2);
9768 __ mfhi(dst);
9769 }
9770 %}
9771 ins_pipe( pipe_slow );
9772 %}
9774 instruct mulI_Reg_Reg(mRegI dst, mRegI src1, mRegI src2) %{
9775 match(Set dst (MulI src1 src2));
9777 ins_cost(300);
9778 format %{ "mul $dst, $src1, $src2 @ mulI_Reg_Reg" %}
9779 ins_encode %{
9780 Register src1 = $src1$$Register;
9781 Register src2 = $src2$$Register;
9782 Register dst = $dst$$Register;
9784 __ mul(dst, src1, src2);
9785 %}
9786 ins_pipe( ialu_mult );
9787 %}
9789 instruct maddI_Reg_Reg(mRegI dst, mRegI src1, mRegI src2, mRegI src3) %{
9790 match(Set dst (AddI (MulI src1 src2) src3));
9792 ins_cost(999);
9793 format %{ "madd $dst, $src1 * $src2 + $src3 #@maddI_Reg_Reg" %}
9794 ins_encode %{
9795 Register src1 = $src1$$Register;
9796 Register src2 = $src2$$Register;
9797 Register src3 = $src3$$Register;
9798 Register dst = $dst$$Register;
9800 __ mtlo(src3);
9801 __ madd(src1, src2);
9802 __ mflo(dst);
9803 %}
9804 ins_pipe( ialu_mult );
9805 %}
9807 instruct divI_Reg_Reg(mRegI dst, mRegI src1, mRegI src2) %{
9808 match(Set dst (DivI src1 src2));
9810 ins_cost(300);
9811 format %{ "div $dst, $src1, $src2 @ divI_Reg_Reg" %}
9812 ins_encode %{
9813 Register src1 = $src1$$Register;
9814 Register src2 = $src2$$Register;
9815 Register dst = $dst$$Register;
9817 /* 2012/4/21 Jin: In MIPS, div does not cause exception.
9818 We must trap an exception manually. */
9819 __ teq(R0, src2, 0x7);
9821 if (UseLoongsonISA) {
9822 __ gsdiv(dst, src1, src2);
9823 } else {
9824 __ div(src1, src2);
9826 __ nop();
9827 __ nop();
9828 __ mflo(dst);
9829 }
9830 %}
9831 ins_pipe( ialu_mod );
9832 %}
9834 instruct divF_Reg_Reg(regF dst, regF src1, regF src2) %{
9835 match(Set dst (DivF src1 src2));
9837 ins_cost(300);
9838 format %{ "divF $dst, $src1, $src2 @ divF_Reg_Reg" %}
9839 ins_encode %{
9840 FloatRegister src1 = $src1$$FloatRegister;
9841 FloatRegister src2 = $src2$$FloatRegister;
9842 FloatRegister dst = $dst$$FloatRegister;
9844 /* Here do we need to trap an exception manually ? */
9845 __ div_s(dst, src1, src2);
9846 %}
9847 ins_pipe( pipe_slow );
9848 %}
9850 instruct divD_Reg_Reg(regD dst, regD src1, regD src2) %{
9851 match(Set dst (DivD src1 src2));
9853 ins_cost(300);
9854 format %{ "divD $dst, $src1, $src2 @ divD_Reg_Reg" %}
9855 ins_encode %{
9856 FloatRegister src1 = $src1$$FloatRegister;
9857 FloatRegister src2 = $src2$$FloatRegister;
9858 FloatRegister dst = $dst$$FloatRegister;
9860 /* Here do we need to trap an exception manually ? */
9861 __ div_d(dst, src1, src2);
9862 %}
9863 ins_pipe( pipe_slow );
9864 %}
9866 instruct mulL_reg_reg(mRegL dst, mRegL src1, mRegL src2) %{
9867 match(Set dst (MulL src1 src2));
9868 format %{ "mulL $dst, $src1, $src2 @mulL_reg_reg" %}
9869 ins_encode %{
9870 Register dst = as_Register($dst$$reg);
9871 Register op1 = as_Register($src1$$reg);
9872 Register op2 = as_Register($src2$$reg);
9874 if (UseLoongsonISA) {
9875 __ gsdmult(dst, op1, op2);
9876 } else {
9877 __ dmult(op1, op2);
9878 __ mflo(dst);
9879 }
9880 %}
9881 ins_pipe( pipe_slow );
9882 %}
9884 instruct mulL_reg_regI2L(mRegL dst, mRegL src1, mRegI src2) %{
9885 match(Set dst (MulL src1 (ConvI2L src2)));
9886 format %{ "mulL $dst, $src1, $src2 @mulL_reg_regI2L" %}
9887 ins_encode %{
9888 Register dst = as_Register($dst$$reg);
9889 Register op1 = as_Register($src1$$reg);
9890 Register op2 = as_Register($src2$$reg);
9892 if (UseLoongsonISA) {
9893 __ gsdmult(dst, op1, op2);
9894 } else {
9895 __ dmult(op1, op2);
9896 __ mflo(dst);
9897 }
9898 %}
9899 ins_pipe( pipe_slow );
9900 %}
9902 instruct divL_reg_reg(mRegL dst, mRegL src1, mRegL src2) %{
9903 match(Set dst (DivL src1 src2));
9904 format %{ "divL $dst, $src1, $src2 @divL_reg_reg" %}
9906 ins_encode %{
9907 Register dst = as_Register($dst$$reg);
9908 Register op1 = as_Register($src1$$reg);
9909 Register op2 = as_Register($src2$$reg);
9911 if (UseLoongsonISA) {
9912 __ gsddiv(dst, op1, op2);
9913 } else {
9914 __ ddiv(op1, op2);
9915 __ mflo(dst);
9916 }
9917 %}
9918 ins_pipe( pipe_slow );
9919 %}
9921 instruct addF_reg_reg(regF dst, regF src1, regF src2) %{
9922 match(Set dst (AddF src1 src2));
9923 format %{ "AddF $dst, $src1, $src2 @addF_reg_reg" %}
9924 ins_encode %{
9925 FloatRegister src1 = as_FloatRegister($src1$$reg);
9926 FloatRegister src2 = as_FloatRegister($src2$$reg);
9927 FloatRegister dst = as_FloatRegister($dst$$reg);
9929 __ add_s(dst, src1, src2);
9930 %}
9931 ins_pipe( fpu_regF_regF );
9932 %}
9934 instruct subF_reg_reg(regF dst, regF src1, regF src2) %{
9935 match(Set dst (SubF src1 src2));
9936 format %{ "SubF $dst, $src1, $src2 @subF_reg_reg" %}
9937 ins_encode %{
9938 FloatRegister src1 = as_FloatRegister($src1$$reg);
9939 FloatRegister src2 = as_FloatRegister($src2$$reg);
9940 FloatRegister dst = as_FloatRegister($dst$$reg);
9942 __ sub_s(dst, src1, src2);
9943 %}
9944 ins_pipe( fpu_regF_regF );
9945 %}
9946 instruct addD_reg_reg(regD dst, regD src1, regD src2) %{
9947 match(Set dst (AddD src1 src2));
9948 format %{ "AddD $dst, $src1, $src2 @addD_reg_reg" %}
9949 ins_encode %{
9950 FloatRegister src1 = as_FloatRegister($src1$$reg);
9951 FloatRegister src2 = as_FloatRegister($src2$$reg);
9952 FloatRegister dst = as_FloatRegister($dst$$reg);
9954 __ add_d(dst, src1, src2);
9955 %}
9956 ins_pipe( fpu_regF_regF );
9957 %}
9959 instruct subD_reg_reg(regD dst, regD src1, regD src2) %{
9960 match(Set dst (SubD src1 src2));
9961 format %{ "SubD $dst, $src1, $src2 @subD_reg_reg" %}
9962 ins_encode %{
9963 FloatRegister src1 = as_FloatRegister($src1$$reg);
9964 FloatRegister src2 = as_FloatRegister($src2$$reg);
9965 FloatRegister dst = as_FloatRegister($dst$$reg);
9967 __ sub_d(dst, src1, src2);
9968 %}
9969 ins_pipe( fpu_regF_regF );
9970 %}
9972 instruct negF_reg(regF dst, regF src) %{
9973 match(Set dst (NegF src));
9974 format %{ "negF $dst, $src @negF_reg" %}
9975 ins_encode %{
9976 FloatRegister src = as_FloatRegister($src$$reg);
9977 FloatRegister dst = as_FloatRegister($dst$$reg);
9979 __ neg_s(dst, src);
9980 %}
9981 ins_pipe( fpu_regF_regF );
9982 %}
9984 instruct negD_reg(regD dst, regD src) %{
9985 match(Set dst (NegD src));
9986 format %{ "negD $dst, $src @negD_reg" %}
9987 ins_encode %{
9988 FloatRegister src = as_FloatRegister($src$$reg);
9989 FloatRegister dst = as_FloatRegister($dst$$reg);
9991 __ neg_d(dst, src);
9992 %}
9993 ins_pipe( fpu_regF_regF );
9994 %}
9997 instruct mulF_reg_reg(regF dst, regF src1, regF src2) %{
9998 match(Set dst (MulF src1 src2));
9999 format %{ "MULF $dst, $src1, $src2 @mulF_reg_reg" %}
10000 ins_encode %{
10001 FloatRegister src1 = $src1$$FloatRegister;
10002 FloatRegister src2 = $src2$$FloatRegister;
10003 FloatRegister dst = $dst$$FloatRegister;
10005 __ mul_s(dst, src1, src2);
10006 %}
10007 ins_pipe( fpu_regF_regF );
10008 %}
10010 instruct maddF_reg_reg(regF dst, regF src1, regF src2, regF src3) %{
10011 match(Set dst (AddF (MulF src1 src2) src3));
10012 // For compatibility reason (e.g. on the Loongson platform), disable this guy.
10013 ins_cost(44444);
10014 format %{ "maddF $dst, $src1, $src2, $src3 @maddF_reg_reg" %}
10015 ins_encode %{
10016 FloatRegister src1 = $src1$$FloatRegister;
10017 FloatRegister src2 = $src2$$FloatRegister;
10018 FloatRegister src3 = $src3$$FloatRegister;
10019 FloatRegister dst = $dst$$FloatRegister;
10021 __ madd_s(dst, src1, src2, src3);
10022 %}
10023 ins_pipe( fpu_regF_regF );
10024 %}
10026 // Mul two double precision floating piont number
10027 instruct mulD_reg_reg(regD dst, regD src1, regD src2) %{
10028 match(Set dst (MulD src1 src2));
10029 format %{ "MULD $dst, $src1, $src2 @mulD_reg_reg" %}
10030 ins_encode %{
10031 FloatRegister src1 = $src1$$FloatRegister;
10032 FloatRegister src2 = $src2$$FloatRegister;
10033 FloatRegister dst = $dst$$FloatRegister;
10035 __ mul_d(dst, src1, src2);
10036 %}
10037 ins_pipe( fpu_regF_regF );
10038 %}
10040 instruct maddD_reg_reg(regD dst, regD src1, regD src2, regD src3) %{
10041 match(Set dst (AddD (MulD src1 src2) src3));
10042 // For compatibility reason (e.g. on the Loongson platform), disable this guy.
10043 ins_cost(44444);
10044 format %{ "maddD $dst, $src1, $src2, $src3 @maddD_reg_reg" %}
10045 ins_encode %{
10046 FloatRegister src1 = $src1$$FloatRegister;
10047 FloatRegister src2 = $src2$$FloatRegister;
10048 FloatRegister src3 = $src3$$FloatRegister;
10049 FloatRegister dst = $dst$$FloatRegister;
10051 __ madd_d(dst, src1, src2, src3);
10052 %}
10053 ins_pipe( fpu_regF_regF );
10054 %}
10056 instruct absF_reg(regF dst, regF src) %{
10057 match(Set dst (AbsF src));
10058 ins_cost(100);
10059 format %{ "absF $dst, $src @absF_reg" %}
10060 ins_encode %{
10061 FloatRegister src = as_FloatRegister($src$$reg);
10062 FloatRegister dst = as_FloatRegister($dst$$reg);
10064 __ abs_s(dst, src);
10065 %}
10066 ins_pipe( fpu_regF_regF );
10067 %}
10070 // intrinsics for math_native.
10071 // AbsD SqrtD CosD SinD TanD LogD Log10D
10073 instruct absD_reg(regD dst, regD src) %{
10074 match(Set dst (AbsD src));
10075 ins_cost(100);
10076 format %{ "absD $dst, $src @absD_reg" %}
10077 ins_encode %{
10078 FloatRegister src = as_FloatRegister($src$$reg);
10079 FloatRegister dst = as_FloatRegister($dst$$reg);
10081 __ abs_d(dst, src);
10082 %}
10083 ins_pipe( fpu_regF_regF );
10084 %}
10086 instruct sqrtD_reg(regD dst, regD src) %{
10087 match(Set dst (SqrtD src));
10088 ins_cost(100);
10089 format %{ "SqrtD $dst, $src @sqrtD_reg" %}
10090 ins_encode %{
10091 FloatRegister src = as_FloatRegister($src$$reg);
10092 FloatRegister dst = as_FloatRegister($dst$$reg);
10094 __ sqrt_d(dst, src);
10095 %}
10096 ins_pipe( fpu_regF_regF );
10097 %}
10099 instruct sqrtF_reg(regF dst, regF src) %{
10100 match(Set dst (ConvD2F (SqrtD (ConvF2D src))));
10101 ins_cost(100);
10102 format %{ "SqrtF $dst, $src @sqrtF_reg" %}
10103 ins_encode %{
10104 FloatRegister src = as_FloatRegister($src$$reg);
10105 FloatRegister dst = as_FloatRegister($dst$$reg);
10107 __ sqrt_s(dst, src);
10108 %}
10109 ins_pipe( fpu_regF_regF );
10110 %}
10111 //----------------------------------Logical Instructions----------------------
10112 //__________________________________Integer Logical Instructions-------------
10114 //And Instuctions
10115 // And Register with Immediate
10116 instruct andI_Reg_immI(mRegI dst, mRegI src1, immI src2) %{
10117 match(Set dst (AndI src1 src2));
10119 format %{ "and $dst, $src1, $src2 #@andI_Reg_immI" %}
10120 ins_encode %{
10121 Register dst = $dst$$Register;
10122 Register src = $src1$$Register;
10123 int val = $src2$$constant;
10125 __ move(AT, val);
10126 __ andr(dst, src, AT);
10127 %}
10128 ins_pipe( ialu_regI_regI );
10129 %}
10131 instruct andI_Reg_imm_0_65535(mRegI dst, mRegI src1, immI_0_65535 src2) %{
10132 match(Set dst (AndI src1 src2));
10133 ins_cost(60);
10135 format %{ "and $dst, $src1, $src2 #@andI_Reg_imm_0_65535" %}
10136 ins_encode %{
10137 Register dst = $dst$$Register;
10138 Register src = $src1$$Register;
10139 int val = $src2$$constant;
10141 __ andi(dst, src, val);
10142 %}
10143 ins_pipe( ialu_regI_regI );
10144 %}
10146 instruct andI_Reg_immI_nonneg_mask(mRegI dst, mRegI src1, immI_nonneg_mask mask) %{
10147 match(Set dst (AndI src1 mask));
10148 ins_cost(60);
10150 format %{ "and $dst, $src1, $mask #@andI_Reg_immI_nonneg_mask" %}
10151 ins_encode %{
10152 Register dst = $dst$$Register;
10153 Register src = $src1$$Register;
10154 int size = Assembler::is_int_mask($mask$$constant);
10156 __ ext(dst, src, 0, size);
10157 %}
10158 ins_pipe( ialu_regI_regI );
10159 %}
10161 instruct andL_Reg_immL_nonneg_mask(mRegL dst, mRegL src1, immL_nonneg_mask mask) %{
10162 match(Set dst (AndL src1 mask));
10163 ins_cost(60);
10165 format %{ "and $dst, $src1, $mask #@andL_Reg_immL_nonneg_mask" %}
10166 ins_encode %{
10167 Register dst = $dst$$Register;
10168 Register src = $src1$$Register;
10169 int size = Assembler::is_jlong_mask($mask$$constant);
10171 __ dext(dst, src, 0, size);
10172 %}
10173 ins_pipe( ialu_regI_regI );
10174 %}
10176 instruct xorI_Reg_imm_0_65535(mRegI dst, mRegI src1, immI_0_65535 src2) %{
10177 match(Set dst (XorI src1 src2));
10178 ins_cost(60);
10180 format %{ "xori $dst, $src1, $src2 #@xorI_Reg_imm_0_65535" %}
10181 ins_encode %{
10182 Register dst = $dst$$Register;
10183 Register src = $src1$$Register;
10184 int val = $src2$$constant;
10186 __ xori(dst, src, val);
10187 %}
10188 ins_pipe( ialu_regI_regI );
10189 %}
10191 instruct xorI_Reg_immI_M1(mRegI dst, mRegI src1, immI_M1 M1) %{
10192 match(Set dst (XorI src1 M1));
10193 predicate(UseLoongsonISA);
10194 ins_cost(60);
10196 format %{ "xor $dst, $src1, $M1 #@xorI_Reg_immI_M1" %}
10197 ins_encode %{
10198 Register dst = $dst$$Register;
10199 Register src = $src1$$Register;
10201 __ gsorn(dst, R0, src);
10202 %}
10203 ins_pipe( ialu_regI_regI );
10204 %}
10206 instruct xorL2I_Reg_immI_M1(mRegI dst, mRegL src1, immI_M1 M1) %{
10207 match(Set dst (XorI (ConvL2I src1) M1));
10208 predicate(UseLoongsonISA);
10209 ins_cost(60);
10211 format %{ "xor $dst, $src1, $M1 #@xorL2I_Reg_immI_M1" %}
10212 ins_encode %{
10213 Register dst = $dst$$Register;
10214 Register src = $src1$$Register;
10216 __ gsorn(dst, R0, src);
10217 %}
10218 ins_pipe( ialu_regI_regI );
10219 %}
10221 instruct xorL_Reg_imm_0_65535(mRegL dst, mRegL src1, immL_0_65535 src2) %{
10222 match(Set dst (XorL src1 src2));
10223 ins_cost(60);
10225 format %{ "xori $dst, $src1, $src2 #@xorL_Reg_imm_0_65535" %}
10226 ins_encode %{
10227 Register dst = $dst$$Register;
10228 Register src = $src1$$Register;
10229 int val = $src2$$constant;
10231 __ xori(dst, src, val);
10232 %}
10233 ins_pipe( ialu_regI_regI );
10234 %}
10236 /*
10237 instruct xorL_Reg_immL_M1(mRegL dst, mRegL src1, immL_M1 M1) %{
10238 match(Set dst (XorL src1 M1));
10239 predicate(UseLoongsonISA);
10240 ins_cost(60);
10242 format %{ "xor $dst, $src1, $M1 #@xorL_Reg_immL_M1" %}
10243 ins_encode %{
10244 Register dst = $dst$$Register;
10245 Register src = $src1$$Register;
10247 __ gsorn(dst, R0, src);
10248 %}
10249 ins_pipe( ialu_regI_regI );
10250 %}
10251 */
10253 instruct lbu_and_lmask(mRegI dst, memory mem, immI_255 mask) %{
10254 match(Set dst (AndI mask (LoadB mem)));
10255 ins_cost(60);
10257 format %{ "lhu $dst, $mem #@lbu_and_lmask" %}
10258 ins_encode(load_UB_enc(dst, mem));
10259 ins_pipe( ialu_loadI );
10260 %}
10262 instruct lbu_and_rmask(mRegI dst, memory mem, immI_255 mask) %{
10263 match(Set dst (AndI (LoadB mem) mask));
10264 ins_cost(60);
10266 format %{ "lhu $dst, $mem #@lbu_and_rmask" %}
10267 ins_encode(load_UB_enc(dst, mem));
10268 ins_pipe( ialu_loadI );
10269 %}
10271 instruct andI_Reg_Reg(mRegI dst, mRegI src1, mRegI src2) %{
10272 match(Set dst (AndI src1 src2));
10274 format %{ "and $dst, $src1, $src2 #@andI_Reg_Reg" %}
10275 ins_encode %{
10276 Register dst = $dst$$Register;
10277 Register src1 = $src1$$Register;
10278 Register src2 = $src2$$Register;
10279 __ andr(dst, src1, src2);
10280 %}
10281 ins_pipe( ialu_regI_regI );
10282 %}
10284 instruct andnI_Reg_nReg(mRegI dst, mRegI src1, mRegI src2, immI_M1 M1) %{
10285 match(Set dst (AndI src1 (XorI src2 M1)));
10286 predicate(UseLoongsonISA);
10288 format %{ "andn $dst, $src1, $src2 #@andnI_Reg_nReg" %}
10289 ins_encode %{
10290 Register dst = $dst$$Register;
10291 Register src1 = $src1$$Register;
10292 Register src2 = $src2$$Register;
10294 __ gsandn(dst, src1, src2);
10295 %}
10296 ins_pipe( ialu_regI_regI );
10297 %}
10299 instruct ornI_Reg_nReg(mRegI dst, mRegI src1, mRegI src2, immI_M1 M1) %{
10300 match(Set dst (OrI src1 (XorI src2 M1)));
10301 predicate(UseLoongsonISA);
10303 format %{ "orn $dst, $src1, $src2 #@ornI_Reg_nReg" %}
10304 ins_encode %{
10305 Register dst = $dst$$Register;
10306 Register src1 = $src1$$Register;
10307 Register src2 = $src2$$Register;
10309 __ gsorn(dst, src1, src2);
10310 %}
10311 ins_pipe( ialu_regI_regI );
10312 %}
10314 instruct andnI_nReg_Reg(mRegI dst, mRegI src1, mRegI src2, immI_M1 M1) %{
10315 match(Set dst (AndI (XorI src1 M1) src2));
10316 predicate(UseLoongsonISA);
10318 format %{ "andn $dst, $src2, $src1 #@andnI_nReg_Reg" %}
10319 ins_encode %{
10320 Register dst = $dst$$Register;
10321 Register src1 = $src1$$Register;
10322 Register src2 = $src2$$Register;
10324 __ gsandn(dst, src2, src1);
10325 %}
10326 ins_pipe( ialu_regI_regI );
10327 %}
10329 instruct ornI_nReg_Reg(mRegI dst, mRegI src1, mRegI src2, immI_M1 M1) %{
10330 match(Set dst (OrI (XorI src1 M1) src2));
10331 predicate(UseLoongsonISA);
10333 format %{ "orn $dst, $src2, $src1 #@ornI_nReg_Reg" %}
10334 ins_encode %{
10335 Register dst = $dst$$Register;
10336 Register src1 = $src1$$Register;
10337 Register src2 = $src2$$Register;
10339 __ gsorn(dst, src2, src1);
10340 %}
10341 ins_pipe( ialu_regI_regI );
10342 %}
10344 // And Long Register with Register
10345 instruct andL_Reg_Reg(mRegL dst, mRegL src1, mRegL src2) %{
10346 match(Set dst (AndL src1 src2));
10347 format %{ "AND $dst, $src1, $src2 @ andL_Reg_Reg\n\t" %}
10348 ins_encode %{
10349 Register dst_reg = as_Register($dst$$reg);
10350 Register src1_reg = as_Register($src1$$reg);
10351 Register src2_reg = as_Register($src2$$reg);
10353 __ andr(dst_reg, src1_reg, src2_reg);
10354 %}
10355 ins_pipe( ialu_regL_regL );
10356 %}
10358 instruct andL_Reg_Reg_convI2L(mRegL dst, mRegL src1, mRegI src2) %{
10359 match(Set dst (AndL src1 (ConvI2L src2)));
10360 format %{ "AND $dst, $src1, $src2 @ andL_Reg_Reg_convI2L\n\t" %}
10361 ins_encode %{
10362 Register dst_reg = as_Register($dst$$reg);
10363 Register src1_reg = as_Register($src1$$reg);
10364 Register src2_reg = as_Register($src2$$reg);
10366 __ andr(dst_reg, src1_reg, src2_reg);
10367 %}
10368 ins_pipe( ialu_regL_regL );
10369 %}
10371 instruct andL_Reg_imm_0_65535(mRegL dst, mRegL src1, immL_0_65535 src2) %{
10372 match(Set dst (AndL src1 src2));
10373 ins_cost(60);
10375 format %{ "and $dst, $src1, $src2 #@andL_Reg_imm_0_65535" %}
10376 ins_encode %{
10377 Register dst = $dst$$Register;
10378 Register src = $src1$$Register;
10379 long val = $src2$$constant;
10381 __ andi(dst, src, val);
10382 %}
10383 ins_pipe( ialu_regI_regI );
10384 %}
10386 instruct andL2I_Reg_imm_0_65535(mRegI dst, mRegL src1, immL_0_65535 src2) %{
10387 match(Set dst (ConvL2I (AndL src1 src2)));
10388 ins_cost(60);
10390 format %{ "and $dst, $src1, $src2 #@andL2I_Reg_imm_0_65535" %}
10391 ins_encode %{
10392 Register dst = $dst$$Register;
10393 Register src = $src1$$Register;
10394 long val = $src2$$constant;
10396 __ andi(dst, src, val);
10397 %}
10398 ins_pipe( ialu_regI_regI );
10399 %}
10401 /*
10402 instruct andnL_Reg_nReg(mRegL dst, mRegL src1, mRegL src2, immL_M1 M1) %{
10403 match(Set dst (AndL src1 (XorL src2 M1)));
10404 predicate(UseLoongsonISA);
10406 format %{ "andn $dst, $src1, $src2 #@andnL_Reg_nReg" %}
10407 ins_encode %{
10408 Register dst = $dst$$Register;
10409 Register src1 = $src1$$Register;
10410 Register src2 = $src2$$Register;
10412 __ gsandn(dst, src1, src2);
10413 %}
10414 ins_pipe( ialu_regI_regI );
10415 %}
10416 */
10418 /*
10419 instruct ornL_Reg_nReg(mRegL dst, mRegL src1, mRegL src2, immL_M1 M1) %{
10420 match(Set dst (OrL src1 (XorL src2 M1)));
10421 predicate(UseLoongsonISA);
10423 format %{ "orn $dst, $src1, $src2 #@ornL_Reg_nReg" %}
10424 ins_encode %{
10425 Register dst = $dst$$Register;
10426 Register src1 = $src1$$Register;
10427 Register src2 = $src2$$Register;
10429 __ gsorn(dst, src1, src2);
10430 %}
10431 ins_pipe( ialu_regI_regI );
10432 %}
10433 */
10435 /*
10436 instruct andnL_nReg_Reg(mRegL dst, mRegL src1, mRegL src2, immL_M1 M1) %{
10437 match(Set dst (AndL (XorL src1 M1) src2));
10438 predicate(UseLoongsonISA);
10440 format %{ "andn $dst, $src2, $src1 #@andnL_nReg_Reg" %}
10441 ins_encode %{
10442 Register dst = $dst$$Register;
10443 Register src1 = $src1$$Register;
10444 Register src2 = $src2$$Register;
10446 __ gsandn(dst, src2, src1);
10447 %}
10448 ins_pipe( ialu_regI_regI );
10449 %}
10450 */
10452 /*
10453 instruct ornL_nReg_Reg(mRegL dst, mRegL src1, mRegL src2, immL_M1 M1) %{
10454 match(Set dst (OrL (XorL src1 M1) src2));
10455 predicate(UseLoongsonISA);
10457 format %{ "orn $dst, $src2, $src1 #@ornL_nReg_Reg" %}
10458 ins_encode %{
10459 Register dst = $dst$$Register;
10460 Register src1 = $src1$$Register;
10461 Register src2 = $src2$$Register;
10463 __ gsorn(dst, src2, src1);
10464 %}
10465 ins_pipe( ialu_regI_regI );
10466 %}
10467 */
10469 instruct andL_Reg_immL_M8(mRegL dst, immL_M8 M8) %{
10470 match(Set dst (AndL dst M8));
10471 ins_cost(60);
10473 format %{ "and $dst, $dst, $M8 #@andL_Reg_immL_M8" %}
10474 ins_encode %{
10475 Register dst = $dst$$Register;
10477 __ dins(dst, R0, 0, 3);
10478 %}
10479 ins_pipe( ialu_regI_regI );
10480 %}
10482 instruct andL_Reg_immL_M5(mRegL dst, immL_M5 M5) %{
10483 match(Set dst (AndL dst M5));
10484 ins_cost(60);
10486 format %{ "and $dst, $dst, $M5 #@andL_Reg_immL_M5" %}
10487 ins_encode %{
10488 Register dst = $dst$$Register;
10490 __ dins(dst, R0, 2, 1);
10491 %}
10492 ins_pipe( ialu_regI_regI );
10493 %}
10495 instruct andL_Reg_immL_M7(mRegL dst, immL_M7 M7) %{
10496 match(Set dst (AndL dst M7));
10497 ins_cost(60);
10499 format %{ "and $dst, $dst, $M7 #@andL_Reg_immL_M7" %}
10500 ins_encode %{
10501 Register dst = $dst$$Register;
10503 __ dins(dst, R0, 1, 2);
10504 %}
10505 ins_pipe( ialu_regI_regI );
10506 %}
10508 instruct andL_Reg_immL_M4(mRegL dst, immL_M4 M4) %{
10509 match(Set dst (AndL dst M4));
10510 ins_cost(60);
10512 format %{ "and $dst, $dst, $M4 #@andL_Reg_immL_M4" %}
10513 ins_encode %{
10514 Register dst = $dst$$Register;
10516 __ dins(dst, R0, 0, 2);
10517 %}
10518 ins_pipe( ialu_regI_regI );
10519 %}
10521 instruct andL_Reg_immL_M121(mRegL dst, immL_M121 M121) %{
10522 match(Set dst (AndL dst M121));
10523 ins_cost(60);
10525 format %{ "and $dst, $dst, $M121 #@andL_Reg_immL_M121" %}
10526 ins_encode %{
10527 Register dst = $dst$$Register;
10529 __ dins(dst, R0, 3, 4);
10530 %}
10531 ins_pipe( ialu_regI_regI );
10532 %}
10534 // Or Long Register with Register
10535 instruct orL_Reg_Reg(mRegL dst, mRegL src1, mRegL src2) %{
10536 match(Set dst (OrL src1 src2));
10537 format %{ "OR $dst, $src1, $src2 @ orL_Reg_Reg\t" %}
10538 ins_encode %{
10539 Register dst_reg = $dst$$Register;
10540 Register src1_reg = $src1$$Register;
10541 Register src2_reg = $src2$$Register;
10543 __ orr(dst_reg, src1_reg, src2_reg);
10544 %}
10545 ins_pipe( ialu_regL_regL );
10546 %}
10548 instruct orL_Reg_P2XReg(mRegL dst, mRegP src1, mRegL src2) %{
10549 match(Set dst (OrL (CastP2X src1) src2));
10550 format %{ "OR $dst, $src1, $src2 @ orL_Reg_P2XReg\t" %}
10551 ins_encode %{
10552 Register dst_reg = $dst$$Register;
10553 Register src1_reg = $src1$$Register;
10554 Register src2_reg = $src2$$Register;
10556 __ orr(dst_reg, src1_reg, src2_reg);
10557 %}
10558 ins_pipe( ialu_regL_regL );
10559 %}
10561 // Xor Long Register with Register
10562 instruct xorL_Reg_Reg(mRegL dst, mRegL src1, mRegL src2) %{
10563 match(Set dst (XorL src1 src2));
10564 format %{ "XOR $dst, $src1, $src2 @ xorL_Reg_Reg\t" %}
10565 ins_encode %{
10566 Register dst_reg = as_Register($dst$$reg);
10567 Register src1_reg = as_Register($src1$$reg);
10568 Register src2_reg = as_Register($src2$$reg);
10570 __ xorr(dst_reg, src1_reg, src2_reg);
10571 %}
10572 ins_pipe( ialu_regL_regL );
10573 %}
10575 // Shift Left by 8-bit immediate
10576 instruct salI_Reg_imm(mRegI dst, mRegI src, immI8 shift) %{
10577 match(Set dst (LShiftI src shift));
10579 format %{ "SHL $dst, $src, $shift #@salI_Reg_imm" %}
10580 ins_encode %{
10581 Register src = $src$$Register;
10582 Register dst = $dst$$Register;
10583 int shamt = $shift$$constant;
10585 __ sll(dst, src, shamt);
10586 %}
10587 ins_pipe( ialu_regI_regI );
10588 %}
10590 instruct salL2I_Reg_imm(mRegI dst, mRegL src, immI8 shift) %{
10591 match(Set dst (LShiftI (ConvL2I src) shift));
10593 format %{ "SHL $dst, $src, $shift #@salL2I_Reg_imm" %}
10594 ins_encode %{
10595 Register src = $src$$Register;
10596 Register dst = $dst$$Register;
10597 int shamt = $shift$$constant;
10599 __ sll(dst, src, shamt);
10600 %}
10601 ins_pipe( ialu_regI_regI );
10602 %}
10604 instruct salI_Reg_imm_and_M65536(mRegI dst, mRegI src, immI_16 shift, immI_M65536 mask) %{
10605 match(Set dst (AndI (LShiftI src shift) mask));
10607 format %{ "SHL $dst, $src, $shift #@salI_Reg_imm_and_M65536" %}
10608 ins_encode %{
10609 Register src = $src$$Register;
10610 Register dst = $dst$$Register;
10612 __ sll(dst, src, 16);
10613 %}
10614 ins_pipe( ialu_regI_regI );
10615 %}
10617 instruct land7_2_s(mRegI dst, mRegL src, immL7 seven, immI_16 sixteen)
10618 %{
10619 match(Set dst (RShiftI (LShiftI (ConvL2I (AndL src seven)) sixteen) sixteen));
10621 format %{ "andi $dst, $src, 7\t# @land7_2_s" %}
10622 ins_encode %{
10623 Register src = $src$$Register;
10624 Register dst = $dst$$Register;
10626 __ andi(dst, src, 7);
10627 %}
10628 ins_pipe(ialu_regI_regI);
10629 %}
10631 instruct ori2s(mRegI dst, mRegI src1, immI_0_32767 src2, immI_16 sixteen)
10632 %{
10633 match(Set dst (RShiftI (LShiftI (OrI src1 src2) sixteen) sixteen));
10635 format %{ "ori $dst, $src1, $src2\t# @ori2s" %}
10636 ins_encode %{
10637 Register src = $src1$$Register;
10638 int val = $src2$$constant;
10639 Register dst = $dst$$Register;
10641 __ ori(dst, src, val);
10642 %}
10643 ins_pipe(ialu_regI_regI);
10644 %}
10646 // Logical Shift Right by 16, followed by Arithmetic Shift Left by 16.
10647 // This idiom is used by the compiler the i2s bytecode.
10648 instruct i2s(mRegI dst, mRegI src, immI_16 sixteen)
10649 %{
10650 match(Set dst (RShiftI (LShiftI src sixteen) sixteen));
10652 format %{ "i2s $dst, $src\t# @i2s" %}
10653 ins_encode %{
10654 Register src = $src$$Register;
10655 Register dst = $dst$$Register;
10657 __ seh(dst, src);
10658 %}
10659 ins_pipe(ialu_regI_regI);
10660 %}
10662 // Logical Shift Right by 24, followed by Arithmetic Shift Left by 24.
10663 // This idiom is used by the compiler for the i2b bytecode.
10664 instruct i2b(mRegI dst, mRegI src, immI_24 twentyfour)
10665 %{
10666 match(Set dst (RShiftI (LShiftI src twentyfour) twentyfour));
10668 format %{ "i2b $dst, $src\t# @i2b" %}
10669 ins_encode %{
10670 Register src = $src$$Register;
10671 Register dst = $dst$$Register;
10673 __ seb(dst, src);
10674 %}
10675 ins_pipe(ialu_regI_regI);
10676 %}
10679 instruct salI_RegL2I_imm(mRegI dst, mRegL src, immI8 shift) %{
10680 match(Set dst (LShiftI (ConvL2I src) shift));
10682 format %{ "SHL $dst, $src, $shift #@salI_RegL2I_imm" %}
10683 ins_encode %{
10684 Register src = $src$$Register;
10685 Register dst = $dst$$Register;
10686 int shamt = $shift$$constant;
10688 __ sll(dst, src, shamt);
10689 %}
10690 ins_pipe( ialu_regI_regI );
10691 %}
10693 // Shift Left by 8-bit immediate
10694 instruct salI_Reg_Reg(mRegI dst, mRegI src, mRegI shift) %{
10695 match(Set dst (LShiftI src shift));
10697 format %{ "SHL $dst, $src, $shift #@salI_Reg_Reg" %}
10698 ins_encode %{
10699 Register src = $src$$Register;
10700 Register dst = $dst$$Register;
10701 Register shamt = $shift$$Register;
10702 __ sllv(dst, src, shamt);
10703 %}
10704 ins_pipe( ialu_regI_regI );
10705 %}
10708 // Shift Left Long
10709 instruct salL_Reg_imm(mRegL dst, mRegL src, immI8 shift) %{
10710 //predicate(UseNewLongLShift);
10711 match(Set dst (LShiftL src shift));
10712 ins_cost(100);
10713 format %{ "salL $dst, $src, $shift @ salL_Reg_imm" %}
10714 ins_encode %{
10715 Register src_reg = as_Register($src$$reg);
10716 Register dst_reg = as_Register($dst$$reg);
10717 int shamt = $shift$$constant;
10719 if (__ is_simm(shamt, 5))
10720 __ dsll(dst_reg, src_reg, shamt);
10721 else
10722 {
10723 int sa = Assembler::low(shamt, 6);
10724 if (sa < 32) {
10725 __ dsll(dst_reg, src_reg, sa);
10726 } else {
10727 __ dsll32(dst_reg, src_reg, sa - 32);
10728 }
10729 }
10730 %}
10731 ins_pipe( ialu_regL_regL );
10732 %}
10734 instruct salL_RegI2L_imm(mRegL dst, mRegI src, immI8 shift) %{
10735 //predicate(UseNewLongLShift);
10736 match(Set dst (LShiftL (ConvI2L src) shift));
10737 ins_cost(100);
10738 format %{ "salL $dst, $src, $shift @ salL_RegI2L_imm" %}
10739 ins_encode %{
10740 Register src_reg = as_Register($src$$reg);
10741 Register dst_reg = as_Register($dst$$reg);
10742 int shamt = $shift$$constant;
10744 if (__ is_simm(shamt, 5))
10745 __ dsll(dst_reg, src_reg, shamt);
10746 else
10747 {
10748 int sa = Assembler::low(shamt, 6);
10749 if (sa < 32) {
10750 __ dsll(dst_reg, src_reg, sa);
10751 } else {
10752 __ dsll32(dst_reg, src_reg, sa - 32);
10753 }
10754 }
10755 %}
10756 ins_pipe( ialu_regL_regL );
10757 %}
10759 // Shift Left Long
10760 instruct salL_Reg_Reg(mRegL dst, mRegL src, mRegI shift) %{
10761 //predicate(UseNewLongLShift);
10762 match(Set dst (LShiftL src shift));
10763 ins_cost(100);
10764 format %{ "salL $dst, $src, $shift @ salL_Reg_Reg" %}
10765 ins_encode %{
10766 Register src_reg = as_Register($src$$reg);
10767 Register dst_reg = as_Register($dst$$reg);
10769 __ dsllv(dst_reg, src_reg, $shift$$Register);
10770 %}
10771 ins_pipe( ialu_regL_regL );
10772 %}
10774 instruct salL_convI2L_Reg_imm(mRegL dst, mRegI src, immI8 shift) %{
10775 match(Set dst (LShiftL (ConvI2L src) shift));
10776 ins_cost(100);
10777 format %{ "salL $dst, $src, $shift @ salL_convI2L_Reg_imm" %}
10778 ins_encode %{
10779 Register src_reg = as_Register($src$$reg);
10780 Register dst_reg = as_Register($dst$$reg);
10781 int shamt = $shift$$constant;
10783 if (__ is_simm(shamt, 5)) {
10784 __ dsll(dst_reg, src_reg, shamt);
10785 } else {
10786 int sa = Assembler::low(shamt, 6);
10787 if (sa < 32) {
10788 __ dsll(dst_reg, src_reg, sa);
10789 } else {
10790 __ dsll32(dst_reg, src_reg, sa - 32);
10791 }
10792 }
10793 %}
10794 ins_pipe( ialu_regL_regL );
10795 %}
10797 // Shift Right Long
10798 instruct sarL_Reg_imm(mRegL dst, mRegL src, immI8 shift) %{
10799 match(Set dst (RShiftL src shift));
10800 ins_cost(100);
10801 format %{ "sarL $dst, $src, $shift @ sarL_Reg_imm" %}
10802 ins_encode %{
10803 Register src_reg = as_Register($src$$reg);
10804 Register dst_reg = as_Register($dst$$reg);
10805 int shamt = ($shift$$constant & 0x3f);
10806 if (__ is_simm(shamt, 5))
10807 __ dsra(dst_reg, src_reg, shamt);
10808 else {
10809 int sa = Assembler::low(shamt, 6);
10810 if (sa < 32) {
10811 __ dsra(dst_reg, src_reg, sa);
10812 } else {
10813 __ dsra32(dst_reg, src_reg, sa - 32);
10814 }
10815 }
10816 %}
10817 ins_pipe( ialu_regL_regL );
10818 %}
10820 instruct sarL2I_Reg_immI_32_63(mRegI dst, mRegL src, immI_32_63 shift) %{
10821 match(Set dst (ConvL2I (RShiftL src shift)));
10822 ins_cost(100);
10823 format %{ "sarL $dst, $src, $shift @ sarL2I_Reg_immI_32_63" %}
10824 ins_encode %{
10825 Register src_reg = as_Register($src$$reg);
10826 Register dst_reg = as_Register($dst$$reg);
10827 int shamt = $shift$$constant;
10829 __ dsra32(dst_reg, src_reg, shamt - 32);
10830 %}
10831 ins_pipe( ialu_regL_regL );
10832 %}
10834 // Shift Right Long arithmetically
10835 instruct sarL_Reg_Reg(mRegL dst, mRegL src, mRegI shift) %{
10836 //predicate(UseNewLongLShift);
10837 match(Set dst (RShiftL src shift));
10838 ins_cost(100);
10839 format %{ "sarL $dst, $src, $shift @ sarL_Reg_Reg" %}
10840 ins_encode %{
10841 Register src_reg = as_Register($src$$reg);
10842 Register dst_reg = as_Register($dst$$reg);
10844 __ dsrav(dst_reg, src_reg, $shift$$Register);
10845 %}
10846 ins_pipe( ialu_regL_regL );
10847 %}
10849 // Shift Right Long logically
10850 instruct slrL_Reg_Reg(mRegL dst, mRegL src, mRegI shift) %{
10851 match(Set dst (URShiftL src shift));
10852 ins_cost(100);
10853 format %{ "slrL $dst, $src, $shift @ slrL_Reg_Reg" %}
10854 ins_encode %{
10855 Register src_reg = as_Register($src$$reg);
10856 Register dst_reg = as_Register($dst$$reg);
10858 __ dsrlv(dst_reg, src_reg, $shift$$Register);
10859 %}
10860 ins_pipe( ialu_regL_regL );
10861 %}
10863 instruct slrL_Reg_immI_0_31(mRegL dst, mRegL src, immI_0_31 shift) %{
10864 match(Set dst (URShiftL src shift));
10865 ins_cost(80);
10866 format %{ "slrL $dst, $src, $shift @ slrL_Reg_immI_0_31" %}
10867 ins_encode %{
10868 Register src_reg = as_Register($src$$reg);
10869 Register dst_reg = as_Register($dst$$reg);
10870 int shamt = $shift$$constant;
10872 __ dsrl(dst_reg, src_reg, shamt);
10873 %}
10874 ins_pipe( ialu_regL_regL );
10875 %}
10877 instruct slrL_Reg_immI_0_31_and_max_int(mRegI dst, mRegL src, immI_0_31 shift, immI_MaxI max_int) %{
10878 match(Set dst (AndI (ConvL2I (URShiftL src shift)) max_int));
10879 ins_cost(80);
10880 format %{ "dext $dst, $src, $shift, 31 @ slrL_Reg_immI_0_31_and_max_int" %}
10881 ins_encode %{
10882 Register src_reg = as_Register($src$$reg);
10883 Register dst_reg = as_Register($dst$$reg);
10884 int shamt = $shift$$constant;
10886 __ dext(dst_reg, src_reg, shamt, 31);
10887 %}
10888 ins_pipe( ialu_regL_regL );
10889 %}
10891 instruct slrL_P2XReg_immI_0_31(mRegL dst, mRegP src, immI_0_31 shift) %{
10892 match(Set dst (URShiftL (CastP2X src) shift));
10893 ins_cost(80);
10894 format %{ "slrL $dst, $src, $shift @ slrL_P2XReg_immI_0_31" %}
10895 ins_encode %{
10896 Register src_reg = as_Register($src$$reg);
10897 Register dst_reg = as_Register($dst$$reg);
10898 int shamt = $shift$$constant;
10900 __ dsrl(dst_reg, src_reg, shamt);
10901 %}
10902 ins_pipe( ialu_regL_regL );
10903 %}
10905 instruct slrL_Reg_immI_32_63(mRegL dst, mRegL src, immI_32_63 shift) %{
10906 match(Set dst (URShiftL src shift));
10907 ins_cost(80);
10908 format %{ "slrL $dst, $src, $shift @ slrL_Reg_immI_32_63" %}
10909 ins_encode %{
10910 Register src_reg = as_Register($src$$reg);
10911 Register dst_reg = as_Register($dst$$reg);
10912 int shamt = $shift$$constant;
10914 __ dsrl32(dst_reg, src_reg, shamt - 32);
10915 %}
10916 ins_pipe( ialu_regL_regL );
10917 %}
10919 instruct slrL_Reg_immI_convL2I(mRegI dst, mRegL src, immI_32_63 shift) %{
10920 match(Set dst (ConvL2I (URShiftL src shift)));
10921 predicate(n->in(1)->in(2)->get_int() > 32);
10922 ins_cost(80);
10923 format %{ "slrL $dst, $src, $shift @ slrL_Reg_immI_convL2I" %}
10924 ins_encode %{
10925 Register src_reg = as_Register($src$$reg);
10926 Register dst_reg = as_Register($dst$$reg);
10927 int shamt = $shift$$constant;
10929 __ dsrl32(dst_reg, src_reg, shamt - 32);
10930 %}
10931 ins_pipe( ialu_regL_regL );
10932 %}
10934 instruct slrL_P2XReg_immI_32_63(mRegL dst, mRegP src, immI_32_63 shift) %{
10935 match(Set dst (URShiftL (CastP2X src) shift));
10936 ins_cost(80);
10937 format %{ "slrL $dst, $src, $shift @ slrL_P2XReg_immI_32_63" %}
10938 ins_encode %{
10939 Register src_reg = as_Register($src$$reg);
10940 Register dst_reg = as_Register($dst$$reg);
10941 int shamt = $shift$$constant;
10943 __ dsrl32(dst_reg, src_reg, shamt - 32);
10944 %}
10945 ins_pipe( ialu_regL_regL );
10946 %}
10948 // Xor Instructions
10949 // Xor Register with Register
10950 instruct xorI_Reg_Reg(mRegI dst, mRegI src1, mRegI src2) %{
10951 match(Set dst (XorI src1 src2));
10953 format %{ "XOR $dst, $src1, $src2 #@xorI_Reg_Reg" %}
10955 ins_encode %{
10956 Register dst = $dst$$Register;
10957 Register src1 = $src1$$Register;
10958 Register src2 = $src2$$Register;
10959 __ xorr(dst, src1, src2);
10960 __ sll(dst, dst, 0); /* long -> int */
10961 %}
10963 ins_pipe( ialu_regI_regI );
10964 %}
10966 // Or Instructions
10967 // Or Register with Register
10968 instruct orI_Reg_Reg(mRegI dst, mRegI src1, mRegI src2) %{
10969 match(Set dst (OrI src1 src2));
10971 format %{ "OR $dst, $src1, $src2 #@orI_Reg_Reg" %}
10972 ins_encode %{
10973 Register dst = $dst$$Register;
10974 Register src1 = $src1$$Register;
10975 Register src2 = $src2$$Register;
10976 __ orr(dst, src1, src2);
10977 %}
10979 ins_pipe( ialu_regI_regI );
10980 %}
10982 instruct rotI_shr_logical_Reg(mRegI dst, mRegI src, immI_0_31 rshift, immI_0_31 lshift, immI_1 one) %{
10983 match(Set dst (OrI (URShiftI src rshift) (LShiftI (AndI src one) lshift)));
10984 predicate(32 == ((n->in(1)->in(2)->get_int() + n->in(2)->in(2)->get_int())));
10986 format %{ "rotr $dst, $src, 1 ...\n\t"
10987 "srl $dst, $dst, ($rshift-1) @ rotI_shr_logical_Reg" %}
10988 ins_encode %{
10989 Register dst = $dst$$Register;
10990 Register src = $src$$Register;
10991 int rshift = $rshift$$constant;
10993 __ rotr(dst, src, 1);
10994 if (rshift - 1) {
10995 __ srl(dst, dst, rshift - 1);
10996 }
10997 %}
10999 ins_pipe( ialu_regI_regI );
11000 %}
11002 instruct orI_Reg_castP2X(mRegL dst, mRegL src1, mRegP src2) %{
11003 match(Set dst (OrI src1 (CastP2X src2)));
11005 format %{ "OR $dst, $src1, $src2 #@orI_Reg_castP2X" %}
11006 ins_encode %{
11007 Register dst = $dst$$Register;
11008 Register src1 = $src1$$Register;
11009 Register src2 = $src2$$Register;
11010 __ orr(dst, src1, src2);
11011 %}
11013 ins_pipe( ialu_regI_regI );
11014 %}
11016 // Logical Shift Right by 8-bit immediate
11017 instruct shr_logical_Reg_imm(mRegI dst, mRegI src, immI8 shift) %{
11018 match(Set dst (URShiftI src shift));
11019 // effect(KILL cr);
11021 format %{ "SRL $dst, $src, $shift #@shr_logical_Reg_imm" %}
11022 ins_encode %{
11023 Register src = $src$$Register;
11024 Register dst = $dst$$Register;
11025 int shift = $shift$$constant;
11027 __ srl(dst, src, shift);
11028 %}
11029 ins_pipe( ialu_regI_regI );
11030 %}
11032 instruct shr_logical_Reg_imm_nonneg_mask(mRegI dst, mRegI src, immI_0_31 shift, immI_nonneg_mask mask) %{
11033 match(Set dst (AndI (URShiftI src shift) mask));
11035 format %{ "ext $dst, $src, $shift, one-bits($mask) #@shr_logical_Reg_imm_nonneg_mask" %}
11036 ins_encode %{
11037 Register src = $src$$Register;
11038 Register dst = $dst$$Register;
11039 int pos = $shift$$constant;
11040 int size = Assembler::is_int_mask($mask$$constant);
11042 __ ext(dst, src, pos, size);
11043 %}
11044 ins_pipe( ialu_regI_regI );
11045 %}
11047 instruct rolI_Reg_immI_0_31(mRegI dst, immI_0_31 lshift, immI_0_31 rshift)
11048 %{
11049 predicate(0 == ((n->in(1)->in(2)->get_int() + n->in(2)->in(2)->get_int()) & 0x1f));
11050 match(Set dst (OrI (LShiftI dst lshift) (URShiftI dst rshift)));
11052 ins_cost(100);
11053 format %{ "rotr $dst, $dst, $rshift #@rolI_Reg_immI_0_31" %}
11054 ins_encode %{
11055 Register dst = $dst$$Register;
11056 int sa = $rshift$$constant;
11058 __ rotr(dst, dst, sa);
11059 %}
11060 ins_pipe( ialu_regI_regI );
11061 %}
11063 instruct rolL_Reg_immI_0_31(mRegL dst, immI_32_63 lshift, immI_0_31 rshift)
11064 %{
11065 predicate(0 == ((n->in(1)->in(2)->get_int() + n->in(2)->in(2)->get_int()) & 0x3f));
11066 match(Set dst (OrL (LShiftL dst lshift) (URShiftL dst rshift)));
11068 ins_cost(100);
11069 format %{ "rotr $dst, $dst, $rshift #@rolL_Reg_immI_0_31" %}
11070 ins_encode %{
11071 Register dst = $dst$$Register;
11072 int sa = $rshift$$constant;
11074 __ drotr(dst, dst, sa);
11075 %}
11076 ins_pipe( ialu_regI_regI );
11077 %}
11079 instruct rolL_Reg_immI_32_63(mRegL dst, immI_0_31 lshift, immI_32_63 rshift)
11080 %{
11081 predicate(0 == ((n->in(1)->in(2)->get_int() + n->in(2)->in(2)->get_int()) & 0x3f));
11082 match(Set dst (OrL (LShiftL dst lshift) (URShiftL dst rshift)));
11084 ins_cost(100);
11085 format %{ "rotr $dst, $dst, $rshift #@rolL_Reg_immI_32_63" %}
11086 ins_encode %{
11087 Register dst = $dst$$Register;
11088 int sa = $rshift$$constant;
11090 __ drotr32(dst, dst, sa - 32);
11091 %}
11092 ins_pipe( ialu_regI_regI );
11093 %}
11095 instruct rorI_Reg_immI_0_31(mRegI dst, immI_0_31 rshift, immI_0_31 lshift)
11096 %{
11097 predicate(0 == ((n->in(1)->in(2)->get_int() + n->in(2)->in(2)->get_int()) & 0x1f));
11098 match(Set dst (OrI (URShiftI dst rshift) (LShiftI dst lshift)));
11100 ins_cost(100);
11101 format %{ "rotr $dst, $dst, $rshift #@rorI_Reg_immI_0_31" %}
11102 ins_encode %{
11103 Register dst = $dst$$Register;
11104 int sa = $rshift$$constant;
11106 __ rotr(dst, dst, sa);
11107 %}
11108 ins_pipe( ialu_regI_regI );
11109 %}
11111 instruct rorL_Reg_immI_0_31(mRegL dst, immI_0_31 rshift, immI_32_63 lshift)
11112 %{
11113 predicate(0 == ((n->in(1)->in(2)->get_int() + n->in(2)->in(2)->get_int()) & 0x3f));
11114 match(Set dst (OrL (URShiftL dst rshift) (LShiftL dst lshift)));
11116 ins_cost(100);
11117 format %{ "rotr $dst, $dst, $rshift #@rorL_Reg_immI_0_31" %}
11118 ins_encode %{
11119 Register dst = $dst$$Register;
11120 int sa = $rshift$$constant;
11122 __ drotr(dst, dst, sa);
11123 %}
11124 ins_pipe( ialu_regI_regI );
11125 %}
11127 instruct rorL_Reg_immI_32_63(mRegL dst, immI_32_63 rshift, immI_0_31 lshift)
11128 %{
11129 predicate(0 == ((n->in(1)->in(2)->get_int() + n->in(2)->in(2)->get_int()) & 0x3f));
11130 match(Set dst (OrL (URShiftL dst rshift) (LShiftL dst lshift)));
11132 ins_cost(100);
11133 format %{ "rotr $dst, $dst, $rshift #@rorL_Reg_immI_32_63" %}
11134 ins_encode %{
11135 Register dst = $dst$$Register;
11136 int sa = $rshift$$constant;
11138 __ drotr32(dst, dst, sa - 32);
11139 %}
11140 ins_pipe( ialu_regI_regI );
11141 %}
11143 // Logical Shift Right
11144 instruct shr_logical_Reg_Reg(mRegI dst, mRegI src, mRegI shift) %{
11145 match(Set dst (URShiftI src shift));
11147 format %{ "SRL $dst, $src, $shift #@shr_logical_Reg_Reg" %}
11148 ins_encode %{
11149 Register src = $src$$Register;
11150 Register dst = $dst$$Register;
11151 Register shift = $shift$$Register;
11152 __ srlv(dst, src, shift);
11153 %}
11154 ins_pipe( ialu_regI_regI );
11155 %}
11158 instruct shr_arith_Reg_imm(mRegI dst, mRegI src, immI8 shift) %{
11159 match(Set dst (RShiftI src shift));
11160 // effect(KILL cr);
11162 format %{ "SRA $dst, $src, $shift #@shr_arith_Reg_imm" %}
11163 ins_encode %{
11164 Register src = $src$$Register;
11165 Register dst = $dst$$Register;
11166 int shift = $shift$$constant;
11167 __ sra(dst, src, shift);
11168 %}
11169 ins_pipe( ialu_regI_regI );
11170 %}
11172 instruct shr_arith_Reg_Reg(mRegI dst, mRegI src, mRegI shift) %{
11173 match(Set dst (RShiftI src shift));
11174 // effect(KILL cr);
11176 format %{ "SRA $dst, $src, $shift #@shr_arith_Reg_Reg" %}
11177 ins_encode %{
11178 Register src = $src$$Register;
11179 Register dst = $dst$$Register;
11180 Register shift = $shift$$Register;
11181 __ srav(dst, src, shift);
11182 %}
11183 ins_pipe( ialu_regI_regI );
11184 %}
11186 //----------Convert Int to Boolean---------------------------------------------
11188 instruct convI2B(mRegI dst, mRegI src) %{
11189 match(Set dst (Conv2B src));
11191 ins_cost(100);
11192 format %{ "convI2B $dst, $src @ convI2B" %}
11193 ins_encode %{
11194 Register dst = as_Register($dst$$reg);
11195 Register src = as_Register($src$$reg);
11197 if (dst != src) {
11198 __ daddiu(dst, R0, 1);
11199 __ movz(dst, R0, src);
11200 } else {
11201 __ move(AT, src);
11202 __ daddiu(dst, R0, 1);
11203 __ movz(dst, R0, AT);
11204 }
11205 %}
11207 ins_pipe( ialu_regL_regL );
11208 %}
11210 instruct convI2L_reg( mRegL dst, mRegI src) %{
11211 match(Set dst (ConvI2L src));
11213 ins_cost(100);
11214 format %{ "SLL $dst, $src @ convI2L_reg\t" %}
11215 ins_encode %{
11216 Register dst = as_Register($dst$$reg);
11217 Register src = as_Register($src$$reg);
11219 if(dst != src) __ sll(dst, src, 0);
11220 %}
11221 ins_pipe( ialu_regL_regL );
11222 %}
11225 instruct convL2I_reg( mRegI dst, mRegL src ) %{
11226 match(Set dst (ConvL2I src));
11228 format %{ "MOV $dst, $src @ convL2I_reg" %}
11229 ins_encode %{
11230 Register dst = as_Register($dst$$reg);
11231 Register src = as_Register($src$$reg);
11233 __ sll(dst, src, 0);
11234 %}
11236 ins_pipe( ialu_regI_regI );
11237 %}
11239 instruct convL2I2L_reg( mRegL dst, mRegL src ) %{
11240 match(Set dst (ConvI2L (ConvL2I src)));
11242 format %{ "sll $dst, $src, 0 @ convL2I2L_reg" %}
11243 ins_encode %{
11244 Register dst = as_Register($dst$$reg);
11245 Register src = as_Register($src$$reg);
11247 __ sll(dst, src, 0);
11248 %}
11250 ins_pipe( ialu_regI_regI );
11251 %}
11253 instruct convL2D_reg( regD dst, mRegL src ) %{
11254 match(Set dst (ConvL2D src));
11255 format %{ "convL2D $dst, $src @ convL2D_reg" %}
11256 ins_encode %{
11257 Register src = as_Register($src$$reg);
11258 FloatRegister dst = as_FloatRegister($dst$$reg);
11260 __ dmtc1(src, dst);
11261 __ cvt_d_l(dst, dst);
11262 %}
11264 ins_pipe( pipe_slow );
11265 %}
11267 instruct convD2L_reg_fast( mRegL dst, regD src ) %{
11268 match(Set dst (ConvD2L src));
11269 ins_cost(150);
11270 format %{ "convD2L $dst, $src @ convD2L_reg_fast" %}
11271 ins_encode %{
11272 Register dst = as_Register($dst$$reg);
11273 FloatRegister src = as_FloatRegister($src$$reg);
11275 Label Done;
11277 __ trunc_l_d(F30, src);
11278 // max_long: 0x7fffffffffffffff
11279 // __ set64(AT, 0x7fffffffffffffff);
11280 __ daddiu(AT, R0, -1);
11281 __ dsrl(AT, AT, 1);
11282 __ dmfc1(dst, F30);
11284 __ bne(dst, AT, Done);
11285 __ delayed()->mtc1(R0, F30);
11287 __ cvt_d_w(F30, F30);
11288 __ c_ult_d(src, F30);
11289 __ bc1f(Done);
11290 __ delayed()->daddiu(T9, R0, -1);
11292 __ c_un_d(src, src); //NaN?
11293 __ subu(dst, T9, AT);
11294 __ movt(dst, R0);
11296 __ bind(Done);
11297 %}
11299 ins_pipe( pipe_slow );
11300 %}
11302 instruct convD2L_reg_slow( mRegL dst, regD src ) %{
11303 match(Set dst (ConvD2L src));
11304 ins_cost(250);
11305 format %{ "convD2L $dst, $src @ convD2L_reg_slow" %}
11306 ins_encode %{
11307 Register dst = as_Register($dst$$reg);
11308 FloatRegister src = as_FloatRegister($src$$reg);
11310 Label L;
11312 __ c_un_d(src, src); //NaN?
11313 __ bc1t(L);
11314 __ delayed();
11315 __ move(dst, R0);
11317 __ trunc_l_d(F30, src);
11318 __ cfc1(AT, 31);
11319 __ li(T9, 0x10000);
11320 __ andr(AT, AT, T9);
11321 __ beq(AT, R0, L);
11322 __ delayed()->dmfc1(dst, F30);
11324 __ mov_d(F12, src);
11325 __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::d2l), 1);
11326 __ move(dst, V0);
11327 __ bind(L);
11328 %}
11330 ins_pipe( pipe_slow );
11331 %}
11333 instruct convF2I_reg_fast( mRegI dst, regF src ) %{
11334 match(Set dst (ConvF2I src));
11335 ins_cost(150);
11336 format %{ "convf2i $dst, $src @ convF2I_reg_fast" %}
11337 ins_encode %{
11338 Register dreg = $dst$$Register;
11339 FloatRegister fval = $src$$FloatRegister;
11341 __ trunc_w_s(F30, fval);
11342 __ mfc1(dreg, F30);
11343 __ c_un_s(fval, fval); //NaN?
11344 __ movt(dreg, R0);
11345 %}
11347 ins_pipe( pipe_slow );
11348 %}
11350 instruct convF2I_reg_slow( mRegI dst, regF src ) %{
11351 match(Set dst (ConvF2I src));
11352 ins_cost(250);
11353 format %{ "convf2i $dst, $src @ convF2I_reg_slow" %}
11354 ins_encode %{
11355 Register dreg = $dst$$Register;
11356 FloatRegister fval = $src$$FloatRegister;
11357 Label L;
11359 __ c_un_s(fval, fval); //NaN?
11360 __ bc1t(L);
11361 __ delayed();
11362 __ move(dreg, R0);
11364 __ trunc_w_s(F30, fval);
11366 /* Call SharedRuntime:f2i() to do valid convention */
11367 __ cfc1(AT, 31);
11368 __ li(T9, 0x10000);
11369 __ andr(AT, AT, T9);
11370 __ beq(AT, R0, L);
11371 __ delayed()->mfc1(dreg, F30);
11373 __ mov_s(F12, fval);
11375 /* 2014/01/08 Fu : This bug was found when running ezDS's control-panel.
11376 * J 982 C2 javax.swing.text.BoxView.layoutMajorAxis(II[I[I)V (283 bytes) @ 0x000000555c46aa74
11377 *
11378 * An interger array index has been assigned to V0, and then changed from 1 to Integer.MAX_VALUE.
11379 * V0 is corrupted during call_VM_leaf(), and should be preserved.
11380 */
11381 if(dreg != V0) {
11382 __ push(V0);
11383 }
11384 __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::f2i), 1);
11385 if(dreg != V0) {
11386 __ move(dreg, V0);
11387 __ pop(V0);
11388 }
11389 __ bind(L);
11390 %}
11392 ins_pipe( pipe_slow );
11393 %}
11395 instruct convF2L_reg_fast( mRegL dst, regF src ) %{
11396 match(Set dst (ConvF2L src));
11397 ins_cost(150);
11398 format %{ "convf2l $dst, $src @ convF2L_reg_fast" %}
11399 ins_encode %{
11400 Register dreg = $dst$$Register;
11401 FloatRegister fval = $src$$FloatRegister;
11403 __ trunc_l_s(F30, fval);
11404 __ dmfc1(dreg, F30);
11405 __ c_un_s(fval, fval); //NaN?
11406 __ movt(dreg, R0);
11407 %}
11409 ins_pipe( pipe_slow );
11410 %}
11412 instruct convF2L_reg_slow( mRegL dst, regF src ) %{
11413 match(Set dst (ConvF2L src));
11414 ins_cost(250);
11415 format %{ "convf2l $dst, $src @ convF2L_reg_slow" %}
11416 ins_encode %{
11417 Register dst = as_Register($dst$$reg);
11418 FloatRegister fval = $src$$FloatRegister;
11419 Label L;
11421 __ c_un_s(fval, fval); //NaN?
11422 __ bc1t(L);
11423 __ delayed();
11424 __ move(dst, R0);
11426 __ trunc_l_s(F30, fval);
11427 __ cfc1(AT, 31);
11428 __ li(T9, 0x10000);
11429 __ andr(AT, AT, T9);
11430 __ beq(AT, R0, L);
11431 __ delayed()->dmfc1(dst, F30);
11433 __ mov_s(F12, fval);
11434 __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::f2l), 1);
11435 __ move(dst, V0);
11436 __ bind(L);
11437 %}
11439 ins_pipe( pipe_slow );
11440 %}
11442 instruct convL2F_reg( regF dst, mRegL src ) %{
11443 match(Set dst (ConvL2F src));
11444 format %{ "convl2f $dst, $src @ convL2F_reg" %}
11445 ins_encode %{
11446 FloatRegister dst = $dst$$FloatRegister;
11447 Register src = as_Register($src$$reg);
11448 Label L;
11450 __ dmtc1(src, dst);
11451 __ cvt_s_l(dst, dst);
11452 %}
11454 ins_pipe( pipe_slow );
11455 %}
11457 instruct convI2F_reg( regF dst, mRegI src ) %{
11458 match(Set dst (ConvI2F src));
11459 format %{ "convi2f $dst, $src @ convI2F_reg" %}
11460 ins_encode %{
11461 Register src = $src$$Register;
11462 FloatRegister dst = $dst$$FloatRegister;
11464 __ mtc1(src, dst);
11465 __ cvt_s_w(dst, dst);
11466 %}
11468 ins_pipe( fpu_regF_regF );
11469 %}
11471 instruct cmpLTMask_immI0( mRegI dst, mRegI p, immI0 zero ) %{
11472 match(Set dst (CmpLTMask p zero));
11473 ins_cost(100);
11475 format %{ "sra $dst, $p, 31 @ cmpLTMask_immI0" %}
11476 ins_encode %{
11477 Register src = $p$$Register;
11478 Register dst = $dst$$Register;
11480 __ sra(dst, src, 31);
11481 %}
11482 ins_pipe( pipe_slow );
11483 %}
11486 instruct cmpLTMask( mRegI dst, mRegI p, mRegI q ) %{
11487 match(Set dst (CmpLTMask p q));
11488 ins_cost(400);
11490 format %{ "cmpLTMask $dst, $p, $q @ cmpLTMask" %}
11491 ins_encode %{
11492 Register p = $p$$Register;
11493 Register q = $q$$Register;
11494 Register dst = $dst$$Register;
11496 __ slt(dst, p, q);
11497 __ subu(dst, R0, dst);
11498 %}
11499 ins_pipe( pipe_slow );
11500 %}
11502 instruct convP2B(mRegI dst, mRegP src) %{
11503 match(Set dst (Conv2B src));
11505 ins_cost(100);
11506 format %{ "convP2B $dst, $src @ convP2B" %}
11507 ins_encode %{
11508 Register dst = as_Register($dst$$reg);
11509 Register src = as_Register($src$$reg);
11511 if (dst != src) {
11512 __ daddiu(dst, R0, 1);
11513 __ movz(dst, R0, src);
11514 } else {
11515 __ move(AT, src);
11516 __ daddiu(dst, R0, 1);
11517 __ movz(dst, R0, AT);
11518 }
11519 %}
11521 ins_pipe( ialu_regL_regL );
11522 %}
11525 instruct convI2D_reg_reg(regD dst, mRegI src) %{
11526 match(Set dst (ConvI2D src));
11527 format %{ "conI2D $dst, $src @convI2D_reg" %}
11528 ins_encode %{
11529 Register src = $src$$Register;
11530 FloatRegister dst = $dst$$FloatRegister;
11531 __ mtc1(src, dst);
11532 __ cvt_d_w(dst, dst);
11533 %}
11534 ins_pipe( fpu_regF_regF );
11535 %}
11537 instruct convF2D_reg_reg(regD dst, regF src) %{
11538 match(Set dst (ConvF2D src));
11539 format %{ "convF2D $dst, $src\t# @convF2D_reg_reg" %}
11540 ins_encode %{
11541 FloatRegister dst = $dst$$FloatRegister;
11542 FloatRegister src = $src$$FloatRegister;
11544 __ cvt_d_s(dst, src);
11545 %}
11546 ins_pipe( fpu_regF_regF );
11547 %}
11549 instruct convD2F_reg_reg(regF dst, regD src) %{
11550 match(Set dst (ConvD2F src));
11551 format %{ "convD2F $dst, $src\t# @convD2F_reg_reg" %}
11552 ins_encode %{
11553 FloatRegister dst = $dst$$FloatRegister;
11554 FloatRegister src = $src$$FloatRegister;
11556 __ cvt_s_d(dst, src);
11557 %}
11558 ins_pipe( fpu_regF_regF );
11559 %}
11561 // Convert a double to an int. If the double is a NAN, stuff a zero in instead.
11562 instruct convD2I_reg_reg_fast( mRegI dst, regD src ) %{
11563 match(Set dst (ConvD2I src));
11565 ins_cost(150);
11566 format %{ "convD2I $dst, $src\t# @ convD2I_reg_reg_fast" %}
11568 ins_encode %{
11569 FloatRegister src = $src$$FloatRegister;
11570 Register dst = $dst$$Register;
11572 Label Done;
11574 __ trunc_w_d(F30, src);
11575 // max_int: 2147483647
11576 __ move(AT, 0x7fffffff);
11577 __ mfc1(dst, F30);
11579 __ bne(dst, AT, Done);
11580 __ delayed()->mtc1(R0, F30);
11582 __ cvt_d_w(F30, F30);
11583 __ c_ult_d(src, F30);
11584 __ bc1f(Done);
11585 __ delayed()->addiu(T9, R0, -1);
11587 __ c_un_d(src, src); //NaN?
11588 __ subu32(dst, T9, AT);
11589 __ movt(dst, R0);
11591 __ bind(Done);
11592 %}
11593 ins_pipe( pipe_slow );
11594 %}
11596 instruct convD2I_reg_reg_slow( mRegI dst, regD src ) %{
11597 match(Set dst (ConvD2I src));
11599 ins_cost(250);
11600 format %{ "convD2I $dst, $src\t# @ convD2I_reg_reg_slow" %}
11602 ins_encode %{
11603 FloatRegister src = $src$$FloatRegister;
11604 Register dst = $dst$$Register;
11605 Label L;
11607 __ trunc_w_d(F30, src);
11608 __ cfc1(AT, 31);
11609 __ li(T9, 0x10000);
11610 __ andr(AT, AT, T9);
11611 __ beq(AT, R0, L);
11612 __ delayed()->mfc1(dst, F30);
11614 __ mov_d(F12, src);
11615 __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::d2i), 1);
11616 __ move(dst, V0);
11617 __ bind(L);
11619 %}
11620 ins_pipe( pipe_slow );
11621 %}
11623 // Convert oop pointer into compressed form
11624 instruct encodeHeapOop(mRegN dst, mRegP src) %{
11625 predicate(n->bottom_type()->make_ptr()->ptr() != TypePtr::NotNull);
11626 match(Set dst (EncodeP src));
11627 format %{ "encode_heap_oop $dst,$src" %}
11628 ins_encode %{
11629 Register src = $src$$Register;
11630 Register dst = $dst$$Register;
11631 if (src != dst) {
11632 __ move(dst, src);
11633 }
11634 __ encode_heap_oop(dst);
11635 %}
11636 ins_pipe( ialu_regL_regL );
11637 %}
11639 instruct encodeHeapOop_not_null(mRegN dst, mRegP src) %{
11640 predicate(n->bottom_type()->make_ptr()->ptr() == TypePtr::NotNull);
11641 match(Set dst (EncodeP src));
11642 format %{ "encode_heap_oop_not_null $dst,$src @ encodeHeapOop_not_null" %}
11643 ins_encode %{
11644 __ encode_heap_oop_not_null($dst$$Register, $src$$Register);
11645 %}
11646 ins_pipe( ialu_regL_regL );
11647 %}
11649 instruct decodeHeapOop(mRegP dst, mRegN src) %{
11650 predicate(n->bottom_type()->is_ptr()->ptr() != TypePtr::NotNull &&
11651 n->bottom_type()->is_ptr()->ptr() != TypePtr::Constant);
11652 match(Set dst (DecodeN src));
11653 format %{ "decode_heap_oop $dst,$src @ decodeHeapOop" %}
11654 ins_encode %{
11655 Register s = $src$$Register;
11656 Register d = $dst$$Register;
11657 if (s != d) {
11658 __ move(d, s);
11659 }
11660 __ decode_heap_oop(d);
11661 %}
11662 ins_pipe( ialu_regL_regL );
11663 %}
11665 instruct decodeHeapOop_not_null(mRegP dst, mRegN src) %{
11666 predicate(n->bottom_type()->is_ptr()->ptr() == TypePtr::NotNull ||
11667 n->bottom_type()->is_ptr()->ptr() == TypePtr::Constant);
11668 match(Set dst (DecodeN src));
11669 format %{ "decode_heap_oop_not_null $dst,$src @ decodeHeapOop_not_null" %}
11670 ins_encode %{
11671 Register s = $src$$Register;
11672 Register d = $dst$$Register;
11673 if (s != d) {
11674 __ decode_heap_oop_not_null(d, s);
11675 } else {
11676 __ decode_heap_oop_not_null(d);
11677 }
11678 %}
11679 ins_pipe( ialu_regL_regL );
11680 %}
11682 instruct encodeKlass_not_null(mRegN dst, mRegP src) %{
11683 match(Set dst (EncodePKlass src));
11684 format %{ "encode_heap_oop_not_null $dst,$src @ encodeKlass_not_null" %}
11685 ins_encode %{
11686 __ encode_klass_not_null($dst$$Register, $src$$Register);
11687 %}
11688 ins_pipe( ialu_regL_regL );
11689 %}
11691 instruct decodeKlass_not_null(mRegP dst, mRegN src) %{
11692 match(Set dst (DecodeNKlass src));
11693 format %{ "decode_heap_klass_not_null $dst,$src" %}
11694 ins_encode %{
11695 Register s = $src$$Register;
11696 Register d = $dst$$Register;
11697 if (s != d) {
11698 __ decode_klass_not_null(d, s);
11699 } else {
11700 __ decode_klass_not_null(d);
11701 }
11702 %}
11703 ins_pipe( ialu_regL_regL );
11704 %}
11706 //FIXME
11707 instruct tlsLoadP(mRegP dst) %{
11708 match(Set dst (ThreadLocal));
11710 ins_cost(0);
11711 format %{ " get_thread in $dst #@tlsLoadP" %}
11712 ins_encode %{
11713 Register dst = $dst$$Register;
11714 #ifdef OPT_THREAD
11715 __ move(dst, TREG);
11716 #else
11717 __ get_thread(dst);
11718 #endif
11719 %}
11721 ins_pipe( ialu_loadI );
11722 %}
11725 instruct checkCastPP( mRegP dst ) %{
11726 match(Set dst (CheckCastPP dst));
11728 format %{ "#checkcastPP of $dst (empty encoding) #@chekCastPP" %}
11729 ins_encode( /*empty encoding*/ );
11730 ins_pipe( empty );
11731 %}
11733 instruct castPP(mRegP dst)
11734 %{
11735 match(Set dst (CastPP dst));
11737 size(0);
11738 format %{ "# castPP of $dst" %}
11739 ins_encode(/* empty encoding */);
11740 ins_pipe(empty);
11741 %}
11743 instruct castII( mRegI dst ) %{
11744 match(Set dst (CastII dst));
11745 format %{ "#castII of $dst empty encoding" %}
11746 ins_encode( /*empty encoding*/ );
11747 ins_cost(0);
11748 ins_pipe( empty );
11749 %}
11751 // Return Instruction
11752 // Remove the return address & jump to it.
11753 instruct Ret() %{
11754 match(Return);
11755 format %{ "RET #@Ret" %}
11757 ins_encode %{
11758 __ jr(RA);
11759 __ nop();
11760 %}
11762 ins_pipe( pipe_jump );
11763 %}
11765 /*
11766 // For Loongson CPUs, jr seems too slow, so this rule shouldn't be imported.
11767 instruct jumpXtnd(mRegL switch_val) %{
11768 match(Jump switch_val);
11770 ins_cost(350);
11772 format %{ "load T9 <-- [$constanttablebase, $switch_val, $constantoffset] @ jumpXtnd\n\t"
11773 "jr T9\n\t"
11774 "nop" %}
11775 ins_encode %{
11776 Register table_base = $constanttablebase;
11777 int con_offset = $constantoffset;
11778 Register switch_reg = $switch_val$$Register;
11780 if (UseLoongsonISA) {
11781 if (Assembler::is_simm(con_offset, 8)) {
11782 __ gsldx(T9, table_base, switch_reg, con_offset);
11783 } else if (Assembler::is_simm16(con_offset)) {
11784 __ daddu(T9, table_base, switch_reg);
11785 __ ld(T9, T9, con_offset);
11786 } else {
11787 __ move(T9, con_offset);
11788 __ daddu(AT, table_base, switch_reg);
11789 __ gsldx(T9, AT, T9, 0);
11790 }
11791 } else {
11792 if (Assembler::is_simm16(con_offset)) {
11793 __ daddu(T9, table_base, switch_reg);
11794 __ ld(T9, T9, con_offset);
11795 } else {
11796 __ move(T9, con_offset);
11797 __ daddu(AT, table_base, switch_reg);
11798 __ daddu(AT, T9, AT);
11799 __ ld(T9, AT, 0);
11800 }
11801 }
11803 __ jr(T9);
11804 __ nop();
11806 %}
11807 ins_pipe(pipe_jump);
11808 %}
11809 */
11811 // Jump Direct - Label defines a relative address from JMP
11812 instruct jmpDir(label labl) %{
11813 match(Goto);
11814 effect(USE labl);
11816 ins_cost(300);
11817 format %{ "JMP $labl #@jmpDir" %}
11819 ins_encode %{
11820 Label &L = *($labl$$label);
11821 if(&L)
11822 __ b(L);
11823 else
11824 __ b(int(0));
11825 __ nop();
11826 %}
11828 ins_pipe( pipe_jump );
11829 ins_pc_relative(1);
11830 %}
11834 // Tail Jump; remove the return address; jump to target.
11835 // TailCall above leaves the return address around.
11836 // TailJump is used in only one place, the rethrow_Java stub (fancy_jump=2).
11837 // ex_oop (Exception Oop) is needed in %o0 at the jump. As there would be a
11838 // "restore" before this instruction (in Epilogue), we need to materialize it
11839 // in %i0.
11840 //FIXME
11841 instruct tailjmpInd(mRegP jump_target,mRegP ex_oop) %{
11842 match( TailJump jump_target ex_oop );
11843 ins_cost(200);
11844 format %{ "Jmp $jump_target ; ex_oop = $ex_oop #@tailjmpInd" %}
11845 ins_encode %{
11846 Register target = $jump_target$$Register;
11848 /* 2012/9/14 Jin: V0, V1 are indicated in:
11849 * [stubGenerator_mips.cpp] generate_forward_exception()
11850 * [runtime_mips.cpp] OptoRuntime::generate_exception_blob()
11851 */
11852 Register oop = $ex_oop$$Register;
11853 Register exception_oop = V0;
11854 Register exception_pc = V1;
11856 __ move(exception_pc, RA);
11857 __ move(exception_oop, oop);
11859 __ jr(target);
11860 __ nop();
11861 %}
11862 ins_pipe( pipe_jump );
11863 %}
11865 // ============================================================================
11866 // Procedure Call/Return Instructions
11867 // Call Java Static Instruction
11868 // Note: If this code changes, the corresponding ret_addr_offset() and
11869 // compute_padding() functions will have to be adjusted.
11870 instruct CallStaticJavaDirect(method meth) %{
11871 match(CallStaticJava);
11872 effect(USE meth);
11874 ins_cost(300);
11875 format %{ "CALL,static #@CallStaticJavaDirect " %}
11876 ins_encode( Java_Static_Call( meth ) );
11877 ins_pipe( pipe_slow );
11878 ins_pc_relative(1);
11879 ins_alignment(16);
11880 %}
11882 // Call Java Dynamic Instruction
11883 // Note: If this code changes, the corresponding ret_addr_offset() and
11884 // compute_padding() functions will have to be adjusted.
11885 instruct CallDynamicJavaDirect(method meth) %{
11886 match(CallDynamicJava);
11887 effect(USE meth);
11889 ins_cost(300);
11890 format %{"MOV IC_Klass, (oop)-1 @ CallDynamicJavaDirect\n\t"
11891 "CallDynamic @ CallDynamicJavaDirect" %}
11892 ins_encode( Java_Dynamic_Call( meth ) );
11893 ins_pipe( pipe_slow );
11894 ins_pc_relative(1);
11895 ins_alignment(16);
11896 %}
11898 instruct CallLeafNoFPDirect(method meth) %{
11899 match(CallLeafNoFP);
11900 effect(USE meth);
11902 ins_cost(300);
11903 format %{ "CALL_LEAF_NOFP,runtime " %}
11904 ins_encode(Java_To_Runtime(meth));
11905 ins_pipe( pipe_slow );
11906 ins_pc_relative(1);
11907 ins_alignment(16);
11908 %}
11910 // Prefetch instructions.
11912 instruct prefetchrNTA( memory mem ) %{
11913 match(PrefetchRead mem);
11914 ins_cost(125);
11916 format %{ "pref $mem\t# Prefetch into non-temporal cache for read @ prefetchrNTA" %}
11917 ins_encode %{
11918 int base = $mem$$base;
11919 int index = $mem$$index;
11920 int scale = $mem$$scale;
11921 int disp = $mem$$disp;
11923 if( index != 0 ) {
11924 if (scale == 0) {
11925 __ daddu(AT, as_Register(base), as_Register(index));
11926 } else {
11927 __ dsll(AT, as_Register(index), scale);
11928 __ daddu(AT, as_Register(base), AT);
11929 }
11930 } else {
11931 __ move(AT, as_Register(base));
11932 }
11933 if( Assembler::is_simm16(disp) ) {
11934 __ daddiu(AT, as_Register(base), disp);
11935 __ daddiu(AT, AT, disp);
11936 } else {
11937 __ move(T9, disp);
11938 __ daddu(AT, as_Register(base), T9);
11939 }
11940 __ pref(0, AT, 0); //hint: 0:load
11941 %}
11942 ins_pipe(pipe_slow);
11943 %}
11945 instruct prefetchwNTA( memory mem ) %{
11946 match(PrefetchWrite mem);
11947 ins_cost(125);
11948 format %{ "pref $mem\t# Prefetch to non-temporal cache for write @ prefetchwNTA" %}
11949 ins_encode %{
11950 int base = $mem$$base;
11951 int index = $mem$$index;
11952 int scale = $mem$$scale;
11953 int disp = $mem$$disp;
11955 if( index != 0 ) {
11956 if (scale == 0) {
11957 __ daddu(AT, as_Register(base), as_Register(index));
11958 } else {
11959 __ dsll(AT, as_Register(index), scale);
11960 __ daddu(AT, as_Register(base), AT);
11961 }
11962 } else {
11963 __ move(AT, as_Register(base));
11964 }
11965 if( Assembler::is_simm16(disp) ) {
11966 __ daddiu(AT, as_Register(base), disp);
11967 __ daddiu(AT, AT, disp);
11968 } else {
11969 __ move(T9, disp);
11970 __ daddu(AT, as_Register(base), T9);
11971 }
11972 __ pref(1, AT, 0); //hint: 1:store
11973 %}
11974 ins_pipe(pipe_slow);
11975 %}
11977 // Prefetch instructions for allocation.
11979 instruct prefetchAllocNTA( memory mem ) %{
11980 match(PrefetchAllocation mem);
11981 ins_cost(125);
11982 format %{ "pref $mem\t# Prefetch allocation @ prefetchAllocNTA" %}
11983 ins_encode %{
11984 int base = $mem$$base;
11985 int index = $mem$$index;
11986 int scale = $mem$$scale;
11987 int disp = $mem$$disp;
11989 Register dst = R0;
11991 if( index != 0 ) {
11992 if( Assembler::is_simm16(disp) ) {
11993 if( UseLoongsonISA ) {
11994 if (scale == 0) {
11995 __ gslbx(dst, as_Register(base), as_Register(index), disp);
11996 } else {
11997 __ dsll(AT, as_Register(index), scale);
11998 __ gslbx(dst, as_Register(base), AT, disp);
11999 }
12000 } else {
12001 if (scale == 0) {
12002 __ addu(AT, as_Register(base), as_Register(index));
12003 } else {
12004 __ dsll(AT, as_Register(index), scale);
12005 __ addu(AT, as_Register(base), AT);
12006 }
12007 __ lb(dst, AT, disp);
12008 }
12009 } else {
12010 if (scale == 0) {
12011 __ addu(AT, as_Register(base), as_Register(index));
12012 } else {
12013 __ dsll(AT, as_Register(index), scale);
12014 __ addu(AT, as_Register(base), AT);
12015 }
12016 __ move(T9, disp);
12017 if( UseLoongsonISA ) {
12018 __ gslbx(dst, AT, T9, 0);
12019 } else {
12020 __ addu(AT, AT, T9);
12021 __ lb(dst, AT, 0);
12022 }
12023 }
12024 } else {
12025 if( Assembler::is_simm16(disp) ) {
12026 __ lb(dst, as_Register(base), disp);
12027 } else {
12028 __ move(T9, disp);
12029 if( UseLoongsonISA ) {
12030 __ gslbx(dst, as_Register(base), T9, 0);
12031 } else {
12032 __ addu(AT, as_Register(base), T9);
12033 __ lb(dst, AT, 0);
12034 }
12035 }
12036 }
12037 %}
12038 ins_pipe(pipe_slow);
12039 %}
12042 // Call runtime without safepoint
12043 instruct CallLeafDirect(method meth) %{
12044 match(CallLeaf);
12045 effect(USE meth);
12047 ins_cost(300);
12048 format %{ "CALL_LEAF,runtime #@CallLeafDirect " %}
12049 ins_encode(Java_To_Runtime(meth));
12050 ins_pipe( pipe_slow );
12051 ins_pc_relative(1);
12052 ins_alignment(16);
12053 %}
12055 // Load Char (16bit unsigned)
12056 instruct loadUS(mRegI dst, memory mem) %{
12057 match(Set dst (LoadUS mem));
12059 ins_cost(125);
12060 format %{ "loadUS $dst,$mem @ loadC" %}
12061 ins_encode(load_C_enc(dst, mem));
12062 ins_pipe( ialu_loadI );
12063 %}
12065 instruct loadUS_convI2L(mRegL dst, memory mem) %{
12066 match(Set dst (ConvI2L (LoadUS mem)));
12068 ins_cost(125);
12069 format %{ "loadUS $dst,$mem @ loadUS_convI2L" %}
12070 ins_encode(load_C_enc(dst, mem));
12071 ins_pipe( ialu_loadI );
12072 %}
12074 // Store Char (16bit unsigned)
12075 instruct storeC(memory mem, mRegI src) %{
12076 match(Set mem (StoreC mem src));
12078 ins_cost(125);
12079 format %{ "storeC $src, $mem @ storeC" %}
12080 ins_encode(store_C_reg_enc(mem, src));
12081 ins_pipe( ialu_loadI );
12082 %}
12084 instruct storeC0(memory mem, immI0 zero) %{
12085 match(Set mem (StoreC mem zero));
12087 ins_cost(125);
12088 format %{ "storeC $zero, $mem @ storeC0" %}
12089 ins_encode(store_C0_enc(mem));
12090 ins_pipe( ialu_loadI );
12091 %}
12094 instruct loadConF0(regF dst, immF0 zero) %{
12095 match(Set dst zero);
12096 ins_cost(100);
12098 format %{ "mov $dst, zero @ loadConF0\n"%}
12099 ins_encode %{
12100 FloatRegister dst = $dst$$FloatRegister;
12102 __ mtc1(R0, dst);
12103 %}
12104 ins_pipe( fpu_loadF );
12105 %}
12108 instruct loadConF(regF dst, immF src) %{
12109 match(Set dst src);
12110 ins_cost(125);
12112 format %{ "lwc1 $dst, $constantoffset[$constanttablebase] # load FLOAT $src from table @ loadConF" %}
12113 ins_encode %{
12114 int con_offset = $constantoffset($src);
12116 if (Assembler::is_simm16(con_offset)) {
12117 __ lwc1($dst$$FloatRegister, $constanttablebase, con_offset);
12118 } else {
12119 __ set64(AT, con_offset);
12120 if (UseLoongsonISA) {
12121 __ gslwxc1($dst$$FloatRegister, $constanttablebase, AT, 0);
12122 } else {
12123 __ daddu(AT, $constanttablebase, AT);
12124 __ lwc1($dst$$FloatRegister, AT, 0);
12125 }
12126 }
12127 %}
12128 ins_pipe( fpu_loadF );
12129 %}
12132 instruct loadConD0(regD dst, immD0 zero) %{
12133 match(Set dst zero);
12134 ins_cost(100);
12136 format %{ "mov $dst, zero @ loadConD0"%}
12137 ins_encode %{
12138 FloatRegister dst = as_FloatRegister($dst$$reg);
12140 __ dmtc1(R0, dst);
12141 %}
12142 ins_pipe( fpu_loadF );
12143 %}
12145 instruct loadConD(regD dst, immD src) %{
12146 match(Set dst src);
12147 ins_cost(125);
12149 format %{ "ldc1 $dst, $constantoffset[$constanttablebase] # load DOUBLE $src from table @ loadConD" %}
12150 ins_encode %{
12151 int con_offset = $constantoffset($src);
12153 if (Assembler::is_simm16(con_offset)) {
12154 __ ldc1($dst$$FloatRegister, $constanttablebase, con_offset);
12155 } else {
12156 __ set64(AT, con_offset);
12157 if (UseLoongsonISA) {
12158 __ gsldxc1($dst$$FloatRegister, $constanttablebase, AT, 0);
12159 } else {
12160 __ daddu(AT, $constanttablebase, AT);
12161 __ ldc1($dst$$FloatRegister, AT, 0);
12162 }
12163 }
12164 %}
12165 ins_pipe( fpu_loadF );
12166 %}
12168 // Store register Float value (it is faster than store from FPU register)
12169 instruct storeF_reg( memory mem, regF src) %{
12170 match(Set mem (StoreF mem src));
12172 ins_cost(50);
12173 format %{ "store $mem, $src\t# store float @ storeF_reg" %}
12174 ins_encode(store_F_reg_enc(mem, src));
12175 ins_pipe( fpu_storeF );
12176 %}
12178 instruct storeF_imm0( memory mem, immF0 zero) %{
12179 match(Set mem (StoreF mem zero));
12181 ins_cost(40);
12182 format %{ "store $mem, zero\t# store float @ storeF_imm0" %}
12183 ins_encode %{
12184 int base = $mem$$base;
12185 int index = $mem$$index;
12186 int scale = $mem$$scale;
12187 int disp = $mem$$disp;
12189 if( index != 0 ) {
12190 if(scale != 0) {
12191 __ dsll(T9, as_Register(index), scale);
12192 __ addu(AT, as_Register(base), T9);
12193 } else {
12194 __ daddu(AT, as_Register(base), as_Register(index));
12195 }
12196 if( Assembler::is_simm16(disp) ) {
12197 __ sw(R0, AT, disp);
12198 } else {
12199 __ move(T9, disp);
12200 __ addu(AT, AT, T9);
12201 __ sw(R0, AT, 0);
12202 }
12204 } else {
12205 if( Assembler::is_simm16(disp) ) {
12206 __ sw(R0, as_Register(base), disp);
12207 } else {
12208 __ move(T9, disp);
12209 __ addu(AT, as_Register(base), T9);
12210 __ sw(R0, AT, 0);
12211 }
12212 }
12213 %}
12214 ins_pipe( ialu_storeI );
12215 %}
12217 // Load Double
12218 instruct loadD(regD dst, memory mem) %{
12219 match(Set dst (LoadD mem));
12221 ins_cost(150);
12222 format %{ "loadD $dst, $mem #@loadD" %}
12223 ins_encode(load_D_enc(dst, mem));
12224 ins_pipe( ialu_loadI );
12225 %}
12227 // Load Double - UNaligned
12228 instruct loadD_unaligned(regD dst, memory mem ) %{
12229 match(Set dst (LoadD_unaligned mem));
12230 ins_cost(250);
12231 // FIXME: Jin: Need more effective ldl/ldr
12232 format %{ "loadD_unaligned $dst, $mem #@loadD_unaligned" %}
12233 ins_encode(load_D_enc(dst, mem));
12234 ins_pipe( ialu_loadI );
12235 %}
12237 instruct storeD_reg( memory mem, regD src) %{
12238 match(Set mem (StoreD mem src));
12240 ins_cost(50);
12241 format %{ "store $mem, $src\t# store float @ storeD_reg" %}
12242 ins_encode(store_D_reg_enc(mem, src));
12243 ins_pipe( fpu_storeF );
12244 %}
12246 instruct storeD_imm0( memory mem, immD0 zero) %{
12247 match(Set mem (StoreD mem zero));
12249 ins_cost(40);
12250 format %{ "store $mem, zero\t# store float @ storeD_imm0" %}
12251 ins_encode %{
12252 int base = $mem$$base;
12253 int index = $mem$$index;
12254 int scale = $mem$$scale;
12255 int disp = $mem$$disp;
12257 __ mtc1(R0, F30);
12258 __ cvt_d_w(F30, F30);
12260 if( index != 0 ) {
12261 if(scale != 0) {
12262 __ dsll(T9, as_Register(index), scale);
12263 __ addu(AT, as_Register(base), T9);
12264 } else {
12265 __ daddu(AT, as_Register(base), as_Register(index));
12266 }
12267 if( Assembler::is_simm16(disp) ) {
12268 __ sdc1(F30, AT, disp);
12269 } else {
12270 __ move(T9, disp);
12271 __ addu(AT, AT, T9);
12272 __ sdc1(F30, AT, 0);
12273 }
12275 } else {
12276 if( Assembler::is_simm16(disp) ) {
12277 __ sdc1(F30, as_Register(base), disp);
12278 } else {
12279 __ move(T9, disp);
12280 __ addu(AT, as_Register(base), T9);
12281 __ sdc1(F30, AT, 0);
12282 }
12283 }
12284 %}
12285 ins_pipe( ialu_storeI );
12286 %}
12288 instruct loadSSI(mRegI dst, stackSlotI src)
12289 %{
12290 match(Set dst src);
12292 ins_cost(125);
12293 format %{ "lw $dst, $src\t# int stk @ loadSSI" %}
12294 ins_encode %{
12295 guarantee( Assembler::is_simm16($src$$disp), "disp too long (loadSSI) !");
12296 __ lw($dst$$Register, SP, $src$$disp);
12297 %}
12298 ins_pipe(ialu_loadI);
12299 %}
12301 instruct storeSSI(stackSlotI dst, mRegI src)
12302 %{
12303 match(Set dst src);
12305 ins_cost(100);
12306 format %{ "sw $dst, $src\t# int stk @ storeSSI" %}
12307 ins_encode %{
12308 guarantee( Assembler::is_simm16($dst$$disp), "disp too long (storeSSI) !");
12309 __ sw($src$$Register, SP, $dst$$disp);
12310 %}
12311 ins_pipe(ialu_storeI);
12312 %}
12314 instruct loadSSL(mRegL dst, stackSlotL src)
12315 %{
12316 match(Set dst src);
12318 ins_cost(125);
12319 format %{ "ld $dst, $src\t# long stk @ loadSSL" %}
12320 ins_encode %{
12321 guarantee( Assembler::is_simm16($src$$disp), "disp too long (loadSSL) !");
12322 __ ld($dst$$Register, SP, $src$$disp);
12323 %}
12324 ins_pipe(ialu_loadI);
12325 %}
12327 instruct storeSSL(stackSlotL dst, mRegL src)
12328 %{
12329 match(Set dst src);
12331 ins_cost(100);
12332 format %{ "sd $dst, $src\t# long stk @ storeSSL" %}
12333 ins_encode %{
12334 guarantee( Assembler::is_simm16($dst$$disp), "disp too long (storeSSL) !");
12335 __ sd($src$$Register, SP, $dst$$disp);
12336 %}
12337 ins_pipe(ialu_storeI);
12338 %}
12340 instruct loadSSP(mRegP dst, stackSlotP src)
12341 %{
12342 match(Set dst src);
12344 ins_cost(125);
12345 format %{ "ld $dst, $src\t# ptr stk @ loadSSP" %}
12346 ins_encode %{
12347 guarantee( Assembler::is_simm16($src$$disp), "disp too long (loadSSP) !");
12348 __ ld($dst$$Register, SP, $src$$disp);
12349 %}
12350 ins_pipe(ialu_loadI);
12351 %}
12353 instruct storeSSP(stackSlotP dst, mRegP src)
12354 %{
12355 match(Set dst src);
12357 ins_cost(100);
12358 format %{ "sd $dst, $src\t# ptr stk @ storeSSP" %}
12359 ins_encode %{
12360 guarantee( Assembler::is_simm16($dst$$disp), "disp too long (storeSSP) !");
12361 __ sd($src$$Register, SP, $dst$$disp);
12362 %}
12363 ins_pipe(ialu_storeI);
12364 %}
12366 instruct loadSSF(regF dst, stackSlotF src)
12367 %{
12368 match(Set dst src);
12370 ins_cost(125);
12371 format %{ "lwc1 $dst, $src\t# float stk @ loadSSF" %}
12372 ins_encode %{
12373 guarantee( Assembler::is_simm16($src$$disp), "disp too long (loadSSF) !");
12374 __ lwc1($dst$$FloatRegister, SP, $src$$disp);
12375 %}
12376 ins_pipe(ialu_loadI);
12377 %}
12379 instruct storeSSF(stackSlotF dst, regF src)
12380 %{
12381 match(Set dst src);
12383 ins_cost(100);
12384 format %{ "swc1 $dst, $src\t# float stk @ storeSSF" %}
12385 ins_encode %{
12386 guarantee( Assembler::is_simm16($dst$$disp), "disp too long (storeSSF) !");
12387 __ swc1($src$$FloatRegister, SP, $dst$$disp);
12388 %}
12389 ins_pipe(fpu_storeF);
12390 %}
12392 // Use the same format since predicate() can not be used here.
12393 instruct loadSSD(regD dst, stackSlotD src)
12394 %{
12395 match(Set dst src);
12397 ins_cost(125);
12398 format %{ "ldc1 $dst, $src\t# double stk @ loadSSD" %}
12399 ins_encode %{
12400 guarantee( Assembler::is_simm16($src$$disp), "disp too long (loadSSD) !");
12401 __ ldc1($dst$$FloatRegister, SP, $src$$disp);
12402 %}
12403 ins_pipe(ialu_loadI);
12404 %}
12406 instruct storeSSD(stackSlotD dst, regD src)
12407 %{
12408 match(Set dst src);
12410 ins_cost(100);
12411 format %{ "sdc1 $dst, $src\t# double stk @ storeSSD" %}
12412 ins_encode %{
12413 guarantee( Assembler::is_simm16($dst$$disp), "disp too long (storeSSD) !");
12414 __ sdc1($src$$FloatRegister, SP, $dst$$disp);
12415 %}
12416 ins_pipe(fpu_storeF);
12417 %}
12419 instruct cmpFastLock( FlagsReg cr, mRegP object, s0_RegP box, mRegI tmp, mRegP scr) %{
12420 match( Set cr (FastLock object box) );
12421 effect( TEMP tmp, TEMP scr, USE_KILL box );
12422 ins_cost(300);
12423 format %{ "FASTLOCK $cr $object, $box, $tmp #@ cmpFastLock" %}
12424 ins_encode %{
12425 __ fast_lock($object$$Register, $box$$Register, $tmp$$Register, $scr$$Register);
12426 %}
12428 ins_pipe( pipe_slow );
12429 ins_pc_relative(1);
12430 %}
12432 instruct cmpFastUnlock( FlagsReg cr, mRegP object, s0_RegP box, mRegP tmp ) %{
12433 match( Set cr (FastUnlock object box) );
12434 effect( TEMP tmp, USE_KILL box );
12435 ins_cost(300);
12436 format %{ "FASTUNLOCK $object, $box, $tmp #@cmpFastUnlock" %}
12437 ins_encode %{
12438 __ fast_unlock($object$$Register, $box$$Register, $tmp$$Register);
12439 %}
12441 ins_pipe( pipe_slow );
12442 ins_pc_relative(1);
12443 %}
12445 // Store CMS card-mark Immediate
12446 instruct storeImmCM(memory mem, immI8 src) %{
12447 match(Set mem (StoreCM mem src));
12449 ins_cost(150);
12450 format %{ "MOV8 $mem,$src\t! CMS card-mark imm0" %}
12451 // opcode(0xC6);
12452 ins_encode(store_B_immI_enc_sync(mem, src));
12453 ins_pipe( ialu_storeI );
12454 %}
12456 // Die now
12457 instruct ShouldNotReachHere( )
12458 %{
12459 match(Halt);
12460 ins_cost(300);
12462 // Use the following format syntax
12463 format %{ "ILLTRAP ;#@ShouldNotReachHere" %}
12464 ins_encode %{
12465 // Here we should emit illtrap !
12467 __ stop("in ShoudNotReachHere");
12469 %}
12470 ins_pipe( pipe_jump );
12471 %}
12473 instruct leaP8Narrow(mRegP dst, indOffset8Narrow mem)
12474 %{
12475 predicate(Universe::narrow_oop_shift() == 0);
12476 match(Set dst mem);
12478 ins_cost(110);
12479 format %{ "leaq $dst, $mem\t# ptr off8narrow @ leaP8Narrow" %}
12480 ins_encode %{
12481 Register dst = $dst$$Register;
12482 Register base = as_Register($mem$$base);
12483 int disp = $mem$$disp;
12485 __ daddiu(dst, base, disp);
12486 %}
12487 ins_pipe( ialu_regI_imm16 );
12488 %}
12490 instruct leaPPosIdxScaleOff8(mRegP dst, basePosIndexScaleOffset8 mem)
12491 %{
12492 match(Set dst mem);
12494 ins_cost(110);
12495 format %{ "leaq $dst, $mem\t# @ PosIdxScaleOff8" %}
12496 ins_encode %{
12497 Register dst = $dst$$Register;
12498 Register base = as_Register($mem$$base);
12499 Register index = as_Register($mem$$index);
12500 int scale = $mem$$scale;
12501 int disp = $mem$$disp;
12503 if (scale == 0) {
12504 __ daddu(AT, base, index);
12505 __ daddiu(dst, AT, disp);
12506 } else {
12507 __ dsll(AT, index, scale);
12508 __ daddu(AT, base, AT);
12509 __ daddiu(dst, AT, disp);
12510 }
12511 %}
12513 ins_pipe( ialu_regI_imm16 );
12514 %}
12516 instruct leaPIdxScale(mRegP dst, indIndexScale mem)
12517 %{
12518 match(Set dst mem);
12520 ins_cost(110);
12521 format %{ "leaq $dst, $mem\t# @ leaPIdxScale" %}
12522 ins_encode %{
12523 Register dst = $dst$$Register;
12524 Register base = as_Register($mem$$base);
12525 Register index = as_Register($mem$$index);
12526 int scale = $mem$$scale;
12528 if (scale == 0) {
12529 __ daddu(dst, base, index);
12530 } else {
12531 __ dsll(AT, index, scale);
12532 __ daddu(dst, base, AT);
12533 }
12534 %}
12536 ins_pipe( ialu_regI_imm16 );
12537 %}
12539 // Jump Direct Conditional - Label defines a relative address from Jcc+1
12540 instruct jmpLoopEnd(cmpOp cop, mRegI src1, mRegI src2, label labl) %{
12541 match(CountedLoopEnd cop (CmpI src1 src2));
12542 effect(USE labl);
12544 ins_cost(300);
12545 format %{ "J$cop $src1, $src2, $labl\t# Loop end @ jmpLoopEnd" %}
12546 ins_encode %{
12547 Register op1 = $src1$$Register;
12548 Register op2 = $src2$$Register;
12549 Label &L = *($labl$$label);
12550 int flag = $cop$$cmpcode;
12552 switch(flag)
12553 {
12554 case 0x01: //equal
12555 if (&L)
12556 __ beq(op1, op2, L);
12557 else
12558 __ beq(op1, op2, (int)0);
12559 break;
12560 case 0x02: //not_equal
12561 if (&L)
12562 __ bne(op1, op2, L);
12563 else
12564 __ bne(op1, op2, (int)0);
12565 break;
12566 case 0x03: //above
12567 __ slt(AT, op2, op1);
12568 if(&L)
12569 __ bne(AT, R0, L);
12570 else
12571 __ bne(AT, R0, (int)0);
12572 break;
12573 case 0x04: //above_equal
12574 __ slt(AT, op1, op2);
12575 if(&L)
12576 __ beq(AT, R0, L);
12577 else
12578 __ beq(AT, R0, (int)0);
12579 break;
12580 case 0x05: //below
12581 __ slt(AT, op1, op2);
12582 if(&L)
12583 __ bne(AT, R0, L);
12584 else
12585 __ bne(AT, R0, (int)0);
12586 break;
12587 case 0x06: //below_equal
12588 __ slt(AT, op2, op1);
12589 if(&L)
12590 __ beq(AT, R0, L);
12591 else
12592 __ beq(AT, R0, (int)0);
12593 break;
12594 default:
12595 Unimplemented();
12596 }
12597 __ nop();
12598 %}
12599 ins_pipe( pipe_jump );
12600 ins_pc_relative(1);
12601 %}
12604 instruct jmpLoopEnd_reg_imm16_sub(cmpOp cop, mRegI src1, immI16_sub src2, label labl) %{
12605 match(CountedLoopEnd cop (CmpI src1 src2));
12606 effect(USE labl);
12608 ins_cost(250);
12609 format %{ "J$cop $src1, $src2, $labl\t# Loop end @ jmpLoopEnd_reg_imm16_sub" %}
12610 ins_encode %{
12611 Register op1 = $src1$$Register;
12612 int op2 = $src2$$constant;
12613 Label &L = *($labl$$label);
12614 int flag = $cop$$cmpcode;
12616 __ addiu32(AT, op1, -1 * op2);
12618 switch(flag)
12619 {
12620 case 0x01: //equal
12621 if (&L)
12622 __ beq(AT, R0, L);
12623 else
12624 __ beq(AT, R0, (int)0);
12625 break;
12626 case 0x02: //not_equal
12627 if (&L)
12628 __ bne(AT, R0, L);
12629 else
12630 __ bne(AT, R0, (int)0);
12631 break;
12632 case 0x03: //above
12633 if(&L)
12634 __ bgtz(AT, L);
12635 else
12636 __ bgtz(AT, (int)0);
12637 break;
12638 case 0x04: //above_equal
12639 if(&L)
12640 __ bgez(AT, L);
12641 else
12642 __ bgez(AT,(int)0);
12643 break;
12644 case 0x05: //below
12645 if(&L)
12646 __ bltz(AT, L);
12647 else
12648 __ bltz(AT, (int)0);
12649 break;
12650 case 0x06: //below_equal
12651 if(&L)
12652 __ blez(AT, L);
12653 else
12654 __ blez(AT, (int)0);
12655 break;
12656 default:
12657 Unimplemented();
12658 }
12659 __ nop();
12660 %}
12661 ins_pipe( pipe_jump );
12662 ins_pc_relative(1);
12663 %}
12666 /*
12667 // Jump Direct Conditional - Label defines a relative address from Jcc+1
12668 instruct jmpLoopEndU(cmpOpU cop, eFlagsRegU cmp, label labl) %{
12669 match(CountedLoopEnd cop cmp);
12670 effect(USE labl);
12672 ins_cost(300);
12673 format %{ "J$cop,u $labl\t# Loop end" %}
12674 size(6);
12675 opcode(0x0F, 0x80);
12676 ins_encode( Jcc( cop, labl) );
12677 ins_pipe( pipe_jump );
12678 ins_pc_relative(1);
12679 %}
12681 instruct jmpLoopEndUCF(cmpOpUCF cop, eFlagsRegUCF cmp, label labl) %{
12682 match(CountedLoopEnd cop cmp);
12683 effect(USE labl);
12685 ins_cost(200);
12686 format %{ "J$cop,u $labl\t# Loop end" %}
12687 opcode(0x0F, 0x80);
12688 ins_encode( Jcc( cop, labl) );
12689 ins_pipe( pipe_jump );
12690 ins_pc_relative(1);
12691 %}
12692 */
12694 // This match pattern is created for StoreIConditional since I cannot match IfNode without a RegFlags! fujie 2012/07/17
12695 instruct jmpCon_flags(cmpOp cop, FlagsReg cr, label labl) %{
12696 match(If cop cr);
12697 effect(USE labl);
12699 ins_cost(300);
12700 format %{ "J$cop $labl #mips uses AT as eflag @jmpCon_flags" %}
12702 ins_encode %{
12703 Label &L = *($labl$$label);
12704 switch($cop$$cmpcode)
12705 {
12706 case 0x01: //equal
12707 if (&L)
12708 __ bne(AT, R0, L);
12709 else
12710 __ bne(AT, R0, (int)0);
12711 break;
12712 case 0x02: //not equal
12713 if (&L)
12714 __ beq(AT, R0, L);
12715 else
12716 __ beq(AT, R0, (int)0);
12717 break;
12718 default:
12719 Unimplemented();
12720 }
12721 __ nop();
12722 %}
12724 ins_pipe( pipe_jump );
12725 ins_pc_relative(1);
12726 %}
12729 // ============================================================================
12730 // The 2nd slow-half of a subtype check. Scan the subklass's 2ndary superklass
12731 // array for an instance of the superklass. Set a hidden internal cache on a
12732 // hit (cache is checked with exposed code in gen_subtype_check()). Return
12733 // NZ for a miss or zero for a hit. The encoding ALSO sets flags.
12734 instruct partialSubtypeCheck( mRegP result, no_T8_mRegP sub, no_T8_mRegP super, mT8RegI tmp ) %{
12735 match(Set result (PartialSubtypeCheck sub super));
12736 effect(KILL tmp);
12737 ins_cost(1100); // slightly larger than the next version
12738 format %{ "partialSubtypeCheck result=$result, sub=$sub, super=$super, tmp=$tmp " %}
12740 ins_encode( enc_PartialSubtypeCheck(result, sub, super, tmp) );
12741 ins_pipe( pipe_slow );
12742 %}
12745 // Conditional-store of an int value.
12746 // ZF flag is set on success, reset otherwise. Implemented with a CMPXCHG on Intel.
12747 instruct storeIConditional( memory mem, mRegI oldval, mRegI newval, FlagsReg cr ) %{
12748 match(Set cr (StoreIConditional mem (Binary oldval newval)));
12749 // effect(KILL oldval);
12750 format %{ "CMPXCHG $newval, $mem, $oldval \t# @storeIConditional" %}
12752 ins_encode %{
12753 Register oldval = $oldval$$Register;
12754 Register newval = $newval$$Register;
12755 Address addr(as_Register($mem$$base), $mem$$disp);
12756 Label again, failure;
12758 // int base = $mem$$base;
12759 int index = $mem$$index;
12760 int scale = $mem$$scale;
12761 int disp = $mem$$disp;
12763 guarantee(Assembler::is_simm16(disp), "");
12765 if( index != 0 ) {
12766 __ stop("in storeIConditional: index != 0");
12767 } else {
12768 __ bind(again);
12769 if(UseSyncLevel <= 1000) __ sync();
12770 __ ll(AT, addr);
12771 __ bne(AT, oldval, failure);
12772 __ delayed()->addu(AT, R0, R0);
12774 __ addu(AT, newval, R0);
12775 __ sc(AT, addr);
12776 __ beq(AT, R0, again);
12777 __ delayed()->addiu(AT, R0, 0xFF);
12778 __ bind(failure);
12779 __ sync();
12780 }
12781 %}
12783 ins_pipe( long_memory_op );
12784 %}
12786 // Conditional-store of a long value.
12787 // ZF flag is set on success, reset otherwise. Implemented with a CMPXCHG.
12788 instruct storeLConditional(memory mem, t2RegL oldval, mRegL newval, FlagsReg cr )
12789 %{
12790 match(Set cr (StoreLConditional mem (Binary oldval newval)));
12791 effect(KILL oldval);
12793 format %{ "cmpxchg $mem, $newval\t# If $oldval == $mem then store $newval into $mem" %}
12794 ins_encode%{
12795 Register oldval = $oldval$$Register;
12796 Register newval = $newval$$Register;
12797 Address addr((Register)$mem$$base, $mem$$disp);
12799 int index = $mem$$index;
12800 int scale = $mem$$scale;
12801 int disp = $mem$$disp;
12803 guarantee(Assembler::is_simm16(disp), "");
12805 if( index != 0 ) {
12806 __ stop("in storeIConditional: index != 0");
12807 } else {
12808 __ cmpxchg(newval, addr, oldval);
12809 }
12810 %}
12811 ins_pipe( long_memory_op );
12812 %}
12815 instruct compareAndSwapI( mRegI res, mRegP mem_ptr, mS2RegI oldval, mRegI newval) %{
12816 match(Set res (CompareAndSwapI mem_ptr (Binary oldval newval)));
12817 effect(KILL oldval);
12818 // match(CompareAndSwapI mem_ptr (Binary oldval newval));
12819 format %{ "CMPXCHG $newval, [$mem_ptr], $oldval @ compareAndSwapI\n\t"
12820 "MOV $res, 1 @ compareAndSwapI\n\t"
12821 "BNE AT, R0 @ compareAndSwapI\n\t"
12822 "MOV $res, 0 @ compareAndSwapI\n"
12823 "L:" %}
12824 ins_encode %{
12825 Register newval = $newval$$Register;
12826 Register oldval = $oldval$$Register;
12827 Register res = $res$$Register;
12828 Address addr($mem_ptr$$Register, 0);
12829 Label L;
12831 __ cmpxchg32(newval, addr, oldval);
12832 __ move(res, AT);
12833 %}
12834 ins_pipe( long_memory_op );
12835 %}
12837 //FIXME:
12838 instruct compareAndSwapP( mRegI res, mRegP mem_ptr, s2_RegP oldval, mRegP newval) %{
12839 match(Set res (CompareAndSwapP mem_ptr (Binary oldval newval)));
12840 effect(KILL oldval);
12841 format %{ "CMPXCHG $newval, [$mem_ptr], $oldval @ compareAndSwapP\n\t"
12842 "MOV $res, AT @ compareAndSwapP\n\t"
12843 "L:" %}
12844 ins_encode %{
12845 Register newval = $newval$$Register;
12846 Register oldval = $oldval$$Register;
12847 Register res = $res$$Register;
12848 Address addr($mem_ptr$$Register, 0);
12849 Label L;
12851 __ cmpxchg(newval, addr, oldval);
12852 __ move(res, AT);
12853 %}
12854 ins_pipe( long_memory_op );
12855 %}
12857 instruct compareAndSwapN( mRegI res, mRegP mem_ptr, t2_RegN oldval, mRegN newval) %{
12858 match(Set res (CompareAndSwapN mem_ptr (Binary oldval newval)));
12859 effect(KILL oldval);
12860 format %{ "CMPXCHG $newval, [$mem_ptr], $oldval @ compareAndSwapN\n\t"
12861 "MOV $res, AT @ compareAndSwapN\n\t"
12862 "L:" %}
12863 ins_encode %{
12864 Register newval = $newval$$Register;
12865 Register oldval = $oldval$$Register;
12866 Register res = $res$$Register;
12867 Address addr($mem_ptr$$Register, 0);
12868 Label L;
12870 /* 2013/7/19 Jin: cmpxchg32 is implemented with ll/sc, which will do sign extension.
12871 * Thus, we should extend oldval's sign for correct comparision.
12872 */
12873 __ sll(oldval, oldval, 0);
12875 __ cmpxchg32(newval, addr, oldval);
12876 __ move(res, AT);
12877 %}
12878 ins_pipe( long_memory_op );
12879 %}
12881 //----------Max and Min--------------------------------------------------------
12882 // Min Instructions
12883 ////
12884 // *** Min and Max using the conditional move are slower than the
12885 // *** branch version on a Pentium III.
12886 // // Conditional move for min
12887 //instruct cmovI_reg_lt( eRegI op2, eRegI op1, eFlagsReg cr ) %{
12888 // effect( USE_DEF op2, USE op1, USE cr );
12889 // format %{ "CMOVlt $op2,$op1\t! min" %}
12890 // opcode(0x4C,0x0F);
12891 // ins_encode( OpcS, OpcP, RegReg( op2, op1 ) );
12892 // ins_pipe( pipe_cmov_reg );
12893 //%}
12894 //
12895 //// Min Register with Register (P6 version)
12896 //instruct minI_eReg_p6( eRegI op1, eRegI op2 ) %{
12897 // predicate(VM_Version::supports_cmov() );
12898 // match(Set op2 (MinI op1 op2));
12899 // ins_cost(200);
12900 // expand %{
12901 // eFlagsReg cr;
12902 // compI_eReg(cr,op1,op2);
12903 // cmovI_reg_lt(op2,op1,cr);
12904 // %}
12905 //%}
12907 // Min Register with Register (generic version)
12908 instruct minI_Reg_Reg(mRegI dst, mRegI src) %{
12909 match(Set dst (MinI dst src));
12910 //effect(KILL flags);
12911 ins_cost(80);
12913 format %{ "MIN $dst, $src @minI_Reg_Reg" %}
12914 ins_encode %{
12915 Register dst = $dst$$Register;
12916 Register src = $src$$Register;
12918 __ slt(AT, src, dst);
12919 __ movn(dst, src, AT);
12921 %}
12923 ins_pipe( pipe_slow );
12924 %}
12926 // Max Register with Register
12927 // *** Min and Max using the conditional move are slower than the
12928 // *** branch version on a Pentium III.
12929 // // Conditional move for max
12930 //instruct cmovI_reg_gt( eRegI op2, eRegI op1, eFlagsReg cr ) %{
12931 // effect( USE_DEF op2, USE op1, USE cr );
12932 // format %{ "CMOVgt $op2,$op1\t! max" %}
12933 // opcode(0x4F,0x0F);
12934 // ins_encode( OpcS, OpcP, RegReg( op2, op1 ) );
12935 // ins_pipe( pipe_cmov_reg );
12936 //%}
12937 //
12938 // // Max Register with Register (P6 version)
12939 //instruct maxI_eReg_p6( eRegI op1, eRegI op2 ) %{
12940 // predicate(VM_Version::supports_cmov() );
12941 // match(Set op2 (MaxI op1 op2));
12942 // ins_cost(200);
12943 // expand %{
12944 // eFlagsReg cr;
12945 // compI_eReg(cr,op1,op2);
12946 // cmovI_reg_gt(op2,op1,cr);
12947 // %}
12948 //%}
12950 // Max Register with Register (generic version)
12951 instruct maxI_Reg_Reg(mRegI dst, mRegI src) %{
12952 match(Set dst (MaxI dst src));
12953 ins_cost(80);
12955 format %{ "MAX $dst, $src @maxI_Reg_Reg" %}
12957 ins_encode %{
12958 Register dst = $dst$$Register;
12959 Register src = $src$$Register;
12961 __ slt(AT, dst, src);
12962 __ movn(dst, src, AT);
12964 %}
12966 ins_pipe( pipe_slow );
12967 %}
12969 instruct maxI_Reg_zero(mRegI dst, immI0 zero) %{
12970 match(Set dst (MaxI dst zero));
12971 ins_cost(50);
12973 format %{ "MAX $dst, 0 @maxI_Reg_zero" %}
12975 ins_encode %{
12976 Register dst = $dst$$Register;
12978 __ slt(AT, dst, R0);
12979 __ movn(dst, R0, AT);
12981 %}
12983 ins_pipe( pipe_slow );
12984 %}
12986 instruct zerox_long_reg_reg(mRegL dst, mRegL src, immL_32bits mask)
12987 %{
12988 match(Set dst (AndL src mask));
12990 format %{ "movl $dst, $src\t# zero-extend long @ zerox_long_reg_reg" %}
12991 ins_encode %{
12992 Register dst = $dst$$Register;
12993 Register src = $src$$Register;
12995 __ dext(dst, src, 0, 32);
12996 %}
12997 ins_pipe(ialu_regI_regI);
12998 %}
13000 instruct combine_i2l(mRegL dst, mRegI src1, immL_32bits mask, mRegI src2, immI_32 shift32)
13001 %{
13002 match(Set dst (OrL (AndL (ConvI2L src1) mask) (LShiftL (ConvI2L src2) shift32)));
13004 format %{ "combine_i2l $dst, $src2(H), $src1(L) @ combine_i2l" %}
13005 ins_encode %{
13006 Register dst = $dst$$Register;
13007 Register src1 = $src1$$Register;
13008 Register src2 = $src2$$Register;
13010 if (src1 == dst) {
13011 __ dinsu(dst, src2, 32, 32);
13012 } else if (src2 == dst) {
13013 __ dsll32(dst, dst, 0);
13014 __ dins(dst, src1, 0, 32);
13015 } else {
13016 __ dext(dst, src1, 0, 32);
13017 __ dinsu(dst, src2, 32, 32);
13018 }
13019 %}
13020 ins_pipe(ialu_regI_regI);
13021 %}
13023 // Zero-extend convert int to long
13024 instruct convI2L_reg_reg_zex(mRegL dst, mRegI src, immL_32bits mask)
13025 %{
13026 match(Set dst (AndL (ConvI2L src) mask));
13028 format %{ "movl $dst, $src\t# i2l zero-extend @ convI2L_reg_reg_zex" %}
13029 ins_encode %{
13030 Register dst = $dst$$Register;
13031 Register src = $src$$Register;
13033 __ dext(dst, src, 0, 32);
13034 %}
13035 ins_pipe(ialu_regI_regI);
13036 %}
13038 instruct convL2I2L_reg_reg_zex(mRegL dst, mRegL src, immL_32bits mask)
13039 %{
13040 match(Set dst (AndL (ConvI2L (ConvL2I src)) mask));
13042 format %{ "movl $dst, $src\t# i2l zero-extend @ convL2I2L_reg_reg_zex" %}
13043 ins_encode %{
13044 Register dst = $dst$$Register;
13045 Register src = $src$$Register;
13047 __ dext(dst, src, 0, 32);
13048 %}
13049 ins_pipe(ialu_regI_regI);
13050 %}
13052 // Match loading integer and casting it to unsigned int in long register.
13053 // LoadI + ConvI2L + AndL 0xffffffff.
13054 instruct loadUI2L_rmask(mRegL dst, memory mem, immL_32bits mask) %{
13055 match(Set dst (AndL (ConvI2L (LoadI mem)) mask));
13057 format %{ "lwu $dst, $mem \t// zero-extend to long @ loadUI2L_rmask" %}
13058 ins_encode (load_N_enc(dst, mem));
13059 ins_pipe(ialu_loadI);
13060 %}
13062 instruct loadUI2L_lmask(mRegL dst, memory mem, immL_32bits mask) %{
13063 match(Set dst (AndL mask (ConvI2L (LoadI mem))));
13065 format %{ "lwu $dst, $mem \t// zero-extend to long @ loadUI2L_lmask" %}
13066 ins_encode (load_N_enc(dst, mem));
13067 ins_pipe(ialu_loadI);
13068 %}
13071 // ============================================================================
13072 // Safepoint Instruction
13073 instruct safePoint_poll(mRegP poll) %{
13074 match(SafePoint poll);
13075 effect(USE poll);
13077 ins_cost(125);
13078 format %{ "Safepoint @ [$poll] : poll for GC @ safePoint_poll" %}
13080 ins_encode %{
13081 Register poll_reg = $poll$$Register;
13083 __ block_comment("Safepoint:");
13084 __ relocate(relocInfo::poll_type);
13085 __ lw(AT, poll_reg, 0);
13086 %}
13088 ins_pipe( ialu_storeI );
13089 %}
13091 //----------Arithmetic Conversion Instructions---------------------------------
13093 instruct roundFloat_nop(regF dst)
13094 %{
13095 match(Set dst (RoundFloat dst));
13097 ins_cost(0);
13098 ins_encode();
13099 ins_pipe(empty);
13100 %}
13102 instruct roundDouble_nop(regD dst)
13103 %{
13104 match(Set dst (RoundDouble dst));
13106 ins_cost(0);
13107 ins_encode();
13108 ins_pipe(empty);
13109 %}
13111 //---------- Zeros Count Instructions ------------------------------------------
13112 // CountLeadingZerosINode CountTrailingZerosINode
13113 instruct countLeadingZerosI(mRegI dst, mRegI src) %{
13114 predicate(UseCountLeadingZerosInstruction);
13115 match(Set dst (CountLeadingZerosI src));
13117 format %{ "clz $dst, $src\t# count leading zeros (int)" %}
13118 ins_encode %{
13119 __ clz($dst$$Register, $src$$Register);
13120 %}
13121 ins_pipe( ialu_regL_regL );
13122 %}
13124 instruct countLeadingZerosL(mRegI dst, mRegL src) %{
13125 predicate(UseCountLeadingZerosInstruction);
13126 match(Set dst (CountLeadingZerosL src));
13128 format %{ "dclz $dst, $src\t# count leading zeros (long)" %}
13129 ins_encode %{
13130 __ dclz($dst$$Register, $src$$Register);
13131 %}
13132 ins_pipe( ialu_regL_regL );
13133 %}
13135 instruct countTrailingZerosI(mRegI dst, mRegI src) %{
13136 predicate(UseCountTrailingZerosInstruction);
13137 match(Set dst (CountTrailingZerosI src));
13139 format %{ "ctz $dst, $src\t# count trailing zeros (int)" %}
13140 ins_encode %{
13141 // ctz and dctz is gs instructions.
13142 __ ctz($dst$$Register, $src$$Register);
13143 %}
13144 ins_pipe( ialu_regL_regL );
13145 %}
13147 instruct countTrailingZerosL(mRegI dst, mRegL src) %{
13148 predicate(UseCountTrailingZerosInstruction);
13149 match(Set dst (CountTrailingZerosL src));
13151 format %{ "dcto $dst, $src\t# count trailing zeros (long)" %}
13152 ins_encode %{
13153 __ dctz($dst$$Register, $src$$Register);
13154 %}
13155 ins_pipe( ialu_regL_regL );
13156 %}
13158 // ====================VECTOR INSTRUCTIONS=====================================
13160 // Load vectors (8 bytes long)
13161 instruct loadV8(vecD dst, memory mem) %{
13162 predicate(n->as_LoadVector()->memory_size() == 8);
13163 match(Set dst (LoadVector mem));
13164 ins_cost(125);
13165 format %{ "load $dst, $mem\t! load vector (8 bytes)" %}
13166 ins_encode(load_D_enc(dst, mem));
13167 ins_pipe( fpu_loadF );
13168 %}
13170 // Store vectors (8 bytes long)
13171 instruct storeV8(memory mem, vecD src) %{
13172 predicate(n->as_StoreVector()->memory_size() == 8);
13173 match(Set mem (StoreVector mem src));
13174 ins_cost(145);
13175 format %{ "store $mem, $src\t! store vector (8 bytes)" %}
13176 ins_encode(store_D_reg_enc(mem, src));
13177 ins_pipe( fpu_storeF );
13178 %}
13180 instruct Repl8B(vecD dst, mRegI src) %{
13181 predicate(n->as_Vector()->length() == 8);
13182 match(Set dst (ReplicateB src));
13183 format %{ "replv_ob AT, $src\n\t"
13184 "dmtc1 AT, $dst\t! replicate8B" %}
13185 ins_encode %{
13186 __ replv_ob(AT, $src$$Register);
13187 __ dmtc1(AT, $dst$$FloatRegister);
13188 %}
13189 ins_pipe( pipe_mtc1 );
13190 %}
13192 instruct Repl8B_imm(vecD dst, immI con) %{
13193 predicate(n->as_Vector()->length() == 8);
13194 match(Set dst (ReplicateB con));
13195 format %{ "repl_ob AT, [$con]\n\t"
13196 "dmtc1 AT, $dst,0x00\t! replicate8B($con)" %}
13197 ins_encode %{
13198 int val = $con$$constant;
13199 __ repl_ob(AT, val);
13200 __ dmtc1(AT, $dst$$FloatRegister);
13201 %}
13202 ins_pipe( pipe_mtc1 );
13203 %}
13205 instruct Repl8B_zero(vecD dst, immI0 zero) %{
13206 predicate(n->as_Vector()->length() == 8);
13207 match(Set dst (ReplicateB zero));
13208 format %{ "dmtc1 R0, $dst\t! replicate8B zero" %}
13209 ins_encode %{
13210 __ dmtc1(R0, $dst$$FloatRegister);
13211 %}
13212 ins_pipe( pipe_mtc1 );
13213 %}
13215 instruct Repl8B_M1(vecD dst, immI_M1 M1) %{
13216 predicate(n->as_Vector()->length() == 8);
13217 match(Set dst (ReplicateB M1));
13218 format %{ "dmtc1 -1, $dst\t! replicate8B -1" %}
13219 ins_encode %{
13220 __ nor(AT, R0, R0);
13221 __ dmtc1(AT, $dst$$FloatRegister);
13222 %}
13223 ins_pipe( pipe_mtc1 );
13224 %}
13226 instruct Repl4S(vecD dst, mRegI src) %{
13227 predicate(n->as_Vector()->length() == 4);
13228 match(Set dst (ReplicateS src));
13229 format %{ "replv_qh AT, $src\n\t"
13230 "dmtc1 AT, $dst\t! replicate4S" %}
13231 ins_encode %{
13232 __ replv_qh(AT, $src$$Register);
13233 __ dmtc1(AT, $dst$$FloatRegister);
13234 %}
13235 ins_pipe( pipe_mtc1 );
13236 %}
13238 instruct Repl4S_imm(vecD dst, immI con) %{
13239 predicate(n->as_Vector()->length() == 4);
13240 match(Set dst (ReplicateS con));
13241 format %{ "replv_qh AT, [$con]\n\t"
13242 "dmtc1 AT, $dst\t! replicate4S($con)" %}
13243 ins_encode %{
13244 int val = $con$$constant;
13245 if ( Assembler::is_simm(val, 10)) {
13246 //repl_qh supports 10 bits immediate
13247 __ repl_qh(AT, val);
13248 } else {
13249 __ li32(AT, val);
13250 __ replv_qh(AT, AT);
13251 }
13252 __ dmtc1(AT, $dst$$FloatRegister);
13253 %}
13254 ins_pipe( pipe_mtc1 );
13255 %}
13257 instruct Repl4S_zero(vecD dst, immI0 zero) %{
13258 predicate(n->as_Vector()->length() == 4);
13259 match(Set dst (ReplicateS zero));
13260 format %{ "dmtc1 R0, $dst\t! replicate4S zero" %}
13261 ins_encode %{
13262 __ dmtc1(R0, $dst$$FloatRegister);
13263 %}
13264 ins_pipe( pipe_mtc1 );
13265 %}
13267 instruct Repl4S_M1(vecD dst, immI_M1 M1) %{
13268 predicate(n->as_Vector()->length() == 4);
13269 match(Set dst (ReplicateS M1));
13270 format %{ "dmtc1 -1, $dst\t! replicate4S -1" %}
13271 ins_encode %{
13272 __ nor(AT, R0, R0);
13273 __ dmtc1(AT, $dst$$FloatRegister);
13274 %}
13275 ins_pipe( pipe_mtc1 );
13276 %}
13278 // Replicate integer (4 byte) scalar to be vector
13279 instruct Repl2I(vecD dst, mRegI src) %{
13280 predicate(n->as_Vector()->length() == 2);
13281 match(Set dst (ReplicateI src));
13282 format %{ "dins AT, $src, 0, 32\n\t"
13283 "dinsu AT, $src, 32, 32\n\t"
13284 "dmtc1 AT, $dst\t! replicate2I" %}
13285 ins_encode %{
13286 __ dins(AT, $src$$Register, 0, 32);
13287 __ dinsu(AT, $src$$Register, 32, 32);
13288 __ dmtc1(AT, $dst$$FloatRegister);
13289 %}
13290 ins_pipe( pipe_mtc1 );
13291 %}
13293 // Replicate integer (4 byte) scalar immediate to be vector by loading from const table.
13294 instruct Repl2I_imm(vecD dst, immI con, mA7RegI tmp) %{
13295 predicate(n->as_Vector()->length() == 2);
13296 match(Set dst (ReplicateI con));
13297 effect(KILL tmp);
13298 format %{ "li32 AT, [$con], 32\n\t"
13299 "replv_pw AT, AT\n\t"
13300 "dmtc1 AT, $dst\t! replicate2I($con)" %}
13301 ins_encode %{
13302 int val = $con$$constant;
13303 __ li32(AT, val);
13304 __ replv_pw(AT, AT);
13305 __ dmtc1(AT, $dst$$FloatRegister);
13306 %}
13307 ins_pipe( pipe_mtc1 );
13308 %}
13310 // Replicate integer (4 byte) scalar zero to be vector
13311 instruct Repl2I_zero(vecD dst, immI0 zero) %{
13312 predicate(n->as_Vector()->length() == 2);
13313 match(Set dst (ReplicateI zero));
13314 format %{ "dmtc1 R0, $dst\t! replicate2I zero" %}
13315 ins_encode %{
13316 __ dmtc1(R0, $dst$$FloatRegister);
13317 %}
13318 ins_pipe( pipe_mtc1 );
13319 %}
13321 // Replicate integer (4 byte) scalar -1 to be vector
13322 instruct Repl2I_M1(vecD dst, immI_M1 M1) %{
13323 predicate(n->as_Vector()->length() == 2);
13324 match(Set dst (ReplicateI M1));
13325 format %{ "dmtc1 -1, $dst\t! replicate2I -1, use AT" %}
13326 ins_encode %{
13327 __ nor(AT, R0, R0);
13328 __ dmtc1(AT, $dst$$FloatRegister);
13329 %}
13330 ins_pipe( pipe_mtc1 );
13331 %}
13333 // Replicate float (4 byte) scalar to be vector
13334 instruct Repl2F(vecD dst, regF src) %{
13335 predicate(n->as_Vector()->length() == 2);
13336 match(Set dst (ReplicateF src));
13337 format %{ "cvt.ps $dst, $src, $src\t! replicate2F" %}
13338 ins_encode %{
13339 __ cvt_ps_s($dst$$FloatRegister, $src$$FloatRegister, $src$$FloatRegister);
13340 %}
13341 ins_pipe( pipe_slow );
13342 %}
13344 // Replicate float (4 byte) scalar zero to be vector
13345 instruct Repl2F_zero(vecD dst, immF0 zero) %{
13346 predicate(n->as_Vector()->length() == 2);
13347 match(Set dst (ReplicateF zero));
13348 format %{ "dmtc1 R0, $dst\t! replicate2F zero" %}
13349 ins_encode %{
13350 __ dmtc1(R0, $dst$$FloatRegister);
13351 %}
13352 ins_pipe( pipe_mtc1 );
13353 %}
13356 // ====================VECTOR ARITHMETIC=======================================
13358 // --------------------------------- ADD --------------------------------------
13360 // Floats vector add
13361 instruct vadd2F(vecD dst, vecD src) %{
13362 predicate(n->as_Vector()->length() == 2);
13363 match(Set dst (AddVF dst src));
13364 format %{ "add.ps $dst,$src\t! add packed2F" %}
13365 ins_encode %{
13366 __ add_ps($dst$$FloatRegister, $dst$$FloatRegister, $src$$FloatRegister);
13367 %}
13368 ins_pipe( pipe_slow );
13369 %}
13371 instruct vadd2F3(vecD dst, vecD src1, vecD src2) %{
13372 predicate(n->as_Vector()->length() == 2);
13373 match(Set dst (AddVF src1 src2));
13374 format %{ "add.ps $dst,$src1,$src2\t! add packed2F" %}
13375 ins_encode %{
13376 __ add_ps($dst$$FloatRegister, $src1$$FloatRegister, $src2$$FloatRegister);
13377 %}
13378 ins_pipe( fpu_regF_regF );
13379 %}
13381 // --------------------------------- SUB --------------------------------------
13383 // Floats vector sub
13384 instruct vsub2F(vecD dst, vecD src) %{
13385 predicate(n->as_Vector()->length() == 2);
13386 match(Set dst (SubVF dst src));
13387 format %{ "sub.ps $dst,$src\t! sub packed2F" %}
13388 ins_encode %{
13389 __ sub_ps($dst$$FloatRegister, $dst$$FloatRegister, $src$$FloatRegister);
13390 %}
13391 ins_pipe( fpu_regF_regF );
13392 %}
13394 // --------------------------------- MUL --------------------------------------
13396 // Floats vector mul
13397 instruct vmul2F(vecD dst, vecD src) %{
13398 predicate(n->as_Vector()->length() == 2);
13399 match(Set dst (MulVF dst src));
13400 format %{ "mul.ps $dst, $src\t! mul packed2F" %}
13401 ins_encode %{
13402 __ mul_ps($dst$$FloatRegister, $dst$$FloatRegister, $src$$FloatRegister);
13403 %}
13404 ins_pipe( fpu_regF_regF );
13405 %}
13407 instruct vmul2F3(vecD dst, vecD src1, vecD src2) %{
13408 predicate(n->as_Vector()->length() == 2);
13409 match(Set dst (MulVF src1 src2));
13410 format %{ "mul.ps $dst, $src1, $src2\t! mul packed2F" %}
13411 ins_encode %{
13412 __ mul_ps($dst$$FloatRegister, $src1$$FloatRegister, $src2$$FloatRegister);
13413 %}
13414 ins_pipe( fpu_regF_regF );
13415 %}
13417 // --------------------------------- DIV --------------------------------------
13418 // MIPS do not have div.ps
13421 //----------PEEPHOLE RULES-----------------------------------------------------
13422 // These must follow all instruction definitions as they use the names
13423 // defined in the instructions definitions.
13424 //
13425 // peepmatch ( root_instr_name [preceeding_instruction]* );
13426 //
13427 // peepconstraint %{
13428 // (instruction_number.operand_name relational_op instruction_number.operand_name
13429 // [, ...] );
13430 // // instruction numbers are zero-based using left to right order in peepmatch
13431 //
13432 // peepreplace ( instr_name ( [instruction_number.operand_name]* ) );
13433 // // provide an instruction_number.operand_name for each operand that appears
13434 // // in the replacement instruction's match rule
13435 //
13436 // ---------VM FLAGS---------------------------------------------------------
13437 //
13438 // All peephole optimizations can be turned off using -XX:-OptoPeephole
13439 //
13440 // Each peephole rule is given an identifying number starting with zero and
13441 // increasing by one in the order seen by the parser. An individual peephole
13442 // can be enabled, and all others disabled, by using -XX:OptoPeepholeAt=#
13443 // on the command-line.
13444 //
13445 // ---------CURRENT LIMITATIONS----------------------------------------------
13446 //
13447 // Only match adjacent instructions in same basic block
13448 // Only equality constraints
13449 // Only constraints between operands, not (0.dest_reg == EAX_enc)
13450 // Only one replacement instruction
13451 //
13452 // ---------EXAMPLE----------------------------------------------------------
13453 //
13454 // // pertinent parts of existing instructions in architecture description
13455 // instruct movI(eRegI dst, eRegI src) %{
13456 // match(Set dst (CopyI src));
13457 // %}
13458 //
13459 // instruct incI_eReg(eRegI dst, immI1 src, eFlagsReg cr) %{
13460 // match(Set dst (AddI dst src));
13461 // effect(KILL cr);
13462 // %}
13463 //
13464 // // Change (inc mov) to lea
13465 // peephole %{
13466 // // increment preceeded by register-register move
13467 // peepmatch ( incI_eReg movI );
13468 // // require that the destination register of the increment
13469 // // match the destination register of the move
13470 // peepconstraint ( 0.dst == 1.dst );
13471 // // construct a replacement instruction that sets
13472 // // the destination to ( move's source register + one )
13473 // peepreplace ( leaI_eReg_immI( 0.dst 1.src 0.src ) );
13474 // %}
13475 //
13476 // Implementation no longer uses movX instructions since
13477 // machine-independent system no longer uses CopyX nodes.
13478 //
13479 // peephole %{
13480 // peepmatch ( incI_eReg movI );
13481 // peepconstraint ( 0.dst == 1.dst );
13482 // peepreplace ( leaI_eReg_immI( 0.dst 1.src 0.src ) );
13483 // %}
13484 //
13485 // peephole %{
13486 // peepmatch ( decI_eReg movI );
13487 // peepconstraint ( 0.dst == 1.dst );
13488 // peepreplace ( leaI_eReg_immI( 0.dst 1.src 0.src ) );
13489 // %}
13490 //
13491 // peephole %{
13492 // peepmatch ( addI_eReg_imm movI );
13493 // peepconstraint ( 0.dst == 1.dst );
13494 // peepreplace ( leaI_eReg_immI( 0.dst 1.src 0.src ) );
13495 // %}
13496 //
13497 // peephole %{
13498 // peepmatch ( addP_eReg_imm movP );
13499 // peepconstraint ( 0.dst == 1.dst );
13500 // peepreplace ( leaP_eReg_immI( 0.dst 1.src 0.src ) );
13501 // %}
13503 // // Change load of spilled value to only a spill
13504 // instruct storeI(memory mem, eRegI src) %{
13505 // match(Set mem (StoreI mem src));
13506 // %}
13507 //
13508 // instruct loadI(eRegI dst, memory mem) %{
13509 // match(Set dst (LoadI mem));
13510 // %}
13511 //
13512 //peephole %{
13513 // peepmatch ( loadI storeI );
13514 // peepconstraint ( 1.src == 0.dst, 1.mem == 0.mem );
13515 // peepreplace ( storeI( 1.mem 1.mem 1.src ) );
13516 //%}
13518 //----------SMARTSPILL RULES---------------------------------------------------
13519 // These must follow all instruction definitions as they use the names
13520 // defined in the instructions definitions.