Tue, 28 Feb 2017 09:53:43 -0500
[C2] Remove storeImmN & storeImmNKlass in mips_64.ad
1 //
2 // Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved.
3 // Copyright (c) 2015, 2016, Loongson Technology. All rights reserved.
4 // DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5 //
6 // This code is free software; you can redistribute it and/or modify it
7 // under the terms of the GNU General Public License version 2 only, as
8 // published by the Free Software Foundation.
9 //
10 // This code is distributed in the hope that it will be useful, but WITHOUT
11 // ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 // FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
13 // version 2 for more details (a copy is included in the LICENSE file that
14 // accompanied this code).
15 //
16 // You should have received a copy of the GNU General Public License version
17 // 2 along with this work; if not, write to the Free Software Foundation,
18 // Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
19 //
20 // Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
21 // or visit www.oracle.com if you need additional information or have any
22 // questions.
23 //
24 //
26 // GodSon3 Architecture Description File
28 //----------REGISTER DEFINITION BLOCK------------------------------------------
29 // This information is used by the matcher and the register allocator to
30 // describe individual registers and classes of registers within the target
// architecture.
33 // format:
34 // reg_def name (call convention, c-call convention, ideal type, encoding);
35 // call convention :
36 // NS = No-Save
37 // SOC = Save-On-Call
38 // SOE = Save-On-Entry
39 // AS = Always-Save
40 // ideal type :
41 // see opto/opcodes.hpp for more info
42 // reg_class name (reg, ...);
43 // alloc_class name (reg, ...);
44 register %{
46 // General Registers
47 // Integer Registers
48 reg_def R0 ( NS, NS, Op_RegI, 0, VMRegImpl::Bad());
49 reg_def AT ( NS, NS, Op_RegI, 1, AT->as_VMReg());
50 reg_def AT_H ( NS, NS, Op_RegI, 1, AT->as_VMReg()->next());
51 reg_def V0 (SOC, SOC, Op_RegI, 2, V0->as_VMReg());
52 reg_def V0_H (SOC, SOC, Op_RegI, 2, V0->as_VMReg()->next());
53 reg_def V1 (SOC, SOC, Op_RegI, 3, V1->as_VMReg());
54 reg_def V1_H (SOC, SOC, Op_RegI, 3, V1->as_VMReg()->next());
55 reg_def A0 (SOC, SOC, Op_RegI, 4, A0->as_VMReg());
56 reg_def A0_H (SOC, SOC, Op_RegI, 4, A0->as_VMReg()->next());
57 reg_def A1 (SOC, SOC, Op_RegI, 5, A1->as_VMReg());
58 reg_def A1_H (SOC, SOC, Op_RegI, 5, A1->as_VMReg()->next());
59 reg_def A2 (SOC, SOC, Op_RegI, 6, A2->as_VMReg());
60 reg_def A2_H (SOC, SOC, Op_RegI, 6, A2->as_VMReg()->next());
61 reg_def A3 (SOC, SOC, Op_RegI, 7, A3->as_VMReg());
62 reg_def A3_H (SOC, SOC, Op_RegI, 7, A3->as_VMReg()->next());
63 reg_def A4 (SOC, SOC, Op_RegI, 8, A4->as_VMReg());
64 reg_def A4_H (SOC, SOC, Op_RegI, 8, A4->as_VMReg()->next());
65 reg_def A5 (SOC, SOC, Op_RegI, 9, A5->as_VMReg());
66 reg_def A5_H (SOC, SOC, Op_RegI, 9, A5->as_VMReg()->next());
67 reg_def A6 (SOC, SOC, Op_RegI, 10, A6->as_VMReg());
68 reg_def A6_H (SOC, SOC, Op_RegI, 10, A6->as_VMReg()->next());
69 reg_def A7 (SOC, SOC, Op_RegI, 11, A7->as_VMReg());
70 reg_def A7_H (SOC, SOC, Op_RegI, 11, A7->as_VMReg()->next());
71 reg_def T0 (SOC, SOC, Op_RegI, 12, T0->as_VMReg());
72 reg_def T0_H (SOC, SOC, Op_RegI, 12, T0->as_VMReg()->next());
73 reg_def T1 (SOC, SOC, Op_RegI, 13, T1->as_VMReg());
74 reg_def T1_H (SOC, SOC, Op_RegI, 13, T1->as_VMReg()->next());
75 reg_def T2 (SOC, SOC, Op_RegI, 14, T2->as_VMReg());
76 reg_def T2_H (SOC, SOC, Op_RegI, 14, T2->as_VMReg()->next());
77 reg_def T3 (SOC, SOC, Op_RegI, 15, T3->as_VMReg());
78 reg_def T3_H (SOC, SOC, Op_RegI, 15, T3->as_VMReg()->next());
79 reg_def S0 (SOC, SOE, Op_RegI, 16, S0->as_VMReg());
80 reg_def S0_H (SOC, SOE, Op_RegI, 16, S0->as_VMReg()->next());
81 reg_def S1 (SOC, SOE, Op_RegI, 17, S1->as_VMReg());
82 reg_def S1_H (SOC, SOE, Op_RegI, 17, S1->as_VMReg()->next());
83 reg_def S2 (SOC, SOE, Op_RegI, 18, S2->as_VMReg());
84 reg_def S2_H (SOC, SOE, Op_RegI, 18, S2->as_VMReg()->next());
85 reg_def S3 (SOC, SOE, Op_RegI, 19, S3->as_VMReg());
86 reg_def S3_H (SOC, SOE, Op_RegI, 19, S3->as_VMReg()->next());
87 reg_def S4 (SOC, SOE, Op_RegI, 20, S4->as_VMReg());
88 reg_def S4_H (SOC, SOE, Op_RegI, 20, S4->as_VMReg()->next());
89 reg_def S5 (SOC, SOE, Op_RegI, 21, S5->as_VMReg());
90 reg_def S5_H (SOC, SOE, Op_RegI, 21, S5->as_VMReg()->next());
91 reg_def S6 (SOC, SOE, Op_RegI, 22, S6->as_VMReg());
92 reg_def S6_H (SOC, SOE, Op_RegI, 22, S6->as_VMReg()->next());
93 reg_def S7 (SOC, SOE, Op_RegI, 23, S7->as_VMReg());
94 reg_def S7_H (SOC, SOE, Op_RegI, 23, S7->as_VMReg()->next());
95 reg_def T8 (SOC, SOC, Op_RegI, 24, T8->as_VMReg());
96 reg_def T8_H (SOC, SOC, Op_RegI, 24, T8->as_VMReg()->next());
97 reg_def T9 (SOC, SOC, Op_RegI, 25, T9->as_VMReg());
98 reg_def T9_H (SOC, SOC, Op_RegI, 25, T9->as_VMReg()->next());
100 // Special Registers
101 reg_def K0 ( NS, NS, Op_RegI, 26, K0->as_VMReg());
102 reg_def K1 ( NS, NS, Op_RegI, 27, K1->as_VMReg());
103 reg_def GP ( NS, NS, Op_RegI, 28, GP->as_VMReg());
104 reg_def GP_H ( NS, NS, Op_RegI, 28, GP->as_VMReg()->next());
105 reg_def SP ( NS, NS, Op_RegI, 29, SP->as_VMReg());
106 reg_def SP_H ( NS, NS, Op_RegI, 29, SP->as_VMReg()->next());
107 reg_def FP ( NS, NS, Op_RegI, 30, FP->as_VMReg());
108 reg_def FP_H ( NS, NS, Op_RegI, 30, FP->as_VMReg()->next());
109 reg_def RA ( NS, NS, Op_RegI, 31, RA->as_VMReg());
110 reg_def RA_H ( NS, NS, Op_RegI, 31, RA->as_VMReg()->next());
112 // Floating registers.
113 reg_def F0 ( SOC, SOC, Op_RegF, 0, F0->as_VMReg());
114 reg_def F0_H ( SOC, SOC, Op_RegF, 0, F0->as_VMReg()->next());
115 reg_def F1 ( SOC, SOC, Op_RegF, 1, F1->as_VMReg());
116 reg_def F1_H ( SOC, SOC, Op_RegF, 1, F1->as_VMReg()->next());
117 reg_def F2 ( SOC, SOC, Op_RegF, 2, F2->as_VMReg());
118 reg_def F2_H ( SOC, SOC, Op_RegF, 2, F2->as_VMReg()->next());
119 reg_def F3 ( SOC, SOC, Op_RegF, 3, F3->as_VMReg());
120 reg_def F3_H ( SOC, SOC, Op_RegF, 3, F3->as_VMReg()->next());
121 reg_def F4 ( SOC, SOC, Op_RegF, 4, F4->as_VMReg());
122 reg_def F4_H ( SOC, SOC, Op_RegF, 4, F4->as_VMReg()->next());
123 reg_def F5 ( SOC, SOC, Op_RegF, 5, F5->as_VMReg());
124 reg_def F5_H ( SOC, SOC, Op_RegF, 5, F5->as_VMReg()->next());
125 reg_def F6 ( SOC, SOC, Op_RegF, 6, F6->as_VMReg());
126 reg_def F6_H ( SOC, SOC, Op_RegF, 6, F6->as_VMReg()->next());
127 reg_def F7 ( SOC, SOC, Op_RegF, 7, F7->as_VMReg());
128 reg_def F7_H ( SOC, SOC, Op_RegF, 7, F7->as_VMReg()->next());
129 reg_def F8 ( SOC, SOC, Op_RegF, 8, F8->as_VMReg());
130 reg_def F8_H ( SOC, SOC, Op_RegF, 8, F8->as_VMReg()->next());
131 reg_def F9 ( SOC, SOC, Op_RegF, 9, F9->as_VMReg());
132 reg_def F9_H ( SOC, SOC, Op_RegF, 9, F9->as_VMReg()->next());
133 reg_def F10 ( SOC, SOC, Op_RegF, 10, F10->as_VMReg());
134 reg_def F10_H ( SOC, SOC, Op_RegF, 10, F10->as_VMReg()->next());
135 reg_def F11 ( SOC, SOC, Op_RegF, 11, F11->as_VMReg());
136 reg_def F11_H ( SOC, SOC, Op_RegF, 11, F11->as_VMReg()->next());
137 reg_def F12 ( SOC, SOC, Op_RegF, 12, F12->as_VMReg());
138 reg_def F12_H ( SOC, SOC, Op_RegF, 12, F12->as_VMReg()->next());
139 reg_def F13 ( SOC, SOC, Op_RegF, 13, F13->as_VMReg());
140 reg_def F13_H ( SOC, SOC, Op_RegF, 13, F13->as_VMReg()->next());
141 reg_def F14 ( SOC, SOC, Op_RegF, 14, F14->as_VMReg());
142 reg_def F14_H ( SOC, SOC, Op_RegF, 14, F14->as_VMReg()->next());
143 reg_def F15 ( SOC, SOC, Op_RegF, 15, F15->as_VMReg());
144 reg_def F15_H ( SOC, SOC, Op_RegF, 15, F15->as_VMReg()->next());
145 reg_def F16 ( SOC, SOC, Op_RegF, 16, F16->as_VMReg());
146 reg_def F16_H ( SOC, SOC, Op_RegF, 16, F16->as_VMReg()->next());
147 reg_def F17 ( SOC, SOC, Op_RegF, 17, F17->as_VMReg());
148 reg_def F17_H ( SOC, SOC, Op_RegF, 17, F17->as_VMReg()->next());
149 reg_def F18 ( SOC, SOC, Op_RegF, 18, F18->as_VMReg());
150 reg_def F18_H ( SOC, SOC, Op_RegF, 18, F18->as_VMReg()->next());
151 reg_def F19 ( SOC, SOC, Op_RegF, 19, F19->as_VMReg());
152 reg_def F19_H ( SOC, SOC, Op_RegF, 19, F19->as_VMReg()->next());
153 reg_def F20 ( SOC, SOC, Op_RegF, 20, F20->as_VMReg());
154 reg_def F20_H ( SOC, SOC, Op_RegF, 20, F20->as_VMReg()->next());
155 reg_def F21 ( SOC, SOC, Op_RegF, 21, F21->as_VMReg());
156 reg_def F21_H ( SOC, SOC, Op_RegF, 21, F21->as_VMReg()->next());
157 reg_def F22 ( SOC, SOC, Op_RegF, 22, F22->as_VMReg());
158 reg_def F22_H ( SOC, SOC, Op_RegF, 22, F22->as_VMReg()->next());
159 reg_def F23 ( SOC, SOC, Op_RegF, 23, F23->as_VMReg());
160 reg_def F23_H ( SOC, SOC, Op_RegF, 23, F23->as_VMReg()->next());
161 reg_def F24 ( SOC, SOC, Op_RegF, 24, F24->as_VMReg());
162 reg_def F24_H ( SOC, SOC, Op_RegF, 24, F24->as_VMReg()->next());
163 reg_def F25 ( SOC, SOC, Op_RegF, 25, F25->as_VMReg());
164 reg_def F25_H ( SOC, SOC, Op_RegF, 25, F25->as_VMReg()->next());
165 reg_def F26 ( SOC, SOC, Op_RegF, 26, F26->as_VMReg());
166 reg_def F26_H ( SOC, SOC, Op_RegF, 26, F26->as_VMReg()->next());
167 reg_def F27 ( SOC, SOC, Op_RegF, 27, F27->as_VMReg());
168 reg_def F27_H ( SOC, SOC, Op_RegF, 27, F27->as_VMReg()->next());
169 reg_def F28 ( SOC, SOC, Op_RegF, 28, F28->as_VMReg());
170 reg_def F28_H ( SOC, SOC, Op_RegF, 28, F28->as_VMReg()->next());
171 reg_def F29 ( SOC, SOC, Op_RegF, 29, F29->as_VMReg());
172 reg_def F29_H ( SOC, SOC, Op_RegF, 29, F29->as_VMReg()->next());
173 reg_def F30 ( SOC, SOC, Op_RegF, 30, F30->as_VMReg());
174 reg_def F30_H ( SOC, SOC, Op_RegF, 30, F30->as_VMReg()->next());
175 reg_def F31 ( SOC, SOC, Op_RegF, 31, F31->as_VMReg());
176 reg_def F31_H ( SOC, SOC, Op_RegF, 31, F31->as_VMReg()->next());
179 // ----------------------------
180 // Special Registers
181 // Condition Codes Flag Registers
182 reg_def MIPS_FLAG (SOC, SOC, Op_RegFlags, 1, as_Register(1)->as_VMReg());
183 //S6 is used for get_thread(S6)
//S5 is used for the heapbase of compressed oops
// Allocation order for the general-purpose register pairs.
// S6 is reserved for get_thread(S6); S5 holds the heapbase for compressed oops.
// Fixed: a comma was missing after "GP, GP_H", which is a syntax error in the
// ADL register list.
alloc_class chunk0(
    S7, S7_H,
    S0, S0_H,
    S1, S1_H,
    S2, S2_H,
    S4, S4_H,
    S5, S5_H,
    S6, S6_H,
    S3, S3_H,
    T2, T2_H,
    T3, T3_H,
    T8, T8_H,
    T9, T9_H,
    T1, T1_H, // inline_cache_reg
    V1, V1_H,
    A7, A7_H,
    A6, A6_H,
    A5, A5_H,
    A4, A4_H,
    V0, V0_H,
    A3, A3_H,
    A2, A2_H,
    A1, A1_H,
    A0, A0_H,
    T0, T0_H,
    GP, GP_H,
    RA, RA_H,
    SP, SP_H, // stack_pointer
    FP, FP_H  // frame_pointer
);
216 alloc_class chunk1( F0, F0_H,
217 F1, F1_H,
218 F2, F2_H,
219 F3, F3_H,
220 F4, F4_H,
221 F5, F5_H,
222 F6, F6_H,
223 F7, F7_H,
224 F8, F8_H,
225 F9, F9_H,
226 F10, F10_H,
227 F11, F11_H,
228 F20, F20_H,
229 F21, F21_H,
230 F22, F22_H,
231 F23, F23_H,
232 F24, F24_H,
233 F25, F25_H,
234 F26, F26_H,
235 F27, F27_H,
236 F28, F28_H,
237 F19, F19_H,
238 F18, F18_H,
239 F17, F17_H,
240 F16, F16_H,
241 F15, F15_H,
242 F14, F14_H,
243 F13, F13_H,
244 F12, F12_H,
245 F29, F29_H,
246 F30, F30_H,
247 F31, F31_H);
249 alloc_class chunk2(MIPS_FLAG);
251 reg_class s_reg( S0, S1, S2, S3, S4, S5, S6, S7 );
252 reg_class s0_reg( S0 );
253 reg_class s1_reg( S1 );
254 reg_class s2_reg( S2 );
255 reg_class s3_reg( S3 );
256 reg_class s4_reg( S4 );
257 reg_class s5_reg( S5 );
258 reg_class s6_reg( S6 );
259 reg_class s7_reg( S7 );
261 reg_class t_reg( T0, T1, T2, T3, T8, T9 );
262 reg_class t0_reg( T0 );
263 reg_class t1_reg( T1 );
264 reg_class t2_reg( T2 );
265 reg_class t3_reg( T3 );
266 reg_class t8_reg( T8 );
267 reg_class t9_reg( T9 );
269 reg_class a_reg( A0, A1, A2, A3, A4, A5, A6, A7 );
270 reg_class a0_reg( A0 );
271 reg_class a1_reg( A1 );
272 reg_class a2_reg( A2 );
273 reg_class a3_reg( A3 );
274 reg_class a4_reg( A4 );
275 reg_class a5_reg( A5 );
276 reg_class a6_reg( A6 );
277 reg_class a7_reg( A7 );
279 reg_class v0_reg( V0 );
280 reg_class v1_reg( V1 );
282 reg_class sp_reg( SP, SP_H );
283 reg_class fp_reg( FP, FP_H );
285 reg_class mips_flags(MIPS_FLAG);
287 reg_class v0_long_reg( V0, V0_H );
288 reg_class v1_long_reg( V1, V1_H );
289 reg_class a0_long_reg( A0, A0_H );
290 reg_class a1_long_reg( A1, A1_H );
291 reg_class a2_long_reg( A2, A2_H );
292 reg_class a3_long_reg( A3, A3_H );
293 reg_class a4_long_reg( A4, A4_H );
294 reg_class a5_long_reg( A5, A5_H );
295 reg_class a6_long_reg( A6, A6_H );
296 reg_class a7_long_reg( A7, A7_H );
297 reg_class t0_long_reg( T0, T0_H );
298 reg_class t1_long_reg( T1, T1_H );
299 reg_class t2_long_reg( T2, T2_H );
300 reg_class t3_long_reg( T3, T3_H );
301 reg_class t8_long_reg( T8, T8_H );
302 reg_class t9_long_reg( T9, T9_H );
303 reg_class s0_long_reg( S0, S0_H );
304 reg_class s1_long_reg( S1, S1_H );
305 reg_class s2_long_reg( S2, S2_H );
306 reg_class s3_long_reg( S3, S3_H );
307 reg_class s4_long_reg( S4, S4_H );
308 reg_class s5_long_reg( S5, S5_H );
309 reg_class s6_long_reg( S6, S6_H );
310 reg_class s7_long_reg( S7, S7_H );
312 reg_class int_reg( S7, S0, S1, S2, S4, S3, T8, T2, T3, T1, V1, A7, A6, A5, A4, V0, A3, A2, A1, A0, T0 );
314 reg_class no_Ax_int_reg( S7, S0, S1, S2, S4, S3, T8, T2, T3, T1, V1, V0, T0 );
316 reg_class p_reg(
317 S7, S7_H,
318 S0, S0_H,
319 S1, S1_H,
320 S2, S2_H,
321 S4, S4_H,
322 S3, S3_H,
323 T8, T8_H,
324 T2, T2_H,
325 T3, T3_H,
326 T1, T1_H,
327 A7, A7_H,
328 A6, A6_H,
329 A5, A5_H,
330 A4, A4_H,
331 A3, A3_H,
332 A2, A2_H,
333 A1, A1_H,
334 A0, A0_H,
335 T0, T0_H
336 );
338 reg_class no_T8_p_reg(
339 S7, S7_H,
340 S0, S0_H,
341 S1, S1_H,
342 S2, S2_H,
343 S4, S4_H,
344 S3, S3_H,
345 T2, T2_H,
346 T3, T3_H,
347 T1, T1_H,
348 A7, A7_H,
349 A6, A6_H,
350 A5, A5_H,
351 A4, A4_H,
352 A3, A3_H,
353 A2, A2_H,
354 A1, A1_H,
355 A0, A0_H,
356 T0, T0_H
357 );
359 reg_class long_reg(
360 S7, S7_H,
361 S0, S0_H,
362 S1, S1_H,
363 S2, S2_H,
364 S4, S4_H,
365 S3, S3_H,
366 T8, T8_H,
367 T2, T2_H,
368 T3, T3_H,
369 T1, T1_H,
370 A7, A7_H,
371 A6, A6_H,
372 A5, A5_H,
373 A4, A4_H,
374 A3, A3_H,
375 A2, A2_H,
376 A1, A1_H,
377 A0, A0_H,
378 T0, T0_H
379 );
382 // Floating point registers.
383 // 2012/8/23 Fu: F30/F31 are used as temporary registers in D2I
// 2016/12/1 aoqi: F31 is no longer used as a temporary register in D2I, so it is allocatable again
// Allocatable single-precision FP registers. F30 is excluded (used as a
// temporary in D2I, per the note above).
// Fixed: a comma was missing between F17 and F18, which is a syntax error
// in the ADL register list.
reg_class flt_reg( F0, F1, F2, F3, F4, F5, F6, F7, F8, F9, F10, F11, F12, F13, F14, F15, F16, F17, F18, F19, F20, F21, F22, F23, F24, F25, F26, F27, F28, F29, F31);
386 reg_class dbl_reg( F0, F0_H,
387 F1, F1_H,
388 F2, F2_H,
389 F3, F3_H,
390 F4, F4_H,
391 F5, F5_H,
392 F6, F6_H,
393 F7, F7_H,
394 F8, F8_H,
395 F9, F9_H,
396 F10, F10_H,
397 F11, F11_H,
398 F12, F12_H,
399 F13, F13_H,
400 F14, F14_H,
401 F15, F15_H,
402 F16, F16_H,
403 F17, F17_H,
404 F18, F18_H,
405 F19, F19_H,
406 F20, F20_H,
407 F21, F21_H,
408 F22, F22_H,
409 F23, F23_H,
410 F24, F24_H,
411 F25, F25_H,
412 F26, F26_H,
413 F27, F27_H,
414 F28, F28_H,
415 F29, F29_H,
416 F31, F31_H);
418 reg_class flt_arg0( F12 );
419 reg_class dbl_arg0( F12, F12_H );
420 reg_class dbl_arg1( F14, F14_H );
422 %}
424 //----------DEFINITION BLOCK---------------------------------------------------
425 // Define name --> value mappings to inform the ADLC of an integer valued name
426 // Current support includes integer values in the range [0, 0x7FFFFFFF]
427 // Format:
428 // int_def <name> ( <int_value>, <expression>);
429 // Generated Code in ad_<arch>.hpp
430 // #define <name> (<expression>)
431 // // value == <int_value>
432 // Generated code in ad_<arch>.cpp adlc_verification()
433 // assert( <name> == <int_value>, "Expect (<expression>) to equal <int_value>");
434 //
435 definitions %{
436 int_def DEFAULT_COST ( 100, 100);
437 int_def HUGE_COST (1000000, 1000000);
439 // Memory refs are twice as expensive as run-of-the-mill.
440 int_def MEMORY_REF_COST ( 200, DEFAULT_COST * 2);
442 // Branches are even more expensive.
443 int_def BRANCH_COST ( 300, DEFAULT_COST * 3);
444 // we use jr instruction to construct call, so more expensive
445 // by yjl 2/28/2006
446 int_def CALL_COST ( 500, DEFAULT_COST * 5);
447 /*
448 int_def EQUAL ( 1, 1 );
449 int_def NOT_EQUAL ( 2, 2 );
450 int_def GREATER ( 3, 3 );
451 int_def GREATER_EQUAL ( 4, 4 );
452 int_def LESS ( 5, 5 );
453 int_def LESS_EQUAL ( 6, 6 );
454 */
455 %}
459 //----------SOURCE BLOCK-------------------------------------------------------
460 // This is a block of C++ code which provides values, functions, and
461 // definitions necessary in the rest of the architecture description
463 source_hpp %{
464 // Header information of the source block.
465 // Method declarations/definitions which are used outside
466 // the ad-scope can conveniently be defined here.
467 //
468 // To keep related declarations/definitions/uses close together,
469 // we switch between source %{ }% and source_hpp %{ }% freely as needed.
// Platform hooks queried by Compile::shorten_branches.  This MIPS port emits
// no call trampoline stubs, so both queries report zero.
class CallStubImpl {

  //--------------------------------------------------------------
  //---<  Used for optimization in Compile::shorten_branches  >---
  //--------------------------------------------------------------

 public:
  // Size of call trampoline stub.
  static uint size_call_trampoline() {
    return 0; // no call trampolines on this platform
  }

  // number of relocations needed by a call trampoline stub
  static uint reloc_call_trampoline() {
    return 0; // no call trampolines on this platform
  }
};
// Platform-specific emission and sizing of the exception and deopt handler
// stubs (the emitters are defined in the source %{ %} block below).
class HandlerImpl {

 public:

  static int emit_exception_handler(CodeBuffer &cbuf);
  static int emit_deopt_handler(CodeBuffer& cbuf);

  static uint size_exception_handler() {
    // NativeCall instruction size is the same as NativeJump.
    // exception handler starts out as jump and can be patched to
    // a call by deoptimization. (4932387)
    // Note that this value is also credited (in output.cpp) to
    // the size of the code section.
    // return NativeJump::instruction_size;
    int size = NativeCall::instruction_size;
    return round_to(size, 16);  // handlers are emitted with __ align(16)
  }

#ifdef _LP64
  static uint size_deopt_handler() {
    // Same sizing as the exception handler: one NativeCall, 16-byte aligned.
    int size = NativeCall::instruction_size;
    return round_to(size, 16);
  }
#else
  static uint size_deopt_handler() {
    // NativeCall instruction size is the same as NativeJump.
    // exception handler starts out as jump and can be patched to
    // a call by deoptimization. (4932387)
    // Note that this value is also credited (in output.cpp) to
    // the size of the code section.
    return 5 + NativeJump::instruction_size; // pushl(); jmp;
  }
#endif
};
524 %} // end source_hpp
526 source %{
528 #define NO_INDEX 0
529 #define RELOC_IMM64 Assembler::imm_operand
530 #define RELOC_DISP32 Assembler::disp32_operand
533 #define __ _masm.
// Emit exception handler code.
// Stuff framesize into a register and call a VM stub routine.
int HandlerImpl::emit_exception_handler(CodeBuffer& cbuf) {
  /*
  // Note that the code buffer's insts_mark is always relative to insts.
  // That's why we must use the macroassembler to generate a handler.
  MacroAssembler _masm(&cbuf);
  address base = __ start_a_stub(size_exception_handler());
  if (base == NULL)  return 0;  // CodeBuffer::expand failed
  int offset = __ offset();
  __ jump(RuntimeAddress(OptoRuntime::exception_blob()->entry_point()));
  assert(__ offset() - offset <= (int) size_exception_handler(), "overflow");
  __ end_a_stub();
  return offset;
  */
  // Note that the code buffer's insts_mark is always relative to insts.
  // That's why we must use the macroassembler to generate a handler.
  MacroAssembler _masm(&cbuf);
  address base =
  __ start_a_stub(size_exception_handler());
  if (base == NULL)  return 0;  // CodeBuffer::expand failed
  int offset = __ offset();

  __ block_comment("; emit_exception_handler");

  /* 2012/9/25 FIXME Jin: According to X86, we should use a direct jump.
   * However, this will trigger an assert after the 40th method:
   *
   *   39  b  java.lang.Throwable::<init> (25 bytes)
   *   --- ns java.lang.Throwable::fillInStackTrace
   *   40 !b  java.net.URLClassLoader::findClass (29 bytes)
   *   /vm/opto/runtime.cpp, 900, assert(caller.is_compiled_frame(),"must be")
   *   40 made not entrant (2) java.net.URLClassLoader::findClass (29 bytes)
   *
   * If we change from JR to JALR, the assert will disappear, but WebClient will
   * fail after the 403th method with unknown reason.
   */
  // Load the 64-bit entry point into scratch register T9, then jump
  // (jr, not jalr: the exception handler never returns here).
  __ li48(T9, (long)OptoRuntime::exception_blob()->entry_point());
  __ jr(T9);
  __ delayed()->nop();  // branch-delay slot
  __ align(16);         // pad to match size_exception_handler()'s round_to(.,16)
  assert(__ offset() - offset <= (int) size_exception_handler(), "overflow");
  __ end_a_stub();
  return offset;
}
// Emit deopt handler code.
// Emits a relocated call (jalr, so RA identifies the deopt site) to the
// deopt blob's unpack entry.
int HandlerImpl::emit_deopt_handler(CodeBuffer& cbuf) {
  /*
  // Note that the code buffer's insts_mark is always relative to insts.
  // That's why we must use the macroassembler to generate a handler.
  MacroAssembler _masm(&cbuf);
  address base = __ start_a_stub(size_deopt_handler());
  if (base == NULL)  return 0;  // CodeBuffer::expand failed
  int offset = __ offset();

#ifdef _LP64
  address the_pc = (address) __ pc();
  Label next;
  // push a "the_pc" on the stack without destroying any registers
  // as they all may be live.

  // push address of "next"
  __ call(next, relocInfo::none); // reloc none is fine since it is a disp32
  __ bind(next);
  // adjust it so it matches "the_pc"
  __ subptr(Address(rsp, 0), __ offset() - offset);
#else
  InternalAddress here(__ pc());
  __ pushptr(here.addr());
#endif

  __ jump(RuntimeAddress(SharedRuntime::deopt_blob()->unpack()));
  assert(__ offset() - offset <= (int) size_deopt_handler(), "overflow");
  __ end_a_stub();
  return offset;
  */
  // Note that the code buffer's insts_mark is always relative to insts.
  // That's why we must use the macroassembler to generate a handler.
  MacroAssembler _masm(&cbuf);
  address base =
  __ start_a_stub(size_deopt_handler());

  // FIXME
  if (base == NULL)  return 0;  // CodeBuffer::expand failed
  int offset = __ offset();

  __ block_comment("; emit_deopt_handler");

  // Mark the call site so the runtime_call relocation below attaches to it.
  cbuf.set_insts_mark();
  __ relocate(relocInfo::runtime_call_type);

  // Load the 64-bit unpack entry into T9 and call it (jalr sets RA).
  __ li48(T9, (long)SharedRuntime::deopt_blob()->unpack());
  __ jalr(T9);
  __ delayed()->nop();  // branch-delay slot
  __ align(16);         // pad to match size_deopt_handler()'s round_to(.,16)
  assert(__ offset() - offset <= (int) size_deopt_handler(), "overflow");
  __ end_a_stub();
  return offset;
}
638 const bool Matcher::match_rule_supported(int opcode) {
639 if (!has_match_rule(opcode))
640 return false;
642 switch (opcode) {
643 //Op_CountLeadingZerosI Op_CountLeadingZerosL can be deleted, all MIPS CPUs support clz & dclz.
644 case Op_CountLeadingZerosI:
645 case Op_CountLeadingZerosL:
646 if (!UseCountLeadingZerosInstruction)
647 return false;
648 break;
649 case Op_CountTrailingZerosI:
650 case Op_CountTrailingZerosL:
651 if (!UseCountTrailingZerosInstruction)
652 return false;
653 break;
654 }
656 return true; // Per default match rules are supported.
657 }
//FIXME
// emit call stub, compiled java to interpreter
void emit_java_to_interp(CodeBuffer &cbuf ) {
  // Stub is fixed up when the corresponding call is converted from calling
  // compiled code to calling interpreted code.
  // mov rbx,0
  // jmp -1

  address mark = cbuf.insts_mark();  // get mark within main instrs section

  // Note that the code buffer's insts_mark is always relative to insts.
  // That's why we must use the macroassembler to generate a stub.
  MacroAssembler _masm(&cbuf);

  address base =
  __ start_a_stub(Compile::MAX_stubs_size);
  if (base == NULL)  return;  // CodeBuffer::expand failed
  // static stub relocation stores the instruction address of the call

  __ relocate(static_stub_Relocation::spec(mark), 0);

  /* 2012/10/29 Jin: Rmethod contains methodOop, it should be relocated for GC */
  /*
  int oop_index = __ oop_recorder()->allocate_index(NULL);
  RelocationHolder rspec = oop_Relocation::spec(oop_index);
  __ relocate(rspec);
  */

  // static stub relocation also tags the methodOop in the code-stream.
  __ li48(S3, (long)0);  // placeholder methodOop, patched at call resolution
  // This is recognized as unresolved by relocs/nativeInst/ic code

  __ relocate(relocInfo::runtime_call_type);

  cbuf.set_insts_mark();
  address call_pc = (address)-1;  // placeholder target, patched later
  __ li48(AT, (long)call_pc);
  __ jr(AT);
  __ nop();      // NOTE(review): plain nop here (not delayed()->nop() as in the
                 // handlers above) — confirm this is intentional
  __ align(16);
  __ end_a_stub();
  // Update current stubs pointer and restore code_end.
}
// size of call stub, compiled java to interpreter
uint size_java_to_interp() {
  // One li48 sequence (4 instructions = 16 bytes) plus the NativeCall,
  // rounded up to the 16-byte stub alignment used by emit_java_to_interp.
  int size = 4 * 4 + NativeCall::instruction_size; // sizeof(li48) + NativeCall::instruction_size
  return round_to(size, 16);
}

// relocation entries for call stub, compiled java to interpreter
uint reloc_java_to_interp() {
  return 16; // in emit_java_to_interp + in Java_Static_Call
}
714 bool Matcher::is_short_branch_offset(int rule, int br_size, int offset) {
715 if( Assembler::is_simm16(offset) ) return true;
716 else
717 {
718 assert(false, "Not implemented yet !" );
719 Unimplemented();
720 }
721 }
// No additional cost for CMOVL.
const int Matcher::long_cmove_cost() { return 0; }

// No CMOVF/CMOVD with SSE2
const int Matcher::float_cmove_cost() { return ConditionalMoveLimit; }

// Does the CPU require late expand (see block.cpp for description of late expand)?
const bool Matcher::require_postalloc_expand = false;

// Should the Matcher clone shifts on addressing modes, expecting them
// to be subsumed into complex addressing expressions or compute them
// into registers?  True for Intel but false for most RISCs
const bool Matcher::clone_shift_expressions = false;

// Do we need to mask the count passed to shift instructions or does
// the cpu only look at the lower 5/6 bits anyway?
// NOTE(review): 'false' assumes the hardware masks shift amounts — confirm
// this holds for the targeted MIPS cores.
const bool Matcher::need_masked_shift_count = false;
// Should a narrow-oop decode be folded into a complex address expression?
// Returns false: the decode stays a separate node on this port.
bool Matcher::narrow_oop_use_complex_address() {
  NOT_LP64(ShouldNotCallThis());  // only meaningful on 64-bit builds
  assert(UseCompressedOops, "only for compressed oops code");
  return false;
}

// Same question for compressed klass pointers; also kept separate.
bool Matcher::narrow_klass_use_complex_address() {
  NOT_LP64(ShouldNotCallThis());
  assert(UseCompressedClassPointers, "only for compressed klass code");
  return false;
}

// This is UltraSparc specific, true just means we have fast l2f conversion
const bool Matcher::convL2FSupported(void) {
  return true;
}
// Max vector size in bytes. 0 if not supported.
// This port only supports 8-byte (VecD) vectors.
const int Matcher::vector_width_in_bytes(BasicType bt) {
  assert(MaxVectorSize == 8, "");
  return 8;
}

// Vector ideal reg: map a vector size in bytes to its ideal register class.
const int Matcher::vector_ideal_reg(int size) {
  assert(MaxVectorSize == 8, "");
  switch(size) {
    case 8: return Op_VecD;
  }
  ShouldNotReachHere();  // only 8-byte vectors exist here
  return 0;
}

// Only lowest bits of xmm reg are used for vector shift count.
const int Matcher::vector_shift_count_ideal_reg(int size) {
  fatal("vector shift is not supported");  // no vector-shift rules on this port
  return Node::NotAMachineReg;
}

// Limits on vector size (number of elements) loaded into vector.
const int Matcher::max_vector_size(const BasicType bt) {
  assert(is_java_primitive(bt), "only primitive type vectors");
  return vector_width_in_bytes(bt)/type2aelembytes(bt);
}

const int Matcher::min_vector_size(const BasicType bt) {
  return max_vector_size(bt); // Same as max.
}

// MIPS supports misaligned vectors store/load? FIXME
const bool Matcher::misaligned_vectors_ok() {
  return false;
  //return !AlignVector; // can be changed by flag
}
// Register for DIVI projection of divmodI.
// No fused divmod rules exist on this port, so these masks are never asked for.
RegMask Matcher::divI_proj_mask() {
  ShouldNotReachHere();
  return RegMask();
}

// Register for MODI projection of divmodI
RegMask Matcher::modI_proj_mask() {
  ShouldNotReachHere();
  return RegMask();
}

// Register for DIVL projection of divmodL
RegMask Matcher::divL_proj_mask() {
  ShouldNotReachHere();
  return RegMask();
}

int Matcher::regnum_to_fpu_offset(int regnum) {
  return regnum - 32; // The FP registers are in the second chunk
}

const bool Matcher::isSimpleConstant64(jlong value) {
  // Will one (StoreL ConL) be cheaper than two (StoreI ConI)?.
  return true;
}
// Return whether or not this register is ever used as an argument. This
// function is used on startup to build the trampoline stubs in generateOptoStub.
// Registers not mentioned will be killed by the VM call in the trampoline, and
// arguments in those registers not be available to the callee.
bool Matcher::can_be_java_arg( int reg ) {
  /* Refer to: [sharedRuntime_mips_64.cpp] SharedRuntime::java_calling_convention() */
  // Integer argument registers: T0 and A0..A7 (with their high halves).
  if (    reg == T0_num || reg == T0_H_num
       || reg == A0_num || reg == A0_H_num
       || reg == A1_num || reg == A1_H_num
       || reg == A2_num || reg == A2_H_num
       || reg == A3_num || reg == A3_H_num
       || reg == A4_num || reg == A4_H_num
       || reg == A5_num || reg == A5_H_num
       || reg == A6_num || reg == A6_H_num
       || reg == A7_num || reg == A7_H_num )
    return true;

  // Floating-point argument registers: F12..F19 (with their high halves).
  if (    reg == F12_num || reg == F12_H_num
       || reg == F13_num || reg == F13_H_num
       || reg == F14_num || reg == F14_H_num
       || reg == F15_num || reg == F15_H_num
       || reg == F16_num || reg == F16_H_num
       || reg == F17_num || reg == F17_H_num
       || reg == F18_num || reg == F18_H_num
       || reg == F19_num || reg == F19_H_num )
    return true;

  return false;
}

// Any Java argument register may also be used as a spill location.
bool Matcher::is_spillable_arg( int reg ) {
  return can_be_java_arg(reg);
}

// No hand-written assembly sequence for long division by a constant.
bool Matcher::use_asm_for_ldiv_by_con( jlong divisor ) {
  return false;
}

// Register for MODL projection of divmodL (no fused divmod on this port).
RegMask Matcher::modL_proj_mask() {
  ShouldNotReachHere();
  return RegMask();
}

const RegMask Matcher::method_handle_invoke_SP_save_mask() {
  return FP_REG_mask();
}

// MIPS doesn't support AES intrinsics
const bool Matcher::pass_original_key_for_aes() {
  return false;
}
// The address of the call instruction needs to be 16-byte aligned to
// ensure that it does not span a cache line so that it can be patched.
// The patched call sequence is: lui, ori, dsll, ori, jalr, nop.
int CallStaticJavaDirectNode::compute_padding(int current_offset) const {
  //lui
  //ori
  //dsll
  //ori

  //jalr
  //nop

  return round_to(current_offset, alignment_required()) - current_offset;
}

// The address of the call instruction needs to be 16-byte aligned to
// ensure that it does not span a cache line so that it can be patched.
int CallDynamicJavaDirectNode::compute_padding(int current_offset) const {
  //li64  <--- skip
  //lui
  //ori
  //dsll
  //ori

  //jalr
  //nop

  // The leading li64 (6 instructions) is not part of the patched call,
  // so alignment is computed past it.
  current_offset += 4 * 6; // skip li64
  return round_to(current_offset, alignment_required()) - current_offset;
}

int CallLeafNoFPDirectNode::compute_padding(int current_offset) const {
  //lui
  //ori
  //dsll
  //ori

  //jalr
  //nop

  return round_to(current_offset, alignment_required()) - current_offset;
}

int CallLeafDirectNode::compute_padding(int current_offset) const {
  //lui
  //ori
  //dsll
  //ori

  //jalr
  //nop

  return round_to(current_offset, alignment_required()) - current_offset;
}

int CallRuntimeDirectNode::compute_padding(int current_offset) const {
  //lui
  //ori
  //dsll
  //ori

  //jalr
  //nop

  return round_to(current_offset, alignment_required()) - current_offset;
}
// If CPU can load and store mis-aligned doubles directly then no fixup is
// needed. Else we split the double into 2 integer pieces and move it
// piece-by-piece. Only happens when passing doubles into C code as the
// Java calling convention forces doubles to be aligned.
const bool Matcher::misaligned_doubles_ok = false;

// Do floats take an entire double register or just half?
//const bool Matcher::float_in_double = true;
bool Matcher::float_in_double() { return false; }

// Threshold size for cleararray: arrays at or below this size are
// cleared inline; larger ones go through the ClearArray path.
const int Matcher::init_array_short_size = 8 * BytesPerLong;

// Do ints take an entire long register or just half?
const bool Matcher::int_in_long = true;

// Is it better to copy float constants, or load them directly from memory?
// Intel can load a float constant from a direct address, requiring no
// extra registers. Most RISCs will have to materialize an address into a
// register first, so they would do better to copy the constant from stack.
const bool Matcher::rematerialize_float_constants = false;

// Advertise here if the CPU requires explicit rounding operations
// to implement the UseStrictFP mode.
const bool Matcher::strict_fp_requires_explicit_rounding = false;

// Inherited x86 wording ("the ecx parameter to rep stos"): when false,
// the ClearArray count is measured in words, not bytes.
const bool Matcher::init_array_count_is_in_bytes = false;
// Indicate if the safepoint node needs the polling page as an input.
// MIPS has no absolute addressing, so the polling-page address must be
// materialized in a register and passed in.
bool SafePointNode::needs_polling_address_input() {
  return true;
}
977 // !!!!! Special hack to get all type of calls to specify the byte offset
978 // from the start of the call to the point where the return address
979 // will point.
980 int MachCallStaticJavaNode::ret_addr_offset() {
981 assert(NativeCall::instruction_size == 24, "in MachCallStaticJavaNode::ret_addr_offset");
982 //The value ought to be 16 bytes.
983 //lui
984 //ori
985 //dsll
986 //ori
987 //jalr
988 //nop
989 return NativeCall::instruction_size;
990 }
992 int MachCallDynamicJavaNode::ret_addr_offset() {
993 /* 2012/9/10 Jin: must be kept in sync with Java_Dynamic_Call */
995 // return NativeCall::instruction_size;
996 assert(NativeCall::instruction_size == 24, "in MachCallDynamicJavaNode::ret_addr_offset");
997 //The value ought to be 4 + 16 bytes.
998 //lui IC_Klass,
999 //ori IC_Klass,
1000 //dsll IC_Klass
1001 //ori IC_Klass
1002 //lui T9
1003 //ori T9
1004 //dsll T9
1005 //ori T9
1006 //jalr T9
1007 //nop
1008 return 6 * 4 + NativeCall::instruction_size;
1010 }
1012 /*
1013 // EMIT_OPCODE()
1014 void emit_opcode(CodeBuffer &cbuf, int code) {
1015 *(cbuf.code_end()) = (unsigned char)code;
1016 cbuf.set_code_end(cbuf.code_end() + 1);
1017 }
1018 */
// Emit a 32-bit datum into the instruction stream, first recording
// relocation info of the given type/format at the current instruction
// mark (the relocation must be registered before the bytes are emitted).
void emit_d32_reloc(CodeBuffer &cbuf, int d32, relocInfo::relocType reloc,
        int format) {
  cbuf.relocate(cbuf.insts_mark(), reloc, format);
  cbuf.insts()->emit_int32(d32);
}
1026 //=============================================================================
1028 // Figure out which register class each belongs in: rc_int, rc_float, rc_stack
1029 enum RC { rc_bad, rc_int, rc_float, rc_stack };
1030 static enum RC rc_class( OptoReg::Name reg ) {
1031 if( !OptoReg::is_valid(reg) ) return rc_bad;
1032 if (OptoReg::is_stack(reg)) return rc_stack;
1033 VMReg r = OptoReg::as_VMReg(reg);
1034 if (r->is_Register()) return rc_int;
1035 assert(r->is_FloatRegister(), "must be");
1036 return rc_float;
1037 }
// Emit, size, or format a register-allocator spill copy: a move between
// any pair of {gpr, fpr, stack slot}, in 32-bit or 64-bit width.  When
// 'cbuf' is non-NULL the instructions are emitted; otherwise (non-PRODUCT
// builds, unless 'do_size' is set) a textual form is printed to 'st'.
// Returns the byte size of the (would-be) emitted code.  A register pair
// that is even-aligned and adjacent is treated as one 64-bit value.
uint MachSpillCopyNode::implementation( CodeBuffer *cbuf, PhaseRegAlloc *ra_, bool do_size, outputStream* st ) const {
  // Get registers to move
  OptoReg::Name src_second = ra_->get_reg_second(in(1));
  OptoReg::Name src_first = ra_->get_reg_first(in(1));
  OptoReg::Name dst_second = ra_->get_reg_second(this );
  OptoReg::Name dst_first = ra_->get_reg_first(this );

  enum RC src_second_rc = rc_class(src_second);
  enum RC src_first_rc = rc_class(src_first);
  enum RC dst_second_rc = rc_class(dst_second);
  enum RC dst_first_rc = rc_class(dst_first);

  assert(OptoReg::is_valid(src_first) && OptoReg::is_valid(dst_first), "must move at least 1 register" );

  // Generate spill code!
  int size = 0;

  if( src_first == dst_first && src_second == dst_second )
    return 0;            // Self copy, no move

  if (src_first_rc == rc_stack) {
    // mem ->
    if (dst_first_rc == rc_stack) {
      // mem -> mem, bounced through scratch register AT
      assert(src_second != dst_first, "overlap");
      if ((src_first & 1) == 0 && src_first + 1 == src_second &&
          (dst_first & 1) == 0 && dst_first + 1 == dst_second) {
        // 64-bit
        int src_offset = ra_->reg2offset(src_first);
        int dst_offset = ra_->reg2offset(dst_first);
        if (cbuf) {
          MacroAssembler _masm(cbuf);
          __ ld(AT, Address(SP, src_offset));
          __ sd(AT, Address(SP, dst_offset));
#ifndef PRODUCT
        } else {
          if(!do_size){
            if (size != 0) st->print("\n\t");
            st->print("ld AT, [SP + #%d]\t# 64-bit mem-mem spill 1\n\t"
                      "sd AT, [SP + #%d]",
                      src_offset, dst_offset);
          }
#endif
        }
        size += 8;
      } else {
        // 32-bit
        assert(!((src_first & 1) == 0 && src_first + 1 == src_second), "no transform");
        assert(!((dst_first & 1) == 0 && dst_first + 1 == dst_second), "no transform");
        // No pushl/popl, so:
        int src_offset = ra_->reg2offset(src_first);
        int dst_offset = ra_->reg2offset(dst_first);
        if (cbuf) {
          MacroAssembler _masm(cbuf);
          __ lw(AT, Address(SP, src_offset));
          __ sw(AT, Address(SP, dst_offset));
#ifndef PRODUCT
        } else {
          if(!do_size){
            if (size != 0) st->print("\n\t");
            st->print("lw AT, [SP + #%d] spill 2\n\t"
                      "sw AT, [SP + #%d]\n\t",
                      src_offset, dst_offset);
          }
#endif
        }
        size += 8;
      }
      return size;
    } else if (dst_first_rc == rc_int) {
      // mem -> gpr
      if ((src_first & 1) == 0 && src_first + 1 == src_second &&
          (dst_first & 1) == 0 && dst_first + 1 == dst_second) {
        // 64-bit
        int offset = ra_->reg2offset(src_first);
        if (cbuf) {
          MacroAssembler _masm(cbuf);
          __ ld(as_Register(Matcher::_regEncode[dst_first]), Address(SP, offset));
#ifndef PRODUCT
        } else {
          if(!do_size){
            if (size != 0) st->print("\n\t");
            st->print("ld %s, [SP + #%d]\t# spill 3",
                      Matcher::regName[dst_first],
                      offset);
          }
#endif
        }
        size += 4;
      } else {
        // 32-bit
        assert(!((src_first & 1) == 0 && src_first + 1 == src_second), "no transform");
        assert(!((dst_first & 1) == 0 && dst_first + 1 == dst_second), "no transform");
        int offset = ra_->reg2offset(src_first);
        if (cbuf) {
          MacroAssembler _masm(cbuf);
          // Ints are kept sign-extended in 64-bit registers (lw);
          // other 32-bit values are loaded zero-extended (lwu).
          if (this->ideal_reg() == Op_RegI)
            __ lw(as_Register(Matcher::_regEncode[dst_first]), Address(SP, offset));
          else
            __ lwu(as_Register(Matcher::_regEncode[dst_first]), Address(SP, offset));
#ifndef PRODUCT
        } else {
          if(!do_size){
            if (size != 0) st->print("\n\t");
            if (this->ideal_reg() == Op_RegI)
              st->print("lw %s, [SP + #%d]\t# spill 4",
                        Matcher::regName[dst_first],
                        offset);
            else
              st->print("lwu %s, [SP + #%d]\t# spill 5",
                        Matcher::regName[dst_first],
                        offset);
          }
#endif
        }
        size += 4;
      }
      return size;
    } else if (dst_first_rc == rc_float) {
      // mem-> xmm
      if ((src_first & 1) == 0 && src_first + 1 == src_second &&
          (dst_first & 1) == 0 && dst_first + 1 == dst_second) {
        // 64-bit
        int offset = ra_->reg2offset(src_first);
        if (cbuf) {
          MacroAssembler _masm(cbuf);
          __ ldc1( as_FloatRegister(Matcher::_regEncode[dst_first]), Address(SP, offset));
#ifndef PRODUCT
        } else {
          if(!do_size){
            if (size != 0) st->print("\n\t");
            st->print("ldc1 %s, [SP + #%d]\t# spill 6",
                      Matcher::regName[dst_first],
                      offset);
          }
#endif
        }
        size += 4;
      } else {
        // 32-bit
        assert(!((src_first & 1) == 0 && src_first + 1 == src_second), "no transform");
        assert(!((dst_first & 1) == 0 && dst_first + 1 == dst_second), "no transform");
        int offset = ra_->reg2offset(src_first);
        if (cbuf) {
          MacroAssembler _masm(cbuf);
          __ lwc1( as_FloatRegister(Matcher::_regEncode[dst_first]), Address(SP, offset));
#ifndef PRODUCT
        } else {
          if(!do_size){
            if (size != 0) st->print("\n\t");
            st->print("lwc1 %s, [SP + #%d]\t# spill 7",
                      Matcher::regName[dst_first],
                      offset);
          }
#endif
        }
        size += 4;
      }
      return size;
    }
  } else if (src_first_rc == rc_int) {
    // gpr ->
    if (dst_first_rc == rc_stack) {
      // gpr -> mem
      if ((src_first & 1) == 0 && src_first + 1 == src_second &&
          (dst_first & 1) == 0 && dst_first + 1 == dst_second) {
        // 64-bit
        int offset = ra_->reg2offset(dst_first);
        if (cbuf) {
          MacroAssembler _masm(cbuf);
          __ sd(as_Register(Matcher::_regEncode[src_first]), Address(SP, offset));
#ifndef PRODUCT
        } else {
          if(!do_size){
            if (size != 0) st->print("\n\t");
            st->print("sd %s, [SP + #%d] # spill 8",
                      Matcher::regName[src_first],
                      offset);
          }
#endif
        }
        size += 4;
      } else {
        // 32-bit
        assert(!((src_first & 1) == 0 && src_first + 1 == src_second), "no transform");
        assert(!((dst_first & 1) == 0 && dst_first + 1 == dst_second), "no transform");
        int offset = ra_->reg2offset(dst_first);
        if (cbuf) {
          MacroAssembler _masm(cbuf);
          __ sw(as_Register(Matcher::_regEncode[src_first]), Address(SP, offset));
#ifndef PRODUCT
        } else {
          if(!do_size){
            if (size != 0) st->print("\n\t");
            st->print("sw %s, [SP + #%d]\t# spill 9",
                      Matcher::regName[src_first], offset);
          }
#endif
        }
        size += 4;
      }
      return size;
    } else if (dst_first_rc == rc_int) {
      // gpr -> gpr
      if ((src_first & 1) == 0 && src_first + 1 == src_second &&
          (dst_first & 1) == 0 && dst_first + 1 == dst_second) {
        // 64-bit
        if (cbuf) {
          MacroAssembler _masm(cbuf);
          __ move(as_Register(Matcher::_regEncode[dst_first]),
                  as_Register(Matcher::_regEncode[src_first]));
#ifndef PRODUCT
        } else {
          if(!do_size){
            if (size != 0) st->print("\n\t");
            st->print("move(64bit) %s <-- %s\t# spill 10",
                      Matcher::regName[dst_first],
                      Matcher::regName[src_first]);
          }
#endif
        }
        size += 4;
        return size;
      } else {
        // 32-bit
        assert(!((src_first & 1) == 0 && src_first + 1 == src_second), "no transform");
        assert(!((dst_first & 1) == 0 && dst_first + 1 == dst_second), "no transform");
        if (cbuf) {
          MacroAssembler _masm(cbuf);
          // Op_RegI moves as a 32-bit value; otherwise preserve the full
          // 64-bit register contents (daddu with R0 is a plain move).
          if (this->ideal_reg() == Op_RegI)
            __ move_u32(as_Register(Matcher::_regEncode[dst_first]), as_Register(Matcher::_regEncode[src_first]));
          else
            __ daddu(as_Register(Matcher::_regEncode[dst_first]), as_Register(Matcher::_regEncode[src_first]), R0);

#ifndef PRODUCT
        } else {
          if(!do_size){
            if (size != 0) st->print("\n\t");
            st->print("move(32-bit) %s <-- %s\t# spill 11",
                      Matcher::regName[dst_first],
                      Matcher::regName[src_first]);
          }
#endif
        }
        size += 4;
        return size;
      }
    } else if (dst_first_rc == rc_float) {
      // gpr -> xmm
      if ((src_first & 1) == 0 && src_first + 1 == src_second &&
          (dst_first & 1) == 0 && dst_first + 1 == dst_second) {
        // 64-bit
        if (cbuf) {
          MacroAssembler _masm(cbuf);
          __ dmtc1(as_Register(Matcher::_regEncode[src_first]), as_FloatRegister(Matcher::_regEncode[dst_first]));
#ifndef PRODUCT
        } else {
          if(!do_size){
            if (size != 0) st->print("\n\t");
            st->print("dmtc1 %s, %s\t# spill 12",
                      Matcher::regName[dst_first],
                      Matcher::regName[src_first]);
          }
#endif
        }
        size += 4;
      } else {
        // 32-bit
        assert(!((src_first & 1) == 0 && src_first + 1 == src_second), "no transform");
        assert(!((dst_first & 1) == 0 && dst_first + 1 == dst_second), "no transform");
        if (cbuf) {
          MacroAssembler _masm(cbuf);
          __ mtc1( as_Register(Matcher::_regEncode[src_first]), as_FloatRegister(Matcher::_regEncode[dst_first]) );
#ifndef PRODUCT
        } else {
          if(!do_size){
            if (size != 0) st->print("\n\t");
            st->print("mtc1 %s, %s\t# spill 13",
                      Matcher::regName[dst_first],
                      Matcher::regName[src_first]);
          }
#endif
        }
        size += 4;
      }
      return size;
    }
  } else if (src_first_rc == rc_float) {
    // xmm ->
    if (dst_first_rc == rc_stack) {
      // xmm -> mem
      if ((src_first & 1) == 0 && src_first + 1 == src_second &&
          (dst_first & 1) == 0 && dst_first + 1 == dst_second) {
        // 64-bit
        int offset = ra_->reg2offset(dst_first);
        if (cbuf) {
          MacroAssembler _masm(cbuf);
          __ sdc1( as_FloatRegister(Matcher::_regEncode[src_first]), Address(SP, offset) );
#ifndef PRODUCT
        } else {
          if(!do_size){
            if (size != 0) st->print("\n\t");
            st->print("sdc1 %s, [SP + #%d]\t# spill 14",
                      Matcher::regName[src_first],
                      offset);
          }
#endif
        }
        size += 4;
      } else {
        // 32-bit
        assert(!((src_first & 1) == 0 && src_first + 1 == src_second), "no transform");
        assert(!((dst_first & 1) == 0 && dst_first + 1 == dst_second), "no transform");
        int offset = ra_->reg2offset(dst_first);
        if (cbuf) {
          MacroAssembler _masm(cbuf);
          __ swc1(as_FloatRegister(Matcher::_regEncode[src_first]), Address(SP, offset));
#ifndef PRODUCT
        } else {
          if(!do_size){
            if (size != 0) st->print("\n\t");
            st->print("swc1 %s, [SP + #%d]\t# spill 15",
                      Matcher::regName[src_first],
                      offset);
          }
#endif
        }
        size += 4;
      }
      return size;
    } else if (dst_first_rc == rc_int) {
      // xmm -> gpr
      if ((src_first & 1) == 0 && src_first + 1 == src_second &&
          (dst_first & 1) == 0 && dst_first + 1 == dst_second) {
        // 64-bit
        if (cbuf) {
          MacroAssembler _masm(cbuf);
          __ dmfc1( as_Register(Matcher::_regEncode[dst_first]), as_FloatRegister(Matcher::_regEncode[src_first]));
#ifndef PRODUCT
        } else {
          if(!do_size){
            if (size != 0) st->print("\n\t");
            st->print("dmfc1 %s, %s\t# spill 16",
                      Matcher::regName[dst_first],
                      Matcher::regName[src_first]);
          }
#endif
        }
        size += 4;
      } else {
        // 32-bit
        assert(!((src_first & 1) == 0 && src_first + 1 == src_second), "no transform");
        assert(!((dst_first & 1) == 0 && dst_first + 1 == dst_second), "no transform");
        if (cbuf) {
          MacroAssembler _masm(cbuf);
          __ mfc1( as_Register(Matcher::_regEncode[dst_first]), as_FloatRegister(Matcher::_regEncode[src_first]));
#ifndef PRODUCT
        } else {
          if(!do_size){
            if (size != 0) st->print("\n\t");
            st->print("mfc1 %s, %s\t# spill 17",
                      Matcher::regName[dst_first],
                      Matcher::regName[src_first]);
          }
#endif
        }
        size += 4;
      }
      return size;
    } else if (dst_first_rc == rc_float) {
      // xmm -> xmm
      if ((src_first & 1) == 0 && src_first + 1 == src_second &&
          (dst_first & 1) == 0 && dst_first + 1 == dst_second) {
        // 64-bit
        if (cbuf) {
          MacroAssembler _masm(cbuf);
          __ mov_d( as_FloatRegister(Matcher::_regEncode[dst_first]), as_FloatRegister(Matcher::_regEncode[src_first]));
#ifndef PRODUCT
        } else {
          if(!do_size){
            if (size != 0) st->print("\n\t");
            st->print("mov_d %s <-- %s\t# spill 18",
                      Matcher::regName[dst_first],
                      Matcher::regName[src_first]);
          }
#endif
        }
        size += 4;
      } else {
        // 32-bit
        assert(!((src_first & 1) == 0 && src_first + 1 == src_second), "no transform");
        assert(!((dst_first & 1) == 0 && dst_first + 1 == dst_second), "no transform");
        if (cbuf) {
          MacroAssembler _masm(cbuf);
          __ mov_s( as_FloatRegister(Matcher::_regEncode[dst_first]), as_FloatRegister(Matcher::_regEncode[src_first]));
#ifndef PRODUCT
        } else {
          if(!do_size){
            if (size != 0) st->print("\n\t");
            st->print("mov_s %s <-- %s\t# spill 19",
                      Matcher::regName[dst_first],
                      Matcher::regName[src_first]);
          }
#endif
        }
        size += 4;
      }
      return size;
    }
  }

  // No register-class combination matched: should never happen.
  assert(0," foo ");
  Unimplemented();
  return size;
}
#ifndef PRODUCT
// Print the spill copy (no emission): delegate to implementation()
// with a NULL CodeBuffer so it takes the formatting path.
void MachSpillCopyNode::format( PhaseRegAlloc *ra_, outputStream* st ) const {
  implementation( NULL, ra_, false, st );
}
#endif
// Emit the spill copy into the code buffer.
void MachSpillCopyNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
  implementation( &cbuf, ra_, false, NULL );
}
// Byte size of the spill copy: run implementation() in sizing mode
// (NULL CodeBuffer, do_size = true) so nothing is emitted or printed.
uint MachSpillCopyNode::size(PhaseRegAlloc *ra_) const {
  return implementation( NULL, ra_, true, NULL );
}
1470 //=============================================================================
1471 #
#ifndef PRODUCT
// Disassembly text for a breakpoint node.
void MachBreakpointNode::format( PhaseRegAlloc *, outputStream* st ) const {
  st->print("INT3");
}
#endif
// Emit a debugger breakpoint.
void MachBreakpointNode::emit(CodeBuffer &cbuf, PhaseRegAlloc* ra_) const {
  MacroAssembler _masm(&cbuf);
  __ int3();
}
// Size of the breakpoint: measured generically via MachNode::size().
uint MachBreakpointNode::size(PhaseRegAlloc* ra_) const {
  return MachNode::size(ra_);
}
1489 //=============================================================================
1490 #ifndef PRODUCT
1491 void MachEpilogNode::format( PhaseRegAlloc *ra_, outputStream* st ) const {
1492 Compile *C = ra_->C;
1493 int framesize = C->frame_size_in_bytes();
1495 assert((framesize & (StackAlignmentInBytes-1)) == 0, "frame size not aligned");
1497 st->print("daddiu SP, SP, %d # Rlease stack @ MachEpilogNode",framesize);
1498 st->cr(); st->print("\t");
1499 if (UseLoongsonISA) {
1500 st->print("gslq RA, FP, SP, %d # Restore FP & RA @ MachEpilogNode", -wordSize*2);
1501 } else {
1502 st->print("ld RA, SP, %d # Restore RA @ MachEpilogNode", -wordSize);
1503 st->cr(); st->print("\t");
1504 st->print("ld FP, SP, %d # Restore FP @ MachEpilogNode", -wordSize*2);
1505 }
1507 if( do_polling() && C->is_method_compilation() ) {
1508 st->print("Poll Safepoint # MachEpilogNode");
1509 }
1510 }
1511 #endif
// Emit the method epilog: release the frame, restore RA/FP (which the
// prolog stored just above the caller's SP), then poll the safepoint
// page on return for real method compilations.
void MachEpilogNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
  Compile *C = ra_->C;
  MacroAssembler _masm(&cbuf);
  int framesize = C->frame_size_in_bytes();

  assert((framesize & (StackAlignmentInBytes-1)) == 0, "frame size not aligned");

  // Pop the frame first; RA/FP now sit at negative offsets from SP.
  __ daddiu(SP, SP, framesize);

  if (UseLoongsonISA) {
    // gslq loads the RA/FP pair in a single instruction.
    __ gslq(RA, FP, SP, -wordSize*2);
  } else {
    __ ld(RA, SP, -wordSize );
    __ ld(FP, SP, -wordSize*2 );
  }

  // The epilog in a RuntimeStub should not contain a safepoint, hence
  // the is_method_compilation() guard.
  if( do_polling() && C->is_method_compilation() ) {
#ifndef OPT_SAFEPOINT
    // Materialize the polling-page address, then do the relocated load
    // that the VM turns into a trap at safepoints.
    __ set64(AT, (long)os::get_polling_page());
    __ relocate(relocInfo::poll_return_type);
    __ lw(AT, AT, 0);
#else
    // Two-instruction variant: lui + relocated lw of the split address.
    __ lui(AT, Assembler::split_high((intptr_t)os::get_polling_page()));
    __ relocate(relocInfo::poll_return_type);
    __ lw(AT, AT, Assembler::split_low((intptr_t)os::get_polling_page()));
#endif
  }
}
// Epilog size varies (Loongson vs. plain, polling vs. not); too many
// variables, so just compute it the hard way via MachNode::size().
uint MachEpilogNode::size(PhaseRegAlloc *ra_) const {
  return MachNode::size(ra_);
}
// Upper bound on relocation entries the epilog may add.
int MachEpilogNode::reloc() const {
  return 0; // a large enough number
}
// Scheduling info: use the generic MachNode pipeline class.
const Pipeline * MachEpilogNode::pipeline() const {
  return MachNode::pipeline_class();
}
// Offset of the return-poll within the epilog; 0 here.
int MachEpilogNode::safepoint_offset() const { return 0; }
1557 //=============================================================================
1559 #ifndef PRODUCT
// Print the lock-box address materialization (reg = SP + stack offset);
// mirrors BoxLockNode::emit.
void BoxLockNode::format( PhaseRegAlloc *ra_, outputStream* st ) const {
  int offset = ra_->reg2offset(in_RegMask(0).find_first_elem());
  int reg = ra_->get_reg_first(this);
  st->print("ADDI %s, SP, %d @BoxLockNode",Matcher::regName[reg],offset);
}
1565 #endif
// One 4-byte addi instruction (see BoxLockNode::emit).
uint BoxLockNode::size(PhaseRegAlloc *ra_) const {
  return 4;
}
1572 void BoxLockNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
1573 MacroAssembler _masm(&cbuf);
1574 int offset = ra_->reg2offset(in_RegMask(0).find_first_elem());
1575 int reg = ra_->get_encode(this);
1577 __ addi(as_Register(reg), SP, offset);
1578 /*
1579 if( offset >= 128 ) {
1580 emit_opcode(cbuf, 0x8D); // LEA reg,[SP+offset]
1581 emit_rm(cbuf, 0x2, reg, 0x04);
1582 emit_rm(cbuf, 0x0, 0x04, SP_enc);
1583 emit_d32(cbuf, offset);
1584 }
1585 else {
1586 emit_opcode(cbuf, 0x8D); // LEA reg,[SP+offset]
1587 emit_rm(cbuf, 0x1, reg, 0x04);
1588 emit_rm(cbuf, 0x0, 0x04, SP_enc);
1589 emit_d8(cbuf, offset);
1590 }
1591 */
1592 }
1595 //static int sizeof_FFree_Float_Stack_All = -1;
1597 int MachCallRuntimeNode::ret_addr_offset() {
1598 //lui
1599 //ori
1600 //dsll
1601 //ori
1602 //jalr
1603 //nop
1604 assert(NativeCall::instruction_size == 24, "in MachCallRuntimeNode::ret_addr_offset()");
1605 return NativeCall::instruction_size;
1606 // return 16;
1607 }
1613 //=============================================================================
1614 #ifndef PRODUCT
// Print the nop padding (4 bytes per nop).
void MachNopNode::format( PhaseRegAlloc *, outputStream* st ) const {
  st->print("NOP \t# %d bytes pad for loops and calls", 4 * _count);
}
1618 #endif
1620 void MachNopNode::emit(CodeBuffer &cbuf, PhaseRegAlloc * ) const {
1621 MacroAssembler _masm(&cbuf);
1622 int i = 0;
1623 for(i = 0; i < _count; i++)
1624 __ nop();
1625 }
// Each nop is one 4-byte MIPS instruction.
uint MachNopNode::size(PhaseRegAlloc *) const {
  return 4 * _count;
}
// Scheduling info: use the generic MachNode pipeline class.
const Pipeline* MachNopNode::pipeline() const {
  return MachNode::pipeline_class();
}
1634 //=============================================================================
1636 //=============================================================================
1637 #ifndef PRODUCT
// Print the unverified entry point sequence (inline-cache check);
// mirrors MachUEPNode::emit.
void MachUEPNode::format( PhaseRegAlloc *ra_, outputStream* st ) const {
  st->print_cr("load_klass(AT, T0)");
  st->print_cr("\tbeq(AT, iCache, L)");
  st->print_cr("\tnop");
  st->print_cr("\tjmp(SharedRuntime::get_ic_miss_stub(), relocInfo::runtime_call_type)");
  st->print_cr("\tnop");
  st->print_cr("\tnop");
  st->print_cr(" L:");
}
1647 #endif
// Emit the unverified entry point: compare the receiver's klass (T0)
// against the expected inline-cache klass; on mismatch, tail-jump to
// the IC-miss stub.  The verified entry point follows label L.
void MachUEPNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
  MacroAssembler _masm(&cbuf);
#ifdef ASSERT
  //uint code_size = cbuf.code_size();
#endif
  int ic_reg = Matcher::inline_cache_reg_encode();
  Label L;
  Register receiver = T0;
  Register iCache = as_Register(ic_reg);
  __ load_klass(AT, receiver);
  __ beq(AT, iCache, L);
  __ nop();  // branch delay slot

  // Miss: jump to SharedRuntime's IC-miss stub via T9.
  __ relocate(relocInfo::runtime_call_type);
  __ li48(T9, (long)SharedRuntime::get_ic_miss_stub());
  __ jr(T9);
  __ nop();

  /* WARNING these NOPs are critical so that verified entry point is properly
   * 8 bytes aligned for patching by NativeJump::patch_verified_entry() */
  __ align(CodeEntryAlignment);
  __ bind(L);
}
// UEP size depends on alignment padding; measure it generically.
uint MachUEPNode::size(PhaseRegAlloc *ra_) const {
  return MachNode::size(ra_);
}
1680 //=============================================================================
// The constant-table base may live in any pointer register.
const RegMask& MachConstantBaseNode::_out_RegMask = P_REG_mask();
// The table base is addressed absolutely (li48 of its address), so no
// bias between the base register and the table start is needed.
int Compile::ConstantTable::calculate_table_base_offset() const {
  return 0;  // absolute addressing, no offset
}
// This port emits the constant base directly; no post-allocation expand.
bool MachConstantBaseNode::requires_postalloc_expand() const { return false; }
// Never called: requires_postalloc_expand() returns false above.
void MachConstantBaseNode::postalloc_expand(GrowableArray <Node *> *nodes, PhaseRegAlloc *ra_) {
  ShouldNotReachHere();
}
// Materialize the constant-table base address into the allocated
// register (li48 of the absolute address of the consts section, with an
// internal-pc relocation so it survives code relocation).
void MachConstantBaseNode::emit(CodeBuffer& cbuf, PhaseRegAlloc* ra_) const {
  Compile* C = ra_->C;
  Compile::ConstantTable& constant_table = C->constant_table();
  MacroAssembler _masm(&cbuf);

  Register Rtoc = as_Register(ra_->get_encode(this));
  CodeSection* consts_section = __ code()->consts();
  int consts_size = consts_section->align_at_start(consts_section->size());
  assert(constant_table.size() == consts_size, "must be equal");

  // Nothing to do for an empty constant table.
  if (consts_section->size()) {
    // Materialize the constant table base.
    address baseaddr = consts_section->start() + -(constant_table.table_base_offset());
    // RelocationHolder rspec = internal_word_Relocation::spec(baseaddr);
    __ relocate(relocInfo::internal_pc_type);
    __ li48(Rtoc, (long)baseaddr);
  }
}
// Size of the base materialization.  NOTE(review): li48 is described as
// 4 instructions here (16 bytes) while the emit path may skip it for an
// empty table — confirm the allocator tolerates the over-estimate.
uint MachConstantBaseNode::size(PhaseRegAlloc* ra_) const {
  // li48 (4 insts)
  return 4 * 4;
}
#ifndef PRODUCT
// Disassembly text for the constant-table base materialization.
void MachConstantBaseNode::format(PhaseRegAlloc* ra_, outputStream* st) const {
  Register r = as_Register(ra_->get_encode(this));
  st->print("li48 %s, &constanttable (constant table base) @ MachConstantBaseNode", r->name());
}
#endif
1725 //=============================================================================
1726 #ifndef PRODUCT
// Print the prolog sequence; mirrors MachPrologNode::emit: optional
// stack bang, save RA/FP above the new frame, set up FP, drop SP.
void MachPrologNode::format( PhaseRegAlloc *ra_, outputStream* st ) const {
  Compile* C = ra_->C;

  int framesize = C->frame_size_in_bytes();
  int bangsize = C->bang_size_in_bytes();
  assert((framesize & (StackAlignmentInBytes-1)) == 0, "frame size not aligned");

  // Calls to C2R adapters often do not accept exceptional returns.
  // We require that their callers must bang for them. But be careful, because
  // some VM calls (such as call site linkage) can use several kilobytes of
  // stack. But the stack safety zone should account for that.
  // See bugs 4446381, 4468289, 4497237.
  if (C->need_stack_bang(bangsize)) {
    st->print_cr("# stack bang"); st->print("\t");
  }
  if (UseLoongsonISA) {
    // gssq stores the RA/FP pair in one instruction.
    st->print("gssq RA, FP, %d(SP) @ MachPrologNode\n\t", -wordSize*2);
  } else {
    st->print("sd RA, %d(SP) @ MachPrologNode\n\t", -wordSize);
    st->print("sd FP, %d(SP) @ MachPrologNode\n\t", -wordSize*2);
  }
  st->print("daddiu FP, SP, -%d \n\t", wordSize*2);
  st->print("daddiu SP, SP, -%d \t",framesize);
}
1751 #endif
1754 void MachPrologNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
1755 Compile* C = ra_->C;
1756 MacroAssembler _masm(&cbuf);
1758 int framesize = C->frame_size_in_bytes();
1759 int bangsize = C->bang_size_in_bytes();
1761 // __ verified_entry(framesize, C->need_stack_bang(bangsize)?bangsize:0, false);
1763 assert((framesize & (StackAlignmentInBytes-1)) == 0, "frame size not aligned");
1765 if (C->need_stack_bang(framesize)) {
1766 __ generate_stack_overflow_check(framesize);
1767 }
1769 if (UseLoongsonISA) {
1770 __ gssq(RA, FP, SP, -wordSize*2);
1771 } else {
1772 __ sd(RA, SP, -wordSize);
1773 __ sd(FP, SP, -wordSize*2);
1774 }
1775 __ daddiu(FP, SP, -wordSize*2);
1776 __ daddiu(SP, SP, -framesize);
1777 __ nop(); /* 2013.10.22 Jin: Make enough room for patch_verified_entry() */
1778 __ nop();
1780 C->set_frame_complete(cbuf.insts_size());
1781 if (C->has_mach_constant_base_node()) {
1782 // NOTE: We set the table base offset here because users might be
1783 // emitted before MachConstantBaseNode.
1784 Compile::ConstantTable& constant_table = C->constant_table();
1785 constant_table.set_table_base_offset(constant_table.calculate_table_base_offset());
1786 }
1788 }
// Prolog size varies (bang, Loongson pairing); too many variables, so
// just compute it the hard way via MachNode::size().
uint MachPrologNode::size(PhaseRegAlloc *ra_) const {
  return MachNode::size(ra_);
}
// Upper bound on relocation entries the prolog may add.
int MachPrologNode::reloc() const {
  return 0; // a large enough number
}
1800 %}
1802 //----------ENCODING BLOCK-----------------------------------------------------
1803 // This block specifies the encoding classes used by the compiler to output
1804 // byte streams. Encoding classes generate functions which are called by
1805 // Machine Instruction Nodes in order to generate the bit encoding of the
1806 // instruction. Operands specify their base encoding interface with the
1807 // interface keyword. There are currently supported four interfaces,
1808 // REG_INTER, CONST_INTER, MEMORY_INTER, & COND_INTER. REG_INTER causes an
1809 // operand to generate a function which returns its register number when
1810 // queried. CONST_INTER causes an operand to generate a function which
1811 // returns the value of the constant when queried. MEMORY_INTER causes an
1812 // operand to generate four functions which return the Base Register, the
1813 // Index Register, the Scale Value, and the Offset Value of the operand when
1814 // queried. COND_INTER causes an operand to generate six functions which
1815 // return the encoding code (ie - encoding bits for the instruction)
1816 // associated with each basic boolean condition for a conditional instruction.
1817 // Instructions specify two basic values for encoding. They use the
1818 // ins_encode keyword to specify their encoding class (which must be one of
1819 // the class names specified in the encoding block), and they use the
1820 // opcode keyword to specify, in order, their primary, secondary, and
1821 // tertiary opcode. Only the opcode sections which a particular instruction
1822 // needs for encoding need to be specified.
1823 encode %{
1824 /*
1825 Alias:
1826 1044 b java.io.ObjectInputStream::readHandle (130 bytes)
1827 118 B14: # B19 B15 <- B13 Freq: 0.899955
1828 118 add S1, S2, V0 #@addP_reg_reg
1829 11c lb S0, [S1 + #-8257524] #@loadB
1830 120 BReq S0, #3, B19 #@branchConI_reg_imm P=0.100000 C=-1.000000
1831 */
1832 //Load byte signed
1833 enc_class load_B_enc (mRegI dst, memory mem) %{
1834 MacroAssembler _masm(&cbuf);
1835 int dst = $dst$$reg;
1836 int base = $mem$$base;
1837 int index = $mem$$index;
1838 int scale = $mem$$scale;
1839 int disp = $mem$$disp;
1841 if( index != 0 ) {
1842 if( Assembler::is_simm16(disp) ) {
1843 if( UseLoongsonISA ) {
1844 if (scale == 0) {
1845 __ gslbx(as_Register(dst), as_Register(base), as_Register(index), disp);
1846 } else {
1847 __ dsll(AT, as_Register(index), scale);
1848 __ gslbx(as_Register(dst), as_Register(base), AT, disp);
1849 }
1850 } else {
1851 if (scale == 0) {
1852 __ addu(AT, as_Register(base), as_Register(index));
1853 } else {
1854 __ dsll(AT, as_Register(index), scale);
1855 __ addu(AT, as_Register(base), AT);
1856 }
1857 __ lb(as_Register(dst), AT, disp);
1858 }
1859 } else {
1860 if (scale == 0) {
1861 __ addu(AT, as_Register(base), as_Register(index));
1862 } else {
1863 __ dsll(AT, as_Register(index), scale);
1864 __ addu(AT, as_Register(base), AT);
1865 }
1866 __ move(T9, disp);
1867 if( UseLoongsonISA ) {
1868 __ gslbx(as_Register(dst), AT, T9, 0);
1869 } else {
1870 __ addu(AT, AT, T9);
1871 __ lb(as_Register(dst), AT, 0);
1872 }
1873 }
1874 } else {
1875 if( Assembler::is_simm16(disp) ) {
1876 __ lb(as_Register(dst), as_Register(base), disp);
1877 } else {
1878 __ move(T9, disp);
1879 if( UseLoongsonISA ) {
1880 __ gslbx(as_Register(dst), as_Register(base), T9, 0);
1881 } else {
1882 __ addu(AT, as_Register(base), T9);
1883 __ lb(as_Register(dst), AT, 0);
1884 }
1885 }
1886 }
1887 %}
1889 //Load byte unsigned
  // Load byte unsigned (lbu) from [base+index(<<scale)+disp]; handles
  // short (simm16) and long displacements.  Uses daddu for 64-bit
  // pointer arithmetic, with AT/T9 as scratch.
  enc_class load_UB_enc (mRegI dst, memory mem) %{
    MacroAssembler _masm(&cbuf);
    int  dst = $dst$$reg;
    int  base = $mem$$base;
    int  index = $mem$$index;
    int  scale = $mem$$scale;
    int  disp = $mem$$disp;

    if( index != 0 ) {
      // AT = base + (index << scale)
      if (scale == 0) {
        __ daddu(AT, as_Register(base), as_Register(index));
      } else {
        __ dsll(AT, as_Register(index), scale);
        __ daddu(AT, as_Register(base), AT);
      }
      if( Assembler::is_simm16(disp) ) {
        __ lbu(as_Register(dst), AT, disp);
      } else {
        // Displacement too large for lbu's immediate; add via T9.
        __ move(T9, disp);
        __ daddu(AT, AT, T9);
        __ lbu(as_Register(dst), AT, 0);
      }
    } else {
      if( Assembler::is_simm16(disp) ) {
        __ lbu(as_Register(dst), as_Register(base), disp);
      } else {
        __ move(T9, disp);
        __ daddu(AT, as_Register(base), T9);
        __ lbu(as_Register(dst), AT, 0);
      }
    }
  %}
  // Store byte from register to [base+index(<<scale)+disp]; picks the
  // Loongson gssbx form (8-bit immediate) when available, otherwise
  // computes the address into AT (displacement via T9 when it exceeds
  // simm16).
  // NOTE(review): address sums here use addu (32-bit add) whereas the
  // byte-load encodings use daddu; presumably safe only because MIPS64
  // pointers are kept in sign-extended canonical form — confirm, or
  // switch to daddu for consistency.
  enc_class store_B_reg_enc (memory mem, mRegI src) %{
    MacroAssembler _masm(&cbuf);
    int  src = $src$$reg;
    int  base = $mem$$base;
    int  index = $mem$$index;
    int  scale = $mem$$scale;
    int  disp = $mem$$disp;

    if( index != 0 ) {
      if (scale == 0) {
        // gssbx takes only an 8-bit displacement, hence the simm(disp, 8) test.
        if( Assembler::is_simm(disp, 8) ) {
          if (UseLoongsonISA) {
            __ gssbx(as_Register(src), as_Register(base), as_Register(index), disp);
          } else {
            __ addu(AT, as_Register(base), as_Register(index));
            __ sb(as_Register(src), AT, disp);
          }
        } else if( Assembler::is_simm16(disp) ) {
          __ addu(AT, as_Register(base), as_Register(index));
          __ sb(as_Register(src), AT, disp);
        } else {
          __ addu(AT, as_Register(base), as_Register(index));
          __ move(T9, disp);
          if (UseLoongsonISA) {
            __ gssbx(as_Register(src), AT, T9, 0);
          } else {
            __ addu(AT, AT, T9);
            __ sb(as_Register(src), AT, 0);
          }
        }
      } else {
        __ dsll(AT, as_Register(index), scale);
        if( Assembler::is_simm(disp, 8) ) {
          if (UseLoongsonISA) {
            // base and scaled index are interchangeable in the gssbx sum.
            __ gssbx(as_Register(src), AT, as_Register(base), disp);
          } else {
            __ addu(AT, as_Register(base), AT);
            __ sb(as_Register(src), AT, disp);
          }
        } else if( Assembler::is_simm16(disp) ) {
          __ addu(AT, as_Register(base), AT);
          __ sb(as_Register(src), AT, disp);
        } else {
          __ addu(AT, as_Register(base), AT);
          __ move(T9, disp);
          if (UseLoongsonISA) {
            __ gssbx(as_Register(src), AT, T9, 0);
          } else {
            __ addu(AT, AT, T9);
            __ sb(as_Register(src), AT, 0);
          }
        }
      }
    } else {
      if( Assembler::is_simm16(disp) ) {
        __ sb(as_Register(src), as_Register(base), disp);
      } else {
        __ move(T9, disp);
        if (UseLoongsonISA) {
          __ gssbx(as_Register(src), as_Register(base), T9, 0);
        } else {
          __ addu(AT, as_Register(base), T9);
          __ sb(as_Register(src), AT, 0);
        }
      }
    }
  %}
  // Store Byte immediate to memory. value == 0 stores R0 directly (saves a
  // move); otherwise the immediate is materialized in T9 (or AT) first.
  // The Loongson branch prefers gssbx (indexed store) whenever disp fits simm8.
  enc_class store_B_immI_enc (memory mem, immI8 src) %{
    MacroAssembler _masm(&cbuf);
    int base = $mem$$base;
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;
    int value = $src$$constant;

    if( index != 0 ) {
      if (!UseLoongsonISA) {
        // Generic MIPS path: compute full address into AT, then sb.
        if (scale == 0) {
          __ daddu(AT, as_Register(base), as_Register(index));
        } else {
          __ dsll(AT, as_Register(index), scale);
          __ daddu(AT, as_Register(base), AT);
        }
        if( Assembler::is_simm16(disp) ) {
          if (value == 0) {
            __ sb(R0, AT, disp);
          } else {
            __ move(T9, value);
            __ sb(T9, AT, disp);
          }
        } else {
          if (value == 0) {
            __ move(T9, disp);
            __ daddu(AT, AT, T9);
            __ sb(R0, AT, 0);
          } else {
            __ move(T9, disp);
            __ daddu(AT, AT, T9);
            __ move(T9, value);
            __ sb(T9, AT, 0);
          }
        }
      } else {
        if (scale == 0) {
          if( Assembler::is_simm(disp, 8) ) {
            if (value == 0) {
              __ gssbx(R0, as_Register(base), as_Register(index), disp);
            } else {
              __ move(T9, value);
              __ gssbx(T9, as_Register(base), as_Register(index), disp);
            }
          } else if( Assembler::is_simm16(disp) ) {
            __ daddu(AT, as_Register(base), as_Register(index));
            if (value == 0) {
              __ sb(R0, AT, disp);
            } else {
              __ move(T9, value);
              __ sb(T9, AT, disp);
            }
          } else {
            if (value == 0) {
              __ daddu(AT, as_Register(base), as_Register(index));
              __ move(T9, disp);
              __ gssbx(R0, AT, T9, 0);
            } else {
              // AT = base + disp, then store at AT + index (scale == 0 here).
              __ move(AT, disp);
              __ move(T9, value);
              __ daddu(AT, as_Register(base), AT);
              __ gssbx(T9, AT, as_Register(index), 0);
            }
          }
        } else {
          if( Assembler::is_simm(disp, 8) ) {
            __ dsll(AT, as_Register(index), scale);
            if (value == 0) {
              __ gssbx(R0, as_Register(base), AT, disp);
            } else {
              __ move(T9, value);
              __ gssbx(T9, as_Register(base), AT, disp);
            }
          } else if( Assembler::is_simm16(disp) ) {
            __ dsll(AT, as_Register(index), scale);
            __ daddu(AT, as_Register(base), AT);
            if (value == 0) {
              __ sb(R0, AT, disp);
            } else {
              __ move(T9, value);
              __ sb(T9, AT, disp);
            }
          } else {
            __ dsll(AT, as_Register(index), scale);
            if (value == 0) {
              __ daddu(AT, as_Register(base), AT);
              __ move(T9, disp);
              __ gssbx(R0, AT, T9, 0);
            } else {
              // AT = (index << scale) + disp, then store at base + AT.
              __ move(T9, disp);
              __ daddu(AT, AT, T9);
              __ move(T9, value);
              __ gssbx(T9, as_Register(base), AT, 0);
            }
          }
        }
      }
    } else {
      if( Assembler::is_simm16(disp) ) {
        if (value == 0) {
          __ sb(R0, as_Register(base), disp);
        } else {
          __ move(AT, value);
          __ sb(AT, as_Register(base), disp);
        }
      } else {
        if (value == 0) {
          __ move(T9, disp);
          if (UseLoongsonISA) {
            __ gssbx(R0, as_Register(base), T9, 0);
          } else {
            __ daddu(AT, as_Register(base), T9);
            __ sb(R0, AT, 0);
          }
        } else {
          __ move(T9, disp);
          if (UseLoongsonISA) {
            __ move(AT, value);
            __ gssbx(AT, as_Register(base), T9, 0);
          } else {
            __ daddu(AT, as_Register(base), T9);
            __ move(T9, value);
            __ sb(T9, AT, 0);
          }
        }
      }
    }
  %}
  // Store Byte immediate to memory, followed by a SYNC memory barrier
  // (volatile-store variant of store_B_immI_enc; no Loongson fast path here).
  enc_class store_B_immI_enc_sync (memory mem, immI8 src) %{
    MacroAssembler _masm(&cbuf);
    int base = $mem$$base;
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;
    int value = $src$$constant;

    if( index != 0 ) {
      if (scale == 0) {
        __ daddu(AT, as_Register(base), as_Register(index));
      } else {
        __ dsll(AT, as_Register(index), scale);
        __ daddu(AT, as_Register(base), AT);
      }
      if( Assembler::is_simm16(disp) ) {
        if (value == 0) {
          __ sb(R0, AT, disp);
        } else {
          __ move(T9, value);
          __ sb(T9, AT, disp);
        }
      } else {
        if (value == 0) {
          __ move(T9, disp);
          __ daddu(AT, AT, T9);
          __ sb(R0, AT, 0);
        } else {
          __ move(T9, disp);
          __ daddu(AT, AT, T9);
          __ move(T9, value);
          __ sb(T9, AT, 0);
        }
      }
    } else {
      if( Assembler::is_simm16(disp) ) {
        if (value == 0) {
          __ sb(R0, as_Register(base), disp);
        } else {
          __ move(AT, value);
          __ sb(AT, as_Register(base), disp);
        }
      } else {
        if (value == 0) {
          __ move(T9, disp);
          __ daddu(AT, as_Register(base), T9);
          __ sb(R0, AT, 0);
        } else {
          __ move(T9, disp);
          __ daddu(AT, as_Register(base), T9);
          __ move(T9, value);
          __ sb(T9, AT, 0);
        }
      }
    }

    // Order the byte store before subsequent memory operations.
    __ sync();
  %}
  // Load Short (16-bit sign-extended) from memory into a GP register.
  // NOTE(review): the large-disp paths use addu (32-bit sum) for address
  // arithmetic, unlike the daddu used above — confirm intended on 64-bit.
  enc_class load_S_enc (mRegI dst, memory mem) %{
    MacroAssembler _masm(&cbuf);
    int dst = $dst$$reg;
    int base = $mem$$base;
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;

    if( index != 0 ) {
      if (scale == 0) {
        __ daddu(AT, as_Register(base), as_Register(index));
      } else {
        __ dsll(AT, as_Register(index), scale);
        __ daddu(AT, as_Register(base), AT);
      }
      if( Assembler::is_simm16(disp) ) {
        __ lh(as_Register(dst), AT, disp);
      } else {
        __ move(T9, disp);
        __ addu(AT, AT, T9);
        __ lh(as_Register(dst), AT, 0);
      }
    } else {
      if( Assembler::is_simm16(disp) ) {
        __ lh(as_Register(dst), as_Register(base), disp);
      } else {
        __ move(T9, disp);
        __ addu(AT, as_Register(base), T9);
        __ lh(as_Register(dst), AT, 0);
      }
    }
  %}
  // Load Char (16-bit zero-extended) from memory into a GP register.
  enc_class load_C_enc (mRegI dst, memory mem) %{
    MacroAssembler _masm(&cbuf);
    int dst = $dst$$reg;
    int base = $mem$$base;
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;

    if( index != 0 ) {
      if (scale == 0) {
        __ daddu(AT, as_Register(base), as_Register(index));
      } else {
        __ dsll(AT, as_Register(index), scale);
        __ daddu(AT, as_Register(base), AT);
      }
      if( Assembler::is_simm16(disp) ) {
        __ lhu(as_Register(dst), AT, disp);
      } else {
        // NOTE(review): addu here vs daddu in the no-index path below.
        __ move(T9, disp);
        __ addu(AT, AT, T9);
        __ lhu(as_Register(dst), AT, 0);
      }
    } else {
      if( Assembler::is_simm16(disp) ) {
        __ lhu(as_Register(dst), as_Register(base), disp);
      } else {
        __ move(T9, disp);
        __ daddu(AT, as_Register(base), T9);
        __ lhu(as_Register(dst), AT, 0);
      }
    }
  %}
  // Store Char/Short (16-bit) from a GP register to memory.
  // Loongson gsshx (indexed halfword store) is used when disp fits simm8.
  enc_class store_C_reg_enc (memory mem, mRegI src) %{
    MacroAssembler _masm(&cbuf);
    int src = $src$$reg;
    int base = $mem$$base;
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;

    if( index != 0 ) {
      if( Assembler::is_simm16(disp) ) {
        if( UseLoongsonISA && Assembler::is_simm(disp, 8) ) {
          if (scale == 0) {
            __ gsshx(as_Register(src), as_Register(base), as_Register(index), disp);
          } else {
            __ dsll(AT, as_Register(index), scale);
            __ gsshx(as_Register(src), as_Register(base), AT, disp);
          }
        } else {
          if (scale == 0) {
            __ addu(AT, as_Register(base), as_Register(index));
          } else {
            __ dsll(AT, as_Register(index), scale);
            __ addu(AT, as_Register(base), AT);
          }
          __ sh(as_Register(src), AT, disp);
        }
      } else {
        // disp exceeds simm16: build base+index in AT, disp in T9.
        if (scale == 0) {
          __ addu(AT, as_Register(base), as_Register(index));
        } else {
          __ dsll(AT, as_Register(index), scale);
          __ addu(AT, as_Register(base), AT);
        }
        __ move(T9, disp);
        if( UseLoongsonISA ) {
          __ gsshx(as_Register(src), AT, T9, 0);
        } else {
          __ addu(AT, AT, T9);
          __ sh(as_Register(src), AT, 0);
        }
      }
    } else {
      if( Assembler::is_simm16(disp) ) {
        __ sh(as_Register(src), as_Register(base), disp);
      } else {
        __ move(T9, disp);
        if( UseLoongsonISA ) {
          __ gsshx(as_Register(src), as_Register(base), T9, 0);
        } else {
          __ addu(AT, as_Register(base), T9);
          __ sh(as_Register(src), AT, 0);
        }
      }
    }
  %}
  // Load Int (32-bit sign-extended) from memory into a GP register.
  // Loongson gslwx (indexed word load) is used when disp fits simm8.
  enc_class load_I_enc (mRegI dst, memory mem) %{
    MacroAssembler _masm(&cbuf);
    int dst = $dst$$reg;
    int base = $mem$$base;
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;

    if( index != 0 ) {
      if( Assembler::is_simm16(disp) ) {
        if( UseLoongsonISA && Assembler::is_simm(disp, 8) ) {
          if (scale == 0) {
            __ gslwx(as_Register(dst), as_Register(base), as_Register(index), disp);
          } else {
            __ dsll(AT, as_Register(index), scale);
            __ gslwx(as_Register(dst), as_Register(base), AT, disp);
          }
        } else {
          if (scale == 0) {
            __ addu(AT, as_Register(base), as_Register(index));
          } else {
            __ dsll(AT, as_Register(index), scale);
            __ addu(AT, as_Register(base), AT);
          }
          __ lw(as_Register(dst), AT, disp);
        }
      } else {
        if (scale == 0) {
          __ addu(AT, as_Register(base), as_Register(index));
        } else {
          __ dsll(AT, as_Register(index), scale);
          __ addu(AT, as_Register(base), AT);
        }
        __ move(T9, disp);
        if( UseLoongsonISA ) {
          __ gslwx(as_Register(dst), AT, T9, 0);
        } else {
          __ addu(AT, AT, T9);
          __ lw(as_Register(dst), AT, 0);
        }
      }
    } else {
      if( Assembler::is_simm16(disp) ) {
        __ lw(as_Register(dst), as_Register(base), disp);
      } else {
        __ move(T9, disp);
        if( UseLoongsonISA ) {
          __ gslwx(as_Register(dst), as_Register(base), T9, 0);
        } else {
          __ addu(AT, as_Register(base), T9);
          __ lw(as_Register(dst), AT, 0);
        }
      }
    }
  %}
  // Store Int (32-bit) from a GP register to memory.
  // Loongson gsswx (indexed word store) is used when disp fits simm8.
  enc_class store_I_reg_enc (memory mem, mRegI src) %{
    MacroAssembler _masm(&cbuf);
    int src = $src$$reg;
    int base = $mem$$base;
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;

    if( index != 0 ) {
      if( Assembler::is_simm16(disp) ) {
        if( UseLoongsonISA && Assembler::is_simm(disp, 8) ) {
          if (scale == 0) {
            __ gsswx(as_Register(src), as_Register(base), as_Register(index), disp);
          } else {
            __ dsll(AT, as_Register(index), scale);
            __ gsswx(as_Register(src), as_Register(base), AT, disp);
          }
        } else {
          if (scale == 0) {
            __ addu(AT, as_Register(base), as_Register(index));
          } else {
            __ dsll(AT, as_Register(index), scale);
            __ addu(AT, as_Register(base), AT);
          }
          __ sw(as_Register(src), AT, disp);
        }
      } else {
        if (scale == 0) {
          __ addu(AT, as_Register(base), as_Register(index));
        } else {
          __ dsll(AT, as_Register(index), scale);
          __ addu(AT, as_Register(base), AT);
        }
        __ move(T9, disp);
        if( UseLoongsonISA ) {
          __ gsswx(as_Register(src), AT, T9, 0);
        } else {
          __ addu(AT, AT, T9);
          __ sw(as_Register(src), AT, 0);
        }
      }
    } else {
      if( Assembler::is_simm16(disp) ) {
        __ sw(as_Register(src), as_Register(base), disp);
      } else {
        __ move(T9, disp);
        if( UseLoongsonISA ) {
          __ gsswx(as_Register(src), as_Register(base), T9, 0);
        } else {
          __ addu(AT, as_Register(base), T9);
          __ sw(as_Register(src), AT, 0);
        }
      }
    }
  %}
  // Store Int immediate to memory. value == 0 stores R0 directly; otherwise
  // the immediate is materialized in T9 (or AT in the short no-index path).
  enc_class store_I_immI_enc (memory mem, immI src) %{
    MacroAssembler _masm(&cbuf);
    int base = $mem$$base;
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;
    int value = $src$$constant;

    if( index != 0 ) {
      if (scale == 0) {
        __ daddu(AT, as_Register(base), as_Register(index));
      } else {
        __ dsll(AT, as_Register(index), scale);
        __ daddu(AT, as_Register(base), AT);
      }
      if( Assembler::is_simm16(disp) ) {
        if (value == 0) {
          __ sw(R0, AT, disp);
        } else {
          __ move(T9, value);
          __ sw(T9, AT, disp);
        }
      } else {
        if (value == 0) {
          __ move(T9, disp);
          __ addu(AT, AT, T9);
          __ sw(R0, AT, 0);
        } else {
          __ move(T9, disp);
          __ addu(AT, AT, T9);
          __ move(T9, value);
          __ sw(T9, AT, 0);
        }
      }
    } else {
      if( Assembler::is_simm16(disp) ) {
        if (value == 0) {
          __ sw(R0, as_Register(base), disp);
        } else {
          __ move(AT, value);
          __ sw(AT, as_Register(base), disp);
        }
      } else {
        if (value == 0) {
          __ move(T9, disp);
          __ addu(AT, as_Register(base), T9);
          __ sw(R0, AT, 0);
        } else {
          __ move(T9, disp);
          __ addu(AT, as_Register(base), T9);
          __ move(T9, value);
          __ sw(T9, AT, 0);
        }
      }
    }
  %}
  // Load compressed oop (narrow, 32-bit zero-extended via lwu) into a GP
  // register. The displacement must not carry relocation info.
  enc_class load_N_enc (mRegN dst, memory mem) %{
    MacroAssembler _masm(&cbuf);
    int dst = $dst$$reg;
    int base = $mem$$base;
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;
    relocInfo::relocType disp_reloc = $mem->disp_reloc();
    assert(disp_reloc == relocInfo::none, "cannot have disp");

    if( index != 0 ) {
      if (scale == 0) {
        __ daddu(AT, as_Register(base), as_Register(index));
      } else {
        __ dsll(AT, as_Register(index), scale);
        __ daddu(AT, as_Register(base), AT);
      }
      if( Assembler::is_simm16(disp) ) {
        __ lwu(as_Register(dst), AT, disp);
      } else {
        __ li(T9, disp);
        __ daddu(AT, AT, T9);
        __ lwu(as_Register(dst), AT, 0);
      }
    } else {
      if( Assembler::is_simm16(disp) ) {
        __ lwu(as_Register(dst), as_Register(base), disp);
      } else {
        __ li(T9, disp);
        __ daddu(AT, as_Register(base), T9);
        __ lwu(as_Register(dst), AT, 0);
      }
    }
  %}
  // Load Pointer (64-bit) from memory into a GP register.
  // The displacement must not carry relocation info.
  enc_class load_P_enc (mRegP dst, memory mem) %{
    MacroAssembler _masm(&cbuf);
    int dst = $dst$$reg;
    int base = $mem$$base;
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;
    relocInfo::relocType disp_reloc = $mem->disp_reloc();
    assert(disp_reloc == relocInfo::none, "cannot have disp");

    if( index != 0 ) {
      if (scale == 0) {
        __ daddu(AT, as_Register(base), as_Register(index));
      } else {
        __ dsll(AT, as_Register(index), scale);
        __ daddu(AT, as_Register(base), AT);
      }
      if( Assembler::is_simm16(disp) ) {
        __ ld(as_Register(dst), AT, disp);
      } else {
        __ li(T9, disp);
        __ daddu(AT, AT, T9);
        __ ld(as_Register(dst), AT, 0);
      }
    } else {
      if( Assembler::is_simm16(disp) ) {
        __ ld(as_Register(dst), as_Register(base), disp);
      } else {
        __ li(T9, disp);
        __ daddu(AT, as_Register(base), T9);
        __ ld(as_Register(dst), AT, 0);
      }
    }
    // if( disp_reloc != relocInfo::none) __ ld(as_Register(dst), as_Register(dst), 0);
  %}
  // Store Pointer (64-bit) from a GP register to memory.
  enc_class store_P_reg_enc (memory mem, mRegP src) %{
    MacroAssembler _masm(&cbuf);
    int src = $src$$reg;
    int base = $mem$$base;
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;

    if( index != 0 ) {
      if (scale == 0) {
        __ daddu(AT, as_Register(base), as_Register(index));
      } else {
        __ dsll(AT, as_Register(index), scale);
        __ daddu(AT, as_Register(base), AT);
      }
      if( Assembler::is_simm16(disp) ) {
        __ sd(as_Register(src), AT, disp);
      } else {
        __ move(T9, disp);
        __ daddu(AT, AT, T9);
        __ sd(as_Register(src), AT, 0);
      }
    } else {
      if( Assembler::is_simm16(disp) ) {
        __ sd(as_Register(src), as_Register(base), disp);
      } else {
        __ move(T9, disp);
        __ daddu(AT, as_Register(base), T9);
        __ sd(as_Register(src), AT, 0);
      }
    }
  %}
  // Store compressed oop (narrow, 32-bit via sw) from a GP register to memory.
  // NOTE(review): the large-disp paths use addu for the address sum while the
  // wide-pointer store above uses daddu — confirm intended on 64-bit.
  enc_class store_N_reg_enc (memory mem, mRegN src) %{
    MacroAssembler _masm(&cbuf);
    int src = $src$$reg;
    int base = $mem$$base;
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;

    if( index != 0 ) {
      if (scale == 0) {
        __ daddu(AT, as_Register(base), as_Register(index));
      } else {
        __ dsll(AT, as_Register(index), scale);
        __ daddu(AT, as_Register(base), AT);
      }
      if( Assembler::is_simm16(disp) ) {
        __ sw(as_Register(src), AT, disp);
      } else {
        __ move(T9, disp);
        __ addu(AT, AT, T9);
        __ sw(as_Register(src), AT, 0);
      }
    } else {
      if( Assembler::is_simm16(disp) ) {
        __ sw(as_Register(src), as_Register(base), disp);
      } else {
        __ move(T9, disp);
        __ addu(AT, as_Register(base), T9);
        __ sw(as_Register(src), AT, 0);
      }
    }
  %}
  // Store Pointer immediate (immP31) to memory. value == 0 stores R0 (the
  // common null-store case); otherwise the constant is materialized first.
  enc_class store_P_immP_enc (memory mem, immP31 src) %{
    MacroAssembler _masm(&cbuf);
    int base = $mem$$base;
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;
    long value = $src$$constant;

    if( index != 0 ) {
      if (scale == 0) {
        __ daddu(AT, as_Register(base), as_Register(index));
      } else {
        __ dsll(AT, as_Register(index), scale);
        __ daddu(AT, as_Register(base), AT);
      }
      if( Assembler::is_simm16(disp) ) {
        if (value == 0) {
          __ sd(R0, AT, disp);
        } else {
          __ move(T9, value);
          __ sd(T9, AT, disp);
        }
      } else {
        if (value == 0) {
          __ move(T9, disp);
          __ daddu(AT, AT, T9);
          __ sd(R0, AT, 0);
        } else {
          __ move(T9, disp);
          __ daddu(AT, AT, T9);
          __ move(T9, value);
          __ sd(T9, AT, 0);
        }
      }
    } else {
      if( Assembler::is_simm16(disp) ) {
        if (value == 0) {
          __ sd(R0, as_Register(base), disp);
        } else {
          __ move(AT, value);
          __ sd(AT, as_Register(base), disp);
        }
      } else {
        if (value == 0) {
          __ move(T9, disp);
          __ daddu(AT, as_Register(base), T9);
          __ sd(R0, AT, 0);
        } else {
          __ move(T9, disp);
          __ daddu(AT, as_Register(base), T9);
          __ move(T9, value);
          __ sd(T9, AT, 0);
        }
      }
    }
  %}
  // Store narrow-oop null (compressed null is encoded as 0, so a plain
  // 32-bit store of R0 suffices).
  enc_class storeImmN0_enc(memory mem, ImmN0 src) %{
    MacroAssembler _masm(&cbuf);
    int base = $mem$$base;
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;

    if(index!=0){
      if (scale == 0) {
        __ daddu(AT, as_Register(base), as_Register(index));
      } else {
        __ dsll(AT, as_Register(index), scale);
        __ daddu(AT, as_Register(base), AT);
      }

      if( Assembler::is_simm16(disp) ) {
        __ sw(R0, AT, disp);
      } else {
        __ move(T9, disp);
        __ daddu(AT, AT, T9);
        __ sw(R0, AT, 0);
      }
    }
    else {
      if( Assembler::is_simm16(disp) ) {
        __ sw(R0, as_Register(base), disp);
      } else {
        __ move(T9, disp);
        __ daddu(AT, as_Register(base), T9);
        __ sw(R0, AT, 0);
      }
    }
  %}
  // Load Long (64-bit) from memory into a GP register.
  enc_class load_L_enc (mRegL dst, memory mem) %{
    MacroAssembler _masm(&cbuf);
    int base = $mem$$base;
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;
    Register dst_reg = as_Register($dst$$reg);

    // $base may contain a null object. The server JIT forces the
    // exception_offset to be the position of the first instruction, so an
    // explicit touch of [base + 0] is emitted first to trigger the implicit
    // null check at the encoding's first instruction.
    __ lw(AT, as_Register(base), 0);

    // Historical note (SortTest, java.util.Arrays::sort1): an earlier version
    // loaded the two 32-bit halves separately, e.g.
    //   __ lw(dst_lo, as_Register(base), disp);
    //   __ lw(dst_hi, as_Register(base), disp + 4);
    // which was wrong when dst aliased base; a single 64-bit ld is used now.
    if( index != 0 ) {
      if (scale == 0) {
        __ daddu(AT, as_Register(base), as_Register(index));
      } else {
        __ dsll(AT, as_Register(index), scale);
        __ daddu(AT, as_Register(base), AT);
      }
      if( Assembler::is_simm16(disp) ) {
        __ ld(dst_reg, AT, disp);
      } else {
        __ move(T9, disp);
        __ daddu(AT, AT, T9);
        __ ld(dst_reg, AT, 0);
      }
    } else {
      if( Assembler::is_simm16(disp) ) {
        // NOTE(review): the copy of base into AT looks redundant here —
        // presumably kept for uniformity; ld from base directly would suffice.
        __ move(AT, as_Register(base));
        __ ld(dst_reg, AT, disp);
      } else {
        __ move(T9, disp);
        __ daddu(AT, as_Register(base), T9);
        __ ld(dst_reg, AT, 0);
      }
    }
  %}
  // Store Long (64-bit) from a GP register to memory.
  enc_class store_L_reg_enc (memory mem, mRegL src) %{
    MacroAssembler _masm(&cbuf);
    int base = $mem$$base;
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;
    Register src_reg = as_Register($src$$reg);

    if( index != 0 ) {
      if (scale == 0) {
        __ daddu(AT, as_Register(base), as_Register(index));
      } else {
        __ dsll(AT, as_Register(index), scale);
        __ daddu(AT, as_Register(base), AT);
      }
      if( Assembler::is_simm16(disp) ) {
        __ sd(src_reg, AT, disp);
      } else {
        __ move(T9, disp);
        __ daddu(AT, AT, T9);
        __ sd(src_reg, AT, 0);
      }
    } else {
      if( Assembler::is_simm16(disp) ) {
        __ move(AT, as_Register(base));
        __ sd(src_reg, AT, disp);
      } else {
        __ move(T9, disp);
        __ daddu(AT, as_Register(base), T9);
        __ sd(src_reg, AT, 0);
      }
    }
  %}
  // Store Long zero to memory (R0 stored directly; no constant materialized).
  // NOTE(review): the large-disp paths use addu for the address sum while
  // store_L_reg_enc uses daddu — confirm intended on 64-bit.
  enc_class store_L_immL0_enc (memory mem, immL0 src) %{
    MacroAssembler _masm(&cbuf);
    int base = $mem$$base;
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;

    if( index != 0 ) {
      if (scale == 0) {
        __ daddu(AT, as_Register(base), as_Register(index));
      } else {
        __ dsll(AT, as_Register(index), scale);
        __ daddu(AT, as_Register(base), AT);
      }
      if( Assembler::is_simm16(disp) ) {
        __ sd(R0, AT, disp);
      } else {
        __ move(T9, disp);
        __ addu(AT, AT, T9);
        __ sd(R0, AT, 0);
      }
    } else {
      if( Assembler::is_simm16(disp) ) {
        __ move(AT, as_Register(base));
        __ sd(R0, AT, disp);
      } else {
        __ move(T9, disp);
        __ addu(AT, as_Register(base), T9);
        __ sd(R0, AT, 0);
      }
    }
  %}
  // Store Long immediate to memory; the 64-bit constant is materialized in T9
  // with li after the address is formed (T9 is reused for disp first).
  // NOTE(review): addu used for the address sum here — confirm intended.
  enc_class store_L_immL_enc (memory mem, immL src) %{
    MacroAssembler _masm(&cbuf);
    int base = $mem$$base;
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;
    long imm = $src$$constant;

    if( index != 0 ) {
      if (scale == 0) {
        __ daddu(AT, as_Register(base), as_Register(index));
      } else {
        __ dsll(AT, as_Register(index), scale);
        __ daddu(AT, as_Register(base), AT);
      }
      if( Assembler::is_simm16(disp) ) {
        __ li(T9, imm);
        __ sd(T9, AT, disp);
      } else {
        __ move(T9, disp);
        __ addu(AT, AT, T9);
        __ li(T9, imm);
        __ sd(T9, AT, 0);
      }
    } else {
      if( Assembler::is_simm16(disp) ) {
        __ move(AT, as_Register(base));
        __ li(T9, imm);
        __ sd(T9, AT, disp);
      } else {
        __ move(T9, disp);
        __ addu(AT, as_Register(base), T9);
        __ li(T9, imm);
        __ sd(T9, AT, 0);
      }
    }
  %}
  // Load Float (single precision) from memory into an FP register.
  // Loongson gslwxc1 (indexed FP word load) is used when disp fits simm8.
  enc_class load_F_enc (regF dst, memory mem) %{
    MacroAssembler _masm(&cbuf);
    int base = $mem$$base;
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;
    FloatRegister dst = $dst$$FloatRegister;

    if( index != 0 ) {
      if( Assembler::is_simm16(disp) ) {
        if( UseLoongsonISA && Assembler::is_simm(disp, 8) ) {
          if (scale == 0) {
            __ gslwxc1(dst, as_Register(base), as_Register(index), disp);
          } else {
            __ dsll(AT, as_Register(index), scale);
            __ gslwxc1(dst, as_Register(base), AT, disp);
          }
        } else {
          if (scale == 0) {
            __ daddu(AT, as_Register(base), as_Register(index));
          } else {
            __ dsll(AT, as_Register(index), scale);
            __ daddu(AT, as_Register(base), AT);
          }
          __ lwc1(dst, AT, disp);
        }
      } else {
        if (scale == 0) {
          __ daddu(AT, as_Register(base), as_Register(index));
        } else {
          __ dsll(AT, as_Register(index), scale);
          __ daddu(AT, as_Register(base), AT);
        }
        __ move(T9, disp);
        if( UseLoongsonISA ) {
          __ gslwxc1(dst, AT, T9, 0);
        } else {
          __ daddu(AT, AT, T9);
          __ lwc1(dst, AT, 0);
        }
      }
    } else {
      if( Assembler::is_simm16(disp) ) {
        __ lwc1(dst, as_Register(base), disp);
      } else {
        __ move(T9, disp);
        if( UseLoongsonISA ) {
          __ gslwxc1(dst, as_Register(base), T9, 0);
        } else {
          __ daddu(AT, as_Register(base), T9);
          __ lwc1(dst, AT, 0);
        }
      }
    }
  %}
2924 enc_class store_F_reg_enc (memory mem, regF src) %{
2925 MacroAssembler _masm(&cbuf);
2926 int base = $mem$$base;
2927 int index = $mem$$index;
2928 int scale = $mem$$scale;
2929 int disp = $mem$$disp;
2930 FloatRegister src = $src$$FloatRegister;
2932 if( index != 0 ) {
2933 if( Assembler::is_simm16(disp) ) {
2934 if( UseLoongsonISA && Assembler::is_simm(disp, 8) ) {
2935 if (scale == 0) {
2936 __ gsswxc1(src, as_Register(base), as_Register(index), disp);
2937 } else {
2938 __ dsll(AT, as_Register(index), scale);
2939 __ gsswxc1(src, as_Register(base), AT, disp);
2940 }
2941 } else {
2942 if (scale == 0) {
2943 __ daddu(AT, as_Register(base), as_Register(index));
2944 } else {
2945 __ dsll(AT, as_Register(index), scale);
2946 __ daddu(AT, as_Register(base), AT);
2947 }
2948 __ swc1(src, AT, disp);
2949 }
2950 } else {
2951 if (scale == 0) {
2952 __ daddu(AT, as_Register(base), as_Register(index));
2953 } else {
2954 __ dsll(AT, as_Register(index), scale);
2955 __ daddu(AT, as_Register(base), AT);
2956 }
2957 __ move(T9, disp);
2958 if( UseLoongsonISA ) {
2959 __ gsswxc1(src, AT, T9, 0);
2960 } else {
2961 __ daddu(AT, AT, T9);
2962 __ swc1(src, AT, 0);
2963 }
2964 }
2965 } else {
2966 if( Assembler::is_simm16(disp) ) {
2967 __ swc1(src, as_Register(base), disp);
2968 } else {
2969 __ move(T9, disp);
2970 if( UseLoongsonISA ) {
2971 __ gslwxc1(src, as_Register(base), T9, 0);
2972 } else {
2973 __ daddu(AT, as_Register(base), T9);
2974 __ swc1(src, AT, 0);
2975 }
2976 }
2977 }
2978 %}
  // Load Double (64-bit FP) from memory into an FP register.
  // Loongson gsldxc1 (indexed FP doubleword load) used when disp fits simm8.
  // NOTE(review): the fallback paths use addu for the address sum while the
  // index-combining code uses daddu — confirm intended on 64-bit.
  enc_class load_D_enc (regD dst, memory mem) %{
    MacroAssembler _masm(&cbuf);
    int base = $mem$$base;
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;
    FloatRegister dst_reg = as_FloatRegister($dst$$reg);

    if( index != 0 ) {
      if( Assembler::is_simm16(disp) ) {
        if( UseLoongsonISA && Assembler::is_simm(disp, 8) ) {
          if (scale == 0) {
            __ gsldxc1(dst_reg, as_Register(base), as_Register(index), disp);
          } else {
            __ dsll(AT, as_Register(index), scale);
            __ gsldxc1(dst_reg, as_Register(base), AT, disp);
          }
        } else {
          if (scale == 0) {
            __ daddu(AT, as_Register(base), as_Register(index));
          } else {
            __ dsll(AT, as_Register(index), scale);
            __ daddu(AT, as_Register(base), AT);
          }
          __ ldc1(dst_reg, AT, disp);
        }
      } else {
        if (scale == 0) {
          __ daddu(AT, as_Register(base), as_Register(index));
        } else {
          __ dsll(AT, as_Register(index), scale);
          __ daddu(AT, as_Register(base), AT);
        }
        __ move(T9, disp);
        if( UseLoongsonISA ) {
          __ gsldxc1(dst_reg, AT, T9, 0);
        } else {
          __ addu(AT, AT, T9);
          __ ldc1(dst_reg, AT, 0);
        }
      }
    } else {
      if( Assembler::is_simm16(disp) ) {
        __ ldc1(dst_reg, as_Register(base), disp);
      } else {
        __ move(T9, disp);
        if( UseLoongsonISA ) {
          __ gsldxc1(dst_reg, as_Register(base), T9, 0);
        } else {
          __ addu(AT, as_Register(base), T9);
          __ ldc1(dst_reg, AT, 0);
        }
      }
    }
  %}
  // Store Double (64-bit FP) from an FP register to memory.
  // Loongson gssdxc1 (indexed FP doubleword store) used when disp fits simm8.
  enc_class store_D_reg_enc (memory mem, regD src) %{
    MacroAssembler _masm(&cbuf);
    int base = $mem$$base;
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;
    FloatRegister src_reg = as_FloatRegister($src$$reg);

    if( index != 0 ) {
      if( Assembler::is_simm16(disp) ) {
        if( UseLoongsonISA && Assembler::is_simm(disp, 8) ) {
          if (scale == 0) {
            __ gssdxc1(src_reg, as_Register(base), as_Register(index), disp);
          } else {
            __ dsll(AT, as_Register(index), scale);
            __ gssdxc1(src_reg, as_Register(base), AT, disp);
          }
        } else {
          if (scale == 0) {
            __ daddu(AT, as_Register(base), as_Register(index));
          } else {
            __ dsll(AT, as_Register(index), scale);
            __ daddu(AT, as_Register(base), AT);
          }
          __ sdc1(src_reg, AT, disp);
        }
      } else {
        if (scale == 0) {
          __ daddu(AT, as_Register(base), as_Register(index));
        } else {
          __ dsll(AT, as_Register(index), scale);
          __ daddu(AT, as_Register(base), AT);
        }
        __ move(T9, disp);
        if( UseLoongsonISA ) {
          __ gssdxc1(src_reg, AT, T9, 0);
        } else {
          __ addu(AT, AT, T9);
          __ sdc1(src_reg, AT, 0);
        }
      }
    } else {
      if( Assembler::is_simm16(disp) ) {
        __ sdc1(src_reg, as_Register(base), disp);
      } else {
        __ move(T9, disp);
        if( UseLoongsonISA ) {
          __ gssdxc1(src_reg, as_Register(base), T9, 0);
        } else {
          __ addu(AT, as_Register(base), T9);
          __ sdc1(src_reg, AT, 0);
        }
      }
    }
  %}
  // Call into the runtime (Java_To_Runtime / Java_To_Runtime_Leaf): emit a
  // relocated li48 of the target address into T9, then jalr + delay-slot nop.
  enc_class Java_To_Runtime (method meth) %{ // CALL Java_To_Runtime, Java_To_Runtime_Leaf
    MacroAssembler _masm(&cbuf);
    // This is the instruction starting address for relocation info.
    __ block_comment("Java_To_Runtime");
    cbuf.set_insts_mark();
    __ relocate(relocInfo::runtime_call_type);

    __ li48(T9, (long)$meth$$method);
    __ jalr(T9);
    __ nop();
  %}
  // Java static call: choose the relocation type by call kind, emit the call
  // through T9, and (for resolved methods) emit the java-to-interp stub.
  enc_class Java_Static_Call (method meth) %{ // JAVA STATIC CALL
    // CALL to fixup routine. Fixup routine uses ScopeDesc info to determine
    // who we intended to call.
    MacroAssembler _masm(&cbuf);
    cbuf.set_insts_mark();

    if ( !_method ) {
      // Runtime stub, not a real Java method.
      __ relocate(relocInfo::runtime_call_type);
    } else if(_optimized_virtual) {
      __ relocate(relocInfo::opt_virtual_call_type);
    } else {
      __ relocate(relocInfo::static_call_type);
    }

    __ li(T9, $meth$$method);
    __ jalr(T9);
    __ nop();
    if( _method ) { // Emit stub for static call
      emit_java_to_interp(cbuf);
    }
  %}
  // Java dynamic (inline-cache) call; see LIR_Assembler::ic_call() for the
  // matching C1 sequence.
  enc_class Java_Dynamic_Call (method meth) %{ // JAVA DYNAMIC CALL
    MacroAssembler _masm(&cbuf);
    __ block_comment("Java_Dynamic_Call");
    __ ic_call((address)$meth$$method);
  %}
// Convert the result left in AT by fast_lock/fast_unlock into the flags
// register: flags := 0 if AT == 0, else flags := 0xFFFFFFFF.
enc_class Set_Flags_After_Fast_Lock_Unlock(FlagsReg cr) %{
  Register flags = $cr$$Register;
  Label L;

  MacroAssembler _masm(&cbuf);

  __ addu(flags, R0, R0);       // flags = 0
  __ beq(AT, R0, L);            // AT == 0 -> leave flags as 0
  __ delayed()->nop();
  __ move(flags, 0xFFFFFFFF);   // AT != 0 -> flags = -1
  __ bind(L);
%}
// Slow-path subtype check: result := 0 if 'sub' is a subtype of 'super',
// 1 otherwise.  Scratch registers: 'tmp' operand (length) and T9.
enc_class enc_PartialSubtypeCheck(mRegP result, mRegP sub, mRegP super, mRegI tmp) %{
  Register result = $result$$Register;
  Register sub = $sub$$Register;
  Register super = $super$$Register;
  Register length = $tmp$$Register;
  Register tmp = T9;
  Label miss;

  /* 2012/9/28 Jin: result may be the same as sub, so 'result' must not be
   * written before the check consumes 'sub':
   * 47c   B40: #  B21 B41 <- B20  Freq: 0.155379
   * 47c     partialSubtypeCheck result=S1, sub=S1, super=S3, length=S0
   * 4bc     mov   S2, NULL #@loadConP
   * 4c0     beq   S1, S2, B21 #@branchConP  P=0.999999 C=-1.000000
   */
  MacroAssembler _masm(&cbuf);
  Label done;
  __ check_klass_subtype_slow_path(sub, super, length, tmp,
                                   NULL, &miss,
                                   /*set_cond_codes:*/ true);
  /* 2013/7/22 Jin: Refer to X86_64's RDI (0 = hit, 1 = miss) */
  __ move(result, 0);
  __ b(done);
  __ nop();   // branch-delay slot

  __ bind(miss);
  __ move(result, 1);
  __ bind(done);
%}
3185 %}
3188 //---------MIPS FRAME--------------------------------------------------------------
3189 // Definition of frame structure and management information.
3190 //
3191 // S T A C K L A Y O U T Allocators stack-slot number
3192 // | (to get allocators register number
3193 // G Owned by | | v add SharedInfo::stack0)
3194 // r CALLER | |
3195 // o | +--------+ pad to even-align allocators stack-slot
3196 // w V | pad0 | numbers; owned by CALLER
3197 // t -----------+--------+----> Matcher::_in_arg_limit, unaligned
3198 // h ^ | in | 5
3199 // | | args | 4 Holes in incoming args owned by SELF
3200 // | | old | | 3
3201 // | | SP-+--------+----> Matcher::_old_SP, even aligned
3202 // v | | ret | 3 return address
3203 // Owned by +--------+
3204 // Self | pad2 | 2 pad to align old SP
3205 // | +--------+ 1
3206 // | | locks | 0
3207 // | +--------+----> SharedInfo::stack0, even aligned
3208 // | | pad1 | 11 pad to align new SP
3209 // | +--------+
3210 // | | | 10
3211 // | | spills | 9 spills
3212 // V | | 8 (pad0 slot for callee)
3213 // -----------+--------+----> Matcher::_out_arg_limit, unaligned
3214 // ^ | out | 7
3215 // | | args | 6 Holes in outgoing args owned by CALLEE
3216 // Owned by new | |
3217 // Callee SP-+--------+----> Matcher::_new_SP, even aligned
3218 // | |
3219 //
3220 // Note 1: Only region 8-11 is determined by the allocator. Region 0-5 is
3221 // known from SELF's arguments and the Java calling convention.
3222 // Region 6-7 is determined per call site.
3223 // Note 2: If the calling convention leaves holes in the incoming argument
3224 // area, those holes are owned by SELF. Holes in the outgoing area
//            are owned by the CALLEE.  Holes should not be necessary in the
3226 // incoming area, as the Java calling convention is completely under
3227 // the control of the AD file. Doubles can be sorted and packed to
//            avoid holes.  Holes in the outgoing arguments may be necessary for
3229 // varargs C calling conventions.
3230 // Note 3: Region 0-3 is even aligned, with pad2 as needed. Region 3-5 is
3231 // even aligned with pad0 as needed.
3232 // Region 6 is even aligned. Region 6-7 is NOT even aligned;
3233 // region 6-11 is even aligned; it may be padded out more so that
3234 // the region from SP to FP meets the minimum stack alignment.
3235 // Note 4: For I2C adapters, the incoming FP may not meet the minimum stack
3236 // alignment. Region 11, pad1, may be dynamically extended so that
3237 // SP meets the minimum alignment.
frame %{
  // Stack grows toward lower addresses.
  stack_direction(TOWARDS_LOW);

  // These two registers define part of the calling convention
  // between compiled code and the interpreter.
  // SEE StartI2CNode::calling_convention & StartC2INode::calling_convention & StartOSRNode::calling_convention
  // for more information.  by yjl 3/16/2006

  inline_cache_reg(T1);                // Inline Cache Register
  interpreter_method_oop_reg(S3);      // Method Oop Register when calling interpreter
  /*
  inline_cache_reg(T1);                // Inline Cache Register or methodOop for I2C
  interpreter_arg_ptr_reg(A0);         // Argument pointer for I2C adapters
  */

  // Optional: name the operand used by cisc-spilling to access [stack_pointer + offset]
  cisc_spilling_operand_name(indOffset32);

  // Number of stack slots consumed by locking an object.
  // Generates Compile::sync_stack_slots.
  // LP64: a lock record occupies two 32-bit stack slots.
#ifdef _LP64
  sync_stack_slots(2);
#else
  sync_stack_slots(1);
#endif

  frame_pointer(SP);

  // Interpreter stores its frame pointer in a register which is
  // stored to the stack by I2CAdaptors.
  // I2CAdaptors convert from interpreted java to compiled java.
  interpreter_frame_pointer(FP);

  // Generates Matcher::stack_alignment.
  stack_alignment(StackAlignmentInBytes); //wordSize = sizeof(char*);

  // Number of stack slots between incoming argument block and the start of
  // a new frame.  The PROLOG must add this many slots to the stack.  The
  // EPILOG must remove this many slots.
  // Generates Matcher::in_preserve_stack_slots.
  //in_preserve_stack_slots(VerifyStackAtCalls + 2); //Now VerifyStackAtCalls is defined as false ! Leave one stack slot for ra and fp
  in_preserve_stack_slots(4); //Now VerifyStackAtCalls is defined as false ! Leave two stack slots for ra and fp

  // Number of outgoing stack slots killed above the out_preserve_stack_slots
  // for calls to C.  Supports the var-args backing area for register parms.
  varargs_C_out_slots_killed(0);

  // The after-PROLOG location of the return address.  Location of
  // return address specifies a type (REG or STACK) and a number
  // representing the register number (i.e. - use a register name) or
  // stack slot.
  // Ret Addr is on stack in slot 0 if no locks or verification or alignment.
  // Otherwise, it is above the locks and verification slot and alignment word
  //return_addr(STACK -1+ round_to(1+VerifyStackAtCalls+Compile::current()->sync()*Compile::current()->sync_stack_slots(),WordsPerLong));
  return_addr(REG RA);

  // Body of function which returns an integer array locating
  // arguments either in registers or in stack slots.  Passed an array
  // of ideal registers called "sig" and a "length" count.  Stack-slot
  // offsets are based on outgoing arguments, i.e. a CALLER setting up
  // arguments for a CALLEE.  Incoming stack arguments are
  // automatically biased by the preserve_stack_slots field above.

  // Generated into Matcher::calling_convention(OptoRegPair *sig, uint length, bool is_outgoing).
  // StartNode::calling_convention calls this.  by yjl 3/16/2006
  calling_convention %{
    SharedRuntime::java_calling_convention(sig_bt, regs, length, false);
  %}

  // Same as above but for native (C) calls.
  // SEE CallRuntimeNode::calling_convention for more information.  by yjl 3/16/2006
  c_calling_convention %{
    (void) SharedRuntime::c_calling_convention(sig_bt, regs, /*regs2=*/NULL, length);
  %}

  // Location of C & interpreter return values.
  // register(s) contain(s) return value for Op_StartI2C and Op_StartOSR.
  // SEE Matcher::match.  by yjl 3/16/2006
  // Integer/pointer results in V0 (+V0_H for 64-bit), FP results in F0 (+F0_H).
  c_return_value %{
    assert( ideal_reg >= Op_RegI && ideal_reg <= Op_RegL, "only return normal values" );
    /* -- , -- , Op_RegN, Op_RegI, Op_RegP, Op_RegF, Op_RegD, Op_RegL */
    static int lo[Op_RegL+1] = { 0, 0, V0_num, V0_num, V0_num, F0_num, F0_num, V0_num };
    static int hi[Op_RegL+1] = { 0, 0, OptoReg::Bad, OptoReg::Bad, V0_H_num, OptoReg::Bad, F0_H_num, V0_H_num };
    return OptoRegPair(hi[ideal_reg],lo[ideal_reg]);
  %}

  // Location of compiled-Java return values.
  // register(s) contain(s) return value for Op_StartC2I and Op_Start.
  // SEE Matcher::match.  by yjl 3/16/2006
  // Identical table to c_return_value above.
  return_value %{
    assert( ideal_reg >= Op_RegI && ideal_reg <= Op_RegL, "only return normal values" );
    /* -- , -- , Op_RegN, Op_RegI, Op_RegP, Op_RegF, Op_RegD, Op_RegL */
    static int lo[Op_RegL+1] = { 0, 0, V0_num, V0_num, V0_num, F0_num, F0_num, V0_num };
    static int hi[Op_RegL+1] = { 0, 0, OptoReg::Bad, OptoReg::Bad, V0_H_num, OptoReg::Bad, F0_H_num, V0_H_num};
    return OptoRegPair(hi[ideal_reg],lo[ideal_reg]);
  %}

%}
//----------ATTRIBUTES---------------------------------------------------------
//----------Operand Attributes-------------------------------------------------
op_attrib op_cost(0);        // Required cost attribute

//----------Instruction Attributes---------------------------------------------
ins_attrib ins_cost(100);       // Required cost attribute
ins_attrib ins_size(32);        // Required size attribute (in bits)
ins_attrib ins_pc_relative(0);  // Required PC Relative flag
ins_attrib ins_short_branch(0); // Required flag: is this instruction a
                                // non-matching short branch variant of some
                                // long branch?
ins_attrib ins_alignment(4);    // Required alignment attribute (must be a power of 2)
                                // specifies the alignment that some part of the instruction (not
                                // necessarily the start) requires.  If > 1, a compute_padding()
                                // function must be provided for the instruction
//----------OPERANDS-----------------------------------------------------------
// Operand definitions must precede instruction definitions for correct parsing
// in the ADLC because operands constitute user defined types which are used in
// instruction definitions.

// Vectors
// 64-bit vector operand, allocated in the double-FP register class.
operand vecD() %{
  constraint(ALLOC_IN_RC(dbl_reg));
  match(VecD);

  format %{ %}
  interface(REG_INTER);
%}

// Flags register, used as output of compare instructions
operand FlagsReg() %{
  constraint(ALLOC_IN_RC(mips_flags));
  match(RegFlags);

  format %{ "EFLAGS" %}
  interface(REG_INTER);
%}
//----------Simple Operands----------------------------------------------------
//TODO: Should we need to define some more special immediate number ?
// Immediate Operands

// Integer Immediate: any 32-bit integer constant (general fallback).
operand immI() %{
  match(ConI);
  //TODO: should not match immI8 here LEE
  match(immI8);

  op_cost(20);
  format %{ %}
  interface(CONST_INTER);
%}

// Long Immediate 8-bit: [-128, 127].
// NOTE(review): a long immediate defined in the middle of the integer
// immediate section -- placement only, no functional impact.
operand immL8()
%{
  predicate(-0x80L <= n->get_long() && n->get_long() < 0x80L);
  match(ConL);

  op_cost(5);
  format %{ %}
  interface(CONST_INTER);
%}

// Constant for test vs zero
operand immI0() %{
  predicate(n->get_int() == 0);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Constant for increment
operand immI1() %{
  predicate(n->get_int() == 1);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Constant for decrement
operand immI_M1() %{
  predicate(n->get_int() == -1);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Largest 32-bit signed value (Integer.MAX_VALUE, 2^31 - 1).
operand immI_MaxI() %{
  predicate(n->get_int() == 2147483647);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Valid scale values for addressing modes: [0, 3].
operand immI2() %{
  predicate(0 <= n->get_int() && (n->get_int() <= 3));
  match(ConI);

  format %{ %}
  interface(CONST_INTER);
%}

// 8-bit signed integer immediate: [-128, 127].
operand immI8() %{
  predicate((-128 <= n->get_int()) && (n->get_int() <= 127));
  match(ConI);

  op_cost(5);
  format %{ %}
  interface(CONST_INTER);
%}

// 16-bit signed integer immediate (fits a MIPS simm16 field).
operand immI16() %{
  predicate((-32768 <= n->get_int()) && (n->get_int() <= 32767));
  match(ConI);

  op_cost(10);
  format %{ %}
  interface(CONST_INTER);
%}

// Constant for long shifts
operand immI_32() %{
  predicate( n->get_int() == 32 );
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

operand immI_63() %{
  predicate( n->get_int() == 63 );
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Shift amount for 32-bit shifts: [0, 31].
operand immI_0_31() %{
  predicate( n->get_int() >= 0 && n->get_int() <= 31 );
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Operand for non-negative integer mask (contiguous low-bit mask,
// as recognized by Assembler::is_int_mask).
operand immI_nonneg_mask() %{
  predicate( (n->get_int() >= 0) && (Assembler::is_int_mask(n->get_int()) != -1) );
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Shift amount for 64-bit shifts needing the "+32" encoding: [32, 63].
operand immI_32_63() %{
  predicate( n->get_int() >= 32 && n->get_int() <= 63 );
  match(ConI);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}

// Immediate usable for subtraction: [-32767, 32768], i.e. the range whose
// negation fits a signed 16-bit immediate (sub x, imm => addiu x, -imm).
operand immI16_sub() %{
  predicate((-32767 <= n->get_int()) && (n->get_int() <= 32768));
  match(ConI);

  op_cost(10);
  format %{ %}
  interface(CONST_INTER);
%}

// Non-negative simm16: [0, 32767].
operand immI_0_32767() %{
  predicate( n->get_int() >= 0 && n->get_int() <= 32767 );
  match(ConI);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}

// Unsigned 16-bit immediate: [0, 65535].
operand immI_0_65535() %{
  predicate( n->get_int() >= 0 && n->get_int() <= 65535 );
  match(ConI);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}

operand immI_1() %{
  predicate( n->get_int() == 1 );
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

operand immI_2() %{
  predicate( n->get_int() == 2 );
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

operand immI_3() %{
  predicate( n->get_int() == 3 );
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

operand immI_7() %{
  predicate( n->get_int() == 7 );
  match(ConI);

  format %{ %}
  interface(CONST_INTER);
%}

// Immediates for special shifts (sign extend)

// Constants for increment
operand immI_16() %{
  predicate( n->get_int() == 16 );
  match(ConI);

  format %{ %}
  interface(CONST_INTER);
%}

operand immI_24() %{
  predicate( n->get_int() == 24 );
  match(ConI);

  format %{ %}
  interface(CONST_INTER);
%}

// Constant for byte-wide masking
operand immI_255() %{
  predicate( n->get_int() == 255 );
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Constant for half-word-wide masking (0xFFFF).
operand immI_65535() %{
  predicate( n->get_int() == 65535 );
  match(ConI);

  op_cost(5);
  format %{ %}
  interface(CONST_INTER);
%}

// 0x10000 (1 << 16).
operand immI_65536() %{
  predicate( n->get_int() == 65536 );
  match(ConI);

  op_cost(5);
  format %{ %}
  interface(CONST_INTER);
%}

// 0xFFFF0000 viewed as signed (-65536): high-half-word mask.
operand immI_M65536() %{
  predicate( n->get_int() == -65536 );
  match(ConI);

  op_cost(5);
  format %{ %}
  interface(CONST_INTER);
%}
// Pointer Immediate: any pointer constant (general fallback).
operand immP() %{
  match(ConP);

  op_cost(10);
  format %{ %}
  interface(CONST_INTER);
%}

// Non-relocatable pointer that fits in the low 31 bits.
operand immP31()
%{
  predicate(n->as_Type()->type()->reloc() == relocInfo::none
            && (n->get_ptr() >> 31) == 0);
  match(ConP);

  op_cost(5);
  format %{ %}
  interface(CONST_INTER);
%}

// NULL Pointer Immediate
operand immP0() %{
  predicate( n->get_ptr() == 0 );
  match(ConP);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}

// Pointer Immediate: 64-bit, materialized inline (no predicate; default case).
operand immP_set() %{
  match(ConP);

  op_cost(5);
  // formats are generated automatically for constants and base registers
  format %{ %}
  interface(CONST_INTER);
%}

// Pointer Immediate: 64-bit, better loaded from the constant pool: either an
// oop (needs relocation) or a value taking more than 3 set-up instructions.
operand immP_load() %{
  predicate(n->bottom_type()->isa_oop_ptr() || (MacroAssembler::insts_for_set64(n->get_ptr()) > 3));
  match(ConP);

  op_cost(5);
  // formats are generated automatically for constants and base registers
  format %{ %}
  interface(CONST_INTER);
%}

// Pointer Immediate: 64-bit non-oop value cheap to materialize inline
// (at most 3 instructions).  Complement of immP_load.
operand immP_no_oop_cheap() %{
  predicate(!n->bottom_type()->isa_oop_ptr() && (MacroAssembler::insts_for_set64(n->get_ptr()) <= 3));
  match(ConP);

  op_cost(5);
  // formats are generated automatically for constants and base registers
  format %{ %}
  interface(CONST_INTER);
%}

// Pointer for polling page (safepoint polling).
operand immP_poll() %{
  predicate(n->get_ptr() != 0 && n->get_ptr() == (intptr_t)os::get_polling_page());
  match(ConP);
  op_cost(5);

  format %{ %}
  interface(CONST_INTER);
%}

// Narrow (compressed) oop immediate.
operand immN() %{
  match(ConN);

  op_cost(10);
  format %{ %}
  interface(CONST_INTER);
%}

// Narrow (compressed) klass immediate.
operand immNKlass() %{
  match(ConNKlass);

  op_cost(10);
  format %{ %}
  interface(CONST_INTER);
%}

// NULL narrow-oop Immediate.
operand immN0() %{
  predicate(n->get_narrowcon() == 0);
  match(ConN);

  op_cost(5);
  format %{ %}
  interface(CONST_INTER);
%}
// Long Immediate: any 64-bit constant (general fallback).
operand immL() %{
  match(ConL);

  op_cost(20);
  format %{ %}
  interface(CONST_INTER);
%}

// Long Immediate zero
operand immL0() %{
  predicate( n->get_long() == 0L );
  match(ConL);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}

operand immL7() %{
  predicate( n->get_long() == 7L );
  match(ConL);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}

operand immL_M1() %{
  predicate( n->get_long() == -1L );
  match(ConL);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}

// -8: mask with bits 0..2 zero
operand immL_M8() %{
  predicate( n->get_long() == -8L );
  match(ConL);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}

// -5: mask with bit 2 zero
operand immL_M5() %{
  predicate( n->get_long() == -5L );
  match(ConL);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}

// -7: mask with bits 1..2 zero
operand immL_M7() %{
  predicate( n->get_long() == -7L );
  match(ConL);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}

// -4: mask with bits 0..1 zero
operand immL_M4() %{
  predicate( n->get_long() == -4L );
  match(ConL);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}

// -121: mask with bits 3..6 zero
operand immL_M121() %{
  predicate( n->get_long() == -121L );
  match(ConL);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}

// Long immediate from 0 to 127.
// Used for a shorter form of long mul by 10.
operand immL_127() %{
  predicate((0 <= n->get_long()) && (n->get_long() <= 127));
  match(ConL);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}

// Operand for non-negative long mask (contiguous low-bit mask,
// as recognized by Assembler::is_jlong_mask).
operand immL_nonneg_mask() %{
  predicate( (n->get_long() >= 0) && (Assembler::is_jlong_mask(n->get_long()) != -1) );
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Unsigned 16-bit long immediate: [0, 65535].
operand immL_0_65535() %{
  predicate( n->get_long() >= 0 && n->get_long() <= 65535 );
  match(ConL);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}

// Long Immediate: cheap (materialize in <= 3 instructions)
operand immL_cheap() %{
  predicate(MacroAssembler::insts_for_set64(n->get_long()) <= 3);
  match(ConL);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}

// Long Immediate: expensive (materialize in > 3 instructions)
operand immL_expensive() %{
  predicate(MacroAssembler::insts_for_set64(n->get_long()) > 3);
  match(ConL);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}

// 16-bit signed long immediate (fits a MIPS simm16 field).
operand immL16() %{
  predicate((-32768 <= n->get_long()) && (n->get_long() <= 32767));
  match(ConL);

  op_cost(10);
  format %{ %}
  interface(CONST_INTER);
%}

// Long immediate usable for subtraction: [-32767, 32768] -- the range whose
// negation fits a signed 16-bit immediate (see immI16_sub).
operand immL16_sub() %{
  predicate((-32767 <= n->get_long()) && (n->get_long() <= 32768));
  match(ConL);

  op_cost(10);
  format %{ %}
  interface(CONST_INTER);
%}

// Long Immediate: low 32-bit mask
operand immL_32bits() %{
  predicate(n->get_long() == 0xFFFFFFFFL);
  match(ConL);
  op_cost(20);

  format %{ %}
  interface(CONST_INTER);
%}

// Long Immediate 32-bit signed (value round-trips through int).
operand immL32()
%{
  predicate(n->get_long() == (int) (n->get_long()));
  match(ConL);

  op_cost(15);
  format %{ %}
  interface(CONST_INTER);
%}
// Single-precision floating-point zero (bit-pattern compare, so this
// matches +0.0f only, not -0.0f).
operand immF0() %{
  predicate(jint_cast(n->getf()) == 0);
  match(ConF);

  op_cost(5);
  format %{ %}
  interface(CONST_INTER);
%}

// Single-precision floating-point immediate (general case).
operand immF() %{
  match(ConF);

  op_cost(20);
  format %{ %}
  interface(CONST_INTER);
%}

// Double-precision floating-point zero (bit-pattern compare: +0.0 only).
operand immD0() %{
  predicate(jlong_cast(n->getd()) == 0);
  match(ConD);

  op_cost(5);
  format %{ %}
  interface(CONST_INTER);
%}

// Double-precision floating-point immediate (general case).
operand immD() %{
  match(ConD);

  op_cost(20);
  format %{ %}
  interface(CONST_INTER);
%}
// Register Operands
// Integer Register: any register in the int_reg class (general case).
operand mRegI() %{
  constraint(ALLOC_IN_RC(int_reg));
  match(RegI);

  format %{ %}
  interface(REG_INTER);
%}

// Integer register excluding the Ax registers (no_Ax_int_reg class).
operand no_Ax_mRegI() %{
  constraint(ALLOC_IN_RC(no_Ax_int_reg));
  match(RegI);
  match(mRegI);

  format %{ %}
  interface(REG_INTER);
%}

// The operands below pin an integer value to one specific machine register.
// Each constrains allocation to a single-register class and also matches
// mRegI so it can substitute wherever a general integer register is expected.

operand mS0RegI() %{
  constraint(ALLOC_IN_RC(s0_reg));
  match(RegI);
  match(mRegI);

  format %{ "S0" %}
  interface(REG_INTER);
%}

operand mS1RegI() %{
  constraint(ALLOC_IN_RC(s1_reg));
  match(RegI);
  match(mRegI);

  format %{ "S1" %}
  interface(REG_INTER);
%}

operand mS2RegI() %{
  constraint(ALLOC_IN_RC(s2_reg));
  match(RegI);
  match(mRegI);

  format %{ "S2" %}
  interface(REG_INTER);
%}

operand mS3RegI() %{
  constraint(ALLOC_IN_RC(s3_reg));
  match(RegI);
  match(mRegI);

  format %{ "S3" %}
  interface(REG_INTER);
%}

operand mS4RegI() %{
  constraint(ALLOC_IN_RC(s4_reg));
  match(RegI);
  match(mRegI);

  format %{ "S4" %}
  interface(REG_INTER);
%}

operand mS5RegI() %{
  constraint(ALLOC_IN_RC(s5_reg));
  match(RegI);
  match(mRegI);

  format %{ "S5" %}
  interface(REG_INTER);
%}

operand mS6RegI() %{
  constraint(ALLOC_IN_RC(s6_reg));
  match(RegI);
  match(mRegI);

  format %{ "S6" %}
  interface(REG_INTER);
%}

operand mS7RegI() %{
  constraint(ALLOC_IN_RC(s7_reg));
  match(RegI);
  match(mRegI);

  format %{ "S7" %}
  interface(REG_INTER);
%}


operand mT0RegI() %{
  constraint(ALLOC_IN_RC(t0_reg));
  match(RegI);
  match(mRegI);

  format %{ "T0" %}
  interface(REG_INTER);
%}

operand mT1RegI() %{
  constraint(ALLOC_IN_RC(t1_reg));
  match(RegI);
  match(mRegI);

  format %{ "T1" %}
  interface(REG_INTER);
%}

operand mT2RegI() %{
  constraint(ALLOC_IN_RC(t2_reg));
  match(RegI);
  match(mRegI);

  format %{ "T2" %}
  interface(REG_INTER);
%}

operand mT3RegI() %{
  constraint(ALLOC_IN_RC(t3_reg));
  match(RegI);
  match(mRegI);

  format %{ "T3" %}
  interface(REG_INTER);
%}

operand mT8RegI() %{
  constraint(ALLOC_IN_RC(t8_reg));
  match(RegI);
  match(mRegI);

  format %{ "T8" %}
  interface(REG_INTER);
%}

operand mT9RegI() %{
  constraint(ALLOC_IN_RC(t9_reg));
  match(RegI);
  match(mRegI);

  format %{ "T9" %}
  interface(REG_INTER);
%}

operand mA0RegI() %{
  constraint(ALLOC_IN_RC(a0_reg));
  match(RegI);
  match(mRegI);

  format %{ "A0" %}
  interface(REG_INTER);
%}

operand mA1RegI() %{
  constraint(ALLOC_IN_RC(a1_reg));
  match(RegI);
  match(mRegI);

  format %{ "A1" %}
  interface(REG_INTER);
%}

operand mA2RegI() %{
  constraint(ALLOC_IN_RC(a2_reg));
  match(RegI);
  match(mRegI);

  format %{ "A2" %}
  interface(REG_INTER);
%}

operand mA3RegI() %{
  constraint(ALLOC_IN_RC(a3_reg));
  match(RegI);
  match(mRegI);

  format %{ "A3" %}
  interface(REG_INTER);
%}

operand mA4RegI() %{
  constraint(ALLOC_IN_RC(a4_reg));
  match(RegI);
  match(mRegI);

  format %{ "A4" %}
  interface(REG_INTER);
%}

operand mA5RegI() %{
  constraint(ALLOC_IN_RC(a5_reg));
  match(RegI);
  match(mRegI);

  format %{ "A5" %}
  interface(REG_INTER);
%}

operand mA6RegI() %{
  constraint(ALLOC_IN_RC(a6_reg));
  match(RegI);
  match(mRegI);

  format %{ "A6" %}
  interface(REG_INTER);
%}

operand mA7RegI() %{
  constraint(ALLOC_IN_RC(a7_reg));
  match(RegI);
  match(mRegI);

  format %{ "A7" %}
  interface(REG_INTER);
%}

operand mV0RegI() %{
  constraint(ALLOC_IN_RC(v0_reg));
  match(RegI);
  match(mRegI);

  format %{ "V0" %}
  interface(REG_INTER);
%}

operand mV1RegI() %{
  constraint(ALLOC_IN_RC(v1_reg));
  match(RegI);
  match(mRegI);

  format %{ "V1" %}
  interface(REG_INTER);
%}
// Narrow-oop (compressed pointer) register: general case, any int register.
operand mRegN() %{
  constraint(ALLOC_IN_RC(int_reg));
  match(RegN);

  format %{ %}
  interface(REG_INTER);
%}

// The operands below pin a narrow oop to one specific machine register
// (same single-register classes as the mXxRegI operands above).

operand t0_RegN() %{
  constraint(ALLOC_IN_RC(t0_reg));
  match(RegN);
  match(mRegN);

  format %{ %}
  interface(REG_INTER);
%}

operand t1_RegN() %{
  constraint(ALLOC_IN_RC(t1_reg));
  match(RegN);
  match(mRegN);

  format %{ %}
  interface(REG_INTER);
%}

operand t2_RegN() %{
  constraint(ALLOC_IN_RC(t2_reg));
  match(RegN);
  match(mRegN);

  format %{ %}
  interface(REG_INTER);
%}

operand t3_RegN() %{
  constraint(ALLOC_IN_RC(t3_reg));
  match(RegN);
  match(mRegN);

  format %{ %}
  interface(REG_INTER);
%}

operand t8_RegN() %{
  constraint(ALLOC_IN_RC(t8_reg));
  match(RegN);
  match(mRegN);

  format %{ %}
  interface(REG_INTER);
%}

operand t9_RegN() %{
  constraint(ALLOC_IN_RC(t9_reg));
  match(RegN);
  match(mRegN);

  format %{ %}
  interface(REG_INTER);
%}

operand a0_RegN() %{
  constraint(ALLOC_IN_RC(a0_reg));
  match(RegN);
  match(mRegN);

  format %{ %}
  interface(REG_INTER);
%}

operand a1_RegN() %{
  constraint(ALLOC_IN_RC(a1_reg));
  match(RegN);
  match(mRegN);

  format %{ %}
  interface(REG_INTER);
%}

operand a2_RegN() %{
  constraint(ALLOC_IN_RC(a2_reg));
  match(RegN);
  match(mRegN);

  format %{ %}
  interface(REG_INTER);
%}

operand a3_RegN() %{
  constraint(ALLOC_IN_RC(a3_reg));
  match(RegN);
  match(mRegN);

  format %{ %}
  interface(REG_INTER);
%}

operand a4_RegN() %{
  constraint(ALLOC_IN_RC(a4_reg));
  match(RegN);
  match(mRegN);

  format %{ %}
  interface(REG_INTER);
%}

operand a5_RegN() %{
  constraint(ALLOC_IN_RC(a5_reg));
  match(RegN);
  match(mRegN);

  format %{ %}
  interface(REG_INTER);
%}

operand a6_RegN() %{
  constraint(ALLOC_IN_RC(a6_reg));
  match(RegN);
  match(mRegN);

  format %{ %}
  interface(REG_INTER);
%}

operand a7_RegN() %{
  constraint(ALLOC_IN_RC(a7_reg));
  match(RegN);
  match(mRegN);

  format %{ %}
  interface(REG_INTER);
%}

operand s0_RegN() %{
  constraint(ALLOC_IN_RC(s0_reg));
  match(RegN);
  match(mRegN);

  format %{ %}
  interface(REG_INTER);
%}

operand s1_RegN() %{
  constraint(ALLOC_IN_RC(s1_reg));
  match(RegN);
  match(mRegN);

  format %{ %}
  interface(REG_INTER);
%}

operand s2_RegN() %{
  constraint(ALLOC_IN_RC(s2_reg));
  match(RegN);
  match(mRegN);

  format %{ %}
  interface(REG_INTER);
%}

operand s3_RegN() %{
  constraint(ALLOC_IN_RC(s3_reg));
  match(RegN);
  match(mRegN);

  format %{ %}
  interface(REG_INTER);
%}

operand s4_RegN() %{
  constraint(ALLOC_IN_RC(s4_reg));
  match(RegN);
  match(mRegN);

  format %{ %}
  interface(REG_INTER);
%}

operand s5_RegN() %{
  constraint(ALLOC_IN_RC(s5_reg));
  match(RegN);
  match(mRegN);

  format %{ %}
  interface(REG_INTER);
%}

operand s6_RegN() %{
  constraint(ALLOC_IN_RC(s6_reg));
  match(RegN);
  match(mRegN);

  format %{ %}
  interface(REG_INTER);
%}

operand s7_RegN() %{
  constraint(ALLOC_IN_RC(s7_reg));
  match(RegN);
  match(mRegN);

  format %{ %}
  interface(REG_INTER);
%}

operand v0_RegN() %{
  constraint(ALLOC_IN_RC(v0_reg));
  match(RegN);
  match(mRegN);

  format %{ %}
  interface(REG_INTER);
%}

operand v1_RegN() %{
  constraint(ALLOC_IN_RC(v1_reg));
  match(RegN);
  match(mRegN);

  format %{ %}
  interface(REG_INTER);
%}
// Pointer Register: general case, any register in the p_reg class.
operand mRegP() %{
  constraint(ALLOC_IN_RC(p_reg));
  match(RegP);

  format %{ %}
  interface(REG_INTER);
%}

// Pointer register excluding T8 (no_T8_p_reg class).
operand no_T8_mRegP() %{
  constraint(ALLOC_IN_RC(no_T8_p_reg));
  match(RegP);
  match(mRegP);

  format %{ %}
  interface(REG_INTER);
%}

// The operands below pin a pointer to one specific machine register
// (single-register "long" classes).  Each also matches mRegP and -- except
// t8_RegP, since T8 is excluded from that class -- no_T8_mRegP.

operand s0_RegP()
%{
  constraint(ALLOC_IN_RC(s0_long_reg));
  match(RegP);
  match(mRegP);
  match(no_T8_mRegP);

  format %{ %}
  interface(REG_INTER);
%}

operand s1_RegP()
%{
  constraint(ALLOC_IN_RC(s1_long_reg));
  match(RegP);
  match(mRegP);
  match(no_T8_mRegP);

  format %{ %}
  interface(REG_INTER);
%}

operand s2_RegP()
%{
  constraint(ALLOC_IN_RC(s2_long_reg));
  match(RegP);
  match(mRegP);
  match(no_T8_mRegP);

  format %{ %}
  interface(REG_INTER);
%}

operand s3_RegP()
%{
  constraint(ALLOC_IN_RC(s3_long_reg));
  match(RegP);
  match(mRegP);
  match(no_T8_mRegP);

  format %{ %}
  interface(REG_INTER);
%}

operand s4_RegP()
%{
  constraint(ALLOC_IN_RC(s4_long_reg));
  match(RegP);
  match(mRegP);
  match(no_T8_mRegP);

  format %{ %}
  interface(REG_INTER);
%}

operand s5_RegP()
%{
  constraint(ALLOC_IN_RC(s5_long_reg));
  match(RegP);
  match(mRegP);
  match(no_T8_mRegP);

  format %{ %}
  interface(REG_INTER);
%}

operand s6_RegP()
%{
  constraint(ALLOC_IN_RC(s6_long_reg));
  match(RegP);
  match(mRegP);
  match(no_T8_mRegP);

  format %{ %}
  interface(REG_INTER);
%}

operand s7_RegP()
%{
  constraint(ALLOC_IN_RC(s7_long_reg));
  match(RegP);
  match(mRegP);
  match(no_T8_mRegP);

  format %{ %}
  interface(REG_INTER);
%}

operand t0_RegP()
%{
  constraint(ALLOC_IN_RC(t0_long_reg));
  match(RegP);
  match(mRegP);
  match(no_T8_mRegP);

  format %{ %}
  interface(REG_INTER);
%}

operand t1_RegP()
%{
  constraint(ALLOC_IN_RC(t1_long_reg));
  match(RegP);
  match(mRegP);
  match(no_T8_mRegP);

  format %{ %}
  interface(REG_INTER);
%}

operand t2_RegP()
%{
  constraint(ALLOC_IN_RC(t2_long_reg));
  match(RegP);
  match(mRegP);
  match(no_T8_mRegP);

  format %{ %}
  interface(REG_INTER);
%}

operand t3_RegP()
%{
  constraint(ALLOC_IN_RC(t3_long_reg));
  match(RegP);
  match(mRegP);
  match(no_T8_mRegP);

  format %{ %}
  interface(REG_INTER);
%}

// T8 is not part of the no_T8 class, so no match(no_T8_mRegP) here.
operand t8_RegP()
%{
  constraint(ALLOC_IN_RC(t8_long_reg));
  match(RegP);
  match(mRegP);

  format %{ %}
  interface(REG_INTER);
%}

operand t9_RegP()
%{
  constraint(ALLOC_IN_RC(t9_long_reg));
  match(RegP);
  match(mRegP);
  match(no_T8_mRegP);

  format %{ %}
  interface(REG_INTER);
%}

operand a0_RegP()
%{
  constraint(ALLOC_IN_RC(a0_long_reg));
  match(RegP);
  match(mRegP);
  match(no_T8_mRegP);

  format %{ %}
  interface(REG_INTER);
%}

operand a1_RegP()
%{
  constraint(ALLOC_IN_RC(a1_long_reg));
  match(RegP);
  match(mRegP);
  match(no_T8_mRegP);

  format %{ %}
  interface(REG_INTER);
%}

operand a2_RegP()
%{
  constraint(ALLOC_IN_RC(a2_long_reg));
  match(RegP);
  match(mRegP);
  match(no_T8_mRegP);

  format %{ %}
  interface(REG_INTER);
%}

operand a3_RegP()
%{
  constraint(ALLOC_IN_RC(a3_long_reg));
  match(RegP);
  match(mRegP);
  match(no_T8_mRegP);

  format %{ %}
  interface(REG_INTER);
%}

operand a4_RegP()
%{
  constraint(ALLOC_IN_RC(a4_long_reg));
  match(RegP);
  match(mRegP);
  match(no_T8_mRegP);

  format %{ %}
  interface(REG_INTER);
%}


operand a5_RegP()
%{
  constraint(ALLOC_IN_RC(a5_long_reg));
  match(RegP);
  match(mRegP);
  match(no_T8_mRegP);

  format %{ %}
  interface(REG_INTER);
%}
4662 operand a6_RegP()
4663 %{
4664 constraint(ALLOC_IN_RC(a6_long_reg));
4665 match(RegP);
4666 match(mRegP);
4667 match(no_T8_mRegP);
4669 format %{ %}
4670 interface(REG_INTER);
4671 %}
4673 operand a7_RegP()
4674 %{
4675 constraint(ALLOC_IN_RC(a7_long_reg));
4676 match(RegP);
4677 match(mRegP);
4678 match(no_T8_mRegP);
4680 format %{ %}
4681 interface(REG_INTER);
4682 %}
4684 operand v0_RegP()
4685 %{
4686 constraint(ALLOC_IN_RC(v0_long_reg));
4687 match(RegP);
4688 match(mRegP);
4689 match(no_T8_mRegP);
4691 format %{ %}
4692 interface(REG_INTER);
4693 %}
4695 operand v1_RegP()
4696 %{
4697 constraint(ALLOC_IN_RC(v1_long_reg));
4698 match(RegP);
4699 match(mRegP);
4700 match(no_T8_mRegP);
4702 format %{ %}
4703 interface(REG_INTER);
4704 %}
4706 /*
4707 operand mSPRegP(mRegP reg) %{
4708 constraint(ALLOC_IN_RC(sp_reg));
4709 match(reg);
4711 format %{ "SP" %}
4712 interface(REG_INTER);
4713 %}
4715 operand mFPRegP(mRegP reg) %{
4716 constraint(ALLOC_IN_RC(fp_reg));
4717 match(reg);
4719 format %{ "FP" %}
4720 interface(REG_INTER);
4721 %}
4722 */
// --- Long register operands -----------------------------------------------
// mRegL is the general 64-bit integer operand; the per-register variants
// below pin allocation to a single register (same scheme as the _RegP
// operands above). NOTE(review): s5RegL/s6RegL are absent — presumably S5/S6
// are reserved for VM-internal use on this port; confirm against the
// register definition block at the top of the file.
4724 operand mRegL() %{
4725 constraint(ALLOC_IN_RC(long_reg));
4726 match(RegL);
4728 format %{ %}
4729 interface(REG_INTER);
4730 %}
4732 operand v0RegL() %{
4733 constraint(ALLOC_IN_RC(v0_long_reg));
4734 match(RegL);
4735 match(mRegL);
4737 format %{ %}
4738 interface(REG_INTER);
4739 %}
4741 operand v1RegL() %{
4742 constraint(ALLOC_IN_RC(v1_long_reg));
4743 match(RegL);
4744 match(mRegL);
4746 format %{ %}
4747 interface(REG_INTER);
4748 %}
// NOTE(review): a0RegL is the only long operand with a non-empty format
// string ("A0"); siblings use the default. Harmless (format only affects
// debug/disassembly output) but inconsistent — consider emptying it.
4750 operand a0RegL() %{
4751 constraint(ALLOC_IN_RC(a0_long_reg));
4752 match(RegL);
4753 match(mRegL);
4755 format %{ "A0" %}
4756 interface(REG_INTER);
4757 %}
4759 operand a1RegL() %{
4760 constraint(ALLOC_IN_RC(a1_long_reg));
4761 match(RegL);
4762 match(mRegL);
4764 format %{ %}
4765 interface(REG_INTER);
4766 %}
4768 operand a2RegL() %{
4769 constraint(ALLOC_IN_RC(a2_long_reg));
4770 match(RegL);
4771 match(mRegL);
4773 format %{ %}
4774 interface(REG_INTER);
4775 %}
4777 operand a3RegL() %{
4778 constraint(ALLOC_IN_RC(a3_long_reg));
4779 match(RegL);
4780 match(mRegL);
4782 format %{ %}
4783 interface(REG_INTER);
4784 %}
4786 operand t0RegL() %{
4787 constraint(ALLOC_IN_RC(t0_long_reg));
4788 match(RegL);
4789 match(mRegL);
4791 format %{ %}
4792 interface(REG_INTER);
4793 %}
4795 operand t1RegL() %{
4796 constraint(ALLOC_IN_RC(t1_long_reg));
4797 match(RegL);
4798 match(mRegL);
4800 format %{ %}
4801 interface(REG_INTER);
4802 %}
4804 operand t2RegL() %{
4805 constraint(ALLOC_IN_RC(t2_long_reg));
4806 match(RegL);
4807 match(mRegL);
4809 format %{ %}
4810 interface(REG_INTER);
4811 %}
4813 operand t3RegL() %{
4814 constraint(ALLOC_IN_RC(t3_long_reg));
4815 match(RegL);
4816 match(mRegL);
4818 format %{ %}
4819 interface(REG_INTER);
4820 %}
4822 operand t8RegL() %{
4823 constraint(ALLOC_IN_RC(t8_long_reg));
4824 match(RegL);
4825 match(mRegL);
4827 format %{ %}
4828 interface(REG_INTER);
4829 %}
4831 operand a4RegL() %{
4832 constraint(ALLOC_IN_RC(a4_long_reg));
4833 match(RegL);
4834 match(mRegL);
4836 format %{ %}
4837 interface(REG_INTER);
4838 %}
4840 operand a5RegL() %{
4841 constraint(ALLOC_IN_RC(a5_long_reg));
4842 match(RegL);
4843 match(mRegL);
4845 format %{ %}
4846 interface(REG_INTER);
4847 %}
4849 operand a6RegL() %{
4850 constraint(ALLOC_IN_RC(a6_long_reg));
4851 match(RegL);
4852 match(mRegL);
4854 format %{ %}
4855 interface(REG_INTER);
4856 %}
4858 operand a7RegL() %{
4859 constraint(ALLOC_IN_RC(a7_long_reg));
4860 match(RegL);
4861 match(mRegL);
4863 format %{ %}
4864 interface(REG_INTER);
4865 %}
4867 operand s0RegL() %{
4868 constraint(ALLOC_IN_RC(s0_long_reg));
4869 match(RegL);
4870 match(mRegL);
4872 format %{ %}
4873 interface(REG_INTER);
4874 %}
4876 operand s1RegL() %{
4877 constraint(ALLOC_IN_RC(s1_long_reg));
4878 match(RegL);
4879 match(mRegL);
4881 format %{ %}
4882 interface(REG_INTER);
4883 %}
4885 operand s2RegL() %{
4886 constraint(ALLOC_IN_RC(s2_long_reg));
4887 match(RegL);
4888 match(mRegL);
4890 format %{ %}
4891 interface(REG_INTER);
4892 %}
4894 operand s3RegL() %{
4895 constraint(ALLOC_IN_RC(s3_long_reg));
4896 match(RegL);
4897 match(mRegL);
4899 format %{ %}
4900 interface(REG_INTER);
4901 %}
4903 operand s4RegL() %{
4904 constraint(ALLOC_IN_RC(s4_long_reg));
4905 match(RegL);
4906 match(mRegL);
4908 format %{ %}
4909 interface(REG_INTER);
4910 %}
4912 operand s7RegL() %{
4913 constraint(ALLOC_IN_RC(s7_long_reg));
4914 match(RegL);
4915 match(mRegL);
4917 format %{ %}
4918 interface(REG_INTER);
4919 %}
4921 // Floating register operands
4922 operand regF() %{
4923 constraint(ALLOC_IN_RC(flt_reg));
4924 match(RegF);
4926 format %{ %}
4927 interface(REG_INTER);
4928 %}
4930 //Double Precision Floating register operands
4931 operand regD() %{
4932 constraint(ALLOC_IN_RC(dbl_reg));
4933 match(RegD);
4935 format %{ %}
4936 interface(REG_INTER);
4937 %}
4939 //----------Memory Operands----------------------------------------------------
// Each memory operand maps an ideal address subtree onto the
// base/index/scale/disp quadruple of MEMORY_INTER. The *Narrow variants
// accept compressed-oop (DecodeN) bases and the *NarrowKlass variants
// compressed-klass (DecodeNKlass) bases; both are guarded by a predicate
// requiring a zero shift so the narrow value IS the address base.
4940 // Indirect Memory Operand
4941 operand indirect(mRegP reg) %{
4942 constraint(ALLOC_IN_RC(p_reg));
4943 match(reg);
4945 format %{ "[$reg] @ indirect" %}
4946 interface(MEMORY_INTER) %{
4947 base($reg);
4948 index(0x0); /* NO_INDEX */
4949 scale(0x0);
4950 disp(0x0);
4951 %}
4952 %}
4954 // Indirect Memory Plus Short Offset Operand
4955 operand indOffset8(mRegP reg, immL8 off)
4956 %{
4957 constraint(ALLOC_IN_RC(p_reg));
4958 match(AddP reg off);
4960 format %{ "[$reg + $off (8-bit)] @ indOffset8" %}
4961 interface(MEMORY_INTER) %{
4962 base($reg);
4963 index(0x0); /* NO_INDEX */
4964 scale(0x0);
4965 disp($off);
4966 %}
4967 %}
4969 // Indirect Memory Times Scale Plus Index Register
4970 operand indIndexScale(mRegP reg, mRegL lreg, immI2 scale)
4971 %{
4972 constraint(ALLOC_IN_RC(p_reg));
4973 match(AddP reg (LShiftL lreg scale));
4975 op_cost(10);
4976 format %{"[$reg + $lreg << $scale] @ indIndexScale" %}
4977 interface(MEMORY_INTER) %{
4978 base($reg);
4979 index($lreg);
4980 scale($scale);
4981 disp(0x0);
4982 %}
4983 %}
4986 // [base + index + offset]
4987 operand baseIndexOffset8(mRegP base, mRegL index, immL8 off)
4988 %{
4989 constraint(ALLOC_IN_RC(p_reg));
4990 op_cost(5);
4991 match(AddP (AddP base index) off);
4993 format %{ "[$base + $index + $off (8-bit)] @ baseIndexOffset8" %}
4994 interface(MEMORY_INTER) %{
4995 base($base);
4996 index($index);
4997 scale(0x0);
4998 disp($off);
4999 %}
5000 %}
5002 // [base + index + offset]
// Same as baseIndexOffset8 but the index arrives as a 32-bit int that the
// ideal graph widens with ConvI2L.
5003 operand baseIndexOffset8_convI2L(mRegP base, mRegI index, immL8 off)
5004 %{
5005 constraint(ALLOC_IN_RC(p_reg));
5006 op_cost(5);
5007 match(AddP (AddP base (ConvI2L index)) off);
5009 format %{ "[$base + $index + $off (8-bit)] @ baseIndexOffset8_convI2L" %}
5010 interface(MEMORY_INTER) %{
5011 base($base);
5012 index($index);
5013 scale(0x0);
5014 disp($off);
5015 %}
5016 %}
5018 // Indirect Memory Times Scale Plus Index Register Plus Offset Operand
5019 operand indIndexScaleOffset8(mRegP reg, immL8 off, mRegL lreg, immI2 scale)
5020 %{
5021 constraint(ALLOC_IN_RC(p_reg));
5022 match(AddP (AddP reg (LShiftL lreg scale)) off);
5024 op_cost(10);
5025 format %{"[$reg + $off + $lreg << $scale] @ indIndexScaleOffset8" %}
5026 interface(MEMORY_INTER) %{
5027 base($reg);
5028 index($lreg);
5029 scale($scale);
5030 disp($off);
5031 %}
5032 %}
5034 operand indIndexScaleOffset8_convI2L(mRegP reg, immL8 off, mRegI ireg, immI2 scale)
5035 %{
5036 constraint(ALLOC_IN_RC(p_reg));
5037 match(AddP (AddP reg (LShiftL (ConvI2L ireg) scale)) off);
5039 op_cost(10);
5040 format %{"[$reg + $off + $ireg << $scale] @ indIndexScaleOffset8_convI2L" %}
5041 interface(MEMORY_INTER) %{
5042 base($reg);
5043 index($ireg);
5044 scale($scale);
5045 disp($off);
5046 %}
5047 %}
5049 // [base + index<<scale + offset]
5050 operand basePosIndexScaleOffset8(mRegP base, mRegI index, immL8 off, immI_0_31 scale)
5051 %{
5052 constraint(ALLOC_IN_RC(p_reg));
5053 //predicate(n->in(2)->in(3)->in(1)->as_Type()->type()->is_long()->_lo >= 0);
5054 op_cost(10);
5055 match(AddP (AddP base (LShiftL (ConvI2L index) scale)) off);
5057 format %{ "[$base + $index << $scale + $off (8-bit)] @ basePosIndexScaleOffset8" %}
5058 interface(MEMORY_INTER) %{
5059 base($base);
5060 index($index);
5061 scale($scale);
5062 disp($off);
5063 %}
5064 %}
5066 // Indirect Memory Times Scale Plus Index Register Plus Offset Operand
// Compressed-oop base: only legal when the narrow-oop shift is zero, so
// DecodeN is a no-op and the narrow value can be used directly as a base.
5067 operand indIndexScaleOffsetNarrow(mRegN reg, immL8 off, mRegL lreg, immI2 scale)
5068 %{
5069 predicate(Universe::narrow_oop_shift() == 0);
5070 constraint(ALLOC_IN_RC(p_reg));
5071 match(AddP (AddP (DecodeN reg) (LShiftL lreg scale)) off);
5073 op_cost(10);
5074 format %{"[$reg + $off + $lreg << $scale] @ indIndexScaleOffsetNarrow" %}
5075 interface(MEMORY_INTER) %{
5076 base($reg);
5077 index($lreg);
5078 scale($scale);
5079 disp($off);
5080 %}
5081 %}
5083 // [base + index<<scale + offset] for compressed Oops
5084 operand indPosIndexI2LScaleOffset8Narrow(mRegN base, mRegI index, immL8 off, immI_0_31 scale)
5085 %{
5086 constraint(ALLOC_IN_RC(p_reg));
5087 //predicate(Universe::narrow_oop_shift() == 0 && n->in(2)->in(3)->in(1)->as_Type()->type()->is_long()->_lo >= 0);
5088 predicate(Universe::narrow_oop_shift() == 0);
5089 op_cost(10);
5090 match(AddP (AddP (DecodeN base) (LShiftL (ConvI2L index) scale)) off);
5092 format %{ "[$base + $index << $scale + $off (8-bit)] @ indPosIndexI2LScaleOffset8Narrow" %}
5093 interface(MEMORY_INTER) %{
5094 base($base);
5095 index($index);
5096 scale($scale);
5097 disp($off);
5098 %}
5099 %}
5101 //FIXME: I think it's better to limit the immI to be 16-bit at most!
5102 // Indirect Memory Plus Long Offset Operand
5103 operand indOffset32(mRegP reg, immL32 off) %{
5104 constraint(ALLOC_IN_RC(p_reg));
5105 op_cost(20);
5106 match(AddP reg off);
5108 format %{ "[$reg + $off (32-bit)] @ indOffset32" %}
5109 interface(MEMORY_INTER) %{
5110 base($reg);
5111 index(0x0); /* NO_INDEX */
5112 scale(0x0);
5113 disp($off);
5114 %}
5115 %}
5117 // Indirect Memory Plus Index Register
5118 operand indIndex(mRegP addr, mRegL index) %{
5119 constraint(ALLOC_IN_RC(p_reg));
5120 match(AddP addr index);
5122 op_cost(20);
5123 format %{"[$addr + $index] @ indIndex" %}
5124 interface(MEMORY_INTER) %{
5125 base($addr);
5126 index($index);
5127 scale(0x0);
5128 disp(0x0);
5129 %}
5130 %}
// Compressed-klass addressing: valid only when narrow_klass_shift == 0,
// i.e. DecodeNKlass adds at most a base offset and the narrow klass value
// can serve directly as the address base.
5132 operand indirectNarrowKlass(mRegN reg)
5133 %{
5134 predicate(Universe::narrow_klass_shift() == 0);
5135 constraint(ALLOC_IN_RC(p_reg));
5136 op_cost(10);
5137 match(DecodeNKlass reg);
5139 format %{ "[$reg] @ indirectNarrowKlass" %}
5140 interface(MEMORY_INTER) %{
5141 base($reg);
5142 index(0x0);
5143 scale(0x0);
5144 disp(0x0);
5145 %}
5146 %}
5148 operand indOffset8NarrowKlass(mRegN reg, immL8 off)
5149 %{
5150 predicate(Universe::narrow_klass_shift() == 0);
5151 constraint(ALLOC_IN_RC(p_reg));
5152 op_cost(10);
5153 match(AddP (DecodeNKlass reg) off);
5155 format %{ "[$reg + $off (8-bit)] @ indOffset8NarrowKlass" %}
5156 interface(MEMORY_INTER) %{
5157 base($reg);
5158 index(0x0);
5159 scale(0x0);
5160 disp($off);
5161 %}
5162 %}
5164 operand indOffset32NarrowKlass(mRegN reg, immL32 off)
5165 %{
5166 predicate(Universe::narrow_klass_shift() == 0);
5167 constraint(ALLOC_IN_RC(p_reg));
5168 op_cost(10);
5169 match(AddP (DecodeNKlass reg) off);
5171 format %{ "[$reg + $off (32-bit)] @ indOffset32NarrowKlass" %}
5172 interface(MEMORY_INTER) %{
5173 base($reg);
5174 index(0x0);
5175 scale(0x0);
5176 disp($off);
5177 %}
5178 %}
5180 operand indIndexOffsetNarrowKlass(mRegN reg, mRegL lreg, immL32 off)
5181 %{
5182 predicate(Universe::narrow_klass_shift() == 0);
5183 constraint(ALLOC_IN_RC(p_reg));
5184 match(AddP (AddP (DecodeNKlass reg) lreg) off);
5186 op_cost(10);
5187 format %{"[$reg + $off + $lreg] @ indIndexOffsetNarrowKlass" %}
5188 interface(MEMORY_INTER) %{
5189 base($reg);
5190 index($lreg);
5191 scale(0x0);
5192 disp($off);
5193 %}
5194 %}
5196 operand indIndexNarrowKlass(mRegN reg, mRegL lreg)
5197 %{
5198 predicate(Universe::narrow_klass_shift() == 0);
5199 constraint(ALLOC_IN_RC(p_reg));
5200 match(AddP (DecodeNKlass reg) lreg);
5202 op_cost(10);
5203 format %{"[$reg + $lreg] @ indIndexNarrowKlass" %}
5204 interface(MEMORY_INTER) %{
5205 base($reg);
5206 index($lreg);
5207 scale(0x0);
5208 disp(0x0);
5209 %}
5210 %}
5212 // Indirect Memory Operand
5213 operand indirectNarrow(mRegN reg)
5214 %{
5215 predicate(Universe::narrow_oop_shift() == 0);
5216 constraint(ALLOC_IN_RC(p_reg));
5217 op_cost(10);
5218 match(DecodeN reg);
5220 format %{ "[$reg] @ indirectNarrow" %}
5221 interface(MEMORY_INTER) %{
5222 base($reg);
5223 index(0x0);
5224 scale(0x0);
5225 disp(0x0);
5226 %}
5227 %}
5229 // Indirect Memory Plus Short Offset Operand
5230 operand indOffset8Narrow(mRegN reg, immL8 off)
5231 %{
5232 predicate(Universe::narrow_oop_shift() == 0);
5233 constraint(ALLOC_IN_RC(p_reg));
5234 op_cost(10);
5235 match(AddP (DecodeN reg) off);
5237 format %{ "[$reg + $off (8-bit)] @ indOffset8Narrow" %}
5238 interface(MEMORY_INTER) %{
5239 base($reg);
5240 index(0x0);
5241 scale(0x0);
5242 disp($off);
5243 %}
5244 %}
5246 // Indirect Memory Plus Index Register Plus Offset Operand
5247 operand indIndexOffset8Narrow(mRegN reg, mRegL lreg, immL8 off)
5248 %{
5249 predicate(Universe::narrow_oop_shift() == 0);
5250 constraint(ALLOC_IN_RC(p_reg));
5251 match(AddP (AddP (DecodeN reg) lreg) off);
5253 op_cost(10);
5254 format %{"[$reg + $off + $lreg] @ indIndexOffset8Narrow" %}
5255 interface(MEMORY_INTER) %{
5256 base($reg);
5257 index($lreg);
5258 scale(0x0);
5259 disp($off);
5260 %}
5261 %}
5263 //----------Load Long Memory Operands------------------------------------------
5264 // The load-long idiom will use it's address expression again after loading
5265 // the first word of the long. If the load-long destination overlaps with
5266 // registers used in the addressing expression, the 2nd half will be loaded
5267 // from a clobbered address. Fix this by requiring that load-long use
5268 // address registers that do not overlap with the load-long target.
5270 // load-long support
5271 operand load_long_RegP() %{
5272 constraint(ALLOC_IN_RC(p_reg));
5273 match(RegP);
5274 match(mRegP);
5275 op_cost(100);
5276 format %{ %}
5277 interface(REG_INTER);
5278 %}
5280 // Indirect Memory Operand Long
5281 operand load_long_indirect(load_long_RegP reg) %{
5282 constraint(ALLOC_IN_RC(p_reg));
5283 match(reg);
5285 format %{ "[$reg]" %}
5286 interface(MEMORY_INTER) %{
5287 base($reg);
5288 index(0x0);
5289 scale(0x0);
5290 disp(0x0);
5291 %}
5292 %}
5294 // Indirect Memory Plus Long Offset Operand
5295 operand load_long_indOffset32(load_long_RegP reg, immL32 off) %{
5296 match(AddP reg off);
5298 format %{ "[$reg + $off]" %}
5299 interface(MEMORY_INTER) %{
5300 base($reg);
5301 index(0x0);
5302 scale(0x0);
5303 disp($off);
5304 %}
5305 %}
5307 //----------Conditional Branch Operands----------------------------------------
5308 // Comparison Op - This is the operation of the comparison, and is limited to
5309 // the following set of codes:
5310 // L (<), LE (<=), G (>), GE (>=), E (==), NE (!=)
5311 //
5312 // Other attributes of the comparison, such as unsignedness, are specified
5313 // by the comparison instruction that sets a condition code flags register.
5314 // That result is represented by a flags operand whose subtype is appropriate
5315 // to the unsignedness (etc.) of the comparison.
5316 //
5317 // Later, the instruction which matches both the Comparison Op (a Bool) and
5318 // the flags (produced by the Cmp) specifies the coding of the comparison op
5319 // by matching a specific subtype of Bool operand below, such as cmpOpU.
5321 // Comparison Code
5322 operand cmpOp() %{
5323 match(Bool);
5325 format %{ "" %}
5326 interface(COND_INTER) %{
5327 equal(0x01);
5328 not_equal(0x02);
5329 greater(0x03);
5330 greater_equal(0x04);
5331 less(0x05);
5332 less_equal(0x06);
5333 overflow(0x7);
5334 no_overflow(0x8);
5335 %}
5336 %}
5339 // Comparison Code
5340 // Comparison Code, unsigned compare. Used by FP also, with
5341 // C2 (unordered) turned into GT or LT already. The other bits
5342 // C0 and C3 are turned into Carry & Zero flags.
// NOTE: cmpOpU uses the same encodings as cmpOp; signed vs. unsigned is
// distinguished by the instruct that matches the operand, not by the codes.
5343 operand cmpOpU() %{
5344 match(Bool);
5346 format %{ "" %}
5347 interface(COND_INTER) %{
5348 equal(0x01);
5349 not_equal(0x02);
5350 greater(0x03);
5351 greater_equal(0x04);
5352 less(0x05);
5353 less_equal(0x06);
5354 overflow(0x7);
5355 no_overflow(0x8);
5356 %}
5357 %}
5359 /*
5360 // Comparison Code, unsigned compare. Used by FP also, with
5361 // C2 (unordered) turned into GT or LT already. The other bits
5362 // C0 and C3 are turned into Carry & Zero flags.
5363 operand cmpOpU() %{
5364 match(Bool);
5366 format %{ "" %}
5367 interface(COND_INTER) %{
5368 equal(0x4);
5369 not_equal(0x5);
5370 less(0x2);
5371 greater_equal(0x3);
5372 less_equal(0x6);
5373 greater(0x7);
5374 %}
5375 %}
5376 */
5377 /*
5378 // Comparison Code for FP conditional move
5379 operand cmpOp_fcmov() %{
5380 match(Bool);
5382 format %{ "" %}
5383 interface(COND_INTER) %{
5384 equal (0x01);
5385 not_equal (0x02);
5386 greater (0x03);
5387 greater_equal(0x04);
5388 less (0x05);
5389 less_equal (0x06);
5390 %}
5391 %}
5393 // Comparison Code used in long compares
5394 operand cmpOp_commute() %{
5395 match(Bool);
5397 format %{ "" %}
5398 interface(COND_INTER) %{
5399 equal(0x4);
5400 not_equal(0x5);
5401 less(0xF);
5402 greater_equal(0xE);
5403 less_equal(0xD);
5404 greater(0xC);
5405 %}
5406 %}
5407 */
5409 //----------Special Memory Operands--------------------------------------------
5410 // Stack Slot Operand - This operand is used for loading and storing temporary
5411 // values on the stack where a match requires a value to
5412 // flow through memory.
// All stack-slot operands hard-code base 0x1d (SP on this port) and carry
// the slot offset in disp; they have no match rule because the matcher
// synthesizes them directly for spill traffic.
5413 operand stackSlotP(sRegP reg) %{
5414 constraint(ALLOC_IN_RC(stack_slots));
5415 // No match rule because this operand is only generated in matching
5416 op_cost(50);
5417 format %{ "[$reg]" %}
5418 interface(MEMORY_INTER) %{
5419 base(0x1d); // SP
5420 index(0x0); // No Index
5421 scale(0x0); // No Scale
5422 disp($reg); // Stack Offset
5423 %}
5424 %}
5426 operand stackSlotI(sRegI reg) %{
5427 constraint(ALLOC_IN_RC(stack_slots));
5428 // No match rule because this operand is only generated in matching
5429 op_cost(50);
5430 format %{ "[$reg]" %}
5431 interface(MEMORY_INTER) %{
5432 base(0x1d); // SP
5433 index(0x0); // No Index
5434 scale(0x0); // No Scale
5435 disp($reg); // Stack Offset
5436 %}
5437 %}
5439 operand stackSlotF(sRegF reg) %{
5440 constraint(ALLOC_IN_RC(stack_slots));
5441 // No match rule because this operand is only generated in matching
5442 op_cost(50);
5443 format %{ "[$reg]" %}
5444 interface(MEMORY_INTER) %{
5445 base(0x1d); // SP
5446 index(0x0); // No Index
5447 scale(0x0); // No Scale
5448 disp($reg); // Stack Offset
5449 %}
5450 %}
5452 operand stackSlotD(sRegD reg) %{
5453 constraint(ALLOC_IN_RC(stack_slots));
5454 // No match rule because this operand is only generated in matching
5455 op_cost(50);
5456 format %{ "[$reg]" %}
5457 interface(MEMORY_INTER) %{
5458 base(0x1d); // SP
5459 index(0x0); // No Index
5460 scale(0x0); // No Scale
5461 disp($reg); // Stack Offset
5462 %}
5463 %}
5465 operand stackSlotL(sRegL reg) %{
5466 constraint(ALLOC_IN_RC(stack_slots));
5467 // No match rule because this operand is only generated in matching
5468 op_cost(50);
5469 format %{ "[$reg]" %}
5470 interface(MEMORY_INTER) %{
5471 base(0x1d); // SP
5472 index(0x0); // No Index
5473 scale(0x0); // No Scale
5474 disp($reg); // Stack Offset
5475 %}
5476 %}
5479 //------------------------OPERAND CLASSES--------------------------------------
5480 //opclass memory( direct, indirect, indOffset16, indOffset32, indOffset32X, indIndexOffset );
// The "memory" opclass is the union of every addressing-mode operand an
// instruct's memory argument may match; keep it in sync when adding modes.
5481 opclass memory( indirect, indirectNarrow, indOffset8, indOffset32, indIndex, indIndexScale, load_long_indirect, load_long_indOffset32, baseIndexOffset8, baseIndexOffset8_convI2L, indIndexScaleOffset8, indIndexScaleOffset8_convI2L, basePosIndexScaleOffset8, indIndexScaleOffsetNarrow, indPosIndexI2LScaleOffset8Narrow, indOffset8Narrow, indIndexOffset8Narrow);
5484 //----------PIPELINE-----------------------------------------------------------
5485 // Rules which define the behavior of the target architectures pipeline.
5487 pipeline %{
5489 //----------ATTRIBUTES---------------------------------------------------------
5490 attributes %{
5491 fixed_size_instructions; // Fixed size instructions
5492 branch_has_delay_slot; // branch have delay slot in gs2
5493 max_instructions_per_bundle = 1; // 1 instruction per bundle
5494 max_bundles_per_cycle = 4; // Up to 4 bundles per cycle
5495 bundle_unit_size=4;
5496 instruction_unit_size = 4; // An instruction is 4 bytes long
5497 instruction_fetch_unit_size = 16; // The processor fetches one line
5498 instruction_fetch_units = 1; // of 16 bytes
5500 // List of nop instructions
5501 nops( MachNop );
5502 %}
5504 //----------RESOURCES----------------------------------------------------------
5505 // Resources are the functional units available to the machine
// 4 decoders, 2 integer ALUs, 2 FPUs, a memory unit and a branch unit.
5507 resources(D1, D2, D3, D4, DECODE = D1 | D2 | D3| D4, ALU1, ALU2, ALU = ALU1 | ALU2, FPU1, FPU2, FPU = FPU1 | FPU2, MEM, BR);
5509 //----------PIPELINE DESCRIPTION-----------------------------------------------
5510 // Pipeline Description specifies the stages in the machine's pipeline
5512 // IF: fetch
5513 // ID: decode
5514 // RD: read
5515 // CA: calculate
5516 // WB: write back
5517 // CM: commit
5519 pipe_desc(IF, ID, RD, CA, WB, CM);
5522 //----------PIPELINE CLASSES---------------------------------------------------
5523 // Pipeline Classes describe the stages in which input and output are
5524 // referenced by the hardware pipeline.
5526 //No.1 Integer ALU reg-reg operation : dst <-- reg1 op reg2
5527 pipe_class ialu_regI_regI(mRegI dst, mRegI src1, mRegI src2) %{
5528 single_instruction;
5529 src1 : RD(read);
5530 src2 : RD(read);
5531 dst : WB(write)+1;
5532 DECODE : ID;
5533 ALU : CA;
5534 %}
5536 //No.19 Integer mult operation : dst <-- reg1 mult reg2
5537 pipe_class ialu_mult(mRegI dst, mRegI src1, mRegI src2) %{
5538 src1 : RD(read);
5539 src2 : RD(read);
5540 dst : WB(write)+5;
5541 DECODE : ID;
5542 ALU2 : CA;
5543 %}
5545 pipe_class mulL_reg_reg(mRegL dst, mRegL src1, mRegL src2) %{
5546 src1 : RD(read);
5547 src2 : RD(read);
5548 dst : WB(write)+10;
5549 DECODE : ID;
5550 ALU2 : CA;
5551 %}
5553 //No.19 Integer div operation : dst <-- reg1 div reg2
5554 pipe_class ialu_div(mRegI dst, mRegI src1, mRegI src2) %{
5555 src1 : RD(read);
5556 src2 : RD(read);
5557 dst : WB(write)+10;
5558 DECODE : ID;
5559 ALU2 : CA;
5560 %}
5562 //No.19 Integer mod operation : dst <-- reg1 mod reg2
5563 pipe_class ialu_mod(mRegI dst, mRegI src1, mRegI src2) %{
5564 instruction_count(2);
5565 src1 : RD(read);
5566 src2 : RD(read);
5567 dst : WB(write)+10;
5568 DECODE : ID;
5569 ALU2 : CA;
5570 %}
5572 //No.15 Long ALU reg-reg operation : dst <-- reg1 op reg2
5573 pipe_class ialu_regL_regL(mRegL dst, mRegL src1, mRegL src2) %{
5574 instruction_count(2);
5575 src1 : RD(read);
5576 src2 : RD(read);
5577 dst : WB(write);
5578 DECODE : ID;
5579 ALU : CA;
5580 %}
5582 //No.18 Long ALU reg-imm16 operation : dst <-- reg1 op imm16
5583 pipe_class ialu_regL_imm16(mRegL dst, mRegL src) %{
5584 instruction_count(2);
5585 src : RD(read);
5586 dst : WB(write);
5587 DECODE : ID;
5588 ALU : CA;
5589 %}
5591 //no.16 load Long from memory :
5592 pipe_class ialu_loadL(mRegL dst, memory mem) %{
5593 instruction_count(2);
5594 mem : RD(read);
5595 dst : WB(write)+5;
5596 DECODE : ID;
5597 MEM : RD;
5598 %}
5600 //No.17 Store Long to Memory :
5601 pipe_class ialu_storeL(mRegL src, memory mem) %{
5602 instruction_count(2);
5603 mem : RD(read);
5604 src : RD(read);
5605 DECODE : ID;
5606 MEM : RD;
5607 %}
5609 //No.2 Integer ALU reg-imm16 operation : dst <-- reg1 op imm16
5610 pipe_class ialu_regI_imm16(mRegI dst, mRegI src) %{
5611 single_instruction;
5612 src : RD(read);
5613 dst : WB(write);
5614 DECODE : ID;
5615 ALU : CA;
5616 %}
5618 //No.3 Integer move operation : dst <-- reg
5619 pipe_class ialu_regI_mov(mRegI dst, mRegI src) %{
5620 src : RD(read);
5621 dst : WB(write);
5622 DECODE : ID;
5623 ALU : CA;
5624 %}
5626 //No.4 No instructions : do nothing
5627 pipe_class empty( ) %{
5628 instruction_count(0);
5629 %}
5631 //No.5 UnConditional branch :
5632 pipe_class pipe_jump( label labl ) %{
5633 multiple_bundles;
5634 DECODE : ID;
5635 BR : RD;
5636 %}
5638 //No.6 ALU Conditional branch :
5639 pipe_class pipe_alu_branch(mRegI src1, mRegI src2, label labl ) %{
5640 multiple_bundles;
5641 src1 : RD(read);
5642 src2 : RD(read);
5643 DECODE : ID;
5644 BR : RD;
5645 %}
5647 //no.7 load integer from memory :
5648 pipe_class ialu_loadI(mRegI dst, memory mem) %{
5649 mem : RD(read);
5650 dst : WB(write)+3;
5651 DECODE : ID;
5652 MEM : RD;
5653 %}
5655 //No.8 Store Integer to Memory :
5656 pipe_class ialu_storeI(mRegI src, memory mem) %{
5657 mem : RD(read);
5658 src : RD(read);
5659 DECODE : ID;
5660 MEM : RD;
5661 %}
5664 //No.10 Floating FPU reg-reg operation : dst <-- reg1 op reg2
5665 pipe_class fpu_regF_regF(regF dst, regF src1, regF src2) %{
5666 src1 : RD(read);
5667 src2 : RD(read);
5668 dst : WB(write);
5669 DECODE : ID;
5670 FPU : CA;
5671 %}
5673 //No.22 Floating div operation : dst <-- reg1 div reg2
5674 pipe_class fpu_div(regF dst, regF src1, regF src2) %{
5675 src1 : RD(read);
5676 src2 : RD(read);
5677 dst : WB(write);
5678 DECODE : ID;
5679 FPU2 : CA;
5680 %}
// Int-to-double conversion (runs on FPU1 only).
5682 pipe_class fcvt_I2D(regD dst, mRegI src) %{
5683 src : RD(read);
5684 dst : WB(write);
5685 DECODE : ID;
5686 FPU1 : CA;
5687 %}
// Double-to-int conversion (runs on FPU1 only).
5689 pipe_class fcvt_D2I(mRegI dst, regD src) %{
5690 src : RD(read);
5691 dst : WB(write);
5692 DECODE : ID;
5693 FPU1 : CA;
5694 %}
// FPR -> GPR move (mfc1).
5696 pipe_class pipe_mfc1(mRegI dst, regD src) %{
5697 src : RD(read);
5698 dst : WB(write);
5699 DECODE : ID;
5700 MEM : RD;
5701 %}
// GPR -> FPR move (mtc1); holds the MEM unit for 5 cycles.
5703 pipe_class pipe_mtc1(regD dst, mRegI src) %{
5704 src : RD(read);
5705 dst : WB(write);
5706 DECODE : ID;
5707 MEM : RD(5);
5708 %}
5710 //No.23 Floating sqrt operation : dst <-- reg1 sqrt reg2
5711 pipe_class fpu_sqrt(regF dst, regF src1, regF src2) %{
5712 multiple_bundles;
5713 src1 : RD(read);
5714 src2 : RD(read);
5715 dst : WB(write);
5716 DECODE : ID;
5717 FPU2 : CA;
5718 %}
5720 //No.11 Load Floating from Memory :
5721 pipe_class fpu_loadF(regF dst, memory mem) %{
5722 instruction_count(1);
5723 mem : RD(read);
5724 dst : WB(write)+3;
5725 DECODE : ID;
5726 MEM : RD;
5727 %}
5729 //No.12 Store Floating to Memory :
5730 pipe_class fpu_storeF(regF src, memory mem) %{
5731 instruction_count(1);
5732 mem : RD(read);
5733 src : RD(read);
5734 DECODE : ID;
5735 MEM : RD;
5736 %}
5738 //No.13 FPU Conditional branch :
5739 pipe_class pipe_fpu_branch(regF src1, regF src2, label labl ) %{
5740 multiple_bundles;
5741 src1 : RD(read);
5742 src2 : RD(read);
5743 DECODE : ID;
5744 BR : RD;
5745 %}
5747 //No.14 Floating FPU reg operation : dst <-- op reg
5748 pipe_class fpu1_regF(regF dst, regF src) %{
5749 src : RD(read);
5750 dst : WB(write);
5751 DECODE : ID;
5752 FPU : CA;
5753 %}
// Catch-all for serializing multi-instruction memory sequences.
5755 pipe_class long_memory_op() %{
5756 instruction_count(10); multiple_bundles; force_serialization;
5757 fixed_latency(30);
5758 %}
5760 pipe_class simple_call() %{
5761 instruction_count(10); multiple_bundles; force_serialization;
5762 fixed_latency(200);
5763 BR : RD;
5764 %}
5766 pipe_class call() %{
5767 instruction_count(10); multiple_bundles; force_serialization;
5768 fixed_latency(200);
5769 %}
5771 //FIXME:
5772 //No.9 Piple slow : for multi-instructions
5773 pipe_class pipe_slow( ) %{
5774 instruction_count(20);
5775 force_serialization;
5776 multiple_bundles;
5777 fixed_latency(50);
5778 %}
5780 %}
5784 //----------INSTRUCTIONS-------------------------------------------------------
5785 //
5786 // match -- States which machine-independent subtree may be replaced
5787 // by this instruction.
5788 // ins_cost -- The estimated cost of this instruction is used by instruction
5789 // selection to identify a minimum cost tree of machine
5790 // instructions that matches a tree of machine-independent
5791 // instructions.
5792 // format -- A string providing the disassembly for this instruction.
5793 // The value of an instruction's operand may be inserted
5794 // by referring to it with a '$' prefix.
5795 // opcode -- Three instruction opcodes may be provided. These are referred
5796 // to within an encode class as $primary, $secondary, and $tertiary
5797 // respectively. The primary opcode is commonly used to
5798 // indicate the type of machine instruction, while secondary
5799 // and tertiary are often used for prefix options or addressing
5800 // modes.
5801 // ins_encode -- A list of encode classes with parameters. The encode class
5802 // name must have been defined in an 'enc_class' specification
5803 // in the encode section of the architecture description.
5806 // Load Integer
5807 instruct loadI(mRegI dst, memory mem) %{
5808 match(Set dst (LoadI mem));
5810 ins_cost(125);
5811 format %{ "lw $dst, $mem #@loadI" %}
5812 ins_encode (load_I_enc(dst, mem));
5813 ins_pipe( ialu_loadI );
5814 %}
5816 instruct loadI_convI2L(mRegL dst, memory mem) %{
5817 match(Set dst (ConvI2L (LoadI mem)));
5819 ins_cost(125);
5820 format %{ "lw $dst, $mem #@loadI_convI2L" %}
5821 ins_encode (load_I_enc(dst, mem));
5822 ins_pipe( ialu_loadI );
5823 %}
5825 // Load Integer (32 bit signed) to Byte (8 bit signed)
5826 instruct loadI2B(mRegI dst, memory mem, immI_24 twentyfour) %{
5827 match(Set dst (RShiftI (LShiftI (LoadI mem) twentyfour) twentyfour));
5829 ins_cost(125);
5830 format %{ "lb $dst, $mem\t# int -> byte #@loadI2B" %}
5831 ins_encode(load_B_enc(dst, mem));
5832 ins_pipe(ialu_loadI);
5833 %}
5835 // Load Integer (32 bit signed) to Unsigned Byte (8 bit UNsigned)
5836 instruct loadI2UB(mRegI dst, memory mem, immI_255 mask) %{
5837 match(Set dst (AndI (LoadI mem) mask));
5839 ins_cost(125);
5840 format %{ "lbu $dst, $mem\t# int -> ubyte #@loadI2UB" %}
5841 ins_encode(load_UB_enc(dst, mem));
5842 ins_pipe(ialu_loadI);
5843 %}
5845 // Load Integer (32 bit signed) to Short (16 bit signed)
// Matches (LoadI << 16) >> 16 and narrows it to a signed halfword load (lh).
instruct loadI2S(mRegI dst, memory mem, immI_16 sixteen) %{
  match(Set dst (RShiftI (LShiftI (LoadI mem) sixteen) sixteen));

  ins_cost(125);
  format %{ "lh $dst, $mem\t# int -> short #@loadI2S" %}
  ins_encode(load_S_enc(dst, mem));
  ins_pipe(ialu_loadI);
%}
5855 // Load Integer (32 bit signed) to Unsigned Short/Char (16 bit UNsigned)
// Matches (LoadI & 0xFFFF) and narrows it to an unsigned halfword load (lhu).
instruct loadI2US(mRegI dst, memory mem, immI_65535 mask) %{
  match(Set dst (AndI (LoadI mem) mask));

  ins_cost(125);
  format %{ "lhu $dst, $mem\t# int -> ushort/char #@loadI2US" %}
  ins_encode(load_C_enc(dst, mem));
  ins_pipe(ialu_loadI);
%}
5865 // Load Long.
// Load Long (64-bit doubleword, ld).
instruct loadL(mRegL dst, memory mem) %{
// predicate(!((LoadLNode*)n)->require_atomic_access());
  match(Set dst (LoadL mem));

  ins_cost(250);
  format %{ "ld $dst, $mem #@loadL" %}
  ins_encode(load_L_enc(dst, mem));
  ins_pipe( ialu_loadL );
%}
5876 // Load Long - UNaligned
// Load Long - UNaligned.
// NOTE(review): this emits the same plain ld as loadL; a true unaligned access
// would need ldl/ldr (see FIXME) — confirm the target tolerates unaligned ld.
instruct loadL_unaligned(mRegL dst, memory mem) %{
  match(Set dst (LoadL_unaligned mem));

  // FIXME: Jin: Need more effective ldl/ldr
  ins_cost(450);
  format %{ "ld $dst, $mem #@loadL_unaligned\n\t" %}
  ins_encode(load_L_enc(dst, mem));
  ins_pipe( ialu_loadL );
%}
5887 // Store Long
// Store Long from register (sd).  Stores that require atomic access are
// excluded here and handled by storeL_reg_atomic below.
instruct storeL_reg(memory mem, mRegL src) %{
  predicate(!((StoreLNode*)n)->require_atomic_access());
  match(Set mem (StoreL mem src));

  ins_cost(200);
  format %{ "sd $mem, $src #@storeL_reg\n" %}
  ins_encode(store_L_reg_enc(mem, src));
  ins_pipe( ialu_storeL );
%}
//FIXME: volatile! atomic!
// Volatile Store Long.  Must be atomic.  (The original comment about moving
// the value into the x87 FP TOS and doing a 64-bit FIST, and probing the
// target address before the store, was inherited from the x86 port; on
// MIPS64 a single aligned doubleword store suffices.)
// Store Long with required atomicity.  On MIPS64 an aligned 64-bit sd is
// atomic, so a single doubleword store satisfies require_atomic_access().
instruct storeL_reg_atomic(memory mem, mRegL src) %{
  predicate(((StoreLNode*)n)->require_atomic_access());
  match(Set mem (StoreL mem src));

  ins_cost(200);
  // Format fixed: the encoding emits sd, not sw.
  format %{ "sd $mem, $src #@storeL_reg_atomic\n" %}
  ins_encode %{
    Register src = as_Register($src$$reg);

    int base  = $mem$$base;
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp  = $mem$$disp;

    if( index != 0 ) {
      // Compute base + (index << scale) into AT.  Use 64-bit adds: addu
      // sign-extends its 32-bit result and would corrupt addresses >= 2G.
      if (scale == 0) {
        __ daddu(AT, as_Register(base), as_Register(index));
      } else {
        __ dsll(AT, as_Register(index), scale);
        __ daddu(AT, as_Register(base), AT);
      }
      if( Assembler::is_simm16(disp) ) {
        __ sd(src, AT, disp);
      } else {
        // Displacement does not fit the 16-bit offset field; add it via T9.
        __ move(T9, disp);
        __ daddu(AT, AT, T9);
        __ sd(src, AT, 0);
      }
    } else {
      if( Assembler::is_simm16(disp) ) {
        __ sd(src, as_Register(base), disp);
      } else {
        __ move(T9, disp);
        __ daddu(AT, as_Register(base), T9);
        __ sd(src, AT, 0);
      }
    }
  %}
  ins_pipe( ialu_storeL );
%}
// Store Long zero: sd of the hardwired zero register, no temp needed.
instruct storeL_immL0(memory mem, immL0 zero) %{
  match(Set mem (StoreL mem zero));

  ins_cost(180);
  format %{ "sd $mem, zero #@storeL_immL0" %}
  ins_encode(store_L_immL0_enc(mem, zero));
  ins_pipe( ialu_storeL );
%}
// Store Long immediate: materialize the 64-bit constant, then store it.
instruct storeL_imm(memory mem, immL src) %{
  match(Set mem (StoreL mem src));

  ins_cost(200);
  // Format fixed: a 64-bit long store is sd, not sw (cf. storeL_reg).
  format %{ "sd $mem, $src #@storeL_imm" %}
  ins_encode(store_L_immL_enc(mem, src));
  ins_pipe( ialu_storeL );
%}
5971 // Load Compressed Pointer
// Load Compressed Pointer: zero-extending 32-bit load (lwu) of a narrow oop.
instruct loadN(mRegN dst, memory mem)
%{
  match(Set dst (LoadN mem));

  ins_cost(125); // XXX
  format %{ "lwu $dst, $mem\t# compressed ptr @ loadN" %}
  ins_encode (load_N_enc(dst, mem));
  ins_pipe( ialu_loadI ); // XXX
%}
5982 // Load Pointer
// Load Pointer (full-width 64-bit ld).
instruct loadP(mRegP dst, memory mem) %{
  match(Set dst (LoadP mem));

  ins_cost(125);
  format %{ "ld $dst, $mem #@loadP" %}
  ins_encode (load_P_enc(dst, mem));
  ins_pipe( ialu_loadI );
%}
5992 // Load Klass Pointer
// Load Klass Pointer: same 64-bit ld encoding as loadP.
instruct loadKlass(mRegP dst, memory mem) %{
  match(Set dst (LoadKlass mem));

  ins_cost(125);
  // Format fixed: the encoding is load_P_enc, which emits ld (cf. loadP),
  // not a "MOV".
  format %{ "ld $dst, $mem @ loadKlass" %}
  ins_encode (load_P_enc(dst, mem));
  ins_pipe( ialu_loadI );
%}
6002 // Load narrow Klass Pointer
// Load narrow Klass Pointer: zero-extending 32-bit load (lwu), same encoding
// as loadN.
instruct loadNKlass(mRegN dst, memory mem)
%{
  match(Set dst (LoadNKlass mem));

  ins_cost(125); // XXX
  format %{ "lwu $dst, $mem\t# compressed klass ptr @ loadNKlass" %}
  ins_encode (load_N_enc(dst, mem));
  ins_pipe( ialu_loadI ); // XXX
%}
6013 // Load Constant
// Load an integer constant into a GP register.
instruct loadConI(mRegI dst, immI src) %{
  match(Set dst src);

  ins_cost(150);
  format %{ "mov $dst, $src #@loadConI" %}
  ins_encode %{
    // Materialize the 32-bit constant directly into the destination.
    __ move($dst$$Register, $src$$constant);
  %}
  ins_pipe( ialu_regI_regI );
%}
// Load an arbitrary 64-bit long constant; set64 emits the shortest
// materialization sequence for the value.
instruct loadConL_set64(mRegL dst, immL src) %{
  match(Set dst src);
  ins_cost(120);
  format %{ "li $dst, $src @ loadConL_set64" %}
  ins_encode %{
    __ set64($dst$$Register, $src$$constant);
  %}
  ins_pipe(ialu_regL_regL);
%}
6038 /*
6039 // Load long value from constant table (predicated by immL_expensive).
6040 instruct loadConL_load(mRegL dst, immL_expensive src) %{
6041 match(Set dst src);
6042 ins_cost(150);
6043 format %{ "ld $dst, $constantoffset[$constanttablebase] # load long $src from table @ loadConL_ldx" %}
6044 ins_encode %{
6045 int con_offset = $constantoffset($src);
6047 if (Assembler::is_simm16(con_offset)) {
6048 __ ld($dst$$Register, $constanttablebase, con_offset);
6049 } else {
6050 __ set64(AT, con_offset);
6051 if (UseLoongsonISA) {
6052 __ gsldx($dst$$Register, $constanttablebase, AT, 0);
6053 } else {
6054 __ daddu(AT, $constanttablebase, AT);
6055 __ ld($dst$$Register, AT, 0);
6056 }
6057 }
6058 %}
6059 ins_pipe(ialu_loadI);
6060 %}
6061 */
// Load a long constant that fits in 16 signed bits: one daddiu off R0.
instruct loadConL16(mRegL dst, immL16 src) %{
  match(Set dst src);
  ins_cost(105);
  format %{ "mov $dst, $src #@loadConL16" %}
  ins_encode %{
    // dst = R0 + imm16 (single instruction).
    __ daddiu(as_Register($dst$$reg), R0, $src$$constant);
  %}
  ins_pipe( ialu_regL_regL );
%}
// Load the long constant zero: dst = R0 + R0.
instruct loadConL0(mRegL dst, immL0 src) %{
  match(Set dst src);
  ins_cost(100);
  format %{ "mov $dst, zero #@loadConL0" %}
  ins_encode %{
    __ daddu(as_Register($dst$$reg), R0, R0);
  %}
  ins_pipe( ialu_regL_regL );
%}
6087 // Load Range
// Load Range (array length): a 32-bit int field, loaded with lw.
instruct loadRange(mRegI dst, memory mem) %{
  match(Set dst (LoadRange mem));

  ins_cost(125);
  // Format fixed: the encoding is load_I_enc, which emits lw (cf. loadI),
  // not a "MOV".
  format %{ "lw $dst, $mem @ loadRange" %}
  ins_encode(load_I_enc(dst, mem));
  ins_pipe( ialu_loadI );
%}
// Store Pointer from register (64-bit sd).
instruct storeP(memory mem, mRegP src ) %{
  match(Set mem (StoreP mem src));

  ins_cost(125);
  format %{ "sd $src, $mem #@storeP" %}
  ins_encode(store_P_reg_enc(mem, src));
  ins_pipe( ialu_storeI );
%}
6107 /*
6108 [Ref: loadConP]
6110 Error:
6111 0x2d4b6d40: lui t9, 0x4f <--- handle
6112 0x2d4b6d44: addiu t9, t9, 0xffff808c
6113 0x2d4b6d48: sw t9, 0x4(s2)
6115 OK:
6116 0x2cc5ed40: lui t9, 0x336a <--- klass
6117 0x2cc5ed44: addiu t9, t9, 0x5a10
6118 0x2cc5ed48: sw t9, 0x4(s2)
6119 */
6120 // Store Pointer Immediate; null pointers or constant oops that do not
6121 // need card-mark barriers.
6123 // Store NULL Pointer, mark word, or other simple pointer constant.
// Store a 31-bit pointer immediate (NULL, mark word, or other simple
// constant) — no card-mark barrier required for these.
instruct storeImmP(memory mem, immP31 src) %{
  match(Set mem (StoreP mem src));

  ins_cost(150);
  format %{ "mov $mem, $src #@storeImmP" %}
  ins_encode(store_P_immP_enc(mem, src));
  ins_pipe( ialu_storeI );
%}
6133 // Store Byte Immediate
// Store Byte Immediate (8-bit constant).
instruct storeImmB(memory mem, immI8 src) %{
  match(Set mem (StoreB mem src));

  ins_cost(150);
  format %{ "movb $mem, $src #@storeImmB" %}
  ins_encode(store_B_immI_enc(mem, src));
  ins_pipe( ialu_storeI );
%}
6143 // Store Compressed Pointer
// Store Compressed Pointer: 32-bit store (sw) of a narrow oop.
instruct storeN(memory mem, mRegN src)
%{
  match(Set mem (StoreN mem src));

  ins_cost(125); // XXX
  format %{ "sw $mem, $src\t# compressed ptr @ storeN" %}
  ins_encode(store_N_reg_enc(mem, src));
  ins_pipe( ialu_storeI );
%}
// Store narrow Klass pointer: same 32-bit store encoding as storeN.
instruct storeNKlass(memory mem, mRegN src)
%{
  match(Set mem (StoreNKlass mem src));

  ins_cost(125); // XXX
  format %{ "sw $mem, $src\t# compressed klass ptr @ storeNKlass" %}
  ins_encode(store_N_reg_enc(mem, src));
  ins_pipe( ialu_storeI );
%}
// Store compressed NULL.  Only legal when both the narrow-oop and the
// narrow-klass encodings have a NULL base, so the compressed form of NULL
// is a plain zero word.
instruct storeImmN0(memory mem, immN0 zero)
%{
  predicate(Universe::narrow_oop_base() == NULL && Universe::narrow_klass_base() == NULL);
  match(Set mem (StoreN mem zero));

  ins_cost(125); // XXX
  // NOTE(review): "R12" in the format string looks inherited from the x86_64
  // port (where R12 is the heap base); the MIPS encoding presumably stores
  // R0 — confirm against storeImmN0_enc.
  format %{ "storeN0 $mem, R12\t# compressed ptr" %}
  ins_encode(storeImmN0_enc(mem, zero));
  ins_pipe( ialu_storeI );
%}
6175 // Store Byte
// Store Byte from register (sb stores the low 8 bits).
instruct storeB(memory mem, mRegI src) %{
  match(Set mem (StoreB mem src));

  ins_cost(125);
  format %{ "sb $src, $mem #@storeB" %}
  ins_encode(store_B_reg_enc(mem, src));
  ins_pipe( ialu_storeI );
%}
// Store Byte with a folded ConvL2I: sb stores the low byte either way, so the
// truncation is free.
instruct storeB_convL2I(memory mem, mRegL src) %{
  match(Set mem (StoreB mem (ConvL2I src)));

  ins_cost(125);
  format %{ "sb $src, $mem #@storeB_convL2I" %}
  ins_encode(store_B_reg_enc(mem, src));
  ins_pipe( ialu_storeI );
%}
6194 // Load Byte (8bit signed)
// Load Byte (8-bit signed, lb sign-extends).
instruct loadB(mRegI dst, memory mem) %{
  match(Set dst (LoadB mem));

  ins_cost(125);
  format %{ "lb $dst, $mem #@loadB" %}
  ins_encode(load_B_enc(dst, mem));
  ins_pipe( ialu_loadI );
%}
// Load Byte with a folded ConvI2L: lb sign-extends to the full register,
// so one load covers both nodes.
instruct loadB_convI2L(mRegL dst, memory mem) %{
  match(Set dst (ConvI2L (LoadB mem)));

  ins_cost(125);
  format %{ "lb $dst, $mem #@loadB_convI2L" %}
  ins_encode(load_B_enc(dst, mem));
  ins_pipe( ialu_loadI );
%}
6213 // Load Byte (8bit UNsigned)
// Load Byte (8-bit UNsigned, lbu zero-extends).
instruct loadUB(mRegI dst, memory mem) %{
  match(Set dst (LoadUB mem));

  ins_cost(125);
  format %{ "lbu $dst, $mem #@loadUB" %}
  ins_encode(load_UB_enc(dst, mem));
  ins_pipe( ialu_loadI );
%}
// Load Unsigned Byte with a folded ConvI2L: lbu zero-extends, so one load
// covers both nodes.
instruct loadUB_convI2L(mRegL dst, memory mem) %{
  match(Set dst (ConvI2L (LoadUB mem)));

  ins_cost(125);
  format %{ "lbu $dst, $mem #@loadUB_convI2L" %}
  ins_encode(load_UB_enc(dst, mem));
  ins_pipe( ialu_loadI );
%}
6232 // Load Short (16bit signed)
// Load Short (16-bit signed, lh sign-extends).
instruct loadS(mRegI dst, memory mem) %{
  match(Set dst (LoadS mem));

  ins_cost(125);
  format %{ "lh $dst, $mem #@loadS" %}
  ins_encode(load_S_enc(dst, mem));
  ins_pipe( ialu_loadI );
%}
6242 // Load Short (16 bit signed) to Byte (8 bit signed)
// Matches (LoadS << 24) >> 24 and narrows it to a signed byte load (lb).
instruct loadS2B(mRegI dst, memory mem, immI_24 twentyfour) %{
  match(Set dst (RShiftI (LShiftI (LoadS mem) twentyfour) twentyfour));

  ins_cost(125);
  format %{ "lb $dst, $mem\t# short -> byte #@loadS2B" %}
  ins_encode(load_B_enc(dst, mem));
  ins_pipe(ialu_loadI);
%}
// Load Short with a folded ConvI2L: lh sign-extends, so one load covers both
// nodes.
instruct loadS_convI2L(mRegL dst, memory mem) %{
  match(Set dst (ConvI2L (LoadS mem)));

  ins_cost(125);
  format %{ "lh $dst, $mem #@loadS_convI2L" %}
  ins_encode(load_S_enc(dst, mem));
  ins_pipe( ialu_loadI );
%}
6261 // Store Integer Immediate
// Store Integer Immediate (constant materialized, then sw).
instruct storeImmI(memory mem, immI src) %{
  match(Set mem (StoreI mem src));

  ins_cost(150);
  format %{ "mov $mem, $src #@storeImmI" %}
  ins_encode(store_I_immI_enc(mem, src));
  ins_pipe( ialu_storeI );
%}
6271 // Store Integer
// Store Integer from register (sw).
instruct storeI(memory mem, mRegI src) %{
  match(Set mem (StoreI mem src));

  ins_cost(125);
  format %{ "sw $mem, $src #@storeI" %}
  ins_encode(store_I_reg_enc(mem, src));
  ins_pipe( ialu_storeI );
%}
// Store Integer with a folded ConvL2I: sw stores the low 32 bits either way.
instruct storeI_convL2I(memory mem, mRegL src) %{
  match(Set mem (StoreI mem (ConvL2I src)));

  ins_cost(125);
  format %{ "sw $mem, $src #@storeI_convL2I" %}
  ins_encode(store_I_reg_enc(mem, src));
  ins_pipe( ialu_storeI );
%}
6290 // Load Float
// Load Float (single-precision) into an FP register.
instruct loadF(regF dst, memory mem) %{
  match(Set dst (LoadF mem));

  ins_cost(150);
  format %{ "loadF $dst, $mem #@loadF" %}
  ins_encode(load_F_enc(dst, mem));
  ins_pipe( ialu_loadI );
%}
// Load a general pointer constant.  Oop and metadata (klass) constants must
// be emitted with relocation records so GC / class redefinition can patch
// them; other constants are materialized directly.
instruct loadConP_general(mRegP dst, immP src) %{
  match(Set dst src);

  ins_cost(120);
  format %{ "li $dst, $src #@loadConP_general" %}

  ins_encode %{
    Register dst = $dst$$Register;
    long* value = (long*)$src$$constant;
    bool is_need_reloc = $src->constant_reloc() != relocInfo::none;

    /* During GC, klassOop may be moved to new position in the heap.
     * It must be relocated.
     * Refer: [c1_LIRAssembler_mips.cpp] jobject2reg()
     */
    if (is_need_reloc) {
      // Klass (metadata) constant: record a metadata relocation.
      if($src->constant_reloc() == relocInfo::metadata_type){
        int klass_index = __ oop_recorder()->find_index((Klass*)value);
        RelocationHolder rspec = metadata_Relocation::spec(klass_index);

        __ relocate(rspec);
        // li48 presumably emits a fixed-length sequence so the relocated
        // value can be patched in place — TODO confirm.
        __ li48(dst, (long)value);
      }

      // Oop constant: record an oop relocation.
      if($src->constant_reloc() == relocInfo::oop_type){
        int oop_index = __ oop_recorder()->find_index((jobject)value);
        RelocationHolder rspec = oop_Relocation::spec(oop_index);

        __ relocate(rspec);
        __ li48(dst, (long)value);
      }
    } else {
      // Plain constant: no relocation, use the shortest sequence.
      __ set64(dst, (long)value);
    }
  %}

  ins_pipe( ialu_regI_regI );
%}
6339 /*
6340 instruct loadConP_load(mRegP dst, immP_load src) %{
6341 match(Set dst src);
6343 ins_cost(100);
6344 format %{ "ld $dst, [$constanttablebase + $constantoffset] load from constant table: ptr=$src @ loadConP_load" %}
6346 ins_encode %{
6348 int con_offset = $constantoffset($src);
6350 if (Assembler::is_simm16(con_offset)) {
6351 __ ld($dst$$Register, $constanttablebase, con_offset);
6352 } else {
6353 __ set64(AT, con_offset);
6354 if (UseLoongsonISA) {
6355 __ gsldx($dst$$Register, $constanttablebase, AT, 0);
6356 } else {
6357 __ daddu(AT, $constanttablebase, AT);
6358 __ ld($dst$$Register, AT, 0);
6359 }
6360 }
6361 %}
6363 ins_pipe(ialu_loadI);
6364 %}
6365 */
// Load a non-oop pointer constant that is cheap to materialize inline
// (no relocation required).
instruct loadConP_no_oop_cheap(mRegP dst, immP_no_oop_cheap src) %{
  match(Set dst src);

  ins_cost(80);
  format %{ "li $dst, $src @ loadConP_no_oop_cheap" %}

  ins_encode %{
    __ set64($dst$$Register, $src$$constant);
  %}

  ins_pipe(ialu_regI_regI);
%}
// Load the safepoint polling-page address (a fixed runtime constant,
// no relocation needed).
instruct loadConP_poll(mRegP dst, immP_poll src) %{
  match(Set dst src);

  ins_cost(50);
  format %{ "li $dst, $src #@loadConP_poll" %}

  ins_encode %{
    __ set64($dst$$Register, (jlong)(intptr_t)$src$$constant);
  %}

  ins_pipe( ialu_regI_regI );
%}
// Load the NULL pointer constant: dst = R0 + R0.
instruct loadConP0(mRegP dst, immP0 src)
%{
  match(Set dst src);

  ins_cost(50);
  format %{ "mov $dst, R0\t# ptr" %}
  ins_encode %{
    __ daddu($dst$$Register, R0, R0);
  %}
  ins_pipe( ialu_regI_regI );
%}
// Load compressed NULL: the narrow encoding of NULL is zero.
instruct loadConN0(mRegN dst, immN0 src) %{
  match(Set dst src);
  format %{ "move $dst, R0\t# compressed NULL ptr" %}
  ins_encode %{
    __ move($dst$$Register, R0);
  %}
  ins_pipe( ialu_regI_regI );
%}
// Load a compressed (narrow) oop constant.  The immediate emitted is the oop
// INDEX, tagged with a narrow_oop_operand relocation — presumably the real
// narrow oop value is patched in later via the relocation (TODO confirm
// against the relocation machinery).
instruct loadConN(mRegN dst, immN src) %{
  match(Set dst src);

  ins_cost(125);
  format %{ "li $dst, $src\t# compressed ptr @ loadConN" %}
  ins_encode %{
    address con = (address)$src$$constant;
    if (con == NULL) {
      // NULL is matched by loadConN0 instead.
      ShouldNotReachHere();
    } else {
      assert (UseCompressedOops, "should only be used for compressed headers");
      assert (Universe::heap() != NULL, "java heap should be initialized");
      assert (__ oop_recorder() != NULL, "this assembler needs an OopRecorder");

      Register dst = $dst$$Register;
      long* value = (long*)$src$$constant;
      int oop_index = __ oop_recorder()->find_index((jobject)value);
      RelocationHolder rspec = oop_Relocation::spec(oop_index);
      if(rspec.type()!=relocInfo::none){
        __ relocate(rspec, Assembler::narrow_oop_operand);
        __ li48(dst, oop_index);
      } else {
        __ set64(dst, oop_index);
      }
    }
  %}
  ins_pipe( ialu_regI_regI ); // XXX
%}
// Load a compressed (narrow) klass constant: encode the klass pointer and
// emit it with a metadata relocation.
// NOTE(review): the relocation uses Assembler::narrow_oop_operand with a
// metadata_Relocation — confirm this operand format is also the intended one
// for narrow-klass patching.
instruct loadConNKlass(mRegN dst, immNKlass src) %{
  match(Set dst src);

  ins_cost(125);
  format %{ "li $dst, $src\t# compressed klass ptr @ loadConNKlass" %}
  ins_encode %{
    address con = (address)$src$$constant;
    if (con == NULL) {
      // A NULL klass constant should never reach the matcher.
      ShouldNotReachHere();
    } else {
      Register dst = $dst$$Register;
      long* value = (long*)$src$$constant;

      int klass_index = __ oop_recorder()->find_index((Klass*)value);
      RelocationHolder rspec = metadata_Relocation::spec(klass_index);
      long narrowp = (long)Klass::encode_klass((Klass*)value);

      if(rspec.type()!=relocInfo::none){
        __ relocate(rspec, Assembler::narrow_oop_operand);
        __ li48(dst, narrowp);
      } else {
        __ set64(dst, narrowp);
      }
    }
  %}
  ins_pipe( ialu_regI_regI ); // XXX
%}
6476 //FIXME
6477 // Tail Call; Jump from runtime stub to Java code.
6478 // Also known as an 'interprocedural jump'.
6479 // Target of jump will eventually return to caller.
6480 // TailJump below removes the return address.
// Tail call: jump (not call) to Java code with the method oop in S3.
// RA is pushed first because the target may be generate_forward_exception(),
// which expects the return address on the stack.  The nop fills the branch
// delay slot of jr.
instruct TailCalljmpInd(mRegP jump_target, mRegP method_oop) %{
  match(TailCall jump_target method_oop );
  ins_cost(300);
  format %{ "JMP $jump_target \t# @TailCalljmpInd" %}

  ins_encode %{
    Register target = $jump_target$$Register;
    Register oop = $method_oop$$Register;

    /* 2012/10/12 Jin: RA will be used in generate_forward_exception() */
    __ push(RA);

    __ move(S3, oop);
    __ jr(target);
    __ nop();
  %}

  ins_pipe( pipe_jump );
%}
6501 // Create exception oop: created by stack-crawling runtime code.
6502 // Created exception is now available to this handler, and is setup
6503 // just prior to jumping to this handler. No code emitted.
// Create exception oop: the stack-crawling runtime has already placed the
// exception oop in A0 before jumping here, so no code is emitted.
instruct CreateException( a0_RegP ex_oop )
%{
  match(Set ex_oop (CreateEx));

  // use the following format syntax
  format %{ "# exception oop is in A0; no code emitted @CreateException" %}
  ins_encode %{
    /* Jin: X86 leaves this function empty */
    __ block_comment("CreateException is empty in X86/MIPS");
  %}
  ins_pipe( empty );
  // ins_pipe( pipe_jump );
%}
6519 /* 2012/9/14 Jin: The mechanism of exception handling is clear now.
6521 - Common try/catch:
6522 2012/9/14 Jin: [stubGenerator_mips.cpp] generate_forward_exception()
6523 |- V0, V1 are created
6524 |- T9 <= SharedRuntime::exception_handler_for_return_address
6525 `- jr T9
6526 `- the caller's exception_handler
6527 `- jr OptoRuntime::exception_blob
6528 `- here
6529 - Rethrow(e.g. 'unwind'):
6530 * The callee:
6531 |- an exception is triggered during execution
6532 `- exits the callee method through RethrowException node
6533 |- The callee pushes exception_oop(T0) and exception_pc(RA)
6534 `- The callee jumps to OptoRuntime::rethrow_stub()
6535 * In OptoRuntime::rethrow_stub:
6536 |- The VM calls _rethrow_Java to determine the return address in the caller method
6537 `- exits the stub with tailjmpInd
6538 |- pops exception_oop(V0) and exception_pc(V1)
6539 `- jumps to the return address(usually an exception_handler)
6540 * The caller:
6541 `- continues processing the exception_blob with V0/V1
6542 */
6544 /*
6545 Disassembling OptoRuntime::rethrow_stub()
6547 ; locals
6548 0x2d3bf320: addiu sp, sp, 0xfffffff8
6549 0x2d3bf324: sw ra, 0x4(sp)
6550 0x2d3bf328: sw fp, 0x0(sp)
6551 0x2d3bf32c: addu fp, sp, zero
6552 0x2d3bf330: addiu sp, sp, 0xfffffff0
6553 0x2d3bf334: sw ra, 0x8(sp)
6554 0x2d3bf338: sw t0, 0x4(sp)
6555 0x2d3bf33c: sw sp, 0x0(sp)
6557 ; get_thread(S2)
6558 0x2d3bf340: addu s2, sp, zero
6559 0x2d3bf344: srl s2, s2, 12
6560 0x2d3bf348: sll s2, s2, 2
6561 0x2d3bf34c: lui at, 0x2c85
6562 0x2d3bf350: addu at, at, s2
6563 0x2d3bf354: lw s2, 0xffffcc80(at)
6565 0x2d3bf358: lw s0, 0x0(sp)
6566 0x2d3bf35c: sw s0, 0x118(s2) // last_sp -> threa
6567 0x2d3bf360: sw s2, 0xc(sp)
6569 ; OptoRuntime::rethrow_C(oopDesc* exception, JavaThread* thread, address ret_pc)
6570 0x2d3bf364: lw a0, 0x4(sp)
6571 0x2d3bf368: lw a1, 0xc(sp)
6572 0x2d3bf36c: lw a2, 0x8(sp)
6573 ;; Java_To_Runtime
6574 0x2d3bf370: lui t9, 0x2c34
6575 0x2d3bf374: addiu t9, t9, 0xffff8a48
6576 0x2d3bf378: jalr t9
6577 0x2d3bf37c: nop
6579 0x2d3bf380: addu s3, v0, zero ; S3: SharedRuntime::raw_exception_handler_for_return_address()
6581 0x2d3bf384: lw s0, 0xc(sp)
6582 0x2d3bf388: sw zero, 0x118(s0)
6583 0x2d3bf38c: sw zero, 0x11c(s0)
6584 0x2d3bf390: lw s1, 0x144(s0) ; ex_oop: S1
6585 0x2d3bf394: addu s2, s0, zero
6586 0x2d3bf398: sw zero, 0x144(s2)
6587 0x2d3bf39c: lw s0, 0x4(s2)
6588 0x2d3bf3a0: addiu s4, zero, 0x0
6589 0x2d3bf3a4: bne s0, s4, 0x2d3bf3d4
6590 0x2d3bf3a8: nop
6591 0x2d3bf3ac: addiu sp, sp, 0x10
6592 0x2d3bf3b0: addiu sp, sp, 0x8
6593 0x2d3bf3b4: lw ra, 0xfffffffc(sp)
6594 0x2d3bf3b8: lw fp, 0xfffffff8(sp)
6595 0x2d3bf3bc: lui at, 0x2b48
6596 0x2d3bf3c0: lw at, 0x100(at)
6598 ; tailjmpInd: Restores exception_oop & exception_pc
6599 0x2d3bf3c4: addu v1, ra, zero
6600 0x2d3bf3c8: addu v0, s1, zero
6601 0x2d3bf3cc: jr s3
6602 0x2d3bf3d0: nop
6603 ; Exception:
6604 0x2d3bf3d4: lui s1, 0x2cc8 ; generate_forward_exception()
6605 0x2d3bf3d8: addiu s1, s1, 0x40
6606 0x2d3bf3dc: addiu s2, zero, 0x0
6607 0x2d3bf3e0: addiu sp, sp, 0x10
6608 0x2d3bf3e4: addiu sp, sp, 0x8
6609 0x2d3bf3e8: lw ra, 0xfffffffc(sp)
6610 0x2d3bf3ec: lw fp, 0xfffffff8(sp)
6611 0x2d3bf3f0: lui at, 0x2b48
6612 0x2d3bf3f4: lw at, 0x100(at)
6613 ; TailCalljmpInd
6614 __ push(RA); ; to be used in generate_forward_exception()
6615 0x2d3bf3f8: addu t7, s2, zero
6616 0x2d3bf3fc: jr s1
6617 0x2d3bf400: nop
6618 */
6619 // Rethrow exception:
6620 // The exception oop will come in the first argument position.
6621 // Then JUMP (not call) to the rethrow stub code.
// Rethrow exception: jump (not call) to OptoRuntime::rethrow_stub(), which
// locates the exception handler in the parent method.  The exception oop
// arrives in the first argument position.  The jump target is a runtime
// address, so a runtime_call relocation is recorded at the li/jr site.
instruct RethrowException()
%{
  match(Rethrow);

  // use the following format syntax
  format %{ "JMP rethrow_stub #@RethrowException" %}
  ins_encode %{
    __ block_comment("@ RethrowException");

    cbuf.set_insts_mark();
    cbuf.relocate(cbuf.insts_mark(), runtime_call_Relocation::spec());

    // call OptoRuntime::rethrow_stub to get the exception handler in parent method
    __ li(T9, OptoRuntime::rethrow_stub());
    __ jr(T9);
    __ nop();  // branch delay slot
  %}
  ins_pipe( pipe_jump );
%}
// Branch on pointer compared with NULL (cheaper than branchConP: the second
// operand is the hardwired zero register).  Only EQ/NE are meaningful for a
// NULL compare; the unsigned orderings are commented out.
instruct branchConP_zero(cmpOpU cmp, mRegP op1, immP0 zero, label labl) %{
  match(If cmp (CmpP op1 zero));
  effect(USE labl);

  ins_cost(180);
  format %{ "b$cmp $op1, R0, $labl #@branchConP_zero" %}

  ins_encode %{
    Register op1 = $op1$$Register;
    Register op2 = R0;
    Label &L = *($labl$$label);
    int flag = $cmp$$cmpcode;

    // NOTE(review): '&L' — the address of a C++ reference — is always
    // non-null, so the zero-offset else arms are dead code; the pattern is
    // kept for consistency with the rest of the file.
    switch(flag)
    {
      case 0x01: //equal
        if (&L)
          __ beq(op1, op2, L);
        else
          __ beq(op1, op2, (int)0);
        break;
      case 0x02: //not_equal
        if (&L)
          __ bne(op1, op2, L);
        else
          __ bne(op1, op2, (int)0);
        break;
      /*
      case 0x03: //above
        __ sltu(AT, op2, op1);
        if(&L)
          __ bne(R0, AT, L);
        else
          __ bne(R0, AT, (int)0);
        break;
      case 0x04: //above_equal
        __ sltu(AT, op1, op2);
        if(&L)
          __ beq(AT, R0, L);
        else
          __ beq(AT, R0, (int)0);
        break;
      case 0x05: //below
        __ sltu(AT, op1, op2);
        if(&L)
          __ bne(R0, AT, L);
        else
          __ bne(R0, AT, (int)0);
        break;
      case 0x06: //below_equal
        __ sltu(AT, op2, op1);
        if(&L)
          __ beq(AT, R0, L);
        else
          __ beq(AT, R0, (int)0);
        break;
      */
      default:
        Unimplemented();
    }
    __ nop();  // branch delay slot
  %}

  ins_pc_relative(1);
  ins_pipe( pipe_alu_branch );
%}
// Branch on unsigned pointer compare (register vs register).  Orderings
// (above/below...) are synthesized with sltu into AT, then branched on.
instruct branchConP(cmpOpU cmp, mRegP op1, mRegP op2, label labl) %{
  match(If cmp (CmpP op1 op2));
//  predicate(can_branch_register(_kids[0]->_leaf, _kids[1]->_leaf));
  effect(USE labl);

  ins_cost(200);
  format %{ "b$cmp $op1, $op2, $labl #@branchConP" %}

  ins_encode %{
    Register op1 = $op1$$Register;
    Register op2 = $op2$$Register;
    Label &L = *($labl$$label);
    int flag = $cmp$$cmpcode;

    // NOTE(review): '&L' is always non-null (address of a reference); the
    // zero-offset arms are dead code kept for file-wide consistency.
    switch(flag)
    {
      case 0x01: //equal
        if (&L)
          __ beq(op1, op2, L);
        else
          __ beq(op1, op2, (int)0);
        break;
      case 0x02: //not_equal
        if (&L)
          __ bne(op1, op2, L);
        else
          __ bne(op1, op2, (int)0);
        break;
      case 0x03: //above
        __ sltu(AT, op2, op1);
        if(&L)
          __ bne(R0, AT, L);
        else
          __ bne(R0, AT, (int)0);
        break;
      case 0x04: //above_equal
        __ sltu(AT, op1, op2);
        if(&L)
          __ beq(AT, R0, L);
        else
          __ beq(AT, R0, (int)0);
        break;
      case 0x05: //below
        __ sltu(AT, op1, op2);
        if(&L)
          __ bne(R0, AT, L);
        else
          __ bne(R0, AT, (int)0);
        break;
      case 0x06: //below_equal
        __ sltu(AT, op2, op1);
        if(&L)
          __ beq(AT, R0, L);
        else
          __ beq(AT, R0, (int)0);
        break;
      default:
        Unimplemented();
    }
    __ nop();  // branch delay slot
  %}

  ins_pc_relative(1);
  ins_pipe( pipe_alu_branch );
%}
// Branch on narrow (compressed) oop compared with NULL: compressed NULL is
// zero, so compare against R0 directly.  Only EQ/NE apply.
instruct cmpN_null_branch(cmpOp cmp, mRegN op1, immN0 null, label labl) %{
  match(If cmp (CmpN op1 null));
  effect(USE labl);

  ins_cost(180);
  format %{ "CMP $op1,0\t! compressed ptr\n\t"
            "BP$cmp $labl @ cmpN_null_branch" %}
  ins_encode %{
    Register op1 = $op1$$Register;
    Register op2 = R0;
    Label &L = *($labl$$label);
    int flag = $cmp$$cmpcode;

    // NOTE(review): '&L' is always non-null; zero-offset arms are dead code.
    switch(flag)
    {
      case 0x01: //equal
        if (&L)
          __ beq(op1, op2, L);
        else
          __ beq(op1, op2, (int)0);
        break;
      case 0x02: //not_equal
        if (&L)
          __ bne(op1, op2, L);
        else
          __ bne(op1, op2, (int)0);
        break;
      default:
        Unimplemented();
    }
    __ nop();  // branch delay slot
  %}
//TODO: pipe_branchP or create pipe_branchN LEE
  ins_pc_relative(1);
  ins_pipe( pipe_alu_branch );
%}
// Branch on narrow (compressed) oop compare, register vs register.  Narrow
// oops compare as unsigned 32-bit values, hence sltu for the orderings.
instruct cmpN_reg_branch(cmpOp cmp, mRegN op1, mRegN op2, label labl) %{
  match(If cmp (CmpN op1 op2));
  effect(USE labl);

  ins_cost(180);
  format %{ "CMP $op1,$op2\t! compressed ptr\n\t"
            "BP$cmp $labl" %}
  ins_encode %{
    Register op1_reg = $op1$$Register;
    Register op2_reg = $op2$$Register;
    Label &L = *($labl$$label);
    int flag = $cmp$$cmpcode;

    // NOTE(review): '&L' is always non-null; zero-offset arms are dead code.
    switch(flag)
    {
      case 0x01: //equal
        if (&L)
          __ beq(op1_reg, op2_reg, L);
        else
          __ beq(op1_reg, op2_reg, (int)0);
        break;
      case 0x02: //not_equal
        if (&L)
          __ bne(op1_reg, op2_reg, L);
        else
          __ bne(op1_reg, op2_reg, (int)0);
        break;
      case 0x03: //above
        __ sltu(AT, op2_reg, op1_reg);
        if(&L)
          __ bne(R0, AT, L);
        else
          __ bne(R0, AT, (int)0);
        break;
      case 0x04: //above_equal
        __ sltu(AT, op1_reg, op2_reg);
        if(&L)
          __ beq(AT, R0, L);
        else
          __ beq(AT, R0, (int)0);
        break;
      case 0x05: //below
        __ sltu(AT, op1_reg, op2_reg);
        if(&L)
          __ bne(R0, AT, L);
        else
          __ bne(R0, AT, (int)0);
        break;
      case 0x06: //below_equal
        __ sltu(AT, op2_reg, op1_reg);
        if(&L)
          __ beq(AT, R0, L);
        else
          __ beq(AT, R0, (int)0);
        break;
      default:
        Unimplemented();
    }
    __ nop();  // branch delay slot
  %}
  ins_pc_relative(1);
  ins_pipe( pipe_alu_branch );
%}
// Branch on UNSIGNED int compare, register vs register (sltu for orderings).
instruct branchConIU_reg_reg(cmpOpU cmp, mRegI src1, mRegI src2, label labl) %{
  match( If cmp (CmpU src1 src2) );
  effect(USE labl);
  format %{ "BR$cmp $src1, $src2, $labl #@branchConIU_reg_reg" %}

  ins_encode %{
    Register op1 = $src1$$Register;
    Register op2 = $src2$$Register;
    Label &L = *($labl$$label);
    int flag = $cmp$$cmpcode;

    // NOTE(review): '&L' is always non-null; zero-offset arms are dead code.
    switch(flag)
    {
      case 0x01: //equal
        if (&L)
          __ beq(op1, op2, L);
        else
          __ beq(op1, op2, (int)0);
        break;
      case 0x02: //not_equal
        if (&L)
          __ bne(op1, op2, L);
        else
          __ bne(op1, op2, (int)0);
        break;
      case 0x03: //above
        __ sltu(AT, op2, op1);
        if(&L)
          __ bne(AT, R0, L);
        else
          __ bne(AT, R0, (int)0);
        break;
      case 0x04: //above_equal
        __ sltu(AT, op1, op2);
        if(&L)
          __ beq(AT, R0, L);
        else
          __ beq(AT, R0, (int)0);
        break;
      case 0x05: //below
        __ sltu(AT, op1, op2);
        if(&L)
          __ bne(AT, R0, L);
        else
          __ bne(AT, R0, (int)0);
        break;
      case 0x06: //below_equal
        __ sltu(AT, op2, op1);
        if(&L)
          __ beq(AT, R0, L);
        else
          __ beq(AT, R0, (int)0);
        break;
      default:
        Unimplemented();
    }
    __ nop();  // branch delay slot
  %}

  ins_pc_relative(1);
  ins_pipe( pipe_alu_branch );
%}
// Branch on UNSIGNED int compare, register vs immediate.  The immediate is
// first materialized into AT; note the ordering cases then reuse AT as both
// the comparison operand and the sltu result.
instruct branchConIU_reg_imm(cmpOpU cmp, mRegI src1, immI src2, label labl) %{
  match( If cmp (CmpU src1 src2) );
  effect(USE labl);
  format %{ "BR$cmp $src1, $src2, $labl #@branchConIU_reg_imm" %}

  ins_encode %{
    Register op1 = $src1$$Register;
    int val = $src2$$constant;
    Label &L = *($labl$$label);
    int flag = $cmp$$cmpcode;

    __ move(AT, val);
    // NOTE(review): '&L' is always non-null; zero-offset arms are dead code.
    switch(flag)
    {
      case 0x01: //equal
        if (&L)
          __ beq(op1, AT, L);
        else
          __ beq(op1, AT, (int)0);
        break;
      case 0x02: //not_equal
        if (&L)
          __ bne(op1, AT, L);
        else
          __ bne(op1, AT, (int)0);
        break;
      case 0x03: //above
        __ sltu(AT, AT, op1);
        if(&L)
          __ bne(R0, AT, L);
        else
          __ bne(R0, AT, (int)0);
        break;
      case 0x04: //above_equal
        __ sltu(AT, op1, AT);
        if(&L)
          __ beq(AT, R0, L);
        else
          __ beq(AT, R0, (int)0);
        break;
      case 0x05: //below
        __ sltu(AT, op1, AT);
        if(&L)
          __ bne(R0, AT, L);
        else
          __ bne(R0, AT, (int)0);
        break;
      case 0x06: //below_equal
        __ sltu(AT, AT, op1);
        if(&L)
          __ beq(AT, R0, L);
        else
          __ beq(AT, R0, (int)0);
        break;
      default:
        Unimplemented();
    }
    __ nop();  // branch delay slot
  %}

  ins_pc_relative(1);
  ins_pipe( pipe_alu_branch );
%}
// Branch on SIGNED int compare, register vs register (slt for orderings).
instruct branchConI_reg_reg(cmpOp cmp, mRegI src1, mRegI src2, label labl) %{
  match( If cmp (CmpI src1 src2) );
  effect(USE labl);
  format %{ "BR$cmp $src1, $src2, $labl #@branchConI_reg_reg" %}

  ins_encode %{
    Register op1 = $src1$$Register;
    Register op2 = $src2$$Register;
    Label &L = *($labl$$label);
    int flag = $cmp$$cmpcode;

    // NOTE(review): '&L' is always non-null; zero-offset arms are dead code.
    switch(flag)
    {
      case 0x01: //equal
        if (&L)
          __ beq(op1, op2, L);
        else
          __ beq(op1, op2, (int)0);
        break;
      case 0x02: //not_equal
        if (&L)
          __ bne(op1, op2, L);
        else
          __ bne(op1, op2, (int)0);
        break;
      case 0x03: //above
        __ slt(AT, op2, op1);
        if(&L)
          __ bne(R0, AT, L);
        else
          __ bne(R0, AT, (int)0);
        break;
      case 0x04: //above_equal
        __ slt(AT, op1, op2);
        if(&L)
          __ beq(AT, R0, L);
        else
          __ beq(AT, R0, (int)0);
        break;
      case 0x05: //below
        __ slt(AT, op1, op2);
        if(&L)
          __ bne(R0, AT, L);
        else
          __ bne(R0, AT, (int)0);
        break;
      case 0x06: //below_equal
        __ slt(AT, op2, op1);
        if(&L)
          __ beq(AT, R0, L);
        else
          __ beq(AT, R0, (int)0);
        break;
      default:
        Unimplemented();
    }
    __ nop();  // branch delay slot
  %}

  ins_pc_relative(1);
  ins_pipe( pipe_alu_branch );
%}
// Branch on SIGNED int compared with zero: uses the MIPS compare-with-zero
// branch forms (bgtz/bgez/bltz/blez), so no temp register is needed.
instruct branchConI_reg_imm0(cmpOp cmp, mRegI src1, immI0 src2, label labl) %{
  match( If cmp (CmpI src1 src2) );
  effect(USE labl);
  ins_cost(170);
  format %{ "BR$cmp $src1, $src2, $labl #@branchConI_reg_imm0" %}

  ins_encode %{
    Register op1 = $src1$$Register;
//    int val = $src2$$constant;
    Label &L = *($labl$$label);
    int flag = $cmp$$cmpcode;

    //__ move(AT, val);
    // NOTE(review): '&L' is always non-null; zero-offset arms are dead code.
    switch(flag)
    {
      case 0x01: //equal
        if (&L)
          __ beq(op1, R0, L);
        else
          __ beq(op1, R0, (int)0);
        break;
      case 0x02: //not_equal
        if (&L)
          __ bne(op1, R0, L);
        else
          __ bne(op1, R0, (int)0);
        break;
      case 0x03: //greater
        if(&L)
          __ bgtz(op1, L);
        else
          __ bgtz(op1, (int)0);
        break;
      case 0x04: //greater_equal
        if(&L)
          __ bgez(op1, L);
        else
          __ bgez(op1, (int)0);
        break;
      case 0x05: //less
        if(&L)
          __ bltz(op1, L);
        else
          __ bltz(op1, (int)0);
        break;
      case 0x06: //less_equal
        if(&L)
          __ blez(op1, L);
        else
          __ blez(op1, (int)0);
        break;
      default:
        Unimplemented();
    }
    __ nop();  // branch delay slot
  %}

  ins_pc_relative(1);
  ins_pipe( pipe_alu_branch );
%}
// Signed int compare against an arbitrary immediate: materializes the constant
// into AT first, then branches like the reg/reg form. Note the ordered cases
// clobber AT again with the slt result after the move.
7130 instruct branchConI_reg_imm(cmpOp cmp, mRegI src1, immI src2, label labl) %{
7131 match( If cmp (CmpI src1 src2) );
7132 effect(USE labl);
7133 ins_cost(200);
7134 format %{ "BR$cmp $src1, $src2, $labl #@branchConI_reg_imm" %}
7136 ins_encode %{
7137 Register op1 = $src1$$Register;
7138 int val = $src2$$constant;
7139 Label &L = *($labl$$label);
7140 int flag = $cmp$$cmpcode;
// Load the immediate into the scratch register once, up front.
7142 __ move(AT, val);
7143 switch(flag)
7144 {
7145 case 0x01: //equal
7146 if (&L)
7147 __ beq(op1, AT, L);
7148 else
7149 __ beq(op1, AT, (int)0);
7150 break;
7151 case 0x02: //not_equal
7152 if (&L)
7153 __ bne(op1, AT, L);
7154 else
7155 __ bne(op1, AT, (int)0);
7156 break;
7157 case 0x03: //greater
7158 __ slt(AT, AT, op1);
7159 if(&L)
7160 __ bne(R0, AT, L);
7161 else
7162 __ bne(R0, AT, (int)0);
7163 break;
7164 case 0x04: //greater_equal
7165 __ slt(AT, op1, AT);
7166 if(&L)
7167 __ beq(AT, R0, L);
7168 else
7169 __ beq(AT, R0, (int)0);
7170 break;
7171 case 0x05: //less
7172 __ slt(AT, op1, AT);
7173 if(&L)
7174 __ bne(R0, AT, L);
7175 else
7176 __ bne(R0, AT, (int)0);
7177 break;
7178 case 0x06: //less_equal
7179 __ slt(AT, AT, op1);
7180 if(&L)
7181 __ beq(AT, R0, L);
7182 else
7183 __ beq(AT, R0, (int)0);
7184 break;
7185 default:
7186 Unimplemented();
7187 }
// Branch delay slot filler.
7188 __ nop();
7189 %}
7191 ins_pc_relative(1);
7192 ins_pipe( pipe_alu_branch );
7193 %}
// Unsigned int compare against zero. Degenerate cases are resolved at code-gen
// time: "above_equal" (u >= 0) is always true, so an unconditional beq(R0,R0)
// is emitted; "below" (u < 0) is never true, so the encoder returns without
// emitting any code at all (the `break;` after `return;` is dead code, kept as-is).
7195 instruct branchConIU_reg_imm0(cmpOpU cmp, mRegI src1, immI0 zero, label labl) %{
7196 match( If cmp (CmpU src1 zero) );
7197 effect(USE labl);
7198 format %{ "BR$cmp $src1, zero, $labl #@branchConIU_reg_imm0" %}
7200 ins_encode %{
7201 Register op1 = $src1$$Register;
7202 Label &L = *($labl$$label);
7203 int flag = $cmp$$cmpcode;
7205 switch(flag)
7206 {
7207 case 0x01: //equal
7208 if (&L)
7209 __ beq(op1, R0, L);
7210 else
7211 __ beq(op1, R0, (int)0);
7212 break;
7213 case 0x02: //not_equal
7214 if (&L)
7215 __ bne(op1, R0, L);
7216 else
7217 __ bne(op1, R0, (int)0);
7218 break;
7219 case 0x03: //above
// Unsigned > 0 is simply != 0.
7220 if(&L)
7221 __ bne(R0, op1, L);
7222 else
7223 __ bne(R0, op1, (int)0);
7224 break;
7225 case 0x04: //above_equal
// Always true: unconditional branch.
7226 if(&L)
7227 __ beq(R0, R0, L);
7228 else
7229 __ beq(R0, R0, (int)0);
7230 break;
7231 case 0x05: //below
// Never true: emit nothing (skips the trailing nop as well).
7232 return;
7233 break;
7234 case 0x06: //below_equal
// Unsigned <= 0 is simply == 0.
7235 if(&L)
7236 __ beq(op1, R0, L);
7237 else
7238 __ beq(op1, R0, (int)0);
7239 break;
7240 default:
7241 Unimplemented();
7242 }
7243 __ nop();
7244 %}
7246 ins_pc_relative(1);
7247 ins_pipe( pipe_alu_branch );
7248 %}
// Unsigned int compare against a 16-bit immediate. above_equal/below use sltiu
// with the immediate directly; the other cases load the constant into AT first.
7251 instruct branchConIU_reg_immI16(cmpOpU cmp, mRegI src1, immI16 src2, label labl) %{
7252 match( If cmp (CmpU src1 src2) );
7253 effect(USE labl);
7254 ins_cost(180);
7255 format %{ "BR$cmp $src1, $src2, $labl #@branchConIU_reg_immI16" %}
7257 ins_encode %{
7258 Register op1 = $src1$$Register;
7259 int val = $src2$$constant;
7260 Label &L = *($labl$$label);
7261 int flag = $cmp$$cmpcode;
7263 switch(flag)
7264 {
7265 case 0x01: //equal
7266 __ move(AT, val);
7267 if (&L)
7268 __ beq(op1, AT, L);
7269 else
7270 __ beq(op1, AT, (int)0);
7271 break;
7272 case 0x02: //not_equal
7273 __ move(AT, val);
7274 if (&L)
7275 __ bne(op1, AT, L);
7276 else
7277 __ bne(op1, AT, (int)0);
7278 break;
7279 case 0x03: //above
7280 __ move(AT, val);
7281 __ sltu(AT, AT, op1);
7282 if(&L)
7283 __ bne(R0, AT, L);
7284 else
7285 __ bne(R0, AT, (int)0);
7286 break;
7287 case 0x04: //above_equal
// sltiu folds the 16-bit immediate into the compare.
7288 __ sltiu(AT, op1, val);
7289 if(&L)
7290 __ beq(AT, R0, L);
7291 else
7292 __ beq(AT, R0, (int)0);
7293 break;
7294 case 0x05: //below
7295 __ sltiu(AT, op1, val);
7296 if(&L)
7297 __ bne(R0, AT, L);
7298 else
7299 __ bne(R0, AT, (int)0);
7300 break;
7301 case 0x06: //below_equal
7302 __ move(AT, val);
7303 __ sltu(AT, AT, op1);
7304 if(&L)
7305 __ beq(AT, R0, L);
7306 else
7307 __ beq(AT, R0, (int)0);
7308 break;
7309 default:
7310 Unimplemented();
7311 }
// Branch delay slot filler.
7312 __ nop();
7313 %}
7315 ins_pc_relative(1);
7316 ins_pipe( pipe_alu_branch );
7317 %}
// Signed long compare-and-branch (reg/reg). Unlike the int variants above, each
// case emits its own explicit delayed()->nop() delay-slot filler instead of a
// single trailing nop after the switch.
7320 instruct branchConL_regL_regL(cmpOp cmp, mRegL src1, mRegL src2, label labl) %{
7321 match( If cmp (CmpL src1 src2) );
7322 effect(USE labl);
7323 format %{ "BR$cmp $src1, $src2, $labl #@branchConL_regL_regL" %}
7324 ins_cost(250);
7326 ins_encode %{
7327 Register opr1_reg = as_Register($src1$$reg);
7328 Register opr2_reg = as_Register($src2$$reg);
7330 Label &target = *($labl$$label);
7331 int flag = $cmp$$cmpcode;
7333 switch(flag)
7334 {
7335 case 0x01: //equal
7336 if (&target)
7337 __ beq(opr1_reg, opr2_reg, target);
7338 else
7339 __ beq(opr1_reg, opr2_reg, (int)0);
7340 __ delayed()->nop();
7341 break;
7343 case 0x02: //not_equal
7344 if(&target)
7345 __ bne(opr1_reg, opr2_reg, target);
7346 else
7347 __ bne(opr1_reg, opr2_reg, (int)0);
7348 __ delayed()->nop();
7349 break;
7351 case 0x03: //greater
7352 __ slt(AT, opr2_reg, opr1_reg);
7353 if(&target)
7354 __ bne(AT, R0, target);
7355 else
7356 __ bne(AT, R0, (int)0);
7357 __ delayed()->nop();
7358 break;
7360 case 0x04: //greater_equal
7361 __ slt(AT, opr1_reg, opr2_reg);
7362 if(&target)
7363 __ beq(AT, R0, target);
7364 else
7365 __ beq(AT, R0, (int)0);
7366 __ delayed()->nop();
7368 break;
7370 case 0x05: //less
7371 __ slt(AT, opr1_reg, opr2_reg);
7372 if(&target)
7373 __ bne(AT, R0, target);
7374 else
7375 __ bne(AT, R0, (int)0);
7376 __ delayed()->nop();
7378 break;
7380 case 0x06: //less_equal
7381 __ slt(AT, opr2_reg, opr1_reg);
7383 if(&target)
7384 __ beq(AT, R0, target);
7385 else
7386 __ beq(AT, R0, (int)0);
7387 __ delayed()->nop();
7389 break;
7391 default:
7392 Unimplemented();
7393 }
7394 %}
7397 ins_pc_relative(1);
7398 ins_pipe( pipe_alu_branch );
7399 %}
// Long compare against a 16-bit immediate, implemented by subtracting the
// constant (daddiu with -val) and branching on the sign/zero of the difference.
// NOTE(review): correctness relies on the immL16_sub operand constraining val so
// the subtraction cannot overflow — confirm the operand definition elsewhere
// in this file.
7401 instruct branchConL_reg_immL16_sub(cmpOp cmp, mRegL src1, immL16_sub src2, label labl) %{
7402 match( If cmp (CmpL src1 src2) );
7403 effect(USE labl);
7404 ins_cost(180);
7405 format %{ "BR$cmp $src1, $src2, $labl #@branchConL_reg_immL16_sub" %}
7407 ins_encode %{
7408 Register op1 = $src1$$Register;
7409 int val = $src2$$constant;
7410 Label &L = *($labl$$label);
7411 int flag = $cmp$$cmpcode;
// AT = op1 - val; all cases then test AT against zero.
7413 __ daddiu(AT, op1, -1 * val);
7414 switch(flag)
7415 {
7416 case 0x01: //equal
7417 if (&L)
7418 __ beq(R0, AT, L);
7419 else
7420 __ beq(R0, AT, (int)0);
7421 break;
7422 case 0x02: //not_equal
7423 if (&L)
7424 __ bne(R0, AT, L);
7425 else
7426 __ bne(R0, AT, (int)0);
7427 break;
7428 case 0x03: //greater
7429 if(&L)
7430 __ bgtz(AT, L);
7431 else
7432 __ bgtz(AT, (int)0);
7433 break;
7434 case 0x04: //greater_equal
7435 if(&L)
7436 __ bgez(AT, L);
7437 else
7438 __ bgez(AT, (int)0);
7439 break;
7440 case 0x05: //less
7441 if(&L)
7442 __ bltz(AT, L);
7443 else
7444 __ bltz(AT, (int)0);
7445 break;
7446 case 0x06: //less_equal
7447 if(&L)
7448 __ blez(AT, L);
7449 else
7450 __ blez(AT, (int)0);
7451 break;
7452 default:
7453 Unimplemented();
7454 }
// Branch delay slot filler.
7455 __ nop();
7456 %}
7458 ins_pc_relative(1);
7459 ins_pipe( pipe_alu_branch );
7460 %}
// Int analogue of branchConL_reg_immL16_sub: subtract the constant with 32-bit
// addiu32, then branch on the sign/zero of the difference.
// NOTE(review): like the long variant, this assumes immI16_sub rules out
// overflow in the subtraction — confirm the operand definition.
7463 instruct branchConI_reg_imm16_sub(cmpOp cmp, mRegI src1, immI16_sub src2, label labl) %{
7464 match( If cmp (CmpI src1 src2) );
7465 effect(USE labl);
7466 ins_cost(180);
7467 format %{ "BR$cmp $src1, $src2, $labl #@branchConI_reg_imm16_sub" %}
7469 ins_encode %{
7470 Register op1 = $src1$$Register;
7471 int val = $src2$$constant;
7472 Label &L = *($labl$$label);
7473 int flag = $cmp$$cmpcode;
// AT = op1 - val (32-bit); all cases then test AT against zero.
7475 __ addiu32(AT, op1, -1 * val);
7476 switch(flag)
7477 {
7478 case 0x01: //equal
7479 if (&L)
7480 __ beq(R0, AT, L);
7481 else
7482 __ beq(R0, AT, (int)0);
7483 break;
7484 case 0x02: //not_equal
7485 if (&L)
7486 __ bne(R0, AT, L);
7487 else
7488 __ bne(R0, AT, (int)0);
7489 break;
7490 case 0x03: //greater
7491 if(&L)
7492 __ bgtz(AT, L);
7493 else
7494 __ bgtz(AT, (int)0);
7495 break;
7496 case 0x04: //greater_equal
7497 if(&L)
7498 __ bgez(AT, L);
7499 else
7500 __ bgez(AT, (int)0);
7501 break;
7502 case 0x05: //less
7503 if(&L)
7504 __ bltz(AT, L);
7505 else
7506 __ bltz(AT, (int)0);
7507 break;
7508 case 0x06: //less_equal
7509 if(&L)
7510 __ blez(AT, L);
7511 else
7512 __ blez(AT, (int)0);
7513 break;
7514 default:
7515 Unimplemented();
7516 }
// Branch delay slot filler.
7517 __ nop();
7518 %}
7520 ins_pc_relative(1);
7521 ins_pipe( pipe_alu_branch );
7522 %}
// Long compare against constant zero: zero-compare branches where MIPS provides
// them; the "less" case goes through slt because bltz is not used here (kept
// as written). A single delayed()->nop() after the switch fills the delay slot.
7524 instruct branchConL_regL_immL0(cmpOp cmp, mRegL src1, immL0 zero, label labl) %{
7525 match( If cmp (CmpL src1 zero) );
7526 effect(USE labl);
7527 format %{ "BR$cmp $src1, zero, $labl #@branchConL_regL_immL0" %}
7528 ins_cost(150);
7530 ins_encode %{
7531 Register opr1_reg = as_Register($src1$$reg);
7532 Label &target = *($labl$$label);
7533 int flag = $cmp$$cmpcode;
7535 switch(flag)
7536 {
7537 case 0x01: //equal
7538 if (&target)
7539 __ beq(opr1_reg, R0, target);
7540 else
7541 __ beq(opr1_reg, R0, int(0));
7542 break;
7544 case 0x02: //not_equal
7545 if(&target)
7546 __ bne(opr1_reg, R0, target);
7547 else
7548 __ bne(opr1_reg, R0, (int)0);
7549 break;
7551 case 0x03: //greater
7552 if(&target)
7553 __ bgtz(opr1_reg, target);
7554 else
7555 __ bgtz(opr1_reg, (int)0);
7556 break;
7558 case 0x04: //greater_equal
7559 if(&target)
7560 __ bgez(opr1_reg, target);
7561 else
7562 __ bgez(opr1_reg, (int)0);
7563 break;
7565 case 0x05: //less
7566 __ slt(AT, opr1_reg, R0);
7567 if(&target)
7568 __ bne(AT, R0, target);
7569 else
7570 __ bne(AT, R0, (int)0);
7571 break;
7573 case 0x06: //less_equal
7574 if (&target)
7575 __ blez(opr1_reg, target);
7576 else
7577 __ blez(opr1_reg, int(0));
7578 break;
7580 default:
7581 Unimplemented();
7582 }
7583 __ delayed()->nop();
7584 %}
7587 ins_pc_relative(1);
7588 ins_pipe( pipe_alu_branch );
7589 %}
7592 //FIXME
// Float compare-and-branch: sets the FPU condition flag with a c.cond.s compare,
// then branches with bc1t/bc1f. Ordered relations are expressed via the
// unordered compares (c_ule_s / c_ult_s) plus an inverted branch so that NaN
// operands fall through (branch not taken) for >, >=, and are not taken for <, <=.
7593 instruct branchConF_reg_reg(cmpOp cmp, regF src1, regF src2, label labl) %{
7594 match( If cmp (CmpF src1 src2) );
7595 effect(USE labl);
7596 format %{ "BR$cmp $src1, $src2, $labl #@branchConF_reg_reg" %}
7598 ins_encode %{
7599 FloatRegister reg_op1 = $src1$$FloatRegister;
7600 FloatRegister reg_op2 = $src2$$FloatRegister;
7601 Label &L = *($labl$$label);
7602 int flag = $cmp$$cmpcode;
7604 switch(flag)
7605 {
7606 case 0x01: //equal
7607 __ c_eq_s(reg_op1, reg_op2);
7608 if (&L)
7609 __ bc1t(L);
7610 else
7611 __ bc1t((int)0);
7612 break;
7613 case 0x02: //not_equal
7614 __ c_eq_s(reg_op1, reg_op2);
7615 if (&L)
7616 __ bc1f(L);
7617 else
7618 __ bc1f((int)0);
7619 break;
7620 case 0x03: //greater
// !(op1 <= op2 || unordered)  ==  op1 > op2
7621 __ c_ule_s(reg_op1, reg_op2);
7622 if(&L)
7623 __ bc1f(L);
7624 else
7625 __ bc1f((int)0);
7626 break;
7627 case 0x04: //greater_equal
7628 __ c_ult_s(reg_op1, reg_op2);
7629 if(&L)
7630 __ bc1f(L);
7631 else
7632 __ bc1f((int)0);
7633 break;
7634 case 0x05: //less
7635 __ c_ult_s(reg_op1, reg_op2);
7636 if(&L)
7637 __ bc1t(L);
7638 else
7639 __ bc1t((int)0);
7640 break;
7641 case 0x06: //less_equal
7642 __ c_ule_s(reg_op1, reg_op2);
7643 if(&L)
7644 __ bc1t(L);
7645 else
7646 __ bc1t((int)0);
7647 break;
7648 default:
7649 Unimplemented();
7650 }
// Branch delay slot filler.
7651 __ nop();
7652 %}
7654 ins_pc_relative(1);
7655 ins_pipe(pipe_slow);
7656 %}
// Double compare-and-branch; mirrors branchConF_reg_reg using the .d compare
// forms. not_equal deliberately uses c_eq_d (not c_ueq_d) so that NaN != NaN
// branches as "not equal" — see the in-line 2016/4/19 note below.
7658 instruct branchConD_reg_reg(cmpOp cmp, regD src1, regD src2, label labl) %{
7659 match( If cmp (CmpD src1 src2) );
7660 effect(USE labl);
7661 format %{ "BR$cmp $src1, $src2, $labl #@branchConD_reg_reg" %}
7663 ins_encode %{
7664 FloatRegister reg_op1 = $src1$$FloatRegister;
7665 FloatRegister reg_op2 = $src2$$FloatRegister;
7666 Label &L = *($labl$$label);
7667 int flag = $cmp$$cmpcode;
7669 switch(flag)
7670 {
7671 case 0x01: //equal
7672 __ c_eq_d(reg_op1, reg_op2);
7673 if (&L)
7674 __ bc1t(L);
7675 else
7676 __ bc1t((int)0);
7677 break;
7678 case 0x02: //not_equal
7679 //2016/4/19 aoqi: c_ueq_d cannot distinguish NaN from equal. Double.isNaN(Double) is implemented by 'f != f', so the use of c_ueq_d causes bugs.
7680 __ c_eq_d(reg_op1, reg_op2);
7681 if (&L)
7682 __ bc1f(L);
7683 else
7684 __ bc1f((int)0);
7685 break;
7686 case 0x03: //greater
7687 __ c_ule_d(reg_op1, reg_op2);
7688 if(&L)
7689 __ bc1f(L);
7690 else
7691 __ bc1f((int)0);
7692 break;
7693 case 0x04: //greater_equal
7694 __ c_ult_d(reg_op1, reg_op2);
7695 if(&L)
7696 __ bc1f(L);
7697 else
7698 __ bc1f((int)0);
7699 break;
7700 case 0x05: //less
7701 __ c_ult_d(reg_op1, reg_op2);
7702 if(&L)
7703 __ bc1t(L);
7704 else
7705 __ bc1t((int)0);
7706 break;
7707 case 0x06: //less_equal
7708 __ c_ule_d(reg_op1, reg_op2);
7709 if(&L)
7710 __ bc1t(L);
7711 else
7712 __ bc1t((int)0);
7713 break;
7714 default:
7715 Unimplemented();
7716 }
// Branch delay slot filler.
7717 __ nop();
7718 %}
7720 ins_pc_relative(1);
7721 ins_pipe(pipe_slow);
7722 %}
7725 // Call Runtime Instruction
// Direct call into the VM runtime; the actual call sequence is emitted by the
// Java_To_Runtime encoding defined elsewhere in this file. Aligned to 16 bytes.
7726 instruct CallRuntimeDirect(method meth) %{
7727 match(CallRuntime );
7728 effect(USE meth);
7730 ins_cost(300);
7731 format %{ "CALL,runtime #@CallRuntimeDirect" %}
7732 ins_encode( Java_To_Runtime( meth ) );
7733 ins_pipe( pipe_slow );
7734 ins_alignment(16);
7735 %}
7739 //------------------------MemBar Instructions-------------------------------
7740 //Memory barrier flavors
// MemBarAcquire: zero-size/empty encoding — no fence instruction is emitted here.
7742 instruct membar_acquire() %{
7743 match(MemBarAcquire);
7744 ins_cost(0);
7746 size(0);
7747 format %{ "MEMBAR-acquire (empty) @ membar_acquire" %}
7748 ins_encode();
7749 ins_pipe(empty);
7750 %}
// LoadFence: emits a full MIPS sync instruction.
7752 instruct load_fence() %{
7753 match(LoadFence);
7754 ins_cost(400);
7756 format %{ "MEMBAR @ load_fence" %}
7757 ins_encode %{
7758 __ sync();
7759 %}
7760 ins_pipe(pipe_slow);
7761 %}
// MemBarAcquireLock: empty — the acquire semantics come from the CAS in the
// preceding FastLock, per the format string.
7763 instruct membar_acquire_lock()
7764 %{
7765 match(MemBarAcquireLock);
7766 ins_cost(0);
7768 size(0);
7769 format %{ "MEMBAR-acquire (acquire as part of CAS in prior FastLock so empty encoding) @ membar_acquire_lock" %}
7770 ins_encode();
7771 ins_pipe(empty);
7772 %}
// MemBarRelease: zero-size/empty encoding — no fence instruction is emitted here.
7774 instruct membar_release() %{
7775 match(MemBarRelease);
7776 ins_cost(0);
7778 size(0);
7779 format %{ "MEMBAR-release (empty) @ membar_release" %}
7780 ins_encode();
7781 ins_pipe(empty);
7782 %}
// StoreFence: emits a full MIPS sync instruction.
7784 instruct store_fence() %{
7785 match(StoreFence);
7786 ins_cost(400);
7788 format %{ "MEMBAR @ store_fence" %}
7790 ins_encode %{
7791 __ sync();
7792 %}
7794 ins_pipe(pipe_slow);
7795 %}
// MemBarReleaseLock: empty — release semantics are provided by FastUnlock.
7797 instruct membar_release_lock()
7798 %{
7799 match(MemBarReleaseLock);
7800 ins_cost(0);
7802 size(0);
7803 format %{ "MEMBAR-release-lock (release in FastUnlock so empty) @ membar_release_lock" %}
7804 ins_encode();
7805 ins_pipe(empty);
7806 %}
// MemBarVolatile: full sync, skipped entirely on uniprocessor systems.
7809 instruct membar_volatile() %{
7810 match(MemBarVolatile);
7811 ins_cost(400);
7813 format %{ "MEMBAR-volatile" %}
7814 ins_encode %{
7815 if( !os::is_MP() ) return; // Not needed on single CPU
7816 __ sync();
7818 %}
7819 ins_pipe(pipe_slow);
7820 %}
// MemBarVolatile elided when a prior store-load barrier already covers it
// (predicate Matcher::post_store_load_barrier); zero cost, zero size.
7822 instruct unnecessary_membar_volatile() %{
7823 match(MemBarVolatile);
7824 predicate(Matcher::post_store_load_barrier(n));
7825 ins_cost(0);
7827 size(0);
7828 format %{ "MEMBAR-volatile (unnecessary so empty encoding) @ unnecessary_membar_volatile" %}
7829 ins_encode( );
7830 ins_pipe(empty);
7831 %}
// MemBarStoreStore: empty encoding — no instruction emitted on this port.
7833 instruct membar_storestore() %{
7834 match(MemBarStoreStore);
7836 ins_cost(0);
7837 size(0);
7838 format %{ "MEMBAR-storestore (empty encoding) @ membar_storestore" %}
7839 ins_encode( );
7840 ins_pipe(empty);
7841 %}
7843 //----------Move Instructions--------------------------------------------------
// CastX2P (long -> pointer): plain register move, elided when src == dst.
7844 instruct castX2P(mRegP dst, mRegL src) %{
7845 match(Set dst (CastX2P src));
7846 format %{ "castX2P $dst, $src @ castX2P" %}
7847 ins_encode %{
7848 Register src = $src$$Register;
7849 Register dst = $dst$$Register;
7851 if(src != dst)
7852 __ move(dst, src);
7853 %}
7854 ins_cost(10);
7855 ins_pipe( ialu_regI_mov );
7856 %}
// CastP2X (pointer -> long): plain register move, elided when src == dst.
7858 instruct castP2X(mRegL dst, mRegP src ) %{
7859 match(Set dst (CastP2X src));
7861 format %{ "mov $dst, $src\t #@castP2X" %}
7862 ins_encode %{
7863 Register src = $src$$Register;
7864 Register dst = $dst$$Register;
7866 if(src != dst)
7867 __ move(dst, src);
7868 %}
7869 ins_pipe( ialu_regI_mov );
7870 %}
// MoveF2I: reinterpret float bits as int via mfc1 (FPR -> GPR, 32-bit).
7872 instruct MoveF2I_reg_reg(mRegI dst, regF src) %{
7873 match(Set dst (MoveF2I src));
7874 effect(DEF dst, USE src);
7875 ins_cost(85);
7876 format %{ "MoveF2I $dst, $src @ MoveF2I_reg_reg" %}
7877 ins_encode %{
7878 Register dst = as_Register($dst$$reg);
7879 FloatRegister src = as_FloatRegister($src$$reg);
7881 __ mfc1(dst, src);
7882 %}
7883 ins_pipe( pipe_slow );
7884 %}
// MoveI2F: reinterpret int bits as float via mtc1 (GPR -> FPR, 32-bit).
7886 instruct MoveI2F_reg_reg(regF dst, mRegI src) %{
7887 match(Set dst (MoveI2F src));
7888 effect(DEF dst, USE src);
7889 ins_cost(85);
7890 format %{ "MoveI2F $dst, $src @ MoveI2F_reg_reg" %}
7891 ins_encode %{
7892 Register src = as_Register($src$$reg);
7893 FloatRegister dst = as_FloatRegister($dst$$reg);
7895 __ mtc1(src, dst);
7896 %}
7897 ins_pipe( pipe_slow );
7898 %}
// MoveD2L: reinterpret double bits as long via dmfc1 (FPR -> GPR, 64-bit).
7900 instruct MoveD2L_reg_reg(mRegL dst, regD src) %{
7901 match(Set dst (MoveD2L src));
7902 effect(DEF dst, USE src);
7903 ins_cost(85);
7904 format %{ "MoveD2L $dst, $src @ MoveD2L_reg_reg" %}
7905 ins_encode %{
7906 Register dst = as_Register($dst$$reg);
7907 FloatRegister src = as_FloatRegister($src$$reg);
7909 __ dmfc1(dst, src);
7910 %}
7911 ins_pipe( pipe_slow );
7912 %}
// MoveL2D: reinterpret long bits as double via dmtc1 (GPR -> FPR, 64-bit).
7914 instruct MoveL2D_reg_reg(regD dst, mRegL src) %{
7915 match(Set dst (MoveL2D src));
7916 effect(DEF dst, USE src);
7917 ins_cost(85);
7918 format %{ "MoveL2D $dst, $src @ MoveL2D_reg_reg" %}
7919 ins_encode %{
7920 FloatRegister dst = as_FloatRegister($dst$$reg);
7921 Register src = as_Register($src$$reg);
7923 __ dmtc1(src, dst);
7924 %}
7925 ins_pipe( pipe_slow );
7926 %}
7928 //----------Conditional Move---------------------------------------------------
7929 // Conditional move
// CMoveI guarded by a signed int compare. AT holds the condition (subu32
// difference for eq/ne, slt result for the ordered cases); movz copies src into
// dst when AT == 0, movn when AT != 0. dst is both input and output (Binary dst src).
7930 instruct cmovI_cmpI_reg_reg(mRegI dst, mRegI src, mRegI tmp1, mRegI tmp2, cmpOp cop ) %{
7931 match(Set dst (CMoveI (Binary cop (CmpI tmp1 tmp2)) (Binary dst src)));
7932 ins_cost(80);
7933 format %{
7934 "CMP$cop $tmp1, $tmp2\t @cmovI_cmpI_reg_reg\n"
7935 "\tCMOV $dst,$src \t @cmovI_cmpI_reg_reg"
7936 %}
7938 ins_encode %{
7939 Register op1 = $tmp1$$Register;
7940 Register op2 = $tmp2$$Register;
7941 Register dst = $dst$$Register;
7942 Register src = $src$$Register;
7943 int flag = $cop$$cmpcode;
7945 switch(flag)
7946 {
7947 case 0x01: //equal
7948 __ subu32(AT, op1, op2);
7949 __ movz(dst, src, AT);
7950 break;
7952 case 0x02: //not_equal
7953 __ subu32(AT, op1, op2);
7954 __ movn(dst, src, AT);
7955 break;
7957 case 0x03: //great
7958 __ slt(AT, op2, op1);
7959 __ movn(dst, src, AT);
7960 break;
7962 case 0x04: //great_equal
7963 __ slt(AT, op1, op2);
7964 __ movz(dst, src, AT);
7965 break;
7967 case 0x05: //less
7968 __ slt(AT, op1, op2);
7969 __ movn(dst, src, AT);
7970 break;
7972 case 0x06: //less_equal
7973 __ slt(AT, op2, op1);
7974 __ movz(dst, src, AT);
7975 break;
7977 default:
7978 Unimplemented();
7979 }
7980 %}
7982 ins_pipe( pipe_slow );
7983 %}
// CMoveI guarded by an unsigned pointer compare: full-width subu for eq/ne and
// sltu for the unsigned ordered cases, then movz/movn on AT as above.
7985 instruct cmovI_cmpP_reg_reg(mRegI dst, mRegI src, mRegP tmp1, mRegP tmp2, cmpOpU cop ) %{
7986 match(Set dst (CMoveI (Binary cop (CmpP tmp1 tmp2)) (Binary dst src)));
7987 ins_cost(80);
7988 format %{
7989 "CMPU$cop $tmp1,$tmp2\t @cmovI_cmpP_reg_reg\n\t"
7990 "CMOV $dst,$src\t @cmovI_cmpP_reg_reg"
7991 %}
7992 ins_encode %{
7993 Register op1 = $tmp1$$Register;
7994 Register op2 = $tmp2$$Register;
7995 Register dst = $dst$$Register;
7996 Register src = $src$$Register;
7997 int flag = $cop$$cmpcode;
7999 switch(flag)
8000 {
8001 case 0x01: //equal
8002 __ subu(AT, op1, op2);
8003 __ movz(dst, src, AT);
8004 break;
8006 case 0x02: //not_equal
8007 __ subu(AT, op1, op2);
8008 __ movn(dst, src, AT);
8009 break;
8011 case 0x03: //above
8012 __ sltu(AT, op2, op1);
8013 __ movn(dst, src, AT);
8014 break;
8016 case 0x04: //above_equal
8017 __ sltu(AT, op1, op2);
8018 __ movz(dst, src, AT);
8019 break;
8021 case 0x05: //below
8022 __ sltu(AT, op1, op2);
8023 __ movn(dst, src, AT);
8024 break;
8026 case 0x06: //below_equal
8027 __ sltu(AT, op2, op1);
8028 __ movz(dst, src, AT);
8029 break;
8031 default:
8032 Unimplemented();
8033 }
8034 %}
8036 ins_pipe( pipe_slow );
8037 %}
// CMoveI guarded by an unsigned narrow-oop compare: 32-bit subu32 for eq/ne
// (narrow oops are 32-bit), sltu for the unsigned ordered cases.
8039 instruct cmovI_cmpN_reg_reg(mRegI dst, mRegI src, mRegN tmp1, mRegN tmp2, cmpOpU cop ) %{
8040 match(Set dst (CMoveI (Binary cop (CmpN tmp1 tmp2)) (Binary dst src)));
8041 ins_cost(80);
8042 format %{
8043 "CMPU$cop $tmp1,$tmp2\t @cmovI_cmpN_reg_reg\n\t"
8044 "CMOV $dst,$src\t @cmovI_cmpN_reg_reg"
8045 %}
8046 ins_encode %{
8047 Register op1 = $tmp1$$Register;
8048 Register op2 = $tmp2$$Register;
8049 Register dst = $dst$$Register;
8050 Register src = $src$$Register;
8051 int flag = $cop$$cmpcode;
8053 switch(flag)
8054 {
8055 case 0x01: //equal
8056 __ subu32(AT, op1, op2);
8057 __ movz(dst, src, AT);
8058 break;
8060 case 0x02: //not_equal
8061 __ subu32(AT, op1, op2);
8062 __ movn(dst, src, AT);
8063 break;
8065 case 0x03: //above
8066 __ sltu(AT, op2, op1);
8067 __ movn(dst, src, AT);
8068 break;
8070 case 0x04: //above_equal
8071 __ sltu(AT, op1, op2);
8072 __ movz(dst, src, AT);
8073 break;
8075 case 0x05: //below
8076 __ sltu(AT, op1, op2);
8077 __ movn(dst, src, AT);
8078 break;
8080 case 0x06: //below_equal
8081 __ sltu(AT, op2, op1);
8082 __ movz(dst, src, AT);
8083 break;
8085 default:
8086 Unimplemented();
8087 }
8088 %}
8090 ins_pipe( pipe_slow );
8091 %}
// CMoveP guarded by an unsigned narrow-oop compare; same condition scheme as
// cmovI_cmpN_reg_reg, but the conditional move targets a pointer register.
8093 instruct cmovP_cmpN_reg_reg(mRegP dst, mRegP src, mRegN tmp1, mRegN tmp2, cmpOpU cop ) %{
8094 match(Set dst (CMoveP (Binary cop (CmpN tmp1 tmp2)) (Binary dst src)));
8095 ins_cost(80);
8096 format %{
8097 "CMPU$cop $tmp1,$tmp2\t @cmovP_cmpN_reg_reg\n\t"
8098 "CMOV $dst,$src\t @cmovP_cmpN_reg_reg"
8099 %}
8100 ins_encode %{
8101 Register op1 = $tmp1$$Register;
8102 Register op2 = $tmp2$$Register;
8103 Register dst = $dst$$Register;
8104 Register src = $src$$Register;
8105 int flag = $cop$$cmpcode;
8107 switch(flag)
8108 {
8109 case 0x01: //equal
8110 __ subu32(AT, op1, op2);
8111 __ movz(dst, src, AT);
8112 break;
8114 case 0x02: //not_equal
8115 __ subu32(AT, op1, op2);
8116 __ movn(dst, src, AT);
8117 break;
8119 case 0x03: //above
8120 __ sltu(AT, op2, op1);
8121 __ movn(dst, src, AT);
8122 break;
8124 case 0x04: //above_equal
8125 __ sltu(AT, op1, op2);
8126 __ movz(dst, src, AT);
8127 break;
8129 case 0x05: //below
8130 __ sltu(AT, op1, op2);
8131 __ movn(dst, src, AT);
8132 break;
8134 case 0x06: //below_equal
8135 __ sltu(AT, op2, op1);
8136 __ movz(dst, src, AT);
8137 break;
8139 default:
8140 Unimplemented();
8141 }
8142 %}
8144 ins_pipe( pipe_slow );
8145 %}
// CMoveN guarded by an unsigned full-width pointer compare (subu/sltu), moving
// a narrow-oop register conditionally via movz/movn.
8147 instruct cmovN_cmpP_reg_reg(mRegN dst, mRegN src, mRegP tmp1, mRegP tmp2, cmpOpU cop ) %{
8148 match(Set dst (CMoveN (Binary cop (CmpP tmp1 tmp2)) (Binary dst src)));
8149 ins_cost(80);
8150 format %{
8151 "CMPU$cop $tmp1,$tmp2\t @cmovN_cmpP_reg_reg\n\t"
8152 "CMOV $dst,$src\t @cmovN_cmpP_reg_reg"
8153 %}
8154 ins_encode %{
8155 Register op1 = $tmp1$$Register;
8156 Register op2 = $tmp2$$Register;
8157 Register dst = $dst$$Register;
8158 Register src = $src$$Register;
8159 int flag = $cop$$cmpcode;
8161 switch(flag)
8162 {
8163 case 0x01: //equal
8164 __ subu(AT, op1, op2);
8165 __ movz(dst, src, AT);
8166 break;
8168 case 0x02: //not_equal
8169 __ subu(AT, op1, op2);
8170 __ movn(dst, src, AT);
8171 break;
8173 case 0x03: //above
8174 __ sltu(AT, op2, op1);
8175 __ movn(dst, src, AT);
8176 break;
8178 case 0x04: //above_equal
8179 __ sltu(AT, op1, op2);
8180 __ movz(dst, src, AT);
8181 break;
8183 case 0x05: //below
8184 __ sltu(AT, op1, op2);
8185 __ movn(dst, src, AT);
8186 break;
8188 case 0x06: //below_equal
8189 __ sltu(AT, op2, op1);
8190 __ movz(dst, src, AT);
8191 break;
8193 default:
8194 Unimplemented();
8195 }
8196 %}
8198 ins_pipe( pipe_slow );
8199 %}
// CMoveP guarded by a double compare: sets the FPU condition flag with a
// c.cond.d compare, then conditionally moves with movt (flag set) / movf (flag
// clear). NOTE(review): the choice of ordered (c_ole/c_olt) vs unordered
// (c_ult/c_ule) compares per case determines NaN behavior — confirm against
// the branchConD_reg_reg conventions before changing.
8201 instruct cmovP_cmpD_reg_reg(mRegP dst, mRegP src, regD tmp1, regD tmp2, cmpOp cop ) %{
8202 match(Set dst (CMoveP (Binary cop (CmpD tmp1 tmp2)) (Binary dst src)));
8203 ins_cost(80);
8204 format %{
8205 "CMP$cop $tmp1, $tmp2\t @cmovP_cmpD_reg_reg\n"
8206 "\tCMOV $dst,$src \t @cmovP_cmpD_reg_reg"
8207 %}
8208 ins_encode %{
8209 FloatRegister reg_op1 = as_FloatRegister($tmp1$$reg);
8210 FloatRegister reg_op2 = as_FloatRegister($tmp2$$reg);
8211 Register dst = as_Register($dst$$reg);
8212 Register src = as_Register($src$$reg);
8214 int flag = $cop$$cmpcode;
8216 switch(flag)
8217 {
8218 case 0x01: //equal
8219 __ c_eq_d(reg_op1, reg_op2);
8220 __ movt(dst, src);
8221 break;
8222 case 0x02: //not_equal
8223 __ c_eq_d(reg_op1, reg_op2);
8224 __ movf(dst, src);
8225 break;
8226 case 0x03: //greater
8227 __ c_ole_d(reg_op1, reg_op2);
8228 __ movf(dst, src);
8229 break;
8230 case 0x04: //greater_equal
8231 __ c_olt_d(reg_op1, reg_op2);
8232 __ movf(dst, src);
8233 break;
8234 case 0x05: //less
8235 __ c_ult_d(reg_op1, reg_op2);
8236 __ movt(dst, src);
8237 break;
8238 case 0x06: //less_equal
8239 __ c_ule_d(reg_op1, reg_op2);
8240 __ movt(dst, src);
8241 break;
8242 default:
8243 Unimplemented();
8244 }
8245 %}
8247 ins_pipe( pipe_slow );
8248 %}
// CMoveN guarded by an unsigned narrow-oop compare (32-bit subu32 / sltu).
8251 instruct cmovN_cmpN_reg_reg(mRegN dst, mRegN src, mRegN tmp1, mRegN tmp2, cmpOpU cop ) %{
8252 match(Set dst (CMoveN (Binary cop (CmpN tmp1 tmp2)) (Binary dst src)));
8253 ins_cost(80);
8254 format %{
8255 "CMPU$cop $tmp1,$tmp2\t @cmovN_cmpN_reg_reg\n\t"
8256 "CMOV $dst,$src\t @cmovN_cmpN_reg_reg"
8257 %}
8258 ins_encode %{
8259 Register op1 = $tmp1$$Register;
8260 Register op2 = $tmp2$$Register;
8261 Register dst = $dst$$Register;
8262 Register src = $src$$Register;
8263 int flag = $cop$$cmpcode;
8265 switch(flag)
8266 {
8267 case 0x01: //equal
8268 __ subu32(AT, op1, op2);
8269 __ movz(dst, src, AT);
8270 break;
8272 case 0x02: //not_equal
8273 __ subu32(AT, op1, op2);
8274 __ movn(dst, src, AT);
8275 break;
8277 case 0x03: //above
8278 __ sltu(AT, op2, op1);
8279 __ movn(dst, src, AT);
8280 break;
8282 case 0x04: //above_equal
8283 __ sltu(AT, op1, op2);
8284 __ movz(dst, src, AT);
8285 break;
8287 case 0x05: //below
8288 __ sltu(AT, op1, op2);
8289 __ movn(dst, src, AT);
8290 break;
8292 case 0x06: //below_equal
8293 __ sltu(AT, op2, op1);
8294 __ movz(dst, src, AT);
8295 break;
8297 default:
8298 Unimplemented();
8299 }
8300 %}
8302 ins_pipe( pipe_slow );
8303 %}
// CMoveI guarded by an unsigned int compare (subu for eq/ne, sltu for ordered).
8306 instruct cmovI_cmpU_reg_reg(mRegI dst, mRegI src, mRegI tmp1, mRegI tmp2, cmpOpU cop ) %{
8307 match(Set dst (CMoveI (Binary cop (CmpU tmp1 tmp2)) (Binary dst src)));
8308 ins_cost(80);
8309 format %{
8310 "CMPU$cop $tmp1,$tmp2\t @cmovI_cmpU_reg_reg\n\t"
8311 "CMOV $dst,$src\t @cmovI_cmpU_reg_reg"
8312 %}
8313 ins_encode %{
8314 Register op1 = $tmp1$$Register;
8315 Register op2 = $tmp2$$Register;
8316 Register dst = $dst$$Register;
8317 Register src = $src$$Register;
8318 int flag = $cop$$cmpcode;
8320 switch(flag)
8321 {
8322 case 0x01: //equal
8323 __ subu(AT, op1, op2);
8324 __ movz(dst, src, AT);
8325 break;
8327 case 0x02: //not_equal
8328 __ subu(AT, op1, op2);
8329 __ movn(dst, src, AT);
8330 break;
8332 case 0x03: //above
8333 __ sltu(AT, op2, op1);
8334 __ movn(dst, src, AT);
8335 break;
8337 case 0x04: //above_equal
8338 __ sltu(AT, op1, op2);
8339 __ movz(dst, src, AT);
8340 break;
8342 case 0x05: //below
8343 __ sltu(AT, op1, op2);
8344 __ movn(dst, src, AT);
8345 break;
8347 case 0x06: //below_equal
8348 __ sltu(AT, op2, op1);
8349 __ movz(dst, src, AT);
8350 break;
8352 default:
8353 Unimplemented();
8354 }
8355 %}
8357 ins_pipe( pipe_slow );
8358 %}
// CMoveI guarded by a signed long compare (full-width subu for eq/ne, slt for
// the ordered cases).
8360 instruct cmovI_cmpL_reg_reg(mRegI dst, mRegI src, mRegL tmp1, mRegL tmp2, cmpOp cop ) %{
8361 match(Set dst (CMoveI (Binary cop (CmpL tmp1 tmp2)) (Binary dst src)));
8362 ins_cost(80);
8363 format %{
8364 "CMP$cop $tmp1, $tmp2\t @cmovI_cmpL_reg_reg\n"
8365 "\tCMOV $dst,$src \t @cmovI_cmpL_reg_reg"
8366 %}
8367 ins_encode %{
8368 Register opr1 = as_Register($tmp1$$reg);
8369 Register opr2 = as_Register($tmp2$$reg);
8370 Register dst = $dst$$Register;
8371 Register src = $src$$Register;
8372 int flag = $cop$$cmpcode;
8374 switch(flag)
8375 {
8376 case 0x01: //equal
8377 __ subu(AT, opr1, opr2);
8378 __ movz(dst, src, AT);
8379 break;
8381 case 0x02: //not_equal
8382 __ subu(AT, opr1, opr2);
8383 __ movn(dst, src, AT);
8384 break;
8386 case 0x03: //greater
8387 __ slt(AT, opr2, opr1);
8388 __ movn(dst, src, AT);
8389 break;
8391 case 0x04: //greater_equal
8392 __ slt(AT, opr1, opr2);
8393 __ movz(dst, src, AT);
8394 break;
8396 case 0x05: //less
8397 __ slt(AT, opr1, opr2);
8398 __ movn(dst, src, AT);
8399 break;
8401 case 0x06: //less_equal
8402 __ slt(AT, opr2, opr1);
8403 __ movz(dst, src, AT);
8404 break;
8406 default:
8407 Unimplemented();
8408 }
8409 %}
8411 ins_pipe( pipe_slow );
8412 %}
// CMoveP guarded by a signed long compare; identical condition scheme to
// cmovI_cmpL_reg_reg with pointer-register dst/src.
8414 instruct cmovP_cmpL_reg_reg(mRegP dst, mRegP src, mRegL tmp1, mRegL tmp2, cmpOp cop ) %{
8415 match(Set dst (CMoveP (Binary cop (CmpL tmp1 tmp2)) (Binary dst src)));
8416 ins_cost(80);
8417 format %{
8418 "CMP$cop $tmp1, $tmp2\t @cmovP_cmpL_reg_reg\n"
8419 "\tCMOV $dst,$src \t @cmovP_cmpL_reg_reg"
8420 %}
8421 ins_encode %{
8422 Register opr1 = as_Register($tmp1$$reg);
8423 Register opr2 = as_Register($tmp2$$reg);
8424 Register dst = $dst$$Register;
8425 Register src = $src$$Register;
8426 int flag = $cop$$cmpcode;
8428 switch(flag)
8429 {
8430 case 0x01: //equal
8431 __ subu(AT, opr1, opr2);
8432 __ movz(dst, src, AT);
8433 break;
8435 case 0x02: //not_equal
8436 __ subu(AT, opr1, opr2);
8437 __ movn(dst, src, AT);
8438 break;
8440 case 0x03: //greater
8441 __ slt(AT, opr2, opr1);
8442 __ movn(dst, src, AT);
8443 break;
8445 case 0x04: //greater_equal
8446 __ slt(AT, opr1, opr2);
8447 __ movz(dst, src, AT);
8448 break;
8450 case 0x05: //less
8451 __ slt(AT, opr1, opr2);
8452 __ movn(dst, src, AT);
8453 break;
8455 case 0x06: //less_equal
8456 __ slt(AT, opr2, opr1);
8457 __ movz(dst, src, AT);
8458 break;
8460 default:
8461 Unimplemented();
8462 }
8463 %}
8465 ins_pipe( pipe_slow );
8466 %}
// CMoveI guarded by a double compare via the FPU condition flag and movt/movf;
// not_equal uses c_eq_d + movf for correct NaN handling (see the 2016/4/19
// in-line note referencing branchConD_reg_reg).
8468 instruct cmovI_cmpD_reg_reg(mRegI dst, mRegI src, regD tmp1, regD tmp2, cmpOp cop ) %{
8469 match(Set dst (CMoveI (Binary cop (CmpD tmp1 tmp2)) (Binary dst src)));
8470 ins_cost(80);
8471 format %{
8472 "CMP$cop $tmp1, $tmp2\t @cmovI_cmpD_reg_reg\n"
8473 "\tCMOV $dst,$src \t @cmovI_cmpD_reg_reg"
8474 %}
8475 ins_encode %{
8476 FloatRegister reg_op1 = as_FloatRegister($tmp1$$reg);
8477 FloatRegister reg_op2 = as_FloatRegister($tmp2$$reg);
8478 Register dst = as_Register($dst$$reg);
8479 Register src = as_Register($src$$reg);
8481 int flag = $cop$$cmpcode;
8483 switch(flag)
8484 {
8485 case 0x01: //equal
8486 __ c_eq_d(reg_op1, reg_op2);
8487 __ movt(dst, src);
8488 break;
8489 case 0x02: //not_equal
8490 //2016/4/19 aoqi: See instruct branchConD_reg_reg. The change in branchConD_reg_reg fixed a bug. It seems similar here, so I made thesame change.
8491 __ c_eq_d(reg_op1, reg_op2);
8492 __ movf(dst, src);
8493 break;
8494 case 0x03: //greater
8495 __ c_ole_d(reg_op1, reg_op2);
8496 __ movf(dst, src);
8497 break;
8498 case 0x04: //greater_equal
8499 __ c_olt_d(reg_op1, reg_op2);
8500 __ movf(dst, src);
8501 break;
8502 case 0x05: //less
8503 __ c_ult_d(reg_op1, reg_op2);
8504 __ movt(dst, src);
8505 break;
8506 case 0x06: //less_equal
8507 __ c_ule_d(reg_op1, reg_op2);
8508 __ movt(dst, src);
8509 break;
8510 default:
8511 Unimplemented();
8512 }
8513 %}
8515 ins_pipe( pipe_slow );
8516 %}
// CMoveP guarded by an unsigned pointer compare (full-width subu / sltu).
8519 instruct cmovP_cmpP_reg_reg(mRegP dst, mRegP src, mRegP tmp1, mRegP tmp2, cmpOpU cop ) %{
8520 match(Set dst (CMoveP (Binary cop (CmpP tmp1 tmp2)) (Binary dst src)));
8521 ins_cost(80);
8522 format %{
8523 "CMPU$cop $tmp1,$tmp2\t @cmovP_cmpP_reg_reg\n\t"
8524 "CMOV $dst,$src\t @cmovP_cmpP_reg_reg"
8525 %}
8526 ins_encode %{
8527 Register op1 = $tmp1$$Register;
8528 Register op2 = $tmp2$$Register;
8529 Register dst = $dst$$Register;
8530 Register src = $src$$Register;
8531 int flag = $cop$$cmpcode;
8533 switch(flag)
8534 {
8535 case 0x01: //equal
8536 __ subu(AT, op1, op2);
8537 __ movz(dst, src, AT);
8538 break;
8540 case 0x02: //not_equal
8541 __ subu(AT, op1, op2);
8542 __ movn(dst, src, AT);
8543 break;
8545 case 0x03: //above
8546 __ sltu(AT, op2, op1);
8547 __ movn(dst, src, AT);
8548 break;
8550 case 0x04: //above_equal
8551 __ sltu(AT, op1, op2);
8552 __ movz(dst, src, AT);
8553 break;
8555 case 0x05: //below
8556 __ sltu(AT, op1, op2);
8557 __ movn(dst, src, AT);
8558 break;
8560 case 0x06: //below_equal
8561 __ sltu(AT, op2, op1);
8562 __ movz(dst, src, AT);
8563 break;
8565 default:
8566 Unimplemented();
8567 }
8568 %}
8570 ins_pipe( pipe_slow );
8571 %}
// Conditional move of a pointer selected by a signed int compare
// (CMoveP/CmpI).  Note: the 'above'/'below' case labels below are cosmetic
// leftovers; cop is a signed cmpOp and signed slt is used.
instruct cmovP_cmpI_reg_reg(mRegP dst, mRegP src, mRegI tmp1, mRegI tmp2, cmpOp cop ) %{
  match(Set dst (CMoveP (Binary cop (CmpI tmp1 tmp2)) (Binary dst src)));
  ins_cost(80);
  format %{
    "CMP$cop $tmp1,$tmp2\t @cmovP_cmpI_reg_reg\n\t"
    "CMOV $dst,$src\t @cmovP_cmpI_reg_reg"
  %}
  ins_encode %{
    Register op1 = $tmp1$$Register;
    Register op2 = $tmp2$$Register;
    Register dst = $dst$$Register;
    Register src = $src$$Register;
    int flag = $cop$$cmpcode;

    switch(flag)
    {
      case 0x01: //equal
        // 32-bit difference; AT == 0 iff op1 == op2
        __ subu32(AT, op1, op2);
        __ movz(dst, src, AT);
        break;

      case 0x02: //not_equal
        __ subu32(AT, op1, op2);
        __ movn(dst, src, AT);
        break;

      case 0x03: //above
        __ slt(AT, op2, op1);
        __ movn(dst, src, AT);
        break;

      case 0x04: //above_equal
        __ slt(AT, op1, op2);
        __ movz(dst, src, AT);
        break;

      case 0x05: //below
        __ slt(AT, op1, op2);
        __ movn(dst, src, AT);
        break;

      case 0x06: //below_equal
        __ slt(AT, op2, op1);
        __ movz(dst, src, AT);
        break;

      default:
        Unimplemented();
    }
  %}

  ins_pipe( pipe_slow );
%}
// Conditional move of a narrow oop selected by a signed int compare
// (CMoveN/CmpI).  Same shape as cmovP_cmpI_reg_reg; 'above'/'below' labels
// are cosmetic -- signed slt is used with the signed cmpOp.
instruct cmovN_cmpI_reg_reg(mRegN dst, mRegN src, mRegI tmp1, mRegI tmp2, cmpOp cop ) %{
  match(Set dst (CMoveN (Binary cop (CmpI tmp1 tmp2)) (Binary dst src)));
  ins_cost(80);
  format %{
    "CMP$cop $tmp1,$tmp2\t @cmovN_cmpI_reg_reg\n\t"
    "CMOV $dst,$src\t @cmovN_cmpI_reg_reg"
  %}
  ins_encode %{
    Register op1 = $tmp1$$Register;
    Register op2 = $tmp2$$Register;
    Register dst = $dst$$Register;
    Register src = $src$$Register;
    int flag = $cop$$cmpcode;

    switch(flag)
    {
      case 0x01: //equal
        __ subu32(AT, op1, op2);
        __ movz(dst, src, AT);
        break;

      case 0x02: //not_equal
        __ subu32(AT, op1, op2);
        __ movn(dst, src, AT);
        break;

      case 0x03: //above
        __ slt(AT, op2, op1);
        __ movn(dst, src, AT);
        break;

      case 0x04: //above_equal
        __ slt(AT, op1, op2);
        __ movz(dst, src, AT);
        break;

      case 0x05: //below
        __ slt(AT, op1, op2);
        __ movn(dst, src, AT);
        break;

      case 0x06: //below_equal
        __ slt(AT, op2, op1);
        __ movz(dst, src, AT);
        break;

      default:
        Unimplemented();
    }
  %}

  ins_pipe( pipe_slow );
%}
// Conditional move of a long selected by a signed int compare
// (CMoveL/CmpI).  Branchless via AT + movz/movn.
instruct cmovL_cmpI_reg_reg(mRegL dst, mRegL src, mRegI tmp1, mRegI tmp2, cmpOp cop ) %{
  match(Set dst (CMoveL (Binary cop (CmpI tmp1 tmp2)) (Binary dst src)));
  ins_cost(80);
  format %{
    "CMP$cop $tmp1, $tmp2\t @cmovL_cmpI_reg_reg\n"
    "\tCMOV $dst,$src \t @cmovL_cmpI_reg_reg"
  %}

  ins_encode %{
    Register op1 = $tmp1$$Register;
    Register op2 = $tmp2$$Register;
    Register dst = as_Register($dst$$reg);
    Register src = as_Register($src$$reg);
    int flag = $cop$$cmpcode;

    switch(flag)
    {
      case 0x01: //equal
        // 32-bit difference; AT == 0 iff op1 == op2
        __ subu32(AT, op1, op2);
        __ movz(dst, src, AT);
        break;

      case 0x02: //not_equal
        __ subu32(AT, op1, op2);
        __ movn(dst, src, AT);
        break;

      case 0x03: //great
        __ slt(AT, op2, op1);
        __ movn(dst, src, AT);
        break;

      case 0x04: //great_equal
        __ slt(AT, op1, op2);
        __ movz(dst, src, AT);
        break;

      case 0x05: //less
        __ slt(AT, op1, op2);
        __ movn(dst, src, AT);
        break;

      case 0x06: //less_equal
        __ slt(AT, op2, op1);
        __ movz(dst, src, AT);
        break;

      default:
        Unimplemented();
    }
  %}

  ins_pipe( pipe_slow );
%}
// Conditional move of a long selected by a signed long compare
// (CMoveL/CmpL).  Equality uses a full 64-bit difference in AT; ordered
// cases use signed slt (safe against subtraction overflow).
instruct cmovL_cmpL_reg_reg(mRegL dst, mRegL src, mRegL tmp1, mRegL tmp2, cmpOp cop ) %{
  match(Set dst (CMoveL (Binary cop (CmpL tmp1 tmp2)) (Binary dst src)));
  ins_cost(80);
  format %{
    "CMP$cop $tmp1, $tmp2\t @cmovL_cmpL_reg_reg\n"
    "\tCMOV $dst,$src \t @cmovL_cmpL_reg_reg"
  %}
  ins_encode %{
    Register opr1 = as_Register($tmp1$$reg);
    Register opr2 = as_Register($tmp2$$reg);
    Register dst = as_Register($dst$$reg);
    Register src = as_Register($src$$reg);
    int flag = $cop$$cmpcode;

    switch(flag)
    {
      case 0x01: //equal
        __ subu(AT, opr1, opr2);
        __ movz(dst, src, AT);
        break;

      case 0x02: //not_equal
        __ subu(AT, opr1, opr2);
        __ movn(dst, src, AT);
        break;

      case 0x03: //greater
        __ slt(AT, opr2, opr1);
        __ movn(dst, src, AT);
        break;

      case 0x04: //greater_equal
        __ slt(AT, opr1, opr2);
        __ movz(dst, src, AT);
        break;

      case 0x05: //less
        __ slt(AT, opr1, opr2);
        __ movn(dst, src, AT);
        break;

      case 0x06: //less_equal
        __ slt(AT, opr2, opr1);
        __ movz(dst, src, AT);
        break;

      default:
        Unimplemented();
    }
  %}

  ins_pipe( pipe_slow );
%}
// Conditional move of a long selected by an unsigned narrow-oop compare
// (CMoveL/CmpN).  Equality uses a 32-bit difference (narrow oops are
// 32-bit values); ordered cases use full-width sltu.
// NOTE(review): the full-width sltu is only correct if narrow-oop
// registers are zero-extended to 64 bits -- confirm this port's register
// convention.
instruct cmovL_cmpN_reg_reg(mRegL dst, mRegL src, mRegN tmp1, mRegN tmp2, cmpOpU cop ) %{
  match(Set dst (CMoveL (Binary cop (CmpN tmp1 tmp2)) (Binary dst src)));
  ins_cost(80);
  format %{
    "CMPU$cop $tmp1,$tmp2\t @cmovL_cmpN_reg_reg\n\t"
    "CMOV $dst,$src\t @cmovL_cmpN_reg_reg"
  %}
  ins_encode %{
    Register op1 = $tmp1$$Register;
    Register op2 = $tmp2$$Register;
    Register dst = $dst$$Register;
    Register src = $src$$Register;
    int flag = $cop$$cmpcode;

    switch(flag)
    {
      case 0x01: //equal
        __ subu32(AT, op1, op2);
        __ movz(dst, src, AT);
        break;

      case 0x02: //not_equal
        __ subu32(AT, op1, op2);
        __ movn(dst, src, AT);
        break;

      case 0x03: //above
        __ sltu(AT, op2, op1);
        __ movn(dst, src, AT);
        break;

      case 0x04: //above_equal
        __ sltu(AT, op1, op2);
        __ movz(dst, src, AT);
        break;

      case 0x05: //below
        __ sltu(AT, op1, op2);
        __ movn(dst, src, AT);
        break;

      case 0x06: //below_equal
        __ sltu(AT, op2, op1);
        __ movz(dst, src, AT);
        break;

      default:
        Unimplemented();
    }
  %}

  ins_pipe( pipe_slow );
%}
// Conditional move of a long selected by a double compare (CMoveL/CmpD).
// Same encoding scheme as cmovI_cmpD_reg_reg: c.cond.d sets the FPU
// condition flag, movt/movf does the conditional copy.
instruct cmovL_cmpD_reg_reg(mRegL dst, mRegL src, regD tmp1, regD tmp2, cmpOp cop ) %{
  match(Set dst (CMoveL (Binary cop (CmpD tmp1 tmp2)) (Binary dst src)));
  ins_cost(80);
  format %{
    "CMP$cop $tmp1, $tmp2\t @cmovL_cmpD_reg_reg\n"
    "\tCMOV $dst,$src \t @cmovL_cmpD_reg_reg"
  %}
  ins_encode %{
    FloatRegister reg_op1 = as_FloatRegister($tmp1$$reg);
    FloatRegister reg_op2 = as_FloatRegister($tmp2$$reg);
    Register dst = as_Register($dst$$reg);
    Register src = as_Register($src$$reg);

    int flag = $cop$$cmpcode;

    switch(flag)
    {
      case 0x01: //equal
        __ c_eq_d(reg_op1, reg_op2);
        __ movt(dst, src);
        break;
      case 0x02: //not_equal
        // c.eq + move-if-false so unordered (NaN) counts as "not equal".
        __ c_eq_d(reg_op1, reg_op2);
        __ movf(dst, src);
        break;
      case 0x03: //greater
        // !(op1 <= op2) via c.ole.d + move-if-false (also moves for NaN).
        __ c_ole_d(reg_op1, reg_op2);
        __ movf(dst, src);
        break;
      case 0x04: //greater_equal
        __ c_olt_d(reg_op1, reg_op2);
        __ movf(dst, src);
        break;
      case 0x05: //less
        __ c_ult_d(reg_op1, reg_op2);
        __ movt(dst, src);
        break;
      case 0x06: //less_equal
        __ c_ule_d(reg_op1, reg_op2);
        __ movt(dst, src);
        break;
      default:
        Unimplemented();
    }
  %}

  ins_pipe( pipe_slow );
%}
// Conditional move of a double selected by a double compare (CMoveD/CmpD).
// There is no FPU conditional-move-on-flag for FPR destinations here, so
// each case branches around a mov.d.  Every branch delay slot is filled
// with a nop.
instruct cmovD_cmpD_reg_reg(regD dst, regD src, regD tmp1, regD tmp2, cmpOp cop ) %{
  match(Set dst (CMoveD (Binary cop (CmpD tmp1 tmp2)) (Binary dst src)));
  ins_cost(200);
  format %{
    "CMP$cop $tmp1, $tmp2\t @cmovD_cmpD_reg_reg\n"
    "\tCMOV $dst,$src \t @cmovD_cmpD_reg_reg"
  %}
  ins_encode %{
    FloatRegister reg_op1 = as_FloatRegister($tmp1$$reg);
    FloatRegister reg_op2 = as_FloatRegister($tmp2$$reg);
    FloatRegister dst = as_FloatRegister($dst$$reg);
    FloatRegister src = as_FloatRegister($src$$reg);

    int flag = $cop$$cmpcode;

    // Only one case executes, so a single skip label is shared.
    Label L;

    switch(flag)
    {
      case 0x01: //equal
        __ c_eq_d(reg_op1, reg_op2);
        __ bc1f(L);       // flag false -> not equal -> skip the move
        __ nop();
        __ mov_d(dst, src);
        __ bind(L);
        break;
      case 0x02: //not_equal
        // 2016/4/19 aoqi: See instruct branchConD_reg_reg. The change in
        // branchConD_reg_reg fixed a bug. It seems similar here, so I made
        // the same change: test c.eq and skip when true, so unordered (NaN)
        // operands count as "not equal".
        __ c_eq_d(reg_op1, reg_op2);
        __ bc1t(L);
        __ nop();
        __ mov_d(dst, src);
        __ bind(L);
        break;
      case 0x03: //greater
        // greater is !(op1 <= op2): skip the move when c.ole.d is true.
        __ c_ole_d(reg_op1, reg_op2);
        __ bc1t(L);
        __ nop();
        __ mov_d(dst, src);
        __ bind(L);
        break;
      case 0x04: //greater_equal
        __ c_olt_d(reg_op1, reg_op2);
        __ bc1t(L);
        __ nop();
        __ mov_d(dst, src);
        __ bind(L);
        break;
      case 0x05: //less
        __ c_ult_d(reg_op1, reg_op2);
        __ bc1f(L);
        __ nop();
        __ mov_d(dst, src);
        __ bind(L);
        break;
      case 0x06: //less_equal
        __ c_ule_d(reg_op1, reg_op2);
        __ bc1f(L);
        __ nop();
        __ mov_d(dst, src);
        __ bind(L);
        break;
      default:
        Unimplemented();
    }
  %}

  ins_pipe( pipe_slow );
%}
// Conditional move of a float selected by a signed int compare
// (CMoveF/CmpI).  FPR destination, so each case branches around a mov.s;
// delay slots are filled with nops.
instruct cmovF_cmpI_reg_reg(regF dst, regF src, mRegI tmp1, mRegI tmp2, cmpOp cop ) %{
  match(Set dst (CMoveF (Binary cop (CmpI tmp1 tmp2)) (Binary dst src)));
  ins_cost(200);
  format %{
    "CMP$cop $tmp1, $tmp2\t @cmovF_cmpI_reg_reg\n"
    "\tCMOV $dst, $src \t @cmovF_cmpI_reg_reg"
  %}

  ins_encode %{
    Register op1 = $tmp1$$Register;
    Register op2 = $tmp2$$Register;
    FloatRegister dst = as_FloatRegister($dst$$reg);
    FloatRegister src = as_FloatRegister($src$$reg);
    int flag = $cop$$cmpcode;
    // Only one case executes, so a single skip label is shared.
    Label L;

    switch(flag)
    {
      case 0x01: //equal
        // skip the move unless op1 == op2
        __ bne(op1, op2, L);
        __ nop();
        __ mov_s(dst, src);
        __ bind(L);
        break;
      case 0x02: //not_equal
        __ beq(op1, op2, L);
        __ nop();
        __ mov_s(dst, src);
        __ bind(L);
        break;
      case 0x03: //great
        __ slt(AT, op2, op1);
        __ beq(AT, R0, L);
        __ nop();
        __ mov_s(dst, src);
        __ bind(L);
        break;
      case 0x04: //great_equal
        __ slt(AT, op1, op2);
        __ bne(AT, R0, L);
        __ nop();
        __ mov_s(dst, src);
        __ bind(L);
        break;
      case 0x05: //less
        __ slt(AT, op1, op2);
        __ beq(AT, R0, L);
        __ nop();
        __ mov_s(dst, src);
        __ bind(L);
        break;
      case 0x06: //less_equal
        __ slt(AT, op2, op1);
        __ bne(AT, R0, L);
        __ nop();
        __ mov_s(dst, src);
        __ bind(L);
        break;
      default:
        Unimplemented();
    }
  %}

  ins_pipe( pipe_slow );
%}
// Conditional move of a double selected by a signed int compare
// (CMoveD/CmpI).  Same branch-around scheme as cmovF_cmpI_reg_reg, with
// mov.d instead of mov.s.
instruct cmovD_cmpI_reg_reg(regD dst, regD src, mRegI tmp1, mRegI tmp2, cmpOp cop ) %{
  match(Set dst (CMoveD (Binary cop (CmpI tmp1 tmp2)) (Binary dst src)));
  ins_cost(200);
  format %{
    "CMP$cop $tmp1, $tmp2\t @cmovD_cmpI_reg_reg\n"
    "\tCMOV $dst, $src \t @cmovD_cmpI_reg_reg"
  %}

  ins_encode %{
    Register op1 = $tmp1$$Register;
    Register op2 = $tmp2$$Register;
    FloatRegister dst = as_FloatRegister($dst$$reg);
    FloatRegister src = as_FloatRegister($src$$reg);
    int flag = $cop$$cmpcode;
    Label L;

    switch(flag)
    {
      case 0x01: //equal
        __ bne(op1, op2, L);
        __ nop();
        __ mov_d(dst, src);
        __ bind(L);
        break;
      case 0x02: //not_equal
        __ beq(op1, op2, L);
        __ nop();
        __ mov_d(dst, src);
        __ bind(L);
        break;
      case 0x03: //great
        __ slt(AT, op2, op1);
        __ beq(AT, R0, L);
        __ nop();
        __ mov_d(dst, src);
        __ bind(L);
        break;
      case 0x04: //great_equal
        __ slt(AT, op1, op2);
        __ bne(AT, R0, L);
        __ nop();
        __ mov_d(dst, src);
        __ bind(L);
        break;
      case 0x05: //less
        __ slt(AT, op1, op2);
        __ beq(AT, R0, L);
        __ nop();
        __ mov_d(dst, src);
        __ bind(L);
        break;
      case 0x06: //less_equal
        __ slt(AT, op2, op1);
        __ bne(AT, R0, L);
        __ nop();
        __ mov_d(dst, src);
        __ bind(L);
        break;
      default:
        Unimplemented();
    }
  %}

  ins_pipe( pipe_slow );
%}
// Conditional move of a double selected by a pointer compare
// (CMoveD/CmpP), branch-around scheme.
// NOTE(review): the ordered cases use signed slt on pointers while
// cmovP_cmpP_reg_reg uses unsigned sltu -- confirm ordered CmpP
// comparisons cannot reach this pattern (equality cases are unaffected).
instruct cmovD_cmpP_reg_reg(regD dst, regD src, mRegP tmp1, mRegP tmp2, cmpOp cop ) %{
  match(Set dst (CMoveD (Binary cop (CmpP tmp1 tmp2)) (Binary dst src)));
  ins_cost(200);
  format %{
    "CMP$cop $tmp1, $tmp2\t @cmovD_cmpP_reg_reg\n"
    "\tCMOV $dst, $src \t @cmovD_cmpP_reg_reg"
  %}

  ins_encode %{
    Register op1 = $tmp1$$Register;
    Register op2 = $tmp2$$Register;
    FloatRegister dst = as_FloatRegister($dst$$reg);
    FloatRegister src = as_FloatRegister($src$$reg);
    int flag = $cop$$cmpcode;
    Label L;

    switch(flag)
    {
      case 0x01: //equal
        __ bne(op1, op2, L);
        __ nop();
        __ mov_d(dst, src);
        __ bind(L);
        break;
      case 0x02: //not_equal
        __ beq(op1, op2, L);
        __ nop();
        __ mov_d(dst, src);
        __ bind(L);
        break;
      case 0x03: //great
        __ slt(AT, op2, op1);
        __ beq(AT, R0, L);
        __ nop();
        __ mov_d(dst, src);
        __ bind(L);
        break;
      case 0x04: //great_equal
        __ slt(AT, op1, op2);
        __ bne(AT, R0, L);
        __ nop();
        __ mov_d(dst, src);
        __ bind(L);
        break;
      case 0x05: //less
        __ slt(AT, op1, op2);
        __ beq(AT, R0, L);
        __ nop();
        __ mov_d(dst, src);
        __ bind(L);
        break;
      case 0x06: //less_equal
        __ slt(AT, op2, op1);
        __ bne(AT, R0, L);
        __ nop();
        __ mov_d(dst, src);
        __ bind(L);
        break;
      default:
        Unimplemented();
    }
  %}

  ins_pipe( pipe_slow );
%}
9163 //FIXME
// Conditional move of an int selected by a float compare (CMoveI/CmpF).
// Single-precision version of cmovI_cmpD_reg_reg: c.cond.s + movt/movf.
instruct cmovI_cmpF_reg_reg(mRegI dst, mRegI src, regF tmp1, regF tmp2, cmpOp cop ) %{
  match(Set dst (CMoveI (Binary cop (CmpF tmp1 tmp2)) (Binary dst src)));
  ins_cost(80);
  format %{
    "CMP$cop $tmp1, $tmp2\t @cmovI_cmpF_reg_reg\n"
    "\tCMOV $dst,$src \t @cmovI_cmpF_reg_reg"
  %}

  ins_encode %{
    FloatRegister reg_op1 = $tmp1$$FloatRegister;
    FloatRegister reg_op2 = $tmp2$$FloatRegister;
    Register dst = $dst$$Register;
    Register src = $src$$Register;
    int flag = $cop$$cmpcode;

    switch(flag)
    {
      case 0x01: //equal
        __ c_eq_s(reg_op1, reg_op2);
        __ movt(dst, src);
        break;
      case 0x02: //not_equal
        // c.eq + move-if-false so unordered (NaN) counts as "not equal".
        __ c_eq_s(reg_op1, reg_op2);
        __ movf(dst, src);
        break;
      case 0x03: //greater
        // !(op1 <= op2) via c.ole.s + move-if-false (also moves for NaN).
        __ c_ole_s(reg_op1, reg_op2);
        __ movf(dst, src);
        break;
      case 0x04: //greater_equal
        __ c_olt_s(reg_op1, reg_op2);
        __ movf(dst, src);
        break;
      case 0x05: //less
        __ c_ult_s(reg_op1, reg_op2);
        __ movt(dst, src);
        break;
      case 0x06: //less_equal
        __ c_ule_s(reg_op1, reg_op2);
        __ movt(dst, src);
        break;
      default:
        Unimplemented();
    }
  %}
  ins_pipe( pipe_slow );
%}
// Conditional move of a float selected by a float compare (CMoveF/CmpF).
// FPR destination, so each case branches around a mov.s on the FPU
// condition flag; delay slots are filled with nops.
instruct cmovF_cmpF_reg_reg(regF dst, regF src, regF tmp1, regF tmp2, cmpOp cop ) %{
  match(Set dst (CMoveF (Binary cop (CmpF tmp1 tmp2)) (Binary dst src)));
  ins_cost(200);
  format %{
    "CMP$cop $tmp1, $tmp2\t @cmovF_cmpF_reg_reg\n"
    "\tCMOV $dst,$src \t @cmovF_cmpF_reg_reg"
  %}

  ins_encode %{
    FloatRegister reg_op1 = $tmp1$$FloatRegister;
    FloatRegister reg_op2 = $tmp2$$FloatRegister;
    FloatRegister dst = $dst$$FloatRegister;
    FloatRegister src = $src$$FloatRegister;
    Label L;
    int flag = $cop$$cmpcode;

    switch(flag)
    {
      case 0x01: //equal
        __ c_eq_s(reg_op1, reg_op2);
        __ bc1f(L);       // flag false -> not equal -> skip the move
        __ nop();
        __ mov_s(dst, src);
        __ bind(L);
        break;
      case 0x02: //not_equal
        // skip when c.eq is true, so NaN counts as "not equal"
        __ c_eq_s(reg_op1, reg_op2);
        __ bc1t(L);
        __ nop();
        __ mov_s(dst, src);
        __ bind(L);
        break;
      case 0x03: //greater
        __ c_ole_s(reg_op1, reg_op2);
        __ bc1t(L);
        __ nop();
        __ mov_s(dst, src);
        __ bind(L);
        break;
      case 0x04: //greater_equal
        __ c_olt_s(reg_op1, reg_op2);
        __ bc1t(L);
        __ nop();
        __ mov_s(dst, src);
        __ bind(L);
        break;
      case 0x05: //less
        __ c_ult_s(reg_op1, reg_op2);
        __ bc1f(L);
        __ nop();
        __ mov_s(dst, src);
        __ bind(L);
        break;
      case 0x06: //less_equal
        __ c_ule_s(reg_op1, reg_op2);
        __ bc1f(L);
        __ nop();
        __ mov_s(dst, src);
        __ bind(L);
        break;
      default:
        Unimplemented();
    }
  %}
  ins_pipe( pipe_slow );
%}
9279 // Manifest a CmpL result in an integer register. Very painful.
9280 // This is the test to avoid.
9281 instruct cmpL3_reg_reg(mRegI dst, mRegL src1, mRegL src2) %{
9282 match(Set dst (CmpL3 src1 src2));
9283 ins_cost(1000);
9284 format %{ "cmpL3 $dst, $src1, $src2 @ cmpL3_reg_reg" %}
9285 ins_encode %{
9286 Register opr1 = as_Register($src1$$reg);
9287 Register opr2 = as_Register($src2$$reg);
9288 Register dst = as_Register($dst$$reg);
9290 Label Done;
9292 __ subu(AT, opr1, opr2);
9293 __ bltz(AT, Done);
9294 __ delayed()->daddiu(dst, R0, -1);
9296 __ move(dst, 1);
9297 __ movz(dst, R0, AT);
9299 __ bind(Done);
9300 %}
9301 ins_pipe( pipe_slow );
9302 %}
//
// Three-way float compare (CmpF3), manifesting the result in a GPR:
// less_result    = -1
// greater_result =  1
// equal_result   =  0
// nan_result     = -1
//
instruct cmpF3_reg_reg(mRegI dst, regF src1, regF src2) %{
  match(Set dst (CmpF3 src1 src2));
  ins_cost(1000);
  format %{ "cmpF3 $dst, $src1, $src2 @ cmpF3_reg_reg" %}
  ins_encode %{
    FloatRegister src1 = as_FloatRegister($src1$$reg);
    FloatRegister src2 = as_FloatRegister($src2$$reg);
    Register dst = as_Register($dst$$reg);

    Label Done;

    // c.ult.s is true for "less" AND for unordered (NaN) inputs, which
    // gives nan_result = -1; the -1 is loaded in the branch delay slot.
    __ c_ult_s(src1, src2);
    __ bc1t(Done);
    __ delayed()->daddiu(dst, R0, -1);

    // Not less and not NaN: assume greater (1), then overwrite with 0
    // via movt if the operands compare equal.
    __ c_eq_s(src1, src2);
    __ move(dst, 1);
    __ movt(dst, R0);

    __ bind(Done);
  %}
  ins_pipe( pipe_slow );
%}
// Three-way double compare (CmpD3): dst = -1/0/1, NaN -> -1.
// Double-precision twin of cmpF3_reg_reg above.
instruct cmpD3_reg_reg(mRegI dst, regD src1, regD src2) %{
  match(Set dst (CmpD3 src1 src2));
  ins_cost(1000);
  format %{ "cmpD3 $dst, $src1, $src2 @ cmpD3_reg_reg" %}
  ins_encode %{
    FloatRegister src1 = as_FloatRegister($src1$$reg);
    FloatRegister src2 = as_FloatRegister($src2$$reg);
    Register dst = as_Register($dst$$reg);

    Label Done;

    // c.ult.d is true for "less" and for unordered (NaN): result -1,
    // loaded in the branch delay slot.
    __ c_ult_d(src1, src2);
    __ bc1t(Done);
    __ delayed()->daddiu(dst, R0, -1);

    // Not less / not NaN: 1 unless equal, in which case movt writes 0.
    __ c_eq_d(src1, src2);
    __ move(dst, 1);
    __ movt(dst, R0);

    __ bind(Done);
  %}
  ins_pipe( pipe_slow );
%}
// Zero an array: store R0 into $cnt doublewords starting at $base.
instruct clear_array(mRegL cnt, mRegP base, Universe dummy) %{
  match(Set dummy (ClearArray cnt base));
  format %{ "CLEAR_ARRAY base = $base, cnt = $cnt # Clear doublewords" %}
  ins_encode %{
    // cnt is the number of doublewords (8-byte words) to clear -- the loop
    // below stores wordSize bytes per iteration -- and base points to the
    // starting address of the array.
    Register base = $base$$Register;
    Register num  = $cnt$$Register;
    Label Loop, done;

    /* 2012/9/21 Jin: according to X86, $cnt is calculated by doublewords(8 bytes) */
    __ move(T9, num);     /* T9 = words */
    __ beq(T9, R0, done); // nothing to clear
    __ nop();
    __ move(AT, base);    // AT walks the array; base itself is preserved

    __ bind(Loop);
    __ sd(R0, Address(AT, 0));
    __ daddi(AT, AT, wordSize);
    __ daddi(T9, T9, -1);
    __ bne(T9, R0, Loop);
    __ delayed()->nop();
    __ bind(done);
  %}
  ins_pipe( pipe_slow );
%}
// Lexicographic comparison of two char[] strings:
// result < 0 / == 0 / > 0.  All four input registers are clobbered
// (USE_KILL); cnt2 is reused as a scratch register inside the loop.
instruct string_compare(a4_RegP str1, mA5RegI cnt1, a6_RegP str2, mA7RegI cnt2, no_Ax_mRegI result) %{
  match(Set result (StrComp (Binary str1 cnt1) (Binary str2 cnt2)));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2);

  format %{ "String Compare $str1[len: $cnt1], $str2[len: $cnt2] -> $result @ string_compare" %}
  ins_encode %{
    // Get the first character position in both strings
    // [8] char array, [12] offset, [16] count
    Register str1 = $str1$$Register;
    Register str2 = $str2$$Register;
    Register cnt1 = $cnt1$$Register;
    Register cnt2 = $cnt2$$Register;
    Register result = $result$$Register;

    Label L, Loop, haveResult, done;

    // compute the difference of lengths (in result); if the shorter string
    // is a prefix of the longer one, this difference IS the answer
    __ subu(result, cnt1, cnt2); // result holds the difference of two lengths

    // compute the shorter length (in cnt1)
    __ slt(AT, cnt2, cnt1);
    __ movn(cnt1, cnt2, AT);

    // Now the shorter length is in cnt1 and cnt2 can be used as a tmp register
    __ bind(Loop);                     // Loop begin
    __ beq(cnt1, R0, done);            // ran off the shorter string: lengths decide
    __ delayed()->lhu(AT, str1, 0);;   // delay slot: load current char of str1

    // compare current character
    __ lhu(cnt2, str2, 0);
    __ bne(AT, cnt2, haveResult);      // first mismatching char decides
    __ delayed()->addi(str1, str1, 2); // delay slot: advance str1 (executes either way)
    __ addi(str2, str2, 2);
    __ b(Loop);
    __ delayed()->addi(cnt1, cnt1, -1); // Loop end

    __ bind(haveResult);
    __ subu(result, AT, cnt2);         // difference of the mismatching chars

    __ bind(done);
  %}

  ins_pipe( pipe_slow );
%}
// intrinsic optimization
// String equality over $cnt chars: result = 1 if equal, 0 otherwise.
// Relies heavily on branch delay slots: the "assume equal" (1) and "not
// equal" (0) values are loaded into result in delay slots, so result is
// re-set on every loop iteration.
// NOTE(review): no length-equality check is done here; presumably C2 only
// emits StrEquals after the lengths are known equal -- confirm.
instruct string_equals(a4_RegP str1, a5_RegP str2, mA6RegI cnt, mA7RegI temp, no_Ax_mRegI result) %{
  match(Set result (StrEquals (Binary str1 str2) cnt));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt, KILL temp);

  format %{ "String Equal $str1, $str2, len:$cnt tmp:$temp -> $result @ string_equals" %}
  ins_encode %{
    // Get the first character position in both strings
    // [8] char array, [12] offset, [16] count
    Register str1 = $str1$$Register;
    Register str2 = $str2$$Register;
    Register cnt  = $cnt$$Register;
    Register tmp  = $temp$$Register;
    Register result = $result$$Register;

    Label Loop, done;

    __ beq(str1, str2, done); // same char[] ?
    __ daddiu(result, R0, 1); // delay slot: result = 1 (equal)

    __ bind(Loop);            // Loop begin
    __ beq(cnt, R0, done);
    __ daddiu(result, R0, 1); // count == 0 -> equal (delay slot re-sets result each iteration)

    // compare current character
    __ lhu(AT, str1, 0);;
    __ lhu(tmp, str2, 0);
    __ bne(AT, tmp, done);
    __ delayed()->daddi(result, R0, 0); // delay slot: result = 0; overwritten
                                        // with 1 above if the loop continues
    __ addi(str1, str1, 2);
    __ addi(str2, str2, 2);
    __ b(Loop);
    __ delayed()->addi(cnt, cnt, -1); // Loop end

    __ bind(done);
  %}

  ins_pipe( pipe_slow );
%}
9471 //----------Arithmetic Instructions-------------------------------------------
9472 //----------Addition Instructions---------------------------------------------
9473 instruct addI_Reg_Reg(mRegI dst, mRegI src1, mRegI src2) %{
9474 match(Set dst (AddI src1 src2));
9476 format %{ "add $dst, $src1, $src2 #@addI_Reg_Reg" %}
9477 ins_encode %{
9478 Register dst = $dst$$Register;
9479 Register src1 = $src1$$Register;
9480 Register src2 = $src2$$Register;
9481 __ addu32(dst, src1, src2);
9482 %}
9483 ins_pipe( ialu_regI_regI );
9484 %}
// 32-bit add of a register and an arbitrary int immediate (AddI).
instruct addI_Reg_imm(mRegI dst, mRegI src1, immI src2) %{
  match(Set dst (AddI src1 src2));

  format %{ "add $dst, $src1, $src2 #@addI_Reg_imm" %}
  ins_encode %{
    Register dst = $dst$$Register;
    Register src1 = $src1$$Register;
    int imm = $src2$$constant;

    // Small immediates fit the 16-bit addiu field; otherwise the constant
    // is materialized in AT first.
    if(Assembler::is_simm16(imm)) {
      __ addiu32(dst, src1, imm);
    } else {
      __ move(AT, imm);
      __ addu32(dst, src1, AT);
    }
  %}
  ins_pipe( ialu_regI_regI );
%}
9505 instruct addP_reg_reg(mRegP dst, mRegP src1, mRegL src2) %{
9506 match(Set dst (AddP src1 src2));
9508 format %{ "dadd $dst, $src1, $src2 #@addP_reg_reg" %}
9510 ins_encode %{
9511 Register dst = $dst$$Register;
9512 Register src1 = $src1$$Register;
9513 Register src2 = $src2$$Register;
9514 __ daddu(dst, src1, src2);
9515 %}
9517 ins_pipe( ialu_regI_regI );
9518 %}
// Pointer plus sign-extended int offset: dst = src1 + (long)src2.
// The ConvI2L needs no extra instruction here -- presumably this port
// keeps int values sign-extended in 64-bit registers (confirm), so a
// plain daddu is sufficient.
instruct addP_reg_reg_convI2L(mRegP dst, mRegP src1, mRegI src2) %{
  match(Set dst (AddP src1 (ConvI2L src2)));

  format %{ "dadd $dst, $src1, $src2 #@addP_reg_reg_convI2L" %}

  ins_encode %{
    Register dst = $dst$$Register;
    Register src1 = $src1$$Register;
    Register src2 = $src2$$Register;
    __ daddu(dst, src1, src2);
  %}

  ins_pipe( ialu_regI_regI );
%}
// Pointer plus long immediate (AddP).
instruct addP_reg_imm(mRegP dst, mRegP src1, immL src2) %{
  match(Set dst (AddP src1 src2));

  format %{ "daddi $dst, $src1, $src2 #@addP_reg_imm" %}
  ins_encode %{
    Register src1 = $src1$$Register;
    long src2 = $src2$$constant;
    Register dst = $dst$$Register;

    // 16-bit immediates go straight into daddiu; anything larger is
    // materialized in AT with set64 first.
    if(Assembler::is_simm16(src2)) {
      __ daddiu(dst, src1, src2);
    } else {
      __ set64(AT, src2);
      __ daddu(dst, src1, AT);
    }
  %}
  ins_pipe( ialu_regI_imm16 );
%}
9554 // Add Long Register with Register
9555 instruct addL_Reg_Reg(mRegL dst, mRegL src1, mRegL src2) %{
9556 match(Set dst (AddL src1 src2));
9557 ins_cost(200);
9558 format %{ "ADD $dst, $src1, $src2 #@addL_Reg_Reg\t" %}
9560 ins_encode %{
9561 Register dst_reg = as_Register($dst$$reg);
9562 Register src1_reg = as_Register($src1$$reg);
9563 Register src2_reg = as_Register($src2$$reg);
9565 __ daddu(dst_reg, src1_reg, src2_reg);
9566 %}
9568 ins_pipe( ialu_regL_regL );
9569 %}
// Long plus 16-bit immediate (AddL); immL16 guarantees the constant fits
// the daddiu immediate field.
instruct addL_Reg_imm(mRegL dst, mRegL src1, immL16 src2)
%{
  match(Set dst (AddL src1 src2));

  format %{ "ADD $dst, $src1, $src2 #@addL_Reg_imm " %}
  ins_encode %{
    Register dst_reg  = as_Register($dst$$reg);
    Register src1_reg = as_Register($src1$$reg);
    int      src2_imm = $src2$$constant;

    __ daddiu(dst_reg, src1_reg, src2_imm);
  %}

  ins_pipe( ialu_regL_regL );
%}
// (long)int + 16-bit immediate (AddL of ConvI2L).  The int source needs no
// explicit extension -- presumably ints are kept sign-extended in 64-bit
// registers in this port (confirm).
instruct addL_RegI2L_imm(mRegL dst, mRegI src1, immL16 src2)
%{
  match(Set dst (AddL (ConvI2L src1) src2));

  format %{ "ADD $dst, $src1, $src2 #@addL_RegI2L_imm " %}
  ins_encode %{
    Register dst_reg  = as_Register($dst$$reg);
    Register src1_reg = as_Register($src1$$reg);
    int      src2_imm = $src2$$constant;

    __ daddiu(dst_reg, src1_reg, src2_imm);
  %}

  ins_pipe( ialu_regL_regL );
%}
// (long)int + long (AddL of ConvI2L); plain daddu, no explicit extension.
instruct addL_RegI2L_Reg(mRegL dst, mRegI src1, mRegL src2) %{
  match(Set dst (AddL (ConvI2L src1) src2));
  ins_cost(200);
  format %{ "ADD $dst, $src1, $src2 #@addL_RegI2L_Reg\t" %}

  ins_encode %{
    Register dst_reg  = as_Register($dst$$reg);
    Register src1_reg = as_Register($src1$$reg);
    Register src2_reg = as_Register($src2$$reg);

    __ daddu(dst_reg, src1_reg, src2_reg);
  %}

  ins_pipe( ialu_regL_regL );
%}
// (long)int + (long)int (AddL of two ConvI2L); plain daddu.
instruct addL_RegI2L_RegI2L(mRegL dst, mRegI src1, mRegI src2) %{
  match(Set dst (AddL (ConvI2L src1) (ConvI2L src2)));
  ins_cost(200);
  format %{ "ADD $dst, $src1, $src2 #@addL_RegI2L_RegI2L\t" %}

  ins_encode %{
    Register dst_reg  = as_Register($dst$$reg);
    Register src1_reg = as_Register($src1$$reg);
    Register src2_reg = as_Register($src2$$reg);

    __ daddu(dst_reg, src1_reg, src2_reg);
  %}

  ins_pipe( ialu_regL_regL );
%}
// long + (long)int (AddL with ConvI2L on the right); plain daddu.
instruct addL_Reg_RegI2L(mRegL dst, mRegL src1, mRegI src2) %{
  match(Set dst (AddL src1 (ConvI2L src2)));
  ins_cost(200);
  format %{ "ADD $dst, $src1, $src2 #@addL_Reg_RegI2L\t" %}

  ins_encode %{
    Register dst_reg  = as_Register($dst$$reg);
    Register src1_reg = as_Register($src1$$reg);
    Register src2_reg = as_Register($src2$$reg);

    __ daddu(dst_reg, src1_reg, src2_reg);
  %}

  ins_pipe( ialu_regL_regL );
%}
9651 //----------Subtraction Instructions-------------------------------------------
9652 // Integer Subtraction Instructions
9653 instruct subI_Reg_Reg(mRegI dst, mRegI src1, mRegI src2) %{
9654 match(Set dst (SubI src1 src2));
9655 ins_cost(100);
9657 format %{ "sub $dst, $src1, $src2 #@subI_Reg_Reg" %}
9658 ins_encode %{
9659 Register dst = $dst$$Register;
9660 Register src1 = $src1$$Register;
9661 Register src2 = $src2$$Register;
9662 __ subu32(dst, src1, src2);
9663 %}
9664 ins_pipe( ialu_regI_regI );
9665 %}
// 32-bit subtract of an immediate, encoded as an add of the negated
// constant; the immI16_sub operand constrains the constant so that
// -constant fits the 16-bit addiu immediate field.
instruct subI_Reg_immI16_sub(mRegI dst, mRegI src1, immI16_sub src2) %{
  match(Set dst (SubI src1 src2));
  ins_cost(80);

  format %{ "sub $dst, $src1, $src2 #@subI_Reg_immI16_sub" %}
  ins_encode %{
    Register dst = $dst$$Register;
    Register src1 = $src1$$Register;
    __ addiu32(dst, src1, -1 * $src2$$constant);
  %}
  ins_pipe( ialu_regI_regI );
%}
9680 instruct negI_Reg(mRegI dst, immI0 zero, mRegI src) %{
9681 match(Set dst (SubI zero src));
9682 ins_cost(80);
9684 format %{ "neg $dst, $src #@negI_Reg" %}
9685 ins_encode %{
9686 Register dst = $dst$$Register;
9687 Register src = $src$$Register;
9688 __ subu32(dst, R0, src);
9689 %}
9690 ins_pipe( ialu_regI_regI );
9691 %}
9693 instruct negL_Reg(mRegL dst, immL0 zero, mRegL src) %{
9694 match(Set dst (SubL zero src));
9695 ins_cost(80);
9697 format %{ "neg $dst, $src #@negL_Reg" %}
9698 ins_encode %{
9699 Register dst = $dst$$Register;
9700 Register src = $src$$Register;
9701 __ subu(dst, R0, src);
9702 %}
9703 ins_pipe( ialu_regI_regI );
9704 %}
// 64-bit subtract of an immediate, encoded as daddiu of the negated
// constant; immL16_sub constrains the constant so that -constant fits
// the 16-bit immediate field.
instruct subL_Reg_immL16_sub(mRegL dst, mRegL src1, immL16_sub src2) %{
  match(Set dst (SubL src1 src2));
  ins_cost(80);

  format %{ "sub $dst, $src1, $src2 #@subL_Reg_immL16_sub" %}
  ins_encode %{
    Register dst = $dst$$Register;
    Register src1 = $src1$$Register;
    __ daddiu(dst, src1, -1 * $src2$$constant);
  %}
  ins_pipe( ialu_regI_regI );
%}
9719 // Subtract Long Register with Register.
9720 instruct subL_Reg_Reg(mRegL dst, mRegL src1, mRegL src2) %{
9721 match(Set dst (SubL src1 src2));
9722 ins_cost(100);
9723 format %{ "SubL $dst, $src1, $src2 @ subL_Reg_Reg" %}
9724 ins_encode %{
9725 Register dst = as_Register($dst$$reg);
9726 Register src1 = as_Register($src1$$reg);
9727 Register src2 = as_Register($src2$$reg);
9729 __ subu(dst, src1, src2);
9730 %}
9731 ins_pipe( ialu_regL_regL );
9732 %}
// long - (long)int (SubL with ConvI2L on the right); plain 64-bit subu.
instruct subL_Reg_RegI2L(mRegL dst, mRegL src1, mRegI src2) %{
  match(Set dst (SubL src1 (ConvI2L src2)));
  ins_cost(100);
  format %{ "SubL $dst, $src1, $src2 @ subL_Reg_RegI2L" %}
  ins_encode %{
    Register dst  = as_Register($dst$$reg);
    Register src1 = as_Register($src1$$reg);
    Register src2 = as_Register($src2$$reg);

    __ subu(dst, src1, src2);
  %}
  ins_pipe( ialu_regL_regL );
%}
// (long)int - long (SubL with ConvI2L on the left); plain 64-bit subu.
instruct subL_RegI2L_Reg(mRegL dst, mRegI src1, mRegL src2) %{
  match(Set dst (SubL (ConvI2L src1) src2));
  ins_cost(200);
  format %{ "SubL $dst, $src1, $src2 @ subL_RegI2L_Reg" %}
  ins_encode %{
    Register dst  = as_Register($dst$$reg);
    Register src1 = as_Register($src1$$reg);
    Register src2 = as_Register($src2$$reg);

    __ subu(dst, src1, src2);
  %}
  ins_pipe( ialu_regL_regL );
%}
// (long)int - (long)int (SubL of two ConvI2L); plain 64-bit subu.
instruct subL_RegI2L_RegI2L(mRegL dst, mRegI src1, mRegI src2) %{
  match(Set dst (SubL (ConvI2L src1) (ConvI2L src2)));
  ins_cost(200);
  format %{ "SubL $dst, $src1, $src2 @ subL_RegI2L_RegI2L" %}
  ins_encode %{
    Register dst  = as_Register($dst$$reg);
    Register src1 = as_Register($src1$$reg);
    Register src2 = as_Register($src2$$reg);

    __ subu(dst, src1, src2);
  %}
  ins_pipe( ialu_regL_regL );
%}
// Integer MOD with Register: dst = src1 % src2 (ModI); remainder is read
// from the HI register after a 32-bit div.
// NOTE(review): unlike divI_Reg_Reg, no explicit divide-by-zero trap (teq)
// is emitted here -- presumably the IR's zero check guards ModI; confirm.
instruct modI_Reg_Reg(mRegI dst, mRegI src1, mRegI src2) %{
  match(Set dst (ModI src1 src2));
  ins_cost(300);
  format %{ "modi $dst, $src1, $src2 @ modI_Reg_Reg" %}
  ins_encode %{
    Register dst  = $dst$$Register;
    Register src1 = $src1$$Register;
    Register src2 = $src2$$Register;

    //if (UseLoongsonISA) {
    if (0) {
      // 2016.08.10
      // Experiments show that gsmod is slower that div+mfhi.
      // So I just disable it here.
      __ gsmod(dst, src1, src2);
    } else {
      __ div(src1, src2);
      __ mfhi(dst);
    }
  %}

  //ins_pipe( ialu_mod );
  ins_pipe( ialu_regI_regI );
%}
// Long MOD: dst = src1 % src2 (ModL).  Uses the Loongson gsdmod fused
// instruction when available, otherwise ddiv + mfhi.
// NOTE(review): like modI_Reg_Reg, no explicit divide-by-zero trap here --
// presumably guarded by the IR's zero check; confirm.
instruct modL_reg_reg(mRegL dst, mRegL src1, mRegL src2) %{
  match(Set dst (ModL src1 src2));
  format %{ "modL $dst, $src1, $src2 @modL_reg_reg" %}

  ins_encode %{
    Register dst = as_Register($dst$$reg);
    Register op1 = as_Register($src1$$reg);
    Register op2 = as_Register($src2$$reg);

    if (UseLoongsonISA) {
      __ gsdmod(dst, op1, op2);
    } else {
      __ ddiv(op1, op2);
      __ mfhi(dst);
    }
  %}
  ins_pipe( pipe_slow );
%}
// MulI: 32-bit multiply; mul writes the low word directly to a GPR.
instruct mulI_Reg_Reg(mRegI dst, mRegI src1, mRegI src2) %{
  match(Set dst (MulI src1 src2));

  ins_cost(300);
  format %{ "mul $dst, $src1, $src2 @ mulI_Reg_Reg" %}
  ins_encode %{
    Register src1 = $src1$$Register;
    Register src2 = $src2$$Register;
    Register dst = $dst$$Register;

    __ mul(dst, src1, src2);
  %}
  ins_pipe( ialu_mult );
%}

// AddI(MulI): multiply-add through the HI/LO accumulator: LO is preloaded
// with src3, madd accumulates src1 * src2, and the low word is read back.
// NOTE(review): the very high ins_cost biases the matcher toward separate
// mul + add rules -- presumably intentional; confirm.
instruct maddI_Reg_Reg(mRegI dst, mRegI src1, mRegI src2, mRegI src3) %{
  match(Set dst (AddI (MulI src1 src2) src3));

  ins_cost(999);
  format %{ "madd $dst, $src1 * $src2 + $src3 #@maddI_Reg_Reg" %}
  ins_encode %{
    Register src1 = $src1$$Register;
    Register src2 = $src2$$Register;
    Register src3 = $src3$$Register;
    Register dst = $dst$$Register;

    __ mtlo(src3);
    __ madd(src1, src2);
    __ mflo(dst);
  %}
  ins_pipe( ialu_mult );
%}
// DivI: 32-bit signed divide with an explicit divide-by-zero trap.
instruct divI_Reg_Reg(mRegI dst, mRegI src1, mRegI src2) %{
  match(Set dst (DivI src1 src2));

  ins_cost(300);
  format %{ "div $dst, $src1, $src2 @ divI_Reg_Reg" %}
  ins_encode %{
    Register src1 = $src1$$Register;
    Register src2 = $src2$$Register;
    Register dst = $dst$$Register;

    /* 2012/4/21 Jin: In MIPS, div does not cause exception.
       We must trap an exception manually. */
    // teq traps when R0 == src2, i.e. when the divisor is zero (code 0x7).
    __ teq(R0, src2, 0x7);

    if (UseLoongsonISA) {
      // Single-instruction divide writing the quotient to a GPR.
      __ gsdiv(dst, src1, src2);
    } else {
      __ div(src1, src2);

      // NOTE(review): the two nops look like HI/LO access-hazard padding
      // between div and mflo -- confirm whether still required.
      __ nop();
      __ nop();
      __ mflo(dst);
    }
  %}
  ins_pipe( ialu_mod );
%}
// DivF: single-precision FP divide.
instruct divF_Reg_Reg(regF dst, regF src1, regF src2) %{
  match(Set dst (DivF src1 src2));

  ins_cost(300);
  format %{ "divF $dst, $src1, $src2 @ divF_Reg_Reg" %}
  ins_encode %{
    FloatRegister src1 = $src1$$FloatRegister;
    FloatRegister src2 = $src2$$FloatRegister;
    FloatRegister dst = $dst$$FloatRegister;

    /* Here do we need to trap an exception manually ? */
    // (Java FP divide by zero yields Inf/NaN, no exception.)
    __ div_s(dst, src1, src2);
  %}
  ins_pipe( pipe_slow );
%}

// DivD: double-precision FP divide.
instruct divD_Reg_Reg(regD dst, regD src1, regD src2) %{
  match(Set dst (DivD src1 src2));

  ins_cost(300);
  format %{ "divD $dst, $src1, $src2 @ divD_Reg_Reg" %}
  ins_encode %{
    FloatRegister src1 = $src1$$FloatRegister;
    FloatRegister src2 = $src2$$FloatRegister;
    FloatRegister dst = $dst$$FloatRegister;

    /* Here do we need to trap an exception manually ? */
    __ div_d(dst, src1, src2);
  %}
  ins_pipe( pipe_slow );
%}
// MulL: 64-bit multiply, low 64 bits of the product.
instruct mulL_reg_reg(mRegL dst, mRegL src1, mRegL src2) %{
  match(Set dst (MulL src1 src2));
  format %{ "mulL $dst, $src1, $src2 @mulL_reg_reg" %}
  ins_encode %{
    Register dst = as_Register($dst$$reg);
    Register op1 = as_Register($src1$$reg);
    Register op2 = as_Register($src2$$reg);

    if (UseLoongsonISA) {
      // Single instruction writing the product directly to a GPR.
      __ gsdmult(dst, op1, op2);
    } else {
      // dmult leaves the low 64 bits of the product in LO.
      __ dmult(op1, op2);
      __ mflo(dst);
    }
  %}
  ins_pipe( pipe_slow );
%}

// MulL with a sign-extended int operand: dst = src1 * (long)src2.
// NOTE(review): src2 is used directly -- assumes int values are kept
// sign-extended in 64-bit registers (MIPS64 convention); confirm.
instruct mulL_reg_regI2L(mRegL dst, mRegL src1, mRegI src2) %{
  match(Set dst (MulL src1 (ConvI2L src2)));
  format %{ "mulL $dst, $src1, $src2 @mulL_reg_regI2L" %}
  ins_encode %{
    Register dst = as_Register($dst$$reg);
    Register op1 = as_Register($src1$$reg);
    Register op2 = as_Register($src2$$reg);

    if (UseLoongsonISA) {
      __ gsdmult(dst, op1, op2);
    } else {
      __ dmult(op1, op2);
      __ mflo(dst);
    }
  %}
  ins_pipe( pipe_slow );
%}
// DivL: 64-bit signed divide.
// NOTE(review): unlike divI_Reg_Reg, no teq zero-divisor trap is emitted
// here -- presumably handled elsewhere; confirm.
instruct divL_reg_reg(mRegL dst, mRegL src1, mRegL src2) %{
  match(Set dst (DivL src1 src2));
  format %{ "divL $dst, $src1, $src2 @divL_reg_reg" %}

  ins_encode %{
    Register dst = as_Register($dst$$reg);
    Register op1 = as_Register($src1$$reg);
    Register op2 = as_Register($src2$$reg);

    if (UseLoongsonISA) {
      __ gsddiv(dst, op1, op2);
    } else {
      // ddiv leaves the 64-bit quotient in LO.
      __ ddiv(op1, op2);
      __ mflo(dst);
    }
  %}
  ins_pipe( pipe_slow );
%}
// AddF: single-precision FP add.
instruct addF_reg_reg(regF dst, regF src1, regF src2) %{
  match(Set dst (AddF src1 src2));
  format %{ "AddF $dst, $src1, $src2 @addF_reg_reg" %}
  ins_encode %{
    FloatRegister src1 = as_FloatRegister($src1$$reg);
    FloatRegister src2 = as_FloatRegister($src2$$reg);
    FloatRegister dst = as_FloatRegister($dst$$reg);

    __ add_s(dst, src1, src2);
  %}
  ins_pipe( fpu_regF_regF );
%}

// SubF: single-precision FP subtract.
instruct subF_reg_reg(regF dst, regF src1, regF src2) %{
  match(Set dst (SubF src1 src2));
  format %{ "SubF $dst, $src1, $src2 @subF_reg_reg" %}
  ins_encode %{
    FloatRegister src1 = as_FloatRegister($src1$$reg);
    FloatRegister src2 = as_FloatRegister($src2$$reg);
    FloatRegister dst = as_FloatRegister($dst$$reg);

    __ sub_s(dst, src1, src2);
  %}
  ins_pipe( fpu_regF_regF );
%}

// AddD: double-precision FP add.
instruct addD_reg_reg(regD dst, regD src1, regD src2) %{
  match(Set dst (AddD src1 src2));
  format %{ "AddD $dst, $src1, $src2 @addD_reg_reg" %}
  ins_encode %{
    FloatRegister src1 = as_FloatRegister($src1$$reg);
    FloatRegister src2 = as_FloatRegister($src2$$reg);
    FloatRegister dst = as_FloatRegister($dst$$reg);

    __ add_d(dst, src1, src2);
  %}
  ins_pipe( fpu_regF_regF );
%}

// SubD: double-precision FP subtract.
instruct subD_reg_reg(regD dst, regD src1, regD src2) %{
  match(Set dst (SubD src1 src2));
  format %{ "SubD $dst, $src1, $src2 @subD_reg_reg" %}
  ins_encode %{
    FloatRegister src1 = as_FloatRegister($src1$$reg);
    FloatRegister src2 = as_FloatRegister($src2$$reg);
    FloatRegister dst = as_FloatRegister($dst$$reg);

    __ sub_d(dst, src1, src2);
  %}
  ins_pipe( fpu_regF_regF );
%}
// NegF: single-precision FP negate.
instruct negF_reg(regF dst, regF src) %{
  match(Set dst (NegF src));
  format %{ "negF $dst, $src @negF_reg" %}
  ins_encode %{
    FloatRegister src = as_FloatRegister($src$$reg);
    FloatRegister dst = as_FloatRegister($dst$$reg);

    __ neg_s(dst, src);
  %}
  ins_pipe( fpu_regF_regF );
%}

// NegD: double-precision FP negate.
instruct negD_reg(regD dst, regD src) %{
  match(Set dst (NegD src));
  format %{ "negD $dst, $src @negD_reg" %}
  ins_encode %{
    FloatRegister src = as_FloatRegister($src$$reg);
    FloatRegister dst = as_FloatRegister($dst$$reg);

    __ neg_d(dst, src);
  %}
  ins_pipe( fpu_regF_regF );
%}
// MulF: single-precision FP multiply.
instruct mulF_reg_reg(regF dst, regF src1, regF src2) %{
  match(Set dst (MulF src1 src2));
  format %{ "MULF $dst, $src1, $src2 @mulF_reg_reg" %}
  ins_encode %{
    FloatRegister src1 = $src1$$FloatRegister;
    FloatRegister src2 = $src2$$FloatRegister;
    FloatRegister dst = $dst$$FloatRegister;

    __ mul_s(dst, src1, src2);
  %}
  ins_pipe( fpu_regF_regF );
%}

// AddF(MulF): single-precision multiply-add via madd.s.
instruct maddF_reg_reg(regF dst, regF src1, regF src2, regF src3) %{
  match(Set dst (AddF (MulF src1 src2) src3));
  // For compatibility reason (e.g. on the Loongson platform), disable this guy.
  // (The huge ins_cost makes the matcher prefer separate mul/add rules.)
  ins_cost(44444);
  format %{ "maddF $dst, $src1, $src2, $src3 @maddF_reg_reg" %}
  ins_encode %{
    FloatRegister src1 = $src1$$FloatRegister;
    FloatRegister src2 = $src2$$FloatRegister;
    FloatRegister src3 = $src3$$FloatRegister;
    FloatRegister dst = $dst$$FloatRegister;

    __ madd_s(dst, src1, src2, src3);
  %}
  ins_pipe( fpu_regF_regF );
%}

// Mul two double precision floating point numbers.
instruct mulD_reg_reg(regD dst, regD src1, regD src2) %{
  match(Set dst (MulD src1 src2));
  format %{ "MULD $dst, $src1, $src2 @mulD_reg_reg" %}
  ins_encode %{
    FloatRegister src1 = $src1$$FloatRegister;
    FloatRegister src2 = $src2$$FloatRegister;
    FloatRegister dst = $dst$$FloatRegister;

    __ mul_d(dst, src1, src2);
  %}
  ins_pipe( fpu_regF_regF );
%}

// AddD(MulD): double-precision multiply-add via madd.d.
instruct maddD_reg_reg(regD dst, regD src1, regD src2, regD src3) %{
  match(Set dst (AddD (MulD src1 src2) src3));
  // For compatibility reason (e.g. on the Loongson platform), disable this guy.
  // (The huge ins_cost makes the matcher prefer separate mul/add rules.)
  ins_cost(44444);
  format %{ "maddD $dst, $src1, $src2, $src3 @maddD_reg_reg" %}
  ins_encode %{
    FloatRegister src1 = $src1$$FloatRegister;
    FloatRegister src2 = $src2$$FloatRegister;
    FloatRegister src3 = $src3$$FloatRegister;
    FloatRegister dst = $dst$$FloatRegister;

    __ madd_d(dst, src1, src2, src3);
  %}
  ins_pipe( fpu_regF_regF );
%}
// AbsF: single-precision FP absolute value.
instruct absF_reg(regF dst, regF src) %{
  match(Set dst (AbsF src));
  ins_cost(100);
  format %{ "absF $dst, $src @absF_reg" %}
  ins_encode %{
    FloatRegister src = as_FloatRegister($src$$reg);
    FloatRegister dst = as_FloatRegister($dst$$reg);

    __ abs_s(dst, src);
  %}
  ins_pipe( fpu_regF_regF );
%}


// intrinsics for math_native.
// AbsD SqrtD CosD SinD TanD LogD Log10D

// AbsD: double-precision FP absolute value.
instruct absD_reg(regD dst, regD src) %{
  match(Set dst (AbsD src));
  ins_cost(100);
  format %{ "absD $dst, $src @absD_reg" %}
  ins_encode %{
    FloatRegister src = as_FloatRegister($src$$reg);
    FloatRegister dst = as_FloatRegister($dst$$reg);

    __ abs_d(dst, src);
  %}
  ins_pipe( fpu_regF_regF );
%}

// SqrtD: double-precision square root.
instruct sqrtD_reg(regD dst, regD src) %{
  match(Set dst (SqrtD src));
  ins_cost(100);
  format %{ "SqrtD $dst, $src @sqrtD_reg" %}
  ins_encode %{
    FloatRegister src = as_FloatRegister($src$$reg);
    FloatRegister dst = as_FloatRegister($dst$$reg);

    __ sqrt_d(dst, src);
  %}
  ins_pipe( fpu_regF_regF );
%}

// Single-precision sqrt: matches the F->D, SqrtD, D->F idiom the compiler
// generates and collapses it to one sqrt.s.
instruct sqrtF_reg(regF dst, regF src) %{
  match(Set dst (ConvD2F (SqrtD (ConvF2D src))));
  ins_cost(100);
  format %{ "SqrtF $dst, $src @sqrtF_reg" %}
  ins_encode %{
    FloatRegister src = as_FloatRegister($src$$reg);
    FloatRegister dst = as_FloatRegister($dst$$reg);

    __ sqrt_s(dst, src);
  %}
  ins_pipe( fpu_regF_regF );
%}
//----------------------------------Logical Instructions----------------------
//__________________________________Integer Logical Instructions-------------

//And Instructions
// And Register with Immediate
// AndI with a general 32-bit immediate: materialize the constant into AT,
// then do a register-register and.
instruct andI_Reg_immI(mRegI dst, mRegI src1, immI src2) %{
  match(Set dst (AndI src1 src2));

  format %{ "and $dst, $src1, $src2 #@andI_Reg_immI" %}
  ins_encode %{
    Register dst = $dst$$Register;
    Register src = $src1$$Register;
    int val = $src2$$constant;

    __ move(AT, val);
    __ andr(dst, src, AT);
  %}
  ins_pipe( ialu_regI_regI );
%}
// AndI with an immediate that fits andi's zero-extended 16-bit field.
instruct andI_Reg_imm_0_65535(mRegI dst, mRegI src1, immI_0_65535 src2) %{
  match(Set dst (AndI src1 src2));
  ins_cost(60);

  format %{ "and $dst, $src1, $src2 #@andI_Reg_imm_0_65535" %}
  ins_encode %{
    Register dst = $dst$$Register;
    Register src = $src1$$Register;
    int val = $src2$$constant;

    __ andi(dst, src, val);
  %}
  ins_pipe( ialu_regI_regI );
%}

// AndI with a low-bit mask (2^k - 1): use ext to extract the low k bits.
instruct andI_Reg_immI_nonneg_mask(mRegI dst, mRegI src1, immI_nonneg_mask mask) %{
  match(Set dst (AndI src1 mask));
  ins_cost(60);

  format %{ "and $dst, $src1, $mask #@andI_Reg_immI_nonneg_mask" %}
  ins_encode %{
    Register dst = $dst$$Register;
    Register src = $src1$$Register;
    // is_int_mask returns the width k of the mask.
    int size = Assembler::is_int_mask($mask$$constant);

    __ ext(dst, src, 0, size);
  %}
  ins_pipe( ialu_regI_regI );
%}

// AndL with a low-bit mask (2^k - 1): dext extracts the low k bits of a long.
instruct andL_Reg_immL_nonneg_mask(mRegL dst, mRegL src1, immL_nonneg_mask mask) %{
  match(Set dst (AndL src1 mask));
  ins_cost(60);

  format %{ "and $dst, $src1, $mask #@andL_Reg_immL_nonneg_mask" %}
  ins_encode %{
    Register dst = $dst$$Register;
    Register src = $src1$$Register;
    // is_jlong_mask returns the width k of the mask.
    int size = Assembler::is_jlong_mask($mask$$constant);

    __ dext(dst, src, 0, size);
  %}
  ins_pipe( ialu_regI_regI );
%}
// XorI with an immediate that fits xori's zero-extended 16-bit field.
instruct xorI_Reg_imm_0_65535(mRegI dst, mRegI src1, immI_0_65535 src2) %{
  match(Set dst (XorI src1 src2));
  ins_cost(60);

  format %{ "xori $dst, $src1, $src2 #@xorI_Reg_imm_0_65535" %}
  ins_encode %{
    Register dst = $dst$$Register;
    Register src = $src1$$Register;
    int val = $src2$$constant;

    __ xori(dst, src, val);
  %}
  ins_pipe( ialu_regI_regI );
%}

// XorI with -1 is bitwise NOT: gsorn with R0 yields R0 | ~src == ~src.
instruct xorI_Reg_immI_M1(mRegI dst, mRegI src1, immI_M1 M1) %{
  match(Set dst (XorI src1 M1));
  predicate(UseLoongsonISA);
  ins_cost(60);

  format %{ "xor $dst, $src1, $M1 #@xorI_Reg_immI_M1" %}
  ins_encode %{
    Register dst = $dst$$Register;
    Register src = $src1$$Register;

    __ gsorn(dst, R0, src);
  %}
  ins_pipe( ialu_regI_regI );
%}

// Same NOT idiom when the int input comes from a narrowed long.
instruct xorL2I_Reg_immI_M1(mRegI dst, mRegL src1, immI_M1 M1) %{
  match(Set dst (XorI (ConvL2I src1) M1));
  predicate(UseLoongsonISA);
  ins_cost(60);

  format %{ "xor $dst, $src1, $M1 #@xorL2I_Reg_immI_M1" %}
  ins_encode %{
    Register dst = $dst$$Register;
    Register src = $src1$$Register;

    __ gsorn(dst, R0, src);
  %}
  ins_pipe( ialu_regI_regI );
%}

// XorL with an immediate that fits xori's zero-extended 16-bit field.
instruct xorL_Reg_imm_0_65535(mRegL dst, mRegL src1, immL_0_65535 src2) %{
  match(Set dst (XorL src1 src2));
  ins_cost(60);

  format %{ "xori $dst, $src1, $src2 #@xorL_Reg_imm_0_65535" %}
  ins_encode %{
    Register dst = $dst$$Register;
    Register src = $src1$$Register;
    int val = $src2$$constant;

    __ xori(dst, src, val);
  %}
  ins_pipe( ialu_regI_regI );
%}
10283 /*
10284 instruct xorL_Reg_immL_M1(mRegL dst, mRegL src1, immL_M1 M1) %{
10285 match(Set dst (XorL src1 M1));
10286 predicate(UseLoongsonISA);
10287 ins_cost(60);
10289 format %{ "xor $dst, $src1, $M1 #@xorL_Reg_immL_M1" %}
10290 ins_encode %{
10291 Register dst = $dst$$Register;
10292 Register src = $src1$$Register;
10294 __ gsorn(dst, R0, src);
10295 %}
10296 ins_pipe( ialu_regI_regI );
10297 %}
10298 */
10300 instruct lbu_and_lmask(mRegI dst, memory mem, immI_255 mask) %{
10301 match(Set dst (AndI mask (LoadB mem)));
10302 ins_cost(60);
10304 format %{ "lhu $dst, $mem #@lbu_and_lmask" %}
10305 ins_encode(load_UB_enc(dst, mem));
10306 ins_pipe( ialu_loadI );
10307 %}
10309 instruct lbu_and_rmask(mRegI dst, memory mem, immI_255 mask) %{
10310 match(Set dst (AndI (LoadB mem) mask));
10311 ins_cost(60);
10313 format %{ "lhu $dst, $mem #@lbu_and_rmask" %}
10314 ins_encode(load_UB_enc(dst, mem));
10315 ins_pipe( ialu_loadI );
10316 %}
// AndI: 32-bit register-register and.
instruct andI_Reg_Reg(mRegI dst, mRegI src1, mRegI src2) %{
  match(Set dst (AndI src1 src2));

  format %{ "and $dst, $src1, $src2 #@andI_Reg_Reg" %}
  ins_encode %{
    Register dst = $dst$$Register;
    Register src1 = $src1$$Register;
    Register src2 = $src2$$Register;
    __ andr(dst, src1, src2);
  %}
  ins_pipe( ialu_regI_regI );
%}
// (AndI src1 (XorI src2 -1)) == src1 & ~src2: Loongson and-not.
// Per the operand order in these rules, gsandn(d, a, b) computes a & ~b.
instruct andnI_Reg_nReg(mRegI dst, mRegI src1, mRegI src2, immI_M1 M1) %{
  match(Set dst (AndI src1 (XorI src2 M1)));
  predicate(UseLoongsonISA);

  format %{ "andn $dst, $src1, $src2 #@andnI_Reg_nReg" %}
  ins_encode %{
    Register dst = $dst$$Register;
    Register src1 = $src1$$Register;
    Register src2 = $src2$$Register;

    __ gsandn(dst, src1, src2);
  %}
  ins_pipe( ialu_regI_regI );
%}

// (OrI src1 (XorI src2 -1)) == src1 | ~src2: Loongson or-not.
instruct ornI_Reg_nReg(mRegI dst, mRegI src1, mRegI src2, immI_M1 M1) %{
  match(Set dst (OrI src1 (XorI src2 M1)));
  predicate(UseLoongsonISA);

  format %{ "orn $dst, $src1, $src2 #@ornI_Reg_nReg" %}
  ins_encode %{
    Register dst = $dst$$Register;
    Register src1 = $src1$$Register;
    Register src2 = $src2$$Register;

    __ gsorn(dst, src1, src2);
  %}
  ins_pipe( ialu_regI_regI );
%}

// (AndI (XorI src1 -1) src2) == ~src1 & src2: same and-not with the
// negated operand on the left, so the gsandn operands are swapped.
instruct andnI_nReg_Reg(mRegI dst, mRegI src1, mRegI src2, immI_M1 M1) %{
  match(Set dst (AndI (XorI src1 M1) src2));
  predicate(UseLoongsonISA);

  format %{ "andn $dst, $src2, $src1 #@andnI_nReg_Reg" %}
  ins_encode %{
    Register dst = $dst$$Register;
    Register src1 = $src1$$Register;
    Register src2 = $src2$$Register;

    __ gsandn(dst, src2, src1);
  %}
  ins_pipe( ialu_regI_regI );
%}

// (OrI (XorI src1 -1) src2) == ~src1 | src2: or-not, operands swapped.
instruct ornI_nReg_Reg(mRegI dst, mRegI src1, mRegI src2, immI_M1 M1) %{
  match(Set dst (OrI (XorI src1 M1) src2));
  predicate(UseLoongsonISA);

  format %{ "orn $dst, $src2, $src1 #@ornI_nReg_Reg" %}
  ins_encode %{
    Register dst = $dst$$Register;
    Register src1 = $src1$$Register;
    Register src2 = $src2$$Register;

    __ gsorn(dst, src2, src1);
  %}
  ins_pipe( ialu_regI_regI );
%}
// And Long Register with Register
// AndL: 64-bit register-register and.
instruct andL_Reg_Reg(mRegL dst, mRegL src1, mRegL src2) %{
  match(Set dst (AndL src1 src2));
  format %{ "AND $dst, $src1, $src2 @ andL_Reg_Reg\n\t" %}
  ins_encode %{
    Register dst_reg = as_Register($dst$$reg);
    Register src1_reg = as_Register($src1$$reg);
    Register src2_reg = as_Register($src2$$reg);

    __ andr(dst_reg, src1_reg, src2_reg);
  %}
  ins_pipe( ialu_regL_regL );
%}

// AndL with a sign-extended int on the right; src2 is used directly
// (ints are presumed kept sign-extended in 64-bit registers -- confirm).
instruct andL_Reg_Reg_convI2L(mRegL dst, mRegL src1, mRegI src2) %{
  match(Set dst (AndL src1 (ConvI2L src2)));
  format %{ "AND $dst, $src1, $src2 @ andL_Reg_Reg_convI2L\n\t" %}
  ins_encode %{
    Register dst_reg = as_Register($dst$$reg);
    Register src1_reg = as_Register($src1$$reg);
    Register src2_reg = as_Register($src2$$reg);

    __ andr(dst_reg, src1_reg, src2_reg);
  %}
  ins_pipe( ialu_regL_regL );
%}
// AndL with an immediate fitting andi's zero-extended 16-bit field.
instruct andL_Reg_imm_0_65535(mRegL dst, mRegL src1, immL_0_65535 src2) %{
  match(Set dst (AndL src1 src2));
  ins_cost(60);

  format %{ "and $dst, $src1, $src2 #@andL_Reg_imm_0_65535" %}
  ins_encode %{
    Register dst = $dst$$Register;
    Register src = $src1$$Register;
    long val = $src2$$constant;

    __ andi(dst, src, val);
  %}
  ins_pipe( ialu_regI_regI );
%}

// (ConvL2I (AndL src 0..65535)): the masked value fits in 16 bits, so the
// narrowing is free and a single andi suffices.
instruct andL2I_Reg_imm_0_65535(mRegI dst, mRegL src1, immL_0_65535 src2) %{
  match(Set dst (ConvL2I (AndL src1 src2)));
  ins_cost(60);

  format %{ "and $dst, $src1, $src2 #@andL2I_Reg_imm_0_65535" %}
  ins_encode %{
    Register dst = $dst$$Register;
    Register src = $src1$$Register;
    long val = $src2$$constant;

    __ andi(dst, src, val);
  %}
  ins_pipe( ialu_regI_regI );
%}
10448 /*
10449 instruct andnL_Reg_nReg(mRegL dst, mRegL src1, mRegL src2, immL_M1 M1) %{
10450 match(Set dst (AndL src1 (XorL src2 M1)));
10451 predicate(UseLoongsonISA);
10453 format %{ "andn $dst, $src1, $src2 #@andnL_Reg_nReg" %}
10454 ins_encode %{
10455 Register dst = $dst$$Register;
10456 Register src1 = $src1$$Register;
10457 Register src2 = $src2$$Register;
10459 __ gsandn(dst, src1, src2);
10460 %}
10461 ins_pipe( ialu_regI_regI );
10462 %}
10463 */
10465 /*
10466 instruct ornL_Reg_nReg(mRegL dst, mRegL src1, mRegL src2, immL_M1 M1) %{
10467 match(Set dst (OrL src1 (XorL src2 M1)));
10468 predicate(UseLoongsonISA);
10470 format %{ "orn $dst, $src1, $src2 #@ornL_Reg_nReg" %}
10471 ins_encode %{
10472 Register dst = $dst$$Register;
10473 Register src1 = $src1$$Register;
10474 Register src2 = $src2$$Register;
10476 __ gsorn(dst, src1, src2);
10477 %}
10478 ins_pipe( ialu_regI_regI );
10479 %}
10480 */
10482 /*
10483 instruct andnL_nReg_Reg(mRegL dst, mRegL src1, mRegL src2, immL_M1 M1) %{
10484 match(Set dst (AndL (XorL src1 M1) src2));
10485 predicate(UseLoongsonISA);
10487 format %{ "andn $dst, $src2, $src1 #@andnL_nReg_Reg" %}
10488 ins_encode %{
10489 Register dst = $dst$$Register;
10490 Register src1 = $src1$$Register;
10491 Register src2 = $src2$$Register;
10493 __ gsandn(dst, src2, src1);
10494 %}
10495 ins_pipe( ialu_regI_regI );
10496 %}
10497 */
10499 /*
10500 instruct ornL_nReg_Reg(mRegL dst, mRegL src1, mRegL src2, immL_M1 M1) %{
10501 match(Set dst (OrL (XorL src1 M1) src2));
10502 predicate(UseLoongsonISA);
10504 format %{ "orn $dst, $src2, $src1 #@ornL_nReg_Reg" %}
10505 ins_encode %{
10506 Register dst = $dst$$Register;
10507 Register src1 = $src1$$Register;
10508 Register src2 = $src2$$Register;
10510 __ gsorn(dst, src2, src1);
10511 %}
10512 ins_pipe( ialu_regI_regI );
10513 %}
10514 */
// AndL dst, -8: clear bits [0,2] in place -- dins inserts zeros from R0.
instruct andL_Reg_immL_M8(mRegL dst, immL_M8 M8) %{
  match(Set dst (AndL dst M8));
  ins_cost(60);

  format %{ "and $dst, $dst, $M8 #@andL_Reg_immL_M8" %}
  ins_encode %{
    Register dst = $dst$$Register;

    __ dins(dst, R0, 0, 3);
  %}
  ins_pipe( ialu_regI_regI );
%}

// AndL dst, -5 (~0x4): clear bit 2 in place.
instruct andL_Reg_immL_M5(mRegL dst, immL_M5 M5) %{
  match(Set dst (AndL dst M5));
  ins_cost(60);

  format %{ "and $dst, $dst, $M5 #@andL_Reg_immL_M5" %}
  ins_encode %{
    Register dst = $dst$$Register;

    __ dins(dst, R0, 2, 1);
  %}
  ins_pipe( ialu_regI_regI );
%}

// AndL dst, -7 (~0x6): clear bits [1,2] in place.
instruct andL_Reg_immL_M7(mRegL dst, immL_M7 M7) %{
  match(Set dst (AndL dst M7));
  ins_cost(60);

  format %{ "and $dst, $dst, $M7 #@andL_Reg_immL_M7" %}
  ins_encode %{
    Register dst = $dst$$Register;

    __ dins(dst, R0, 1, 2);
  %}
  ins_pipe( ialu_regI_regI );
%}

// AndL dst, -4 (~0x3): clear bits [0,1] in place.
instruct andL_Reg_immL_M4(mRegL dst, immL_M4 M4) %{
  match(Set dst (AndL dst M4));
  ins_cost(60);

  format %{ "and $dst, $dst, $M4 #@andL_Reg_immL_M4" %}
  ins_encode %{
    Register dst = $dst$$Register;

    __ dins(dst, R0, 0, 2);
  %}
  ins_pipe( ialu_regI_regI );
%}

// AndL dst, -121 (~0x78): clear bits [3,6] in place.
instruct andL_Reg_immL_M121(mRegL dst, immL_M121 M121) %{
  match(Set dst (AndL dst M121));
  ins_cost(60);

  format %{ "and $dst, $dst, $M121 #@andL_Reg_immL_M121" %}
  ins_encode %{
    Register dst = $dst$$Register;

    __ dins(dst, R0, 3, 4);
  %}
  ins_pipe( ialu_regI_regI );
%}
// Or Long Register with Register
// OrL: 64-bit register-register or.
instruct orL_Reg_Reg(mRegL dst, mRegL src1, mRegL src2) %{
  match(Set dst (OrL src1 src2));
  format %{ "OR $dst, $src1, $src2 @ orL_Reg_Reg\t" %}
  ins_encode %{
    Register dst_reg = $dst$$Register;
    Register src1_reg = $src1$$Register;
    Register src2_reg = $src2$$Register;

    __ orr(dst_reg, src1_reg, src2_reg);
  %}
  ins_pipe( ialu_regL_regL );
%}

// OrL where the left operand is a pointer reinterpreted as a long
// (CastP2X is a no-op at the register level).
instruct orL_Reg_P2XReg(mRegL dst, mRegP src1, mRegL src2) %{
  match(Set dst (OrL (CastP2X src1) src2));
  format %{ "OR $dst, $src1, $src2 @ orL_Reg_P2XReg\t" %}
  ins_encode %{
    Register dst_reg = $dst$$Register;
    Register src1_reg = $src1$$Register;
    Register src2_reg = $src2$$Register;

    __ orr(dst_reg, src1_reg, src2_reg);
  %}
  ins_pipe( ialu_regL_regL );
%}
// Xor Long Register with Register
// XorL: 64-bit register-register xor.
instruct xorL_Reg_Reg(mRegL dst, mRegL src1, mRegL src2) %{
  match(Set dst (XorL src1 src2));
  format %{ "XOR $dst, $src1, $src2 @ xorL_Reg_Reg\t" %}
  ins_encode %{
    Register dst_reg = as_Register($dst$$reg);
    Register src1_reg = as_Register($src1$$reg);
    Register src2_reg = as_Register($src2$$reg);

    __ xorr(dst_reg, src1_reg, src2_reg);
  %}
  ins_pipe( ialu_regL_regL );
%}
// Shift Left by 8-bit immediate
// LShiftI by a constant; sll uses the low 5 bits of the shift amount.
instruct salI_Reg_imm(mRegI dst, mRegI src, immI8 shift) %{
  match(Set dst (LShiftI src shift));

  format %{ "SHL $dst, $src, $shift #@salI_Reg_imm" %}
  ins_encode %{
    Register src = $src$$Register;
    Register dst = $dst$$Register;
    int shamt = $shift$$constant;

    __ sll(dst, src, shamt);
  %}
  ins_pipe( ialu_regI_regI );
%}

// LShiftI of a narrowed long: sll operates on (and sign-extends) the low
// 32 bits, so the ConvL2I truncation is implicit.
instruct salL2I_Reg_imm(mRegI dst, mRegL src, immI8 shift) %{
  match(Set dst (LShiftI (ConvL2I src) shift));

  format %{ "SHL $dst, $src, $shift #@salL2I_Reg_imm" %}
  ins_encode %{
    Register src = $src$$Register;
    Register dst = $dst$$Register;
    int shamt = $shift$$constant;

    __ sll(dst, src, shamt);
  %}
  ins_pipe( ialu_regI_regI );
%}

// (AndI (LShiftI src 16) 0xFFFF0000): the shift already zero-fills the low
// 16 bits, so the mask is redundant and a bare sll 16 suffices.
instruct salI_Reg_imm_and_M65536(mRegI dst, mRegI src, immI_16 shift, immI_M65536 mask) %{
  match(Set dst (AndI (LShiftI src shift) mask));

  format %{ "SHL $dst, $src, $shift #@salI_Reg_imm_and_M65536" %}
  ins_encode %{
    Register src = $src$$Register;
    Register dst = $dst$$Register;

    __ sll(dst, src, 16);
  %}
  ins_pipe( ialu_regI_regI );
%}
// (i2s of (int)(src & 7)): the value fits in 3 bits, so the <<16 >>16
// sign-extension is a no-op and a single andi suffices.
instruct land7_2_s(mRegI dst, mRegL src, immL7 seven, immI_16 sixteen)
%{
  match(Set dst (RShiftI (LShiftI (ConvL2I (AndL src seven)) sixteen) sixteen));

  format %{ "andi $dst, $src, 7\t# @land7_2_s" %}
  ins_encode %{
    Register src = $src$$Register;
    Register dst = $dst$$Register;

    __ andi(dst, src, 7);
  %}
  ins_pipe(ialu_regI_regI);
%}

// (i2s of (src1 | imm)) where imm is 0..32767: the or result's low 15 bits
// are what survives i2s here, so ori alone gives the same value.
instruct ori2s(mRegI dst, mRegI src1, immI_0_32767 src2, immI_16 sixteen)
%{
  match(Set dst (RShiftI (LShiftI (OrI src1 src2) sixteen) sixteen));

  format %{ "ori $dst, $src1, $src2\t# @ori2s" %}
  ins_encode %{
    Register src = $src1$$Register;
    int val = $src2$$constant;
    Register dst = $dst$$Register;

    __ ori(dst, src, val);
  %}
  ins_pipe(ialu_regI_regI);
%}

// Logical Shift Right by 16, followed by Arithmetic Shift Left by 16.
// This idiom is used by the compiler for the i2s bytecode: seh sign-extends
// the low halfword in one instruction.
instruct i2s(mRegI dst, mRegI src, immI_16 sixteen)
%{
  match(Set dst (RShiftI (LShiftI src sixteen) sixteen));

  format %{ "i2s $dst, $src\t# @i2s" %}
  ins_encode %{
    Register src = $src$$Register;
    Register dst = $dst$$Register;

    __ seh(dst, src);
  %}
  ins_pipe(ialu_regI_regI);
%}

// Logical Shift Right by 24, followed by Arithmetic Shift Left by 24.
// This idiom is used by the compiler for the i2b bytecode: seb sign-extends
// the low byte in one instruction.
instruct i2b(mRegI dst, mRegI src, immI_24 twentyfour)
%{
  match(Set dst (RShiftI (LShiftI src twentyfour) twentyfour));

  format %{ "i2b $dst, $src\t# @i2b" %}
  ins_encode %{
    Register src = $src$$Register;
    Register dst = $dst$$Register;

    __ seb(dst, src);
  %}
  ins_pipe(ialu_regI_regI);
%}
// LShiftI of a narrowed long by a constant.
// NOTE(review): this match rule duplicates salL2I_Reg_imm above -- both
// match (LShiftI (ConvL2I src) shift); confirm which one adlc selects.
instruct salI_RegL2I_imm(mRegI dst, mRegL src, immI8 shift) %{
  match(Set dst (LShiftI (ConvL2I src) shift));

  format %{ "SHL $dst, $src, $shift #@salI_RegL2I_imm" %}
  ins_encode %{
    Register src = $src$$Register;
    Register dst = $dst$$Register;
    int shamt = $shift$$constant;

    __ sll(dst, src, shamt);
  %}
  ins_pipe( ialu_regI_regI );
%}

// Shift Left by register amount (sllv uses the low 5 bits of shamt).
instruct salI_Reg_Reg(mRegI dst, mRegI src, mRegI shift) %{
  match(Set dst (LShiftI src shift));

  format %{ "SHL $dst, $src, $shift #@salI_Reg_Reg" %}
  ins_encode %{
    Register src = $src$$Register;
    Register dst = $dst$$Register;
    Register shamt = $shift$$Register;
    __ sllv(dst, src, shamt);
  %}
  ins_pipe( ialu_regI_regI );
%}
// Shift Left Long
// LShiftL by a constant: dsll encodes shamt 0..31; shamt 32..63 needs
// dsll32 with (shamt - 32). The shift amount is reduced modulo 64.
instruct salL_Reg_imm(mRegL dst, mRegL src, immI8 shift) %{
  //predicate(UseNewLongLShift);
  match(Set dst (LShiftL src shift));
  ins_cost(100);
  format %{ "salL $dst, $src, $shift @ salL_Reg_imm" %}
  ins_encode %{
    Register src_reg = as_Register($src$$reg);
    Register dst_reg = as_Register($dst$$reg);
    int shamt = $shift$$constant;

    // is_simm(shamt, 5) is true for 0..15: emit dsll directly.
    if (__ is_simm(shamt, 5))
      __ dsll(dst_reg, src_reg, shamt);
    else
    {
      int sa = Assembler::low(shamt, 6); // shift amount mod 64
      if (sa < 32) {
        __ dsll(dst_reg, src_reg, sa);
      } else {
        __ dsll32(dst_reg, src_reg, sa - 32);
      }
    }
  %}
  ins_pipe( ialu_regL_regL );
%}

// LShiftL of a sign-extended int by a constant.
// NOTE(review): this match rule duplicates salL_convI2L_Reg_imm below --
// confirm which one adlc selects.
instruct salL_RegI2L_imm(mRegL dst, mRegI src, immI8 shift) %{
  //predicate(UseNewLongLShift);
  match(Set dst (LShiftL (ConvI2L src) shift));
  ins_cost(100);
  format %{ "salL $dst, $src, $shift @ salL_RegI2L_imm" %}
  ins_encode %{
    Register src_reg = as_Register($src$$reg);
    Register dst_reg = as_Register($dst$$reg);
    int shamt = $shift$$constant;

    if (__ is_simm(shamt, 5))
      __ dsll(dst_reg, src_reg, shamt);
    else
    {
      int sa = Assembler::low(shamt, 6);
      if (sa < 32) {
        __ dsll(dst_reg, src_reg, sa);
      } else {
        __ dsll32(dst_reg, src_reg, sa - 32);
      }
    }
  %}
  ins_pipe( ialu_regL_regL );
%}

// Shift Left Long by register amount (dsllv uses the low 6 bits).
instruct salL_Reg_Reg(mRegL dst, mRegL src, mRegI shift) %{
  //predicate(UseNewLongLShift);
  match(Set dst (LShiftL src shift));
  ins_cost(100);
  format %{ "salL $dst, $src, $shift @ salL_Reg_Reg" %}
  ins_encode %{
    Register src_reg = as_Register($src$$reg);
    Register dst_reg = as_Register($dst$$reg);

    __ dsllv(dst_reg, src_reg, $shift$$Register);
  %}
  ins_pipe( ialu_regL_regL );
%}

// LShiftL of a sign-extended int by a constant (same split as salL_Reg_imm).
instruct salL_convI2L_Reg_imm(mRegL dst, mRegI src, immI8 shift) %{
  match(Set dst (LShiftL (ConvI2L src) shift));
  ins_cost(100);
  format %{ "salL $dst, $src, $shift @ salL_convI2L_Reg_imm" %}
  ins_encode %{
    Register src_reg = as_Register($src$$reg);
    Register dst_reg = as_Register($dst$$reg);
    int shamt = $shift$$constant;

    if (__ is_simm(shamt, 5)) {
      __ dsll(dst_reg, src_reg, shamt);
    } else {
      int sa = Assembler::low(shamt, 6);
      if (sa < 32) {
        __ dsll(dst_reg, src_reg, sa);
      } else {
        __ dsll32(dst_reg, src_reg, sa - 32);
      }
    }
  %}
  ins_pipe( ialu_regL_regL );
%}
// Shift Right Long
// RShiftL by a constant: dsra for shamt 0..31, dsra32 for 32..63.
instruct sarL_Reg_imm(mRegL dst, mRegL src, immI8 shift) %{
  match(Set dst (RShiftL src shift));
  ins_cost(100);
  format %{ "sarL $dst, $src, $shift @ sarL_Reg_imm" %}
  ins_encode %{
    Register src_reg = as_Register($src$$reg);
    Register dst_reg = as_Register($dst$$reg);
    // Shift amount is taken modulo 64, per Java long-shift semantics.
    int shamt = ($shift$$constant & 0x3f);
    if (__ is_simm(shamt, 5)) // 0..15: emit dsra directly
      __ dsra(dst_reg, src_reg, shamt);
    else {
      int sa = Assembler::low(shamt, 6);
      if (sa < 32) {
        __ dsra(dst_reg, src_reg, sa);
      } else {
        __ dsra32(dst_reg, src_reg, sa - 32);
      }
    }
  %}
  ins_pipe( ialu_regL_regL );
%}

// (ConvL2I (RShiftL src 32..63)): dsra32 leaves a properly sign-extended
// 32-bit value, so the narrowing is free.
instruct sarL2I_Reg_immI_32_63(mRegI dst, mRegL src, immI_32_63 shift) %{
  match(Set dst (ConvL2I (RShiftL src shift)));
  ins_cost(100);
  format %{ "sarL $dst, $src, $shift @ sarL2I_Reg_immI_32_63" %}
  ins_encode %{
    Register src_reg = as_Register($src$$reg);
    Register dst_reg = as_Register($dst$$reg);
    int shamt = $shift$$constant;

    __ dsra32(dst_reg, src_reg, shamt - 32);
  %}
  ins_pipe( ialu_regL_regL );
%}

// Shift Right Long arithmetically
// RShiftL by register amount (dsrav uses the low 6 bits).
instruct sarL_Reg_Reg(mRegL dst, mRegL src, mRegI shift) %{
  //predicate(UseNewLongLShift);
  match(Set dst (RShiftL src shift));
  ins_cost(100);
  format %{ "sarL $dst, $src, $shift @ sarL_Reg_Reg" %}
  ins_encode %{
    Register src_reg = as_Register($src$$reg);
    Register dst_reg = as_Register($dst$$reg);

    __ dsrav(dst_reg, src_reg, $shift$$Register);
  %}
  ins_pipe( ialu_regL_regL );
%}
10896 // Shift Right Long logically
// slrL_Reg_Reg: logical (unsigned) right shift of a long by a register
// amount; dsrlv uses the low 6 bits of the shift register.
10897 instruct slrL_Reg_Reg(mRegL dst, mRegL src, mRegI shift) %{
10898 match(Set dst (URShiftL src shift));
10899 ins_cost(100);
10900 format %{ "slrL $dst, $src, $shift @ slrL_Reg_Reg" %}
10901 ins_encode %{
10902 Register src_reg = as_Register($src$$reg);
10903 Register dst_reg = as_Register($dst$$reg);
10905 __ dsrlv(dst_reg, src_reg, $shift$$Register);
10906 %}
10907 ins_pipe( ialu_regL_regL );
10908 %}
// slrL_Reg_immI_0_31: logical right shift of a long by an immediate in
// 0..31 — a single dsrl suffices (no 32..63 split needed here).
10910 instruct slrL_Reg_immI_0_31(mRegL dst, mRegL src, immI_0_31 shift) %{
10911 match(Set dst (URShiftL src shift));
10912 ins_cost(80);
10913 format %{ "slrL $dst, $src, $shift @ slrL_Reg_immI_0_31" %}
10914 ins_encode %{
10915 Register src_reg = as_Register($src$$reg);
10916 Register dst_reg = as_Register($dst$$reg);
10917 int shamt = $shift$$constant;
10919 __ dsrl(dst_reg, src_reg, shamt);
10920 %}
10921 ins_pipe( ialu_regL_regL );
10922 %}
// slrL_Reg_immI_0_31_and_max_int: fuse ((int)(src >>> shift)) & 0x7fffffff
// into one dext (extract bit-field). Extracting 31 bits starting at
// 'shamt' equals the shift followed by the MaxInt mask.
10924 instruct slrL_Reg_immI_0_31_and_max_int(mRegI dst, mRegL src, immI_0_31 shift, immI_MaxI max_int) %{
10925 match(Set dst (AndI (ConvL2I (URShiftL src shift)) max_int));
10926 ins_cost(80);
10927 format %{ "dext $dst, $src, $shift, 31 @ slrL_Reg_immI_0_31_and_max_int" %}
10928 ins_encode %{
10929 Register src_reg = as_Register($src$$reg);
10930 Register dst_reg = as_Register($dst$$reg);
10931 int shamt = $shift$$constant;
10933 __ dext(dst_reg, src_reg, shamt, 31);
10934 %}
10935 ins_pipe( ialu_regL_regL );
10936 %}
// slrL_P2XReg_immI_0_31: logical right shift (0..31) of a pointer
// reinterpreted as a long (CastP2X). Same emission as the mRegL rule;
// the cast is free since the pointer already sits in a GPR.
10938 instruct slrL_P2XReg_immI_0_31(mRegL dst, mRegP src, immI_0_31 shift) %{
10939 match(Set dst (URShiftL (CastP2X src) shift));
10940 ins_cost(80);
10941 format %{ "slrL $dst, $src, $shift @ slrL_P2XReg_immI_0_31" %}
10942 ins_encode %{
10943 Register src_reg = as_Register($src$$reg);
10944 Register dst_reg = as_Register($dst$$reg);
10945 int shamt = $shift$$constant;
10947 __ dsrl(dst_reg, src_reg, shamt);
10948 %}
10949 ins_pipe( ialu_regL_regL );
10950 %}
// slrL_Reg_immI_32_63: logical right shift of a long by 32..63.
// dsrl32 encodes the amount as (shamt - 32).
10952 instruct slrL_Reg_immI_32_63(mRegL dst, mRegL src, immI_32_63 shift) %{
10953 match(Set dst (URShiftL src shift));
10954 ins_cost(80);
10955 format %{ "slrL $dst, $src, $shift @ slrL_Reg_immI_32_63" %}
10956 ins_encode %{
10957 Register src_reg = as_Register($src$$reg);
10958 Register dst_reg = as_Register($dst$$reg);
10959 int shamt = $shift$$constant;
10961 __ dsrl32(dst_reg, src_reg, shamt - 32);
10962 %}
10963 ins_pipe( ialu_regL_regL );
10964 %}
// slrL_Reg_immI_convL2I: fused (int)(src >>> shift) for shift > 32.
// With shift > 32 the result fits in 31 bits, so dsrl32 already yields
// a correctly sign-extended (non-negative) int. The predicate excludes
// shift == 32, where the high bit could make the int negative.
10966 instruct slrL_Reg_immI_convL2I(mRegI dst, mRegL src, immI_32_63 shift) %{
10967 match(Set dst (ConvL2I (URShiftL src shift)));
10968 predicate(n->in(1)->in(2)->get_int() > 32);
10969 ins_cost(80);
10970 format %{ "slrL $dst, $src, $shift @ slrL_Reg_immI_convL2I" %}
10971 ins_encode %{
10972 Register src_reg = as_Register($src$$reg);
10973 Register dst_reg = as_Register($dst$$reg);
10974 int shamt = $shift$$constant;
10976 __ dsrl32(dst_reg, src_reg, shamt - 32);
10977 %}
10978 ins_pipe( ialu_regL_regL );
10979 %}
// slrL_P2XReg_immI_32_63: logical right shift (32..63) of a pointer
// reinterpreted as a long (CastP2X); dsrl32 encodes (shamt - 32).
10981 instruct slrL_P2XReg_immI_32_63(mRegL dst, mRegP src, immI_32_63 shift) %{
10982 match(Set dst (URShiftL (CastP2X src) shift));
10983 ins_cost(80);
10984 format %{ "slrL $dst, $src, $shift @ slrL_P2XReg_immI_32_63" %}
10985 ins_encode %{
10986 Register src_reg = as_Register($src$$reg);
10987 Register dst_reg = as_Register($dst$$reg);
10988 int shamt = $shift$$constant;
10990 __ dsrl32(dst_reg, src_reg, shamt - 32);
10991 %}
10992 ins_pipe( ialu_regL_regL );
10993 %}
10995 // Xor Instructions
10996 // Xor Register with Register
// xorI_Reg_Reg: 32-bit xor. The trailing sll-by-0 re-sign-extends the
// low 32 bits into the 64-bit register, keeping the canonical int form.
10997 instruct xorI_Reg_Reg(mRegI dst, mRegI src1, mRegI src2) %{
10998 match(Set dst (XorI src1 src2));
11000 format %{ "XOR $dst, $src1, $src2 #@xorI_Reg_Reg" %}
11002 ins_encode %{
11003 Register dst = $dst$$Register;
11004 Register src1 = $src1$$Register;
11005 Register src2 = $src2$$Register;
11006 __ xorr(dst, src1, src2);
11007 __ sll(dst, dst, 0); /* long -> int */
11008 %}
11010 ins_pipe( ialu_regI_regI );
11011 %}
11013 // Or Instructions
11014 // Or Register with Register
// orI_Reg_Reg: 32-bit or. No re-sign-extension is emitted here —
// or of two canonically sign-extended ints stays canonical.
11015 instruct orI_Reg_Reg(mRegI dst, mRegI src1, mRegI src2) %{
11016 match(Set dst (OrI src1 src2));
11018 format %{ "OR $dst, $src1, $src2 #@orI_Reg_Reg" %}
11019 ins_encode %{
11020 Register dst = $dst$$Register;
11021 Register src1 = $src1$$Register;
11022 Register src2 = $src2$$Register;
11023 __ orr(dst, src1, src2);
11024 %}
11026 ins_pipe( ialu_regI_regI );
11027 %}
// rotI_shr_logical_Reg: match (src >>> rshift) | ((src & 1) << lshift)
// where rshift + lshift == 32 (enforced by the predicate). The low bit
// is rotated to the top via rotr-by-1, then the remaining rshift-1 bits
// are shifted out; srl is skipped entirely when rshift == 1.
11029 instruct rotI_shr_logical_Reg(mRegI dst, mRegI src, immI_0_31 rshift, immI_0_31 lshift, immI_1 one) %{
11030 match(Set dst (OrI (URShiftI src rshift) (LShiftI (AndI src one) lshift)));
11031 predicate(32 == ((n->in(1)->in(2)->get_int() + n->in(2)->in(2)->get_int())));
11033 format %{ "rotr $dst, $src, 1 ...\n\t"
11034 "srl $dst, $dst, ($rshift-1) @ rotI_shr_logical_Reg" %}
11035 ins_encode %{
11036 Register dst = $dst$$Register;
11037 Register src = $src$$Register;
11038 int rshift = $rshift$$constant;
11040 __ rotr(dst, src, 1);
11041 if (rshift - 1) {
11042 __ srl(dst, dst, rshift - 1);
11043 }
11044 %}
11046 ins_pipe( ialu_regI_regI );
11047 %}
// orI_Reg_castP2X: or a long with a pointer viewed as an integer.
// NOTE(review): the rule matches OrI but uses mRegL operands and a full
// 64-bit orr — presumably intentional since CastP2X values live in
// 64-bit GPRs; confirm against the matcher's operand typing.
11049 instruct orI_Reg_castP2X(mRegL dst, mRegL src1, mRegP src2) %{
11050 match(Set dst (OrI src1 (CastP2X src2)));
11052 format %{ "OR $dst, $src1, $src2 #@orI_Reg_castP2X" %}
11053 ins_encode %{
11054 Register dst = $dst$$Register;
11055 Register src1 = $src1$$Register;
11056 Register src2 = $src2$$Register;
11057 __ orr(dst, src1, src2);
11058 %}
11060 ins_pipe( ialu_regI_regI );
11061 %}
11063 // Logical Shift Right by 8-bit immediate
// shr_logical_Reg_imm: 32-bit unsigned right shift by an immediate;
// srl sign-extends its 32-bit result, keeping the canonical int form.
11064 instruct shr_logical_Reg_imm(mRegI dst, mRegI src, immI8 shift) %{
11065 match(Set dst (URShiftI src shift));
11066 // effect(KILL cr);
11068 format %{ "SRL $dst, $src, $shift #@shr_logical_Reg_imm" %}
11069 ins_encode %{
11070 Register src = $src$$Register;
11071 Register dst = $dst$$Register;
11072 int shift = $shift$$constant;
11074 __ srl(dst, src, shift);
11075 %}
11076 ins_pipe( ialu_regI_regI );
11077 %}
// shr_logical_Reg_imm_nonneg_mask: fuse (src >>> shift) & mask, where
// mask is a contiguous low-bit mask (2^k - 1), into a single ext
// (bit-field extract). is_int_mask returns the mask's bit count.
11079 instruct shr_logical_Reg_imm_nonneg_mask(mRegI dst, mRegI src, immI_0_31 shift, immI_nonneg_mask mask) %{
11080 match(Set dst (AndI (URShiftI src shift) mask));
11082 format %{ "ext $dst, $src, $shift, one-bits($mask) #@shr_logical_Reg_imm_nonneg_mask" %}
11083 ins_encode %{
11084 Register src = $src$$Register;
11085 Register dst = $dst$$Register;
11086 int pos = $shift$$constant;
11087 int size = Assembler::is_int_mask($mask$$constant);
11089 __ ext(dst, src, pos, size);
11090 %}
11091 ins_pipe( ialu_regI_regI );
11092 %}
// rolI_Reg_immI_0_31: int rotate-left matched as (dst << l) | (dst >>> r)
// with (l + r) % 32 == 0; implemented as a rotate-right by r.
11094 instruct rolI_Reg_immI_0_31(mRegI dst, immI_0_31 lshift, immI_0_31 rshift)
11095 %{
11096 predicate(0 == ((n->in(1)->in(2)->get_int() + n->in(2)->in(2)->get_int()) & 0x1f));
11097 match(Set dst (OrI (LShiftI dst lshift) (URShiftI dst rshift)));
11099 ins_cost(100);
11100 format %{ "rotr $dst, $dst, $rshift #@rolI_Reg_immI_0_31" %}
11101 ins_encode %{
11102 Register dst = $dst$$Register;
11103 int sa = $rshift$$constant;
11105 __ rotr(dst, dst, sa);
11106 %}
11107 ins_pipe( ialu_regI_regI );
11108 %}
// rolL_Reg_immI_0_31: long rotate-left with rshift in 0..31 (lshift in
// 32..63, sum ≡ 0 mod 64); implemented as drotr by rshift.
11110 instruct rolL_Reg_immI_0_31(mRegL dst, immI_32_63 lshift, immI_0_31 rshift)
11111 %{
11112 predicate(0 == ((n->in(1)->in(2)->get_int() + n->in(2)->in(2)->get_int()) & 0x3f));
11113 match(Set dst (OrL (LShiftL dst lshift) (URShiftL dst rshift)));
11115 ins_cost(100);
11116 format %{ "rotr $dst, $dst, $rshift #@rolL_Reg_immI_0_31" %}
11117 ins_encode %{
11118 Register dst = $dst$$Register;
11119 int sa = $rshift$$constant;
11121 __ drotr(dst, dst, sa);
11122 %}
11123 ins_pipe( ialu_regI_regI );
11124 %}
// rolL_Reg_immI_32_63: long rotate-left with rshift in 32..63;
// drotr32 encodes the rotate amount as (sa - 32).
11126 instruct rolL_Reg_immI_32_63(mRegL dst, immI_0_31 lshift, immI_32_63 rshift)
11127 %{
11128 predicate(0 == ((n->in(1)->in(2)->get_int() + n->in(2)->in(2)->get_int()) & 0x3f));
11129 match(Set dst (OrL (LShiftL dst lshift) (URShiftL dst rshift)));
11131 ins_cost(100);
11132 format %{ "rotr $dst, $dst, $rshift #@rolL_Reg_immI_32_63" %}
11133 ins_encode %{
11134 Register dst = $dst$$Register;
11135 int sa = $rshift$$constant;
11137 __ drotr32(dst, dst, sa - 32);
11138 %}
11139 ins_pipe( ialu_regI_regI );
11140 %}
// rorI_Reg_immI_0_31: int rotate-right — same Or pattern as the rol
// rule but with the URShift on the left; emits rotr by rshift.
11142 instruct rorI_Reg_immI_0_31(mRegI dst, immI_0_31 rshift, immI_0_31 lshift)
11143 %{
11144 predicate(0 == ((n->in(1)->in(2)->get_int() + n->in(2)->in(2)->get_int()) & 0x1f));
11145 match(Set dst (OrI (URShiftI dst rshift) (LShiftI dst lshift)));
11147 ins_cost(100);
11148 format %{ "rotr $dst, $dst, $rshift #@rorI_Reg_immI_0_31" %}
11149 ins_encode %{
11150 Register dst = $dst$$Register;
11151 int sa = $rshift$$constant;
11153 __ rotr(dst, dst, sa);
11154 %}
11155 ins_pipe( ialu_regI_regI );
11156 %}
// rorL_Reg_immI_0_31: long rotate-right with rshift in 0..31;
// emits drotr by rshift.
11158 instruct rorL_Reg_immI_0_31(mRegL dst, immI_0_31 rshift, immI_32_63 lshift)
11159 %{
11160 predicate(0 == ((n->in(1)->in(2)->get_int() + n->in(2)->in(2)->get_int()) & 0x3f));
11161 match(Set dst (OrL (URShiftL dst rshift) (LShiftL dst lshift)));
11163 ins_cost(100);
11164 format %{ "rotr $dst, $dst, $rshift #@rorL_Reg_immI_0_31" %}
11165 ins_encode %{
11166 Register dst = $dst$$Register;
11167 int sa = $rshift$$constant;
11169 __ drotr(dst, dst, sa);
11170 %}
11171 ins_pipe( ialu_regI_regI );
11172 %}
// rorL_Reg_immI_32_63: long rotate-right with rshift in 32..63;
// drotr32 encodes the rotate amount as (sa - 32).
11174 instruct rorL_Reg_immI_32_63(mRegL dst, immI_32_63 rshift, immI_0_31 lshift)
11175 %{
11176 predicate(0 == ((n->in(1)->in(2)->get_int() + n->in(2)->in(2)->get_int()) & 0x3f));
11177 match(Set dst (OrL (URShiftL dst rshift) (LShiftL dst lshift)));
11179 ins_cost(100);
11180 format %{ "rotr $dst, $dst, $rshift #@rorL_Reg_immI_32_63" %}
11181 ins_encode %{
11182 Register dst = $dst$$Register;
11183 int sa = $rshift$$constant;
11185 __ drotr32(dst, dst, sa - 32);
11186 %}
11187 ins_pipe( ialu_regI_regI );
11188 %}
11190 // Logical Shift Right
// shr_logical_Reg_Reg: 32-bit unsigned right shift by a register;
// srlv uses the low 5 bits of the shift register.
11191 instruct shr_logical_Reg_Reg(mRegI dst, mRegI src, mRegI shift) %{
11192 match(Set dst (URShiftI src shift));
11194 format %{ "SRL $dst, $src, $shift #@shr_logical_Reg_Reg" %}
11195 ins_encode %{
11196 Register src = $src$$Register;
11197 Register dst = $dst$$Register;
11198 Register shift = $shift$$Register;
11199 __ srlv(dst, src, shift);
11200 %}
11201 ins_pipe( ialu_regI_regI );
11202 %}
// shr_arith_Reg_imm: 32-bit arithmetic right shift by an immediate.
11205 instruct shr_arith_Reg_imm(mRegI dst, mRegI src, immI8 shift) %{
11206 match(Set dst (RShiftI src shift));
11207 // effect(KILL cr);
11209 format %{ "SRA $dst, $src, $shift #@shr_arith_Reg_imm" %}
11210 ins_encode %{
11211 Register src = $src$$Register;
11212 Register dst = $dst$$Register;
11213 int shift = $shift$$constant;
11214 __ sra(dst, src, shift);
11215 %}
11216 ins_pipe( ialu_regI_regI );
11217 %}
// shr_arith_Reg_Reg: 32-bit arithmetic right shift by a register;
// srav uses the low 5 bits of the shift register.
11219 instruct shr_arith_Reg_Reg(mRegI dst, mRegI src, mRegI shift) %{
11220 match(Set dst (RShiftI src shift));
11221 // effect(KILL cr);
11223 format %{ "SRA $dst, $src, $shift #@shr_arith_Reg_Reg" %}
11224 ins_encode %{
11225 Register src = $src$$Register;
11226 Register dst = $dst$$Register;
11227 Register shift = $shift$$Register;
11228 __ srav(dst, src, shift);
11229 %}
11230 ins_pipe( ialu_regI_regI );
11231 %}
11233 //----------Convert Int to Boolean---------------------------------------------
// convI2B: int -> boolean (0 stays 0, any non-zero becomes 1).
// dst = 1; movz resets dst to 0 when the source is zero. When dst and
// src alias, the source value is saved in AT before dst is clobbered.
11235 instruct convI2B(mRegI dst, mRegI src) %{
11236 match(Set dst (Conv2B src));
11238 ins_cost(100);
11239 format %{ "convI2B $dst, $src @ convI2B" %}
11240 ins_encode %{
11241 Register dst = as_Register($dst$$reg);
11242 Register src = as_Register($src$$reg);
11244 if (dst != src) {
11245 __ daddiu(dst, R0, 1);
11246 __ movz(dst, R0, src);
11247 } else {
11248 __ move(AT, src);
11249 __ daddiu(dst, R0, 1);
11250 __ movz(dst, R0, AT);
11251 }
11252 %}
11254 ins_pipe( ialu_regL_regL );
11255 %}
// convI2L_reg: int -> long. Ints are kept sign-extended in 64-bit
// registers, so sll-by-0 (re-sign-extend) is only needed when moving to
// a different register; a same-register conversion is a no-op.
11257 instruct convI2L_reg( mRegL dst, mRegI src) %{
11258 match(Set dst (ConvI2L src));
11260 ins_cost(100);
11261 format %{ "SLL $dst, $src @ convI2L_reg\t" %}
11262 ins_encode %{
11263 Register dst = as_Register($dst$$reg);
11264 Register src = as_Register($src$$reg);
11266 if(dst != src) __ sll(dst, src, 0);
11267 %}
11268 ins_pipe( ialu_regL_regL );
11269 %}
// convL2I_reg: long -> int; sll-by-0 truncates to 32 bits and
// sign-extends, producing the canonical int representation.
11272 instruct convL2I_reg( mRegI dst, mRegL src ) %{
11273 match(Set dst (ConvL2I src));
11275 format %{ "MOV $dst, $src @ convL2I_reg" %}
11276 ins_encode %{
11277 Register dst = as_Register($dst$$reg);
11278 Register src = as_Register($src$$reg);
11280 __ sll(dst, src, 0);
11281 %}
11283 ins_pipe( ialu_regI_regI );
11284 %}
// convL2I2L_reg: (long)(int)src — truncate to 32 bits then sign-extend
// back to 64; a single sll-by-0 does both.
11286 instruct convL2I2L_reg( mRegL dst, mRegL src ) %{
11287 match(Set dst (ConvI2L (ConvL2I src)));
11289 format %{ "sll $dst, $src, 0 @ convL2I2L_reg" %}
11290 ins_encode %{
11291 Register dst = as_Register($dst$$reg);
11292 Register src = as_Register($src$$reg);
11294 __ sll(dst, src, 0);
11295 %}
11297 ins_pipe( ialu_regI_regI );
11298 %}
// convL2D_reg: long -> double. Move the 64-bit integer into an FPR
// (dmtc1), then convert in place with cvt.d.l.
11300 instruct convL2D_reg( regD dst, mRegL src ) %{
11301 match(Set dst (ConvL2D src));
11302 format %{ "convL2D $dst, $src @ convL2D_reg" %}
11303 ins_encode %{
11304 Register src = as_Register($src$$reg);
11305 FloatRegister dst = as_FloatRegister($dst$$reg);
11307 __ dmtc1(src, dst);
11308 __ cvt_d_l(dst, dst);
11309 %}
11311 ins_pipe( pipe_slow );
11312 %}
// convD2L_reg_fast: double -> long, inline fixup path.
// trunc.l.d saturates overflow to max_long; when the truncated result
// equals max_long the code disambiguates by hand: src < 0 means
// negative overflow (-> min_long) and NaN means 0, per Java semantics.
11314 instruct convD2L_reg_fast( mRegL dst, regD src ) %{
11315 match(Set dst (ConvD2L src));
11316 ins_cost(150);
11317 format %{ "convD2L $dst, $src @ convD2L_reg_fast" %}
11318 ins_encode %{
11319 Register dst = as_Register($dst$$reg);
11320 FloatRegister src = as_FloatRegister($src$$reg);
11322 Label Done;
11324 __ trunc_l_d(F30, src);
11325 // max_long: 0x7fffffffffffffff
11326 // __ set64(AT, 0x7fffffffffffffff);
// Build max_long in AT as (~0 >>> 1) — two instructions, no constant pool.
11327 __ daddiu(AT, R0, -1);
11328 __ dsrl(AT, AT, 1);
11329 __ dmfc1(dst, F30);
// Common case: no saturation, result is already correct.
11331 __ bne(dst, AT, Done);
11332 __ delayed()->mtc1(R0, F30);
11334 __ cvt_d_w(F30, F30);
// src >= 0.0 (and ordered): positive overflow, keep max_long.
11335 __ c_ult_d(src, F30);
11336 __ bc1f(Done);
11337 __ delayed()->daddiu(T9, R0, -1);
11339 __ c_un_d(src, src); //NaN?
// dst = -1 - max_long = min_long (negative overflow) ...
11340 __ subu(dst, T9, AT);
// ... unless NaN, in which case the result is 0.
11341 __ movt(dst, R0);
11343 __ bind(Done);
11344 %}
11346 ins_pipe( pipe_slow );
11347 %}
// convD2L_reg_slow: double -> long via runtime call on the exceptional
// path. NaN yields 0 up front; otherwise trunc.l.d is used and the FCSR
// invalid-operation flag (bit 16 after cfc1 $31) decides whether the
// inline result is valid or SharedRuntime::d2l must be called.
11349 instruct convD2L_reg_slow( mRegL dst, regD src ) %{
11350 match(Set dst (ConvD2L src));
11351 ins_cost(250);
11352 format %{ "convD2L $dst, $src @ convD2L_reg_slow" %}
11353 ins_encode %{
11354 Register dst = as_Register($dst$$reg);
11355 FloatRegister src = as_FloatRegister($src$$reg);
11357 Label L;
11359 __ c_un_d(src, src); //NaN?
11360 __ bc1t(L);
11361 __ delayed();
11362 __ move(dst, R0);
11364 __ trunc_l_d(F30, src);
// Read FCSR and test the invalid-operation cause/flag bit (0x10000).
11365 __ cfc1(AT, 31);
11366 __ li(T9, 0x10000);
11367 __ andr(AT, AT, T9);
11368 __ beq(AT, R0, L);
11369 __ delayed()->dmfc1(dst, F30);
// Invalid conversion: delegate to the runtime (argument in F12).
11371 __ mov_d(F12, src);
11372 __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::d2l), 1);
11373 __ move(dst, V0);
11374 __ bind(L);
11375 %}
11377 ins_pipe( pipe_slow );
11378 %}
// convF2I_reg_fast: float -> int. trunc.w.s, then force the result to 0
// when the input is NaN (c.un.s sets the condition; movt clears dst).
11380 instruct convF2I_reg_fast( mRegI dst, regF src ) %{
11381 match(Set dst (ConvF2I src));
11382 ins_cost(150);
11383 format %{ "convf2i $dst, $src @ convF2I_reg_fast" %}
11384 ins_encode %{
11385 Register dreg = $dst$$Register;
11386 FloatRegister fval = $src$$FloatRegister;
11388 __ trunc_w_s(F30, fval);
11389 __ mfc1(dreg, F30);
11390 __ c_un_s(fval, fval); //NaN?
11391 __ movt(dreg, R0);
11392 %}
11394 ins_pipe( pipe_slow );
11395 %}
// convF2I_reg_slow: float -> int with a runtime fallback. NaN yields 0;
// otherwise if trunc.w.s raised the FCSR invalid-operation flag the
// result comes from SharedRuntime::f2i. V0 is saved/restored around the
// leaf call when it is not the destination (see the 2014 bug note).
11397 instruct convF2I_reg_slow( mRegI dst, regF src ) %{
11398 match(Set dst (ConvF2I src));
11399 ins_cost(250);
11400 format %{ "convf2i $dst, $src @ convF2I_reg_slow" %}
11401 ins_encode %{
11402 Register dreg = $dst$$Register;
11403 FloatRegister fval = $src$$FloatRegister;
11404 Label L;
11406 __ c_un_s(fval, fval); //NaN?
11407 __ bc1t(L);
11408 __ delayed();
11409 __ move(dreg, R0);
11411 __ trunc_w_s(F30, fval);
11413 /* Call SharedRuntime:f2i() to do valid convention */
// FCSR bit 0x10000 = invalid-operation; if clear, the inline result stands.
11414 __ cfc1(AT, 31);
11415 __ li(T9, 0x10000);
11416 __ andr(AT, AT, T9);
11417 __ beq(AT, R0, L);
11418 __ delayed()->mfc1(dreg, F30);
11420 __ mov_s(F12, fval);
11422 /* 2014/01/08 Fu : This bug was found when running ezDS's control-panel.
11423 * J 982 C2 javax.swing.text.BoxView.layoutMajorAxis(II[I[I)V (283 bytes) @ 0x000000555c46aa74
11424 *
11425 * An interger array index has been assigned to V0, and then changed from 1 to Integer.MAX_VALUE.
11426 * V0 is corrupted during call_VM_leaf(), and should be preserved.
11427 */
11428 if(dreg != V0) {
11429 __ push(V0);
11430 }
11431 __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::f2i), 1);
11432 if(dreg != V0) {
11433 __ move(dreg, V0);
11434 __ pop(V0);
11435 }
11436 __ bind(L);
11437 %}
11439 ins_pipe( pipe_slow );
11440 %}
// convF2L_reg_fast: float -> long. trunc.l.s, then force 0 on NaN input
// (c.un.s sets the FP condition; movt clears the destination).
11442 instruct convF2L_reg_fast( mRegL dst, regF src ) %{
11443 match(Set dst (ConvF2L src));
11444 ins_cost(150);
11445 format %{ "convf2l $dst, $src @ convF2L_reg_fast" %}
11446 ins_encode %{
11447 Register dreg = $dst$$Register;
11448 FloatRegister fval = $src$$FloatRegister;
11450 __ trunc_l_s(F30, fval);
11451 __ dmfc1(dreg, F30);
11452 __ c_un_s(fval, fval); //NaN?
11453 __ movt(dreg, R0);
11454 %}
11456 ins_pipe( pipe_slow );
11457 %}
// convF2L_reg_slow: float -> long with a runtime fallback, mirroring
// convD2L_reg_slow: NaN -> 0; FCSR invalid-operation flag (0x10000)
// routes the hard cases to SharedRuntime::f2l (argument in F12).
11459 instruct convF2L_reg_slow( mRegL dst, regF src ) %{
11460 match(Set dst (ConvF2L src));
11461 ins_cost(250);
11462 format %{ "convf2l $dst, $src @ convF2L_reg_slow" %}
11463 ins_encode %{
11464 Register dst = as_Register($dst$$reg);
11465 FloatRegister fval = $src$$FloatRegister;
11466 Label L;
11468 __ c_un_s(fval, fval); //NaN?
11469 __ bc1t(L);
11470 __ delayed();
11471 __ move(dst, R0);
11473 __ trunc_l_s(F30, fval);
11474 __ cfc1(AT, 31);
11475 __ li(T9, 0x10000);
11476 __ andr(AT, AT, T9);
11477 __ beq(AT, R0, L);
11478 __ delayed()->dmfc1(dst, F30);
11480 __ mov_s(F12, fval);
11481 __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::f2l), 1);
11482 __ move(dst, V0);
11483 __ bind(L);
11484 %}
11486 ins_pipe( pipe_slow );
11487 %}
11489 instruct convL2F_reg( regF dst, mRegL src ) %{
11490 match(Set dst (ConvL2F src));
11491 format %{ "convl2f $dst, $src @ convL2F_reg" %}
11492 ins_encode %{
11493 FloatRegister dst = $dst$$FloatRegister;
11494 Register src = as_Register($src$$reg);
11495 Label L;
11497 __ dmtc1(src, dst);
11498 __ cvt_s_l(dst, dst);
11499 %}
11501 ins_pipe( pipe_slow );
11502 %}
// convI2F_reg: int -> float. Move the 32-bit integer into an FPR
// (mtc1), then convert in place with cvt.s.w.
11504 instruct convI2F_reg( regF dst, mRegI src ) %{
11505 match(Set dst (ConvI2F src));
11506 format %{ "convi2f $dst, $src @ convI2F_reg" %}
11507 ins_encode %{
11508 Register src = $src$$Register;
11509 FloatRegister dst = $dst$$FloatRegister;
11511 __ mtc1(src, dst);
11512 __ cvt_s_w(dst, dst);
11513 %}
11515 ins_pipe( fpu_regF_regF );
11516 %}
// cmpLTMask_immI0: mask = (p < 0) ? -1 : 0. An arithmetic shift right
// by 31 replicates the sign bit across the whole int.
11518 instruct cmpLTMask_immI0( mRegI dst, mRegI p, immI0 zero ) %{
11519 match(Set dst (CmpLTMask p zero));
11520 ins_cost(100);
11522 format %{ "sra $dst, $p, 31 @ cmpLTMask_immI0" %}
11523 ins_encode %{
11524 Register src = $p$$Register;
11525 Register dst = $dst$$Register;
11527 __ sra(dst, src, 31);
11528 %}
11529 ins_pipe( pipe_slow );
11530 %}
// cmpLTMask: mask = (p < q) ? -1 : 0. slt produces 0/1; negating via
// subu(R0 - dst) turns 1 into all-ones.
11533 instruct cmpLTMask( mRegI dst, mRegI p, mRegI q ) %{
11534 match(Set dst (CmpLTMask p q));
11535 ins_cost(400);
11537 format %{ "cmpLTMask $dst, $p, $q @ cmpLTMask" %}
11538 ins_encode %{
11539 Register p = $p$$Register;
11540 Register q = $q$$Register;
11541 Register dst = $dst$$Register;
11543 __ slt(dst, p, q);
11544 __ subu(dst, R0, dst);
11545 %}
11546 ins_pipe( pipe_slow );
11547 %}
// convP2B: pointer -> boolean (null -> 0, non-null -> 1). Same movz
// technique as convI2B, with AT used when dst aliases src.
11549 instruct convP2B(mRegI dst, mRegP src) %{
11550 match(Set dst (Conv2B src));
11552 ins_cost(100);
11553 format %{ "convP2B $dst, $src @ convP2B" %}
11554 ins_encode %{
11555 Register dst = as_Register($dst$$reg);
11556 Register src = as_Register($src$$reg);
11558 if (dst != src) {
11559 __ daddiu(dst, R0, 1);
11560 __ movz(dst, R0, src);
11561 } else {
11562 __ move(AT, src);
11563 __ daddiu(dst, R0, 1);
11564 __ movz(dst, R0, AT);
11565 }
11566 %}
11568 ins_pipe( ialu_regL_regL );
11569 %}
// convI2D_reg_reg: int -> double via mtc1 + cvt.d.w.
11572 instruct convI2D_reg_reg(regD dst, mRegI src) %{
11573 match(Set dst (ConvI2D src));
11574 format %{ "conI2D $dst, $src @convI2D_reg" %}
11575 ins_encode %{
11576 Register src = $src$$Register;
11577 FloatRegister dst = $dst$$FloatRegister;
11578 __ mtc1(src, dst);
11579 __ cvt_d_w(dst, dst);
11580 %}
11581 ins_pipe( fpu_regF_regF );
11582 %}
// convF2D_reg_reg: float -> double, single cvt.d.s.
11584 instruct convF2D_reg_reg(regD dst, regF src) %{
11585 match(Set dst (ConvF2D src));
11586 format %{ "convF2D $dst, $src\t# @convF2D_reg_reg" %}
11587 ins_encode %{
11588 FloatRegister dst = $dst$$FloatRegister;
11589 FloatRegister src = $src$$FloatRegister;
11591 __ cvt_d_s(dst, src);
11592 %}
11593 ins_pipe( fpu_regF_regF );
11594 %}
// convD2F_reg_reg: double -> float, single cvt.s.d.
11596 instruct convD2F_reg_reg(regF dst, regD src) %{
11597 match(Set dst (ConvD2F src));
11598 format %{ "convD2F $dst, $src\t# @convD2F_reg_reg" %}
11599 ins_encode %{
11600 FloatRegister dst = $dst$$FloatRegister;
11601 FloatRegister src = $src$$FloatRegister;
11603 __ cvt_s_d(dst, src);
11604 %}
11605 ins_pipe( fpu_regF_regF );
11606 %}
11608 // Convert a double to an int. If the double is a NAN, stuff a zero in instead.
11608 // Convert a double to an int. If the double is a NAN, stuff a zero in instead.
// convD2I_reg_reg_fast: double -> int, inline fixup path. trunc.w.d
// saturates overflow to max_int; if the truncated result equals max_int
// the code distinguishes negative overflow (-> min_int) from NaN (-> 0),
// mirroring the convD2L_reg_fast structure.
11609 instruct convD2I_reg_reg_fast( mRegI dst, regD src ) %{
11610 match(Set dst (ConvD2I src));
11612 ins_cost(150);
11613 format %{ "convD2I $dst, $src\t# @ convD2I_reg_reg_fast" %}
11615 ins_encode %{
11616 FloatRegister src = $src$$FloatRegister;
11617 Register dst = $dst$$Register;
11619 Label Done;
11621 __ trunc_w_d(F30, src);
11622 // max_int: 2147483647
11623 __ move(AT, 0x7fffffff);
11624 __ mfc1(dst, F30);
// Common case: no saturation, result is already correct.
11626 __ bne(dst, AT, Done);
11627 __ delayed()->mtc1(R0, F30);
11629 __ cvt_d_w(F30, F30);
// src >= 0.0 (and ordered): positive overflow, keep max_int.
11630 __ c_ult_d(src, F30);
11631 __ bc1f(Done);
11632 __ delayed()->addiu(T9, R0, -1);
11634 __ c_un_d(src, src); //NaN?
// dst = -1 - max_int = min_int (negative overflow), or 0 on NaN.
11635 __ subu32(dst, T9, AT);
11636 __ movt(dst, R0);
11638 __ bind(Done);
11639 %}
11640 ins_pipe( pipe_slow );
11641 %}
// convD2I_reg_reg_slow: double -> int with a runtime fallback. After
// trunc.w.d, the FCSR invalid-operation flag (bit 0x10000 of $31)
// decides whether the inline result stands or SharedRuntime::d2i is
// called (argument in F12, result in V0).
11643 instruct convD2I_reg_reg_slow( mRegI dst, regD src ) %{
11644 match(Set dst (ConvD2I src));
11646 ins_cost(250);
11647 format %{ "convD2I $dst, $src\t# @ convD2I_reg_reg_slow" %}
11649 ins_encode %{
11650 FloatRegister src = $src$$FloatRegister;
11651 Register dst = $dst$$Register;
11652 Label L;
11654 __ trunc_w_d(F30, src);
11655 __ cfc1(AT, 31);
11656 __ li(T9, 0x10000);
11657 __ andr(AT, AT, T9);
11658 __ beq(AT, R0, L);
11659 __ delayed()->mfc1(dst, F30);
11661 __ mov_d(F12, src);
11662 __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::d2i), 1);
11663 __ move(dst, V0);
11664 __ bind(L);
11666 %}
11667 ins_pipe( pipe_slow );
11668 %}
11670 // Convert oop pointer into compressed form
11670 // Convert oop pointer into compressed form
// encodeHeapOop: compress a (possibly null) oop. The value is moved to
// dst first so encode_heap_oop can work in place.
11671 instruct encodeHeapOop(mRegN dst, mRegP src) %{
11672 predicate(n->bottom_type()->make_ptr()->ptr() != TypePtr::NotNull);
11673 match(Set dst (EncodeP src));
11674 format %{ "encode_heap_oop $dst,$src" %}
11675 ins_encode %{
11676 Register src = $src$$Register;
11677 Register dst = $dst$$Register;
11678 if (src != dst) {
11679 __ move(dst, src);
11680 }
11681 __ encode_heap_oop(dst);
11682 %}
11683 ins_pipe( ialu_regL_regL );
11684 %}
// encodeHeapOop_not_null: compress an oop known non-null (predicate),
// letting the assembler skip the null check.
11686 instruct encodeHeapOop_not_null(mRegN dst, mRegP src) %{
11687 predicate(n->bottom_type()->make_ptr()->ptr() == TypePtr::NotNull);
11688 match(Set dst (EncodeP src));
11689 format %{ "encode_heap_oop_not_null $dst,$src @ encodeHeapOop_not_null" %}
11690 ins_encode %{
11691 __ encode_heap_oop_not_null($dst$$Register, $src$$Register);
11692 %}
11693 ins_pipe( ialu_regL_regL );
11694 %}
// decodeHeapOop: decompress a narrow oop that may be null; the value is
// moved to dst first so decode_heap_oop can work in place.
11696 instruct decodeHeapOop(mRegP dst, mRegN src) %{
11697 predicate(n->bottom_type()->is_ptr()->ptr() != TypePtr::NotNull &&
11698 n->bottom_type()->is_ptr()->ptr() != TypePtr::Constant);
11699 match(Set dst (DecodeN src));
11700 format %{ "decode_heap_oop $dst,$src @ decodeHeapOop" %}
11701 ins_encode %{
11702 Register s = $src$$Register;
11703 Register d = $dst$$Register;
11704 if (s != d) {
11705 __ move(d, s);
11706 }
11707 __ decode_heap_oop(d);
11708 %}
11709 ins_pipe( ialu_regL_regL );
11710 %}
// decodeHeapOop_not_null: decompress a narrow oop known non-null;
// uses the two-register form when src and dst differ, the in-place
// form otherwise.
11712 instruct decodeHeapOop_not_null(mRegP dst, mRegN src) %{
11713 predicate(n->bottom_type()->is_ptr()->ptr() == TypePtr::NotNull ||
11714 n->bottom_type()->is_ptr()->ptr() == TypePtr::Constant);
11715 match(Set dst (DecodeN src));
11716 format %{ "decode_heap_oop_not_null $dst,$src @ decodeHeapOop_not_null" %}
11717 ins_encode %{
11718 Register s = $src$$Register;
11719 Register d = $dst$$Register;
11720 if (s != d) {
11721 __ decode_heap_oop_not_null(d, s);
11722 } else {
11723 __ decode_heap_oop_not_null(d);
11724 }
11725 %}
11726 ins_pipe( ialu_regL_regL );
11727 %}
11729 instruct encodeKlass_not_null(mRegN dst, mRegP src) %{
11730 match(Set dst (EncodePKlass src));
11731 format %{ "encode_heap_oop_not_null $dst,$src @ encodeKlass_not_null" %}
11732 ins_encode %{
11733 __ encode_klass_not_null($dst$$Register, $src$$Register);
11734 %}
11735 ins_pipe( ialu_regL_regL );
11736 %}
// decodeKlass_not_null: decompress a narrow klass pointer; two-register
// form when src and dst differ, in-place form otherwise.
11738 instruct decodeKlass_not_null(mRegP dst, mRegN src) %{
11739 match(Set dst (DecodeNKlass src));
11740 format %{ "decode_heap_klass_not_null $dst,$src" %}
11741 ins_encode %{
11742 Register s = $src$$Register;
11743 Register d = $dst$$Register;
11744 if (s != d) {
11745 __ decode_klass_not_null(d, s);
11746 } else {
11747 __ decode_klass_not_null(d);
11748 }
11749 %}
11750 ins_pipe( ialu_regL_regL );
11751 %}
11753 //FIXME
11753 //FIXME
// tlsLoadP: materialize the current JavaThread pointer. With OPT_THREAD
// the thread lives in a dedicated register (TREG); otherwise it is
// recomputed via get_thread().
11754 instruct tlsLoadP(mRegP dst) %{
11755 match(Set dst (ThreadLocal));
11757 ins_cost(0);
11758 format %{ " get_thread in $dst #@tlsLoadP" %}
11759 ins_encode %{
11760 Register dst = $dst$$Register;
11761 #ifdef OPT_THREAD
11762 __ move(dst, TREG);
11763 #else
11764 __ get_thread(dst);
11765 #endif
11766 %}
11768 ins_pipe( ialu_loadI );
11769 %}
11772 instruct checkCastPP( mRegP dst ) %{
11773 match(Set dst (CheckCastPP dst));
11775 format %{ "#checkcastPP of $dst (empty encoding) #@chekCastPP" %}
11776 ins_encode( /*empty encoding*/ );
11777 ins_pipe( empty );
11778 %}
// castPP: compiler-only pointer cast; zero-size, no code emitted.
11780 instruct castPP(mRegP dst)
11781 %{
11782 match(Set dst (CastPP dst));
11784 size(0);
11785 format %{ "# castPP of $dst" %}
11786 ins_encode(/* empty encoding */);
11787 ins_pipe(empty);
11788 %}
// castII: compiler-only int cast; no code emitted.
11790 instruct castII( mRegI dst ) %{
11791 match(Set dst (CastII dst));
11792 format %{ "#castII of $dst empty encoding" %}
11793 ins_encode( /*empty encoding*/ );
11794 ins_cost(0);
11795 ins_pipe( empty );
11796 %}
11798 // Return Instruction
11799 // Remove the return address & jump to it.
// Ret: jump through RA; the nop fills the branch delay slot.
11800 instruct Ret() %{
11801 match(Return);
11802 format %{ "RET #@Ret" %}
11804 ins_encode %{
11805 __ jr(RA);
11806 __ nop();
11807 %}
11809 ins_pipe( pipe_jump );
11810 %}
11812 /*
11813 // For Loongson CPUs, jr seems too slow, so this rule shouldn't be imported.
11814 instruct jumpXtnd(mRegL switch_val) %{
11815 match(Jump switch_val);
11817 ins_cost(350);
11819 format %{ "load T9 <-- [$constanttablebase, $switch_val, $constantoffset] @ jumpXtnd\n\t"
11820 "jr T9\n\t"
11821 "nop" %}
11822 ins_encode %{
11823 Register table_base = $constanttablebase;
11824 int con_offset = $constantoffset;
11825 Register switch_reg = $switch_val$$Register;
11827 if (UseLoongsonISA) {
11828 if (Assembler::is_simm(con_offset, 8)) {
11829 __ gsldx(T9, table_base, switch_reg, con_offset);
11830 } else if (Assembler::is_simm16(con_offset)) {
11831 __ daddu(T9, table_base, switch_reg);
11832 __ ld(T9, T9, con_offset);
11833 } else {
11834 __ move(T9, con_offset);
11835 __ daddu(AT, table_base, switch_reg);
11836 __ gsldx(T9, AT, T9, 0);
11837 }
11838 } else {
11839 if (Assembler::is_simm16(con_offset)) {
11840 __ daddu(T9, table_base, switch_reg);
11841 __ ld(T9, T9, con_offset);
11842 } else {
11843 __ move(T9, con_offset);
11844 __ daddu(AT, table_base, switch_reg);
11845 __ daddu(AT, T9, AT);
11846 __ ld(T9, AT, 0);
11847 }
11848 }
11850 __ jr(T9);
11851 __ nop();
11853 %}
11854 ins_pipe(pipe_jump);
11855 %}
11856 */
11858 // Jump Direct - Label defines a relative address from JMP
11858 // Jump Direct - Label defines a relative address from JMP
// jmpDir: unconditional branch to a label. A null label pointer (not
// yet bound by the matcher) falls back to a zero-displacement branch to
// be patched later; the nop fills the delay slot.
11859 instruct jmpDir(label labl) %{
11860 match(Goto);
11861 effect(USE labl);
11863 ins_cost(300);
11864 format %{ "JMP $labl #@jmpDir" %}
11866 ins_encode %{
11867 Label &L = *($labl$$label);
// &L re-checks the original pointer: non-null means the label exists.
11868 if(&L)
11869 __ b(L);
11870 else
11871 __ b(int(0));
11872 __ nop();
11873 %}
11875 ins_pipe( pipe_jump );
11876 ins_pc_relative(1);
11877 %}
11881 // Tail Jump; remove the return address; jump to target.
11882 // TailCall above leaves the return address around.
11883 // TailJump is used in only one place, the rethrow_Java stub (fancy_jump=2).
11884 // ex_oop (Exception Oop) is needed in %o0 at the jump. As there would be a
11885 // "restore" before this instruction (in Epilogue), we need to materialize it
11886 // in %i0.
11887 //FIXME
// tailjmpInd: indirect tail jump used by the rethrow stub. The
// exception oop is staged in V0 and the (return) pc in V1 — the
// registers the forward-exception/exception-blob stubs expect — before
// jumping to the target; nop fills the delay slot.
11888 instruct tailjmpInd(mRegP jump_target,mRegP ex_oop) %{
11889 match( TailJump jump_target ex_oop );
11890 ins_cost(200);
11891 format %{ "Jmp $jump_target ; ex_oop = $ex_oop #@tailjmpInd" %}
11892 ins_encode %{
11893 Register target = $jump_target$$Register;
11895 /* 2012/9/14 Jin: V0, V1 are indicated in:
11896 * [stubGenerator_mips.cpp] generate_forward_exception()
11897 * [runtime_mips.cpp] OptoRuntime::generate_exception_blob()
11898 */
11899 Register oop = $ex_oop$$Register;
11900 Register exception_oop = V0;
11901 Register exception_pc = V1;
11903 __ move(exception_pc, RA);
11904 __ move(exception_oop, oop);
11906 __ jr(target);
11907 __ nop();
11908 %}
11909 ins_pipe( pipe_jump );
11910 %}
11912 // ============================================================================
11913 // Procedure Call/Return Instructions
11914 // Call Java Static Instruction
11915 // Note: If this code changes, the corresponding ret_addr_offset() and
11916 // compute_padding() functions will have to be adjusted.
// CallStaticJavaDirect: direct call to a static Java target. The call
// site is 16-byte aligned; encoding lives in Java_Static_Call.
11917 instruct CallStaticJavaDirect(method meth) %{
11918 match(CallStaticJava);
11919 effect(USE meth);
11921 ins_cost(300);
11922 format %{ "CALL,static #@CallStaticJavaDirect " %}
11923 ins_encode( Java_Static_Call( meth ) );
11924 ins_pipe( pipe_slow );
11925 ins_pc_relative(1);
11926 ins_alignment(16);
11927 %}
11929 // Call Java Dynamic Instruction
11930 // Note: If this code changes, the corresponding ret_addr_offset() and
11931 // compute_padding() functions will have to be adjusted.
// CallDynamicJavaDirect: inline-cache (virtual) Java call; the IC klass
// is seeded with (oop)-1 by the Java_Dynamic_Call encoding.
11932 instruct CallDynamicJavaDirect(method meth) %{
11933 match(CallDynamicJava);
11934 effect(USE meth);
11936 ins_cost(300);
11937 format %{"MOV IC_Klass, (oop)-1 @ CallDynamicJavaDirect\n\t"
11938 "CallDynamic @ CallDynamicJavaDirect" %}
11939 ins_encode( Java_Dynamic_Call( meth ) );
11940 ins_pipe( pipe_slow );
11941 ins_pc_relative(1);
11942 ins_alignment(16);
11943 %}
// CallLeafNoFPDirect: runtime leaf call that uses no FP arguments;
// no safepoint, encoded via Java_To_Runtime.
11945 instruct CallLeafNoFPDirect(method meth) %{
11946 match(CallLeafNoFP);
11947 effect(USE meth);
11949 ins_cost(300);
11950 format %{ "CALL_LEAF_NOFP,runtime " %}
11951 ins_encode(Java_To_Runtime(meth));
11952 ins_pipe( pipe_slow );
11953 ins_pc_relative(1);
11954 ins_alignment(16);
11955 %}
11957 // Prefetch instructions.
11959 instruct prefetchrNTA( memory mem ) %{
11960 match(PrefetchRead mem);
11961 ins_cost(125);
11963 format %{ "pref $mem\t# Prefetch into non-temporal cache for read @ prefetchrNTA" %}
11964 ins_encode %{
11965 int base = $mem$$base;
11966 int index = $mem$$index;
11967 int scale = $mem$$scale;
11968 int disp = $mem$$disp;
11970 if( index != 0 ) {
11971 if (scale == 0) {
11972 __ daddu(AT, as_Register(base), as_Register(index));
11973 } else {
11974 __ dsll(AT, as_Register(index), scale);
11975 __ daddu(AT, as_Register(base), AT);
11976 }
11977 } else {
11978 __ move(AT, as_Register(base));
11979 }
11980 if( Assembler::is_simm16(disp) ) {
11981 __ daddiu(AT, as_Register(base), disp);
11982 __ daddiu(AT, AT, disp);
11983 } else {
11984 __ move(T9, disp);
11985 __ daddu(AT, as_Register(base), T9);
11986 }
11987 __ pref(0, AT, 0); //hint: 0:load
11988 %}
11989 ins_pipe(pipe_slow);
11990 %}
11992 instruct prefetchwNTA( memory mem ) %{
11993 match(PrefetchWrite mem);
11994 ins_cost(125);
11995 format %{ "pref $mem\t# Prefetch to non-temporal cache for write @ prefetchwNTA" %}
11996 ins_encode %{
11997 int base = $mem$$base;
11998 int index = $mem$$index;
11999 int scale = $mem$$scale;
12000 int disp = $mem$$disp;
12002 if( index != 0 ) {
12003 if (scale == 0) {
12004 __ daddu(AT, as_Register(base), as_Register(index));
12005 } else {
12006 __ dsll(AT, as_Register(index), scale);
12007 __ daddu(AT, as_Register(base), AT);
12008 }
12009 } else {
12010 __ move(AT, as_Register(base));
12011 }
12012 if( Assembler::is_simm16(disp) ) {
12013 __ daddiu(AT, as_Register(base), disp);
12014 __ daddiu(AT, AT, disp);
12015 } else {
12016 __ move(T9, disp);
12017 __ daddu(AT, as_Register(base), T9);
12018 }
12019 __ pref(1, AT, 0); //hint: 1:store
12020 %}
12021 ins_pipe(pipe_slow);
12022 %}
12024 // Prefetch instructions for allocation.
12024 // Prefetch instructions for allocation.
// prefetchAllocNTA: "prefetch" for allocation implemented as a dummy
// load-byte into R0 (the hard-wired zero register) from the effective
// address of $mem — the load touches the cache line but the result is
// discarded. Uses Loongson's fused gslbx (base+index+disp) when
// available; note this rule folds disp correctly into the index path,
// unlike the two pref-based rules above.
12026 instruct prefetchAllocNTA( memory mem ) %{
12027 match(PrefetchAllocation mem);
12028 ins_cost(125);
12029 format %{ "pref $mem\t# Prefetch allocation @ prefetchAllocNTA" %}
12030 ins_encode %{
12031 int base = $mem$$base;
12032 int index = $mem$$index;
12033 int scale = $mem$$scale;
12034 int disp = $mem$$disp;
12036 Register dst = R0;
12038 if( index != 0 ) {
12039 if( Assembler::is_simm16(disp) ) {
12040 if( UseLoongsonISA ) {
12041 if (scale == 0) {
12042 __ gslbx(dst, as_Register(base), as_Register(index), disp);
12043 } else {
12044 __ dsll(AT, as_Register(index), scale);
12045 __ gslbx(dst, as_Register(base), AT, disp);
12046 }
12047 } else {
12048 if (scale == 0) {
12049 __ addu(AT, as_Register(base), as_Register(index));
12050 } else {
12051 __ dsll(AT, as_Register(index), scale);
12052 __ addu(AT, as_Register(base), AT);
12053 }
12054 __ lb(dst, AT, disp);
12055 }
12056 } else {
12057 if (scale == 0) {
12058 __ addu(AT, as_Register(base), as_Register(index));
12059 } else {
12060 __ dsll(AT, as_Register(index), scale);
12061 __ addu(AT, as_Register(base), AT);
12062 }
12063 __ move(T9, disp);
12064 if( UseLoongsonISA ) {
12065 __ gslbx(dst, AT, T9, 0);
12066 } else {
12067 __ addu(AT, AT, T9);
12068 __ lb(dst, AT, 0);
12069 }
12070 }
12071 } else {
12072 if( Assembler::is_simm16(disp) ) {
12073 __ lb(dst, as_Register(base), disp);
12074 } else {
12075 __ move(T9, disp);
12076 if( UseLoongsonISA ) {
12077 __ gslbx(dst, as_Register(base), T9, 0);
12078 } else {
12079 __ addu(AT, as_Register(base), T9);
12080 __ lb(dst, AT, 0);
12081 }
12082 }
12083 }
12084 %}
12085 ins_pipe(pipe_slow);
12086 %}
// Call runtime without safepoint
// Direct leaf call into the VM runtime; uses the shared Java_To_Runtime
// encoding and is 16-byte aligned (ins_alignment) for patchable call sites.
instruct CallLeafDirect(method meth) %{
  match(CallLeaf);
  effect(USE meth);

  ins_cost(300);
  format %{ "CALL_LEAF,runtime #@CallLeafDirect " %}
  ins_encode(Java_To_Runtime(meth));
  ins_pipe( pipe_slow );
  ins_pc_relative(1);
  ins_alignment(16);
%}
12102 // Load Char (16bit unsigned)
12103 instruct loadUS(mRegI dst, memory mem) %{
12104 match(Set dst (LoadUS mem));
12106 ins_cost(125);
12107 format %{ "loadUS $dst,$mem @ loadC" %}
12108 ins_encode(load_C_enc(dst, mem));
12109 ins_pipe( ialu_loadI );
12110 %}
// Load a 16-bit unsigned value and widen it to long in one instruct
// (matches ConvI2L folded over LoadUS); reuses the same encoder as loadUS,
// which is zero-extending, so no extra widening code is needed.
instruct loadUS_convI2L(mRegL dst, memory mem) %{
  match(Set dst (ConvI2L (LoadUS mem)));

  ins_cost(125);
  format %{ "loadUS  $dst,$mem @ loadUS_convI2L" %}
  ins_encode(load_C_enc(dst, mem));
  ins_pipe( ialu_loadI );
%}
// Store Char (16bit unsigned)
// NOTE(review): ins_pipe is ialu_loadI for a store — presumably a
// copy-paste from the load instructs; confirm against the pipeline classes.
instruct storeC(memory mem, mRegI src) %{
  match(Set mem (StoreC mem src));

  ins_cost(125);
  format %{ "storeC  $src,$mem @ storeC" %}
  ins_encode(store_C_reg_enc(mem, src));
  ins_pipe( ialu_loadI );
%}
// Load float constant 0.0f: move the zero register into the FPU register
// (all-zero bits == +0.0f), cheaper than a constant-table load.
instruct loadConF0(regF dst, immF0 zero) %{
  match(Set dst zero);
  ins_cost(100);

  format %{ "mov $dst, zero @ loadConF0\n"%}
  ins_encode %{
    FloatRegister dst = $dst$$FloatRegister;

    __ mtc1(R0, dst);
  %}
  ins_pipe( fpu_loadF );
%}

// Load a non-zero float constant from the constant table.
// Uses lwc1 with a 16-bit offset when it fits; otherwise the offset is
// materialized in AT (gslwxc1 on Loongson fuses the indexed load).
instruct loadConF(regF dst, immF src) %{
  match(Set dst src);
  ins_cost(125);

  format %{ "lwc1 $dst, $constantoffset[$constanttablebase] # load FLOAT $src from table @ loadConF" %}
  ins_encode %{
    int con_offset = $constantoffset($src);

    if (Assembler::is_simm16(con_offset)) {
      __ lwc1($dst$$FloatRegister, $constanttablebase, con_offset);
    } else {
      __ set64(AT, con_offset);
      if (UseLoongsonISA) {
        __ gslwxc1($dst$$FloatRegister, $constanttablebase, AT, 0);
      } else {
        __ daddu(AT, $constanttablebase, AT);
        __ lwc1($dst$$FloatRegister, AT, 0);
      }
    }
  %}
  ins_pipe( fpu_loadF );
%}

// Load double constant 0.0: 64-bit move of R0 into the FPU register.
instruct loadConD0(regD dst, immD0 zero) %{
  match(Set dst zero);
  ins_cost(100);

  format %{ "mov $dst, zero @ loadConD0"%}
  ins_encode %{
    FloatRegister dst = as_FloatRegister($dst$$reg);

    __ dmtc1(R0, dst);
  %}
  ins_pipe( fpu_loadF );
%}

// Load a non-zero double constant from the constant table; same offset
// strategy as loadConF but with ldc1/gsldxc1.
instruct loadConD(regD dst, immD src) %{
  match(Set dst src);
  ins_cost(125);

  format %{ "ldc1 $dst, $constantoffset[$constanttablebase] # load DOUBLE $src from table @ loadConD" %}
  ins_encode %{
    int con_offset = $constantoffset($src);

    if (Assembler::is_simm16(con_offset)) {
      __ ldc1($dst$$FloatRegister, $constanttablebase, con_offset);
    } else {
      __ set64(AT, con_offset);
      if (UseLoongsonISA) {
        __ gsldxc1($dst$$FloatRegister, $constanttablebase, AT, 0);
      } else {
        __ daddu(AT, $constanttablebase, AT);
        __ ldc1($dst$$FloatRegister, AT, 0);
      }
    }
  %}
  ins_pipe( fpu_loadF );
%}
// Store register Float value (it is faster than store from FPU register)
instruct storeF_reg( memory mem, regF src) %{
  match(Set mem (StoreF mem src));

  ins_cost(50);
  format %{ "store   $mem, $src\t# store float @ storeF_reg" %}
  ins_encode(store_F_reg_enc(mem, src));
  ins_pipe( fpu_storeF );
%}

// Store float 0.0f: since +0.0f has an all-zero bit pattern, store the
// integer zero register with sw instead of going through the FPU.
// Address is formed as base [+ index<<scale] [+ disp], with T9/AT as
// scratch when the pieces do not fit a 16-bit offset.
instruct storeF_imm0( memory mem, immF0 zero) %{
  match(Set mem (StoreF mem zero));

  ins_cost(40);
  format %{ "store   $mem, zero\t# store float @ storeF_imm0" %}
  ins_encode %{
    int  base  = $mem$$base;
    int  index = $mem$$index;
    int  scale = $mem$$scale;
    int  disp  = $mem$$disp;

    if( index != 0 ) {
      // AT <- base + (index << scale)
      if(scale != 0) {
        __ dsll(T9, as_Register(index), scale);
        __ addu(AT, as_Register(base), T9);
      } else {
        __ daddu(AT, as_Register(base), as_Register(index));
      }
      if( Assembler::is_simm16(disp) ) {
        __ sw(R0, AT, disp);
      } else {
        __ move(T9, disp);
        __ addu(AT, AT, T9);
        __ sw(R0, AT, 0);
      }
    } else {
      if( Assembler::is_simm16(disp) ) {
        __ sw(R0, as_Register(base), disp);
      } else {
        __ move(T9, disp);
        __ addu(AT, as_Register(base), T9);
        __ sw(R0, AT, 0);
      }
    }
  %}
  ins_pipe( ialu_storeI );
%}
// Load Double
instruct loadD(regD dst, memory mem) %{
  match(Set dst (LoadD mem));

  ins_cost(150);
  format %{ "loadD   $dst, $mem #@loadD" %}
  ins_encode(load_D_enc(dst, mem));
  ins_pipe( ialu_loadI );
%}

// Load Double - UNaligned
// Uses the same encoder as the aligned load; see the FIXME below about a
// dedicated ldl/ldr sequence.
instruct loadD_unaligned(regD dst, memory mem ) %{
  match(Set dst (LoadD_unaligned mem));
  ins_cost(250);
  // FIXME: Jin: Need more effective ldl/ldr
  format %{ "loadD_unaligned   $dst, $mem #@loadD_unaligned" %}
  ins_encode(load_D_enc(dst, mem));
  ins_pipe( ialu_loadI );
%}

// Store a double from an FPU register.
instruct storeD_reg( memory mem, regD src) %{
  match(Set mem (StoreD mem src));

  ins_cost(50);
  format %{ "store   $mem, $src\t# store float @ storeD_reg" %}
  ins_encode(store_D_reg_enc(mem, src));
  ins_pipe( fpu_storeF );
%}
// Store double 0.0: materialize 0.0 in scratch FPU register F30
// (mtc1 R0 then int->double convert) and sdc1 it to memory.
// NOTE(review): the cvt_d_w looks redundant — dmtc1(R0, F30) would already
// yield the +0.0 bit pattern; confirm before simplifying.
instruct storeD_imm0( memory mem, immD0 zero) %{
  match(Set mem (StoreD mem zero));

  ins_cost(40);
  format %{ "store   $mem, zero\t# store float @ storeD_imm0" %}
  ins_encode %{
    int  base  = $mem$$base;
    int  index = $mem$$index;
    int  scale = $mem$$scale;
    int  disp  = $mem$$disp;

    __ mtc1(R0, F30);
    __ cvt_d_w(F30, F30);

    if( index != 0 ) {
      // AT <- base + (index << scale)
      if(scale != 0) {
        __ dsll(T9, as_Register(index), scale);
        __ addu(AT, as_Register(base), T9);
      } else {
        __ daddu(AT, as_Register(base), as_Register(index));
      }
      if( Assembler::is_simm16(disp) ) {
        __ sdc1(F30, AT, disp);
      } else {
        __ move(T9, disp);
        __ addu(AT, AT, T9);
        __ sdc1(F30, AT, 0);
      }
    } else {
      if( Assembler::is_simm16(disp) ) {
        __ sdc1(F30, as_Register(base), disp);
      } else {
        __ move(T9, disp);
        __ addu(AT, as_Register(base), T9);
        __ sdc1(F30, AT, 0);
      }
    }
  %}
  ins_pipe( ialu_storeI );
%}
// ----- Stack-slot moves -----
// Each instruct below moves one value between a register and an SP-relative
// stack slot. Stack-slot displacements are expected to fit a 16-bit
// immediate; the guarantee() documents and enforces that assumption.

// Load int from stack slot.
instruct loadSSI(mRegI dst, stackSlotI src)
%{
  match(Set dst src);

  ins_cost(125);
  format %{ "lw    $dst, $src\t# int stk @ loadSSI" %}
  ins_encode %{
    guarantee( Assembler::is_simm16($src$$disp), "disp too long (loadSSI) !");
    __ lw($dst$$Register, SP, $src$$disp);
  %}
  ins_pipe(ialu_loadI);
%}

// Store int to stack slot.
instruct storeSSI(stackSlotI dst, mRegI src)
%{
  match(Set dst src);

  ins_cost(100);
  format %{ "sw    $dst, $src\t# int stk @ storeSSI" %}
  ins_encode %{
    guarantee( Assembler::is_simm16($dst$$disp), "disp too long (storeSSI) !");
    __ sw($src$$Register, SP, $dst$$disp);
  %}
  ins_pipe(ialu_storeI);
%}

// Load long from stack slot.
instruct loadSSL(mRegL dst, stackSlotL src)
%{
  match(Set dst src);

  ins_cost(125);
  format %{ "ld    $dst, $src\t# long stk @ loadSSL" %}
  ins_encode %{
    guarantee( Assembler::is_simm16($src$$disp), "disp too long (loadSSL) !");
    __ ld($dst$$Register, SP, $src$$disp);
  %}
  ins_pipe(ialu_loadI);
%}

// Store long to stack slot.
instruct storeSSL(stackSlotL dst, mRegL src)
%{
  match(Set dst src);

  ins_cost(100);
  format %{ "sd    $dst, $src\t# long stk @ storeSSL" %}
  ins_encode %{
    guarantee( Assembler::is_simm16($dst$$disp), "disp too long (storeSSL) !");
    __ sd($src$$Register, SP, $dst$$disp);
  %}
  ins_pipe(ialu_storeI);
%}

// Load pointer from stack slot (64-bit ld, same as long).
instruct loadSSP(mRegP dst, stackSlotP src)
%{
  match(Set dst src);

  ins_cost(125);
  format %{ "ld    $dst, $src\t# ptr stk @ loadSSP" %}
  ins_encode %{
    guarantee( Assembler::is_simm16($src$$disp), "disp too long (loadSSP) !");
    __ ld($dst$$Register, SP, $src$$disp);
  %}
  ins_pipe(ialu_loadI);
%}

// Store pointer to stack slot.
instruct storeSSP(stackSlotP dst, mRegP src)
%{
  match(Set dst src);

  ins_cost(100);
  format %{ "sd    $dst, $src\t# ptr stk @ storeSSP" %}
  ins_encode %{
    guarantee( Assembler::is_simm16($dst$$disp), "disp too long (storeSSP) !");
    __ sd($src$$Register, SP, $dst$$disp);
  %}
  ins_pipe(ialu_storeI);
%}

// Load float from stack slot.
instruct loadSSF(regF dst, stackSlotF src)
%{
  match(Set dst src);

  ins_cost(125);
  format %{ "lwc1   $dst, $src\t# float stk @ loadSSF" %}
  ins_encode %{
    guarantee( Assembler::is_simm16($src$$disp), "disp too long (loadSSF) !");
    __ lwc1($dst$$FloatRegister, SP, $src$$disp);
  %}
  ins_pipe(ialu_loadI);
%}

// Store float to stack slot.
instruct storeSSF(stackSlotF dst, regF src)
%{
  match(Set dst src);

  ins_cost(100);
  format %{ "swc1   $dst, $src\t# float stk @ storeSSF" %}
  ins_encode %{
    guarantee( Assembler::is_simm16($dst$$disp), "disp too long (storeSSF) !");
    __ swc1($src$$FloatRegister, SP, $dst$$disp);
  %}
  ins_pipe(fpu_storeF);
%}

// Use the same format since predicate() can not be used here.
instruct loadSSD(regD dst, stackSlotD src)
%{
  match(Set dst src);

  ins_cost(125);
  format %{ "ldc1   $dst, $src\t# double stk @ loadSSD" %}
  ins_encode %{
    guarantee( Assembler::is_simm16($src$$disp), "disp too long (loadSSD) !");
    __ ldc1($dst$$FloatRegister, SP, $src$$disp);
  %}
  ins_pipe(ialu_loadI);
%}

// Store double to stack slot.
instruct storeSSD(stackSlotD dst, regD src)
%{
  match(Set dst src);

  ins_cost(100);
  format %{ "sdc1   $dst, $src\t# double stk @ storeSSD" %}
  ins_encode %{
    guarantee( Assembler::is_simm16($dst$$disp), "disp too long (storeSSD) !");
    __ sdc1($src$$FloatRegister, SP, $dst$$disp);
  %}
  ins_pipe(fpu_storeF);
%}
// Fast-path monitor enter; the heavy lifting is in MacroAssembler::fast_lock.
// The box register (s0) is USE_KILL: it holds the BasicLock and is clobbered.
instruct cmpFastLock( FlagsReg cr, mRegP object, s0_RegP box, mRegI tmp, mRegP scr) %{
  match( Set cr (FastLock object box) );
  effect( TEMP tmp, TEMP scr, USE_KILL box );
  ins_cost(300);
  format %{ "FASTLOCK $cr $object, $box, $tmp #@ cmpFastLock" %}
  ins_encode %{
    __ fast_lock($object$$Register, $box$$Register, $tmp$$Register, $scr$$Register);
  %}

  ins_pipe( pipe_slow );
  ins_pc_relative(1);
%}

// Fast-path monitor exit; delegates to MacroAssembler::fast_unlock.
instruct cmpFastUnlock( FlagsReg cr, mRegP object, s0_RegP box, mRegP tmp ) %{
  match( Set cr (FastUnlock object box) );
  effect( TEMP tmp, USE_KILL box );
  ins_cost(300);
  format %{ "FASTUNLOCK $object, $box, $tmp #@cmpFastUnlock" %}
  ins_encode %{
    __ fast_unlock($object$$Register, $box$$Register, $tmp$$Register);
  %}

  ins_pipe( pipe_slow );
  ins_pc_relative(1);
%}
// Store CMS card-mark Immediate
// Byte store of an 8-bit immediate, using the _sync variant of the
// byte-store encoder (card-table marks need ordering under CMS).
instruct storeImmCM(memory mem, immI8 src) %{
  match(Set mem (StoreCM mem src));

  ins_cost(150);
  format %{ "MOV8   $mem,$src\t! CMS card-mark imm0" %}
  // opcode(0xC6);
  ins_encode(store_B_immI_enc_sync(mem, src));
  ins_pipe( ialu_storeI );
%}
12494 // Die now
12495 instruct ShouldNotReachHere( )
12496 %{
12497 match(Halt);
12498 ins_cost(300);
12500 // Use the following format syntax
12501 format %{ "ILLTRAP ;#@ShouldNotReachHere" %}
12502 ins_encode %{
12503 // Here we should emit illtrap !
12505 __ stop("in ShoudNotReachHere");
12507 %}
12508 ins_pipe( pipe_jump );
12509 %}
// Effective-address (lea-style) pointer computations.

// dst = base + disp for a narrow-oop address with no shift
// (predicate restricts to narrow_oop_shift() == 0, so no decode needed).
instruct leaP8Narrow(mRegP dst, indOffset8Narrow mem)
%{
  predicate(Universe::narrow_oop_shift() == 0);
  match(Set dst mem);

  ins_cost(110);
  format %{ "leaq    $dst, $mem\t# ptr off8narrow @ leaP8Narrow" %}
  ins_encode %{
    Register  dst  = $dst$$Register;
    Register  base = as_Register($mem$$base);
    int       disp = $mem$$disp;

    __ daddiu(dst, base, disp);
  %}
  ins_pipe( ialu_regI_imm16 );
%}

// dst = base + (index << scale) + disp
instruct leaPPosIdxScaleOff8(mRegP dst, basePosIndexScaleOffset8 mem)
%{
  match(Set dst mem);

  ins_cost(110);
  format %{ "leaq    $dst, $mem\t# @ PosIdxScaleOff8" %}
  ins_encode %{
    Register  dst   = $dst$$Register;
    Register  base  = as_Register($mem$$base);
    Register  index = as_Register($mem$$index);
    int       scale = $mem$$scale;
    int       disp  = $mem$$disp;

    if (scale == 0) {
      __ daddu(AT, base, index);
      __ daddiu(dst, AT, disp);
    } else {
      __ dsll(AT, index, scale);
      __ daddu(AT, base, AT);
      __ daddiu(dst, AT, disp);
    }
  %}

  ins_pipe( ialu_regI_imm16 );
%}

// dst = base + (index << scale), no displacement.
instruct leaPIdxScale(mRegP dst, indIndexScale mem)
%{
  match(Set dst mem);

  ins_cost(110);
  format %{ "leaq    $dst, $mem\t# @ leaPIdxScale" %}
  ins_encode %{
    Register  dst   = $dst$$Register;
    Register  base  = as_Register($mem$$base);
    Register  index = as_Register($mem$$index);
    int       scale = $mem$$scale;

    if (scale == 0) {
      __ daddu(dst, base, index);
    } else {
      __ dsll(AT, index, scale);
      __ daddu(dst, base, AT);
    }
  %}

  ins_pipe( ialu_regI_imm16 );
%}
// Jump Direct Conditional - Label defines a relative address from Jcc+1
// Counted-loop back-branch on a register/register compare. The cmpcode
// selects the condition; unsigned-looking names (above/below) are encoded
// with signed slt here. The trailing nop fills the branch delay slot.
// NOTE(review): `if (&L)` takes the address of a reference, which is
// normally never null — presumably guarding an unbound label; confirm.
instruct jmpLoopEnd(cmpOp cop, mRegI src1, mRegI src2, label labl) %{
  match(CountedLoopEnd cop (CmpI src1 src2));
  effect(USE labl);

  ins_cost(300);
  format %{ "J$cop  $src1, $src2, $labl\t# Loop end @ jmpLoopEnd" %}
  ins_encode %{
    Register op1 = $src1$$Register;
    Register op2 = $src2$$Register;
    Label    &L  = *($labl$$label);
    int     flag = $cop$$cmpcode;

    switch(flag)
    {
      case 0x01: //equal
        if (&L)
          __ beq(op1, op2, L);
        else
          __ beq(op1, op2, (int)0);
        break;
      case 0x02: //not_equal
        if (&L)
          __ bne(op1, op2, L);
        else
          __ bne(op1, op2, (int)0);
        break;
      case 0x03: //above
        __ slt(AT, op2, op1);
        if(&L)
          __ bne(AT, R0, L);
        else
          __ bne(AT, R0, (int)0);
        break;
      case 0x04: //above_equal
        __ slt(AT, op1, op2);
        if(&L)
          __ beq(AT, R0, L);
        else
          __ beq(AT, R0, (int)0);
        break;
      case 0x05: //below
        __ slt(AT, op1, op2);
        if(&L)
          __ bne(AT, R0, L);
        else
          __ bne(AT, R0, (int)0);
        break;
      case 0x06: //below_equal
        __ slt(AT, op2, op1);
        if(&L)
          __ beq(AT, R0, L);
        else
          __ beq(AT, R0, (int)0);
        break;
      default:
        Unimplemented();
    }
    __ nop();   // branch delay slot
  %}
  ins_pipe( pipe_jump );
  ins_pc_relative(1);
%}
// Counted-loop back-branch against a 16-bit immediate: computes
// AT = src1 - op2 once (addiu32 with the negated immediate), then branches
// on AT's sign/zero. Cheaper than the reg/reg version (ins_cost 250).
instruct jmpLoopEnd_reg_imm16_sub(cmpOp cop, mRegI src1, immI16_sub src2, label labl) %{
  match(CountedLoopEnd cop (CmpI src1 src2));
  effect(USE labl);

  ins_cost(250);
  format %{ "J$cop  $src1, $src2, $labl\t# Loop end @ jmpLoopEnd_reg_imm16_sub" %}
  ins_encode %{
    Register op1 = $src1$$Register;
    int      op2 = $src2$$constant;
    Label    &L  = *($labl$$label);
    int     flag = $cop$$cmpcode;

    // AT <- op1 - op2; all conditions below test AT against zero.
    __ addiu32(AT, op1, -1 * op2);

    switch(flag)
    {
      case 0x01: //equal
        if (&L)
          __ beq(AT, R0, L);
        else
          __ beq(AT, R0, (int)0);
        break;
      case 0x02: //not_equal
        if (&L)
          __ bne(AT, R0, L);
        else
          __ bne(AT, R0, (int)0);
        break;
      case 0x03: //above
        if(&L)
          __ bgtz(AT, L);
        else
          __ bgtz(AT, (int)0);
        break;
      case 0x04: //above_equal
        if(&L)
          __ bgez(AT, L);
        else
          __ bgez(AT,(int)0);
        break;
      case 0x05: //below
        if(&L)
          __ bltz(AT, L);
        else
          __ bltz(AT, (int)0);
        break;
      case 0x06: //below_equal
        if(&L)
          __ blez(AT, L);
        else
          __ blez(AT, (int)0);
        break;
      default:
        Unimplemented();
    }
    __ nop();   // branch delay slot
  %}
  ins_pipe( pipe_jump );
  ins_pc_relative(1);
%}
12704 /*
12705 // Jump Direct Conditional - Label defines a relative address from Jcc+1
12706 instruct jmpLoopEndU(cmpOpU cop, eFlagsRegU cmp, label labl) %{
12707 match(CountedLoopEnd cop cmp);
12708 effect(USE labl);
12710 ins_cost(300);
12711 format %{ "J$cop,u $labl\t# Loop end" %}
12712 size(6);
12713 opcode(0x0F, 0x80);
12714 ins_encode( Jcc( cop, labl) );
12715 ins_pipe( pipe_jump );
12716 ins_pc_relative(1);
12717 %}
12719 instruct jmpLoopEndUCF(cmpOpUCF cop, eFlagsRegUCF cmp, label labl) %{
12720 match(CountedLoopEnd cop cmp);
12721 effect(USE labl);
12723 ins_cost(200);
12724 format %{ "J$cop,u $labl\t# Loop end" %}
12725 opcode(0x0F, 0x80);
12726 ins_encode( Jcc( cop, labl) );
12727 ins_pipe( pipe_jump );
12728 ins_pc_relative(1);
12729 %}
12730 */
// This match pattern is created for StoreIConditional since I cannot match IfNode without a RegFlags! fujie 2012/07/17
// Branch on the AT pseudo-flag produced by storeIConditional: AT != 0 means
// the conditional store succeeded, hence "equal" branches on bne and
// "not equal" on beq (the test is inverted relative to the cmpcode name).
instruct jmpCon_flags(cmpOp cop, FlagsReg cr, label labl) %{
  match(If cop cr);
  effect(USE labl);

  ins_cost(300);
  format %{ "J$cop    $labl #mips uses AT as eflag @jmpCon_flags" %}

  ins_encode %{
    Label &L = *($labl$$label);
    switch($cop$$cmpcode)
    {
      case 0x01: //equal
        if (&L)
          __ bne(AT, R0, L);
        else
          __ bne(AT, R0, (int)0);
        break;
      case 0x02: //not equal
        if (&L)
          __ beq(AT, R0, L);
        else
          __ beq(AT, R0, (int)0);
        break;
      default:
        Unimplemented();
    }
    __ nop();   // branch delay slot
  %}

  ins_pipe( pipe_jump );
  ins_pc_relative(1);
%}
// ============================================================================
// The 2nd slow-half of a subtype check. Scan the subklass's 2ndary superklass
// array for an instance of the superklass. Set a hidden internal cache on a
// hit (cache is checked with exposed code in gen_subtype_check()). Return
// NZ for a miss or zero for a hit. The encoding ALSO sets flags.
// T8 is excluded from sub/super and killed as the scratch register.
instruct partialSubtypeCheck( mRegP result, no_T8_mRegP sub, no_T8_mRegP super, mT8RegI tmp ) %{
  match(Set result (PartialSubtypeCheck sub super));
  effect(KILL tmp);
  ins_cost(1100);  // slightly larger than the next version
  format %{ "partialSubtypeCheck result=$result, sub=$sub, super=$super, tmp=$tmp " %}

  ins_encode( enc_PartialSubtypeCheck(result, sub, super, tmp) );
  ins_pipe( pipe_slow );
%}
// Conditional-store of an int value.
// ZF flag is set on success, reset otherwise. Implemented with a CMPXCHG on Intel.
// MIPS implementation: LL/SC retry loop. On exit AT acts as the flag read by
// jmpCon_flags: 0xFF (set in the delay slot of the success path) on success,
// 0 (set in the delay slot of the failing compare) on failure.
// Only base+disp addressing is supported; an index register traps via stop().
instruct storeIConditional( memory mem, mRegI oldval, mRegI newval, FlagsReg cr ) %{
  match(Set cr (StoreIConditional mem (Binary oldval newval)));
 // effect(KILL oldval);
  format %{ "CMPXCHG $newval, $mem, $oldval \t# @storeIConditional" %}

  ins_encode %{
    Register oldval = $oldval$$Register;
    Register newval = $newval$$Register;
    Address  addr(as_Register($mem$$base), $mem$$disp);
    Label again, failure;

//    int     base = $mem$$base;
    int     index = $mem$$index;
    int     scale = $mem$$scale;
    int     disp  = $mem$$disp;

    guarantee(Assembler::is_simm16(disp), "");

    if( index != 0 ) {
      __ stop("in storeIConditional: index != 0");
    } else {
      __ bind(again);
      if(UseSyncLevel <= 1000) __ sync();
      __ ll(AT, addr);                       // load-linked current value
      __ bne(AT, oldval, failure);           // mismatch -> fail
      __ delayed()->addu(AT, R0, R0);        // delay slot: AT = 0 (failure flag)

      __ addu(AT, newval, R0);
      __ sc(AT, addr);                       // store-conditional; AT = 0 if lost
      __ beq(AT, R0, again);                 // retry on SC failure
      __ delayed()->addiu(AT, R0, 0xFF);     // delay slot: AT = 0xFF (success flag)
      __ bind(failure);
      __ sync();
    }
  %}

  ins_pipe( long_memory_op );
%}
12824 // Conditional-store of a long value.
12825 // ZF flag is set on success, reset otherwise. Implemented with a CMPXCHG.
12826 instruct storeLConditional(memory mem, t2RegL oldval, mRegL newval, FlagsReg cr )
12827 %{
12828 match(Set cr (StoreLConditional mem (Binary oldval newval)));
12829 effect(KILL oldval);
12831 format %{ "cmpxchg $mem, $newval\t# If $oldval == $mem then store $newval into $mem" %}
12832 ins_encode%{
12833 Register oldval = $oldval$$Register;
12834 Register newval = $newval$$Register;
12835 Address addr((Register)$mem$$base, $mem$$disp);
12837 int index = $mem$$index;
12838 int scale = $mem$$scale;
12839 int disp = $mem$$disp;
12841 guarantee(Assembler::is_simm16(disp), "");
12843 if( index != 0 ) {
12844 __ stop("in storeIConditional: index != 0");
12845 } else {
12846 __ cmpxchg(newval, addr, oldval);
12847 }
12848 %}
12849 ins_pipe( long_memory_op );
12850 %}
// CAS int: cmpxchg32 sets AT to the success flag, copied into res.
// oldval (pinned to S2/T2 by the operand class) is KILLed by the compare.
// NOTE(review): the local Label L is declared but never bound/used here;
// presumably left over from an earlier branching encoding.
instruct compareAndSwapI( mRegI res, mRegP mem_ptr, mS2RegI oldval, mRegI newval) %{
  match(Set res (CompareAndSwapI mem_ptr (Binary oldval newval)));
  effect(KILL oldval);
//  match(CompareAndSwapI mem_ptr (Binary oldval newval));
  format %{ "CMPXCHG $newval, [$mem_ptr], $oldval @ compareAndSwapI\n\t"
            "MOV    $res, 1 @ compareAndSwapI\n\t"
            "BNE    AT, R0 @ compareAndSwapI\n\t"
            "MOV    $res, 0 @ compareAndSwapI\n"
            "L:" %}
  ins_encode %{
    Register newval = $newval$$Register;
    Register oldval = $oldval$$Register;
    Register res    = $res$$Register;
    Address  addr($mem_ptr$$Register, 0);
    Label L;

    __ cmpxchg32(newval, addr, oldval);
    __ move(res, AT);
  %}
  ins_pipe( long_memory_op );
%}

//FIXME:
// CAS pointer (64-bit cmpxchg); result flag copied from AT into res.
instruct compareAndSwapP( mRegI res, mRegP mem_ptr, s2_RegP oldval, mRegP newval) %{
  match(Set res (CompareAndSwapP mem_ptr (Binary oldval newval)));
  effect(KILL oldval);
  format %{ "CMPXCHG $newval, [$mem_ptr], $oldval @ compareAndSwapP\n\t"
            "MOV    $res, AT @ compareAndSwapP\n\t"
            "L:" %}
  ins_encode %{
    Register newval = $newval$$Register;
    Register oldval = $oldval$$Register;
    Register res    = $res$$Register;
    Address  addr($mem_ptr$$Register, 0);
    Label L;

    __ cmpxchg(newval, addr, oldval);
    __ move(res, AT);
  %}
  ins_pipe( long_memory_op );
%}

// CAS narrow oop: 32-bit cmpxchg after sign-extending oldval (see comment
// below — ll/sc sign-extend, so the expected value must match).
instruct compareAndSwapN( mRegI res, mRegP mem_ptr, t2_RegN oldval, mRegN newval) %{
  match(Set res (CompareAndSwapN mem_ptr (Binary oldval newval)));
  effect(KILL oldval);
  format %{ "CMPXCHG $newval, [$mem_ptr], $oldval @ compareAndSwapN\n\t"
            "MOV    $res, AT @ compareAndSwapN\n\t"
            "L:" %}
  ins_encode %{
    Register newval = $newval$$Register;
    Register oldval = $oldval$$Register;
    Register res    = $res$$Register;
    Address  addr($mem_ptr$$Register, 0);
    Label L;

    /* 2013/7/19 Jin: cmpxchg32 is implemented with ll/sc, which will do sign extension.
     *      Thus, we should extend oldval's sign for correct comparision.
     */
    __ sll(oldval, oldval, 0);

    __ cmpxchg32(newval, addr, oldval);
    __ move(res, AT);
  %}
  ins_pipe( long_memory_op );
%}
12919 //----------Max and Min--------------------------------------------------------
12920 // Min Instructions
12921 ////
12922 // *** Min and Max using the conditional move are slower than the
12923 // *** branch version on a Pentium III.
12924 // // Conditional move for min
12925 //instruct cmovI_reg_lt( eRegI op2, eRegI op1, eFlagsReg cr ) %{
12926 // effect( USE_DEF op2, USE op1, USE cr );
12927 // format %{ "CMOVlt $op2,$op1\t! min" %}
12928 // opcode(0x4C,0x0F);
12929 // ins_encode( OpcS, OpcP, RegReg( op2, op1 ) );
12930 // ins_pipe( pipe_cmov_reg );
12931 //%}
12932 //
12933 //// Min Register with Register (P6 version)
12934 //instruct minI_eReg_p6( eRegI op1, eRegI op2 ) %{
12935 // predicate(VM_Version::supports_cmov() );
12936 // match(Set op2 (MinI op1 op2));
12937 // ins_cost(200);
12938 // expand %{
12939 // eFlagsReg cr;
12940 // compI_eReg(cr,op1,op2);
12941 // cmovI_reg_lt(op2,op1,cr);
12942 // %}
12943 //%}
12945 // Min Register with Register (generic version)
12946 instruct minI_Reg_Reg(mRegI dst, mRegI src) %{
12947 match(Set dst (MinI dst src));
12948 //effect(KILL flags);
12949 ins_cost(80);
12951 format %{ "MIN $dst, $src @minI_Reg_Reg" %}
12952 ins_encode %{
12953 Register dst = $dst$$Register;
12954 Register src = $src$$Register;
12956 __ slt(AT, src, dst);
12957 __ movn(dst, src, AT);
12959 %}
12961 ins_pipe( pipe_slow );
12962 %}
12964 // Max Register with Register
12965 // *** Min and Max using the conditional move are slower than the
12966 // *** branch version on a Pentium III.
12967 // // Conditional move for max
12968 //instruct cmovI_reg_gt( eRegI op2, eRegI op1, eFlagsReg cr ) %{
12969 // effect( USE_DEF op2, USE op1, USE cr );
12970 // format %{ "CMOVgt $op2,$op1\t! max" %}
12971 // opcode(0x4F,0x0F);
12972 // ins_encode( OpcS, OpcP, RegReg( op2, op1 ) );
12973 // ins_pipe( pipe_cmov_reg );
12974 //%}
12975 //
12976 // // Max Register with Register (P6 version)
12977 //instruct maxI_eReg_p6( eRegI op1, eRegI op2 ) %{
12978 // predicate(VM_Version::supports_cmov() );
12979 // match(Set op2 (MaxI op1 op2));
12980 // ins_cost(200);
12981 // expand %{
12982 // eFlagsReg cr;
12983 // compI_eReg(cr,op1,op2);
12984 // cmovI_reg_gt(op2,op1,cr);
12985 // %}
12986 //%}
// Max Register with Register (generic version)
// dst = max(dst, src): movn replaces dst with src only when dst < src.
instruct maxI_Reg_Reg(mRegI dst, mRegI src) %{
  match(Set dst (MaxI dst src));
  ins_cost(80);

  format %{ "MAX    $dst, $src @maxI_Reg_Reg" %}

  ins_encode %{
    Register dst   = $dst$$Register;
    Register src   = $src$$Register;

    __ slt(AT, dst, src);      // AT = (dst < src)
    __ movn(dst, src, AT);     // if AT != 0: dst = src

  %}

  ins_pipe( pipe_slow );
%}

// dst = max(dst, 0): clamp negatives to zero via movn with R0.
instruct maxI_Reg_zero(mRegI dst, immI0 zero) %{
  match(Set dst (MaxI dst zero));
  ins_cost(50);

  format %{ "MAX    $dst, 0 @maxI_Reg_zero" %}

  ins_encode %{
    Register dst   = $dst$$Register;

    __ slt(AT, dst, R0);       // AT = (dst < 0)
    __ movn(dst, R0, AT);      // if negative: dst = 0

  %}

  ins_pipe( pipe_slow );
%}
// ----- Zero-extension patterns -----
// All of these recognize "AndL with 0xFFFFFFFF" shapes and emit a single
// dext (extract low 32 bits, zero-extended) or a zero-extending lwu load.

// dst = src & 0xFFFFFFFF
instruct zerox_long_reg_reg(mRegL dst, mRegL src, immL_32bits mask)
%{
  match(Set dst (AndL src mask));

  format %{ "movl    $dst, $src\t# zero-extend long @ zerox_long_reg_reg" %}
  ins_encode %{
    Register dst = $dst$$Register;
    Register src = $src$$Register;

    __ dext(dst, src, 0, 32);
  %}
  ins_pipe(ialu_regI_regI);
%}

// Pack two ints into one long: src1 in the low 32 bits, src2 in the high.
// The aliasing cases use dins/dinsu (insert) to avoid clobbering inputs.
instruct combine_i2l(mRegL dst, mRegI src1, immL_32bits mask, mRegI src2, immI_32 shift32)
%{
  match(Set dst (OrL (AndL (ConvI2L src1) mask) (LShiftL (ConvI2L src2) shift32)));

  format %{ "combine_i2l    $dst, $src2(H), $src1(L) @ combine_i2l" %}
  ins_encode %{
    Register dst  = $dst$$Register;
    Register src1 = $src1$$Register;
    Register src2 = $src2$$Register;

    if (src1 == dst) {
      __ dinsu(dst, src2, 32, 32);        // insert src2 into high half
    } else if (src2 == dst) {
      __ dsll32(dst, dst, 0);             // move dst's low half up
      __ dins(dst, src1, 0, 32);          // insert src1 into low half
    } else {
      __ dext(dst, src1, 0, 32);
      __ dinsu(dst, src2, 32, 32);
    }
  %}
  ins_pipe(ialu_regI_regI);
%}

// Zero-extend convert int to long
instruct convI2L_reg_reg_zex(mRegL dst, mRegI src, immL_32bits mask)
%{
  match(Set dst (AndL (ConvI2L src) mask));

  format %{ "movl    $dst, $src\t# i2l zero-extend @ convI2L_reg_reg_zex" %}
  ins_encode %{
    Register dst = $dst$$Register;
    Register src = $src$$Register;

    __ dext(dst, src, 0, 32);
  %}
  ins_pipe(ialu_regI_regI);
%}

// Same, with an intervening L2I truncation folded away.
instruct convL2I2L_reg_reg_zex(mRegL dst, mRegL src, immL_32bits mask)
%{
  match(Set dst (AndL (ConvI2L (ConvL2I src)) mask));

  format %{ "movl    $dst, $src\t# i2l zero-extend @ convL2I2L_reg_reg_zex" %}
  ins_encode %{
    Register dst = $dst$$Register;
    Register src = $src$$Register;

    __ dext(dst, src, 0, 32);
  %}
  ins_pipe(ialu_regI_regI);
%}

// Match loading integer and casting it to unsigned int in long register.
// LoadI + ConvI2L + AndL 0xffffffff.
instruct loadUI2L_rmask(mRegL dst, memory mem, immL_32bits mask) %{
  match(Set dst (AndL (ConvI2L (LoadI mem)) mask));

  format %{ "lwu     $dst, $mem \t// zero-extend to long @ loadUI2L_rmask" %}
  ins_encode (load_N_enc(dst, mem));
  ins_pipe(ialu_loadI);
%}

// Same pattern with the mask on the left of the AndL.
instruct loadUI2L_lmask(mRegL dst, memory mem, immL_32bits mask) %{
  match(Set dst (AndL mask (ConvI2L (LoadI mem))));

  format %{ "lwu     $dst, $mem \t// zero-extend to long @ loadUI2L_lmask" %}
  ins_encode (load_N_enc(dst, mem));
  ins_pipe(ialu_loadI);
%}
// ============================================================================
// Safepoint Instruction
// Reads the polling page: the lw faults when the page is protected,
// trapping the thread into the safepoint handler. The load is tagged with
// poll_type relocation so the VM can identify the poll site.
instruct safePoint_poll(mRegP poll) %{
  match(SafePoint poll);
  effect(USE poll);

  ins_cost(125);
  format %{ "Safepoint @ [$poll] : poll for GC @ safePoint_poll" %}

  ins_encode %{
    Register poll_reg = $poll$$Register;

    __ block_comment("Safepoint:");
    __ relocate(relocInfo::poll_type);
    __ lw(AT, poll_reg, 0);
  %}

  ins_pipe( ialu_storeI );
%}

//----------Arithmetic Conversion Instructions---------------------------------
// Float/Double rounding are no-ops on this port: values already have the
// right precision, so both instructs emit nothing (ins_cost 0).

instruct roundFloat_nop(regF dst)
%{
  match(Set dst (RoundFloat dst));

  ins_cost(0);
  ins_encode();
  ins_pipe(empty);
%}

instruct roundDouble_nop(regD dst)
%{
  match(Set dst (RoundDouble dst));

  ins_cost(0);
  ins_encode();
  ins_pipe(empty);
%}
//---------- Zeros Count Instructions ------------------------------------------
// CountLeadingZerosINode CountTrailingZerosINode
// Guarded by Use*ZerosInstruction flags; clz/dclz are standard MIPS,
// ctz/dctz are Godson/Loongson extensions (see comment below).
instruct countLeadingZerosI(mRegI dst, mRegI src) %{
  predicate(UseCountLeadingZerosInstruction);
  match(Set dst (CountLeadingZerosI src));

  format %{ "clz  $dst, $src\t# count leading zeros (int)" %}
  ins_encode %{
    __ clz($dst$$Register, $src$$Register);
  %}
  ins_pipe( ialu_regL_regL );
%}

instruct countLeadingZerosL(mRegI dst, mRegL src) %{
  predicate(UseCountLeadingZerosInstruction);
  match(Set dst (CountLeadingZerosL src));

  format %{ "dclz $dst, $src\t# count leading zeros (long)" %}
  ins_encode %{
    __ dclz($dst$$Register, $src$$Register);
  %}
  ins_pipe( ialu_regL_regL );
%}

instruct countTrailingZerosI(mRegI dst, mRegI src) %{
  predicate(UseCountTrailingZerosInstruction);
  match(Set dst (CountTrailingZerosI src));

  format %{ "ctz  $dst, $src\t# count trailing zeros (int)" %}
  ins_encode %{
    // ctz and dctz is gs instructions.
    __ ctz($dst$$Register, $src$$Register);
  %}
  ins_pipe( ialu_regL_regL );
%}
// Count trailing zeros of a long via dctz (a Loongson GS extension
// instruction, per the note in countTrailingZerosI).
// Only selected when -XX:+UseCountTrailingZerosInstruction is set.
// Fix: the format string previously printed "dcto" although the encoding
// emits dctz; the printed mnemonic now matches the emitted instruction.
13185 instruct countTrailingZerosL(mRegI dst, mRegL src) %{
13186 predicate(UseCountTrailingZerosInstruction);
13187 match(Set dst (CountTrailingZerosL src));
13189 format %{ "dctz $dst, $src\t# count trailing zeros (long)" %}
13190 ins_encode %{
13191 __ dctz($dst$$Register, $src$$Register);
13192 %}
13193 ins_pipe( ialu_regL_regL );
13194 %}
13196 // ====================VECTOR INSTRUCTIONS=====================================
13198 // Load vectors (8 bytes long)
// Load an 8-byte vector into a vecD register, reusing the double-precision
// FP load encoding (load_D_enc). Matches only 8-byte LoadVector nodes.
13199 instruct loadV8(vecD dst, memory mem) %{
13200 predicate(n->as_LoadVector()->memory_size() == 8);
13201 match(Set dst (LoadVector mem));
13202 ins_cost(125);
13203 format %{ "load $dst, $mem\t! load vector (8 bytes)" %}
13204 ins_encode(load_D_enc(dst, mem));
13205 ins_pipe( fpu_loadF );
13206 %}
13208 // Store vectors (8 bytes long)
// Store an 8-byte vector from a vecD register, reusing the double-precision
// FP store encoding (store_D_reg_enc). Matches only 8-byte StoreVector nodes.
13209 instruct storeV8(memory mem, vecD src) %{
13210 predicate(n->as_StoreVector()->memory_size() == 8);
13211 match(Set mem (StoreVector mem src));
13212 ins_cost(145);
13213 format %{ "store $mem, $src\t! store vector (8 bytes)" %}
13214 ins_encode(store_D_reg_enc(mem, src));
13215 ins_pipe( fpu_storeF );
13216 %}
// Replicate a byte from a GP register into all 8 lanes of a vecD:
// replv_ob builds the replicated value in AT (presumably a Loongson media
// instruction -- TODO confirm), then dmtc1 moves the 64-bit result to $dst.
13218 instruct Repl8B(vecD dst, mRegI src) %{
13219 predicate(n->as_Vector()->length() == 8);
13220 match(Set dst (ReplicateB src));
13221 format %{ "replv_ob AT, $src\n\t"
13222 "dmtc1 AT, $dst\t! replicate8B" %}
13223 ins_encode %{
13224 __ replv_ob(AT, $src$$Register);
13225 __ dmtc1(AT, $dst$$FloatRegister);
13226 %}
13227 ins_pipe( pipe_mtc1 );
13228 %}
// Replicate a byte immediate into all 8 lanes of a vecD: repl_ob builds the
// replicated value in AT, then dmtc1 moves the 64-bit result to $dst.
// Fix: the format string previously printed "dmtc1 AT, $dst,0x00" -- the
// stray third operand is removed; the emitted dmtc1 takes two operands, as
// in every sibling Repl* rule.
// NOTE(review): unlike Repl4S_imm there is no is_simm range check on $con
// before repl_ob -- confirm repl_ob accepts the full immI range.
13230 instruct Repl8B_imm(vecD dst, immI con) %{
13231 predicate(n->as_Vector()->length() == 8);
13232 match(Set dst (ReplicateB con));
13233 format %{ "repl_ob AT, [$con]\n\t"
13234 "dmtc1 AT, $dst\t! replicate8B($con)" %}
13235 ins_encode %{
13236 int val = $con$$constant;
13237 __ repl_ob(AT, val);
13238 __ dmtc1(AT, $dst$$FloatRegister);
13239 %}
13240 ins_pipe( pipe_mtc1 );
13241 %}
// Replicate byte zero: a single dmtc1 of R0 (hardwired zero) clears all
// 64 bits of $dst at once.
13243 instruct Repl8B_zero(vecD dst, immI0 zero) %{
13244 predicate(n->as_Vector()->length() == 8);
13245 match(Set dst (ReplicateB zero));
13246 format %{ "dmtc1 R0, $dst\t! replicate8B zero" %}
13247 ins_encode %{
13248 __ dmtc1(R0, $dst$$FloatRegister);
13249 %}
13250 ins_pipe( pipe_mtc1 );
13251 %}
// Replicate byte -1: nor(AT, R0, R0) produces all-ones in AT, which fills
// every lane of $dst via dmtc1.
13253 instruct Repl8B_M1(vecD dst, immI_M1 M1) %{
13254 predicate(n->as_Vector()->length() == 8);
13255 match(Set dst (ReplicateB M1));
13256 format %{ "dmtc1 -1, $dst\t! replicate8B -1" %}
13257 ins_encode %{
13258 __ nor(AT, R0, R0);
13259 __ dmtc1(AT, $dst$$FloatRegister);
13260 %}
13261 ins_pipe( pipe_mtc1 );
13262 %}
// Replicate a short from a GP register into all 4 lanes of a vecD:
// replv_qh builds the replicated value in AT, dmtc1 moves it to $dst.
13264 instruct Repl4S(vecD dst, mRegI src) %{
13265 predicate(n->as_Vector()->length() == 4);
13266 match(Set dst (ReplicateS src));
13267 format %{ "replv_qh AT, $src\n\t"
13268 "dmtc1 AT, $dst\t! replicate4S" %}
13269 ins_encode %{
13270 __ replv_qh(AT, $src$$Register);
13271 __ dmtc1(AT, $dst$$FloatRegister);
13272 %}
13273 ins_pipe( pipe_mtc1 );
13274 %}
// Replicate a short immediate into all 4 lanes of a vecD. Small immediates
// (simm10) use repl_qh directly; larger values are first materialized in AT
// with li32 and replicated register-to-register with replv_qh.
13276 instruct Repl4S_imm(vecD dst, immI con) %{
13277 predicate(n->as_Vector()->length() == 4);
13278 match(Set dst (ReplicateS con));
13279 format %{ "replv_qh AT, [$con]\n\t"
13280 "dmtc1 AT, $dst\t! replicate4S($con)" %}
13281 ins_encode %{
13282 int val = $con$$constant;
13283 if ( Assembler::is_simm(val, 10)) {
13284 //repl_qh supports 10 bits immediate
13285 __ repl_qh(AT, val);
13286 } else {
13287 __ li32(AT, val);
13288 __ replv_qh(AT, AT);
13289 }
13290 __ dmtc1(AT, $dst$$FloatRegister);
13291 %}
13292 ins_pipe( pipe_mtc1 );
13293 %}
// Replicate short zero: a single dmtc1 of R0 clears all 64 bits of $dst.
13295 instruct Repl4S_zero(vecD dst, immI0 zero) %{
13296 predicate(n->as_Vector()->length() == 4);
13297 match(Set dst (ReplicateS zero));
13298 format %{ "dmtc1 R0, $dst\t! replicate4S zero" %}
13299 ins_encode %{
13300 __ dmtc1(R0, $dst$$FloatRegister);
13301 %}
13302 ins_pipe( pipe_mtc1 );
13303 %}
// Replicate short -1: nor(AT, R0, R0) produces all-ones in AT, which fills
// every lane of $dst via dmtc1.
13305 instruct Repl4S_M1(vecD dst, immI_M1 M1) %{
13306 predicate(n->as_Vector()->length() == 4);
13307 match(Set dst (ReplicateS M1));
13308 format %{ "dmtc1 -1, $dst\t! replicate4S -1" %}
13309 ins_encode %{
13310 __ nor(AT, R0, R0);
13311 __ dmtc1(AT, $dst$$FloatRegister);
13312 %}
13313 ins_pipe( pipe_mtc1 );
13314 %}
13316 // Replicate integer (4 byte) scalar to be vector
// Replicate an int from a GP register into both 32-bit halves of a vecD.
// dins writes bits 0..31 of AT and dinsu writes bits 32..63, so AT is fully
// defined by the pair even though it is not initialized beforehand.
13317 instruct Repl2I(vecD dst, mRegI src) %{
13318 predicate(n->as_Vector()->length() == 2);
13319 match(Set dst (ReplicateI src));
13320 format %{ "dins AT, $src, 0, 32\n\t"
13321 "dinsu AT, $src, 32, 32\n\t"
13322 "dmtc1 AT, $dst\t! replicate2I" %}
13323 ins_encode %{
13324 __ dins(AT, $src$$Register, 0, 32);
13325 __ dinsu(AT, $src$$Register, 32, 32);
13326 __ dmtc1(AT, $dst$$FloatRegister);
13327 %}
13328 ins_pipe( pipe_mtc1 );
13329 %}
13331 // Replicate integer (4 byte) scalar immediate to be vector by loading from const table.
// Replicate an int immediate into both halves of a vecD: materialize the
// 32-bit constant in AT with li32, duplicate it with replv_pw, then move
// the 64-bit result to $dst.
// NOTE(review): effect(KILL tmp) declares A7 clobbered, but the encoding
// only touches AT -- confirm whether the tmp operand is still needed.
13332 instruct Repl2I_imm(vecD dst, immI con, mA7RegI tmp) %{
13333 predicate(n->as_Vector()->length() == 2);
13334 match(Set dst (ReplicateI con));
13335 effect(KILL tmp);
13336 format %{ "li32 AT, [$con], 32\n\t"
13337 "replv_pw AT, AT\n\t"
13338 "dmtc1 AT, $dst\t! replicate2I($con)" %}
13339 ins_encode %{
13340 int val = $con$$constant;
13341 __ li32(AT, val);
13342 __ replv_pw(AT, AT);
13343 __ dmtc1(AT, $dst$$FloatRegister);
13344 %}
13345 ins_pipe( pipe_mtc1 );
13346 %}
13348 // Replicate integer (4 byte) scalar zero to be vector
// Replicate int zero: a single dmtc1 of R0 clears all 64 bits of $dst.
13349 instruct Repl2I_zero(vecD dst, immI0 zero) %{
13350 predicate(n->as_Vector()->length() == 2);
13351 match(Set dst (ReplicateI zero));
13352 format %{ "dmtc1 R0, $dst\t! replicate2I zero" %}
13353 ins_encode %{
13354 __ dmtc1(R0, $dst$$FloatRegister);
13355 %}
13356 ins_pipe( pipe_mtc1 );
13357 %}
13359 // Replicate integer (4 byte) scalar -1 to be vector
// Replicate int -1: nor(AT, R0, R0) produces all-ones in AT, which fills
// both halves of $dst via dmtc1.
13360 instruct Repl2I_M1(vecD dst, immI_M1 M1) %{
13361 predicate(n->as_Vector()->length() == 2);
13362 match(Set dst (ReplicateI M1));
13363 format %{ "dmtc1 -1, $dst\t! replicate2I -1, use AT" %}
13364 ins_encode %{
13365 __ nor(AT, R0, R0);
13366 __ dmtc1(AT, $dst$$FloatRegister);
13367 %}
13368 ins_pipe( pipe_mtc1 );
13369 %}
13371 // Replicate float (4 byte) scalar to be vector
// Replicate a float into both halves of a vecD using cvt.ps.s with the same
// source for both operands, packing $src into a paired-single value.
13372 instruct Repl2F(vecD dst, regF src) %{
13373 predicate(n->as_Vector()->length() == 2);
13374 match(Set dst (ReplicateF src));
13375 format %{ "cvt.ps $dst, $src, $src\t! replicate2F" %}
13376 ins_encode %{
13377 __ cvt_ps_s($dst$$FloatRegister, $src$$FloatRegister, $src$$FloatRegister);
13378 %}
13379 ins_pipe( pipe_slow );
13380 %}
13382 // Replicate float (4 byte) scalar zero to be vector
// Replicate float zero: a single dmtc1 of R0 clears all 64 bits of $dst,
// which is +0.0f in both packed lanes.
13383 instruct Repl2F_zero(vecD dst, immF0 zero) %{
13384 predicate(n->as_Vector()->length() == 2);
13385 match(Set dst (ReplicateF zero));
13386 format %{ "dmtc1 R0, $dst\t! replicate2F zero" %}
13387 ins_encode %{
13388 __ dmtc1(R0, $dst$$FloatRegister);
13389 %}
13390 ins_pipe( pipe_mtc1 );
13391 %}
13394 // ====================VECTOR ARITHMETIC=======================================
13396 // --------------------------------- ADD --------------------------------------
13398 // Floats vector add
// Packed-single add, two-operand form (dst += src) via add.ps.
// NOTE(review): this uses pipe_slow while the three-operand vadd2F3 uses
// fpu_regF_regF -- confirm which pipeline class is intended.
13399 instruct vadd2F(vecD dst, vecD src) %{
13400 predicate(n->as_Vector()->length() == 2);
13401 match(Set dst (AddVF dst src));
13402 format %{ "add.ps $dst,$src\t! add packed2F" %}
13403 ins_encode %{
13404 __ add_ps($dst$$FloatRegister, $dst$$FloatRegister, $src$$FloatRegister);
13405 %}
13406 ins_pipe( pipe_slow );
13407 %}
// Packed-single add, three-operand form (dst = src1 + src2) via add.ps.
13409 instruct vadd2F3(vecD dst, vecD src1, vecD src2) %{
13410 predicate(n->as_Vector()->length() == 2);
13411 match(Set dst (AddVF src1 src2));
13412 format %{ "add.ps $dst,$src1,$src2\t! add packed2F" %}
13413 ins_encode %{
13414 __ add_ps($dst$$FloatRegister, $src1$$FloatRegister, $src2$$FloatRegister);
13415 %}
13416 ins_pipe( fpu_regF_regF );
13417 %}
13419 // --------------------------------- SUB --------------------------------------
13421 // Floats vector sub
// Packed-single subtract, two-operand form (dst -= src) via sub.ps.
13422 instruct vsub2F(vecD dst, vecD src) %{
13423 predicate(n->as_Vector()->length() == 2);
13424 match(Set dst (SubVF dst src));
13425 format %{ "sub.ps $dst,$src\t! sub packed2F" %}
13426 ins_encode %{
13427 __ sub_ps($dst$$FloatRegister, $dst$$FloatRegister, $src$$FloatRegister);
13428 %}
13429 ins_pipe( fpu_regF_regF );
13430 %}
13432 // --------------------------------- MUL --------------------------------------
13434 // Floats vector mul
// Packed-single multiply, two-operand form (dst *= src) via mul.ps.
13435 instruct vmul2F(vecD dst, vecD src) %{
13436 predicate(n->as_Vector()->length() == 2);
13437 match(Set dst (MulVF dst src));
13438 format %{ "mul.ps $dst, $src\t! mul packed2F" %}
13439 ins_encode %{
13440 __ mul_ps($dst$$FloatRegister, $dst$$FloatRegister, $src$$FloatRegister);
13441 %}
13442 ins_pipe( fpu_regF_regF );
13443 %}
// Packed-single multiply, three-operand form (dst = src1 * src2) via mul.ps.
13445 instruct vmul2F3(vecD dst, vecD src1, vecD src2) %{
13446 predicate(n->as_Vector()->length() == 2);
13447 match(Set dst (MulVF src1 src2));
13448 format %{ "mul.ps $dst, $src1, $src2\t! mul packed2F" %}
13449 ins_encode %{
13450 __ mul_ps($dst$$FloatRegister, $src1$$FloatRegister, $src2$$FloatRegister);
13451 %}
13452 ins_pipe( fpu_regF_regF );
13453 %}
13455 // --------------------------------- DIV --------------------------------------
13456 // MIPS do not have div.ps
13459 //----------PEEPHOLE RULES-----------------------------------------------------
13460 // These must follow all instruction definitions as they use the names
13461 // defined in the instructions definitions.
13462 //
13463 // peepmatch ( root_instr_name [preceding_instruction]* );
13464 //
13465 // peepconstraint %{
13466 // (instruction_number.operand_name relational_op instruction_number.operand_name
13467 // [, ...] );
13468 // // instruction numbers are zero-based using left to right order in peepmatch
13469 //
13470 // peepreplace ( instr_name ( [instruction_number.operand_name]* ) );
13471 // // provide an instruction_number.operand_name for each operand that appears
13472 // // in the replacement instruction's match rule
13473 //
13474 // ---------VM FLAGS---------------------------------------------------------
13475 //
13476 // All peephole optimizations can be turned off using -XX:-OptoPeephole
13477 //
13478 // Each peephole rule is given an identifying number starting with zero and
13479 // increasing by one in the order seen by the parser. An individual peephole
13480 // can be enabled, and all others disabled, by using -XX:OptoPeepholeAt=#
13481 // on the command-line.
13482 //
13483 // ---------CURRENT LIMITATIONS----------------------------------------------
13484 //
13485 // Only match adjacent instructions in same basic block
13486 // Only equality constraints
13487 // Only constraints between operands, not (0.dest_reg == EAX_enc)
13488 // Only one replacement instruction
13489 //
13490 // ---------EXAMPLE----------------------------------------------------------
13491 //
13492 // // pertinent parts of existing instructions in architecture description
13493 // instruct movI(eRegI dst, eRegI src) %{
13494 // match(Set dst (CopyI src));
13495 // %}
13496 //
13497 // instruct incI_eReg(eRegI dst, immI1 src, eFlagsReg cr) %{
13498 // match(Set dst (AddI dst src));
13499 // effect(KILL cr);
13500 // %}
13501 //
13502 // // Change (inc mov) to lea
13503 // peephole %{
13504 // // increment preceded by register-register move
13505 // peepmatch ( incI_eReg movI );
13506 // // require that the destination register of the increment
13507 // // match the destination register of the move
13508 // peepconstraint ( 0.dst == 1.dst );
13509 // // construct a replacement instruction that sets
13510 // // the destination to ( move's source register + one )
13511 // peepreplace ( leaI_eReg_immI( 0.dst 1.src 0.src ) );
13512 // %}
13513 //
13514 // Implementation no longer uses movX instructions since
13515 // machine-independent system no longer uses CopyX nodes.
13516 //
13517 // peephole %{
13518 // peepmatch ( incI_eReg movI );
13519 // peepconstraint ( 0.dst == 1.dst );
13520 // peepreplace ( leaI_eReg_immI( 0.dst 1.src 0.src ) );
13521 // %}
13522 //
13523 // peephole %{
13524 // peepmatch ( decI_eReg movI );
13525 // peepconstraint ( 0.dst == 1.dst );
13526 // peepreplace ( leaI_eReg_immI( 0.dst 1.src 0.src ) );
13527 // %}
13528 //
13529 // peephole %{
13530 // peepmatch ( addI_eReg_imm movI );
13531 // peepconstraint ( 0.dst == 1.dst );
13532 // peepreplace ( leaI_eReg_immI( 0.dst 1.src 0.src ) );
13533 // %}
13534 //
13535 // peephole %{
13536 // peepmatch ( addP_eReg_imm movP );
13537 // peepconstraint ( 0.dst == 1.dst );
13538 // peepreplace ( leaP_eReg_immI( 0.dst 1.src 0.src ) );
13539 // %}
13541 // // Change load of spilled value to only a spill
13542 // instruct storeI(memory mem, eRegI src) %{
13543 // match(Set mem (StoreI mem src));
13544 // %}
13545 //
13546 // instruct loadI(eRegI dst, memory mem) %{
13547 // match(Set dst (LoadI mem));
13548 // %}
13549 //
13550 //peephole %{
13551 // peepmatch ( loadI storeI );
13552 // peepconstraint ( 1.src == 0.dst, 1.mem == 0.mem );
13553 // peepreplace ( storeI( 1.mem 1.mem 1.src ) );
13554 //%}
13556 //----------SMARTSPILL RULES---------------------------------------------------
13557 // These must follow all instruction definitions as they use the names
13558 // defined in the instructions definitions.