Tue, 28 Feb 2017 11:35:32 -0500
[C2] Remove storeImmP and add storeImmP0 in mips_64.ad
1 //
2 // Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved.
3 // Copyright (c) 2015, 2016, Loongson Technology. All rights reserved.
4 // DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5 //
6 // This code is free software; you can redistribute it and/or modify it
7 // under the terms of the GNU General Public License version 2 only, as
8 // published by the Free Software Foundation.
9 //
10 // This code is distributed in the hope that it will be useful, but WITHOUT
11 // ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 // FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
13 // version 2 for more details (a copy is included in the LICENSE file that
14 // accompanied this code).
15 //
16 // You should have received a copy of the GNU General Public License version
17 // 2 along with this work; if not, write to the Free Software Foundation,
18 // Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
19 //
20 // Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
21 // or visit www.oracle.com if you need additional information or have any
22 // questions.
23 //
24 //
// Godson3 Architecture Description File
28 //----------REGISTER DEFINITION BLOCK------------------------------------------
29 // This information is used by the matcher and the register allocator to
30 // describe individual registers and classes of registers within the target
// architecture.
33 // format:
34 // reg_def name (call convention, c-call convention, ideal type, encoding);
35 // call convention :
36 // NS = No-Save
37 // SOC = Save-On-Call
38 // SOE = Save-On-Entry
39 // AS = Always-Save
40 // ideal type :
41 // see opto/opcodes.hpp for more info
42 // reg_class name (reg, ...);
43 // alloc_class name (reg, ...);
register %{
// General Registers
// Integer Registers
// Each 64-bit GPR is described to the allocator as a (low, high) pair:
// <name> is the low 32 bits and <name>_H the high 32 bits, sharing one
// hardware encoding.  R0 is the architectural zero register and is marked
// Bad() so it is never allocated.
reg_def R0 ( NS, NS, Op_RegI, 0, VMRegImpl::Bad());
reg_def AT ( NS, NS, Op_RegI, 1, AT->as_VMReg());
reg_def AT_H ( NS, NS, Op_RegI, 1, AT->as_VMReg()->next());
reg_def V0 (SOC, SOC, Op_RegI, 2, V0->as_VMReg());
reg_def V0_H (SOC, SOC, Op_RegI, 2, V0->as_VMReg()->next());
reg_def V1 (SOC, SOC, Op_RegI, 3, V1->as_VMReg());
reg_def V1_H (SOC, SOC, Op_RegI, 3, V1->as_VMReg()->next());
reg_def A0 (SOC, SOC, Op_RegI, 4, A0->as_VMReg());
reg_def A0_H (SOC, SOC, Op_RegI, 4, A0->as_VMReg()->next());
reg_def A1 (SOC, SOC, Op_RegI, 5, A1->as_VMReg());
reg_def A1_H (SOC, SOC, Op_RegI, 5, A1->as_VMReg()->next());
reg_def A2 (SOC, SOC, Op_RegI, 6, A2->as_VMReg());
reg_def A2_H (SOC, SOC, Op_RegI, 6, A2->as_VMReg()->next());
reg_def A3 (SOC, SOC, Op_RegI, 7, A3->as_VMReg());
reg_def A3_H (SOC, SOC, Op_RegI, 7, A3->as_VMReg()->next());
reg_def A4 (SOC, SOC, Op_RegI, 8, A4->as_VMReg());
reg_def A4_H (SOC, SOC, Op_RegI, 8, A4->as_VMReg()->next());
reg_def A5 (SOC, SOC, Op_RegI, 9, A5->as_VMReg());
reg_def A5_H (SOC, SOC, Op_RegI, 9, A5->as_VMReg()->next());
reg_def A6 (SOC, SOC, Op_RegI, 10, A6->as_VMReg());
reg_def A6_H (SOC, SOC, Op_RegI, 10, A6->as_VMReg()->next());
reg_def A7 (SOC, SOC, Op_RegI, 11, A7->as_VMReg());
reg_def A7_H (SOC, SOC, Op_RegI, 11, A7->as_VMReg()->next());
reg_def T0 (SOC, SOC, Op_RegI, 12, T0->as_VMReg());
reg_def T0_H (SOC, SOC, Op_RegI, 12, T0->as_VMReg()->next());
reg_def T1 (SOC, SOC, Op_RegI, 13, T1->as_VMReg());
reg_def T1_H (SOC, SOC, Op_RegI, 13, T1->as_VMReg()->next());
reg_def T2 (SOC, SOC, Op_RegI, 14, T2->as_VMReg());
reg_def T2_H (SOC, SOC, Op_RegI, 14, T2->as_VMReg()->next());
reg_def T3 (SOC, SOC, Op_RegI, 15, T3->as_VMReg());
reg_def T3_H (SOC, SOC, Op_RegI, 15, T3->as_VMReg()->next());
// S0..S7 are callee-saved per the MIPS o64/n64 ABI: Save-On-Entry for the
// C calling convention, Save-On-Call for compiled Java code.
reg_def S0 (SOC, SOE, Op_RegI, 16, S0->as_VMReg());
reg_def S0_H (SOC, SOE, Op_RegI, 16, S0->as_VMReg()->next());
reg_def S1 (SOC, SOE, Op_RegI, 17, S1->as_VMReg());
reg_def S1_H (SOC, SOE, Op_RegI, 17, S1->as_VMReg()->next());
reg_def S2 (SOC, SOE, Op_RegI, 18, S2->as_VMReg());
reg_def S2_H (SOC, SOE, Op_RegI, 18, S2->as_VMReg()->next());
reg_def S3 (SOC, SOE, Op_RegI, 19, S3->as_VMReg());
reg_def S3_H (SOC, SOE, Op_RegI, 19, S3->as_VMReg()->next());
reg_def S4 (SOC, SOE, Op_RegI, 20, S4->as_VMReg());
reg_def S4_H (SOC, SOE, Op_RegI, 20, S4->as_VMReg()->next());
reg_def S5 (SOC, SOE, Op_RegI, 21, S5->as_VMReg());
reg_def S5_H (SOC, SOE, Op_RegI, 21, S5->as_VMReg()->next());
reg_def S6 (SOC, SOE, Op_RegI, 22, S6->as_VMReg());
reg_def S6_H (SOC, SOE, Op_RegI, 22, S6->as_VMReg()->next());
reg_def S7 (SOC, SOE, Op_RegI, 23, S7->as_VMReg());
reg_def S7_H (SOC, SOE, Op_RegI, 23, S7->as_VMReg()->next());
reg_def T8 (SOC, SOC, Op_RegI, 24, T8->as_VMReg());
reg_def T8_H (SOC, SOC, Op_RegI, 24, T8->as_VMReg()->next());
reg_def T9 (SOC, SOC, Op_RegI, 25, T9->as_VMReg());
reg_def T9_H (SOC, SOC, Op_RegI, 25, T9->as_VMReg()->next());

// Special Registers
// K0/K1 are reserved for the OS kernel; GP/SP/FP/RA are never allocatable
// (No-Save), they are managed explicitly by the generated code.
reg_def K0 ( NS, NS, Op_RegI, 26, K0->as_VMReg());
reg_def K1 ( NS, NS, Op_RegI, 27, K1->as_VMReg());
reg_def GP ( NS, NS, Op_RegI, 28, GP->as_VMReg());
reg_def GP_H ( NS, NS, Op_RegI, 28, GP->as_VMReg()->next());
reg_def SP ( NS, NS, Op_RegI, 29, SP->as_VMReg());
reg_def SP_H ( NS, NS, Op_RegI, 29, SP->as_VMReg()->next());
reg_def FP ( NS, NS, Op_RegI, 30, FP->as_VMReg());
reg_def FP_H ( NS, NS, Op_RegI, 30, FP->as_VMReg()->next());
reg_def RA ( NS, NS, Op_RegI, 31, RA->as_VMReg());
reg_def RA_H ( NS, NS, Op_RegI, 31, RA->as_VMReg()->next());
// Floating registers.
// As with the GPRs, each 64-bit FPR is modelled as a (low, high) pair so a
// double occupies <Fn>, <Fn>_H.  All FP registers are caller-saved (SOC).
reg_def F0 ( SOC, SOC, Op_RegF, 0, F0->as_VMReg());
reg_def F0_H ( SOC, SOC, Op_RegF, 0, F0->as_VMReg()->next());
reg_def F1 ( SOC, SOC, Op_RegF, 1, F1->as_VMReg());
reg_def F1_H ( SOC, SOC, Op_RegF, 1, F1->as_VMReg()->next());
reg_def F2 ( SOC, SOC, Op_RegF, 2, F2->as_VMReg());
reg_def F2_H ( SOC, SOC, Op_RegF, 2, F2->as_VMReg()->next());
reg_def F3 ( SOC, SOC, Op_RegF, 3, F3->as_VMReg());
reg_def F3_H ( SOC, SOC, Op_RegF, 3, F3->as_VMReg()->next());
reg_def F4 ( SOC, SOC, Op_RegF, 4, F4->as_VMReg());
reg_def F4_H ( SOC, SOC, Op_RegF, 4, F4->as_VMReg()->next());
reg_def F5 ( SOC, SOC, Op_RegF, 5, F5->as_VMReg());
reg_def F5_H ( SOC, SOC, Op_RegF, 5, F5->as_VMReg()->next());
reg_def F6 ( SOC, SOC, Op_RegF, 6, F6->as_VMReg());
reg_def F6_H ( SOC, SOC, Op_RegF, 6, F6->as_VMReg()->next());
reg_def F7 ( SOC, SOC, Op_RegF, 7, F7->as_VMReg());
reg_def F7_H ( SOC, SOC, Op_RegF, 7, F7->as_VMReg()->next());
reg_def F8 ( SOC, SOC, Op_RegF, 8, F8->as_VMReg());
reg_def F8_H ( SOC, SOC, Op_RegF, 8, F8->as_VMReg()->next());
reg_def F9 ( SOC, SOC, Op_RegF, 9, F9->as_VMReg());
reg_def F9_H ( SOC, SOC, Op_RegF, 9, F9->as_VMReg()->next());
reg_def F10 ( SOC, SOC, Op_RegF, 10, F10->as_VMReg());
reg_def F10_H ( SOC, SOC, Op_RegF, 10, F10->as_VMReg()->next());
reg_def F11 ( SOC, SOC, Op_RegF, 11, F11->as_VMReg());
reg_def F11_H ( SOC, SOC, Op_RegF, 11, F11->as_VMReg()->next());
reg_def F12 ( SOC, SOC, Op_RegF, 12, F12->as_VMReg());
reg_def F12_H ( SOC, SOC, Op_RegF, 12, F12->as_VMReg()->next());
reg_def F13 ( SOC, SOC, Op_RegF, 13, F13->as_VMReg());
reg_def F13_H ( SOC, SOC, Op_RegF, 13, F13->as_VMReg()->next());
reg_def F14 ( SOC, SOC, Op_RegF, 14, F14->as_VMReg());
reg_def F14_H ( SOC, SOC, Op_RegF, 14, F14->as_VMReg()->next());
reg_def F15 ( SOC, SOC, Op_RegF, 15, F15->as_VMReg());
reg_def F15_H ( SOC, SOC, Op_RegF, 15, F15->as_VMReg()->next());
reg_def F16 ( SOC, SOC, Op_RegF, 16, F16->as_VMReg());
reg_def F16_H ( SOC, SOC, Op_RegF, 16, F16->as_VMReg()->next());
reg_def F17 ( SOC, SOC, Op_RegF, 17, F17->as_VMReg());
reg_def F17_H ( SOC, SOC, Op_RegF, 17, F17->as_VMReg()->next());
reg_def F18 ( SOC, SOC, Op_RegF, 18, F18->as_VMReg());
reg_def F18_H ( SOC, SOC, Op_RegF, 18, F18->as_VMReg()->next());
reg_def F19 ( SOC, SOC, Op_RegF, 19, F19->as_VMReg());
reg_def F19_H ( SOC, SOC, Op_RegF, 19, F19->as_VMReg()->next());
reg_def F20 ( SOC, SOC, Op_RegF, 20, F20->as_VMReg());
reg_def F20_H ( SOC, SOC, Op_RegF, 20, F20->as_VMReg()->next());
reg_def F21 ( SOC, SOC, Op_RegF, 21, F21->as_VMReg());
reg_def F21_H ( SOC, SOC, Op_RegF, 21, F21->as_VMReg()->next());
reg_def F22 ( SOC, SOC, Op_RegF, 22, F22->as_VMReg());
reg_def F22_H ( SOC, SOC, Op_RegF, 22, F22->as_VMReg()->next());
reg_def F23 ( SOC, SOC, Op_RegF, 23, F23->as_VMReg());
reg_def F23_H ( SOC, SOC, Op_RegF, 23, F23->as_VMReg()->next());
reg_def F24 ( SOC, SOC, Op_RegF, 24, F24->as_VMReg());
reg_def F24_H ( SOC, SOC, Op_RegF, 24, F24->as_VMReg()->next());
reg_def F25 ( SOC, SOC, Op_RegF, 25, F25->as_VMReg());
reg_def F25_H ( SOC, SOC, Op_RegF, 25, F25->as_VMReg()->next());
reg_def F26 ( SOC, SOC, Op_RegF, 26, F26->as_VMReg());
reg_def F26_H ( SOC, SOC, Op_RegF, 26, F26->as_VMReg()->next());
reg_def F27 ( SOC, SOC, Op_RegF, 27, F27->as_VMReg());
reg_def F27_H ( SOC, SOC, Op_RegF, 27, F27->as_VMReg()->next());
reg_def F28 ( SOC, SOC, Op_RegF, 28, F28->as_VMReg());
reg_def F28_H ( SOC, SOC, Op_RegF, 28, F28->as_VMReg()->next());
reg_def F29 ( SOC, SOC, Op_RegF, 29, F29->as_VMReg());
reg_def F29_H ( SOC, SOC, Op_RegF, 29, F29->as_VMReg()->next());
reg_def F30 ( SOC, SOC, Op_RegF, 30, F30->as_VMReg());
reg_def F30_H ( SOC, SOC, Op_RegF, 30, F30->as_VMReg()->next());
reg_def F31 ( SOC, SOC, Op_RegF, 31, F31->as_VMReg());
reg_def F31_H ( SOC, SOC, Op_RegF, 31, F31->as_VMReg()->next());

// ----------------------------
// Special Registers
// Condition Codes Flag Registers
// Pseudo flag register used by the matcher for condition codes; MIPS has no
// architectural flags register, so this maps onto GPR encoding 1.
reg_def MIPS_FLAG (SOC, SOC, Op_RegFlags, 1, as_Register(1)->as_VMReg());
183 //S6 is used for get_thread(S6)
//S5 is used for heapbase of compressed oop
185 alloc_class chunk0(
186 S7, S7_H,
187 S0, S0_H,
188 S1, S1_H,
189 S2, S2_H,
190 S4, S4_H,
191 S5, S5_H,
192 S6, S6_H,
193 S3, S3_H,
194 T2, T2_H,
195 T3, T3_H,
196 T8, T8_H,
197 T9, T9_H,
198 T1, T1_H, // inline_cache_reg
199 V1, V1_H,
200 A7, A7_H,
201 A6, A6_H,
202 A5, A5_H,
203 A4, A4_H,
204 V0, V0_H,
205 A3, A3_H,
206 A2, A2_H,
207 A1, A1_H,
208 A0, A0_H,
209 T0, T0_H,
210 GP, GP_H
211 RA, RA_H,
212 SP, SP_H, // stack_pointer
213 FP, FP_H // frame_pointer
214 );
// Allocation order for the floating point registers.
alloc_class chunk1( F0, F0_H,
    F1, F1_H,
    F2, F2_H,
    F3, F3_H,
    F4, F4_H,
    F5, F5_H,
    F6, F6_H,
    F7, F7_H,
    F8, F8_H,
    F9, F9_H,
    F10, F10_H,
    F11, F11_H,
    F20, F20_H,
    F21, F21_H,
    F22, F22_H,
    F23, F23_H,
    F24, F24_H,
    F25, F25_H,
    F26, F26_H,
    F27, F27_H,
    F28, F28_H,
    F19, F19_H,
    F18, F18_H,
    F17, F17_H,
    F16, F16_H,
    F15, F15_H,
    F14, F14_H,
    F13, F13_H,
    F12, F12_H,
    F29, F29_H,
    F30, F30_H,
    F31, F31_H);

// The pseudo condition-code register gets its own allocation chunk.
alloc_class chunk2(MIPS_FLAG);
// Single-register and register-pair classes, used by operands that must be
// pinned to one specific machine register (e.g. calling-convention operands).
reg_class s_reg( S0, S1, S2, S3, S4, S5, S6, S7 );
reg_class s0_reg( S0 );
reg_class s1_reg( S1 );
reg_class s2_reg( S2 );
reg_class s3_reg( S3 );
reg_class s4_reg( S4 );
reg_class s5_reg( S5 );
reg_class s6_reg( S6 );
reg_class s7_reg( S7 );

reg_class t_reg( T0, T1, T2, T3, T8, T9 );
reg_class t0_reg( T0 );
reg_class t1_reg( T1 );
reg_class t2_reg( T2 );
reg_class t3_reg( T3 );
reg_class t8_reg( T8 );
reg_class t9_reg( T9 );

reg_class a_reg( A0, A1, A2, A3, A4, A5, A6, A7 );
reg_class a0_reg( A0 );
reg_class a1_reg( A1 );
reg_class a2_reg( A2 );
reg_class a3_reg( A3 );
reg_class a4_reg( A4 );
reg_class a5_reg( A5 );
reg_class a6_reg( A6 );
reg_class a7_reg( A7 );

reg_class v0_reg( V0 );
reg_class v1_reg( V1 );

reg_class sp_reg( SP, SP_H );
reg_class fp_reg( FP, FP_H );

reg_class mips_flags(MIPS_FLAG);

// 64-bit (low/high pair) variants of the single-register classes above.
reg_class v0_long_reg( V0, V0_H );
reg_class v1_long_reg( V1, V1_H );
reg_class a0_long_reg( A0, A0_H );
reg_class a1_long_reg( A1, A1_H );
reg_class a2_long_reg( A2, A2_H );
reg_class a3_long_reg( A3, A3_H );
reg_class a4_long_reg( A4, A4_H );
reg_class a5_long_reg( A5, A5_H );
reg_class a6_long_reg( A6, A6_H );
reg_class a7_long_reg( A7, A7_H );
reg_class t0_long_reg( T0, T0_H );
reg_class t1_long_reg( T1, T1_H );
reg_class t2_long_reg( T2, T2_H );
reg_class t3_long_reg( T3, T3_H );
reg_class t8_long_reg( T8, T8_H );
reg_class t9_long_reg( T9, T9_H );
reg_class s0_long_reg( S0, S0_H );
reg_class s1_long_reg( S1, S1_H );
reg_class s2_long_reg( S2, S2_H );
reg_class s3_long_reg( S3, S3_H );
reg_class s4_long_reg( S4, S4_H );
reg_class s5_long_reg( S5, S5_H );
reg_class s6_long_reg( S6, S6_H );
reg_class s7_long_reg( S7, S7_H );
// General-purpose int class.  Excludes S5 (heapbase), S6 (TLS), T9/V1 and
// the reserved/special registers -- see the alloc_class comments above.
reg_class int_reg( S7, S0, S1, S2, S4, S3, T8, T2, T3, T1, V1, A7, A6, A5, A4, V0, A3, A2, A1, A0, T0 );

// As int_reg but without the argument registers A0..A7 (for use around calls).
reg_class no_Ax_int_reg( S7, S0, S1, S2, S4, S3, T8, T2, T3, T1, V1, V0, T0 );

// Pointer class: 64-bit low/high pairs of the allocatable GPRs.
reg_class p_reg(
    S7, S7_H,
    S0, S0_H,
    S1, S1_H,
    S2, S2_H,
    S4, S4_H,
    S3, S3_H,
    T8, T8_H,
    T2, T2_H,
    T3, T3_H,
    T1, T1_H,
    A7, A7_H,
    A6, A6_H,
    A5, A5_H,
    A4, A4_H,
    A3, A3_H,
    A2, A2_H,
    A1, A1_H,
    A0, A0_H,
    T0, T0_H
    );

// Pointer class excluding T8 (T8 is needed as a scratch register by some
// instruction encodings).
reg_class no_T8_p_reg(
    S7, S7_H,
    S0, S0_H,
    S1, S1_H,
    S2, S2_H,
    S4, S4_H,
    S3, S3_H,
    T2, T2_H,
    T3, T3_H,
    T1, T1_H,
    A7, A7_H,
    A6, A6_H,
    A5, A5_H,
    A4, A4_H,
    A3, A3_H,
    A2, A2_H,
    A1, A1_H,
    A0, A0_H,
    T0, T0_H
    );

// Long class: identical register set to p_reg.
reg_class long_reg(
    S7, S7_H,
    S0, S0_H,
    S1, S1_H,
    S2, S2_H,
    S4, S4_H,
    S3, S3_H,
    T8, T8_H,
    T2, T2_H,
    T3, T3_H,
    T1, T1_H,
    A7, A7_H,
    A6, A6_H,
    A5, A5_H,
    A4, A4_H,
    A3, A3_H,
    A2, A2_H,
    A1, A1_H,
    A0, A0_H,
    T0, T0_H
    );
382 // Floating point registers.
383 // 2012/8/23 Fu: F30/F31 are used as temporary registers in D2I
384 // 2016/12/1 aoqi: F31 are not used as temporary registers in D2I
385 reg_class flt_reg( F0, F1, F2, F3, F4, F5, F6, F7, F8, F9, F10, F11, F12, F13, F14, F15, F16, F17 F18, F19, F20, F21, F22, F23, F24, F25, F26, F27, F28, F29, F31);
// Double class: low/high pairs; F30 excluded (scratch for D2I, see above).
reg_class dbl_reg( F0, F0_H,
    F1, F1_H,
    F2, F2_H,
    F3, F3_H,
    F4, F4_H,
    F5, F5_H,
    F6, F6_H,
    F7, F7_H,
    F8, F8_H,
    F9, F9_H,
    F10, F10_H,
    F11, F11_H,
    F12, F12_H,
    F13, F13_H,
    F14, F14_H,
    F15, F15_H,
    F16, F16_H,
    F17, F17_H,
    F18, F18_H,
    F19, F19_H,
    F20, F20_H,
    F21, F21_H,
    F22, F22_H,
    F23, F23_H,
    F24, F24_H,
    F25, F25_H,
    F26, F26_H,
    F27, F27_H,
    F28, F28_H,
    F29, F29_H,
    F31, F31_H);

// FP argument registers per the calling convention.
reg_class flt_arg0( F12 );
reg_class dbl_arg0( F12, F12_H );
reg_class dbl_arg1( F14, F14_H );

%}
//----------DEFINITION BLOCK---------------------------------------------------
// Define name --> value mappings to inform the ADLC of an integer valued name
// Current support includes integer values in the range [0, 0x7FFFFFFF]
// Format:
//        int_def  <name>         ( <int_value>, <expression>);
// Generated Code in ad_<arch>.hpp
//        #define  <name>   (<expression>)
//        // value == <int_value>
// Generated code in ad_<arch>.cpp adlc_verification()
//        assert( <name> == <int_value>, "Expect (<expression>) to equal <int_value>");
//
definitions %{
// Relative instruction costs used by the instruction scheduler/selector.
  int_def DEFAULT_COST      (    100,     100);
  int_def HUGE_COST         (1000000, 1000000);

  // Memory refs are twice as expensive as run-of-the-mill.
  int_def MEMORY_REF_COST   (    200, DEFAULT_COST * 2);

  // Branches are even more expensive.
  int_def BRANCH_COST       (    300, DEFAULT_COST * 3);
  // we use jr instruction to construct call, so more expensive
  // by yjl 2/28/2006
  int_def CALL_COST         (    500, DEFAULT_COST * 5);
/*
        int_def EQUAL             (      1, 1  );
        int_def NOT_EQUAL         (      2, 2  );
        int_def GREATER           (      3, 3  );
        int_def GREATER_EQUAL     (      4, 4  );
        int_def LESS              (      5, 5  );
        int_def LESS_EQUAL        (      6, 6  );
*/
%}
//----------SOURCE BLOCK-------------------------------------------------------
// This is a block of C++ code which provides values, functions, and
// definitions necessary in the rest of the architecture description

source_hpp %{
// Header information of the source block.
// Method declarations/definitions which are used outside
// the ad-scope can conveniently be defined here.
//
// To keep related declarations/definitions/uses close together,
// we switch between source %{ }% and source_hpp %{ }% freely as needed.

class CallStubImpl {

  //--------------------------------------------------------------
  //---<  Used for optimization in Compile::shorten_branches  >---
  //--------------------------------------------------------------

 public:
  // Size of call trampoline stub.
  // Trampolines are not used on MIPS, hence size 0.
  static uint size_call_trampoline() {
    return 0; // no call trampolines on this platform
  }

  // number of relocations needed by a call trampoline stub
  static uint reloc_call_trampoline() {
    return 0; // no call trampolines on this platform
  }
};
// Emits the out-of-line exception and deoptimization handler stubs and
// reports their (16-byte aligned) sizes to the code-size accounting.
class HandlerImpl {

 public:

  static int emit_exception_handler(CodeBuffer &cbuf);
  static int emit_deopt_handler(CodeBuffer& cbuf);

  static uint size_exception_handler() {
    // NativeCall instruction size is the same as NativeJump.
    // exception handler starts out as jump and can be patched to
    // a call by deoptimization.  (4932387)
    // Note that this value is also credited (in output.cpp) to
    // the size of the code section.
    //  return NativeJump::instruction_size;
    int size = NativeCall::instruction_size;
    return round_to(size, 16);   // 16-byte aligned so the stub can be patched
  }

#ifdef _LP64
  static uint size_deopt_handler() {
    int size = NativeCall::instruction_size;
    return round_to(size, 16);
  }
#else
  static uint size_deopt_handler() {
    // NativeCall instruction size is the same as NativeJump.
    // exception handler starts out as jump and can be patched to
    // a call by deoptimization.  (4932387)
    // Note that this value is also credited (in output.cpp) to
    // the size of the code section.
    return 5 + NativeJump::instruction_size; // pushl(); jmp;
  }
#endif
};

%} // end source_hpp
source %{

#define NO_INDEX 0
#define RELOC_IMM64 Assembler::imm_operand
#define RELOC_DISP32 Assembler::disp32_operand

#define __ _masm.

// Emit exception handler code.
// Stuff framesize into a register and call a VM stub routine.
// Returns the offset of the handler within the stub section, or 0 if the
// code buffer could not be expanded.
int HandlerImpl::emit_exception_handler(CodeBuffer& cbuf) {
/*
  // Note that the code buffer's insts_mark is always relative to insts.
  // That's why we must use the macroassembler to generate a handler.
  MacroAssembler _masm(&cbuf);
  address base = __ start_a_stub(size_exception_handler());
  if (base == NULL)  return 0;  // CodeBuffer::expand failed
  int offset = __ offset();
  __ jump(RuntimeAddress(OptoRuntime::exception_blob()->entry_point()));
  assert(__ offset() - offset <= (int) size_exception_handler(), "overflow");
  __ end_a_stub();
  return offset;
*/
  // Note that the code buffer's insts_mark is always relative to insts.
  // That's why we must use the macroassembler to generate a handler.
  MacroAssembler _masm(&cbuf);
  address base =
  __ start_a_stub(size_exception_handler());
  if (base == NULL)  return 0;  // CodeBuffer::expand failed
  int offset = __ offset();

  __ block_comment("; emit_exception_handler");

  /* 2012/9/25 FIXME Jin: According to X86, we should use a direct jump.
   *   However, this will trigger an assert after the 40th method:
   *
   *     39  b  java.lang.Throwable::<init> (25 bytes)
   *      --- ns  java.lang.Throwable::fillInStackTrace
   *     40 !b  java.net.URLClassLoader::findClass (29 bytes)
   *     /vm/opto/runtime.cpp, 900 , assert(caller.is_compiled_frame(),"must be")
   *     40   made not entrant  (2)  java.net.URLClassLoader::findClass (29 bytes)
   *
   *   If we change from JR to JALR, the assert will disappear, but WebClient will
   *   fail after the 403th method with unknown reason.
   */
  // Materialize the 48-bit target address in T9 and jump (jr + delay slot).
  __ li48(T9, (long)OptoRuntime::exception_blob()->entry_point());
  __ jr(T9);
  __ delayed()->nop();
  __ align(16);
  assert(__ offset() - offset <= (int) size_exception_handler(), "overflow");
  __ end_a_stub();
  return offset;
}
// Emit deopt handler code.
// Emits a (relocated) jalr through T9 to SharedRuntime::deopt_blob()->unpack().
// Returns the offset of the handler within the stub section, or 0 on failure.
int HandlerImpl::emit_deopt_handler(CodeBuffer& cbuf) {
/*
  // Note that the code buffer's insts_mark is always relative to insts.
  // That's why we must use the macroassembler to generate a handler.
  MacroAssembler _masm(&cbuf);
  address base = __ start_a_stub(size_deopt_handler());
  if (base == NULL)  return 0;  // CodeBuffer::expand failed
  int offset = __ offset();

#ifdef _LP64
  address the_pc = (address) __ pc();
  Label next;
  // push a "the_pc" on the stack without destroying any registers
  // as they all may be live.

  // push address of "next"
  __ call(next, relocInfo::none); // reloc none is fine since it is a disp32
  __ bind(next);
  // adjust it so it matches "the_pc"
  __ subptr(Address(rsp, 0), __ offset() - offset);
#else
  InternalAddress here(__ pc());
  __ pushptr(here.addr());
#endif

  __ jump(RuntimeAddress(SharedRuntime::deopt_blob()->unpack()));
  assert(__ offset() - offset <= (int) size_deopt_handler(), "overflow");
  __ end_a_stub();
  return offset;
*/
  // Note that the code buffer's insts_mark is always relative to insts.
  // That's why we must use the macroassembler to generate a handler.
  MacroAssembler _masm(&cbuf);
  address base =
  __ start_a_stub(size_deopt_handler());

// FIXME
  if (base == NULL)  return 0;  // CodeBuffer::expand failed
  int offset = __ offset();

  __ block_comment("; emit_deopt_handler");

  // jalr (not jr): the return address pushed by the call identifies the
  // deoptimization point.  The runtime_call relocation lets the address be
  // patched when code moves.
  cbuf.set_insts_mark();
  __ relocate(relocInfo::runtime_call_type);

  __ li48(T9, (long)SharedRuntime::deopt_blob()->unpack());
  __ jalr(T9);
  __ delayed()->nop();
  __ align(16);
  assert(__ offset() - offset <= (int) size_deopt_handler(), "overflow");
  __ end_a_stub();
  return offset;
}
638 const bool Matcher::match_rule_supported(int opcode) {
639 if (!has_match_rule(opcode))
640 return false;
642 switch (opcode) {
643 //Op_CountLeadingZerosI Op_CountLeadingZerosL can be deleted, all MIPS CPUs support clz & dclz.
644 case Op_CountLeadingZerosI:
645 case Op_CountLeadingZerosL:
646 if (!UseCountLeadingZerosInstruction)
647 return false;
648 break;
649 case Op_CountTrailingZerosI:
650 case Op_CountTrailingZerosL:
651 if (!UseCountTrailingZerosInstruction)
652 return false;
653 break;
654 }
656 return true; // Per default match rules are supported.
657 }
//FIXME
// emit call stub, compiled java to interpreter
// The stub is emitted into the stub section and later patched when the call
// site is resolved; until then the jump target is the sentinel (address)-1.
void emit_java_to_interp(CodeBuffer &cbuf ) {
  // Stub is fixed up when the corresponding call is converted from calling
  // compiled code to calling interpreted code.
  // mov rbx,0
  // jmp -1

  address mark = cbuf.insts_mark();  // get mark within main instrs section

  // Note that the code buffer's insts_mark is always relative to insts.
  // That's why we must use the macroassembler to generate a stub.
  MacroAssembler _masm(&cbuf);

  address base =
  __ start_a_stub(Compile::MAX_stubs_size);
  if (base == NULL)  return;  // CodeBuffer::expand failed
  // static stub relocation stores the instruction address of the call

  __ relocate(static_stub_Relocation::spec(mark), 0);

  /* 2012/10/29 Jin: Rmethod contains methodOop, it should be relocated for GC */
/*
  int oop_index = __ oop_recorder()->allocate_index(NULL);
  RelocationHolder rspec = oop_Relocation::spec(oop_index);
  __ relocate(rspec);
*/
  // static stub relocation also tags the methodOop in the code-stream.
  // The 0 placeholder is patched with the methodOop at resolution time.
  __ li48(S3, (long)0);
  // This is recognized as unresolved by relocs/nativeInst/ic code

  __ relocate(relocInfo::runtime_call_type);

  cbuf.set_insts_mark();
  address call_pc = (address)-1;   // patched later with the real entry
  __ li48(AT, (long)call_pc);
  __ jr(AT);
  __ nop();
  __ align(16);
  __ end_a_stub();
  // Update current stubs pointer and restore code_end.
}

// size of call stub, compiled java to interpretor
uint size_java_to_interp() {
  int size = 4 * 4 + NativeCall::instruction_size; // sizeof(li48) + NativeCall::instruction_size
  return round_to(size, 16);
}

// relocation entries for call stub, compiled java to interpreter
uint reloc_java_to_interp() {
  return 16;   // in emit_java_to_interp + in Java_Static_Call
}
714 bool Matcher::is_short_branch_offset(int rule, int br_size, int offset) {
715 if( Assembler::is_simm16(offset) ) return true;
716 else
717 {
718 assert(false, "Not implemented yet !" );
719 Unimplemented();
720 }
721 }
// No additional cost for CMOVL.
const int Matcher::long_cmove_cost() { return 0; }

// No CMOVF/CMOVD with SSE2
const int Matcher::float_cmove_cost() { return ConditionalMoveLimit; }

// Does the CPU require late expand (see block.cpp for description of late expand)?
const bool Matcher::require_postalloc_expand = false;

// Should the Matcher clone shifts on addressing modes, expecting them
// to be subsumed into complex addressing expressions or compute them
// into registers?  True for Intel but false for most RISCs
const bool Matcher::clone_shift_expressions = false;

// Do we need to mask the count passed to shift instructions or does
// the cpu only look at the lower 5/6 bits anyway?
const bool Matcher::need_masked_shift_count = false;

bool Matcher::narrow_oop_use_complex_address() {
  NOT_LP64(ShouldNotCallThis());
  assert(UseCompressedOops, "only for compressed oops code");
  return false;
}

bool Matcher::narrow_klass_use_complex_address() {
  NOT_LP64(ShouldNotCallThis());
  assert(UseCompressedClassPointers, "only for compressed klass code");
  return false;
}

// This is UltraSparc specific, true just means we have fast l2f conversion
const bool Matcher::convL2FSupported(void) {
  return true;
}

// Max vector size in bytes. 0 if not supported.
// Only 8-byte (Op_VecD) vectors are supported on this port.
const int Matcher::vector_width_in_bytes(BasicType bt) {
  assert(MaxVectorSize == 8, "");
  return 8;
}

// Vector ideal reg
const int Matcher::vector_ideal_reg(int size) {
  assert(MaxVectorSize == 8, "");
  switch(size) {
    case  8: return Op_VecD;
  }
  ShouldNotReachHere();
  return 0;
}

// Only lowest bits of xmm reg are used for vector shift count.
const int Matcher::vector_shift_count_ideal_reg(int size) {
  fatal("vector shift is not supported");
  return Node::NotAMachineReg;
}

// Limits on vector size (number of elements) loaded into vector.
const int Matcher::max_vector_size(const BasicType bt) {
  assert(is_java_primitive(bt), "only primitive type vectors");
  return vector_width_in_bytes(bt)/type2aelembytes(bt);
}

const int Matcher::min_vector_size(const BasicType bt) {
  return max_vector_size(bt); // Same as max.
}

// MIPS supports misaligned vectors store/load? FIXME
const bool Matcher::misaligned_vectors_ok() {
  return false;
  //return !AlignVector; // can be changed by flag
}
// div/mod are not matched as fused divmod nodes on this port, so the
// projection-mask queries must never be reached.

// Register for DIVI projection of divmodI
RegMask Matcher::divI_proj_mask() {
  ShouldNotReachHere();
  return RegMask();
}

// Register for MODI projection of divmodI
RegMask Matcher::modI_proj_mask() {
  ShouldNotReachHere();
  return RegMask();
}

// Register for DIVL projection of divmodL
RegMask Matcher::divL_proj_mask() {
  ShouldNotReachHere();
  return RegMask();
}

int Matcher::regnum_to_fpu_offset(int regnum) {
  return regnum - 32; // The FP registers are in the second chunk
}

const bool Matcher::isSimpleConstant64(jlong value) {
  // Will one (StoreL ConL) be cheaper than two (StoreI ConI)?.
  return true;
}
// Return whether or not this register is ever used as an argument.  This
// function is used on startup to build the trampoline stubs in generateOptoStub.
// Registers not mentioned will be killed by the VM call in the trampoline, and
// arguments in those registers not be available to the callee.
bool Matcher::can_be_java_arg( int reg ) {
  /* Refer to: [sharedRuntime_mips_64.cpp] SharedRuntime::java_calling_convention() */
  // Integer argument registers: T0 and A0..A7 (each as a low/high pair).
  if (    reg == T0_num || reg == T0_H_num
       || reg == A0_num || reg == A0_H_num
       || reg == A1_num || reg == A1_H_num
       || reg == A2_num || reg == A2_H_num
       || reg == A3_num || reg == A3_H_num
       || reg == A4_num || reg == A4_H_num
       || reg == A5_num || reg == A5_H_num
       || reg == A6_num || reg == A6_H_num
       || reg == A7_num || reg == A7_H_num )
    return true;

  // FP argument registers: F12..F19 (each as a low/high pair).
  if (    reg == F12_num || reg == F12_H_num
       || reg == F13_num || reg == F13_H_num
       || reg == F14_num || reg == F14_H_num
       || reg == F15_num || reg == F15_H_num
       || reg == F16_num || reg == F16_H_num
       || reg == F17_num || reg == F17_H_num
       || reg == F18_num || reg == F18_H_num
       || reg == F19_num || reg == F19_H_num )
    return true;

  return false;
}

bool Matcher::is_spillable_arg( int reg ) {
  return can_be_java_arg(reg);
}

bool Matcher::use_asm_for_ldiv_by_con( jlong divisor ) {
  return false;
}
// Register for MODL projection of divmodL
// Never reached: divmodL is not matched on this port (see masks above).
RegMask Matcher::modL_proj_mask() {
  ShouldNotReachHere();
  return RegMask();
}

const RegMask Matcher::method_handle_invoke_SP_save_mask() {
  return FP_REG_mask();
}

// MIPS doesn't support AES intrinsics
const bool Matcher::pass_original_key_for_aes() {
  return false;
}
// The address of the call instruction needs to be 16-byte aligned to
// ensure that it does not span a cache line so that it can be patched.
// The instruction sequences listed in the comments below show what each call
// node emits; padding is whatever brings current_offset up to alignment.

int CallStaticJavaDirectNode::compute_padding(int current_offset) const {
  //lui
  //ori
  //dsll
  //ori

  //jalr
  //nop

  return round_to(current_offset, alignment_required()) - current_offset;
}

// The address of the call instruction needs to be 16-byte aligned to
// ensure that it does not span a cache line so that it can be patched.
int CallDynamicJavaDirectNode::compute_padding(int current_offset) const {
  //li64 <--- skip  (the 6-instruction IC-klass load precedes the call proper)

  //lui
  //ori
  //dsll
  //ori

  //jalr
  //nop

  current_offset += 4 * 6;   // skip li64
  return round_to(current_offset, alignment_required()) - current_offset;
}

int CallLeafNoFPDirectNode::compute_padding(int current_offset) const {
  //lui
  //ori
  //dsll
  //ori

  //jalr
  //nop

  return round_to(current_offset, alignment_required()) - current_offset;
}

int CallLeafDirectNode::compute_padding(int current_offset) const {
  //lui
  //ori
  //dsll
  //ori

  //jalr
  //nop

  return round_to(current_offset, alignment_required()) - current_offset;
}

int CallRuntimeDirectNode::compute_padding(int current_offset) const {
  //lui
  //ori
  //dsll
  //ori

  //jalr
  //nop

  return round_to(current_offset, alignment_required()) - current_offset;
}
// If CPU can load and store mis-aligned doubles directly then no fixup is
// needed. Else we split the double into 2 integer pieces and move it
// piece-by-piece. Only happens when passing doubles into C code as the
// Java calling convention forces doubles to be aligned.
const bool Matcher::misaligned_doubles_ok = false;

// Do floats take an entire double register or just half?
//const bool Matcher::float_in_double = true;
bool Matcher::float_in_double() { return false; }

// Threshold size for cleararray: below this, ClearArray is expanded inline.
const int Matcher::init_array_short_size = 8 * BytesPerLong;

// Do ints take an entire long register or just half?
const bool Matcher::int_in_long = true;

// Is it better to copy float constants, or load them directly from memory?
// Intel can load a float constant from a direct address, requiring no
// extra registers. Most RISCs will have to materialize an address into a
// register first, so they would do better to copy the constant from stack.
const bool Matcher::rematerialize_float_constants = false;

// Advertise here if the CPU requires explicit rounding operations
// to implement the UseStrictFP mode.
const bool Matcher::strict_fp_requires_explicit_rounding = false;

// NOTE(review): comment inherited from x86 ("ecx parameter to rep stos");
// on MIPS this just means the ClearArray count is in words, not bytes.
const bool Matcher::init_array_count_is_in_bytes = false;
// Indicate if the safepoint node needs the polling page as an input.
// Since MIPS doesn't have absolute addressing, the polling-page address
// must be materialized in a register, so it does.
bool SafePointNode::needs_polling_address_input() {
  return true;
}
// !!!!! Special hack to get all type of calls to specify the byte offset
// from the start of the call to the point where the return address
// will point.
int MachCallStaticJavaNode::ret_addr_offset() {
  assert(NativeCall::instruction_size == 24, "in MachCallStaticJavaNode::ret_addr_offset");
  // The full call sequence (24 bytes):
  //lui
  //ori
  //dsll
  //ori
  //jalr
  //nop
  return NativeCall::instruction_size;
}
992 int MachCallDynamicJavaNode::ret_addr_offset() {
993 /* 2012/9/10 Jin: must be kept in sync with Java_Dynamic_Call */
995 // return NativeCall::instruction_size;
996 assert(NativeCall::instruction_size == 24, "in MachCallDynamicJavaNode::ret_addr_offset");
997 //The value ought to be 4 + 16 bytes.
998 //lui IC_Klass,
999 //ori IC_Klass,
1000 //dsll IC_Klass
1001 //ori IC_Klass
1002 //lui T9
1003 //ori T9
1004 //dsll T9
1005 //ori T9
1006 //jalr T9
1007 //nop
1008 return 6 * 4 + NativeCall::instruction_size;
1010 }
1012 /*
1013 // EMIT_OPCODE()
1014 void emit_opcode(CodeBuffer &cbuf, int code) {
1015 *(cbuf.code_end()) = (unsigned char)code;
1016 cbuf.set_code_end(cbuf.code_end() + 1);
1017 }
1018 */
// Emit a 32-bit datum with an attached relocation.  The relocation is
// recorded at the current instruction mark before the word is emitted.
void emit_d32_reloc(CodeBuffer &cbuf, int d32, relocInfo::relocType reloc,
                    int format) {
  cbuf.relocate(cbuf.insts_mark(), reloc, format);
  cbuf.insts()->emit_int32(d32);
}
1026 //=============================================================================
1028 // Figure out which register class each belongs in: rc_int, rc_float, rc_stack
1029 enum RC { rc_bad, rc_int, rc_float, rc_stack };
1030 static enum RC rc_class( OptoReg::Name reg ) {
1031 if( !OptoReg::is_valid(reg) ) return rc_bad;
1032 if (OptoReg::is_stack(reg)) return rc_stack;
1033 VMReg r = OptoReg::as_VMReg(reg);
1034 if (r->is_Register()) return rc_int;
1035 assert(r->is_FloatRegister(), "must be");
1036 return rc_float;
1037 }
// Shared worker for MachSpillCopyNode::format()/emit()/size().
// Dispatches on the (source, destination) register-class pair — stack, GPR
// or FPR — and on whether the copy is 64-bit (an even/odd adjacent register
// pair) or 32-bit.  When 'cbuf' is non-NULL the move is emitted; otherwise
// (non-PRODUCT) a textual form is printed to 'st' unless 'do_size' is set.
// Returns the size of the emitted code in bytes.
uint MachSpillCopyNode::implementation( CodeBuffer *cbuf, PhaseRegAlloc *ra_, bool do_size, outputStream* st ) const {
  // Get registers to move
  OptoReg::Name src_second = ra_->get_reg_second(in(1));
  OptoReg::Name src_first = ra_->get_reg_first(in(1));
  OptoReg::Name dst_second = ra_->get_reg_second(this );
  OptoReg::Name dst_first = ra_->get_reg_first(this );

  enum RC src_second_rc = rc_class(src_second);
  enum RC src_first_rc = rc_class(src_first);
  enum RC dst_second_rc = rc_class(dst_second);
  enum RC dst_first_rc = rc_class(dst_first);

  assert(OptoReg::is_valid(src_first) && OptoReg::is_valid(dst_first), "must move at least 1 register" );

  // Generate spill code!
  int size = 0;

  if( src_first == dst_first && src_second == dst_second )
    return 0;            // Self copy, no move

  if (src_first_rc == rc_stack) {
    // mem ->
    if (dst_first_rc == rc_stack) {
      // mem -> mem: bounce through scratch register AT (ld/sd or lw/sw pair).
      assert(src_second != dst_first, "overlap");
      if ((src_first & 1) == 0 && src_first + 1 == src_second &&
          (dst_first & 1) == 0 && dst_first + 1 == dst_second) {
        // 64-bit
        int src_offset = ra_->reg2offset(src_first);
        int dst_offset = ra_->reg2offset(dst_first);
        if (cbuf) {
          MacroAssembler _masm(cbuf);
          __ ld(AT, Address(SP, src_offset));
          __ sd(AT, Address(SP, dst_offset));
#ifndef PRODUCT
        } else {
          if(!do_size){
            if (size != 0) st->print("\n\t");
            st->print("ld   AT, [SP + #%d]\t# 64-bit mem-mem spill 1\n\t"
                      "sd   AT, [SP + #%d]",
                      src_offset, dst_offset);
          }
#endif
        }
        size += 8;
      } else {
        // 32-bit
        assert(!((src_first & 1) == 0 && src_first + 1 == src_second), "no transform");
        assert(!((dst_first & 1) == 0 && dst_first + 1 == dst_second), "no transform");
        // No pushl/popl, so:
        int src_offset = ra_->reg2offset(src_first);
        int dst_offset = ra_->reg2offset(dst_first);
        if (cbuf) {
          MacroAssembler _masm(cbuf);
          __ lw(AT, Address(SP, src_offset));
          __ sw(AT, Address(SP, dst_offset));
#ifndef PRODUCT
        } else {
          if(!do_size){
            if (size != 0) st->print("\n\t");
            st->print("lw   AT, [SP + #%d]  spill 2\n\t"
                      "sw   AT, [SP + #%d]\n\t",
                      src_offset, dst_offset);
          }
#endif
        }
        size += 8;
      }
      return size;
    } else if (dst_first_rc == rc_int) {
      // mem -> gpr: ld for a 64-bit pair, lw/lwu (sign- vs zero-extending,
      // chosen by ideal_reg()) for a single 32-bit slot.
      if ((src_first & 1) == 0 && src_first + 1 == src_second &&
          (dst_first & 1) == 0 && dst_first + 1 == dst_second) {
        // 64-bit
        int offset = ra_->reg2offset(src_first);
        if (cbuf) {
          MacroAssembler _masm(cbuf);
          __ ld(as_Register(Matcher::_regEncode[dst_first]), Address(SP, offset));
#ifndef PRODUCT
        } else {
          if(!do_size){
            if (size != 0) st->print("\n\t");
            st->print("ld   %s, [SP + #%d]\t# spill 3",
                      Matcher::regName[dst_first],
                      offset);
          }
#endif
        }
        size += 4;
      } else {
        // 32-bit
        assert(!((src_first & 1) == 0 && src_first + 1 == src_second), "no transform");
        assert(!((dst_first & 1) == 0 && dst_first + 1 == dst_second), "no transform");
        int offset = ra_->reg2offset(src_first);
        if (cbuf) {
          MacroAssembler _masm(cbuf);
          if (this->ideal_reg() == Op_RegI)
            __ lw(as_Register(Matcher::_regEncode[dst_first]), Address(SP, offset));
          else
            __ lwu(as_Register(Matcher::_regEncode[dst_first]), Address(SP, offset));
#ifndef PRODUCT
        } else {
          if(!do_size){
            if (size != 0) st->print("\n\t");
            if (this->ideal_reg() == Op_RegI)
              st->print("lw   %s, [SP + #%d]\t# spill 4",
                        Matcher::regName[dst_first],
                        offset);
            else
              st->print("lwu  %s, [SP + #%d]\t# spill 5",
                        Matcher::regName[dst_first],
                        offset);
          }
#endif
        }
        size += 4;
      }
      return size;
    } else if (dst_first_rc == rc_float) {
      // mem-> xmm  (FPR on MIPS; "xmm" naming inherited from the x86 file)
      if ((src_first & 1) == 0 && src_first + 1 == src_second &&
          (dst_first & 1) == 0 && dst_first + 1 == dst_second) {
        // 64-bit
        int offset = ra_->reg2offset(src_first);
        if (cbuf) {
          MacroAssembler _masm(cbuf);
          __ ldc1( as_FloatRegister(Matcher::_regEncode[dst_first]), Address(SP, offset));
#ifndef PRODUCT
        } else {
          if(!do_size){
            if (size != 0) st->print("\n\t");
            st->print("ldc1 %s, [SP + #%d]\t# spill 6",
                      Matcher::regName[dst_first],
                      offset);
          }
#endif
        }
        size += 4;
      } else {
        // 32-bit
        assert(!((src_first & 1) == 0 && src_first + 1 == src_second), "no transform");
        assert(!((dst_first & 1) == 0 && dst_first + 1 == dst_second), "no transform");
        int offset = ra_->reg2offset(src_first);
        if (cbuf) {
          MacroAssembler _masm(cbuf);
          __ lwc1( as_FloatRegister(Matcher::_regEncode[dst_first]), Address(SP, offset));
#ifndef PRODUCT
        } else {
          if(!do_size){
            if (size != 0) st->print("\n\t");
            st->print("lwc1 %s, [SP + #%d]\t# spill 7",
                      Matcher::regName[dst_first],
                      offset);
          }
#endif
        }
        size += 4;
      }
      return size;
    }
  } else if (src_first_rc == rc_int) {
    // gpr ->
    if (dst_first_rc == rc_stack) {
      // gpr -> mem: sd for a 64-bit pair, sw for a single slot.
      if ((src_first & 1) == 0 && src_first + 1 == src_second &&
          (dst_first & 1) == 0 && dst_first + 1 == dst_second) {
        // 64-bit
        int offset = ra_->reg2offset(dst_first);
        if (cbuf) {
          MacroAssembler _masm(cbuf);
          __ sd(as_Register(Matcher::_regEncode[src_first]), Address(SP, offset));
#ifndef PRODUCT
        } else {
          if(!do_size){
            if (size != 0) st->print("\n\t");
            st->print("sd   %s, [SP + #%d] # spill 8",
                      Matcher::regName[src_first],
                      offset);
          }
#endif
        }
        size += 4;
      } else {
        // 32-bit
        assert(!((src_first & 1) == 0 && src_first + 1 == src_second), "no transform");
        assert(!((dst_first & 1) == 0 && dst_first + 1 == dst_second), "no transform");
        int offset = ra_->reg2offset(dst_first);
        if (cbuf) {
          MacroAssembler _masm(cbuf);
          __ sw(as_Register(Matcher::_regEncode[src_first]), Address(SP, offset));
#ifndef PRODUCT
        } else {
          if(!do_size){
            if (size != 0) st->print("\n\t");
            st->print("sw   %s, [SP + #%d]\t# spill 9",
                      Matcher::regName[src_first], offset);
          }
#endif
        }
        size += 4;
      }
      return size;
    } else if (dst_first_rc == rc_int) {
      // gpr -> gpr
      if ((src_first & 1) == 0 && src_first + 1 == src_second &&
          (dst_first & 1) == 0 && dst_first + 1 == dst_second) {
        // 64-bit
        if (cbuf) {
          MacroAssembler _masm(cbuf);
          __ move(as_Register(Matcher::_regEncode[dst_first]),
                  as_Register(Matcher::_regEncode[src_first]));
#ifndef PRODUCT
        } else {
          if(!do_size){
            if (size != 0) st->print("\n\t");
            st->print("move(64bit) %s <-- %s\t# spill 10",
                      Matcher::regName[dst_first],
                      Matcher::regName[src_first]);
          }
#endif
        }
        size += 4;
        return size;
      } else {
        // 32-bit: Op_RegI values get a 32-bit move (move_u32); other ideal
        // regs are moved with daddu rd, rs, R0 (full 64-bit copy).
        assert(!((src_first & 1) == 0 && src_first + 1 == src_second), "no transform");
        assert(!((dst_first & 1) == 0 && dst_first + 1 == dst_second), "no transform");
        if (cbuf) {
          MacroAssembler _masm(cbuf);
          if (this->ideal_reg() == Op_RegI)
            __ move_u32(as_Register(Matcher::_regEncode[dst_first]), as_Register(Matcher::_regEncode[src_first]));
          else
            __ daddu(as_Register(Matcher::_regEncode[dst_first]), as_Register(Matcher::_regEncode[src_first]), R0);
#ifndef PRODUCT
        } else {
          if(!do_size){
            if (size != 0) st->print("\n\t");
            st->print("move(32-bit) %s <-- %s\t# spill 11",
                      Matcher::regName[dst_first],
                      Matcher::regName[src_first]);
          }
#endif
        }
        size += 4;
        return size;
      }
    } else if (dst_first_rc == rc_float) {
      // gpr -> xmm
      if ((src_first & 1) == 0 && src_first + 1 == src_second &&
          (dst_first & 1) == 0 && dst_first + 1 == dst_second) {
        // 64-bit
        if (cbuf) {
          MacroAssembler _masm(cbuf);
          __ dmtc1(as_Register(Matcher::_regEncode[src_first]), as_FloatRegister(Matcher::_regEncode[dst_first]));
#ifndef PRODUCT
        } else {
          if(!do_size){
            if (size != 0) st->print("\n\t");
            st->print("dmtc1 %s, %s\t# spill 12",
                      Matcher::regName[dst_first],
                      Matcher::regName[src_first]);
          }
#endif
        }
        size += 4;
      } else {
        // 32-bit
        assert(!((src_first & 1) == 0 && src_first + 1 == src_second), "no transform");
        assert(!((dst_first & 1) == 0 && dst_first + 1 == dst_second), "no transform");
        if (cbuf) {
          MacroAssembler _masm(cbuf);
          __ mtc1( as_Register(Matcher::_regEncode[src_first]), as_FloatRegister(Matcher::_regEncode[dst_first]) );
#ifndef PRODUCT
        } else {
          if(!do_size){
            if (size != 0) st->print("\n\t");
            st->print("mtc1 %s, %s\t# spill 13",
                      Matcher::regName[dst_first],
                      Matcher::regName[src_first]);
          }
#endif
        }
        size += 4;
      }
      return size;
    }
  } else if (src_first_rc == rc_float) {
    // xmm ->
    if (dst_first_rc == rc_stack) {
      // xmm -> mem: sdc1 for doubles, swc1 for singles.
      if ((src_first & 1) == 0 && src_first + 1 == src_second &&
          (dst_first & 1) == 0 && dst_first + 1 == dst_second) {
        // 64-bit
        int offset = ra_->reg2offset(dst_first);
        if (cbuf) {
          MacroAssembler _masm(cbuf);
          __ sdc1( as_FloatRegister(Matcher::_regEncode[src_first]), Address(SP, offset) );
#ifndef PRODUCT
        } else {
          if(!do_size){
            if (size != 0) st->print("\n\t");
            st->print("sdc1 %s, [SP + #%d]\t# spill 14",
                      Matcher::regName[src_first],
                      offset);
          }
#endif
        }
        size += 4;
      } else {
        // 32-bit
        assert(!((src_first & 1) == 0 && src_first + 1 == src_second), "no transform");
        assert(!((dst_first & 1) == 0 && dst_first + 1 == dst_second), "no transform");
        int offset = ra_->reg2offset(dst_first);
        if (cbuf) {
          MacroAssembler _masm(cbuf);
          __ swc1(as_FloatRegister(Matcher::_regEncode[src_first]), Address(SP, offset));
#ifndef PRODUCT
        } else {
          if(!do_size){
            if (size != 0) st->print("\n\t");
            st->print("swc1 %s, [SP + #%d]\t# spill 15",
                      Matcher::regName[src_first],
                      offset);
          }
#endif
        }
        size += 4;
      }
      return size;
    } else if (dst_first_rc == rc_int) {
      // xmm -> gpr
      if ((src_first & 1) == 0 && src_first + 1 == src_second &&
          (dst_first & 1) == 0 && dst_first + 1 == dst_second) {
        // 64-bit
        if (cbuf) {
          MacroAssembler _masm(cbuf);
          __ dmfc1( as_Register(Matcher::_regEncode[dst_first]), as_FloatRegister(Matcher::_regEncode[src_first]));
#ifndef PRODUCT
        } else {
          if(!do_size){
            if (size != 0) st->print("\n\t");
            st->print("dmfc1 %s, %s\t# spill 16",
                      Matcher::regName[dst_first],
                      Matcher::regName[src_first]);
          }
#endif
        }
        size += 4;
      } else {
        // 32-bit
        assert(!((src_first & 1) == 0 && src_first + 1 == src_second), "no transform");
        assert(!((dst_first & 1) == 0 && dst_first + 1 == dst_second), "no transform");
        if (cbuf) {
          MacroAssembler _masm(cbuf);
          __ mfc1( as_Register(Matcher::_regEncode[dst_first]), as_FloatRegister(Matcher::_regEncode[src_first]));
#ifndef PRODUCT
        } else {
          if(!do_size){
            if (size != 0) st->print("\n\t");
            st->print("mfc1 %s, %s\t# spill 17",
                      Matcher::regName[dst_first],
                      Matcher::regName[src_first]);
          }
#endif
        }
        size += 4;
      }
      return size;
    } else if (dst_first_rc == rc_float) {
      // xmm -> xmm
      if ((src_first & 1) == 0 && src_first + 1 == src_second &&
          (dst_first & 1) == 0 && dst_first + 1 == dst_second) {
        // 64-bit
        if (cbuf) {
          MacroAssembler _masm(cbuf);
          __ mov_d( as_FloatRegister(Matcher::_regEncode[dst_first]), as_FloatRegister(Matcher::_regEncode[src_first]));
#ifndef PRODUCT
        } else {
          if(!do_size){
            if (size != 0) st->print("\n\t");
            st->print("mov_d %s <-- %s\t# spill 18",
                      Matcher::regName[dst_first],
                      Matcher::regName[src_first]);
          }
#endif
        }
        size += 4;
      } else {
        // 32-bit
        assert(!((src_first & 1) == 0 && src_first + 1 == src_second), "no transform");
        assert(!((dst_first & 1) == 0 && dst_first + 1 == dst_second), "no transform");
        if (cbuf) {
          MacroAssembler _masm(cbuf);
          __ mov_s( as_FloatRegister(Matcher::_regEncode[dst_first]), as_FloatRegister(Matcher::_regEncode[src_first]));
#ifndef PRODUCT
        } else {
          if(!do_size){
            if (size != 0) st->print("\n\t");
            st->print("mov_s %s <-- %s\t# spill 19",
                      Matcher::regName[dst_first],
                      Matcher::regName[src_first]);
          }
#endif
        }
        size += 4;
      }
      return size;
    }
  }

  // Unhandled register-class combination: should never be reached.
  assert(0," foo ");
  Unimplemented();
  return size;
}
#ifndef PRODUCT
// Print the spill copy (delegates to implementation() with cbuf == NULL).
void MachSpillCopyNode::format( PhaseRegAlloc *ra_, outputStream* st ) const {
  implementation( NULL, ra_, false, st );
}
#endif
// Emit the spill copy into the code buffer.
void MachSpillCopyNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
  implementation( &cbuf, ra_, false, NULL );
}
// Size in bytes of the spill copy (implementation() in size-only mode).
uint MachSpillCopyNode::size(PhaseRegAlloc *ra_) const {
  return implementation( NULL, ra_, true, NULL );
}
1470 //=============================================================================
1471 #
#ifndef PRODUCT
// Print the breakpoint pseudo-instruction.
void MachBreakpointNode::format( PhaseRegAlloc *, outputStream* st ) const {
  st->print("INT3");
}
#endif
// Emit a breakpoint/trap instruction.
void MachBreakpointNode::emit(CodeBuffer &cbuf, PhaseRegAlloc* ra_) const {
  MacroAssembler _masm(&cbuf);
  __ int3();
}
// Size of the breakpoint node; computed generically from the emitted code.
uint MachBreakpointNode::size(PhaseRegAlloc* ra_) const {
  return MachNode::size(ra_);
}
1489 //=============================================================================
1490 #ifndef PRODUCT
1491 void MachEpilogNode::format( PhaseRegAlloc *ra_, outputStream* st ) const {
1492 Compile *C = ra_->C;
1493 int framesize = C->frame_size_in_bytes();
1495 assert((framesize & (StackAlignmentInBytes-1)) == 0, "frame size not aligned");
1497 st->print("daddiu SP, SP, %d # Rlease stack @ MachEpilogNode",framesize);
1498 st->cr(); st->print("\t");
1499 if (UseLoongsonISA) {
1500 st->print("gslq RA, FP, SP, %d # Restore FP & RA @ MachEpilogNode", -wordSize*2);
1501 } else {
1502 st->print("ld RA, SP, %d # Restore RA @ MachEpilogNode", -wordSize);
1503 st->cr(); st->print("\t");
1504 st->print("ld FP, SP, %d # Restore FP @ MachEpilogNode", -wordSize*2);
1505 }
1507 if( do_polling() && C->is_method_compilation() ) {
1508 st->print("Poll Safepoint # MachEpilogNode");
1509 }
1510 }
1511 #endif
// Emit the method epilog: pop the frame, restore RA/FP, and poll the
// safepoint page on return (method compilations only).
void MachEpilogNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
  Compile *C = ra_->C;
  MacroAssembler _masm(&cbuf);
  int framesize = C->frame_size_in_bytes();

  assert((framesize & (StackAlignmentInBytes-1)) == 0, "frame size not aligned");

  // Release the frame first; RA/FP then live just below the new SP.
  __ daddiu(SP, SP, framesize);

  if (UseLoongsonISA) {
    // Loongson quad load: restores RA and FP in a single instruction.
    __ gslq(RA, FP, SP, -wordSize*2);
  } else {
    __ ld(RA, SP, -wordSize );
    __ ld(FP, SP, -wordSize*2 );
  }

  /* 2012/11/19 Jin: The epilog in a RuntimeStub should not contain a safepoint */
  if( do_polling() && C->is_method_compilation() ) {
#ifndef OPT_SAFEPOINT
    // Materialize the polling-page address and perform the poll-return load.
    __ set64(AT, (long)os::get_polling_page());
    __ relocate(relocInfo::poll_return_type);
    __ lw(AT, AT, 0);
#else
    __ lui(AT, Assembler::split_high((intptr_t)os::get_polling_page()));
    __ relocate(relocInfo::poll_return_type);
    __ lw(AT, AT, Assembler::split_low((intptr_t)os::get_polling_page()));
#endif
  }
}
// Epilog size varies (Loongson ISA, polling); compute it generically.
uint MachEpilogNode::size(PhaseRegAlloc *ra_) const {
  return MachNode::size(ra_); // too many variables; just compute it the hard way
}
// Worst-case relocation count for the epilog.
int MachEpilogNode::reloc() const {
  return 0; // a large enough number
}
// Use the generic pipeline description for the epilog.
const Pipeline * MachEpilogNode::pipeline() const {
  return MachNode::pipeline_class();
}
// Offset of the safepoint poll within the epilog (start of the node).
int MachEpilogNode::safepoint_offset() const { return 0; }
1557 //=============================================================================
#ifndef PRODUCT
// Print the box-lock address computation (lock slot address = SP + offset).
void BoxLockNode::format( PhaseRegAlloc *ra_, outputStream* st ) const {
  int offset = ra_->reg2offset(in_RegMask(0).find_first_elem());
  int reg = ra_->get_reg_first(this);
  st->print("ADDI %s, SP, %d @BoxLockNode",Matcher::regName[reg],offset);
}
#endif
// One addi instruction (4 bytes); must match emit() below.
uint BoxLockNode::size(PhaseRegAlloc *ra_) const {
  return 4;
}
// Emit the box-lock address computation: reg = SP + offset of the lock slot.
// NOTE(review): addi takes a signed 16-bit immediate — this assumes the
// monitor slot offset always fits in simm16; verify for very large frames.
void BoxLockNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
  MacroAssembler _masm(&cbuf);
  int offset = ra_->reg2offset(in_RegMask(0).find_first_elem());
  int reg = ra_->get_encode(this);

  __ addi(as_Register(reg), SP, offset);
  /*
  // Original x86 encoding, kept for reference:
  if( offset >= 128 ) {
    emit_opcode(cbuf, 0x8D);      // LEA  reg,[SP+offset]
    emit_rm(cbuf, 0x2, reg, 0x04);
    emit_rm(cbuf, 0x0, 0x04, SP_enc);
    emit_d32(cbuf, offset);
  }
  else {
    emit_opcode(cbuf, 0x8D);      // LEA  reg,[SP+offset]
    emit_rm(cbuf, 0x1, reg, 0x04);
    emit_rm(cbuf, 0x0, 0x04, SP_enc);
    emit_d8(cbuf, offset);
  }
  */
}
1595 //static int sizeof_FFree_Float_Stack_All = -1;
1597 int MachCallRuntimeNode::ret_addr_offset() {
1598 //lui
1599 //ori
1600 //dsll
1601 //ori
1602 //jalr
1603 //nop
1604 assert(NativeCall::instruction_size == 24, "in MachCallRuntimeNode::ret_addr_offset()");
1605 return NativeCall::instruction_size;
1606 // return 16;
1607 }
1613 //=============================================================================
//=============================================================================
#ifndef PRODUCT
// Print the nop padding (4 bytes per nop).
void MachNopNode::format( PhaseRegAlloc *, outputStream* st ) const {
  st->print("NOP \t# %d bytes pad for loops and calls", 4 * _count);
}
#endif
1620 void MachNopNode::emit(CodeBuffer &cbuf, PhaseRegAlloc * ) const {
1621 MacroAssembler _masm(&cbuf);
1622 int i = 0;
1623 for(i = 0; i < _count; i++)
1624 __ nop();
1625 }
// 4 bytes per emitted nop; must match emit() above.
uint MachNopNode::size(PhaseRegAlloc *) const {
  return 4 * _count;
}
// Use the generic pipeline description for nop padding.
const Pipeline* MachNopNode::pipeline() const {
  return MachNode::pipeline_class();
}
1634 //=============================================================================
1636 //=============================================================================
#ifndef PRODUCT
// Print the unverified entry point: inline-cache check against the
// receiver's klass, jumping to the IC-miss stub on mismatch.
void MachUEPNode::format( PhaseRegAlloc *ra_, outputStream* st ) const {
  st->print_cr("load_klass(AT, T0)");
  st->print_cr("\tbeq(AT, iCache, L)");
  st->print_cr("\tnop");
  st->print_cr("\tjmp(SharedRuntime::get_ic_miss_stub(), relocInfo::runtime_call_type)");
  st->print_cr("\tnop");
  st->print_cr("\tnop");
  st->print_cr(" L:");
}
#endif
1647 #endif
// Emit the unverified entry point: compare the receiver's klass (T0) with
// the expected inline-cache value; on mismatch, tail-call the IC-miss stub.
void MachUEPNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
  MacroAssembler _masm(&cbuf);
#ifdef ASSERT
  //uint code_size = cbuf.code_size();
#endif
  int ic_reg = Matcher::inline_cache_reg_encode();
  Label L;
  Register receiver = T0;
  Register iCache = as_Register(ic_reg);
  __ load_klass(AT, receiver);
  __ beq(AT, iCache, L);
  __ nop();   // branch delay slot

  // Klass mismatch: jump to the shared IC-miss stub.
  __ relocate(relocInfo::runtime_call_type);
  __ li48(T9, (long)SharedRuntime::get_ic_miss_stub());
  __ jr(T9);
  __ nop();

  /* WARNING these NOPs are critical so that verified entry point is properly
   * 8 bytes aligned for patching by NativeJump::patch_verified_entry() */
  __ align(CodeEntryAlignment);
  __ bind(L);
}
// UEP size varies with alignment padding; compute it generically.
uint MachUEPNode::size(PhaseRegAlloc *ra_) const {
  return MachNode::size(ra_);
}
1680 //=============================================================================
// The constant-table base may live in any pointer register.
const RegMask& MachConstantBaseNode::_out_RegMask = P_REG_mask();

// The table base is materialized with an absolute li48, so no extra offset.
int Compile::ConstantTable::calculate_table_base_offset() const {
  return 0;  // absolute addressing, no offset
}
// This platform emits the constant base directly; no post-allocation expand.
bool MachConstantBaseNode::requires_postalloc_expand() const { return false; }
void MachConstantBaseNode::postalloc_expand(GrowableArray <Node *> *nodes, PhaseRegAlloc *ra_) {
  ShouldNotReachHere();
}
// Materialize the constant-table base address into the allocated register
// with an absolute li48 carrying an internal_pc relocation.
void MachConstantBaseNode::emit(CodeBuffer& cbuf, PhaseRegAlloc* ra_) const {
  Compile* C = ra_->C;
  Compile::ConstantTable& constant_table = C->constant_table();
  MacroAssembler _masm(&cbuf);

  Register Rtoc = as_Register(ra_->get_encode(this));
  CodeSection* consts_section = __ code()->consts();
  int consts_size = consts_section->align_at_start(consts_section->size());
  assert(constant_table.size() == consts_size, "must be equal");

  // Emit nothing if there are no constants at all.
  if (consts_section->size()) {
    // Materialize the constant table base.
    address baseaddr = consts_section->start() + -(constant_table.table_base_offset());
    // RelocationHolder rspec = internal_word_Relocation::spec(baseaddr);
    __ relocate(relocInfo::internal_pc_type);
    __ li48(Rtoc, (long)baseaddr);
  }
}
// li48 is a fixed 4-instruction sequence (16 bytes).
uint MachConstantBaseNode::size(PhaseRegAlloc* ra_) const {
  // li48 (4 insts)
  return 4 * 4;
}
#ifndef PRODUCT
// Print the constant-table base materialization.
void MachConstantBaseNode::format(PhaseRegAlloc* ra_, outputStream* st) const {
  Register r = as_Register(ra_->get_encode(this));
  st->print("li48    %s, &constanttable (constant table base) @ MachConstantBaseNode", r->name());
}
#endif
1725 //=============================================================================
//=============================================================================
#ifndef PRODUCT
// Print the method prolog: optional stack bang, save RA/FP, set up FP and
// allocate the frame.  Must stay in sync with emit() below.
void MachPrologNode::format( PhaseRegAlloc *ra_, outputStream* st ) const {
  Compile* C = ra_->C;

  int framesize = C->frame_size_in_bytes();
  int bangsize = C->bang_size_in_bytes();
  assert((framesize & (StackAlignmentInBytes-1)) == 0, "frame size not aligned");

  // Calls to C2R adapters often do not accept exceptional returns.
  // We require that their callers must bang for them.  But be careful, because
  // some VM calls (such as call site linkage) can use several kilobytes of
  // stack.  But the stack safety zone should account for that.
  // See bugs 4446381, 4468289, 4497237.
  if (C->need_stack_bang(bangsize)) {
    st->print_cr("# stack bang"); st->print("\t");
  }
  if (UseLoongsonISA) {
    // Loongson quad store saves RA and FP with one instruction.
    st->print("gssq  RA, FP, %d(SP) @ MachPrologNode\n\t", -wordSize*2);
  } else {
    st->print("sd    RA, %d(SP) @ MachPrologNode\n\t", -wordSize);
    st->print("sd    FP, %d(SP) @ MachPrologNode\n\t", -wordSize*2);
  }
  st->print("daddiu FP, SP, -%d \n\t", wordSize*2);
  st->print("daddiu SP, SP, -%d \t",framesize);
}
#endif
1751 #endif
1754 void MachPrologNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
1755 Compile* C = ra_->C;
1756 MacroAssembler _masm(&cbuf);
1758 int framesize = C->frame_size_in_bytes();
1759 int bangsize = C->bang_size_in_bytes();
1761 // __ verified_entry(framesize, C->need_stack_bang(bangsize)?bangsize:0, false);
1763 assert((framesize & (StackAlignmentInBytes-1)) == 0, "frame size not aligned");
1765 if (C->need_stack_bang(framesize)) {
1766 __ generate_stack_overflow_check(framesize);
1767 }
1769 if (UseLoongsonISA) {
1770 __ gssq(RA, FP, SP, -wordSize*2);
1771 } else {
1772 __ sd(RA, SP, -wordSize);
1773 __ sd(FP, SP, -wordSize*2);
1774 }
1775 __ daddiu(FP, SP, -wordSize*2);
1776 __ daddiu(SP, SP, -framesize);
1777 __ nop(); /* 2013.10.22 Jin: Make enough room for patch_verified_entry() */
1778 __ nop();
1780 C->set_frame_complete(cbuf.insts_size());
1781 if (C->has_mach_constant_base_node()) {
1782 // NOTE: We set the table base offset here because users might be
1783 // emitted before MachConstantBaseNode.
1784 Compile::ConstantTable& constant_table = C->constant_table();
1785 constant_table.set_table_base_offset(constant_table.calculate_table_base_offset());
1786 }
1788 }
// Prolog size varies (stack bang, Loongson ISA); compute it generically.
uint MachPrologNode::size(PhaseRegAlloc *ra_) const {
  return MachNode::size(ra_); // too many variables; just compute it the hard way
}
// Worst-case relocation count for the prolog.
int MachPrologNode::reloc() const {
  return 0; // a large enough number
}
1800 %}
1802 //----------ENCODING BLOCK-----------------------------------------------------
1803 // This block specifies the encoding classes used by the compiler to output
1804 // byte streams. Encoding classes generate functions which are called by
1805 // Machine Instruction Nodes in order to generate the bit encoding of the
1806 // instruction. Operands specify their base encoding interface with the
1807 // interface keyword. There are currently supported four interfaces,
1808 // REG_INTER, CONST_INTER, MEMORY_INTER, & COND_INTER. REG_INTER causes an
1809 // operand to generate a function which returns its register number when
1810 // queried. CONST_INTER causes an operand to generate a function which
1811 // returns the value of the constant when queried. MEMORY_INTER causes an
1812 // operand to generate four functions which return the Base Register, the
1813 // Index Register, the Scale Value, and the Offset Value of the operand when
1814 // queried. COND_INTER causes an operand to generate six functions which
1815 // return the encoding code (ie - encoding bits for the instruction)
1816 // associated with each basic boolean condition for a conditional instruction.
1817 // Instructions specify two basic values for encoding. They use the
1818 // ins_encode keyword to specify their encoding class (which must be one of
1819 // the class names specified in the encoding block), and they use the
1820 // opcode keyword to specify, in order, their primary, secondary, and
1821 // tertiary opcode. Only the opcode sections which a particular instruction
1822 // needs for encoding need to be specified.
1823 encode %{
1824 /*
1825 Alias:
1826 1044 b java.io.ObjectInputStream::readHandle (130 bytes)
1827 118 B14: # B19 B15 <- B13 Freq: 0.899955
1828 118 add S1, S2, V0 #@addP_reg_reg
1829 11c lb S0, [S1 + #-8257524] #@loadB
1830 120 BReq S0, #3, B19 #@branchConI_reg_imm P=0.100000 C=-1.000000
1831 */
1832 //Load byte signed
1833 enc_class load_B_enc (mRegI dst, memory mem) %{
1834 MacroAssembler _masm(&cbuf);
1835 int dst = $dst$$reg;
1836 int base = $mem$$base;
1837 int index = $mem$$index;
1838 int scale = $mem$$scale;
1839 int disp = $mem$$disp;
1841 if( index != 0 ) {
1842 if( Assembler::is_simm16(disp) ) {
1843 if( UseLoongsonISA ) {
1844 if (scale == 0) {
1845 __ gslbx(as_Register(dst), as_Register(base), as_Register(index), disp);
1846 } else {
1847 __ dsll(AT, as_Register(index), scale);
1848 __ gslbx(as_Register(dst), as_Register(base), AT, disp);
1849 }
1850 } else {
1851 if (scale == 0) {
1852 __ addu(AT, as_Register(base), as_Register(index));
1853 } else {
1854 __ dsll(AT, as_Register(index), scale);
1855 __ addu(AT, as_Register(base), AT);
1856 }
1857 __ lb(as_Register(dst), AT, disp);
1858 }
1859 } else {
1860 if (scale == 0) {
1861 __ addu(AT, as_Register(base), as_Register(index));
1862 } else {
1863 __ dsll(AT, as_Register(index), scale);
1864 __ addu(AT, as_Register(base), AT);
1865 }
1866 __ move(T9, disp);
1867 if( UseLoongsonISA ) {
1868 __ gslbx(as_Register(dst), AT, T9, 0);
1869 } else {
1870 __ addu(AT, AT, T9);
1871 __ lb(as_Register(dst), AT, 0);
1872 }
1873 }
1874 } else {
1875 if( Assembler::is_simm16(disp) ) {
1876 __ lb(as_Register(dst), as_Register(base), disp);
1877 } else {
1878 __ move(T9, disp);
1879 if( UseLoongsonISA ) {
1880 __ gslbx(as_Register(dst), as_Register(base), T9, 0);
1881 } else {
1882 __ addu(AT, as_Register(base), T9);
1883 __ lb(as_Register(dst), AT, 0);
1884 }
1885 }
1886 }
1887 %}
1889 //Load byte unsigned
  // Load byte unsigned: dst = zero-extended byte at
  // [base + (index << scale) + disp], using lbu with AT/T9 as scratch.
  enc_class load_UB_enc (mRegI dst, memory mem) %{
    MacroAssembler _masm(&cbuf);
    int  dst = $dst$$reg;
    int  base = $mem$$base;
    int  index = $mem$$index;
    int  scale = $mem$$scale;
    int  disp = $mem$$disp;

    if( index != 0 ) {
      // Fold base + scaled index into AT.
      if (scale == 0) {
        __ daddu(AT, as_Register(base), as_Register(index));
      } else {
        __ dsll(AT, as_Register(index), scale);
        __ daddu(AT, as_Register(base), AT);
      }
      if( Assembler::is_simm16(disp) ) {
        __ lbu(as_Register(dst), AT, disp);
      } else {
        // Displacement does not fit simm16: materialize it in T9.
        __ move(T9, disp);
        __ daddu(AT, AT, T9);
        __ lbu(as_Register(dst), AT, 0);
      }
    } else {
      if( Assembler::is_simm16(disp) ) {
        __ lbu(as_Register(dst), as_Register(base), disp);
      } else {
        __ move(T9, disp);
        __ daddu(AT, as_Register(base), T9);
        __ lbu(as_Register(dst), AT, 0);
      }
    }
  %}
  // Store byte from register: [base + (index << scale) + disp] = src (low 8 bits).
  // Loongson gssbx (indexed store) takes only a simm8 offset, hence the extra
  // is_simm(disp, 8) tier before the generic simm16 path.
  enc_class store_B_reg_enc (memory mem, mRegI src) %{
    MacroAssembler _masm(&cbuf);
    int  src = $src$$reg;
    int  base = $mem$$base;
    int  index = $mem$$index;
    int  scale = $mem$$scale;
    int  disp = $mem$$disp;

    if( index != 0 ) {
      if (scale == 0) {
        if( Assembler::is_simm(disp, 8) ) {
          if (UseLoongsonISA) {
            __ gssbx(as_Register(src), as_Register(base), as_Register(index), disp);
          } else {
            __ addu(AT, as_Register(base), as_Register(index));
            __ sb(as_Register(src), AT, disp);
          }
        } else if( Assembler::is_simm16(disp) ) {
          __ addu(AT, as_Register(base), as_Register(index));
          __ sb(as_Register(src), AT, disp);
        } else {
          // Displacement does not fit simm16: materialize it in T9.
          __ addu(AT, as_Register(base), as_Register(index));
          __ move(T9, disp);
          if (UseLoongsonISA) {
            __ gssbx(as_Register(src), AT, T9, 0);
          } else {
            __ addu(AT, AT, T9);
            __ sb(as_Register(src), AT, 0);
          }
        }
      } else {
        // Scaled index: shift into AT first.
        __ dsll(AT, as_Register(index), scale);
        if( Assembler::is_simm(disp, 8) ) {
          if (UseLoongsonISA) {
            __ gssbx(as_Register(src), AT, as_Register(base), disp);
          } else {
            __ addu(AT, as_Register(base), AT);
            __ sb(as_Register(src), AT, disp);
          }
        } else if( Assembler::is_simm16(disp) ) {
          __ addu(AT, as_Register(base), AT);
          __ sb(as_Register(src), AT, disp);
        } else {
          __ addu(AT, as_Register(base), AT);
          __ move(T9, disp);
          if (UseLoongsonISA) {
            __ gssbx(as_Register(src), AT, T9, 0);
          } else {
            __ addu(AT, AT, T9);
            __ sb(as_Register(src), AT, 0);
          }
        }
      }
    } else {
      // No index register: plain base + disp addressing.
      if( Assembler::is_simm16(disp) ) {
        __ sb(as_Register(src), as_Register(base), disp);
      } else {
        __ move(T9, disp);
        if (UseLoongsonISA) {
          __ gssbx(as_Register(src), as_Register(base), T9, 0);
        } else {
          __ addu(AT, as_Register(base), T9);
          __ sb(as_Register(src), AT, 0);
        }
      }
    }
  %}
  enc_class store_B_immI_enc (memory mem, immI8 src) %{
    // Byte store of an 8-bit immediate: *(i8*)(addr) = value.
    // value == 0 is special-cased to store the hardwired zero register R0,
    // avoiding the materializing move.  With UseLoongsonISA, gssbx
    // (reg+reg+simm8 store) folds the final address add.
    // AT and T9 are assembler scratch registers.
    MacroAssembler _masm(&cbuf);
    int base = $mem$$base;
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;
    int value = $src$$constant;

    if( index != 0 ) {
      if (!UseLoongsonISA) {
        // Plain MIPS: compute the full address in AT, then sb.
        if (scale == 0) {
          __ daddu(AT, as_Register(base), as_Register(index));
        } else {
          __ dsll(AT, as_Register(index), scale);
          __ daddu(AT, as_Register(base), AT);
        }
        if( Assembler::is_simm16(disp) ) {
          if (value == 0) {
            __ sb(R0, AT, disp);
          } else {
            __ move(T9, value);
            __ sb(T9, AT, disp);
          }
        } else {
          if (value == 0) {
            __ move(T9, disp);
            __ daddu(AT, AT, T9);
            __ sb(R0, AT, 0);
          } else {
            __ move(T9, disp);
            __ daddu(AT, AT, T9);
            __ move(T9, value);
            __ sb(T9, AT, 0);
          }
        }
      } else {
        // Loongson: gssbx takes a simm8 displacement; larger displacements
        // fall back to explicit address arithmetic.
        if (scale == 0) {
          if( Assembler::is_simm(disp, 8) ) {
            if (value == 0) {
              __ gssbx(R0, as_Register(base), as_Register(index), disp);
            } else {
              __ move(T9, value);
              __ gssbx(T9, as_Register(base), as_Register(index), disp);
            }
          } else if( Assembler::is_simm16(disp) ) {
            __ daddu(AT, as_Register(base), as_Register(index));
            if (value == 0) {
              __ sb(R0, AT, disp);
            } else {
              __ move(T9, value);
              __ sb(T9, AT, disp);
            }
          } else {
            if (value == 0) {
              // AT = base + index; store at AT + disp(T9).
              __ daddu(AT, as_Register(base), as_Register(index));
              __ move(T9, disp);
              __ gssbx(R0, AT, T9, 0);
            } else {
              // AT = base + disp; store value(T9) at AT + index.
              __ move(AT, disp);
              __ move(T9, value);
              __ daddu(AT, as_Register(base), AT);
              __ gssbx(T9, AT, as_Register(index), 0);
            }
          }
        } else {
          if( Assembler::is_simm(disp, 8) ) {
            __ dsll(AT, as_Register(index), scale);
            if (value == 0) {
              __ gssbx(R0, as_Register(base), AT, disp);
            } else {
              __ move(T9, value);
              __ gssbx(T9, as_Register(base), AT, disp);
            }
          } else if( Assembler::is_simm16(disp) ) {
            __ dsll(AT, as_Register(index), scale);
            __ daddu(AT, as_Register(base), AT);
            if (value == 0) {
              __ sb(R0, AT, disp);
            } else {
              __ move(T9, value);
              __ sb(T9, AT, disp);
            }
          } else {
            __ dsll(AT, as_Register(index), scale);
            if (value == 0) {
              __ daddu(AT, as_Register(base), AT);
              __ move(T9, disp);
              __ gssbx(R0, AT, T9, 0);
            } else {
              // AT = (index << scale) + disp; store value(T9) at base + AT.
              __ move(T9, disp);
              __ daddu(AT, AT, T9);
              __ move(T9, value);
              __ gssbx(T9, as_Register(base), AT, 0);
            }
          }
        }
      }
    } else {
      // No index register: base + disp only.
      if( Assembler::is_simm16(disp) ) {
        if (value == 0) {
          __ sb(R0, as_Register(base), disp);
        } else {
          __ move(AT, value);
          __ sb(AT, as_Register(base), disp);
        }
      } else {
        if (value == 0) {
          __ move(T9, disp);
          if (UseLoongsonISA) {
            __ gssbx(R0, as_Register(base), T9, 0);
          } else {
            __ daddu(AT, as_Register(base), T9);
            __ sb(R0, AT, 0);
          }
        } else {
          __ move(T9, disp);
          if (UseLoongsonISA) {
            __ move(AT, value);
            __ gssbx(AT, as_Register(base), T9, 0);
          } else {
            __ daddu(AT, as_Register(base), T9);
            __ move(T9, value);
            __ sb(T9, AT, 0);
          }
        }
      }
    }
  %}
  enc_class store_B_immI_enc_sync (memory mem, immI8 src) %{
    // Byte store of an 8-bit immediate followed by a full memory barrier
    // (sync) — used where the store must be globally visible before later
    // accesses (e.g. volatile semantics).  value == 0 stores R0 directly.
    // AT and T9 are assembler scratch registers.  Note: unlike
    // store_B_immI_enc, this variant has no Loongson gssbx fast paths.
    MacroAssembler _masm(&cbuf);
    int base = $mem$$base;
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;
    int value = $src$$constant;

    if( index != 0 ) {
      // Fold base + (index << scale) into AT.
      if (scale == 0) {
        __ daddu(AT, as_Register(base), as_Register(index));
      } else {
        __ dsll(AT, as_Register(index), scale);
        __ daddu(AT, as_Register(base), AT);
      }
      if( Assembler::is_simm16(disp) ) {
        if (value == 0) {
          __ sb(R0, AT, disp);
        } else {
          __ move(T9, value);
          __ sb(T9, AT, disp);
        }
      } else {
        if (value == 0) {
          __ move(T9, disp);
          __ daddu(AT, AT, T9);
          __ sb(R0, AT, 0);
        } else {
          __ move(T9, disp);
          __ daddu(AT, AT, T9);
          __ move(T9, value);
          __ sb(T9, AT, 0);
        }
      }
    } else {
      if( Assembler::is_simm16(disp) ) {
        if (value == 0) {
          __ sb(R0, as_Register(base), disp);
        } else {
          __ move(AT, value);
          __ sb(AT, as_Register(base), disp);
        }
      } else {
        if (value == 0) {
          __ move(T9, disp);
          __ daddu(AT, as_Register(base), T9);
          __ sb(R0, AT, 0);
        } else {
          __ move(T9, disp);
          __ daddu(AT, as_Register(base), T9);
          __ move(T9, value);
          __ sb(T9, AT, 0);
        }
      }
    }

    // Full barrier: order this store before subsequent memory operations.
    __ sync();
  %}
  // Load Short (16bit signed)
  enc_class load_S_enc (mRegI dst, memory mem) %{
    // Sign-extending 16-bit load: dst = *(i16*)(base + (index << scale) + disp).
    // AT and T9 are assembler scratch registers.
    // NOTE(review): the large-disp paths use 32-bit addu for the address add
    // while the address-formation above uses daddu — safe only for addresses
    // that fit in a sign-extended 32-bit value; confirm.
    MacroAssembler _masm(&cbuf);
    int dst = $dst$$reg;
    int base = $mem$$base;
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;

    if( index != 0 ) {
      if (scale == 0) {
        __ daddu(AT, as_Register(base), as_Register(index));
      } else {
        __ dsll(AT, as_Register(index), scale);
        __ daddu(AT, as_Register(base), AT);
      }
      if( Assembler::is_simm16(disp) ) {
        __ lh(as_Register(dst), AT, disp);
      } else {
        __ move(T9, disp);
        __ addu(AT, AT, T9);
        __ lh(as_Register(dst), AT, 0);
      }
    } else {
      if( Assembler::is_simm16(disp) ) {
        __ lh(as_Register(dst), as_Register(base), disp);
      } else {
        __ move(T9, disp);
        __ addu(AT, as_Register(base), T9);
        __ lh(as_Register(dst), AT, 0);
      }
    }
  %}
2217 // Load Char (16bit unsigned)
2218 enc_class load_C_enc (mRegI dst, memory mem) %{
2219 MacroAssembler _masm(&cbuf);
2220 int dst = $dst$$reg;
2221 int base = $mem$$base;
2222 int index = $mem$$index;
2223 int scale = $mem$$scale;
2224 int disp = $mem$$disp;
2226 if( index != 0 ) {
2227 if (scale == 0) {
2228 __ daddu(AT, as_Register(base), as_Register(index));
2229 } else {
2230 __ dsll(AT, as_Register(index), scale);
2231 __ daddu(AT, as_Register(base), AT);
2232 }
2233 if( Assembler::is_simm16(disp) ) {
2234 __ lhu(as_Register(dst), AT, disp);
2235 } else {
2236 __ move(T9, disp);
2237 __ addu(AT, AT, T9);
2238 __ lhu(as_Register(dst), AT, 0);
2239 }
2240 } else {
2241 if( Assembler::is_simm16(disp) ) {
2242 __ lhu(as_Register(dst), as_Register(base), disp);
2243 } else {
2244 __ move(T9, disp);
2245 __ daddu(AT, as_Register(base), T9);
2246 __ lhu(as_Register(dst), AT, 0);
2247 }
2248 }
2249 %}
  // Store Char (16bit unsigned)
  enc_class store_C_reg_enc (memory mem, mRegI src) %{
    // Halfword store: *(u16*)(base + (index << scale) + disp) = src (low 16 bits).
    // With UseLoongsonISA, gsshx (reg+reg+simm8 store) folds the address add.
    // AT and T9 are assembler scratch registers.
    MacroAssembler _masm(&cbuf);
    int src = $src$$reg;
    int base = $mem$$base;
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;

    if( index != 0 ) {
      if( Assembler::is_simm16(disp) ) {
        // gsshx only accepts a simm8 displacement.
        if( UseLoongsonISA && Assembler::is_simm(disp, 8) ) {
          if (scale == 0) {
            __ gsshx(as_Register(src), as_Register(base), as_Register(index), disp);
          } else {
            __ dsll(AT, as_Register(index), scale);
            __ gsshx(as_Register(src), as_Register(base), AT, disp);
          }
        } else {
          if (scale == 0) {
            __ addu(AT, as_Register(base), as_Register(index));
          } else {
            __ dsll(AT, as_Register(index), scale);
            __ addu(AT, as_Register(base), AT);
          }
          __ sh(as_Register(src), AT, disp);
        }
      } else {
        // Displacement too large for sh's 16-bit immediate.
        if (scale == 0) {
          __ addu(AT, as_Register(base), as_Register(index));
        } else {
          __ dsll(AT, as_Register(index), scale);
          __ addu(AT, as_Register(base), AT);
        }
        __ move(T9, disp);
        if( UseLoongsonISA ) {
          __ gsshx(as_Register(src), AT, T9, 0);
        } else {
          __ addu(AT, AT, T9);
          __ sh(as_Register(src), AT, 0);
        }
      }
    } else {
      if( Assembler::is_simm16(disp) ) {
        __ sh(as_Register(src), as_Register(base), disp);
      } else {
        __ move(T9, disp);
        if( UseLoongsonISA ) {
          __ gsshx(as_Register(src), as_Register(base), T9, 0);
        } else {
          __ addu(AT, as_Register(base), T9);
          __ sh(as_Register(src), AT, 0);
        }
      }
    }
  %}
  enc_class load_I_enc (mRegI dst, memory mem) %{
    // 32-bit signed load: dst = *(i32*)(base + (index << scale) + disp).
    // With UseLoongsonISA, gslwx (reg+reg+simm8 load) folds the address add.
    // AT and T9 are assembler scratch registers.
    MacroAssembler _masm(&cbuf);
    int dst = $dst$$reg;
    int base = $mem$$base;
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;

    if( index != 0 ) {
      if( Assembler::is_simm16(disp) ) {
        // gslwx only accepts a simm8 displacement.
        if( UseLoongsonISA && Assembler::is_simm(disp, 8) ) {
          if (scale == 0) {
            __ gslwx(as_Register(dst), as_Register(base), as_Register(index), disp);
          } else {
            __ dsll(AT, as_Register(index), scale);
            __ gslwx(as_Register(dst), as_Register(base), AT, disp);
          }
        } else {
          if (scale == 0) {
            __ addu(AT, as_Register(base), as_Register(index));
          } else {
            __ dsll(AT, as_Register(index), scale);
            __ addu(AT, as_Register(base), AT);
          }
          __ lw(as_Register(dst), AT, disp);
        }
      } else {
        // Displacement too large for lw's 16-bit immediate.
        if (scale == 0) {
          __ addu(AT, as_Register(base), as_Register(index));
        } else {
          __ dsll(AT, as_Register(index), scale);
          __ addu(AT, as_Register(base), AT);
        }
        __ move(T9, disp);
        if( UseLoongsonISA ) {
          __ gslwx(as_Register(dst), AT, T9, 0);
        } else {
          __ addu(AT, AT, T9);
          __ lw(as_Register(dst), AT, 0);
        }
      }
    } else {
      if( Assembler::is_simm16(disp) ) {
        __ lw(as_Register(dst), as_Register(base), disp);
      } else {
        __ move(T9, disp);
        if( UseLoongsonISA ) {
          __ gslwx(as_Register(dst), as_Register(base), T9, 0);
        } else {
          __ addu(AT, as_Register(base), T9);
          __ lw(as_Register(dst), AT, 0);
        }
      }
    }
  %}
  enc_class store_I_reg_enc (memory mem, mRegI src) %{
    // 32-bit store: *(i32*)(base + (index << scale) + disp) = src (low 32 bits).
    // With UseLoongsonISA, gsswx (reg+reg+simm8 store) folds the address add.
    // AT and T9 are assembler scratch registers.
    MacroAssembler _masm(&cbuf);
    int src = $src$$reg;
    int base = $mem$$base;
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;

    if( index != 0 ) {
      if( Assembler::is_simm16(disp) ) {
        // gsswx only accepts a simm8 displacement.
        if( UseLoongsonISA && Assembler::is_simm(disp, 8) ) {
          if (scale == 0) {
            __ gsswx(as_Register(src), as_Register(base), as_Register(index), disp);
          } else {
            __ dsll(AT, as_Register(index), scale);
            __ gsswx(as_Register(src), as_Register(base), AT, disp);
          }
        } else {
          if (scale == 0) {
            __ addu(AT, as_Register(base), as_Register(index));
          } else {
            __ dsll(AT, as_Register(index), scale);
            __ addu(AT, as_Register(base), AT);
          }
          __ sw(as_Register(src), AT, disp);
        }
      } else {
        // Displacement too large for sw's 16-bit immediate.
        if (scale == 0) {
          __ addu(AT, as_Register(base), as_Register(index));
        } else {
          __ dsll(AT, as_Register(index), scale);
          __ addu(AT, as_Register(base), AT);
        }
        __ move(T9, disp);
        if( UseLoongsonISA ) {
          __ gsswx(as_Register(src), AT, T9, 0);
        } else {
          __ addu(AT, AT, T9);
          __ sw(as_Register(src), AT, 0);
        }
      }
    } else {
      if( Assembler::is_simm16(disp) ) {
        __ sw(as_Register(src), as_Register(base), disp);
      } else {
        __ move(T9, disp);
        if( UseLoongsonISA ) {
          __ gsswx(as_Register(src), as_Register(base), T9, 0);
        } else {
          __ addu(AT, as_Register(base), T9);
          __ sw(as_Register(src), AT, 0);
        }
      }
    }
  %}
  enc_class store_I_immI_enc (memory mem, immI src) %{
    // 32-bit store of an immediate: *(i32*)(addr) = value.
    // value == 0 stores the hardwired zero register R0 directly, skipping the
    // materializing move.  AT and T9 are assembler scratch registers.
    // NOTE(review): large-disp paths use 32-bit addu for the address add while
    // the address formation above uses daddu — confirm address range intent.
    MacroAssembler _masm(&cbuf);
    int base = $mem$$base;
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;
    int value = $src$$constant;

    if( index != 0 ) {
      // Fold base + (index << scale) into AT.
      if (scale == 0) {
        __ daddu(AT, as_Register(base), as_Register(index));
      } else {
        __ dsll(AT, as_Register(index), scale);
        __ daddu(AT, as_Register(base), AT);
      }
      if( Assembler::is_simm16(disp) ) {
        if (value == 0) {
          __ sw(R0, AT, disp);
        } else {
          __ move(T9, value);
          __ sw(T9, AT, disp);
        }
      } else {
        if (value == 0) {
          __ move(T9, disp);
          __ addu(AT, AT, T9);
          __ sw(R0, AT, 0);
        } else {
          __ move(T9, disp);
          __ addu(AT, AT, T9);
          __ move(T9, value);
          __ sw(T9, AT, 0);
        }
      }
    } else {
      if( Assembler::is_simm16(disp) ) {
        if (value == 0) {
          __ sw(R0, as_Register(base), disp);
        } else {
          __ move(AT, value);
          __ sw(AT, as_Register(base), disp);
        }
      } else {
        if (value == 0) {
          __ move(T9, disp);
          __ addu(AT, as_Register(base), T9);
          __ sw(R0, AT, 0);
        } else {
          __ move(T9, disp);
          __ addu(AT, as_Register(base), T9);
          __ move(T9, value);
          __ sw(T9, AT, 0);
        }
      }
    }
  %}
  enc_class load_N_enc (mRegN dst, memory mem) %{
    // Narrow-oop (compressed pointer) load: zero-extending 32-bit lwu from
    // base + (index << scale) + disp.  The memory operand must carry no
    // displacement relocation (asserted below).
    // AT and T9 are assembler scratch registers.
    MacroAssembler _masm(&cbuf);
    int dst = $dst$$reg;
    int base = $mem$$base;
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;
    relocInfo::relocType disp_reloc = $mem->disp_reloc();
    assert(disp_reloc == relocInfo::none, "cannot have disp");

    if( index != 0 ) {
      // Fold base + (index << scale) into AT.
      if (scale == 0) {
        __ daddu(AT, as_Register(base), as_Register(index));
      } else {
        __ dsll(AT, as_Register(index), scale);
        __ daddu(AT, as_Register(base), AT);
      }
      if( Assembler::is_simm16(disp) ) {
        __ lwu(as_Register(dst), AT, disp);
      } else {
        // Displacement too large for lwu's 16-bit immediate.
        __ li(T9, disp);
        __ daddu(AT, AT, T9);
        __ lwu(as_Register(dst), AT, 0);
      }
    } else {
      if( Assembler::is_simm16(disp) ) {
        __ lwu(as_Register(dst), as_Register(base), disp);
      } else {
        __ li(T9, disp);
        __ daddu(AT, as_Register(base), T9);
        __ lwu(as_Register(dst), AT, 0);
      }
    }

  %}
  enc_class load_P_enc (mRegP dst, memory mem) %{
    // Full-width pointer load: 64-bit ld from base + (index << scale) + disp.
    // The memory operand must carry no displacement relocation (asserted).
    // AT and T9 are assembler scratch registers.
    MacroAssembler _masm(&cbuf);
    int dst = $dst$$reg;
    int base = $mem$$base;
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;
    relocInfo::relocType disp_reloc = $mem->disp_reloc();
    assert(disp_reloc == relocInfo::none, "cannot have disp");

    if( index != 0 ) {
      // Fold base + (index << scale) into AT.
      if (scale == 0) {
        __ daddu(AT, as_Register(base), as_Register(index));
      } else {
        __ dsll(AT, as_Register(index), scale);
        __ daddu(AT, as_Register(base), AT);
      }
      if( Assembler::is_simm16(disp) ) {
        __ ld(as_Register(dst), AT, disp);
      } else {
        // Displacement too large for ld's 16-bit immediate.
        __ li(T9, disp);
        __ daddu(AT, AT, T9);
        __ ld(as_Register(dst), AT, 0);
      }
    } else {
      if( Assembler::is_simm16(disp) ) {
        __ ld(as_Register(dst), as_Register(base), disp);
      } else {
        __ li(T9, disp);
        __ daddu(AT, as_Register(base), T9);
        __ ld(as_Register(dst), AT, 0);
      }
    }
    // if( disp_reloc != relocInfo::none) __ ld(as_Register(dst), as_Register(dst), 0);
  %}
  enc_class store_P_reg_enc (memory mem, mRegP src) %{
    // Full-width pointer store: 64-bit sd to base + (index << scale) + disp.
    // AT and T9 are assembler scratch registers.
    MacroAssembler _masm(&cbuf);
    int src = $src$$reg;
    int base = $mem$$base;
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;

    if( index != 0 ) {
      // Fold base + (index << scale) into AT.
      if (scale == 0) {
        __ daddu(AT, as_Register(base), as_Register(index));
      } else {
        __ dsll(AT, as_Register(index), scale);
        __ daddu(AT, as_Register(base), AT);
      }
      if( Assembler::is_simm16(disp) ) {
        __ sd(as_Register(src), AT, disp);
      } else {
        // Displacement too large for sd's 16-bit immediate.
        __ move(T9, disp);
        __ daddu(AT, AT, T9);
        __ sd(as_Register(src), AT, 0);
      }
    } else {
      if( Assembler::is_simm16(disp) ) {
        __ sd(as_Register(src), as_Register(base), disp);
      } else {
        __ move(T9, disp);
        __ daddu(AT, as_Register(base), T9);
        __ sd(as_Register(src), AT, 0);
      }
    }
  %}
  enc_class store_N_reg_enc (memory mem, mRegN src) %{
    // Narrow-oop (compressed pointer) store: 32-bit sw to
    // base + (index << scale) + disp.  AT and T9 are scratch.
    // NOTE(review): the large-disp paths use 32-bit addu for the address add
    // while the address formation uses daddu — confirm address range intent.
    MacroAssembler _masm(&cbuf);
    int src = $src$$reg;
    int base = $mem$$base;
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;

    if( index != 0 ) {
      if (scale == 0) {
        __ daddu(AT, as_Register(base), as_Register(index));
      } else {
        __ dsll(AT, as_Register(index), scale);
        __ daddu(AT, as_Register(base), AT);
      }
      if( Assembler::is_simm16(disp) ) {
        __ sw(as_Register(src), AT, disp);
      } else {
        __ move(T9, disp);
        __ addu(AT, AT, T9);
        __ sw(as_Register(src), AT, 0);
      }
    } else {
      if( Assembler::is_simm16(disp) ) {
        __ sw(as_Register(src), as_Register(base), disp);
      } else {
        __ move(T9, disp);
        __ addu(AT, as_Register(base), T9);
        __ sw(as_Register(src), AT, 0);
      }
    }
  %}
  enc_class store_P_immP0_enc (memory mem) %{
    // Store of the pointer constant NULL (0): 64-bit sd of the hardwired zero
    // register R0 to base + (index << scale) + disp.  No value operand is
    // needed since R0 always reads as zero.  With UseLoongsonISA, gssdx
    // (reg+reg+simm8 store) folds the address add.  AT/T9 are scratch.
    MacroAssembler _masm(&cbuf);
    int base = $mem$$base;
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;

    if( index != 0 ) {
      if (scale == 0) {
        if( Assembler::is_simm16(disp) ) {
          // gssdx only accepts a simm8 displacement.
          if (UseLoongsonISA && Assembler::is_simm(disp, 8)) {
            __ gssdx(R0, as_Register(base), as_Register(index), disp);
          } else {
            __ daddu(AT, as_Register(base), as_Register(index));
            __ sd(R0, AT, disp);
          }
        } else {
          __ daddu(AT, as_Register(base), as_Register(index));
          __ move(T9, disp);
          if(UseLoongsonISA) {
            __ gssdx(R0, AT, T9, 0);
          } else {
            __ daddu(AT, AT, T9);
            __ sd(R0, AT, 0);
          }
        }
      } else {
        __ dsll(AT, as_Register(index), scale);
        if( Assembler::is_simm16(disp) ) {
          if (UseLoongsonISA && Assembler::is_simm(disp, 8)) {
            __ gssdx(R0, as_Register(base), AT, disp);
          } else {
            __ daddu(AT, as_Register(base), AT);
            __ sd(R0, AT, disp);
          }
        } else {
          __ daddu(AT, as_Register(base), AT);
          __ move(T9, disp);
          if (UseLoongsonISA) {
            __ gssdx(R0, AT, T9, 0);
          } else {
            __ daddu(AT, AT, T9);
            __ sd(R0, AT, 0);
          }
        }
      }
    } else {
      if( Assembler::is_simm16(disp) ) {
        __ sd(R0, as_Register(base), disp);
      } else {
        __ move(T9, disp);
        if (UseLoongsonISA) {
          __ gssdx(R0, as_Register(base), T9, 0);
        } else {
          __ daddu(AT, as_Register(base), T9);
          __ sd(R0, AT, 0);
        }
      }
    }
  %}
  enc_class storeImmN0_enc(memory mem, ImmN0 src) %{
    // Store of the narrow-oop constant 0: 32-bit sw of the hardwired zero
    // register R0 to base + (index << scale) + disp.  AT/T9 are scratch.
    MacroAssembler _masm(&cbuf);
    int base = $mem$$base;
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;

    if(index!=0){
      // Fold base + (index << scale) into AT.
      if (scale == 0) {
        __ daddu(AT, as_Register(base), as_Register(index));
      } else {
        __ dsll(AT, as_Register(index), scale);
        __ daddu(AT, as_Register(base), AT);
      }

      if( Assembler::is_simm16(disp) ) {
        __ sw(R0, AT, disp);
      } else {
        // Displacement too large for sw's 16-bit immediate.
        __ move(T9, disp);
        __ daddu(AT, AT, T9);
        __ sw(R0, AT, 0);
      }
    }
    else {
      if( Assembler::is_simm16(disp) ) {
        __ sw(R0, as_Register(base), disp);
      } else {
        __ move(T9, disp);
        __ daddu(AT, as_Register(base), T9);
        __ sw(R0, AT, 0);
      }
    }
  %}
  enc_class load_L_enc (mRegL dst, memory mem) %{
    // 64-bit long load: dst = *(i64*)(base + (index << scale) + disp).
    // AT and T9 are assembler scratch registers.
    MacroAssembler _masm(&cbuf);
    int base = $mem$$base;
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;
    Register dst_reg = as_Register($dst$$reg);

    /*********************2013/03/27**************************
     * Jin: $base may contain a null object.
     * Server JIT force the exception_offset to be the pos of
     * the first instruction.
     * I insert such a 'null_check' at the beginning.
     *******************************************************/
    // Dummy word load from [base + 0]: faults here (at the first instruction
    // of the node) if base is null, so the implicit-null-check machinery sees
    // the trap at the expected PC.
    __ lw(AT, as_Register(base), 0);

    /*********************2012/10/04**************************
     * Error case found in SortTest
     *  337   b  java.util.Arrays::sort1 (401 bytes)
     * B73:
     *    d34   lw  T4.lo, [T4 + #16] #@loadL-lo
     *          lw  T4.hi, [T4 + #16]+4  #@loadL-hi
     *
     * The original instructions generated here are :
     *      __ lw(dst_lo, as_Register(base), disp);
     *      __ lw(dst_hi, as_Register(base), disp + 4);
     *******************************************************/
    if( index != 0 ) {
      // Fold base + (index << scale) into AT.
      if (scale == 0) {
        __ daddu(AT, as_Register(base), as_Register(index));
      } else {
        __ dsll(AT, as_Register(index), scale);
        __ daddu(AT, as_Register(base), AT);
      }
      if( Assembler::is_simm16(disp) ) {
        __ ld(dst_reg, AT, disp);
      } else {
        __ move(T9, disp);
        __ daddu(AT, AT, T9);
        __ ld(dst_reg, AT, 0);
      }
    } else {
      if( Assembler::is_simm16(disp) ) {
        // Copy base into AT first (see the 2012/10/04 note above) so the load
        // never clobbers the base register mid-sequence.
        __ move(AT, as_Register(base));
        __ ld(dst_reg, AT, disp);
      } else {
        __ move(T9, disp);
        __ daddu(AT, as_Register(base), T9);
        __ ld(dst_reg, AT, 0);
      }
    }
  %}
  enc_class store_L_reg_enc (memory mem, mRegL src) %{
    // 64-bit long store: *(i64*)(base + (index << scale) + disp) = src.
    // AT and T9 are assembler scratch registers.
    MacroAssembler _masm(&cbuf);
    int base = $mem$$base;
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;
    Register src_reg = as_Register($src$$reg);

    if( index != 0 ) {
      // Fold base + (index << scale) into AT.
      if (scale == 0) {
        __ daddu(AT, as_Register(base), as_Register(index));
      } else {
        __ dsll(AT, as_Register(index), scale);
        __ daddu(AT, as_Register(base), AT);
      }
      if( Assembler::is_simm16(disp) ) {
        __ sd(src_reg, AT, disp);
      } else {
        __ move(T9, disp);
        __ daddu(AT, AT, T9);
        __ sd(src_reg, AT, 0);
      }
    } else {
      if( Assembler::is_simm16(disp) ) {
        // Copy base into AT first, mirroring load_L_enc's code shape.
        __ move(AT, as_Register(base));
        __ sd(src_reg, AT, disp);
      } else {
        __ move(T9, disp);
        __ daddu(AT, as_Register(base), T9);
        __ sd(src_reg, AT, 0);
      }
    }
  %}
2801 enc_class store_L_immL0_enc (memory mem, immL0 src) %{
2802 MacroAssembler _masm(&cbuf);
2803 int base = $mem$$base;
2804 int index = $mem$$index;
2805 int scale = $mem$$scale;
2806 int disp = $mem$$disp;
2808 if( index != 0 ) {
2809 if (scale == 0) {
2810 __ daddu(AT, as_Register(base), as_Register(index));
2811 } else {
2812 __ dsll(AT, as_Register(index), scale);
2813 __ daddu(AT, as_Register(base), AT);
2814 }
2815 if( Assembler::is_simm16(disp) ) {
2816 __ sd(R0, AT, disp);
2817 } else {
2818 __ move(T9, disp);
2819 __ addu(AT, AT, T9);
2820 __ sd(R0, AT, 0);
2821 }
2822 } else {
2823 if( Assembler::is_simm16(disp) ) {
2824 __ move(AT, as_Register(base));
2825 __ sd(R0, AT, disp);
2826 } else {
2827 __ move(T9, disp);
2828 __ addu(AT, as_Register(base), T9);
2829 __ sd(R0, AT, 0);
2830 }
2831 }
2832 %}
  enc_class load_F_enc (regF dst, memory mem) %{
    // Single-precision FP load: dst = *(float*)(base + (index << scale) + disp).
    // With UseLoongsonISA, gslwxc1 (reg+reg+simm8 FP load) folds the address
    // add.  AT and T9 are assembler scratch registers.
    MacroAssembler _masm(&cbuf);
    int base = $mem$$base;
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;
    FloatRegister dst = $dst$$FloatRegister;

    if( index != 0 ) {
      if( Assembler::is_simm16(disp) ) {
        // gslwxc1 only accepts a simm8 displacement.
        if( UseLoongsonISA && Assembler::is_simm(disp, 8) ) {
          if (scale == 0) {
            __ gslwxc1(dst, as_Register(base), as_Register(index), disp);
          } else {
            __ dsll(AT, as_Register(index), scale);
            __ gslwxc1(dst, as_Register(base), AT, disp);
          }
        } else {
          if (scale == 0) {
            __ daddu(AT, as_Register(base), as_Register(index));
          } else {
            __ dsll(AT, as_Register(index), scale);
            __ daddu(AT, as_Register(base), AT);
          }
          __ lwc1(dst, AT, disp);
        }
      } else {
        // Displacement too large for lwc1's 16-bit immediate.
        if (scale == 0) {
          __ daddu(AT, as_Register(base), as_Register(index));
        } else {
          __ dsll(AT, as_Register(index), scale);
          __ daddu(AT, as_Register(base), AT);
        }
        __ move(T9, disp);
        if( UseLoongsonISA ) {
          __ gslwxc1(dst, AT, T9, 0);
        } else {
          __ daddu(AT, AT, T9);
          __ lwc1(dst, AT, 0);
        }
      }
    } else {
      if( Assembler::is_simm16(disp) ) {
        __ lwc1(dst, as_Register(base), disp);
      } else {
        __ move(T9, disp);
        if( UseLoongsonISA ) {
          __ gslwxc1(dst, as_Register(base), T9, 0);
        } else {
          __ daddu(AT, as_Register(base), T9);
          __ lwc1(dst, AT, 0);
        }
      }
    }
  %}
2890 enc_class store_F_reg_enc (memory mem, regF src) %{
2891 MacroAssembler _masm(&cbuf);
2892 int base = $mem$$base;
2893 int index = $mem$$index;
2894 int scale = $mem$$scale;
2895 int disp = $mem$$disp;
2896 FloatRegister src = $src$$FloatRegister;
2898 if( index != 0 ) {
2899 if( Assembler::is_simm16(disp) ) {
2900 if( UseLoongsonISA && Assembler::is_simm(disp, 8) ) {
2901 if (scale == 0) {
2902 __ gsswxc1(src, as_Register(base), as_Register(index), disp);
2903 } else {
2904 __ dsll(AT, as_Register(index), scale);
2905 __ gsswxc1(src, as_Register(base), AT, disp);
2906 }
2907 } else {
2908 if (scale == 0) {
2909 __ daddu(AT, as_Register(base), as_Register(index));
2910 } else {
2911 __ dsll(AT, as_Register(index), scale);
2912 __ daddu(AT, as_Register(base), AT);
2913 }
2914 __ swc1(src, AT, disp);
2915 }
2916 } else {
2917 if (scale == 0) {
2918 __ daddu(AT, as_Register(base), as_Register(index));
2919 } else {
2920 __ dsll(AT, as_Register(index), scale);
2921 __ daddu(AT, as_Register(base), AT);
2922 }
2923 __ move(T9, disp);
2924 if( UseLoongsonISA ) {
2925 __ gsswxc1(src, AT, T9, 0);
2926 } else {
2927 __ daddu(AT, AT, T9);
2928 __ swc1(src, AT, 0);
2929 }
2930 }
2931 } else {
2932 if( Assembler::is_simm16(disp) ) {
2933 __ swc1(src, as_Register(base), disp);
2934 } else {
2935 __ move(T9, disp);
2936 if( UseLoongsonISA ) {
2937 __ gslwxc1(src, as_Register(base), T9, 0);
2938 } else {
2939 __ daddu(AT, as_Register(base), T9);
2940 __ swc1(src, AT, 0);
2941 }
2942 }
2943 }
2944 %}
  enc_class load_D_enc (regD dst, memory mem) %{
    // Double-precision FP load: dst = *(double*)(base + (index << scale) + disp).
    // With UseLoongsonISA, gsldxc1 (reg+reg+simm8 FP load) folds the address
    // add.  AT and T9 are assembler scratch registers.
    // NOTE(review): the non-Loongson large-disp fallbacks use 32-bit addu for
    // the address add while other paths use daddu — confirm intent.
    MacroAssembler _masm(&cbuf);
    int base = $mem$$base;
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;
    FloatRegister dst_reg = as_FloatRegister($dst$$reg);

    if( index != 0 ) {
      if( Assembler::is_simm16(disp) ) {
        // gsldxc1 only accepts a simm8 displacement.
        if( UseLoongsonISA && Assembler::is_simm(disp, 8) ) {
          if (scale == 0) {
            __ gsldxc1(dst_reg, as_Register(base), as_Register(index), disp);
          } else {
            __ dsll(AT, as_Register(index), scale);
            __ gsldxc1(dst_reg, as_Register(base), AT, disp);
          }
        } else {
          if (scale == 0) {
            __ daddu(AT, as_Register(base), as_Register(index));
          } else {
            __ dsll(AT, as_Register(index), scale);
            __ daddu(AT, as_Register(base), AT);
          }
          __ ldc1(dst_reg, AT, disp);
        }
      } else {
        if (scale == 0) {
          __ daddu(AT, as_Register(base), as_Register(index));
        } else {
          __ dsll(AT, as_Register(index), scale);
          __ daddu(AT, as_Register(base), AT);
        }
        __ move(T9, disp);
        if( UseLoongsonISA ) {
          __ gsldxc1(dst_reg, AT, T9, 0);
        } else {
          __ addu(AT, AT, T9);
          __ ldc1(dst_reg, AT, 0);
        }
      }
    } else {
      if( Assembler::is_simm16(disp) ) {
        __ ldc1(dst_reg, as_Register(base), disp);
      } else {
        __ move(T9, disp);
        if( UseLoongsonISA ) {
          __ gsldxc1(dst_reg, as_Register(base), T9, 0);
        } else {
          __ addu(AT, as_Register(base), T9);
          __ ldc1(dst_reg, AT, 0);
        }
      }
    }
  %}
  enc_class store_D_reg_enc (memory mem, regD src) %{
    // Double-precision FP store: *(double*)(base + (index << scale) + disp) = src.
    // With UseLoongsonISA, gssdxc1 (reg+reg+simm8 FP store) folds the address
    // add.  AT and T9 are assembler scratch registers.
    // NOTE(review): the non-Loongson large-disp fallbacks use 32-bit addu for
    // the address add while other paths use daddu — confirm intent.
    MacroAssembler _masm(&cbuf);
    int base = $mem$$base;
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;
    FloatRegister src_reg = as_FloatRegister($src$$reg);

    if( index != 0 ) {
      if( Assembler::is_simm16(disp) ) {
        // gssdxc1 only accepts a simm8 displacement.
        if( UseLoongsonISA && Assembler::is_simm(disp, 8) ) {
          if (scale == 0) {
            __ gssdxc1(src_reg, as_Register(base), as_Register(index), disp);
          } else {
            __ dsll(AT, as_Register(index), scale);
            __ gssdxc1(src_reg, as_Register(base), AT, disp);
          }
        } else {
          if (scale == 0) {
            __ daddu(AT, as_Register(base), as_Register(index));
          } else {
            __ dsll(AT, as_Register(index), scale);
            __ daddu(AT, as_Register(base), AT);
          }
          __ sdc1(src_reg, AT, disp);
        }
      } else {
        if (scale == 0) {
          __ daddu(AT, as_Register(base), as_Register(index));
        } else {
          __ dsll(AT, as_Register(index), scale);
          __ daddu(AT, as_Register(base), AT);
        }
        __ move(T9, disp);
        if( UseLoongsonISA ) {
          __ gssdxc1(src_reg, AT, T9, 0);
        } else {
          __ addu(AT, AT, T9);
          __ sdc1(src_reg, AT, 0);
        }
      }
    } else {
      if( Assembler::is_simm16(disp) ) {
        __ sdc1(src_reg, as_Register(base), disp);
      } else {
        __ move(T9, disp);
        if( UseLoongsonISA ) {
          __ gssdxc1(src_reg, as_Register(base), T9, 0);
        } else {
          __ addu(AT, as_Register(base), T9);
          __ sdc1(src_reg, AT, 0);
        }
      }
    }
  %}
  enc_class Java_To_Runtime (method meth) %{    // CALL Java_To_Runtime, Java_To_Runtime_Leaf
    // Call into the VM runtime: load the 48-bit target address into T9 with a
    // fixed-length li48 sequence (so the call site is patchable), then jalr.
    MacroAssembler _masm(&cbuf);
    // This is the instruction starting address for relocation info.
    __ block_comment("Java_To_Runtime");
    cbuf.set_insts_mark();
    __ relocate(relocInfo::runtime_call_type);

    __ li48(T9, (long)$meth$$method);
    __ jalr(T9);
    __ nop();   // branch delay slot
  %}
  enc_class Java_Static_Call (method meth) %{    // JAVA STATIC CALL
    // CALL to fixup routine.  Fixup routine uses ScopeDesc info to determine
    // who we intended to call.
    MacroAssembler _masm(&cbuf);
    cbuf.set_insts_mark();

    // Choose the relocation flavor: runtime stub (no _method), optimized
    // virtual, or plain static call.
    if ( !_method ) {
      __ relocate(relocInfo::runtime_call_type);
      //emit_d32_reloc(cbuf, ($meth$$method - (int)(cbuf.code_end()) - 4),
      //               runtime_call_Relocation::spec(), RELOC_IMM32 );
    } else if(_optimized_virtual) {
      __ relocate(relocInfo::opt_virtual_call_type);
      //emit_d32_reloc(cbuf, ($meth$$method - (int)(cbuf.code_end()) - 4),
      //               opt_virtual_call_Relocation::spec(), RELOC_IMM32 );
    } else {
      __ relocate(relocInfo::static_call_type);
      //emit_d32_reloc(cbuf, ($meth$$method - (int)(cbuf.code_end()) - 4),
      //               static_call_Relocation::spec(), RELOC_IMM32 );
    }

    // Materialize the call target in T9 and call; nop fills the delay slot.
    __ li(T9, $meth$$method);
    __ jalr(T9);
    __ nop();
    if( _method ) {  // Emit stub for static call
      emit_java_to_interp(cbuf);
    }
  %}
  /*
   * [Ref: LIR_Assembler::ic_call() ]
   */
  enc_class Java_Dynamic_Call (method meth) %{    // JAVA DYNAMIC CALL
    // Inline-cache call: ic_call emits the IC-holder load plus the patchable
    // call sequence (mirrors the C1 path referenced above).
    MacroAssembler _masm(&cbuf);
    __ block_comment("Java_Dynamic_Call");
    __ ic_call((address)$meth$$method);
  %}
  enc_class Set_Flags_After_Fast_Lock_Unlock(FlagsReg cr) %{
    // Convert the fast_lock/fast_unlock result left in AT into the flags
    // register: flags = 0 if AT == 0 (success), else 0xFFFFFFFF.
    Register flags = $cr$$Register;
    Label L;

    MacroAssembler _masm(&cbuf);

    __ addu(flags, R0, R0);       // flags = 0
    __ beq(AT, R0, L);            // success path: keep flags == 0
    __ delayed()->nop();
    __ move(flags, 0xFFFFFFFF);   // failure: non-zero flags
    __ bind(L);
  %}
  enc_class enc_PartialSubtypeCheck(mRegP result, mRegP sub, mRegP super, mRegI tmp) %{
    // Slow-path subtype check: result = 0 if sub is a subtype of super,
    // 1 otherwise.  Delegates the secondary-supers scan to
    // check_klass_subtype_slow_path; T9 serves as an extra temp.
    Register result = $result$$Register;
    Register sub = $sub$$Register;
    Register super = $super$$Register;
    Register length = $tmp$$Register;
    Register tmp = T9;
    Label miss;

    /* 2012/9/28 Jin: result may be the same as sub
     * 47c   B40: #  B21 B41 <- B20  Freq: 0.155379
     * 47c     partialSubtypeCheck result=S1, sub=S1, super=S3, length=S0
     * 4bc     mov   S2, NULL #@loadConP
     * 4c0     beq   S1, S2, B21 #@branchConP  P=0.999999  C=-1.000000
     */
    MacroAssembler _masm(&cbuf);
    Label done;
    __ check_klass_subtype_slow_path(sub, super, length, tmp,
                                     NULL, &miss,
                                     /*set_cond_codes:*/ true);
    /* 2013/7/22 Jin: Refer to X86_64's RDI */
    // Hit: result = 0 (callers compare result against NULL/zero).
    __ move(result, 0);
    __ b(done);
    __ nop();   // branch delay slot

    __ bind(miss);
    __ move(result, 1);
    __ bind(done);
  %}
3151 %}
3154 //---------MIPS FRAME--------------------------------------------------------------
3155 // Definition of frame structure and management information.
3156 //
3157 // S T A C K L A Y O U T Allocators stack-slot number
3158 // | (to get allocators register number
3159 // G Owned by | | v add SharedInfo::stack0)
3160 // r CALLER | |
3161 // o | +--------+ pad to even-align allocators stack-slot
3162 // w V | pad0 | numbers; owned by CALLER
3163 // t -----------+--------+----> Matcher::_in_arg_limit, unaligned
3164 // h ^ | in | 5
3165 // | | args | 4 Holes in incoming args owned by SELF
3166 // | | old | | 3
3167 // | | SP-+--------+----> Matcher::_old_SP, even aligned
3168 // v | | ret | 3 return address
3169 // Owned by +--------+
3170 // Self | pad2 | 2 pad to align old SP
3171 // | +--------+ 1
3172 // | | locks | 0
3173 // | +--------+----> SharedInfo::stack0, even aligned
3174 // | | pad1 | 11 pad to align new SP
3175 // | +--------+
3176 // | | | 10
3177 // | | spills | 9 spills
3178 // V | | 8 (pad0 slot for callee)
3179 // -----------+--------+----> Matcher::_out_arg_limit, unaligned
3180 // ^ | out | 7
3181 // | | args | 6 Holes in outgoing args owned by CALLEE
3182 // Owned by new | |
3183 // Callee SP-+--------+----> Matcher::_new_SP, even aligned
3184 // | |
3185 //
3186 // Note 1: Only region 8-11 is determined by the allocator. Region 0-5 is
3187 // known from SELF's arguments and the Java calling convention.
3188 // Region 6-7 is determined per call site.
// Note 2: If the calling convention leaves holes in the incoming argument
//         area, those holes are owned by SELF.  Holes in the outgoing area
//         are owned by the CALLEE.  Holes should not be necessary in the
//         incoming area, as the Java calling convention is completely under
//         the control of the AD file.  Doubles can be sorted and packed to
//         avoid holes.  Holes in the outgoing arguments may be necessary for
//         varargs C calling conventions.
3196 // Note 3: Region 0-3 is even aligned, with pad2 as needed. Region 3-5 is
3197 // even aligned with pad0 as needed.
3198 // Region 6 is even aligned. Region 6-7 is NOT even aligned;
3199 // region 6-11 is even aligned; it may be padded out more so that
3200 // the region from SP to FP meets the minimum stack alignment.
3201 // Note 4: For I2C adapters, the incoming FP may not meet the minimum stack
3202 // alignment. Region 11, pad1, may be dynamically extended so that
3203 // SP meets the minimum alignment.
3206 frame %{
3208 stack_direction(TOWARDS_LOW);
3210 // These two registers define part of the calling convention
3211 // between compiled code and the interpreter.
3212 // SEE StartI2CNode::calling_convention & StartC2INode::calling_convention & StartOSRNode::calling_convention
3213 // for more information. by yjl 3/16/2006
3215 inline_cache_reg(T1); // Inline Cache Register
3216 interpreter_method_oop_reg(S3); // Method Oop Register when calling interpreter
3217 /*
3218 inline_cache_reg(T1); // Inline Cache Register or methodOop for I2C
3219 interpreter_arg_ptr_reg(A0); // Argument pointer for I2C adapters
3220 */
3222 // Optional: name the operand used by cisc-spilling to access [stack_pointer + offset]
3223 cisc_spilling_operand_name(indOffset32);
3225 // Number of stack slots consumed by locking an object
3226 // generate Compile::sync_stack_slots
3227 #ifdef _LP64
3228 sync_stack_slots(2);
3229 #else
3230 sync_stack_slots(1);
3231 #endif
3233 frame_pointer(SP);
3235 // Interpreter stores its frame pointer in a register which is
3236 // stored to the stack by I2CAdaptors.
3237 // I2CAdaptors convert from interpreted java to compiled java.
3239 interpreter_frame_pointer(FP);
3241 // generate Matcher::stack_alignment
3242 stack_alignment(StackAlignmentInBytes); //wordSize = sizeof(char*);
3244 // Number of stack slots between incoming argument block and the start of
3245 // a new frame. The PROLOG must add this many slots to the stack. The
3246 // EPILOG must remove this many slots. Intel needs one slot for
3247 // return address.
3248 // generate Matcher::in_preserve_stack_slots
3249 //in_preserve_stack_slots(VerifyStackAtCalls + 2); //Now VerifyStackAtCalls is defined as false ! Leave one stack slot for ra and fp
3250 in_preserve_stack_slots(4); //Now VerifyStackAtCalls is defined as false ! Leave two stack slots for ra and fp
3252 // Number of outgoing stack slots killed above the out_preserve_stack_slots
3253 // for calls to C. Supports the var-args backing area for register parms.
3254 varargs_C_out_slots_killed(0);
3256 // The after-PROLOG location of the return address. Location of
3257 // return address specifies a type (REG or STACK) and a number
3258 // representing the register number (i.e. - use a register name) or
3259 // stack slot.
3260 // Ret Addr is on stack in slot 0 if no locks or verification or alignment.
3261 // Otherwise, it is above the locks and verification slot and alignment word
3262 //return_addr(STACK -1+ round_to(1+VerifyStackAtCalls+Compile::current()->sync()*Compile::current()->sync_stack_slots(),WordsPerLong));
3263 return_addr(REG RA);
3265 // Body of function which returns an integer array locating
3266 // arguments either in registers or in stack slots. Passed an array
3267 // of ideal registers called "sig" and a "length" count. Stack-slot
3268 // offsets are based on outgoing arguments, i.e. a CALLER setting up
3269 // arguments for a CALLEE. Incoming stack arguments are
3270 // automatically biased by the preserve_stack_slots field above.
3273 // will generated to Matcher::calling_convention(OptoRegPair *sig, uint length, bool is_outgoing)
3274 // StartNode::calling_convention call this. by yjl 3/16/2006
3275 calling_convention %{
3276 SharedRuntime::java_calling_convention(sig_bt, regs, length, false);
3277 %}
3282 // Body of function which returns an integer array locating
3283 // arguments either in registers or in stack slots. Passed an array
3284 // of ideal registers called "sig" and a "length" count. Stack-slot
3285 // offsets are based on outgoing arguments, i.e. a CALLER setting up
3286 // arguments for a CALLEE. Incoming stack arguments are
3287 // automatically biased by the preserve_stack_slots field above.
3290 // SEE CallRuntimeNode::calling_convention for more information. by yjl 3/16/2006
3291 c_calling_convention %{
3292 (void) SharedRuntime::c_calling_convention(sig_bt, regs, /*regs2=*/NULL, length);
3293 %}
3296 // Location of C & interpreter return values
3297 // register(s) contain(s) return value for Op_StartI2C and Op_StartOSR.
3298 // SEE Matcher::match. by yjl 3/16/2006
3299 c_return_value %{
3300 assert( ideal_reg >= Op_RegI && ideal_reg <= Op_RegL, "only return normal values" );
3301 /* -- , -- , Op_RegN, Op_RegI, Op_RegP, Op_RegF, Op_RegD, Op_RegL */
3302 static int lo[Op_RegL+1] = { 0, 0, V0_num, V0_num, V0_num, F0_num, F0_num, V0_num };
3303 static int hi[Op_RegL+1] = { 0, 0, OptoReg::Bad, OptoReg::Bad, V0_H_num, OptoReg::Bad, F0_H_num, V0_H_num };
3304 return OptoRegPair(hi[ideal_reg],lo[ideal_reg]);
3305 %}
3307 // Location of return values
3308 // register(s) contain(s) return value for Op_StartC2I and Op_Start.
3309 // SEE Matcher::match. by yjl 3/16/2006
3311 return_value %{
3312 assert( ideal_reg >= Op_RegI && ideal_reg <= Op_RegL, "only return normal values" );
3313 /* -- , -- , Op_RegN, Op_RegI, Op_RegP, Op_RegF, Op_RegD, Op_RegL */
3314 static int lo[Op_RegL+1] = { 0, 0, V0_num, V0_num, V0_num, F0_num, F0_num, V0_num };
3315 static int hi[Op_RegL+1] = { 0, 0, OptoReg::Bad, OptoReg::Bad, V0_H_num, OptoReg::Bad, F0_H_num, V0_H_num};
3316 return OptoRegPair(hi[ideal_reg],lo[ideal_reg]);
3317 %}
3319 %}
3321 //----------ATTRIBUTES---------------------------------------------------------
3322 //----------Operand Attributes-------------------------------------------------
3323 op_attrib op_cost(0); // Required cost attribute
3325 //----------Instruction Attributes---------------------------------------------
3326 ins_attrib ins_cost(100); // Required cost attribute
3327 ins_attrib ins_size(32); // Required size attribute (in bits)
3328 ins_attrib ins_pc_relative(0); // Required PC Relative flag
3329 ins_attrib ins_short_branch(0); // Required flag: is this instruction a
3330 // non-matching short branch variant of some
3331 // long branch?
3332 ins_attrib ins_alignment(4); // Required alignment attribute (must be a power of 2)
3333 // specifies the alignment that some part of the instruction (not
3334 // necessarily the start) requires. If > 1, a compute_padding()
3335 // function must be provided for the instruction
3337 //----------OPERANDS-----------------------------------------------------------
3338 // Operand definitions must precede instruction definitions for correct parsing
3339 // in the ADLC because operands constitute user defined types which are used in
3340 // instruction definitions.
3342 // Vectors
3343 operand vecD() %{
3344 constraint(ALLOC_IN_RC(dbl_reg));
3345 match(VecD);
3347 format %{ %}
3348 interface(REG_INTER);
3349 %}
3351 // Flags register, used as output of compare instructions
3352 operand FlagsReg() %{
3353 constraint(ALLOC_IN_RC(mips_flags));
3354 match(RegFlags);
3356 format %{ "EFLAGS" %}
3357 interface(REG_INTER);
3358 %}
3360 //----------Simple Operands----------------------------------------------------
3361 //TODO: Should we need to define some more special immediate number ?
3362 // Immediate Operands
3363 // Integer Immediate
3364 operand immI() %{
3365 match(ConI);
3366 //TODO: should not match immI8 here LEE
3367 match(immI8);
3369 op_cost(20);
3370 format %{ %}
3371 interface(CONST_INTER);
3372 %}
3374 // Long Immediate 8-bit
3375 operand immL8()
3376 %{
3377 predicate(-0x80L <= n->get_long() && n->get_long() < 0x80L);
3378 match(ConL);
3380 op_cost(5);
3381 format %{ %}
3382 interface(CONST_INTER);
3383 %}
3385 // Constant for test vs zero
3386 operand immI0() %{
3387 predicate(n->get_int() == 0);
3388 match(ConI);
3390 op_cost(0);
3391 format %{ %}
3392 interface(CONST_INTER);
3393 %}
3395 // Constant for increment
3396 operand immI1() %{
3397 predicate(n->get_int() == 1);
3398 match(ConI);
3400 op_cost(0);
3401 format %{ %}
3402 interface(CONST_INTER);
3403 %}
3405 // Constant for decrement
3406 operand immI_M1() %{
3407 predicate(n->get_int() == -1);
3408 match(ConI);
3410 op_cost(0);
3411 format %{ %}
3412 interface(CONST_INTER);
3413 %}
3415 operand immI_MaxI() %{
3416 predicate(n->get_int() == 2147483647);
3417 match(ConI);
3419 op_cost(0);
3420 format %{ %}
3421 interface(CONST_INTER);
3422 %}
3424 // Valid scale values for addressing modes
3425 operand immI2() %{
3426 predicate(0 <= n->get_int() && (n->get_int() <= 3));
3427 match(ConI);
3429 format %{ %}
3430 interface(CONST_INTER);
3431 %}
3433 operand immI8() %{
3434 predicate((-128 <= n->get_int()) && (n->get_int() <= 127));
3435 match(ConI);
3437 op_cost(5);
3438 format %{ %}
3439 interface(CONST_INTER);
3440 %}
3442 operand immI16() %{
3443 predicate((-32768 <= n->get_int()) && (n->get_int() <= 32767));
3444 match(ConI);
3446 op_cost(10);
3447 format %{ %}
3448 interface(CONST_INTER);
3449 %}
3451 // Constant for long shifts
3452 operand immI_32() %{
3453 predicate( n->get_int() == 32 );
3454 match(ConI);
3456 op_cost(0);
3457 format %{ %}
3458 interface(CONST_INTER);
3459 %}
3461 operand immI_63() %{
3462 predicate( n->get_int() == 63 );
3463 match(ConI);
3465 op_cost(0);
3466 format %{ %}
3467 interface(CONST_INTER);
3468 %}
3470 operand immI_0_31() %{
3471 predicate( n->get_int() >= 0 && n->get_int() <= 31 );
3472 match(ConI);
3474 op_cost(0);
3475 format %{ %}
3476 interface(CONST_INTER);
3477 %}
3479 // Operand for non-negtive integer mask
3480 operand immI_nonneg_mask() %{
3481 predicate( (n->get_int() >= 0) && (Assembler::is_int_mask(n->get_int()) != -1) );
3482 match(ConI);
3484 op_cost(0);
3485 format %{ %}
3486 interface(CONST_INTER);
3487 %}
3489 operand immI_32_63() %{
3490 predicate( n->get_int() >= 32 && n->get_int() <= 63 );
3491 match(ConI);
3492 op_cost(0);
3494 format %{ %}
3495 interface(CONST_INTER);
3496 %}
3498 operand immI16_sub() %{
3499 predicate((-32767 <= n->get_int()) && (n->get_int() <= 32768));
3500 match(ConI);
3502 op_cost(10);
3503 format %{ %}
3504 interface(CONST_INTER);
3505 %}
3507 operand immI_0_32767() %{
3508 predicate( n->get_int() >= 0 && n->get_int() <= 32767 );
3509 match(ConI);
3510 op_cost(0);
3512 format %{ %}
3513 interface(CONST_INTER);
3514 %}
3516 operand immI_0_65535() %{
3517 predicate( n->get_int() >= 0 && n->get_int() <= 65535 );
3518 match(ConI);
3519 op_cost(0);
3521 format %{ %}
3522 interface(CONST_INTER);
3523 %}
3525 operand immI_1() %{
3526 predicate( n->get_int() == 1 );
3527 match(ConI);
3529 op_cost(0);
3530 format %{ %}
3531 interface(CONST_INTER);
3532 %}
3534 operand immI_2() %{
3535 predicate( n->get_int() == 2 );
3536 match(ConI);
3538 op_cost(0);
3539 format %{ %}
3540 interface(CONST_INTER);
3541 %}
3543 operand immI_3() %{
3544 predicate( n->get_int() == 3 );
3545 match(ConI);
3547 op_cost(0);
3548 format %{ %}
3549 interface(CONST_INTER);
3550 %}
3552 operand immI_7() %{
3553 predicate( n->get_int() == 7 );
3554 match(ConI);
3556 format %{ %}
3557 interface(CONST_INTER);
3558 %}
3560 // Immediates for special shifts (sign extend)
3562 // Constants for increment
3563 operand immI_16() %{
3564 predicate( n->get_int() == 16 );
3565 match(ConI);
3567 format %{ %}
3568 interface(CONST_INTER);
3569 %}
3571 operand immI_24() %{
3572 predicate( n->get_int() == 24 );
3573 match(ConI);
3575 format %{ %}
3576 interface(CONST_INTER);
3577 %}
3579 // Constant for byte-wide masking
3580 operand immI_255() %{
3581 predicate( n->get_int() == 255 );
3582 match(ConI);
3584 op_cost(0);
3585 format %{ %}
3586 interface(CONST_INTER);
3587 %}
3589 operand immI_65535() %{
3590 predicate( n->get_int() == 65535 );
3591 match(ConI);
3593 op_cost(5);
3594 format %{ %}
3595 interface(CONST_INTER);
3596 %}
3598 operand immI_65536() %{
3599 predicate( n->get_int() == 65536 );
3600 match(ConI);
3602 op_cost(5);
3603 format %{ %}
3604 interface(CONST_INTER);
3605 %}
3607 operand immI_M65536() %{
3608 predicate( n->get_int() == -65536 );
3609 match(ConI);
3611 op_cost(5);
3612 format %{ %}
3613 interface(CONST_INTER);
3614 %}
3616 // Pointer Immediate
3617 operand immP() %{
3618 match(ConP);
3620 op_cost(10);
3621 format %{ %}
3622 interface(CONST_INTER);
3623 %}
3625 // NULL Pointer Immediate
3626 operand immP0() %{
3627 predicate( n->get_ptr() == 0 );
3628 match(ConP);
3629 op_cost(0);
3631 format %{ %}
3632 interface(CONST_INTER);
3633 %}
3635 // Pointer Immediate: 64-bit
3636 operand immP_set() %{
3637 match(ConP);
3639 op_cost(5);
3640 // formats are generated automatically for constants and base registers
3641 format %{ %}
3642 interface(CONST_INTER);
3643 %}
3645 // Pointer Immediate: 64-bit
3646 operand immP_load() %{
3647 predicate(n->bottom_type()->isa_oop_ptr() || (MacroAssembler::insts_for_set64(n->get_ptr()) > 3));
3648 match(ConP);
3650 op_cost(5);
3651 // formats are generated automatically for constants and base registers
3652 format %{ %}
3653 interface(CONST_INTER);
3654 %}
3656 // Pointer Immediate: 64-bit
3657 operand immP_no_oop_cheap() %{
3658 predicate(!n->bottom_type()->isa_oop_ptr() && (MacroAssembler::insts_for_set64(n->get_ptr()) <= 3));
3659 match(ConP);
3661 op_cost(5);
3662 // formats are generated automatically for constants and base registers
3663 format %{ %}
3664 interface(CONST_INTER);
3665 %}
3667 // Pointer for polling page
3668 operand immP_poll() %{
3669 predicate(n->get_ptr() != 0 && n->get_ptr() == (intptr_t)os::get_polling_page());
3670 match(ConP);
3671 op_cost(5);
3673 format %{ %}
3674 interface(CONST_INTER);
3675 %}
3677 // Pointer Immediate
3678 operand immN() %{
3679 match(ConN);
3681 op_cost(10);
3682 format %{ %}
3683 interface(CONST_INTER);
3684 %}
3686 operand immNKlass() %{
3687 match(ConNKlass);
3689 op_cost(10);
3690 format %{ %}
3691 interface(CONST_INTER);
3692 %}
3694 // NULL Pointer Immediate
3695 operand immN0() %{
3696 predicate(n->get_narrowcon() == 0);
3697 match(ConN);
3699 op_cost(5);
3700 format %{ %}
3701 interface(CONST_INTER);
3702 %}
3704 // Long Immediate
3705 operand immL() %{
3706 match(ConL);
3708 op_cost(20);
3709 format %{ %}
3710 interface(CONST_INTER);
3711 %}
3713 // Long Immediate zero
3714 operand immL0() %{
3715 predicate( n->get_long() == 0L );
3716 match(ConL);
3717 op_cost(0);
3719 format %{ %}
3720 interface(CONST_INTER);
3721 %}
3723 operand immL7() %{
3724 predicate( n->get_long() == 7L );
3725 match(ConL);
3726 op_cost(0);
3728 format %{ %}
3729 interface(CONST_INTER);
3730 %}
3732 operand immL_M1() %{
3733 predicate( n->get_long() == -1L );
3734 match(ConL);
3735 op_cost(0);
3737 format %{ %}
3738 interface(CONST_INTER);
3739 %}
3741 // bit 0..2 zero
3742 operand immL_M8() %{
3743 predicate( n->get_long() == -8L );
3744 match(ConL);
3745 op_cost(0);
3747 format %{ %}
3748 interface(CONST_INTER);
3749 %}
3751 // bit 2 zero
3752 operand immL_M5() %{
3753 predicate( n->get_long() == -5L );
3754 match(ConL);
3755 op_cost(0);
3757 format %{ %}
3758 interface(CONST_INTER);
3759 %}
3761 // bit 1..2 zero
3762 operand immL_M7() %{
3763 predicate( n->get_long() == -7L );
3764 match(ConL);
3765 op_cost(0);
3767 format %{ %}
3768 interface(CONST_INTER);
3769 %}
3771 // bit 0..1 zero
3772 operand immL_M4() %{
3773 predicate( n->get_long() == -4L );
3774 match(ConL);
3775 op_cost(0);
3777 format %{ %}
3778 interface(CONST_INTER);
3779 %}
3781 // bit 3..6 zero
3782 operand immL_M121() %{
3783 predicate( n->get_long() == -121L );
3784 match(ConL);
3785 op_cost(0);
3787 format %{ %}
3788 interface(CONST_INTER);
3789 %}
3791 // Long immediate from 0 to 127.
3792 // Used for a shorter form of long mul by 10.
3793 operand immL_127() %{
3794 predicate((0 <= n->get_long()) && (n->get_long() <= 127));
3795 match(ConL);
3796 op_cost(0);
3798 format %{ %}
3799 interface(CONST_INTER);
3800 %}
3802 // Operand for non-negtive long mask
3803 operand immL_nonneg_mask() %{
3804 predicate( (n->get_long() >= 0) && (Assembler::is_jlong_mask(n->get_long()) != -1) );
3805 match(ConL);
3807 op_cost(0);
3808 format %{ %}
3809 interface(CONST_INTER);
3810 %}
3812 operand immL_0_65535() %{
3813 predicate( n->get_long() >= 0 && n->get_long() <= 65535 );
3814 match(ConL);
3815 op_cost(0);
3817 format %{ %}
3818 interface(CONST_INTER);
3819 %}
3821 // Long Immediate: cheap (materialize in <= 3 instructions)
3822 operand immL_cheap() %{
3823 predicate(MacroAssembler::insts_for_set64(n->get_long()) <= 3);
3824 match(ConL);
3825 op_cost(0);
3827 format %{ %}
3828 interface(CONST_INTER);
3829 %}
3831 // Long Immediate: expensive (materialize in > 3 instructions)
3832 operand immL_expensive() %{
3833 predicate(MacroAssembler::insts_for_set64(n->get_long()) > 3);
3834 match(ConL);
3835 op_cost(0);
3837 format %{ %}
3838 interface(CONST_INTER);
3839 %}
3841 operand immL16() %{
3842 predicate((-32768 <= n->get_long()) && (n->get_long() <= 32767));
3843 match(ConL);
3845 op_cost(10);
3846 format %{ %}
3847 interface(CONST_INTER);
3848 %}
3850 operand immL16_sub() %{
3851 predicate((-32767 <= n->get_long()) && (n->get_long() <= 32768));
3852 match(ConL);
3854 op_cost(10);
3855 format %{ %}
3856 interface(CONST_INTER);
3857 %}
3859 // Long Immediate: low 32-bit mask
3860 operand immL_32bits() %{
3861 predicate(n->get_long() == 0xFFFFFFFFL);
3862 match(ConL);
3863 op_cost(20);
3865 format %{ %}
3866 interface(CONST_INTER);
3867 %}
3869 // Long Immediate 32-bit signed
3870 operand immL32()
3871 %{
3872 predicate(n->get_long() == (int) (n->get_long()));
3873 match(ConL);
3875 op_cost(15);
3876 format %{ %}
3877 interface(CONST_INTER);
3878 %}
3881 //single-precision floating-point zero
3882 operand immF0() %{
3883 predicate(jint_cast(n->getf()) == 0);
3884 match(ConF);
3886 op_cost(5);
3887 format %{ %}
3888 interface(CONST_INTER);
3889 %}
3891 //single-precision floating-point immediate
3892 operand immF() %{
3893 match(ConF);
3895 op_cost(20);
3896 format %{ %}
3897 interface(CONST_INTER);
3898 %}
3900 //double-precision floating-point zero
3901 operand immD0() %{
3902 predicate(jlong_cast(n->getd()) == 0);
3903 match(ConD);
3905 op_cost(5);
3906 format %{ %}
3907 interface(CONST_INTER);
3908 %}
3910 //double-precision floating-point immediate
3911 operand immD() %{
3912 match(ConD);
3914 op_cost(20);
3915 format %{ %}
3916 interface(CONST_INTER);
3917 %}
3919 // Register Operands
3920 // Integer Register
3921 operand mRegI() %{
3922 constraint(ALLOC_IN_RC(int_reg));
3923 match(RegI);
3925 format %{ %}
3926 interface(REG_INTER);
3927 %}
3929 operand no_Ax_mRegI() %{
3930 constraint(ALLOC_IN_RC(no_Ax_int_reg));
3931 match(RegI);
3932 match(mRegI);
3934 format %{ %}
3935 interface(REG_INTER);
3936 %}
3938 operand mS0RegI() %{
3939 constraint(ALLOC_IN_RC(s0_reg));
3940 match(RegI);
3941 match(mRegI);
3943 format %{ "S0" %}
3944 interface(REG_INTER);
3945 %}
3947 operand mS1RegI() %{
3948 constraint(ALLOC_IN_RC(s1_reg));
3949 match(RegI);
3950 match(mRegI);
3952 format %{ "S1" %}
3953 interface(REG_INTER);
3954 %}
3956 operand mS2RegI() %{
3957 constraint(ALLOC_IN_RC(s2_reg));
3958 match(RegI);
3959 match(mRegI);
3961 format %{ "S2" %}
3962 interface(REG_INTER);
3963 %}
3965 operand mS3RegI() %{
3966 constraint(ALLOC_IN_RC(s3_reg));
3967 match(RegI);
3968 match(mRegI);
3970 format %{ "S3" %}
3971 interface(REG_INTER);
3972 %}
3974 operand mS4RegI() %{
3975 constraint(ALLOC_IN_RC(s4_reg));
3976 match(RegI);
3977 match(mRegI);
3979 format %{ "S4" %}
3980 interface(REG_INTER);
3981 %}
3983 operand mS5RegI() %{
3984 constraint(ALLOC_IN_RC(s5_reg));
3985 match(RegI);
3986 match(mRegI);
3988 format %{ "S5" %}
3989 interface(REG_INTER);
3990 %}
3992 operand mS6RegI() %{
3993 constraint(ALLOC_IN_RC(s6_reg));
3994 match(RegI);
3995 match(mRegI);
3997 format %{ "S6" %}
3998 interface(REG_INTER);
3999 %}
4001 operand mS7RegI() %{
4002 constraint(ALLOC_IN_RC(s7_reg));
4003 match(RegI);
4004 match(mRegI);
4006 format %{ "S7" %}
4007 interface(REG_INTER);
4008 %}
4011 operand mT0RegI() %{
4012 constraint(ALLOC_IN_RC(t0_reg));
4013 match(RegI);
4014 match(mRegI);
4016 format %{ "T0" %}
4017 interface(REG_INTER);
4018 %}
4020 operand mT1RegI() %{
4021 constraint(ALLOC_IN_RC(t1_reg));
4022 match(RegI);
4023 match(mRegI);
4025 format %{ "T1" %}
4026 interface(REG_INTER);
4027 %}
4029 operand mT2RegI() %{
4030 constraint(ALLOC_IN_RC(t2_reg));
4031 match(RegI);
4032 match(mRegI);
4034 format %{ "T2" %}
4035 interface(REG_INTER);
4036 %}
4038 operand mT3RegI() %{
4039 constraint(ALLOC_IN_RC(t3_reg));
4040 match(RegI);
4041 match(mRegI);
4043 format %{ "T3" %}
4044 interface(REG_INTER);
4045 %}
4047 operand mT8RegI() %{
4048 constraint(ALLOC_IN_RC(t8_reg));
4049 match(RegI);
4050 match(mRegI);
4052 format %{ "T8" %}
4053 interface(REG_INTER);
4054 %}
4056 operand mT9RegI() %{
4057 constraint(ALLOC_IN_RC(t9_reg));
4058 match(RegI);
4059 match(mRegI);
4061 format %{ "T9" %}
4062 interface(REG_INTER);
4063 %}
4065 operand mA0RegI() %{
4066 constraint(ALLOC_IN_RC(a0_reg));
4067 match(RegI);
4068 match(mRegI);
4070 format %{ "A0" %}
4071 interface(REG_INTER);
4072 %}
4074 operand mA1RegI() %{
4075 constraint(ALLOC_IN_RC(a1_reg));
4076 match(RegI);
4077 match(mRegI);
4079 format %{ "A1" %}
4080 interface(REG_INTER);
4081 %}
4083 operand mA2RegI() %{
4084 constraint(ALLOC_IN_RC(a2_reg));
4085 match(RegI);
4086 match(mRegI);
4088 format %{ "A2" %}
4089 interface(REG_INTER);
4090 %}
4092 operand mA3RegI() %{
4093 constraint(ALLOC_IN_RC(a3_reg));
4094 match(RegI);
4095 match(mRegI);
4097 format %{ "A3" %}
4098 interface(REG_INTER);
4099 %}
4101 operand mA4RegI() %{
4102 constraint(ALLOC_IN_RC(a4_reg));
4103 match(RegI);
4104 match(mRegI);
4106 format %{ "A4" %}
4107 interface(REG_INTER);
4108 %}
4110 operand mA5RegI() %{
4111 constraint(ALLOC_IN_RC(a5_reg));
4112 match(RegI);
4113 match(mRegI);
4115 format %{ "A5" %}
4116 interface(REG_INTER);
4117 %}
4119 operand mA6RegI() %{
4120 constraint(ALLOC_IN_RC(a6_reg));
4121 match(RegI);
4122 match(mRegI);
4124 format %{ "A6" %}
4125 interface(REG_INTER);
4126 %}
4128 operand mA7RegI() %{
4129 constraint(ALLOC_IN_RC(a7_reg));
4130 match(RegI);
4131 match(mRegI);
4133 format %{ "A7" %}
4134 interface(REG_INTER);
4135 %}
4137 operand mV0RegI() %{
4138 constraint(ALLOC_IN_RC(v0_reg));
4139 match(RegI);
4140 match(mRegI);
4142 format %{ "V0" %}
4143 interface(REG_INTER);
4144 %}
4146 operand mV1RegI() %{
4147 constraint(ALLOC_IN_RC(v1_reg));
4148 match(RegI);
4149 match(mRegI);
4151 format %{ "V1" %}
4152 interface(REG_INTER);
4153 %}
4155 operand mRegN() %{
4156 constraint(ALLOC_IN_RC(int_reg));
4157 match(RegN);
4159 format %{ %}
4160 interface(REG_INTER);
4161 %}
4163 operand t0_RegN() %{
4164 constraint(ALLOC_IN_RC(t0_reg));
4165 match(RegN);
4166 match(mRegN);
4168 format %{ %}
4169 interface(REG_INTER);
4170 %}
4172 operand t1_RegN() %{
4173 constraint(ALLOC_IN_RC(t1_reg));
4174 match(RegN);
4175 match(mRegN);
4177 format %{ %}
4178 interface(REG_INTER);
4179 %}
4181 operand t2_RegN() %{
4182 constraint(ALLOC_IN_RC(t2_reg));
4183 match(RegN);
4184 match(mRegN);
4186 format %{ %}
4187 interface(REG_INTER);
4188 %}
4190 operand t3_RegN() %{
4191 constraint(ALLOC_IN_RC(t3_reg));
4192 match(RegN);
4193 match(mRegN);
4195 format %{ %}
4196 interface(REG_INTER);
4197 %}
4199 operand t8_RegN() %{
4200 constraint(ALLOC_IN_RC(t8_reg));
4201 match(RegN);
4202 match(mRegN);
4204 format %{ %}
4205 interface(REG_INTER);
4206 %}
4208 operand t9_RegN() %{
4209 constraint(ALLOC_IN_RC(t9_reg));
4210 match(RegN);
4211 match(mRegN);
4213 format %{ %}
4214 interface(REG_INTER);
4215 %}
4217 operand a0_RegN() %{
4218 constraint(ALLOC_IN_RC(a0_reg));
4219 match(RegN);
4220 match(mRegN);
4222 format %{ %}
4223 interface(REG_INTER);
4224 %}
4226 operand a1_RegN() %{
4227 constraint(ALLOC_IN_RC(a1_reg));
4228 match(RegN);
4229 match(mRegN);
4231 format %{ %}
4232 interface(REG_INTER);
4233 %}
4235 operand a2_RegN() %{
4236 constraint(ALLOC_IN_RC(a2_reg));
4237 match(RegN);
4238 match(mRegN);
4240 format %{ %}
4241 interface(REG_INTER);
4242 %}
4244 operand a3_RegN() %{
4245 constraint(ALLOC_IN_RC(a3_reg));
4246 match(RegN);
4247 match(mRegN);
4249 format %{ %}
4250 interface(REG_INTER);
4251 %}
4253 operand a4_RegN() %{
4254 constraint(ALLOC_IN_RC(a4_reg));
4255 match(RegN);
4256 match(mRegN);
4258 format %{ %}
4259 interface(REG_INTER);
4260 %}
4262 operand a5_RegN() %{
4263 constraint(ALLOC_IN_RC(a5_reg));
4264 match(RegN);
4265 match(mRegN);
4267 format %{ %}
4268 interface(REG_INTER);
4269 %}
4271 operand a6_RegN() %{
4272 constraint(ALLOC_IN_RC(a6_reg));
4273 match(RegN);
4274 match(mRegN);
4276 format %{ %}
4277 interface(REG_INTER);
4278 %}
4280 operand a7_RegN() %{
4281 constraint(ALLOC_IN_RC(a7_reg));
4282 match(RegN);
4283 match(mRegN);
4285 format %{ %}
4286 interface(REG_INTER);
4287 %}
4289 operand s0_RegN() %{
4290 constraint(ALLOC_IN_RC(s0_reg));
4291 match(RegN);
4292 match(mRegN);
4294 format %{ %}
4295 interface(REG_INTER);
4296 %}
4298 operand s1_RegN() %{
4299 constraint(ALLOC_IN_RC(s1_reg));
4300 match(RegN);
4301 match(mRegN);
4303 format %{ %}
4304 interface(REG_INTER);
4305 %}
4307 operand s2_RegN() %{
4308 constraint(ALLOC_IN_RC(s2_reg));
4309 match(RegN);
4310 match(mRegN);
4312 format %{ %}
4313 interface(REG_INTER);
4314 %}
4316 operand s3_RegN() %{
4317 constraint(ALLOC_IN_RC(s3_reg));
4318 match(RegN);
4319 match(mRegN);
4321 format %{ %}
4322 interface(REG_INTER);
4323 %}
4325 operand s4_RegN() %{
4326 constraint(ALLOC_IN_RC(s4_reg));
4327 match(RegN);
4328 match(mRegN);
4330 format %{ %}
4331 interface(REG_INTER);
4332 %}
4334 operand s5_RegN() %{
4335 constraint(ALLOC_IN_RC(s5_reg));
4336 match(RegN);
4337 match(mRegN);
4339 format %{ %}
4340 interface(REG_INTER);
4341 %}
4343 operand s6_RegN() %{
4344 constraint(ALLOC_IN_RC(s6_reg));
4345 match(RegN);
4346 match(mRegN);
4348 format %{ %}
4349 interface(REG_INTER);
4350 %}
4352 operand s7_RegN() %{
4353 constraint(ALLOC_IN_RC(s7_reg));
4354 match(RegN);
4355 match(mRegN);
4357 format %{ %}
4358 interface(REG_INTER);
4359 %}
4361 operand v0_RegN() %{
4362 constraint(ALLOC_IN_RC(v0_reg));
4363 match(RegN);
4364 match(mRegN);
4366 format %{ %}
4367 interface(REG_INTER);
4368 %}
4370 operand v1_RegN() %{
4371 constraint(ALLOC_IN_RC(v1_reg));
4372 match(RegN);
4373 match(mRegN);
4375 format %{ %}
4376 interface(REG_INTER);
4377 %}
4379 // Pointer Register
4380 operand mRegP() %{
4381 constraint(ALLOC_IN_RC(p_reg));
4382 match(RegP);
4384 format %{ %}
4385 interface(REG_INTER);
4386 %}
4388 operand no_T8_mRegP() %{
4389 constraint(ALLOC_IN_RC(no_T8_p_reg));
4390 match(RegP);
4391 match(mRegP);
4393 format %{ %}
4394 interface(REG_INTER);
4395 %}
4397 operand s0_RegP()
4398 %{
4399 constraint(ALLOC_IN_RC(s0_long_reg));
4400 match(RegP);
4401 match(mRegP);
4402 match(no_T8_mRegP);
4404 format %{ %}
4405 interface(REG_INTER);
4406 %}
4408 operand s1_RegP()
4409 %{
4410 constraint(ALLOC_IN_RC(s1_long_reg));
4411 match(RegP);
4412 match(mRegP);
4413 match(no_T8_mRegP);
4415 format %{ %}
4416 interface(REG_INTER);
4417 %}
4419 operand s2_RegP()
4420 %{
4421 constraint(ALLOC_IN_RC(s2_long_reg));
4422 match(RegP);
4423 match(mRegP);
4424 match(no_T8_mRegP);
4426 format %{ %}
4427 interface(REG_INTER);
4428 %}
4430 operand s3_RegP()
4431 %{
4432 constraint(ALLOC_IN_RC(s3_long_reg));
4433 match(RegP);
4434 match(mRegP);
4435 match(no_T8_mRegP);
4437 format %{ %}
4438 interface(REG_INTER);
4439 %}
// ---- Single-register pointer operands ---------------------------------------
// One operand per individual GPR usable as a 64-bit pointer.  Each constrains
// allocation to a one-register class (<reg>_long_reg) and additionally matches
// the generic RegP/mRegP (and no_T8_mRegP) operands so the matcher can narrow
// a generic pointer operand down to this specific register.
4441 operand s4_RegP()
4442 %{
4443 constraint(ALLOC_IN_RC(s4_long_reg));
4444 match(RegP);
4445 match(mRegP);
4446 match(no_T8_mRegP);
4448 format %{ %}
4449 interface(REG_INTER);
4450 %}
4452 operand s5_RegP()
4453 %{
4454 constraint(ALLOC_IN_RC(s5_long_reg));
4455 match(RegP);
4456 match(mRegP);
4457 match(no_T8_mRegP);
4459 format %{ %}
4460 interface(REG_INTER);
4461 %}
4463 operand s6_RegP()
4464 %{
4465 constraint(ALLOC_IN_RC(s6_long_reg));
4466 match(RegP);
4467 match(mRegP);
4468 match(no_T8_mRegP);
4470 format %{ %}
4471 interface(REG_INTER);
4472 %}
4474 operand s7_RegP()
4475 %{
4476 constraint(ALLOC_IN_RC(s7_long_reg));
4477 match(RegP);
4478 match(mRegP);
4479 match(no_T8_mRegP);
4481 format %{ %}
4482 interface(REG_INTER);
4483 %}
4485 operand t0_RegP()
4486 %{
4487 constraint(ALLOC_IN_RC(t0_long_reg));
4488 match(RegP);
4489 match(mRegP);
4490 match(no_T8_mRegP);
4492 format %{ %}
4493 interface(REG_INTER);
4494 %}
4496 operand t1_RegP()
4497 %{
4498 constraint(ALLOC_IN_RC(t1_long_reg));
4499 match(RegP);
4500 match(mRegP);
4501 match(no_T8_mRegP);
4503 format %{ %}
4504 interface(REG_INTER);
4505 %}
4507 operand t2_RegP()
4508 %{
4509 constraint(ALLOC_IN_RC(t2_long_reg));
4510 match(RegP);
4511 match(mRegP);
4512 match(no_T8_mRegP);
4514 format %{ %}
4515 interface(REG_INTER);
4516 %}
4518 operand t3_RegP()
4519 %{
4520 constraint(ALLOC_IN_RC(t3_long_reg));
4521 match(RegP);
4522 match(mRegP);
4523 match(no_T8_mRegP);
4525 format %{ %}
4526 interface(REG_INTER);
4527 %}
// NOTE: t8_RegP is the only pointer operand here that does NOT match
// no_T8_mRegP — T8 is by definition excluded from that class.
4529 operand t8_RegP()
4530 %{
4531 constraint(ALLOC_IN_RC(t8_long_reg));
4532 match(RegP);
4533 match(mRegP);
4535 format %{ %}
4536 interface(REG_INTER);
4537 %}
4539 operand t9_RegP()
4540 %{
4541 constraint(ALLOC_IN_RC(t9_long_reg));
4542 match(RegP);
4543 match(mRegP);
4544 match(no_T8_mRegP);
4546 format %{ %}
4547 interface(REG_INTER);
4548 %}
4550 operand a0_RegP()
4551 %{
4552 constraint(ALLOC_IN_RC(a0_long_reg));
4553 match(RegP);
4554 match(mRegP);
4555 match(no_T8_mRegP);
4557 format %{ %}
4558 interface(REG_INTER);
4559 %}
4561 operand a1_RegP()
4562 %{
4563 constraint(ALLOC_IN_RC(a1_long_reg));
4564 match(RegP);
4565 match(mRegP);
4566 match(no_T8_mRegP);
4568 format %{ %}
4569 interface(REG_INTER);
4570 %}
4572 operand a2_RegP()
4573 %{
4574 constraint(ALLOC_IN_RC(a2_long_reg));
4575 match(RegP);
4576 match(mRegP);
4577 match(no_T8_mRegP);
4579 format %{ %}
4580 interface(REG_INTER);
4581 %}
4583 operand a3_RegP()
4584 %{
4585 constraint(ALLOC_IN_RC(a3_long_reg));
4586 match(RegP);
4587 match(mRegP);
4588 match(no_T8_mRegP);
4590 format %{ %}
4591 interface(REG_INTER);
4592 %}
4594 operand a4_RegP()
4595 %{
4596 constraint(ALLOC_IN_RC(a4_long_reg));
4597 match(RegP);
4598 match(mRegP);
4599 match(no_T8_mRegP);
4601 format %{ %}
4602 interface(REG_INTER);
4603 %}
4606 operand a5_RegP()
4607 %{
4608 constraint(ALLOC_IN_RC(a5_long_reg));
4609 match(RegP);
4610 match(mRegP);
4611 match(no_T8_mRegP);
4613 format %{ %}
4614 interface(REG_INTER);
4615 %}
4617 operand a6_RegP()
4618 %{
4619 constraint(ALLOC_IN_RC(a6_long_reg));
4620 match(RegP);
4621 match(mRegP);
4622 match(no_T8_mRegP);
4624 format %{ %}
4625 interface(REG_INTER);
4626 %}
4628 operand a7_RegP()
4629 %{
4630 constraint(ALLOC_IN_RC(a7_long_reg));
4631 match(RegP);
4632 match(mRegP);
4633 match(no_T8_mRegP);
4635 format %{ %}
4636 interface(REG_INTER);
4637 %}
4639 operand v0_RegP()
4640 %{
4641 constraint(ALLOC_IN_RC(v0_long_reg));
4642 match(RegP);
4643 match(mRegP);
4644 match(no_T8_mRegP);
4646 format %{ %}
4647 interface(REG_INTER);
4648 %}
4650 operand v1_RegP()
4651 %{
4652 constraint(ALLOC_IN_RC(v1_long_reg));
4653 match(RegP);
4654 match(mRegP);
4655 match(no_T8_mRegP);
4657 format %{ %}
4658 interface(REG_INTER);
4659 %}
4661 /*
4662 operand mSPRegP(mRegP reg) %{
4663 constraint(ALLOC_IN_RC(sp_reg));
4664 match(reg);
4666 format %{ "SP" %}
4667 interface(REG_INTER);
4668 %}
4670 operand mFPRegP(mRegP reg) %{
4671 constraint(ALLOC_IN_RC(fp_reg));
4672 match(reg);
4674 format %{ "FP" %}
4675 interface(REG_INTER);
4676 %}
4677 */
// ---- Long (64-bit integer) register operands --------------------------------
// mRegL is the generic long-register operand; the <reg>RegL operands below pin
// allocation to a single register while still matching RegL/mRegL.
// NOTE: s5RegL/s6RegL are not defined here — presumably those registers are
// reserved for VM-internal use; TODO confirm against the register definitions.
4679 operand mRegL() %{
4680 constraint(ALLOC_IN_RC(long_reg));
4681 match(RegL);
4683 format %{ %}
4684 interface(REG_INTER);
4685 %}
4687 operand v0RegL() %{
4688 constraint(ALLOC_IN_RC(v0_long_reg));
4689 match(RegL);
4690 match(mRegL);
4692 format %{ %}
4693 interface(REG_INTER);
4694 %}
4696 operand v1RegL() %{
4697 constraint(ALLOC_IN_RC(v1_long_reg));
4698 match(RegL);
4699 match(mRegL);
4701 format %{ %}
4702 interface(REG_INTER);
4703 %}
4705 operand a0RegL() %{
4706 constraint(ALLOC_IN_RC(a0_long_reg));
4707 match(RegL);
4708 match(mRegL);
// Inconsistency: a0RegL is the only long operand with a non-empty format
// string ("A0"); siblings use an empty format. Harmless — affects only the
// disassembly text, not matching.
4710 format %{ "A0" %}
4711 interface(REG_INTER);
4712 %}
4714 operand a1RegL() %{
4715 constraint(ALLOC_IN_RC(a1_long_reg));
4716 match(RegL);
4717 match(mRegL);
4719 format %{ %}
4720 interface(REG_INTER);
4721 %}
4723 operand a2RegL() %{
4724 constraint(ALLOC_IN_RC(a2_long_reg));
4725 match(RegL);
4726 match(mRegL);
4728 format %{ %}
4729 interface(REG_INTER);
4730 %}
4732 operand a3RegL() %{
4733 constraint(ALLOC_IN_RC(a3_long_reg));
4734 match(RegL);
4735 match(mRegL);
4737 format %{ %}
4738 interface(REG_INTER);
4739 %}
4741 operand t0RegL() %{
4742 constraint(ALLOC_IN_RC(t0_long_reg));
4743 match(RegL);
4744 match(mRegL);
4746 format %{ %}
4747 interface(REG_INTER);
4748 %}
4750 operand t1RegL() %{
4751 constraint(ALLOC_IN_RC(t1_long_reg));
4752 match(RegL);
4753 match(mRegL);
4755 format %{ %}
4756 interface(REG_INTER);
4757 %}
4759 operand t2RegL() %{
4760 constraint(ALLOC_IN_RC(t2_long_reg));
4761 match(RegL);
4762 match(mRegL);
4764 format %{ %}
4765 interface(REG_INTER);
4766 %}
4768 operand t3RegL() %{
4769 constraint(ALLOC_IN_RC(t3_long_reg));
4770 match(RegL);
4771 match(mRegL);
4773 format %{ %}
4774 interface(REG_INTER);
4775 %}
4777 operand t8RegL() %{
4778 constraint(ALLOC_IN_RC(t8_long_reg));
4779 match(RegL);
4780 match(mRegL);
4782 format %{ %}
4783 interface(REG_INTER);
4784 %}
4786 operand a4RegL() %{
4787 constraint(ALLOC_IN_RC(a4_long_reg));
4788 match(RegL);
4789 match(mRegL);
4791 format %{ %}
4792 interface(REG_INTER);
4793 %}
4795 operand a5RegL() %{
4796 constraint(ALLOC_IN_RC(a5_long_reg));
4797 match(RegL);
4798 match(mRegL);
4800 format %{ %}
4801 interface(REG_INTER);
4802 %}
4804 operand a6RegL() %{
4805 constraint(ALLOC_IN_RC(a6_long_reg));
4806 match(RegL);
4807 match(mRegL);
4809 format %{ %}
4810 interface(REG_INTER);
4811 %}
4813 operand a7RegL() %{
4814 constraint(ALLOC_IN_RC(a7_long_reg));
4815 match(RegL);
4816 match(mRegL);
4818 format %{ %}
4819 interface(REG_INTER);
4820 %}
4822 operand s0RegL() %{
4823 constraint(ALLOC_IN_RC(s0_long_reg));
4824 match(RegL);
4825 match(mRegL);
4827 format %{ %}
4828 interface(REG_INTER);
4829 %}
4831 operand s1RegL() %{
4832 constraint(ALLOC_IN_RC(s1_long_reg));
4833 match(RegL);
4834 match(mRegL);
4836 format %{ %}
4837 interface(REG_INTER);
4838 %}
4840 operand s2RegL() %{
4841 constraint(ALLOC_IN_RC(s2_long_reg));
4842 match(RegL);
4843 match(mRegL);
4845 format %{ %}
4846 interface(REG_INTER);
4847 %}
4849 operand s3RegL() %{
4850 constraint(ALLOC_IN_RC(s3_long_reg));
4851 match(RegL);
4852 match(mRegL);
4854 format %{ %}
4855 interface(REG_INTER);
4856 %}
4858 operand s4RegL() %{
4859 constraint(ALLOC_IN_RC(s4_long_reg));
4860 match(RegL);
4861 match(mRegL);
4863 format %{ %}
4864 interface(REG_INTER);
4865 %}
4867 operand s7RegL() %{
4868 constraint(ALLOC_IN_RC(s7_long_reg));
4869 match(RegL);
4870 match(mRegL);
4872 format %{ %}
4873 interface(REG_INTER);
4874 %}
4876 // Floating register operands
// Single-precision FP register operand (flt_reg class).
4877 operand regF() %{
4878 constraint(ALLOC_IN_RC(flt_reg));
4879 match(RegF);
4881 format %{ %}
4882 interface(REG_INTER);
4883 %}
4885 //Double Precision Floating register operands
// Double-precision FP register operand (dbl_reg class).
4886 operand regD() %{
4887 constraint(ALLOC_IN_RC(dbl_reg));
4888 match(RegD);
4890 format %{ %}
4891 interface(REG_INTER);
4892 %}
4894 //----------Memory Operands----------------------------------------------------
// Each memory operand exposes the MEMORY_INTER quadruple (base, index, scale,
// disp) that the encoders use to form the effective address
// base + (index << scale) + disp.  The match rule states which ideal-graph
// address expression the operand covers.
4895 // Indirect Memory Operand
4896 operand indirect(mRegP reg) %{
4897 constraint(ALLOC_IN_RC(p_reg));
4898 match(reg);
4900 format %{ "[$reg] @ indirect" %}
4901 interface(MEMORY_INTER) %{
4902 base($reg);
4903 index(0x0); /* NO_INDEX */
4904 scale(0x0);
4905 disp(0x0);
4906 %}
4907 %}
4909 // Indirect Memory Plus Short Offset Operand
4910 operand indOffset8(mRegP reg, immL8 off)
4911 %{
4912 constraint(ALLOC_IN_RC(p_reg));
4913 match(AddP reg off);
4915 format %{ "[$reg + $off (8-bit)] @ indOffset8" %}
4916 interface(MEMORY_INTER) %{
4917 base($reg);
4918 index(0x0); /* NO_INDEX */
4919 scale(0x0);
4920 disp($off);
4921 %}
4922 %}
4924 // Indirect Memory Times Scale Plus Index Register
4925 operand indIndexScale(mRegP reg, mRegL lreg, immI2 scale)
4926 %{
4927 constraint(ALLOC_IN_RC(p_reg));
4928 match(AddP reg (LShiftL lreg scale));
4930 op_cost(10);
4931 format %{"[$reg + $lreg << $scale] @ indIndexScale" %}
4932 interface(MEMORY_INTER) %{
4933 base($reg);
4934 index($lreg);
4935 scale($scale);
4936 disp(0x0);
4937 %}
4938 %}
4941 // [base + index + offset]
4942 operand baseIndexOffset8(mRegP base, mRegL index, immL8 off)
4943 %{
4944 constraint(ALLOC_IN_RC(p_reg));
4945 op_cost(5);
4946 match(AddP (AddP base index) off);
4948 format %{ "[$base + $index + $off (8-bit)] @ baseIndexOffset8" %}
4949 interface(MEMORY_INTER) %{
4950 base($base);
4951 index($index);
4952 scale(0x0);
4953 disp($off);
4954 %}
4955 %}
4957 // [base + index + offset]
// Variant where the index is a 32-bit int widened with ConvI2L.
4958 operand baseIndexOffset8_convI2L(mRegP base, mRegI index, immL8 off)
4959 %{
4960 constraint(ALLOC_IN_RC(p_reg));
4961 op_cost(5);
4962 match(AddP (AddP base (ConvI2L index)) off);
4964 format %{ "[$base + $index + $off (8-bit)] @ baseIndexOffset8_convI2L" %}
4965 interface(MEMORY_INTER) %{
4966 base($base);
4967 index($index);
4968 scale(0x0);
4969 disp($off);
4970 %}
4971 %}
4973 // Indirect Memory Times Scale Plus Index Register Plus Offset Operand
4974 operand indIndexScaleOffset8(mRegP reg, immL8 off, mRegL lreg, immI2 scale)
4975 %{
4976 constraint(ALLOC_IN_RC(p_reg));
4977 match(AddP (AddP reg (LShiftL lreg scale)) off);
4979 op_cost(10);
4980 format %{"[$reg + $off + $lreg << $scale] @ indIndexScaleOffset8" %}
4981 interface(MEMORY_INTER) %{
4982 base($reg);
4983 index($lreg);
4984 scale($scale);
4985 disp($off);
4986 %}
4987 %}
// As above, with an int index widened via ConvI2L.
4989 operand indIndexScaleOffset8_convI2L(mRegP reg, immL8 off, mRegI ireg, immI2 scale)
4990 %{
4991 constraint(ALLOC_IN_RC(p_reg));
4992 match(AddP (AddP reg (LShiftL (ConvI2L ireg) scale)) off);
4994 op_cost(10);
4995 format %{"[$reg + $off + $ireg << $scale] @ indIndexScaleOffset8_convI2L" %}
4996 interface(MEMORY_INTER) %{
4997 base($reg);
4998 index($ireg);
4999 scale($scale);
5000 disp($off);
5001 %}
5002 %}
5004 // [base + index<<scale + offset]
5005 operand basePosIndexScaleOffset8(mRegP base, mRegI index, immL8 off, immI_0_31 scale)
5006 %{
5007 constraint(ALLOC_IN_RC(p_reg));
5008 //predicate(n->in(2)->in(3)->in(1)->as_Type()->type()->is_long()->_lo >= 0);
5009 op_cost(10);
5010 match(AddP (AddP base (LShiftL (ConvI2L index) scale)) off);
5012 format %{ "[$base + $index << $scale + $off (8-bit)] @ basePosIndexScaleOffset8" %}
5013 interface(MEMORY_INTER) %{
5014 base($base);
5015 index($index);
5016 scale($scale);
5017 disp($off);
5018 %}
5019 %}
5021 // Indirect Memory Times Scale Plus Index Register Plus Offset Operand
// Narrow-oop base variant; only valid when the heap base decode needs no
// shift (narrow_oop_shift() == 0), so DecodeN is a no-op for addressing.
5022 operand indIndexScaleOffsetNarrow(mRegN reg, immL8 off, mRegL lreg, immI2 scale)
5023 %{
5024 predicate(Universe::narrow_oop_shift() == 0);
5025 constraint(ALLOC_IN_RC(p_reg));
5026 match(AddP (AddP (DecodeN reg) (LShiftL lreg scale)) off);
5028 op_cost(10);
5029 format %{"[$reg + $off + $lreg << $scale] @ indIndexScaleOffsetNarrow" %}
5030 interface(MEMORY_INTER) %{
5031 base($reg);
5032 index($lreg);
5033 scale($scale);
5034 disp($off);
5035 %}
5036 %}
5038 // [base + index<<scale + offset] for compressed Oops
5039 operand indPosIndexI2LScaleOffset8Narrow(mRegN base, mRegI index, immL8 off, immI_0_31 scale)
5040 %{
5041 constraint(ALLOC_IN_RC(p_reg));
5042 //predicate(Universe::narrow_oop_shift() == 0 && n->in(2)->in(3)->in(1)->as_Type()->type()->is_long()->_lo >= 0);
5043 predicate(Universe::narrow_oop_shift() == 0);
5044 op_cost(10);
5045 match(AddP (AddP (DecodeN base) (LShiftL (ConvI2L index) scale)) off);
5047 format %{ "[$base + $index << $scale + $off (8-bit)] @ indPosIndexI2LScaleOffset8Narrow" %}
5048 interface(MEMORY_INTER) %{
5049 base($base);
5050 index($index);
5051 scale($scale);
5052 disp($off);
5053 %}
5054 %}
5056 //FIXME: I think it's better to limit the immI to be 16-bit at most!
5057 // Indirect Memory Plus Long Offset Operand
5058 operand indOffset32(mRegP reg, immL32 off) %{
5059 constraint(ALLOC_IN_RC(p_reg));
5060 op_cost(20);
5061 match(AddP reg off);
5063 format %{ "[$reg + $off (32-bit)] @ indOffset32" %}
5064 interface(MEMORY_INTER) %{
5065 base($reg);
5066 index(0x0); /* NO_INDEX */
5067 scale(0x0);
5068 disp($off);
5069 %}
5070 %}
5072 // Indirect Memory Plus Index Register
5073 operand indIndex(mRegP addr, mRegL index) %{
5074 constraint(ALLOC_IN_RC(p_reg));
5075 match(AddP addr index);
5077 op_cost(20);
5078 format %{"[$addr + $index] @ indIndex" %}
5079 interface(MEMORY_INTER) %{
5080 base($addr);
5081 index($index);
5082 scale(0x0);
5083 disp(0x0);
5084 %}
5085 %}
// Narrow-klass addressing forms; valid only when narrow_klass_shift() == 0
// so DecodeNKlass contributes no shift to the address computation.
5087 operand indirectNarrowKlass(mRegN reg)
5088 %{
5089 predicate(Universe::narrow_klass_shift() == 0);
5090 constraint(ALLOC_IN_RC(p_reg));
5091 op_cost(10);
5092 match(DecodeNKlass reg);
5094 format %{ "[$reg] @ indirectNarrowKlass" %}
5095 interface(MEMORY_INTER) %{
5096 base($reg);
5097 index(0x0);
5098 scale(0x0);
5099 disp(0x0);
5100 %}
5101 %}
5103 operand indOffset8NarrowKlass(mRegN reg, immL8 off)
5104 %{
5105 predicate(Universe::narrow_klass_shift() == 0);
5106 constraint(ALLOC_IN_RC(p_reg));
5107 op_cost(10);
5108 match(AddP (DecodeNKlass reg) off);
5110 format %{ "[$reg + $off (8-bit)] @ indOffset8NarrowKlass" %}
5111 interface(MEMORY_INTER) %{
5112 base($reg);
5113 index(0x0);
5114 scale(0x0);
5115 disp($off);
5116 %}
5117 %}
5119 operand indOffset32NarrowKlass(mRegN reg, immL32 off)
5120 %{
5121 predicate(Universe::narrow_klass_shift() == 0);
5122 constraint(ALLOC_IN_RC(p_reg));
5123 op_cost(10);
5124 match(AddP (DecodeNKlass reg) off);
5126 format %{ "[$reg + $off (32-bit)] @ indOffset32NarrowKlass" %}
5127 interface(MEMORY_INTER) %{
5128 base($reg);
5129 index(0x0);
5130 scale(0x0);
5131 disp($off);
5132 %}
5133 %}
5135 operand indIndexOffsetNarrowKlass(mRegN reg, mRegL lreg, immL32 off)
5136 %{
5137 predicate(Universe::narrow_klass_shift() == 0);
5138 constraint(ALLOC_IN_RC(p_reg));
5139 match(AddP (AddP (DecodeNKlass reg) lreg) off);
5141 op_cost(10);
5142 format %{"[$reg + $off + $lreg] @ indIndexOffsetNarrowKlass" %}
5143 interface(MEMORY_INTER) %{
5144 base($reg);
5145 index($lreg);
5146 scale(0x0);
5147 disp($off);
5148 %}
5149 %}
5151 operand indIndexNarrowKlass(mRegN reg, mRegL lreg)
5152 %{
5153 predicate(Universe::narrow_klass_shift() == 0);
5154 constraint(ALLOC_IN_RC(p_reg));
5155 match(AddP (DecodeNKlass reg) lreg);
5157 op_cost(10);
5158 format %{"[$reg + $lreg] @ indIndexNarrowKlass" %}
5159 interface(MEMORY_INTER) %{
5160 base($reg);
5161 index($lreg);
5162 scale(0x0);
5163 disp(0x0);
5164 %}
5165 %}
5167 // Indirect Memory Operand
5168 operand indirectNarrow(mRegN reg)
5169 %{
5170 predicate(Universe::narrow_oop_shift() == 0);
5171 constraint(ALLOC_IN_RC(p_reg));
5172 op_cost(10);
5173 match(DecodeN reg);
5175 format %{ "[$reg] @ indirectNarrow" %}
5176 interface(MEMORY_INTER) %{
5177 base($reg);
5178 index(0x0);
5179 scale(0x0);
5180 disp(0x0);
5181 %}
5182 %}
5184 // Indirect Memory Plus Short Offset Operand
5185 operand indOffset8Narrow(mRegN reg, immL8 off)
5186 %{
5187 predicate(Universe::narrow_oop_shift() == 0);
5188 constraint(ALLOC_IN_RC(p_reg));
5189 op_cost(10);
5190 match(AddP (DecodeN reg) off);
5192 format %{ "[$reg + $off (8-bit)] @ indOffset8Narrow" %}
5193 interface(MEMORY_INTER) %{
5194 base($reg);
5195 index(0x0);
5196 scale(0x0);
5197 disp($off);
5198 %}
5199 %}
5201 // Indirect Memory Plus Index Register Plus Offset Operand
5202 operand indIndexOffset8Narrow(mRegN reg, mRegL lreg, immL8 off)
5203 %{
5204 predicate(Universe::narrow_oop_shift() == 0);
5205 constraint(ALLOC_IN_RC(p_reg));
5206 match(AddP (AddP (DecodeN reg) lreg) off);
5208 op_cost(10);
5209 format %{"[$reg + $off + $lreg] @ indIndexOffset8Narrow" %}
5210 interface(MEMORY_INTER) %{
5211 base($reg);
5212 index($lreg);
5213 scale(0x0);
5214 disp($off);
5215 %}
5216 %}
5218 //----------Load Long Memory Operands------------------------------------------
5219 // The load-long idiom will use its address expression again after loading
5220 // the first word of the long. If the load-long destination overlaps with
5221 // registers used in the addressing expression, the 2nd half will be loaded
5222 // from a clobbered address. Fix this by requiring that load-long use
5223 // address registers that do not overlap with the load-long target.
5225 // load-long support
// High op_cost discourages the allocator from picking this operand unless a
// load-long actually needs the non-overlap guarantee described above.
5226 operand load_long_RegP() %{
5227 constraint(ALLOC_IN_RC(p_reg));
5228 match(RegP);
5229 match(mRegP);
5230 op_cost(100);
5231 format %{ %}
5232 interface(REG_INTER);
5233 %}
5235 // Indirect Memory Operand Long
5236 operand load_long_indirect(load_long_RegP reg) %{
5237 constraint(ALLOC_IN_RC(p_reg));
5238 match(reg);
5240 format %{ "[$reg]" %}
5241 interface(MEMORY_INTER) %{
5242 base($reg);
5243 index(0x0);
5244 scale(0x0);
5245 disp(0x0);
5246 %}
5247 %}
5249 // Indirect Memory Plus Long Offset Operand
5250 operand load_long_indOffset32(load_long_RegP reg, immL32 off) %{
5251 match(AddP reg off);
5253 format %{ "[$reg + $off]" %}
5254 interface(MEMORY_INTER) %{
5255 base($reg);
5256 index(0x0);
5257 scale(0x0);
5258 disp($off);
5259 %}
5260 %}
5262 //----------Conditional Branch Operands----------------------------------------
5263 // Comparison Op - This is the operation of the comparison, and is limited to
5264 // the following set of codes:
5265 // L (<), LE (<=), G (>), GE (>=), E (==), NE (!=)
5266 //
5267 // Other attributes of the comparison, such as unsignedness, are specified
5268 // by the comparison instruction that sets a condition code flags register.
5269 // That result is represented by a flags operand whose subtype is appropriate
5270 // to the unsignedness (etc.) of the comparison.
5271 //
5272 // Later, the instruction which matches both the Comparison Op (a Bool) and
5273 // the flags (produced by the Cmp) specifies the coding of the comparison op
5274 // by matching a specific subtype of Bool operand below, such as cmpOpU.
5276 // Comparison Code
5277 operand cmpOp() %{
5278 match(Bool);
5280 format %{ "" %}
5281 interface(COND_INTER) %{
5282 equal(0x01);
5283 not_equal(0x02);
5284 greater(0x03);
5285 greater_equal(0x04);
5286 less(0x05);
5287 less_equal(0x06);
5288 overflow(0x7);
5289 no_overflow(0x8);
5290 %}
5291 %}
5294 // Comparison Code
5295 // Comparison Code, unsigned compare. Used by FP also, with
5296 // C2 (unordered) turned into GT or LT already. The other bits
5297 // C0 and C3 are turned into Carry & Zero flags.
// NOTE: cmpOpU uses the same condition encodings as cmpOp; the signed vs.
// unsigned distinction is carried by the instruction that matches it.
5298 operand cmpOpU() %{
5299 match(Bool);
5301 format %{ "" %}
5302 interface(COND_INTER) %{
5303 equal(0x01);
5304 not_equal(0x02);
5305 greater(0x03);
5306 greater_equal(0x04);
5307 less(0x05);
5308 less_equal(0x06);
5309 overflow(0x7);
5310 no_overflow(0x8);
5311 %}
5312 %}
5314 /*
5315 // Comparison Code, unsigned compare. Used by FP also, with
5316 // C2 (unordered) turned into GT or LT already. The other bits
5317 // C0 and C3 are turned into Carry & Zero flags.
5318 operand cmpOpU() %{
5319 match(Bool);
5321 format %{ "" %}
5322 interface(COND_INTER) %{
5323 equal(0x4);
5324 not_equal(0x5);
5325 less(0x2);
5326 greater_equal(0x3);
5327 less_equal(0x6);
5328 greater(0x7);
5329 %}
5330 %}
5331 */
5332 /*
5333 // Comparison Code for FP conditional move
5334 operand cmpOp_fcmov() %{
5335 match(Bool);
5337 format %{ "" %}
5338 interface(COND_INTER) %{
5339 equal (0x01);
5340 not_equal (0x02);
5341 greater (0x03);
5342 greater_equal(0x04);
5343 less (0x05);
5344 less_equal (0x06);
5345 %}
5346 %}
5348 // Comparision Code used in long compares
5349 operand cmpOp_commute() %{
5350 match(Bool);
5352 format %{ "" %}
5353 interface(COND_INTER) %{
5354 equal(0x4);
5355 not_equal(0x5);
5356 less(0xF);
5357 greater_equal(0xE);
5358 less_equal(0xD);
5359 greater(0xC);
5360 %}
5361 %}
5362 */
5364 //----------Special Memory Operands--------------------------------------------
5365 // Stack Slot Operand - This operand is used for loading and storing temporary
5366 // values on the stack where a match requires a value to
5367 // flow through memory.
// All stack-slot operands address off SP (encoding 0x1d) with the slot's
// frame offset as the displacement; they differ only in the value type.
5368 operand stackSlotP(sRegP reg) %{
5369 constraint(ALLOC_IN_RC(stack_slots));
5370 // No match rule because this operand is only generated in matching
5371 op_cost(50);
5372 format %{ "[$reg]" %}
5373 interface(MEMORY_INTER) %{
5374 base(0x1d); // SP
5375 index(0x0); // No Index
5376 scale(0x0); // No Scale
5377 disp($reg); // Stack Offset
5378 %}
5379 %}
5381 operand stackSlotI(sRegI reg) %{
5382 constraint(ALLOC_IN_RC(stack_slots));
5383 // No match rule because this operand is only generated in matching
5384 op_cost(50);
5385 format %{ "[$reg]" %}
5386 interface(MEMORY_INTER) %{
5387 base(0x1d); // SP
5388 index(0x0); // No Index
5389 scale(0x0); // No Scale
5390 disp($reg); // Stack Offset
5391 %}
5392 %}
5394 operand stackSlotF(sRegF reg) %{
5395 constraint(ALLOC_IN_RC(stack_slots));
5396 // No match rule because this operand is only generated in matching
5397 op_cost(50);
5398 format %{ "[$reg]" %}
5399 interface(MEMORY_INTER) %{
5400 base(0x1d); // SP
5401 index(0x0); // No Index
5402 scale(0x0); // No Scale
5403 disp($reg); // Stack Offset
5404 %}
5405 %}
5407 operand stackSlotD(sRegD reg) %{
5408 constraint(ALLOC_IN_RC(stack_slots));
5409 // No match rule because this operand is only generated in matching
5410 op_cost(50);
5411 format %{ "[$reg]" %}
5412 interface(MEMORY_INTER) %{
5413 base(0x1d); // SP
5414 index(0x0); // No Index
5415 scale(0x0); // No Scale
5416 disp($reg); // Stack Offset
5417 %}
5418 %}
5420 operand stackSlotL(sRegL reg) %{
5421 constraint(ALLOC_IN_RC(stack_slots));
5422 // No match rule because this operand is only generated in matching
5423 op_cost(50);
5424 format %{ "[$reg]" %}
5425 interface(MEMORY_INTER) %{
5426 base(0x1d); // SP
5427 index(0x0); // No Index
5428 scale(0x0); // No Scale
5429 disp($reg); // Stack Offset
5430 %}
5431 %}
5434 //------------------------OPERAND CLASSES--------------------------------------
5435 //opclass memory( direct, indirect, indOffset16, indOffset32, indOffset32X, indIndexOffset );
// 'memory' aggregates every addressing-mode operand above so instructions can
// accept any of them via a single 'memory' operand.  (The NarrowKlass forms
// are not included here — presumably matched by dedicated rules; TODO confirm.)
5436 opclass memory( indirect, indirectNarrow, indOffset8, indOffset32, indIndex, indIndexScale, load_long_indirect, load_long_indOffset32, baseIndexOffset8, baseIndexOffset8_convI2L, indIndexScaleOffset8, indIndexScaleOffset8_convI2L, basePosIndexScaleOffset8, indIndexScaleOffsetNarrow, indPosIndexI2LScaleOffset8Narrow, indOffset8Narrow, indIndexOffset8Narrow);
5439 //----------PIPELINE-----------------------------------------------------------
5440 // Rules which define the behavior of the target architectures pipeline.
5442 pipeline %{
5444 //----------ATTRIBUTES---------------------------------------------------------
5445 attributes %{
5446 fixed_size_instructions; // Fixed size instructions
5447 branch_has_delay_slot; // branches have a delay slot on gs2
5448 max_instructions_per_bundle = 1; // 1 instruction per bundle
5449 max_bundles_per_cycle = 4; // Up to 4 bundles per cycle
5450 bundle_unit_size=4;
5451 instruction_unit_size = 4; // An instruction is 4 bytes long
5452 instruction_fetch_unit_size = 16; // The processor fetches one line
5453 instruction_fetch_units = 1; // of 16 bytes
5455 // List of nop instructions
5456 nops( MachNop );
5457 %}
5459 //----------RESOURCES----------------------------------------------------------
5460 // Resources are the functional units available to the machine
// 4 decoders, 2 integer ALUs, 2 FPUs, one memory port and one branch unit.
5462 resources(D1, D2, D3, D4, DECODE = D1 | D2 | D3| D4, ALU1, ALU2, ALU = ALU1 | ALU2, FPU1, FPU2, FPU = FPU1 | FPU2, MEM, BR);
5464 //----------PIPELINE DESCRIPTION-----------------------------------------------
5465 // Pipeline Description specifies the stages in the machine's pipeline
5467 // IF: fetch
5468 // ID: decode
5469 // RD: read
5470 // CA: calculate
5471 // WB: write back
5472 // CM: commit
5474 pipe_desc(IF, ID, RD, CA, WB, CM);
5477 //----------PIPELINE CLASSES---------------------------------------------------
5478 // Pipeline Classes describe the stages in which input and output are
5479 // referenced by the hardware pipeline.
5481 //No.1 Integer ALU reg-reg operation : dst <-- reg1 op reg2
5482 pipe_class ialu_regI_regI(mRegI dst, mRegI src1, mRegI src2) %{
5483 single_instruction;
5484 src1 : RD(read);
5485 src2 : RD(read);
5486 dst : WB(write)+1;
5487 DECODE : ID;
5488 ALU : CA;
5489 %}
5491 //No.19 Integer mult operation : dst <-- reg1 mult reg2
5492 pipe_class ialu_mult(mRegI dst, mRegI src1, mRegI src2) %{
5493 src1 : RD(read);
5494 src2 : RD(read);
5495 dst : WB(write)+5;
5496 DECODE : ID;
5497 ALU2 : CA;
5498 %}
// Long multiply: higher result latency than 32-bit multiply.
5500 pipe_class mulL_reg_reg(mRegL dst, mRegL src1, mRegL src2) %{
5501 src1 : RD(read);
5502 src2 : RD(read);
5503 dst : WB(write)+10;
5504 DECODE : ID;
5505 ALU2 : CA;
5506 %}
5508 //No.19 Integer div operation : dst <-- reg1 div reg2
5509 pipe_class ialu_div(mRegI dst, mRegI src1, mRegI src2) %{
5510 src1 : RD(read);
5511 src2 : RD(read);
5512 dst : WB(write)+10;
5513 DECODE : ID;
5514 ALU2 : CA;
5515 %}
5517 //No.19 Integer mod operation : dst <-- reg1 mod reg2
5518 pipe_class ialu_mod(mRegI dst, mRegI src1, mRegI src2) %{
5519 instruction_count(2);
5520 src1 : RD(read);
5521 src2 : RD(read);
5522 dst : WB(write)+10;
5523 DECODE : ID;
5524 ALU2 : CA;
5525 %}
5527 //No.15 Long ALU reg-reg operation : dst <-- reg1 op reg2
5528 pipe_class ialu_regL_regL(mRegL dst, mRegL src1, mRegL src2) %{
5529 instruction_count(2);
5530 src1 : RD(read);
5531 src2 : RD(read);
5532 dst : WB(write);
5533 DECODE : ID;
5534 ALU : CA;
5535 %}
5537 //No.18 Long ALU reg-imm16 operation : dst <-- reg1 op imm16
5538 pipe_class ialu_regL_imm16(mRegL dst, mRegL src) %{
5539 instruction_count(2);
5540 src : RD(read);
5541 dst : WB(write);
5542 DECODE : ID;
5543 ALU : CA;
5544 %}
5546 //no.16 load Long from memory :
5547 pipe_class ialu_loadL(mRegL dst, memory mem) %{
5548 instruction_count(2);
5549 mem : RD(read);
5550 dst : WB(write)+5;
5551 DECODE : ID;
5552 MEM : RD;
5553 %}
5555 //No.17 Store Long to Memory :
5556 pipe_class ialu_storeL(mRegL src, memory mem) %{
5557 instruction_count(2);
5558 mem : RD(read);
5559 src : RD(read);
5560 DECODE : ID;
5561 MEM : RD;
5562 %}
5564 //No.2 Integer ALU reg-imm16 operation : dst <-- reg1 op imm16
5565 pipe_class ialu_regI_imm16(mRegI dst, mRegI src) %{
5566 single_instruction;
5567 src : RD(read);
5568 dst : WB(write);
5569 DECODE : ID;
5570 ALU : CA;
5571 %}
5573 //No.3 Integer move operation : dst <-- reg
5574 pipe_class ialu_regI_mov(mRegI dst, mRegI src) %{
5575 src : RD(read);
5576 dst : WB(write);
5577 DECODE : ID;
5578 ALU : CA;
5579 %}
5581 //No.4 No instructions : do nothing
5582 pipe_class empty( ) %{
5583 instruction_count(0);
5584 %}
5586 //No.5 UnConditional branch :
5587 pipe_class pipe_jump( label labl ) %{
5588 multiple_bundles;
5589 DECODE : ID;
5590 BR : RD;
5591 %}
5593 //No.6 ALU Conditional branch :
5594 pipe_class pipe_alu_branch(mRegI src1, mRegI src2, label labl ) %{
5595 multiple_bundles;
5596 src1 : RD(read);
5597 src2 : RD(read);
5598 DECODE : ID;
5599 BR : RD;
5600 %}
5602 //no.7 load integer from memory :
5603 pipe_class ialu_loadI(mRegI dst, memory mem) %{
5604 mem : RD(read);
5605 dst : WB(write)+3;
5606 DECODE : ID;
5607 MEM : RD;
5608 %}
5610 //No.8 Store Integer to Memory :
5611 pipe_class ialu_storeI(mRegI src, memory mem) %{
5612 mem : RD(read);
5613 src : RD(read);
5614 DECODE : ID;
5615 MEM : RD;
5616 %}
5619 //No.10 Floating FPU reg-reg operation : dst <-- reg1 op reg2
5620 pipe_class fpu_regF_regF(regF dst, regF src1, regF src2) %{
5621 src1 : RD(read);
5622 src2 : RD(read);
5623 dst : WB(write);
5624 DECODE : ID;
5625 FPU : CA;
5626 %}
5628 //No.22 Floating div operation : dst <-- reg1 div reg2
5629 pipe_class fpu_div(regF dst, regF src1, regF src2) %{
5630 src1 : RD(read);
5631 src2 : RD(read);
5632 dst : WB(write);
5633 DECODE : ID;
5634 FPU2 : CA;
5635 %}
// int -> double conversion.
5637 pipe_class fcvt_I2D(regD dst, mRegI src) %{
5638 src : RD(read);
5639 dst : WB(write);
5640 DECODE : ID;
5641 FPU1 : CA;
5642 %}
// double -> int conversion.
5644 pipe_class fcvt_D2I(mRegI dst, regD src) %{
5645 src : RD(read);
5646 dst : WB(write);
5647 DECODE : ID;
5648 FPU1 : CA;
5649 %}
// FPR -> GPR move (mfc1).
5651 pipe_class pipe_mfc1(mRegI dst, regD src) %{
5652 src : RD(read);
5653 dst : WB(write);
5654 DECODE : ID;
5655 MEM : RD;
5656 %}
// GPR -> FPR move (mtc1).
5658 pipe_class pipe_mtc1(regD dst, mRegI src) %{
5659 src : RD(read);
5660 dst : WB(write);
5661 DECODE : ID;
5662 MEM : RD(5);
5663 %}
5665 //No.23 Floating sqrt operation : dst <-- reg1 sqrt reg2
5666 pipe_class fpu_sqrt(regF dst, regF src1, regF src2) %{
5667 multiple_bundles;
5668 src1 : RD(read);
5669 src2 : RD(read);
5670 dst : WB(write);
5671 DECODE : ID;
5672 FPU2 : CA;
5673 %}
5675 //No.11 Load Floating from Memory :
5676 pipe_class fpu_loadF(regF dst, memory mem) %{
5677 instruction_count(1);
5678 mem : RD(read);
5679 dst : WB(write)+3;
5680 DECODE : ID;
5681 MEM : RD;
5682 %}
5684 //No.12 Store Floating to Memory :
5685 pipe_class fpu_storeF(regF src, memory mem) %{
5686 instruction_count(1);
5687 mem : RD(read);
5688 src : RD(read);
5689 DECODE : ID;
5690 MEM : RD;
5691 %}
5693 //No.13 FPU Conditional branch :
5694 pipe_class pipe_fpu_branch(regF src1, regF src2, label labl ) %{
5695 multiple_bundles;
5696 src1 : RD(read);
5697 src2 : RD(read);
5698 DECODE : ID;
5699 BR : RD;
5700 %}
5702 //No.14 Floating FPU reg operation : dst <-- op reg
5703 pipe_class fpu1_regF(regF dst, regF src) %{
5704 src : RD(read);
5705 dst : WB(write);
5706 DECODE : ID;
5707 FPU : CA;
5708 %}
// Serializing multi-instruction memory operation (e.g. atomics/barriers).
5710 pipe_class long_memory_op() %{
5711 instruction_count(10); multiple_bundles; force_serialization;
5712 fixed_latency(30);
5713 %}
5715 pipe_class simple_call() %{
5716 instruction_count(10); multiple_bundles; force_serialization;
5717 fixed_latency(200);
5718 BR : RD;
5719 %}
5721 pipe_class call() %{
5722 instruction_count(10); multiple_bundles; force_serialization;
5723 fixed_latency(200);
5724 %}
5726 //FIXME:
5727 //No.9 Pipe slow : for multi-instruction sequences
5728 pipe_class pipe_slow( ) %{
5729 instruction_count(20);
5730 force_serialization;
5731 multiple_bundles;
5732 fixed_latency(50);
5733 %}
5735 %}
5739 //----------INSTRUCTIONS-------------------------------------------------------
5740 //
5741 // match -- States which machine-independent subtree may be replaced
5742 // by this instruction.
5743 // ins_cost -- The estimated cost of this instruction is used by instruction
5744 // selection to identify a minimum cost tree of machine
5745 // instructions that matches a tree of machine-independent
5746 // instructions.
5747 // format -- A string providing the disassembly for this instruction.
5748 // The value of an instruction's operand may be inserted
5749 // by referring to it with a '$' prefix.
5750 // opcode -- Three instruction opcodes may be provided. These are referred
5751 // to within an encode class as $primary, $secondary, and $tertiary
5752 // respectively. The primary opcode is commonly used to
5753 // indicate the type of machine instruction, while secondary
5754 // and tertiary are often used for prefix options or addressing
5755 // modes.
5756 // ins_encode -- A list of encode classes with parameters. The encode class
5757 // name must have been defined in an 'enc_class' specification
5758 // in the encode section of the architecture description.
5761 // Load Integer
5762 instruct loadI(mRegI dst, memory mem) %{
5763 match(Set dst (LoadI mem));
5765 ins_cost(125);
5766 format %{ "lw $dst, $mem #@loadI" %}
5767 ins_encode (load_I_enc(dst, mem));
5768 ins_pipe( ialu_loadI );
5769 %}
5771 instruct loadI_convI2L(mRegL dst, memory mem) %{
5772 match(Set dst (ConvI2L (LoadI mem)));
5774 ins_cost(125);
5775 format %{ "lw $dst, $mem #@loadI_convI2L" %}
5776 ins_encode (load_I_enc(dst, mem));
5777 ins_pipe( ialu_loadI );
5778 %}
5780 // Load Integer (32 bit signed) to Byte (8 bit signed)
5781 instruct loadI2B(mRegI dst, memory mem, immI_24 twentyfour) %{
5782 match(Set dst (RShiftI (LShiftI (LoadI mem) twentyfour) twentyfour));
5784 ins_cost(125);
5785 format %{ "lb $dst, $mem\t# int -> byte #@loadI2B" %}
5786 ins_encode(load_B_enc(dst, mem));
5787 ins_pipe(ialu_loadI);
5788 %}
5790 // Load Integer (32 bit signed) to Unsigned Byte (8 bit UNsigned)
5791 instruct loadI2UB(mRegI dst, memory mem, immI_255 mask) %{
5792 match(Set dst (AndI (LoadI mem) mask));
5794 ins_cost(125);
5795 format %{ "lbu $dst, $mem\t# int -> ubyte #@loadI2UB" %}
5796 ins_encode(load_UB_enc(dst, mem));
5797 ins_pipe(ialu_loadI);
5798 %}
5800 // Load Integer (32 bit signed) to Short (16 bit signed)
5801 instruct loadI2S(mRegI dst, memory mem, immI_16 sixteen) %{
5802 match(Set dst (RShiftI (LShiftI (LoadI mem) sixteen) sixteen));
5804 ins_cost(125);
5805 format %{ "lh $dst, $mem\t# int -> short #@loadI2S" %}
5806 ins_encode(load_S_enc(dst, mem));
5807 ins_pipe(ialu_loadI);
5808 %}
5810 // Load Integer (32 bit signed) to Unsigned Short/Char (16 bit UNsigned)
5811 instruct loadI2US(mRegI dst, memory mem, immI_65535 mask) %{
5812 match(Set dst (AndI (LoadI mem) mask));
5814 ins_cost(125);
5815 format %{ "lhu $dst, $mem\t# int -> ushort/char #@loadI2US" %}
5816 ins_encode(load_C_enc(dst, mem));
5817 ins_pipe(ialu_loadI);
5818 %}
5820 // Load Long.
5821 instruct loadL(mRegL dst, memory mem) %{
5822 // predicate(!((LoadLNode*)n)->require_atomic_access());
5823 match(Set dst (LoadL mem));
5825 ins_cost(250);
5826 format %{ "ld $dst, $mem #@loadL" %}
5827 ins_encode(load_L_enc(dst, mem));
5828 ins_pipe( ialu_loadL );
5829 %}
5831 // Load Long - UNaligned
// Uses the same ld encoding as loadL but at a higher cost; the FIXME notes
// that a real ldl/ldr pair for unaligned access has not been implemented.
5832 instruct loadL_unaligned(mRegL dst, memory mem) %{
5833 match(Set dst (LoadL_unaligned mem));
5835 // FIXME: Jin: Need more effective ldl/ldr
5836 ins_cost(450);
5837 format %{ "ld $dst, $mem #@loadL_unaligned\n\t" %}
5838 ins_encode(load_L_enc(dst, mem));
5839 ins_pipe( ialu_loadL );
5840 %}
5842 // Store Long
// Store a 64-bit register to memory with sd.
5843 instruct storeL_reg(memory mem, mRegL src) %{
5844 match(Set mem (StoreL mem src));
5846 ins_cost(200);
5847 format %{ "sd $mem, $src #@storeL_reg\n" %}
5848 ins_encode(store_L_reg_enc(mem, src));
5849 ins_pipe( ialu_storeL );
5850 %}
// Store the long constant 0 directly (sd of the zero register) — cheaper
// than materializing the immediate, hence the lower cost than storeL_reg.
5853 instruct storeL_immL0(memory mem, immL0 zero) %{
5854 match(Set mem (StoreL mem zero));
5856 ins_cost(180);
5857 format %{ "sd $mem, zero #@storeL_immL0" %}
5858 ins_encode(store_L_immL0_enc(mem, zero));
5859 ins_pipe( ialu_storeL );
5860 %}
5862 // Load Compressed Pointer
// Compressed oops are 32-bit and unsigned, so lwu (zero-extend) is used.
5863 instruct loadN(mRegN dst, memory mem)
5864 %{
5865 match(Set dst (LoadN mem));
5867 ins_cost(125); // XXX
5868 format %{ "lwu $dst, $mem\t# compressed ptr @ loadN" %}
5869 ins_encode (load_N_enc(dst, mem));
5870 ins_pipe( ialu_loadI ); // XXX
5871 %}
5873 // Load Pointer
// Full 64-bit pointer load via ld.
5874 instruct loadP(mRegP dst, memory mem) %{
5875 match(Set dst (LoadP mem));
5877 ins_cost(125);
5878 format %{ "ld $dst, $mem #@loadP" %}
5879 ins_encode (load_P_enc(dst, mem));
5880 ins_pipe( ialu_loadI );
5881 %}
5883 // Load Klass Pointer
// Uncompressed klass pointer load; shares the plain pointer encoding.
5884 instruct loadKlass(mRegP dst, memory mem) %{
5885 match(Set dst (LoadKlass mem));
5887 ins_cost(125);
5888 format %{ "MOV $dst,$mem @ loadKlass" %}
5889 ins_encode (load_P_enc(dst, mem));
5890 ins_pipe( ialu_loadI );
5891 %}
5893 // Load narrow Klass Pointer
// 32-bit compressed klass pointer, zero-extended with lwu like loadN.
5894 instruct loadNKlass(mRegN dst, memory mem)
5895 %{
5896 match(Set dst (LoadNKlass mem));
5898 ins_cost(125); // XXX
5899 format %{ "lwu $dst, $mem\t# compressed klass ptr @ loadNKlass" %}
5900 ins_encode (load_N_enc(dst, mem));
5901 ins_pipe( ialu_loadI ); // XXX
5902 %}
5904 // Load Constant
// Materialize an arbitrary 32-bit int constant into a register.
5905 instruct loadConI(mRegI dst, immI src) %{
5906 match(Set dst src);
5908 ins_cost(150);
5909 format %{ "mov $dst, $src #@loadConI" %}
5910 ins_encode %{
5911 Register dst = $dst$$Register;
5912 int value = $src$$constant;
5913 __ move(dst, value);
5914 %}
5915 ins_pipe( ialu_regI_regI );
5916 %}
// Materialize an arbitrary 64-bit long constant via the macro-assembler's
// set64 (emits the shortest instruction sequence for the value).
5919 instruct loadConL_set64(mRegL dst, immL src) %{
5920 match(Set dst src);
5921 ins_cost(120);
5922 format %{ "li $dst, $src @ loadConL_set64" %}
5923 ins_encode %{
5924 __ set64($dst$$Register, $src$$constant);
5925 %}
5926 ins_pipe(ialu_regL_regL);
5927 %}
5929 /*
5930 // Load long value from constant table (predicated by immL_expensive).
5931 instruct loadConL_load(mRegL dst, immL_expensive src) %{
5932 match(Set dst src);
5933 ins_cost(150);
5934 format %{ "ld $dst, $constantoffset[$constanttablebase] # load long $src from table @ loadConL_ldx" %}
5935 ins_encode %{
5936 int con_offset = $constantoffset($src);
5938 if (Assembler::is_simm16(con_offset)) {
5939 __ ld($dst$$Register, $constanttablebase, con_offset);
5940 } else {
5941 __ set64(AT, con_offset);
5942 if (UseLoongsonISA) {
5943 __ gsldx($dst$$Register, $constanttablebase, AT, 0);
5944 } else {
5945 __ daddu(AT, $constanttablebase, AT);
5946 __ ld($dst$$Register, AT, 0);
5947 }
5948 }
5949 %}
5950 ins_pipe(ialu_loadI);
5951 %}
5952 */
// Long constant that fits in a signed 16-bit immediate: one daddiu from R0.
// Cheaper (cost 105) than the general loadConL_set64 path.
5954 instruct loadConL16(mRegL dst, immL16 src) %{
5955 match(Set dst src);
5956 ins_cost(105);
5957 format %{ "mov $dst, $src #@loadConL16" %}
5958 ins_encode %{
5959 Register dst_reg = as_Register($dst$$reg);
5960 int value = $src$$constant;
5961 __ daddiu(dst_reg, R0, value);
5962 %}
5963 ins_pipe( ialu_regL_regL );
5964 %}
// Long constant zero: cheapest case, just copy the hardwired zero register.
5967 instruct loadConL0(mRegL dst, immL0 src) %{
5968 match(Set dst src);
5969 ins_cost(100);
5970 format %{ "mov $dst, zero #@loadConL0" %}
5971 ins_encode %{
5972 Register dst_reg = as_Register($dst$$reg);
5973 __ daddu(dst_reg, R0, R0);
5974 %}
5975 ins_pipe( ialu_regL_regL );
5976 %}
5978 // Load Range
// Array length load (LoadRange); plain 32-bit int load encoding.
5979 instruct loadRange(mRegI dst, memory mem) %{
5980 match(Set dst (LoadRange mem));
5982 ins_cost(125);
5983 format %{ "MOV $dst,$mem @ loadRange" %}
5984 ins_encode(load_I_enc(dst, mem));
5985 ins_pipe( ialu_loadI );
5986 %}
// Store a 64-bit pointer register to memory with sd.
5989 instruct storeP(memory mem, mRegP src ) %{
5990 match(Set mem (StoreP mem src));
5992 ins_cost(125);
5993 format %{ "sd $src, $mem #@storeP" %}
5994 ins_encode(store_P_reg_enc(mem, src));
5995 ins_pipe( ialu_storeI );
5996 %}
5998 // Store NULL Pointer, mark word, or other simple pointer constant.
// Replaces the former general storeImmP: only the zero-pointer immediate is
// handled, and the encoding takes no src operand (presumably stores R0).
5999 instruct storeImmP0(memory mem, immP0 zero) %{
6000 match(Set mem (StoreP mem zero));
6002 ins_cost(125);
6003 format %{ "mov $mem, $zero #@storeImmP0" %}
6004 ins_encode(store_P_immP0_enc(mem));
6005 ins_pipe( ialu_storeI );
6006 %}
6008 // Store Byte Immediate
// Store an 8-bit immediate constant to memory.
6009 instruct storeImmB(memory mem, immI8 src) %{
6010 match(Set mem (StoreB mem src));
6012 ins_cost(150);
6013 format %{ "movb $mem, $src #@storeImmB" %}
6014 ins_encode(store_B_immI_enc(mem, src));
6015 ins_pipe( ialu_storeI );
6016 %}
6018 // Store Compressed Pointer
// 32-bit sw of a compressed oop register.
6019 instruct storeN(memory mem, mRegN src)
6020 %{
6021 match(Set mem (StoreN mem src));
6023 ins_cost(125); // XXX
6024 format %{ "sw $mem, $src\t# compressed ptr @ storeN" %}
6025 ins_encode(store_N_reg_enc(mem, src));
6026 ins_pipe( ialu_storeI );
6027 %}
// Store a compressed klass pointer; shares the storeN register encoding.
6029 instruct storeNKlass(memory mem, mRegN src)
6030 %{
6031 match(Set mem (StoreNKlass mem src));
6033 ins_cost(125); // XXX
6034 format %{ "sw $mem, $src\t# compressed klass ptr @ storeNKlass" %}
6035 ins_encode(store_N_reg_enc(mem, src));
6036 ins_pipe( ialu_storeI );
6037 %}
// Store a compressed null. Only legal when both the oop and klass encoding
// bases are NULL, so the compressed form of NULL is the all-zero bit pattern.
// NOTE(review): the format string mentions R12 but the encoding class is not
// visible here — confirm which register is actually stored.
6039 instruct storeImmN0(memory mem, immN0 zero)
6040 %{
6041 predicate(Universe::narrow_oop_base() == NULL && Universe::narrow_klass_base() == NULL);
6042 match(Set mem (StoreN mem zero));
6044 ins_cost(125); // XXX
6045 format %{ "storeN0 $mem, R12\t# compressed ptr" %}
6046 ins_encode(storeImmN0_enc(mem, zero));
6047 ins_pipe( ialu_storeI );
6048 %}
6050 // Store Byte
// Store the low 8 bits of an int register with sb.
6051 instruct storeB(memory mem, mRegI src) %{
6052 match(Set mem (StoreB mem src));
6054 ins_cost(125);
6055 format %{ "sb $src, $mem #@storeB" %}
6056 ins_encode(store_B_reg_enc(mem, src));
6057 ins_pipe( ialu_storeI );
6058 %}
// StoreB of a long truncated to int: sb only writes the low byte, so the
// ConvL2I can be folded away and the long register stored directly.
6060 instruct storeB_convL2I(memory mem, mRegL src) %{
6061 match(Set mem (StoreB mem (ConvL2I src)));
6063 ins_cost(125);
6064 format %{ "sb $src, $mem #@storeB_convL2I" %}
6065 ins_encode(store_B_reg_enc(mem, src));
6066 ins_pipe( ialu_storeI );
6067 %}
6069 // Load Byte (8bit signed)
6070 instruct loadB(mRegI dst, memory mem) %{
6071 match(Set dst (LoadB mem));
6073 ins_cost(125);
6074 format %{ "lb $dst, $mem #@loadB" %}
6075 ins_encode(load_B_enc(dst, mem));
6076 ins_pipe( ialu_loadI );
6077 %}
// Signed byte load widened to long: lb sign-extends, so one instruction
// covers both the load and the ConvI2L.
6079 instruct loadB_convI2L(mRegL dst, memory mem) %{
6080 match(Set dst (ConvI2L (LoadB mem)));
6082 ins_cost(125);
6083 format %{ "lb $dst, $mem #@loadB_convI2L" %}
6084 ins_encode(load_B_enc(dst, mem));
6085 ins_pipe( ialu_loadI );
6086 %}
6088 // Load Byte (8bit UNsigned)
6089 instruct loadUB(mRegI dst, memory mem) %{
6090 match(Set dst (LoadUB mem));
6092 ins_cost(125);
6093 format %{ "lbu $dst, $mem #@loadUB" %}
6094 ins_encode(load_UB_enc(dst, mem));
6095 ins_pipe( ialu_loadI );
6096 %}
// Unsigned byte load widened to long: lbu zero-extends, so the ConvI2L is free.
6098 instruct loadUB_convI2L(mRegL dst, memory mem) %{
6099 match(Set dst (ConvI2L (LoadUB mem)));
6101 ins_cost(125);
6102 format %{ "lbu $dst, $mem #@loadUB_convI2L" %}
6103 ins_encode(load_UB_enc(dst, mem));
6104 ins_pipe( ialu_loadI );
6105 %}
6107 // Load Short (16bit signed)
6108 instruct loadS(mRegI dst, memory mem) %{
6109 match(Set dst (LoadS mem));
6111 ins_cost(125);
6112 format %{ "lh $dst, $mem #@loadS" %}
6113 ins_encode(load_S_enc(dst, mem));
6114 ins_pipe( ialu_loadI );
6115 %}
6117 // Load Short (16 bit signed) to Byte (8 bit signed)
// Folds the (x << 24) >> 24 narrowing of a short load into a single lb.
6118 instruct loadS2B(mRegI dst, memory mem, immI_24 twentyfour) %{
6119 match(Set dst (RShiftI (LShiftI (LoadS mem) twentyfour) twentyfour));
6121 ins_cost(125);
6122 format %{ "lb $dst, $mem\t# short -> byte #@loadS2B" %}
6123 ins_encode(load_B_enc(dst, mem));
6124 ins_pipe(ialu_loadI);
6125 %}
// Signed short load widened to long: lh sign-extends, folding the ConvI2L.
6127 instruct loadS_convI2L(mRegL dst, memory mem) %{
6128 match(Set dst (ConvI2L (LoadS mem)));
6130 ins_cost(125);
6131 format %{ "lh $dst, $mem #@loadS_convI2L" %}
6132 ins_encode(load_S_enc(dst, mem));
6133 ins_pipe( ialu_loadI );
6134 %}
6136 // Store Integer Immediate
// Store an arbitrary 32-bit immediate; costlier than storeI because the
// constant must be materialized first.
6137 instruct storeImmI(memory mem, immI src) %{
6138 match(Set mem (StoreI mem src));
6140 ins_cost(150);
6141 format %{ "mov $mem, $src #@storeImmI" %}
6142 ins_encode(store_I_immI_enc(mem, src));
6143 ins_pipe( ialu_storeI );
6144 %}
6146 // Store Integer
6147 instruct storeI(memory mem, mRegI src) %{
6148 match(Set mem (StoreI mem src));
6150 ins_cost(125);
6151 format %{ "sw $mem, $src #@storeI" %}
6152 ins_encode(store_I_reg_enc(mem, src));
6153 ins_pipe( ialu_storeI );
6154 %}
// StoreI of a truncated long: sw only writes the low 32 bits, so the
// ConvL2I is folded away and the long register stored directly.
6156 instruct storeI_convL2I(memory mem, mRegL src) %{
6157 match(Set mem (StoreI mem (ConvL2I src)));
6159 ins_cost(125);
6160 format %{ "sw $mem, $src #@storeI_convL2I" %}
6161 ins_encode(store_I_reg_enc(mem, src));
6162 ins_pipe( ialu_storeI );
6163 %}
6165 // Load Float
6166 instruct loadF(regF dst, memory mem) %{
6167 match(Set dst (LoadF mem));
6169 ins_cost(150);
6170 format %{ "loadF $dst, $mem #@loadF" %}
6171 ins_encode(load_F_enc(dst, mem));
6172 ins_pipe( ialu_loadI );
6173 %}
// Materialize a general pointer constant. Oop and metadata constants must be
// emitted with a relocation record (and fixed-length li48) so the GC / class
// unloading machinery can patch them; other pointers use plain set64.
6175 instruct loadConP_general(mRegP dst, immP src) %{
6176 match(Set dst src);
6178 ins_cost(120);
6179 format %{ "li $dst, $src #@loadConP_general" %}
6181 ins_encode %{
6182 Register dst = $dst$$Register;
6183 long* value = (long*)$src$$constant;
6184 bool is_need_reloc = $src->constant_reloc() != relocInfo::none;
6186 /* During GC, klassOop may be moved to new position in the heap.
6187 * It must be relocated.
6188 * Refer: [c1_LIRAssembler_mips.cpp] jobject2reg()
6189 */
6190 if (is_need_reloc) {
// li48 (not set64) is used on both reloc paths — presumably because the
// patching code expects a fixed-length instruction sequence; confirm.
6191 if($src->constant_reloc() == relocInfo::metadata_type){
6192 int klass_index = __ oop_recorder()->find_index((Klass*)value);
6193 RelocationHolder rspec = metadata_Relocation::spec(klass_index);
6195 __ relocate(rspec);
6196 __ li48(dst, (long)value);
6197 }
6199 if($src->constant_reloc() == relocInfo::oop_type){
6200 int oop_index = __ oop_recorder()->find_index((jobject)value);
6201 RelocationHolder rspec = oop_Relocation::spec(oop_index);
6203 __ relocate(rspec);
6204 __ li48(dst, (long)value);
6205 }
6206 } else {
6207 __ set64(dst, (long)value);
6208 }
6209 %}
6211 ins_pipe( ialu_regI_regI );
6212 %}
6214 /*
6215 instruct loadConP_load(mRegP dst, immP_load src) %{
6216 match(Set dst src);
6218 ins_cost(100);
6219 format %{ "ld $dst, [$constanttablebase + $constantoffset] load from constant table: ptr=$src @ loadConP_load" %}
6221 ins_encode %{
6223 int con_offset = $constantoffset($src);
6225 if (Assembler::is_simm16(con_offset)) {
6226 __ ld($dst$$Register, $constanttablebase, con_offset);
6227 } else {
6228 __ set64(AT, con_offset);
6229 if (UseLoongsonISA) {
6230 __ gsldx($dst$$Register, $constanttablebase, AT, 0);
6231 } else {
6232 __ daddu(AT, $constanttablebase, AT);
6233 __ ld($dst$$Register, AT, 0);
6234 }
6235 }
6236 %}
6238 ins_pipe(ialu_loadI);
6239 %}
6240 */
// Pointer constant that is neither an oop nor expensive to build: no
// relocation needed, just set64. Cheapest general-pointer path (cost 80).
6242 instruct loadConP_no_oop_cheap(mRegP dst, immP_no_oop_cheap src) %{
6243 match(Set dst src);
6245 ins_cost(80);
6246 format %{ "li $dst, $src @ loadConP_no_oop_cheap" %}
6248 ins_encode %{
6249 __ set64($dst$$Register, $src$$constant);
6250 %}
6252 ins_pipe(ialu_regI_regI);
6253 %}
// Materialize the safepoint polling page address; no relocation required.
6256 instruct loadConP_poll(mRegP dst, immP_poll src) %{
6257 match(Set dst src);
6259 ins_cost(50);
6260 format %{ "li $dst, $src #@loadConP_poll" %}
6262 ins_encode %{
6263 Register dst = $dst$$Register;
6264 intptr_t value = (intptr_t)$src$$constant;
6266 __ set64(dst, (jlong)value);
6267 %}
6269 ins_pipe( ialu_regI_regI );
6270 %}
// NULL pointer constant: copy the hardwired zero register.
6272 instruct loadConP0(mRegP dst, immP0 src)
6273 %{
6274 match(Set dst src);
6276 ins_cost(50);
6277 format %{ "mov $dst, R0\t# ptr" %}
6278 ins_encode %{
6279 Register dst_reg = $dst$$Register;
6280 __ daddu(dst_reg, R0, R0);
6281 %}
6282 ins_pipe( ialu_regI_regI );
6283 %}
// Compressed NULL constant: the narrow encoding of NULL is all zero bits.
6285 instruct loadConN0(mRegN dst, immN0 src) %{
6286 match(Set dst src);
6287 format %{ "move $dst, R0\t# compressed NULL ptr" %}
6288 ins_encode %{
6289 __ move($dst$$Register, R0);
6290 %}
6291 ins_pipe( ialu_regI_regI );
6292 %}
// Materialize a non-null compressed oop constant. The value is recorded in
// the OopRecorder and emitted with a narrow-oop relocation so GC can patch
// the embedded index.
6294 instruct loadConN(mRegN dst, immN src) %{
6295 match(Set dst src);
6297 ins_cost(125);
6298 format %{ "li $dst, $src\t# compressed ptr @ loadConN" %}
6299 ins_encode %{
6300 address con = (address)$src$$constant;
6301 if (con == NULL) {
// NULL is handled by loadConN0, so it must never reach this rule.
6302 ShouldNotReachHere();
6303 } else {
6304 assert (UseCompressedOops, "should only be used for compressed headers");
6305 assert (Universe::heap() != NULL, "java heap should be initialized");
6306 assert (__ oop_recorder() != NULL, "this assembler needs an OopRecorder");
6308 Register dst = $dst$$Register;
6309 long* value = (long*)$src$$constant;
6310 int oop_index = __ oop_recorder()->find_index((jobject)value);
6311 RelocationHolder rspec = oop_Relocation::spec(oop_index);
// NOTE(review): the relocated path emits the oop *index*, not the narrow
// oop bits — presumably the relocation pass rewrites it; confirm.
6312 if(rspec.type()!=relocInfo::none){
6313 __ relocate(rspec, Assembler::narrow_oop_operand);
6314 __ li48(dst, oop_index);
6315 } else {
6316 __ set64(dst, oop_index);
6317 }
6318 }
6319 %}
6320 ins_pipe( ialu_regI_regI ); // XXX
6321 %}
// Materialize a compressed klass pointer constant: record the Klass in the
// OopRecorder, emit a metadata relocation, and load the encoded narrow value.
// NOTE(review): the relocation is tagged Assembler::narrow_oop_operand even
// though this is a metadata (klass) relocation — confirm this is intended.
6323 instruct loadConNKlass(mRegN dst, immNKlass src) %{
6324 match(Set dst src);
6326 ins_cost(125);
6327 format %{ "li $dst, $src\t# compressed klass ptr @ loadConNKlass" %}
6328 ins_encode %{
6329 address con = (address)$src$$constant;
6330 if (con == NULL) {
6331 ShouldNotReachHere();
6332 } else {
6333 Register dst = $dst$$Register;
6334 long* value = (long*)$src$$constant;
6336 int klass_index = __ oop_recorder()->find_index((Klass*)value);
6337 RelocationHolder rspec = metadata_Relocation::spec(klass_index);
6338 long narrowp = (long)Klass::encode_klass((Klass*)value);
6340 if(rspec.type()!=relocInfo::none){
6341 __ relocate(rspec, Assembler::narrow_oop_operand);
6342 __ li48(dst, narrowp);
6343 } else {
6344 __ set64(dst, narrowp);
6345 }
6346 }
6347 %}
6348 ins_pipe( ialu_regI_regI ); // XXX
6349 %}
6351 //FIXME
6352 // Tail Call; Jump from runtime stub to Java code.
6353 // Also known as an 'interprocedural jump'.
6354 // Target of jump will eventually return to caller.
6355 // TailJump below removes the return address.
6356 instruct TailCalljmpInd(mRegP jump_target, mRegP method_oop) %{
6357 match(TailCall jump_target method_oop );
6358 ins_cost(300);
6359 format %{ "JMP $jump_target \t# @TailCalljmpInd" %}
6361 ins_encode %{
6362 Register target = $jump_target$$Register;
6363 Register oop = $method_oop$$Register;
6365 /* 2012/10/12 Jin: RA will be used in generate_forward_exception() */
6366 __ push(RA);
// Method oop is passed to the target in S3; jr has a delay slot, hence nop.
6368 __ move(S3, oop);
6369 __ jr(target);
6370 __ nop();
6371 %}
6373 ins_pipe( pipe_jump );
6374 %}
6376 // Create exception oop: created by stack-crawling runtime code.
6377 // Created exception is now available to this handler, and is setup
6378 // just prior to jumping to this handler. No code emitted.
6379 instruct CreateException( a0_RegP ex_oop )
6380 %{
6381 match(Set ex_oop (CreateEx));
6383 // use the following format syntax
6384 format %{ "# exception oop is in A0; no code emitted @CreateException" %}
6385 ins_encode %{
6386 /* Jin: X86 leaves this function empty */
// Only a block comment is emitted; the oop is already in A0 on entry.
6387 __ block_comment("CreateException is empty in X86/MIPS");
6388 %}
6389 ins_pipe( empty );
6390 // ins_pipe( pipe_jump );
6391 %}
6394 /* 2012/9/14 Jin: The mechanism of exception handling is clear now.
6396 - Common try/catch:
6397 2012/9/14 Jin: [stubGenerator_mips.cpp] generate_forward_exception()
6398 |- V0, V1 are created
6399 |- T9 <= SharedRuntime::exception_handler_for_return_address
6400 `- jr T9
6401 `- the caller's exception_handler
6402 `- jr OptoRuntime::exception_blob
6403 `- here
6404 - Rethrow(e.g. 'unwind'):
6405 * The callee:
6406 |- an exception is triggered during execution
6407 `- exits the callee method through RethrowException node
6408 |- The callee pushes exception_oop(T0) and exception_pc(RA)
6409 `- The callee jumps to OptoRuntime::rethrow_stub()
6410 * In OptoRuntime::rethrow_stub:
6411 |- The VM calls _rethrow_Java to determine the return address in the caller method
6412 `- exits the stub with tailjmpInd
6413 |- pops exception_oop(V0) and exception_pc(V1)
6414 `- jumps to the return address(usually an exception_handler)
6415 * The caller:
6416 `- continues processing the exception_blob with V0/V1
6417 */
6419 /*
6420 Disassembling OptoRuntime::rethrow_stub()
6422 ; locals
6423 0x2d3bf320: addiu sp, sp, 0xfffffff8
6424 0x2d3bf324: sw ra, 0x4(sp)
6425 0x2d3bf328: sw fp, 0x0(sp)
6426 0x2d3bf32c: addu fp, sp, zero
6427 0x2d3bf330: addiu sp, sp, 0xfffffff0
6428 0x2d3bf334: sw ra, 0x8(sp)
6429 0x2d3bf338: sw t0, 0x4(sp)
6430 0x2d3bf33c: sw sp, 0x0(sp)
6432 ; get_thread(S2)
6433 0x2d3bf340: addu s2, sp, zero
6434 0x2d3bf344: srl s2, s2, 12
6435 0x2d3bf348: sll s2, s2, 2
6436 0x2d3bf34c: lui at, 0x2c85
6437 0x2d3bf350: addu at, at, s2
6438 0x2d3bf354: lw s2, 0xffffcc80(at)
6440 0x2d3bf358: lw s0, 0x0(sp)
6441 0x2d3bf35c: sw s0, 0x118(s2) // last_sp -> thread
6442 0x2d3bf360: sw s2, 0xc(sp)
6444 ; OptoRuntime::rethrow_C(oopDesc* exception, JavaThread* thread, address ret_pc)
6445 0x2d3bf364: lw a0, 0x4(sp)
6446 0x2d3bf368: lw a1, 0xc(sp)
6447 0x2d3bf36c: lw a2, 0x8(sp)
6448 ;; Java_To_Runtime
6449 0x2d3bf370: lui t9, 0x2c34
6450 0x2d3bf374: addiu t9, t9, 0xffff8a48
6451 0x2d3bf378: jalr t9
6452 0x2d3bf37c: nop
6454 0x2d3bf380: addu s3, v0, zero ; S3: SharedRuntime::raw_exception_handler_for_return_address()
6456 0x2d3bf384: lw s0, 0xc(sp)
6457 0x2d3bf388: sw zero, 0x118(s0)
6458 0x2d3bf38c: sw zero, 0x11c(s0)
6459 0x2d3bf390: lw s1, 0x144(s0) ; ex_oop: S1
6460 0x2d3bf394: addu s2, s0, zero
6461 0x2d3bf398: sw zero, 0x144(s2)
6462 0x2d3bf39c: lw s0, 0x4(s2)
6463 0x2d3bf3a0: addiu s4, zero, 0x0
6464 0x2d3bf3a4: bne s0, s4, 0x2d3bf3d4
6465 0x2d3bf3a8: nop
6466 0x2d3bf3ac: addiu sp, sp, 0x10
6467 0x2d3bf3b0: addiu sp, sp, 0x8
6468 0x2d3bf3b4: lw ra, 0xfffffffc(sp)
6469 0x2d3bf3b8: lw fp, 0xfffffff8(sp)
6470 0x2d3bf3bc: lui at, 0x2b48
6471 0x2d3bf3c0: lw at, 0x100(at)
6473 ; tailjmpInd: Restores exception_oop & exception_pc
6474 0x2d3bf3c4: addu v1, ra, zero
6475 0x2d3bf3c8: addu v0, s1, zero
6476 0x2d3bf3cc: jr s3
6477 0x2d3bf3d0: nop
6478 ; Exception:
6479 0x2d3bf3d4: lui s1, 0x2cc8 ; generate_forward_exception()
6480 0x2d3bf3d8: addiu s1, s1, 0x40
6481 0x2d3bf3dc: addiu s2, zero, 0x0
6482 0x2d3bf3e0: addiu sp, sp, 0x10
6483 0x2d3bf3e4: addiu sp, sp, 0x8
6484 0x2d3bf3e8: lw ra, 0xfffffffc(sp)
6485 0x2d3bf3ec: lw fp, 0xfffffff8(sp)
6486 0x2d3bf3f0: lui at, 0x2b48
6487 0x2d3bf3f4: lw at, 0x100(at)
6488 ; TailCalljmpInd
6489 __ push(RA); ; to be used in generate_forward_exception()
6490 0x2d3bf3f8: addu t7, s2, zero
6491 0x2d3bf3fc: jr s1
6492 0x2d3bf400: nop
6493 */
6494 // Rethrow exception:
6495 // The exception oop will come in the first argument position.
6496 // Then JUMP (not call) to the rethrow stub code.
6497 instruct RethrowException()
6498 %{
6499 match(Rethrow);
6501 // use the following format syntax
6502 format %{ "JMP rethrow_stub #@RethrowException" %}
6503 ins_encode %{
6504 __ block_comment("@ RethrowException");
// Mark and relocate so the stub address can be patched as a runtime call.
6506 cbuf.set_insts_mark();
6507 cbuf.relocate(cbuf.insts_mark(), runtime_call_Relocation::spec());
6509 // call OptoRuntime::rethrow_stub to get the exception handler in parent method
6510 __ li(T9, OptoRuntime::rethrow_stub());
6511 __ jr(T9);
6512 __ nop();
6513 %}
6514 ins_pipe( pipe_jump );
6515 %}
// Conditional branch: pointer compared against NULL (R0). Only eq/ne are
// meaningful for a null check; the unsigned orderings are commented out.
6517 instruct branchConP_zero(cmpOpU cmp, mRegP op1, immP0 zero, label labl) %{
6518 match(If cmp (CmpP op1 zero));
6519 effect(USE labl);
6521 ins_cost(180);
6522 format %{ "b$cmp $op1, R0, $labl #@branchConP_zero" %}
6524 ins_encode %{
6525 Register op1 = $op1$$Register;
6526 Register op2 = R0;
// NOTE(review): "if (&L)" tests the address of a reference; it only guards
// a NULL $labl$$label via undefined behavior. Present throughout this file.
6527 Label &L = *($labl$$label);
6528 int flag = $cmp$$cmpcode;
6530 switch(flag)
6531 {
6532 case 0x01: //equal
6533 if (&L)
6534 __ beq(op1, op2, L);
6535 else
6536 __ beq(op1, op2, (int)0);
6537 break;
6538 case 0x02: //not_equal
6539 if (&L)
6540 __ bne(op1, op2, L);
6541 else
6542 __ bne(op1, op2, (int)0);
6543 break;
6544 /*
6545 case 0x03: //above
6546 __ sltu(AT, op2, op1);
6547 if(&L)
6548 __ bne(R0, AT, L);
6549 else
6550 __ bne(R0, AT, (int)0);
6551 break;
6552 case 0x04: //above_equal
6553 __ sltu(AT, op1, op2);
6554 if(&L)
6555 __ beq(AT, R0, L);
6556 else
6557 __ beq(AT, R0, (int)0);
6558 break;
6559 case 0x05: //below
6560 __ sltu(AT, op1, op2);
6561 if(&L)
6562 __ bne(R0, AT, L);
6563 else
6564 __ bne(R0, AT, (int)0);
6565 break;
6566 case 0x06: //below_equal
6567 __ sltu(AT, op2, op1);
6568 if(&L)
6569 __ beq(AT, R0, L);
6570 else
6571 __ beq(AT, R0, (int)0);
6572 break;
6573 */
6574 default:
6575 Unimplemented();
6576 }
6577 __ nop();
6578 %}
6580 ins_pc_relative(1);
6581 ins_pipe( pipe_alu_branch );
6582 %}
// Conditional branch: pointer vs pointer register. Unsigned orderings are
// synthesized with sltu into AT followed by a branch on AT; each branch is
// followed by a nop to fill the MIPS delay slot.
6585 instruct branchConP(cmpOpU cmp, mRegP op1, mRegP op2, label labl) %{
6586 match(If cmp (CmpP op1 op2));
6587 // predicate(can_branch_register(_kids[0]->_leaf, _kids[1]->_leaf));
6588 effect(USE labl);
6590 ins_cost(200);
6591 format %{ "b$cmp $op1, $op2, $labl #@branchConP" %}
6593 ins_encode %{
6594 Register op1 = $op1$$Register;
6595 Register op2 = $op2$$Register;
// NOTE(review): "if (&L)" is the file-wide null-label guard via UB; see
// branchConP_zero.
6596 Label &L = *($labl$$label);
6597 int flag = $cmp$$cmpcode;
6599 switch(flag)
6600 {
6601 case 0x01: //equal
6602 if (&L)
6603 __ beq(op1, op2, L);
6604 else
6605 __ beq(op1, op2, (int)0);
6606 break;
6607 case 0x02: //not_equal
6608 if (&L)
6609 __ bne(op1, op2, L);
6610 else
6611 __ bne(op1, op2, (int)0);
6612 break;
6613 case 0x03: //above
6614 __ sltu(AT, op2, op1);
6615 if(&L)
6616 __ bne(R0, AT, L);
6617 else
6618 __ bne(R0, AT, (int)0);
6619 break;
6620 case 0x04: //above_equal
6621 __ sltu(AT, op1, op2);
6622 if(&L)
6623 __ beq(AT, R0, L);
6624 else
6625 __ beq(AT, R0, (int)0);
6626 break;
6627 case 0x05: //below
6628 __ sltu(AT, op1, op2);
6629 if(&L)
6630 __ bne(R0, AT, L);
6631 else
6632 __ bne(R0, AT, (int)0);
6633 break;
6634 case 0x06: //below_equal
6635 __ sltu(AT, op2, op1);
6636 if(&L)
6637 __ beq(AT, R0, L);
6638 else
6639 __ beq(AT, R0, (int)0);
6640 break;
6641 default:
6642 Unimplemented();
6643 }
6644 __ nop();
6645 %}
6647 ins_pc_relative(1);
6648 ins_pipe( pipe_alu_branch );
6649 %}
// Conditional branch: compressed oop compared against narrow NULL (R0).
// Only eq/ne are supported; other orderings hit Unimplemented().
6651 instruct cmpN_null_branch(cmpOp cmp, mRegN op1, immN0 null, label labl) %{
6652 match(If cmp (CmpN op1 null));
6653 effect(USE labl);
6655 ins_cost(180);
6656 format %{ "CMP $op1,0\t! compressed ptr\n\t"
6657 "BP$cmp $labl @ cmpN_null_branch" %}
6658 ins_encode %{
6659 Register op1 = $op1$$Register;
6660 Register op2 = R0;
// NOTE(review): "if (&L)" null-label guard via UB; see branchConP_zero.
6661 Label &L = *($labl$$label);
6662 int flag = $cmp$$cmpcode;
6664 switch(flag)
6665 {
6666 case 0x01: //equal
6667 if (&L)
6668 __ beq(op1, op2, L);
6669 else
6670 __ beq(op1, op2, (int)0);
6671 break;
6672 case 0x02: //not_equal
6673 if (&L)
6674 __ bne(op1, op2, L);
6675 else
6676 __ bne(op1, op2, (int)0);
6677 break;
6678 default:
6679 Unimplemented();
6680 }
6681 __ nop();
6682 %}
6683 //TODO: pipe_branchP or create pipe_branchN LEE
6684 ins_pc_relative(1);
6685 ins_pipe( pipe_alu_branch );
6686 %}
// Conditional branch: compressed oop vs compressed oop register.
// Unsigned orderings synthesized with sltu into AT, as in branchConP.
6688 instruct cmpN_reg_branch(cmpOp cmp, mRegN op1, mRegN op2, label labl) %{
6689 match(If cmp (CmpN op1 op2));
6690 effect(USE labl);
6692 ins_cost(180);
6693 format %{ "CMP $op1,$op2\t! compressed ptr\n\t"
6694 "BP$cmp $labl" %}
6695 ins_encode %{
6696 Register op1_reg = $op1$$Register;
6697 Register op2_reg = $op2$$Register;
// NOTE(review): "if (&L)" null-label guard via UB; see branchConP_zero.
6698 Label &L = *($labl$$label);
6699 int flag = $cmp$$cmpcode;
6701 switch(flag)
6702 {
6703 case 0x01: //equal
6704 if (&L)
6705 __ beq(op1_reg, op2_reg, L);
6706 else
6707 __ beq(op1_reg, op2_reg, (int)0);
6708 break;
6709 case 0x02: //not_equal
6710 if (&L)
6711 __ bne(op1_reg, op2_reg, L);
6712 else
6713 __ bne(op1_reg, op2_reg, (int)0);
6714 break;
6715 case 0x03: //above
6716 __ sltu(AT, op2_reg, op1_reg);
6717 if(&L)
6718 __ bne(R0, AT, L);
6719 else
6720 __ bne(R0, AT, (int)0);
6721 break;
6722 case 0x04: //above_equal
6723 __ sltu(AT, op1_reg, op2_reg);
6724 if(&L)
6725 __ beq(AT, R0, L);
6726 else
6727 __ beq(AT, R0, (int)0);
6728 break;
6729 case 0x05: //below
6730 __ sltu(AT, op1_reg, op2_reg);
6731 if(&L)
6732 __ bne(R0, AT, L);
6733 else
6734 __ bne(R0, AT, (int)0);
6735 break;
6736 case 0x06: //below_equal
6737 __ sltu(AT, op2_reg, op1_reg);
6738 if(&L)
6739 __ beq(AT, R0, L);
6740 else
6741 __ beq(AT, R0, (int)0);
6742 break;
6743 default:
6744 Unimplemented();
6745 }
6746 __ nop();
6747 %}
6748 ins_pc_relative(1);
6749 ins_pipe( pipe_alu_branch );
6750 %}
// Conditional branch: unsigned int compare, register vs register.
6752 instruct branchConIU_reg_reg(cmpOpU cmp, mRegI src1, mRegI src2, label labl) %{
6753 match( If cmp (CmpU src1 src2) );
6754 effect(USE labl);
6755 format %{ "BR$cmp $src1, $src2, $labl #@branchConIU_reg_reg" %}
6757 ins_encode %{
6758 Register op1 = $src1$$Register;
6759 Register op2 = $src2$$Register;
// NOTE(review): "if (&L)" null-label guard via UB; see branchConP_zero.
6760 Label &L = *($labl$$label);
6761 int flag = $cmp$$cmpcode;
6763 switch(flag)
6764 {
6765 case 0x01: //equal
6766 if (&L)
6767 __ beq(op1, op2, L);
6768 else
6769 __ beq(op1, op2, (int)0);
6770 break;
6771 case 0x02: //not_equal
6772 if (&L)
6773 __ bne(op1, op2, L);
6774 else
6775 __ bne(op1, op2, (int)0);
6776 break;
6777 case 0x03: //above
6778 __ sltu(AT, op2, op1);
6779 if(&L)
6780 __ bne(AT, R0, L);
6781 else
6782 __ bne(AT, R0, (int)0);
6783 break;
6784 case 0x04: //above_equal
6785 __ sltu(AT, op1, op2);
6786 if(&L)
6787 __ beq(AT, R0, L);
6788 else
6789 __ beq(AT, R0, (int)0);
6790 break;
6791 case 0x05: //below
6792 __ sltu(AT, op1, op2);
6793 if(&L)
6794 __ bne(AT, R0, L);
6795 else
6796 __ bne(AT, R0, (int)0);
6797 break;
6798 case 0x06: //below_equal
6799 __ sltu(AT, op2, op1);
6800 if(&L)
6801 __ beq(AT, R0, L);
6802 else
6803 __ beq(AT, R0, (int)0);
6804 break;
6805 default:
6806 Unimplemented();
6807 }
6808 __ nop();
6809 %}
6811 ins_pc_relative(1);
6812 ins_pipe( pipe_alu_branch );
6813 %}
// Conditional branch: unsigned int compare, register vs arbitrary immediate.
// The immediate is first materialized into AT; note AT is then reused as the
// sltu result in the ordering cases.
6816 instruct branchConIU_reg_imm(cmpOpU cmp, mRegI src1, immI src2, label labl) %{
6817 match( If cmp (CmpU src1 src2) );
6818 effect(USE labl);
6819 format %{ "BR$cmp $src1, $src2, $labl #@branchConIU_reg_imm" %}
6821 ins_encode %{
6822 Register op1 = $src1$$Register;
6823 int val = $src2$$constant;
// NOTE(review): "if (&L)" null-label guard via UB; see branchConP_zero.
6824 Label &L = *($labl$$label);
6825 int flag = $cmp$$cmpcode;
6827 __ move(AT, val);
6828 switch(flag)
6829 {
6830 case 0x01: //equal
6831 if (&L)
6832 __ beq(op1, AT, L);
6833 else
6834 __ beq(op1, AT, (int)0);
6835 break;
6836 case 0x02: //not_equal
6837 if (&L)
6838 __ bne(op1, AT, L);
6839 else
6840 __ bne(op1, AT, (int)0);
6841 break;
6842 case 0x03: //above
6843 __ sltu(AT, AT, op1);
6844 if(&L)
6845 __ bne(R0, AT, L);
6846 else
6847 __ bne(R0, AT, (int)0);
6848 break;
6849 case 0x04: //above_equal
6850 __ sltu(AT, op1, AT);
6851 if(&L)
6852 __ beq(AT, R0, L);
6853 else
6854 __ beq(AT, R0, (int)0);
6855 break;
6856 case 0x05: //below
6857 __ sltu(AT, op1, AT);
6858 if(&L)
6859 __ bne(R0, AT, L);
6860 else
6861 __ bne(R0, AT, (int)0);
6862 break;
6863 case 0x06: //below_equal
6864 __ sltu(AT, AT, op1);
6865 if(&L)
6866 __ beq(AT, R0, L);
6867 else
6868 __ beq(AT, R0, (int)0);
6869 break;
6870 default:
6871 Unimplemented();
6872 }
6873 __ nop();
6874 %}
6876 ins_pc_relative(1);
6877 ins_pipe( pipe_alu_branch );
6878 %}
// Conditional branch: signed int compare, register vs register (slt, not sltu).
6880 instruct branchConI_reg_reg(cmpOp cmp, mRegI src1, mRegI src2, label labl) %{
6881 match( If cmp (CmpI src1 src2) );
6882 effect(USE labl);
6883 format %{ "BR$cmp $src1, $src2, $labl #@branchConI_reg_reg" %}
6885 ins_encode %{
6886 Register op1 = $src1$$Register;
6887 Register op2 = $src2$$Register;
// NOTE(review): "if (&L)" null-label guard via UB; see branchConP_zero.
6888 Label &L = *($labl$$label);
6889 int flag = $cmp$$cmpcode;
6891 switch(flag)
6892 {
6893 case 0x01: //equal
6894 if (&L)
6895 __ beq(op1, op2, L);
6896 else
6897 __ beq(op1, op2, (int)0);
6898 break;
6899 case 0x02: //not_equal
6900 if (&L)
6901 __ bne(op1, op2, L);
6902 else
6903 __ bne(op1, op2, (int)0);
6904 break;
6905 case 0x03: //above
6906 __ slt(AT, op2, op1);
6907 if(&L)
6908 __ bne(R0, AT, L);
6909 else
6910 __ bne(R0, AT, (int)0);
6911 break;
6912 case 0x04: //above_equal
6913 __ slt(AT, op1, op2);
6914 if(&L)
6915 __ beq(AT, R0, L);
6916 else
6917 __ beq(AT, R0, (int)0);
6918 break;
6919 case 0x05: //below
6920 __ slt(AT, op1, op2);
6921 if(&L)
6922 __ bne(R0, AT, L);
6923 else
6924 __ bne(R0, AT, (int)0);
6925 break;
6926 case 0x06: //below_equal
6927 __ slt(AT, op2, op1);
6928 if(&L)
6929 __ beq(AT, R0, L);
6930 else
6931 __ beq(AT, R0, (int)0);
6932 break;
6933 default:
6934 Unimplemented();
6935 }
6936 __ nop();
6937 %}
6939 ins_pc_relative(1);
6940 ins_pipe( pipe_alu_branch );
6941 %}
// Conditional branch: signed int compared against zero. Uses the dedicated
// compare-with-zero branch forms (bgtz/bgez/bltz/blez), so no slt is needed;
// hence the lower cost (170) than the general reg/imm rule.
6943 instruct branchConI_reg_imm0(cmpOp cmp, mRegI src1, immI0 src2, label labl) %{
6944 match( If cmp (CmpI src1 src2) );
6945 effect(USE labl);
6946 ins_cost(170);
6947 format %{ "BR$cmp $src1, $src2, $labl #@branchConI_reg_imm0" %}
6949 ins_encode %{
6950 Register op1 = $src1$$Register;
6951 // int val = $src2$$constant;
// NOTE(review): "if (&L)" null-label guard via UB; see branchConP_zero.
6952 Label &L = *($labl$$label);
6953 int flag = $cmp$$cmpcode;
6955 //__ move(AT, val);
6956 switch(flag)
6957 {
6958 case 0x01: //equal
6959 if (&L)
6960 __ beq(op1, R0, L);
6961 else
6962 __ beq(op1, R0, (int)0);
6963 break;
6964 case 0x02: //not_equal
6965 if (&L)
6966 __ bne(op1, R0, L);
6967 else
6968 __ bne(op1, R0, (int)0);
6969 break;
6970 case 0x03: //greater
6971 if(&L)
6972 __ bgtz(op1, L);
6973 else
6974 __ bgtz(op1, (int)0);
6975 break;
6976 case 0x04: //greater_equal
6977 if(&L)
6978 __ bgez(op1, L);
6979 else
6980 __ bgez(op1, (int)0);
6981 break;
6982 case 0x05: //less
6983 if(&L)
6984 __ bltz(op1, L);
6985 else
6986 __ bltz(op1, (int)0);
6987 break;
6988 case 0x06: //less_equal
6989 if(&L)
6990 __ blez(op1, L);
6991 else
6992 __ blez(op1, (int)0);
6993 break;
6994 default:
6995 Unimplemented();
6996 }
6997 __ nop();
6998 %}
7000 ins_pc_relative(1);
7001 ins_pipe( pipe_alu_branch );
7002 %}
// Conditional branch: signed int compare, register vs arbitrary immediate.
// Immediate is materialized into AT; ordering cases reuse AT for the slt result.
7005 instruct branchConI_reg_imm(cmpOp cmp, mRegI src1, immI src2, label labl) %{
7006 match( If cmp (CmpI src1 src2) );
7007 effect(USE labl);
7008 ins_cost(200);
7009 format %{ "BR$cmp $src1, $src2, $labl #@branchConI_reg_imm" %}
7011 ins_encode %{
7012 Register op1 = $src1$$Register;
7013 int val = $src2$$constant;
// NOTE(review): "if (&L)" null-label guard via UB; see branchConP_zero.
7014 Label &L = *($labl$$label);
7015 int flag = $cmp$$cmpcode;
7017 __ move(AT, val);
7018 switch(flag)
7019 {
7020 case 0x01: //equal
7021 if (&L)
7022 __ beq(op1, AT, L);
7023 else
7024 __ beq(op1, AT, (int)0);
7025 break;
7026 case 0x02: //not_equal
7027 if (&L)
7028 __ bne(op1, AT, L);
7029 else
7030 __ bne(op1, AT, (int)0);
7031 break;
7032 case 0x03: //greater
7033 __ slt(AT, AT, op1);
7034 if(&L)
7035 __ bne(R0, AT, L);
7036 else
7037 __ bne(R0, AT, (int)0);
7038 break;
7039 case 0x04: //greater_equal
7040 __ slt(AT, op1, AT);
7041 if(&L)
7042 __ beq(AT, R0, L);
7043 else
7044 __ beq(AT, R0, (int)0);
7045 break;
7046 case 0x05: //less
7047 __ slt(AT, op1, AT);
7048 if(&L)
7049 __ bne(R0, AT, L);
7050 else
7051 __ bne(R0, AT, (int)0);
7052 break;
7053 case 0x06: //less_equal
7054 __ slt(AT, AT, op1);
7055 if(&L)
7056 __ beq(AT, R0, L);
7057 else
7058 __ beq(AT, R0, (int)0);
7059 break;
7060 default:
7061 Unimplemented();
7062 }
7063 __ nop();
7064 %}
7066 ins_pc_relative(1);
7067 ins_pipe( pipe_alu_branch );
7068 %}
// Conditional branch: unsigned int compared against zero. Degenerate cases
// are resolved at code-emission time: above_equal (u >= 0) is always true and
// becomes an unconditional beq(R0,R0); below (u < 0) is never true.
7070 instruct branchConIU_reg_imm0(cmpOpU cmp, mRegI src1, immI0 zero, label labl) %{
7071 match( If cmp (CmpU src1 zero) );
7072 effect(USE labl);
7073 format %{ "BR$cmp $src1, zero, $labl #@branchConIU_reg_imm0" %}
7075 ins_encode %{
7076 Register op1 = $src1$$Register;
// NOTE(review): "if (&L)" null-label guard via UB; see branchConP_zero.
7077 Label &L = *($labl$$label);
7078 int flag = $cmp$$cmpcode;
7080 switch(flag)
7081 {
7082 case 0x01: //equal
7083 if (&L)
7084 __ beq(op1, R0, L);
7085 else
7086 __ beq(op1, R0, (int)0);
7087 break;
7088 case 0x02: //not_equal
7089 if (&L)
7090 __ bne(op1, R0, L);
7091 else
7092 __ bne(op1, R0, (int)0);
7093 break;
7094 case 0x03: //above
7095 if(&L)
7096 __ bne(R0, op1, L);
7097 else
7098 __ bne(R0, op1, (int)0);
7099 break;
7100 case 0x04: //above_equal
7101 if(&L)
7102 __ beq(R0, R0, L);
7103 else
7104 __ beq(R0, R0, (int)0);
7105 break;
7106 case 0x05: //below
// NOTE(review): early return emits no branch (condition is never true) but
// also skips the trailing nop emitted for every other case — confirm intended.
7107 return;
7108 break;
7109 case 0x06: //below_equal
7110 if(&L)
7111 __ beq(op1, R0, L);
7112 else
7113 __ beq(op1, R0, (int)0);
7114 break;
7115 default:
7116 Unimplemented();
7117 }
7118 __ nop();
7119 %}
7121 ins_pc_relative(1);
7122 ins_pipe( pipe_alu_branch );
7123 %}
// Branch on unsigned int compare against a 16-bit immediate.
// The immediate is materialized into AT (or folded into sltiu where the
// encoding allows); AT is clobbered.  Trailing nop fills the delay slot.
instruct branchConIU_reg_immI16(cmpOpU cmp, mRegI src1, immI16 src2, label labl) %{
  match( If cmp (CmpU src1 src2) );
  effect(USE labl);
  ins_cost(180);
  format %{ "BR$cmp $src1, $src2, $labl #@branchConIU_reg_immI16" %}

  ins_encode %{
    Register op1 = $src1$$Register;
    int      val = $src2$$constant;
    Label    &L  = *($labl$$label);
    int     flag = $cmp$$cmpcode;

    switch(flag)
    {
      case 0x01: //equal
        __ move(AT, val);
        if (&L)
          __ beq(op1, AT, L);
        else
          __ beq(op1, AT, (int)0);
        break;
      case 0x02: //not_equal
        __ move(AT, val);
        if (&L)
          __ bne(op1, AT, L);
        else
          __ bne(op1, AT, (int)0);
        break;
      case 0x03: //above
        // src1 > val  <=>  val <u src1
        __ move(AT, val);
        __ sltu(AT, AT, op1);
        if(&L)
          __ bne(R0, AT, L);
        else
          __ bne(R0, AT, (int)0);
        break;
      case 0x04: //above_equal
        // src1 >= val  <=>  !(src1 <u val); sltiu folds the immediate.
        __ sltiu(AT, op1, val);
        if(&L)
          __ beq(AT, R0, L);
        else
          __ beq(AT, R0, (int)0);
        break;
      case 0x05: //below
        __ sltiu(AT, op1, val);
        if(&L)
          __ bne(R0, AT, L);
        else
          __ bne(R0, AT, (int)0);
        break;
      case 0x06: //below_equal
        // src1 <= val  <=>  !(val <u src1)
        __ move(AT, val);
        __ sltu(AT, AT, op1);
        if(&L)
          __ beq(AT, R0, L);
        else
          __ beq(AT, R0, (int)0);
        break;
      default:
        Unimplemented();
    }
    __ nop();   // branch delay slot
  %}

  ins_pc_relative(1);
  ins_pipe( pipe_alu_branch );
%}
// Branch on signed long register-register compare.
// Ordered cases materialize the condition in AT with slt (full 64-bit
// compare on MIPS64), then branch on AT; each branch explicitly fills
// its delay slot with __ delayed()->nop().  AT is clobbered.
instruct branchConL_regL_regL(cmpOp cmp, mRegL src1, mRegL src2, label labl) %{
  match( If cmp (CmpL src1 src2) );
  effect(USE labl);
  format %{ "BR$cmp $src1, $src2, $labl #@branchConL_regL_regL" %}
  ins_cost(250);

  ins_encode %{
    Register opr1_reg = as_Register($src1$$reg);
    Register opr2_reg = as_Register($src2$$reg);

    Label &target = *($labl$$label);
    int flag = $cmp$$cmpcode;

    switch(flag)
    {
      case 0x01: //equal
        if (&target)
          __ beq(opr1_reg, opr2_reg, target);
        else
          __ beq(opr1_reg, opr2_reg, (int)0);
        __ delayed()->nop();
        break;

      case 0x02: //not_equal
        if(&target)
          __ bne(opr1_reg, opr2_reg, target);
        else
          __ bne(opr1_reg, opr2_reg, (int)0);
        __ delayed()->nop();
        break;

      case 0x03: //greater
        // src1 > src2  <=>  src2 < src1
        __ slt(AT, opr2_reg, opr1_reg);
        if(&target)
          __ bne(AT, R0, target);
        else
          __ bne(AT, R0, (int)0);
        __ delayed()->nop();
        break;

      case 0x04: //greater_equal
        __ slt(AT, opr1_reg, opr2_reg);
        if(&target)
          __ beq(AT, R0, target);
        else
          __ beq(AT, R0, (int)0);
        __ delayed()->nop();

        break;

      case 0x05: //less
        __ slt(AT, opr1_reg, opr2_reg);
        if(&target)
          __ bne(AT, R0, target);
        else
          __ bne(AT, R0, (int)0);
        __ delayed()->nop();

        break;

      case 0x06: //less_equal
        __ slt(AT, opr2_reg, opr1_reg);

        if(&target)
          __ beq(AT, R0, target);
        else
          __ beq(AT, R0, (int)0);
        __ delayed()->nop();

        break;

      default:
        Unimplemented();
    }
  %}

  ins_pc_relative(1);
  ins_pipe( pipe_alu_branch );
%}
// Branch on signed long compare against an immediate, implemented by
// subtraction: AT = src1 - val (daddiu with the negated immediate), then
// branch on AT's sign/zero.  AT is clobbered; trailing nop fills the
// delay slot.
// NOTE(review): correctness assumes the immL16_sub operand restricts val
// so that -val fits a signed 16-bit field and src1 - val cannot wrap —
// confirm against the operand definition earlier in this file.
instruct branchConL_reg_immL16_sub(cmpOp cmp, mRegL src1, immL16_sub src2, label labl) %{
  match( If cmp (CmpL src1 src2) );
  effect(USE labl);
  ins_cost(180);
  format %{ "BR$cmp $src1, $src2, $labl #@branchConL_reg_immL16_sub" %}

  ins_encode %{
    Register op1 = $src1$$Register;
    int      val = $src2$$constant;
    Label    &L  = *($labl$$label);
    int     flag = $cmp$$cmpcode;

    __ daddiu(AT, op1, -1 * val);
    switch(flag)
    {
      case 0x01: //equal
        if (&L)
          __ beq(R0, AT, L);
        else
          __ beq(R0, AT, (int)0);
        break;
      case 0x02: //not_equal
        if (&L)
          __ bne(R0, AT, L);
        else
          __ bne(R0, AT, (int)0);
        break;
      case 0x03: //greater
        if(&L)
          __ bgtz(AT, L);
        else
          __ bgtz(AT, (int)0);
        break;
      case 0x04: //greater_equal
        if(&L)
          __ bgez(AT, L);
        else
          __ bgez(AT, (int)0);
        break;
      case 0x05: //less
        if(&L)
          __ bltz(AT, L);
        else
          __ bltz(AT, (int)0);
        break;
      case 0x06: //less_equal
        if(&L)
          __ blez(AT, L);
        else
          __ blez(AT, (int)0);
        break;
      default:
        Unimplemented();
    }
    __ nop();   // branch delay slot
  %}

  ins_pc_relative(1);
  ins_pipe( pipe_alu_branch );
%}
// Branch on signed int compare against an immediate, by subtraction:
// AT = src1 - val (32-bit addiu32 with the negated immediate), then
// branch on AT's sign/zero.  Int analogue of branchConL_reg_immL16_sub.
// AT is clobbered; trailing nop fills the delay slot.
// NOTE(review): assumes the immI16_sub operand keeps -val in simm16
// range and rules out wrap-around — confirm against the operand
// definition earlier in this file.
instruct branchConI_reg_imm16_sub(cmpOp cmp, mRegI src1, immI16_sub src2, label labl) %{
  match( If cmp (CmpI src1 src2) );
  effect(USE labl);
  ins_cost(180);
  format %{ "BR$cmp $src1, $src2, $labl #@branchConI_reg_imm16_sub" %}

  ins_encode %{
    Register op1 = $src1$$Register;
    int      val = $src2$$constant;
    Label    &L  = *($labl$$label);
    int     flag = $cmp$$cmpcode;

    __ addiu32(AT, op1, -1 * val);
    switch(flag)
    {
      case 0x01: //equal
        if (&L)
          __ beq(R0, AT, L);
        else
          __ beq(R0, AT, (int)0);
        break;
      case 0x02: //not_equal
        if (&L)
          __ bne(R0, AT, L);
        else
          __ bne(R0, AT, (int)0);
        break;
      case 0x03: //greater
        if(&L)
          __ bgtz(AT, L);
        else
          __ bgtz(AT, (int)0);
        break;
      case 0x04: //greater_equal
        if(&L)
          __ bgez(AT, L);
        else
          __ bgez(AT, (int)0);
        break;
      case 0x05: //less
        if(&L)
          __ bltz(AT, L);
        else
          __ bltz(AT, (int)0);
        break;
      case 0x06: //less_equal
        if(&L)
          __ blez(AT, L);
        else
          __ blez(AT, (int)0);
        break;
      default:
        Unimplemented();
    }
    __ nop();   // branch delay slot
  %}

  ins_pc_relative(1);
  ins_pipe( pipe_alu_branch );
%}
// Branch on signed long compare against the constant zero, using the
// MIPS compare-against-zero branch forms directly; a single
// __ delayed()->nop() after the switch fills the delay slot of
// whichever branch was emitted.
// Change: the 'less' case previously emitted slt(AT, src1, R0) followed
// by bne(AT, R0, ...) — two instructions and an AT clobber — where a
// single bltz gives the identical signed x < 0 test, matching the bgtz /
// bgez / blez forms the sibling cases already use (and the bltz used by
// branchConL_reg_immL16_sub).  This encoding no longer touches AT.
instruct branchConL_regL_immL0(cmpOp cmp, mRegL src1, immL0 zero, label labl) %{
  match( If cmp (CmpL src1 zero) );
  effect(USE labl);
  format %{ "BR$cmp $src1, zero, $labl #@branchConL_regL_immL0" %}
  ins_cost(150);

  ins_encode %{
    Register opr1_reg = as_Register($src1$$reg);
    Label &target = *($labl$$label);
    int flag = $cmp$$cmpcode;

    switch(flag)
    {
      case 0x01: //equal
        if (&target)
          __ beq(opr1_reg, R0, target);
        else
          __ beq(opr1_reg, R0, int(0));
        break;

      case 0x02: //not_equal
        if(&target)
          __ bne(opr1_reg, R0, target);
        else
          __ bne(opr1_reg, R0, (int)0);
        break;

      case 0x03: //greater
        if(&target)
          __ bgtz(opr1_reg, target);
        else
          __ bgtz(opr1_reg, (int)0);
        break;

      case 0x04: //greater_equal
        if(&target)
          __ bgez(opr1_reg, target);
        else
          __ bgez(opr1_reg, (int)0);
        break;

      case 0x05: //less
        if(&target)
          __ bltz(opr1_reg, target);
        else
          __ bltz(opr1_reg, (int)0);
        break;

      case 0x06: //less_equal
        if (&target)
          __ blez(opr1_reg, target);
        else
          __ blez(opr1_reg, int(0));
        break;

      default:
        Unimplemented();
    }
    __ delayed()->nop();   // branch delay slot
  %}

  ins_pc_relative(1);
  ins_pipe( pipe_alu_branch );
%}
//FIXME
// Branch on single-precision float compare.  Each case sets the FPU
// condition flag with a c.cond.s compare and then branches on it with
// bc1t/bc1f; the trailing nop fills the branch delay slot.
// NOTE(review): greater / greater_equal use the unordered compares
// c_ule_s / c_ult_s inverted via bc1f, so an unordered (NaN) operand
// makes those branches fall through; less / less_equal (c_ult_s /
// c_ule_s + bc1t) are TAKEN on NaN.  Confirm this matches the NaN
// semantics the ideal-graph cmpOp encodes (likely related to the FIXME
// above).
instruct branchConF_reg_reg(cmpOp cmp, regF src1, regF src2, label labl) %{
  match( If cmp (CmpF src1 src2) );
  effect(USE labl);
  format %{ "BR$cmp $src1, $src2, $labl #@branchConF_reg_reg" %}

  ins_encode %{
    FloatRegister reg_op1 = $src1$$FloatRegister;
    FloatRegister reg_op2 = $src2$$FloatRegister;
    Label &L = *($labl$$label);
    int flag = $cmp$$cmpcode;

    switch(flag)
    {
      case 0x01: //equal
        __ c_eq_s(reg_op1, reg_op2);
        if (&L)
          __ bc1t(L);
        else
          __ bc1t((int)0);
        break;
      case 0x02: //not_equal
        __ c_eq_s(reg_op1, reg_op2);
        if (&L)
          __ bc1f(L);
        else
          __ bc1f((int)0);
        break;
      case 0x03: //greater
        __ c_ule_s(reg_op1, reg_op2);
        if(&L)
          __ bc1f(L);
        else
          __ bc1f((int)0);
        break;
      case 0x04: //greater_equal
        __ c_ult_s(reg_op1, reg_op2);
        if(&L)
          __ bc1f(L);
        else
          __ bc1f((int)0);
        break;
      case 0x05: //less
        __ c_ult_s(reg_op1, reg_op2);
        if(&L)
          __ bc1t(L);
        else
          __ bc1t((int)0);
        break;
      case 0x06: //less_equal
        __ c_ule_s(reg_op1, reg_op2);
        if(&L)
          __ bc1t(L);
        else
          __ bc1t((int)0);
        break;
      default:
        Unimplemented();
    }
    __ nop();   // branch delay slot
  %}

  ins_pc_relative(1);
  ins_pipe(pipe_slow);
%}
// Branch on double-precision float compare; double analogue of
// branchConF_reg_reg.  FPU condition flag set by c.cond.d, branch via
// bc1t/bc1f, trailing nop for the delay slot.  The not_equal case
// deliberately inverts c_eq_d rather than using c_ueq_d (see comment
// inside) so that NaN != NaN holds.
instruct branchConD_reg_reg(cmpOp cmp, regD src1, regD src2, label labl) %{
  match( If cmp (CmpD src1 src2) );
  effect(USE labl);
  format %{ "BR$cmp $src1, $src2, $labl #@branchConD_reg_reg" %}

  ins_encode %{
    FloatRegister reg_op1 = $src1$$FloatRegister;
    FloatRegister reg_op2 = $src2$$FloatRegister;
    Label &L = *($labl$$label);
    int flag = $cmp$$cmpcode;

    switch(flag)
    {
      case 0x01: //equal
        __ c_eq_d(reg_op1, reg_op2);
        if (&L)
          __ bc1t(L);
        else
          __ bc1t((int)0);
        break;
      case 0x02: //not_equal
        // 2016/4/19 aoqi: c_ueq_d cannot distinguish NaN from equal.
        // Double.isNaN(Double) is implemented by 'f != f', so the use of
        // c_ueq_d causes bugs.
        __ c_eq_d(reg_op1, reg_op2);
        if (&L)
          __ bc1f(L);
        else
          __ bc1f((int)0);
        break;
      case 0x03: //greater
        __ c_ule_d(reg_op1, reg_op2);
        if(&L)
          __ bc1f(L);
        else
          __ bc1f((int)0);
        break;
      case 0x04: //greater_equal
        __ c_ult_d(reg_op1, reg_op2);
        if(&L)
          __ bc1f(L);
        else
          __ bc1f((int)0);
        break;
      case 0x05: //less
        __ c_ult_d(reg_op1, reg_op2);
        if(&L)
          __ bc1t(L);
        else
          __ bc1t((int)0);
        break;
      case 0x06: //less_equal
        __ c_ule_d(reg_op1, reg_op2);
        if(&L)
          __ bc1t(L);
        else
          __ bc1t((int)0);
        break;
      default:
        Unimplemented();
    }
    __ nop();   // branch delay slot
  %}

  ins_pc_relative(1);
  ins_pipe(pipe_slow);
%}
// Call Runtime Instruction
// Direct call into the VM runtime.  The actual emission is delegated to
// the Java_To_Runtime encode class (defined elsewhere in this file);
// the call site is 16-byte aligned via ins_alignment.
instruct CallRuntimeDirect(method meth) %{
  match(CallRuntime );
  effect(USE meth);

  ins_cost(300);
  format %{ "CALL,runtime #@CallRuntimeDirect" %}
  ins_encode( Java_To_Runtime( meth ) );
  ins_pipe( pipe_slow );
  ins_alignment(16);
%}
//------------------------MemBar Instructions-------------------------------
//Memory barrier flavors
//
// On this port, acquire/release membars are empty encodings (size(0)):
// the ordering they require is presumably supplied by the surrounding
// ld/st and CAS sequences — TODO confirm against the port's memory
// model assumptions.  The fences that must order globally visible
// memory emit a full `sync`.

// MemBarAcquire: no instruction emitted.
instruct membar_acquire() %{
  match(MemBarAcquire);
  ins_cost(0);

  size(0);
  format %{ "MEMBAR-acquire (empty) @ membar_acquire" %}
  ins_encode();
  ins_pipe(empty);
%}

// LoadFence: full hardware barrier via sync.
instruct load_fence() %{
  match(LoadFence);
  ins_cost(400);

  format %{ "MEMBAR @ load_fence" %}
  ins_encode %{
    __ sync();
  %}
  ins_pipe(pipe_slow);
%}

// MemBarAcquireLock: empty — acquire is part of the CAS in the prior
// FastLock.
instruct membar_acquire_lock()
%{
  match(MemBarAcquireLock);
  ins_cost(0);

  size(0);
  format %{ "MEMBAR-acquire (acquire as part of CAS in prior FastLock so empty encoding) @ membar_acquire_lock" %}
  ins_encode();
  ins_pipe(empty);
%}

// MemBarRelease: no instruction emitted.
instruct membar_release() %{
  match(MemBarRelease);
  ins_cost(0);

  size(0);
  format %{ "MEMBAR-release (empty) @ membar_release" %}
  ins_encode();
  ins_pipe(empty);
%}

// StoreFence: full hardware barrier via sync.
instruct store_fence() %{
  match(StoreFence);
  ins_cost(400);

  format %{ "MEMBAR @ store_fence" %}

  ins_encode %{
    __ sync();
  %}

  ins_pipe(pipe_slow);
%}

// MemBarReleaseLock: empty — release happens in FastUnlock.
instruct membar_release_lock()
%{
  match(MemBarReleaseLock);
  ins_cost(0);

  size(0);
  format %{ "MEMBAR-release-lock (release in FastUnlock so empty) @ membar_release_lock" %}
  ins_encode();
  ins_pipe(empty);
%}

// MemBarVolatile: sync, skipped entirely on uniprocessor machines.
instruct membar_volatile() %{
  match(MemBarVolatile);
  ins_cost(400);

  format %{ "MEMBAR-volatile" %}
  ins_encode %{
    if( !os::is_MP() ) return;     // Not needed on single CPU
    __ sync();

  %}
  ins_pipe(pipe_slow);
%}

// MemBarVolatile made redundant by a preceding store/load barrier
// (see Matcher::post_store_load_barrier): empty encoding.
instruct unnecessary_membar_volatile() %{
  match(MemBarVolatile);
  predicate(Matcher::post_store_load_barrier(n));
  ins_cost(0);

  size(0);
  format %{ "MEMBAR-volatile (unnecessary so empty encoding) @ unnecessary_membar_volatile" %}
  ins_encode( );
  ins_pipe(empty);
%}

// MemBarStoreStore: empty encoding on this port.
instruct membar_storestore() %{
  match(MemBarStoreStore);

  ins_cost(0);
  size(0);
  format %{ "MEMBAR-storestore (empty encoding) @ membar_storestore" %}
  ins_encode( );
  ins_pipe(empty);
%}
//----------Move Instructions--------------------------------------------------
// CastX2P: reinterpret a machine-word long as a pointer.  Just a
// register-to-register move, elided when source and destination already
// coincide.
instruct castX2P(mRegP dst, mRegL src) %{
  match(Set dst (CastX2P src));
  format %{ "castX2P $dst, $src @ castX2P" %}
  ins_encode %{
    Register dst_reg = $dst$$Register;
    Register src_reg = $src$$Register;

    if (dst_reg != src_reg) {
      __ move(dst_reg, src_reg);
    }
  %}
  ins_cost(10);
  ins_pipe( ialu_regI_mov );
%}
// CastP2X: reinterpret a pointer as a machine-word long.  Inverse of
// castX2P: a plain move, skipped if the registers are the same.
instruct castP2X(mRegL dst, mRegP src ) %{
  match(Set dst (CastP2X src));
  format %{ "mov $dst, $src\t #@castP2X" %}
  ins_encode %{
    Register dst_reg = $dst$$Register;
    Register src_reg = $src$$Register;

    if (dst_reg != src_reg) {
      __ move(dst_reg, src_reg);
    }
  %}
  ins_pipe( ialu_regI_mov );
%}
// MoveF2I: raw bit move of a float's 32 bits from an FPU register into a
// GPR via mfc1 (no conversion, bits are preserved).
instruct MoveF2I_reg_reg(mRegI dst, regF src) %{
  match(Set dst (MoveF2I src));
  effect(DEF dst, USE src);
  ins_cost(85);
  format %{ "MoveF2I $dst, $src @ MoveF2I_reg_reg" %}
  ins_encode %{
    FloatRegister src_reg = as_FloatRegister($src$$reg);
    Register      dst_reg = as_Register($dst$$reg);

    __ mfc1(dst_reg, src_reg);
  %}
  ins_pipe( pipe_slow );
%}
// MoveI2F: raw bit move of an int's 32 bits from a GPR into an FPU
// register via mtc1 (no conversion, bits are preserved).
instruct MoveI2F_reg_reg(regF dst, mRegI src) %{
  match(Set dst (MoveI2F src));
  effect(DEF dst, USE src);
  ins_cost(85);
  format %{ "MoveI2F $dst, $src @ MoveI2F_reg_reg" %}
  ins_encode %{
    FloatRegister dst_reg = as_FloatRegister($dst$$reg);
    Register      src_reg = as_Register($src$$reg);

    __ mtc1(src_reg, dst_reg);
  %}
  ins_pipe( pipe_slow );
%}
// MoveD2L: raw bit move of a double's 64 bits from an FPU register into
// a GPR via dmfc1 (no conversion, bits are preserved).
instruct MoveD2L_reg_reg(mRegL dst, regD src) %{
  match(Set dst (MoveD2L src));
  effect(DEF dst, USE src);
  ins_cost(85);
  format %{ "MoveD2L $dst, $src @ MoveD2L_reg_reg" %}
  ins_encode %{
    FloatRegister src_reg = as_FloatRegister($src$$reg);
    Register      dst_reg = as_Register($dst$$reg);

    __ dmfc1(dst_reg, src_reg);
  %}
  ins_pipe( pipe_slow );
%}
// MoveL2D: raw bit move of a long's 64 bits from a GPR into an FPU
// register via dmtc1 (no conversion, bits are preserved).
instruct MoveL2D_reg_reg(regD dst, mRegL src) %{
  match(Set dst (MoveL2D src));
  effect(DEF dst, USE src);
  ins_cost(85);
  format %{ "MoveL2D $dst, $src @ MoveL2D_reg_reg" %}
  ins_encode %{
    Register      src_reg = as_Register($src$$reg);
    FloatRegister dst_reg = as_FloatRegister($dst$$reg);

    __ dmtc1(src_reg, dst_reg);
  %}
  ins_pipe( pipe_slow );
%}
//----------Conditional Move---------------------------------------------------
// Conditional move
// CMoveI under a signed int compare: dst = src iff (tmp1 cop tmp2),
// otherwise dst keeps its old value.  Branch-free: the condition is
// materialized in AT (clobbered) and movz/movn performs the select.
instruct cmovI_cmpI_reg_reg(mRegI dst, mRegI src, mRegI tmp1, mRegI tmp2, cmpOp cop ) %{
  match(Set dst (CMoveI (Binary cop (CmpI tmp1 tmp2)) (Binary dst src)));
  ins_cost(80);
  format %{
    "CMP$cop $tmp1, $tmp2\t @cmovI_cmpI_reg_reg\n"
    "\tCMOV $dst,$src \t @cmovI_cmpI_reg_reg"
  %}

  ins_encode %{
    Register op1 = $tmp1$$Register;
    Register op2 = $tmp2$$Register;
    Register dst = $dst$$Register;
    Register src = $src$$Register;
    int flag = $cop$$cmpcode;

    switch(flag)
    {
      case 0x01: //equal
        __ subu32(AT, op1, op2);
        __ movz(dst, src, AT);   // move when AT == 0, i.e. op1 == op2
        break;

      case 0x02: //not_equal
        __ subu32(AT, op1, op2);
        __ movn(dst, src, AT);   // move when AT != 0
        break;

      case 0x03: //great
        __ slt(AT, op2, op1);
        __ movn(dst, src, AT);
        break;

      case 0x04: //great_equal
        __ slt(AT, op1, op2);
        __ movz(dst, src, AT);
        break;

      case 0x05: //less
        __ slt(AT, op1, op2);
        __ movn(dst, src, AT);
        break;

      case 0x06: //less_equal
        __ slt(AT, op2, op1);
        __ movz(dst, src, AT);
        break;

      default:
        Unimplemented();
    }
  %}

  ins_pipe( pipe_slow );
%}
// CMoveI under an unsigned pointer compare: dst = src iff
// (tmp1 cop tmp2) as unsigned 64-bit values; dst untouched otherwise.
// Condition materialized in AT (clobbered) via full-width subu/sltu,
// then selected with movz/movn.
instruct cmovI_cmpP_reg_reg(mRegI dst, mRegI src, mRegP tmp1, mRegP tmp2, cmpOpU cop ) %{
  match(Set dst (CMoveI (Binary cop (CmpP tmp1 tmp2)) (Binary dst src)));
  ins_cost(80);
  format %{
    "CMPU$cop $tmp1,$tmp2\t @cmovI_cmpP_reg_reg\n\t"
    "CMOV $dst,$src\t @cmovI_cmpP_reg_reg"
  %}
  ins_encode %{
    Register op1 = $tmp1$$Register;
    Register op2 = $tmp2$$Register;
    Register dst = $dst$$Register;
    Register src = $src$$Register;
    int flag = $cop$$cmpcode;

    switch(flag)
    {
      case 0x01: //equal
        __ subu(AT, op1, op2);
        __ movz(dst, src, AT);
        break;

      case 0x02: //not_equal
        __ subu(AT, op1, op2);
        __ movn(dst, src, AT);
        break;

      case 0x03: //above
        __ sltu(AT, op2, op1);
        __ movn(dst, src, AT);
        break;

      case 0x04: //above_equal
        __ sltu(AT, op1, op2);
        __ movz(dst, src, AT);
        break;

      case 0x05: //below
        __ sltu(AT, op1, op2);
        __ movn(dst, src, AT);
        break;

      case 0x06: //below_equal
        __ sltu(AT, op2, op1);
        __ movz(dst, src, AT);
        break;

      default:
        Unimplemented();
    }
  %}

  ins_pipe( pipe_slow );
%}
// CMoveI under an unsigned compressed-oop (narrow) compare: equality via
// 32-bit subu32, ordering via sltu; select with movz/movn.  AT clobbered.
instruct cmovI_cmpN_reg_reg(mRegI dst, mRegI src, mRegN tmp1, mRegN tmp2, cmpOpU cop ) %{
  match(Set dst (CMoveI (Binary cop (CmpN tmp1 tmp2)) (Binary dst src)));
  ins_cost(80);
  format %{
    "CMPU$cop $tmp1,$tmp2\t @cmovI_cmpN_reg_reg\n\t"
    "CMOV $dst,$src\t @cmovI_cmpN_reg_reg"
  %}
  ins_encode %{
    Register op1 = $tmp1$$Register;
    Register op2 = $tmp2$$Register;
    Register dst = $dst$$Register;
    Register src = $src$$Register;
    int flag = $cop$$cmpcode;

    switch(flag)
    {
      case 0x01: //equal
        __ subu32(AT, op1, op2);
        __ movz(dst, src, AT);
        break;

      case 0x02: //not_equal
        __ subu32(AT, op1, op2);
        __ movn(dst, src, AT);
        break;

      case 0x03: //above
        __ sltu(AT, op2, op1);
        __ movn(dst, src, AT);
        break;

      case 0x04: //above_equal
        __ sltu(AT, op1, op2);
        __ movz(dst, src, AT);
        break;

      case 0x05: //below
        __ sltu(AT, op1, op2);
        __ movn(dst, src, AT);
        break;

      case 0x06: //below_equal
        __ sltu(AT, op2, op1);
        __ movz(dst, src, AT);
        break;

      default:
        Unimplemented();
    }
  %}

  ins_pipe( pipe_slow );
%}
// CMoveP under an unsigned compressed-oop (narrow) compare; same
// emission pattern as cmovI_cmpN_reg_reg but the selected registers are
// pointers.  AT clobbered.
instruct cmovP_cmpN_reg_reg(mRegP dst, mRegP src, mRegN tmp1, mRegN tmp2, cmpOpU cop ) %{
  match(Set dst (CMoveP (Binary cop (CmpN tmp1 tmp2)) (Binary dst src)));
  ins_cost(80);
  format %{
    "CMPU$cop $tmp1,$tmp2\t @cmovP_cmpN_reg_reg\n\t"
    "CMOV $dst,$src\t @cmovP_cmpN_reg_reg"
  %}
  ins_encode %{
    Register op1 = $tmp1$$Register;
    Register op2 = $tmp2$$Register;
    Register dst = $dst$$Register;
    Register src = $src$$Register;
    int flag = $cop$$cmpcode;

    switch(flag)
    {
      case 0x01: //equal
        __ subu32(AT, op1, op2);
        __ movz(dst, src, AT);
        break;

      case 0x02: //not_equal
        __ subu32(AT, op1, op2);
        __ movn(dst, src, AT);
        break;

      case 0x03: //above
        __ sltu(AT, op2, op1);
        __ movn(dst, src, AT);
        break;

      case 0x04: //above_equal
        __ sltu(AT, op1, op2);
        __ movz(dst, src, AT);
        break;

      case 0x05: //below
        __ sltu(AT, op1, op2);
        __ movn(dst, src, AT);
        break;

      case 0x06: //below_equal
        __ sltu(AT, op2, op1);
        __ movz(dst, src, AT);
        break;

      default:
        Unimplemented();
    }
  %}

  ins_pipe( pipe_slow );
%}
// CMoveN under an unsigned full-width pointer compare: equality via
// 64-bit subu, ordering via sltu; select narrow-oop dst with movz/movn.
// AT clobbered.
instruct cmovN_cmpP_reg_reg(mRegN dst, mRegN src, mRegP tmp1, mRegP tmp2, cmpOpU cop ) %{
  match(Set dst (CMoveN (Binary cop (CmpP tmp1 tmp2)) (Binary dst src)));
  ins_cost(80);
  format %{
    "CMPU$cop $tmp1,$tmp2\t @cmovN_cmpP_reg_reg\n\t"
    "CMOV $dst,$src\t @cmovN_cmpP_reg_reg"
  %}
  ins_encode %{
    Register op1 = $tmp1$$Register;
    Register op2 = $tmp2$$Register;
    Register dst = $dst$$Register;
    Register src = $src$$Register;
    int flag = $cop$$cmpcode;

    switch(flag)
    {
      case 0x01: //equal
        __ subu(AT, op1, op2);
        __ movz(dst, src, AT);
        break;

      case 0x02: //not_equal
        __ subu(AT, op1, op2);
        __ movn(dst, src, AT);
        break;

      case 0x03: //above
        __ sltu(AT, op2, op1);
        __ movn(dst, src, AT);
        break;

      case 0x04: //above_equal
        __ sltu(AT, op1, op2);
        __ movz(dst, src, AT);
        break;

      case 0x05: //below
        __ sltu(AT, op1, op2);
        __ movn(dst, src, AT);
        break;

      case 0x06: //below_equal
        __ sltu(AT, op2, op1);
        __ movz(dst, src, AT);
        break;

      default:
        Unimplemented();
    }
  %}

  ins_pipe( pipe_slow );
%}
// CMoveP under a double compare: FPU condition flag is set by a c.cond.d
// compare, then movt/movf conditionally moves the pointer.
// NOTE(review): greater/greater_equal use the ORDERED compares c_ole_d /
// c_olt_d inverted via movf, so an unordered (NaN) operand DOES move;
// branchConD_reg_reg uses c_ule_d/c_ult_d + bc1f, which does NOT act on
// NaN.  Confirm the intended NaN behavior is consistent between the
// branch and cmov forms.
instruct cmovP_cmpD_reg_reg(mRegP dst, mRegP src, regD tmp1, regD tmp2, cmpOp cop ) %{
  match(Set dst (CMoveP (Binary cop (CmpD tmp1 tmp2)) (Binary dst src)));
  ins_cost(80);
  format %{
    "CMP$cop $tmp1, $tmp2\t @cmovP_cmpD_reg_reg\n"
    "\tCMOV $dst,$src \t @cmovP_cmpD_reg_reg"
  %}
  ins_encode %{
    FloatRegister reg_op1 = as_FloatRegister($tmp1$$reg);
    FloatRegister reg_op2 = as_FloatRegister($tmp2$$reg);
    Register dst = as_Register($dst$$reg);
    Register src = as_Register($src$$reg);

    int flag = $cop$$cmpcode;

    switch(flag)
    {
      case 0x01: //equal
        __ c_eq_d(reg_op1, reg_op2);
        __ movt(dst, src);
        break;
      case 0x02: //not_equal
        __ c_eq_d(reg_op1, reg_op2);
        __ movf(dst, src);
        break;
      case 0x03: //greater
        __ c_ole_d(reg_op1, reg_op2);
        __ movf(dst, src);
        break;
      case 0x04: //greater_equal
        __ c_olt_d(reg_op1, reg_op2);
        __ movf(dst, src);
        break;
      case 0x05: //less
        __ c_ult_d(reg_op1, reg_op2);
        __ movt(dst, src);
        break;
      case 0x06: //less_equal
        __ c_ule_d(reg_op1, reg_op2);
        __ movt(dst, src);
        break;
      default:
        Unimplemented();
    }
  %}

  ins_pipe( pipe_slow );
%}
// CMoveN under an unsigned narrow-oop compare; same pattern as
// cmovI_cmpN_reg_reg with narrow-oop dst/src.  AT clobbered.
instruct cmovN_cmpN_reg_reg(mRegN dst, mRegN src, mRegN tmp1, mRegN tmp2, cmpOpU cop ) %{
  match(Set dst (CMoveN (Binary cop (CmpN tmp1 tmp2)) (Binary dst src)));
  ins_cost(80);
  format %{
    "CMPU$cop $tmp1,$tmp2\t @cmovN_cmpN_reg_reg\n\t"
    "CMOV $dst,$src\t @cmovN_cmpN_reg_reg"
  %}
  ins_encode %{
    Register op1 = $tmp1$$Register;
    Register op2 = $tmp2$$Register;
    Register dst = $dst$$Register;
    Register src = $src$$Register;
    int flag = $cop$$cmpcode;

    switch(flag)
    {
      case 0x01: //equal
        __ subu32(AT, op1, op2);
        __ movz(dst, src, AT);
        break;

      case 0x02: //not_equal
        __ subu32(AT, op1, op2);
        __ movn(dst, src, AT);
        break;

      case 0x03: //above
        __ sltu(AT, op2, op1);
        __ movn(dst, src, AT);
        break;

      case 0x04: //above_equal
        __ sltu(AT, op1, op2);
        __ movz(dst, src, AT);
        break;

      case 0x05: //below
        __ sltu(AT, op1, op2);
        __ movn(dst, src, AT);
        break;

      case 0x06: //below_equal
        __ sltu(AT, op2, op1);
        __ movz(dst, src, AT);
        break;

      default:
        Unimplemented();
    }
  %}

  ins_pipe( pipe_slow );
%}
// CMoveI under an unsigned int compare.  Uses full-width subu/sltu on
// the (sign-extended) 32-bit values — the standard MIPS64 trick: with
// both operands sign-extended, 64-bit sltu yields the correct 32-bit
// unsigned ordering.  AT clobbered.
instruct cmovI_cmpU_reg_reg(mRegI dst, mRegI src, mRegI tmp1, mRegI tmp2, cmpOpU cop ) %{
  match(Set dst (CMoveI (Binary cop (CmpU tmp1 tmp2)) (Binary dst src)));
  ins_cost(80);
  format %{
    "CMPU$cop $tmp1,$tmp2\t @cmovI_cmpU_reg_reg\n\t"
    "CMOV $dst,$src\t @cmovI_cmpU_reg_reg"
  %}
  ins_encode %{
    Register op1 = $tmp1$$Register;
    Register op2 = $tmp2$$Register;
    Register dst = $dst$$Register;
    Register src = $src$$Register;
    int flag = $cop$$cmpcode;

    switch(flag)
    {
      case 0x01: //equal
        __ subu(AT, op1, op2);
        __ movz(dst, src, AT);
        break;

      case 0x02: //not_equal
        __ subu(AT, op1, op2);
        __ movn(dst, src, AT);
        break;

      case 0x03: //above
        __ sltu(AT, op2, op1);
        __ movn(dst, src, AT);
        break;

      case 0x04: //above_equal
        __ sltu(AT, op1, op2);
        __ movz(dst, src, AT);
        break;

      case 0x05: //below
        __ sltu(AT, op1, op2);
        __ movn(dst, src, AT);
        break;

      case 0x06: //below_equal
        __ sltu(AT, op2, op1);
        __ movz(dst, src, AT);
        break;

      default:
        Unimplemented();
    }
  %}

  ins_pipe( pipe_slow );
%}
// CMoveI under a signed long compare: 64-bit subu/slt materialize the
// condition in AT (clobbered); movz/movn select.
instruct cmovI_cmpL_reg_reg(mRegI dst, mRegI src, mRegL tmp1, mRegL tmp2, cmpOp cop ) %{
  match(Set dst (CMoveI (Binary cop (CmpL tmp1 tmp2)) (Binary dst src)));
  ins_cost(80);
  format %{
    "CMP$cop $tmp1, $tmp2\t @cmovI_cmpL_reg_reg\n"
    "\tCMOV $dst,$src \t @cmovI_cmpL_reg_reg"
  %}
  ins_encode %{
    Register opr1 = as_Register($tmp1$$reg);
    Register opr2 = as_Register($tmp2$$reg);
    Register dst  = $dst$$Register;
    Register src  = $src$$Register;
    int flag = $cop$$cmpcode;

    switch(flag)
    {
      case 0x01: //equal
        __ subu(AT, opr1, opr2);
        __ movz(dst, src, AT);
        break;

      case 0x02: //not_equal
        __ subu(AT, opr1, opr2);
        __ movn(dst, src, AT);
        break;

      case 0x03: //greater
        __ slt(AT, opr2, opr1);
        __ movn(dst, src, AT);
        break;

      case 0x04: //greater_equal
        __ slt(AT, opr1, opr2);
        __ movz(dst, src, AT);
        break;

      case 0x05: //less
        __ slt(AT, opr1, opr2);
        __ movn(dst, src, AT);
        break;

      case 0x06: //less_equal
        __ slt(AT, opr2, opr1);
        __ movz(dst, src, AT);
        break;

      default:
        Unimplemented();
    }
  %}

  ins_pipe( pipe_slow );
%}
// CMoveP under a signed long compare; same emission pattern as
// cmovI_cmpL_reg_reg but selecting pointer registers.  AT clobbered.
instruct cmovP_cmpL_reg_reg(mRegP dst, mRegP src, mRegL tmp1, mRegL tmp2, cmpOp cop ) %{
  match(Set dst (CMoveP (Binary cop (CmpL tmp1 tmp2)) (Binary dst src)));
  ins_cost(80);
  format %{
    "CMP$cop $tmp1, $tmp2\t @cmovP_cmpL_reg_reg\n"
    "\tCMOV $dst,$src \t @cmovP_cmpL_reg_reg"
  %}
  ins_encode %{
    Register opr1 = as_Register($tmp1$$reg);
    Register opr2 = as_Register($tmp2$$reg);
    Register dst  = $dst$$Register;
    Register src  = $src$$Register;
    int flag = $cop$$cmpcode;

    switch(flag)
    {
      case 0x01: //equal
        __ subu(AT, opr1, opr2);
        __ movz(dst, src, AT);
        break;

      case 0x02: //not_equal
        __ subu(AT, opr1, opr2);
        __ movn(dst, src, AT);
        break;

      case 0x03: //greater
        __ slt(AT, opr2, opr1);
        __ movn(dst, src, AT);
        break;

      case 0x04: //greater_equal
        __ slt(AT, opr1, opr2);
        __ movz(dst, src, AT);
        break;

      case 0x05: //less
        __ slt(AT, opr1, opr2);
        __ movn(dst, src, AT);
        break;

      case 0x06: //less_equal
        __ slt(AT, opr2, opr1);
        __ movz(dst, src, AT);
        break;

      default:
        Unimplemented();
    }
  %}

  ins_pipe( pipe_slow );
%}
// CMoveI under a double compare: c.cond.d sets the FPU condition flag,
// movt/movf selects.  not_equal deliberately inverts c_eq_d for correct
// NaN != NaN behavior (see branchConD_reg_reg).
// NOTE(review): as in cmovP_cmpD_reg_reg, greater/greater_equal use the
// ordered c_ole_d/c_olt_d inverted via movf, so NaN operands DO move —
// confirm against the NaN semantics branchConD_reg_reg implements.
instruct cmovI_cmpD_reg_reg(mRegI dst, mRegI src, regD tmp1, regD tmp2, cmpOp cop ) %{
  match(Set dst (CMoveI (Binary cop (CmpD tmp1 tmp2)) (Binary dst src)));
  ins_cost(80);
  format %{
    "CMP$cop $tmp1, $tmp2\t @cmovI_cmpD_reg_reg\n"
    "\tCMOV $dst,$src \t @cmovI_cmpD_reg_reg"
  %}
  ins_encode %{
    FloatRegister reg_op1 = as_FloatRegister($tmp1$$reg);
    FloatRegister reg_op2 = as_FloatRegister($tmp2$$reg);
    Register dst = as_Register($dst$$reg);
    Register src = as_Register($src$$reg);

    int flag = $cop$$cmpcode;

    switch(flag)
    {
      case 0x01: //equal
        __ c_eq_d(reg_op1, reg_op2);
        __ movt(dst, src);
        break;
      case 0x02: //not_equal
        // 2016/4/19 aoqi: See instruct branchConD_reg_reg. The change in
        // branchConD_reg_reg fixed a bug. It seems similar here, so I
        // made the same change.
        __ c_eq_d(reg_op1, reg_op2);
        __ movf(dst, src);
        break;
      case 0x03: //greater
        __ c_ole_d(reg_op1, reg_op2);
        __ movf(dst, src);
        break;
      case 0x04: //greater_equal
        __ c_olt_d(reg_op1, reg_op2);
        __ movf(dst, src);
        break;
      case 0x05: //less
        __ c_ult_d(reg_op1, reg_op2);
        __ movt(dst, src);
        break;
      case 0x06: //less_equal
        __ c_ule_d(reg_op1, reg_op2);
        __ movt(dst, src);
        break;
      default:
        Unimplemented();
    }
  %}

  ins_pipe( pipe_slow );
%}
// CMoveP under an unsigned full-width pointer compare: 64-bit subu/sltu
// materialize the condition in AT (clobbered); movz/movn select.
instruct cmovP_cmpP_reg_reg(mRegP dst, mRegP src, mRegP tmp1, mRegP tmp2, cmpOpU cop ) %{
  match(Set dst (CMoveP (Binary cop (CmpP tmp1 tmp2)) (Binary dst src)));
  ins_cost(80);
  format %{
    "CMPU$cop $tmp1,$tmp2\t @cmovP_cmpP_reg_reg\n\t"
    "CMOV $dst,$src\t @cmovP_cmpP_reg_reg"
  %}
  ins_encode %{
    Register op1 = $tmp1$$Register;
    Register op2 = $tmp2$$Register;
    Register dst = $dst$$Register;
    Register src = $src$$Register;
    int flag = $cop$$cmpcode;

    switch(flag)
    {
      case 0x01: //equal
        __ subu(AT, op1, op2);
        __ movz(dst, src, AT);
        break;

      case 0x02: //not_equal
        __ subu(AT, op1, op2);
        __ movn(dst, src, AT);
        break;

      case 0x03: //above
        __ sltu(AT, op2, op1);
        __ movn(dst, src, AT);
        break;

      case 0x04: //above_equal
        __ sltu(AT, op1, op2);
        __ movz(dst, src, AT);
        break;

      case 0x05: //below
        __ sltu(AT, op1, op2);
        __ movn(dst, src, AT);
        break;

      case 0x06: //below_equal
        __ sltu(AT, op2, op1);
        __ movz(dst, src, AT);
        break;

      default:
        Unimplemented();
    }
  %}

  ins_pipe( pipe_slow );
%}
// CMoveP under a SIGNED int compare (cmpOp operand, slt emission).
// Condition in AT (clobbered); movz/movn select.
// NOTE(review): the case comments previously said above/above_equal/
// below/below_equal (unsigned terminology) although the emission uses
// signed slt with a signed cmpOp — comments corrected to the signed
// names used by cmovI_cmpI_reg_reg.
instruct cmovP_cmpI_reg_reg(mRegP dst, mRegP src, mRegI tmp1, mRegI tmp2, cmpOp cop ) %{
  match(Set dst (CMoveP (Binary cop (CmpI tmp1 tmp2)) (Binary dst src)));
  ins_cost(80);
  format %{
    "CMP$cop $tmp1,$tmp2\t @cmovP_cmpI_reg_reg\n\t"
    "CMOV $dst,$src\t @cmovP_cmpI_reg_reg"
  %}
  ins_encode %{
    Register op1 = $tmp1$$Register;
    Register op2 = $tmp2$$Register;
    Register dst = $dst$$Register;
    Register src = $src$$Register;
    int flag = $cop$$cmpcode;

    switch(flag)
    {
      case 0x01: //equal
        __ subu32(AT, op1, op2);
        __ movz(dst, src, AT);
        break;

      case 0x02: //not_equal
        __ subu32(AT, op1, op2);
        __ movn(dst, src, AT);
        break;

      case 0x03: //greater (signed)
        __ slt(AT, op2, op1);
        __ movn(dst, src, AT);
        break;

      case 0x04: //greater_equal (signed)
        __ slt(AT, op1, op2);
        __ movz(dst, src, AT);
        break;

      case 0x05: //less (signed)
        __ slt(AT, op1, op2);
        __ movn(dst, src, AT);
        break;

      case 0x06: //less_equal (signed)
        __ slt(AT, op2, op1);
        __ movz(dst, src, AT);
        break;

      default:
        Unimplemented();
    }
  %}

  ins_pipe( pipe_slow );
%}
// CMoveN under a SIGNED int compare; same pattern as
// cmovP_cmpI_reg_reg with narrow-oop dst/src.  AT clobbered.
// NOTE(review): case comments corrected from unsigned (above/below) to
// the signed names matching the slt emission and the signed cmpOp.
instruct cmovN_cmpI_reg_reg(mRegN dst, mRegN src, mRegI tmp1, mRegI tmp2, cmpOp cop ) %{
  match(Set dst (CMoveN (Binary cop (CmpI tmp1 tmp2)) (Binary dst src)));
  ins_cost(80);
  format %{
    "CMP$cop $tmp1,$tmp2\t @cmovN_cmpI_reg_reg\n\t"
    "CMOV $dst,$src\t @cmovN_cmpI_reg_reg"
  %}
  ins_encode %{
    Register op1 = $tmp1$$Register;
    Register op2 = $tmp2$$Register;
    Register dst = $dst$$Register;
    Register src = $src$$Register;
    int flag = $cop$$cmpcode;

    switch(flag)
    {
      case 0x01: //equal
        __ subu32(AT, op1, op2);
        __ movz(dst, src, AT);
        break;

      case 0x02: //not_equal
        __ subu32(AT, op1, op2);
        __ movn(dst, src, AT);
        break;

      case 0x03: //greater (signed)
        __ slt(AT, op2, op1);
        __ movn(dst, src, AT);
        break;

      case 0x04: //greater_equal (signed)
        __ slt(AT, op1, op2);
        __ movz(dst, src, AT);
        break;

      case 0x05: //less (signed)
        __ slt(AT, op1, op2);
        __ movn(dst, src, AT);
        break;

      case 0x06: //less_equal (signed)
        __ slt(AT, op2, op1);
        __ movz(dst, src, AT);
        break;

      default:
        Unimplemented();
    }
  %}

  ins_pipe( pipe_slow );
%}
// CMoveL under a signed int compare: 32-bit subu32 for equality, slt for
// ordering; movz/movn select the long registers.  AT clobbered.
instruct cmovL_cmpI_reg_reg(mRegL dst, mRegL src, mRegI tmp1, mRegI tmp2, cmpOp cop ) %{
  match(Set dst (CMoveL (Binary cop (CmpI tmp1 tmp2)) (Binary dst src)));
  ins_cost(80);
  format %{
    "CMP$cop $tmp1, $tmp2\t @cmovL_cmpI_reg_reg\n"
    "\tCMOV $dst,$src \t @cmovL_cmpI_reg_reg"
  %}

  ins_encode %{
    Register op1 = $tmp1$$Register;
    Register op2 = $tmp2$$Register;
    Register dst = as_Register($dst$$reg);
    Register src = as_Register($src$$reg);
    int flag = $cop$$cmpcode;

    switch(flag)
    {
      case 0x01: //equal
        __ subu32(AT, op1, op2);
        __ movz(dst, src, AT);
        break;

      case 0x02: //not_equal
        __ subu32(AT, op1, op2);
        __ movn(dst, src, AT);
        break;

      case 0x03: //great
        __ slt(AT, op2, op1);
        __ movn(dst, src, AT);
        break;

      case 0x04: //great_equal
        __ slt(AT, op1, op2);
        __ movz(dst, src, AT);
        break;

      case 0x05: //less
        __ slt(AT, op1, op2);
        __ movn(dst, src, AT);
        break;

      case 0x06: //less_equal
        __ slt(AT, op2, op1);
        __ movz(dst, src, AT);
        break;

      default:
        Unimplemented();
    }
  %}

  ins_pipe( pipe_slow );
%}
// Conditional move of a long keyed on a 64-bit signed long compare:
//   dst = (tmp1 <cop> tmp2) ? src : dst
instruct cmovL_cmpL_reg_reg(mRegL dst, mRegL src, mRegL tmp1, mRegL tmp2, cmpOp cop ) %{
  match(Set dst (CMoveL (Binary cop (CmpL tmp1 tmp2)) (Binary dst src)));
  ins_cost(80);
  format %{
             "CMP$cop $tmp1, $tmp2\t @cmovL_cmpL_reg_reg\n"
             "\tCMOV $dst,$src \t @cmovL_cmpL_reg_reg"
         %}
  ins_encode %{
    Register opr1 = as_Register($tmp1$$reg);
    Register opr2 = as_Register($tmp2$$reg);
    Register dst  = as_Register($dst$$reg);
    Register src  = as_Register($src$$reg);
    int flag = $cop$$cmpcode;

    // subu (64-bit subtract) is only used as a zero/non-zero equality
    // test, so its overflow behavior is harmless here; ordering uses slt,
    // which compares the full 64-bit registers. movz/movn copy src into
    // dst when AT is zero / non-zero.
    switch(flag)
    {
      case 0x01: //equal
        __ subu(AT, opr1, opr2);
        __ movz(dst, src, AT);
        break;

      case 0x02: //not_equal
        __ subu(AT, opr1, opr2);
        __ movn(dst, src, AT);
        break;

      case 0x03: //greater
        __ slt(AT, opr2, opr1);
        __ movn(dst, src, AT);
        break;

      case 0x04: //greater_equal
        __ slt(AT, opr1, opr2);
        __ movz(dst, src, AT);
        break;

      case 0x05: //less
        __ slt(AT, opr1, opr2);
        __ movn(dst, src, AT);
        break;

      case 0x06: //less_equal
        __ slt(AT, opr2, opr1);
        __ movz(dst, src, AT);
        break;

      default:
          Unimplemented();
    }
  %}

  ins_pipe( pipe_slow );
%}
// Conditional move of a long keyed on a narrow-oop (unsigned) compare:
//   dst = (tmp1 <cop> tmp2) ? src : dst
instruct cmovL_cmpN_reg_reg(mRegL dst, mRegL src, mRegN tmp1, mRegN tmp2, cmpOpU cop ) %{
  match(Set dst (CMoveL (Binary cop (CmpN tmp1 tmp2)) (Binary dst src)));
  ins_cost(80);
  format %{
             "CMPU$cop $tmp1,$tmp2\t @cmovL_cmpN_reg_reg\n\t"
             "CMOV $dst,$src\t @cmovL_cmpN_reg_reg"
         %}
  ins_encode %{
    Register op1 = $tmp1$$Register;
    Register op2 = $tmp2$$Register;
    Register dst = $dst$$Register;
    Register src = $src$$Register;
    int flag = $cop$$cmpcode;

    // Narrow oops compare unsigned, hence sltu (unsigned less-than) for
    // the ordered relations; equality again goes through subu32 + movz/movn.
    switch(flag)
    {
      case 0x01: //equal
        __ subu32(AT, op1, op2);
        __ movz(dst, src, AT);
        break;

      case 0x02: //not_equal
        __ subu32(AT, op1, op2);
        __ movn(dst, src, AT);
        break;

      case 0x03: //above
        __ sltu(AT, op2, op1);
        __ movn(dst, src, AT);
        break;

      case 0x04: //above_equal
        __ sltu(AT, op1, op2);
        __ movz(dst, src, AT);
        break;

      case 0x05: //below
        __ sltu(AT, op1, op2);
        __ movn(dst, src, AT);
        break;

      case 0x06: //below_equal
        __ sltu(AT, op2, op1);
        __ movz(dst, src, AT);
        break;

      default:
          Unimplemented();
    }
  %}

  ins_pipe( pipe_slow );
%}
// Conditional move of a long keyed on a double compare:
//   dst = (tmp1 <cop> tmp2) ? src : dst
instruct cmovL_cmpD_reg_reg(mRegL dst, mRegL src, regD tmp1, regD tmp2, cmpOp cop ) %{
  match(Set dst (CMoveL (Binary cop (CmpD tmp1 tmp2)) (Binary dst src)));
  ins_cost(80);
  format %{
             "CMP$cop $tmp1, $tmp2\t @cmovL_cmpD_reg_reg\n"
             "\tCMOV $dst,$src \t @cmovL_cmpD_reg_reg"
         %}
  ins_encode %{
    FloatRegister reg_op1 = as_FloatRegister($tmp1$$reg);
    FloatRegister reg_op2 = as_FloatRegister($tmp2$$reg);
    Register dst = as_Register($dst$$reg);
    Register src = as_Register($src$$reg);

    int flag = $cop$$cmpcode;

    // c.cond.d sets the FPU condition flag; movt/movf copy src into the
    // integer dst when the flag is true/false. Each case pairs a compare
    // variant with movt or movf so the taken/not-taken sense matches the
    // relation; the ordered/unordered (o/u) variants encode the intended
    // NaN behavior -- NOTE(review): confirm against MIPS c.cond.fmt
    // unordered semantics.
    switch(flag)
    {
      case 0x01: //equal
        __ c_eq_d(reg_op1, reg_op2);
        __ movt(dst, src);
        break;
      case 0x02: //not_equal
        __ c_eq_d(reg_op1, reg_op2);
        __ movf(dst, src);
        break;
      case 0x03: //greater
        __ c_ole_d(reg_op1, reg_op2);
        __ movf(dst, src);
        break;
      case 0x04: //greater_equal
        __ c_olt_d(reg_op1, reg_op2);
        __ movf(dst, src);
        break;
      case 0x05: //less
        __ c_ult_d(reg_op1, reg_op2);
        __ movt(dst, src);
        break;
      case 0x06: //less_equal
        __ c_ule_d(reg_op1, reg_op2);
        __ movt(dst, src);
        break;
      default:
          Unimplemented();
    }
  %}

  ins_pipe( pipe_slow );
%}
// Conditional move of a double keyed on a double compare:
//   dst = (tmp1 <cop> tmp2) ? src : dst
// The move target is an FP register, so instead of movt/movf each case
// branches over the mov_d when the condition does not hold.
instruct cmovD_cmpD_reg_reg(regD dst, regD src, regD tmp1, regD tmp2, cmpOp cop ) %{
  match(Set dst (CMoveD (Binary cop (CmpD tmp1 tmp2)) (Binary dst src)));
  ins_cost(200);
  format %{
             "CMP$cop $tmp1, $tmp2\t @cmovD_cmpD_reg_reg\n"
             "\tCMOV $dst,$src \t @cmovD_cmpD_reg_reg"
         %}
  ins_encode %{
    FloatRegister reg_op1 = as_FloatRegister($tmp1$$reg);
    FloatRegister reg_op2 = as_FloatRegister($tmp2$$reg);
    FloatRegister dst = as_FloatRegister($dst$$reg);
    FloatRegister src = as_FloatRegister($src$$reg);

    int flag = $cop$$cmpcode;

    Label L;

    // Pattern per case: FP compare sets the condition flag, bc1f/bc1t
    // skips the move when the move must NOT happen, the nop fills the
    // branch delay slot, and mov_d performs the conditional copy.
    switch(flag)
    {
      case 0x01: //equal
        __ c_eq_d(reg_op1, reg_op2);
        __ bc1f(L);
        __ nop();
        __ mov_d(dst, src);
        __ bind(L);
        break;
      case 0x02: //not_equal
        // 2016/4/19 aoqi: See instruct branchConD_reg_reg. The change in
        // branchConD_reg_reg fixed a bug. It seems similar here, so I made
        // the same change (branch on the flag being TRUE, i.e. equal,
        // to skip the move).
        __ c_eq_d(reg_op1, reg_op2);
        __ bc1t(L);
        __ nop();
        __ mov_d(dst, src);
        __ bind(L);
        break;
      case 0x03: //greater
        __ c_ole_d(reg_op1, reg_op2);
        __ bc1t(L);
        __ nop();
        __ mov_d(dst, src);
        __ bind(L);
        break;
      case 0x04: //greater_equal
        __ c_olt_d(reg_op1, reg_op2);
        __ bc1t(L);
        __ nop();
        __ mov_d(dst, src);
        __ bind(L);
        break;
      case 0x05: //less
        __ c_ult_d(reg_op1, reg_op2);
        __ bc1f(L);
        __ nop();
        __ mov_d(dst, src);
        __ bind(L);
        break;
      case 0x06: //less_equal
        __ c_ule_d(reg_op1, reg_op2);
        __ bc1f(L);
        __ nop();
        __ mov_d(dst, src);
        __ bind(L);
        break;
      default:
          Unimplemented();
    }
  %}

  ins_pipe( pipe_slow );
%}
// Conditional move of a float keyed on a 32-bit signed integer compare:
//   dst = (tmp1 <cop> tmp2) ? src : dst
// FP destination, integer condition: branch over the mov_s when the
// condition is false.
instruct cmovF_cmpI_reg_reg(regF dst, regF src, mRegI tmp1, mRegI tmp2, cmpOp cop ) %{
  match(Set dst (CMoveF (Binary cop (CmpI tmp1 tmp2)) (Binary dst src)));
  ins_cost(200);
  format %{
             "CMP$cop $tmp1, $tmp2\t @cmovF_cmpI_reg_reg\n"
             "\tCMOV $dst, $src \t @cmovF_cmpI_reg_reg"
         %}

  ins_encode %{
    Register op1 = $tmp1$$Register;
    Register op2 = $tmp2$$Register;
    FloatRegister dst = as_FloatRegister($dst$$reg);
    FloatRegister src = as_FloatRegister($src$$reg);
    int flag = $cop$$cmpcode;
    Label L;

    // Each case branches to L (skipping the move) on the INVERSE of the
    // requested relation; the nop fills the branch delay slot.
    switch(flag)
    {
      case 0x01: //equal
        __ bne(op1, op2, L);
        __ nop();
        __ mov_s(dst, src);
        __ bind(L);
        break;
      case 0x02: //not_equal
        __ beq(op1, op2, L);
        __ nop();
        __ mov_s(dst, src);
        __ bind(L);
        break;
      case 0x03: //greater
        __ slt(AT, op2, op1);
        __ beq(AT, R0, L);
        __ nop();
        __ mov_s(dst, src);
        __ bind(L);
        break;
      case 0x04: //greater_equal
        __ slt(AT, op1, op2);
        __ bne(AT, R0, L);
        __ nop();
        __ mov_s(dst, src);
        __ bind(L);
        break;
      case 0x05: //less
        __ slt(AT, op1, op2);
        __ beq(AT, R0, L);
        __ nop();
        __ mov_s(dst, src);
        __ bind(L);
        break;
      case 0x06: //less_equal
        __ slt(AT, op2, op1);
        __ bne(AT, R0, L);
        __ nop();
        __ mov_s(dst, src);
        __ bind(L);
        break;
      default:
          Unimplemented();
    }
  %}

  ins_pipe( pipe_slow );
%}
// Conditional move of a double keyed on a 32-bit signed integer compare:
//   dst = (tmp1 <cop> tmp2) ? src : dst
// Same branch-over-move pattern as cmovF_cmpI_reg_reg, with mov_d.
instruct cmovD_cmpI_reg_reg(regD dst, regD src, mRegI tmp1, mRegI tmp2, cmpOp cop ) %{
  match(Set dst (CMoveD (Binary cop (CmpI tmp1 tmp2)) (Binary dst src)));
  ins_cost(200);
  format %{
             "CMP$cop $tmp1, $tmp2\t @cmovD_cmpI_reg_reg\n"
             "\tCMOV $dst, $src \t @cmovD_cmpI_reg_reg"
         %}

  ins_encode %{
    Register op1 = $tmp1$$Register;
    Register op2 = $tmp2$$Register;
    FloatRegister dst = as_FloatRegister($dst$$reg);
    FloatRegister src = as_FloatRegister($src$$reg);
    int flag = $cop$$cmpcode;
    Label L;

    // Branch to L (skip the move) on the inverse relation; nop fills the
    // branch delay slot.
    switch(flag)
    {
      case 0x01: //equal
        __ bne(op1, op2, L);
        __ nop();
        __ mov_d(dst, src);
        __ bind(L);
        break;
      case 0x02: //not_equal
        __ beq(op1, op2, L);
        __ nop();
        __ mov_d(dst, src);
        __ bind(L);
        break;
      case 0x03: //greater
        __ slt(AT, op2, op1);
        __ beq(AT, R0, L);
        __ nop();
        __ mov_d(dst, src);
        __ bind(L);
        break;
      case 0x04: //greater_equal
        __ slt(AT, op1, op2);
        __ bne(AT, R0, L);
        __ nop();
        __ mov_d(dst, src);
        __ bind(L);
        break;
      case 0x05: //less
        __ slt(AT, op1, op2);
        __ beq(AT, R0, L);
        __ nop();
        __ mov_d(dst, src);
        __ bind(L);
        break;
      case 0x06: //less_equal
        __ slt(AT, op2, op1);
        __ bne(AT, R0, L);
        __ nop();
        __ mov_d(dst, src);
        __ bind(L);
        break;
      default:
          Unimplemented();
    }
  %}

  ins_pipe( pipe_slow );
%}
// Conditional move of a double keyed on a pointer compare:
//   dst = (tmp1 <cop> tmp2) ? src : dst
// NOTE(review): the ordered relations use signed slt on pointers; for
// CmpP only eq/ne are normally generated, so this presumably never
// matters -- confirm.
instruct cmovD_cmpP_reg_reg(regD dst, regD src, mRegP tmp1, mRegP tmp2, cmpOp cop ) %{
  match(Set dst (CMoveD (Binary cop (CmpP tmp1 tmp2)) (Binary dst src)));
  ins_cost(200);
  format %{
             "CMP$cop $tmp1, $tmp2\t @cmovD_cmpP_reg_reg\n"
             "\tCMOV $dst, $src \t @cmovD_cmpP_reg_reg"
         %}

  ins_encode %{
    Register op1 = $tmp1$$Register;
    Register op2 = $tmp2$$Register;
    FloatRegister dst = as_FloatRegister($dst$$reg);
    FloatRegister src = as_FloatRegister($src$$reg);
    int flag = $cop$$cmpcode;
    Label L;

    // Branch to L (skip the move) on the inverse relation; nop fills the
    // branch delay slot.
    switch(flag)
    {
      case 0x01: //equal
        __ bne(op1, op2, L);
        __ nop();
        __ mov_d(dst, src);
        __ bind(L);
        break;
      case 0x02: //not_equal
        __ beq(op1, op2, L);
        __ nop();
        __ mov_d(dst, src);
        __ bind(L);
        break;
      case 0x03: //greater
        __ slt(AT, op2, op1);
        __ beq(AT, R0, L);
        __ nop();
        __ mov_d(dst, src);
        __ bind(L);
        break;
      case 0x04: //greater_equal
        __ slt(AT, op1, op2);
        __ bne(AT, R0, L);
        __ nop();
        __ mov_d(dst, src);
        __ bind(L);
        break;
      case 0x05: //less
        __ slt(AT, op1, op2);
        __ beq(AT, R0, L);
        __ nop();
        __ mov_d(dst, src);
        __ bind(L);
        break;
      case 0x06: //less_equal
        __ slt(AT, op2, op1);
        __ bne(AT, R0, L);
        __ nop();
        __ mov_d(dst, src);
        __ bind(L);
        break;
      default:
          Unimplemented();
    }
  %}

  ins_pipe( pipe_slow );
%}
//FIXME
// Conditional move of an int keyed on a float compare:
//   dst = (tmp1 <cop> tmp2) ? src : dst
instruct cmovI_cmpF_reg_reg(mRegI dst, mRegI src, regF tmp1, regF tmp2, cmpOp cop ) %{
  match(Set dst (CMoveI (Binary cop (CmpF tmp1 tmp2)) (Binary dst src)));
  ins_cost(80);
  format %{
             "CMP$cop $tmp1, $tmp2\t @cmovI_cmpF_reg_reg\n"
             "\tCMOV $dst,$src \t @cmovI_cmpF_reg_reg"
         %}

  ins_encode %{
    FloatRegister reg_op1 = $tmp1$$FloatRegister;
    FloatRegister reg_op2 = $tmp2$$FloatRegister;
    Register dst = $dst$$Register;
    Register src = $src$$Register;
    int flag = $cop$$cmpcode;

    // c.cond.s sets the FPU condition flag; movt/movf conditionally copy
    // src into the integer dst on that flag. The o/u compare variants
    // encode the intended NaN behavior -- NOTE(review): confirm against
    // MIPS c.cond.fmt unordered semantics.
    switch(flag)
    {
      case 0x01: //equal
        __ c_eq_s(reg_op1, reg_op2);
        __ movt(dst, src);
        break;
      case 0x02: //not_equal
        __ c_eq_s(reg_op1, reg_op2);
        __ movf(dst, src);
        break;
      case 0x03: //greater
        __ c_ole_s(reg_op1, reg_op2);
        __ movf(dst, src);
        break;
      case 0x04: //greater_equal
        __ c_olt_s(reg_op1, reg_op2);
        __ movf(dst, src);
        break;
      case 0x05: //less
        __ c_ult_s(reg_op1, reg_op2);
        __ movt(dst, src);
        break;
      case 0x06: //less_equal
        __ c_ule_s(reg_op1, reg_op2);
        __ movt(dst, src);
        break;
      default:
          Unimplemented();
    }
  %}
  ins_pipe( pipe_slow );
%}
// Conditional move of a float keyed on a float compare:
//   dst = (tmp1 <cop> tmp2) ? src : dst
// FP destination, so branch over mov_s on the FPU condition flag.
instruct cmovF_cmpF_reg_reg(regF dst, regF src, regF tmp1, regF tmp2, cmpOp cop ) %{
  match(Set dst (CMoveF (Binary cop (CmpF tmp1 tmp2)) (Binary dst src)));
  ins_cost(200);
  format %{
             "CMP$cop $tmp1, $tmp2\t @cmovF_cmpF_reg_reg\n"
             "\tCMOV $dst,$src \t @cmovF_cmpF_reg_reg"
         %}

  ins_encode %{
    FloatRegister reg_op1 = $tmp1$$FloatRegister;
    FloatRegister reg_op2 = $tmp2$$FloatRegister;
    FloatRegister dst = $dst$$FloatRegister;
    FloatRegister src = $src$$FloatRegister;
    Label L;
    int flag = $cop$$cmpcode;

    // Pattern per case: c.cond.s sets the FPU flag, bc1f/bc1t skips the
    // move when it must not happen, nop fills the branch delay slot.
    switch(flag)
    {
      case 0x01: //equal
        __ c_eq_s(reg_op1, reg_op2);
        __ bc1f(L);
        __ nop();
        __ mov_s(dst, src);
        __ bind(L);
        break;
      case 0x02: //not_equal
        __ c_eq_s(reg_op1, reg_op2);
        __ bc1t(L);
        __ nop();
        __ mov_s(dst, src);
        __ bind(L);
        break;
      case 0x03: //greater
        __ c_ole_s(reg_op1, reg_op2);
        __ bc1t(L);
        __ nop();
        __ mov_s(dst, src);
        __ bind(L);
        break;
      case 0x04: //greater_equal
        __ c_olt_s(reg_op1, reg_op2);
        __ bc1t(L);
        __ nop();
        __ mov_s(dst, src);
        __ bind(L);
        break;
      case 0x05: //less
        __ c_ult_s(reg_op1, reg_op2);
        __ bc1f(L);
        __ nop();
        __ mov_s(dst, src);
        __ bind(L);
        break;
      case 0x06: //less_equal
        __ c_ule_s(reg_op1, reg_op2);
        __ bc1f(L);
        __ nop();
        __ mov_s(dst, src);
        __ bind(L);
        break;
      default:
          Unimplemented();
    }
  %}
  ins_pipe( pipe_slow );
%}
// Manifest a CmpL result in an integer register. Very painful.
// This is the test to avoid.
// dst = -1 / 0 / 1  as  src1 < / == / > src2 (signed 64-bit compare).
instruct cmpL3_reg_reg(mRegI dst, mRegL src1, mRegL src2) %{
  match(Set dst (CmpL3 src1 src2));
  ins_cost(1000);
  format %{ "cmpL3 $dst, $src1, $src2 @ cmpL3_reg_reg" %}
  ins_encode %{
    Register opr1 = as_Register($src1$$reg);
    Register opr2 = as_Register($src2$$reg);
    Register dst  = as_Register($dst$$reg);

    // The previous sequence took the sign of (src1 - src2), which is wrong
    // when the 64-bit subtraction overflows (e.g. min_jlong vs 1 compared
    // as "greater"). Use explicit signed compares instead and compute
    // dst = (src1 > src2) - (src1 < src2), branch-free. This is also safe
    // when dst aliases an input: dst is only written after both inputs
    // have been read (reads precede the write within the second slt).
    __ slt(AT,  opr1, opr2);   // AT  = (src1 < src2) ? 1 : 0
    __ slt(dst, opr2, opr1);   // dst = (src1 > src2) ? 1 : 0
    __ subu(dst, dst, AT);     // dst = 1, 0 or -1
  %}
  ins_pipe( pipe_slow );
%}
//
// less_result    = -1
// greater_result =  1
// equal_result   =  0
// nan_result     = -1
//
instruct cmpF3_reg_reg(mRegI dst, regF src1, regF src2) %{
  match(Set dst (CmpF3 src1 src2));
  ins_cost(1000);
  format %{ "cmpF3 $dst, $src1, $src2 @ cmpF3_reg_reg" %}
  ins_encode %{
    FloatRegister src1 = as_FloatRegister($src1$$reg);
    FloatRegister src2 = as_FloatRegister($src2$$reg);
    Register dst = as_Register($dst$$reg);

    Label Done;

    // c_ult_s is true for "less than OR unordered", so both the less and
    // the NaN case branch to Done with dst preloaded to -1 in the branch
    // delay slot.
    __ c_ult_s(src1, src2);
    __ bc1t(Done);
    __ delayed()->daddiu(dst, R0, -1);

    // Otherwise the result is 1 (greater) unless equal, in which case
    // movt overwrites it with 0.
    __ c_eq_s(src1, src2);
    __ move(dst, 1);
    __ movt(dst, R0);

    __ bind(Done);
  %}
  ins_pipe( pipe_slow );
%}
// Manifest a CmpD result in an integer register:
// dst = -1 (less or NaN), 0 (equal), 1 (greater).
instruct cmpD3_reg_reg(mRegI dst, regD src1, regD src2) %{
  match(Set dst (CmpD3 src1 src2));
  ins_cost(1000);
  format %{ "cmpD3 $dst, $src1, $src2 @ cmpD3_reg_reg" %}
  ins_encode %{
    FloatRegister src1 = as_FloatRegister($src1$$reg);
    FloatRegister src2 = as_FloatRegister($src2$$reg);
    Register dst = as_Register($dst$$reg);

    Label Done;

    // c_ult_d is true for "less than OR unordered": both cases branch out
    // with dst preloaded to -1 in the branch delay slot.
    __ c_ult_d(src1, src2);
    __ bc1t(Done);
    __ delayed()->daddiu(dst, R0, -1);

    // Otherwise 1 (greater), overwritten with 0 by movt when equal.
    __ c_eq_d(src1, src2);
    __ move(dst, 1);
    __ movt(dst, R0);

    __ bind(Done);
  %}
  ins_pipe( pipe_slow );
%}
// Zero-fill an array region. $base points at the start; per the x86
// convention noted below, $cnt is the number of doublewords (8-byte
// words) to clear, not bytes.
instruct clear_array(mRegL cnt, mRegP base, Universe dummy) %{
  match(Set dummy (ClearArray cnt base));
  format %{ "CLEAR_ARRAY base = $base, cnt = $cnt # Clear doublewords" %}
  ins_encode %{
    Register base = $base$$Register;
    Register num  = $cnt$$Register;
    Label Loop, done;

    /* 2012/9/21 Jin: according to X86, $cnt is calculated by doublewords (8 bytes) */
    __ move(T9, num);   /* T9 = number of doublewords remaining */
    __ beq(T9, R0, done);   // nothing to clear
    __ nop();
    __ move(AT, base);  /* AT = current store address */

    // Store 8 zero bytes per iteration, advancing by wordSize.
    __ bind(Loop);
    __ sd(R0, Address(AT, 0));
    __ daddi(AT, AT, wordSize);
    __ daddi(T9, T9, -1);
    __ bne(T9, R0, Loop);
    __ delayed()->nop();
    __ bind(done);
  %}
  ins_pipe( pipe_slow );
%}
// Lexicographic comparison of two UTF-16 char sequences.
// result < 0, == 0, > 0 as str1 sorts before, equal to, after str2.
instruct string_compare(a4_RegP str1, mA5RegI cnt1, a6_RegP str2, mA7RegI cnt2, no_Ax_mRegI result) %{
  match(Set result (StrComp (Binary str1 cnt1) (Binary str2 cnt2)));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2);

  format %{ "String Compare $str1[len: $cnt1], $str2[len: $cnt2] -> $result   @ string_compare" %}
  ins_encode %{
    // Get the first character position in both strings
    //         [8] char array, [12] offset, [16] count
    Register str1   = $str1$$Register;
    Register str2   = $str2$$Register;
    Register cnt1   = $cnt1$$Register;
    Register cnt2   = $cnt2$$Register;
    Register result = $result$$Register;

    Label L, Loop, haveResult, done;

    // compute the and difference of lengths (in result)
    __ subu(result, cnt1, cnt2); // result holds the difference of two lengths

    // compute the shorter length (in cnt1)
    __ slt(AT, cnt2, cnt1);
    __ movn(cnt1, cnt2, AT);

    // Now the shorter length is in cnt1 and cnt2 can be used as a tmp register
    // Walk the common prefix one char (lhu = 16 bits) at a time. The lhu
    // of str1's char sits in the delay slot of the loop-exit branch, and
    // the pointer/counter updates sit in the delay slots of the inner
    // branches, so they execute on both paths by design.
    __ bind(Loop);                        // Loop begin
    __ beq(cnt1, R0, done);               // ran out of common prefix:
                                          // result is the length difference
    __ delayed()->lhu(AT, str1, 0);;

    // compare current character
    __ lhu(cnt2, str2, 0);
    __ bne(AT, cnt2, haveResult);
    __ delayed()->addi(str1, str1, 2);
    __ addi(str2, str2, 2);
    __ b(Loop);
    __ delayed()->addi(cnt1, cnt1, -1);   // Loop end

    // First differing chars decide the result.
    __ bind(haveResult);
    __ subu(result, AT, cnt2);

    __ bind(done);
  %}

  ins_pipe( pipe_slow );
%}
// intrinsic optimization
// Char-by-char equality of two UTF-16 sequences of length $cnt.
// result = 1 when equal, 0 otherwise.
instruct string_equals(a4_RegP str1, a5_RegP str2, mA6RegI cnt, mA7RegI temp, no_Ax_mRegI result) %{
  match(Set result (StrEquals (Binary str1 str2) cnt));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt, KILL temp);

  format %{ "String Equal $str1, $str2, len:$cnt  tmp:$temp -> $result   @ string_equals" %}
  ins_encode %{
    // Get the first character position in both strings
    //         [8] char array, [12] offset, [16] count
    Register str1   = $str1$$Register;
    Register str2   = $str2$$Register;
    Register cnt    = $cnt$$Register;
    Register tmp    = $temp$$Register;
    Register result = $result$$Register;

    Label Loop, done;

    // Identical array pointer => equal. The daddiu after each branch
    // occupies its delay slot, so result is preset to 1 on every path
    // that reaches done via these branches.
    __ beq(str1, str2, done);  // same char[] ?
    __ daddiu(result, R0, 1);

    __ bind(Loop);             // Loop begin
    __ beq(cnt, R0, done);     // all cnt chars matched
    __ daddiu(result, R0, 1);  // count == 0

    // compare current character; on mismatch the delay-slot daddi sets
    // result = 0 before falling into done.
    __ lhu(AT, str1, 0);;
    __ lhu(tmp, str2, 0);
    __ bne(AT, tmp, done);
    __ delayed()->daddi(result, R0, 0);
    __ addi(str1, str1, 2);
    __ addi(str2, str2, 2);
    __ b(Loop);
    __ delayed()->addi(cnt, cnt, -1);  // Loop end

    __ bind(done);
  %}

  ins_pipe( pipe_slow );
%}
9346 //----------Arithmetic Instructions-------------------------------------------
9347 //----------Addition Instructions---------------------------------------------
instruct addI_Reg_Reg(mRegI dst, mRegI src1, mRegI src2) %{
  match(Set dst (AddI src1 src2));

  format %{ "add $dst, $src1, $src2 #@addI_Reg_Reg" %}
  ins_encode %{
    // 32-bit integer add (addu32 is this port's 32-bit add).
    __ addu32($dst$$Register, $src1$$Register, $src2$$Register);
  %}
  ins_pipe( ialu_regI_regI );
%}
instruct addI_Reg_imm(mRegI dst, mRegI src1, immI src2) %{
  match(Set dst (AddI src1 src2));

  format %{ "add $dst, $src1, $src2 #@addI_Reg_imm" %}
  ins_encode %{
    Register rd  = $dst$$Register;
    Register rs  = $src1$$Register;
    int      val = $src2$$constant;

    // Immediates that fit a signed 16-bit field fold into addiu32;
    // anything wider is first materialized in the AT scratch register.
    if (!Assembler::is_simm16(val)) {
      __ move(AT, val);
      __ addu32(rd, rs, AT);
    } else {
      __ addiu32(rd, rs, val);
    }
  %}
  ins_pipe( ialu_regI_regI );
%}
instruct addP_reg_reg(mRegP dst, mRegP src1, mRegL src2) %{
  match(Set dst (AddP src1 src2));

  format %{ "dadd $dst, $src1, $src2 #@addP_reg_reg" %}

  ins_encode %{
    // Pointer + long offset: full 64-bit add.
    __ daddu($dst$$Register, $src1$$Register, $src2$$Register);
  %}

  ins_pipe( ialu_regI_regI );
%}
instruct addP_reg_reg_convI2L(mRegP dst, mRegP src1, mRegI src2) %{
  match(Set dst (AddP src1 (ConvI2L src2)));

  format %{ "dadd $dst, $src1, $src2 #@addP_reg_reg_convI2L" %}

  ins_encode %{
    // Pointer + int offset. No explicit widening is emitted: this port
    // presumably keeps 32-bit values sign-extended in 64-bit registers,
    // so ConvI2L is a no-op here.
    __ daddu($dst$$Register, $src1$$Register, $src2$$Register);
  %}

  ins_pipe( ialu_regI_regI );
%}
instruct addP_reg_imm(mRegP dst, mRegP src1, immL src2) %{
  match(Set dst (AddP src1 src2));

  format %{ "daddi $dst, $src1, $src2 #@addP_reg_imm" %}
  ins_encode %{
    Register rd  = $dst$$Register;
    Register rs  = $src1$$Register;
    long     off = $src2$$constant;

    // Small offsets fold into a single daddiu; larger constants are
    // loaded into AT with set64 first.
    if (!Assembler::is_simm16(off)) {
      __ set64(AT, off);
      __ daddu(rd, rs, AT);
    } else {
      __ daddiu(rd, rs, off);
    }
  %}
  ins_pipe( ialu_regI_imm16 );
%}
// Add Long Register with Register
instruct addL_Reg_Reg(mRegL dst, mRegL src1, mRegL src2) %{
  match(Set dst (AddL src1 src2));
  ins_cost(200);
  format %{ "ADD $dst, $src1, $src2 #@addL_Reg_Reg\t" %}

  ins_encode %{
    // 64-bit add.
    __ daddu($dst$$Register, $src1$$Register, $src2$$Register);
  %}

  ins_pipe( ialu_regL_regL );
%}
instruct addL_Reg_imm(mRegL dst, mRegL src1, immL16 src2)
%{
  match(Set dst (AddL src1 src2));

  format %{ "ADD $dst, $src1, $src2 #@addL_Reg_imm " %}
  ins_encode %{
    // immL16 guarantees the constant fits daddiu's signed 16-bit field.
    __ daddiu($dst$$Register, $src1$$Register, $src2$$constant);
  %}

  ins_pipe( ialu_regL_regL );
%}
instruct addL_RegI2L_imm(mRegL dst, mRegI src1, immL16 src2)
%{
  match(Set dst (AddL (ConvI2L src1) src2));

  format %{ "ADD $dst, $src1, $src2 #@addL_RegI2L_imm " %}
  ins_encode %{
    // int + small long constant. ConvI2L needs no code: ints are
    // presumably kept sign-extended in 64-bit registers on this port.
    __ daddiu($dst$$Register, $src1$$Register, $src2$$constant);
  %}

  ins_pipe( ialu_regL_regL );
%}
instruct addL_RegI2L_Reg(mRegL dst, mRegI src1, mRegL src2) %{
  match(Set dst (AddL (ConvI2L src1) src2));
  ins_cost(200);
  format %{ "ADD $dst, $src1, $src2 #@addL_RegI2L_Reg\t" %}

  ins_encode %{
    // Sign-extended int + long: plain 64-bit add.
    __ daddu($dst$$Register, $src1$$Register, $src2$$Register);
  %}

  ins_pipe( ialu_regL_regL );
%}
instruct addL_RegI2L_RegI2L(mRegL dst, mRegI src1, mRegI src2) %{
  match(Set dst (AddL (ConvI2L src1) (ConvI2L src2)));
  ins_cost(200);
  format %{ "ADD $dst, $src1, $src2 #@addL_RegI2L_RegI2L\t" %}

  ins_encode %{
    // Both ints are already usable as 64-bit values; one daddu suffices.
    __ daddu($dst$$Register, $src1$$Register, $src2$$Register);
  %}

  ins_pipe( ialu_regL_regL );
%}
instruct addL_Reg_RegI2L(mRegL dst, mRegL src1, mRegI src2) %{
  match(Set dst (AddL src1 (ConvI2L src2)));
  ins_cost(200);
  format %{ "ADD $dst, $src1, $src2 #@addL_Reg_RegI2L\t" %}

  ins_encode %{
    // long + sign-extended int: plain 64-bit add.
    __ daddu($dst$$Register, $src1$$Register, $src2$$Register);
  %}

  ins_pipe( ialu_regL_regL );
%}
9526 //----------Subtraction Instructions-------------------------------------------
9527 // Integer Subtraction Instructions
instruct subI_Reg_Reg(mRegI dst, mRegI src1, mRegI src2) %{
  match(Set dst (SubI src1 src2));
  ins_cost(100);

  format %{ "sub $dst, $src1, $src2 #@subI_Reg_Reg" %}
  ins_encode %{
    // 32-bit integer subtract.
    __ subu32($dst$$Register, $src1$$Register, $src2$$Register);
  %}
  ins_pipe( ialu_regI_regI );
%}
instruct subI_Reg_immI16_sub(mRegI dst, mRegI src1, immI16_sub src2) %{
  match(Set dst (SubI src1 src2));
  ins_cost(80);

  format %{ "sub $dst, $src1, $src2 #@subI_Reg_immI16_sub" %}
  ins_encode %{
    // Subtracting an immediate is adding its negation; the immI16_sub
    // operand presumably guarantees the negated value still fits the
    // signed 16-bit field of addiu32.
    __ addiu32($dst$$Register, $src1$$Register, -($src2$$constant));
  %}
  ins_pipe( ialu_regI_regI );
%}
instruct negI_Reg(mRegI dst, immI0 zero, mRegI src) %{
  match(Set dst (SubI zero src));
  ins_cost(80);

  format %{ "neg $dst, $src #@negI_Reg" %}
  ins_encode %{
    // dst = 0 - src, i.e. 32-bit two's-complement negation.
    __ subu32($dst$$Register, R0, $src$$Register);
  %}
  ins_pipe( ialu_regI_regI );
%}
instruct negL_Reg(mRegL dst, immL0 zero, mRegL src) %{
  match(Set dst (SubL zero src));
  ins_cost(80);

  format %{ "neg $dst, $src #@negL_Reg" %}
  ins_encode %{
    // dst = 0 - src, i.e. 64-bit two's-complement negation.
    __ subu($dst$$Register, R0, $src$$Register);
  %}
  ins_pipe( ialu_regI_regI );
%}
instruct subL_Reg_immL16_sub(mRegL dst, mRegL src1, immL16_sub src2) %{
  match(Set dst (SubL src1 src2));
  ins_cost(80);

  format %{ "sub $dst, $src1, $src2 #@subL_Reg_immL16_sub" %}
  ins_encode %{
    // Subtract-immediate as add of the negation; immL16_sub presumably
    // keeps the negated constant within daddiu's signed 16-bit range.
    __ daddiu($dst$$Register, $src1$$Register, -($src2$$constant));
  %}
  ins_pipe( ialu_regI_regI );
%}
// Subtract Long Register with Register.
instruct subL_Reg_Reg(mRegL dst, mRegL src1, mRegL src2) %{
  match(Set dst (SubL src1 src2));
  ins_cost(100);
  format %{ "SubL $dst, $src1, $src2 @ subL_Reg_Reg" %}
  ins_encode %{
    // 64-bit subtract.
    __ subu($dst$$Register, $src1$$Register, $src2$$Register);
  %}
  ins_pipe( ialu_regL_regL );
%}
instruct subL_Reg_RegI2L(mRegL dst, mRegL src1, mRegI src2) %{
  match(Set dst (SubL src1 (ConvI2L src2)));
  ins_cost(100);
  format %{ "SubL $dst, $src1, $src2 @ subL_Reg_RegI2L" %}
  ins_encode %{
    // long - sign-extended int: plain 64-bit subtract.
    __ subu($dst$$Register, $src1$$Register, $src2$$Register);
  %}
  ins_pipe( ialu_regL_regL );
%}
instruct subL_RegI2L_Reg(mRegL dst, mRegI src1, mRegL src2) %{
  match(Set dst (SubL (ConvI2L src1) src2));
  ins_cost(200);
  format %{ "SubL $dst, $src1, $src2 @ subL_RegI2L_Reg" %}
  ins_encode %{
    // sign-extended int - long: plain 64-bit subtract.
    __ subu($dst$$Register, $src1$$Register, $src2$$Register);
  %}
  ins_pipe( ialu_regL_regL );
%}
instruct subL_RegI2L_RegI2L(mRegL dst, mRegI src1, mRegI src2) %{
  match(Set dst (SubL (ConvI2L src1) (ConvI2L src2)));
  ins_cost(200);
  format %{ "SubL $dst, $src1, $src2 @ subL_RegI2L_RegI2L" %}
  ins_encode %{
    // Both ints already hold 64-bit-usable values; one subu suffices.
    __ subu($dst$$Register, $src1$$Register, $src2$$Register);
  %}
  ins_pipe( ialu_regL_regL );
%}
// Integer MOD with Register
// dst = src1 % src2 (32-bit signed remainder, taken from HI after div).
instruct modI_Reg_Reg(mRegI dst, mRegI src1, mRegI src2) %{
  match(Set dst (ModI src1 src2));
  ins_cost(300);
  format %{ "modi $dst, $src1, $src2 @ modI_Reg_Reg" %}
  ins_encode %{
    Register dst = $dst$$Register;
    Register src1 = $src1$$Register;
    Register src2 = $src2$$Register;

    // The Loongson gsmod path is intentionally disabled (kept for
    // reference): measurements below found it slower than div + mfhi.
    //if (UseLoongsonISA) {
    if (0) {
      // 2016.08.10
      // Experiments show that gsmod is slower that div+mfhi.
      // So I just disable it here.
      __ gsmod(dst, src1, src2);
    } else {
      __ div(src1, src2);   // quotient -> LO, remainder -> HI
      __ mfhi(dst);
    }
  %}

  //ins_pipe( ialu_mod );
  ins_pipe( ialu_regI_regI );
%}
instruct modL_reg_reg(mRegL dst, mRegL src1, mRegL src2) %{
  match(Set dst (ModL src1 src2));
  format %{ "modL $dst, $src1, $src2 @modL_reg_reg" %}

  ins_encode %{
    Register rd = $dst$$Register;
    Register rs = $src1$$Register;
    Register rt = $src2$$Register;

    // Loongson has a fused 64-bit remainder; plain MIPS64 divides and
    // reads the remainder from HI.
    if (UseLoongsonISA) {
      __ gsdmod(rd, rs, rt);
    } else {
      __ ddiv(rs, rt);
      __ mfhi(rd);
    }
  %}
  ins_pipe( pipe_slow );
%}
instruct mulI_Reg_Reg(mRegI dst, mRegI src1, mRegI src2) %{
  match(Set dst (MulI src1 src2));

  ins_cost(300);
  format %{ "mul $dst, $src1, $src2 @ mulI_Reg_Reg" %}
  ins_encode %{
    // 32-bit multiply, low word of the product.
    __ mul($dst$$Register, $src1$$Register, $src2$$Register);
  %}
  ins_pipe( ialu_mult );
%}
// Fused multiply-add: dst = src1 * src2 + src3 (32-bit result).
instruct maddI_Reg_Reg(mRegI dst, mRegI src1, mRegI src2, mRegI src3) %{
  match(Set dst (AddI (MulI src1 src2) src3));

  ins_cost(999);
  format %{ "madd $dst, $src1 * $src2 + $src3 #@maddI_Reg_Reg" %}
  ins_encode %{
    Register src1 = $src1$$Register;
    Register src2 = $src2$$Register;
    Register src3 = $src3$$Register;
    Register dst = $dst$$Register;

    // Seed LO with the addend, accumulate the product into HI:LO with
    // madd, then read the low word. HI is left with garbage, which is
    // fine because only the low 32 bits of the result are needed.
    __ mtlo(src3);
    __ madd(src1, src2);
    __ mflo(dst);
  %}
  ins_pipe( ialu_mult );
%}
// 32-bit signed division: dst = src1 / src2.
instruct divI_Reg_Reg(mRegI dst, mRegI src1, mRegI src2) %{
  match(Set dst (DivI src1 src2));

  ins_cost(300);
  format %{ "div $dst, $src1, $src2 @ divI_Reg_Reg" %}
  ins_encode %{
    Register src1 = $src1$$Register;
    Register src2 = $src2$$Register;
    Register dst = $dst$$Register;

    /* 2012/4/21 Jin: In MIPS, div does not cause exception.
       We must trap an exception manually. */
    // teq traps (trap-if-equal, code 0x7) when the divisor is zero,
    // which the runtime turns into the Java ArithmeticException.
    __ teq(R0, src2, 0x7);

    if (UseLoongsonISA) {
      __ gsdiv(dst, src1, src2);
    } else {
      __ div(src1, src2);

      // Two nops before reading LO -- presumably required spacing
      // between div and mflo on the target pipeline; confirm against
      // the core's hazard rules.
      __ nop();
      __ nop();
      __ mflo(dst);
    }
  %}
  ins_pipe( ialu_mod );
%}
instruct divF_Reg_Reg(regF dst, regF src1, regF src2) %{
  match(Set dst (DivF src1 src2));

  ins_cost(300);
  format %{ "divF $dst, $src1, $src2 @ divF_Reg_Reg" %}
  ins_encode %{
    // Single-precision divide. NOTE(review): the original asks whether a
    // manual exception trap is needed here; FP divide follows IEEE rules
    // (no Java exception), so presumably not -- confirm.
    __ div_s($dst$$FloatRegister, $src1$$FloatRegister, $src2$$FloatRegister);
  %}
  ins_pipe( pipe_slow );
%}
instruct divD_Reg_Reg(regD dst, regD src1, regD src2) %{
  match(Set dst (DivD src1 src2));

  ins_cost(300);
  format %{ "divD $dst, $src1, $src2 @ divD_Reg_Reg" %}
  ins_encode %{
    // Double-precision divide. NOTE(review): the original asks whether a
    // manual exception trap is needed here; FP divide follows IEEE rules
    // (no Java exception), so presumably not -- confirm.
    __ div_d($dst$$FloatRegister, $src1$$FloatRegister, $src2$$FloatRegister);
  %}
  ins_pipe( pipe_slow );
%}
instruct mulL_reg_reg(mRegL dst, mRegL src1, mRegL src2) %{
  match(Set dst (MulL src1 src2));
  format %{ "mulL $dst, $src1, $src2 @mulL_reg_reg" %}
  ins_encode %{
    Register rd = $dst$$Register;
    Register rs = $src1$$Register;
    Register rt = $src2$$Register;

    // Loongson has a fused 64-bit multiply; plain MIPS64 multiplies into
    // HI:LO and reads the low doubleword.
    if (UseLoongsonISA) {
      __ gsdmult(rd, rs, rt);
    } else {
      __ dmult(rs, rt);
      __ mflo(rd);
    }
  %}
  ins_pipe( pipe_slow );
%}
9806 instruct mulL_reg_regI2L(mRegL dst, mRegL src1, mRegI src2) %{
9807 match(Set dst (MulL src1 (ConvI2L src2)));
9808 format %{ "mulL $dst, $src1, $src2 @mulL_reg_regI2L" %}
9809 ins_encode %{
9810 Register dst = as_Register($dst$$reg);
9811 Register op1 = as_Register($src1$$reg);
9812 Register op2 = as_Register($src2$$reg);
9814 if (UseLoongsonISA) {
9815 __ gsdmult(dst, op1, op2);
9816 } else {
9817 __ dmult(op1, op2);
9818 __ mflo(dst);
9819 }
9820 %}
9821 ins_pipe( pipe_slow );
9822 %}
// Long divide: dst = src1 / src2 (64-bit signed).
instruct divL_reg_reg(mRegL dst, mRegL src1, mRegL src2) %{
  match(Set dst (DivL src1 src2));
  format %{ "divL $dst, $src1, $src2 @divL_reg_reg" %}

  ins_encode %{
    Register dst = as_Register($dst$$reg);
    Register op1 = as_Register($src1$$reg);
    Register op2 = as_Register($src2$$reg);

    // MIPS ddiv does not raise an exception on divide-by-zero, so we must
    // trap manually, exactly as divI_Reg_Reg does: teq traps with code 0x7
    // when op2 == R0, letting the runtime raise ArithmeticException.
    __ teq(R0, op2, 0x7);

    if (UseLoongsonISA) {
      // Three-operand Loongson divide, bypasses HI/LO.
      __ gsddiv(dst, op1, op2);
    } else {
      __ ddiv(op1, op2);
      __ mflo(dst);     // quotient lives in LO
    }
  %}
  ins_pipe( pipe_slow );
%}
// Single-precision float add: dst = src1 + src2.
instruct addF_reg_reg(regF dst, regF src1, regF src2) %{
  match(Set dst (AddF src1 src2));
  format %{ "AddF $dst, $src1, $src2 @addF_reg_reg" %}
  ins_encode %{
    FloatRegister src1 = as_FloatRegister($src1$$reg);
    FloatRegister src2 = as_FloatRegister($src2$$reg);
    FloatRegister dst = as_FloatRegister($dst$$reg);

    __ add_s(dst, src1, src2);
  %}
  ins_pipe( fpu_regF_regF );
%}

// Single-precision float subtract: dst = src1 - src2.
instruct subF_reg_reg(regF dst, regF src1, regF src2) %{
  match(Set dst (SubF src1 src2));
  format %{ "SubF $dst, $src1, $src2 @subF_reg_reg" %}
  ins_encode %{
    FloatRegister src1 = as_FloatRegister($src1$$reg);
    FloatRegister src2 = as_FloatRegister($src2$$reg);
    FloatRegister dst = as_FloatRegister($dst$$reg);

    __ sub_s(dst, src1, src2);
  %}
  ins_pipe( fpu_regF_regF );
%}

// Double-precision float add: dst = src1 + src2.
instruct addD_reg_reg(regD dst, regD src1, regD src2) %{
  match(Set dst (AddD src1 src2));
  format %{ "AddD $dst, $src1, $src2 @addD_reg_reg" %}
  ins_encode %{
    FloatRegister src1 = as_FloatRegister($src1$$reg);
    FloatRegister src2 = as_FloatRegister($src2$$reg);
    FloatRegister dst = as_FloatRegister($dst$$reg);

    __ add_d(dst, src1, src2);
  %}
  ins_pipe( fpu_regF_regF );
%}

// Double-precision float subtract: dst = src1 - src2.
instruct subD_reg_reg(regD dst, regD src1, regD src2) %{
  match(Set dst (SubD src1 src2));
  format %{ "SubD $dst, $src1, $src2 @subD_reg_reg" %}
  ins_encode %{
    FloatRegister src1 = as_FloatRegister($src1$$reg);
    FloatRegister src2 = as_FloatRegister($src2$$reg);
    FloatRegister dst = as_FloatRegister($dst$$reg);

    __ sub_d(dst, src1, src2);
  %}
  ins_pipe( fpu_regF_regF );
%}

// Single-precision float negate: dst = -src.
instruct negF_reg(regF dst, regF src) %{
  match(Set dst (NegF src));
  format %{ "negF $dst, $src @negF_reg" %}
  ins_encode %{
    FloatRegister src = as_FloatRegister($src$$reg);
    FloatRegister dst = as_FloatRegister($dst$$reg);

    __ neg_s(dst, src);
  %}
  ins_pipe( fpu_regF_regF );
%}

// Double-precision float negate: dst = -src.
instruct negD_reg(regD dst, regD src) %{
  match(Set dst (NegD src));
  format %{ "negD $dst, $src @negD_reg" %}
  ins_encode %{
    FloatRegister src = as_FloatRegister($src$$reg);
    FloatRegister dst = as_FloatRegister($dst$$reg);

    __ neg_d(dst, src);
  %}
  ins_pipe( fpu_regF_regF );
%}
// Single-precision float multiply: dst = src1 * src2.
instruct mulF_reg_reg(regF dst, regF src1, regF src2) %{
  match(Set dst (MulF src1 src2));
  format %{ "MULF $dst, $src1, $src2 @mulF_reg_reg" %}
  ins_encode %{
    FloatRegister src1 = $src1$$FloatRegister;
    FloatRegister src2 = $src2$$FloatRegister;
    FloatRegister dst = $dst$$FloatRegister;

    __ mul_s(dst, src1, src2);
  %}
  ins_pipe( fpu_regF_regF );
%}

// Single-precision multiply-add: dst = src1 * src2 + src3.
instruct maddF_reg_reg(regF dst, regF src1, regF src2, regF src3) %{
  match(Set dst (AddF (MulF src1 src2) src3));
  // For compatibility reason (e.g. on the Loongson platform), disable this guy.
  // The huge cost makes the matcher always prefer separate mul+add;
  // madd_s rounding may not match Java's strict two-rounding semantics.
  ins_cost(44444);
  format %{ "maddF $dst, $src1, $src2, $src3 @maddF_reg_reg" %}
  ins_encode %{
    FloatRegister src1 = $src1$$FloatRegister;
    FloatRegister src2 = $src2$$FloatRegister;
    FloatRegister src3 = $src3$$FloatRegister;
    FloatRegister dst = $dst$$FloatRegister;

    __ madd_s(dst, src1, src2, src3);
  %}
  ins_pipe( fpu_regF_regF );
%}

// Mul two double precision floating piont number
instruct mulD_reg_reg(regD dst, regD src1, regD src2) %{
  match(Set dst (MulD src1 src2));
  format %{ "MULD $dst, $src1, $src2 @mulD_reg_reg" %}
  ins_encode %{
    FloatRegister src1 = $src1$$FloatRegister;
    FloatRegister src2 = $src2$$FloatRegister;
    FloatRegister dst = $dst$$FloatRegister;

    __ mul_d(dst, src1, src2);
  %}
  ins_pipe( fpu_regF_regF );
%}

// Double-precision multiply-add: dst = src1 * src2 + src3.
instruct maddD_reg_reg(regD dst, regD src1, regD src2, regD src3) %{
  match(Set dst (AddD (MulD src1 src2) src3));
  // For compatibility reason (e.g. on the Loongson platform), disable this guy.
  // See maddF_reg_reg: disabled via prohibitive cost.
  ins_cost(44444);
  format %{ "maddD $dst, $src1, $src2, $src3 @maddD_reg_reg" %}
  ins_encode %{
    FloatRegister src1 = $src1$$FloatRegister;
    FloatRegister src2 = $src2$$FloatRegister;
    FloatRegister src3 = $src3$$FloatRegister;
    FloatRegister dst = $dst$$FloatRegister;

    __ madd_d(dst, src1, src2, src3);
  %}
  ins_pipe( fpu_regF_regF );
%}

// Single-precision absolute value: dst = |src|.
instruct absF_reg(regF dst, regF src) %{
  match(Set dst (AbsF src));
  ins_cost(100);
  format %{ "absF $dst, $src @absF_reg" %}
  ins_encode %{
    FloatRegister src = as_FloatRegister($src$$reg);
    FloatRegister dst = as_FloatRegister($dst$$reg);

    __ abs_s(dst, src);
  %}
  ins_pipe( fpu_regF_regF );
%}
// intrinsics for math_native.
// AbsD SqrtD CosD SinD TanD LogD Log10D

// Double-precision absolute value: dst = |src|.
instruct absD_reg(regD dst, regD src) %{
  match(Set dst (AbsD src));
  ins_cost(100);
  format %{ "absD $dst, $src @absD_reg" %}
  ins_encode %{
    FloatRegister src = as_FloatRegister($src$$reg);
    FloatRegister dst = as_FloatRegister($dst$$reg);

    __ abs_d(dst, src);
  %}
  ins_pipe( fpu_regF_regF );
%}

// Double-precision square root: dst = sqrt(src).
instruct sqrtD_reg(regD dst, regD src) %{
  match(Set dst (SqrtD src));
  ins_cost(100);
  format %{ "SqrtD $dst, $src @sqrtD_reg" %}
  ins_encode %{
    FloatRegister src = as_FloatRegister($src$$reg);
    FloatRegister dst = as_FloatRegister($dst$$reg);

    __ sqrt_d(dst, src);
  %}
  ins_pipe( fpu_regF_regF );
%}

// Single-precision square root, matched from the double-rounded ideal
// shape (float)sqrt((double)src); sqrt_s gives the same result because
// sqrt is correctly rounded.
instruct sqrtF_reg(regF dst, regF src) %{
  match(Set dst (ConvD2F (SqrtD (ConvF2D src))));
  ins_cost(100);
  format %{ "SqrtF $dst, $src @sqrtF_reg" %}
  ins_encode %{
    FloatRegister src = as_FloatRegister($src$$reg);
    FloatRegister dst = as_FloatRegister($dst$$reg);

    __ sqrt_s(dst, src);
  %}
  ins_pipe( fpu_regF_regF );
%}
//----------------------------------Logical Instructions----------------------
//__________________________________Integer Logical Instructions-------------

//And Instuctions
// And Register with Immediate
// Generic fallback: materialize the 32-bit immediate into scratch AT first.
instruct andI_Reg_immI(mRegI dst, mRegI src1, immI src2) %{
  match(Set dst (AndI src1 src2));

  format %{ "and $dst, $src1, $src2 #@andI_Reg_immI" %}
  ins_encode %{
    Register dst = $dst$$Register;
    Register src = $src1$$Register;
    int val = $src2$$constant;

    __ move(AT, val);       // AT is the assembler scratch register
    __ andr(dst, src, AT);
  %}
  ins_pipe( ialu_regI_regI );
%}

// AND with an immediate that fits andi's zero-extended 16-bit field.
instruct andI_Reg_imm_0_65535(mRegI dst, mRegI src1, immI_0_65535 src2) %{
  match(Set dst (AndI src1 src2));
  ins_cost(60);

  format %{ "and $dst, $src1, $src2 #@andI_Reg_imm_0_65535" %}
  ins_encode %{
    Register dst = $dst$$Register;
    Register src = $src1$$Register;
    int val = $src2$$constant;

    __ andi(dst, src, val);
  %}
  ins_pipe( ialu_regI_regI );
%}

// AND with a contiguous low-bit mask (2^size - 1): a single ext
// (bit-field extract) of `size` bits from position 0.
instruct andI_Reg_immI_nonneg_mask(mRegI dst, mRegI src1, immI_nonneg_mask mask) %{
  match(Set dst (AndI src1 mask));
  ins_cost(60);

  format %{ "and $dst, $src1, $mask #@andI_Reg_immI_nonneg_mask" %}
  ins_encode %{
    Register dst = $dst$$Register;
    Register src = $src1$$Register;
    int size = Assembler::is_int_mask($mask$$constant);  // mask width in bits

    __ ext(dst, src, 0, size);
  %}
  ins_pipe( ialu_regI_regI );
%}

// 64-bit counterpart: AND with a contiguous low-bit long mask via dext.
instruct andL_Reg_immL_nonneg_mask(mRegL dst, mRegL src1, immL_nonneg_mask mask) %{
  match(Set dst (AndL src1 mask));
  ins_cost(60);

  format %{ "and $dst, $src1, $mask #@andL_Reg_immL_nonneg_mask" %}
  ins_encode %{
    Register dst = $dst$$Register;
    Register src = $src1$$Register;
    int size = Assembler::is_jlong_mask($mask$$constant);  // mask width in bits

    __ dext(dst, src, 0, size);
  %}
  ins_pipe( ialu_regI_regI );
%}
// XOR with an immediate fitting xori's zero-extended 16-bit field.
instruct xorI_Reg_imm_0_65535(mRegI dst, mRegI src1, immI_0_65535 src2) %{
  match(Set dst (XorI src1 src2));
  ins_cost(60);

  format %{ "xori $dst, $src1, $src2 #@xorI_Reg_imm_0_65535" %}
  ins_encode %{
    Register dst = $dst$$Register;
    Register src = $src1$$Register;
    int val = $src2$$constant;

    __ xori(dst, src, val);
  %}
  ins_pipe( ialu_regI_regI );
%}

// x ^ -1 == ~x: Loongson orn with R0 gives dst = R0 | ~src = ~src.
instruct xorI_Reg_immI_M1(mRegI dst, mRegI src1, immI_M1 M1) %{
  match(Set dst (XorI src1 M1));
  predicate(UseLoongsonISA);
  ins_cost(60);

  format %{ "xor $dst, $src1, $M1 #@xorI_Reg_immI_M1" %}
  ins_encode %{
    Register dst = $dst$$Register;
    Register src = $src1$$Register;

    __ gsorn(dst, R0, src);
  %}
  ins_pipe( ialu_regI_regI );
%}

// ~(int)src for a long source. The 64-bit orn is fine because a
// sign-extended int stays sign-extended under bitwise NOT.
instruct xorL2I_Reg_immI_M1(mRegI dst, mRegL src1, immI_M1 M1) %{
  match(Set dst (XorI (ConvL2I src1) M1));
  predicate(UseLoongsonISA);
  ins_cost(60);

  format %{ "xor $dst, $src1, $M1 #@xorL2I_Reg_immI_M1" %}
  ins_encode %{
    Register dst = $dst$$Register;
    Register src = $src1$$Register;

    __ gsorn(dst, R0, src);
  %}
  ins_pipe( ialu_regI_regI );
%}

// Long XOR with a 0..65535 immediate; xori zero-extends, which is
// exactly the long semantics for this range.
instruct xorL_Reg_imm_0_65535(mRegL dst, mRegL src1, immL_0_65535 src2) %{
  match(Set dst (XorL src1 src2));
  ins_cost(60);

  format %{ "xori $dst, $src1, $src2 #@xorL_Reg_imm_0_65535" %}
  ins_encode %{
    Register dst = $dst$$Register;
    Register src = $src1$$Register;
    int val = $src2$$constant;

    __ xori(dst, src, val);
  %}
  ins_pipe( ialu_regI_regI );
%}
10158 /*
10159 instruct xorL_Reg_immL_M1(mRegL dst, mRegL src1, immL_M1 M1) %{
10160 match(Set dst (XorL src1 M1));
10161 predicate(UseLoongsonISA);
10162 ins_cost(60);
10164 format %{ "xor $dst, $src1, $M1 #@xorL_Reg_immL_M1" %}
10165 ins_encode %{
10166 Register dst = $dst$$Register;
10167 Register src = $src1$$Register;
10169 __ gsorn(dst, R0, src);
10170 %}
10171 ins_pipe( ialu_regI_regI );
10172 %}
10173 */
// Fold (0xFF & LoadB mem) into a single unsigned-byte load.
instruct lbu_and_lmask(mRegI dst, memory mem, immI_255 mask) %{
  match(Set dst (AndI mask (LoadB mem)));
  ins_cost(60);

  // Fixed format: the encoding is load_UB_enc, an unsigned *byte* load,
  // so the disassembly mnemonic is "lbu", not "lhu".
  format %{ "lbu $dst, $mem #@lbu_and_lmask" %}
  ins_encode(load_UB_enc(dst, mem));
  ins_pipe( ialu_loadI );
%}
// Mirror of lbu_and_lmask with the mask on the right-hand side.
instruct lbu_and_rmask(mRegI dst, memory mem, immI_255 mask) %{
  match(Set dst (AndI (LoadB mem) mask));
  ins_cost(60);

  // Fixed format: the encoding is load_UB_enc, an unsigned *byte* load,
  // so the disassembly mnemonic is "lbu", not "lhu".
  format %{ "lbu $dst, $mem #@lbu_and_rmask" %}
  ins_encode(load_UB_enc(dst, mem));
  ins_pipe( ialu_loadI );
%}
// Integer AND, register-register.
instruct andI_Reg_Reg(mRegI dst, mRegI src1, mRegI src2) %{
  match(Set dst (AndI src1 src2));

  format %{ "and $dst, $src1, $src2 #@andI_Reg_Reg" %}
  ins_encode %{
    Register dst = $dst$$Register;
    Register src1 = $src1$$Register;
    Register src2 = $src2$$Register;
    __ andr(dst, src1, src2);
  %}
  ins_pipe( ialu_regI_regI );
%}

// src1 & ~src2 via Loongson andn (matched from src1 & (src2 ^ -1)).
instruct andnI_Reg_nReg(mRegI dst, mRegI src1, mRegI src2, immI_M1 M1) %{
  match(Set dst (AndI src1 (XorI src2 M1)));
  predicate(UseLoongsonISA);

  format %{ "andn $dst, $src1, $src2 #@andnI_Reg_nReg" %}
  ins_encode %{
    Register dst = $dst$$Register;
    Register src1 = $src1$$Register;
    Register src2 = $src2$$Register;

    __ gsandn(dst, src1, src2);
  %}
  ins_pipe( ialu_regI_regI );
%}

// src1 | ~src2 via Loongson orn.
instruct ornI_Reg_nReg(mRegI dst, mRegI src1, mRegI src2, immI_M1 M1) %{
  match(Set dst (OrI src1 (XorI src2 M1)));
  predicate(UseLoongsonISA);

  format %{ "orn $dst, $src1, $src2 #@ornI_Reg_nReg" %}
  ins_encode %{
    Register dst = $dst$$Register;
    Register src1 = $src1$$Register;
    Register src2 = $src2$$Register;

    __ gsorn(dst, src1, src2);
  %}
  ins_pipe( ialu_regI_regI );
%}

// ~src1 & src2: same andn with operands swapped (AND is commutative).
instruct andnI_nReg_Reg(mRegI dst, mRegI src1, mRegI src2, immI_M1 M1) %{
  match(Set dst (AndI (XorI src1 M1) src2));
  predicate(UseLoongsonISA);

  format %{ "andn $dst, $src2, $src1 #@andnI_nReg_Reg" %}
  ins_encode %{
    Register dst = $dst$$Register;
    Register src1 = $src1$$Register;
    Register src2 = $src2$$Register;

    __ gsandn(dst, src2, src1);
  %}
  ins_pipe( ialu_regI_regI );
%}

// ~src1 | src2: same orn with operands swapped (OR is commutative).
instruct ornI_nReg_Reg(mRegI dst, mRegI src1, mRegI src2, immI_M1 M1) %{
  match(Set dst (OrI (XorI src1 M1) src2));
  predicate(UseLoongsonISA);

  format %{ "orn $dst, $src2, $src1 #@ornI_nReg_Reg" %}
  ins_encode %{
    Register dst = $dst$$Register;
    Register src1 = $src1$$Register;
    Register src2 = $src2$$Register;

    __ gsorn(dst, src2, src1);
  %}
  ins_pipe( ialu_regI_regI );
%}
// And Long Register with Register
instruct andL_Reg_Reg(mRegL dst, mRegL src1, mRegL src2) %{
  match(Set dst (AndL src1 src2));
  format %{ "AND $dst, $src1, $src2 @ andL_Reg_Reg\n\t" %}
  ins_encode %{
    Register dst_reg = as_Register($dst$$reg);
    Register src1_reg = as_Register($src1$$reg);
    Register src2_reg = as_Register($src2$$reg);

    __ andr(dst_reg, src1_reg, src2_reg);
  %}
  ins_pipe( ialu_regL_regL );
%}

// Long AND with an int operand widened via ConvI2L; the int register is
// used directly — assumes MIPS64 sign-extended int register convention.
instruct andL_Reg_Reg_convI2L(mRegL dst, mRegL src1, mRegI src2) %{
  match(Set dst (AndL src1 (ConvI2L src2)));
  format %{ "AND $dst, $src1, $src2 @ andL_Reg_Reg_convI2L\n\t" %}
  ins_encode %{
    Register dst_reg = as_Register($dst$$reg);
    Register src1_reg = as_Register($src1$$reg);
    Register src2_reg = as_Register($src2$$reg);

    __ andr(dst_reg, src1_reg, src2_reg);
  %}
  ins_pipe( ialu_regL_regL );
%}

// Long AND with a 0..65535 immediate: single andi (zero-extended imm).
instruct andL_Reg_imm_0_65535(mRegL dst, mRegL src1, immL_0_65535 src2) %{
  match(Set dst (AndL src1 src2));
  ins_cost(60);

  format %{ "and $dst, $src1, $src2 #@andL_Reg_imm_0_65535" %}
  ins_encode %{
    Register dst = $dst$$Register;
    Register src = $src1$$Register;
    long val = $src2$$constant;

    __ andi(dst, src, val);
  %}
  ins_pipe( ialu_regI_regI );
%}

// (int)(src1 & imm) with imm in 0..65535: the andi result already fits
// in 16 bits, so no narrowing is needed after the AND.
instruct andL2I_Reg_imm_0_65535(mRegI dst, mRegL src1, immL_0_65535 src2) %{
  match(Set dst (ConvL2I (AndL src1 src2)));
  ins_cost(60);

  format %{ "and $dst, $src1, $src2 #@andL2I_Reg_imm_0_65535" %}
  ins_encode %{
    Register dst = $dst$$Register;
    Register src = $src1$$Register;
    long val = $src2$$constant;

    __ andi(dst, src, val);
  %}
  ins_pipe( ialu_regI_regI );
%}
10323 /*
10324 instruct andnL_Reg_nReg(mRegL dst, mRegL src1, mRegL src2, immL_M1 M1) %{
10325 match(Set dst (AndL src1 (XorL src2 M1)));
10326 predicate(UseLoongsonISA);
10328 format %{ "andn $dst, $src1, $src2 #@andnL_Reg_nReg" %}
10329 ins_encode %{
10330 Register dst = $dst$$Register;
10331 Register src1 = $src1$$Register;
10332 Register src2 = $src2$$Register;
10334 __ gsandn(dst, src1, src2);
10335 %}
10336 ins_pipe( ialu_regI_regI );
10337 %}
10338 */
10340 /*
10341 instruct ornL_Reg_nReg(mRegL dst, mRegL src1, mRegL src2, immL_M1 M1) %{
10342 match(Set dst (OrL src1 (XorL src2 M1)));
10343 predicate(UseLoongsonISA);
10345 format %{ "orn $dst, $src1, $src2 #@ornL_Reg_nReg" %}
10346 ins_encode %{
10347 Register dst = $dst$$Register;
10348 Register src1 = $src1$$Register;
10349 Register src2 = $src2$$Register;
10351 __ gsorn(dst, src1, src2);
10352 %}
10353 ins_pipe( ialu_regI_regI );
10354 %}
10355 */
10357 /*
10358 instruct andnL_nReg_Reg(mRegL dst, mRegL src1, mRegL src2, immL_M1 M1) %{
10359 match(Set dst (AndL (XorL src1 M1) src2));
10360 predicate(UseLoongsonISA);
10362 format %{ "andn $dst, $src2, $src1 #@andnL_nReg_Reg" %}
10363 ins_encode %{
10364 Register dst = $dst$$Register;
10365 Register src1 = $src1$$Register;
10366 Register src2 = $src2$$Register;
10368 __ gsandn(dst, src2, src1);
10369 %}
10370 ins_pipe( ialu_regI_regI );
10371 %}
10372 */
10374 /*
10375 instruct ornL_nReg_Reg(mRegL dst, mRegL src1, mRegL src2, immL_M1 M1) %{
10376 match(Set dst (OrL (XorL src1 M1) src2));
10377 predicate(UseLoongsonISA);
10379 format %{ "orn $dst, $src2, $src1 #@ornL_nReg_Reg" %}
10380 ins_encode %{
10381 Register dst = $dst$$Register;
10382 Register src1 = $src1$$Register;
10383 Register src2 = $src2$$Register;
10385 __ gsorn(dst, src2, src1);
10386 %}
10387 ins_pipe( ialu_regI_regI );
10388 %}
10389 */
// The following andL_Reg_immL_Mxx rules implement "dst &= negative
// constant" by zeroing the complement bit-field with dins(dst, R0, pos,
// size), which inserts `size` zero bits at position `pos`.

// dst &= -8 (...11111000): clear bits 0..2.
instruct andL_Reg_immL_M8(mRegL dst, immL_M8 M8) %{
  match(Set dst (AndL dst M8));
  ins_cost(60);

  format %{ "and $dst, $dst, $M8 #@andL_Reg_immL_M8" %}
  ins_encode %{
    Register dst = $dst$$Register;

    __ dins(dst, R0, 0, 3);
  %}
  ins_pipe( ialu_regI_regI );
%}

// dst &= -5 (...11111011): clear bit 2 only.
instruct andL_Reg_immL_M5(mRegL dst, immL_M5 M5) %{
  match(Set dst (AndL dst M5));
  ins_cost(60);

  format %{ "and $dst, $dst, $M5 #@andL_Reg_immL_M5" %}
  ins_encode %{
    Register dst = $dst$$Register;

    __ dins(dst, R0, 2, 1);
  %}
  ins_pipe( ialu_regI_regI );
%}

// dst &= -7 (...11111001): clear bits 1..2.
instruct andL_Reg_immL_M7(mRegL dst, immL_M7 M7) %{
  match(Set dst (AndL dst M7));
  ins_cost(60);

  format %{ "and $dst, $dst, $M7 #@andL_Reg_immL_M7" %}
  ins_encode %{
    Register dst = $dst$$Register;

    __ dins(dst, R0, 1, 2);
  %}
  ins_pipe( ialu_regI_regI );
%}

// dst &= -4 (...11111100): clear bits 0..1.
instruct andL_Reg_immL_M4(mRegL dst, immL_M4 M4) %{
  match(Set dst (AndL dst M4));
  ins_cost(60);

  format %{ "and $dst, $dst, $M4 #@andL_Reg_immL_M4" %}
  ins_encode %{
    Register dst = $dst$$Register;

    __ dins(dst, R0, 0, 2);
  %}
  ins_pipe( ialu_regI_regI );
%}

// dst &= -121 (...10000111): clear bits 3..6.
instruct andL_Reg_immL_M121(mRegL dst, immL_M121 M121) %{
  match(Set dst (AndL dst M121));
  ins_cost(60);

  format %{ "and $dst, $dst, $M121 #@andL_Reg_immL_M121" %}
  ins_encode %{
    Register dst = $dst$$Register;

    __ dins(dst, R0, 3, 4);
  %}
  ins_pipe( ialu_regI_regI );
%}
// Or Long Register with Register
instruct orL_Reg_Reg(mRegL dst, mRegL src1, mRegL src2) %{
  match(Set dst (OrL src1 src2));
  format %{ "OR $dst, $src1, $src2 @ orL_Reg_Reg\t" %}
  ins_encode %{
    Register dst_reg = $dst$$Register;
    Register src1_reg = $src1$$Register;
    Register src2_reg = $src2$$Register;

    __ orr(dst_reg, src1_reg, src2_reg);
  %}
  ins_pipe( ialu_regL_regL );
%}

// Long OR where the left operand is a pointer reinterpreted as a long
// (CastP2X is a bit-for-bit reinterpretation, so plain orr is correct).
instruct orL_Reg_P2XReg(mRegL dst, mRegP src1, mRegL src2) %{
  match(Set dst (OrL (CastP2X src1) src2));
  format %{ "OR $dst, $src1, $src2 @ orL_Reg_P2XReg\t" %}
  ins_encode %{
    Register dst_reg = $dst$$Register;
    Register src1_reg = $src1$$Register;
    Register src2_reg = $src2$$Register;

    __ orr(dst_reg, src1_reg, src2_reg);
  %}
  ins_pipe( ialu_regL_regL );
%}

// Xor Long Register with Register
instruct xorL_Reg_Reg(mRegL dst, mRegL src1, mRegL src2) %{
  match(Set dst (XorL src1 src2));
  format %{ "XOR $dst, $src1, $src2 @ xorL_Reg_Reg\t" %}
  ins_encode %{
    Register dst_reg = as_Register($dst$$reg);
    Register src1_reg = as_Register($src1$$reg);
    Register src2_reg = as_Register($src2$$reg);

    __ xorr(dst_reg, src1_reg, src2_reg);
  %}
  ins_pipe( ialu_regL_regL );
%}
// Shift Left by 8-bit immediate
// NOTE(review): shift is an immI8 but sll takes a 5-bit shift amount;
// presumably C2 canonicalizes int shifts to 0..31 before matching —
// TODO confirm.
instruct salI_Reg_imm(mRegI dst, mRegI src, immI8 shift) %{
  match(Set dst (LShiftI src shift));

  format %{ "SHL $dst, $src, $shift #@salI_Reg_imm" %}
  ins_encode %{
    Register src = $src$$Register;
    Register dst = $dst$$Register;
    int shamt = $shift$$constant;

    __ sll(dst, src, shamt);
  %}
  ins_pipe( ialu_regI_regI );
%}

// Int shift-left of a narrowed long; sll operates on the low 32 bits and
// sign-extends, which is exactly (int)src << shamt.
instruct salL2I_Reg_imm(mRegI dst, mRegL src, immI8 shift) %{
  match(Set dst (LShiftI (ConvL2I src) shift));

  format %{ "SHL $dst, $src, $shift #@salL2I_Reg_imm" %}
  ins_encode %{
    Register src = $src$$Register;
    Register dst = $dst$$Register;
    int shamt = $shift$$constant;

    __ sll(dst, src, shamt);
  %}
  ins_pipe( ialu_regI_regI );
%}

// (src << 16) & 0xFFFF0000: the AND is a no-op after a 16-bit left
// shift, so a single sll suffices.
instruct salI_Reg_imm_and_M65536(mRegI dst, mRegI src, immI_16 shift, immI_M65536 mask) %{
  match(Set dst (AndI (LShiftI src shift) mask));

  format %{ "SHL $dst, $src, $shift #@salI_Reg_imm_and_M65536" %}
  ins_encode %{
    Register src = $src$$Register;
    Register dst = $dst$$Register;

    __ sll(dst, src, 16);
  %}
  ins_pipe( ialu_regI_regI );
%}
// short-narrowing of (src & 7): since the masked value fits in 3 bits,
// the <<16 >>16 sign-extension is a no-op and a single andi suffices.
instruct land7_2_s(mRegI dst, mRegL src, immL7 seven, immI_16 sixteen)
%{
  match(Set dst (RShiftI (LShiftI (ConvL2I (AndL src seven)) sixteen) sixteen));

  format %{ "andi $dst, $src, 7\t# @land7_2_s" %}
  ins_encode %{
    Register src = $src$$Register;
    Register dst = $dst$$Register;

    __ andi(dst, src, 7);
  %}
  ins_pipe(ialu_regI_regI);
%}

// short-narrowing of (src1 | imm), imm in 0..32767.
// NOTE(review): folding away the <<16 >>16 is only exact when src1 is
// already short-range (sign-extended 16-bit); presumably guaranteed by
// the contexts C2 generates this shape in — TODO confirm.
instruct ori2s(mRegI dst, mRegI src1, immI_0_32767 src2, immI_16 sixteen)
%{
  match(Set dst (RShiftI (LShiftI (OrI src1 src2) sixteen) sixteen));

  format %{ "ori $dst, $src1, $src2\t# @ori2s" %}
  ins_encode %{
    Register src = $src1$$Register;
    int val = $src2$$constant;
    Register dst = $dst$$Register;

    __ ori(dst, src, val);
  %}
  ins_pipe(ialu_regI_regI);
%}

// Logical Shift Right by 16, followed by Arithmetic Shift Left by 16.
// This idiom is used by the compiler the i2s bytecode.
// seh = sign-extend halfword, a single-instruction equivalent.
instruct i2s(mRegI dst, mRegI src, immI_16 sixteen)
%{
  match(Set dst (RShiftI (LShiftI src sixteen) sixteen));

  format %{ "i2s $dst, $src\t# @i2s" %}
  ins_encode %{
    Register src = $src$$Register;
    Register dst = $dst$$Register;

    __ seh(dst, src);
  %}
  ins_pipe(ialu_regI_regI);
%}

// Logical Shift Right by 24, followed by Arithmetic Shift Left by 24.
// This idiom is used by the compiler for the i2b bytecode.
// seb = sign-extend byte, a single-instruction equivalent.
instruct i2b(mRegI dst, mRegI src, immI_24 twentyfour)
%{
  match(Set dst (RShiftI (LShiftI src twentyfour) twentyfour));

  format %{ "i2b $dst, $src\t# @i2b" %}
  ins_encode %{
    Register src = $src$$Register;
    Register dst = $dst$$Register;

    __ seb(dst, src);
  %}
  ins_pipe(ialu_regI_regI);
%}
// NOTE(review): this rule duplicates salL2I_Reg_imm above (identical
// match pattern and encoding); kept for compatibility, one of the two
// is redundant.
instruct salI_RegL2I_imm(mRegI dst, mRegL src, immI8 shift) %{
  match(Set dst (LShiftI (ConvL2I src) shift));

  format %{ "SHL $dst, $src, $shift #@salI_RegL2I_imm" %}
  ins_encode %{
    Register src = $src$$Register;
    Register dst = $dst$$Register;
    int shamt = $shift$$constant;

    __ sll(dst, src, shamt);
  %}
  ins_pipe( ialu_regI_regI );
%}

// Shift Left by 8-bit immediate
// Variable int shift-left; sllv uses only the low 5 bits of shamt,
// matching Java's (shift & 31) semantics.
instruct salI_Reg_Reg(mRegI dst, mRegI src, mRegI shift) %{
  match(Set dst (LShiftI src shift));

  format %{ "SHL $dst, $src, $shift #@salI_Reg_Reg" %}
  ins_encode %{
    Register src = $src$$Register;
    Register dst = $dst$$Register;
    Register shamt = $shift$$Register;
    __ sllv(dst, src, shamt);
  %}
  ins_pipe( ialu_regI_regI );
%}
// Shift Left Long
// Immediate long shift-left. dsll encodes shamt 0..31; shamt 32..63 need
// dsll32 (which shifts by shamt-32), hence the two-way split below.
instruct salL_Reg_imm(mRegL dst, mRegL src, immI8 shift) %{
  //predicate(UseNewLongLShift);
  match(Set dst (LShiftL src shift));
  ins_cost(100);
  format %{ "salL $dst, $src, $shift @ salL_Reg_imm" %}
  ins_encode %{
    Register src_reg = as_Register($src$$reg);
    Register dst_reg = as_Register($dst$$reg);
    int shamt = $shift$$constant;

    // is_simm(shamt, 5) is true for 0..15; larger/negative amounts are
    // first masked to 6 bits (Java's shift & 63) before dispatching.
    if (__ is_simm(shamt, 5))
      __ dsll(dst_reg, src_reg, shamt);
    else
    {
      int sa = Assembler::low(shamt, 6);  // shamt & 0x3f
      if (sa < 32) {
        __ dsll(dst_reg, src_reg, sa);
      } else {
        __ dsll32(dst_reg, src_reg, sa - 32);
      }
    }
  %}
  ins_pipe( ialu_regL_regL );
%}

// Long shift-left of a widened int; the int register is used directly
// (MIPS64 keeps ints sign-extended), then shifted as a 64-bit value.
instruct salL_RegI2L_imm(mRegL dst, mRegI src, immI8 shift) %{
  //predicate(UseNewLongLShift);
  match(Set dst (LShiftL (ConvI2L src) shift));
  ins_cost(100);
  format %{ "salL $dst, $src, $shift @ salL_RegI2L_imm" %}
  ins_encode %{
    Register src_reg = as_Register($src$$reg);
    Register dst_reg = as_Register($dst$$reg);
    int shamt = $shift$$constant;

    if (__ is_simm(shamt, 5))
      __ dsll(dst_reg, src_reg, shamt);
    else
    {
      int sa = Assembler::low(shamt, 6);
      if (sa < 32) {
        __ dsll(dst_reg, src_reg, sa);
      } else {
        __ dsll32(dst_reg, src_reg, sa - 32);
      }
    }
  %}
  ins_pipe( ialu_regL_regL );
%}

// Shift Left Long
// Variable long shift-left; dsllv uses the low 6 bits of the count,
// matching Java's (shift & 63).
instruct salL_Reg_Reg(mRegL dst, mRegL src, mRegI shift) %{
  //predicate(UseNewLongLShift);
  match(Set dst (LShiftL src shift));
  ins_cost(100);
  format %{ "salL $dst, $src, $shift @ salL_Reg_Reg" %}
  ins_encode %{
    Register src_reg = as_Register($src$$reg);
    Register dst_reg = as_Register($dst$$reg);

    __ dsllv(dst_reg, src_reg, $shift$$Register);
  %}
  ins_pipe( ialu_regL_regL );
%}

// NOTE(review): duplicates salL_RegI2L_imm above (identical match rule
// and encoding); one of the two is redundant.
instruct salL_convI2L_Reg_imm(mRegL dst, mRegI src, immI8 shift) %{
  match(Set dst (LShiftL (ConvI2L src) shift));
  ins_cost(100);
  format %{ "salL $dst, $src, $shift @ salL_convI2L_Reg_imm" %}
  ins_encode %{
    Register src_reg = as_Register($src$$reg);
    Register dst_reg = as_Register($dst$$reg);
    int shamt = $shift$$constant;

    if (__ is_simm(shamt, 5)) {
      __ dsll(dst_reg, src_reg, shamt);
    } else {
      int sa = Assembler::low(shamt, 6);
      if (sa < 32) {
        __ dsll(dst_reg, src_reg, sa);
      } else {
        __ dsll32(dst_reg, src_reg, sa - 32);
      }
    }
  %}
  ins_pipe( ialu_regL_regL );
%}
// Shift Right Long
// Arithmetic right shift by immediate; dsra handles 0..31, dsra32 the
// 32..63 range (shifting by shamt-32).
instruct sarL_Reg_imm(mRegL dst, mRegL src, immI8 shift) %{
  match(Set dst (RShiftL src shift));
  ins_cost(100);
  format %{ "sarL $dst, $src, $shift @ sarL_Reg_imm" %}
  ins_encode %{
    Register src_reg = as_Register($src$$reg);
    Register dst_reg = as_Register($dst$$reg);
    int shamt = ($shift$$constant & 0x3f);   // Java: shift & 63
    if (__ is_simm(shamt, 5))                // 0..15
      __ dsra(dst_reg, src_reg, shamt);
    else {
      int sa = Assembler::low(shamt, 6);
      if (sa < 32) {
        __ dsra(dst_reg, src_reg, sa);
      } else {
        __ dsra32(dst_reg, src_reg, sa - 32);
      }
    }
  %}
  ins_pipe( ialu_regL_regL );
%}

// (int)(src >> shift) for shift in 32..63: one dsra32 both shifts and
// leaves a properly sign-extended 32-bit result.
instruct sarL2I_Reg_immI_32_63(mRegI dst, mRegL src, immI_32_63 shift) %{
  match(Set dst (ConvL2I (RShiftL src shift)));
  ins_cost(100);
  format %{ "sarL $dst, $src, $shift @ sarL2I_Reg_immI_32_63" %}
  ins_encode %{
    Register src_reg = as_Register($src$$reg);
    Register dst_reg = as_Register($dst$$reg);
    int shamt = $shift$$constant;

    __ dsra32(dst_reg, src_reg, shamt - 32);
  %}
  ins_pipe( ialu_regL_regL );
%}

// Shift Right Long arithmetically
// Variable count; dsrav uses the low 6 bits of the count register.
instruct sarL_Reg_Reg(mRegL dst, mRegL src, mRegI shift) %{
  //predicate(UseNewLongLShift);
  match(Set dst (RShiftL src shift));
  ins_cost(100);
  format %{ "sarL $dst, $src, $shift @ sarL_Reg_Reg" %}
  ins_encode %{
    Register src_reg = as_Register($src$$reg);
    Register dst_reg = as_Register($dst$$reg);

    __ dsrav(dst_reg, src_reg, $shift$$Register);
  %}
  ins_pipe( ialu_regL_regL );
%}

// Shift Right Long logically
// Variable count; dsrlv uses the low 6 bits of the count register.
instruct slrL_Reg_Reg(mRegL dst, mRegL src, mRegI shift) %{
  match(Set dst (URShiftL src shift));
  ins_cost(100);
  format %{ "slrL $dst, $src, $shift @ slrL_Reg_Reg" %}
  ins_encode %{
    Register src_reg = as_Register($src$$reg);
    Register dst_reg = as_Register($dst$$reg);

    __ dsrlv(dst_reg, src_reg, $shift$$Register);
  %}
  ins_pipe( ialu_regL_regL );
%}
// Logical long right shift by immediate 0..31: single dsrl.
instruct slrL_Reg_immI_0_31(mRegL dst, mRegL src, immI_0_31 shift) %{
  match(Set dst (URShiftL src shift));
  ins_cost(80);
  format %{ "slrL $dst, $src, $shift @ slrL_Reg_immI_0_31" %}
  ins_encode %{
    Register src_reg = as_Register($src$$reg);
    Register dst_reg = as_Register($dst$$reg);
    int shamt = $shift$$constant;

    __ dsrl(dst_reg, src_reg, shamt);
  %}
  ins_pipe( ialu_regL_regL );
%}

// (int)(src >>> shift) & MAX_INT: equivalent to extracting 31 bits
// starting at `shift`, so a single dext does shift + mask.
instruct slrL_Reg_immI_0_31_and_max_int(mRegI dst, mRegL src, immI_0_31 shift, immI_MaxI max_int) %{
  match(Set dst (AndI (ConvL2I (URShiftL src shift)) max_int));
  ins_cost(80);
  format %{ "dext $dst, $src, $shift, 31 @ slrL_Reg_immI_0_31_and_max_int" %}
  ins_encode %{
    Register src_reg = as_Register($src$$reg);
    Register dst_reg = as_Register($dst$$reg);
    int shamt = $shift$$constant;

    __ dext(dst_reg, src_reg, shamt, 31);
  %}
  ins_pipe( ialu_regL_regL );
%}

// Logical right shift of a pointer reinterpreted as long (CastP2X is a
// bit-for-bit reinterpretation, so dsrl applies directly).
instruct slrL_P2XReg_immI_0_31(mRegL dst, mRegP src, immI_0_31 shift) %{
  match(Set dst (URShiftL (CastP2X src) shift));
  ins_cost(80);
  format %{ "slrL $dst, $src, $shift @ slrL_P2XReg_immI_0_31" %}
  ins_encode %{
    Register src_reg = as_Register($src$$reg);
    Register dst_reg = as_Register($dst$$reg);
    int shamt = $shift$$constant;

    __ dsrl(dst_reg, src_reg, shamt);
  %}
  ins_pipe( ialu_regL_regL );
%}

// Logical long right shift by immediate 32..63: dsrl32 (shifts by
// shamt-32).
instruct slrL_Reg_immI_32_63(mRegL dst, mRegL src, immI_32_63 shift) %{
  match(Set dst (URShiftL src shift));
  ins_cost(80);
  format %{ "slrL $dst, $src, $shift @ slrL_Reg_immI_32_63" %}
  ins_encode %{
    Register src_reg = as_Register($src$$reg);
    Register dst_reg = as_Register($dst$$reg);
    int shamt = $shift$$constant;

    __ dsrl32(dst_reg, src_reg, shamt - 32);
  %}
  ins_pipe( ialu_regL_regL );
%}

// (int)(src >>> shift) for shift > 32: result fits in 31 bits, so the
// dsrl32 output is already a valid (non-negative) sign-extended int.
// The predicate excludes shift == 32 — presumably because the top bit
// of a 32-bit result would need explicit sign-extension — TODO confirm.
instruct slrL_Reg_immI_convL2I(mRegI dst, mRegL src, immI_32_63 shift) %{
  match(Set dst (ConvL2I (URShiftL src shift)));
  predicate(n->in(1)->in(2)->get_int() > 32);
  ins_cost(80);
  format %{ "slrL $dst, $src, $shift @ slrL_Reg_immI_convL2I" %}
  ins_encode %{
    Register src_reg = as_Register($src$$reg);
    Register dst_reg = as_Register($dst$$reg);
    int shamt = $shift$$constant;

    __ dsrl32(dst_reg, src_reg, shamt - 32);
  %}
  ins_pipe( ialu_regL_regL );
%}

// Pointer-as-long logical right shift, 32..63 range.
instruct slrL_P2XReg_immI_32_63(mRegL dst, mRegP src, immI_32_63 shift) %{
  match(Set dst (URShiftL (CastP2X src) shift));
  ins_cost(80);
  format %{ "slrL $dst, $src, $shift @ slrL_P2XReg_immI_32_63" %}
  ins_encode %{
    Register src_reg = as_Register($src$$reg);
    Register dst_reg = as_Register($dst$$reg);
    int shamt = $shift$$constant;

    __ dsrl32(dst_reg, src_reg, shamt - 32);
  %}
  ins_pipe( ialu_regL_regL );
%}
// Xor Instructions
// Xor Register with Register
instruct xorI_Reg_Reg(mRegI dst, mRegI src1, mRegI src2) %{
  match(Set dst (XorI src1 src2));

  format %{ "XOR $dst, $src1, $src2 #@xorI_Reg_Reg" %}

  ins_encode %{
    Register dst = $dst$$Register;
    Register src1 = $src1$$Register;
    Register src2 = $src2$$Register;
    __ xorr(dst, src1, src2);
    // sll by 0 re-sign-extends the low 32 bits, keeping the canonical
    // MIPS64 sign-extended int register form.
    __ sll(dst, dst, 0); /* long -> int */
  %}

  ins_pipe( ialu_regI_regI );
%}

// Or Instructions
// Or Register with Register
instruct orI_Reg_Reg(mRegI dst, mRegI src1, mRegI src2) %{
  match(Set dst (OrI src1 src2));

  format %{ "OR $dst, $src1, $src2 #@orI_Reg_Reg" %}
  ins_encode %{
    Register dst = $dst$$Register;
    Register src1 = $src1$$Register;
    Register src2 = $src2$$Register;
    __ orr(dst, src1, src2);
  %}

  ins_pipe( ialu_regI_regI );
%}

// (src >>> rshift) | ((src & 1) << lshift) with rshift + lshift == 32 is
// a rotate-right; implemented as rotr by 1 followed by a logical shift
// of rshift-1 (identical result, verified by the predicate).
instruct rotI_shr_logical_Reg(mRegI dst, mRegI src, immI_0_31 rshift, immI_0_31 lshift, immI_1 one) %{
  match(Set dst (OrI (URShiftI src rshift) (LShiftI (AndI src one) lshift)));
  predicate(32 == ((n->in(1)->in(2)->get_int() + n->in(2)->in(2)->get_int())));

  format %{ "rotr $dst, $src, 1 ...\n\t"
            "srl $dst, $dst, ($rshift-1) @ rotI_shr_logical_Reg" %}
  ins_encode %{
    Register dst = $dst$$Register;
    Register src = $src$$Register;
    int rshift = $rshift$$constant;

    __ rotr(dst, src, 1);
    if (rshift - 1) {        // skip the srl when rshift == 1
      __ srl(dst, dst, rshift - 1);
    }
  %}

  ins_pipe( ialu_regI_regI );
%}
10924 instruct orI_Reg_castP2X(mRegL dst, mRegL src1, mRegP src2) %{
10925 match(Set dst (OrI src1 (CastP2X src2)));
10927 format %{ "OR $dst, $src1, $src2 #@orI_Reg_castP2X" %}
10928 ins_encode %{
10929 Register dst = $dst$$Register;
10930 Register src1 = $src1$$Register;
10931 Register src2 = $src2$$Register;
10932 __ orr(dst, src1, src2);
10933 %}
10935 ins_pipe( ialu_regI_regI );
10936 %}
10938 // Logical Shift Right by 8-bit immediate
10939 instruct shr_logical_Reg_imm(mRegI dst, mRegI src, immI8 shift) %{
10940 match(Set dst (URShiftI src shift));
10941 // effect(KILL cr);
10943 format %{ "SRL $dst, $src, $shift #@shr_logical_Reg_imm" %}
10944 ins_encode %{
10945 Register src = $src$$Register;
10946 Register dst = $dst$$Register;
10947 int shift = $shift$$constant;
10949 __ srl(dst, src, shift);
10950 %}
10951 ins_pipe( ialu_regI_regI );
10952 %}
// (src >>> shift) & mask where mask is a contiguous low-bit run:
// folded into a single MIPS32r2 ext (bit-field extract) instruction.
// is_int_mask returns the width of the one-bits run in the mask.
10954 instruct shr_logical_Reg_imm_nonneg_mask(mRegI dst, mRegI src, immI_0_31 shift, immI_nonneg_mask mask) %{
10955 match(Set dst (AndI (URShiftI src shift) mask));
10957 format %{ "ext $dst, $src, $shift, one-bits($mask) #@shr_logical_Reg_imm_nonneg_mask" %}
10958 ins_encode %{
10959 Register src = $src$$Register;
10960 Register dst = $dst$$Register;
10961 int pos = $shift$$constant;
10962 int size = Assembler::is_int_mask($mask$$constant);
10964 __ ext(dst, src, pos, size);
10965 %}
10966 ins_pipe( ialu_regI_regI );
10967 %}
// Rotate patterns. A rotate-left by L is emitted as rotate-right by R
// where L + R == word size; each predicate checks that the two constant
// shift amounts sum to 32 (int) or 64 (long) modulo the word size.
// drotr32 encodes right-rotates of 32..63 as (operand + 32).
10969 instruct rolI_Reg_immI_0_31(mRegI dst, immI_0_31 lshift, immI_0_31 rshift)
10970 %{
10971 predicate(0 == ((n->in(1)->in(2)->get_int() + n->in(2)->in(2)->get_int()) & 0x1f));
10972 match(Set dst (OrI (LShiftI dst lshift) (URShiftI dst rshift)));
10974 ins_cost(100);
10975 format %{ "rotr $dst, $dst, $rshift #@rolI_Reg_immI_0_31" %}
10976 ins_encode %{
10977 Register dst = $dst$$Register;
10978 int sa = $rshift$$constant;
10980 __ rotr(dst, dst, sa);
10981 %}
10982 ins_pipe( ialu_regI_regI );
10983 %}
10985 instruct rolL_Reg_immI_0_31(mRegL dst, immI_32_63 lshift, immI_0_31 rshift)
10986 %{
10987 predicate(0 == ((n->in(1)->in(2)->get_int() + n->in(2)->in(2)->get_int()) & 0x3f));
10988 match(Set dst (OrL (LShiftL dst lshift) (URShiftL dst rshift)));
10990 ins_cost(100);
10991 format %{ "rotr $dst, $dst, $rshift #@rolL_Reg_immI_0_31" %}
10992 ins_encode %{
10993 Register dst = $dst$$Register;
10994 int sa = $rshift$$constant;
10996 __ drotr(dst, dst, sa);
10997 %}
10998 ins_pipe( ialu_regI_regI );
10999 %}
11001 instruct rolL_Reg_immI_32_63(mRegL dst, immI_0_31 lshift, immI_32_63 rshift)
11002 %{
11003 predicate(0 == ((n->in(1)->in(2)->get_int() + n->in(2)->in(2)->get_int()) & 0x3f));
11004 match(Set dst (OrL (LShiftL dst lshift) (URShiftL dst rshift)));
11006 ins_cost(100);
11007 format %{ "rotr $dst, $dst, $rshift #@rolL_Reg_immI_32_63" %}
11008 ins_encode %{
11009 Register dst = $dst$$Register;
11010 int sa = $rshift$$constant;
11012 __ drotr32(dst, dst, sa - 32);
11013 %}
11014 ins_pipe( ialu_regI_regI );
11015 %}
// Rotate-right forms: same lowering, operands of the Or are swapped
// relative to the rol variants above.
11017 instruct rorI_Reg_immI_0_31(mRegI dst, immI_0_31 rshift, immI_0_31 lshift)
11018 %{
11019 predicate(0 == ((n->in(1)->in(2)->get_int() + n->in(2)->in(2)->get_int()) & 0x1f));
11020 match(Set dst (OrI (URShiftI dst rshift) (LShiftI dst lshift)));
11022 ins_cost(100);
11023 format %{ "rotr $dst, $dst, $rshift #@rorI_Reg_immI_0_31" %}
11024 ins_encode %{
11025 Register dst = $dst$$Register;
11026 int sa = $rshift$$constant;
11028 __ rotr(dst, dst, sa);
11029 %}
11030 ins_pipe( ialu_regI_regI );
11031 %}
11033 instruct rorL_Reg_immI_0_31(mRegL dst, immI_0_31 rshift, immI_32_63 lshift)
11034 %{
11035 predicate(0 == ((n->in(1)->in(2)->get_int() + n->in(2)->in(2)->get_int()) & 0x3f));
11036 match(Set dst (OrL (URShiftL dst rshift) (LShiftL dst lshift)));
11038 ins_cost(100);
11039 format %{ "rotr $dst, $dst, $rshift #@rorL_Reg_immI_0_31" %}
11040 ins_encode %{
11041 Register dst = $dst$$Register;
11042 int sa = $rshift$$constant;
11044 __ drotr(dst, dst, sa);
11045 %}
11046 ins_pipe( ialu_regI_regI );
11047 %}
11049 instruct rorL_Reg_immI_32_63(mRegL dst, immI_32_63 rshift, immI_0_31 lshift)
11050 %{
11051 predicate(0 == ((n->in(1)->in(2)->get_int() + n->in(2)->in(2)->get_int()) & 0x3f));
11052 match(Set dst (OrL (URShiftL dst rshift) (LShiftL dst lshift)));
11054 ins_cost(100);
11055 format %{ "rotr $dst, $dst, $rshift #@rorL_Reg_immI_32_63" %}
11056 ins_encode %{
11057 Register dst = $dst$$Register;
11058 int sa = $rshift$$constant;
11060 __ drotr32(dst, dst, sa - 32);
11061 %}
11062 ins_pipe( ialu_regI_regI );
11063 %}
11065 // Logical Shift Right
// Variable-amount forms: srlv/srav take the shift count from a register
// (hardware uses only the low 5 bits of the count).
11066 instruct shr_logical_Reg_Reg(mRegI dst, mRegI src, mRegI shift) %{
11067 match(Set dst (URShiftI src shift));
11069 format %{ "SRL $dst, $src, $shift #@shr_logical_Reg_Reg" %}
11070 ins_encode %{
11071 Register src = $src$$Register;
11072 Register dst = $dst$$Register;
11073 Register shift = $shift$$Register;
11074 __ srlv(dst, src, shift);
11075 %}
11076 ins_pipe( ialu_regI_regI );
11077 %}
// Arithmetic (sign-propagating) right shift by immediate.
11080 instruct shr_arith_Reg_imm(mRegI dst, mRegI src, immI8 shift) %{
11081 match(Set dst (RShiftI src shift));
11082 // effect(KILL cr);
11084 format %{ "SRA $dst, $src, $shift #@shr_arith_Reg_imm" %}
11085 ins_encode %{
11086 Register src = $src$$Register;
11087 Register dst = $dst$$Register;
11088 int shift = $shift$$constant;
11089 __ sra(dst, src, shift);
11090 %}
11091 ins_pipe( ialu_regI_regI );
11092 %}
// Arithmetic right shift by register amount.
11094 instruct shr_arith_Reg_Reg(mRegI dst, mRegI src, mRegI shift) %{
11095 match(Set dst (RShiftI src shift));
11096 // effect(KILL cr);
11098 format %{ "SRA $dst, $src, $shift #@shr_arith_Reg_Reg" %}
11099 ins_encode %{
11100 Register src = $src$$Register;
11101 Register dst = $dst$$Register;
11102 Register shift = $shift$$Register;
11103 __ srav(dst, src, shift);
11104 %}
11105 ins_pipe( ialu_regI_regI );
11106 %}
11108 //----------Convert Int to Boolean---------------------------------------------
// dst = (src != 0) ? 1 : 0. Implemented as "load 1, then movz to 0 when
// src is zero". When dst aliases src, src is first saved in AT because
// the daddiu would clobber it before the movz reads it.
11110 instruct convI2B(mRegI dst, mRegI src) %{
11111 match(Set dst (Conv2B src));
11113 ins_cost(100);
11114 format %{ "convI2B $dst, $src @ convI2B" %}
11115 ins_encode %{
11116 Register dst = as_Register($dst$$reg);
11117 Register src = as_Register($src$$reg);
11119 if (dst != src) {
11120 __ daddiu(dst, R0, 1);
11121 __ movz(dst, R0, src);
11122 } else {
11123 __ move(AT, src);
11124 __ daddiu(dst, R0, 1);
11125 __ movz(dst, R0, AT);
11126 }
11127 %}
11129 ins_pipe( ialu_regL_regL );
11130 %}
// int -> long: MIPS64 keeps ints sign-extended in registers, so a
// register-to-register sign extension (sll by 0) is only needed when
// dst and src differ.
11132 instruct convI2L_reg( mRegL dst, mRegI src) %{
11133 match(Set dst (ConvI2L src));
11135 ins_cost(100);
11136 format %{ "SLL $dst, $src @ convI2L_reg\t" %}
11137 ins_encode %{
11138 Register dst = as_Register($dst$$reg);
11139 Register src = as_Register($src$$reg);
11141 if(dst != src) __ sll(dst, src, 0);
11142 %}
11143 ins_pipe( ialu_regL_regL );
11144 %}
// long -> int: truncate and re-sign-extend the low 32 bits via sll 0.
11147 instruct convL2I_reg( mRegI dst, mRegL src ) %{
11148 match(Set dst (ConvL2I src));
11150 format %{ "MOV $dst, $src @ convL2I_reg" %}
11151 ins_encode %{
11152 Register dst = as_Register($dst$$reg);
11153 Register src = as_Register($src$$reg);
11155 __ sll(dst, src, 0);
11156 %}
11158 ins_pipe( ialu_regI_regI );
11159 %}
// long -> int -> long round trip: one sign-extending sll does both steps.
11161 instruct convL2I2L_reg( mRegL dst, mRegL src ) %{
11162 match(Set dst (ConvI2L (ConvL2I src)));
11164 format %{ "sll $dst, $src, 0 @ convL2I2L_reg" %}
11165 ins_encode %{
11166 Register dst = as_Register($dst$$reg);
11167 Register src = as_Register($src$$reg);
11169 __ sll(dst, src, 0);
11170 %}
11172 ins_pipe( ialu_regI_regI );
11173 %}
// long -> double: move the 64-bit value into an FPU register and convert.
11175 instruct convL2D_reg( regD dst, mRegL src ) %{
11176 match(Set dst (ConvL2D src));
11177 format %{ "convL2D $dst, $src @ convL2D_reg" %}
11178 ins_encode %{
11179 Register src = as_Register($src$$reg);
11180 FloatRegister dst = as_FloatRegister($dst$$reg);
11182 __ dmtc1(src, dst);
11183 __ cvt_d_l(dst, dst);
11184 %}
11186 ins_pipe( pipe_slow );
11187 %}
// double -> long, fast path. trunc_l_d yields max_long on overflow/NaN;
// if the result equals max_long the slow fixup distinguishes "genuinely
// large positive" (keep max_long-ish result) from NaN (force 0) per Java
// semantics.
11189 instruct convD2L_reg_fast( mRegL dst, regD src ) %{
11190 match(Set dst (ConvD2L src));
11191 ins_cost(150);
11192 format %{ "convD2L $dst, $src @ convD2L_reg_fast" %}
11193 ins_encode %{
11194 Register dst = as_Register($dst$$reg);
11195 FloatRegister src = as_FloatRegister($src$$reg);
11197 Label Done;
// Build max_long (0x7fffffffffffffff) in AT without a constant pool load.
11199 __ trunc_l_d(F30, src);
11200 // max_long: 0x7fffffffffffffff
11201 // __ set64(AT, 0x7fffffffffffffff);
11202 __ daddiu(AT, R0, -1);
11203 __ dsrl(AT, AT, 1);
11204 __ dmfc1(dst, F30);
11206 __ bne(dst, AT, Done);
11207 __ delayed()->mtc1(R0, F30);
11209 __ cvt_d_w(F30, F30);
11210 __ c_ult_d(src, F30);
11211 __ bc1f(Done);
11212 __ delayed()->daddiu(T9, R0, -1);
11214 __ c_un_d(src, src); //NaN?
// NOTE(review): subu is the 32-bit subtract; for a 64-bit min_long result
// dsubu would be expected here — confirm against the upstream port.
11215 __ subu(dst, T9, AT);
11216 __ movt(dst, R0);
11218 __ bind(Done);
11219 %}
11221 ins_pipe( pipe_slow );
11222 %}
// double -> long, slow path: checks the FCSR invalid-operation flag
// (bit 16) after trunc and falls back to SharedRuntime::d2l when the
// hardware conversion was inexactly representable; NaN returns 0 early.
11224 instruct convD2L_reg_slow( mRegL dst, regD src ) %{
11225 match(Set dst (ConvD2L src));
11226 ins_cost(250);
11227 format %{ "convD2L $dst, $src @ convD2L_reg_slow" %}
11228 ins_encode %{
11229 Register dst = as_Register($dst$$reg);
11230 FloatRegister src = as_FloatRegister($src$$reg);
11232 Label L;
11234 __ c_un_d(src, src); //NaN?
11235 __ bc1t(L);
11236 __ delayed();
11237 __ move(dst, R0);
11239 __ trunc_l_d(F30, src);
11240 __ cfc1(AT, 31);
11241 __ li(T9, 0x10000);
11242 __ andr(AT, AT, T9);
11243 __ beq(AT, R0, L);
11244 __ delayed()->dmfc1(dst, F30);
11246 __ mov_d(F12, src);
11247 __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::d2l), 1);
11248 __ move(dst, V0);
11249 __ bind(L);
11250 %}
11252 ins_pipe( pipe_slow );
11253 %}
// float -> int, fast path: hardware truncate, then force 0 if the source
// is NaN (c_un_s sets the FP condition bit; movt zeroes the result).
11255 instruct convF2I_reg_fast( mRegI dst, regF src ) %{
11256 match(Set dst (ConvF2I src));
11257 ins_cost(150);
11258 format %{ "convf2i $dst, $src @ convF2I_reg_fast" %}
11259 ins_encode %{
11260 Register dreg = $dst$$Register;
11261 FloatRegister fval = $src$$FloatRegister;
11263 __ trunc_w_s(F30, fval);
11264 __ mfc1(dreg, F30);
11265 __ c_un_s(fval, fval); //NaN?
11266 __ movt(dreg, R0);
11267 %}
11269 ins_pipe( pipe_slow );
11270 %}
// float -> int, slow path: NaN short-circuits to 0; otherwise truncate
// and, if the FCSR invalid-operation flag (bit 16) fired, call
// SharedRuntime::f2i for the Java-defined saturating result.
11272 instruct convF2I_reg_slow( mRegI dst, regF src ) %{
11273 match(Set dst (ConvF2I src));
11274 ins_cost(250);
11275 format %{ "convf2i $dst, $src @ convF2I_reg_slow" %}
11276 ins_encode %{
11277 Register dreg = $dst$$Register;
11278 FloatRegister fval = $src$$FloatRegister;
11279 Label L;
11281 __ c_un_s(fval, fval); //NaN?
11282 __ bc1t(L);
11283 __ delayed();
11284 __ move(dreg, R0);
11286 __ trunc_w_s(F30, fval);
11288 /* Call SharedRuntime:f2i() to do valid convention */
11289 __ cfc1(AT, 31);
11290 __ li(T9, 0x10000);
11291 __ andr(AT, AT, T9);
11292 __ beq(AT, R0, L);
11293 __ delayed()->mfc1(dreg, F30);
11295 __ mov_s(F12, fval);
11297 /* 2014/01/08 Fu : This bug was found when running ezDS's control-panel.
11298 * J 982 C2 javax.swing.text.BoxView.layoutMajorAxis(II[I[I)V (283 bytes) @ 0x000000555c46aa74
11299 *
11300 * An interger array index has been assigned to V0, and then changed from 1 to Integer.MAX_VALUE.
11301 * V0 is corrupted during call_VM_leaf(), and should be preserved.
11302 */
11303 if(dreg != V0) {
11304 __ push(V0);
11305 }
11306 __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::f2i), 1);
11307 if(dreg != V0) {
11308 __ move(dreg, V0);
11309 __ pop(V0);
11310 }
11311 __ bind(L);
11312 %}
11314 ins_pipe( pipe_slow );
11315 %}
// float -> long, fast path: 64-bit truncate with NaN forced to 0.
11317 instruct convF2L_reg_fast( mRegL dst, regF src ) %{
11318 match(Set dst (ConvF2L src));
11319 ins_cost(150);
11320 format %{ "convf2l $dst, $src @ convF2L_reg_fast" %}
11321 ins_encode %{
11322 Register dreg = $dst$$Register;
11323 FloatRegister fval = $src$$FloatRegister;
11325 __ trunc_l_s(F30, fval);
11326 __ dmfc1(dreg, F30);
11327 __ c_un_s(fval, fval); //NaN?
11328 __ movt(dreg, R0);
11329 %}
11331 ins_pipe( pipe_slow );
11332 %}
// float -> long, slow path: mirrors convF2I_reg_slow but via
// SharedRuntime::f2l. NOTE(review): unlike f2i above, V0 is not
// preserved around the runtime call when dst != V0 — confirm whether the
// same V0-corruption hazard applies here.
11334 instruct convF2L_reg_slow( mRegL dst, regF src ) %{
11335 match(Set dst (ConvF2L src));
11336 ins_cost(250);
11337 format %{ "convf2l $dst, $src @ convF2L_reg_slow" %}
11338 ins_encode %{
11339 Register dst = as_Register($dst$$reg);
11340 FloatRegister fval = $src$$FloatRegister;
11341 Label L;
11343 __ c_un_s(fval, fval); //NaN?
11344 __ bc1t(L);
11345 __ delayed();
11346 __ move(dst, R0);
11348 __ trunc_l_s(F30, fval);
11349 __ cfc1(AT, 31);
11350 __ li(T9, 0x10000);
11351 __ andr(AT, AT, T9);
11352 __ beq(AT, R0, L);
11353 __ delayed()->dmfc1(dst, F30);
11355 __ mov_s(F12, fval);
11356 __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::f2l), 1);
11357 __ move(dst, V0);
11358 __ bind(L);
11359 %}
11361 ins_pipe( pipe_slow );
11362 %}
// long -> float: move the 64-bit integer into the FPU register and
// convert in place with cvt.s.l.
// (Cleanup: removed an unused "Label L;" local that was never bound or
// branched to.)
11364 instruct convL2F_reg( regF dst, mRegL src ) %{
11365 match(Set dst (ConvL2F src));
11366 format %{ "convl2f $dst, $src @ convL2F_reg" %}
11367 ins_encode %{
11368 FloatRegister dst = $dst$$FloatRegister;
11369 Register src = as_Register($src$$reg);
11372 __ dmtc1(src, dst);
11373 __ cvt_s_l(dst, dst);
11374 %}
11376 ins_pipe( pipe_slow );
11377 %}
// int -> float: move to FPU register, then word-to-single conversion.
11379 instruct convI2F_reg( regF dst, mRegI src ) %{
11380 match(Set dst (ConvI2F src));
11381 format %{ "convi2f $dst, $src @ convI2F_reg" %}
11382 ins_encode %{
11383 Register src = $src$$Register;
11384 FloatRegister dst = $dst$$FloatRegister;
11386 __ mtc1(src, dst);
11387 __ cvt_s_w(dst, dst);
11388 %}
11390 ins_pipe( fpu_regF_regF );
11391 %}
// CmpLTMask with zero: dst = (p < 0) ? -1 : 0, i.e. broadcast the sign
// bit with an arithmetic shift right by 31.
11393 instruct cmpLTMask_immI0( mRegI dst, mRegI p, immI0 zero ) %{
11394 match(Set dst (CmpLTMask p zero));
11395 ins_cost(100);
11397 format %{ "sra $dst, $p, 31 @ cmpLTMask_immI0" %}
11398 ins_encode %{
11399 Register src = $p$$Register;
11400 Register dst = $dst$$Register;
11402 __ sra(dst, src, 31);
11403 %}
11404 ins_pipe( pipe_slow );
11405 %}
// General CmpLTMask: slt produces 0/1, then 0 - {0,1} yields 0/-1.
11408 instruct cmpLTMask( mRegI dst, mRegI p, mRegI q ) %{
11409 match(Set dst (CmpLTMask p q));
11410 ins_cost(400);
11412 format %{ "cmpLTMask $dst, $p, $q @ cmpLTMask" %}
11413 ins_encode %{
11414 Register p = $p$$Register;
11415 Register q = $q$$Register;
11416 Register dst = $dst$$Register;
11418 __ slt(dst, p, q);
11419 __ subu(dst, R0, dst);
11420 %}
11421 ins_pipe( pipe_slow );
11422 %}
// Pointer -> boolean: dst = (src != NULL) ? 1 : 0, same movz technique
// as convI2B, with AT used to save src when dst aliases it.
11424 instruct convP2B(mRegI dst, mRegP src) %{
11425 match(Set dst (Conv2B src));
11427 ins_cost(100);
11428 format %{ "convP2B $dst, $src @ convP2B" %}
11429 ins_encode %{
11430 Register dst = as_Register($dst$$reg);
11431 Register src = as_Register($src$$reg);
11433 if (dst != src) {
11434 __ daddiu(dst, R0, 1);
11435 __ movz(dst, R0, src);
11436 } else {
11437 __ move(AT, src);
11438 __ daddiu(dst, R0, 1);
11439 __ movz(dst, R0, AT);
11440 }
11441 %}
11443 ins_pipe( ialu_regL_regL );
11444 %}
// int -> double.
11447 instruct convI2D_reg_reg(regD dst, mRegI src) %{
11448 match(Set dst (ConvI2D src));
11449 format %{ "conI2D $dst, $src @convI2D_reg" %}
11450 ins_encode %{
11451 Register src = $src$$Register;
11452 FloatRegister dst = $dst$$FloatRegister;
11453 __ mtc1(src, dst);
11454 __ cvt_d_w(dst, dst);
11455 %}
11456 ins_pipe( fpu_regF_regF );
11457 %}
// float -> double widening.
11459 instruct convF2D_reg_reg(regD dst, regF src) %{
11460 match(Set dst (ConvF2D src));
11461 format %{ "convF2D $dst, $src\t# @convF2D_reg_reg" %}
11462 ins_encode %{
11463 FloatRegister dst = $dst$$FloatRegister;
11464 FloatRegister src = $src$$FloatRegister;
11466 __ cvt_d_s(dst, src);
11467 %}
11468 ins_pipe( fpu_regF_regF );
11469 %}
// double -> float narrowing.
11471 instruct convD2F_reg_reg(regF dst, regD src) %{
11472 match(Set dst (ConvD2F src));
11473 format %{ "convD2F $dst, $src\t# @convD2F_reg_reg" %}
11474 ins_encode %{
11475 FloatRegister dst = $dst$$FloatRegister;
11476 FloatRegister src = $src$$FloatRegister;
11478 __ cvt_s_d(dst, src);
11479 %}
11480 ins_pipe( fpu_regF_regF );
11481 %}
11483 // Convert a double to an int. If the double is a NAN, stuff a zero in instead.
// Fast path: hardware truncate; trunc_w_d yields max_int on
// overflow/NaN, so when the result equals 0x7fffffff the fixup code
// distinguishes genuine overflow from NaN (NaN forces 0).
11484 instruct convD2I_reg_reg_fast( mRegI dst, regD src ) %{
11485 match(Set dst (ConvD2I src));
11487 ins_cost(150);
11488 format %{ "convD2I $dst, $src\t# @ convD2I_reg_reg_fast" %}
11490 ins_encode %{
11491 FloatRegister src = $src$$FloatRegister;
11492 Register dst = $dst$$Register;
11494 Label Done;
11496 __ trunc_w_d(F30, src);
11497 // max_int: 2147483647
11498 __ move(AT, 0x7fffffff);
11499 __ mfc1(dst, F30);
11501 __ bne(dst, AT, Done);
11502 __ delayed()->mtc1(R0, F30);
11504 __ cvt_d_w(F30, F30);
11505 __ c_ult_d(src, F30);
11506 __ bc1f(Done);
11507 __ delayed()->addiu(T9, R0, -1);
11509 __ c_un_d(src, src); //NaN?
// -1 - max_int == min_int for the "large negative" case.
11510 __ subu32(dst, T9, AT);
11511 __ movt(dst, R0);
11513 __ bind(Done);
11514 %}
11515 ins_pipe( pipe_slow );
11516 %}
// Slow path: on FCSR invalid-operation (bit 16) fall back to
// SharedRuntime::d2i for the Java-defined saturating result.
11518 instruct convD2I_reg_reg_slow( mRegI dst, regD src ) %{
11519 match(Set dst (ConvD2I src));
11521 ins_cost(250);
11522 format %{ "convD2I $dst, $src\t# @ convD2I_reg_reg_slow" %}
11524 ins_encode %{
11525 FloatRegister src = $src$$FloatRegister;
11526 Register dst = $dst$$Register;
11527 Label L;
11529 __ trunc_w_d(F30, src);
11530 __ cfc1(AT, 31);
11531 __ li(T9, 0x10000);
11532 __ andr(AT, AT, T9);
11533 __ beq(AT, R0, L);
11534 __ delayed()->mfc1(dst, F30);
11536 __ mov_d(F12, src);
11537 __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::d2i), 1);
11538 __ move(dst, V0);
11539 __ bind(L);
11541 %}
11542 ins_pipe( pipe_slow );
11543 %}
11545 // Convert oop pointer into compressed form
// Maybe-null variant: the macroAssembler helper handles NULL.
11546 instruct encodeHeapOop(mRegN dst, mRegP src) %{
11547 predicate(n->bottom_type()->make_ptr()->ptr() != TypePtr::NotNull);
11548 match(Set dst (EncodeP src));
11549 format %{ "encode_heap_oop $dst,$src" %}
11550 ins_encode %{
11551 Register src = $src$$Register;
11552 Register dst = $dst$$Register;
11553 if (src != dst) {
11554 __ move(dst, src);
11555 }
11556 __ encode_heap_oop(dst);
11557 %}
11558 ins_pipe( ialu_regL_regL );
11559 %}
// Not-null variant skips the NULL check inside the helper.
11561 instruct encodeHeapOop_not_null(mRegN dst, mRegP src) %{
11562 predicate(n->bottom_type()->make_ptr()->ptr() == TypePtr::NotNull);
11563 match(Set dst (EncodeP src));
11564 format %{ "encode_heap_oop_not_null $dst,$src @ encodeHeapOop_not_null" %}
11565 ins_encode %{
11566 __ encode_heap_oop_not_null($dst$$Register, $src$$Register);
11567 %}
11568 ins_pipe( ialu_regL_regL );
11569 %}
// Decompress a narrow oop that may be NULL (and is not a constant).
11571 instruct decodeHeapOop(mRegP dst, mRegN src) %{
11572 predicate(n->bottom_type()->is_ptr()->ptr() != TypePtr::NotNull &&
11573 n->bottom_type()->is_ptr()->ptr() != TypePtr::Constant);
11574 match(Set dst (DecodeN src));
11575 format %{ "decode_heap_oop $dst,$src @ decodeHeapOop" %}
11576 ins_encode %{
11577 Register s = $src$$Register;
11578 Register d = $dst$$Register;
11579 if (s != d) {
11580 __ move(d, s);
11581 }
11582 __ decode_heap_oop(d);
11583 %}
11584 ins_pipe( ialu_regL_regL );
11585 %}
// Decompress a known-non-null narrow oop; two-register helper form when
// src and dst differ, in-place form otherwise.
11587 instruct decodeHeapOop_not_null(mRegP dst, mRegN src) %{
11588 predicate(n->bottom_type()->is_ptr()->ptr() == TypePtr::NotNull ||
11589 n->bottom_type()->is_ptr()->ptr() == TypePtr::Constant);
11590 match(Set dst (DecodeN src));
11591 format %{ "decode_heap_oop_not_null $dst,$src @ decodeHeapOop_not_null" %}
11592 ins_encode %{
11593 Register s = $src$$Register;
11594 Register d = $dst$$Register;
11595 if (s != d) {
11596 __ decode_heap_oop_not_null(d, s);
11597 } else {
11598 __ decode_heap_oop_not_null(d);
11599 }
11600 %}
11601 ins_pipe( ialu_regL_regL );
11602 %}
// Compress a klass pointer (always non-null).
11604 instruct encodeKlass_not_null(mRegN dst, mRegP src) %{
11605 match(Set dst (EncodePKlass src));
11606 format %{ "encode_heap_oop_not_null $dst,$src @ encodeKlass_not_null" %}
11607 ins_encode %{
11608 __ encode_klass_not_null($dst$$Register, $src$$Register);
11609 %}
11610 ins_pipe( ialu_regL_regL );
11611 %}
// Decompress a narrow klass pointer (always non-null).
11613 instruct decodeKlass_not_null(mRegP dst, mRegN src) %{
11614 match(Set dst (DecodeNKlass src));
11615 format %{ "decode_heap_klass_not_null $dst,$src" %}
11616 ins_encode %{
11617 Register s = $src$$Register;
11618 Register d = $dst$$Register;
11619 if (s != d) {
11620 __ decode_klass_not_null(d, s);
11621 } else {
11622 __ decode_klass_not_null(d);
11623 }
11624 %}
11625 ins_pipe( ialu_regL_regL );
11626 %}
11628 //FIXME
// Load the current JavaThread pointer. With OPT_THREAD the thread lives
// in a dedicated register (TREG); otherwise it is recomputed.
11629 instruct tlsLoadP(mRegP dst) %{
11630 match(Set dst (ThreadLocal));
11632 ins_cost(0);
11633 format %{ " get_thread in $dst #@tlsLoadP" %}
11634 ins_encode %{
11635 Register dst = $dst$$Register;
11636 #ifdef OPT_THREAD
11637 __ move(dst, TREG);
11638 #else
11639 __ get_thread(dst);
11640 #endif
11641 %}
11643 ins_pipe( ialu_loadI );
11644 %}
// CheckCastPP/CastPP/CastII are compile-time type assertions only: they
// emit no machine code.
11647 instruct checkCastPP( mRegP dst ) %{
11648 match(Set dst (CheckCastPP dst));
11650 format %{ "#checkcastPP of $dst (empty encoding) #@chekCastPP" %}
11651 ins_encode( /*empty encoding*/ );
11652 ins_pipe( empty );
11653 %}
11655 instruct castPP(mRegP dst)
11656 %{
11657 match(Set dst (CastPP dst));
11659 size(0);
11660 format %{ "# castPP of $dst" %}
11661 ins_encode(/* empty encoding */);
11662 ins_pipe(empty);
11663 %}
11665 instruct castII( mRegI dst ) %{
11666 match(Set dst (CastII dst));
11667 format %{ "#castII of $dst empty encoding" %}
11668 ins_encode( /*empty encoding*/ );
11669 ins_cost(0);
11670 ins_pipe( empty );
11671 %}
11673 // Return Instruction
11674 // Remove the return address & jump to it.
// jr through RA; the trailing nop fills the branch delay slot.
11675 instruct Ret() %{
11676 match(Return);
11677 format %{ "RET #@Ret" %}
11679 ins_encode %{
11680 __ jr(RA);
11681 __ nop();
11682 %}
11684 ins_pipe( pipe_jump );
11685 %}
11687 /*
11688 // For Loongson CPUs, jr seems too slow, so this rule shouldn't be imported.
11689 instruct jumpXtnd(mRegL switch_val) %{
11690 match(Jump switch_val);
11692 ins_cost(350);
11694 format %{ "load T9 <-- [$constanttablebase, $switch_val, $constantoffset] @ jumpXtnd\n\t"
11695 "jr T9\n\t"
11696 "nop" %}
11697 ins_encode %{
11698 Register table_base = $constanttablebase;
11699 int con_offset = $constantoffset;
11700 Register switch_reg = $switch_val$$Register;
11702 if (UseLoongsonISA) {
11703 if (Assembler::is_simm(con_offset, 8)) {
11704 __ gsldx(T9, table_base, switch_reg, con_offset);
11705 } else if (Assembler::is_simm16(con_offset)) {
11706 __ daddu(T9, table_base, switch_reg);
11707 __ ld(T9, T9, con_offset);
11708 } else {
11709 __ move(T9, con_offset);
11710 __ daddu(AT, table_base, switch_reg);
11711 __ gsldx(T9, AT, T9, 0);
11712 }
11713 } else {
11714 if (Assembler::is_simm16(con_offset)) {
11715 __ daddu(T9, table_base, switch_reg);
11716 __ ld(T9, T9, con_offset);
11717 } else {
11718 __ move(T9, con_offset);
11719 __ daddu(AT, table_base, switch_reg);
11720 __ daddu(AT, T9, AT);
11721 __ ld(T9, AT, 0);
11722 }
11723 }
11725 __ jr(T9);
11726 __ nop();
11728 %}
11729 ins_pipe(pipe_jump);
11730 %}
11731 */
11733 // Jump Direct - Label defines a relative address from JMP
// A NULL label pointer means the target is not yet known; emit a branch
// with offset 0 to be patched later. Delay slot filled with nop.
11734 instruct jmpDir(label labl) %{
11735 match(Goto);
11736 effect(USE labl);
11738 ins_cost(300);
11739 format %{ "JMP $labl #@jmpDir" %}
11741 ins_encode %{
11742 Label &L = *($labl$$label);
11743 if(&L)
11744 __ b(L);
11745 else
11746 __ b(int(0));
11747 __ nop();
11748 %}
11750 ins_pipe( pipe_jump );
11751 ins_pc_relative(1);
11752 %}
11756 // Tail Jump; remove the return address; jump to target.
11757 // TailCall above leaves the return address around.
11758 // TailJump is used in only one place, the rethrow_Java stub (fancy_jump=2).
11759 // ex_oop (Exception Oop) is needed in %o0 at the jump. As there would be a
11760 // "restore" before this instruction (in Epilogue), we need to materialize it
11761 // in %i0.
11762 //FIXME
// Places the exception oop in V0 and the exception pc (the caller RA)
// in V1 before the indirect jump, per the stub calling convention cited
// in the comment below.
11763 instruct tailjmpInd(mRegP jump_target,mRegP ex_oop) %{
11764 match( TailJump jump_target ex_oop );
11765 ins_cost(200);
11766 format %{ "Jmp $jump_target ; ex_oop = $ex_oop #@tailjmpInd" %}
11767 ins_encode %{
11768 Register target = $jump_target$$Register;
11770 /* 2012/9/14 Jin: V0, V1 are indicated in:
11771 * [stubGenerator_mips.cpp] generate_forward_exception()
11772 * [runtime_mips.cpp] OptoRuntime::generate_exception_blob()
11773 */
11774 Register oop = $ex_oop$$Register;
11775 Register exception_oop = V0;
11776 Register exception_pc = V1;
11778 __ move(exception_pc, RA);
11779 __ move(exception_oop, oop);
11781 __ jr(target);
11782 __ nop();
11783 %}
11784 ins_pipe( pipe_jump );
11785 %}
11787 // ============================================================================
11788 // Procedure Call/Return Instructions
11789 // Call Java Static Instruction
11790 // Note: If this code changes, the corresponding ret_addr_offset() and
11791 // compute_padding() functions will have to be adjusted.
11792 instruct CallStaticJavaDirect(method meth) %{
11793 match(CallStaticJava);
11794 effect(USE meth);
11796 ins_cost(300);
11797 format %{ "CALL,static #@CallStaticJavaDirect " %}
11798 ins_encode( Java_Static_Call( meth ) );
11799 ins_pipe( pipe_slow );
11800 ins_pc_relative(1);
11801 ins_alignment(16);
11802 %}
11804 // Call Java Dynamic Instruction
11805 // Note: If this code changes, the corresponding ret_addr_offset() and
11806 // compute_padding() functions will have to be adjusted.
11807 instruct CallDynamicJavaDirect(method meth) %{
11808 match(CallDynamicJava);
11809 effect(USE meth);
11811 ins_cost(300);
11812 format %{"MOV IC_Klass, (oop)-1 @ CallDynamicJavaDirect\n\t"
11813 "CallDynamic @ CallDynamicJavaDirect" %}
11814 ins_encode( Java_Dynamic_Call( meth ) );
11815 ins_pipe( pipe_slow );
11816 ins_pc_relative(1);
11817 ins_alignment(16);
11818 %}
// Runtime leaf call with no FP arguments/saves.
11820 instruct CallLeafNoFPDirect(method meth) %{
11821 match(CallLeafNoFP);
11822 effect(USE meth);
11824 ins_cost(300);
11825 format %{ "CALL_LEAF_NOFP,runtime " %}
11826 ins_encode(Java_To_Runtime(meth));
11827 ins_pipe( pipe_slow );
11828 ins_pc_relative(1);
11829 ins_alignment(16);
11830 %}
11832 // Prefetch instructions.
// Prefetch-for-read: compute the effective address
// base + (index << scale) + disp into AT, then issue pref with hint 0
// (load). AT accumulates the address; T9 is scratch for a non-simm16
// displacement.
//
// Bug fix: the displacement was previously folded starting from 'base'
// again — the simm16 branch emitted daddiu(AT, base, disp) followed by
// daddiu(AT, AT, disp) (disp added twice, index dropped), and the large
// branch emitted daddu(AT, base, T9) (index dropped). Both branches now
// add disp exactly once onto the accumulated AT.
11834 instruct prefetchrNTA( memory mem ) %{
11835 match(PrefetchRead mem);
11836 ins_cost(125);
11838 format %{ "pref $mem\t# Prefetch into non-temporal cache for read @ prefetchrNTA" %}
11839 ins_encode %{
11840 int base = $mem$$base;
11841 int index = $mem$$index;
11842 int scale = $mem$$scale;
11843 int disp = $mem$$disp;
11845 if( index != 0 ) {
11846 if (scale == 0) {
11847 __ daddu(AT, as_Register(base), as_Register(index));
11848 } else {
11849 __ dsll(AT, as_Register(index), scale);
11850 __ daddu(AT, as_Register(base), AT);
11851 }
11852 } else {
11853 __ move(AT, as_Register(base));
11854 }
11855 if( Assembler::is_simm16(disp) ) {
11856 __ daddiu(AT, AT, disp);
11858 } else {
11859 __ move(T9, disp);
11860 __ daddu(AT, AT, T9);
11861 }
11862 __ pref(0, AT, 0); //hint: 0:load
11863 %}
11864 ins_pipe(pipe_slow);
11865 %}
// Prefetch-for-write: same effective-address computation as
// prefetchrNTA but issues pref with hint 1 (store).
//
// Bug fix (same copy-paste defect as prefetchrNTA): the displacement
// branches restarted from 'base', discarding the index contribution and
// (in the simm16 case) adding disp twice. disp is now added once onto
// the accumulated address in AT.
11867 instruct prefetchwNTA( memory mem ) %{
11868 match(PrefetchWrite mem);
11869 ins_cost(125);
11870 format %{ "pref $mem\t# Prefetch to non-temporal cache for write @ prefetchwNTA" %}
11871 ins_encode %{
11872 int base = $mem$$base;
11873 int index = $mem$$index;
11874 int scale = $mem$$scale;
11875 int disp = $mem$$disp;
11877 if( index != 0 ) {
11878 if (scale == 0) {
11879 __ daddu(AT, as_Register(base), as_Register(index));
11880 } else {
11881 __ dsll(AT, as_Register(index), scale);
11882 __ daddu(AT, as_Register(base), AT);
11883 }
11884 } else {
11885 __ move(AT, as_Register(base));
11886 }
11887 if( Assembler::is_simm16(disp) ) {
11888 __ daddiu(AT, AT, disp);
11890 } else {
11891 __ move(T9, disp);
11892 __ daddu(AT, AT, T9);
11893 }
11894 __ pref(1, AT, 0); //hint: 1:store
11895 %}
11896 ins_pipe(pipe_slow);
11897 %}
11899 // Prefetch instructions for allocation.
// Implemented as a byte load into R0 (architectural zero, result
// discarded) rather than pref; gslbx (Loongson extension) folds
// base+index+disp into one instruction when available.
11901 instruct prefetchAllocNTA( memory mem ) %{
11902 match(PrefetchAllocation mem);
11903 ins_cost(125);
11904 format %{ "pref $mem\t# Prefetch allocation @ prefetchAllocNTA" %}
11905 ins_encode %{
11906 int base = $mem$$base;
11907 int index = $mem$$index;
11908 int scale = $mem$$scale;
11909 int disp = $mem$$disp;
11911 Register dst = R0;
11913 if( index != 0 ) {
11914 if( Assembler::is_simm16(disp) ) {
11915 if( UseLoongsonISA ) {
11916 if (scale == 0) {
11917 __ gslbx(dst, as_Register(base), as_Register(index), disp);
11918 } else {
11919 __ dsll(AT, as_Register(index), scale);
11920 __ gslbx(dst, as_Register(base), AT, disp);
11921 }
11922 } else {
11923 if (scale == 0) {
11924 __ addu(AT, as_Register(base), as_Register(index));
11925 } else {
11926 __ dsll(AT, as_Register(index), scale);
11927 __ addu(AT, as_Register(base), AT);
11928 }
11929 __ lb(dst, AT, disp);
11930 }
11931 } else {
// Large displacement: materialize disp in T9 and add it in.
11932 if (scale == 0) {
11933 __ addu(AT, as_Register(base), as_Register(index));
11934 } else {
11935 __ dsll(AT, as_Register(index), scale);
11936 __ addu(AT, as_Register(base), AT);
11937 }
11938 __ move(T9, disp);
11939 if( UseLoongsonISA ) {
11940 __ gslbx(dst, AT, T9, 0);
11941 } else {
11942 __ addu(AT, AT, T9);
11943 __ lb(dst, AT, 0);
11944 }
11945 }
11946 } else {
// No index register: address is base + disp only.
11947 if( Assembler::is_simm16(disp) ) {
11948 __ lb(dst, as_Register(base), disp);
11949 } else {
11950 __ move(T9, disp);
11951 if( UseLoongsonISA ) {
11952 __ gslbx(dst, as_Register(base), T9, 0);
11953 } else {
11954 __ addu(AT, as_Register(base), T9);
11955 __ lb(dst, AT, 0);
11956 }
11957 }
11958 }
11959 %}
11960 ins_pipe(pipe_slow);
11961 %}
11964 // Call runtime without safepoint
11965 instruct CallLeafDirect(method meth) %{
11966 match(CallLeaf);
11967 effect(USE meth);
11969 ins_cost(300);
11970 format %{ "CALL_LEAF,runtime #@CallLeafDirect " %}
11971 ins_encode(Java_To_Runtime(meth));
11972 ins_pipe( pipe_slow );
11973 ins_pc_relative(1);
11974 ins_alignment(16);
11975 %}
11977 // Load Char (16bit unsigned)
11978 instruct loadUS(mRegI dst, memory mem) %{
11979 match(Set dst (LoadUS mem));
11981 ins_cost(125);
11982 format %{ "loadUS $dst,$mem @ loadC" %}
11983 ins_encode(load_C_enc(dst, mem));
11984 ins_pipe( ialu_loadI );
11985 %}
// Char load fused with int->long widening; zero-extended char needs no
// extra conversion, so the same encoding is reused.
11987 instruct loadUS_convI2L(mRegL dst, memory mem) %{
11988 match(Set dst (ConvI2L (LoadUS mem)));
11990 ins_cost(125);
11991 format %{ "loadUS $dst,$mem @ loadUS_convI2L" %}
11992 ins_encode(load_C_enc(dst, mem));
11993 ins_pipe( ialu_loadI );
11994 %}
11996 // Store Char (16bit unsigned)
11997 instruct storeC(memory mem, mRegI src) %{
11998 match(Set mem (StoreC mem src));
12000 ins_cost(125);
12001 format %{ "storeC $src,$mem @ storeC" %}
12002 ins_encode(store_C_reg_enc(mem, src));
12003 ins_pipe( ialu_loadI );
12004 %}
// Float constant 0.0f: move integer zero into the FP register instead
// of a constant-table load.
12007 instruct loadConF0(regF dst, immF0 zero) %{
12008 match(Set dst zero);
12009 ins_cost(100);
12011 format %{ "mov $dst, zero @ loadConF0\n"%}
12012 ins_encode %{
12013 FloatRegister dst = $dst$$FloatRegister;
12015 __ mtc1(R0, dst);
12016 %}
12017 ins_pipe( fpu_loadF );
12018 %}
// Non-zero float constant: load from the constant table. Small offsets
// use lwc1 directly; large ones build the offset in AT and use the
// Loongson indexed load (gslwxc1) when available.
12021 instruct loadConF(regF dst, immF src) %{
12022 match(Set dst src);
12023 ins_cost(125);
12025 format %{ "lwc1 $dst, $constantoffset[$constanttablebase] # load FLOAT $src from table @ loadConF" %}
12026 ins_encode %{
12027 int con_offset = $constantoffset($src);
12029 if (Assembler::is_simm16(con_offset)) {
12030 __ lwc1($dst$$FloatRegister, $constanttablebase, con_offset);
12031 } else {
12032 __ set64(AT, con_offset);
12033 if (UseLoongsonISA) {
12034 __ gslwxc1($dst$$FloatRegister, $constanttablebase, AT, 0);
12035 } else {
12036 __ daddu(AT, $constanttablebase, AT);
12037 __ lwc1($dst$$FloatRegister, AT, 0);
12038 }
12039 }
12040 %}
12041 ins_pipe( fpu_loadF );
12042 %}
// Load the double constant +0.0d: an all-zero 64-bit pattern is +0.0d,
// so a single dmtc1 from R0 suffices (no constant-table load).
instruct loadConD0(regD dst, immD0 zero) %{
  match(Set dst zero);
  ins_cost(100);

  format %{ "mov $dst, zero @ loadConD0"%}
  ins_encode %{
    FloatRegister dst = as_FloatRegister($dst$$reg);

    __ dmtc1(R0, dst);
  %}
  ins_pipe( fpu_loadF );
%}
// Load a double constant from the constant table.
// Mirrors loadConF: simm16 fast path via ldc1, otherwise AT-based
// addressing with the Loongson indexed load (gsldxc1) when available.
instruct loadConD(regD dst, immD src) %{
  match(Set dst src);
  ins_cost(125);

  format %{ "ldc1 $dst, $constantoffset[$constanttablebase] # load DOUBLE $src from table @ loadConD" %}
  ins_encode %{
    int con_offset = $constantoffset($src);

    if (Assembler::is_simm16(con_offset)) {
      __ ldc1($dst$$FloatRegister, $constanttablebase, con_offset);
    } else {
      // Offset does not fit in the 16-bit displacement field.
      __ set64(AT, con_offset);
      if (UseLoongsonISA) {
        __ gsldxc1($dst$$FloatRegister, $constanttablebase, AT, 0);
      } else {
        __ daddu(AT, $constanttablebase, AT);
        __ ldc1($dst$$FloatRegister, AT, 0);
      }
    }
  %}
  ins_pipe( fpu_loadF );
%}
// Store register Float value (it is faster than store from FPU register)
instruct storeF_reg( memory mem, regF src) %{
  match(Set mem (StoreF mem src));

  ins_cost(50);
  format %{ "store $mem, $src\t# store float @ storeF_reg" %}
  ins_encode(store_F_reg_enc(mem, src));
  ins_pipe( fpu_storeF );
%}
// Store float +0.0f to memory without touching an FPU register:
// the bit pattern of +0.0f is all zeros, so an integer sw of R0 works.
instruct storeF_imm0( memory mem, immF0 zero) %{
  match(Set mem (StoreF mem zero));

  ins_cost(40);
  format %{ "store $mem, zero\t# store float @ storeF_imm0" %}
  ins_encode %{
    int base = $mem$$base;
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;

    if( index != 0 ) {
      // Effective address = base + (index << scale) + disp.
      if(scale != 0) {
        __ dsll(T9, as_Register(index), scale);
        __ addu(AT, as_Register(base), T9);
      } else {
        __ daddu(AT, as_Register(base), as_Register(index));
      }
      if( Assembler::is_simm16(disp) ) {
        __ sw(R0, AT, disp);
      } else {
        // Displacement too large for the 16-bit field: add it via T9.
        __ move(T9, disp);
        __ addu(AT, AT, T9);
        __ sw(R0, AT, 0);
      }

    } else {
      if( Assembler::is_simm16(disp) ) {
        __ sw(R0, as_Register(base), disp);
      } else {
        __ move(T9, disp);
        __ addu(AT, as_Register(base), T9);
        __ sw(R0, AT, 0);
      }
    }
    // NOTE(review): the address arithmetic mixes 32-bit addu with
    // 64-bit daddu/dsll; presumably safe for the address ranges C2
    // generates here — confirm against the storeD_imm0 encoding.
  %}
  ins_pipe( ialu_storeI );
%}
// Load Double
// Loads a 64-bit double from memory into an FPU register.
instruct loadD(regD dst, memory mem) %{
  match(Set dst (LoadD mem));

  ins_cost(150);
  format %{ "loadD $dst, $mem #@loadD" %}
  ins_encode(load_D_enc(dst, mem));
  ins_pipe( ialu_loadI );
%}
// Load Double - UNaligned
// Currently reuses the aligned load encoding; see FIXME below.
instruct loadD_unaligned(regD dst, memory mem ) %{
  match(Set dst (LoadD_unaligned mem));
  ins_cost(250);
  // FIXME: Jin: Need more effective ldl/ldr
  format %{ "loadD_unaligned $dst, $mem #@loadD_unaligned" %}
  ins_encode(load_D_enc(dst, mem));
  ins_pipe( ialu_loadI );
%}
12150 instruct storeD_reg( memory mem, regD src) %{
12151 match(Set mem (StoreD mem src));
12153 ins_cost(50);
12154 format %{ "store $mem, $src\t# store float @ storeD_reg" %}
12155 ins_encode(store_D_reg_enc(mem, src));
12156 ins_pipe( fpu_storeF );
12157 %}
12159 instruct storeD_imm0( memory mem, immD0 zero) %{
12160 match(Set mem (StoreD mem zero));
12162 ins_cost(40);
12163 format %{ "store $mem, zero\t# store float @ storeD_imm0" %}
12164 ins_encode %{
12165 int base = $mem$$base;
12166 int index = $mem$$index;
12167 int scale = $mem$$scale;
12168 int disp = $mem$$disp;
12170 __ mtc1(R0, F30);
12171 __ cvt_d_w(F30, F30);
12173 if( index != 0 ) {
12174 if(scale != 0) {
12175 __ dsll(T9, as_Register(index), scale);
12176 __ addu(AT, as_Register(base), T9);
12177 } else {
12178 __ daddu(AT, as_Register(base), as_Register(index));
12179 }
12180 if( Assembler::is_simm16(disp) ) {
12181 __ sdc1(F30, AT, disp);
12182 } else {
12183 __ move(T9, disp);
12184 __ addu(AT, AT, T9);
12185 __ sdc1(F30, AT, 0);
12186 }
12188 } else {
12189 if( Assembler::is_simm16(disp) ) {
12190 __ sdc1(F30, as_Register(base), disp);
12191 } else {
12192 __ move(T9, disp);
12193 __ addu(AT, as_Register(base), T9);
12194 __ sdc1(F30, AT, 0);
12195 }
12196 }
12197 %}
12198 ins_pipe( ialu_storeI );
12199 %}
// Load an int from a stack slot (SP-relative).
instruct loadSSI(mRegI dst, stackSlotI src)
%{
  match(Set dst src);

  ins_cost(125);
  format %{ "lw $dst, $src\t# int stk @ loadSSI" %}
  ins_encode %{
    // Stack-slot displacements are expected to fit in 16 bits.
    guarantee( Assembler::is_simm16($src$$disp), "disp too long (loadSSI) !");
    __ lw($dst$$Register, SP, $src$$disp);
  %}
  ins_pipe(ialu_loadI);
%}

// Store an int to a stack slot (SP-relative).
instruct storeSSI(stackSlotI dst, mRegI src)
%{
  match(Set dst src);

  ins_cost(100);
  format %{ "sw $dst, $src\t# int stk @ storeSSI" %}
  ins_encode %{
    guarantee( Assembler::is_simm16($dst$$disp), "disp too long (storeSSI) !");
    __ sw($src$$Register, SP, $dst$$disp);
  %}
  ins_pipe(ialu_storeI);
%}
// Load a long from a stack slot (SP-relative).
instruct loadSSL(mRegL dst, stackSlotL src)
%{
  match(Set dst src);

  ins_cost(125);
  format %{ "ld $dst, $src\t# long stk @ loadSSL" %}
  ins_encode %{
    // Stack-slot displacements are expected to fit in 16 bits.
    guarantee( Assembler::is_simm16($src$$disp), "disp too long (loadSSL) !");
    __ ld($dst$$Register, SP, $src$$disp);
  %}
  ins_pipe(ialu_loadI);
%}

// Store a long to a stack slot (SP-relative).
instruct storeSSL(stackSlotL dst, mRegL src)
%{
  match(Set dst src);

  ins_cost(100);
  format %{ "sd $dst, $src\t# long stk @ storeSSL" %}
  ins_encode %{
    guarantee( Assembler::is_simm16($dst$$disp), "disp too long (storeSSL) !");
    __ sd($src$$Register, SP, $dst$$disp);
  %}
  ins_pipe(ialu_storeI);
%}
// Load a pointer from a stack slot (SP-relative, 64-bit ld).
instruct loadSSP(mRegP dst, stackSlotP src)
%{
  match(Set dst src);

  ins_cost(125);
  format %{ "ld $dst, $src\t# ptr stk @ loadSSP" %}
  ins_encode %{
    guarantee( Assembler::is_simm16($src$$disp), "disp too long (loadSSP) !");
    __ ld($dst$$Register, SP, $src$$disp);
  %}
  ins_pipe(ialu_loadI);
%}

// Store a pointer to a stack slot (SP-relative, 64-bit sd).
instruct storeSSP(stackSlotP dst, mRegP src)
%{
  match(Set dst src);

  ins_cost(100);
  format %{ "sd $dst, $src\t# ptr stk @ storeSSP" %}
  ins_encode %{
    guarantee( Assembler::is_simm16($dst$$disp), "disp too long (storeSSP) !");
    __ sd($src$$Register, SP, $dst$$disp);
  %}
  ins_pipe(ialu_storeI);
%}
// Load a float from a stack slot (SP-relative).
instruct loadSSF(regF dst, stackSlotF src)
%{
  match(Set dst src);

  ins_cost(125);
  format %{ "lwc1 $dst, $src\t# float stk @ loadSSF" %}
  ins_encode %{
    guarantee( Assembler::is_simm16($src$$disp), "disp too long (loadSSF) !");
    __ lwc1($dst$$FloatRegister, SP, $src$$disp);
  %}
  ins_pipe(ialu_loadI);
%}

// Store a float to a stack slot (SP-relative).
instruct storeSSF(stackSlotF dst, regF src)
%{
  match(Set dst src);

  ins_cost(100);
  format %{ "swc1 $dst, $src\t# float stk @ storeSSF" %}
  ins_encode %{
    guarantee( Assembler::is_simm16($dst$$disp), "disp too long (storeSSF) !");
    __ swc1($src$$FloatRegister, SP, $dst$$disp);
  %}
  ins_pipe(fpu_storeF);
%}
// Use the same format since predicate() can not be used here.
// Load a double from a stack slot (SP-relative).
instruct loadSSD(regD dst, stackSlotD src)
%{
  match(Set dst src);

  ins_cost(125);
  format %{ "ldc1 $dst, $src\t# double stk @ loadSSD" %}
  ins_encode %{
    guarantee( Assembler::is_simm16($src$$disp), "disp too long (loadSSD) !");
    __ ldc1($dst$$FloatRegister, SP, $src$$disp);
  %}
  ins_pipe(ialu_loadI);
%}

// Store a double to a stack slot (SP-relative).
instruct storeSSD(stackSlotD dst, regD src)
%{
  match(Set dst src);

  ins_cost(100);
  format %{ "sdc1 $dst, $src\t# double stk @ storeSSD" %}
  ins_encode %{
    guarantee( Assembler::is_simm16($dst$$disp), "disp too long (storeSSD) !");
    __ sdc1($src$$FloatRegister, SP, $dst$$disp);
  %}
  ins_pipe(fpu_storeF);
%}
// Fast-path monitor enter. Delegates to MacroAssembler::fast_lock;
// the box register is clobbered (USE_KILL) and tmp/scr are scratch.
instruct cmpFastLock( FlagsReg cr, mRegP object, s0_RegP box, mRegI tmp, mRegP scr) %{
  match( Set cr (FastLock object box) );
  effect( TEMP tmp, TEMP scr, USE_KILL box );
  ins_cost(300);
  format %{ "FASTLOCK $cr $object, $box, $tmp #@ cmpFastLock" %}
  ins_encode %{
    __ fast_lock($object$$Register, $box$$Register, $tmp$$Register, $scr$$Register);
  %}

  ins_pipe( pipe_slow );
  ins_pc_relative(1);
%}
// Fast-path monitor exit. Delegates to MacroAssembler::fast_unlock;
// the box register is clobbered (USE_KILL) and tmp is scratch.
instruct cmpFastUnlock( FlagsReg cr, mRegP object, s0_RegP box, mRegP tmp ) %{
  match( Set cr (FastUnlock object box) );
  effect( TEMP tmp, USE_KILL box );
  ins_cost(300);
  format %{ "FASTUNLOCK $object, $box, $tmp #@cmpFastUnlock" %}
  ins_encode %{
    __ fast_unlock($object$$Register, $box$$Register, $tmp$$Register);
  %}

  ins_pipe( pipe_slow );
  ins_pc_relative(1);
%}
// Store CMS card-mark Immediate
// Byte store of an immediate used for GC card marking; the _sync
// encoding variant orders the store for the card table.
// NOTE(review): the "MOV8" format text looks like an x86 leftover;
// debug output only — confirm against store_B_immI_enc_sync.
instruct storeImmCM(memory mem, immI8 src) %{
  match(Set mem (StoreCM mem src));

  ins_cost(150);
  format %{ "MOV8 $mem,$src\t! CMS card-mark imm0" %}
  // opcode(0xC6);
  ins_encode(store_B_immI_enc_sync(mem, src));
  ins_pipe( ialu_storeI );
%}
12369 // Die now
12370 instruct ShouldNotReachHere( )
12371 %{
12372 match(Halt);
12373 ins_cost(300);
12375 // Use the following format syntax
12376 format %{ "ILLTRAP ;#@ShouldNotReachHere" %}
12377 ins_encode %{
12378 // Here we should emit illtrap !
12380 __ stop("in ShoudNotReachHere");
12382 %}
12383 ins_pipe( pipe_jump );
12384 %}
// Address computation (lea) for base + 8-bit offset with a narrow-oop
// base; only valid when compressed oops use no shift, so the address
// is a plain base + displacement.
instruct leaP8Narrow(mRegP dst, indOffset8Narrow mem)
%{
  predicate(Universe::narrow_oop_shift() == 0);
  match(Set dst mem);

  ins_cost(110);
  format %{ "leaq $dst, $mem\t# ptr off8narrow @ leaP8Narrow" %}
  ins_encode %{
    Register dst = $dst$$Register;
    Register base = as_Register($mem$$base);
    int disp = $mem$$disp;

    __ daddiu(dst, base, disp);
  %}
  ins_pipe( ialu_regI_imm16 );
%}
// Address computation (lea): dst = base + (index << scale) + disp
// for a positive scaled index with an 8-bit offset.
instruct leaPPosIdxScaleOff8(mRegP dst, basePosIndexScaleOffset8 mem)
%{
  match(Set dst mem);

  ins_cost(110);
  format %{ "leaq $dst, $mem\t# @ PosIdxScaleOff8" %}
  ins_encode %{
    Register dst = $dst$$Register;
    Register base = as_Register($mem$$base);
    Register index = as_Register($mem$$index);
    int scale = $mem$$scale;
    int disp = $mem$$disp;

    if (scale == 0) {
      __ daddu(AT, base, index);
      __ daddiu(dst, AT, disp);
    } else {
      __ dsll(AT, index, scale);
      __ daddu(AT, base, AT);
      __ daddiu(dst, AT, disp);
    }
  %}

  ins_pipe( ialu_regI_imm16 );
%}
// Address computation (lea): dst = base + (index << scale), no offset.
instruct leaPIdxScale(mRegP dst, indIndexScale mem)
%{
  match(Set dst mem);

  ins_cost(110);
  format %{ "leaq $dst, $mem\t# @ leaPIdxScale" %}
  ins_encode %{
    Register dst = $dst$$Register;
    Register base = as_Register($mem$$base);
    Register index = as_Register($mem$$index);
    int scale = $mem$$scale;

    if (scale == 0) {
      __ daddu(dst, base, index);
    } else {
      __ dsll(AT, index, scale);
      __ daddu(dst, base, AT);
    }
  %}

  ins_pipe( ialu_regI_imm16 );
%}
// Jump Direct Conditional - Label defines a relative address from Jcc+1
// Counted-loop back-branch: compare two int registers and branch
// according to the condition code carried by the cmpOp operand.
instruct jmpLoopEnd(cmpOp cop, mRegI src1, mRegI src2, label labl) %{
  match(CountedLoopEnd cop (CmpI src1 src2));
  effect(USE labl);

  ins_cost(300);
  format %{ "J$cop $src1, $src2, $labl\t# Loop end @ jmpLoopEnd" %}
  ins_encode %{
    Register op1 = $src1$$Register;
    Register op2 = $src2$$Register;
    Label &L = *($labl$$label);
    int flag = $cop$$cmpcode;

    // The "if (&L)" tests whether a Label was supplied ($labl$$label may
    // be NULL); with no label a zero offset is emitted to be patched later.
    // NOTE(review): cases 0x03-0x06 are named above/below but use the
    // signed slt — presumably cmpOp only yields these codes for signed
    // comparisons on this port; confirm.
    switch(flag)
    {
      case 0x01: //equal
        if (&L)
          __ beq(op1, op2, L);
        else
          __ beq(op1, op2, (int)0);
        break;
      case 0x02: //not_equal
        if (&L)
          __ bne(op1, op2, L);
        else
          __ bne(op1, op2, (int)0);
        break;
      case 0x03: //above
        __ slt(AT, op2, op1);
        if(&L)
          __ bne(AT, R0, L);
        else
          __ bne(AT, R0, (int)0);
        break;
      case 0x04: //above_equal
        __ slt(AT, op1, op2);
        if(&L)
          __ beq(AT, R0, L);
        else
          __ beq(AT, R0, (int)0);
        break;
      case 0x05: //below
        __ slt(AT, op1, op2);
        if(&L)
          __ bne(AT, R0, L);
        else
          __ bne(AT, R0, (int)0);
        break;
      case 0x06: //below_equal
        __ slt(AT, op2, op1);
        if(&L)
          __ beq(AT, R0, L);
        else
          __ beq(AT, R0, (int)0);
        break;
      default:
        Unimplemented();
    }
    // Fill the branch delay slot.
    __ nop();
  %}
  ins_pipe( pipe_jump );
  ins_pc_relative(1);
%}
// Counted-loop back-branch against a 16-bit immediate: computes
// AT = src1 - src2 (immI16_sub guarantees -src2 fits addiu's simm16)
// and branches on the sign/zeroness of the difference.
instruct jmpLoopEnd_reg_imm16_sub(cmpOp cop, mRegI src1, immI16_sub src2, label labl) %{
  match(CountedLoopEnd cop (CmpI src1 src2));
  effect(USE labl);

  ins_cost(250);
  format %{ "J$cop $src1, $src2, $labl\t# Loop end @ jmpLoopEnd_reg_imm16_sub" %}
  ins_encode %{
    Register op1 = $src1$$Register;
    int op2 = $src2$$constant;
    Label &L = *($labl$$label);
    int flag = $cop$$cmpcode;

    // AT := op1 - op2; the branch then only needs to inspect AT.
    __ addiu32(AT, op1, -1 * op2);

    // As in jmpLoopEnd, "if (&L)" distinguishes a bound label from a
    // NULL label pointer (zero offset emitted, patched later).
    switch(flag)
    {
      case 0x01: //equal
        if (&L)
          __ beq(AT, R0, L);
        else
          __ beq(AT, R0, (int)0);
        break;
      case 0x02: //not_equal
        if (&L)
          __ bne(AT, R0, L);
        else
          __ bne(AT, R0, (int)0);
        break;
      case 0x03: //above
        if(&L)
          __ bgtz(AT, L);
        else
          __ bgtz(AT, (int)0);
        break;
      case 0x04: //above_equal
        if(&L)
          __ bgez(AT, L);
        else
          __ bgez(AT,(int)0);
        break;
      case 0x05: //below
        if(&L)
          __ bltz(AT, L);
        else
          __ bltz(AT, (int)0);
        break;
      case 0x06: //below_equal
        if(&L)
          __ blez(AT, L);
        else
          __ blez(AT, (int)0);
        break;
      default:
        Unimplemented();
    }
    // Fill the branch delay slot.
    __ nop();
  %}
  ins_pipe( pipe_jump );
  ins_pc_relative(1);
%}
12579 /*
12580 // Jump Direct Conditional - Label defines a relative address from Jcc+1
12581 instruct jmpLoopEndU(cmpOpU cop, eFlagsRegU cmp, label labl) %{
12582 match(CountedLoopEnd cop cmp);
12583 effect(USE labl);
12585 ins_cost(300);
12586 format %{ "J$cop,u $labl\t# Loop end" %}
12587 size(6);
12588 opcode(0x0F, 0x80);
12589 ins_encode( Jcc( cop, labl) );
12590 ins_pipe( pipe_jump );
12591 ins_pc_relative(1);
12592 %}
12594 instruct jmpLoopEndUCF(cmpOpUCF cop, eFlagsRegUCF cmp, label labl) %{
12595 match(CountedLoopEnd cop cmp);
12596 effect(USE labl);
12598 ins_cost(200);
12599 format %{ "J$cop,u $labl\t# Loop end" %}
12600 opcode(0x0F, 0x80);
12601 ins_encode( Jcc( cop, labl) );
12602 ins_pipe( pipe_jump );
12603 ins_pc_relative(1);
12604 %}
12605 */
// This match pattern is created for StoreIConditional since I cannot match IfNode without a RegFlags! fujie 2012/07/17
// Branch on the AT pseudo-flag: AT is non-zero when the preceding
// flag-producing instruction (e.g. storeIConditional) succeeded, so
// "equal" branches on AT != 0 and "not equal" on AT == 0.
instruct jmpCon_flags(cmpOp cop, FlagsReg cr, label labl) %{
  match(If cop cr);
  effect(USE labl);

  ins_cost(300);
  format %{ "J$cop $labl #mips uses AT as eflag @jmpCon_flags" %}

  ins_encode %{
    Label &L = *($labl$$label);
    // "if (&L)": a NULL label pointer means emit a zero offset for patching.
    switch($cop$$cmpcode)
    {
      case 0x01: //equal
        if (&L)
          __ bne(AT, R0, L);
        else
          __ bne(AT, R0, (int)0);
        break;
      case 0x02: //not equal
        if (&L)
          __ beq(AT, R0, L);
        else
          __ beq(AT, R0, (int)0);
        break;
      default:
        Unimplemented();
    }
    // Fill the branch delay slot.
    __ nop();
  %}

  ins_pipe( pipe_jump );
  ins_pc_relative(1);
%}
// ============================================================================
// The 2nd slow-half of a subtype check. Scan the subklass's 2ndary superklass
// array for an instance of the superklass. Set a hidden internal cache on a
// hit (cache is checked with exposed code in gen_subtype_check()). Return
// NZ for a miss or zero for a hit. The encoding ALSO sets flags.
instruct partialSubtypeCheck( mRegP result, no_T8_mRegP sub, no_T8_mRegP super, mT8RegI tmp ) %{
  match(Set result (PartialSubtypeCheck sub super));
  effect(KILL tmp);
  ins_cost(1100); // slightly larger than the next version
  format %{ "partialSubtypeCheck result=$result, sub=$sub, super=$super, tmp=$tmp " %}

  ins_encode( enc_PartialSubtypeCheck(result, sub, super, tmp) );
  ins_pipe( pipe_slow );
%}
// Conditional-store of an int value.
// ZF flag is set on success, reset otherwise. Implemented with a CMPXCHG on Intel.
// On MIPS: an LL/SC loop; the success flag is left in AT (0xFF on
// success, 0 on failure) for the following jmpCon_flags branch.
instruct storeIConditional( memory mem, mRegI oldval, mRegI newval, FlagsReg cr ) %{
  match(Set cr (StoreIConditional mem (Binary oldval newval)));
  // effect(KILL oldval);
  format %{ "CMPXCHG $newval, $mem, $oldval \t# @storeIConditional" %}

  ins_encode %{
    Register oldval = $oldval$$Register;
    Register newval = $newval$$Register;
    Address addr(as_Register($mem$$base), $mem$$disp);
    Label again, failure;

    // int base = $mem$$base;
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;

    guarantee(Assembler::is_simm16(disp), "");

    // Indexed addressing is not supported by this encoding.
    if( index != 0 ) {
      __ stop("in storeIConditional: index != 0");
    } else {
      __ bind(again);
      if(UseSyncLevel <= 1000) __ sync();
      __ ll(AT, addr);
      __ bne(AT, oldval, failure);
      // Delay slot: pre-set AT to 0 (failure result).
      __ delayed()->addu(AT, R0, R0);

      __ addu(AT, newval, R0);
      __ sc(AT, addr);
      // SC writes 0 into AT if the reservation was lost: retry.
      __ beq(AT, R0, again);
      // Delay slot: AT := 0xFF marks success.
      __ delayed()->addiu(AT, R0, 0xFF);
      __ bind(failure);
      __ sync();
    }
  %}

  ins_pipe( long_memory_op );
%}
12699 // Conditional-store of a long value.
12700 // ZF flag is set on success, reset otherwise. Implemented with a CMPXCHG.
12701 instruct storeLConditional(memory mem, t2RegL oldval, mRegL newval, FlagsReg cr )
12702 %{
12703 match(Set cr (StoreLConditional mem (Binary oldval newval)));
12704 effect(KILL oldval);
12706 format %{ "cmpxchg $mem, $newval\t# If $oldval == $mem then store $newval into $mem" %}
12707 ins_encode%{
12708 Register oldval = $oldval$$Register;
12709 Register newval = $newval$$Register;
12710 Address addr((Register)$mem$$base, $mem$$disp);
12712 int index = $mem$$index;
12713 int scale = $mem$$scale;
12714 int disp = $mem$$disp;
12716 guarantee(Assembler::is_simm16(disp), "");
12718 if( index != 0 ) {
12719 __ stop("in storeIConditional: index != 0");
12720 } else {
12721 __ cmpxchg(newval, addr, oldval);
12722 }
12723 %}
12724 ins_pipe( long_memory_op );
12725 %}
12728 instruct compareAndSwapI( mRegI res, mRegP mem_ptr, mS2RegI oldval, mRegI newval) %{
12729 match(Set res (CompareAndSwapI mem_ptr (Binary oldval newval)));
12730 effect(KILL oldval);
12731 // match(CompareAndSwapI mem_ptr (Binary oldval newval));
12732 format %{ "CMPXCHG $newval, [$mem_ptr], $oldval @ compareAndSwapI\n\t"
12733 "MOV $res, 1 @ compareAndSwapI\n\t"
12734 "BNE AT, R0 @ compareAndSwapI\n\t"
12735 "MOV $res, 0 @ compareAndSwapI\n"
12736 "L:" %}
12737 ins_encode %{
12738 Register newval = $newval$$Register;
12739 Register oldval = $oldval$$Register;
12740 Register res = $res$$Register;
12741 Address addr($mem_ptr$$Register, 0);
12742 Label L;
12744 __ cmpxchg32(newval, addr, oldval);
12745 __ move(res, AT);
12746 %}
12747 ins_pipe( long_memory_op );
12748 %}
12750 //FIXME:
12751 instruct compareAndSwapP( mRegI res, mRegP mem_ptr, s2_RegP oldval, mRegP newval) %{
12752 match(Set res (CompareAndSwapP mem_ptr (Binary oldval newval)));
12753 effect(KILL oldval);
12754 format %{ "CMPXCHG $newval, [$mem_ptr], $oldval @ compareAndSwapP\n\t"
12755 "MOV $res, AT @ compareAndSwapP\n\t"
12756 "L:" %}
12757 ins_encode %{
12758 Register newval = $newval$$Register;
12759 Register oldval = $oldval$$Register;
12760 Register res = $res$$Register;
12761 Address addr($mem_ptr$$Register, 0);
12762 Label L;
12764 __ cmpxchg(newval, addr, oldval);
12765 __ move(res, AT);
12766 %}
12767 ins_pipe( long_memory_op );
12768 %}
12770 instruct compareAndSwapN( mRegI res, mRegP mem_ptr, t2_RegN oldval, mRegN newval) %{
12771 match(Set res (CompareAndSwapN mem_ptr (Binary oldval newval)));
12772 effect(KILL oldval);
12773 format %{ "CMPXCHG $newval, [$mem_ptr], $oldval @ compareAndSwapN\n\t"
12774 "MOV $res, AT @ compareAndSwapN\n\t"
12775 "L:" %}
12776 ins_encode %{
12777 Register newval = $newval$$Register;
12778 Register oldval = $oldval$$Register;
12779 Register res = $res$$Register;
12780 Address addr($mem_ptr$$Register, 0);
12781 Label L;
12783 /* 2013/7/19 Jin: cmpxchg32 is implemented with ll/sc, which will do sign extension.
12784 * Thus, we should extend oldval's sign for correct comparision.
12785 */
12786 __ sll(oldval, oldval, 0);
12788 __ cmpxchg32(newval, addr, oldval);
12789 __ move(res, AT);
12790 %}
12791 ins_pipe( long_memory_op );
12792 %}
12794 //----------Max and Min--------------------------------------------------------
12795 // Min Instructions
12796 ////
12797 // *** Min and Max using the conditional move are slower than the
12798 // *** branch version on a Pentium III.
12799 // // Conditional move for min
12800 //instruct cmovI_reg_lt( eRegI op2, eRegI op1, eFlagsReg cr ) %{
12801 // effect( USE_DEF op2, USE op1, USE cr );
12802 // format %{ "CMOVlt $op2,$op1\t! min" %}
12803 // opcode(0x4C,0x0F);
12804 // ins_encode( OpcS, OpcP, RegReg( op2, op1 ) );
12805 // ins_pipe( pipe_cmov_reg );
12806 //%}
12807 //
12808 //// Min Register with Register (P6 version)
12809 //instruct minI_eReg_p6( eRegI op1, eRegI op2 ) %{
12810 // predicate(VM_Version::supports_cmov() );
12811 // match(Set op2 (MinI op1 op2));
12812 // ins_cost(200);
12813 // expand %{
12814 // eFlagsReg cr;
12815 // compI_eReg(cr,op1,op2);
12816 // cmovI_reg_lt(op2,op1,cr);
12817 // %}
12818 //%}
// Min Register with Register (generic version)
// dst = min(dst, src): slt sets AT when src < dst, movn then moves
// src into dst only in that case.
instruct minI_Reg_Reg(mRegI dst, mRegI src) %{
  match(Set dst (MinI dst src));
  //effect(KILL flags);
  ins_cost(80);

  format %{ "MIN $dst, $src @minI_Reg_Reg" %}
  ins_encode %{
    Register dst = $dst$$Register;
    Register src = $src$$Register;

    __ slt(AT, src, dst);
    __ movn(dst, src, AT);

  %}

  ins_pipe( pipe_slow );
%}
12839 // Max Register with Register
12840 // *** Min and Max using the conditional move are slower than the
12841 // *** branch version on a Pentium III.
12842 // // Conditional move for max
12843 //instruct cmovI_reg_gt( eRegI op2, eRegI op1, eFlagsReg cr ) %{
12844 // effect( USE_DEF op2, USE op1, USE cr );
12845 // format %{ "CMOVgt $op2,$op1\t! max" %}
12846 // opcode(0x4F,0x0F);
12847 // ins_encode( OpcS, OpcP, RegReg( op2, op1 ) );
12848 // ins_pipe( pipe_cmov_reg );
12849 //%}
12850 //
12851 // // Max Register with Register (P6 version)
12852 //instruct maxI_eReg_p6( eRegI op1, eRegI op2 ) %{
12853 // predicate(VM_Version::supports_cmov() );
12854 // match(Set op2 (MaxI op1 op2));
12855 // ins_cost(200);
12856 // expand %{
12857 // eFlagsReg cr;
12858 // compI_eReg(cr,op1,op2);
12859 // cmovI_reg_gt(op2,op1,cr);
12860 // %}
12861 //%}
// Max Register with Register (generic version)
// dst = max(dst, src): slt sets AT when dst < src, movn then moves
// src into dst only in that case.
instruct maxI_Reg_Reg(mRegI dst, mRegI src) %{
  match(Set dst (MaxI dst src));
  ins_cost(80);

  format %{ "MAX $dst, $src @maxI_Reg_Reg" %}

  ins_encode %{
    Register dst = $dst$$Register;
    Register src = $src$$Register;

    __ slt(AT, dst, src);
    __ movn(dst, src, AT);

  %}

  ins_pipe( pipe_slow );
%}
// dst = max(dst, 0): clamp negative values to zero via slt/movn.
instruct maxI_Reg_zero(mRegI dst, immI0 zero) %{
  match(Set dst (MaxI dst zero));
  ins_cost(50);

  format %{ "MAX $dst, 0 @maxI_Reg_zero" %}

  ins_encode %{
    Register dst = $dst$$Register;

    __ slt(AT, dst, R0);
    __ movn(dst, R0, AT);

  %}

  ins_pipe( pipe_slow );
%}
// AndL with the 0xFFFFFFFF mask == zero-extend the low 32 bits;
// implemented with a single dext (extract bits [0,32)).
instruct zerox_long_reg_reg(mRegL dst, mRegL src, immL_32bits mask)
%{
  match(Set dst (AndL src mask));

  format %{ "movl $dst, $src\t# zero-extend long @ zerox_long_reg_reg" %}
  ins_encode %{
    Register dst = $dst$$Register;
    Register src = $src$$Register;

    __ dext(dst, src, 0, 32);
  %}
  ins_pipe(ialu_regI_regI);
%}
// Combine two ints into one long: dst = (src2 << 32) | zext(src1),
// matching (zext(src1) | (i2l(src2) << 32)). The three cases avoid
// clobbering an input that aliases dst before it has been consumed.
instruct combine_i2l(mRegL dst, mRegI src1, immL_32bits mask, mRegI src2, immI_32 shift32)
%{
  match(Set dst (OrL (AndL (ConvI2L src1) mask) (LShiftL (ConvI2L src2) shift32)));

  format %{ "combine_i2l $dst, $src2(H), $src1(L) @ combine_i2l" %}
  ins_encode %{
    Register dst = $dst$$Register;
    Register src1 = $src1$$Register;
    Register src2 = $src2$$Register;

    if (src1 == dst) {
      // Low half already in place; insert src2 into the high half.
      __ dinsu(dst, src2, 32, 32);
    } else if (src2 == dst) {
      // Shift the high half up first, then insert the low half.
      __ dsll32(dst, dst, 0);
      __ dins(dst, src1, 0, 32);
    } else {
      __ dext(dst, src1, 0, 32);
      __ dinsu(dst, src2, 32, 32);
    }
  %}
  ins_pipe(ialu_regI_regI);
%}
// Zero-extend convert int to long
// (AndL (ConvI2L src) 0xFFFFFFFF) folds to a single dext.
instruct convI2L_reg_reg_zex(mRegL dst, mRegI src, immL_32bits mask)
%{
  match(Set dst (AndL (ConvI2L src) mask));

  format %{ "movl $dst, $src\t# i2l zero-extend @ convI2L_reg_reg_zex" %}
  ins_encode %{
    Register dst = $dst$$Register;
    Register src = $src$$Register;

    __ dext(dst, src, 0, 32);
  %}
  ins_pipe(ialu_regI_regI);
%}
// (AndL (ConvI2L (ConvL2I src)) 0xFFFFFFFF): the round trip through
// int followed by the mask is just a zero-extension of the low word.
instruct convL2I2L_reg_reg_zex(mRegL dst, mRegL src, immL_32bits mask)
%{
  match(Set dst (AndL (ConvI2L (ConvL2I src)) mask));

  format %{ "movl $dst, $src\t# i2l zero-extend @ convL2I2L_reg_reg_zex" %}
  ins_encode %{
    Register dst = $dst$$Register;
    Register src = $src$$Register;

    __ dext(dst, src, 0, 32);
  %}
  ins_pipe(ialu_regI_regI);
%}
// Match loading integer and casting it to unsigned int in long register.
// LoadI + ConvI2L + AndL 0xffffffff.
// Both operand orders of the commutative AndL are matched; each folds
// to a single unsigned 32-bit load (load_N_enc / lwu).
instruct loadUI2L_rmask(mRegL dst, memory mem, immL_32bits mask) %{
  match(Set dst (AndL (ConvI2L (LoadI mem)) mask));

  format %{ "lwu $dst, $mem \t// zero-extend to long @ loadUI2L_rmask" %}
  ins_encode (load_N_enc(dst, mem));
  ins_pipe(ialu_loadI);
%}

// Same pattern with the mask on the left-hand side.
instruct loadUI2L_lmask(mRegL dst, memory mem, immL_32bits mask) %{
  match(Set dst (AndL mask (ConvI2L (LoadI mem))));

  format %{ "lwu $dst, $mem \t// zero-extend to long @ loadUI2L_lmask" %}
  ins_encode (load_N_enc(dst, mem));
  ins_pipe(ialu_loadI);
%}
// ============================================================================
// Safepoint Instruction
// Reads the polling page: the load faults when the VM arms the page,
// diverting the thread into the safepoint handler. The relocation
// marks this as a poll site for the signal handler.
instruct safePoint_poll(mRegP poll) %{
  match(SafePoint poll);
  effect(USE poll);

  ins_cost(125);
  format %{ "Safepoint @ [$poll] : poll for GC @ safePoint_poll" %}

  ins_encode %{
    Register poll_reg = $poll$$Register;

    __ block_comment("Safepoint:");
    __ relocate(relocInfo::poll_type);
    __ lw(AT, poll_reg, 0);
  %}

  ins_pipe( ialu_storeI );
%}
//----------Arithmetic Conversion Instructions---------------------------------

// RoundFloat is a no-op on this port (no x87-style excess precision).
instruct roundFloat_nop(regF dst)
%{
  match(Set dst (RoundFloat dst));

  ins_cost(0);
  ins_encode();
  ins_pipe(empty);
%}

// RoundDouble is likewise a no-op.
instruct roundDouble_nop(regD dst)
%{
  match(Set dst (RoundDouble dst));

  ins_cost(0);
  ins_encode();
  ins_pipe(empty);
%}
//---------- Zeros Count Instructions ------------------------------------------
// CountLeadingZerosINode CountTrailingZerosINode

// Count leading zeros of an int with the hardware clz instruction;
// guarded by the UseCountLeadingZerosInstruction flag.
instruct countLeadingZerosI(mRegI dst, mRegI src) %{
  predicate(UseCountLeadingZerosInstruction);
  match(Set dst (CountLeadingZerosI src));

  format %{ "clz $dst, $src\t# count leading zeros (int)" %}
  ins_encode %{
    __ clz($dst$$Register, $src$$Register);
  %}
  ins_pipe( ialu_regL_regL );
%}

// Count leading zeros of a long with the 64-bit dclz instruction.
instruct countLeadingZerosL(mRegI dst, mRegL src) %{
  predicate(UseCountLeadingZerosInstruction);
  match(Set dst (CountLeadingZerosL src));

  format %{ "dclz $dst, $src\t# count leading zeros (long)" %}
  ins_encode %{
    __ dclz($dst$$Register, $src$$Register);
  %}
  ins_pipe( ialu_regL_regL );
%}
// Count trailing zeros of an int; ctz is a Loongson (gs) extension,
// guarded by the UseCountTrailingZerosInstruction flag.
instruct countTrailingZerosI(mRegI dst, mRegI src) %{
  predicate(UseCountTrailingZerosInstruction);
  match(Set dst (CountTrailingZerosI src));

  format %{ "ctz $dst, $src\t# count trailing zeros (int)" %}
  ins_encode %{
    // ctz and dctz is gs instructions.
    __ ctz($dst$$Register, $src$$Register);
  %}
  ins_pipe( ialu_regL_regL );
%}
13060 instruct countTrailingZerosL(mRegI dst, mRegL src) %{
13061 predicate(UseCountTrailingZerosInstruction);
13062 match(Set dst (CountTrailingZerosL src));
13064 format %{ "dcto $dst, $src\t# count trailing zeros (long)" %}
13065 ins_encode %{
13066 __ dctz($dst$$Register, $src$$Register);
13067 %}
13068 ins_pipe( ialu_regL_regL );
13069 %}
// ====================VECTOR INSTRUCTIONS=====================================

// Load vectors (8 bytes long)
// An 8-byte vector lives in a double FPU register (vecD); reuse the
// double load encoding.
instruct loadV8(vecD dst, memory mem) %{
  predicate(n->as_LoadVector()->memory_size() == 8);
  match(Set dst (LoadVector mem));
  ins_cost(125);
  format %{ "load $dst, $mem\t! load vector (8 bytes)" %}
  ins_encode(load_D_enc(dst, mem));
  ins_pipe( fpu_loadF );
%}

// Store vectors (8 bytes long)
// Reuses the double store encoding for the 8-byte vector register.
instruct storeV8(memory mem, vecD src) %{
  predicate(n->as_StoreVector()->memory_size() == 8);
  match(Set mem (StoreVector mem src));
  ins_cost(145);
  format %{ "store $mem, $src\t! store vector (8 bytes)" %}
  ins_encode(store_D_reg_enc(mem, src));
  ins_pipe( fpu_storeF );
%}
// Replicate a byte scalar into all 8 lanes of a vecD: Loongson replv_ob
// broadcasts the byte in an integer register, then dmtc1 moves the
// 64-bit result into the FPU vector register.
instruct Repl8B(vecD dst, mRegI src) %{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (ReplicateB src));
  format %{ "replv_ob AT, $src\n\t"
            "dmtc1 AT, $dst\t! replicate8B" %}
  ins_encode %{
    __ replv_ob(AT, $src$$Register);
    __ dmtc1(AT, $dst$$FloatRegister);
  %}
  ins_pipe( pipe_mtc1 );
%}

// Replicate a byte immediate into all 8 lanes.
// NOTE(review): unlike Repl4S_imm below, no range check is applied to
// the immediate before repl_ob — confirm repl_ob's immediate field
// covers every value immI can carry here.
instruct Repl8B_imm(vecD dst, immI con) %{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (ReplicateB con));
  format %{ "repl_ob AT, [$con]\n\t"
            "dmtc1 AT, $dst,0x00\t! replicate8B($con)" %}
  ins_encode %{
    int val = $con$$constant;
    __ repl_ob(AT, val);
    __ dmtc1(AT, $dst$$FloatRegister);
  %}
  ins_pipe( pipe_mtc1 );
%}

// Replicate byte zero: the all-zero 64-bit pattern is moved directly.
instruct Repl8B_zero(vecD dst, immI0 zero) %{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (ReplicateB zero));
  format %{ "dmtc1 R0, $dst\t! replicate8B zero" %}
  ins_encode %{
    __ dmtc1(R0, $dst$$FloatRegister);
  %}
  ins_pipe( pipe_mtc1 );
%}

// Replicate byte -1: nor(R0, R0) yields all ones in AT.
instruct Repl8B_M1(vecD dst, immI_M1 M1) %{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (ReplicateB M1));
  format %{ "dmtc1 -1, $dst\t! replicate8B -1" %}
  ins_encode %{
    __ nor(AT, R0, R0);
    __ dmtc1(AT, $dst$$FloatRegister);
  %}
  ins_pipe( pipe_mtc1 );
%}
13139 instruct Repl4S(vecD dst, mRegI src) %{
13140 predicate(n->as_Vector()->length() == 4);
13141 match(Set dst (ReplicateS src));
13142 format %{ "replv_qh AT, $src\n\t"
13143 "dmtc1 AT, $dst\t! replicate4S" %}
13144 ins_encode %{
13145 __ replv_qh(AT, $src$$Register);
13146 __ dmtc1(AT, $dst$$FloatRegister);
13147 %}
13148 ins_pipe( pipe_mtc1 );
13149 %}
13151 instruct Repl4S_imm(vecD dst, immI con) %{
13152 predicate(n->as_Vector()->length() == 4);
13153 match(Set dst (ReplicateS con));
13154 format %{ "replv_qh AT, [$con]\n\t"
13155 "dmtc1 AT, $dst\t! replicate4S($con)" %}
13156 ins_encode %{
13157 int val = $con$$constant;
13158 if ( Assembler::is_simm(val, 10)) {
13159 //repl_qh supports 10 bits immediate
13160 __ repl_qh(AT, val);
13161 } else {
13162 __ li32(AT, val);
13163 __ replv_qh(AT, AT);
13164 }
13165 __ dmtc1(AT, $dst$$FloatRegister);
13166 %}
13167 ins_pipe( pipe_mtc1 );
13168 %}
// Replicate a zero halfword into all 4 lanes of a 64-bit vector: one
// move of R0 to the FP destination clears the whole register.
13170 instruct Repl4S_zero(vecD dst, immI0 zero) %{
13171 predicate(n->as_Vector()->length() == 4);
13172 match(Set dst (ReplicateS zero));
13173 format %{ "dmtc1 R0, $dst\t! replicate4S zero" %}
13174 ins_encode %{
13175 __ dmtc1(R0, $dst$$FloatRegister);
13176 %}
13177 ins_pipe( pipe_mtc1 );
13178 %}
// Replicate the halfword constant -1 into all 4 lanes: nor(AT, R0, R0)
// yields an all-ones 64-bit value (four 0xFFFF halfwords).
13180 instruct Repl4S_M1(vecD dst, immI_M1 M1) %{
13181 predicate(n->as_Vector()->length() == 4);
13182 match(Set dst (ReplicateS M1));
13183 format %{ "dmtc1 -1, $dst\t! replicate4S -1" %}
13184 ins_encode %{
13185 __ nor(AT, R0, R0);
13186 __ dmtc1(AT, $dst$$FloatRegister);
13187 %}
13188 ins_pipe( pipe_mtc1 );
13189 %}
13191 // Replicate integer (4 byte) scalar to be vector
// Replicate a 32-bit GP-register scalar into both word lanes of a
// 64-bit vector. dins writes $src into AT bits [31:0] and dinsu
// writes it into bits [63:32], so together they define all 64 bits of
// AT before the move to the FP destination.
13192 instruct Repl2I(vecD dst, mRegI src) %{
13193 predicate(n->as_Vector()->length() == 2);
13194 match(Set dst (ReplicateI src));
13195 format %{ "dins AT, $src, 0, 32\n\t"
13196 "dinsu AT, $src, 32, 32\n\t"
13197 "dmtc1 AT, $dst\t! replicate2I" %}
13198 ins_encode %{
13199 __ dins(AT, $src$$Register, 0, 32);
13200 __ dinsu(AT, $src$$Register, 32, 32);
13201 __ dmtc1(AT, $dst$$FloatRegister);
13202 %}
13203 ins_pipe( pipe_mtc1 );
13204 %}
13206 // Replicate integer (4 byte) scalar immediate to be vector by loading from const table.
// Replicate a 32-bit immediate into both word lanes of a 64-bit
// vector: materialize the constant in AT, broadcast with the Loongson
// replv_pw (paired-word) instruction, then move to the FP destination.
// NOTE(review): tmp (A7) is declared KILLed but never referenced in
// the encoding below; presumably li32 or replv_pw clobbers A7
// internally -- confirm against the assembler implementation.
13207 instruct Repl2I_imm(vecD dst, immI con, mA7RegI tmp) %{
13208 predicate(n->as_Vector()->length() == 2);
13209 match(Set dst (ReplicateI con));
13210 effect(KILL tmp);
13211 format %{ "li32 AT, [$con], 32\n\t"
13212 "replv_pw AT, AT\n\t"
13213 "dmtc1 AT, $dst\t! replicate2I($con)" %}
13214 ins_encode %{
13215 int val = $con$$constant;
13216 __ li32(AT, val);
13217 __ replv_pw(AT, AT);
13218 __ dmtc1(AT, $dst$$FloatRegister);
13219 %}
13220 ins_pipe( pipe_mtc1 );
13221 %}
13223 // Replicate integer (4 byte) scalar zero to be vector
// Replicate a zero word into both lanes of a 64-bit vector: a single
// move of R0 to the FP destination clears the whole register.
13224 instruct Repl2I_zero(vecD dst, immI0 zero) %{
13225 predicate(n->as_Vector()->length() == 2);
13226 match(Set dst (ReplicateI zero));
13227 format %{ "dmtc1 R0, $dst\t! replicate2I zero" %}
13228 ins_encode %{
13229 __ dmtc1(R0, $dst$$FloatRegister);
13230 %}
13231 ins_pipe( pipe_mtc1 );
13232 %}
13234 // Replicate integer (4 byte) scalar -1 to be vector
// Replicate the word constant -1 into both lanes: nor(AT, R0, R0)
// produces an all-ones 64-bit pattern (two 0xFFFFFFFF words).
13235 instruct Repl2I_M1(vecD dst, immI_M1 M1) %{
13236 predicate(n->as_Vector()->length() == 2);
13237 match(Set dst (ReplicateI M1));
13238 format %{ "dmtc1 -1, $dst\t! replicate2I -1, use AT" %}
13239 ins_encode %{
13240 __ nor(AT, R0, R0);
13241 __ dmtc1(AT, $dst$$FloatRegister);
13242 %}
13243 ins_pipe( pipe_mtc1 );
13244 %}
13246 // Replicate float (4 byte) scalar to be vector
// Replicate a single-precision float into both lanes of a 64-bit
// paired-single vector: cvt.ps.s packs its two single operands into
// one PS register, and passing $src twice duplicates it.
13247 instruct Repl2F(vecD dst, regF src) %{
13248 predicate(n->as_Vector()->length() == 2);
13249 match(Set dst (ReplicateF src));
13250 format %{ "cvt.ps $dst, $src, $src\t! replicate2F" %}
13251 ins_encode %{
13252 __ cvt_ps_s($dst$$FloatRegister, $src$$FloatRegister, $src$$FloatRegister);
13253 %}
13254 ins_pipe( pipe_slow );
13255 %}
13257 // Replicate float (4 byte) scalar zero to be vector
// Replicate float zero into both lanes: +0.0f has an all-zero bit
// pattern, so moving R0 into the FP destination is sufficient.
13258 instruct Repl2F_zero(vecD dst, immF0 zero) %{
13259 predicate(n->as_Vector()->length() == 2);
13260 match(Set dst (ReplicateF zero));
13261 format %{ "dmtc1 R0, $dst\t! replicate2F zero" %}
13262 ins_encode %{
13263 __ dmtc1(R0, $dst$$FloatRegister);
13264 %}
13265 ins_pipe( pipe_mtc1 );
13266 %}
13269 // ====================VECTOR ARITHMETIC=======================================
13271 // --------------------------------- ADD --------------------------------------
13273 // Floats vector add
// Packed single-precision add, two-operand form: dst = dst + src
// (matched when the ideal AddVF reuses dst as the first input).
// NOTE(review): this variant uses pipe_slow while the three-operand
// vadd2F3 below uses fpu_regF_regF -- confirm whether the difference
// in pipeline class is intentional.
13274 instruct vadd2F(vecD dst, vecD src) %{
13275 predicate(n->as_Vector()->length() == 2);
13276 match(Set dst (AddVF dst src));
13277 format %{ "add.ps $dst,$src\t! add packed2F" %}
13278 ins_encode %{
13279 __ add_ps($dst$$FloatRegister, $dst$$FloatRegister, $src$$FloatRegister);
13280 %}
13281 ins_pipe( pipe_slow );
13282 %}
// Packed single-precision add, three-operand form: dst = src1 + src2.
13284 instruct vadd2F3(vecD dst, vecD src1, vecD src2) %{
13285 predicate(n->as_Vector()->length() == 2);
13286 match(Set dst (AddVF src1 src2));
13287 format %{ "add.ps $dst,$src1,$src2\t! add packed2F" %}
13288 ins_encode %{
13289 __ add_ps($dst$$FloatRegister, $src1$$FloatRegister, $src2$$FloatRegister);
13290 %}
13291 ins_pipe( fpu_regF_regF );
13292 %}
13294 // --------------------------------- SUB --------------------------------------
13296 // Floats vector sub
// Packed single-precision subtract, two-operand form: dst = dst - src.
13297 instruct vsub2F(vecD dst, vecD src) %{
13298 predicate(n->as_Vector()->length() == 2);
13299 match(Set dst (SubVF dst src));
13300 format %{ "sub.ps $dst,$src\t! sub packed2F" %}
13301 ins_encode %{
13302 __ sub_ps($dst$$FloatRegister, $dst$$FloatRegister, $src$$FloatRegister);
13303 %}
13304 ins_pipe( fpu_regF_regF );
13305 %}
13307 // --------------------------------- MUL --------------------------------------
13309 // Floats vector mul
// Packed single-precision multiply, two-operand form: dst = dst * src.
13310 instruct vmul2F(vecD dst, vecD src) %{
13311 predicate(n->as_Vector()->length() == 2);
13312 match(Set dst (MulVF dst src));
13313 format %{ "mul.ps $dst, $src\t! mul packed2F" %}
13314 ins_encode %{
13315 __ mul_ps($dst$$FloatRegister, $dst$$FloatRegister, $src$$FloatRegister);
13316 %}
13317 ins_pipe( fpu_regF_regF );
13318 %}
// Packed single-precision multiply, three-operand form:
// dst = src1 * src2.
13320 instruct vmul2F3(vecD dst, vecD src1, vecD src2) %{
13321 predicate(n->as_Vector()->length() == 2);
13322 match(Set dst (MulVF src1 src2));
13323 format %{ "mul.ps $dst, $src1, $src2\t! mul packed2F" %}
13324 ins_encode %{
13325 __ mul_ps($dst$$FloatRegister, $src1$$FloatRegister, $src2$$FloatRegister);
13326 %}
13327 ins_pipe( fpu_regF_regF );
13328 %}
13330 // --------------------------------- DIV --------------------------------------
13331 // MIPS does not have div.ps
13334 //----------PEEPHOLE RULES-----------------------------------------------------
13335 // These must follow all instruction definitions as they use the names
13336 // defined in the instructions definitions.
13337 //
13338 // peepmatch ( root_instr_name [preceding_instruction]* );
13339 //
13340 // peepconstraint %{
13341 // (instruction_number.operand_name relational_op instruction_number.operand_name
13342 // [, ...] );
13343 // // instruction numbers are zero-based using left to right order in peepmatch
13344 //
13345 // peepreplace ( instr_name ( [instruction_number.operand_name]* ) );
13346 // // provide an instruction_number.operand_name for each operand that appears
13347 // // in the replacement instruction's match rule
13348 //
13349 // ---------VM FLAGS---------------------------------------------------------
13350 //
13351 // All peephole optimizations can be turned off using -XX:-OptoPeephole
13352 //
13353 // Each peephole rule is given an identifying number starting with zero and
13354 // increasing by one in the order seen by the parser. An individual peephole
13355 // can be enabled, and all others disabled, by using -XX:OptoPeepholeAt=#
13356 // on the command-line.
13357 //
13358 // ---------CURRENT LIMITATIONS----------------------------------------------
13359 //
13360 // Only match adjacent instructions in same basic block
13361 // Only equality constraints
13362 // Only constraints between operands, not (0.dest_reg == EAX_enc)
13363 // Only one replacement instruction
13364 //
13365 // ---------EXAMPLE----------------------------------------------------------
13366 //
13367 // // pertinent parts of existing instructions in architecture description
13368 // instruct movI(eRegI dst, eRegI src) %{
13369 // match(Set dst (CopyI src));
13370 // %}
13371 //
13372 // instruct incI_eReg(eRegI dst, immI1 src, eFlagsReg cr) %{
13373 // match(Set dst (AddI dst src));
13374 // effect(KILL cr);
13375 // %}
13376 //
13377 // // Change (inc mov) to lea
13378 // peephole %{
13379 // // increment preceded by register-register move
13380 // peepmatch ( incI_eReg movI );
13381 // // require that the destination register of the increment
13382 // // match the destination register of the move
13383 // peepconstraint ( 0.dst == 1.dst );
13384 // // construct a replacement instruction that sets
13385 // // the destination to ( move's source register + one )
13386 // peepreplace ( leaI_eReg_immI( 0.dst 1.src 0.src ) );
13387 // %}
13388 //
13389 // Implementation no longer uses movX instructions since
13390 // machine-independent system no longer uses CopyX nodes.
13391 //
13392 // peephole %{
13393 // peepmatch ( incI_eReg movI );
13394 // peepconstraint ( 0.dst == 1.dst );
13395 // peepreplace ( leaI_eReg_immI( 0.dst 1.src 0.src ) );
13396 // %}
13397 //
13398 // peephole %{
13399 // peepmatch ( decI_eReg movI );
13400 // peepconstraint ( 0.dst == 1.dst );
13401 // peepreplace ( leaI_eReg_immI( 0.dst 1.src 0.src ) );
13402 // %}
13403 //
13404 // peephole %{
13405 // peepmatch ( addI_eReg_imm movI );
13406 // peepconstraint ( 0.dst == 1.dst );
13407 // peepreplace ( leaI_eReg_immI( 0.dst 1.src 0.src ) );
13408 // %}
13409 //
13410 // peephole %{
13411 // peepmatch ( addP_eReg_imm movP );
13412 // peepconstraint ( 0.dst == 1.dst );
13413 // peepreplace ( leaP_eReg_immI( 0.dst 1.src 0.src ) );
13414 // %}
13416 // // Change load of spilled value to only a spill
13417 // instruct storeI(memory mem, eRegI src) %{
13418 // match(Set mem (StoreI mem src));
13419 // %}
13420 //
13421 // instruct loadI(eRegI dst, memory mem) %{
13422 // match(Set dst (LoadI mem));
13423 // %}
13424 //
13425 //peephole %{
13426 // peepmatch ( loadI storeI );
13427 // peepconstraint ( 1.src == 0.dst, 1.mem == 0.mem );
13428 // peepreplace ( storeI( 1.mem 1.mem 1.src ) );
13429 //%}
13431 //----------SMARTSPILL RULES---------------------------------------------------
13432 // These must follow all instruction definitions as they use the names
13433 // defined in the instructions definitions.