Tue, 28 Feb 2017 12:02:36 -0500
[C2] Add storeC0 in mips_64.ad
1 //
2 // Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved.
3 // Copyright (c) 2015, 2016, Loongson Technology. All rights reserved.
4 // DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5 //
6 // This code is free software; you can redistribute it and/or modify it
7 // under the terms of the GNU General Public License version 2 only, as
8 // published by the Free Software Foundation.
9 //
10 // This code is distributed in the hope that it will be useful, but WITHOUT
11 // ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 // FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
13 // version 2 for more details (a copy is included in the LICENSE file that
14 // accompanied this code).
15 //
16 // You should have received a copy of the GNU General Public License version
17 // 2 along with this work; if not, write to the Free Software Foundation,
18 // Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
19 //
20 // Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
21 // or visit www.oracle.com if you need additional information or have any
22 // questions.
23 //
24 //
26 // Godson3 (Loongson) Architecture Description File
28 //----------REGISTER DEFINITION BLOCK------------------------------------------
29 // This information is used by the matcher and the register allocator to
30 // describe individual registers and classes of registers within the target
31 // architecture.
33 // format:
34 // reg_def name (call convention, c-call convention, ideal type, encoding);
35 // call convention :
36 // NS = No-Save
37 // SOC = Save-On-Call
38 // SOE = Save-On-Entry
39 // AS = Always-Save
40 // ideal type :
41 // see opto/opcodes.hpp for more info
42 // reg_class name (reg, ...);
43 // alloc_class name (reg, ...);
44 register %{
46 // General Registers
47 // Integer Registers
48 reg_def R0 ( NS, NS, Op_RegI, 0, VMRegImpl::Bad());
49 reg_def AT ( NS, NS, Op_RegI, 1, AT->as_VMReg());
50 reg_def AT_H ( NS, NS, Op_RegI, 1, AT->as_VMReg()->next());
51 reg_def V0 (SOC, SOC, Op_RegI, 2, V0->as_VMReg());
52 reg_def V0_H (SOC, SOC, Op_RegI, 2, V0->as_VMReg()->next());
53 reg_def V1 (SOC, SOC, Op_RegI, 3, V1->as_VMReg());
54 reg_def V1_H (SOC, SOC, Op_RegI, 3, V1->as_VMReg()->next());
55 reg_def A0 (SOC, SOC, Op_RegI, 4, A0->as_VMReg());
56 reg_def A0_H (SOC, SOC, Op_RegI, 4, A0->as_VMReg()->next());
57 reg_def A1 (SOC, SOC, Op_RegI, 5, A1->as_VMReg());
58 reg_def A1_H (SOC, SOC, Op_RegI, 5, A1->as_VMReg()->next());
59 reg_def A2 (SOC, SOC, Op_RegI, 6, A2->as_VMReg());
60 reg_def A2_H (SOC, SOC, Op_RegI, 6, A2->as_VMReg()->next());
61 reg_def A3 (SOC, SOC, Op_RegI, 7, A3->as_VMReg());
62 reg_def A3_H (SOC, SOC, Op_RegI, 7, A3->as_VMReg()->next());
63 reg_def A4 (SOC, SOC, Op_RegI, 8, A4->as_VMReg());
64 reg_def A4_H (SOC, SOC, Op_RegI, 8, A4->as_VMReg()->next());
65 reg_def A5 (SOC, SOC, Op_RegI, 9, A5->as_VMReg());
66 reg_def A5_H (SOC, SOC, Op_RegI, 9, A5->as_VMReg()->next());
67 reg_def A6 (SOC, SOC, Op_RegI, 10, A6->as_VMReg());
68 reg_def A6_H (SOC, SOC, Op_RegI, 10, A6->as_VMReg()->next());
69 reg_def A7 (SOC, SOC, Op_RegI, 11, A7->as_VMReg());
70 reg_def A7_H (SOC, SOC, Op_RegI, 11, A7->as_VMReg()->next());
71 reg_def T0 (SOC, SOC, Op_RegI, 12, T0->as_VMReg());
72 reg_def T0_H (SOC, SOC, Op_RegI, 12, T0->as_VMReg()->next());
73 reg_def T1 (SOC, SOC, Op_RegI, 13, T1->as_VMReg());
74 reg_def T1_H (SOC, SOC, Op_RegI, 13, T1->as_VMReg()->next());
75 reg_def T2 (SOC, SOC, Op_RegI, 14, T2->as_VMReg());
76 reg_def T2_H (SOC, SOC, Op_RegI, 14, T2->as_VMReg()->next());
77 reg_def T3 (SOC, SOC, Op_RegI, 15, T3->as_VMReg());
78 reg_def T3_H (SOC, SOC, Op_RegI, 15, T3->as_VMReg()->next());
79 reg_def S0 (SOC, SOE, Op_RegI, 16, S0->as_VMReg());
80 reg_def S0_H (SOC, SOE, Op_RegI, 16, S0->as_VMReg()->next());
81 reg_def S1 (SOC, SOE, Op_RegI, 17, S1->as_VMReg());
82 reg_def S1_H (SOC, SOE, Op_RegI, 17, S1->as_VMReg()->next());
83 reg_def S2 (SOC, SOE, Op_RegI, 18, S2->as_VMReg());
84 reg_def S2_H (SOC, SOE, Op_RegI, 18, S2->as_VMReg()->next());
85 reg_def S3 (SOC, SOE, Op_RegI, 19, S3->as_VMReg());
86 reg_def S3_H (SOC, SOE, Op_RegI, 19, S3->as_VMReg()->next());
87 reg_def S4 (SOC, SOE, Op_RegI, 20, S4->as_VMReg());
88 reg_def S4_H (SOC, SOE, Op_RegI, 20, S4->as_VMReg()->next());
89 reg_def S5 (SOC, SOE, Op_RegI, 21, S5->as_VMReg());
90 reg_def S5_H (SOC, SOE, Op_RegI, 21, S5->as_VMReg()->next());
91 reg_def S6 (SOC, SOE, Op_RegI, 22, S6->as_VMReg());
92 reg_def S6_H (SOC, SOE, Op_RegI, 22, S6->as_VMReg()->next());
93 reg_def S7 (SOC, SOE, Op_RegI, 23, S7->as_VMReg());
94 reg_def S7_H (SOC, SOE, Op_RegI, 23, S7->as_VMReg()->next());
95 reg_def T8 (SOC, SOC, Op_RegI, 24, T8->as_VMReg());
96 reg_def T8_H (SOC, SOC, Op_RegI, 24, T8->as_VMReg()->next());
97 reg_def T9 (SOC, SOC, Op_RegI, 25, T9->as_VMReg());
98 reg_def T9_H (SOC, SOC, Op_RegI, 25, T9->as_VMReg()->next());
100 // Special Registers
101 reg_def K0 ( NS, NS, Op_RegI, 26, K0->as_VMReg());
102 reg_def K1 ( NS, NS, Op_RegI, 27, K1->as_VMReg());
103 reg_def GP ( NS, NS, Op_RegI, 28, GP->as_VMReg());
104 reg_def GP_H ( NS, NS, Op_RegI, 28, GP->as_VMReg()->next());
105 reg_def SP ( NS, NS, Op_RegI, 29, SP->as_VMReg());
106 reg_def SP_H ( NS, NS, Op_RegI, 29, SP->as_VMReg()->next());
107 reg_def FP ( NS, NS, Op_RegI, 30, FP->as_VMReg());
108 reg_def FP_H ( NS, NS, Op_RegI, 30, FP->as_VMReg()->next());
109 reg_def RA ( NS, NS, Op_RegI, 31, RA->as_VMReg());
110 reg_def RA_H ( NS, NS, Op_RegI, 31, RA->as_VMReg()->next());
112 // Floating registers.
113 reg_def F0 ( SOC, SOC, Op_RegF, 0, F0->as_VMReg());
114 reg_def F0_H ( SOC, SOC, Op_RegF, 0, F0->as_VMReg()->next());
115 reg_def F1 ( SOC, SOC, Op_RegF, 1, F1->as_VMReg());
116 reg_def F1_H ( SOC, SOC, Op_RegF, 1, F1->as_VMReg()->next());
117 reg_def F2 ( SOC, SOC, Op_RegF, 2, F2->as_VMReg());
118 reg_def F2_H ( SOC, SOC, Op_RegF, 2, F2->as_VMReg()->next());
119 reg_def F3 ( SOC, SOC, Op_RegF, 3, F3->as_VMReg());
120 reg_def F3_H ( SOC, SOC, Op_RegF, 3, F3->as_VMReg()->next());
121 reg_def F4 ( SOC, SOC, Op_RegF, 4, F4->as_VMReg());
122 reg_def F4_H ( SOC, SOC, Op_RegF, 4, F4->as_VMReg()->next());
123 reg_def F5 ( SOC, SOC, Op_RegF, 5, F5->as_VMReg());
124 reg_def F5_H ( SOC, SOC, Op_RegF, 5, F5->as_VMReg()->next());
125 reg_def F6 ( SOC, SOC, Op_RegF, 6, F6->as_VMReg());
126 reg_def F6_H ( SOC, SOC, Op_RegF, 6, F6->as_VMReg()->next());
127 reg_def F7 ( SOC, SOC, Op_RegF, 7, F7->as_VMReg());
128 reg_def F7_H ( SOC, SOC, Op_RegF, 7, F7->as_VMReg()->next());
129 reg_def F8 ( SOC, SOC, Op_RegF, 8, F8->as_VMReg());
130 reg_def F8_H ( SOC, SOC, Op_RegF, 8, F8->as_VMReg()->next());
131 reg_def F9 ( SOC, SOC, Op_RegF, 9, F9->as_VMReg());
132 reg_def F9_H ( SOC, SOC, Op_RegF, 9, F9->as_VMReg()->next());
133 reg_def F10 ( SOC, SOC, Op_RegF, 10, F10->as_VMReg());
134 reg_def F10_H ( SOC, SOC, Op_RegF, 10, F10->as_VMReg()->next());
135 reg_def F11 ( SOC, SOC, Op_RegF, 11, F11->as_VMReg());
136 reg_def F11_H ( SOC, SOC, Op_RegF, 11, F11->as_VMReg()->next());
137 reg_def F12 ( SOC, SOC, Op_RegF, 12, F12->as_VMReg());
138 reg_def F12_H ( SOC, SOC, Op_RegF, 12, F12->as_VMReg()->next());
139 reg_def F13 ( SOC, SOC, Op_RegF, 13, F13->as_VMReg());
140 reg_def F13_H ( SOC, SOC, Op_RegF, 13, F13->as_VMReg()->next());
141 reg_def F14 ( SOC, SOC, Op_RegF, 14, F14->as_VMReg());
142 reg_def F14_H ( SOC, SOC, Op_RegF, 14, F14->as_VMReg()->next());
143 reg_def F15 ( SOC, SOC, Op_RegF, 15, F15->as_VMReg());
144 reg_def F15_H ( SOC, SOC, Op_RegF, 15, F15->as_VMReg()->next());
145 reg_def F16 ( SOC, SOC, Op_RegF, 16, F16->as_VMReg());
146 reg_def F16_H ( SOC, SOC, Op_RegF, 16, F16->as_VMReg()->next());
147 reg_def F17 ( SOC, SOC, Op_RegF, 17, F17->as_VMReg());
148 reg_def F17_H ( SOC, SOC, Op_RegF, 17, F17->as_VMReg()->next());
149 reg_def F18 ( SOC, SOC, Op_RegF, 18, F18->as_VMReg());
150 reg_def F18_H ( SOC, SOC, Op_RegF, 18, F18->as_VMReg()->next());
151 reg_def F19 ( SOC, SOC, Op_RegF, 19, F19->as_VMReg());
152 reg_def F19_H ( SOC, SOC, Op_RegF, 19, F19->as_VMReg()->next());
153 reg_def F20 ( SOC, SOC, Op_RegF, 20, F20->as_VMReg());
154 reg_def F20_H ( SOC, SOC, Op_RegF, 20, F20->as_VMReg()->next());
155 reg_def F21 ( SOC, SOC, Op_RegF, 21, F21->as_VMReg());
156 reg_def F21_H ( SOC, SOC, Op_RegF, 21, F21->as_VMReg()->next());
157 reg_def F22 ( SOC, SOC, Op_RegF, 22, F22->as_VMReg());
158 reg_def F22_H ( SOC, SOC, Op_RegF, 22, F22->as_VMReg()->next());
159 reg_def F23 ( SOC, SOC, Op_RegF, 23, F23->as_VMReg());
160 reg_def F23_H ( SOC, SOC, Op_RegF, 23, F23->as_VMReg()->next());
161 reg_def F24 ( SOC, SOC, Op_RegF, 24, F24->as_VMReg());
162 reg_def F24_H ( SOC, SOC, Op_RegF, 24, F24->as_VMReg()->next());
163 reg_def F25 ( SOC, SOC, Op_RegF, 25, F25->as_VMReg());
164 reg_def F25_H ( SOC, SOC, Op_RegF, 25, F25->as_VMReg()->next());
165 reg_def F26 ( SOC, SOC, Op_RegF, 26, F26->as_VMReg());
166 reg_def F26_H ( SOC, SOC, Op_RegF, 26, F26->as_VMReg()->next());
167 reg_def F27 ( SOC, SOC, Op_RegF, 27, F27->as_VMReg());
168 reg_def F27_H ( SOC, SOC, Op_RegF, 27, F27->as_VMReg()->next());
169 reg_def F28 ( SOC, SOC, Op_RegF, 28, F28->as_VMReg());
170 reg_def F28_H ( SOC, SOC, Op_RegF, 28, F28->as_VMReg()->next());
171 reg_def F29 ( SOC, SOC, Op_RegF, 29, F29->as_VMReg());
172 reg_def F29_H ( SOC, SOC, Op_RegF, 29, F29->as_VMReg()->next());
173 reg_def F30 ( SOC, SOC, Op_RegF, 30, F30->as_VMReg());
174 reg_def F30_H ( SOC, SOC, Op_RegF, 30, F30->as_VMReg()->next());
175 reg_def F31 ( SOC, SOC, Op_RegF, 31, F31->as_VMReg());
176 reg_def F31_H ( SOC, SOC, Op_RegF, 31, F31->as_VMReg()->next());
179 // ----------------------------
180 // Special Registers
181 // Condition Codes Flag Registers
182 reg_def MIPS_FLAG (SOC, SOC, Op_RegFlags, 1, as_Register(1)->as_VMReg());
183 // S6 is used for get_thread(S6)
184 // S5 is used for the heap base of compressed oops
185 alloc_class chunk0(
186 S7, S7_H,
187 S0, S0_H,
188 S1, S1_H,
189 S2, S2_H,
190 S4, S4_H,
191 S5, S5_H,
192 S6, S6_H,
193 S3, S3_H,
194 T2, T2_H,
195 T3, T3_H,
196 T8, T8_H,
197 T9, T9_H,
198 T1, T1_H, // inline_cache_reg
199 V1, V1_H,
200 A7, A7_H,
201 A6, A6_H,
202 A5, A5_H,
203 A4, A4_H,
204 V0, V0_H,
205 A3, A3_H,
206 A2, A2_H,
207 A1, A1_H,
208 A0, A0_H,
209 T0, T0_H,
210 GP, GP_H, // fixed: trailing comma was missing, which breaks the alloc_class element list
211 RA, RA_H,
212 SP, SP_H, // stack_pointer
213 FP, FP_H // frame_pointer
214 );
216 alloc_class chunk1( F0, F0_H,
217 F1, F1_H,
218 F2, F2_H,
219 F3, F3_H,
220 F4, F4_H,
221 F5, F5_H,
222 F6, F6_H,
223 F7, F7_H,
224 F8, F8_H,
225 F9, F9_H,
226 F10, F10_H,
227 F11, F11_H,
228 F20, F20_H,
229 F21, F21_H,
230 F22, F22_H,
231 F23, F23_H,
232 F24, F24_H,
233 F25, F25_H,
234 F26, F26_H,
235 F27, F27_H,
236 F28, F28_H,
237 F19, F19_H,
238 F18, F18_H,
239 F17, F17_H,
240 F16, F16_H,
241 F15, F15_H,
242 F14, F14_H,
243 F13, F13_H,
244 F12, F12_H,
245 F29, F29_H,
246 F30, F30_H,
247 F31, F31_H);
249 alloc_class chunk2(MIPS_FLAG);
251 reg_class s_reg( S0, S1, S2, S3, S4, S5, S6, S7 );
252 reg_class s0_reg( S0 );
253 reg_class s1_reg( S1 );
254 reg_class s2_reg( S2 );
255 reg_class s3_reg( S3 );
256 reg_class s4_reg( S4 );
257 reg_class s5_reg( S5 );
258 reg_class s6_reg( S6 );
259 reg_class s7_reg( S7 );
261 reg_class t_reg( T0, T1, T2, T3, T8, T9 );
262 reg_class t0_reg( T0 );
263 reg_class t1_reg( T1 );
264 reg_class t2_reg( T2 );
265 reg_class t3_reg( T3 );
266 reg_class t8_reg( T8 );
267 reg_class t9_reg( T9 );
269 reg_class a_reg( A0, A1, A2, A3, A4, A5, A6, A7 );
270 reg_class a0_reg( A0 );
271 reg_class a1_reg( A1 );
272 reg_class a2_reg( A2 );
273 reg_class a3_reg( A3 );
274 reg_class a4_reg( A4 );
275 reg_class a5_reg( A5 );
276 reg_class a6_reg( A6 );
277 reg_class a7_reg( A7 );
279 reg_class v0_reg( V0 );
280 reg_class v1_reg( V1 );
282 reg_class sp_reg( SP, SP_H );
283 reg_class fp_reg( FP, FP_H );
285 reg_class mips_flags(MIPS_FLAG);
287 reg_class v0_long_reg( V0, V0_H );
288 reg_class v1_long_reg( V1, V1_H );
289 reg_class a0_long_reg( A0, A0_H );
290 reg_class a1_long_reg( A1, A1_H );
291 reg_class a2_long_reg( A2, A2_H );
292 reg_class a3_long_reg( A3, A3_H );
293 reg_class a4_long_reg( A4, A4_H );
294 reg_class a5_long_reg( A5, A5_H );
295 reg_class a6_long_reg( A6, A6_H );
296 reg_class a7_long_reg( A7, A7_H );
297 reg_class t0_long_reg( T0, T0_H );
298 reg_class t1_long_reg( T1, T1_H );
299 reg_class t2_long_reg( T2, T2_H );
300 reg_class t3_long_reg( T3, T3_H );
301 reg_class t8_long_reg( T8, T8_H );
302 reg_class t9_long_reg( T9, T9_H );
303 reg_class s0_long_reg( S0, S0_H );
304 reg_class s1_long_reg( S1, S1_H );
305 reg_class s2_long_reg( S2, S2_H );
306 reg_class s3_long_reg( S3, S3_H );
307 reg_class s4_long_reg( S4, S4_H );
308 reg_class s5_long_reg( S5, S5_H );
309 reg_class s6_long_reg( S6, S6_H );
310 reg_class s7_long_reg( S7, S7_H );
312 reg_class int_reg( S7, S0, S1, S2, S4, S3, T8, T2, T3, T1, V1, A7, A6, A5, A4, V0, A3, A2, A1, A0, T0 );
314 reg_class no_Ax_int_reg( S7, S0, S1, S2, S4, S3, T8, T2, T3, T1, V1, V0, T0 );
316 reg_class p_reg(
317 S7, S7_H,
318 S0, S0_H,
319 S1, S1_H,
320 S2, S2_H,
321 S4, S4_H,
322 S3, S3_H,
323 T8, T8_H,
324 T2, T2_H,
325 T3, T3_H,
326 T1, T1_H,
327 A7, A7_H,
328 A6, A6_H,
329 A5, A5_H,
330 A4, A4_H,
331 A3, A3_H,
332 A2, A2_H,
333 A1, A1_H,
334 A0, A0_H,
335 T0, T0_H
336 );
338 reg_class no_T8_p_reg(
339 S7, S7_H,
340 S0, S0_H,
341 S1, S1_H,
342 S2, S2_H,
343 S4, S4_H,
344 S3, S3_H,
345 T2, T2_H,
346 T3, T3_H,
347 T1, T1_H,
348 A7, A7_H,
349 A6, A6_H,
350 A5, A5_H,
351 A4, A4_H,
352 A3, A3_H,
353 A2, A2_H,
354 A1, A1_H,
355 A0, A0_H,
356 T0, T0_H
357 );
359 reg_class long_reg(
360 S7, S7_H,
361 S0, S0_H,
362 S1, S1_H,
363 S2, S2_H,
364 S4, S4_H,
365 S3, S3_H,
366 T8, T8_H,
367 T2, T2_H,
368 T3, T3_H,
369 T1, T1_H,
370 A7, A7_H,
371 A6, A6_H,
372 A5, A5_H,
373 A4, A4_H,
374 A3, A3_H,
375 A2, A2_H,
376 A1, A1_H,
377 A0, A0_H,
378 T0, T0_H
379 );
382 // Floating point registers.
383 // 2012/8/23 Fu: F30/F31 are used as temporary registers in D2I
384 // 2016/12/1 aoqi: F31 are not used as temporary registers in D2I
385 reg_class flt_reg( F0, F1, F2, F3, F4, F5, F6, F7, F8, F9, F10, F11, F12, F13, F14, F15, F16, F17, F18, F19, F20, F21, F22, F23, F24, F25, F26, F27, F28, F29, F31); // fixed missing comma between F17 and F18; F30 deliberately excluded (D2I temporary, see note above)
386 reg_class dbl_reg( F0, F0_H,
387 F1, F1_H,
388 F2, F2_H,
389 F3, F3_H,
390 F4, F4_H,
391 F5, F5_H,
392 F6, F6_H,
393 F7, F7_H,
394 F8, F8_H,
395 F9, F9_H,
396 F10, F10_H,
397 F11, F11_H,
398 F12, F12_H,
399 F13, F13_H,
400 F14, F14_H,
401 F15, F15_H,
402 F16, F16_H,
403 F17, F17_H,
404 F18, F18_H,
405 F19, F19_H,
406 F20, F20_H,
407 F21, F21_H,
408 F22, F22_H,
409 F23, F23_H,
410 F24, F24_H,
411 F25, F25_H,
412 F26, F26_H,
413 F27, F27_H,
414 F28, F28_H,
415 F29, F29_H,
416 F31, F31_H);
418 reg_class flt_arg0( F12 );
419 reg_class dbl_arg0( F12, F12_H );
420 reg_class dbl_arg1( F14, F14_H );
422 %}
424 //----------DEFINITION BLOCK---------------------------------------------------
425 // Define name --> value mappings to inform the ADLC of an integer valued name
426 // Current support includes integer values in the range [0, 0x7FFFFFFF]
427 // Format:
428 // int_def <name> ( <int_value>, <expression>);
429 // Generated Code in ad_<arch>.hpp
430 // #define <name> (<expression>)
431 // // value == <int_value>
432 // Generated code in ad_<arch>.cpp adlc_verification()
433 // assert( <name> == <int_value>, "Expect (<expression>) to equal <int_value>");
434 //
435 definitions %{
436 int_def DEFAULT_COST ( 100, 100);
437 int_def HUGE_COST (1000000, 1000000);
439 // Memory refs are twice as expensive as run-of-the-mill.
440 int_def MEMORY_REF_COST ( 200, DEFAULT_COST * 2);
442 // Branches are even more expensive.
443 int_def BRANCH_COST ( 300, DEFAULT_COST * 3);
444 // we use jr instruction to construct call, so more expensive
445 // by yjl 2/28/2006
446 int_def CALL_COST ( 500, DEFAULT_COST * 5);
447 /*
448 int_def EQUAL ( 1, 1 );
449 int_def NOT_EQUAL ( 2, 2 );
450 int_def GREATER ( 3, 3 );
451 int_def GREATER_EQUAL ( 4, 4 );
452 int_def LESS ( 5, 5 );
453 int_def LESS_EQUAL ( 6, 6 );
454 */
455 %}
459 //----------SOURCE BLOCK-------------------------------------------------------
460 // This is a block of C++ code which provides values, functions, and
461 // definitions necessary in the rest of the architecture description
463 source_hpp %{
464 // Header information of the source block.
465 // Method declarations/definitions which are used outside
466 // the ad-scope can conveniently be defined here.
467 //
468 // To keep related declarations/definitions/uses close together,
469 // we switch between source %{ }% and source_hpp %{ }% freely as needed.
471 class CallStubImpl {
473 //--------------------------------------------------------------
474 //---< Used for optimization in Compile::shorten_branches >---
475 //--------------------------------------------------------------
477 public:
478 // Size of call trampoline stub.
479 static uint size_call_trampoline() {
480 return 0; // no call trampolines on this platform
481 }
483 // number of relocations needed by a call trampoline stub
484 static uint reloc_call_trampoline() {
485 return 0; // no call trampolines on this platform
486 }
487 };
489 class HandlerImpl {
491 public:
493 static int emit_exception_handler(CodeBuffer &cbuf);
494 static int emit_deopt_handler(CodeBuffer& cbuf);
496 static uint size_exception_handler() {
497 // NativeCall instruction size is the same as NativeJump.
498 // exception handler starts out as jump and can be patched to
499 // a call be deoptimization. (4932387)
500 // Note that this value is also credited (in output.cpp) to
501 // the size of the code section.
502 // return NativeJump::instruction_size;
503 int size = NativeCall::instruction_size;
504 return round_to(size, 16);
505 }
507 #ifdef _LP64
508 static uint size_deopt_handler() {
509 int size = NativeCall::instruction_size;
510 return round_to(size, 16);
511 }
512 #else
513 static uint size_deopt_handler() {
514 // NativeCall instruction size is the same as NativeJump.
515 // exception handler starts out as jump and can be patched to
516 // a call be deoptimization. (4932387)
517 // Note that this value is also credited (in output.cpp) to
518 // the size of the code section.
519 return 5 + NativeJump::instruction_size; // pushl(); jmp;
520 }
521 #endif
522 };
524 %} // end source_hpp
526 source %{
528 #define NO_INDEX 0
529 #define RELOC_IMM64 Assembler::imm_operand
530 #define RELOC_DISP32 Assembler::disp32_operand
533 #define __ _masm.
536 // Emit exception handler code.
537 // Stuff framesize into a register and call a VM stub routine.
538 int HandlerImpl::emit_exception_handler(CodeBuffer& cbuf) {
539 /*
540 // Note that the code buffer's insts_mark is always relative to insts.
541 // That's why we must use the macroassembler to generate a handler.
542 MacroAssembler _masm(&cbuf);
543 address base = __ start_a_stub(size_exception_handler());
544 if (base == NULL) return 0; // CodeBuffer::expand failed
545 int offset = __ offset();
546 __ jump(RuntimeAddress(OptoRuntime::exception_blob()->entry_point()));
547 assert(__ offset() - offset <= (int) size_exception_handler(), "overflow");
548 __ end_a_stub();
549 return offset;
550 */
551 // Note that the code buffer's insts_mark is always relative to insts.
552 // That's why we must use the macroassembler to generate a handler.
553 MacroAssembler _masm(&cbuf);
554 address base =
555 __ start_a_stub(size_exception_handler());
556 if (base == NULL) return 0; // CodeBuffer::expand failed
557 int offset = __ offset();
559 __ block_comment("; emit_exception_handler");
561 /* 2012/9/25 FIXME Jin: According to X86, we should use a direct jump.
562 * * However, this will trigger an assert after the 40th method:
563 * *
564 * * 39 b java.lang.Throwable::<init> (25 bytes)
565 * * --- ns java.lang.Throwable::fillInStackTrace
566 * * 40 !b java.net.URLClassLoader::findClass (29 bytes)
567 * * /vm/opto/runtime.cpp, 900 , assert(caller.is_compiled_frame(),"must be")
568 * * 40 made not entrant (2) java.net.URLClassLoader::findClass (29 bytes)
569 * *
570 * * If we change from JR to JALR, the assert will disappear, but WebClient will
571 * * fail after the 403th method with unknown reason.
572 * */
573 __ li48(T9, (long)OptoRuntime::exception_blob()->entry_point()); // materialize the 48-bit blob entry address into T9
574 __ jr(T9); // indirect jump through T9 (see FIXME above: JR vs JALR)
575 __ delayed()->nop(); // fill the MIPS branch delay slot
576 __ align(16); // pad to size_exception_handler(), which is rounded to 16
577 assert(__ offset() - offset <= (int) size_exception_handler(), "overflow");
578 __ end_a_stub();
579 return offset;
580 }
582 // Emit deopt handler code.
583 int HandlerImpl::emit_deopt_handler(CodeBuffer& cbuf) {
584 /*
585 // Note that the code buffer's insts_mark is always relative to insts.
586 // That's why we must use the macroassembler to generate a handler.
587 MacroAssembler _masm(&cbuf);
588 address base = __ start_a_stub(size_deopt_handler());
589 if (base == NULL) return 0; // CodeBuffer::expand failed
590 int offset = __ offset();
592 #ifdef _LP64
593 address the_pc = (address) __ pc();
594 Label next;
595 // push a "the_pc" on the stack without destroying any registers
596 // as they all may be live.
598 // push address of "next"
599 __ call(next, relocInfo::none); // reloc none is fine since it is a disp32
600 __ bind(next);
601 // adjust it so it matches "the_pc"
602 __ subptr(Address(rsp, 0), __ offset() - offset);
603 #else
604 InternalAddress here(__ pc());
605 __ pushptr(here.addr());
606 #endif
608 __ jump(RuntimeAddress(SharedRuntime::deopt_blob()->unpack()));
609 assert(__ offset() - offset <= (int) size_deopt_handler(), "overflow");
610 __ end_a_stub();
611 return offset;
612 */
613 // Note that the code buffer's insts_mark is always relative to insts.
614 // That's why we must use the macroassembler to generate a handler.
615 MacroAssembler _masm(&cbuf);
616 address base =
617 __ start_a_stub(size_deopt_handler());
619 // FIXME
620 if (base == NULL) return 0; // CodeBuffer::expand failed
621 int offset = __ offset();
623 __ block_comment("; emit_deopt_handler");
625 cbuf.set_insts_mark();
626 __ relocate(relocInfo::runtime_call_type);
628 __ li48(T9, (long)SharedRuntime::deopt_blob()->unpack());
629 __ jalr(T9);
630 __ delayed()->nop();
631 __ align(16);
632 assert(__ offset() - offset <= (int) size_deopt_handler(), "overflow");
633 __ end_a_stub();
634 return offset;
635 }
638 const bool Matcher::match_rule_supported(int opcode) {
639 if (!has_match_rule(opcode))
640 return false;
642 switch (opcode) {
643 //Op_CountLeadingZerosI Op_CountLeadingZerosL can be deleted, all MIPS CPUs support clz & dclz.
644 case Op_CountLeadingZerosI:
645 case Op_CountLeadingZerosL:
646 if (!UseCountLeadingZerosInstruction)
647 return false;
648 break;
649 case Op_CountTrailingZerosI:
650 case Op_CountTrailingZerosL:
651 if (!UseCountTrailingZerosInstruction)
652 return false;
653 break;
654 }
656 return true; // Per default match rules are supported.
657 }
659 //FIXME
660 // emit call stub, compiled java to interpreter
661 void emit_java_to_interp(CodeBuffer &cbuf ) {
662 // Stub is fixed up when the corresponding call is converted from calling
663 // compiled code to calling interpreted code.
664 // mov rbx,0
665 // jmp -1
667 address mark = cbuf.insts_mark(); // get mark within main instrs section
669 // Note that the code buffer's insts_mark is always relative to insts.
670 // That's why we must use the macroassembler to generate a stub.
671 MacroAssembler _masm(&cbuf);
673 address base =
674 __ start_a_stub(Compile::MAX_stubs_size);
675 if (base == NULL) return; // CodeBuffer::expand failed
676 // static stub relocation stores the instruction address of the call
678 __ relocate(static_stub_Relocation::spec(mark), 0);
680 /* 2012/10/29 Jin: Rmethod contains methodOop, it should be relocated for GC */
681 /*
682 int oop_index = __ oop_recorder()->allocate_index(NULL);
683 RelocationHolder rspec = oop_Relocation::spec(oop_index);
684 __ relocate(rspec);
685 */
687 // static stub relocation also tags the methodOop in the code-stream.
688 __ li48(S3, (long)0);
689 // This is recognized as unresolved by relocs/nativeInst/ic code
691 __ relocate(relocInfo::runtime_call_type);
693 cbuf.set_insts_mark();
694 address call_pc = (address)-1;
695 __ li48(AT, (long)call_pc);
696 __ jr(AT);
697 __ nop();
698 __ align(16);
699 __ end_a_stub();
700 // Update current stubs pointer and restore code_end.
701 }
703 // size of call stub, compiled java to interpreter
704 uint size_java_to_interp() {
705 int size = 4 * 4 + NativeCall::instruction_size; // sizeof(li48) + NativeCall::instruction_size
706 return round_to(size, 16);
707 }
709 // relocation entries for call stub, compiled java to interpreter
710 uint reloc_java_to_interp() {
711 return 16; // in emit_java_to_interp + in Java_Static_Call
712 }
714 // A branch offset fits a "short" branch iff it fits the 16-bit signed
714 // immediate field of MIPS branch instructions. Larger offsets are not
714 // implemented yet; debug builds assert, product builds abort via
714 // Unimplemented().
714 bool Matcher::is_short_branch_offset(int rule, int br_size, int offset) {
715 if( Assembler::is_simm16(offset) ) return true;
716 else
717 {
718 assert(false, "Not implemented yet !" );
719 Unimplemented();
720 }
721 return false; // not reached (Unimplemented() aborts); keeps the non-void function from falling off the end (UB)
722 }
724 // No additional cost for CMOVL.
725 const int Matcher::long_cmove_cost() { return 0; }
727 // No CMOVF/CMOVD with SSE2
728 const int Matcher::float_cmove_cost() { return ConditionalMoveLimit; }
730 // Does the CPU require late expand (see block.cpp for description of late expand)?
731 const bool Matcher::require_postalloc_expand = false;
733 // Should the Matcher clone shifts on addressing modes, expecting them
734 // to be subsumed into complex addressing expressions or compute them
735 // into registers? True for Intel but false for most RISCs
736 const bool Matcher::clone_shift_expressions = false;
738 // Do we need to mask the count passed to shift instructions or does
739 // the cpu only look at the lower 5/6 bits anyway?
740 const bool Matcher::need_masked_shift_count = false;
742 bool Matcher::narrow_oop_use_complex_address() {
743 NOT_LP64(ShouldNotCallThis());
744 assert(UseCompressedOops, "only for compressed oops code");
745 return false;
746 }
748 bool Matcher::narrow_klass_use_complex_address() {
749 NOT_LP64(ShouldNotCallThis());
750 assert(UseCompressedClassPointers, "only for compressed klass code");
751 return false;
752 }
754 // This is UltraSparc specific, true just means we have fast l2f conversion
755 const bool Matcher::convL2FSupported(void) {
756 return true;
757 }
759 // Max vector size in bytes. 0 if not supported.
760 const int Matcher::vector_width_in_bytes(BasicType bt) {
761 assert(MaxVectorSize == 8, "");
762 return 8;
763 }
765 // Vector ideal reg
766 const int Matcher::vector_ideal_reg(int size) {
767 assert(MaxVectorSize == 8, "");
768 switch(size) {
769 case 8: return Op_VecD;
770 }
771 ShouldNotReachHere();
772 return 0;
773 }
775 // Only lowest bits of xmm reg are used for vector shift count.
776 const int Matcher::vector_shift_count_ideal_reg(int size) {
777 fatal("vector shift is not supported");
778 return Node::NotAMachineReg;
779 }
781 // Limits on vector size (number of elements) loaded into vector.
782 const int Matcher::max_vector_size(const BasicType bt) {
783 assert(is_java_primitive(bt), "only primitive type vectors");
784 return vector_width_in_bytes(bt)/type2aelembytes(bt);
785 }
787 const int Matcher::min_vector_size(const BasicType bt) {
788 return max_vector_size(bt); // Same as max.
789 }
791 // MIPS supports misaligned vectors store/load? FIXME
792 const bool Matcher::misaligned_vectors_ok() {
793 return false;
794 //return !AlignVector; // can be changed by flag
795 }
797 // Register for DIVI projection of divmodI
798 RegMask Matcher::divI_proj_mask() {
799 ShouldNotReachHere();
800 return RegMask();
801 }
803 // Register for MODI projection of divmodI
804 RegMask Matcher::modI_proj_mask() {
805 ShouldNotReachHere();
806 return RegMask();
807 }
809 // Register for DIVL projection of divmodL
810 RegMask Matcher::divL_proj_mask() {
811 ShouldNotReachHere();
812 return RegMask();
813 }
815 int Matcher::regnum_to_fpu_offset(int regnum) {
816 return regnum - 32; // The FP registers are in the second chunk
817 }
820 const bool Matcher::isSimpleConstant64(jlong value) {
821 // Will one (StoreL ConL) be cheaper than two (StoreI ConI)?.
822 return true;
823 }
826 // Return whether or not this register is ever used as an argument. This
827 // function is used on startup to build the trampoline stubs in generateOptoStub.
828 // Registers not mentioned will be killed by the VM call in the trampoline, and
829 // arguments in those registers not be available to the callee.
830 bool Matcher::can_be_java_arg( int reg ) {
831 /* Refer to: [sharedRuntime_mips_64.cpp] SharedRuntime::java_calling_convention() */
832 if ( reg == T0_num || reg == T0_H_num
833 || reg == A0_num || reg == A0_H_num
834 || reg == A1_num || reg == A1_H_num
835 || reg == A2_num || reg == A2_H_num
836 || reg == A3_num || reg == A3_H_num
837 || reg == A4_num || reg == A4_H_num
838 || reg == A5_num || reg == A5_H_num
839 || reg == A6_num || reg == A6_H_num
840 || reg == A7_num || reg == A7_H_num )
841 return true;
843 if ( reg == F12_num || reg == F12_H_num
844 || reg == F13_num || reg == F13_H_num
845 || reg == F14_num || reg == F14_H_num
846 || reg == F15_num || reg == F15_H_num
847 || reg == F16_num || reg == F16_H_num
848 || reg == F17_num || reg == F17_H_num
849 || reg == F18_num || reg == F18_H_num
850 || reg == F19_num || reg == F19_H_num )
851 return true;
853 return false;
854 }
856 bool Matcher::is_spillable_arg( int reg ) {
857 return can_be_java_arg(reg);
858 }
860 bool Matcher::use_asm_for_ldiv_by_con( jlong divisor ) {
861 return false;
862 }
864 // Register for MODL projection of divmodL
865 RegMask Matcher::modL_proj_mask() {
866 ShouldNotReachHere();
867 return RegMask();
868 }
870 const RegMask Matcher::method_handle_invoke_SP_save_mask() {
871 return FP_REG_mask();
872 }
874 // MIPS doesn't support AES intrinsics
875 const bool Matcher::pass_original_key_for_aes() {
876 return false;
877 }
879 // The address of the call instruction needs to be 16-byte aligned to
880 // ensure that it does not span a cache line so that it can be patched.
882 int CallStaticJavaDirectNode::compute_padding(int current_offset) const {
883 //lui
884 //ori
885 //dsll
886 //ori
888 //jalr
889 //nop
891 return round_to(current_offset, alignment_required()) - current_offset;
892 }
894 // The address of the call instruction needs to be 16-byte aligned to
895 // ensure that it does not span a cache line so that it can be patched.
896 int CallDynamicJavaDirectNode::compute_padding(int current_offset) const {
897 //li64 <--- skip
899 //lui
900 //ori
901 //dsll
902 //ori
904 //jalr
905 //nop
907 current_offset += 4 * 6; // skip li64
908 return round_to(current_offset, alignment_required()) - current_offset;
909 }
911 int CallLeafNoFPDirectNode::compute_padding(int current_offset) const {
912 //lui
913 //ori
914 //dsll
915 //ori
917 //jalr
918 //nop
920 return round_to(current_offset, alignment_required()) - current_offset;
921 }
923 int CallLeafDirectNode::compute_padding(int current_offset) const {
924 //lui
925 //ori
926 //dsll
927 //ori
929 //jalr
930 //nop
932 return round_to(current_offset, alignment_required()) - current_offset;
933 }
935 int CallRuntimeDirectNode::compute_padding(int current_offset) const {
936 //lui
937 //ori
938 //dsll
939 //ori
941 //jalr
942 //nop
944 return round_to(current_offset, alignment_required()) - current_offset;
945 }
// Platform capability flags queried by the C2 matcher. These describe
// MIPS64/Loongson behavior to the architecture-independent compiler.

// If CPU can load and store mis-aligned doubles directly then no fixup is
// needed. Else we split the double into 2 integer pieces and move it
// piece-by-piece. Only happens when passing doubles into C code as the
// Java calling convention forces doubles to be aligned.
const bool Matcher::misaligned_doubles_ok = false;
// Do floats take an entire double register or just half?
//const bool Matcher::float_in_double = true;
bool Matcher::float_in_double() { return false; }
// Threshold size for cleararray.
const int Matcher::init_array_short_size = 8 * BytesPerLong;
// Do ints take an entire long register or just half?
const bool Matcher::int_in_long = true;
// Is it better to copy float constants, or load them directly from memory?
// Intel can load a float constant from a direct address, requiring no
// extra registers. Most RISCs will have to materialize an address into a
// register first, so they would do better to copy the constant from stack.
const bool Matcher::rematerialize_float_constants = false;
// Advertise here if the CPU requires explicit rounding operations
// to implement the UseStrictFP mode.
const bool Matcher::strict_fp_requires_explicit_rounding = false;
// The ecx parameter to rep stos for the ClearArray node is in dwords.
// (Comment inherited from the x86 template; on MIPS this flag only selects
// whether the ClearArray count is expressed in bytes or in words.)
const bool Matcher::init_array_count_is_in_bytes = false;
971 // Indicate if the safepoint node needs the polling page as an input.
972 // Since MIPS doesn't have absolute addressing, it needs.
973 bool SafePointNode::needs_polling_address_input() {
974 return true;
975 }
977 // !!!!! Special hack to get all type of calls to specify the byte offset
978 // from the start of the call to the point where the return address
979 // will point.
980 int MachCallStaticJavaNode::ret_addr_offset() {
981 assert(NativeCall::instruction_size == 24, "in MachCallStaticJavaNode::ret_addr_offset");
982 //The value ought to be 16 bytes.
983 //lui
984 //ori
985 //dsll
986 //ori
987 //jalr
988 //nop
989 return NativeCall::instruction_size;
990 }
992 int MachCallDynamicJavaNode::ret_addr_offset() {
993 /* 2012/9/10 Jin: must be kept in sync with Java_Dynamic_Call */
995 // return NativeCall::instruction_size;
996 assert(NativeCall::instruction_size == 24, "in MachCallDynamicJavaNode::ret_addr_offset");
997 //The value ought to be 4 + 16 bytes.
998 //lui IC_Klass,
999 //ori IC_Klass,
1000 //dsll IC_Klass
1001 //ori IC_Klass
1002 //lui T9
1003 //ori T9
1004 //dsll T9
1005 //ori T9
1006 //jalr T9
1007 //nop
1008 return 6 * 4 + NativeCall::instruction_size;
1010 }
1012 /*
1013 // EMIT_OPCODE()
1014 void emit_opcode(CodeBuffer &cbuf, int code) {
1015 *(cbuf.code_end()) = (unsigned char)code;
1016 cbuf.set_code_end(cbuf.code_end() + 1);
1017 }
1018 */
// Emit a 32-bit data word into the instruction stream together with its
// relocation record. The relocation must be registered (against the current
// instruction mark) before the word itself is emitted.
void emit_d32_reloc(CodeBuffer &cbuf, int d32, relocInfo::relocType reloc,
int format) {
cbuf.relocate(cbuf.insts_mark(), reloc, format);
cbuf.insts()->emit_int32(d32);
}
1026 //=============================================================================
1028 // Figure out which register class each belongs in: rc_int, rc_float, rc_stack
1029 enum RC { rc_bad, rc_int, rc_float, rc_stack };
1030 static enum RC rc_class( OptoReg::Name reg ) {
1031 if( !OptoReg::is_valid(reg) ) return rc_bad;
1032 if (OptoReg::is_stack(reg)) return rc_stack;
1033 VMReg r = OptoReg::as_VMReg(reg);
1034 if (r->is_Register()) return rc_int;
1035 assert(r->is_FloatRegister(), "must be");
1036 return rc_float;
1037 }
// Central spill-copy worker used in three modes:
//   - cbuf != NULL : emit the actual MIPS instructions for the copy;
//   - cbuf == NULL, st != NULL (and !do_size) : pretty-print the copy;
//   - do_size == true : only compute the size.
// Handles all nine source/destination combinations of {stack, gpr, fpr},
// in both 64-bit (even-aligned register pair) and 32-bit flavors, using AT
// (and T9) as scratch. Returns the number of code bytes emitted.
uint MachSpillCopyNode::implementation( CodeBuffer *cbuf, PhaseRegAlloc *ra_, bool do_size, outputStream* st ) const {
// Get registers to move
OptoReg::Name src_second = ra_->get_reg_second(in(1));
OptoReg::Name src_first = ra_->get_reg_first(in(1));
OptoReg::Name dst_second = ra_->get_reg_second(this );
OptoReg::Name dst_first = ra_->get_reg_first(this );
// Map each half to its allocation class (stack slot, gpr or fpr).
enum RC src_second_rc = rc_class(src_second);
enum RC src_first_rc = rc_class(src_first);
enum RC dst_second_rc = rc_class(dst_second);
enum RC dst_first_rc = rc_class(dst_first);
assert(OptoReg::is_valid(src_first) && OptoReg::is_valid(dst_first), "must move at least 1 register" );
// Generate spill code!
int size = 0;
if( src_first == dst_first && src_second == dst_second )
return 0; // Self copy, no move
if (src_first_rc == rc_stack) {
// mem ->
if (dst_first_rc == rc_stack) {
// mem -> mem (bounced through scratch register AT)
assert(src_second != dst_first, "overlap");
// An even first half whose pair is the adjacent register means a
// 64-bit value; otherwise the halves are moved as 32-bit pieces.
if ((src_first & 1) == 0 && src_first + 1 == src_second &&
(dst_first & 1) == 0 && dst_first + 1 == dst_second) {
// 64-bit
int src_offset = ra_->reg2offset(src_first);
int dst_offset = ra_->reg2offset(dst_first);
if (cbuf) {
MacroAssembler _masm(cbuf);
__ ld(AT, Address(SP, src_offset));
__ sd(AT, Address(SP, dst_offset));
#ifndef PRODUCT
} else {
if(!do_size){
if (size != 0) st->print("\n\t");
st->print("ld AT, [SP + #%d]\t# 64-bit mem-mem spill 1\n\t"
"sd AT, [SP + #%d]",
src_offset, dst_offset);
}
#endif
}
size += 8;
} else {
// 32-bit
assert(!((src_first & 1) == 0 && src_first + 1 == src_second), "no transform");
assert(!((dst_first & 1) == 0 && dst_first + 1 == dst_second), "no transform");
// No pushl/popl, so:
int src_offset = ra_->reg2offset(src_first);
int dst_offset = ra_->reg2offset(dst_first);
if (cbuf) {
MacroAssembler _masm(cbuf);
__ lw(AT, Address(SP, src_offset));
__ sw(AT, Address(SP, dst_offset));
#ifndef PRODUCT
} else {
if(!do_size){
if (size != 0) st->print("\n\t");
st->print("lw AT, [SP + #%d] spill 2\n\t"
"sw AT, [SP + #%d]\n\t",
src_offset, dst_offset);
}
#endif
}
size += 8;
}
return size;
} else if (dst_first_rc == rc_int) {
// mem -> gpr
if ((src_first & 1) == 0 && src_first + 1 == src_second &&
(dst_first & 1) == 0 && dst_first + 1 == dst_second) {
// 64-bit
int offset = ra_->reg2offset(src_first);
if (cbuf) {
MacroAssembler _masm(cbuf);
__ ld(as_Register(Matcher::_regEncode[dst_first]), Address(SP, offset));
#ifndef PRODUCT
} else {
if(!do_size){
if (size != 0) st->print("\n\t");
st->print("ld %s, [SP + #%d]\t# spill 3",
Matcher::regName[dst_first],
offset);
}
#endif
}
size += 4;
} else {
// 32-bit
assert(!((src_first & 1) == 0 && src_first + 1 == src_second), "no transform");
assert(!((dst_first & 1) == 0 && dst_first + 1 == dst_second), "no transform");
int offset = ra_->reg2offset(src_first);
if (cbuf) {
MacroAssembler _masm(cbuf);
// Sign-extend for an int value, zero-extend otherwise (e.g. narrow oop).
if (this->ideal_reg() == Op_RegI)
__ lw(as_Register(Matcher::_regEncode[dst_first]), Address(SP, offset));
else
__ lwu(as_Register(Matcher::_regEncode[dst_first]), Address(SP, offset));
#ifndef PRODUCT
} else {
if(!do_size){
if (size != 0) st->print("\n\t");
if (this->ideal_reg() == Op_RegI)
st->print("lw %s, [SP + #%d]\t# spill 4",
Matcher::regName[dst_first],
offset);
else
st->print("lwu %s, [SP + #%d]\t# spill 5",
Matcher::regName[dst_first],
offset);
}
#endif
}
size += 4;
}
return size;
} else if (dst_first_rc == rc_float) {
// mem-> xmm
if ((src_first & 1) == 0 && src_first + 1 == src_second &&
(dst_first & 1) == 0 && dst_first + 1 == dst_second) {
// 64-bit
int offset = ra_->reg2offset(src_first);
if (cbuf) {
MacroAssembler _masm(cbuf);
__ ldc1( as_FloatRegister(Matcher::_regEncode[dst_first]), Address(SP, offset));
#ifndef PRODUCT
} else {
if(!do_size){
if (size != 0) st->print("\n\t");
st->print("ldc1 %s, [SP + #%d]\t# spill 6",
Matcher::regName[dst_first],
offset);
}
#endif
}
size += 4;
} else {
// 32-bit
assert(!((src_first & 1) == 0 && src_first + 1 == src_second), "no transform");
assert(!((dst_first & 1) == 0 && dst_first + 1 == dst_second), "no transform");
int offset = ra_->reg2offset(src_first);
if (cbuf) {
MacroAssembler _masm(cbuf);
__ lwc1( as_FloatRegister(Matcher::_regEncode[dst_first]), Address(SP, offset));
#ifndef PRODUCT
} else {
if(!do_size){
if (size != 0) st->print("\n\t");
st->print("lwc1 %s, [SP + #%d]\t# spill 7",
Matcher::regName[dst_first],
offset);
}
#endif
}
size += 4;
}
return size;
}
} else if (src_first_rc == rc_int) {
// gpr ->
if (dst_first_rc == rc_stack) {
// gpr -> mem
if ((src_first & 1) == 0 && src_first + 1 == src_second &&
(dst_first & 1) == 0 && dst_first + 1 == dst_second) {
// 64-bit
int offset = ra_->reg2offset(dst_first);
if (cbuf) {
MacroAssembler _masm(cbuf);
__ sd(as_Register(Matcher::_regEncode[src_first]), Address(SP, offset));
#ifndef PRODUCT
} else {
if(!do_size){
if (size != 0) st->print("\n\t");
st->print("sd %s, [SP + #%d] # spill 8",
Matcher::regName[src_first],
offset);
}
#endif
}
size += 4;
} else {
// 32-bit
assert(!((src_first & 1) == 0 && src_first + 1 == src_second), "no transform");
assert(!((dst_first & 1) == 0 && dst_first + 1 == dst_second), "no transform");
int offset = ra_->reg2offset(dst_first);
if (cbuf) {
MacroAssembler _masm(cbuf);
__ sw(as_Register(Matcher::_regEncode[src_first]), Address(SP, offset));
#ifndef PRODUCT
} else {
if(!do_size){
if (size != 0) st->print("\n\t");
st->print("sw %s, [SP + #%d]\t# spill 9",
Matcher::regName[src_first], offset);
}
#endif
}
size += 4;
}
return size;
} else if (dst_first_rc == rc_int) {
// gpr -> gpr
if ((src_first & 1) == 0 && src_first + 1 == src_second &&
(dst_first & 1) == 0 && dst_first + 1 == dst_second) {
// 64-bit
if (cbuf) {
MacroAssembler _masm(cbuf);
__ move(as_Register(Matcher::_regEncode[dst_first]),
as_Register(Matcher::_regEncode[src_first]));
#ifndef PRODUCT
} else {
if(!do_size){
if (size != 0) st->print("\n\t");
st->print("move(64bit) %s <-- %s\t# spill 10",
Matcher::regName[dst_first],
Matcher::regName[src_first]);
}
#endif
}
size += 4;
return size;
} else {
// 32-bit
assert(!((src_first & 1) == 0 && src_first + 1 == src_second), "no transform");
assert(!((dst_first & 1) == 0 && dst_first + 1 == dst_second), "no transform");
if (cbuf) {
MacroAssembler _masm(cbuf);
if (this->ideal_reg() == Op_RegI)
__ move_u32(as_Register(Matcher::_regEncode[dst_first]), as_Register(Matcher::_regEncode[src_first]));
else
__ daddu(as_Register(Matcher::_regEncode[dst_first]), as_Register(Matcher::_regEncode[src_first]), R0);
#ifndef PRODUCT
} else {
if(!do_size){
if (size != 0) st->print("\n\t");
st->print("move(32-bit) %s <-- %s\t# spill 11",
Matcher::regName[dst_first],
Matcher::regName[src_first]);
}
#endif
}
size += 4;
return size;
}
} else if (dst_first_rc == rc_float) {
// gpr -> xmm
if ((src_first & 1) == 0 && src_first + 1 == src_second &&
(dst_first & 1) == 0 && dst_first + 1 == dst_second) {
// 64-bit
if (cbuf) {
MacroAssembler _masm(cbuf);
__ dmtc1(as_Register(Matcher::_regEncode[src_first]), as_FloatRegister(Matcher::_regEncode[dst_first]));
#ifndef PRODUCT
} else {
if(!do_size){
if (size != 0) st->print("\n\t");
// NOTE(review): the emitted dmtc1 moves GPR(src) -> FPR(dst), but the
// operands are printed dst-first; confirm the intended disassembly order.
st->print("dmtc1 %s, %s\t# spill 12",
Matcher::regName[dst_first],
Matcher::regName[src_first]);
}
#endif
}
size += 4;
} else {
// 32-bit
assert(!((src_first & 1) == 0 && src_first + 1 == src_second), "no transform");
assert(!((dst_first & 1) == 0 && dst_first + 1 == dst_second), "no transform");
if (cbuf) {
MacroAssembler _masm(cbuf);
__ mtc1( as_Register(Matcher::_regEncode[src_first]), as_FloatRegister(Matcher::_regEncode[dst_first]) );
#ifndef PRODUCT
} else {
if(!do_size){
if (size != 0) st->print("\n\t");
st->print("mtc1 %s, %s\t# spill 13",
Matcher::regName[dst_first],
Matcher::regName[src_first]);
}
#endif
}
size += 4;
}
return size;
}
} else if (src_first_rc == rc_float) {
// xmm ->
if (dst_first_rc == rc_stack) {
// xmm -> mem
if ((src_first & 1) == 0 && src_first + 1 == src_second &&
(dst_first & 1) == 0 && dst_first + 1 == dst_second) {
// 64-bit
int offset = ra_->reg2offset(dst_first);
if (cbuf) {
MacroAssembler _masm(cbuf);
__ sdc1( as_FloatRegister(Matcher::_regEncode[src_first]), Address(SP, offset) );
#ifndef PRODUCT
} else {
if(!do_size){
if (size != 0) st->print("\n\t");
st->print("sdc1 %s, [SP + #%d]\t# spill 14",
Matcher::regName[src_first],
offset);
}
#endif
}
size += 4;
} else {
// 32-bit
assert(!((src_first & 1) == 0 && src_first + 1 == src_second), "no transform");
assert(!((dst_first & 1) == 0 && dst_first + 1 == dst_second), "no transform");
int offset = ra_->reg2offset(dst_first);
if (cbuf) {
MacroAssembler _masm(cbuf);
__ swc1(as_FloatRegister(Matcher::_regEncode[src_first]), Address(SP, offset));
#ifndef PRODUCT
} else {
if(!do_size){
if (size != 0) st->print("\n\t");
st->print("swc1 %s, [SP + #%d]\t# spill 15",
Matcher::regName[src_first],
offset);
}
#endif
}
size += 4;
}
return size;
} else if (dst_first_rc == rc_int) {
// xmm -> gpr
if ((src_first & 1) == 0 && src_first + 1 == src_second &&
(dst_first & 1) == 0 && dst_first + 1 == dst_second) {
// 64-bit
if (cbuf) {
MacroAssembler _masm(cbuf);
__ dmfc1( as_Register(Matcher::_regEncode[dst_first]), as_FloatRegister(Matcher::_regEncode[src_first]));
#ifndef PRODUCT
} else {
if(!do_size){
if (size != 0) st->print("\n\t");
st->print("dmfc1 %s, %s\t# spill 16",
Matcher::regName[dst_first],
Matcher::regName[src_first]);
}
#endif
}
size += 4;
} else {
// 32-bit
assert(!((src_first & 1) == 0 && src_first + 1 == src_second), "no transform");
assert(!((dst_first & 1) == 0 && dst_first + 1 == dst_second), "no transform");
if (cbuf) {
MacroAssembler _masm(cbuf);
__ mfc1( as_Register(Matcher::_regEncode[dst_first]), as_FloatRegister(Matcher::_regEncode[src_first]));
#ifndef PRODUCT
} else {
if(!do_size){
if (size != 0) st->print("\n\t");
st->print("mfc1 %s, %s\t# spill 17",
Matcher::regName[dst_first],
Matcher::regName[src_first]);
}
#endif
}
size += 4;
}
return size;
} else if (dst_first_rc == rc_float) {
// xmm -> xmm
if ((src_first & 1) == 0 && src_first + 1 == src_second &&
(dst_first & 1) == 0 && dst_first + 1 == dst_second) {
// 64-bit
if (cbuf) {
MacroAssembler _masm(cbuf);
__ mov_d( as_FloatRegister(Matcher::_regEncode[dst_first]), as_FloatRegister(Matcher::_regEncode[src_first]));
#ifndef PRODUCT
} else {
if(!do_size){
if (size != 0) st->print("\n\t");
st->print("mov_d %s <-- %s\t# spill 18",
Matcher::regName[dst_first],
Matcher::regName[src_first]);
}
#endif
}
size += 4;
} else {
// 32-bit
assert(!((src_first & 1) == 0 && src_first + 1 == src_second), "no transform");
assert(!((dst_first & 1) == 0 && dst_first + 1 == dst_second), "no transform");
if (cbuf) {
MacroAssembler _masm(cbuf);
__ mov_s( as_FloatRegister(Matcher::_regEncode[dst_first]), as_FloatRegister(Matcher::_regEncode[src_first]));
#ifndef PRODUCT
} else {
if(!do_size){
if (size != 0) st->print("\n\t");
st->print("mov_s %s <-- %s\t# spill 19",
Matcher::regName[dst_first],
Matcher::regName[src_first]);
}
#endif
}
size += 4;
}
return size;
}
}
// Every valid src/dst class combination returns above; reaching here
// means an unsupported combination.
assert(0," foo ");
Unimplemented();
return size;
}
#ifndef PRODUCT
// Pretty-print the spill copy (format-only mode of implementation()).
void MachSpillCopyNode::format( PhaseRegAlloc *ra_, outputStream* st ) const {
implementation( NULL, ra_, false, st );
}
#endif
// Emit the spill-copy instructions into the code buffer.
void MachSpillCopyNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
implementation( &cbuf, ra_, false, NULL );
}
// Size in bytes of the spill copy (size-only mode: do_size == true).
uint MachSpillCopyNode::size(PhaseRegAlloc *ra_) const {
return implementation( NULL, ra_, true, NULL );
}
1470 //=============================================================================
1471 #
#ifndef PRODUCT
// Debug listing for a breakpoint node ("INT3" naming inherited from x86).
void MachBreakpointNode::format( PhaseRegAlloc *, outputStream* st ) const {
st->print("INT3");
}
#endif
// Emit the platform breakpoint instruction.
void MachBreakpointNode::emit(CodeBuffer &cbuf, PhaseRegAlloc* ra_) const {
MacroAssembler _masm(&cbuf);
__ int3();
}
uint MachBreakpointNode::size(PhaseRegAlloc* ra_) const {
return MachNode::size(ra_);
}
1489 //=============================================================================
1490 #ifndef PRODUCT
1491 void MachEpilogNode::format( PhaseRegAlloc *ra_, outputStream* st ) const {
1492 Compile *C = ra_->C;
1493 int framesize = C->frame_size_in_bytes();
1495 assert((framesize & (StackAlignmentInBytes-1)) == 0, "frame size not aligned");
1497 st->print("daddiu SP, SP, %d # Rlease stack @ MachEpilogNode",framesize);
1498 st->cr(); st->print("\t");
1499 if (UseLoongsonISA) {
1500 st->print("gslq RA, FP, SP, %d # Restore FP & RA @ MachEpilogNode", -wordSize*2);
1501 } else {
1502 st->print("ld RA, SP, %d # Restore RA @ MachEpilogNode", -wordSize);
1503 st->cr(); st->print("\t");
1504 st->print("ld FP, SP, %d # Restore FP @ MachEpilogNode", -wordSize*2);
1505 }
1507 if( do_polling() && C->is_method_compilation() ) {
1508 st->print("Poll Safepoint # MachEpilogNode");
1509 }
1510 }
1511 #endif
// Emit the method epilog: pop the frame, restore RA and FP from just below
// the (restored) SP, and, for method compilations, touch the polling page
// so a pending safepoint is honored at return.
void MachEpilogNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
Compile *C = ra_->C;
MacroAssembler _masm(&cbuf);
int framesize = C->frame_size_in_bytes();
assert((framesize & (StackAlignmentInBytes-1)) == 0, "frame size not aligned");
// Release the frame first; RA/FP were saved below the caller's SP.
__ daddiu(SP, SP, framesize);
if (UseLoongsonISA) {
// Load-pair: restores RA and FP in one instruction.
__ gslq(RA, FP, SP, -wordSize*2);
} else {
__ ld(RA, SP, -wordSize );
__ ld(FP, SP, -wordSize*2 );
}
/* 2012/11/19 Jin: The epilog in a RuntimeStub should not contain a safepoint */
if( do_polling() && C->is_method_compilation() ) {
#ifndef OPT_SAFEPOINT
// The relocation is placed on the load that reads the polling page.
__ set64(AT, (long)os::get_polling_page());
__ relocate(relocInfo::poll_return_type);
__ lw(AT, AT, 0);
#else
__ lui(AT, Assembler::split_high((intptr_t)os::get_polling_page()));
__ relocate(relocInfo::poll_return_type);
__ lw(AT, AT, Assembler::split_low((intptr_t)os::get_polling_page()));
#endif
}
}
// Epilog size varies (Loongson vs. plain MIPS, polling); compute it by
// emitting into a scratch buffer.
uint MachEpilogNode::size(PhaseRegAlloc *ra_) const {
return MachNode::size(ra_); // too many variables; just compute it the hard way fujie debug
}
int MachEpilogNode::reloc() const {
return 0; // a large enough number
}
const Pipeline * MachEpilogNode::pipeline() const {
return MachNode::pipeline_class();
}
// Offset of the safepoint poll within the epilog; 0 here.
int MachEpilogNode::safepoint_offset() const { return 0; }
1557 //=============================================================================
#ifndef PRODUCT
// Debug listing: a BoxLock materializes the stack address of its lock slot.
void BoxLockNode::format( PhaseRegAlloc *ra_, outputStream* st ) const {
int offset = ra_->reg2offset(in_RegMask(0).find_first_elem());
int reg = ra_->get_reg_first(this);
st->print("ADDI %s, SP, %d @BoxLockNode",Matcher::regName[reg],offset);
}
#endif
// One 4-byte instruction (the SP-relative add emitted below).
uint BoxLockNode::size(PhaseRegAlloc *ra_) const {
return 4;
}
1572 void BoxLockNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
1573 MacroAssembler _masm(&cbuf);
1574 int offset = ra_->reg2offset(in_RegMask(0).find_first_elem());
1575 int reg = ra_->get_encode(this);
1577 __ addi(as_Register(reg), SP, offset);
1578 /*
1579 if( offset >= 128 ) {
1580 emit_opcode(cbuf, 0x8D); // LEA reg,[SP+offset]
1581 emit_rm(cbuf, 0x2, reg, 0x04);
1582 emit_rm(cbuf, 0x0, 0x04, SP_enc);
1583 emit_d32(cbuf, offset);
1584 }
1585 else {
1586 emit_opcode(cbuf, 0x8D); // LEA reg,[SP+offset]
1587 emit_rm(cbuf, 0x1, reg, 0x04);
1588 emit_rm(cbuf, 0x0, 0x04, SP_enc);
1589 emit_d8(cbuf, offset);
1590 }
1591 */
1592 }
1595 //static int sizeof_FFree_Float_Stack_All = -1;
1597 int MachCallRuntimeNode::ret_addr_offset() {
1598 //lui
1599 //ori
1600 //dsll
1601 //ori
1602 //jalr
1603 //nop
1604 assert(NativeCall::instruction_size == 24, "in MachCallRuntimeNode::ret_addr_offset()");
1605 return NativeCall::instruction_size;
1606 // return 16;
1607 }
1613 //=============================================================================
#ifndef PRODUCT
// Debug listing: _count nops, 4 bytes each.
void MachNopNode::format( PhaseRegAlloc *, outputStream* st ) const {
st->print("NOP \t# %d bytes pad for loops and calls", 4 * _count);
}
#endif
1620 void MachNopNode::emit(CodeBuffer &cbuf, PhaseRegAlloc * ) const {
1621 MacroAssembler _masm(&cbuf);
1622 int i = 0;
1623 for(i = 0; i < _count; i++)
1624 __ nop();
1625 }
// Size matches emit(): 4 bytes per nop.
uint MachNopNode::size(PhaseRegAlloc *) const {
return 4 * _count;
}
const Pipeline* MachNopNode::pipeline() const {
return MachNode::pipeline_class();
}
1634 //=============================================================================
1636 //=============================================================================
#ifndef PRODUCT
// Debug listing of the unverified entry point: compare the receiver's klass
// against the inline cache and jump to the IC-miss stub on mismatch.
void MachUEPNode::format( PhaseRegAlloc *ra_, outputStream* st ) const {
st->print_cr("load_klass(AT, T0)");
st->print_cr("\tbeq(AT, iCache, L)");
st->print_cr("\tnop");
st->print_cr("\tjmp(SharedRuntime::get_ic_miss_stub(), relocInfo::runtime_call_type)");
st->print_cr("\tnop");
st->print_cr("\tnop");
st->print_cr(" L:");
}
#endif
// Emit the unverified entry point: load the receiver's klass, compare it to
// the inline-cache klass, and tail-jump to the IC-miss stub on mismatch.
// The verified entry point (label L) must be aligned for later patching.
void MachUEPNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
MacroAssembler _masm(&cbuf);
#ifdef ASSERT
//uint code_size = cbuf.code_size();
#endif
int ic_reg = Matcher::inline_cache_reg_encode();
Label L;
Register receiver = T0;
Register iCache = as_Register(ic_reg);
__ load_klass(AT, receiver);
__ beq(AT, iCache, L);
__ nop();  // branch delay slot
// Klass mismatch: jump to the shared IC-miss stub via T9.
__ relocate(relocInfo::runtime_call_type);
__ li48(T9, (long)SharedRuntime::get_ic_miss_stub());
__ jr(T9);
__ nop();
/* WARNING these NOPs are critical so that verified entry point is properly
* 8 bytes aligned for patching by NativeJump::patch_verified_entry() */
__ align(CodeEntryAlignment);
__ bind(L);
}
// Size depends on alignment padding; compute by emission.
uint MachUEPNode::size(PhaseRegAlloc *ra_) const {
return MachNode::size(ra_);
}
1680 //=============================================================================
// The constant-table base lives in a pointer register.
const RegMask& MachConstantBaseNode::_out_RegMask = P_REG_mask();
int Compile::ConstantTable::calculate_table_base_offset() const {
return 0; // absolute addressing, no offset
}
// This platform emits the base directly (see emit() below); no
// post-register-allocation expansion is required.
bool MachConstantBaseNode::requires_postalloc_expand() const { return false; }
void MachConstantBaseNode::postalloc_expand(GrowableArray <Node *> *nodes, PhaseRegAlloc *ra_) {
ShouldNotReachHere();
}
// Materialize the absolute address of the constant-table base into the
// allocated register (4-instruction li48 with an internal-pc relocation).
void MachConstantBaseNode::emit(CodeBuffer& cbuf, PhaseRegAlloc* ra_) const {
Compile* C = ra_->C;
Compile::ConstantTable& constant_table = C->constant_table();
MacroAssembler _masm(&cbuf);
Register Rtoc = as_Register(ra_->get_encode(this));
CodeSection* consts_section = __ code()->consts();
int consts_size = consts_section->align_at_start(consts_section->size());
assert(constant_table.size() == consts_size, "must be equal");
// Nothing to do when the method has no constants.
if (consts_section->size()) {
// Materialize the constant table base.
address baseaddr = consts_section->start() + -(constant_table.table_base_offset());
// RelocationHolder rspec = internal_word_Relocation::spec(baseaddr);
__ relocate(relocInfo::internal_pc_type);
__ li48(Rtoc, (long)baseaddr);
}
}
// Fixed size: the li48 materialization is always 4 instructions.
uint MachConstantBaseNode::size(PhaseRegAlloc* ra_) const {
// li48 (4 insts)
return 4 * 4;
}
#ifndef PRODUCT
void MachConstantBaseNode::format(PhaseRegAlloc* ra_, outputStream* st) const {
Register r = as_Register(ra_->get_encode(this));
st->print("li48 %s, &constanttable (constant table base) @ MachConstantBaseNode", r->name());
}
#endif
1725 //=============================================================================
#ifndef PRODUCT
// Pretty-print the method prolog: optional stack bang, RA/FP save,
// FP setup and frame allocation. Mirrors emit() below.
void MachPrologNode::format( PhaseRegAlloc *ra_, outputStream* st ) const {
Compile* C = ra_->C;
int framesize = C->frame_size_in_bytes();
int bangsize = C->bang_size_in_bytes();
assert((framesize & (StackAlignmentInBytes-1)) == 0, "frame size not aligned");
// Calls to C2R adapters often do not accept exceptional returns.
// We require that their callers must bang for them. But be careful, because
// some VM calls (such as call site linkage) can use several kilobytes of
// stack. But the stack safety zone should account for that.
// See bugs 4446381, 4468289, 4497237.
if (C->need_stack_bang(bangsize)) {
st->print_cr("# stack bang"); st->print("\t");
}
if (UseLoongsonISA) {
// Store-pair saves RA and FP in a single Loongson instruction.
st->print("gssq RA, FP, %d(SP) @ MachPrologNode\n\t", -wordSize*2);
} else {
st->print("sd RA, %d(SP) @ MachPrologNode\n\t", -wordSize);
st->print("sd FP, %d(SP) @ MachPrologNode\n\t", -wordSize*2);
}
st->print("daddiu FP, SP, -%d \n\t", wordSize*2);
st->print("daddiu SP, SP, -%d \t",framesize);
}
#endif
1754 void MachPrologNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
1755 Compile* C = ra_->C;
1756 MacroAssembler _masm(&cbuf);
1758 int framesize = C->frame_size_in_bytes();
1759 int bangsize = C->bang_size_in_bytes();
1761 // __ verified_entry(framesize, C->need_stack_bang(bangsize)?bangsize:0, false);
1763 assert((framesize & (StackAlignmentInBytes-1)) == 0, "frame size not aligned");
1765 if (C->need_stack_bang(framesize)) {
1766 __ generate_stack_overflow_check(framesize);
1767 }
1769 if (UseLoongsonISA) {
1770 __ gssq(RA, FP, SP, -wordSize*2);
1771 } else {
1772 __ sd(RA, SP, -wordSize);
1773 __ sd(FP, SP, -wordSize*2);
1774 }
1775 __ daddiu(FP, SP, -wordSize*2);
1776 __ daddiu(SP, SP, -framesize);
1777 __ nop(); /* 2013.10.22 Jin: Make enough room for patch_verified_entry() */
1778 __ nop();
1780 C->set_frame_complete(cbuf.insts_size());
1781 if (C->has_mach_constant_base_node()) {
1782 // NOTE: We set the table base offset here because users might be
1783 // emitted before MachConstantBaseNode.
1784 Compile::ConstantTable& constant_table = C->constant_table();
1785 constant_table.set_table_base_offset(constant_table.calculate_table_base_offset());
1786 }
1788 }
// Prolog size varies (stack bang, Loongson vs. plain MIPS); compute by
// emission into a scratch buffer.
uint MachPrologNode::size(PhaseRegAlloc *ra_) const {
//fprintf(stderr, "\nPrologNode::size(ra_)= %d \n", MachNode::size(ra_));//fujie debug
return MachNode::size(ra_); // too many variables; just compute it the hard way
}
int MachPrologNode::reloc() const {
return 0; // a large enough number
}
1800 %}
1802 //----------ENCODING BLOCK-----------------------------------------------------
1803 // This block specifies the encoding classes used by the compiler to output
1804 // byte streams. Encoding classes generate functions which are called by
1805 // Machine Instruction Nodes in order to generate the bit encoding of the
1806 // instruction. Operands specify their base encoding interface with the
1807 // interface keyword. There are currently supported four interfaces,
1808 // REG_INTER, CONST_INTER, MEMORY_INTER, & COND_INTER. REG_INTER causes an
1809 // operand to generate a function which returns its register number when
1810 // queried. CONST_INTER causes an operand to generate a function which
1811 // returns the value of the constant when queried. MEMORY_INTER causes an
1812 // operand to generate four functions which return the Base Register, the
1813 // Index Register, the Scale Value, and the Offset Value of the operand when
1814 // queried. COND_INTER causes an operand to generate six functions which
1815 // return the encoding code (ie - encoding bits for the instruction)
1816 // associated with each basic boolean condition for a conditional instruction.
1817 // Instructions specify two basic values for encoding. They use the
1818 // ins_encode keyword to specify their encoding class (which must be one of
1819 // the class names specified in the encoding block), and they use the
1820 // opcode keyword to specify, in order, their primary, secondary, and
1821 // tertiary opcode. Only the opcode sections which a particular instruction
1822 // needs for encoding need to be specified.
1823 encode %{
1824 /*
1825 Alias:
1826 1044 b java.io.ObjectInputStream::readHandle (130 bytes)
1827 118 B14: # B19 B15 <- B13 Freq: 0.899955
1828 118 add S1, S2, V0 #@addP_reg_reg
1829 11c lb S0, [S1 + #-8257524] #@loadB
1830 120 BReq S0, #3, B19 #@branchConI_reg_imm P=0.100000 C=-1.000000
1831 */
//Load byte signed
// Encoding for loadB: dst = sign-extended byte at [base + (index << scale) + disp].
// Uses Loongson gslbx (indexed load-byte) when available; otherwise forms
// the address in AT (and T9 for a large displacement) and uses lb.
// NOTE(review): the address arithmetic here uses 32-bit addu (unlike
// load_UB_enc below, which uses daddu) — confirm this is intentional for
// 64-bit addresses.
enc_class load_B_enc (mRegI dst, memory mem) %{
MacroAssembler _masm(&cbuf);
int dst = $dst$$reg;
int base = $mem$$base;
int index = $mem$$index;
int scale = $mem$$scale;
int disp = $mem$$disp;
if( index != 0 ) {
// Indexed address: base + (index << scale) + disp.
if( Assembler::is_simm16(disp) ) {
if( UseLoongsonISA ) {
if (scale == 0) {
__ gslbx(as_Register(dst), as_Register(base), as_Register(index), disp);
} else {
__ dsll(AT, as_Register(index), scale);
__ gslbx(as_Register(dst), as_Register(base), AT, disp);
}
} else {
if (scale == 0) {
__ addu(AT, as_Register(base), as_Register(index));
} else {
__ dsll(AT, as_Register(index), scale);
__ addu(AT, as_Register(base), AT);
}
__ lb(as_Register(dst), AT, disp);
}
} else {
// Displacement does not fit in 16 bits: materialize it in T9.
if (scale == 0) {
__ addu(AT, as_Register(base), as_Register(index));
} else {
__ dsll(AT, as_Register(index), scale);
__ addu(AT, as_Register(base), AT);
}
__ move(T9, disp);
if( UseLoongsonISA ) {
__ gslbx(as_Register(dst), AT, T9, 0);
} else {
__ addu(AT, AT, T9);
__ lb(as_Register(dst), AT, 0);
}
}
} else {
// No index: plain base + disp addressing.
if( Assembler::is_simm16(disp) ) {
__ lb(as_Register(dst), as_Register(base), disp);
} else {
__ move(T9, disp);
if( UseLoongsonISA ) {
__ gslbx(as_Register(dst), as_Register(base), T9, 0);
} else {
__ addu(AT, as_Register(base), T9);
__ lb(as_Register(dst), AT, 0);
}
}
}
%}
//Load byte unsigned
// Encoding for loadUB: dst = zero-extended byte at
// [base + (index << scale) + disp], via lbu. Address math uses 64-bit
// daddu; T9 holds displacements that do not fit in 16 bits.
// NOTE(review): unlike load_B_enc above, this encoding never uses the
// Loongson indexed form (gslbux) — possibly an omission; verify.
enc_class load_UB_enc (mRegI dst, memory mem) %{
MacroAssembler _masm(&cbuf);
int dst = $dst$$reg;
int base = $mem$$base;
int index = $mem$$index;
int scale = $mem$$scale;
int disp = $mem$$disp;
if( index != 0 ) {
// Indexed address: AT = base + (index << scale).
if (scale == 0) {
__ daddu(AT, as_Register(base), as_Register(index));
} else {
__ dsll(AT, as_Register(index), scale);
__ daddu(AT, as_Register(base), AT);
}
if( Assembler::is_simm16(disp) ) {
__ lbu(as_Register(dst), AT, disp);
} else {
// Large displacement: materialize in T9 and fold into the address.
__ move(T9, disp);
__ daddu(AT, AT, T9);
__ lbu(as_Register(dst), AT, 0);
}
} else {
// No index: plain base + disp addressing.
if( Assembler::is_simm16(disp) ) {
__ lbu(as_Register(dst), as_Register(base), disp);
} else {
__ move(T9, disp);
__ daddu(AT, as_Register(base), T9);
__ lbu(as_Register(dst), AT, 0);
}
}
%}
// Encoding for storeB (register source): byte of src stored to
// [base + (index << scale) + disp]. Loongson gssbx (indexed store-byte)
// is used where its 8-bit signed displacement permits; otherwise the
// address is formed in AT (and T9 for large displacements) and sb is used.
// NOTE(review): address arithmetic uses 32-bit addu — confirm this is
// intentional for 64-bit addresses (cf. daddu in load_UB_enc).
enc_class store_B_reg_enc (memory mem, mRegI src) %{
MacroAssembler _masm(&cbuf);
int src = $src$$reg;
int base = $mem$$base;
int index = $mem$$index;
int scale = $mem$$scale;
int disp = $mem$$disp;
if( index != 0 ) {
if (scale == 0) {
// gssbx takes an 8-bit signed displacement, hence the simm(disp, 8) check.
if( Assembler::is_simm(disp, 8) ) {
if (UseLoongsonISA) {
__ gssbx(as_Register(src), as_Register(base), as_Register(index), disp);
} else {
__ addu(AT, as_Register(base), as_Register(index));
__ sb(as_Register(src), AT, disp);
}
} else if( Assembler::is_simm16(disp) ) {
__ addu(AT, as_Register(base), as_Register(index));
__ sb(as_Register(src), AT, disp);
} else {
__ addu(AT, as_Register(base), as_Register(index));
__ move(T9, disp);
if (UseLoongsonISA) {
__ gssbx(as_Register(src), AT, T9, 0);
} else {
__ addu(AT, AT, T9);
__ sb(as_Register(src), AT, 0);
}
}
} else {
// Scaled index: AT = index << scale, then combine with base.
__ dsll(AT, as_Register(index), scale);
if( Assembler::is_simm(disp, 8) ) {
if (UseLoongsonISA) {
__ gssbx(as_Register(src), AT, as_Register(base), disp);
} else {
__ addu(AT, as_Register(base), AT);
__ sb(as_Register(src), AT, disp);
}
} else if( Assembler::is_simm16(disp) ) {
__ addu(AT, as_Register(base), AT);
__ sb(as_Register(src), AT, disp);
} else {
__ addu(AT, as_Register(base), AT);
__ move(T9, disp);
if (UseLoongsonISA) {
__ gssbx(as_Register(src), AT, T9, 0);
} else {
__ addu(AT, AT, T9);
__ sb(as_Register(src), AT, 0);
}
}
}
} else {
// No index: plain base + disp addressing.
if( Assembler::is_simm16(disp) ) {
__ sb(as_Register(src), as_Register(base), disp);
} else {
__ move(T9, disp);
if (UseLoongsonISA) {
__ gssbx(as_Register(src), as_Register(base), T9, 0);
} else {
__ addu(AT, as_Register(base), T9);
__ sb(as_Register(src), AT, 0);
}
}
}
%}
  // Store Byte immediate to memory. value == 0 is special-cased to store
  // from the hardwired zero register R0 (saves one move).  AT and T9 are
  // scratch (clobbered).  The Loongson path uses gssbx (indexed store,
  // 8-bit signed offset) to shorten the emitted sequence.
  enc_class store_B_immI_enc (memory mem, immI8 src) %{
    MacroAssembler _masm(&cbuf);
    int base = $mem$$base;
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;
    int value = $src$$constant;

    if( index != 0 ) {
      if (!UseLoongsonISA) {
        // Plain MIPS: compute base + (index << scale) into AT, then sb.
        if (scale == 0) {
          __ daddu(AT, as_Register(base), as_Register(index));
        } else {
          __ dsll(AT, as_Register(index), scale);
          __ daddu(AT, as_Register(base), AT);
        }
        if( Assembler::is_simm16(disp) ) {
          if (value == 0) {
            __ sb(R0, AT, disp);
          } else {
            __ move(T9, value);
            __ sb(T9, AT, disp);
          }
        } else {
          if (value == 0) {
            __ move(T9, disp);
            __ daddu(AT, AT, T9);
            __ sb(R0, AT, 0);
          } else {
            __ move(T9, disp);
            __ daddu(AT, AT, T9);
            __ move(T9, value);
            __ sb(T9, AT, 0);
          }
        }
      } else {

        if (scale == 0) {
          if( Assembler::is_simm(disp, 8) ) {
            // disp fits gssbx's 8-bit offset: single indexed store.
            if (value == 0) {
              __ gssbx(R0, as_Register(base), as_Register(index), disp);
            } else {
              __ move(T9, value);
              __ gssbx(T9, as_Register(base), as_Register(index), disp);
            }
          } else if( Assembler::is_simm16(disp) ) {
            __ daddu(AT, as_Register(base), as_Register(index));
            if (value == 0) {
              __ sb(R0, AT, disp);
            } else {
              __ move(T9, value);
              __ sb(T9, AT, disp);
            }
          } else {
            if (value == 0) {
              __ daddu(AT, as_Register(base), as_Register(index));
              __ move(T9, disp);
              __ gssbx(R0, AT, T9, 0);
            } else {
              // Both T9 and AT are needed (value + disp), so fold disp into
              // base and use the index register directly in gssbx.
              __ move(AT, disp);
              __ move(T9, value);
              __ daddu(AT, as_Register(base), AT);
              __ gssbx(T9, AT, as_Register(index), 0);
            }
          }

        } else {

          if( Assembler::is_simm(disp, 8) ) {
            __ dsll(AT, as_Register(index), scale);
            if (value == 0) {
              __ gssbx(R0, as_Register(base), AT, disp);
            } else {
              __ move(T9, value);
              __ gssbx(T9, as_Register(base), AT, disp);
            }
          } else if( Assembler::is_simm16(disp) ) {
            __ dsll(AT, as_Register(index), scale);
            __ daddu(AT, as_Register(base), AT);
            if (value == 0) {
              __ sb(R0, AT, disp);
            } else {
              __ move(T9, value);
              __ sb(T9, AT, disp);
            }
          } else {
            __ dsll(AT, as_Register(index), scale);
            if (value == 0) {
              __ daddu(AT, as_Register(base), AT);
              __ move(T9, disp);
              __ gssbx(R0, AT, T9, 0);
            } else {
              // AT = (index << scale) + disp; store value at base + AT.
              __ move(T9, disp);
              __ daddu(AT, AT, T9);
              __ move(T9, value);
              __ gssbx(T9, as_Register(base), AT, 0);
            }
          }
        }
      }
    } else {
      // No index register: base + disp only.
      if( Assembler::is_simm16(disp) ) {
        if (value == 0) {
          __ sb(R0, as_Register(base), disp);
        } else {
          __ move(AT, value);
          __ sb(AT, as_Register(base), disp);
        }
      } else {
        if (value == 0) {
          __ move(T9, disp);
          if (UseLoongsonISA) {
            __ gssbx(R0, as_Register(base), T9, 0);
          } else {
            __ daddu(AT, as_Register(base), T9);
            __ sb(R0, AT, 0);
          }
        } else {
          __ move(T9, disp);
          if (UseLoongsonISA) {
            __ move(AT, value);
            __ gssbx(AT, as_Register(base), T9, 0);
          } else {
            __ daddu(AT, as_Register(base), T9);
            __ move(T9, value);
            __ sb(T9, AT, 0);
          }
        }
      }
    }
  %}
  // Store Byte immediate followed by a full memory barrier (sync) --
  // used for volatile byte stores.  Unlike store_B_immI_enc, this variant
  // does not use the Loongson indexed-store shortcuts.  AT/T9 are scratch.
  enc_class store_B_immI_enc_sync (memory mem, immI8 src) %{
    MacroAssembler _masm(&cbuf);
    int base = $mem$$base;
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;
    int value = $src$$constant;

    if( index != 0 ) {
      if (scale == 0) {
        __ daddu(AT, as_Register(base), as_Register(index));
      } else {
        __ dsll(AT, as_Register(index), scale);
        __ daddu(AT, as_Register(base), AT);
      }
      if( Assembler::is_simm16(disp) ) {
        if (value == 0) {
          // Zero is stored straight from R0, no materialization needed.
          __ sb(R0, AT, disp);
        } else {
          __ move(T9, value);
          __ sb(T9, AT, disp);
        }
      } else {
        if (value == 0) {
          __ move(T9, disp);
          __ daddu(AT, AT, T9);
          __ sb(R0, AT, 0);
        } else {
          __ move(T9, disp);
          __ daddu(AT, AT, T9);
          __ move(T9, value);
          __ sb(T9, AT, 0);
        }
      }
    } else {
      if( Assembler::is_simm16(disp) ) {
        if (value == 0) {
          __ sb(R0, as_Register(base), disp);
        } else {
          __ move(AT, value);
          __ sb(AT, as_Register(base), disp);
        }
      } else {
        if (value == 0) {
          __ move(T9, disp);
          __ daddu(AT, as_Register(base), T9);
          __ sb(R0, AT, 0);
        } else {
          __ move(T9, disp);
          __ daddu(AT, as_Register(base), T9);
          __ move(T9, value);
          __ sb(T9, AT, 0);
        }
      }
    }

    // Full barrier: make the store visible before subsequent accesses.
    __ sync();
  %}
2183 // Load Short (16bit signed)
2184 enc_class load_S_enc (mRegI dst, memory mem) %{
2185 MacroAssembler _masm(&cbuf);
2186 int dst = $dst$$reg;
2187 int base = $mem$$base;
2188 int index = $mem$$index;
2189 int scale = $mem$$scale;
2190 int disp = $mem$$disp;
2192 if( index != 0 ) {
2193 if (scale == 0) {
2194 __ daddu(AT, as_Register(base), as_Register(index));
2195 } else {
2196 __ dsll(AT, as_Register(index), scale);
2197 __ daddu(AT, as_Register(base), AT);
2198 }
2199 if( Assembler::is_simm16(disp) ) {
2200 __ lh(as_Register(dst), AT, disp);
2201 } else {
2202 __ move(T9, disp);
2203 __ addu(AT, AT, T9);
2204 __ lh(as_Register(dst), AT, 0);
2205 }
2206 } else {
2207 if( Assembler::is_simm16(disp) ) {
2208 __ lh(as_Register(dst), as_Register(base), disp);
2209 } else {
2210 __ move(T9, disp);
2211 __ addu(AT, as_Register(base), T9);
2212 __ lh(as_Register(dst), AT, 0);
2213 }
2214 }
2215 %}
2217 // Load Char (16bit unsigned)
2218 enc_class load_C_enc (mRegI dst, memory mem) %{
2219 MacroAssembler _masm(&cbuf);
2220 int dst = $dst$$reg;
2221 int base = $mem$$base;
2222 int index = $mem$$index;
2223 int scale = $mem$$scale;
2224 int disp = $mem$$disp;
2226 if( index != 0 ) {
2227 if (scale == 0) {
2228 __ daddu(AT, as_Register(base), as_Register(index));
2229 } else {
2230 __ dsll(AT, as_Register(index), scale);
2231 __ daddu(AT, as_Register(base), AT);
2232 }
2233 if( Assembler::is_simm16(disp) ) {
2234 __ lhu(as_Register(dst), AT, disp);
2235 } else {
2236 __ move(T9, disp);
2237 __ addu(AT, AT, T9);
2238 __ lhu(as_Register(dst), AT, 0);
2239 }
2240 } else {
2241 if( Assembler::is_simm16(disp) ) {
2242 __ lhu(as_Register(dst), as_Register(base), disp);
2243 } else {
2244 __ move(T9, disp);
2245 __ daddu(AT, as_Register(base), T9);
2246 __ lhu(as_Register(dst), AT, 0);
2247 }
2248 }
2249 %}
2251 // Store Char (16bit unsigned)
  // Store Char/Short (16-bit) from integer register 'src' to memory.
  // Uses Loongson gsshx (indexed store, 8-bit signed offset) when
  // available; otherwise computes the address in AT and emits sh.
  // AT and T9 are scratch (clobbered).
  // NOTE(review): address additions use 32-bit addu; on MIPS64 addu is
  // only safe when both operands are sign-extended 32-bit values --
  // verify, cf. the daddu used by the corresponding load encodings.
  enc_class store_C_reg_enc (memory mem, mRegI src) %{
    MacroAssembler _masm(&cbuf);
    int src = $src$$reg;
    int base = $mem$$base;
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;

    if( index != 0 ) {
      if( Assembler::is_simm16(disp) ) {
        if( UseLoongsonISA && Assembler::is_simm(disp, 8) ) {
          // disp fits gsshx's 8-bit offset: fold base+index+disp in one store.
          if (scale == 0) {
            __ gsshx(as_Register(src), as_Register(base), as_Register(index), disp);
          } else {
            __ dsll(AT, as_Register(index), scale);
            __ gsshx(as_Register(src), as_Register(base), AT, disp);
          }
        } else {
          if (scale == 0) {
            __ addu(AT, as_Register(base), as_Register(index));
          } else {
            __ dsll(AT, as_Register(index), scale);
            __ addu(AT, as_Register(base), AT);
          }
          __ sh(as_Register(src), AT, disp);
        }
      } else {
        if (scale == 0) {
          __ addu(AT, as_Register(base), as_Register(index));
        } else {
          __ dsll(AT, as_Register(index), scale);
          __ addu(AT, as_Register(base), AT);
        }
        __ move(T9, disp);
        if( UseLoongsonISA ) {
          __ gsshx(as_Register(src), AT, T9, 0);
        } else {
          __ addu(AT, AT, T9);
          __ sh(as_Register(src), AT, 0);
        }
      }
    } else {
      if( Assembler::is_simm16(disp) ) {
        __ sh(as_Register(src), as_Register(base), disp);
      } else {
        __ move(T9, disp);
        if( UseLoongsonISA ) {
          __ gsshx(as_Register(src), as_Register(base), T9, 0);
        } else {
          __ addu(AT, as_Register(base), T9);
          __ sh(as_Register(src), AT, 0);
        }
      }
    }
  %}
  // Store zero Char/Short (16-bit) to memory, taking the value directly
  // from the hardwired zero register R0 (no immediate materialization).
  // Mirrors store_C_reg_enc with src == R0.  AT/T9 are scratch.
  enc_class store_C0_enc (memory mem) %{
    MacroAssembler _masm(&cbuf);
    int base = $mem$$base;
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;

    if( index != 0 ) {
      if( Assembler::is_simm16(disp) ) {
        if( UseLoongsonISA && Assembler::is_simm(disp, 8) ) {
          // disp fits gsshx's 8-bit offset: single indexed store.
          if (scale == 0) {
            __ gsshx(R0, as_Register(base), as_Register(index), disp);
          } else {
            __ dsll(AT, as_Register(index), scale);
            __ gsshx(R0, as_Register(base), AT, disp);
          }
        } else {
          if (scale == 0) {
            __ addu(AT, as_Register(base), as_Register(index));
          } else {
            __ dsll(AT, as_Register(index), scale);
            __ addu(AT, as_Register(base), AT);
          }
          __ sh(R0, AT, disp);
        }
      } else {
        if (scale == 0) {
          __ addu(AT, as_Register(base), as_Register(index));
        } else {
          __ dsll(AT, as_Register(index), scale);
          __ addu(AT, as_Register(base), AT);
        }
        __ move(T9, disp);
        if( UseLoongsonISA ) {
          __ gsshx(R0, AT, T9, 0);
        } else {
          __ addu(AT, AT, T9);
          __ sh(R0, AT, 0);
        }
      }
    } else {
      if( Assembler::is_simm16(disp) ) {
        __ sh(R0, as_Register(base), disp);
      } else {
        __ move(T9, disp);
        if( UseLoongsonISA ) {
          __ gsshx(R0, as_Register(base), T9, 0);
        } else {
          __ addu(AT, as_Register(base), T9);
          __ sh(R0, AT, 0);
        }
      }
    }
  %}
  // Load Int (32-bit, sign-extended) into integer register 'dst'.
  // Uses Loongson gslwx (indexed load, 8-bit signed offset) when
  // available; otherwise computes the address in AT and emits lw.
  // AT and T9 are scratch (clobbered).
  // NOTE(review): address additions use 32-bit addu -- safe only if the
  // operands are sign-extended 32-bit values on MIPS64; verify.
  enc_class load_I_enc (mRegI dst, memory mem) %{
    MacroAssembler _masm(&cbuf);
    int dst = $dst$$reg;
    int base = $mem$$base;
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;

    if( index != 0 ) {
      if( Assembler::is_simm16(disp) ) {
        if( UseLoongsonISA && Assembler::is_simm(disp, 8) ) {
          // disp fits gslwx's 8-bit offset: fold base+index+disp in one load.
          if (scale == 0) {
            __ gslwx(as_Register(dst), as_Register(base), as_Register(index), disp);
          } else {
            __ dsll(AT, as_Register(index), scale);
            __ gslwx(as_Register(dst), as_Register(base), AT, disp);
          }
        } else {
          if (scale == 0) {
            __ addu(AT, as_Register(base), as_Register(index));
          } else {
            __ dsll(AT, as_Register(index), scale);
            __ addu(AT, as_Register(base), AT);
          }
          __ lw(as_Register(dst), AT, disp);
        }
      } else {
        if (scale == 0) {
          __ addu(AT, as_Register(base), as_Register(index));
        } else {
          __ dsll(AT, as_Register(index), scale);
          __ addu(AT, as_Register(base), AT);
        }
        __ move(T9, disp);
        if( UseLoongsonISA ) {
          __ gslwx(as_Register(dst), AT, T9, 0);
        } else {
          __ addu(AT, AT, T9);
          __ lw(as_Register(dst), AT, 0);
        }
      }
    } else {
      if( Assembler::is_simm16(disp) ) {
        __ lw(as_Register(dst), as_Register(base), disp);
      } else {
        __ move(T9, disp);
        if( UseLoongsonISA ) {
          __ gslwx(as_Register(dst), as_Register(base), T9, 0);
        } else {
          __ addu(AT, as_Register(base), T9);
          __ lw(as_Register(dst), AT, 0);
        }
      }
    }
  %}
  // Store Int (32-bit) from integer register 'src' to memory.
  // Uses Loongson gsswx (indexed store, 8-bit signed offset) when
  // available; otherwise computes the address in AT and emits sw.
  // AT and T9 are scratch (clobbered).
  enc_class store_I_reg_enc (memory mem, mRegI src) %{
    MacroAssembler _masm(&cbuf);
    int src = $src$$reg;
    int base = $mem$$base;
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;

    if( index != 0 ) {
      if( Assembler::is_simm16(disp) ) {
        if( UseLoongsonISA && Assembler::is_simm(disp, 8) ) {
          // disp fits gsswx's 8-bit offset: single indexed store.
          if (scale == 0) {
            __ gsswx(as_Register(src), as_Register(base), as_Register(index), disp);
          } else {
            __ dsll(AT, as_Register(index), scale);
            __ gsswx(as_Register(src), as_Register(base), AT, disp);
          }
        } else {
          if (scale == 0) {
            __ addu(AT, as_Register(base), as_Register(index));
          } else {
            __ dsll(AT, as_Register(index), scale);
            __ addu(AT, as_Register(base), AT);
          }
          __ sw(as_Register(src), AT, disp);
        }
      } else {
        if (scale == 0) {
          __ addu(AT, as_Register(base), as_Register(index));
        } else {
          __ dsll(AT, as_Register(index), scale);
          __ addu(AT, as_Register(base), AT);
        }
        __ move(T9, disp);
        if( UseLoongsonISA ) {
          __ gsswx(as_Register(src), AT, T9, 0);
        } else {
          __ addu(AT, AT, T9);
          __ sw(as_Register(src), AT, 0);
        }
      }
    } else {
      if( Assembler::is_simm16(disp) ) {
        __ sw(as_Register(src), as_Register(base), disp);
      } else {
        __ move(T9, disp);
        if( UseLoongsonISA ) {
          __ gsswx(as_Register(src), as_Register(base), T9, 0);
        } else {
          __ addu(AT, as_Register(base), T9);
          __ sw(as_Register(src), AT, 0);
        }
      }
    }
  %}
  // Store Int immediate to memory. value == 0 is special-cased to store
  // from the hardwired zero register R0.  AT and T9 are scratch
  // (clobbered).  No Loongson indexed-store shortcut in this encoding.
  enc_class store_I_immI_enc (memory mem, immI src) %{
    MacroAssembler _masm(&cbuf);
    int base = $mem$$base;
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;
    int value = $src$$constant;

    if( index != 0 ) {
      // Fold the (optionally scaled) index into AT.
      if (scale == 0) {
        __ daddu(AT, as_Register(base), as_Register(index));
      } else {
        __ dsll(AT, as_Register(index), scale);
        __ daddu(AT, as_Register(base), AT);
      }
      if( Assembler::is_simm16(disp) ) {
        if (value == 0) {
          __ sw(R0, AT, disp);
        } else {
          __ move(T9, value);
          __ sw(T9, AT, disp);
        }
      } else {
        if (value == 0) {
          __ move(T9, disp);
          __ addu(AT, AT, T9);
          __ sw(R0, AT, 0);
        } else {
          __ move(T9, disp);
          __ addu(AT, AT, T9);
          __ move(T9, value);
          __ sw(T9, AT, 0);
        }
      }
    } else {
      if( Assembler::is_simm16(disp) ) {
        if (value == 0) {
          __ sw(R0, as_Register(base), disp);
        } else {
          __ move(AT, value);
          __ sw(AT, as_Register(base), disp);
        }
      } else {
        if (value == 0) {
          __ move(T9, disp);
          __ addu(AT, as_Register(base), T9);
          __ sw(R0, AT, 0);
        } else {
          __ move(T9, disp);
          __ addu(AT, as_Register(base), T9);
          __ move(T9, value);
          __ sw(T9, AT, 0);
        }
      }
    }
  %}
  // Load a narrow (compressed) oop: 32-bit zero-extending load (lwu).
  // Decompression, if needed, is done by a separate node.  AT/T9 scratch.
  enc_class load_N_enc (mRegN dst, memory mem) %{
    MacroAssembler _masm(&cbuf);
    int dst = $dst$$reg;
    int base = $mem$$base;
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;
    relocInfo::relocType disp_reloc = $mem->disp_reloc();
    // Displacements carrying relocations are not supported here.
    assert(disp_reloc == relocInfo::none, "cannot have disp");

    if( index != 0 ) {
      if (scale == 0) {
        __ daddu(AT, as_Register(base), as_Register(index));
      } else {
        __ dsll(AT, as_Register(index), scale);
        __ daddu(AT, as_Register(base), AT);
      }
      if( Assembler::is_simm16(disp) ) {
        __ lwu(as_Register(dst), AT, disp);
      } else {
        // disp exceeds lwu's 16-bit immediate: materialize with li and add.
        __ li(T9, disp);
        __ daddu(AT, AT, T9);
        __ lwu(as_Register(dst), AT, 0);
      }
    } else {
      if( Assembler::is_simm16(disp) ) {
        __ lwu(as_Register(dst), as_Register(base), disp);
      } else {
        __ li(T9, disp);
        __ daddu(AT, as_Register(base), T9);
        __ lwu(as_Register(dst), AT, 0);
      }
    }

  %}
  // Load Pointer (64-bit, ld) into register 'dst'.  AT/T9 are scratch.
  enc_class load_P_enc (mRegP dst, memory mem) %{
    MacroAssembler _masm(&cbuf);
    int dst = $dst$$reg;
    int base = $mem$$base;
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;
    relocInfo::relocType disp_reloc = $mem->disp_reloc();
    // Displacements carrying relocations are not supported here.
    assert(disp_reloc == relocInfo::none, "cannot have disp");

    if( index != 0 ) {
      if (scale == 0) {
        __ daddu(AT, as_Register(base), as_Register(index));
      } else {
        __ dsll(AT, as_Register(index), scale);
        __ daddu(AT, as_Register(base), AT);
      }
      if( Assembler::is_simm16(disp) ) {
        __ ld(as_Register(dst), AT, disp);
      } else {
        // disp exceeds ld's 16-bit immediate: materialize with li and add.
        __ li(T9, disp);
        __ daddu(AT, AT, T9);
        __ ld(as_Register(dst), AT, 0);
      }
    } else {
      if( Assembler::is_simm16(disp) ) {
        __ ld(as_Register(dst), as_Register(base), disp);
      } else {
        __ li(T9, disp);
        __ daddu(AT, as_Register(base), T9);
        __ ld(as_Register(dst), AT, 0);
      }
    }
    // if( disp_reloc != relocInfo::none) __ ld(as_Register(dst), as_Register(dst), 0);
  %}
  // Store Pointer (64-bit, sd) from register 'src' to memory.
  // AT and T9 are scratch (clobbered).
  enc_class store_P_reg_enc (memory mem, mRegP src) %{
    MacroAssembler _masm(&cbuf);
    int src = $src$$reg;
    int base = $mem$$base;
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;

    if( index != 0 ) {
      if (scale == 0) {
        __ daddu(AT, as_Register(base), as_Register(index));
      } else {
        __ dsll(AT, as_Register(index), scale);
        __ daddu(AT, as_Register(base), AT);
      }
      if( Assembler::is_simm16(disp) ) {
        __ sd(as_Register(src), AT, disp);
      } else {
        // disp exceeds sd's 16-bit immediate: materialize and add.
        __ move(T9, disp);
        __ daddu(AT, AT, T9);
        __ sd(as_Register(src), AT, 0);
      }
    } else {
      if( Assembler::is_simm16(disp) ) {
        __ sd(as_Register(src), as_Register(base), disp);
      } else {
        __ move(T9, disp);
        __ daddu(AT, as_Register(base), T9);
        __ sd(as_Register(src), AT, 0);
      }
    }
  %}
  // Store a narrow (compressed) oop: 32-bit store (sw) of register 'src'.
  // AT and T9 are scratch (clobbered).
  // NOTE(review): the large-disp paths use 32-bit addu for pointer math
  // while store_P_reg_enc uses daddu -- verify this is intended on MIPS64.
  enc_class store_N_reg_enc (memory mem, mRegN src) %{
    MacroAssembler _masm(&cbuf);
    int src = $src$$reg;
    int base = $mem$$base;
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;

    if( index != 0 ) {
      if (scale == 0) {
        __ daddu(AT, as_Register(base), as_Register(index));
      } else {
        __ dsll(AT, as_Register(index), scale);
        __ daddu(AT, as_Register(base), AT);
      }
      if( Assembler::is_simm16(disp) ) {
        __ sw(as_Register(src), AT, disp);
      } else {
        __ move(T9, disp);
        __ addu(AT, AT, T9);
        __ sw(as_Register(src), AT, 0);
      }
    } else {
      if( Assembler::is_simm16(disp) ) {
        __ sw(as_Register(src), as_Register(base), disp);
      } else {
        __ move(T9, disp);
        __ addu(AT, as_Register(base), T9);
        __ sw(as_Register(src), AT, 0);
      }
    }
  %}
  // Store a NULL pointer (64-bit zero, taken from R0) to memory.
  // Uses Loongson gssdx (indexed 64-bit store, 8-bit signed offset) when
  // available.  AT and T9 are scratch (clobbered).
  enc_class store_P_immP0_enc (memory mem) %{
    MacroAssembler _masm(&cbuf);
    int base = $mem$$base;
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;

    if( index != 0 ) {
      if (scale == 0) {
        if( Assembler::is_simm16(disp) ) {
          if (UseLoongsonISA && Assembler::is_simm(disp, 8)) {
            // disp fits gssdx's 8-bit offset: single indexed store.
            __ gssdx(R0, as_Register(base), as_Register(index), disp);
          } else {
            __ daddu(AT, as_Register(base), as_Register(index));
            __ sd(R0, AT, disp);
          }
        } else {
          __ daddu(AT, as_Register(base), as_Register(index));
          __ move(T9, disp);
          if(UseLoongsonISA) {
            __ gssdx(R0, AT, T9, 0);
          } else {
            __ daddu(AT, AT, T9);
            __ sd(R0, AT, 0);
          }
        }
      } else {
        // Scaled index: shift into AT first.
        __ dsll(AT, as_Register(index), scale);
        if( Assembler::is_simm16(disp) ) {
          if (UseLoongsonISA && Assembler::is_simm(disp, 8)) {
            __ gssdx(R0, as_Register(base), AT, disp);
          } else {
            __ daddu(AT, as_Register(base), AT);
            __ sd(R0, AT, disp);
          }
        } else {
          __ daddu(AT, as_Register(base), AT);
          __ move(T9, disp);
          if (UseLoongsonISA) {
            __ gssdx(R0, AT, T9, 0);
          } else {
            __ daddu(AT, AT, T9);
            __ sd(R0, AT, 0);
          }
        }
      }
    } else {
      if( Assembler::is_simm16(disp) ) {
        __ sd(R0, as_Register(base), disp);
      } else {
        __ move(T9, disp);
        if (UseLoongsonISA) {
          __ gssdx(R0, as_Register(base), T9, 0);
        } else {
          __ daddu(AT, as_Register(base), T9);
          __ sd(R0, AT, 0);
        }
      }
    }
  %}
  // Store a zero narrow (compressed) oop: 32-bit store of R0 (sw).
  // AT and T9 are scratch (clobbered).
  enc_class storeImmN0_enc(memory mem, ImmN0 src) %{
    MacroAssembler _masm(&cbuf);
    int base = $mem$$base;
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;

    if(index!=0){
      // Fold the (optionally scaled) index into AT.
      if (scale == 0) {
        __ daddu(AT, as_Register(base), as_Register(index));
      } else {
        __ dsll(AT, as_Register(index), scale);
        __ daddu(AT, as_Register(base), AT);
      }

      if( Assembler::is_simm16(disp) ) {
        __ sw(R0, AT, disp);
      } else {
        __ move(T9, disp);
        __ daddu(AT, AT, T9);
        __ sw(R0, AT, 0);
      }
    }
    else {
      if( Assembler::is_simm16(disp) ) {
        __ sw(R0, as_Register(base), disp);
      } else {
        __ move(T9, disp);
        __ daddu(AT, as_Register(base), T9);
        __ sw(R0, AT, 0);
      }
    }
  %}
  // Load Long (64-bit, ld) into register 'dst'.  AT/T9 are scratch.
  // Begins with a deliberate probe load of [base] so that a null base
  // faults at the first instruction of the node (see comment below).
  enc_class load_L_enc (mRegL dst, memory mem) %{
    MacroAssembler _masm(&cbuf);
    int base = $mem$$base;
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;
    Register dst_reg = as_Register($dst$$reg);

    /*********************2013/03/27**************************
     * Jin: $base may contain a null object.
     * Server JIT force the exception_offset to be the pos of
     * the first instruction.
     * I insert such a 'null_check' at the beginning.
     *******************************************************/
    __ lw(AT, as_Register(base), 0);

    /*********************2012/10/04**************************
     * Error case found in SortTest
     * 337   b  java.util.Arrays::sort1 (401 bytes)
     * B73:
     *    d34   lw  T4.lo, [T4 + #16]  #@loadL-lo
     *          lw  T4.hi, [T4 + #16]+4  #@loadL-hi
     *
     * The original instructions generated here are :
     *  __ lw(dst_lo, as_Register(base), disp);
     *  __ lw(dst_hi, as_Register(base), disp + 4);
     *******************************************************/
    if( index != 0 ) {
      if (scale == 0) {
        __ daddu(AT, as_Register(base), as_Register(index));
      } else {
        __ dsll(AT, as_Register(index), scale);
        __ daddu(AT, as_Register(base), AT);
      }
      if( Assembler::is_simm16(disp) ) {
        __ ld(dst_reg, AT, disp);
      } else {
        __ move(T9, disp);
        __ daddu(AT, AT, T9);
        __ ld(dst_reg, AT, 0);
      }
    } else {
      if( Assembler::is_simm16(disp) ) {
        // The move keeps the addressing pattern uniform with the other arms.
        __ move(AT, as_Register(base));
        __ ld(dst_reg, AT, disp);
      } else {
        __ move(T9, disp);
        __ daddu(AT, as_Register(base), T9);
        __ ld(dst_reg, AT, 0);
      }
    }
  %}
  // Store Long (64-bit, sd) from register 'src' to memory.
  // AT and T9 are scratch (clobbered).
  enc_class store_L_reg_enc (memory mem, mRegL src) %{
    MacroAssembler _masm(&cbuf);
    int base = $mem$$base;
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;
    Register src_reg = as_Register($src$$reg);

    if( index != 0 ) {
      if (scale == 0) {
        __ daddu(AT, as_Register(base), as_Register(index));
      } else {
        __ dsll(AT, as_Register(index), scale);
        __ daddu(AT, as_Register(base), AT);
      }
      if( Assembler::is_simm16(disp) ) {
        __ sd(src_reg, AT, disp);
      } else {
        __ move(T9, disp);
        __ daddu(AT, AT, T9);
        __ sd(src_reg, AT, 0);
      }
    } else {
      if( Assembler::is_simm16(disp) ) {
        // The move keeps the addressing pattern uniform with the other arms.
        __ move(AT, as_Register(base));
        __ sd(src_reg, AT, disp);
      } else {
        __ move(T9, disp);
        __ daddu(AT, as_Register(base), T9);
        __ sd(src_reg, AT, 0);
      }
    }
  %}
2856 enc_class store_L_immL0_enc (memory mem, immL0 src) %{
2857 MacroAssembler _masm(&cbuf);
2858 int base = $mem$$base;
2859 int index = $mem$$index;
2860 int scale = $mem$$scale;
2861 int disp = $mem$$disp;
2863 if( index != 0 ) {
2864 if (scale == 0) {
2865 __ daddu(AT, as_Register(base), as_Register(index));
2866 } else {
2867 __ dsll(AT, as_Register(index), scale);
2868 __ daddu(AT, as_Register(base), AT);
2869 }
2870 if( Assembler::is_simm16(disp) ) {
2871 __ sd(R0, AT, disp);
2872 } else {
2873 __ move(T9, disp);
2874 __ addu(AT, AT, T9);
2875 __ sd(R0, AT, 0);
2876 }
2877 } else {
2878 if( Assembler::is_simm16(disp) ) {
2879 __ move(AT, as_Register(base));
2880 __ sd(R0, AT, disp);
2881 } else {
2882 __ move(T9, disp);
2883 __ addu(AT, as_Register(base), T9);
2884 __ sd(R0, AT, 0);
2885 }
2886 }
2887 %}
  // Load Float (32-bit, lwc1) into FPU register 'dst'.
  // Uses Loongson gslwxc1 (indexed FP load, 8-bit signed offset) when
  // available.  AT and T9 are scratch (clobbered).
  enc_class load_F_enc (regF dst, memory mem) %{
    MacroAssembler _masm(&cbuf);
    int base = $mem$$base;
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;
    FloatRegister dst = $dst$$FloatRegister;

    if( index != 0 ) {
      if( Assembler::is_simm16(disp) ) {
        if( UseLoongsonISA && Assembler::is_simm(disp, 8) ) {
          // disp fits gslwxc1's 8-bit offset: single indexed load.
          if (scale == 0) {
            __ gslwxc1(dst, as_Register(base), as_Register(index), disp);
          } else {
            __ dsll(AT, as_Register(index), scale);
            __ gslwxc1(dst, as_Register(base), AT, disp);
          }
        } else {
          if (scale == 0) {
            __ daddu(AT, as_Register(base), as_Register(index));
          } else {
            __ dsll(AT, as_Register(index), scale);
            __ daddu(AT, as_Register(base), AT);
          }
          __ lwc1(dst, AT, disp);
        }
      } else {
        if (scale == 0) {
          __ daddu(AT, as_Register(base), as_Register(index));
        } else {
          __ dsll(AT, as_Register(index), scale);
          __ daddu(AT, as_Register(base), AT);
        }
        __ move(T9, disp);
        if( UseLoongsonISA ) {
          __ gslwxc1(dst, AT, T9, 0);
        } else {
          __ daddu(AT, AT, T9);
          __ lwc1(dst, AT, 0);
        }
      }
    } else {
      if( Assembler::is_simm16(disp) ) {
        __ lwc1(dst, as_Register(base), disp);
      } else {
        __ move(T9, disp);
        if( UseLoongsonISA ) {
          __ gslwxc1(dst, as_Register(base), T9, 0);
        } else {
          __ daddu(AT, as_Register(base), T9);
          __ lwc1(dst, AT, 0);
        }
      }
    }
  %}
2945 enc_class store_F_reg_enc (memory mem, regF src) %{
2946 MacroAssembler _masm(&cbuf);
2947 int base = $mem$$base;
2948 int index = $mem$$index;
2949 int scale = $mem$$scale;
2950 int disp = $mem$$disp;
2951 FloatRegister src = $src$$FloatRegister;
2953 if( index != 0 ) {
2954 if( Assembler::is_simm16(disp) ) {
2955 if( UseLoongsonISA && Assembler::is_simm(disp, 8) ) {
2956 if (scale == 0) {
2957 __ gsswxc1(src, as_Register(base), as_Register(index), disp);
2958 } else {
2959 __ dsll(AT, as_Register(index), scale);
2960 __ gsswxc1(src, as_Register(base), AT, disp);
2961 }
2962 } else {
2963 if (scale == 0) {
2964 __ daddu(AT, as_Register(base), as_Register(index));
2965 } else {
2966 __ dsll(AT, as_Register(index), scale);
2967 __ daddu(AT, as_Register(base), AT);
2968 }
2969 __ swc1(src, AT, disp);
2970 }
2971 } else {
2972 if (scale == 0) {
2973 __ daddu(AT, as_Register(base), as_Register(index));
2974 } else {
2975 __ dsll(AT, as_Register(index), scale);
2976 __ daddu(AT, as_Register(base), AT);
2977 }
2978 __ move(T9, disp);
2979 if( UseLoongsonISA ) {
2980 __ gsswxc1(src, AT, T9, 0);
2981 } else {
2982 __ daddu(AT, AT, T9);
2983 __ swc1(src, AT, 0);
2984 }
2985 }
2986 } else {
2987 if( Assembler::is_simm16(disp) ) {
2988 __ swc1(src, as_Register(base), disp);
2989 } else {
2990 __ move(T9, disp);
2991 if( UseLoongsonISA ) {
2992 __ gslwxc1(src, as_Register(base), T9, 0);
2993 } else {
2994 __ daddu(AT, as_Register(base), T9);
2995 __ swc1(src, AT, 0);
2996 }
2997 }
2998 }
2999 %}
3001 enc_class load_D_enc (regD dst, memory mem) %{
3002 MacroAssembler _masm(&cbuf);
3003 int base = $mem$$base;
3004 int index = $mem$$index;
3005 int scale = $mem$$scale;
3006 int disp = $mem$$disp;
3007 FloatRegister dst_reg = as_FloatRegister($dst$$reg);
3009 if( index != 0 ) {
3010 if( Assembler::is_simm16(disp) ) {
3011 if( UseLoongsonISA && Assembler::is_simm(disp, 8) ) {
3012 if (scale == 0) {
3013 __ gsldxc1(dst_reg, as_Register(base), as_Register(index), disp);
3014 } else {
3015 __ dsll(AT, as_Register(index), scale);
3016 __ gsldxc1(dst_reg, as_Register(base), AT, disp);
3017 }
3018 } else {
3019 if (scale == 0) {
3020 __ daddu(AT, as_Register(base), as_Register(index));
3021 } else {
3022 __ dsll(AT, as_Register(index), scale);
3023 __ daddu(AT, as_Register(base), AT);
3024 }
3025 __ ldc1(dst_reg, AT, disp);
3026 }
3027 } else {
3028 if (scale == 0) {
3029 __ daddu(AT, as_Register(base), as_Register(index));
3030 } else {
3031 __ dsll(AT, as_Register(index), scale);
3032 __ daddu(AT, as_Register(base), AT);
3033 }
3034 __ move(T9, disp);
3035 if( UseLoongsonISA ) {
3036 __ gsldxc1(dst_reg, AT, T9, 0);
3037 } else {
3038 __ addu(AT, AT, T9);
3039 __ ldc1(dst_reg, AT, 0);
3040 }
3041 }
3042 } else {
3043 if( Assembler::is_simm16(disp) ) {
3044 __ ldc1(dst_reg, as_Register(base), disp);
3045 } else {
3046 __ move(T9, disp);
3047 if( UseLoongsonISA ) {
3048 __ gsldxc1(dst_reg, as_Register(base), T9, 0);
3049 } else {
3050 __ addu(AT, as_Register(base), T9);
3051 __ ldc1(dst_reg, AT, 0);
3052 }
3053 }
3054 }
3055 %}
3057 enc_class store_D_reg_enc (memory mem, regD src) %{
3058 MacroAssembler _masm(&cbuf);
3059 int base = $mem$$base;
3060 int index = $mem$$index;
3061 int scale = $mem$$scale;
3062 int disp = $mem$$disp;
3063 FloatRegister src_reg = as_FloatRegister($src$$reg);
3065 if( index != 0 ) {
3066 if( Assembler::is_simm16(disp) ) {
3067 if( UseLoongsonISA && Assembler::is_simm(disp, 8) ) {
3068 if (scale == 0) {
3069 __ gssdxc1(src_reg, as_Register(base), as_Register(index), disp);
3070 } else {
3071 __ dsll(AT, as_Register(index), scale);
3072 __ gssdxc1(src_reg, as_Register(base), AT, disp);
3073 }
3074 } else {
3075 if (scale == 0) {
3076 __ daddu(AT, as_Register(base), as_Register(index));
3077 } else {
3078 __ dsll(AT, as_Register(index), scale);
3079 __ daddu(AT, as_Register(base), AT);
3080 }
3081 __ sdc1(src_reg, AT, disp);
3082 }
3083 } else {
3084 if (scale == 0) {
3085 __ daddu(AT, as_Register(base), as_Register(index));
3086 } else {
3087 __ dsll(AT, as_Register(index), scale);
3088 __ daddu(AT, as_Register(base), AT);
3089 }
3090 __ move(T9, disp);
3091 if( UseLoongsonISA ) {
3092 __ gssdxc1(src_reg, AT, T9, 0);
3093 } else {
3094 __ addu(AT, AT, T9);
3095 __ sdc1(src_reg, AT, 0);
3096 }
3097 }
3098 } else {
3099 if( Assembler::is_simm16(disp) ) {
3100 __ sdc1(src_reg, as_Register(base), disp);
3101 } else {
3102 __ move(T9, disp);
3103 if( UseLoongsonISA ) {
3104 __ gssdxc1(src_reg, as_Register(base), T9, 0);
3105 } else {
3106 __ addu(AT, as_Register(base), T9);
3107 __ sdc1(src_reg, AT, 0);
3108 }
3109 }
3110 }
3111 %}
  // Call from compiled Java code into the VM runtime (leaf or non-leaf).
  // Emits a relocated, fixed-length call sequence through T9.
  enc_class Java_To_Runtime (method meth) %{   // CALL Java_To_Runtime, Java_To_Runtime_Leaf
    MacroAssembler _masm(&cbuf);
    // This is the instruction starting address for relocation info.
    __ block_comment("Java_To_Runtime");
    cbuf.set_insts_mark();
    __ relocate(relocInfo::runtime_call_type);
    // li48 emits a fixed-length instruction sequence, so the call site has
    // a constant layout for the relocation/patching machinery.
    __ li48(T9, (long)$meth$$method);
    __ jalr(T9);
    __ nop();    // branch-delay slot
  %}
  enc_class Java_Static_Call (method meth) %{    // JAVA STATIC CALL
    // CALL to fixup routine.  Fixup routine uses ScopeDesc info to determine
    // who we intended to call.
    MacroAssembler _masm(&cbuf);
    cbuf.set_insts_mark();

    // Choose the relocation type: runtime stub, optimized virtual, or
    // plain static call -- the patcher uses it to rebind the call site.
    if ( !_method ) {
      __ relocate(relocInfo::runtime_call_type);
      //emit_d32_reloc(cbuf, ($meth$$method - (int)(cbuf.code_end()) - 4),
       //                runtime_call_Relocation::spec(), RELOC_IMM32 );
    } else if(_optimized_virtual) {
      __ relocate(relocInfo::opt_virtual_call_type);
      //emit_d32_reloc(cbuf, ($meth$$method - (int)(cbuf.code_end()) - 4),
       //                opt_virtual_call_Relocation::spec(), RELOC_IMM32 );
    } else {
      __ relocate(relocInfo::static_call_type);
      //emit_d32_reloc(cbuf, ($meth$$method - (int)(cbuf.code_end()) - 4),
       //               static_call_Relocation::spec(), RELOC_IMM32 );
    }

    // NOTE(review): Java_To_Runtime uses the fixed-length li48 here, while
    // this encoding uses li, which may emit a variable-length sequence --
    // confirm the call-site patcher tolerates that.
    __ li(T9, $meth$$method);
    __ jalr(T9);
    __ nop();    // branch-delay slot
    if( _method ) {  // Emit stub for static call
      emit_java_to_interp(cbuf);
    }
  %}
3154 /*
3155 * [Ref: LIR_Assembler::ic_call() ]
3156 */
3157   enc_class Java_Dynamic_Call (method meth) %{    // JAVA DYNAMIC CALL
3158     MacroAssembler _masm(&cbuf);
3159     __ block_comment("Java_Dynamic_Call");
3160     __ ic_call((address)$meth$$method); // emits the inline-cache setup plus the relocated virtual call (see LIR_Assembler::ic_call)
3161   %}
3164   enc_class Set_Flags_After_Fast_Lock_Unlock(FlagsReg cr) %{ // flags = (AT == 0) ? 0 : 0xFFFFFFFF
3165     Register flags = $cr$$Register;
3166     Label L;
3168     MacroAssembler _masm(&cbuf);
3170     __ addu(flags, R0, R0); // flags = 0 (0 + 0)
3171     __ beq(AT, R0, L); // AT holds the fast_lock/unlock outcome; zero keeps flags at 0
3172     __ delayed()->nop(); // branch delay slot
3173     __ move(flags, 0xFFFFFFFF); // nonzero AT: mark failure with all-ones
3174     __ bind(L);
3175   %}
3177   enc_class enc_PartialSubtypeCheck(mRegP result, mRegP sub, mRegP super, mRegI tmp) %{ // result = 0 if sub is a subtype of super, 1 otherwise
3178     Register result = $result$$Register;
3179     Register sub = $sub$$Register;
3180     Register super = $super$$Register;
3181     Register length = $tmp$$Register;
3182     Register tmp = T9; // T9 is clobbered as an extra scratch register
3183     Label miss;
3185     /* 2012/9/28 Jin: result may be the same as sub
3186      * 47c B40: #	B21 B41 <- B20  Freq: 0.155379
3187      * 47c   partialSubtypeCheck result=S1, sub=S1, super=S3, length=S0
3188      * 4bc   mov   S2, NULL #@loadConP
3189      * 4c0   beq   S1, S2, B21 #@branchConP  P=0.999999 C=-1.000000
3190      */
3191     MacroAssembler _masm(&cbuf);
3192     Label done;
3193     __ check_klass_subtype_slow_path(sub, super, length, tmp,
3194                                      NULL, &miss,
3195                                      /*set_cond_codes:*/ true);
3196     /* 2013/7/22 Jin: Refer to X86_64's RDI */
3197     __ move(result, 0); // hit: sub IS a subtype of super
3198     __ b(done);
3199     __ nop(); // branch delay slot
3201     __ bind(miss);
3202     __ move(result, 1); // miss: not a subtype (nonzero result)
3203     __ bind(done);
3204   %}
3206 %}
3209 //---------MIPS FRAME--------------------------------------------------------------
3210 // Definition of frame structure and management information.
3211 //
3212 // S T A C K L A Y O U T Allocators stack-slot number
3213 // | (to get allocators register number
3214 // G Owned by | | v add SharedInfo::stack0)
3215 // r CALLER | |
3216 // o | +--------+ pad to even-align allocators stack-slot
3217 // w V | pad0 | numbers; owned by CALLER
3218 // t -----------+--------+----> Matcher::_in_arg_limit, unaligned
3219 // h ^ | in | 5
3220 // | | args | 4 Holes in incoming args owned by SELF
3221 // | | old | | 3
3222 // | | SP-+--------+----> Matcher::_old_SP, even aligned
3223 // v | | ret | 3 return address
3224 // Owned by +--------+
3225 // Self | pad2 | 2 pad to align old SP
3226 // | +--------+ 1
3227 // | | locks | 0
3228 // | +--------+----> SharedInfo::stack0, even aligned
3229 // | | pad1 | 11 pad to align new SP
3230 // | +--------+
3231 // | | | 10
3232 // | | spills | 9 spills
3233 // V | | 8 (pad0 slot for callee)
3234 // -----------+--------+----> Matcher::_out_arg_limit, unaligned
3235 // ^ | out | 7
3236 // | | args | 6 Holes in outgoing args owned by CALLEE
3237 // Owned by new | |
3238 // Callee SP-+--------+----> Matcher::_new_SP, even aligned
3239 // | |
3240 //
3241 // Note 1: Only region 8-11 is determined by the allocator. Region 0-5 is
3242 // known from SELF's arguments and the Java calling convention.
3243 // Region 6-7 is determined per call site.
3244 // Note 2: If the calling convention leaves holes in the incoming argument
3245 // area, those holes are owned by SELF. Holes in the outgoing area
3246 //       are owned by the CALLEE.  Holes should not be necessary in the
3247 // incoming area, as the Java calling convention is completely under
3248 // the control of the AD file. Doubles can be sorted and packed to
3249 //       avoid holes.  Holes in the outgoing arguments may be necessary for
3250 // varargs C calling conventions.
3251 // Note 3: Region 0-3 is even aligned, with pad2 as needed. Region 3-5 is
3252 // even aligned with pad0 as needed.
3253 // Region 6 is even aligned. Region 6-7 is NOT even aligned;
3254 // region 6-11 is even aligned; it may be padded out more so that
3255 // the region from SP to FP meets the minimum stack alignment.
3256 // Note 4: For I2C adapters, the incoming FP may not meet the minimum stack
3257 // alignment. Region 11, pad1, may be dynamically extended so that
3258 // SP meets the minimum alignment.
3261 frame %{
3263   stack_direction(TOWARDS_LOW); // stack grows toward lower addresses
3265   // These two registers define part of the calling convention
3266   // between compiled code and the interpreter.
3267   // SEE StartI2CNode::calling_convention & StartC2INode::calling_convention & StartOSRNode::calling_convention
3268   // for more information. by yjl 3/16/2006
3270   inline_cache_reg(T1);   // Inline Cache Register
3271   interpreter_method_oop_reg(S3);  // Method Oop Register when calling interpreter
3272   /*
3273   inline_cache_reg(T1);        // Inline Cache Register or methodOop for I2C
3274   interpreter_arg_ptr_reg(A0); // Argument pointer for I2C adapters
3275   */
3277   // Optional: name the operand used by cisc-spilling to access [stack_pointer + offset]
3278   cisc_spilling_operand_name(indOffset32);
3280   // Number of stack slots consumed by locking an object
3281   // generate Compile::sync_stack_slots
3282 #ifdef _LP64
3283   sync_stack_slots(2); // two 32-bit slots per lock record on 64-bit
3284 #else
3285   sync_stack_slots(1);
3286 #endif
3288   frame_pointer(SP);
3290   // Interpreter stores its frame pointer in a register which is
3291   // stored to the stack by I2CAdaptors.
3292   // I2CAdaptors convert from interpreted java to compiled java.
3294   interpreter_frame_pointer(FP);
3296   // generate Matcher::stack_alignment
3297   stack_alignment(StackAlignmentInBytes); //wordSize = sizeof(char*);
3299   // Number of stack slots between incoming argument block and the start of
3300   // a new frame.  The PROLOG must add this many slots to the stack.  The
3301   // EPILOG must remove this many slots.  Intel needs one slot for
3302   // return address.
3303   // generate Matcher::in_preserve_stack_slots
3304   //in_preserve_stack_slots(VerifyStackAtCalls + 2); //Now VerifyStackAtCalls is defined as false ! Leave one stack slot for ra and fp
3305   in_preserve_stack_slots(4); //Now VerifyStackAtCalls is defined as false ! Leave two stack slots for ra and fp
3307   // Number of outgoing stack slots killed above the out_preserve_stack_slots
3308   // for calls to C.  Supports the var-args backing area for register parms.
3309   varargs_C_out_slots_killed(0);
3311   // The after-PROLOG location of the return address.  Location of
3312   // return address specifies a type (REG or STACK) and a number
3313   // representing the register number (i.e. - use a register name) or
3314   // stack slot.
3315   // Ret Addr is on stack in slot 0 if no locks or verification or alignment.
3316   // Otherwise, it is above the locks and verification slot and alignment word
3317   //return_addr(STACK -1+ round_to(1+VerifyStackAtCalls+Compile::current()->sync()*Compile::current()->sync_stack_slots(),WordsPerLong));
3318   return_addr(REG RA); // return address lives in RA, not on the stack
3320   // Body of function which returns an integer array locating
3321   // arguments either in registers or in stack slots.  Passed an array
3322   // of ideal registers called "sig" and a "length" count.  Stack-slot
3323   // offsets are based on outgoing arguments, i.e. a CALLER setting up
3324   // arguments for a CALLEE.  Incoming stack arguments are
3325   // automatically biased by the preserve_stack_slots field above.
3328   // will generated to Matcher::calling_convention(OptoRegPair *sig, uint length, bool is_outgoing)
3329   // StartNode::calling_convention call this. by yjl 3/16/2006
3330   calling_convention %{
3331     SharedRuntime::java_calling_convention(sig_bt, regs, length, false);
3332   %}
3337   // Body of function which returns an integer array locating
3338   // arguments either in registers or in stack slots.  Passed an array
3339   // of ideal registers called "sig" and a "length" count.  Stack-slot
3340   // offsets are based on outgoing arguments, i.e. a CALLER setting up
3341   // arguments for a CALLEE.  Incoming stack arguments are
3342   // automatically biased by the preserve_stack_slots field above.
3345   // SEE CallRuntimeNode::calling_convention for more information. by yjl 3/16/2006
3346   c_calling_convention %{
3347     (void) SharedRuntime::c_calling_convention(sig_bt, regs, /*regs2=*/NULL, length);
3348   %}
3351   // Location of C & interpreter return values
3352   // register(s) contain(s) return value for Op_StartI2C and Op_StartOSR.
3353   // SEE Matcher::match. by yjl 3/16/2006
3354   c_return_value %{
3355     assert( ideal_reg >= Op_RegI && ideal_reg <= Op_RegL, "only return normal values" );
3356                                         /* -- , -- , Op_RegN, Op_RegI, Op_RegP, Op_RegF, Op_RegD, Op_RegL */
3357     static int lo[Op_RegL+1] = { 0, 0, V0_num,     V0_num,     V0_num,      F0_num,     F0_num, V0_num }; // F0 for float/double, V0 for int/long/ptr/narrow returns
3358     static int hi[Op_RegL+1] = { 0, 0, OptoReg::Bad, OptoReg::Bad, V0_H_num, OptoReg::Bad, F0_H_num, V0_H_num };
3359     return OptoRegPair(hi[ideal_reg],lo[ideal_reg]);
3360   %}
3362   // Location of return values
3363   // register(s) contain(s) return value for Op_StartC2I and Op_Start.
3364   // SEE Matcher::match. by yjl 3/16/2006
3366   return_value %{
3367     assert( ideal_reg >= Op_RegI && ideal_reg <= Op_RegL, "only return normal values" );
3368                                         /* -- , -- , Op_RegN, Op_RegI, Op_RegP, Op_RegF, Op_RegD, Op_RegL */
3369     static int lo[Op_RegL+1] = { 0, 0, V0_num,     V0_num,     V0_num,      F0_num,     F0_num, V0_num }; // identical to c_return_value above — keep the two tables in sync
3370     static int hi[Op_RegL+1] = { 0, 0, OptoReg::Bad, OptoReg::Bad, V0_H_num, OptoReg::Bad, F0_H_num, V0_H_num};
3371     return OptoRegPair(hi[ideal_reg],lo[ideal_reg]);
3372   %}
3374 %}
3376 //----------ATTRIBUTES---------------------------------------------------------
3377 //----------Operand Attributes-------------------------------------------------
3378 op_attrib op_cost(0); // Required cost attribute
3380 //----------Instruction Attributes---------------------------------------------
3381 ins_attrib ins_cost(100); // Required cost attribute
3382 ins_attrib ins_size(32); // Required size attribute (in bits)
3383 ins_attrib ins_pc_relative(0); // Required PC Relative flag
3384 ins_attrib ins_short_branch(0); // Required flag: is this instruction a
3385 // non-matching short branch variant of some
3386 // long branch?
3387 ins_attrib ins_alignment(4); // Required alignment attribute (must be a power of 2)
3388 // specifies the alignment that some part of the instruction (not
3389 // necessarily the start) requires. If > 1, a compute_padding()
3390 // function must be provided for the instruction
3392 //----------OPERANDS-----------------------------------------------------------
3393 // Operand definitions must precede instruction definitions for correct parsing
3394 // in the ADLC because operands constitute user defined types which are used in
3395 // instruction definitions.
3397 // Vectors
3398 operand vecD() %{ // 64-bit (double-word) vector, allocated to a double FP register
3399   constraint(ALLOC_IN_RC(dbl_reg));
3400   match(VecD);
3402   format %{ %}
3403   interface(REG_INTER);
3404 %}
3406 // Flags register, used as output of compare instructions
3407 operand FlagsReg() %{ // MIPS has no architectural condition-code register; mips_flags is a software stand-in (the "EFLAGS" format name is inherited from x86)
3408   constraint(ALLOC_IN_RC(mips_flags));
3409   match(RegFlags);
3411   format %{ "EFLAGS" %}
3412   interface(REG_INTER);
3413 %}
3415 //----------Simple Operands----------------------------------------------------
3416 //TODO: Do we need to define more special immediate operands?
3417 // Immediate Operands
3418 // Integer Immediate
3419 operand immI() %{ // any 32-bit integer constant
3420   match(ConI);
3421   //TODO: should not match immI8 here LEE
3422   match(immI8);
3424   op_cost(20);
3425   format %{ %}
3426   interface(CONST_INTER);
3427 %}
3429 // Long Immediate 8-bit
3430 operand immL8()
3431 %{
3432 predicate(-0x80L <= n->get_long() && n->get_long() < 0x80L);
3433 match(ConL);
3435 op_cost(5);
3436 format %{ %}
3437 interface(CONST_INTER);
3438 %}
3440 // Constant for test vs zero
3441 operand immI0() %{
3442 predicate(n->get_int() == 0);
3443 match(ConI);
3445 op_cost(0);
3446 format %{ %}
3447 interface(CONST_INTER);
3448 %}
3450 // Constant for increment
3451 operand immI1() %{
3452 predicate(n->get_int() == 1);
3453 match(ConI);
3455 op_cost(0);
3456 format %{ %}
3457 interface(CONST_INTER);
3458 %}
3460 // Constant for decrement
3461 operand immI_M1() %{
3462 predicate(n->get_int() == -1);
3463 match(ConI);
3465 op_cost(0);
3466 format %{ %}
3467 interface(CONST_INTER);
3468 %}
3470 operand immI_MaxI() %{
3471 predicate(n->get_int() == 2147483647);
3472 match(ConI);
3474 op_cost(0);
3475 format %{ %}
3476 interface(CONST_INTER);
3477 %}
3479 // Valid scale values for addressing modes
3480 operand immI2() %{
3481 predicate(0 <= n->get_int() && (n->get_int() <= 3));
3482 match(ConI);
3484 format %{ %}
3485 interface(CONST_INTER);
3486 %}
3488 operand immI8() %{
3489 predicate((-128 <= n->get_int()) && (n->get_int() <= 127));
3490 match(ConI);
3492 op_cost(5);
3493 format %{ %}
3494 interface(CONST_INTER);
3495 %}
3497 operand immI16() %{
3498 predicate((-32768 <= n->get_int()) && (n->get_int() <= 32767));
3499 match(ConI);
3501 op_cost(10);
3502 format %{ %}
3503 interface(CONST_INTER);
3504 %}
3506 // Constant for long shifts
3507 operand immI_32() %{
3508 predicate( n->get_int() == 32 );
3509 match(ConI);
3511 op_cost(0);
3512 format %{ %}
3513 interface(CONST_INTER);
3514 %}
3516 operand immI_63() %{
3517 predicate( n->get_int() == 63 );
3518 match(ConI);
3520 op_cost(0);
3521 format %{ %}
3522 interface(CONST_INTER);
3523 %}
3525 operand immI_0_31() %{
3526 predicate( n->get_int() >= 0 && n->get_int() <= 31 );
3527 match(ConI);
3529 op_cost(0);
3530 format %{ %}
3531 interface(CONST_INTER);
3532 %}
3534 // Operand for non-negative integer mask
3535 operand immI_nonneg_mask() %{ // non-negative constant that is a contiguous low-bit mask, i.e. (1 << k) - 1; is_int_mask returns -1 for non-masks
3536   predicate( (n->get_int() >= 0) && (Assembler::is_int_mask(n->get_int()) != -1) );
3537   match(ConI);
3539   op_cost(0);
3540   format %{ %}
3541   interface(CONST_INTER);
3542 %}
3544 operand immI_32_63() %{
3545 predicate( n->get_int() >= 32 && n->get_int() <= 63 );
3546 match(ConI);
3547 op_cost(0);
3549 format %{ %}
3550 interface(CONST_INTER);
3551 %}
3553 operand immI16_sub() %{ // constants whose NEGATION fits in a signed 16-bit immediate, so "x - imm" can be encoded as addiu(x, -imm); note the range is shifted by one vs immI16
3554   predicate((-32767 <= n->get_int()) && (n->get_int() <= 32768));
3555   match(ConI);
3557   op_cost(10);
3558   format %{ %}
3559   interface(CONST_INTER);
3560 %}
3562 operand immI_0_32767() %{
3563 predicate( n->get_int() >= 0 && n->get_int() <= 32767 );
3564 match(ConI);
3565 op_cost(0);
3567 format %{ %}
3568 interface(CONST_INTER);
3569 %}
3571 operand immI_0_65535() %{
3572 predicate( n->get_int() >= 0 && n->get_int() <= 65535 );
3573 match(ConI);
3574 op_cost(0);
3576 format %{ %}
3577 interface(CONST_INTER);
3578 %}
3580 operand immI_1() %{
3581 predicate( n->get_int() == 1 );
3582 match(ConI);
3584 op_cost(0);
3585 format %{ %}
3586 interface(CONST_INTER);
3587 %}
3589 operand immI_2() %{
3590 predicate( n->get_int() == 2 );
3591 match(ConI);
3593 op_cost(0);
3594 format %{ %}
3595 interface(CONST_INTER);
3596 %}
3598 operand immI_3() %{
3599 predicate( n->get_int() == 3 );
3600 match(ConI);
3602 op_cost(0);
3603 format %{ %}
3604 interface(CONST_INTER);
3605 %}
3607 operand immI_7() %{
3608 predicate( n->get_int() == 7 );
3609 match(ConI);
3611 format %{ %}
3612 interface(CONST_INTER);
3613 %}
3615 // Immediates for special shifts (sign extend)
3617 // Constants for increment
3618 operand immI_16() %{
3619 predicate( n->get_int() == 16 );
3620 match(ConI);
3622 format %{ %}
3623 interface(CONST_INTER);
3624 %}
3626 operand immI_24() %{
3627 predicate( n->get_int() == 24 );
3628 match(ConI);
3630 format %{ %}
3631 interface(CONST_INTER);
3632 %}
3634 // Constant for byte-wide masking
3635 operand immI_255() %{
3636 predicate( n->get_int() == 255 );
3637 match(ConI);
3639 op_cost(0);
3640 format %{ %}
3641 interface(CONST_INTER);
3642 %}
3644 operand immI_65535() %{
3645 predicate( n->get_int() == 65535 );
3646 match(ConI);
3648 op_cost(5);
3649 format %{ %}
3650 interface(CONST_INTER);
3651 %}
3653 operand immI_65536() %{
3654 predicate( n->get_int() == 65536 );
3655 match(ConI);
3657 op_cost(5);
3658 format %{ %}
3659 interface(CONST_INTER);
3660 %}
3662 operand immI_M65536() %{
3663 predicate( n->get_int() == -65536 );
3664 match(ConI);
3666 op_cost(5);
3667 format %{ %}
3668 interface(CONST_INTER);
3669 %}
3671 // Pointer Immediate
3672 operand immP() %{
3673 match(ConP);
3675 op_cost(10);
3676 format %{ %}
3677 interface(CONST_INTER);
3678 %}
3680 // NULL Pointer Immediate
3681 operand immP0() %{
3682 predicate( n->get_ptr() == 0 );
3683 match(ConP);
3684 op_cost(0);
3686 format %{ %}
3687 interface(CONST_INTER);
3688 %}
3690 // Pointer Immediate: 64-bit
3691 operand immP_set() %{
3692 match(ConP);
3694 op_cost(5);
3695 // formats are generated automatically for constants and base registers
3696 format %{ %}
3697 interface(CONST_INTER);
3698 %}
3700 // Pointer Immediate: 64-bit
3701 operand immP_load() %{ // oop pointers, or pointers too expensive to synthesize inline (>3 instructions) — complement of immP_no_oop_cheap below
3702   predicate(n->bottom_type()->isa_oop_ptr() || (MacroAssembler::insts_for_set64(n->get_ptr()) > 3));
3703   match(ConP);
3705   op_cost(5);
3706   // formats are generated automatically for constants and base registers
3707   format %{ %}
3708   interface(CONST_INTER);
3709 %}
3711 // Pointer Immediate: 64-bit
3712 operand immP_no_oop_cheap() %{ // non-oop pointer constants that can be materialized inline in <= 3 instructions — complement of immP_load above
3713   predicate(!n->bottom_type()->isa_oop_ptr() && (MacroAssembler::insts_for_set64(n->get_ptr()) <= 3));
3714   match(ConP);
3716   op_cost(5);
3717   // formats are generated automatically for constants and base registers
3718   format %{ %}
3719   interface(CONST_INTER);
3720 %}
3722 // Pointer for polling page
3723 operand immP_poll() %{
3724 predicate(n->get_ptr() != 0 && n->get_ptr() == (intptr_t)os::get_polling_page());
3725 match(ConP);
3726 op_cost(5);
3728 format %{ %}
3729 interface(CONST_INTER);
3730 %}
3732 // Pointer Immediate
3733 operand immN() %{
3734 match(ConN);
3736 op_cost(10);
3737 format %{ %}
3738 interface(CONST_INTER);
3739 %}
3741 operand immNKlass() %{
3742 match(ConNKlass);
3744 op_cost(10);
3745 format %{ %}
3746 interface(CONST_INTER);
3747 %}
3749 // NULL Pointer Immediate
3750 operand immN0() %{
3751 predicate(n->get_narrowcon() == 0);
3752 match(ConN);
3754 op_cost(5);
3755 format %{ %}
3756 interface(CONST_INTER);
3757 %}
3759 // Long Immediate
3760 operand immL() %{
3761 match(ConL);
3763 op_cost(20);
3764 format %{ %}
3765 interface(CONST_INTER);
3766 %}
3768 // Long Immediate zero
3769 operand immL0() %{
3770 predicate( n->get_long() == 0L );
3771 match(ConL);
3772 op_cost(0);
3774 format %{ %}
3775 interface(CONST_INTER);
3776 %}
3778 operand immL7() %{
3779 predicate( n->get_long() == 7L );
3780 match(ConL);
3781 op_cost(0);
3783 format %{ %}
3784 interface(CONST_INTER);
3785 %}
3787 operand immL_M1() %{
3788 predicate( n->get_long() == -1L );
3789 match(ConL);
3790 op_cost(0);
3792 format %{ %}
3793 interface(CONST_INTER);
3794 %}
3796 // bit 0..2 zero
3797 operand immL_M8() %{
3798 predicate( n->get_long() == -8L );
3799 match(ConL);
3800 op_cost(0);
3802 format %{ %}
3803 interface(CONST_INTER);
3804 %}
3806 // bit 2 zero
3807 operand immL_M5() %{
3808 predicate( n->get_long() == -5L );
3809 match(ConL);
3810 op_cost(0);
3812 format %{ %}
3813 interface(CONST_INTER);
3814 %}
3816 // bit 1..2 zero
3817 operand immL_M7() %{
3818 predicate( n->get_long() == -7L );
3819 match(ConL);
3820 op_cost(0);
3822 format %{ %}
3823 interface(CONST_INTER);
3824 %}
3826 // bit 0..1 zero
3827 operand immL_M4() %{
3828 predicate( n->get_long() == -4L );
3829 match(ConL);
3830 op_cost(0);
3832 format %{ %}
3833 interface(CONST_INTER);
3834 %}
3836 // bit 3..6 zero
3837 operand immL_M121() %{
3838 predicate( n->get_long() == -121L );
3839 match(ConL);
3840 op_cost(0);
3842 format %{ %}
3843 interface(CONST_INTER);
3844 %}
3846 // Long immediate from 0 to 127.
3847 // Used for a shorter form of long mul by 10.
3848 operand immL_127() %{
3849 predicate((0 <= n->get_long()) && (n->get_long() <= 127));
3850 match(ConL);
3851 op_cost(0);
3853 format %{ %}
3854 interface(CONST_INTER);
3855 %}
3857 // Operand for non-negative long mask
3858 operand immL_nonneg_mask() %{ // non-negative long that is a contiguous low-bit mask, i.e. (1L << k) - 1; is_jlong_mask returns -1 for non-masks
3859   predicate( (n->get_long() >= 0) && (Assembler::is_jlong_mask(n->get_long()) != -1) );
3860   match(ConL);
3862   op_cost(0);
3863   format %{ %}
3864   interface(CONST_INTER);
3865 %}
3867 operand immL_0_65535() %{
3868 predicate( n->get_long() >= 0 && n->get_long() <= 65535 );
3869 match(ConL);
3870 op_cost(0);
3872 format %{ %}
3873 interface(CONST_INTER);
3874 %}
3876 // Long Immediate: cheap (materialize in <= 3 instructions)
3877 operand immL_cheap() %{
3878 predicate(MacroAssembler::insts_for_set64(n->get_long()) <= 3);
3879 match(ConL);
3880 op_cost(0);
3882 format %{ %}
3883 interface(CONST_INTER);
3884 %}
3886 // Long Immediate: expensive (materialize in > 3 instructions)
3887 operand immL_expensive() %{
3888 predicate(MacroAssembler::insts_for_set64(n->get_long()) > 3);
3889 match(ConL);
3890 op_cost(0);
3892 format %{ %}
3893 interface(CONST_INTER);
3894 %}
3896 operand immL16() %{
3897 predicate((-32768 <= n->get_long()) && (n->get_long() <= 32767));
3898 match(ConL);
3900 op_cost(10);
3901 format %{ %}
3902 interface(CONST_INTER);
3903 %}
3905 operand immL16_sub() %{ // long constants whose NEGATION fits in a signed 16-bit immediate, so "x - imm" can be encoded as daddiu(x, -imm); range shifted by one vs immL16
3906   predicate((-32767 <= n->get_long()) && (n->get_long() <= 32768));
3907   match(ConL);
3909   op_cost(10);
3910   format %{ %}
3911   interface(CONST_INTER);
3912 %}
3914 // Long Immediate: low 32-bit mask
3915 operand immL_32bits() %{
3916 predicate(n->get_long() == 0xFFFFFFFFL);
3917 match(ConL);
3918 op_cost(20);
3920 format %{ %}
3921 interface(CONST_INTER);
3922 %}
3924 // Long Immediate 32-bit signed
3925 operand immL32()
3926 %{
3927 predicate(n->get_long() == (int) (n->get_long()));
3928 match(ConL);
3930 op_cost(15);
3931 format %{ %}
3932 interface(CONST_INTER);
3933 %}
3936 //single-precision floating-point zero
3937 operand immF0() %{
3938 predicate(jint_cast(n->getf()) == 0);
3939 match(ConF);
3941 op_cost(5);
3942 format %{ %}
3943 interface(CONST_INTER);
3944 %}
3946 //single-precision floating-point immediate
3947 operand immF() %{
3948 match(ConF);
3950 op_cost(20);
3951 format %{ %}
3952 interface(CONST_INTER);
3953 %}
3955 //double-precision floating-point zero
3956 operand immD0() %{
3957 predicate(jlong_cast(n->getd()) == 0);
3958 match(ConD);
3960 op_cost(5);
3961 format %{ %}
3962 interface(CONST_INTER);
3963 %}
3965 //double-precision floating-point immediate
3966 operand immD() %{
3967 match(ConD);
3969 op_cost(20);
3970 format %{ %}
3971 interface(CONST_INTER);
3972 %}
3974 // Register Operands
3975 // Integer Register
3976 operand mRegI() %{
3977 constraint(ALLOC_IN_RC(int_reg));
3978 match(RegI);
3980 format %{ %}
3981 interface(REG_INTER);
3982 %}
3984 operand no_Ax_mRegI() %{
3985 constraint(ALLOC_IN_RC(no_Ax_int_reg));
3986 match(RegI);
3987 match(mRegI);
3989 format %{ %}
3990 interface(REG_INTER);
3991 %}
3993 operand mS0RegI() %{
3994 constraint(ALLOC_IN_RC(s0_reg));
3995 match(RegI);
3996 match(mRegI);
3998 format %{ "S0" %}
3999 interface(REG_INTER);
4000 %}
4002 operand mS1RegI() %{
4003 constraint(ALLOC_IN_RC(s1_reg));
4004 match(RegI);
4005 match(mRegI);
4007 format %{ "S1" %}
4008 interface(REG_INTER);
4009 %}
4011 operand mS2RegI() %{
4012 constraint(ALLOC_IN_RC(s2_reg));
4013 match(RegI);
4014 match(mRegI);
4016 format %{ "S2" %}
4017 interface(REG_INTER);
4018 %}
4020 operand mS3RegI() %{
4021 constraint(ALLOC_IN_RC(s3_reg));
4022 match(RegI);
4023 match(mRegI);
4025 format %{ "S3" %}
4026 interface(REG_INTER);
4027 %}
4029 operand mS4RegI() %{
4030 constraint(ALLOC_IN_RC(s4_reg));
4031 match(RegI);
4032 match(mRegI);
4034 format %{ "S4" %}
4035 interface(REG_INTER);
4036 %}
4038 operand mS5RegI() %{
4039 constraint(ALLOC_IN_RC(s5_reg));
4040 match(RegI);
4041 match(mRegI);
4043 format %{ "S5" %}
4044 interface(REG_INTER);
4045 %}
4047 operand mS6RegI() %{
4048 constraint(ALLOC_IN_RC(s6_reg));
4049 match(RegI);
4050 match(mRegI);
4052 format %{ "S6" %}
4053 interface(REG_INTER);
4054 %}
4056 operand mS7RegI() %{
4057 constraint(ALLOC_IN_RC(s7_reg));
4058 match(RegI);
4059 match(mRegI);
4061 format %{ "S7" %}
4062 interface(REG_INTER);
4063 %}
4066 operand mT0RegI() %{
4067 constraint(ALLOC_IN_RC(t0_reg));
4068 match(RegI);
4069 match(mRegI);
4071 format %{ "T0" %}
4072 interface(REG_INTER);
4073 %}
4075 operand mT1RegI() %{
4076 constraint(ALLOC_IN_RC(t1_reg));
4077 match(RegI);
4078 match(mRegI);
4080 format %{ "T1" %}
4081 interface(REG_INTER);
4082 %}
4084 operand mT2RegI() %{
4085 constraint(ALLOC_IN_RC(t2_reg));
4086 match(RegI);
4087 match(mRegI);
4089 format %{ "T2" %}
4090 interface(REG_INTER);
4091 %}
4093 operand mT3RegI() %{
4094 constraint(ALLOC_IN_RC(t3_reg));
4095 match(RegI);
4096 match(mRegI);
4098 format %{ "T3" %}
4099 interface(REG_INTER);
4100 %}
4102 operand mT8RegI() %{
4103 constraint(ALLOC_IN_RC(t8_reg));
4104 match(RegI);
4105 match(mRegI);
4107 format %{ "T8" %}
4108 interface(REG_INTER);
4109 %}
4111 operand mT9RegI() %{
4112 constraint(ALLOC_IN_RC(t9_reg));
4113 match(RegI);
4114 match(mRegI);
4116 format %{ "T9" %}
4117 interface(REG_INTER);
4118 %}
4120 operand mA0RegI() %{
4121 constraint(ALLOC_IN_RC(a0_reg));
4122 match(RegI);
4123 match(mRegI);
4125 format %{ "A0" %}
4126 interface(REG_INTER);
4127 %}
4129 operand mA1RegI() %{
4130 constraint(ALLOC_IN_RC(a1_reg));
4131 match(RegI);
4132 match(mRegI);
4134 format %{ "A1" %}
4135 interface(REG_INTER);
4136 %}
4138 operand mA2RegI() %{
4139 constraint(ALLOC_IN_RC(a2_reg));
4140 match(RegI);
4141 match(mRegI);
4143 format %{ "A2" %}
4144 interface(REG_INTER);
4145 %}
4147 operand mA3RegI() %{
4148 constraint(ALLOC_IN_RC(a3_reg));
4149 match(RegI);
4150 match(mRegI);
4152 format %{ "A3" %}
4153 interface(REG_INTER);
4154 %}
4156 operand mA4RegI() %{
4157 constraint(ALLOC_IN_RC(a4_reg));
4158 match(RegI);
4159 match(mRegI);
4161 format %{ "A4" %}
4162 interface(REG_INTER);
4163 %}
4165 operand mA5RegI() %{
4166 constraint(ALLOC_IN_RC(a5_reg));
4167 match(RegI);
4168 match(mRegI);
4170 format %{ "A5" %}
4171 interface(REG_INTER);
4172 %}
4174 operand mA6RegI() %{
4175 constraint(ALLOC_IN_RC(a6_reg));
4176 match(RegI);
4177 match(mRegI);
4179 format %{ "A6" %}
4180 interface(REG_INTER);
4181 %}
4183 operand mA7RegI() %{
4184 constraint(ALLOC_IN_RC(a7_reg));
4185 match(RegI);
4186 match(mRegI);
4188 format %{ "A7" %}
4189 interface(REG_INTER);
4190 %}
4192 operand mV0RegI() %{
4193 constraint(ALLOC_IN_RC(v0_reg));
4194 match(RegI);
4195 match(mRegI);
4197 format %{ "V0" %}
4198 interface(REG_INTER);
4199 %}
4201 operand mV1RegI() %{
4202 constraint(ALLOC_IN_RC(v1_reg));
4203 match(RegI);
4204 match(mRegI);
4206 format %{ "V1" %}
4207 interface(REG_INTER);
4208 %}
4210 operand mRegN() %{
4211 constraint(ALLOC_IN_RC(int_reg));
4212 match(RegN);
4214 format %{ %}
4215 interface(REG_INTER);
4216 %}
4218 operand t0_RegN() %{
4219 constraint(ALLOC_IN_RC(t0_reg));
4220 match(RegN);
4221 match(mRegN);
4223 format %{ %}
4224 interface(REG_INTER);
4225 %}
4227 operand t1_RegN() %{
4228 constraint(ALLOC_IN_RC(t1_reg));
4229 match(RegN);
4230 match(mRegN);
4232 format %{ %}
4233 interface(REG_INTER);
4234 %}
4236 operand t2_RegN() %{
4237 constraint(ALLOC_IN_RC(t2_reg));
4238 match(RegN);
4239 match(mRegN);
4241 format %{ %}
4242 interface(REG_INTER);
4243 %}
4245 operand t3_RegN() %{
4246 constraint(ALLOC_IN_RC(t3_reg));
4247 match(RegN);
4248 match(mRegN);
4250 format %{ %}
4251 interface(REG_INTER);
4252 %}
4254 operand t8_RegN() %{
4255 constraint(ALLOC_IN_RC(t8_reg));
4256 match(RegN);
4257 match(mRegN);
4259 format %{ %}
4260 interface(REG_INTER);
4261 %}
4263 operand t9_RegN() %{
4264 constraint(ALLOC_IN_RC(t9_reg));
4265 match(RegN);
4266 match(mRegN);
4268 format %{ %}
4269 interface(REG_INTER);
4270 %}
4272 operand a0_RegN() %{
4273 constraint(ALLOC_IN_RC(a0_reg));
4274 match(RegN);
4275 match(mRegN);
4277 format %{ %}
4278 interface(REG_INTER);
4279 %}
4281 operand a1_RegN() %{
4282 constraint(ALLOC_IN_RC(a1_reg));
4283 match(RegN);
4284 match(mRegN);
4286 format %{ %}
4287 interface(REG_INTER);
4288 %}
4290 operand a2_RegN() %{
4291 constraint(ALLOC_IN_RC(a2_reg));
4292 match(RegN);
4293 match(mRegN);
4295 format %{ %}
4296 interface(REG_INTER);
4297 %}
4299 operand a3_RegN() %{
4300 constraint(ALLOC_IN_RC(a3_reg));
4301 match(RegN);
4302 match(mRegN);
4304 format %{ %}
4305 interface(REG_INTER);
4306 %}
4308 operand a4_RegN() %{
4309 constraint(ALLOC_IN_RC(a4_reg));
4310 match(RegN);
4311 match(mRegN);
4313 format %{ %}
4314 interface(REG_INTER);
4315 %}
4317 operand a5_RegN() %{
4318 constraint(ALLOC_IN_RC(a5_reg));
4319 match(RegN);
4320 match(mRegN);
4322 format %{ %}
4323 interface(REG_INTER);
4324 %}
4326 operand a6_RegN() %{
4327 constraint(ALLOC_IN_RC(a6_reg));
4328 match(RegN);
4329 match(mRegN);
4331 format %{ %}
4332 interface(REG_INTER);
4333 %}
4335 operand a7_RegN() %{
4336 constraint(ALLOC_IN_RC(a7_reg));
4337 match(RegN);
4338 match(mRegN);
4340 format %{ %}
4341 interface(REG_INTER);
4342 %}
4344 operand s0_RegN() %{
4345 constraint(ALLOC_IN_RC(s0_reg));
4346 match(RegN);
4347 match(mRegN);
4349 format %{ %}
4350 interface(REG_INTER);
4351 %}
4353 operand s1_RegN() %{
4354 constraint(ALLOC_IN_RC(s1_reg));
4355 match(RegN);
4356 match(mRegN);
4358 format %{ %}
4359 interface(REG_INTER);
4360 %}
4362 operand s2_RegN() %{
4363 constraint(ALLOC_IN_RC(s2_reg));
4364 match(RegN);
4365 match(mRegN);
4367 format %{ %}
4368 interface(REG_INTER);
4369 %}
4371 operand s3_RegN() %{
4372 constraint(ALLOC_IN_RC(s3_reg));
4373 match(RegN);
4374 match(mRegN);
4376 format %{ %}
4377 interface(REG_INTER);
4378 %}
4380 operand s4_RegN() %{
4381 constraint(ALLOC_IN_RC(s4_reg));
4382 match(RegN);
4383 match(mRegN);
4385 format %{ %}
4386 interface(REG_INTER);
4387 %}
4389 operand s5_RegN() %{
4390 constraint(ALLOC_IN_RC(s5_reg));
4391 match(RegN);
4392 match(mRegN);
4394 format %{ %}
4395 interface(REG_INTER);
4396 %}
4398 operand s6_RegN() %{
4399 constraint(ALLOC_IN_RC(s6_reg));
4400 match(RegN);
4401 match(mRegN);
4403 format %{ %}
4404 interface(REG_INTER);
4405 %}
4407 operand s7_RegN() %{
4408 constraint(ALLOC_IN_RC(s7_reg));
4409 match(RegN);
4410 match(mRegN);
4412 format %{ %}
4413 interface(REG_INTER);
4414 %}
4416 operand v0_RegN() %{
4417 constraint(ALLOC_IN_RC(v0_reg));
4418 match(RegN);
4419 match(mRegN);
4421 format %{ %}
4422 interface(REG_INTER);
4423 %}
4425 operand v1_RegN() %{
4426 constraint(ALLOC_IN_RC(v1_reg));
4427 match(RegN);
4428 match(mRegN);
4430 format %{ %}
4431 interface(REG_INTER);
4432 %}
4434 // Pointer Register
// Generic pointer-register operand: any register in the p_reg class.
4435 operand mRegP() %{
4436 constraint(ALLOC_IN_RC(p_reg));
4437 match(RegP);
4439 format %{ %}
4440 interface(REG_INTER);
4441 %}
// Pointer operand excluding T8.  Presumably T8 is reserved as a scratch
// register by some encodings in this port — TODO confirm against the
// register-class definitions earlier in the file.
4443 operand no_T8_mRegP() %{
4444 constraint(ALLOC_IN_RC(no_T8_p_reg));
4445 match(RegP);
4446 match(mRegP);
4448 format %{ %}
4449 interface(REG_INTER);
4450 %}
// Single-register pointer operands.  Each one chains to mRegP and (except
// t8_RegP, whose register is excluded from that class) to no_T8_mRegP.
4452 operand s0_RegP()
4453 %{
4454 constraint(ALLOC_IN_RC(s0_long_reg));
4455 match(RegP);
4456 match(mRegP);
4457 match(no_T8_mRegP);
4459 format %{ %}
4460 interface(REG_INTER);
4461 %}
4463 operand s1_RegP()
4464 %{
4465 constraint(ALLOC_IN_RC(s1_long_reg));
4466 match(RegP);
4467 match(mRegP);
4468 match(no_T8_mRegP);
4470 format %{ %}
4471 interface(REG_INTER);
4472 %}
4474 operand s2_RegP()
4475 %{
4476 constraint(ALLOC_IN_RC(s2_long_reg));
4477 match(RegP);
4478 match(mRegP);
4479 match(no_T8_mRegP);
4481 format %{ %}
4482 interface(REG_INTER);
4483 %}
4485 operand s3_RegP()
4486 %{
4487 constraint(ALLOC_IN_RC(s3_long_reg));
4488 match(RegP);
4489 match(mRegP);
4490 match(no_T8_mRegP);
4492 format %{ %}
4493 interface(REG_INTER);
4494 %}
4496 operand s4_RegP()
4497 %{
4498 constraint(ALLOC_IN_RC(s4_long_reg));
4499 match(RegP);
4500 match(mRegP);
4501 match(no_T8_mRegP);
4503 format %{ %}
4504 interface(REG_INTER);
4505 %}
4507 operand s5_RegP()
4508 %{
4509 constraint(ALLOC_IN_RC(s5_long_reg));
4510 match(RegP);
4511 match(mRegP);
4512 match(no_T8_mRegP);
4514 format %{ %}
4515 interface(REG_INTER);
4516 %}
4518 operand s6_RegP()
4519 %{
4520 constraint(ALLOC_IN_RC(s6_long_reg));
4521 match(RegP);
4522 match(mRegP);
4523 match(no_T8_mRegP);
4525 format %{ %}
4526 interface(REG_INTER);
4527 %}
4529 operand s7_RegP()
4530 %{
4531 constraint(ALLOC_IN_RC(s7_long_reg));
4532 match(RegP);
4533 match(mRegP);
4534 match(no_T8_mRegP);
4536 format %{ %}
4537 interface(REG_INTER);
4538 %}
4540 operand t0_RegP()
4541 %{
4542 constraint(ALLOC_IN_RC(t0_long_reg));
4543 match(RegP);
4544 match(mRegP);
4545 match(no_T8_mRegP);
4547 format %{ %}
4548 interface(REG_INTER);
4549 %}
4551 operand t1_RegP()
4552 %{
4553 constraint(ALLOC_IN_RC(t1_long_reg));
4554 match(RegP);
4555 match(mRegP);
4556 match(no_T8_mRegP);
4558 format %{ %}
4559 interface(REG_INTER);
4560 %}
4562 operand t2_RegP()
4563 %{
4564 constraint(ALLOC_IN_RC(t2_long_reg));
4565 match(RegP);
4566 match(mRegP);
4567 match(no_T8_mRegP);
4569 format %{ %}
4570 interface(REG_INTER);
4571 %}
4573 operand t3_RegP()
4574 %{
4575 constraint(ALLOC_IN_RC(t3_long_reg));
4576 match(RegP);
4577 match(mRegP);
4578 match(no_T8_mRegP);
4580 format %{ %}
4581 interface(REG_INTER);
4582 %}
// NOTE: t8_RegP intentionally does NOT chain to no_T8_mRegP — T8 is the
// register that class excludes.
4584 operand t8_RegP()
4585 %{
4586 constraint(ALLOC_IN_RC(t8_long_reg));
4587 match(RegP);
4588 match(mRegP);
4590 format %{ %}
4591 interface(REG_INTER);
4592 %}
4594 operand t9_RegP()
4595 %{
4596 constraint(ALLOC_IN_RC(t9_long_reg));
4597 match(RegP);
4598 match(mRegP);
4599 match(no_T8_mRegP);
4601 format %{ %}
4602 interface(REG_INTER);
4603 %}
4605 operand a0_RegP()
4606 %{
4607 constraint(ALLOC_IN_RC(a0_long_reg));
4608 match(RegP);
4609 match(mRegP);
4610 match(no_T8_mRegP);
4612 format %{ %}
4613 interface(REG_INTER);
4614 %}
4616 operand a1_RegP()
4617 %{
4618 constraint(ALLOC_IN_RC(a1_long_reg));
4619 match(RegP);
4620 match(mRegP);
4621 match(no_T8_mRegP);
4623 format %{ %}
4624 interface(REG_INTER);
4625 %}
4627 operand a2_RegP()
4628 %{
4629 constraint(ALLOC_IN_RC(a2_long_reg));
4630 match(RegP);
4631 match(mRegP);
4632 match(no_T8_mRegP);
4634 format %{ %}
4635 interface(REG_INTER);
4636 %}
4638 operand a3_RegP()
4639 %{
4640 constraint(ALLOC_IN_RC(a3_long_reg));
4641 match(RegP);
4642 match(mRegP);
4643 match(no_T8_mRegP);
4645 format %{ %}
4646 interface(REG_INTER);
4647 %}
4649 operand a4_RegP()
4650 %{
4651 constraint(ALLOC_IN_RC(a4_long_reg));
4652 match(RegP);
4653 match(mRegP);
4654 match(no_T8_mRegP);
4656 format %{ %}
4657 interface(REG_INTER);
4658 %}
4661 operand a5_RegP()
4662 %{
4663 constraint(ALLOC_IN_RC(a5_long_reg));
4664 match(RegP);
4665 match(mRegP);
4666 match(no_T8_mRegP);
4668 format %{ %}
4669 interface(REG_INTER);
4670 %}
4672 operand a6_RegP()
4673 %{
4674 constraint(ALLOC_IN_RC(a6_long_reg));
4675 match(RegP);
4676 match(mRegP);
4677 match(no_T8_mRegP);
4679 format %{ %}
4680 interface(REG_INTER);
4681 %}
4683 operand a7_RegP()
4684 %{
4685 constraint(ALLOC_IN_RC(a7_long_reg));
4686 match(RegP);
4687 match(mRegP);
4688 match(no_T8_mRegP);
4690 format %{ %}
4691 interface(REG_INTER);
4692 %}
4694 operand v0_RegP()
4695 %{
4696 constraint(ALLOC_IN_RC(v0_long_reg));
4697 match(RegP);
4698 match(mRegP);
4699 match(no_T8_mRegP);
4701 format %{ %}
4702 interface(REG_INTER);
4703 %}
4705 operand v1_RegP()
4706 %{
4707 constraint(ALLOC_IN_RC(v1_long_reg));
4708 match(RegP);
4709 match(mRegP);
4710 match(no_T8_mRegP);
4712 format %{ %}
4713 interface(REG_INTER);
4714 %}
4716 /*
4717 operand mSPRegP(mRegP reg) %{
4718 constraint(ALLOC_IN_RC(sp_reg));
4719 match(reg);
4721 format %{ "SP" %}
4722 interface(REG_INTER);
4723 %}
4725 operand mFPRegP(mRegP reg) %{
4726 constraint(ALLOC_IN_RC(fp_reg));
4727 match(reg);
4729 format %{ "FP" %}
4730 interface(REG_INTER);
4731 %}
4732 */
// Generic 64-bit long register operand; the per-register variants below
// chain to it via match().
4734 operand mRegL() %{
4735 constraint(ALLOC_IN_RC(long_reg));
4736 match(RegL);
4738 format %{ %}
4739 interface(REG_INTER);
4740 %}
4742 operand v0RegL() %{
4743 constraint(ALLOC_IN_RC(v0_long_reg));
4744 match(RegL);
4745 match(mRegL);
4747 format %{ %}
4748 interface(REG_INTER);
4749 %}
4751 operand v1RegL() %{
4752 constraint(ALLOC_IN_RC(v1_long_reg));
4753 match(RegL);
4754 match(mRegL);
4756 format %{ %}
4757 interface(REG_INTER);
4758 %}
// Long operand pinned to A0.  The format is left empty, matching every
// sibling long-register operand: ADLC then prints the actually-allocated
// register instead of the hard-coded literal "A0" this block previously
// carried (debug/disassembly output only; no codegen change).
4760 operand a0RegL() %{
4761 constraint(ALLOC_IN_RC(a0_long_reg));
4762 match(RegL);
4763 match(mRegL);
4765 format %{ %}
4766 interface(REG_INTER);
4767 %}
// Per-register long operands for the argument (a*), temporary (t*) and
// saved (s*) registers.  s5RegL/s6RegL are absent — presumably those
// registers are reserved by the runtime; confirm against the register
// definitions at the top of the file.
4769 operand a1RegL() %{
4770 constraint(ALLOC_IN_RC(a1_long_reg));
4771 match(RegL);
4772 match(mRegL);
4774 format %{ %}
4775 interface(REG_INTER);
4776 %}
4778 operand a2RegL() %{
4779 constraint(ALLOC_IN_RC(a2_long_reg));
4780 match(RegL);
4781 match(mRegL);
4783 format %{ %}
4784 interface(REG_INTER);
4785 %}
4787 operand a3RegL() %{
4788 constraint(ALLOC_IN_RC(a3_long_reg));
4789 match(RegL);
4790 match(mRegL);
4792 format %{ %}
4793 interface(REG_INTER);
4794 %}
4796 operand t0RegL() %{
4797 constraint(ALLOC_IN_RC(t0_long_reg));
4798 match(RegL);
4799 match(mRegL);
4801 format %{ %}
4802 interface(REG_INTER);
4803 %}
4805 operand t1RegL() %{
4806 constraint(ALLOC_IN_RC(t1_long_reg));
4807 match(RegL);
4808 match(mRegL);
4810 format %{ %}
4811 interface(REG_INTER);
4812 %}
4814 operand t2RegL() %{
4815 constraint(ALLOC_IN_RC(t2_long_reg));
4816 match(RegL);
4817 match(mRegL);
4819 format %{ %}
4820 interface(REG_INTER);
4821 %}
4823 operand t3RegL() %{
4824 constraint(ALLOC_IN_RC(t3_long_reg));
4825 match(RegL);
4826 match(mRegL);
4828 format %{ %}
4829 interface(REG_INTER);
4830 %}
4832 operand t8RegL() %{
4833 constraint(ALLOC_IN_RC(t8_long_reg));
4834 match(RegL);
4835 match(mRegL);
4837 format %{ %}
4838 interface(REG_INTER);
4839 %}
4841 operand a4RegL() %{
4842 constraint(ALLOC_IN_RC(a4_long_reg));
4843 match(RegL);
4844 match(mRegL);
4846 format %{ %}
4847 interface(REG_INTER);
4848 %}
4850 operand a5RegL() %{
4851 constraint(ALLOC_IN_RC(a5_long_reg));
4852 match(RegL);
4853 match(mRegL);
4855 format %{ %}
4856 interface(REG_INTER);
4857 %}
4859 operand a6RegL() %{
4860 constraint(ALLOC_IN_RC(a6_long_reg));
4861 match(RegL);
4862 match(mRegL);
4864 format %{ %}
4865 interface(REG_INTER);
4866 %}
4868 operand a7RegL() %{
4869 constraint(ALLOC_IN_RC(a7_long_reg));
4870 match(RegL);
4871 match(mRegL);
4873 format %{ %}
4874 interface(REG_INTER);
4875 %}
4877 operand s0RegL() %{
4878 constraint(ALLOC_IN_RC(s0_long_reg));
4879 match(RegL);
4880 match(mRegL);
4882 format %{ %}
4883 interface(REG_INTER);
4884 %}
4886 operand s1RegL() %{
4887 constraint(ALLOC_IN_RC(s1_long_reg));
4888 match(RegL);
4889 match(mRegL);
4891 format %{ %}
4892 interface(REG_INTER);
4893 %}
4895 operand s2RegL() %{
4896 constraint(ALLOC_IN_RC(s2_long_reg));
4897 match(RegL);
4898 match(mRegL);
4900 format %{ %}
4901 interface(REG_INTER);
4902 %}
4904 operand s3RegL() %{
4905 constraint(ALLOC_IN_RC(s3_long_reg));
4906 match(RegL);
4907 match(mRegL);
4909 format %{ %}
4910 interface(REG_INTER);
4911 %}
4913 operand s4RegL() %{
4914 constraint(ALLOC_IN_RC(s4_long_reg));
4915 match(RegL);
4916 match(mRegL);
4918 format %{ %}
4919 interface(REG_INTER);
4920 %}
4922 operand s7RegL() %{
4923 constraint(ALLOC_IN_RC(s7_long_reg));
4924 match(RegL);
4925 match(mRegL);
4927 format %{ %}
4928 interface(REG_INTER);
4929 %}
4931 // Floating register operands
// Single-precision float register operand.
4932 operand regF() %{
4933 constraint(ALLOC_IN_RC(flt_reg));
4934 match(RegF);
4936 format %{ %}
4937 interface(REG_INTER);
4938 %}
4940 //Double Precision Floating register operands
4941 operand regD() %{
4942 constraint(ALLOC_IN_RC(dbl_reg));
4943 match(RegD);
4945 format %{ %}
4946 interface(REG_INTER);
4947 %}
4949 //----------Memory Operands----------------------------------------------------
4950 // Indirect Memory Operand
// Memory operands describe the addressing modes the matcher may fold into
// loads/stores: [base], [base+off], [base+index], [base+index<<scale+off],
// plus DecodeN/DecodeNKlass forms that fold compressed-pointer decoding
// (only legal when the respective narrow shift is 0 — see the predicates).
4951 operand indirect(mRegP reg) %{
4952 constraint(ALLOC_IN_RC(p_reg));
4953 match(reg);
4955 format %{ "[$reg] @ indirect" %}
4956 interface(MEMORY_INTER) %{
4957 base($reg);
4958 index(0x0); /* NO_INDEX */
4959 scale(0x0);
4960 disp(0x0);
4961 %}
4962 %}
4964 // Indirect Memory Plus Short Offset Operand
4965 operand indOffset8(mRegP reg, immL8 off)
4966 %{
4967 constraint(ALLOC_IN_RC(p_reg));
4968 match(AddP reg off);
4970 format %{ "[$reg + $off (8-bit)] @ indOffset8" %}
4971 interface(MEMORY_INTER) %{
4972 base($reg);
4973 index(0x0); /* NO_INDEX */
4974 scale(0x0);
4975 disp($off);
4976 %}
4977 %}
4979 // Indirect Memory Times Scale Plus Index Register
4980 operand indIndexScale(mRegP reg, mRegL lreg, immI2 scale)
4981 %{
4982 constraint(ALLOC_IN_RC(p_reg));
4983 match(AddP reg (LShiftL lreg scale));
4985 op_cost(10);
4986 format %{"[$reg + $lreg << $scale] @ indIndexScale" %}
4987 interface(MEMORY_INTER) %{
4988 base($reg);
4989 index($lreg);
4990 scale($scale);
4991 disp(0x0);
4992 %}
4993 %}
4996 // [base + index + offset]
4997 operand baseIndexOffset8(mRegP base, mRegL index, immL8 off)
4998 %{
4999 constraint(ALLOC_IN_RC(p_reg));
5000 op_cost(5);
5001 match(AddP (AddP base index) off);
5003 format %{ "[$base + $index + $off (8-bit)] @ baseIndexOffset8" %}
5004 interface(MEMORY_INTER) %{
5005 base($base);
5006 index($index);
5007 scale(0x0);
5008 disp($off);
5009 %}
5010 %}
5012 // [base + index + offset]
// Same as baseIndexOffset8 but folds a ConvI2L of an int index.
5013 operand baseIndexOffset8_convI2L(mRegP base, mRegI index, immL8 off)
5014 %{
5015 constraint(ALLOC_IN_RC(p_reg));
5016 op_cost(5);
5017 match(AddP (AddP base (ConvI2L index)) off);
5019 format %{ "[$base + $index + $off (8-bit)] @ baseIndexOffset8_convI2L" %}
5020 interface(MEMORY_INTER) %{
5021 base($base);
5022 index($index);
5023 scale(0x0);
5024 disp($off);
5025 %}
5026 %}
5028 // Indirect Memory Times Scale Plus Index Register Plus Offset Operand
5029 operand indIndexScaleOffset8(mRegP reg, immL8 off, mRegL lreg, immI2 scale)
5030 %{
5031 constraint(ALLOC_IN_RC(p_reg));
5032 match(AddP (AddP reg (LShiftL lreg scale)) off);
5034 op_cost(10);
5035 format %{"[$reg + $off + $lreg << $scale] @ indIndexScaleOffset8" %}
5036 interface(MEMORY_INTER) %{
5037 base($reg);
5038 index($lreg);
5039 scale($scale);
5040 disp($off);
5041 %}
5042 %}
5044 operand indIndexScaleOffset8_convI2L(mRegP reg, immL8 off, mRegI ireg, immI2 scale)
5045 %{
5046 constraint(ALLOC_IN_RC(p_reg));
5047 match(AddP (AddP reg (LShiftL (ConvI2L ireg) scale)) off);
5049 op_cost(10);
5050 format %{"[$reg + $off + $ireg << $scale] @ indIndexScaleOffset8_convI2L" %}
5051 interface(MEMORY_INTER) %{
5052 base($reg);
5053 index($ireg);
5054 scale($scale);
5055 disp($off);
5056 %}
5057 %}
5059 // [base + index<<scale + offset]
5060 operand basePosIndexScaleOffset8(mRegP base, mRegI index, immL8 off, immI_0_31 scale)
5061 %{
5062 constraint(ALLOC_IN_RC(p_reg));
5063 //predicate(n->in(2)->in(3)->in(1)->as_Type()->type()->is_long()->_lo >= 0);
5064 op_cost(10);
5065 match(AddP (AddP base (LShiftL (ConvI2L index) scale)) off);
5067 format %{ "[$base + $index << $scale + $off (8-bit)] @ basePosIndexScaleOffset8" %}
5068 interface(MEMORY_INTER) %{
5069 base($base);
5070 index($index);
5071 scale($scale);
5072 disp($off);
5073 %}
5074 %}
5076 // Indirect Memory Times Scale Plus Index Register Plus Offset Operand
// Folds the DecodeN of a compressed base; legal only with zero oop shift.
5077 operand indIndexScaleOffsetNarrow(mRegN reg, immL8 off, mRegL lreg, immI2 scale)
5078 %{
5079 predicate(Universe::narrow_oop_shift() == 0);
5080 constraint(ALLOC_IN_RC(p_reg));
5081 match(AddP (AddP (DecodeN reg) (LShiftL lreg scale)) off);
5083 op_cost(10);
5084 format %{"[$reg + $off + $lreg << $scale] @ indIndexScaleOffsetNarrow" %}
5085 interface(MEMORY_INTER) %{
5086 base($reg);
5087 index($lreg);
5088 scale($scale);
5089 disp($off);
5090 %}
5091 %}
5093 // [base + index<<scale + offset] for compressed Oops
5094 operand indPosIndexI2LScaleOffset8Narrow(mRegN base, mRegI index, immL8 off, immI_0_31 scale)
5095 %{
5096 constraint(ALLOC_IN_RC(p_reg));
5097 //predicate(Universe::narrow_oop_shift() == 0 && n->in(2)->in(3)->in(1)->as_Type()->type()->is_long()->_lo >= 0);
5098 predicate(Universe::narrow_oop_shift() == 0);
5099 op_cost(10);
5100 match(AddP (AddP (DecodeN base) (LShiftL (ConvI2L index) scale)) off);
5102 format %{ "[$base + $index << $scale + $off (8-bit)] @ indPosIndexI2LScaleOffset8Narrow" %}
5103 interface(MEMORY_INTER) %{
5104 base($base);
5105 index($index);
5106 scale($scale);
5107 disp($off);
5108 %}
5109 %}
5111 //FIXME: I think it's better to limit the immI to be 16-bit at most!
5112 // Indirect Memory Plus Long Offset Operand
5113 operand indOffset32(mRegP reg, immL32 off) %{
5114 constraint(ALLOC_IN_RC(p_reg));
5115 op_cost(20);
5116 match(AddP reg off);
5118 format %{ "[$reg + $off (32-bit)] @ indOffset32" %}
5119 interface(MEMORY_INTER) %{
5120 base($reg);
5121 index(0x0); /* NO_INDEX */
5122 scale(0x0);
5123 disp($off);
5124 %}
5125 %}
5127 // Indirect Memory Plus Index Register
5128 operand indIndex(mRegP addr, mRegL index) %{
5129 constraint(ALLOC_IN_RC(p_reg));
5130 match(AddP addr index);
5132 op_cost(20);
5133 format %{"[$addr + $index] @ indIndex" %}
5134 interface(MEMORY_INTER) %{
5135 base($addr);
5136 index($index);
5137 scale(0x0);
5138 disp(0x0);
5139 %}
5140 %}
// NarrowKlass addressing modes: fold DecodeNKlass into the access.  All
// require narrow_klass_shift() == 0 so the narrow value IS the address.
5142 operand indirectNarrowKlass(mRegN reg)
5143 %{
5144 predicate(Universe::narrow_klass_shift() == 0);
5145 constraint(ALLOC_IN_RC(p_reg));
5146 op_cost(10);
5147 match(DecodeNKlass reg);
5149 format %{ "[$reg] @ indirectNarrowKlass" %}
5150 interface(MEMORY_INTER) %{
5151 base($reg);
5152 index(0x0);
5153 scale(0x0);
5154 disp(0x0);
5155 %}
5156 %}
5158 operand indOffset8NarrowKlass(mRegN reg, immL8 off)
5159 %{
5160 predicate(Universe::narrow_klass_shift() == 0);
5161 constraint(ALLOC_IN_RC(p_reg));
5162 op_cost(10);
5163 match(AddP (DecodeNKlass reg) off);
5165 format %{ "[$reg + $off (8-bit)] @ indOffset8NarrowKlass" %}
5166 interface(MEMORY_INTER) %{
5167 base($reg);
5168 index(0x0);
5169 scale(0x0);
5170 disp($off);
5171 %}
5172 %}
5174 operand indOffset32NarrowKlass(mRegN reg, immL32 off)
5175 %{
5176 predicate(Universe::narrow_klass_shift() == 0);
5177 constraint(ALLOC_IN_RC(p_reg));
5178 op_cost(10);
5179 match(AddP (DecodeNKlass reg) off);
5181 format %{ "[$reg + $off (32-bit)] @ indOffset32NarrowKlass" %}
5182 interface(MEMORY_INTER) %{
5183 base($reg);
5184 index(0x0);
5185 scale(0x0);
5186 disp($off);
5187 %}
5188 %}
5190 operand indIndexOffsetNarrowKlass(mRegN reg, mRegL lreg, immL32 off)
5191 %{
5192 predicate(Universe::narrow_klass_shift() == 0);
5193 constraint(ALLOC_IN_RC(p_reg));
5194 match(AddP (AddP (DecodeNKlass reg) lreg) off);
5196 op_cost(10);
5197 format %{"[$reg + $off + $lreg] @ indIndexOffsetNarrowKlass" %}
5198 interface(MEMORY_INTER) %{
5199 base($reg);
5200 index($lreg);
5201 scale(0x0);
5202 disp($off);
5203 %}
5204 %}
5206 operand indIndexNarrowKlass(mRegN reg, mRegL lreg)
5207 %{
5208 predicate(Universe::narrow_klass_shift() == 0);
5209 constraint(ALLOC_IN_RC(p_reg));
5210 match(AddP (DecodeNKlass reg) lreg);
5212 op_cost(10);
5213 format %{"[$reg + $lreg] @ indIndexNarrowKlass" %}
5214 interface(MEMORY_INTER) %{
5215 base($reg);
5216 index($lreg);
5217 scale(0x0);
5218 disp(0x0);
5219 %}
5220 %}
5222 // Indirect Memory Operand
// Narrow-oop forms: fold DecodeN; legal only with zero narrow-oop shift.
5223 operand indirectNarrow(mRegN reg)
5224 %{
5225 predicate(Universe::narrow_oop_shift() == 0);
5226 constraint(ALLOC_IN_RC(p_reg));
5227 op_cost(10);
5228 match(DecodeN reg);
5230 format %{ "[$reg] @ indirectNarrow" %}
5231 interface(MEMORY_INTER) %{
5232 base($reg);
5233 index(0x0);
5234 scale(0x0);
5235 disp(0x0);
5236 %}
5237 %}
5239 // Indirect Memory Plus Short Offset Operand
5240 operand indOffset8Narrow(mRegN reg, immL8 off)
5241 %{
5242 predicate(Universe::narrow_oop_shift() == 0);
5243 constraint(ALLOC_IN_RC(p_reg));
5244 op_cost(10);
5245 match(AddP (DecodeN reg) off);
5247 format %{ "[$reg + $off (8-bit)] @ indOffset8Narrow" %}
5248 interface(MEMORY_INTER) %{
5249 base($reg);
5250 index(0x0);
5251 scale(0x0);
5252 disp($off);
5253 %}
5254 %}
5256 // Indirect Memory Plus Index Register Plus Offset Operand
5257 operand indIndexOffset8Narrow(mRegN reg, mRegL lreg, immL8 off)
5258 %{
5259 predicate(Universe::narrow_oop_shift() == 0);
5260 constraint(ALLOC_IN_RC(p_reg));
5261 match(AddP (AddP (DecodeN reg) lreg) off);
5263 op_cost(10);
5264 format %{"[$reg + $off + $lreg] @ indIndexOffset8Narrow" %}
5265 interface(MEMORY_INTER) %{
5266 base($reg);
5267 index($lreg);
5268 scale(0x0);
5269 disp($off);
5270 %}
5271 %}
5273 //----------Load Long Memory Operands------------------------------------------
5274 // The load-long idiom will use it's address expression again after loading
5275 // the first word of the long. If the load-long destination overlaps with
5276 // registers used in the addressing expression, the 2nd half will be loaded
5277 // from a clobbered address. Fix this by requiring that load-long use
5278 // address registers that do not overlap with the load-long target.
5280 // load-long support
// High op_cost discourages the allocator from choosing this operand
// except for the load-long case it exists for.
5281 operand load_long_RegP() %{
5282 constraint(ALLOC_IN_RC(p_reg));
5283 match(RegP);
5284 match(mRegP);
5285 op_cost(100);
5286 format %{ %}
5287 interface(REG_INTER);
5288 %}
5290 // Indirect Memory Operand Long
5291 operand load_long_indirect(load_long_RegP reg) %{
5292 constraint(ALLOC_IN_RC(p_reg));
5293 match(reg);
5295 format %{ "[$reg]" %}
5296 interface(MEMORY_INTER) %{
5297 base($reg);
5298 index(0x0);
5299 scale(0x0);
5300 disp(0x0);
5301 %}
5302 %}
5304 // Indirect Memory Plus Long Offset Operand
5305 operand load_long_indOffset32(load_long_RegP reg, immL32 off) %{
5306 match(AddP reg off);
5308 format %{ "[$reg + $off]" %}
5309 interface(MEMORY_INTER) %{
5310 base($reg);
5311 index(0x0);
5312 scale(0x0);
5313 disp($off);
5314 %}
5315 %}
5317 //----------Conditional Branch Operands----------------------------------------
5318 // Comparison Op - This is the operation of the comparison, and is limited to
5319 // the following set of codes:
5320 // L (<), LE (<=), G (>), GE (>=), E (==), NE (!=)
5321 //
5322 // Other attributes of the comparison, such as unsignedness, are specified
5323 // by the comparison instruction that sets a condition code flags register.
5324 // That result is represented by a flags operand whose subtype is appropriate
5325 // to the unsignedness (etc.) of the comparison.
5326 //
5327 // Later, the instruction which matches both the Comparison Op (a Bool) and
5328 // the flags (produced by the Cmp) specifies the coding of the comparison op
5329 // by matching a specific subtype of Bool operand below, such as cmpOpU.
5331 // Comparison Code
5332 operand cmpOp() %{
5333 match(Bool);
5335 format %{ "" %}
5336 interface(COND_INTER) %{
5337 equal(0x01);
5338 not_equal(0x02);
5339 greater(0x03);
5340 greater_equal(0x04);
5341 less(0x05);
5342 less_equal(0x06);
5343 overflow(0x7);
5344 no_overflow(0x8);
5345 %}
5346 %}
5349 // Comparison Code
5350 // Comparison Code, unsigned compare. Used by FP also, with
5351 // C2 (unordered) turned into GT or LT already. The other bits
5352 // C0 and C3 are turned into Carry & Zero flags.
// NOTE: encodings are identical to cmpOp; the signed/unsigned distinction
// is carried by the instructions that consume the operand.
5353 operand cmpOpU() %{
5354 match(Bool);
5356 format %{ "" %}
5357 interface(COND_INTER) %{
5358 equal(0x01);
5359 not_equal(0x02);
5360 greater(0x03);
5361 greater_equal(0x04);
5362 less(0x05);
5363 less_equal(0x06);
5364 overflow(0x7);
5365 no_overflow(0x8);
5366 %}
5367 %}
5369 /*
5370 // Comparison Code, unsigned compare. Used by FP also, with
5371 // C2 (unordered) turned into GT or LT already. The other bits
5372 // C0 and C3 are turned into Carry & Zero flags.
5373 operand cmpOpU() %{
5374 match(Bool);
5376 format %{ "" %}
5377 interface(COND_INTER) %{
5378 equal(0x4);
5379 not_equal(0x5);
5380 less(0x2);
5381 greater_equal(0x3);
5382 less_equal(0x6);
5383 greater(0x7);
5384 %}
5385 %}
5386 */
5387 /*
5388 // Comparison Code for FP conditional move
5389 operand cmpOp_fcmov() %{
5390 match(Bool);
5392 format %{ "" %}
5393 interface(COND_INTER) %{
5394 equal (0x01);
5395 not_equal (0x02);
5396 greater (0x03);
5397 greater_equal(0x04);
5398 less (0x05);
5399 less_equal (0x06);
5400 %}
5401 %}
5403 // Comparision Code used in long compares
5404 operand cmpOp_commute() %{
5405 match(Bool);
5407 format %{ "" %}
5408 interface(COND_INTER) %{
5409 equal(0x4);
5410 not_equal(0x5);
5411 less(0xF);
5412 greater_equal(0xE);
5413 less_equal(0xD);
5414 greater(0xC);
5415 %}
5416 %}
5417 */
5419 //----------Special Memory Operands--------------------------------------------
5420 // Stack Slot Operand - This operand is used for loading and storing temporary
5421 // values on the stack where a match requires a value to
5422 // flow through memory.
// All five variants address [SP + slot-offset]; base 0x1d is the SP
// register encoding, and the disp is the allocator-assigned stack slot.
5423 operand stackSlotP(sRegP reg) %{
5424 constraint(ALLOC_IN_RC(stack_slots));
5425 // No match rule because this operand is only generated in matching
5426 op_cost(50);
5427 format %{ "[$reg]" %}
5428 interface(MEMORY_INTER) %{
5429 base(0x1d); // SP
5430 index(0x0); // No Index
5431 scale(0x0); // No Scale
5432 disp($reg); // Stack Offset
5433 %}
5434 %}
5436 operand stackSlotI(sRegI reg) %{
5437 constraint(ALLOC_IN_RC(stack_slots));
5438 // No match rule because this operand is only generated in matching
5439 op_cost(50);
5440 format %{ "[$reg]" %}
5441 interface(MEMORY_INTER) %{
5442 base(0x1d); // SP
5443 index(0x0); // No Index
5444 scale(0x0); // No Scale
5445 disp($reg); // Stack Offset
5446 %}
5447 %}
5449 operand stackSlotF(sRegF reg) %{
5450 constraint(ALLOC_IN_RC(stack_slots));
5451 // No match rule because this operand is only generated in matching
5452 op_cost(50);
5453 format %{ "[$reg]" %}
5454 interface(MEMORY_INTER) %{
5455 base(0x1d); // SP
5456 index(0x0); // No Index
5457 scale(0x0); // No Scale
5458 disp($reg); // Stack Offset
5459 %}
5460 %}
5462 operand stackSlotD(sRegD reg) %{
5463 constraint(ALLOC_IN_RC(stack_slots));
5464 // No match rule because this operand is only generated in matching
5465 op_cost(50);
5466 format %{ "[$reg]" %}
5467 interface(MEMORY_INTER) %{
5468 base(0x1d); // SP
5469 index(0x0); // No Index
5470 scale(0x0); // No Scale
5471 disp($reg); // Stack Offset
5472 %}
5473 %}
5475 operand stackSlotL(sRegL reg) %{
5476 constraint(ALLOC_IN_RC(stack_slots));
5477 // No match rule because this operand is only generated in matching
5478 op_cost(50);
5479 format %{ "[$reg]" %}
5480 interface(MEMORY_INTER) %{
5481 base(0x1d); // SP
5482 index(0x0); // No Index
5483 scale(0x0); // No Scale
5484 disp($reg); // Stack Offset
5485 %}
5486 %}
5489 //------------------------OPERAND CLASSES--------------------------------------
5490 //opclass memory( direct, indirect, indOffset16, indOffset32, indOffset32X, indIndexOffset );
// The 'memory' opclass is the union of every addressing-mode operand an
// ordinary load/store instruct may match.  The NarrowKlass operands are
// deliberately not included here.
5491 opclass memory( indirect, indirectNarrow, indOffset8, indOffset32, indIndex, indIndexScale, load_long_indirect, load_long_indOffset32, baseIndexOffset8, baseIndexOffset8_convI2L, indIndexScaleOffset8, indIndexScaleOffset8_convI2L, basePosIndexScaleOffset8, indIndexScaleOffsetNarrow, indPosIndexI2LScaleOffset8Narrow, indOffset8Narrow, indIndexOffset8Narrow);
5494 //----------PIPELINE-----------------------------------------------------------
5495 // Rules which define the behavior of the target architectures pipeline.
5497 pipeline %{
5499 //----------ATTRIBUTES---------------------------------------------------------
5500 attributes %{
5501 fixed_size_instructions; // Fixed size instructions
5502 branch_has_delay_slot; // branches have a delay slot on gs2
5503 max_instructions_per_bundle = 1; // 1 instruction per bundle
5504 max_bundles_per_cycle = 4; // Up to 4 bundles per cycle
5505 bundle_unit_size=4;
5506 instruction_unit_size = 4; // An instruction is 4 bytes long
5507 instruction_fetch_unit_size = 16; // The processor fetches one line
5508 instruction_fetch_units = 1; // of 16 bytes
5510 // List of nop instructions
5511 nops( MachNop );
5512 %}
5514 //----------RESOURCES----------------------------------------------------------
5515 // Resources are the functional units available to the machine
// Four decoders, two integer ALUs, two FPUs, one memory port, one branch unit.
5517 resources(D1, D2, D3, D4, DECODE = D1 | D2 | D3| D4, ALU1, ALU2, ALU = ALU1 | ALU2, FPU1, FPU2, FPU = FPU1 | FPU2, MEM, BR);
5519 //----------PIPELINE DESCRIPTION-----------------------------------------------
5520 // Pipeline Description specifies the stages in the machine's pipeline
5522 // IF: fetch
5523 // ID: decode
5524 // RD: read
5525 // CA: calculate
5526 // WB: write back
5527 // CM: commit
5529 pipe_desc(IF, ID, RD, CA, WB, CM);
5532 //----------PIPELINE CLASSES---------------------------------------------------
5533 // Pipeline Classes describe the stages in which input and output are
5534 // referenced by the hardware pipeline.
5536 //No.1 Integer ALU reg-reg operation : dst <-- reg1 op reg2
5537 pipe_class ialu_regI_regI(mRegI dst, mRegI src1, mRegI src2) %{
5538 single_instruction;
5539 src1 : RD(read);
5540 src2 : RD(read);
5541 dst : WB(write)+1;
5542 DECODE : ID;
5543 ALU : CA;
5544 %}
5546 //No.19 Integer mult operation : dst <-- reg1 mult reg2
5547 pipe_class ialu_mult(mRegI dst, mRegI src1, mRegI src2) %{
5548 src1 : RD(read);
5549 src2 : RD(read);
5550 dst : WB(write)+5;
5551 DECODE : ID;
5552 ALU2 : CA;
5553 %}
5555 pipe_class mulL_reg_reg(mRegL dst, mRegL src1, mRegL src2) %{
5556 src1 : RD(read);
5557 src2 : RD(read);
5558 dst : WB(write)+10;
5559 DECODE : ID;
5560 ALU2 : CA;
5561 %}
5563 //No.19 Integer div operation : dst <-- reg1 div reg2
5564 pipe_class ialu_div(mRegI dst, mRegI src1, mRegI src2) %{
5565 src1 : RD(read);
5566 src2 : RD(read);
5567 dst : WB(write)+10;
5568 DECODE : ID;
5569 ALU2 : CA;
5570 %}
5572 //No.19 Integer mod operation : dst <-- reg1 mod reg2
5573 pipe_class ialu_mod(mRegI dst, mRegI src1, mRegI src2) %{
5574 instruction_count(2);
5575 src1 : RD(read);
5576 src2 : RD(read);
5577 dst : WB(write)+10;
5578 DECODE : ID;
5579 ALU2 : CA;
5580 %}
5582 //No.15 Long ALU reg-reg operation : dst <-- reg1 op reg2
5583 pipe_class ialu_regL_regL(mRegL dst, mRegL src1, mRegL src2) %{
5584 instruction_count(2);
5585 src1 : RD(read);
5586 src2 : RD(read);
5587 dst : WB(write);
5588 DECODE : ID;
5589 ALU : CA;
5590 %}
5592 //No.18 Long ALU reg-imm16 operation : dst <-- reg1 op imm16
5593 pipe_class ialu_regL_imm16(mRegL dst, mRegL src) %{
5594 instruction_count(2);
5595 src : RD(read);
5596 dst : WB(write);
5597 DECODE : ID;
5598 ALU : CA;
5599 %}
5601 //no.16 load Long from memory :
5602 pipe_class ialu_loadL(mRegL dst, memory mem) %{
5603 instruction_count(2);
5604 mem : RD(read);
5605 dst : WB(write)+5;
5606 DECODE : ID;
5607 MEM : RD;
5608 %}
5610 //No.17 Store Long to Memory :
5611 pipe_class ialu_storeL(mRegL src, memory mem) %{
5612 instruction_count(2);
5613 mem : RD(read);
5614 src : RD(read);
5615 DECODE : ID;
5616 MEM : RD;
5617 %}
5619 //No.2 Integer ALU reg-imm16 operation : dst <-- reg1 op imm16
5620 pipe_class ialu_regI_imm16(mRegI dst, mRegI src) %{
5621 single_instruction;
5622 src : RD(read);
5623 dst : WB(write);
5624 DECODE : ID;
5625 ALU : CA;
5626 %}
5628 //No.3 Integer move operation : dst <-- reg
5629 pipe_class ialu_regI_mov(mRegI dst, mRegI src) %{
5630 src : RD(read);
5631 dst : WB(write);
5632 DECODE : ID;
5633 ALU : CA;
5634 %}
5636 //No.4 No instructions : do nothing
5637 pipe_class empty( ) %{
5638 instruction_count(0);
5639 %}
5641 //No.5 UnConditional branch :
5642 pipe_class pipe_jump( label labl ) %{
5643 multiple_bundles;
5644 DECODE : ID;
5645 BR : RD;
5646 %}
5648 //No.6 ALU Conditional branch :
5649 pipe_class pipe_alu_branch(mRegI src1, mRegI src2, label labl ) %{
5650 multiple_bundles;
5651 src1 : RD(read);
5652 src2 : RD(read);
5653 DECODE : ID;
5654 BR : RD;
5655 %}
5657 //no.7 load integer from memory :
5658 pipe_class ialu_loadI(mRegI dst, memory mem) %{
5659 mem : RD(read);
5660 dst : WB(write)+3;
5661 DECODE : ID;
5662 MEM : RD;
5663 %}
5665 //No.8 Store Integer to Memory :
5666 pipe_class ialu_storeI(mRegI src, memory mem) %{
5667 mem : RD(read);
5668 src : RD(read);
5669 DECODE : ID;
5670 MEM : RD;
5671 %}
5674 //No.10 Floating FPU reg-reg operation : dst <-- reg1 op reg2
5675 pipe_class fpu_regF_regF(regF dst, regF src1, regF src2) %{
5676 src1 : RD(read);
5677 src2 : RD(read);
5678 dst : WB(write);
5679 DECODE : ID;
5680 FPU : CA;
5681 %}
5683 //No.22 Floating div operation : dst <-- reg1 div reg2
5684 pipe_class fpu_div(regF dst, regF src1, regF src2) %{
5685 src1 : RD(read);
5686 src2 : RD(read);
5687 dst : WB(write);
5688 DECODE : ID;
5689 FPU2 : CA;
5690 %}
5692 pipe_class fcvt_I2D(regD dst, mRegI src) %{
5693 src : RD(read);
5694 dst : WB(write);
5695 DECODE : ID;
5696 FPU1 : CA;
5697 %}
5699 pipe_class fcvt_D2I(mRegI dst, regD src) %{
5700 src : RD(read);
5701 dst : WB(write);
5702 DECODE : ID;
5703 FPU1 : CA;
5704 %}
// GPR <-> FPR transfers go through the memory port on this model.
5706 pipe_class pipe_mfc1(mRegI dst, regD src) %{
5707 src : RD(read);
5708 dst : WB(write);
5709 DECODE : ID;
5710 MEM : RD;
5711 %}
5713 pipe_class pipe_mtc1(regD dst, mRegI src) %{
5714 src : RD(read);
5715 dst : WB(write);
5716 DECODE : ID;
5717 MEM : RD(5);
5718 %}
5720 //No.23 Floating sqrt operation : dst <-- reg1 sqrt reg2
5721 pipe_class fpu_sqrt(regF dst, regF src1, regF src2) %{
5722 multiple_bundles;
5723 src1 : RD(read);
5724 src2 : RD(read);
5725 dst : WB(write);
5726 DECODE : ID;
5727 FPU2 : CA;
5728 %}
5730 //No.11 Load Floating from Memory :
5731 pipe_class fpu_loadF(regF dst, memory mem) %{
5732 instruction_count(1);
5733 mem : RD(read);
5734 dst : WB(write)+3;
5735 DECODE : ID;
5736 MEM : RD;
5737 %}
5739 //No.12 Store Floating to Memory :
5740 pipe_class fpu_storeF(regF src, memory mem) %{
5741 instruction_count(1);
5742 mem : RD(read);
5743 src : RD(read);
5744 DECODE : ID;
5745 MEM : RD;
5746 %}
5748 //No.13 FPU Conditional branch :
5749 pipe_class pipe_fpu_branch(regF src1, regF src2, label labl ) %{
5750 multiple_bundles;
5751 src1 : RD(read);
5752 src2 : RD(read);
5753 DECODE : ID;
5754 BR : RD;
5755 %}
5757 //No.14 Floating FPU reg operation : dst <-- op reg
5758 pipe_class fpu1_regF(regF dst, regF src) %{
5759 src : RD(read);
5760 dst : WB(write);
5761 DECODE : ID;
5762 FPU : CA;
5763 %}
5765 pipe_class long_memory_op() %{
5766 instruction_count(10); multiple_bundles; force_serialization;
5767 fixed_latency(30);
5768 %}
5770 pipe_class simple_call() %{
5771 instruction_count(10); multiple_bundles; force_serialization;
5772 fixed_latency(200);
5773 BR : RD;
5774 %}
5776 pipe_class call() %{
5777 instruction_count(10); multiple_bundles; force_serialization;
5778 fixed_latency(200);
5779 %}
5781 //FIXME:
5782 //No.9 Pipe slow : for multi-instruction sequences
5783 pipe_class pipe_slow( ) %{
5784 instruction_count(20);
5785 force_serialization;
5786 multiple_bundles;
5787 fixed_latency(50);
5788 %}
5790 %}
5794 //----------INSTRUCTIONS-------------------------------------------------------
5795 //
5796 // match -- States which machine-independent subtree may be replaced
5797 // by this instruction.
5798 // ins_cost -- The estimated cost of this instruction is used by instruction
5799 // selection to identify a minimum cost tree of machine
5800 // instructions that matches a tree of machine-independent
5801 // instructions.
5802 // format -- A string providing the disassembly for this instruction.
5803 // The value of an instruction's operand may be inserted
5804 // by referring to it with a '$' prefix.
5805 // opcode -- Three instruction opcodes may be provided. These are referred
5806 // to within an encode class as $primary, $secondary, and $tertiary
5807 // respectively. The primary opcode is commonly used to
5808 // indicate the type of machine instruction, while secondary
5809 // and tertiary are often used for prefix options or addressing
5810 // modes.
5811 // ins_encode -- A list of encode classes with parameters. The encode class
5812 // name must have been defined in an 'enc_class' specification
5813 // in the encode section of the architecture description.
5816 // Load Integer
5817 instruct loadI(mRegI dst, memory mem) %{
5818 match(Set dst (LoadI mem));
5820 ins_cost(125);
5821 format %{ "lw $dst, $mem #@loadI" %}
5822 ins_encode (load_I_enc(dst, mem));
5823 ins_pipe( ialu_loadI );
5824 %}
5826 instruct loadI_convI2L(mRegL dst, memory mem) %{
5827 match(Set dst (ConvI2L (LoadI mem)));
5829 ins_cost(125);
5830 format %{ "lw $dst, $mem #@loadI_convI2L" %}
5831 ins_encode (load_I_enc(dst, mem));
5832 ins_pipe( ialu_loadI );
5833 %}
5835 // Load Integer (32 bit signed) to Byte (8 bit signed)
// Collapse (int << 24) >> 24 over a LoadI into a single sign-extending byte load.
// NOTE(review): loading only the low byte at the same address assumes a
// little-endian layout — confirm against the port's memory operand encoding.
5836 instruct loadI2B(mRegI dst, memory mem, immI_24 twentyfour) %{
5837 match(Set dst (RShiftI (LShiftI (LoadI mem) twentyfour) twentyfour));
5839 ins_cost(125);
5840 format %{ "lb $dst, $mem\t# int -> byte #@loadI2B" %}
5841 ins_encode(load_B_enc(dst, mem));
5842 ins_pipe(ialu_loadI);
5843 %}
5845 // Load Integer (32 bit signed) to Unsigned Byte (8 bit UNsigned)
// Collapse (LoadI & 0xFF) into a single zero-extending byte load (lbu).
5846 instruct loadI2UB(mRegI dst, memory mem, immI_255 mask) %{
5847 match(Set dst (AndI (LoadI mem) mask));
5849 ins_cost(125);
5850 format %{ "lbu $dst, $mem\t# int -> ubyte #@loadI2UB" %}
5851 ins_encode(load_UB_enc(dst, mem));
5852 ins_pipe(ialu_loadI);
5853 %}
5855 // Load Integer (32 bit signed) to Short (16 bit signed)
// Collapse (int << 16) >> 16 over a LoadI into a single sign-extending halfword load (lh).
5856 instruct loadI2S(mRegI dst, memory mem, immI_16 sixteen) %{
5857 match(Set dst (RShiftI (LShiftI (LoadI mem) sixteen) sixteen));
5859 ins_cost(125);
5860 format %{ "lh $dst, $mem\t# int -> short #@loadI2S" %}
5861 ins_encode(load_S_enc(dst, mem));
5862 ins_pipe(ialu_loadI);
5863 %}
5865 // Load Integer (32 bit signed) to Unsigned Short/Char (16 bit UNsigned)
// Collapse (LoadI & 0xFFFF) into a single zero-extending halfword load (lhu).
5866 instruct loadI2US(mRegI dst, memory mem, immI_65535 mask) %{
5867 match(Set dst (AndI (LoadI mem) mask));
5869 ins_cost(125);
5870 format %{ "lhu $dst, $mem\t# int -> ushort/char #@loadI2US" %}
5871 ins_encode(load_C_enc(dst, mem));
5872 ins_pipe(ialu_loadI);
5873 %}
5875 // Load Long.
// Load Long: 64-bit load (ld) from memory. The commented-out predicate suggests
// atomic-access variants were considered but are not implemented here.
5876 instruct loadL(mRegL dst, memory mem) %{
5877 // predicate(!((LoadLNode*)n)->require_atomic_access());
5878 match(Set dst (LoadL mem));
5880 ins_cost(250);
5881 format %{ "ld $dst, $mem #@loadL" %}
5882 ins_encode(load_L_enc(dst, mem));
5883 ins_pipe( ialu_loadL );
5884 %}
5886 // Load Long - UNaligned
// Load Long - UNaligned: currently emitted as a plain ld (same encoding as loadL)
// with a higher cost; the FIXME notes that ldl/ldr pairs would be more efficient.
5887 instruct loadL_unaligned(mRegL dst, memory mem) %{
5888 match(Set dst (LoadL_unaligned mem));
5890 // FIXME: Jin: Need more effective ldl/ldr
5891 ins_cost(450);
// Dropped the stray "\n\t" that used to trail the format string; it only
// garbled -XX:+PrintOptoAssembly output and had no effect on code generation.
5892 format %{ "ld $dst, $mem #@loadL_unaligned" %}
5893 ins_encode(load_L_enc(dst, mem));
5894 ins_pipe( ialu_loadL );
5895 %}
5897 // Store Long
// Store Long: 64-bit store (sd) of a GP register to memory.
5898 instruct storeL_reg(memory mem, mRegL src) %{
5899 match(Set mem (StoreL mem src));
5901 ins_cost(200);
// Format now lists the source register first ("sd rt, mem"), matching real MIPS
// store syntax and the sibling storeB/storeP rules; also dropped the stray "\n".
5902 format %{ "sd $src, $mem #@storeL_reg" %}
5903 ins_encode(store_L_reg_enc(mem, src));
5904 ins_pipe( ialu_storeL );
5905 %}
// Store Long zero: stores the hard-wired zero register, avoiding a constant load.
5908 instruct storeL_immL0(memory mem, immL0 zero) %{
5909 match(Set mem (StoreL mem zero));
5911 ins_cost(180);
// Operand order fixed to MIPS store syntax (source first), consistent with storeP.
5912 format %{ "sd zero, $mem #@storeL_immL0" %}
5913 ins_encode(store_L_immL0_enc(mem, zero));
5914 ins_pipe( ialu_storeL );
5915 %}
5917 // Load Compressed Pointer
// Load Compressed Pointer: 32-bit zero-extending load (lwu) of a narrow oop.
5918 instruct loadN(mRegN dst, memory mem)
5919 %{
5920 match(Set dst (LoadN mem));
5922 ins_cost(125); // XXX
5923 format %{ "lwu $dst, $mem\t# compressed ptr @ loadN" %}
5924 ins_encode (load_N_enc(dst, mem));
5925 ins_pipe( ialu_loadI ); // XXX
5926 %}
5928 // Load Pointer
// Load Pointer: full 64-bit load (ld) of an uncompressed pointer.
5929 instruct loadP(mRegP dst, memory mem) %{
5930 match(Set dst (LoadP mem));
5932 ins_cost(125);
5933 format %{ "ld $dst, $mem #@loadP" %}
5934 ins_encode (load_P_enc(dst, mem));
5935 ins_pipe( ialu_loadI );
5936 %}
5938 // Load Klass Pointer
// Load Klass Pointer: same encoding as loadP (64-bit ld); only the matched
// ideal node (LoadKlass) differs.
5939 instruct loadKlass(mRegP dst, memory mem) %{
5940 match(Set dst (LoadKlass mem));
5942 ins_cost(125);
5943 format %{ "MOV $dst,$mem @ loadKlass" %}
5944 ins_encode (load_P_enc(dst, mem));
5945 ins_pipe( ialu_loadI );
5946 %}
5948 // Load narrow Klass Pointer
// Load narrow Klass Pointer: 32-bit zero-extending load, same encoding as loadN.
5949 instruct loadNKlass(mRegN dst, memory mem)
5950 %{
5951 match(Set dst (LoadNKlass mem));
5953 ins_cost(125); // XXX
5954 format %{ "lwu $dst, $mem\t# compressed klass ptr @ loadNKlass" %}
5955 ins_encode (load_N_enc(dst, mem));
5956 ins_pipe( ialu_loadI ); // XXX
5957 %}
5959 // Load Constant
// Load integer constant into a register via the macro assembler's move(),
// which picks the shortest materialization sequence for the value.
5960 instruct loadConI(mRegI dst, immI src) %{
5961 match(Set dst src);
5963 ins_cost(150);
5964 format %{ "mov $dst, $src #@loadConI" %}
5965 ins_encode %{
5966 Register dst = $dst$$Register;
5967 int value = $src$$constant;
5968 __ move(dst, value);
5969 %}
5970 ins_pipe( ialu_regI_regI );
5971 %}
// Load arbitrary 64-bit constant via set64(); cheaper special cases
// (loadConL16/loadConL0 below) take precedence through lower ins_cost.
5974 instruct loadConL_set64(mRegL dst, immL src) %{
5975 match(Set dst src);
5976 ins_cost(120);
5977 format %{ "li $dst, $src @ loadConL_set64" %}
5978 ins_encode %{
5979 __ set64($dst$$Register, $src$$constant);
5980 %}
5981 ins_pipe(ialu_regL_regL);
5982 %}
5984 /*
5985 // Load long value from constant table (predicated by immL_expensive).
5986 instruct loadConL_load(mRegL dst, immL_expensive src) %{
5987 match(Set dst src);
5988 ins_cost(150);
5989 format %{ "ld $dst, $constantoffset[$constanttablebase] # load long $src from table @ loadConL_ldx" %}
5990 ins_encode %{
5991 int con_offset = $constantoffset($src);
5993 if (Assembler::is_simm16(con_offset)) {
5994 __ ld($dst$$Register, $constanttablebase, con_offset);
5995 } else {
5996 __ set64(AT, con_offset);
5997 if (UseLoongsonISA) {
5998 __ gsldx($dst$$Register, $constanttablebase, AT, 0);
5999 } else {
6000 __ daddu(AT, $constanttablebase, AT);
6001 __ ld($dst$$Register, AT, 0);
6002 }
6003 }
6004 %}
6005 ins_pipe(ialu_loadI);
6006 %}
6007 */
// Load a long constant that fits in a signed 16-bit immediate using a single
// daddiu from R0 (always zero).
6009 instruct loadConL16(mRegL dst, immL16 src) %{
6010 match(Set dst src);
6011 ins_cost(105);
6012 format %{ "mov $dst, $src #@loadConL16" %}
6013 ins_encode %{
6014 Register dst_reg = as_Register($dst$$reg);
6015 int value = $src$$constant;
6016 __ daddiu(dst_reg, R0, value);
6017 %}
6018 ins_pipe( ialu_regL_regL );
6019 %}
// Load long zero: daddu dst, R0, R0 — no immediate needed; lowest-cost variant.
6022 instruct loadConL0(mRegL dst, immL0 src) %{
6023 match(Set dst src);
6024 ins_cost(100);
6025 format %{ "mov $dst, zero #@loadConL0" %}
6026 ins_encode %{
6027 Register dst_reg = as_Register($dst$$reg);
6028 __ daddu(dst_reg, R0, R0);
6029 %}
6030 ins_pipe( ialu_regL_regL );
6031 %}
6033 // Load Range
// Load array length (LoadRange): a 32-bit int load, same encoding as loadI.
6034 instruct loadRange(mRegI dst, memory mem) %{
6035 match(Set dst (LoadRange mem));
6037 ins_cost(125);
6038 format %{ "MOV $dst,$mem @ loadRange" %}
6039 ins_encode(load_I_enc(dst, mem));
6040 ins_pipe( ialu_loadI );
6041 %}
// Store Pointer: 64-bit store (sd) of an uncompressed pointer register.
6044 instruct storeP(memory mem, mRegP src ) %{
6045 match(Set mem (StoreP mem src));
6047 ins_cost(125);
6048 format %{ "sd $src, $mem #@storeP" %}
6049 ins_encode(store_P_reg_enc(mem, src));
6050 ins_pipe( ialu_storeI );
6051 %}
6053 // Store NULL Pointer, mark word, or other simple pointer constant.
// Store NULL Pointer, mark word, or other simple pointer constant.
// Stores R0 directly, so the zero operand needs no encoding argument.
6054 instruct storeImmP0(memory mem, immP0 zero) %{
6055 match(Set mem (StoreP mem zero));
6057 ins_cost(125);
6058 format %{ "mov $mem, $zero #@storeImmP0" %}
6059 ins_encode(store_P_immP0_enc(mem));
6060 ins_pipe( ialu_storeI );
6061 %}
6063 // Store Byte Immediate
// Store Byte Immediate: materializes the 8-bit constant then stores it (sb).
6064 instruct storeImmB(memory mem, immI8 src) %{
6065 match(Set mem (StoreB mem src));
6067 ins_cost(150);
6068 format %{ "movb $mem, $src #@storeImmB" %}
6069 ins_encode(store_B_immI_enc(mem, src));
6070 ins_pipe( ialu_storeI );
6071 %}
6073 // Store Compressed Pointer
// Store Compressed Pointer: 32-bit store (sw) of a narrow oop register.
6074 instruct storeN(memory mem, mRegN src)
6075 %{
6076 match(Set mem (StoreN mem src));
6078 ins_cost(125); // XXX
// Format now lists the source register first, matching MIPS "sw rt, mem"
// syntax and the sibling storeB/storeP rules.
6079 format %{ "sw $src, $mem\t# compressed ptr @ storeN" %}
6080 ins_encode(store_N_reg_enc(mem, src));
6081 ins_pipe( ialu_storeI );
6082 %}
// Store narrow Klass pointer: same encoding as storeN, different ideal node.
6084 instruct storeNKlass(memory mem, mRegN src)
6085 %{
6086 match(Set mem (StoreNKlass mem src));
6088 ins_cost(125); // XXX
// Operand order fixed to MIPS store syntax (source register first).
6089 format %{ "sw $src, $mem\t# compressed klass ptr @ storeNKlass" %}
6090 ins_encode(store_N_reg_enc(mem, src));
6091 ins_pipe( ialu_storeI );
6092 %}
// Store compressed NULL: only valid when both narrow-oop and narrow-klass
// bases are NULL, i.e. the encoded form of NULL is the raw zero pattern.
6094 instruct storeImmN0(memory mem, immN0 zero)
6095 %{
6096 predicate(Universe::narrow_oop_base() == NULL && Universe::narrow_klass_base() == NULL);
6097 match(Set mem (StoreN mem zero));
6099 ins_cost(125); // XXX
6100 format %{ "storeN0 $mem, R12\t# compressed ptr" %}
6101 ins_encode(storeImmN0_enc(mem, zero));
6102 ins_pipe( ialu_storeI );
6103 %}
6105 // Store Byte
// Store Byte: sb of the low 8 bits of an int register.
6106 instruct storeB(memory mem, mRegI src) %{
6107 match(Set mem (StoreB mem src));
6109 ins_cost(125);
6110 format %{ "sb $src, $mem #@storeB" %}
6111 ins_encode(store_B_reg_enc(mem, src));
6112 ins_pipe( ialu_storeI );
6113 %}
// Fused StoreB(ConvL2I ...): sb writes only the low byte, so the explicit
// long-to-int truncation is free.
6115 instruct storeB_convL2I(memory mem, mRegL src) %{
6116 match(Set mem (StoreB mem (ConvL2I src)));
6118 ins_cost(125);
6119 format %{ "sb $src, $mem #@storeB_convL2I" %}
6120 ins_encode(store_B_reg_enc(mem, src));
6121 ins_pipe( ialu_storeI );
6122 %}
6124 // Load Byte (8bit signed)
// Load Byte (8bit signed): sign-extending lb.
6125 instruct loadB(mRegI dst, memory mem) %{
6126 match(Set dst (LoadB mem));
6128 ins_cost(125);
6129 format %{ "lb $dst, $mem #@loadB" %}
6130 ins_encode(load_B_enc(dst, mem));
6131 ins_pipe( ialu_loadI );
6132 %}
// Fused LoadB+ConvI2L: lb sign-extends through all 64 bits, so the widening
// conversion needs no extra instruction.
6134 instruct loadB_convI2L(mRegL dst, memory mem) %{
6135 match(Set dst (ConvI2L (LoadB mem)));
6137 ins_cost(125);
6138 format %{ "lb $dst, $mem #@loadB_convI2L" %}
6139 ins_encode(load_B_enc(dst, mem));
6140 ins_pipe( ialu_loadI );
6141 %}
6143 // Load Byte (8bit UNsigned)
// Load Byte (8bit UNsigned): zero-extending lbu.
6144 instruct loadUB(mRegI dst, memory mem) %{
6145 match(Set dst (LoadUB mem));
6147 ins_cost(125);
6148 format %{ "lbu $dst, $mem #@loadUB" %}
6149 ins_encode(load_UB_enc(dst, mem));
6150 ins_pipe( ialu_loadI );
6151 %}
// Fused LoadUB+ConvI2L: lbu zero-extends to 64 bits, making the widening free.
6153 instruct loadUB_convI2L(mRegL dst, memory mem) %{
6154 match(Set dst (ConvI2L (LoadUB mem)));
6156 ins_cost(125);
6157 format %{ "lbu $dst, $mem #@loadUB_convI2L" %}
6158 ins_encode(load_UB_enc(dst, mem));
6159 ins_pipe( ialu_loadI );
6160 %}
6162 // Load Short (16bit signed)
// Load Short (16bit signed): sign-extending lh.
6163 instruct loadS(mRegI dst, memory mem) %{
6164 match(Set dst (LoadS mem));
6166 ins_cost(125);
6167 format %{ "lh $dst, $mem #@loadS" %}
6168 ins_encode(load_S_enc(dst, mem));
6169 ins_pipe( ialu_loadI );
6170 %}
6172 // Load Short (16 bit signed) to Byte (8 bit signed)
// Collapse (short << 24) >> 24 over a LoadS into a single sign-extending lb.
// NOTE(review): picking only the low byte assumes little-endian — confirm.
6173 instruct loadS2B(mRegI dst, memory mem, immI_24 twentyfour) %{
6174 match(Set dst (RShiftI (LShiftI (LoadS mem) twentyfour) twentyfour));
6176 ins_cost(125);
6177 format %{ "lb $dst, $mem\t# short -> byte #@loadS2B" %}
6178 ins_encode(load_B_enc(dst, mem));
6179 ins_pipe(ialu_loadI);
6180 %}
// Fused LoadS+ConvI2L: lh sign-extends to 64 bits, so conversion is free.
6182 instruct loadS_convI2L(mRegL dst, memory mem) %{
6183 match(Set dst (ConvI2L (LoadS mem)));
6185 ins_cost(125);
6186 format %{ "lh $dst, $mem #@loadS_convI2L" %}
6187 ins_encode(load_S_enc(dst, mem));
6188 ins_pipe( ialu_loadI );
6189 %}
6191 // Store Integer Immediate
// Store Integer Immediate: materializes the constant then stores it (sw);
// costlier than storeI because of the extra constant load.
6192 instruct storeImmI(memory mem, immI src) %{
6193 match(Set mem (StoreI mem src));
6195 ins_cost(150);
6196 format %{ "mov $mem, $src #@storeImmI" %}
6197 ins_encode(store_I_immI_enc(mem, src));
6198 ins_pipe( ialu_storeI );
6199 %}
6201 // Store Integer
// Store Integer: 32-bit store (sw) of a GP register.
6202 instruct storeI(memory mem, mRegI src) %{
6203 match(Set mem (StoreI mem src));
6205 ins_cost(125);
// Format now lists the source register first ("sw rt, mem"), matching real
// MIPS store syntax and the sibling storeB/storeP rules.
6206 format %{ "sw $src, $mem #@storeI" %}
6207 ins_encode(store_I_reg_enc(mem, src));
6208 ins_pipe( ialu_storeI );
6209 %}
// Fused StoreI(ConvL2I ...): sw writes only the low 32 bits, so the explicit
// long-to-int truncation costs nothing.
6211 instruct storeI_convL2I(memory mem, mRegL src) %{
6212 match(Set mem (StoreI mem (ConvL2I src)));
6214 ins_cost(125);
// Operand order fixed to MIPS store syntax (source register first).
6215 format %{ "sw $src, $mem #@storeI_convL2I" %}
6216 ins_encode(store_I_reg_enc(mem, src));
6217 ins_pipe( ialu_storeI );
6218 %}
6220 // Load Float
// Load Float: single-precision load into an FP register.
6221 instruct loadF(regF dst, memory mem) %{
6222 match(Set dst (LoadF mem));
6224 ins_cost(150);
6225 format %{ "loadF $dst, $mem #@loadF" %}
6226 ins_encode(load_F_enc(dst, mem));
6227 ins_pipe( ialu_loadI );
6228 %}
// Load pointer constant. Oop/metadata constants must be emitted via li48 under
// a relocation record so GC / class redefinition can patch the embedded value;
// everything else is materialized directly with set64.
6230 instruct loadConP_general(mRegP dst, immP src) %{
6231 match(Set dst src);
6233 ins_cost(120);
6234 format %{ "li $dst, $src #@loadConP_general" %}
6236 ins_encode %{
6237 Register dst = $dst$$Register;
6238 long* value = (long*)$src$$constant;
6239 bool is_need_reloc = $src->constant_reloc() != relocInfo::none;
6241 /* During GC, klassOop may be moved to new position in the heap.
6242 * It must be relocated.
6243 * Refer: [c1_LIRAssembler_mips.cpp] jobject2reg()
6244 */
6245 if (is_need_reloc) {
6246 if($src->constant_reloc() == relocInfo::metadata_type){
6247 int klass_index = __ oop_recorder()->find_index((Klass*)value);
6248 RelocationHolder rspec = metadata_Relocation::spec(klass_index);
// li48 emits a fixed-length 48-bit load so the relocation can patch it in place.
6250 __ relocate(rspec);
6251 __ li48(dst, (long)value);
6252 }
6254 if($src->constant_reloc() == relocInfo::oop_type){
6255 int oop_index = __ oop_recorder()->find_index((jobject)value);
6256 RelocationHolder rspec = oop_Relocation::spec(oop_index);
6258 __ relocate(rspec);
6259 __ li48(dst, (long)value);
6260 }
// NOTE(review): if constant_reloc() is some third reloc type, neither branch
// above fires and no instruction is emitted — confirm this cannot happen here.
6261 } else {
6262 __ set64(dst, (long)value);
6263 }
6264 %}
6266 ins_pipe( ialu_regI_regI );
6267 %}
6269 /*
6270 instruct loadConP_load(mRegP dst, immP_load src) %{
6271 match(Set dst src);
6273 ins_cost(100);
6274 format %{ "ld $dst, [$constanttablebase + $constantoffset] load from constant table: ptr=$src @ loadConP_load" %}
6276 ins_encode %{
6278 int con_offset = $constantoffset($src);
6280 if (Assembler::is_simm16(con_offset)) {
6281 __ ld($dst$$Register, $constanttablebase, con_offset);
6282 } else {
6283 __ set64(AT, con_offset);
6284 if (UseLoongsonISA) {
6285 __ gsldx($dst$$Register, $constanttablebase, AT, 0);
6286 } else {
6287 __ daddu(AT, $constanttablebase, AT);
6288 __ ld($dst$$Register, AT, 0);
6289 }
6290 }
6291 %}
6293 ins_pipe(ialu_loadI);
6294 %}
6295 */
// Load a non-oop pointer constant that needs no relocation: plain set64,
// cheaper than loadConP_general (cost 80 vs 120).
6297 instruct loadConP_no_oop_cheap(mRegP dst, immP_no_oop_cheap src) %{
6298 match(Set dst src);
6300 ins_cost(80);
6301 format %{ "li $dst, $src @ loadConP_no_oop_cheap" %}
6303 ins_encode %{
6304 __ set64($dst$$Register, $src$$constant);
6305 %}
6307 ins_pipe(ialu_regI_regI);
6308 %}
// Load the safepoint polling-page address; lowest cost so it wins selection
// over the general pointer-constant rules for this operand type.
6311 instruct loadConP_poll(mRegP dst, immP_poll src) %{
6312 match(Set dst src);
6314 ins_cost(50);
6315 format %{ "li $dst, $src #@loadConP_poll" %}
6317 ins_encode %{
6318 Register dst = $dst$$Register;
6319 intptr_t value = (intptr_t)$src$$constant;
6321 __ set64(dst, (jlong)value);
6322 %}
6324 ins_pipe( ialu_regI_regI );
6325 %}
// Load NULL pointer constant: daddu dst, R0, R0 — no immediate required.
6327 instruct loadConP0(mRegP dst, immP0 src)
6328 %{
6329 match(Set dst src);
6331 ins_cost(50);
6332 format %{ "mov $dst, R0\t# ptr" %}
6333 ins_encode %{
6334 Register dst_reg = $dst$$Register;
6335 __ daddu(dst_reg, R0, R0);
6336 %}
6337 ins_pipe( ialu_regI_regI );
6338 %}
// Load compressed NULL: narrow NULL always encodes as 0, so a move from R0 suffices.
6340 instruct loadConN0(mRegN dst, immN0 src) %{
6341 match(Set dst src);
6342 format %{ "move $dst, R0\t# compressed NULL ptr" %}
6343 ins_encode %{
6344 __ move($dst$$Register, R0);
6345 %}
6346 ins_pipe( ialu_regI_regI );
6347 %}
// Load compressed (narrow) oop constant under an oop relocation.
6349 instruct loadConN(mRegN dst, immN src) %{
6350 match(Set dst src);
6352 ins_cost(125);
6353 format %{ "li $dst, $src\t# compressed ptr @ loadConN" %}
6354 ins_encode %{
6355 address con = (address)$src$$constant;
6356 if (con == NULL) {
// Narrow NULL is handled by loadConN0; reaching here with NULL is a matcher bug.
6357 ShouldNotReachHere();
6358 } else {
6359 assert (UseCompressedOops, "should only be used for compressed headers");
6360 assert (Universe::heap() != NULL, "java heap should be initialized");
6361 assert (__ oop_recorder() != NULL, "this assembler needs an OopRecorder");
6363 Register dst = $dst$$Register;
6364 long* value = (long*)$src$$constant;
6365 int oop_index = __ oop_recorder()->find_index((jobject)value);
6366 RelocationHolder rspec = oop_Relocation::spec(oop_index);
6367 if(rspec.type()!=relocInfo::none){
6368 __ relocate(rspec, Assembler::narrow_oop_operand);
// NOTE(review): oop_index is loaded as a placeholder — presumably the
// narrow_oop relocation patches in the real narrow oop value; confirm.
6369 __ li48(dst, oop_index);
6370 } else {
6371 __ set64(dst, oop_index);
6372 }
6373 }
6374 %}
6375 ins_pipe( ialu_regI_regI ); // XXX
6376 %}
// Load compressed klass-pointer constant under a metadata relocation.
6378 instruct loadConNKlass(mRegN dst, immNKlass src) %{
6379 match(Set dst src);
6381 ins_cost(125);
6382 format %{ "li $dst, $src\t# compressed klass ptr @ loadConNKlass" %}
6383 ins_encode %{
6384 address con = (address)$src$$constant;
6385 if (con == NULL) {
6386 ShouldNotReachHere();
6387 } else {
6388 Register dst = $dst$$Register;
6389 long* value = (long*)$src$$constant;
6391 int klass_index = __ oop_recorder()->find_index((Klass*)value);
6392 RelocationHolder rspec = metadata_Relocation::spec(klass_index);
6393 long narrowp = (long)Klass::encode_klass((Klass*)value);
6395 if(rspec.type()!=relocInfo::none){
// NOTE(review): narrow_oop_operand is passed for a *metadata* relocation —
// verify this is intentional and not a copy/paste from loadConN.
6396 __ relocate(rspec, Assembler::narrow_oop_operand);
6397 __ li48(dst, narrowp);
6398 } else {
6399 __ set64(dst, narrowp);
6400 }
6401 }
6402 %}
6403 ins_pipe( ialu_regI_regI ); // XXX
6404 %}
6406 //FIXME
6407 // Tail Call; Jump from runtime stub to Java code.
6408 // Also known as an 'interprocedural jump'.
6409 // Target of jump will eventually return to caller.
6410 // TailJump below removes the return address.
// Tail call: jump (not call) from a runtime stub into Java code. Pushes RA so
// generate_forward_exception() can find the return address, parks the method
// oop in S3, then jumps; the trailing nop fills the branch delay slot.
6411 instruct TailCalljmpInd(mRegP jump_target, mRegP method_oop) %{
6412 match(TailCall jump_target method_oop );
6413 ins_cost(300);
6414 format %{ "JMP $jump_target \t# @TailCalljmpInd" %}
6416 ins_encode %{
6417 Register target = $jump_target$$Register;
6418 Register oop = $method_oop$$Register;
6420 /* 2012/10/12 Jin: RA will be used in generate_forward_exception() */
6421 __ push(RA);
6423 __ move(S3, oop);
6424 __ jr(target);
6425 __ nop();
6426 %}
6428 ins_pipe( pipe_jump );
6429 %}
6431 // Create exception oop: created by stack-crawling runtime code.
6432 // Created exception is now available to this handler, and is setup
6433 // just prior to jumping to this handler. No code emitted.
// Create exception oop: the stack-crawling runtime has already placed the
// exception oop in A0, so this rule emits no machine code — only a comment
// marker in the code buffer for debugging.
6434 instruct CreateException( a0_RegP ex_oop )
6435 %{
6436 match(Set ex_oop (CreateEx));
6438 // use the following format syntax
6439 format %{ "# exception oop is in A0; no code emitted @CreateException" %}
6440 ins_encode %{
6441 /* Jin: X86 leaves this function empty */
6442 __ block_comment("CreateException is empty in X86/MIPS");
6443 %}
6444 ins_pipe( empty );
6445 // ins_pipe( pipe_jump );
6446 %}
6449 /* 2012/9/14 Jin: The mechanism of exception handling is clear now.
6451 - Common try/catch:
6452 2012/9/14 Jin: [stubGenerator_mips.cpp] generate_forward_exception()
6453 |- V0, V1 are created
6454 |- T9 <= SharedRuntime::exception_handler_for_return_address
6455 `- jr T9
6456 `- the caller's exception_handler
6457 `- jr OptoRuntime::exception_blob
6458 `- here
6459 - Rethrow(e.g. 'unwind'):
6460 * The callee:
6461 |- an exception is triggered during execution
6462 `- exits the callee method through RethrowException node
6463 |- The callee pushes exception_oop(T0) and exception_pc(RA)
6464 `- The callee jumps to OptoRuntime::rethrow_stub()
6465 * In OptoRuntime::rethrow_stub:
6466 |- The VM calls _rethrow_Java to determine the return address in the caller method
6467 `- exits the stub with tailjmpInd
6468 |- pops exception_oop(V0) and exception_pc(V1)
6469 `- jumps to the return address(usually an exception_handler)
6470 * The caller:
6471 `- continues processing the exception_blob with V0/V1
6472 */
6474 /*
6475 Disassembling OptoRuntime::rethrow_stub()
6477 ; locals
6478 0x2d3bf320: addiu sp, sp, 0xfffffff8
6479 0x2d3bf324: sw ra, 0x4(sp)
6480 0x2d3bf328: sw fp, 0x0(sp)
6481 0x2d3bf32c: addu fp, sp, zero
6482 0x2d3bf330: addiu sp, sp, 0xfffffff0
6483 0x2d3bf334: sw ra, 0x8(sp)
6484 0x2d3bf338: sw t0, 0x4(sp)
6485 0x2d3bf33c: sw sp, 0x0(sp)
6487 ; get_thread(S2)
6488 0x2d3bf340: addu s2, sp, zero
6489 0x2d3bf344: srl s2, s2, 12
6490 0x2d3bf348: sll s2, s2, 2
6491 0x2d3bf34c: lui at, 0x2c85
6492 0x2d3bf350: addu at, at, s2
6493 0x2d3bf354: lw s2, 0xffffcc80(at)
6495 0x2d3bf358: lw s0, 0x0(sp)
6496 0x2d3bf35c: sw s0, 0x118(s2) // last_sp -> thread
6497 0x2d3bf360: sw s2, 0xc(sp)
6499 ; OptoRuntime::rethrow_C(oopDesc* exception, JavaThread* thread, address ret_pc)
6500 0x2d3bf364: lw a0, 0x4(sp)
6501 0x2d3bf368: lw a1, 0xc(sp)
6502 0x2d3bf36c: lw a2, 0x8(sp)
6503 ;; Java_To_Runtime
6504 0x2d3bf370: lui t9, 0x2c34
6505 0x2d3bf374: addiu t9, t9, 0xffff8a48
6506 0x2d3bf378: jalr t9
6507 0x2d3bf37c: nop
6509 0x2d3bf380: addu s3, v0, zero ; S3: SharedRuntime::raw_exception_handler_for_return_address()
6511 0x2d3bf384: lw s0, 0xc(sp)
6512 0x2d3bf388: sw zero, 0x118(s0)
6513 0x2d3bf38c: sw zero, 0x11c(s0)
6514 0x2d3bf390: lw s1, 0x144(s0) ; ex_oop: S1
6515 0x2d3bf394: addu s2, s0, zero
6516 0x2d3bf398: sw zero, 0x144(s2)
6517 0x2d3bf39c: lw s0, 0x4(s2)
6518 0x2d3bf3a0: addiu s4, zero, 0x0
6519 0x2d3bf3a4: bne s0, s4, 0x2d3bf3d4
6520 0x2d3bf3a8: nop
6521 0x2d3bf3ac: addiu sp, sp, 0x10
6522 0x2d3bf3b0: addiu sp, sp, 0x8
6523 0x2d3bf3b4: lw ra, 0xfffffffc(sp)
6524 0x2d3bf3b8: lw fp, 0xfffffff8(sp)
6525 0x2d3bf3bc: lui at, 0x2b48
6526 0x2d3bf3c0: lw at, 0x100(at)
6528 ; tailjmpInd: Restores exception_oop & exception_pc
6529 0x2d3bf3c4: addu v1, ra, zero
6530 0x2d3bf3c8: addu v0, s1, zero
6531 0x2d3bf3cc: jr s3
6532 0x2d3bf3d0: nop
6533 ; Exception:
6534 0x2d3bf3d4: lui s1, 0x2cc8 ; generate_forward_exception()
6535 0x2d3bf3d8: addiu s1, s1, 0x40
6536 0x2d3bf3dc: addiu s2, zero, 0x0
6537 0x2d3bf3e0: addiu sp, sp, 0x10
6538 0x2d3bf3e4: addiu sp, sp, 0x8
6539 0x2d3bf3e8: lw ra, 0xfffffffc(sp)
6540 0x2d3bf3ec: lw fp, 0xfffffff8(sp)
6541 0x2d3bf3f0: lui at, 0x2b48
6542 0x2d3bf3f4: lw at, 0x100(at)
6543 ; TailCalljmpInd
6544 __ push(RA); ; to be used in generate_forward_exception()
6545 0x2d3bf3f8: addu t7, s2, zero
6546 0x2d3bf3fc: jr s1
6547 0x2d3bf400: nop
6548 */
6549 // Rethrow exception:
6550 // The exception oop will come in the first argument position.
6551 // Then JUMP (not call) to the rethrow stub code.
// Rethrow exception: the exception oop arrives in the first argument position;
// JUMP (not call) to OptoRuntime::rethrow_stub under a runtime-call relocation.
6552 instruct RethrowException()
6553 %{
6554 match(Rethrow);
6556 // use the following format syntax
6557 format %{ "JMP rethrow_stub #@RethrowException" %}
6558 ins_encode %{
6559 __ block_comment("@ RethrowException");
// Mark the jump site so the runtime-call relocation covers the li/jr sequence.
6561 cbuf.set_insts_mark();
6562 cbuf.relocate(cbuf.insts_mark(), runtime_call_Relocation::spec());
6564 // call OptoRuntime::rethrow_stub to get the exception handler in parent method
6565 __ li(T9, OptoRuntime::rethrow_stub());
6566 __ jr(T9);
6567 __ nop();
6568 %}
6569 ins_pipe( pipe_jump );
6570 %}
// Conditional branch: pointer vs NULL. Only eq/ne are emitted — the unsigned
// ordered cases are commented out (ordering against NULL is only eq/ne here).
// The trailing nop fills the MIPS branch delay slot.
// NOTE(review): `&L` takes the address of a C++ reference and is always
// non-null, so every `else` arm below is dead code — confirm before removing.
6572 instruct branchConP_zero(cmpOpU cmp, mRegP op1, immP0 zero, label labl) %{
6573 match(If cmp (CmpP op1 zero));
6574 effect(USE labl);
6576 ins_cost(180);
6577 format %{ "b$cmp $op1, R0, $labl #@branchConP_zero" %}
6579 ins_encode %{
6580 Register op1 = $op1$$Register;
6581 Register op2 = R0;
6582 Label &L = *($labl$$label);
6583 int flag = $cmp$$cmpcode;
6585 switch(flag)
6586 {
6587 case 0x01: //equal
6588 if (&L)
6589 __ beq(op1, op2, L);
6590 else
6591 __ beq(op1, op2, (int)0);
6592 break;
6593 case 0x02: //not_equal
6594 if (&L)
6595 __ bne(op1, op2, L);
6596 else
6597 __ bne(op1, op2, (int)0);
6598 break;
6599 /*
6600 case 0x03: //above
6601 __ sltu(AT, op2, op1);
6602 if(&L)
6603 __ bne(R0, AT, L);
6604 else
6605 __ bne(R0, AT, (int)0);
6606 break;
6607 case 0x04: //above_equal
6608 __ sltu(AT, op1, op2);
6609 if(&L)
6610 __ beq(AT, R0, L);
6611 else
6612 __ beq(AT, R0, (int)0);
6613 break;
6614 case 0x05: //below
6615 __ sltu(AT, op1, op2);
6616 if(&L)
6617 __ bne(R0, AT, L);
6618 else
6619 __ bne(R0, AT, (int)0);
6620 break;
6621 case 0x06: //below_equal
6622 __ sltu(AT, op2, op1);
6623 if(&L)
6624 __ beq(AT, R0, L);
6625 else
6626 __ beq(AT, R0, (int)0);
6627 break;
6628 */
6629 default:
6630 Unimplemented();
6631 }
6632 __ nop();
6633 %}
6635 ins_pc_relative(1);
6636 ins_pipe( pipe_alu_branch );
6637 %}
// Conditional branch: pointer vs pointer (unsigned). Ordered cases compute the
// condition into AT with sltu, then branch on AT; nop fills the delay slot.
// NOTE(review): `&L` is the address of a reference and is always non-null, so
// the `else` arms are dead code — confirm before removing.
6640 instruct branchConP(cmpOpU cmp, mRegP op1, mRegP op2, label labl) %{
6641 match(If cmp (CmpP op1 op2));
6642 // predicate(can_branch_register(_kids[0]->_leaf, _kids[1]->_leaf));
6643 effect(USE labl);
6645 ins_cost(200);
6646 format %{ "b$cmp $op1, $op2, $labl #@branchConP" %}
6648 ins_encode %{
6649 Register op1 = $op1$$Register;
6650 Register op2 = $op2$$Register;
6651 Label &L = *($labl$$label);
6652 int flag = $cmp$$cmpcode;
6654 switch(flag)
6655 {
6656 case 0x01: //equal
6657 if (&L)
6658 __ beq(op1, op2, L);
6659 else
6660 __ beq(op1, op2, (int)0);
6661 break;
6662 case 0x02: //not_equal
6663 if (&L)
6664 __ bne(op1, op2, L);
6665 else
6666 __ bne(op1, op2, (int)0);
6667 break;
6668 case 0x03: //above
6669 __ sltu(AT, op2, op1);
6670 if(&L)
6671 __ bne(R0, AT, L);
6672 else
6673 __ bne(R0, AT, (int)0);
6674 break;
6675 case 0x04: //above_equal
6676 __ sltu(AT, op1, op2);
6677 if(&L)
6678 __ beq(AT, R0, L);
6679 else
6680 __ beq(AT, R0, (int)0);
6681 break;
6682 case 0x05: //below
6683 __ sltu(AT, op1, op2);
6684 if(&L)
6685 __ bne(R0, AT, L);
6686 else
6687 __ bne(R0, AT, (int)0);
6688 break;
6689 case 0x06: //below_equal
6690 __ sltu(AT, op2, op1);
6691 if(&L)
6692 __ beq(AT, R0, L);
6693 else
6694 __ beq(AT, R0, (int)0);
6695 break;
6696 default:
6697 Unimplemented();
6698 }
6699 __ nop();
6700 %}
6702 ins_pc_relative(1);
6703 ins_pipe( pipe_alu_branch );
6704 %}
// Conditional branch: compressed pointer vs narrow NULL; only eq/ne supported.
// NOTE(review): `&L` is always non-null (address of a reference), so the
// `else` arms never execute — confirm before removing.
6706 instruct cmpN_null_branch(cmpOp cmp, mRegN op1, immN0 null, label labl) %{
6707 match(If cmp (CmpN op1 null));
6708 effect(USE labl);
6710 ins_cost(180);
6711 format %{ "CMP $op1,0\t! compressed ptr\n\t"
6712 "BP$cmp $labl @ cmpN_null_branch" %}
6713 ins_encode %{
6714 Register op1 = $op1$$Register;
6715 Register op2 = R0;
6716 Label &L = *($labl$$label);
6717 int flag = $cmp$$cmpcode;
6719 switch(flag)
6720 {
6721 case 0x01: //equal
6722 if (&L)
6723 __ beq(op1, op2, L);
6724 else
6725 __ beq(op1, op2, (int)0);
6726 break;
6727 case 0x02: //not_equal
6728 if (&L)
6729 __ bne(op1, op2, L);
6730 else
6731 __ bne(op1, op2, (int)0);
6732 break;
6733 default:
6734 Unimplemented();
6735 }
6736 __ nop();
6737 %}
6738 //TODO: pipe_branchP or create pipe_branchN LEE
6739 ins_pc_relative(1);
6740 ins_pipe( pipe_alu_branch );
6741 %}
// Conditional branch: compressed pointer vs compressed pointer. Ordered cases
// use unsigned sltu (narrow oops compare unsigned) into AT, then branch on AT.
// NOTE(review): `&L` is always non-null; the `else` arms are dead code.
6743 instruct cmpN_reg_branch(cmpOp cmp, mRegN op1, mRegN op2, label labl) %{
6744 match(If cmp (CmpN op1 op2));
6745 effect(USE labl);
6747 ins_cost(180);
6748 format %{ "CMP $op1,$op2\t! compressed ptr\n\t"
6749 "BP$cmp $labl" %}
6750 ins_encode %{
6751 Register op1_reg = $op1$$Register;
6752 Register op2_reg = $op2$$Register;
6753 Label &L = *($labl$$label);
6754 int flag = $cmp$$cmpcode;
6756 switch(flag)
6757 {
6758 case 0x01: //equal
6759 if (&L)
6760 __ beq(op1_reg, op2_reg, L);
6761 else
6762 __ beq(op1_reg, op2_reg, (int)0);
6763 break;
6764 case 0x02: //not_equal
6765 if (&L)
6766 __ bne(op1_reg, op2_reg, L);
6767 else
6768 __ bne(op1_reg, op2_reg, (int)0);
6769 break;
6770 case 0x03: //above
6771 __ sltu(AT, op2_reg, op1_reg);
6772 if(&L)
6773 __ bne(R0, AT, L);
6774 else
6775 __ bne(R0, AT, (int)0);
6776 break;
6777 case 0x04: //above_equal
6778 __ sltu(AT, op1_reg, op2_reg);
6779 if(&L)
6780 __ beq(AT, R0, L);
6781 else
6782 __ beq(AT, R0, (int)0);
6783 break;
6784 case 0x05: //below
6785 __ sltu(AT, op1_reg, op2_reg);
6786 if(&L)
6787 __ bne(R0, AT, L);
6788 else
6789 __ bne(R0, AT, (int)0);
6790 break;
6791 case 0x06: //below_equal
6792 __ sltu(AT, op2_reg, op1_reg);
6793 if(&L)
6794 __ beq(AT, R0, L);
6795 else
6796 __ beq(AT, R0, (int)0);
6797 break;
6798 default:
6799 Unimplemented();
6800 }
6801 __ nop();
6802 %}
6803 ins_pc_relative(1);
6804 ins_pipe( pipe_alu_branch );
6805 %}
// Conditional branch: unsigned int compare, register vs register.
// Ordered cases compute the predicate into AT with sltu, then branch on AT.
// NOTE(review): `&L` is always non-null; the `else` arms are dead code.
6807 instruct branchConIU_reg_reg(cmpOpU cmp, mRegI src1, mRegI src2, label labl) %{
6808 match( If cmp (CmpU src1 src2) );
6809 effect(USE labl);
6810 format %{ "BR$cmp $src1, $src2, $labl #@branchConIU_reg_reg" %}
6812 ins_encode %{
6813 Register op1 = $src1$$Register;
6814 Register op2 = $src2$$Register;
6815 Label &L = *($labl$$label);
6816 int flag = $cmp$$cmpcode;
6818 switch(flag)
6819 {
6820 case 0x01: //equal
6821 if (&L)
6822 __ beq(op1, op2, L);
6823 else
6824 __ beq(op1, op2, (int)0);
6825 break;
6826 case 0x02: //not_equal
6827 if (&L)
6828 __ bne(op1, op2, L);
6829 else
6830 __ bne(op1, op2, (int)0);
6831 break;
6832 case 0x03: //above
6833 __ sltu(AT, op2, op1);
6834 if(&L)
6835 __ bne(AT, R0, L);
6836 else
6837 __ bne(AT, R0, (int)0);
6838 break;
6839 case 0x04: //above_equal
6840 __ sltu(AT, op1, op2);
6841 if(&L)
6842 __ beq(AT, R0, L);
6843 else
6844 __ beq(AT, R0, (int)0);
6845 break;
6846 case 0x05: //below
6847 __ sltu(AT, op1, op2);
6848 if(&L)
6849 __ bne(AT, R0, L);
6850 else
6851 __ bne(AT, R0, (int)0);
6852 break;
6853 case 0x06: //below_equal
6854 __ sltu(AT, op2, op1);
6855 if(&L)
6856 __ beq(AT, R0, L);
6857 else
6858 __ beq(AT, R0, (int)0);
6859 break;
6860 default:
6861 Unimplemented();
6862 }
6863 __ nop();
6864 %}
6866 ins_pc_relative(1);
6867 ins_pipe( pipe_alu_branch );
6868 %}
// Conditional branch: unsigned int compare, register vs arbitrary immediate.
// The immediate is first materialized into AT; AT is then safely overwritten
// by sltu in the ordered cases because sltu reads AT before writing it.
// NOTE(review): `&L` is always non-null; the `else` arms are dead code.
6871 instruct branchConIU_reg_imm(cmpOpU cmp, mRegI src1, immI src2, label labl) %{
6872 match( If cmp (CmpU src1 src2) );
6873 effect(USE labl);
6874 format %{ "BR$cmp $src1, $src2, $labl #@branchConIU_reg_imm" %}
6876 ins_encode %{
6877 Register op1 = $src1$$Register;
6878 int val = $src2$$constant;
6879 Label &L = *($labl$$label);
6880 int flag = $cmp$$cmpcode;
6882 __ move(AT, val);
6883 switch(flag)
6884 {
6885 case 0x01: //equal
6886 if (&L)
6887 __ beq(op1, AT, L);
6888 else
6889 __ beq(op1, AT, (int)0);
6890 break;
6891 case 0x02: //not_equal
6892 if (&L)
6893 __ bne(op1, AT, L);
6894 else
6895 __ bne(op1, AT, (int)0);
6896 break;
6897 case 0x03: //above
6898 __ sltu(AT, AT, op1);
6899 if(&L)
6900 __ bne(R0, AT, L);
6901 else
6902 __ bne(R0, AT, (int)0);
6903 break;
6904 case 0x04: //above_equal
6905 __ sltu(AT, op1, AT);
6906 if(&L)
6907 __ beq(AT, R0, L);
6908 else
6909 __ beq(AT, R0, (int)0);
6910 break;
6911 case 0x05: //below
6912 __ sltu(AT, op1, AT);
6913 if(&L)
6914 __ bne(R0, AT, L);
6915 else
6916 __ bne(R0, AT, (int)0);
6917 break;
6918 case 0x06: //below_equal
6919 __ sltu(AT, AT, op1);
6920 if(&L)
6921 __ beq(AT, R0, L);
6922 else
6923 __ beq(AT, R0, (int)0);
6924 break;
6925 default:
6926 Unimplemented();
6927 }
6928 __ nop();
6929 %}
6931 ins_pc_relative(1);
6932 ins_pipe( pipe_alu_branch );
6933 %}
// Conditional branch: signed int compare, register vs register (slt into AT).
// NOTE(review): `&L` is always non-null; the `else` arms are dead code.
6935 instruct branchConI_reg_reg(cmpOp cmp, mRegI src1, mRegI src2, label labl) %{
6936 match( If cmp (CmpI src1 src2) );
6937 effect(USE labl);
6938 format %{ "BR$cmp $src1, $src2, $labl #@branchConI_reg_reg" %}
6940 ins_encode %{
6941 Register op1 = $src1$$Register;
6942 Register op2 = $src2$$Register;
6943 Label &L = *($labl$$label);
6944 int flag = $cmp$$cmpcode;
6946 switch(flag)
6947 {
6948 case 0x01: //equal
6949 if (&L)
6950 __ beq(op1, op2, L);
6951 else
6952 __ beq(op1, op2, (int)0);
6953 break;
6954 case 0x02: //not_equal
6955 if (&L)
6956 __ bne(op1, op2, L);
6957 else
6958 __ bne(op1, op2, (int)0);
6959 break;
6960 case 0x03: //above
6961 __ slt(AT, op2, op1);
6962 if(&L)
6963 __ bne(R0, AT, L);
6964 else
6965 __ bne(R0, AT, (int)0);
6966 break;
6967 case 0x04: //above_equal
6968 __ slt(AT, op1, op2);
6969 if(&L)
6970 __ beq(AT, R0, L);
6971 else
6972 __ beq(AT, R0, (int)0);
6973 break;
6974 case 0x05: //below
6975 __ slt(AT, op1, op2);
6976 if(&L)
6977 __ bne(R0, AT, L);
6978 else
6979 __ bne(R0, AT, (int)0);
6980 break;
6981 case 0x06: //below_equal
6982 __ slt(AT, op2, op1);
6983 if(&L)
6984 __ beq(AT, R0, L);
6985 else
6986 __ beq(AT, R0, (int)0);
6987 break;
6988 default:
6989 Unimplemented();
6990 }
6991 __ nop();
6992 %}
6994 ins_pc_relative(1);
6995 ins_pipe( pipe_alu_branch );
6996 %}
// Conditional branch: signed int vs zero. Uses the dedicated compare-with-zero
// branch forms (bgtz/bgez/bltz/blez), so no slt or immediate load is needed.
// NOTE(review): `&L` is always non-null; the `else` arms are dead code.
6998 instruct branchConI_reg_imm0(cmpOp cmp, mRegI src1, immI0 src2, label labl) %{
6999 match( If cmp (CmpI src1 src2) );
7000 effect(USE labl);
7001 ins_cost(170);
7002 format %{ "BR$cmp $src1, $src2, $labl #@branchConI_reg_imm0" %}
7004 ins_encode %{
7005 Register op1 = $src1$$Register;
7006 // int val = $src2$$constant;
7007 Label &L = *($labl$$label);
7008 int flag = $cmp$$cmpcode;
7010 //__ move(AT, val);
7011 switch(flag)
7012 {
7013 case 0x01: //equal
7014 if (&L)
7015 __ beq(op1, R0, L);
7016 else
7017 __ beq(op1, R0, (int)0);
7018 break;
7019 case 0x02: //not_equal
7020 if (&L)
7021 __ bne(op1, R0, L);
7022 else
7023 __ bne(op1, R0, (int)0);
7024 break;
7025 case 0x03: //greater
7026 if(&L)
7027 __ bgtz(op1, L);
7028 else
7029 __ bgtz(op1, (int)0);
7030 break;
7031 case 0x04: //greater_equal
7032 if(&L)
7033 __ bgez(op1, L);
7034 else
7035 __ bgez(op1, (int)0);
7036 break;
7037 case 0x05: //less
7038 if(&L)
7039 __ bltz(op1, L);
7040 else
7041 __ bltz(op1, (int)0);
7042 break;
7043 case 0x06: //less_equal
7044 if(&L)
7045 __ blez(op1, L);
7046 else
7047 __ blez(op1, (int)0);
7048 break;
7049 default:
7050 Unimplemented();
7051 }
7052 __ nop();
7053 %}
7055 ins_pc_relative(1);
7056 ins_pipe( pipe_alu_branch );
7057 %}
// Conditional branch: signed int vs arbitrary immediate. The immediate goes
// into AT; ordered cases then overwrite AT with the slt result (slt reads AT
// before writing, so the reuse is safe).
// NOTE(review): `&L` is always non-null; the `else` arms are dead code.
7060 instruct branchConI_reg_imm(cmpOp cmp, mRegI src1, immI src2, label labl) %{
7061 match( If cmp (CmpI src1 src2) );
7062 effect(USE labl);
7063 ins_cost(200);
7064 format %{ "BR$cmp $src1, $src2, $labl #@branchConI_reg_imm" %}
7066 ins_encode %{
7067 Register op1 = $src1$$Register;
7068 int val = $src2$$constant;
7069 Label &L = *($labl$$label);
7070 int flag = $cmp$$cmpcode;
7072 __ move(AT, val);
7073 switch(flag)
7074 {
7075 case 0x01: //equal
7076 if (&L)
7077 __ beq(op1, AT, L);
7078 else
7079 __ beq(op1, AT, (int)0);
7080 break;
7081 case 0x02: //not_equal
7082 if (&L)
7083 __ bne(op1, AT, L);
7084 else
7085 __ bne(op1, AT, (int)0);
7086 break;
7087 case 0x03: //greater
7088 __ slt(AT, AT, op1);
7089 if(&L)
7090 __ bne(R0, AT, L);
7091 else
7092 __ bne(R0, AT, (int)0);
7093 break;
7094 case 0x04: //greater_equal
7095 __ slt(AT, op1, AT);
7096 if(&L)
7097 __ beq(AT, R0, L);
7098 else
7099 __ beq(AT, R0, (int)0);
7100 break;
7101 case 0x05: //less
7102 __ slt(AT, op1, AT);
7103 if(&L)
7104 __ bne(R0, AT, L);
7105 else
7106 __ bne(R0, AT, (int)0);
7107 break;
7108 case 0x06: //less_equal
7109 __ slt(AT, AT, op1);
7110 if(&L)
7111 __ beq(AT, R0, L);
7112 else
7113 __ beq(AT, R0, (int)0);
7114 break;
7115 default:
7116 Unimplemented();
7117 }
7118 __ nop();
7119 %}
7121 ins_pc_relative(1);
7122 ins_pipe( pipe_alu_branch );
7123 %}
// branchConIU_reg_imm0: unsigned int compare-and-branch against constant zero.
// The unsigned relations against 0 collapse to trivial forms:
//   above (x > 0u)        <=> x != 0    -> bne
//   above_equal (x >= 0u) is always true -> unconditional beq(R0, R0, ...)
//   below (x < 0u)        is never true  -> nothing is emitted
// NOTE(review): '&L' is the address of a reference and always non-null, so the
// 'else' arms are dead. In case 0x05 the bare 'return' leaves ins_encode
// early and also skips the trailing delay-slot __ nop() (harmless here since
// no branch was emitted).
7125 instruct branchConIU_reg_imm0(cmpOpU cmp, mRegI src1, immI0 zero, label labl) %{
7126 match( If cmp (CmpU src1 zero) );
7127 effect(USE labl);
7128 format %{ "BR$cmp $src1, zero, $labl #@branchConIU_reg_imm0" %}
7130 ins_encode %{
7131 Register op1 = $src1$$Register;
7132 Label &L = *($labl$$label);
7133 int flag = $cmp$$cmpcode;
7135 switch(flag)
7136 {
7137 case 0x01: //equal
7138 if (&L)
7139 __ beq(op1, R0, L);
7140 else
7141 __ beq(op1, R0, (int)0);
7142 break;
7143 case 0x02: //not_equal
7144 if (&L)
7145 __ bne(op1, R0, L);
7146 else
7147 __ bne(op1, R0, (int)0);
7148 break;
7149 case 0x03: //above
// Unsigned x > 0 is simply x != 0.
7150 if(&L)
7151 __ bne(R0, op1, L);
7152 else
7153 __ bne(R0, op1, (int)0);
7154 break;
7155 case 0x04: //above_equal
// Unsigned x >= 0 always holds: emit an unconditional branch.
7156 if(&L)
7157 __ beq(R0, R0, L);
7158 else
7159 __ beq(R0, R0, (int)0);
7160 break;
7161 case 0x05: //below
// Unsigned x < 0 can never hold: emit nothing (early return).
7162 return;
7163 break;
7164 case 0x06: //below_equal
// Unsigned x <= 0 is x == 0.
7165 if(&L)
7166 __ beq(op1, R0, L);
7167 else
7168 __ beq(op1, R0, (int)0);
7169 break;
7170 default:
7171 Unimplemented();
7172 }
7173 __ nop();
7174 %}
7176 ins_pc_relative(1);
7177 ins_pipe( pipe_alu_branch );
7178 %}
// branchConIU_reg_immI16: unsigned int compare-and-branch against a 16-bit
// immediate. above_equal/below use sltiu directly (immI16 guarantees the
// constant fits the 16-bit immediate field); the other relations materialize
// the constant into AT first. NOTE(review): '&L' is the address of a
// reference and always non-null, so the 'else' arms are dead code.
// The trailing __ nop() fills the branch delay slot.
7181 instruct branchConIU_reg_immI16(cmpOpU cmp, mRegI src1, immI16 src2, label labl) %{
7182 match( If cmp (CmpU src1 src2) );
7183 effect(USE labl);
7184 ins_cost(180);
7185 format %{ "BR$cmp $src1, $src2, $labl #@branchConIU_reg_immI16" %}
7187 ins_encode %{
7188 Register op1 = $src1$$Register;
7189 int val = $src2$$constant;
7190 Label &L = *($labl$$label);
7191 int flag = $cmp$$cmpcode;
7193 switch(flag)
7194 {
7195 case 0x01: //equal
7196 __ move(AT, val);
7197 if (&L)
7198 __ beq(op1, AT, L);
7199 else
7200 __ beq(op1, AT, (int)0);
7201 break;
7202 case 0x02: //not_equal
7203 __ move(AT, val);
7204 if (&L)
7205 __ bne(op1, AT, L);
7206 else
7207 __ bne(op1, AT, (int)0);
7208 break;
7209 case 0x03: //above
// AT = (imm <u op1); branch taken when set.
7210 __ move(AT, val);
7211 __ sltu(AT, AT, op1);
7212 if(&L)
7213 __ bne(R0, AT, L);
7214 else
7215 __ bne(R0, AT, (int)0);
7216 break;
7217 case 0x04: //above_equal
// sltiu compares op1 <u imm directly; branch when clear.
7218 __ sltiu(AT, op1, val);
7219 if(&L)
7220 __ beq(AT, R0, L);
7221 else
7222 __ beq(AT, R0, (int)0);
7223 break;
7224 case 0x05: //below
7225 __ sltiu(AT, op1, val);
7226 if(&L)
7227 __ bne(R0, AT, L);
7228 else
7229 __ bne(R0, AT, (int)0);
7230 break;
7231 case 0x06: //below_equal
// AT = (imm <u op1); branch when clear, i.e. op1 <=u imm.
7232 __ move(AT, val);
7233 __ sltu(AT, AT, op1);
7234 if(&L)
7235 __ beq(AT, R0, L);
7236 else
7237 __ beq(AT, R0, (int)0);
7238 break;
7239 default:
7240 Unimplemented();
7241 }
7242 __ nop();
7243 %}
7245 ins_pc_relative(1);
7246 ins_pipe( pipe_alu_branch );
7247 %}
// branchConL_regL_regL: signed long compare-and-branch, register vs. register.
// Equality uses beq/bne directly; ordered relations compute slt into AT and
// branch on AT. Unlike the int variants, each case emits its own
// delayed()->nop() for the branch delay slot instead of one trailing nop.
// NOTE(review): '&target' is the address of a reference and always non-null,
// so the 'else' arms are dead code.
7250 instruct branchConL_regL_regL(cmpOp cmp, mRegL src1, mRegL src2, label labl) %{
7251 match( If cmp (CmpL src1 src2) );
7252 effect(USE labl);
7253 format %{ "BR$cmp $src1, $src2, $labl #@branchConL_regL_regL" %}
7254 ins_cost(250);
7256 ins_encode %{
7257 Register opr1_reg = as_Register($src1$$reg);
7258 Register opr2_reg = as_Register($src2$$reg);
7260 Label &target = *($labl$$label);
7261 int flag = $cmp$$cmpcode;
7263 switch(flag)
7264 {
7265 case 0x01: //equal
7266 if (&target)
7267 __ beq(opr1_reg, opr2_reg, target);
7268 else
7269 __ beq(opr1_reg, opr2_reg, (int)0);
7270 __ delayed()->nop();
7271 break;
7273 case 0x02: //not_equal
7274 if(&target)
7275 __ bne(opr1_reg, opr2_reg, target);
7276 else
7277 __ bne(opr1_reg, opr2_reg, (int)0);
7278 __ delayed()->nop();
7279 break;
7281 case 0x03: //greater
// AT = (src2 < src1) signed; taken when set.
7282 __ slt(AT, opr2_reg, opr1_reg);
7283 if(&target)
7284 __ bne(AT, R0, target);
7285 else
7286 __ bne(AT, R0, (int)0);
7287 __ delayed()->nop();
7288 break;
7290 case 0x04: //greater_equal
// AT = (src1 < src2); taken when clear.
7291 __ slt(AT, opr1_reg, opr2_reg);
7292 if(&target)
7293 __ beq(AT, R0, target);
7294 else
7295 __ beq(AT, R0, (int)0);
7296 __ delayed()->nop();
7298 break;
7300 case 0x05: //less
7301 __ slt(AT, opr1_reg, opr2_reg);
7302 if(&target)
7303 __ bne(AT, R0, target);
7304 else
7305 __ bne(AT, R0, (int)0);
7306 __ delayed()->nop();
7308 break;
7310 case 0x06: //less_equal
7311 __ slt(AT, opr2_reg, opr1_reg);
7313 if(&target)
7314 __ beq(AT, R0, target);
7315 else
7316 __ beq(AT, R0, (int)0);
7317 __ delayed()->nop();
7319 break;
7321 default:
7322 Unimplemented();
7323 }
7324 %}
7327 ins_pc_relative(1);
7328 ins_pipe( pipe_alu_branch );
7329 %}
// branchConL_reg_immL16_sub: signed long compare-and-branch against an
// immediate, implemented as a single daddiu (AT = src1 - val; immL16_sub
// guarantees -val fits the 16-bit signed immediate field) followed by a
// branch on the sign/zero of AT.
// NOTE(review): compare-by-subtraction is only valid for the ordered cases
// (0x03-0x06) when src1 - val cannot overflow 64 bits; presumably the
// immL16_sub operand's range makes this safe — verify its definition.
// NOTE(review): '&L' is the address of a reference, always non-null, so the
// 'else' arms are dead. Trailing __ nop() fills the delay slot.
7331 instruct branchConL_reg_immL16_sub(cmpOp cmp, mRegL src1, immL16_sub src2, label labl) %{
7332 match( If cmp (CmpL src1 src2) );
7333 effect(USE labl);
7334 ins_cost(180);
7335 format %{ "BR$cmp $src1, $src2, $labl #@branchConL_reg_immL16_sub" %}
7337 ins_encode %{
7338 Register op1 = $src1$$Register;
7339 int val = $src2$$constant;
7340 Label &L = *($labl$$label);
7341 int flag = $cmp$$cmpcode;
// AT = src1 - val; all cases below branch on AT's relation to zero.
7343 __ daddiu(AT, op1, -1 * val);
7344 switch(flag)
7345 {
7346 case 0x01: //equal
7347 if (&L)
7348 __ beq(R0, AT, L);
7349 else
7350 __ beq(R0, AT, (int)0);
7351 break;
7352 case 0x02: //not_equal
7353 if (&L)
7354 __ bne(R0, AT, L);
7355 else
7356 __ bne(R0, AT, (int)0);
7357 break;
7358 case 0x03: //greater
7359 if(&L)
7360 __ bgtz(AT, L);
7361 else
7362 __ bgtz(AT, (int)0);
7363 break;
7364 case 0x04: //greater_equal
7365 if(&L)
7366 __ bgez(AT, L);
7367 else
7368 __ bgez(AT, (int)0);
7369 break;
7370 case 0x05: //less
7371 if(&L)
7372 __ bltz(AT, L);
7373 else
7374 __ bltz(AT, (int)0);
7375 break;
7376 case 0x06: //less_equal
7377 if(&L)
7378 __ blez(AT, L);
7379 else
7380 __ blez(AT, (int)0);
7381 break;
7382 default:
7383 Unimplemented();
7384 }
7385 __ nop();
7386 %}
7388 ins_pc_relative(1);
7389 ins_pipe( pipe_alu_branch );
7390 %}
// branchConI_reg_imm16_sub: signed int compare-and-branch against an
// immediate, via a single addiu32 (AT = src1 - val) and a branch on the
// sign/zero of AT. Int analogue of branchConL_reg_immL16_sub.
// NOTE(review): the subtraction form is only correct for the ordered cases
// when src1 - val cannot overflow the 32-bit signed range; presumably the
// immI16_sub operand's range guarantees this — verify its definition.
// NOTE(review): '&L' is the address of a reference, always non-null, so the
// 'else' arms are dead. Trailing __ nop() fills the delay slot.
7393 instruct branchConI_reg_imm16_sub(cmpOp cmp, mRegI src1, immI16_sub src2, label labl) %{
7394 match( If cmp (CmpI src1 src2) );
7395 effect(USE labl);
7396 ins_cost(180);
7397 format %{ "BR$cmp $src1, $src2, $labl #@branchConI_reg_imm16_sub" %}
7399 ins_encode %{
7400 Register op1 = $src1$$Register;
7401 int val = $src2$$constant;
7402 Label &L = *($labl$$label);
7403 int flag = $cmp$$cmpcode;
// AT = src1 - val (32-bit); branch on AT's relation to zero.
7405 __ addiu32(AT, op1, -1 * val);
7406 switch(flag)
7407 {
7408 case 0x01: //equal
7409 if (&L)
7410 __ beq(R0, AT, L);
7411 else
7412 __ beq(R0, AT, (int)0);
7413 break;
7414 case 0x02: //not_equal
7415 if (&L)
7416 __ bne(R0, AT, L);
7417 else
7418 __ bne(R0, AT, (int)0);
7419 break;
7420 case 0x03: //greater
7421 if(&L)
7422 __ bgtz(AT, L);
7423 else
7424 __ bgtz(AT, (int)0);
7425 break;
7426 case 0x04: //greater_equal
7427 if(&L)
7428 __ bgez(AT, L);
7429 else
7430 __ bgez(AT, (int)0);
7431 break;
7432 case 0x05: //less
7433 if(&L)
7434 __ bltz(AT, L);
7435 else
7436 __ bltz(AT, (int)0);
7437 break;
7438 case 0x06: //less_equal
7439 if(&L)
7440 __ blez(AT, L);
7441 else
7442 __ blez(AT, (int)0);
7443 break;
7444 default:
7445 Unimplemented();
7446 }
7447 __ nop();
7448 %}
7450 ins_pc_relative(1);
7451 ins_pipe( pipe_alu_branch );
7452 %}
// branchConL_regL_immL0: signed long compare-and-branch against constant
// zero, using the MIPS compare-with-zero branches (bgtz/bgez/blez) where
// available. NOTE(review): case 0x05 uses slt+bne although bltz(opr1_reg)
// would be equivalent and shorter. '&target' is the address of a reference
// and always non-null, so the 'else' arms are dead code. The single
// delayed()->nop() after the switch fills the delay slot.
7454 instruct branchConL_regL_immL0(cmpOp cmp, mRegL src1, immL0 zero, label labl) %{
7455 match( If cmp (CmpL src1 zero) );
7456 effect(USE labl);
7457 format %{ "BR$cmp $src1, zero, $labl #@branchConL_regL_immL0" %}
7458 ins_cost(150);
7460 ins_encode %{
7461 Register opr1_reg = as_Register($src1$$reg);
7462 Label &target = *($labl$$label);
7463 int flag = $cmp$$cmpcode;
7465 switch(flag)
7466 {
7467 case 0x01: //equal
7468 if (&target)
7469 __ beq(opr1_reg, R0, target);
7470 else
7471 __ beq(opr1_reg, R0, int(0));
7472 break;
7474 case 0x02: //not_equal
7475 if(&target)
7476 __ bne(opr1_reg, R0, target);
7477 else
7478 __ bne(opr1_reg, R0, (int)0);
7479 break;
7481 case 0x03: //greater
7482 if(&target)
7483 __ bgtz(opr1_reg, target);
7484 else
7485 __ bgtz(opr1_reg, (int)0);
7486 break;
7488 case 0x04: //greater_equal
7489 if(&target)
7490 __ bgez(opr1_reg, target);
7491 else
7492 __ bgez(opr1_reg, (int)0);
7493 break;
7495 case 0x05: //less
// AT = (src1 < 0); taken when set (equivalent to bltz).
7496 __ slt(AT, opr1_reg, R0);
7497 if(&target)
7498 __ bne(AT, R0, target);
7499 else
7500 __ bne(AT, R0, (int)0);
7501 break;
7503 case 0x06: //less_equal
7504 if (&target)
7505 __ blez(opr1_reg, target);
7506 else
7507 __ blez(opr1_reg, int(0));
7508 break;
7510 default:
7511 Unimplemented();
7512 }
7513 __ delayed()->nop();
7514 %}
7517 ins_pc_relative(1);
7518 ins_pipe( pipe_alu_branch );
7519 %}
7522 //FIXME: review behavior when an operand is NaN — the unordered compares
// (c_ule_s / c_ult_s) fold unordered into the condition, so the branch
// direction for NaN differs between the greater/greater_equal cases
// (bc1f: not taken on NaN) and the less/less_equal cases (bc1t: taken on
// NaN). Confirm this matches what the matcher expects for CmpF.
//
// branchConF_reg_reg: single-float compare-and-branch. Each case sets the
// FPU condition flag with a c.cond.s compare and branches on it with
// bc1t/bc1f. NOTE(review): '&L' is the address of a reference, always
// non-null, so the 'else' arms are dead. Trailing __ nop() fills the
// branch delay slot.
7523 instruct branchConF_reg_reg(cmpOp cmp, regF src1, regF src2, label labl) %{
7524 match( If cmp (CmpF src1 src2) );
7525 effect(USE labl);
7526 format %{ "BR$cmp $src1, $src2, $labl #@branchConF_reg_reg" %}
7528 ins_encode %{
7529 FloatRegister reg_op1 = $src1$$FloatRegister;
7530 FloatRegister reg_op2 = $src2$$FloatRegister;
7531 Label &L = *($labl$$label);
7532 int flag = $cmp$$cmpcode;
7534 switch(flag)
7535 {
7536 case 0x01: //equal
7537 __ c_eq_s(reg_op1, reg_op2);
7538 if (&L)
7539 __ bc1t(L);
7540 else
7541 __ bc1t((int)0);
7542 break;
7543 case 0x02: //not_equal
7544 __ c_eq_s(reg_op1, reg_op2);
7545 if (&L)
7546 __ bc1f(L);
7547 else
7548 __ bc1f((int)0);
7549 break;
7550 case 0x03: //greater
// taken iff ordered and src1 > src2 (bc1f of "unordered or <=").
7551 __ c_ule_s(reg_op1, reg_op2);
7552 if(&L)
7553 __ bc1f(L);
7554 else
7555 __ bc1f((int)0);
7556 break;
7557 case 0x04: //greater_equal
7558 __ c_ult_s(reg_op1, reg_op2);
7559 if(&L)
7560 __ bc1f(L);
7561 else
7562 __ bc1f((int)0);
7563 break;
7564 case 0x05: //less
// taken iff src1 < src2 OR unordered (bc1t of c_ult).
7565 __ c_ult_s(reg_op1, reg_op2);
7566 if(&L)
7567 __ bc1t(L);
7568 else
7569 __ bc1t((int)0);
7570 break;
7571 case 0x06: //less_equal
7572 __ c_ule_s(reg_op1, reg_op2);
7573 if(&L)
7574 __ bc1t(L);
7575 else
7576 __ bc1t((int)0);
7577 break;
7578 default:
7579 Unimplemented();
7580 }
7581 __ nop();
7582 %}
7584 ins_pc_relative(1);
7585 ins_pipe(pipe_slow);
7586 %}
// branchConD_reg_reg: double compare-and-branch, mirroring the float
// variant: a c.cond.d compare sets the FPU condition flag, then bc1t/bc1f
// branches on it. not_equal deliberately uses c_eq_d + bc1f (not c_ueq_d)
// so that NaN != NaN holds — see the 2016/4/19 note below.
// NOTE(review): '&L' is the address of a reference, always non-null, so the
// 'else' arms are dead. Trailing __ nop() fills the branch delay slot.
7588 instruct branchConD_reg_reg(cmpOp cmp, regD src1, regD src2, label labl) %{
7589 match( If cmp (CmpD src1 src2) );
7590 effect(USE labl);
7591 format %{ "BR$cmp $src1, $src2, $labl #@branchConD_reg_reg" %}
7593 ins_encode %{
7594 FloatRegister reg_op1 = $src1$$FloatRegister;
7595 FloatRegister reg_op2 = $src2$$FloatRegister;
7596 Label &L = *($labl$$label);
7597 int flag = $cmp$$cmpcode;
7599 switch(flag)
7600 {
7601 case 0x01: //equal
7602 __ c_eq_d(reg_op1, reg_op2);
7603 if (&L)
7604 __ bc1t(L);
7605 else
7606 __ bc1t((int)0);
7607 break;
7608 case 0x02: //not_equal
7609 //2016/4/19 aoqi: c_ueq_d cannot distinguish NaN from equal. Double.isNaN(Double) is implemented by 'f != f', so the use of c_ueq_d causes bugs.
7610 __ c_eq_d(reg_op1, reg_op2);
7611 if (&L)
7612 __ bc1f(L);
7613 else
7614 __ bc1f((int)0);
7615 break;
7616 case 0x03: //greater
// taken iff ordered and src1 > src2 (bc1f of "unordered or <=").
7617 __ c_ule_d(reg_op1, reg_op2);
7618 if(&L)
7619 __ bc1f(L);
7620 else
7621 __ bc1f((int)0);
7622 break;
7623 case 0x04: //greater_equal
7624 __ c_ult_d(reg_op1, reg_op2);
7625 if(&L)
7626 __ bc1f(L);
7627 else
7628 __ bc1f((int)0);
7629 break;
7630 case 0x05: //less
// taken iff src1 < src2 OR unordered (bc1t of c_ult).
7631 __ c_ult_d(reg_op1, reg_op2);
7632 if(&L)
7633 __ bc1t(L);
7634 else
7635 __ bc1t((int)0);
7636 break;
7637 case 0x06: //less_equal
7638 __ c_ule_d(reg_op1, reg_op2);
7639 if(&L)
7640 __ bc1t(L);
7641 else
7642 __ bc1t((int)0);
7643 break;
7644 default:
7645 Unimplemented();
7646 }
7647 __ nop();
7648 %}
7650 ins_pc_relative(1);
7651 ins_pipe(pipe_slow);
7652 %}
7655 // Call Runtime Instruction
// Direct call into the VM runtime; the Java_To_Runtime encoding (defined
// elsewhere in this file) emits the actual call sequence. Aligned to 16
// bytes via ins_alignment.
7656 instruct CallRuntimeDirect(method meth) %{
7657 match(CallRuntime );
7658 effect(USE meth);
7660 ins_cost(300);
7661 format %{ "CALL,runtime #@CallRuntimeDirect" %}
7662 ins_encode( Java_To_Runtime( meth ) );
7663 ins_pipe( pipe_slow );
7664 ins_alignment(16);
7665 %}
7669 //------------------------MemBar Instructions-------------------------------
7670 //Memory barrier flavors
// MemBarAcquire: emits no code (size 0). Presumably the acquire semantics
// are provided by the preceding load/CAS sequence on this platform —
// TODO(review): confirm against the port's memory-model assumptions.
7672 instruct membar_acquire() %{
7673 match(MemBarAcquire);
7674 ins_cost(0);
7676 size(0);
7677 format %{ "MEMBAR-acquire (empty) @ membar_acquire" %}
7678 ins_encode();
7679 ins_pipe(empty);
7680 %}
// LoadFence: full hardware barrier via the MIPS 'sync' instruction.
7682 instruct load_fence() %{
7683 match(LoadFence);
7684 ins_cost(400);
7686 format %{ "MEMBAR @ load_fence" %}
7687 ins_encode %{
7688 __ sync();
7689 %}
7690 ins_pipe(pipe_slow);
7691 %}
// MemBarAcquireLock: empty — the format string states the acquire is
// already part of the CAS in the preceding FastLock.
7693 instruct membar_acquire_lock()
7694 %{
7695 match(MemBarAcquireLock);
7696 ins_cost(0);
7698 size(0);
7699 format %{ "MEMBAR-acquire (acquire as part of CAS in prior FastLock so empty encoding) @ membar_acquire_lock" %}
7700 ins_encode();
7701 ins_pipe(empty);
7702 %}
// MemBarRelease: emits no code (size 0), matching membar_acquire above in
// this port's convention of empty acquire/release barriers.
7704 instruct membar_release() %{
7705 match(MemBarRelease);
7706 ins_cost(0);
7708 size(0);
7709 format %{ "MEMBAR-release (empty) @ membar_release" %}
7710 ins_encode();
7711 ins_pipe(empty);
7712 %}
// StoreFence: full hardware barrier via the MIPS 'sync' instruction.
7714 instruct store_fence() %{
7715 match(StoreFence);
7716 ins_cost(400);
7718 format %{ "MEMBAR @ store_fence" %}
7720 ins_encode %{
7721 __ sync();
7722 %}
7724 ins_pipe(pipe_slow);
7725 %}
// MemBarReleaseLock: empty — the release is performed inside FastUnlock,
// per the format string.
7727 instruct membar_release_lock()
7728 %{
7729 match(MemBarReleaseLock);
7730 ins_cost(0);
7732 size(0);
7733 format %{ "MEMBAR-release-lock (release in FastUnlock so empty) @ membar_release_lock" %}
7734 ins_encode();
7735 ins_pipe(empty);
7736 %}
// MemBarVolatile: full 'sync', skipped entirely on uniprocessor systems
// (a single CPU needs no inter-CPU ordering).
7739 instruct membar_volatile() %{
7740 match(MemBarVolatile);
7741 ins_cost(400);
7743 format %{ "MEMBAR-volatile" %}
7744 ins_encode %{
7745 if( !os::is_MP() ) return; // Not needed on single CPU
7746 __ sync();
7748 %}
7749 ins_pipe(pipe_slow);
7750 %}
// MemBarVolatile elided to zero size when the matcher proves a
// store-load barrier already follows (post_store_load_barrier predicate).
7752 instruct unnecessary_membar_volatile() %{
7753 match(MemBarVolatile);
7754 predicate(Matcher::post_store_load_barrier(n));
7755 ins_cost(0);
7757 size(0);
7758 format %{ "MEMBAR-volatile (unnecessary so empty encoding) @ unnecessary_membar_volatile" %}
7759 ins_encode( );
7760 ins_pipe(empty);
7761 %}
// MemBarStoreStore: emits no code (size 0) in this port.
// NOTE(review): confirm store-store ordering really is implicit on the
// targeted Loongson cores; on weakly-ordered MIPS this would need a sync.
7763 instruct membar_storestore() %{
7764 match(MemBarStoreStore);
7766 ins_cost(0);
7767 size(0);
7768 format %{ "MEMBAR-storestore (empty encoding) @ membar_storestore" %}
7769 ins_encode( );
7770 ins_pipe(empty);
7771 %}
7773 //----------Move Instructions--------------------------------------------------
// CastX2P: reinterpret a long register as a pointer. Pure register move,
// elided when source and destination already coincide.
7774 instruct castX2P(mRegP dst, mRegL src) %{
7775 match(Set dst (CastX2P src));
7776 format %{ "castX2P $dst, $src @ castX2P" %}
7777 ins_encode %{
7778 Register src = $src$$Register;
7779 Register dst = $dst$$Register;
7781 if(src != dst)
7782 __ move(dst, src);
7783 %}
7784 ins_cost(10);
7785 ins_pipe( ialu_regI_mov );
7786 %}
// CastP2X: reinterpret a pointer register as a long. Pure register move,
// elided when source and destination already coincide.
7788 instruct castP2X(mRegL dst, mRegP src ) %{
7789 match(Set dst (CastP2X src));
7791 format %{ "mov $dst, $src\t #@castP2X" %}
7792 ins_encode %{
7793 Register src = $src$$Register;
7794 Register dst = $dst$$Register;
7796 if(src != dst)
7797 __ move(dst, src);
7798 %}
7799 ins_pipe( ialu_regI_mov );
7800 %}
// MoveF2I: bit-copy a float's raw pattern from an FPU register into a GP
// register via mfc1 (no conversion).
7802 instruct MoveF2I_reg_reg(mRegI dst, regF src) %{
7803 match(Set dst (MoveF2I src));
7804 effect(DEF dst, USE src);
7805 ins_cost(85);
7806 format %{ "MoveF2I $dst, $src @ MoveF2I_reg_reg" %}
7807 ins_encode %{
7808 Register dst = as_Register($dst$$reg);
7809 FloatRegister src = as_FloatRegister($src$$reg);
7811 __ mfc1(dst, src);
7812 %}
7813 ins_pipe( pipe_slow );
7814 %}
// MoveI2F: bit-copy an int's raw pattern from a GP register into an FPU
// register via mtc1 (no conversion).
7816 instruct MoveI2F_reg_reg(regF dst, mRegI src) %{
7817 match(Set dst (MoveI2F src));
7818 effect(DEF dst, USE src);
7819 ins_cost(85);
7820 format %{ "MoveI2F $dst, $src @ MoveI2F_reg_reg" %}
7821 ins_encode %{
7822 Register src = as_Register($src$$reg);
7823 FloatRegister dst = as_FloatRegister($dst$$reg);
7825 __ mtc1(src, dst);
7826 %}
7827 ins_pipe( pipe_slow );
7828 %}
// MoveD2L: bit-copy a double's raw 64-bit pattern into a GP register via
// dmfc1 (no conversion).
7830 instruct MoveD2L_reg_reg(mRegL dst, regD src) %{
7831 match(Set dst (MoveD2L src));
7832 effect(DEF dst, USE src);
7833 ins_cost(85);
7834 format %{ "MoveD2L $dst, $src @ MoveD2L_reg_reg" %}
7835 ins_encode %{
7836 Register dst = as_Register($dst$$reg);
7837 FloatRegister src = as_FloatRegister($src$$reg);
7839 __ dmfc1(dst, src);
7840 %}
7841 ins_pipe( pipe_slow );
7842 %}
// MoveL2D: bit-copy a long's raw 64-bit pattern into an FPU register via
// dmtc1 (no conversion).
7844 instruct MoveL2D_reg_reg(regD dst, mRegL src) %{
7845 match(Set dst (MoveL2D src));
7846 effect(DEF dst, USE src);
7847 ins_cost(85);
7848 format %{ "MoveL2D $dst, $src @ MoveL2D_reg_reg" %}
7849 ins_encode %{
7850 FloatRegister dst = as_FloatRegister($dst$$reg);
7851 Register src = as_Register($src$$reg);
7853 __ dmtc1(src, dst);
7854 %}
7855 ins_pipe( pipe_slow );
7856 %}
7858 //----------Conditional Move---------------------------------------------------
7859 // Conditional move
// cmovI on a signed int compare: each case computes the condition into AT
// (subu32 for equality, slt for ordered relations) and then uses the MIPS
// conditional moves movz (move if AT == 0) / movn (move if AT != 0) to
// branchlessly select dst = src when the condition holds.
7860 instruct cmovI_cmpI_reg_reg(mRegI dst, mRegI src, mRegI tmp1, mRegI tmp2, cmpOp cop ) %{
7861 match(Set dst (CMoveI (Binary cop (CmpI tmp1 tmp2)) (Binary dst src)));
7862 ins_cost(80);
7863 format %{
7864 "CMP$cop $tmp1, $tmp2\t @cmovI_cmpI_reg_reg\n"
7865 "\tCMOV $dst,$src \t @cmovI_cmpI_reg_reg"
7866 %}
7868 ins_encode %{
7869 Register op1 = $tmp1$$Register;
7870 Register op2 = $tmp2$$Register;
7871 Register dst = $dst$$Register;
7872 Register src = $src$$Register;
7873 int flag = $cop$$cmpcode;
7875 switch(flag)
7876 {
7877 case 0x01: //equal
7878 __ subu32(AT, op1, op2);
7879 __ movz(dst, src, AT);
7880 break;
7882 case 0x02: //not_equal
7883 __ subu32(AT, op1, op2);
7884 __ movn(dst, src, AT);
7885 break;
7887 case 0x03: //great
7888 __ slt(AT, op2, op1);
7889 __ movn(dst, src, AT);
7890 break;
7892 case 0x04: //great_equal
7893 __ slt(AT, op1, op2);
7894 __ movz(dst, src, AT);
7895 break;
7897 case 0x05: //less
7898 __ slt(AT, op1, op2);
7899 __ movn(dst, src, AT);
7900 break;
7902 case 0x06: //less_equal
7903 __ slt(AT, op2, op1);
7904 __ movz(dst, src, AT);
7905 break;
7907 default:
7908 Unimplemented();
7909 }
7910 %}
7912 ins_pipe( pipe_slow );
7913 %}
// cmovI on an unsigned pointer compare: condition into AT via full-width
// subu (equality) or sltu (unsigned relations), then movz/movn selects
// dst = src branchlessly when the condition holds.
7915 instruct cmovI_cmpP_reg_reg(mRegI dst, mRegI src, mRegP tmp1, mRegP tmp2, cmpOpU cop ) %{
7916 match(Set dst (CMoveI (Binary cop (CmpP tmp1 tmp2)) (Binary dst src)));
7917 ins_cost(80);
7918 format %{
7919 "CMPU$cop $tmp1,$tmp2\t @cmovI_cmpP_reg_reg\n\t"
7920 "CMOV $dst,$src\t @cmovI_cmpP_reg_reg"
7921 %}
7922 ins_encode %{
7923 Register op1 = $tmp1$$Register;
7924 Register op2 = $tmp2$$Register;
7925 Register dst = $dst$$Register;
7926 Register src = $src$$Register;
7927 int flag = $cop$$cmpcode;
7929 switch(flag)
7930 {
7931 case 0x01: //equal
7932 __ subu(AT, op1, op2);
7933 __ movz(dst, src, AT);
7934 break;
7936 case 0x02: //not_equal
7937 __ subu(AT, op1, op2);
7938 __ movn(dst, src, AT);
7939 break;
7941 case 0x03: //above
7942 __ sltu(AT, op2, op1);
7943 __ movn(dst, src, AT);
7944 break;
7946 case 0x04: //above_equal
7947 __ sltu(AT, op1, op2);
7948 __ movz(dst, src, AT);
7949 break;
7951 case 0x05: //below
7952 __ sltu(AT, op1, op2);
7953 __ movn(dst, src, AT);
7954 break;
7956 case 0x06: //below_equal
7957 __ sltu(AT, op2, op1);
7958 __ movz(dst, src, AT);
7959 break;
7961 default:
7962 Unimplemented();
7963 }
7964 %}
7966 ins_pipe( pipe_slow );
7967 %}
// cmovI on an unsigned compressed-oop (narrow pointer) compare: 32-bit
// subu32 for equality, sltu for the unsigned relations, then movz/movn
// selects dst = src when the condition holds.
7969 instruct cmovI_cmpN_reg_reg(mRegI dst, mRegI src, mRegN tmp1, mRegN tmp2, cmpOpU cop ) %{
7970 match(Set dst (CMoveI (Binary cop (CmpN tmp1 tmp2)) (Binary dst src)));
7971 ins_cost(80);
7972 format %{
7973 "CMPU$cop $tmp1,$tmp2\t @cmovI_cmpN_reg_reg\n\t"
7974 "CMOV $dst,$src\t @cmovI_cmpN_reg_reg"
7975 %}
7976 ins_encode %{
7977 Register op1 = $tmp1$$Register;
7978 Register op2 = $tmp2$$Register;
7979 Register dst = $dst$$Register;
7980 Register src = $src$$Register;
7981 int flag = $cop$$cmpcode;
7983 switch(flag)
7984 {
7985 case 0x01: //equal
7986 __ subu32(AT, op1, op2);
7987 __ movz(dst, src, AT);
7988 break;
7990 case 0x02: //not_equal
7991 __ subu32(AT, op1, op2);
7992 __ movn(dst, src, AT);
7993 break;
7995 case 0x03: //above
7996 __ sltu(AT, op2, op1);
7997 __ movn(dst, src, AT);
7998 break;
8000 case 0x04: //above_equal
8001 __ sltu(AT, op1, op2);
8002 __ movz(dst, src, AT);
8003 break;
8005 case 0x05: //below
8006 __ sltu(AT, op1, op2);
8007 __ movn(dst, src, AT);
8008 break;
8010 case 0x06: //below_equal
8011 __ sltu(AT, op2, op1);
8012 __ movz(dst, src, AT);
8013 break;
8015 default:
8016 Unimplemented();
8017 }
8018 %}
8020 ins_pipe( pipe_slow );
8021 %}
// cmovP on an unsigned compressed-oop compare: same subu32/sltu condition
// computation as cmovI_cmpN_reg_reg, but the conditionally-moved values
// are full pointers.
8023 instruct cmovP_cmpN_reg_reg(mRegP dst, mRegP src, mRegN tmp1, mRegN tmp2, cmpOpU cop ) %{
8024 match(Set dst (CMoveP (Binary cop (CmpN tmp1 tmp2)) (Binary dst src)));
8025 ins_cost(80);
8026 format %{
8027 "CMPU$cop $tmp1,$tmp2\t @cmovP_cmpN_reg_reg\n\t"
8028 "CMOV $dst,$src\t @cmovP_cmpN_reg_reg"
8029 %}
8030 ins_encode %{
8031 Register op1 = $tmp1$$Register;
8032 Register op2 = $tmp2$$Register;
8033 Register dst = $dst$$Register;
8034 Register src = $src$$Register;
8035 int flag = $cop$$cmpcode;
8037 switch(flag)
8038 {
8039 case 0x01: //equal
8040 __ subu32(AT, op1, op2);
8041 __ movz(dst, src, AT);
8042 break;
8044 case 0x02: //not_equal
8045 __ subu32(AT, op1, op2);
8046 __ movn(dst, src, AT);
8047 break;
8049 case 0x03: //above
8050 __ sltu(AT, op2, op1);
8051 __ movn(dst, src, AT);
8052 break;
8054 case 0x04: //above_equal
8055 __ sltu(AT, op1, op2);
8056 __ movz(dst, src, AT);
8057 break;
8059 case 0x05: //below
8060 __ sltu(AT, op1, op2);
8061 __ movn(dst, src, AT);
8062 break;
8064 case 0x06: //below_equal
8065 __ sltu(AT, op2, op1);
8066 __ movz(dst, src, AT);
8067 break;
8069 default:
8070 Unimplemented();
8071 }
8072 %}
8074 ins_pipe( pipe_slow );
8075 %}
// cmovN on an unsigned full-pointer compare: condition via 64-bit subu /
// sltu into AT, then movz/movn conditionally moves the narrow-oop value.
8077 instruct cmovN_cmpP_reg_reg(mRegN dst, mRegN src, mRegP tmp1, mRegP tmp2, cmpOpU cop ) %{
8078 match(Set dst (CMoveN (Binary cop (CmpP tmp1 tmp2)) (Binary dst src)));
8079 ins_cost(80);
8080 format %{
8081 "CMPU$cop $tmp1,$tmp2\t @cmovN_cmpP_reg_reg\n\t"
8082 "CMOV $dst,$src\t @cmovN_cmpP_reg_reg"
8083 %}
8084 ins_encode %{
8085 Register op1 = $tmp1$$Register;
8086 Register op2 = $tmp2$$Register;
8087 Register dst = $dst$$Register;
8088 Register src = $src$$Register;
8089 int flag = $cop$$cmpcode;
8091 switch(flag)
8092 {
8093 case 0x01: //equal
8094 __ subu(AT, op1, op2);
8095 __ movz(dst, src, AT);
8096 break;
8098 case 0x02: //not_equal
8099 __ subu(AT, op1, op2);
8100 __ movn(dst, src, AT);
8101 break;
8103 case 0x03: //above
8104 __ sltu(AT, op2, op1);
8105 __ movn(dst, src, AT);
8106 break;
8108 case 0x04: //above_equal
8109 __ sltu(AT, op1, op2);
8110 __ movz(dst, src, AT);
8111 break;
8113 case 0x05: //below
8114 __ sltu(AT, op1, op2);
8115 __ movn(dst, src, AT);
8116 break;
8118 case 0x06: //below_equal
8119 __ sltu(AT, op2, op1);
8120 __ movz(dst, src, AT);
8121 break;
8123 default:
8124 Unimplemented();
8125 }
8126 %}
8128 ins_pipe( pipe_slow );
8129 %}
// cmovP on a double compare: a c.cond.d compare sets the FPU condition
// flag, then movt (move if flag set) / movf (move if flag clear) selects
// dst = src. NOTE(review): the greater/greater_equal cases use the ordered
// compares (c_ole/c_olt) while less/less_equal use the unordered ones
// (c_ult/c_ule), so a NaN operand triggers the move in all four ordered
// cases — confirm this matches the matcher's NaN expectations for CmpD.
8131 instruct cmovP_cmpD_reg_reg(mRegP dst, mRegP src, regD tmp1, regD tmp2, cmpOp cop ) %{
8132 match(Set dst (CMoveP (Binary cop (CmpD tmp1 tmp2)) (Binary dst src)));
8133 ins_cost(80);
8134 format %{
8135 "CMP$cop $tmp1, $tmp2\t @cmovP_cmpD_reg_reg\n"
8136 "\tCMOV $dst,$src \t @cmovP_cmpD_reg_reg"
8137 %}
8138 ins_encode %{
8139 FloatRegister reg_op1 = as_FloatRegister($tmp1$$reg);
8140 FloatRegister reg_op2 = as_FloatRegister($tmp2$$reg);
8141 Register dst = as_Register($dst$$reg);
8142 Register src = as_Register($src$$reg);
8144 int flag = $cop$$cmpcode;
8146 switch(flag)
8147 {
8148 case 0x01: //equal
8149 __ c_eq_d(reg_op1, reg_op2);
8150 __ movt(dst, src);
8151 break;
8152 case 0x02: //not_equal
8153 __ c_eq_d(reg_op1, reg_op2);
8154 __ movf(dst, src);
8155 break;
8156 case 0x03: //greater
8157 __ c_ole_d(reg_op1, reg_op2);
8158 __ movf(dst, src);
8159 break;
8160 case 0x04: //greater_equal
8161 __ c_olt_d(reg_op1, reg_op2);
8162 __ movf(dst, src);
8163 break;
8164 case 0x05: //less
8165 __ c_ult_d(reg_op1, reg_op2);
8166 __ movt(dst, src);
8167 break;
8168 case 0x06: //less_equal
8169 __ c_ule_d(reg_op1, reg_op2);
8170 __ movt(dst, src);
8171 break;
8172 default:
8173 Unimplemented();
8174 }
8175 %}
8177 ins_pipe( pipe_slow );
8178 %}
// cmovN on an unsigned narrow-oop compare: 32-bit subu32 for equality,
// sltu for the unsigned relations, then movz/movn conditionally moves the
// narrow-oop value.
8181 instruct cmovN_cmpN_reg_reg(mRegN dst, mRegN src, mRegN tmp1, mRegN tmp2, cmpOpU cop ) %{
8182 match(Set dst (CMoveN (Binary cop (CmpN tmp1 tmp2)) (Binary dst src)));
8183 ins_cost(80);
8184 format %{
8185 "CMPU$cop $tmp1,$tmp2\t @cmovN_cmpN_reg_reg\n\t"
8186 "CMOV $dst,$src\t @cmovN_cmpN_reg_reg"
8187 %}
8188 ins_encode %{
8189 Register op1 = $tmp1$$Register;
8190 Register op2 = $tmp2$$Register;
8191 Register dst = $dst$$Register;
8192 Register src = $src$$Register;
8193 int flag = $cop$$cmpcode;
8195 switch(flag)
8196 {
8197 case 0x01: //equal
8198 __ subu32(AT, op1, op2);
8199 __ movz(dst, src, AT);
8200 break;
8202 case 0x02: //not_equal
8203 __ subu32(AT, op1, op2);
8204 __ movn(dst, src, AT);
8205 break;
8207 case 0x03: //above
8208 __ sltu(AT, op2, op1);
8209 __ movn(dst, src, AT);
8210 break;
8212 case 0x04: //above_equal
8213 __ sltu(AT, op1, op2);
8214 __ movz(dst, src, AT);
8215 break;
8217 case 0x05: //below
8218 __ sltu(AT, op1, op2);
8219 __ movn(dst, src, AT);
8220 break;
8222 case 0x06: //below_equal
8223 __ sltu(AT, op2, op1);
8224 __ movz(dst, src, AT);
8225 break;
8227 default:
8228 Unimplemented();
8229 }
8230 %}
8232 ins_pipe( pipe_slow );
8233 %}
// cmovI on an unsigned int compare (CmpU): condition into AT via subu
// (equality) or sltu (unsigned relations), then movz/movn selects
// dst = src branchlessly.
8236 instruct cmovI_cmpU_reg_reg(mRegI dst, mRegI src, mRegI tmp1, mRegI tmp2, cmpOpU cop ) %{
8237 match(Set dst (CMoveI (Binary cop (CmpU tmp1 tmp2)) (Binary dst src)));
8238 ins_cost(80);
8239 format %{
8240 "CMPU$cop $tmp1,$tmp2\t @cmovI_cmpU_reg_reg\n\t"
8241 "CMOV $dst,$src\t @cmovI_cmpU_reg_reg"
8242 %}
8243 ins_encode %{
8244 Register op1 = $tmp1$$Register;
8245 Register op2 = $tmp2$$Register;
8246 Register dst = $dst$$Register;
8247 Register src = $src$$Register;
8248 int flag = $cop$$cmpcode;
8250 switch(flag)
8251 {
8252 case 0x01: //equal
8253 __ subu(AT, op1, op2);
8254 __ movz(dst, src, AT);
8255 break;
8257 case 0x02: //not_equal
8258 __ subu(AT, op1, op2);
8259 __ movn(dst, src, AT);
8260 break;
8262 case 0x03: //above
8263 __ sltu(AT, op2, op1);
8264 __ movn(dst, src, AT);
8265 break;
8267 case 0x04: //above_equal
8268 __ sltu(AT, op1, op2);
8269 __ movz(dst, src, AT);
8270 break;
8272 case 0x05: //below
8273 __ sltu(AT, op1, op2);
8274 __ movn(dst, src, AT);
8275 break;
8277 case 0x06: //below_equal
8278 __ sltu(AT, op2, op1);
8279 __ movz(dst, src, AT);
8280 break;
8282 default:
8283 Unimplemented();
8284 }
8285 %}
8287 ins_pipe( pipe_slow );
8288 %}
// cmovI on a signed long compare: condition into AT via subu (equality) or
// slt (signed relations) over the 64-bit registers, then movz/movn selects
// dst = src. NOTE(review): this relies on subu/slt here being the
// full-width (64-bit) macro-assembler forms, as used for pointers
// elsewhere in this file — verify against the macroAssembler definitions.
8290 instruct cmovI_cmpL_reg_reg(mRegI dst, mRegI src, mRegL tmp1, mRegL tmp2, cmpOp cop ) %{
8291 match(Set dst (CMoveI (Binary cop (CmpL tmp1 tmp2)) (Binary dst src)));
8292 ins_cost(80);
8293 format %{
8294 "CMP$cop $tmp1, $tmp2\t @cmovI_cmpL_reg_reg\n"
8295 "\tCMOV $dst,$src \t @cmovI_cmpL_reg_reg"
8296 %}
8297 ins_encode %{
8298 Register opr1 = as_Register($tmp1$$reg);
8299 Register opr2 = as_Register($tmp2$$reg);
8300 Register dst = $dst$$Register;
8301 Register src = $src$$Register;
8302 int flag = $cop$$cmpcode;
8304 switch(flag)
8305 {
8306 case 0x01: //equal
8307 __ subu(AT, opr1, opr2);
8308 __ movz(dst, src, AT);
8309 break;
8311 case 0x02: //not_equal
8312 __ subu(AT, opr1, opr2);
8313 __ movn(dst, src, AT);
8314 break;
8316 case 0x03: //greater
8317 __ slt(AT, opr2, opr1);
8318 __ movn(dst, src, AT);
8319 break;
8321 case 0x04: //greater_equal
8322 __ slt(AT, opr1, opr2);
8323 __ movz(dst, src, AT);
8324 break;
8326 case 0x05: //less
8327 __ slt(AT, opr1, opr2);
8328 __ movn(dst, src, AT);
8329 break;
8331 case 0x06: //less_equal
8332 __ slt(AT, opr2, opr1);
8333 __ movz(dst, src, AT);
8334 break;
8336 default:
8337 Unimplemented();
8338 }
8339 %}
8341 ins_pipe( pipe_slow );
8342 %}
// cmovP on a signed long compare: identical condition computation to
// cmovI_cmpL_reg_reg (subu for equality, slt for signed relations), with
// pointer-valued dst/src selected via movz/movn.
8344 instruct cmovP_cmpL_reg_reg(mRegP dst, mRegP src, mRegL tmp1, mRegL tmp2, cmpOp cop ) %{
8345 match(Set dst (CMoveP (Binary cop (CmpL tmp1 tmp2)) (Binary dst src)));
8346 ins_cost(80);
8347 format %{
8348 "CMP$cop $tmp1, $tmp2\t @cmovP_cmpL_reg_reg\n"
8349 "\tCMOV $dst,$src \t @cmovP_cmpL_reg_reg"
8350 %}
8351 ins_encode %{
8352 Register opr1 = as_Register($tmp1$$reg);
8353 Register opr2 = as_Register($tmp2$$reg);
8354 Register dst = $dst$$Register;
8355 Register src = $src$$Register;
8356 int flag = $cop$$cmpcode;
8358 switch(flag)
8359 {
8360 case 0x01: //equal
8361 __ subu(AT, opr1, opr2);
8362 __ movz(dst, src, AT);
8363 break;
8365 case 0x02: //not_equal
8366 __ subu(AT, opr1, opr2);
8367 __ movn(dst, src, AT);
8368 break;
8370 case 0x03: //greater
8371 __ slt(AT, opr2, opr1);
8372 __ movn(dst, src, AT);
8373 break;
8375 case 0x04: //greater_equal
8376 __ slt(AT, opr1, opr2);
8377 __ movz(dst, src, AT);
8378 break;
8380 case 0x05: //less
8381 __ slt(AT, opr1, opr2);
8382 __ movn(dst, src, AT);
8383 break;
8385 case 0x06: //less_equal
8386 __ slt(AT, opr2, opr1);
8387 __ movz(dst, src, AT);
8388 break;
8390 default:
8391 Unimplemented();
8392 }
8393 %}
8395 ins_pipe( pipe_slow );
8396 %}
// cmovI on a double compare: c.cond.d sets the FPU condition flag, then
// movt/movf selects dst = src. not_equal deliberately uses c_eq_d + movf
// (not c_ueq_d) so NaN != NaN holds — see the 2016/4/19 note below.
// NOTE(review): greater/greater_equal use ordered compares (c_ole/c_olt)
// while less/less_equal use unordered ones (c_ult/c_ule); confirm the NaN
// behavior matches the matcher's expectations for CmpD.
8398 instruct cmovI_cmpD_reg_reg(mRegI dst, mRegI src, regD tmp1, regD tmp2, cmpOp cop ) %{
8399 match(Set dst (CMoveI (Binary cop (CmpD tmp1 tmp2)) (Binary dst src)));
8400 ins_cost(80);
8401 format %{
8402 "CMP$cop $tmp1, $tmp2\t @cmovI_cmpD_reg_reg\n"
8403 "\tCMOV $dst,$src \t @cmovI_cmpD_reg_reg"
8404 %}
8405 ins_encode %{
8406 FloatRegister reg_op1 = as_FloatRegister($tmp1$$reg);
8407 FloatRegister reg_op2 = as_FloatRegister($tmp2$$reg);
8408 Register dst = as_Register($dst$$reg);
8409 Register src = as_Register($src$$reg);
8411 int flag = $cop$$cmpcode;
8413 switch(flag)
8414 {
8415 case 0x01: //equal
8416 __ c_eq_d(reg_op1, reg_op2);
8417 __ movt(dst, src);
8418 break;
8419 case 0x02: //not_equal
8420 //2016/4/19 aoqi: See instruct branchConD_reg_reg. The change in branchConD_reg_reg fixed a bug. It seems similar here, so I made thesame change.
8421 __ c_eq_d(reg_op1, reg_op2);
8422 __ movf(dst, src);
8423 break;
8424 case 0x03: //greater
8425 __ c_ole_d(reg_op1, reg_op2);
8426 __ movf(dst, src);
8427 break;
8428 case 0x04: //greater_equal
8429 __ c_olt_d(reg_op1, reg_op2);
8430 __ movf(dst, src);
8431 break;
8432 case 0x05: //less
8433 __ c_ult_d(reg_op1, reg_op2);
8434 __ movt(dst, src);
8435 break;
8436 case 0x06: //less_equal
8437 __ c_ule_d(reg_op1, reg_op2);
8438 __ movt(dst, src);
8439 break;
8440 default:
8441 Unimplemented();
8442 }
8443 %}
8445 ins_pipe( pipe_slow );
8446 %}
// cmovP on an unsigned full-pointer compare: condition into AT via 64-bit
// subu (equality) or sltu (unsigned relations), then movz/movn selects
// dst = src branchlessly.
8449 instruct cmovP_cmpP_reg_reg(mRegP dst, mRegP src, mRegP tmp1, mRegP tmp2, cmpOpU cop ) %{
8450 match(Set dst (CMoveP (Binary cop (CmpP tmp1 tmp2)) (Binary dst src)));
8451 ins_cost(80);
8452 format %{
8453 "CMPU$cop $tmp1,$tmp2\t @cmovP_cmpP_reg_reg\n\t"
8454 "CMOV $dst,$src\t @cmovP_cmpP_reg_reg"
8455 %}
8456 ins_encode %{
8457 Register op1 = $tmp1$$Register;
8458 Register op2 = $tmp2$$Register;
8459 Register dst = $dst$$Register;
8460 Register src = $src$$Register;
8461 int flag = $cop$$cmpcode;
8463 switch(flag)
8464 {
8465 case 0x01: //equal
8466 __ subu(AT, op1, op2);
8467 __ movz(dst, src, AT);
8468 break;
8470 case 0x02: //not_equal
8471 __ subu(AT, op1, op2);
8472 __ movn(dst, src, AT);
8473 break;
8475 case 0x03: //above
8476 __ sltu(AT, op2, op1);
8477 __ movn(dst, src, AT);
8478 break;
8480 case 0x04: //above_equal
8481 __ sltu(AT, op1, op2);
8482 __ movz(dst, src, AT);
8483 break;
8485 case 0x05: //below
8486 __ sltu(AT, op1, op2);
8487 __ movn(dst, src, AT);
8488 break;
8490 case 0x06: //below_equal
8491 __ sltu(AT, op2, op1);
8492 __ movz(dst, src, AT);
8493 break;
8495 default:
8496 Unimplemented();
8497 }
8498 %}
8500 ins_pipe( pipe_slow );
8501 %}
// cmovP on a SIGNED int compare (cmpOp/CmpI): condition into AT via subu32
// (equality) or signed slt, then movz/movn selects dst = src.
// NOTE(review): the original case comments said above/above_equal/below/
// below_equal (unsigned terminology) although the emitted slt is signed;
// relabeled below to the signed names matching cmpOp's cmpcode values.
8503 instruct cmovP_cmpI_reg_reg(mRegP dst, mRegP src, mRegI tmp1, mRegI tmp2, cmpOp cop ) %{
8504 match(Set dst (CMoveP (Binary cop (CmpI tmp1 tmp2)) (Binary dst src)));
8505 ins_cost(80);
8506 format %{
8507 "CMP$cop $tmp1,$tmp2\t @cmovP_cmpI_reg_reg\n\t"
8508 "CMOV $dst,$src\t @cmovP_cmpI_reg_reg"
8509 %}
8510 ins_encode %{
8511 Register op1 = $tmp1$$Register;
8512 Register op2 = $tmp2$$Register;
8513 Register dst = $dst$$Register;
8514 Register src = $src$$Register;
8515 int flag = $cop$$cmpcode;
8517 switch(flag)
8518 {
8519 case 0x01: //equal
8520 __ subu32(AT, op1, op2);
8521 __ movz(dst, src, AT);
8522 break;
8524 case 0x02: //not_equal
8525 __ subu32(AT, op1, op2);
8526 __ movn(dst, src, AT);
8527 break;
8529 case 0x03: //greater (signed; was mislabeled "above")
8530 __ slt(AT, op2, op1);
8531 __ movn(dst, src, AT);
8532 break;
8534 case 0x04: //greater_equal (signed; was mislabeled "above_equal")
8535 __ slt(AT, op1, op2);
8536 __ movz(dst, src, AT);
8537 break;
8539 case 0x05: //less (signed; was mislabeled "below")
8540 __ slt(AT, op1, op2);
8541 __ movn(dst, src, AT);
8542 break;
8544 case 0x06: //less_equal (signed; was mislabeled "below_equal")
8545 __ slt(AT, op2, op1);
8546 __ movz(dst, src, AT);
8547 break;
8549 default:
8550 Unimplemented();
8551 }
8552 %}
8554 ins_pipe( pipe_slow );
8555 %}
// Conditional move of a narrow (compressed) oop keyed on a signed 32-bit
// integer compare: dst = src iff (tmp1 <cop> tmp2), else dst unchanged.
// AT is the scratch condition register for movz/movn.
// NOTE(review): original case labels said above/below; the code uses the
// signed slt, which is correct for CmpI, so the labels were fixed.
instruct cmovN_cmpI_reg_reg(mRegN dst, mRegN src, mRegI tmp1, mRegI tmp2, cmpOp cop ) %{
  match(Set dst (CMoveN (Binary cop (CmpI tmp1 tmp2)) (Binary dst src)));
  ins_cost(80);
  format %{
           "CMP$cop $tmp1,$tmp2\t @cmovN_cmpI_reg_reg\n\t"
           "CMOV $dst,$src\t @cmovN_cmpI_reg_reg"
         %}
  ins_encode %{
    Register op1 = $tmp1$$Register;
    Register op2 = $tmp2$$Register;
    Register dst = $dst$$Register;
    Register src = $src$$Register;
    int     flag = $cop$$cmpcode;

    switch(flag)
    {
      case 0x01: //equal
        __ subu32(AT, op1, op2);
        __ movz(dst, src, AT);
        break;

      case 0x02: //not_equal
        __ subu32(AT, op1, op2);
        __ movn(dst, src, AT);
        break;

      case 0x03: //greater (signed)
        __ slt(AT, op2, op1);
        __ movn(dst, src, AT);
        break;

      case 0x04: //greater_equal (signed)
        __ slt(AT, op1, op2);
        __ movz(dst, src, AT);
        break;

      case 0x05: //less (signed)
        __ slt(AT, op1, op2);
        __ movn(dst, src, AT);
        break;

      case 0x06: //less_equal (signed)
        __ slt(AT, op2, op1);
        __ movz(dst, src, AT);
        break;

      default:
        Unimplemented();
    }
  %}

  ins_pipe( pipe_slow );
%}
// Conditional move of a long keyed on a signed 32-bit integer compare:
// dst = src iff (tmp1 <cop> tmp2), else dst unchanged.
// AT is the scratch condition register for movz/movn.
instruct cmovL_cmpI_reg_reg(mRegL dst, mRegL src, mRegI tmp1, mRegI tmp2, cmpOp cop ) %{
  match(Set dst (CMoveL (Binary cop (CmpI tmp1 tmp2)) (Binary dst src)));
  ins_cost(80);
  format %{
           "CMP$cop $tmp1, $tmp2\t @cmovL_cmpI_reg_reg\n"
           "\tCMOV $dst,$src \t @cmovL_cmpI_reg_reg"
         %}

  ins_encode %{
    Register op1 = $tmp1$$Register;
    Register op2 = $tmp2$$Register;
    Register dst = as_Register($dst$$reg);
    Register src = as_Register($src$$reg);
    int     flag = $cop$$cmpcode;

    switch(flag)
    {
      case 0x01: //equal
        __ subu32(AT, op1, op2);
        __ movz(dst, src, AT);
        break;

      case 0x02: //not_equal
        __ subu32(AT, op1, op2);
        __ movn(dst, src, AT);
        break;

      case 0x03: //greater
        __ slt(AT, op2, op1);
        __ movn(dst, src, AT);
        break;

      case 0x04: //greater_equal
        __ slt(AT, op1, op2);
        __ movz(dst, src, AT);
        break;

      case 0x05: //less
        __ slt(AT, op1, op2);
        __ movn(dst, src, AT);
        break;

      case 0x06: //less_equal
        __ slt(AT, op2, op1);
        __ movz(dst, src, AT);
        break;

      default:
        Unimplemented();
    }
  %}

  ins_pipe( pipe_slow );
%}
// Conditional move of a long keyed on a signed 64-bit long compare:
// dst = src iff (tmp1 <cop> tmp2), else dst unchanged.
// Equality is decided with a full 64-bit subtraction (exact: zero iff
// equal); ordering uses the signed slt. AT is the scratch condition.
instruct cmovL_cmpL_reg_reg(mRegL dst, mRegL src, mRegL tmp1, mRegL tmp2, cmpOp cop ) %{
  match(Set dst (CMoveL (Binary cop (CmpL tmp1 tmp2)) (Binary dst src)));
  ins_cost(80);
  format %{
           "CMP$cop $tmp1, $tmp2\t @cmovL_cmpL_reg_reg\n"
           "\tCMOV $dst,$src \t @cmovL_cmpL_reg_reg"
         %}
  ins_encode %{
    Register opr1 = as_Register($tmp1$$reg);
    Register opr2 = as_Register($tmp2$$reg);
    Register dst  = as_Register($dst$$reg);
    Register src  = as_Register($src$$reg);
    int     flag  = $cop$$cmpcode;

    switch(flag)
    {
      case 0x01: //equal
        __ subu(AT, opr1, opr2);
        __ movz(dst, src, AT);
        break;

      case 0x02: //not_equal
        __ subu(AT, opr1, opr2);
        __ movn(dst, src, AT);
        break;

      case 0x03: //greater
        __ slt(AT, opr2, opr1);
        __ movn(dst, src, AT);
        break;

      case 0x04: //greater_equal
        __ slt(AT, opr1, opr2);
        __ movz(dst, src, AT);
        break;

      case 0x05: //less
        __ slt(AT, opr1, opr2);
        __ movn(dst, src, AT);
        break;

      case 0x06: //less_equal
        __ slt(AT, opr2, opr1);
        __ movz(dst, src, AT);
        break;

      default:
        Unimplemented();
    }
  %}

  ins_pipe( pipe_slow );
%}
// Conditional move of a long keyed on an UNSIGNED compare of two narrow
// (compressed) oops: dst = src iff (tmp1 <cop> tmp2), else dst unchanged.
// Narrow oops are 32-bit, hence subu32 for equality and sltu for the
// unsigned ordering. AT is the scratch condition register.
instruct cmovL_cmpN_reg_reg(mRegL dst, mRegL src, mRegN tmp1, mRegN tmp2, cmpOpU cop ) %{
  match(Set dst (CMoveL (Binary cop (CmpN tmp1 tmp2)) (Binary dst src)));
  ins_cost(80);
  format %{
           "CMPU$cop $tmp1,$tmp2\t @cmovL_cmpN_reg_reg\n\t"
           "CMOV $dst,$src\t @cmovL_cmpN_reg_reg"
         %}
  ins_encode %{
    Register op1 = $tmp1$$Register;
    Register op2 = $tmp2$$Register;
    Register dst = $dst$$Register;
    Register src = $src$$Register;
    int     flag = $cop$$cmpcode;

    switch(flag)
    {
      case 0x01: //equal
        __ subu32(AT, op1, op2);
        __ movz(dst, src, AT);
        break;

      case 0x02: //not_equal
        __ subu32(AT, op1, op2);
        __ movn(dst, src, AT);
        break;

      case 0x03: //above (unsigned greater)
        __ sltu(AT, op2, op1);
        __ movn(dst, src, AT);
        break;

      case 0x04: //above_equal
        __ sltu(AT, op1, op2);
        __ movz(dst, src, AT);
        break;

      case 0x05: //below (unsigned less)
        __ sltu(AT, op1, op2);
        __ movn(dst, src, AT);
        break;

      case 0x06: //below_equal
        __ sltu(AT, op2, op1);
        __ movz(dst, src, AT);
        break;

      default:
        Unimplemented();
    }
  %}

  ins_pipe( pipe_slow );
%}
// Conditional move of a long keyed on a double compare. Each case sets
// the FPU condition bit with a c.cond.d compare and then conditionally
// moves with movt (move if cc set) / movf (move if cc clear).
// NOTE(review): for greater/greater_equal, movf after an ordered compare
// (c_ole/c_olt) also fires when an operand is NaN; presumably C2 only
// emits these cmpcodes where that NaN behavior is acceptable — confirm
// against branchConD_reg_reg's condition usage.
instruct cmovL_cmpD_reg_reg(mRegL dst, mRegL src, regD tmp1, regD tmp2, cmpOp cop ) %{
  match(Set dst (CMoveL (Binary cop (CmpD tmp1 tmp2)) (Binary dst src)));
  ins_cost(80);
  format %{
           "CMP$cop $tmp1, $tmp2\t @cmovL_cmpD_reg_reg\n"
           "\tCMOV $dst,$src \t @cmovL_cmpD_reg_reg"
         %}
  ins_encode %{
    FloatRegister reg_op1 = as_FloatRegister($tmp1$$reg);
    FloatRegister reg_op2 = as_FloatRegister($tmp2$$reg);
    Register dst = as_Register($dst$$reg);
    Register src = as_Register($src$$reg);

    int     flag = $cop$$cmpcode;

    switch(flag)
    {
      case 0x01: //equal: move if (op1 == op2), ordered
        __ c_eq_d(reg_op1, reg_op2);
        __ movt(dst, src);
        break;
      case 0x02: //not_equal: move if !(op1 == op2)
        __ c_eq_d(reg_op1, reg_op2);
        __ movf(dst, src);
        break;
      case 0x03: //greater: move if !(op1 <= op2, ordered)
        __ c_ole_d(reg_op1, reg_op2);
        __ movf(dst, src);
        break;
      case 0x04: //greater_equal: move if !(op1 < op2, ordered)
        __ c_olt_d(reg_op1, reg_op2);
        __ movf(dst, src);
        break;
      case 0x05: //less: move if (op1 < op2) or unordered
        __ c_ult_d(reg_op1, reg_op2);
        __ movt(dst, src);
        break;
      case 0x06: //less_equal: move if (op1 <= op2) or unordered
        __ c_ule_d(reg_op1, reg_op2);
        __ movt(dst, src);
        break;
      default:
        Unimplemented();
    }
  %}

  ins_pipe( pipe_slow );
%}
// Conditional move of a double keyed on a double compare. FPR-to-FPR
// conditional move is synthesized with an FPU compare, a bc1f/bc1t
// branch over the mov_d, and an explicit nop in the branch delay slot.
// The branch condition is the NEGATION of the cmov condition (branch
// skips the move when the condition does not hold).
instruct cmovD_cmpD_reg_reg(regD dst, regD src, regD tmp1, regD tmp2, cmpOp cop ) %{
  match(Set dst (CMoveD (Binary cop (CmpD tmp1 tmp2)) (Binary dst src)));
  ins_cost(200);
  format %{
           "CMP$cop $tmp1, $tmp2\t @cmovD_cmpD_reg_reg\n"
           "\tCMOV $dst,$src \t @cmovD_cmpD_reg_reg"
         %}
  ins_encode %{
    FloatRegister reg_op1 = as_FloatRegister($tmp1$$reg);
    FloatRegister reg_op2 = as_FloatRegister($tmp2$$reg);
    FloatRegister dst = as_FloatRegister($dst$$reg);
    FloatRegister src = as_FloatRegister($src$$reg);

    int     flag = $cop$$cmpcode;

    Label L;

    switch(flag)
    {
      case 0x01: //equal: skip move unless (op1 == op2)
        __ c_eq_d(reg_op1, reg_op2);
        __ bc1f(L);
        __ nop();
        __ mov_d(dst, src);
        __ bind(L);
        break;
      case 0x02: //not_equal
        //2016/4/19 aoqi: See instruct branchConD_reg_reg. The change in branchConD_reg_reg fixed a bug. It seems similar here, so I made the same change.
        __ c_eq_d(reg_op1, reg_op2);
        __ bc1t(L);
        __ nop();
        __ mov_d(dst, src);
        __ bind(L);
        break;
      case 0x03: //greater: skip move if (op1 <= op2, ordered)
        __ c_ole_d(reg_op1, reg_op2);
        __ bc1t(L);
        __ nop();
        __ mov_d(dst, src);
        __ bind(L);
        break;
      case 0x04: //greater_equal: skip move if (op1 < op2, ordered)
        __ c_olt_d(reg_op1, reg_op2);
        __ bc1t(L);
        __ nop();
        __ mov_d(dst, src);
        __ bind(L);
        break;
      case 0x05: //less: skip move unless (op1 < op2 or unordered)
        __ c_ult_d(reg_op1, reg_op2);
        __ bc1f(L);
        __ nop();
        __ mov_d(dst, src);
        __ bind(L);
        break;
      case 0x06: //less_equal: skip move unless (op1 <= op2 or unordered)
        __ c_ule_d(reg_op1, reg_op2);
        __ bc1f(L);
        __ nop();
        __ mov_d(dst, src);
        __ bind(L);
        break;
      default:
        Unimplemented();
    }
  %}

  ins_pipe( pipe_slow );
%}
// Conditional move of a float keyed on a signed 32-bit integer compare.
// Synthesized with a branch over mov_s: the branch takes the NEGATED
// condition and skips the move; the nop fills the branch delay slot.
instruct cmovF_cmpI_reg_reg(regF dst, regF src, mRegI tmp1, mRegI tmp2, cmpOp cop ) %{
  match(Set dst (CMoveF (Binary cop (CmpI tmp1 tmp2)) (Binary dst src)));
  ins_cost(200);
  format %{
           "CMP$cop $tmp1, $tmp2\t @cmovF_cmpI_reg_reg\n"
           "\tCMOV $dst, $src \t @cmovF_cmpI_reg_reg"
         %}

  ins_encode %{
    Register op1 = $tmp1$$Register;
    Register op2 = $tmp2$$Register;
    FloatRegister dst = as_FloatRegister($dst$$reg);
    FloatRegister src = as_FloatRegister($src$$reg);
    int     flag = $cop$$cmpcode;
    Label      L;

    switch(flag)
    {
      case 0x01: //equal
        __ bne(op1, op2, L);
        __ nop();
        __ mov_s(dst, src);
        __ bind(L);
        break;
      case 0x02: //not_equal
        __ beq(op1, op2, L);
        __ nop();
        __ mov_s(dst, src);
        __ bind(L);
        break;
      case 0x03: //greater (signed)
        __ slt(AT, op2, op1);
        __ beq(AT, R0, L);
        __ nop();
        __ mov_s(dst, src);
        __ bind(L);
        break;
      case 0x04: //greater_equal (signed)
        __ slt(AT, op1, op2);
        __ bne(AT, R0, L);
        __ nop();
        __ mov_s(dst, src);
        __ bind(L);
        break;
      case 0x05: //less (signed)
        __ slt(AT, op1, op2);
        __ beq(AT, R0, L);
        __ nop();
        __ mov_s(dst, src);
        __ bind(L);
        break;
      case 0x06: //less_equal (signed)
        __ slt(AT, op2, op1);
        __ bne(AT, R0, L);
        __ nop();
        __ mov_s(dst, src);
        __ bind(L);
        break;
      default:
        Unimplemented();
    }
  %}

  ins_pipe( pipe_slow );
%}
// Conditional move of a double keyed on a signed 32-bit integer compare.
// Same branch-over-move pattern as cmovF_cmpI_reg_reg, but with mov_d.
instruct cmovD_cmpI_reg_reg(regD dst, regD src, mRegI tmp1, mRegI tmp2, cmpOp cop ) %{
  match(Set dst (CMoveD (Binary cop (CmpI tmp1 tmp2)) (Binary dst src)));
  ins_cost(200);
  format %{
           "CMP$cop $tmp1, $tmp2\t @cmovD_cmpI_reg_reg\n"
           "\tCMOV $dst, $src \t @cmovD_cmpI_reg_reg"
         %}

  ins_encode %{
    Register op1 = $tmp1$$Register;
    Register op2 = $tmp2$$Register;
    FloatRegister dst = as_FloatRegister($dst$$reg);
    FloatRegister src = as_FloatRegister($src$$reg);
    int     flag = $cop$$cmpcode;
    Label      L;

    switch(flag)
    {
      case 0x01: //equal
        __ bne(op1, op2, L);
        __ nop();
        __ mov_d(dst, src);
        __ bind(L);
        break;
      case 0x02: //not_equal
        __ beq(op1, op2, L);
        __ nop();
        __ mov_d(dst, src);
        __ bind(L);
        break;
      case 0x03: //greater (signed)
        __ slt(AT, op2, op1);
        __ beq(AT, R0, L);
        __ nop();
        __ mov_d(dst, src);
        __ bind(L);
        break;
      case 0x04: //greater_equal (signed)
        __ slt(AT, op1, op2);
        __ bne(AT, R0, L);
        __ nop();
        __ mov_d(dst, src);
        __ bind(L);
        break;
      case 0x05: //less (signed)
        __ slt(AT, op1, op2);
        __ beq(AT, R0, L);
        __ nop();
        __ mov_d(dst, src);
        __ bind(L);
        break;
      case 0x06: //less_equal (signed)
        __ slt(AT, op2, op1);
        __ bne(AT, R0, L);
        __ nop();
        __ mov_d(dst, src);
        __ bind(L);
        break;
      default:
        Unimplemented();
    }
  %}

  ins_pipe( pipe_slow );
%}
// Conditional move of a double keyed on a pointer compare.
// Same branch-over-move pattern as cmovD_cmpI_reg_reg.
// NOTE(review): the ordering cases use the SIGNED slt on pointer values,
// unlike cmovP_cmpP_reg_reg which uses sltu; presumably C2 only emits
// eq/ne conditions for CmpP here, making the slt cases dead — confirm.
instruct cmovD_cmpP_reg_reg(regD dst, regD src, mRegP tmp1, mRegP tmp2, cmpOp cop ) %{
  match(Set dst (CMoveD (Binary cop (CmpP tmp1 tmp2)) (Binary dst src)));
  ins_cost(200);
  format %{
           "CMP$cop $tmp1, $tmp2\t @cmovD_cmpP_reg_reg\n"
           "\tCMOV $dst, $src \t @cmovD_cmpP_reg_reg"
         %}

  ins_encode %{
    Register op1 = $tmp1$$Register;
    Register op2 = $tmp2$$Register;
    FloatRegister dst = as_FloatRegister($dst$$reg);
    FloatRegister src = as_FloatRegister($src$$reg);
    int     flag = $cop$$cmpcode;
    Label      L;

    switch(flag)
    {
      case 0x01: //equal
        __ bne(op1, op2, L);
        __ nop();
        __ mov_d(dst, src);
        __ bind(L);
        break;
      case 0x02: //not_equal
        __ beq(op1, op2, L);
        __ nop();
        __ mov_d(dst, src);
        __ bind(L);
        break;
      case 0x03: //greater
        __ slt(AT, op2, op1);
        __ beq(AT, R0, L);
        __ nop();
        __ mov_d(dst, src);
        __ bind(L);
        break;
      case 0x04: //greater_equal
        __ slt(AT, op1, op2);
        __ bne(AT, R0, L);
        __ nop();
        __ mov_d(dst, src);
        __ bind(L);
        break;
      case 0x05: //less
        __ slt(AT, op1, op2);
        __ beq(AT, R0, L);
        __ nop();
        __ mov_d(dst, src);
        __ bind(L);
        break;
      case 0x06: //less_equal
        __ slt(AT, op2, op1);
        __ bne(AT, R0, L);
        __ nop();
        __ mov_d(dst, src);
        __ bind(L);
        break;
      default:
        Unimplemented();
    }
  %}

  ins_pipe( pipe_slow );
%}
9093 //FIXME
// Conditional move of an int keyed on a float compare. Sets the FPU
// condition bit with a c.cond.s compare, then moves the GPR with
// movt (cc set) / movf (cc clear); condition choices mirror
// cmovL_cmpD_reg_reg, including its NaN behavior.
instruct cmovI_cmpF_reg_reg(mRegI dst, mRegI src, regF tmp1, regF tmp2, cmpOp cop ) %{
  match(Set dst (CMoveI (Binary cop (CmpF tmp1 tmp2)) (Binary dst src)));
  ins_cost(80);
  format %{
           "CMP$cop $tmp1, $tmp2\t @cmovI_cmpF_reg_reg\n"
           "\tCMOV $dst,$src \t @cmovI_cmpF_reg_reg"
         %}

  ins_encode %{
    FloatRegister reg_op1 = $tmp1$$FloatRegister;
    FloatRegister reg_op2 = $tmp2$$FloatRegister;
    Register dst = $dst$$Register;
    Register src = $src$$Register;
    int     flag = $cop$$cmpcode;

    switch(flag)
    {
      case 0x01: //equal
        __ c_eq_s(reg_op1, reg_op2);
        __ movt(dst, src);
        break;
      case 0x02: //not_equal
        __ c_eq_s(reg_op1, reg_op2);
        __ movf(dst, src);
        break;
      case 0x03: //greater: move if !(op1 <= op2, ordered)
        __ c_ole_s(reg_op1, reg_op2);
        __ movf(dst, src);
        break;
      case 0x04: //greater_equal: move if !(op1 < op2, ordered)
        __ c_olt_s(reg_op1, reg_op2);
        __ movf(dst, src);
        break;
      case 0x05: //less: move if (op1 < op2) or unordered
        __ c_ult_s(reg_op1, reg_op2);
        __ movt(dst, src);
        break;
      case 0x06: //less_equal: move if (op1 <= op2) or unordered
        __ c_ule_s(reg_op1, reg_op2);
        __ movt(dst, src);
        break;
      default:
        Unimplemented();
    }
  %}
  ins_pipe( pipe_slow );
%}
// Conditional move of a float keyed on a float compare. FPR-to-FPR
// conditional move is synthesized with an FPU compare, a bc1f/bc1t
// branch over mov_s (branch takes the NEGATED condition), and a nop
// in the branch delay slot. Mirrors cmovD_cmpD_reg_reg.
instruct cmovF_cmpF_reg_reg(regF dst, regF src, regF tmp1, regF tmp2, cmpOp cop ) %{
  match(Set dst (CMoveF (Binary cop (CmpF tmp1 tmp2)) (Binary dst src)));
  ins_cost(200);
  format %{
           "CMP$cop $tmp1, $tmp2\t @cmovF_cmpF_reg_reg\n"
           "\tCMOV $dst,$src \t @cmovF_cmpF_reg_reg"
         %}

  ins_encode %{
    FloatRegister reg_op1 = $tmp1$$FloatRegister;
    FloatRegister reg_op2 = $tmp2$$FloatRegister;
    FloatRegister dst = $dst$$FloatRegister;
    FloatRegister src = $src$$FloatRegister;
    Label      L;
    int     flag = $cop$$cmpcode;

    switch(flag)
    {
      case 0x01: //equal
        __ c_eq_s(reg_op1, reg_op2);
        __ bc1f(L);
        __ nop();
        __ mov_s(dst, src);
        __ bind(L);
        break;
      case 0x02: //not_equal
        __ c_eq_s(reg_op1, reg_op2);
        __ bc1t(L);
        __ nop();
        __ mov_s(dst, src);
        __ bind(L);
        break;
      case 0x03: //greater
        __ c_ole_s(reg_op1, reg_op2);
        __ bc1t(L);
        __ nop();
        __ mov_s(dst, src);
        __ bind(L);
        break;
      case 0x04: //greater_equal
        __ c_olt_s(reg_op1, reg_op2);
        __ bc1t(L);
        __ nop();
        __ mov_s(dst, src);
        __ bind(L);
        break;
      case 0x05: //less
        __ c_ult_s(reg_op1, reg_op2);
        __ bc1f(L);
        __ nop();
        __ mov_s(dst, src);
        __ bind(L);
        break;
      case 0x06: //less_equal
        __ c_ule_s(reg_op1, reg_op2);
        __ bc1f(L);
        __ nop();
        __ mov_s(dst, src);
        __ bind(L);
        break;
      default:
        Unimplemented();
    }
  %}
  ins_pipe( pipe_slow );
%}
9209 // Manifest a CmpL result in an integer register. Very painful.
9210 // This is the test to avoid.
// Manifest a CmpL result in an integer register:
//   dst = -1 if src1 < src2, 0 if equal, 1 if src1 > src2 (signed 64-bit).
//
// BUG FIX(review): the previous code decided the "less" result from the
// sign of a 64-bit subtraction (subu + bltz). That subtraction can
// overflow — e.g. src1 = Long.MAX_VALUE, src2 = -1 wraps negative and
// produced -1 instead of 1. Two slt compares are overflow-free, and
// their difference directly yields -1/0/1 with no branches.
instruct cmpL3_reg_reg(mRegI dst, mRegL src1, mRegL src2) %{
  match(Set dst (CmpL3 src1 src2));
  ins_cost(1000);
  format %{ "cmpL3 $dst, $src1, $src2 @ cmpL3_reg_reg" %}
  ins_encode %{
    Register opr1 = as_Register($src1$$reg);
    Register opr2 = as_Register($src2$$reg);
    Register dst  = as_Register($dst$$reg);

    __ slt(AT, opr1, opr2);    // AT  = (src1 < src2) ? 1 : 0
    __ slt(dst, opr2, opr1);   // dst = (src1 > src2) ? 1 : 0
    __ subu(dst, dst, AT);     // dst = gt - lt  ->  1 / 0 / -1
  %}
  ins_pipe( pipe_slow );
%}
//
// less_result    = -1
// greater_result =  1
// equal_result   =  0
// nan_result     = -1
//
// Manifest a CmpF result in an integer register:
//   dst = -1 if src1 < src2 OR unordered (NaN), 0 if equal, 1 otherwise.
// The daddiu in the branch delay slot executes unconditionally, so dst
// is preloaded with -1 before the branch decision is known.
instruct cmpF3_reg_reg(mRegI dst, regF src1, regF src2) %{
  match(Set dst (CmpF3 src1 src2));
  ins_cost(1000);
  format %{ "cmpF3 $dst, $src1, $src2 @ cmpF3_reg_reg" %}
  ins_encode %{
    FloatRegister src1 = as_FloatRegister($src1$$reg);
    FloatRegister src2 = as_FloatRegister($src2$$reg);
    Register dst = as_Register($dst$$reg);

    Label Done;

    // (src1 < src2) or unordered -> result -1
    __ c_ult_s(src1, src2);
    __ bc1t(Done);
    __ delayed()->daddiu(dst, R0, -1);

    // otherwise: 0 if equal, else 1 (src1 > src2)
    __ c_eq_s(src1, src2);
    __ move(dst, 1);
    __ movt(dst, R0);

    __ bind(Done);
  %}
  ins_pipe( pipe_slow );
%}
// Manifest a CmpD result in an integer register:
//   dst = -1 if src1 < src2 OR unordered (NaN), 0 if equal, 1 otherwise.
// Double-precision twin of cmpF3_reg_reg (see notes there).
instruct cmpD3_reg_reg(mRegI dst, regD src1, regD src2) %{
  match(Set dst (CmpD3 src1 src2));
  ins_cost(1000);
  format %{ "cmpD3 $dst, $src1, $src2 @ cmpD3_reg_reg" %}
  ins_encode %{
    FloatRegister src1 = as_FloatRegister($src1$$reg);
    FloatRegister src2 = as_FloatRegister($src2$$reg);
    Register dst = as_Register($dst$$reg);

    Label Done;

    // (src1 < src2) or unordered -> result -1 (set in the delay slot)
    __ c_ult_d(src1, src2);
    __ bc1t(Done);
    __ delayed()->daddiu(dst, R0, -1);

    // otherwise: 0 if equal, else 1
    __ c_eq_d(src1, src2);
    __ move(dst, 1);
    __ movt(dst, R0);

    __ bind(Done);
  %}
  ins_pipe( pipe_slow );
%}
// Zero a block of memory: stores $cnt doublewords of zero starting at
// $base. Clobbers AT (cursor) and T9 (remaining count).
instruct clear_array(mRegL cnt, mRegP base, Universe dummy) %{
  match(Set dummy (ClearArray cnt base));
  format %{ "CLEAR_ARRAY base = $base, cnt = $cnt # Clear doublewords" %}
  ins_encode %{
    // $cnt is the number of DOUBLEWORDS (8-byte units) to clear, not
    // bytes, and $base points to the start of the region (see the
    // 2012/9/21 note below).
    Register base = $base$$Register;
    Register num  = $cnt$$Register;
    Label Loop, done;

    /* 2012/9/21 Jin: according to X86, $cnt is calculated by doublewords(8 bytes) */
    __ move(T9, num); /* T9 = words */
    __ beq(T9, R0, done); // nothing to clear
    __ nop();
    __ move(AT, base);

    __ bind(Loop);
    __ sd(R0, Address(AT, 0));
    __ daddi(AT, AT, wordSize);
    __ daddi(T9, T9, -1);
    __ bne(T9, R0, Loop);
    __ delayed()->nop();
    __ bind(done);
  %}
  ins_pipe( pipe_slow );
%}
9315 instruct string_compare(a4_RegP str1, mA5RegI cnt1, a6_RegP str2, mA7RegI cnt2, no_Ax_mRegI result) %{
9316 match(Set result (StrComp (Binary str1 cnt1) (Binary str2 cnt2)));
9317 effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2);
9319 format %{ "String Compare $str1[len: $cnt1], $str2[len: $cnt2] -> $result @ string_compare" %}
9320 ins_encode %{
9321 // Get the first character position in both strings
9322 // [8] char array, [12] offset, [16] count
9323 Register str1 = $str1$$Register;
9324 Register str2 = $str2$$Register;
9325 Register cnt1 = $cnt1$$Register;
9326 Register cnt2 = $cnt2$$Register;
9327 Register result = $result$$Register;
9329 Label L, Loop, haveResult, done;
9331 // compute the and difference of lengths (in result)
9332 __ subu(result, cnt1, cnt2); // result holds the difference of two lengths
9334 // compute the shorter length (in cnt1)
9335 __ slt(AT, cnt2, cnt1);
9336 __ movn(cnt1, cnt2, AT);
9338 // Now the shorter length is in cnt1 and cnt2 can be used as a tmp register
9339 __ bind(Loop); // Loop begin
9340 __ beq(cnt1, R0, done);
9341 __ delayed()->lhu(AT, str1, 0);;
9343 // compare current character
9344 __ lhu(cnt2, str2, 0);
9345 __ bne(AT, cnt2, haveResult);
9346 __ delayed()->addi(str1, str1, 2);
9347 __ addi(str2, str2, 2);
9348 __ b(Loop);
9349 __ delayed()->addi(cnt1, cnt1, -1); // Loop end
9351 __ bind(haveResult);
9352 __ subu(result, AT, cnt2);
9354 __ bind(done);
9355 %}
9357 ins_pipe( pipe_slow );
9358 %}
9360 // intrinsic optimization
9361 instruct string_equals(a4_RegP str1, a5_RegP str2, mA6RegI cnt, mA7RegI temp, no_Ax_mRegI result) %{
9362 match(Set result (StrEquals (Binary str1 str2) cnt));
9363 effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt, KILL temp);
9365 format %{ "String Equal $str1, $str2, len:$cnt tmp:$temp -> $result @ string_equals" %}
9366 ins_encode %{
9367 // Get the first character position in both strings
9368 // [8] char array, [12] offset, [16] count
9369 Register str1 = $str1$$Register;
9370 Register str2 = $str2$$Register;
9371 Register cnt = $cnt$$Register;
9372 Register tmp = $temp$$Register;
9373 Register result = $result$$Register;
9375 Label Loop, done;
9378 __ beq(str1, str2, done); // same char[] ?
9379 __ daddiu(result, R0, 1);
9381 __ bind(Loop); // Loop begin
9382 __ beq(cnt, R0, done);
9383 __ daddiu(result, R0, 1); // count == 0
9385 // compare current character
9386 __ lhu(AT, str1, 0);;
9387 __ lhu(tmp, str2, 0);
9388 __ bne(AT, tmp, done);
9389 __ delayed()->daddi(result, R0, 0);
9390 __ addi(str1, str1, 2);
9391 __ addi(str2, str2, 2);
9392 __ b(Loop);
9393 __ delayed()->addi(cnt, cnt, -1); // Loop end
9395 __ bind(done);
9396 %}
9398 ins_pipe( pipe_slow );
9399 %}
9401 //----------Arithmetic Instructions-------------------------------------------
9402 //----------Addition Instructions---------------------------------------------
// AddI: 32-bit integer add, register + register.
instruct addI_Reg_Reg(mRegI dst, mRegI src1, mRegI src2) %{
  match(Set dst (AddI src1 src2));

  format %{ "add $dst, $src1, $src2 #@addI_Reg_Reg" %}
  ins_encode %{
    Register dst  = $dst$$Register;
    Register src1 = $src1$$Register;
    Register src2 = $src2$$Register;
    __ addu32(dst, src1, src2);
  %}
  ins_pipe( ialu_regI_regI );
%}
// AddI: 32-bit integer add, register + immediate. Uses the immediate
// form when the constant fits in a signed 16-bit field; otherwise the
// constant is materialized in AT first.
instruct addI_Reg_imm(mRegI dst, mRegI src1, immI src2) %{
  match(Set dst (AddI src1 src2));

  format %{ "add $dst, $src1, $src2 #@addI_Reg_imm" %}
  ins_encode %{
    Register dst  = $dst$$Register;
    Register src1 = $src1$$Register;
    int imm = $src2$$constant;

    if(Assembler::is_simm16(imm)) {
       __ addiu32(dst, src1, imm);
    } else {
       __ move(AT, imm);
       __ addu32(dst, src1, AT);
    }
  %}
  ins_pipe( ialu_regI_regI );
%}
// AddP: pointer + long offset (64-bit add).
instruct addP_reg_reg(mRegP dst, mRegP src1, mRegL src2) %{
  match(Set dst (AddP src1 src2));

  format %{ "dadd $dst, $src1, $src2 #@addP_reg_reg" %}

  ins_encode %{
    Register dst  = $dst$$Register;
    Register src1 = $src1$$Register;
    Register src2 = $src2$$Register;
    __ daddu(dst, src1, src2);
  %}

  ins_pipe( ialu_regI_regI );
%}
// AddP: pointer + sign-extended int offset, matching the ConvI2L feeding
// the add so no separate extension instruction is emitted.
instruct addP_reg_reg_convI2L(mRegP dst, mRegP src1, mRegI src2) %{
  match(Set dst (AddP src1 (ConvI2L src2)));

  format %{ "dadd $dst, $src1, $src2 #@addP_reg_reg_convI2L" %}

  ins_encode %{
    Register dst  = $dst$$Register;
    Register src1 = $src1$$Register;
    Register src2 = $src2$$Register;
    __ daddu(dst, src1, src2);
  %}

  ins_pipe( ialu_regI_regI );
%}
// AddP: pointer + long immediate. Immediate form for simm16 constants,
// otherwise the full 64-bit constant is built in AT via set64.
instruct addP_reg_imm(mRegP dst, mRegP src1, immL src2) %{
  match(Set dst (AddP src1 src2));

  format %{ "daddi $dst, $src1, $src2 #@addP_reg_imm" %}
  ins_encode %{
    Register src1 = $src1$$Register;
    long      src2 = $src2$$constant;
    Register  dst  = $dst$$Register;

    if(Assembler::is_simm16(src2)) {
       __ daddiu(dst, src1, src2);
    } else {
       __ set64(AT, src2);
       __ daddu(dst, src1, AT);
    }
  %}
  ins_pipe( ialu_regI_imm16 );
%}
9484 // Add Long Register with Register
// AddL: 64-bit add, register + register.
instruct addL_Reg_Reg(mRegL dst, mRegL src1, mRegL src2) %{
  match(Set dst (AddL src1 src2));
  ins_cost(200);
  format %{ "ADD $dst, $src1, $src2 #@addL_Reg_Reg\t" %}

  ins_encode %{
    Register dst_reg  = as_Register($dst$$reg);
    Register src1_reg = as_Register($src1$$reg);
    Register src2_reg = as_Register($src2$$reg);

    __ daddu(dst_reg, src1_reg, src2_reg);
  %}

  ins_pipe( ialu_regL_regL );
%}
// AddL: 64-bit add, register + 16-bit signed immediate (immL16
// guarantees the constant fits daddiu's field).
instruct addL_Reg_imm(mRegL dst, mRegL src1, immL16 src2)
%{
  match(Set dst (AddL src1 src2));

  format %{ "ADD $dst, $src1, $src2 #@addL_Reg_imm " %}
  ins_encode %{
    Register dst_reg  = as_Register($dst$$reg);
    Register src1_reg = as_Register($src1$$reg);
    int      src2_imm = $src2$$constant;

    __ daddiu(dst_reg, src1_reg, src2_imm);
  %}

  ins_pipe( ialu_regL_regL );
%}
// AddL: sign-extended int + 16-bit immediate, folding the ConvI2L.
instruct addL_RegI2L_imm(mRegL dst, mRegI src1, immL16 src2)
%{
  match(Set dst (AddL (ConvI2L src1) src2));

  format %{ "ADD $dst, $src1, $src2 #@addL_RegI2L_imm " %}
  ins_encode %{
    Register dst_reg  = as_Register($dst$$reg);
    Register src1_reg = as_Register($src1$$reg);
    int      src2_imm = $src2$$constant;

    __ daddiu(dst_reg, src1_reg, src2_imm);
  %}

  ins_pipe( ialu_regL_regL );
%}
// AddL: sign-extended int + long register, folding the ConvI2L.
instruct addL_RegI2L_Reg(mRegL dst, mRegI src1, mRegL src2) %{
  match(Set dst (AddL (ConvI2L src1) src2));
  ins_cost(200);
  format %{ "ADD $dst, $src1, $src2 #@addL_RegI2L_Reg\t" %}

  ins_encode %{
    Register dst_reg  = as_Register($dst$$reg);
    Register src1_reg = as_Register($src1$$reg);
    Register src2_reg = as_Register($src2$$reg);

    __ daddu(dst_reg, src1_reg, src2_reg);
  %}

  ins_pipe( ialu_regL_regL );
%}
// AddL: both operands are sign-extended ints; both ConvI2L nodes folded.
instruct addL_RegI2L_RegI2L(mRegL dst, mRegI src1, mRegI src2) %{
  match(Set dst (AddL (ConvI2L src1) (ConvI2L src2)));
  ins_cost(200);
  format %{ "ADD $dst, $src1, $src2 #@addL_RegI2L_RegI2L\t" %}

  ins_encode %{
    Register dst_reg  = as_Register($dst$$reg);
    Register src1_reg = as_Register($src1$$reg);
    Register src2_reg = as_Register($src2$$reg);

    __ daddu(dst_reg, src1_reg, src2_reg);
  %}

  ins_pipe( ialu_regL_regL );
%}
// AddL: long register + sign-extended int, folding the ConvI2L.
instruct addL_Reg_RegI2L(mRegL dst, mRegL src1, mRegI src2) %{
  match(Set dst (AddL src1 (ConvI2L src2)));
  ins_cost(200);
  format %{ "ADD $dst, $src1, $src2 #@addL_Reg_RegI2L\t" %}

  ins_encode %{
    Register dst_reg  = as_Register($dst$$reg);
    Register src1_reg = as_Register($src1$$reg);
    Register src2_reg = as_Register($src2$$reg);

    __ daddu(dst_reg, src1_reg, src2_reg);
  %}

  ins_pipe( ialu_regL_regL );
%}
9581 //----------Subtraction Instructions-------------------------------------------
9582 // Integer Subtraction Instructions
// SubI: 32-bit integer subtract, register - register.
instruct subI_Reg_Reg(mRegI dst, mRegI src1, mRegI src2) %{
  match(Set dst (SubI src1 src2));
  ins_cost(100);

  format %{ "sub $dst, $src1, $src2 #@subI_Reg_Reg" %}
  ins_encode %{
    Register dst  = $dst$$Register;
    Register src1 = $src1$$Register;
    Register src2 = $src2$$Register;
    __ subu32(dst, src1, src2);
  %}
  ins_pipe( ialu_regI_regI );
%}
// SubI by immediate: emitted as an add of the negated constant
// (immI16_sub guarantees -constant fits addiu32's simm16 field).
instruct subI_Reg_immI16_sub(mRegI dst, mRegI src1, immI16_sub src2) %{
  match(Set dst (SubI src1 src2));
  ins_cost(80);

  format %{ "sub $dst, $src1, $src2 #@subI_Reg_immI16_sub" %}
  ins_encode %{
    Register dst  = $dst$$Register;
    Register src1 = $src1$$Register;
    __ addiu32(dst, src1, -1 * $src2$$constant);
  %}
  ins_pipe( ialu_regI_regI );
%}
// Integer negate: matches (SubI 0 src) and emits 0 - src.
instruct negI_Reg(mRegI dst, immI0 zero, mRegI src) %{
  match(Set dst (SubI zero src));
  ins_cost(80);

  format %{ "neg $dst, $src #@negI_Reg" %}
  ins_encode %{
    Register dst = $dst$$Register;
    Register src = $src$$Register;
    __ subu32(dst, R0, src);
  %}
  ins_pipe( ialu_regI_regI );
%}
// Long negate: matches (SubL 0 src) and emits 0 - src (64-bit).
instruct negL_Reg(mRegL dst, immL0 zero, mRegL src) %{
  match(Set dst (SubL zero src));
  ins_cost(80);

  format %{ "neg $dst, $src #@negL_Reg" %}
  ins_encode %{
    Register dst = $dst$$Register;
    Register src = $src$$Register;
    __ subu(dst, R0, src);
  %}
  ins_pipe( ialu_regI_regI );
%}
// SubL by immediate: emitted as a 64-bit add of the negated constant
// (immL16_sub guarantees -constant fits daddiu's simm16 field).
instruct subL_Reg_immL16_sub(mRegL dst, mRegL src1, immL16_sub src2) %{
  match(Set dst (SubL src1 src2));
  ins_cost(80);

  format %{ "sub $dst, $src1, $src2 #@subL_Reg_immL16_sub" %}
  ins_encode %{
    Register dst  = $dst$$Register;
    Register src1 = $src1$$Register;
    __ daddiu(dst, src1, -1 * $src2$$constant);
  %}
  ins_pipe( ialu_regI_regI );
%}
9649 // Subtract Long Register with Register.
// SubL: 64-bit subtract, register - register.
instruct subL_Reg_Reg(mRegL dst, mRegL src1, mRegL src2) %{
  match(Set dst (SubL src1 src2));
  ins_cost(100);
  format %{ "SubL $dst, $src1, $src2 @ subL_Reg_Reg" %}
  ins_encode %{
    Register dst  = as_Register($dst$$reg);
    Register src1 = as_Register($src1$$reg);
    Register src2 = as_Register($src2$$reg);

    __ subu(dst, src1, src2);
  %}
  ins_pipe( ialu_regL_regL );
%}
// SubL: long - sign-extended int, folding the ConvI2L.
instruct subL_Reg_RegI2L(mRegL dst, mRegL src1, mRegI src2) %{
  match(Set dst (SubL src1 (ConvI2L src2)));
  ins_cost(100);
  format %{ "SubL $dst, $src1, $src2 @ subL_Reg_RegI2L" %}
  ins_encode %{
    Register dst  = as_Register($dst$$reg);
    Register src1 = as_Register($src1$$reg);
    Register src2 = as_Register($src2$$reg);

    __ subu(dst, src1, src2);
  %}
  ins_pipe( ialu_regL_regL );
%}
// SubL: sign-extended int - long, folding the ConvI2L.
instruct subL_RegI2L_Reg(mRegL dst, mRegI src1, mRegL src2) %{
  match(Set dst (SubL (ConvI2L src1) src2));
  ins_cost(200);
  format %{ "SubL $dst, $src1, $src2 @ subL_RegI2L_Reg" %}
  ins_encode %{
    Register dst  = as_Register($dst$$reg);
    Register src1 = as_Register($src1$$reg);
    Register src2 = as_Register($src2$$reg);

    __ subu(dst, src1, src2);
  %}
  ins_pipe( ialu_regL_regL );
%}
// SubL: both operands sign-extended ints; both ConvI2L nodes folded.
instruct subL_RegI2L_RegI2L(mRegL dst, mRegI src1, mRegI src2) %{
  match(Set dst (SubL (ConvI2L src1) (ConvI2L src2)));
  ins_cost(200);
  format %{ "SubL $dst, $src1, $src2 @ subL_RegI2L_RegI2L" %}
  ins_encode %{
    Register dst  = as_Register($dst$$reg);
    Register src1 = as_Register($src1$$reg);
    Register src2 = as_Register($src2$$reg);

    __ subu(dst, src1, src2);
  %}
  ins_pipe( ialu_regL_regL );
%}
9706 // Integer MOD with Register
// ModI: 32-bit signed remainder. Emits div (remainder lands in HI,
// read back with mfhi). The Loongson gsmod instruction is deliberately
// disabled — see the 2016.08.10 measurement note below.
instruct modI_Reg_Reg(mRegI dst, mRegI src1, mRegI src2) %{
  match(Set dst (ModI src1 src2));
  ins_cost(300);
  format %{ "modi $dst, $src1, $src2 @ modI_Reg_Reg" %}
  ins_encode %{
    Register dst  = $dst$$Register;
    Register src1 = $src1$$Register;
    Register src2 = $src2$$Register;

    //if (UseLoongsonISA) {
    if (0) {
      // 2016.08.10
      // Experiments show that gsmod is slower that div+mfhi.
      // So I just disable it here.
      __ gsmod(dst, src1, src2);
    } else {
      __ div(src1, src2);
      __ mfhi(dst);
    }
  %}

  //ins_pipe( ialu_mod );
  ins_pipe( ialu_regI_regI );
%}
// ModL: 64-bit signed remainder. Uses the Loongson gsdmod instruction
// when available, otherwise ddiv with the remainder read from HI.
instruct modL_reg_reg(mRegL dst, mRegL src1, mRegL src2) %{
  match(Set dst (ModL src1 src2));
  format %{ "modL $dst, $src1, $src2 @modL_reg_reg" %}

  ins_encode %{
    Register dst = as_Register($dst$$reg);
    Register op1 = as_Register($src1$$reg);
    Register op2 = as_Register($src2$$reg);

    if (UseLoongsonISA) {
      __ gsdmod(dst, op1, op2);
    } else {
      __ ddiv(op1, op2);
      __ mfhi(dst);
    }
  %}
  ins_pipe( pipe_slow );
%}
// MulI: 32-bit integer multiply via the three-operand mul form.
instruct mulI_Reg_Reg(mRegI dst, mRegI src1, mRegI src2) %{
  match(Set dst (MulI src1 src2));

  ins_cost(300);
  format %{ "mul $dst, $src1, $src2 @ mulI_Reg_Reg" %}
  ins_encode %{
    Register src1 = $src1$$Register;
    Register src2 = $src2$$Register;
    Register dst  = $dst$$Register;

    __ mul(dst, src1, src2);
  %}
  ins_pipe( ialu_mult );
%}
// Multiply-add: dst = src1 * src2 + src3. The addend is seeded into LO
// with mtlo, madd accumulates src1*src2 into HI:LO, and the low 32 bits
// are read back with mflo (HI is ignored).
instruct maddI_Reg_Reg(mRegI dst, mRegI src1, mRegI src2, mRegI src3) %{
  match(Set dst (AddI (MulI src1 src2) src3));

  ins_cost(999);
  format %{ "madd $dst, $src1 * $src2 + $src3 #@maddI_Reg_Reg" %}
  ins_encode %{
    Register src1 = $src1$$Register;
    Register src2 = $src2$$Register;
    Register src3 = $src3$$Register;
    Register dst  = $dst$$Register;

    __ mtlo(src3);
    __ madd(src1, src2);
    __ mflo(dst);
  %}
  ins_pipe( ialu_mult );
%}
// DivI: 32-bit signed division. MIPS div does not raise an exception on
// a zero divisor, so teq traps explicitly when src2 == R0 (trap code
// 0x7) and the runtime turns that into ArithmeticException. Loongson
// gets the single-instruction gsdiv; plain MIPS uses div + mflo with
// two scheduling nops before reading LO.
instruct divI_Reg_Reg(mRegI dst, mRegI src1, mRegI src2) %{
  match(Set dst (DivI src1 src2));

  ins_cost(300);
  format %{ "div $dst, $src1, $src2 @ divI_Reg_Reg" %}
  ins_encode %{
    Register src1 = $src1$$Register;
    Register src2 = $src2$$Register;
    Register dst  = $dst$$Register;

    /* 2012/4/21 Jin: In MIPS, div does not cause exception.
       We must trap an exception manually. */
    __ teq(R0, src2, 0x7);

    if (UseLoongsonISA) {
      __ gsdiv(dst, src1, src2);
    } else {
      __ div(src1, src2);

      __ nop();
      __ nop();
      __ mflo(dst);
    }
  %}
  ins_pipe( ialu_mod );
%}
// Single-precision float divide: dst = src1 / src2.
// No explicit trap: IEEE float division by zero yields Inf/NaN, not a trap.
instruct divF_Reg_Reg(regF dst, regF src1, regF src2) %{
  match(Set dst (DivF src1 src2));

  ins_cost(300);
  format %{ "divF $dst, $src1, $src2 @ divF_Reg_Reg" %}
  ins_encode %{
    FloatRegister src1 = $src1$$FloatRegister;
    FloatRegister src2 = $src2$$FloatRegister;
    FloatRegister dst = $dst$$FloatRegister;

    /* Here do we need to trap an exception manually ? */
    __ div_s(dst, src1, src2);
  %}
  ins_pipe( pipe_slow );
%}
// Double-precision float divide: dst = src1 / src2.
instruct divD_Reg_Reg(regD dst, regD src1, regD src2) %{
  match(Set dst (DivD src1 src2));

  ins_cost(300);
  format %{ "divD $dst, $src1, $src2 @ divD_Reg_Reg" %}
  ins_encode %{
    FloatRegister src1 = $src1$$FloatRegister;
    FloatRegister src2 = $src2$$FloatRegister;
    FloatRegister dst = $dst$$FloatRegister;

    /* Here do we need to trap an exception manually ? */
    __ div_d(dst, src1, src2);
  %}
  ins_pipe( pipe_slow );
%}
// Long multiply: dst = src1 * src2 (low 64 bits).
// Loongson has a single-result gsdmult; plain MIPS64 uses dmult + mflo.
instruct mulL_reg_reg(mRegL dst, mRegL src1, mRegL src2) %{
  match(Set dst (MulL src1 src2));
  format %{ "mulL $dst, $src1, $src2 @mulL_reg_reg" %}
  ins_encode %{
    Register dst = as_Register($dst$$reg);
    Register op1 = as_Register($src1$$reg);
    Register op2 = as_Register($src2$$reg);

    if (UseLoongsonISA) {
      __ gsdmult(dst, op1, op2);
    } else {
      __ dmult(op1, op2);
      __ mflo(dst);
    }
  %}
  ins_pipe( pipe_slow );
%}
// Long multiply with an int operand: dst = src1 * (long)src2.
// NOTE(review): no explicit sign-extension of src2 is emitted — this
// relies on the MIPS64 convention that int values are kept sign-extended
// in 64-bit registers; confirm that invariant holds for all producers.
instruct mulL_reg_regI2L(mRegL dst, mRegL src1, mRegI src2) %{
  match(Set dst (MulL src1 (ConvI2L src2)));
  format %{ "mulL $dst, $src1, $src2 @mulL_reg_regI2L" %}
  ins_encode %{
    Register dst = as_Register($dst$$reg);
    Register op1 = as_Register($src1$$reg);
    Register op2 = as_Register($src2$$reg);

    if (UseLoongsonISA) {
      __ gsdmult(dst, op1, op2);
    } else {
      __ dmult(op1, op2);
      __ mflo(dst);
    }
  %}
  ins_pipe( pipe_slow );
%}
// Long division: dst = src1 / src2 (quotient from LO).
// NOTE(review): unlike divI_Reg_Reg above, no teq zero-divisor trap is
// emitted here — presumably the zero check is done elsewhere for longs;
// confirm.
instruct divL_reg_reg(mRegL dst, mRegL src1, mRegL src2) %{
  match(Set dst (DivL src1 src2));
  format %{ "divL $dst, $src1, $src2 @divL_reg_reg" %}

  ins_encode %{
    Register dst = as_Register($dst$$reg);
    Register op1 = as_Register($src1$$reg);
    Register op2 = as_Register($src2$$reg);

    if (UseLoongsonISA) {
      __ gsddiv(dst, op1, op2);
    } else {
      __ ddiv(op1, op2);
      __ mflo(dst);
    }
  %}
  ins_pipe( pipe_slow );
%}
// Single-precision float add: dst = src1 + src2.
instruct addF_reg_reg(regF dst, regF src1, regF src2) %{
  match(Set dst (AddF src1 src2));
  format %{ "AddF $dst, $src1, $src2 @addF_reg_reg" %}
  ins_encode %{
    FloatRegister src1 = as_FloatRegister($src1$$reg);
    FloatRegister src2 = as_FloatRegister($src2$$reg);
    FloatRegister dst = as_FloatRegister($dst$$reg);

    __ add_s(dst, src1, src2);
  %}
  ins_pipe( fpu_regF_regF );
%}
// Single-precision float subtract: dst = src1 - src2.
instruct subF_reg_reg(regF dst, regF src1, regF src2) %{
  match(Set dst (SubF src1 src2));
  format %{ "SubF $dst, $src1, $src2 @subF_reg_reg" %}
  ins_encode %{
    FloatRegister src1 = as_FloatRegister($src1$$reg);
    FloatRegister src2 = as_FloatRegister($src2$$reg);
    FloatRegister dst = as_FloatRegister($dst$$reg);

    __ sub_s(dst, src1, src2);
  %}
  ins_pipe( fpu_regF_regF );
%}
// Double-precision float add: dst = src1 + src2.
instruct addD_reg_reg(regD dst, regD src1, regD src2) %{
  match(Set dst (AddD src1 src2));
  format %{ "AddD $dst, $src1, $src2 @addD_reg_reg" %}
  ins_encode %{
    FloatRegister src1 = as_FloatRegister($src1$$reg);
    FloatRegister src2 = as_FloatRegister($src2$$reg);
    FloatRegister dst = as_FloatRegister($dst$$reg);

    __ add_d(dst, src1, src2);
  %}
  ins_pipe( fpu_regF_regF );
%}
// Double-precision float subtract: dst = src1 - src2.
instruct subD_reg_reg(regD dst, regD src1, regD src2) %{
  match(Set dst (SubD src1 src2));
  format %{ "SubD $dst, $src1, $src2 @subD_reg_reg" %}
  ins_encode %{
    FloatRegister src1 = as_FloatRegister($src1$$reg);
    FloatRegister src2 = as_FloatRegister($src2$$reg);
    FloatRegister dst = as_FloatRegister($dst$$reg);

    __ sub_d(dst, src1, src2);
  %}
  ins_pipe( fpu_regF_regF );
%}
// Single-precision float negate: dst = -src.
instruct negF_reg(regF dst, regF src) %{
  match(Set dst (NegF src));
  format %{ "negF $dst, $src @negF_reg" %}
  ins_encode %{
    FloatRegister src = as_FloatRegister($src$$reg);
    FloatRegister dst = as_FloatRegister($dst$$reg);

    __ neg_s(dst, src);
  %}
  ins_pipe( fpu_regF_regF );
%}
// Double-precision float negate: dst = -src.
instruct negD_reg(regD dst, regD src) %{
  match(Set dst (NegD src));
  format %{ "negD $dst, $src @negD_reg" %}
  ins_encode %{
    FloatRegister src = as_FloatRegister($src$$reg);
    FloatRegister dst = as_FloatRegister($dst$$reg);

    __ neg_d(dst, src);
  %}
  ins_pipe( fpu_regF_regF );
%}
// Single-precision float multiply: dst = src1 * src2.
instruct mulF_reg_reg(regF dst, regF src1, regF src2) %{
  match(Set dst (MulF src1 src2));
  format %{ "MULF $dst, $src1, $src2 @mulF_reg_reg" %}
  ins_encode %{
    FloatRegister src1 = $src1$$FloatRegister;
    FloatRegister src2 = $src2$$FloatRegister;
    FloatRegister dst = $dst$$FloatRegister;

    __ mul_s(dst, src1, src2);
  %}
  ins_pipe( fpu_regF_regF );
%}
// Fused single-precision multiply-add: dst = src1 * src2 + src3.
// Effectively disabled through a prohibitive cost: madd_s rounds once,
// which differs from the separately-rounded mul+add Java requires.
instruct maddF_reg_reg(regF dst, regF src1, regF src2, regF src3) %{
  match(Set dst (AddF (MulF src1 src2) src3));
  // For compatibility reason (e.g. on the Loongson platform), disable this guy.
  ins_cost(44444);
  format %{ "maddF $dst, $src1, $src2, $src3 @maddF_reg_reg" %}
  ins_encode %{
    FloatRegister src1 = $src1$$FloatRegister;
    FloatRegister src2 = $src2$$FloatRegister;
    FloatRegister src3 = $src3$$FloatRegister;
    FloatRegister dst = $dst$$FloatRegister;

    __ madd_s(dst, src1, src2, src3);
  %}
  ins_pipe( fpu_regF_regF );
%}
// Mul two double precision floating piont number
// Double-precision float multiply: dst = src1 * src2.
instruct mulD_reg_reg(regD dst, regD src1, regD src2) %{
  match(Set dst (MulD src1 src2));
  format %{ "MULD $dst, $src1, $src2 @mulD_reg_reg" %}
  ins_encode %{
    FloatRegister src1 = $src1$$FloatRegister;
    FloatRegister src2 = $src2$$FloatRegister;
    FloatRegister dst = $dst$$FloatRegister;

    __ mul_d(dst, src1, src2);
  %}
  ins_pipe( fpu_regF_regF );
%}
// Fused double-precision multiply-add: dst = src1 * src2 + src3.
// Effectively disabled through a prohibitive cost (see maddF note).
instruct maddD_reg_reg(regD dst, regD src1, regD src2, regD src3) %{
  match(Set dst (AddD (MulD src1 src2) src3));
  // For compatibility reason (e.g. on the Loongson platform), disable this guy.
  ins_cost(44444);
  format %{ "maddD $dst, $src1, $src2, $src3 @maddD_reg_reg" %}
  ins_encode %{
    FloatRegister src1 = $src1$$FloatRegister;
    FloatRegister src2 = $src2$$FloatRegister;
    FloatRegister src3 = $src3$$FloatRegister;
    FloatRegister dst = $dst$$FloatRegister;

    __ madd_d(dst, src1, src2, src3);
  %}
  ins_pipe( fpu_regF_regF );
%}
// Single-precision absolute value: dst = |src|.
instruct absF_reg(regF dst, regF src) %{
  match(Set dst (AbsF src));
  ins_cost(100);
  format %{ "absF $dst, $src @absF_reg" %}
  ins_encode %{
    FloatRegister src = as_FloatRegister($src$$reg);
    FloatRegister dst = as_FloatRegister($dst$$reg);

    __ abs_s(dst, src);
  %}
  ins_pipe( fpu_regF_regF );
%}
// intrinsics for math_native.
// AbsD SqrtD CosD SinD TanD LogD Log10D

// Double-precision absolute value: dst = |src|.
instruct absD_reg(regD dst, regD src) %{
  match(Set dst (AbsD src));
  ins_cost(100);
  format %{ "absD $dst, $src @absD_reg" %}
  ins_encode %{
    FloatRegister src = as_FloatRegister($src$$reg);
    FloatRegister dst = as_FloatRegister($dst$$reg);

    __ abs_d(dst, src);
  %}
  ins_pipe( fpu_regF_regF );
%}
// Double-precision square root: dst = sqrt(src).
instruct sqrtD_reg(regD dst, regD src) %{
  match(Set dst (SqrtD src));
  ins_cost(100);
  format %{ "SqrtD $dst, $src @sqrtD_reg" %}
  ins_encode %{
    FloatRegister src = as_FloatRegister($src$$reg);
    FloatRegister dst = as_FloatRegister($dst$$reg);

    __ sqrt_d(dst, src);
  %}
  ins_pipe( fpu_regF_regF );
%}
// Single-precision square root, matched from the widened ideal pattern
// (float)sqrt((double)src) and collapsed into a single sqrt.s.
instruct sqrtF_reg(regF dst, regF src) %{
  match(Set dst (ConvD2F (SqrtD (ConvF2D src))));
  ins_cost(100);
  format %{ "SqrtF $dst, $src @sqrtF_reg" %}
  ins_encode %{
    FloatRegister src = as_FloatRegister($src$$reg);
    FloatRegister dst = as_FloatRegister($dst$$reg);

    __ sqrt_s(dst, src);
  %}
  ins_pipe( fpu_regF_regF );
%}
//----------------------------------Logical Instructions----------------------
//__________________________________Integer Logical Instructions-------------

//And Instuctions
// And Register with Immediate
// General AND with an arbitrary 32-bit immediate: materialize the constant
// in AT, then register-register AND. (Immediates fitting 16 unsigned bits
// are caught by the cheaper andI_Reg_imm_0_65535 below.)
instruct andI_Reg_immI(mRegI dst, mRegI src1, immI src2) %{
  match(Set dst (AndI src1 src2));

  format %{ "and $dst, $src1, $src2 #@andI_Reg_immI" %}
  ins_encode %{
    Register dst = $dst$$Register;
    Register src = $src1$$Register;
    int val = $src2$$constant;

    __ move(AT, val);
    __ andr(dst, src, AT);
  %}
  ins_pipe( ialu_regI_regI );
%}
// AND with an immediate that fits andi's 16-bit zero-extended field.
instruct andI_Reg_imm_0_65535(mRegI dst, mRegI src1, immI_0_65535 src2) %{
  match(Set dst (AndI src1 src2));
  ins_cost(60);

  format %{ "and $dst, $src1, $src2 #@andI_Reg_imm_0_65535" %}
  ins_encode %{
    Register dst = $dst$$Register;
    Register src = $src1$$Register;
    int val = $src2$$constant;

    __ andi(dst, src, val);
  %}
  ins_pipe( ialu_regI_regI );
%}
// AND with a contiguous low-bit mask (2^size - 1): use ext to extract the
// low `size` bits, avoiding a constant materialization.
instruct andI_Reg_immI_nonneg_mask(mRegI dst, mRegI src1, immI_nonneg_mask mask) %{
  match(Set dst (AndI src1 mask));
  ins_cost(60);

  format %{ "and $dst, $src1, $mask #@andI_Reg_immI_nonneg_mask" %}
  ins_encode %{
    Register dst = $dst$$Register;
    Register src = $src1$$Register;
    // is_int_mask returns the width of the mask in bits.
    int size = Assembler::is_int_mask($mask$$constant);

    __ ext(dst, src, 0, size);
  %}
  ins_pipe( ialu_regI_regI );
%}
// Long AND with a contiguous low-bit mask: dext extracts the low bits.
instruct andL_Reg_immL_nonneg_mask(mRegL dst, mRegL src1, immL_nonneg_mask mask) %{
  match(Set dst (AndL src1 mask));
  ins_cost(60);

  format %{ "and $dst, $src1, $mask #@andL_Reg_immL_nonneg_mask" %}
  ins_encode %{
    Register dst = $dst$$Register;
    Register src = $src1$$Register;
    // is_jlong_mask returns the width of the mask in bits.
    int size = Assembler::is_jlong_mask($mask$$constant);

    __ dext(dst, src, 0, size);
  %}
  ins_pipe( ialu_regI_regI );
%}
// XOR with an immediate that fits xori's 16-bit zero-extended field.
instruct xorI_Reg_imm_0_65535(mRegI dst, mRegI src1, immI_0_65535 src2) %{
  match(Set dst (XorI src1 src2));
  ins_cost(60);

  format %{ "xori $dst, $src1, $src2 #@xorI_Reg_imm_0_65535" %}
  ins_encode %{
    Register dst = $dst$$Register;
    Register src = $src1$$Register;
    int val = $src2$$constant;

    __ xori(dst, src, val);
  %}
  ins_pipe( ialu_regI_regI );
%}
// Bitwise NOT (x ^ -1): gsorn(dst, R0, src) computes R0 | ~src == ~src.
instruct xorI_Reg_immI_M1(mRegI dst, mRegI src1, immI_M1 M1) %{
  match(Set dst (XorI src1 M1));
  predicate(UseLoongsonISA);
  ins_cost(60);

  format %{ "xor $dst, $src1, $M1 #@xorI_Reg_immI_M1" %}
  ins_encode %{
    Register dst = $dst$$Register;
    Register src = $src1$$Register;

    __ gsorn(dst, R0, src);
  %}
  ins_pipe( ialu_regI_regI );
%}
// Bitwise NOT of a narrowed long: ~((int)src1).
// NOTE(review): gsorn complements all 64 bits of the long source without a
// narrowing sign-extension — this presumably relies on src1 already holding
// a sign-extended 32-bit value; confirm that invariant for ConvL2I inputs.
instruct xorL2I_Reg_immI_M1(mRegI dst, mRegL src1, immI_M1 M1) %{
  match(Set dst (XorI (ConvL2I src1) M1));
  predicate(UseLoongsonISA);
  ins_cost(60);

  format %{ "xor $dst, $src1, $M1 #@xorL2I_Reg_immI_M1" %}
  ins_encode %{
    Register dst = $dst$$Register;
    Register src = $src1$$Register;

    __ gsorn(dst, R0, src);
  %}
  ins_pipe( ialu_regI_regI );
%}
// Long XOR with an immediate that fits xori's 16-bit zero-extended field.
instruct xorL_Reg_imm_0_65535(mRegL dst, mRegL src1, immL_0_65535 src2) %{
  match(Set dst (XorL src1 src2));
  ins_cost(60);

  format %{ "xori $dst, $src1, $src2 #@xorL_Reg_imm_0_65535" %}
  ins_encode %{
    Register dst = $dst$$Register;
    Register src = $src1$$Register;
    int val = $src2$$constant;

    __ xori(dst, src, val);
  %}
  ins_pipe( ialu_regI_regI );
%}
10213 /*
10214 instruct xorL_Reg_immL_M1(mRegL dst, mRegL src1, immL_M1 M1) %{
10215 match(Set dst (XorL src1 M1));
10216 predicate(UseLoongsonISA);
10217 ins_cost(60);
10219 format %{ "xor $dst, $src1, $M1 #@xorL_Reg_immL_M1" %}
10220 ins_encode %{
10221 Register dst = $dst$$Register;
10222 Register src = $src1$$Register;
10224 __ gsorn(dst, R0, src);
10225 %}
10226 ins_pipe( ialu_regI_regI );
10227 %}
10228 */
// Fold (0xFF & LoadB mem) into a single unsigned byte load.
// Fix: the format string said "lhu" (load half unsigned) but the encoding
// is load_UB_enc, i.e. an lbu — correct the disassembly text.
instruct lbu_and_lmask(mRegI dst, memory mem, immI_255 mask) %{
  match(Set dst (AndI mask (LoadB mem)));
  ins_cost(60);

  format %{ "lbu $dst, $mem #@lbu_and_lmask" %}
  ins_encode(load_UB_enc(dst, mem));
  ins_pipe( ialu_loadI );
%}
// Fold (LoadB mem & 0xFF) into a single unsigned byte load (mirror of
// lbu_and_lmask with the mask on the right).
// Fix: the format string said "lhu" but the encoding emits lbu — correct
// the disassembly text.
instruct lbu_and_rmask(mRegI dst, memory mem, immI_255 mask) %{
  match(Set dst (AndI (LoadB mem) mask));
  ins_cost(60);

  format %{ "lbu $dst, $mem #@lbu_and_rmask" %}
  ins_encode(load_UB_enc(dst, mem));
  ins_pipe( ialu_loadI );
%}
// Integer AND, register-register.
instruct andI_Reg_Reg(mRegI dst, mRegI src1, mRegI src2) %{
  match(Set dst (AndI src1 src2));

  format %{ "and $dst, $src1, $src2 #@andI_Reg_Reg" %}
  ins_encode %{
    Register dst = $dst$$Register;
    Register src1 = $src1$$Register;
    Register src2 = $src2$$Register;
    __ andr(dst, src1, src2);
  %}
  ins_pipe( ialu_regI_regI );
%}
// Fold src1 & ~src2 into Loongson's and-not: gsandn(d, a, b) = a & ~b.
instruct andnI_Reg_nReg(mRegI dst, mRegI src1, mRegI src2, immI_M1 M1) %{
  match(Set dst (AndI src1 (XorI src2 M1)));
  predicate(UseLoongsonISA);

  format %{ "andn $dst, $src1, $src2 #@andnI_Reg_nReg" %}
  ins_encode %{
    Register dst = $dst$$Register;
    Register src1 = $src1$$Register;
    Register src2 = $src2$$Register;

    __ gsandn(dst, src1, src2);
  %}
  ins_pipe( ialu_regI_regI );
%}
// Fold src1 | ~src2 into Loongson's or-not: gsorn(d, a, b) = a | ~b.
instruct ornI_Reg_nReg(mRegI dst, mRegI src1, mRegI src2, immI_M1 M1) %{
  match(Set dst (OrI src1 (XorI src2 M1)));
  predicate(UseLoongsonISA);

  format %{ "orn $dst, $src1, $src2 #@ornI_Reg_nReg" %}
  ins_encode %{
    Register dst = $dst$$Register;
    Register src1 = $src1$$Register;
    Register src2 = $src2$$Register;

    __ gsorn(dst, src1, src2);
  %}
  ins_pipe( ialu_regI_regI );
%}
// Fold ~src1 & src2 into gsandn with operands swapped: src2 & ~src1.
instruct andnI_nReg_Reg(mRegI dst, mRegI src1, mRegI src2, immI_M1 M1) %{
  match(Set dst (AndI (XorI src1 M1) src2));
  predicate(UseLoongsonISA);

  format %{ "andn $dst, $src2, $src1 #@andnI_nReg_Reg" %}
  ins_encode %{
    Register dst = $dst$$Register;
    Register src1 = $src1$$Register;
    Register src2 = $src2$$Register;

    __ gsandn(dst, src2, src1);
  %}
  ins_pipe( ialu_regI_regI );
%}
// Fold ~src1 | src2 into gsorn with operands swapped: src2 | ~src1.
instruct ornI_nReg_Reg(mRegI dst, mRegI src1, mRegI src2, immI_M1 M1) %{
  match(Set dst (OrI (XorI src1 M1) src2));
  predicate(UseLoongsonISA);

  format %{ "orn $dst, $src2, $src1 #@ornI_nReg_Reg" %}
  ins_encode %{
    Register dst = $dst$$Register;
    Register src1 = $src1$$Register;
    Register src2 = $src2$$Register;

    __ gsorn(dst, src2, src1);
  %}
  ins_pipe( ialu_regI_regI );
%}
// And Long Register with Register
instruct andL_Reg_Reg(mRegL dst, mRegL src1, mRegL src2) %{
  match(Set dst (AndL src1 src2));
  format %{ "AND $dst, $src1, $src2 @ andL_Reg_Reg\n\t" %}
  ins_encode %{
    Register dst_reg = as_Register($dst$$reg);
    Register src1_reg = as_Register($src1$$reg);
    Register src2_reg = as_Register($src2$$reg);

    __ andr(dst_reg, src1_reg, src2_reg);
  %}
  ins_pipe( ialu_regL_regL );
%}
// Long AND with an int operand widened by ConvI2L; uses the int register
// directly (int values are held sign-extended on MIPS64).
instruct andL_Reg_Reg_convI2L(mRegL dst, mRegL src1, mRegI src2) %{
  match(Set dst (AndL src1 (ConvI2L src2)));
  format %{ "AND $dst, $src1, $src2 @ andL_Reg_Reg_convI2L\n\t" %}
  ins_encode %{
    Register dst_reg = as_Register($dst$$reg);
    Register src1_reg = as_Register($src1$$reg);
    Register src2_reg = as_Register($src2$$reg);

    __ andr(dst_reg, src1_reg, src2_reg);
  %}
  ins_pipe( ialu_regL_regL );
%}
// Long AND with an immediate fitting andi's 16-bit zero-extended field.
instruct andL_Reg_imm_0_65535(mRegL dst, mRegL src1, immL_0_65535 src2) %{
  match(Set dst (AndL src1 src2));
  ins_cost(60);

  format %{ "and $dst, $src1, $src2 #@andL_Reg_imm_0_65535" %}
  ins_encode %{
    Register dst = $dst$$Register;
    Register src = $src1$$Register;
    long val = $src2$$constant;

    __ andi(dst, src, val);
  %}
  ins_pipe( ialu_regI_regI );
%}
// (int)(src1 & imm16): andi already zeroes the high bits, so the ConvL2I
// narrowing is free and no extra sign-extension is needed.
instruct andL2I_Reg_imm_0_65535(mRegI dst, mRegL src1, immL_0_65535 src2) %{
  match(Set dst (ConvL2I (AndL src1 src2)));
  ins_cost(60);

  format %{ "and $dst, $src1, $src2 #@andL2I_Reg_imm_0_65535" %}
  ins_encode %{
    Register dst = $dst$$Register;
    Register src = $src1$$Register;
    long val = $src2$$constant;

    __ andi(dst, src, val);
  %}
  ins_pipe( ialu_regI_regI );
%}
10378 /*
10379 instruct andnL_Reg_nReg(mRegL dst, mRegL src1, mRegL src2, immL_M1 M1) %{
10380 match(Set dst (AndL src1 (XorL src2 M1)));
10381 predicate(UseLoongsonISA);
10383 format %{ "andn $dst, $src1, $src2 #@andnL_Reg_nReg" %}
10384 ins_encode %{
10385 Register dst = $dst$$Register;
10386 Register src1 = $src1$$Register;
10387 Register src2 = $src2$$Register;
10389 __ gsandn(dst, src1, src2);
10390 %}
10391 ins_pipe( ialu_regI_regI );
10392 %}
10393 */
10395 /*
10396 instruct ornL_Reg_nReg(mRegL dst, mRegL src1, mRegL src2, immL_M1 M1) %{
10397 match(Set dst (OrL src1 (XorL src2 M1)));
10398 predicate(UseLoongsonISA);
10400 format %{ "orn $dst, $src1, $src2 #@ornL_Reg_nReg" %}
10401 ins_encode %{
10402 Register dst = $dst$$Register;
10403 Register src1 = $src1$$Register;
10404 Register src2 = $src2$$Register;
10406 __ gsorn(dst, src1, src2);
10407 %}
10408 ins_pipe( ialu_regI_regI );
10409 %}
10410 */
10412 /*
10413 instruct andnL_nReg_Reg(mRegL dst, mRegL src1, mRegL src2, immL_M1 M1) %{
10414 match(Set dst (AndL (XorL src1 M1) src2));
10415 predicate(UseLoongsonISA);
10417 format %{ "andn $dst, $src2, $src1 #@andnL_nReg_Reg" %}
10418 ins_encode %{
10419 Register dst = $dst$$Register;
10420 Register src1 = $src1$$Register;
10421 Register src2 = $src2$$Register;
10423 __ gsandn(dst, src2, src1);
10424 %}
10425 ins_pipe( ialu_regI_regI );
10426 %}
10427 */
10429 /*
10430 instruct ornL_nReg_Reg(mRegL dst, mRegL src1, mRegL src2, immL_M1 M1) %{
10431 match(Set dst (OrL (XorL src1 M1) src2));
10432 predicate(UseLoongsonISA);
10434 format %{ "orn $dst, $src2, $src1 #@ornL_nReg_Reg" %}
10435 ins_encode %{
10436 Register dst = $dst$$Register;
10437 Register src1 = $src1$$Register;
10438 Register src2 = $src2$$Register;
10440 __ gsorn(dst, src2, src1);
10441 %}
10442 ins_pipe( ialu_regI_regI );
10443 %}
10444 */
// dst &= -8: clear bits 0..2 in place by inserting 3 zero bits from R0.
instruct andL_Reg_immL_M8(mRegL dst, immL_M8 M8) %{
  match(Set dst (AndL dst M8));
  ins_cost(60);

  format %{ "and $dst, $dst, $M8 #@andL_Reg_immL_M8" %}
  ins_encode %{
    Register dst = $dst$$Register;

    __ dins(dst, R0, 0, 3);
  %}
  ins_pipe( ialu_regI_regI );
%}
// dst &= -5 (…11111011): clear bit 2 only by inserting 1 zero bit at pos 2.
instruct andL_Reg_immL_M5(mRegL dst, immL_M5 M5) %{
  match(Set dst (AndL dst M5));
  ins_cost(60);

  format %{ "and $dst, $dst, $M5 #@andL_Reg_immL_M5" %}
  ins_encode %{
    Register dst = $dst$$Register;

    __ dins(dst, R0, 2, 1);
  %}
  ins_pipe( ialu_regI_regI );
%}
// dst &= -7 (…11111001): clear bits 1..2 by inserting 2 zero bits at pos 1.
instruct andL_Reg_immL_M7(mRegL dst, immL_M7 M7) %{
  match(Set dst (AndL dst M7));
  ins_cost(60);

  format %{ "and $dst, $dst, $M7 #@andL_Reg_immL_M7" %}
  ins_encode %{
    Register dst = $dst$$Register;

    __ dins(dst, R0, 1, 2);
  %}
  ins_pipe( ialu_regI_regI );
%}
// dst &= -4: clear bits 0..1 by inserting 2 zero bits at pos 0.
instruct andL_Reg_immL_M4(mRegL dst, immL_M4 M4) %{
  match(Set dst (AndL dst M4));
  ins_cost(60);

  format %{ "and $dst, $dst, $M4 #@andL_Reg_immL_M4" %}
  ins_encode %{
    Register dst = $dst$$Register;

    __ dins(dst, R0, 0, 2);
  %}
  ins_pipe( ialu_regI_regI );
%}
// dst &= -121 (…10000111): clear bits 3..6 by inserting 4 zero bits at pos 3.
instruct andL_Reg_immL_M121(mRegL dst, immL_M121 M121) %{
  match(Set dst (AndL dst M121));
  ins_cost(60);

  format %{ "and $dst, $dst, $M121 #@andL_Reg_immL_M121" %}
  ins_encode %{
    Register dst = $dst$$Register;

    __ dins(dst, R0, 3, 4);
  %}
  ins_pipe( ialu_regI_regI );
%}
// Or Long Register with Register
instruct orL_Reg_Reg(mRegL dst, mRegL src1, mRegL src2) %{
  match(Set dst (OrL src1 src2));
  format %{ "OR $dst, $src1, $src2 @ orL_Reg_Reg\t" %}
  ins_encode %{
    Register dst_reg = $dst$$Register;
    Register src1_reg = $src1$$Register;
    Register src2_reg = $src2$$Register;

    __ orr(dst_reg, src1_reg, src2_reg);
  %}
  ins_pipe( ialu_regL_regL );
%}
// Long OR where the left operand is a pointer reinterpreted as bits
// (CastP2X); the raw register is used unchanged.
instruct orL_Reg_P2XReg(mRegL dst, mRegP src1, mRegL src2) %{
  match(Set dst (OrL (CastP2X src1) src2));
  format %{ "OR $dst, $src1, $src2 @ orL_Reg_P2XReg\t" %}
  ins_encode %{
    Register dst_reg = $dst$$Register;
    Register src1_reg = $src1$$Register;
    Register src2_reg = $src2$$Register;

    __ orr(dst_reg, src1_reg, src2_reg);
  %}
  ins_pipe( ialu_regL_regL );
%}
// Xor Long Register with Register
instruct xorL_Reg_Reg(mRegL dst, mRegL src1, mRegL src2) %{
  match(Set dst (XorL src1 src2));
  format %{ "XOR $dst, $src1, $src2 @ xorL_Reg_Reg\t" %}
  ins_encode %{
    Register dst_reg = as_Register($dst$$reg);
    Register src1_reg = as_Register($src1$$reg);
    Register src2_reg = as_Register($src2$$reg);

    __ xorr(dst_reg, src1_reg, src2_reg);
  %}
  ins_pipe( ialu_regL_regL );
%}
// Shift Left by 8-bit immediate
// Int shift-left by constant; sll encodes a 5-bit shift amount.
instruct salI_Reg_imm(mRegI dst, mRegI src, immI8 shift) %{
  match(Set dst (LShiftI src shift));

  format %{ "SHL $dst, $src, $shift #@salI_Reg_imm" %}
  ins_encode %{
    Register src = $src$$Register;
    Register dst = $dst$$Register;
    int shamt = $shift$$constant;

    __ sll(dst, src, shamt);
  %}
  ins_pipe( ialu_regI_regI );
%}
// Int shift-left of a narrowed long: ((int)src) << shift.  sll operates on
// the low 32 bits and sign-extends, which performs the narrowing for free.
// NOTE(review): salI_RegL2I_imm further below matches the identical ideal
// pattern — presumably one of the two is redundant; confirm which ADLC picks.
instruct salL2I_Reg_imm(mRegI dst, mRegL src, immI8 shift) %{
  match(Set dst (LShiftI (ConvL2I src) shift));

  format %{ "SHL $dst, $src, $shift #@salL2I_Reg_imm" %}
  ins_encode %{
    Register src = $src$$Register;
    Register dst = $dst$$Register;
    int shamt = $shift$$constant;

    __ sll(dst, src, shamt);
  %}
  ins_pipe( ialu_regI_regI );
%}
// (src << 16) & 0xFFFF0000: the mask is implied by the 16-bit shift, so a
// single sll suffices.
instruct salI_Reg_imm_and_M65536(mRegI dst, mRegI src, immI_16 shift, immI_M65536 mask) %{
  match(Set dst (AndI (LShiftI src shift) mask));

  format %{ "SHL $dst, $src, $shift #@salI_Reg_imm_and_M65536" %}
  ins_encode %{
    Register src = $src$$Register;
    Register dst = $dst$$Register;

    __ sll(dst, src, 16);
  %}
  ins_pipe( ialu_regI_regI );
%}
// short-narrowing of (src & 7): since the masked value fits in 3 bits, the
// <<16 / >>16 sign-extension round-trip is a no-op and collapses to andi.
instruct land7_2_s(mRegI dst, mRegL src, immL7 seven, immI_16 sixteen)
%{
  match(Set dst (RShiftI (LShiftI (ConvL2I (AndL src seven)) sixteen) sixteen));

  format %{ "andi $dst, $src, 7\t# @land7_2_s" %}
  ins_encode %{
    Register src = $src$$Register;
    Register dst = $dst$$Register;

    __ andi(dst, src, 7);
  %}
  ins_pipe(ialu_regI_regI);
%}
// short-narrowing of (src1 | imm15): the immediate is non-negative and
// < 2^15, so the <<16 / >>16 round-trip is a no-op; collapse to ori.
instruct ori2s(mRegI dst, mRegI src1, immI_0_32767 src2, immI_16 sixteen)
%{
  match(Set dst (RShiftI (LShiftI (OrI src1 src2) sixteen) sixteen));

  format %{ "ori $dst, $src1, $src2\t# @ori2s" %}
  ins_encode %{
    Register src = $src1$$Register;
    int val = $src2$$constant;
    Register dst = $dst$$Register;

    __ ori(dst, src, val);
  %}
  ins_pipe(ialu_regI_regI);
%}
// Logical Shift Right by 16, followed by Arithmetic Shift Left by 16.
// This idiom is used by the compiler the i2s bytecode.
// Collapsed into a single sign-extend-halfword (seh).
instruct i2s(mRegI dst, mRegI src, immI_16 sixteen)
%{
  match(Set dst (RShiftI (LShiftI src sixteen) sixteen));

  format %{ "i2s $dst, $src\t# @i2s" %}
  ins_encode %{
    Register src = $src$$Register;
    Register dst = $dst$$Register;

    __ seh(dst, src);
  %}
  ins_pipe(ialu_regI_regI);
%}
// Logical Shift Right by 24, followed by Arithmetic Shift Left by 24.
// This idiom is used by the compiler for the i2b bytecode.
// Collapsed into a single sign-extend-byte (seb).
instruct i2b(mRegI dst, mRegI src, immI_24 twentyfour)
%{
  match(Set dst (RShiftI (LShiftI src twentyfour) twentyfour));

  format %{ "i2b $dst, $src\t# @i2b" %}
  ins_encode %{
    Register src = $src$$Register;
    Register dst = $dst$$Register;

    __ seb(dst, src);
  %}
  ins_pipe(ialu_regI_regI);
%}
// ((int)src) << shift — see salL2I_Reg_imm above, which matches the same
// ideal pattern with an identical encoding.
instruct salI_RegL2I_imm(mRegI dst, mRegL src, immI8 shift) %{
  match(Set dst (LShiftI (ConvL2I src) shift));

  format %{ "SHL $dst, $src, $shift #@salI_RegL2I_imm" %}
  ins_encode %{
    Register src = $src$$Register;
    Register dst = $dst$$Register;
    int shamt = $shift$$constant;

    __ sll(dst, src, shamt);
  %}
  ins_pipe( ialu_regI_regI );
%}
// Shift Left by 8-bit immediate
// Int shift-left by a register amount (sllv uses the low 5 bits of shamt).
instruct salI_Reg_Reg(mRegI dst, mRegI src, mRegI shift) %{
  match(Set dst (LShiftI src shift));

  format %{ "SHL $dst, $src, $shift #@salI_Reg_Reg" %}
  ins_encode %{
    Register src = $src$$Register;
    Register dst = $dst$$Register;
    Register shamt = $shift$$Register;
    __ sllv(dst, src, shamt);
  %}
  ins_pipe( ialu_regI_regI );
%}
// Shift Left Long
// Long shift-left by constant.  dsll encodes shift amounts 0..31; amounts
// 32..63 need dsll32 with (amount - 32).  The is_simm(shamt, 5) fast path
// covers 0..15; 16..63 fall through to the low(shamt, 6) split, which is
// functionally equivalent for the whole 0..63 range.
instruct salL_Reg_imm(mRegL dst, mRegL src, immI8 shift) %{
  //predicate(UseNewLongLShift);
  match(Set dst (LShiftL src shift));
  ins_cost(100);
  format %{ "salL $dst, $src, $shift @ salL_Reg_imm" %}
  ins_encode %{
    Register src_reg = as_Register($src$$reg);
    Register dst_reg = as_Register($dst$$reg);
    int shamt = $shift$$constant;

    if (__ is_simm(shamt, 5))
      __ dsll(dst_reg, src_reg, shamt);
    else
    {
      int sa = Assembler::low(shamt, 6);
      if (sa < 32) {
        __ dsll(dst_reg, src_reg, sa);
      } else {
        __ dsll32(dst_reg, src_reg, sa - 32);
      }
    }
  %}
  ins_pipe( ialu_regL_regL );
%}
// ((long)src) << shift with an int source (already sign-extended in its
// register on MIPS64).  Same dsll/dsll32 split as salL_Reg_imm.
// NOTE(review): salL_convI2L_Reg_imm below matches the identical ideal
// pattern — presumably one of the two is redundant.
instruct salL_RegI2L_imm(mRegL dst, mRegI src, immI8 shift) %{
  //predicate(UseNewLongLShift);
  match(Set dst (LShiftL (ConvI2L src) shift));
  ins_cost(100);
  format %{ "salL $dst, $src, $shift @ salL_RegI2L_imm" %}
  ins_encode %{
    Register src_reg = as_Register($src$$reg);
    Register dst_reg = as_Register($dst$$reg);
    int shamt = $shift$$constant;

    if (__ is_simm(shamt, 5))
      __ dsll(dst_reg, src_reg, shamt);
    else
    {
      int sa = Assembler::low(shamt, 6);
      if (sa < 32) {
        __ dsll(dst_reg, src_reg, sa);
      } else {
        __ dsll32(dst_reg, src_reg, sa - 32);
      }
    }
  %}
  ins_pipe( ialu_regL_regL );
%}
// Shift Left Long
// Long shift-left by a register amount (dsllv uses the low 6 bits).
instruct salL_Reg_Reg(mRegL dst, mRegL src, mRegI shift) %{
  //predicate(UseNewLongLShift);
  match(Set dst (LShiftL src shift));
  ins_cost(100);
  format %{ "salL $dst, $src, $shift @ salL_Reg_Reg" %}
  ins_encode %{
    Register src_reg = as_Register($src$$reg);
    Register dst_reg = as_Register($dst$$reg);

    __ dsllv(dst_reg, src_reg, $shift$$Register);
  %}
  ins_pipe( ialu_regL_regL );
%}
// ((long)src) << shift — duplicate of salL_RegI2L_imm above (same ideal
// pattern, same emission, braces-only formatting difference).
instruct salL_convI2L_Reg_imm(mRegL dst, mRegI src, immI8 shift) %{
  match(Set dst (LShiftL (ConvI2L src) shift));
  ins_cost(100);
  format %{ "salL $dst, $src, $shift @ salL_convI2L_Reg_imm" %}
  ins_encode %{
    Register src_reg = as_Register($src$$reg);
    Register dst_reg = as_Register($dst$$reg);
    int shamt = $shift$$constant;

    if (__ is_simm(shamt, 5)) {
      __ dsll(dst_reg, src_reg, shamt);
    } else {
      int sa = Assembler::low(shamt, 6);
      if (sa < 32) {
        __ dsll(dst_reg, src_reg, sa);
      } else {
        __ dsll32(dst_reg, src_reg, sa - 32);
      }
    }
  %}
  ins_pipe( ialu_regL_regL );
%}
10774 // Shift Right Long
10775 instruct sarL_Reg_imm(mRegL dst, mRegL src, immI8 shift) %{
10776 match(Set dst (RShiftL src shift));
10777 ins_cost(100);
10778 format %{ "sarL $dst, $src, $shift @ sarL_Reg_imm" %}
10779 ins_encode %{
10780 Register src_reg = as_Register($src$$reg);
10781 Register dst_reg = as_Register($dst$$reg);
10782 int shamt = ($shift$$constant & 0x3f);
10783 if (__ is_simm(shamt, 5))
10784 __ dsra(dst_reg, src_reg, shamt);
10785 else {
10786 int sa = Assembler::low(shamt, 6);
10787 if (sa < 32) {
10788 __ dsra(dst_reg, src_reg, sa);
10789 } else {
10790 __ dsra32(dst_reg, src_reg, sa - 32);
10791 }
10792 }
10793 %}
10794 ins_pipe( ialu_regL_regL );
10795 %}
// (int)(src >> shift) for shift in 32..63: dsra32 leaves a properly
// sign-extended 32-bit value, so the ConvL2I is free.
instruct sarL2I_Reg_immI_32_63(mRegI dst, mRegL src, immI_32_63 shift) %{
  match(Set dst (ConvL2I (RShiftL src shift)));
  ins_cost(100);
  format %{ "sarL $dst, $src, $shift @ sarL2I_Reg_immI_32_63" %}
  ins_encode %{
    Register src_reg = as_Register($src$$reg);
    Register dst_reg = as_Register($dst$$reg);
    int shamt = $shift$$constant;

    __ dsra32(dst_reg, src_reg, shamt - 32);
  %}
  ins_pipe( ialu_regL_regL );
%}
// Shift Right Long arithmetically
// By a register amount (dsrav uses the low 6 bits, matching Java).
instruct sarL_Reg_Reg(mRegL dst, mRegL src, mRegI shift) %{
  //predicate(UseNewLongLShift);
  match(Set dst (RShiftL src shift));
  ins_cost(100);
  format %{ "sarL $dst, $src, $shift @ sarL_Reg_Reg" %}
  ins_encode %{
    Register src_reg = as_Register($src$$reg);
    Register dst_reg = as_Register($dst$$reg);

    __ dsrav(dst_reg, src_reg, $shift$$Register);
  %}
  ins_pipe( ialu_regL_regL );
%}
// Shift Right Long logically
// By a register amount (dsrlv uses the low 6 bits).
instruct slrL_Reg_Reg(mRegL dst, mRegL src, mRegI shift) %{
  match(Set dst (URShiftL src shift));
  ins_cost(100);
  format %{ "slrL $dst, $src, $shift @ slrL_Reg_Reg" %}
  ins_encode %{
    Register src_reg = as_Register($src$$reg);
    Register dst_reg = as_Register($dst$$reg);

    __ dsrlv(dst_reg, src_reg, $shift$$Register);
  %}
  ins_pipe( ialu_regL_regL );
%}
// Long logical shift-right by constant 0..31 — fits dsrl directly.
instruct slrL_Reg_immI_0_31(mRegL dst, mRegL src, immI_0_31 shift) %{
  match(Set dst (URShiftL src shift));
  ins_cost(80);
  format %{ "slrL $dst, $src, $shift @ slrL_Reg_immI_0_31" %}
  ins_encode %{
    Register src_reg = as_Register($src$$reg);
    Register dst_reg = as_Register($dst$$reg);
    int shamt = $shift$$constant;

    __ dsrl(dst_reg, src_reg, shamt);
  %}
  ins_pipe( ialu_regL_regL );
%}
// ((int)(src >>> shift)) & 0x7FFFFFFF: a single dext extracting 31 bits
// starting at `shift` performs the shift, the mask, and the narrowing.
instruct slrL_Reg_immI_0_31_and_max_int(mRegI dst, mRegL src, immI_0_31 shift, immI_MaxI max_int) %{
  match(Set dst (AndI (ConvL2I (URShiftL src shift)) max_int));
  ins_cost(80);
  format %{ "dext $dst, $src, $shift, 31 @ slrL_Reg_immI_0_31_and_max_int" %}
  ins_encode %{
    Register src_reg = as_Register($src$$reg);
    Register dst_reg = as_Register($dst$$reg);
    int shamt = $shift$$constant;

    __ dext(dst_reg, src_reg, shamt, 31);
  %}
  ins_pipe( ialu_regL_regL );
%}
// Logical shift-right of a pointer's raw bits (CastP2X) by constant 0..31.
instruct slrL_P2XReg_immI_0_31(mRegL dst, mRegP src, immI_0_31 shift) %{
  match(Set dst (URShiftL (CastP2X src) shift));
  ins_cost(80);
  format %{ "slrL $dst, $src, $shift @ slrL_P2XReg_immI_0_31" %}
  ins_encode %{
    Register src_reg = as_Register($src$$reg);
    Register dst_reg = as_Register($dst$$reg);
    int shamt = $shift$$constant;

    __ dsrl(dst_reg, src_reg, shamt);
  %}
  ins_pipe( ialu_regL_regL );
%}
// Long logical shift-right by constant 32..63 — dsrl32 with (amount - 32).
instruct slrL_Reg_immI_32_63(mRegL dst, mRegL src, immI_32_63 shift) %{
  match(Set dst (URShiftL src shift));
  ins_cost(80);
  format %{ "slrL $dst, $src, $shift @ slrL_Reg_immI_32_63" %}
  ins_encode %{
    Register src_reg = as_Register($src$$reg);
    Register dst_reg = as_Register($dst$$reg);
    int shamt = $shift$$constant;

    __ dsrl32(dst_reg, src_reg, shamt - 32);
  %}
  ins_pipe( ialu_regL_regL );
%}
10896 instruct slrL_Reg_immI_convL2I(mRegI dst, mRegL src, immI_32_63 shift) %{
10897 match(Set dst (ConvL2I (URShiftL src shift)));
10898 predicate(n->in(1)->in(2)->get_int() > 32);
10899 ins_cost(80);
10900 format %{ "slrL $dst, $src, $shift @ slrL_Reg_immI_convL2I" %}
10901 ins_encode %{
10902 Register src_reg = as_Register($src$$reg);
10903 Register dst_reg = as_Register($dst$$reg);
10904 int shamt = $shift$$constant;
10906 __ dsrl32(dst_reg, src_reg, shamt - 32);
10907 %}
10908 ins_pipe( ialu_regL_regL );
10909 %}
10911 instruct slrL_P2XReg_immI_32_63(mRegL dst, mRegP src, immI_32_63 shift) %{
10912 match(Set dst (URShiftL (CastP2X src) shift));
10913 ins_cost(80);
10914 format %{ "slrL $dst, $src, $shift @ slrL_P2XReg_immI_32_63" %}
10915 ins_encode %{
10916 Register src_reg = as_Register($src$$reg);
10917 Register dst_reg = as_Register($dst$$reg);
10918 int shamt = $shift$$constant;
10920 __ dsrl32(dst_reg, src_reg, shamt - 32);
10921 %}
10922 ins_pipe( ialu_regL_regL );
10923 %}
10925 // Xor Instructions
10926 // Xor Register with Register
// 32-bit XOR. The trailing sll-by-0 re-sign-extends the low 32 bits so the
// register stays in the canonical MIPS64 form for an int value.
10927 instruct xorI_Reg_Reg(mRegI dst, mRegI src1, mRegI src2) %{
10928 match(Set dst (XorI src1 src2));
10930 format %{ "XOR $dst, $src1, $src2 #@xorI_Reg_Reg" %}
10932 ins_encode %{
10933 Register dst = $dst$$Register;
10934 Register src1 = $src1$$Register;
10935 Register src2 = $src2$$Register;
10936 __ xorr(dst, src1, src2);
10937 __ sll(dst, dst, 0); /* long -> int */
10938 %}
10940 ins_pipe( ialu_regI_regI );
10941 %}
10943 // Or Instructions
10944 // Or Register with Register
// 32-bit OR; both inputs are already canonical ints, so no re-extension
// is performed here (unlike xorI_Reg_Reg above).
10945 instruct orI_Reg_Reg(mRegI dst, mRegI src1, mRegI src2) %{
10946 match(Set dst (OrI src1 src2));
10948 format %{ "OR $dst, $src1, $src2 #@orI_Reg_Reg" %}
10949 ins_encode %{
10950 Register dst = $dst$$Register;
10951 Register src1 = $src1$$Register;
10952 Register src2 = $src2$$Register;
10953 __ orr(dst, src1, src2);
10954 %}
10956 ins_pipe( ialu_regI_regI );
10957 %}
// Recognizes (src >>> rshift) | ((src & 1) << lshift) with rshift+lshift == 32
// (enforced by the predicate) and lowers it to a rotate-right by 1 followed
// by a logical shift right by rshift-1, skipping the shift when rshift == 1.
10959 instruct rotI_shr_logical_Reg(mRegI dst, mRegI src, immI_0_31 rshift, immI_0_31 lshift, immI_1 one) %{
10960 match(Set dst (OrI (URShiftI src rshift) (LShiftI (AndI src one) lshift)));
10961 predicate(32 == ((n->in(1)->in(2)->get_int() + n->in(2)->in(2)->get_int())));
10963 format %{ "rotr $dst, $src, 1 ...\n\t"
10964 "srl $dst, $dst, ($rshift-1) @ rotI_shr_logical_Reg" %}
10965 ins_encode %{
10966 Register dst = $dst$$Register;
10967 Register src = $src$$Register;
10968 int rshift = $rshift$$constant;
10970 __ rotr(dst, src, 1);
10971 if (rshift - 1) {
10972 __ srl(dst, dst, rshift - 1);
10973 }
10974 %}
10976 ins_pipe( ialu_regI_regI );
10977 %}
// OR of a long with a pointer reinterpreted as an integral value.
// NOTE(review): the rule matches OrI but declares mRegL/mRegP operands —
// presumably intentional for CastP2X patterns; confirm against the matcher.
10979 instruct orI_Reg_castP2X(mRegL dst, mRegL src1, mRegP src2) %{
10980 match(Set dst (OrI src1 (CastP2X src2)));
10982 format %{ "OR $dst, $src1, $src2 #@orI_Reg_castP2X" %}
10983 ins_encode %{
10984 Register dst = $dst$$Register;
10985 Register src1 = $src1$$Register;
10986 Register src2 = $src2$$Register;
10987 __ orr(dst, src1, src2);
10988 %}
10990 ins_pipe( ialu_regI_regI );
10991 %}
10993 // Logical Shift Right by 8-bit immediate
// 32-bit unsigned shift right by constant; srl keeps the canonical
// sign-extended int form on MIPS64.
10994 instruct shr_logical_Reg_imm(mRegI dst, mRegI src, immI8 shift) %{
10995 match(Set dst (URShiftI src shift));
10996 // effect(KILL cr);
10998 format %{ "SRL $dst, $src, $shift #@shr_logical_Reg_imm" %}
10999 ins_encode %{
11000 Register src = $src$$Register;
11001 Register dst = $dst$$Register;
11002 int shift = $shift$$constant;
11004 __ srl(dst, src, shift);
11005 %}
11006 ins_pipe( ialu_regI_regI );
11007 %}
// (src >>> shift) & mask where mask is a contiguous low-bit mask:
// collapsed to a single ext (bit-field extract). is_int_mask returns the
// width of the mask in bits.
11009 instruct shr_logical_Reg_imm_nonneg_mask(mRegI dst, mRegI src, immI_0_31 shift, immI_nonneg_mask mask) %{
11010 match(Set dst (AndI (URShiftI src shift) mask));
11012 format %{ "ext $dst, $src, $shift, one-bits($mask) #@shr_logical_Reg_imm_nonneg_mask" %}
11013 ins_encode %{
11014 Register src = $src$$Register;
11015 Register dst = $dst$$Register;
11016 int pos = $shift$$constant;
11017 int size = Assembler::is_int_mask($mask$$constant);
11019 __ ext(dst, src, pos, size);
11020 %}
11021 ins_pipe( ialu_regI_regI );
11022 %}
// Rotate-left of an int expressed as (dst << lshift) | (dst >>> rshift).
// The predicate requires lshift + rshift == 0 (mod 32), so a rotate-right
// by rshift is equivalent.
11024 instruct rolI_Reg_immI_0_31(mRegI dst, immI_0_31 lshift, immI_0_31 rshift)
11025 %{
11026 predicate(0 == ((n->in(1)->in(2)->get_int() + n->in(2)->in(2)->get_int()) & 0x1f));
11027 match(Set dst (OrI (LShiftI dst lshift) (URShiftI dst rshift)));
11029 ins_cost(100);
11030 format %{ "rotr $dst, $dst, $rshift #@rolI_Reg_immI_0_31" %}
11031 ins_encode %{
11032 Register dst = $dst$$Register;
11033 int sa = $rshift$$constant;
11035 __ rotr(dst, dst, sa);
11036 %}
11037 ins_pipe( ialu_regI_regI );
11038 %}
// Long rotate-left with rshift in [0,31]: lshift + rshift == 0 (mod 64),
// implemented as a 64-bit rotate-right (drotr).
11040 instruct rolL_Reg_immI_0_31(mRegL dst, immI_32_63 lshift, immI_0_31 rshift)
11041 %{
11042 predicate(0 == ((n->in(1)->in(2)->get_int() + n->in(2)->in(2)->get_int()) & 0x3f));
11043 match(Set dst (OrL (LShiftL dst lshift) (URShiftL dst rshift)));
11045 ins_cost(100);
11046 format %{ "rotr $dst, $dst, $rshift #@rolL_Reg_immI_0_31" %}
11047 ins_encode %{
11048 Register dst = $dst$$Register;
11049 int sa = $rshift$$constant;
11051 __ drotr(dst, dst, sa);
11052 %}
11053 ins_pipe( ialu_regI_regI );
11054 %}
// Long rotate-left with rshift in [32,63]: drotr32 adds 32 to its 5-bit
// immediate, hence 'sa - 32'.
11056 instruct rolL_Reg_immI_32_63(mRegL dst, immI_0_31 lshift, immI_32_63 rshift)
11057 %{
11058 predicate(0 == ((n->in(1)->in(2)->get_int() + n->in(2)->in(2)->get_int()) & 0x3f));
11059 match(Set dst (OrL (LShiftL dst lshift) (URShiftL dst rshift)));
11061 ins_cost(100);
11062 format %{ "rotr $dst, $dst, $rshift #@rolL_Reg_immI_32_63" %}
11063 ins_encode %{
11064 Register dst = $dst$$Register;
11065 int sa = $rshift$$constant;
11067 __ drotr32(dst, dst, sa - 32);
11068 %}
11069 ins_pipe( ialu_regI_regI );
11070 %}
// Rotate-right of an int: same predicate trick as rolI above, but with the
// URShiftI term first in the Or.
11072 instruct rorI_Reg_immI_0_31(mRegI dst, immI_0_31 rshift, immI_0_31 lshift)
11073 %{
11074 predicate(0 == ((n->in(1)->in(2)->get_int() + n->in(2)->in(2)->get_int()) & 0x1f));
11075 match(Set dst (OrI (URShiftI dst rshift) (LShiftI dst lshift)));
11077 ins_cost(100);
11078 format %{ "rotr $dst, $dst, $rshift #@rorI_Reg_immI_0_31" %}
11079 ins_encode %{
11080 Register dst = $dst$$Register;
11081 int sa = $rshift$$constant;
11083 __ rotr(dst, dst, sa);
11084 %}
11085 ins_pipe( ialu_regI_regI );
11086 %}
// Long rotate-right with rshift in [0,31].
11088 instruct rorL_Reg_immI_0_31(mRegL dst, immI_0_31 rshift, immI_32_63 lshift)
11089 %{
11090 predicate(0 == ((n->in(1)->in(2)->get_int() + n->in(2)->in(2)->get_int()) & 0x3f));
11091 match(Set dst (OrL (URShiftL dst rshift) (LShiftL dst lshift)));
11093 ins_cost(100);
11094 format %{ "rotr $dst, $dst, $rshift #@rorL_Reg_immI_0_31" %}
11095 ins_encode %{
11096 Register dst = $dst$$Register;
11097 int sa = $rshift$$constant;
11099 __ drotr(dst, dst, sa);
11100 %}
11101 ins_pipe( ialu_regI_regI );
11102 %}
// Long rotate-right with rshift in [32,63]; drotr32 encodes sa - 32.
11104 instruct rorL_Reg_immI_32_63(mRegL dst, immI_32_63 rshift, immI_0_31 lshift)
11105 %{
11106 predicate(0 == ((n->in(1)->in(2)->get_int() + n->in(2)->in(2)->get_int()) & 0x3f));
11107 match(Set dst (OrL (URShiftL dst rshift) (LShiftL dst lshift)));
11109 ins_cost(100);
11110 format %{ "rotr $dst, $dst, $rshift #@rorL_Reg_immI_32_63" %}
11111 ins_encode %{
11112 Register dst = $dst$$Register;
11113 int sa = $rshift$$constant;
11115 __ drotr32(dst, dst, sa - 32);
11116 %}
11117 ins_pipe( ialu_regI_regI );
11118 %}
11120 // Logical Shift Right
// Variable 32-bit unsigned shift right; srlv uses only the low 5 bits of
// the shift register, matching Java's (shift & 31) semantics.
11121 instruct shr_logical_Reg_Reg(mRegI dst, mRegI src, mRegI shift) %{
11122 match(Set dst (URShiftI src shift));
11124 format %{ "SRL $dst, $src, $shift #@shr_logical_Reg_Reg" %}
11125 ins_encode %{
11126 Register src = $src$$Register;
11127 Register dst = $dst$$Register;
11128 Register shift = $shift$$Register;
11129 __ srlv(dst, src, shift);
11130 %}
11131 ins_pipe( ialu_regI_regI );
11132 %}
// Arithmetic (sign-propagating) 32-bit shift right by constant.
11135 instruct shr_arith_Reg_imm(mRegI dst, mRegI src, immI8 shift) %{
11136 match(Set dst (RShiftI src shift));
11137 // effect(KILL cr);
11139 format %{ "SRA $dst, $src, $shift #@shr_arith_Reg_imm" %}
11140 ins_encode %{
11141 Register src = $src$$Register;
11142 Register dst = $dst$$Register;
11143 int shift = $shift$$constant;
11144 __ sra(dst, src, shift);
11145 %}
11146 ins_pipe( ialu_regI_regI );
11147 %}
// Arithmetic 32-bit shift right by register (low 5 bits of shift used).
11149 instruct shr_arith_Reg_Reg(mRegI dst, mRegI src, mRegI shift) %{
11150 match(Set dst (RShiftI src shift));
11151 // effect(KILL cr);
11153 format %{ "SRA $dst, $src, $shift #@shr_arith_Reg_Reg" %}
11154 ins_encode %{
11155 Register src = $src$$Register;
11156 Register dst = $dst$$Register;
11157 Register shift = $shift$$Register;
11158 __ srav(dst, src, shift);
11159 %}
11160 ins_pipe( ialu_regI_regI );
11161 %}
11163 //----------Convert Int to Boolean---------------------------------------------
// dst = (src != 0) ? 1 : 0.  Load 1, then conditionally zero it with movz.
// When dst aliases src, src is first saved in AT because the daddiu would
// clobber it before the movz test.
11165 instruct convI2B(mRegI dst, mRegI src) %{
11166 match(Set dst (Conv2B src));
11168 ins_cost(100);
11169 format %{ "convI2B $dst, $src @ convI2B" %}
11170 ins_encode %{
11171 Register dst = as_Register($dst$$reg);
11172 Register src = as_Register($src$$reg);
11174 if (dst != src) {
11175 __ daddiu(dst, R0, 1);
11176 __ movz(dst, R0, src);
11177 } else {
11178 __ move(AT, src);
11179 __ daddiu(dst, R0, 1);
11180 __ movz(dst, R0, AT);
11181 }
11182 %}
11184 ins_pipe( ialu_regL_regL );
11185 %}
// int -> long: sll-by-0 sign-extends the low 32 bits to 64; a no-op when
// dst == src since an int register is already canonically sign-extended.
11187 instruct convI2L_reg( mRegL dst, mRegI src) %{
11188 match(Set dst (ConvI2L src));
11190 ins_cost(100);
11191 format %{ "SLL $dst, $src @ convI2L_reg\t" %}
11192 ins_encode %{
11193 Register dst = as_Register($dst$$reg);
11194 Register src = as_Register($src$$reg);
11196 if(dst != src) __ sll(dst, src, 0);
11197 %}
11198 ins_pipe( ialu_regL_regL );
11199 %}
// long -> int: truncate to 32 bits and sign-extend (canonical int form).
11202 instruct convL2I_reg( mRegI dst, mRegL src ) %{
11203 match(Set dst (ConvL2I src));
11205 format %{ "MOV $dst, $src @ convL2I_reg" %}
11206 ins_encode %{
11207 Register dst = as_Register($dst$$reg);
11208 Register src = as_Register($src$$reg);
11210 __ sll(dst, src, 0);
11211 %}
11213 ins_pipe( ialu_regI_regI );
11214 %}
// (long)(int)src in one instruction: sll-by-0 performs the truncate +
// sign-extend in a single step.
11216 instruct convL2I2L_reg( mRegL dst, mRegL src ) %{
11217 match(Set dst (ConvI2L (ConvL2I src)));
11219 format %{ "sll $dst, $src, 0 @ convL2I2L_reg" %}
11220 ins_encode %{
11221 Register dst = as_Register($dst$$reg);
11222 Register src = as_Register($src$$reg);
11224 __ sll(dst, src, 0);
11225 %}
11227 ins_pipe( ialu_regI_regI );
11228 %}
// long -> double: move the 64-bit value into an FPU register, then cvt.d.l.
11230 instruct convL2D_reg( regD dst, mRegL src ) %{
11231 match(Set dst (ConvL2D src));
11232 format %{ "convL2D $dst, $src @ convL2D_reg" %}
11233 ins_encode %{
11234 Register src = as_Register($src$$reg);
11235 FloatRegister dst = as_FloatRegister($dst$$reg);
11237 __ dmtc1(src, dst);
11238 __ cvt_d_l(dst, dst);
11239 %}
11241 ins_pipe( pipe_slow );
11242 %}
// double -> long, fast path: trunc.l.d produces max_long on overflow/NaN.
// If the result is not max_long it is taken as-is; otherwise the code
// disambiguates: src < 0.0 means negative overflow (produce min_long via
// the subtraction below), and a NaN (c.un.d sets the FP condition, movt
// fires) yields 0 per Java semantics.
11244 instruct convD2L_reg_fast( mRegL dst, regD src ) %{
11245 match(Set dst (ConvD2L src));
11246 ins_cost(150);
11247 format %{ "convD2L $dst, $src @ convD2L_reg_fast" %}
11248 ins_encode %{
11249 Register dst = as_Register($dst$$reg);
11250 FloatRegister src = as_FloatRegister($src$$reg);
11252 Label Done;
11254 __ trunc_l_d(F30, src);
11255 // max_long: 0x7fffffffffffffff
11256 // __ set64(AT, 0x7fffffffffffffff);
// AT = max_long built as (unsigned)-1 >> 1, cheaper than set64.
11257 __ daddiu(AT, R0, -1);
11258 __ dsrl(AT, AT, 1);
11259 __ dmfc1(dst, F30);
11261 __ bne(dst, AT, Done);
11262 __ delayed()->mtc1(R0, F30);
11264 __ cvt_d_w(F30, F30);
11265 __ c_ult_d(src, F30);
11266 __ bc1f(Done);
11267 __ delayed()->daddiu(T9, R0, -1);
11269 __ c_un_d(src, src); //NaN?
// NOTE(review): subu here computes -1 - max_long to form min_long; this is
// the 32-bit subtract mnemonic — the 32-bit fast path (convD2I) uses subu32
// instead, so presumably this port's subu is 64-bit-safe here. Verify
// against the MacroAssembler definition.
11270 __ subu(dst, T9, AT);
11271 __ movt(dst, R0);
11273 __ bind(Done);
11274 %}
11276 ins_pipe( pipe_slow );
11277 %}
// double -> long, slow path: NaN short-circuits to 0; otherwise trunc.l.d
// is used and the FCSR invalid-operation flag (bit 16 after cfc1 $31) is
// checked — if the conversion was invalid, fall back to the runtime helper
// SharedRuntime::d2l for correct Java saturation semantics.
11279 instruct convD2L_reg_slow( mRegL dst, regD src ) %{
11280 match(Set dst (ConvD2L src));
11281 ins_cost(250);
11282 format %{ "convD2L $dst, $src @ convD2L_reg_slow" %}
11283 ins_encode %{
11284 Register dst = as_Register($dst$$reg);
11285 FloatRegister src = as_FloatRegister($src$$reg);
11287 Label L;
11289 __ c_un_d(src, src); //NaN?
11290 __ bc1t(L);
11291 __ delayed();
11292 __ move(dst, R0);
11294 __ trunc_l_d(F30, src);
11295 __ cfc1(AT, 31);
11296 __ li(T9, 0x10000);
11297 __ andr(AT, AT, T9);
11298 __ beq(AT, R0, L);
11299 __ delayed()->dmfc1(dst, F30);
11301 __ mov_d(F12, src);
11302 __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::d2l), 1);
11303 __ move(dst, V0);
11304 __ bind(L);
11305 %}
11307 ins_pipe( pipe_slow );
11308 %}
// float -> int, fast path: trunc.w.s handles the conversion; a NaN input
// (detected by c.un.s) forces the result to 0 via movt, per Java semantics.
11310 instruct convF2I_reg_fast( mRegI dst, regF src ) %{
11311 match(Set dst (ConvF2I src));
11312 ins_cost(150);
11313 format %{ "convf2i $dst, $src @ convF2I_reg_fast" %}
11314 ins_encode %{
11315 Register dreg = $dst$$Register;
11316 FloatRegister fval = $src$$FloatRegister;
11318 __ trunc_w_s(F30, fval);
11319 __ mfc1(dreg, F30);
11320 __ c_un_s(fval, fval); //NaN?
11321 __ movt(dreg, R0);
11322 %}
11324 ins_pipe( pipe_slow );
11325 %}
// float -> int, slow path: NaN -> 0 directly; otherwise trunc.w.s, then the
// FCSR invalid flag (bit 16) is tested and, if set, SharedRuntime::f2i is
// called for correct saturation. V0 is preserved around the call because
// call_VM_leaf clobbers it (see the in-line bug note from 2014/01/08).
11327 instruct convF2I_reg_slow( mRegI dst, regF src ) %{
11328 match(Set dst (ConvF2I src));
11329 ins_cost(250);
11330 format %{ "convf2i $dst, $src @ convF2I_reg_slow" %}
11331 ins_encode %{
11332 Register dreg = $dst$$Register;
11333 FloatRegister fval = $src$$FloatRegister;
11334 Label L;
11336 __ c_un_s(fval, fval); //NaN?
11337 __ bc1t(L);
11338 __ delayed();
11339 __ move(dreg, R0);
11341 __ trunc_w_s(F30, fval);
11343 /* Call SharedRuntime:f2i() to do valid convention */
11344 __ cfc1(AT, 31);
11345 __ li(T9, 0x10000);
11346 __ andr(AT, AT, T9);
11347 __ beq(AT, R0, L);
11348 __ delayed()->mfc1(dreg, F30);
11350 __ mov_s(F12, fval);
11352 /* 2014/01/08 Fu : This bug was found when running ezDS's control-panel.
11353 * J 982 C2 javax.swing.text.BoxView.layoutMajorAxis(II[I[I)V (283 bytes) @ 0x000000555c46aa74
11354 *
11355 * An interger array index has been assigned to V0, and then changed from 1 to Integer.MAX_VALUE.
11356 * V0 is corrupted during call_VM_leaf(), and should be preserved.
11357 */
11358 if(dreg != V0) {
11359 __ push(V0);
11360 }
11361 __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::f2i), 1);
11362 if(dreg != V0) {
11363 __ move(dreg, V0);
11364 __ pop(V0);
11365 }
11366 __ bind(L);
11367 %}
11369 ins_pipe( pipe_slow );
11370 %}
// float -> long, fast path: trunc.l.s plus NaN -> 0 via movt.
11372 instruct convF2L_reg_fast( mRegL dst, regF src ) %{
11373 match(Set dst (ConvF2L src));
11374 ins_cost(150);
11375 format %{ "convf2l $dst, $src @ convF2L_reg_fast" %}
11376 ins_encode %{
11377 Register dreg = $dst$$Register;
11378 FloatRegister fval = $src$$FloatRegister;
11380 __ trunc_l_s(F30, fval);
11381 __ dmfc1(dreg, F30);
11382 __ c_un_s(fval, fval); //NaN?
11383 __ movt(dreg, R0);
11384 %}
11386 ins_pipe( pipe_slow );
11387 %}
// float -> long, slow path: same FCSR-invalid-flag fallback scheme as
// convD2L_reg_slow, delegating to SharedRuntime::f2l when needed.
// NOTE(review): unlike convF2I_reg_slow, V0 is not preserved around the
// call here — presumably safe because dst takes V0's value anyway; verify.
11389 instruct convF2L_reg_slow( mRegL dst, regF src ) %{
11390 match(Set dst (ConvF2L src));
11391 ins_cost(250);
11392 format %{ "convf2l $dst, $src @ convF2L_reg_slow" %}
11393 ins_encode %{
11394 Register dst = as_Register($dst$$reg);
11395 FloatRegister fval = $src$$FloatRegister;
11396 Label L;
11398 __ c_un_s(fval, fval); //NaN?
11399 __ bc1t(L);
11400 __ delayed();
11401 __ move(dst, R0);
11403 __ trunc_l_s(F30, fval);
11404 __ cfc1(AT, 31);
11405 __ li(T9, 0x10000);
11406 __ andr(AT, AT, T9);
11407 __ beq(AT, R0, L);
11408 __ delayed()->dmfc1(dst, F30);
11410 __ mov_s(F12, fval);
11411 __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::f2l), 1);
11412 __ move(dst, V0);
11413 __ bind(L);
11414 %}
11416 ins_pipe( pipe_slow );
11417 %}
11419 instruct convL2F_reg( regF dst, mRegL src ) %{
11420 match(Set dst (ConvL2F src));
11421 format %{ "convl2f $dst, $src @ convL2F_reg" %}
11422 ins_encode %{
11423 FloatRegister dst = $dst$$FloatRegister;
11424 Register src = as_Register($src$$reg);
11425 Label L;
11427 __ dmtc1(src, dst);
11428 __ cvt_s_l(dst, dst);
11429 %}
11431 ins_pipe( pipe_slow );
11432 %}
// int -> float: mtc1 then cvt.s.w.
11434 instruct convI2F_reg( regF dst, mRegI src ) %{
11435 match(Set dst (ConvI2F src));
11436 format %{ "convi2f $dst, $src @ convI2F_reg" %}
11437 ins_encode %{
11438 Register src = $src$$Register;
11439 FloatRegister dst = $dst$$FloatRegister;
11441 __ mtc1(src, dst);
11442 __ cvt_s_w(dst, dst);
11443 %}
11445 ins_pipe( fpu_regF_regF );
11446 %}
// CmpLTMask against zero: dst = (p < 0) ? -1 : 0, i.e. just the
// sign bit smeared across the register by an arithmetic shift.
11448 instruct cmpLTMask_immI0( mRegI dst, mRegI p, immI0 zero ) %{
11449 match(Set dst (CmpLTMask p zero));
11450 ins_cost(100);
11452 format %{ "sra $dst, $p, 31 @ cmpLTMask_immI0" %}
11453 ins_encode %{
11454 Register src = $p$$Register;
11455 Register dst = $dst$$Register;
11457 __ sra(dst, src, 31);
11458 %}
11459 ins_pipe( pipe_slow );
11460 %}
// General CmpLTMask: slt yields 0/1, then 0 - (0/1) gives 0 / -1
// (an all-ones mask when p < q).
11463 instruct cmpLTMask( mRegI dst, mRegI p, mRegI q ) %{
11464 match(Set dst (CmpLTMask p q));
11465 ins_cost(400);
11467 format %{ "cmpLTMask $dst, $p, $q @ cmpLTMask" %}
11468 ins_encode %{
11469 Register p = $p$$Register;
11470 Register q = $q$$Register;
11471 Register dst = $dst$$Register;
11473 __ slt(dst, p, q);
11474 __ subu(dst, R0, dst);
11475 %}
11476 ins_pipe( pipe_slow );
11477 %}
// dst = (src != NULL) ? 1 : 0, same load-1/movz pattern as convI2B;
// AT shelters src when dst aliases it.
11479 instruct convP2B(mRegI dst, mRegP src) %{
11480 match(Set dst (Conv2B src));
11482 ins_cost(100);
11483 format %{ "convP2B $dst, $src @ convP2B" %}
11484 ins_encode %{
11485 Register dst = as_Register($dst$$reg);
11486 Register src = as_Register($src$$reg);
11488 if (dst != src) {
11489 __ daddiu(dst, R0, 1);
11490 __ movz(dst, R0, src);
11491 } else {
11492 __ move(AT, src);
11493 __ daddiu(dst, R0, 1);
11494 __ movz(dst, R0, AT);
11495 }
11496 %}
11498 ins_pipe( ialu_regL_regL );
11499 %}
// int -> double: mtc1 then cvt.d.w.
11502 instruct convI2D_reg_reg(regD dst, mRegI src) %{
11503 match(Set dst (ConvI2D src));
11504 format %{ "conI2D $dst, $src @convI2D_reg" %}
11505 ins_encode %{
11506 Register src = $src$$Register;
11507 FloatRegister dst = $dst$$FloatRegister;
11508 __ mtc1(src, dst);
11509 __ cvt_d_w(dst, dst);
11510 %}
11511 ins_pipe( fpu_regF_regF );
11512 %}
// float -> double widening (always exact).
11514 instruct convF2D_reg_reg(regD dst, regF src) %{
11515 match(Set dst (ConvF2D src));
11516 format %{ "convF2D $dst, $src\t# @convF2D_reg_reg" %}
11517 ins_encode %{
11518 FloatRegister dst = $dst$$FloatRegister;
11519 FloatRegister src = $src$$FloatRegister;
11521 __ cvt_d_s(dst, src);
11522 %}
11523 ins_pipe( fpu_regF_regF );
11524 %}
// double -> float narrowing (rounds per current FP rounding mode).
11526 instruct convD2F_reg_reg(regF dst, regD src) %{
11527 match(Set dst (ConvD2F src));
11528 format %{ "convD2F $dst, $src\t# @convD2F_reg_reg" %}
11529 ins_encode %{
11530 FloatRegister dst = $dst$$FloatRegister;
11531 FloatRegister src = $src$$FloatRegister;
11533 __ cvt_s_d(dst, src);
11534 %}
11535 ins_pipe( fpu_regF_regF );
11536 %}
11538 // Convert a double to an int. If the double is a NAN, stuff a zero in instead.
// Fast path: trunc.w.d saturates to max_int on overflow/NaN.  When the
// result equals max_int the code disambiguates: src < 0.0 means negative
// overflow (min_int produced by -1 - max_int via subu32), and NaN
// (c.un.d + movt) forces 0, matching Java's (int) cast semantics.
11539 instruct convD2I_reg_reg_fast( mRegI dst, regD src ) %{
11540 match(Set dst (ConvD2I src));
11542 ins_cost(150);
11543 format %{ "convD2I $dst, $src\t# @ convD2I_reg_reg_fast" %}
11545 ins_encode %{
11546 FloatRegister src = $src$$FloatRegister;
11547 Register dst = $dst$$Register;
11549 Label Done;
11551 __ trunc_w_d(F30, src);
11552 // max_int: 2147483647
11553 __ move(AT, 0x7fffffff);
11554 __ mfc1(dst, F30);
11556 __ bne(dst, AT, Done);
11557 __ delayed()->mtc1(R0, F30);
11559 __ cvt_d_w(F30, F30);
11560 __ c_ult_d(src, F30);
11561 __ bc1f(Done);
11562 __ delayed()->addiu(T9, R0, -1);
11564 __ c_un_d(src, src); //NaN?
11565 __ subu32(dst, T9, AT);
11566 __ movt(dst, R0);
11568 __ bind(Done);
11569 %}
11570 ins_pipe( pipe_slow );
11571 %}
// Slow path: trunc.w.d, then test the FCSR invalid-operation flag
// (bit 16 of $31); if the conversion was invalid, call
// SharedRuntime::d2i for correct saturation/NaN handling.
11573 instruct convD2I_reg_reg_slow( mRegI dst, regD src ) %{
11574 match(Set dst (ConvD2I src));
11576 ins_cost(250);
11577 format %{ "convD2I $dst, $src\t# @ convD2I_reg_reg_slow" %}
11579 ins_encode %{
11580 FloatRegister src = $src$$FloatRegister;
11581 Register dst = $dst$$Register;
11582 Label L;
11584 __ trunc_w_d(F30, src);
11585 __ cfc1(AT, 31);
11586 __ li(T9, 0x10000);
11587 __ andr(AT, AT, T9);
11588 __ beq(AT, R0, L);
11589 __ delayed()->mfc1(dst, F30);
11591 __ mov_d(F12, src);
11592 __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::d2i), 1);
11593 __ move(dst, V0);
11594 __ bind(L);
11596 %}
11597 ins_pipe( pipe_slow );
11598 %}
11600 // Convert oop pointer into compressed form
// Compress an oop that may be NULL (predicate excludes the NotNull case,
// which the next rule handles without the null check).
11601 instruct encodeHeapOop(mRegN dst, mRegP src) %{
11602 predicate(n->bottom_type()->make_ptr()->ptr() != TypePtr::NotNull);
11603 match(Set dst (EncodeP src));
11604 format %{ "encode_heap_oop $dst,$src" %}
11605 ins_encode %{
11606 Register src = $src$$Register;
11607 Register dst = $dst$$Register;
11608 if (src != dst) {
11609 __ move(dst, src);
11610 }
11611 __ encode_heap_oop(dst);
11612 %}
11613 ins_pipe( ialu_regL_regL );
11614 %}
// Compress an oop statically known non-NULL: skips the null check.
11616 instruct encodeHeapOop_not_null(mRegN dst, mRegP src) %{
11617 predicate(n->bottom_type()->make_ptr()->ptr() == TypePtr::NotNull);
11618 match(Set dst (EncodeP src));
11619 format %{ "encode_heap_oop_not_null $dst,$src @ encodeHeapOop_not_null" %}
11620 ins_encode %{
11621 __ encode_heap_oop_not_null($dst$$Register, $src$$Register);
11622 %}
11623 ins_pipe( ialu_regL_regL );
11624 %}
// Decompress a narrow oop that may be NULL (Constant/NotNull handled by
// the not_null rule below).
11626 instruct decodeHeapOop(mRegP dst, mRegN src) %{
11627 predicate(n->bottom_type()->is_ptr()->ptr() != TypePtr::NotNull &&
11628 n->bottom_type()->is_ptr()->ptr() != TypePtr::Constant);
11629 match(Set dst (DecodeN src));
11630 format %{ "decode_heap_oop $dst,$src @ decodeHeapOop" %}
11631 ins_encode %{
11632 Register s = $src$$Register;
11633 Register d = $dst$$Register;
11634 if (s != d) {
11635 __ move(d, s);
11636 }
11637 __ decode_heap_oop(d);
11638 %}
11639 ins_pipe( ialu_regL_regL );
11640 %}
// Decompress a narrow oop known non-NULL; the two-register MacroAssembler
// overload is used when src and dst differ.
11642 instruct decodeHeapOop_not_null(mRegP dst, mRegN src) %{
11643 predicate(n->bottom_type()->is_ptr()->ptr() == TypePtr::NotNull ||
11644 n->bottom_type()->is_ptr()->ptr() == TypePtr::Constant);
11645 match(Set dst (DecodeN src));
11646 format %{ "decode_heap_oop_not_null $dst,$src @ decodeHeapOop_not_null" %}
11647 ins_encode %{
11648 Register s = $src$$Register;
11649 Register d = $dst$$Register;
11650 if (s != d) {
11651 __ decode_heap_oop_not_null(d, s);
11652 } else {
11653 __ decode_heap_oop_not_null(d);
11654 }
11655 %}
11656 ins_pipe( ialu_regL_regL );
11657 %}
11659 instruct encodeKlass_not_null(mRegN dst, mRegP src) %{
11660 match(Set dst (EncodePKlass src));
11661 format %{ "encode_heap_oop_not_null $dst,$src @ encodeKlass_not_null" %}
11662 ins_encode %{
11663 __ encode_klass_not_null($dst$$Register, $src$$Register);
11664 %}
11665 ins_pipe( ialu_regL_regL );
11666 %}
// Decompress a narrow klass pointer (always non-NULL); uses the
// two-register MacroAssembler overload when src and dst differ.
11668 instruct decodeKlass_not_null(mRegP dst, mRegN src) %{
11669 match(Set dst (DecodeNKlass src));
11670 format %{ "decode_heap_klass_not_null $dst,$src" %}
11671 ins_encode %{
11672 Register s = $src$$Register;
11673 Register d = $dst$$Register;
11674 if (s != d) {
11675 __ decode_klass_not_null(d, s);
11676 } else {
11677 __ decode_klass_not_null(d);
11678 }
11679 %}
11680 ins_pipe( ialu_regL_regL );
11681 %}
11683 //FIXME
// Materialize the current JavaThread pointer.  With OPT_THREAD the thread
// is pinned in TREG and copied; otherwise it is fetched via get_thread().
11684 instruct tlsLoadP(mRegP dst) %{
11685 match(Set dst (ThreadLocal));
11687 ins_cost(0);
11688 format %{ " get_thread in $dst #@tlsLoadP" %}
11689 ins_encode %{
11690 Register dst = $dst$$Register;
11691 #ifdef OPT_THREAD
11692 __ move(dst, TREG);
11693 #else
11694 __ get_thread(dst);
11695 #endif
11696 %}
11698 ins_pipe( ialu_loadI );
11699 %}
11702 instruct checkCastPP( mRegP dst ) %{
11703 match(Set dst (CheckCastPP dst));
11705 format %{ "#checkcastPP of $dst (empty encoding) #@chekCastPP" %}
11706 ins_encode( /*empty encoding*/ );
11707 ins_pipe( empty );
11708 %}
// CastPP is purely a type-system node; it emits no code (size 0).
11710 instruct castPP(mRegP dst)
11711 %{
11712 match(Set dst (CastPP dst));
11714 size(0);
11715 format %{ "# castPP of $dst" %}
11716 ins_encode(/* empty encoding */);
11717 ins_pipe(empty);
11718 %}
// CastII is likewise a no-op at the machine level.
11720 instruct castII( mRegI dst ) %{
11721 match(Set dst (CastII dst));
11722 format %{ "#castII of $dst empty encoding" %}
11723 ins_encode( /*empty encoding*/ );
11724 ins_cost(0);
11725 ins_pipe( empty );
11726 %}
11728 // Return Instruction
11729 // Remove the return address & jump to it.
// Method return: jump through RA; the nop fills the branch delay slot.
11730 instruct Ret() %{
11731 match(Return);
11732 format %{ "RET #@Ret" %}
11734 ins_encode %{
11735 __ jr(RA);
11736 __ nop();
11737 %}
11739 ins_pipe( pipe_jump );
11740 %}
11742 /*
11743 // For Loongson CPUs, jr seems too slow, so this rule shouldn't be imported.
11744 instruct jumpXtnd(mRegL switch_val) %{
11745 match(Jump switch_val);
11747 ins_cost(350);
11749 format %{ "load T9 <-- [$constanttablebase, $switch_val, $constantoffset] @ jumpXtnd\n\t"
11750 "jr T9\n\t"
11751 "nop" %}
11752 ins_encode %{
11753 Register table_base = $constanttablebase;
11754 int con_offset = $constantoffset;
11755 Register switch_reg = $switch_val$$Register;
11757 if (UseLoongsonISA) {
11758 if (Assembler::is_simm(con_offset, 8)) {
11759 __ gsldx(T9, table_base, switch_reg, con_offset);
11760 } else if (Assembler::is_simm16(con_offset)) {
11761 __ daddu(T9, table_base, switch_reg);
11762 __ ld(T9, T9, con_offset);
11763 } else {
11764 __ move(T9, con_offset);
11765 __ daddu(AT, table_base, switch_reg);
11766 __ gsldx(T9, AT, T9, 0);
11767 }
11768 } else {
11769 if (Assembler::is_simm16(con_offset)) {
11770 __ daddu(T9, table_base, switch_reg);
11771 __ ld(T9, T9, con_offset);
11772 } else {
11773 __ move(T9, con_offset);
11774 __ daddu(AT, table_base, switch_reg);
11775 __ daddu(AT, T9, AT);
11776 __ ld(T9, AT, 0);
11777 }
11778 }
11780 __ jr(T9);
11781 __ nop();
11783 %}
11784 ins_pipe(pipe_jump);
11785 %}
11786 */
11788 // Jump Direct - Label defines a relative address from JMP
// Unconditional branch to a label.  The label operand may be a NULL
// pointer when the target is not yet known; '&L' tests the underlying
// pointer (standard HotSpot .ad pattern) and emits a zero-offset branch
// to be patched later.  nop fills the delay slot.
11789 instruct jmpDir(label labl) %{
11790 match(Goto);
11791 effect(USE labl);
11793 ins_cost(300);
11794 format %{ "JMP $labl #@jmpDir" %}
11796 ins_encode %{
11797 Label &L = *($labl$$label);
11798 if(&L)
11799 __ b(L);
11800 else
11801 __ b(int(0));
11802 __ nop();
11803 %}
11805 ins_pipe( pipe_jump );
11806 ins_pc_relative(1);
11807 %}
11811 // Tail Jump; remove the return address; jump to target.
11812 // TailCall above leaves the return address around.
11813 // TailJump is used in only one place, the rethrow_Java stub (fancy_jump=2).
11814 // ex_oop (Exception Oop) is needed in %o0 at the jump. As there would be a
11815 // "restore" before this instruction (in Epilogue), we need to materialize it
11816 // in %i0.
11817 //FIXME
// Places the exception oop in V0 and the exception pc (current RA) in V1 —
// the register convention expected by the forward_exception/exception_blob
// stubs (see the in-line comment) — then jumps indirectly to the target.
11818 instruct tailjmpInd(mRegP jump_target,mRegP ex_oop) %{
11819 match( TailJump jump_target ex_oop );
11820 ins_cost(200);
11821 format %{ "Jmp $jump_target ; ex_oop = $ex_oop #@tailjmpInd" %}
11822 ins_encode %{
11823 Register target = $jump_target$$Register;
11825 /* 2012/9/14 Jin: V0, V1 are indicated in:
11826 * [stubGenerator_mips.cpp] generate_forward_exception()
11827 * [runtime_mips.cpp] OptoRuntime::generate_exception_blob()
11828 */
11829 Register oop = $ex_oop$$Register;
11830 Register exception_oop = V0;
11831 Register exception_pc = V1;
11833 __ move(exception_pc, RA);
11834 __ move(exception_oop, oop);
11836 __ jr(target);
11837 __ nop();
11838 %}
11839 ins_pipe( pipe_jump );
11840 %}
11842 // ============================================================================
11843 // Procedure Call/Return Instructions
11844 // Call Java Static Instruction
11845 // Note: If this code changes, the corresponding ret_addr_offset() and
11846 // compute_padding() functions will have to be adjusted.
// Static Java call; encoding delegated to the Java_Static_Call enc_class.
// 16-byte alignment keeps the call site patchable as a unit.
11847 instruct CallStaticJavaDirect(method meth) %{
11848 match(CallStaticJava);
11849 effect(USE meth);
11851 ins_cost(300);
11852 format %{ "CALL,static #@CallStaticJavaDirect " %}
11853 ins_encode( Java_Static_Call( meth ) );
11854 ins_pipe( pipe_slow );
11855 ins_pc_relative(1);
11856 ins_alignment(16);
11857 %}
11859 // Call Java Dynamic Instruction
11860 // Note: If this code changes, the corresponding ret_addr_offset() and
11861 // compute_padding() functions will have to be adjusted.
// Dynamic (virtual/interface) Java call: loads the IC_Klass sentinel and
// calls through the inline-cache stub (Java_Dynamic_Call enc_class).
11862 instruct CallDynamicJavaDirect(method meth) %{
11863 match(CallDynamicJava);
11864 effect(USE meth);
11866 ins_cost(300);
11867 format %{"MOV IC_Klass, (oop)-1 @ CallDynamicJavaDirect\n\t"
11868 "CallDynamic @ CallDynamicJavaDirect" %}
11869 ins_encode( Java_Dynamic_Call( meth ) );
11870 ins_pipe( pipe_slow );
11871 ins_pc_relative(1);
11872 ins_alignment(16);
11873 %}
// Leaf runtime call that is known not to use FP arguments/results.
11875 instruct CallLeafNoFPDirect(method meth) %{
11876 match(CallLeafNoFP);
11877 effect(USE meth);
11879 ins_cost(300);
11880 format %{ "CALL_LEAF_NOFP,runtime " %}
11881 ins_encode(Java_To_Runtime(meth));
11882 ins_pipe( pipe_slow );
11883 ins_pc_relative(1);
11884 ins_alignment(16);
11885 %}
11887 // Prefetch instructions.
11889 instruct prefetchrNTA( memory mem ) %{
11890 match(PrefetchRead mem);
11891 ins_cost(125);
11893 format %{ "pref $mem\t# Prefetch into non-temporal cache for read @ prefetchrNTA" %}
11894 ins_encode %{
11895 int base = $mem$$base;
11896 int index = $mem$$index;
11897 int scale = $mem$$scale;
11898 int disp = $mem$$disp;
11900 if( index != 0 ) {
11901 if (scale == 0) {
11902 __ daddu(AT, as_Register(base), as_Register(index));
11903 } else {
11904 __ dsll(AT, as_Register(index), scale);
11905 __ daddu(AT, as_Register(base), AT);
11906 }
11907 } else {
11908 __ move(AT, as_Register(base));
11909 }
11910 if( Assembler::is_simm16(disp) ) {
11911 __ daddiu(AT, as_Register(base), disp);
11912 __ daddiu(AT, AT, disp);
11913 } else {
11914 __ move(T9, disp);
11915 __ daddu(AT, as_Register(base), T9);
11916 }
11917 __ pref(0, AT, 0); //hint: 0:load
11918 %}
11919 ins_pipe(pipe_slow);
11920 %}
11922 instruct prefetchwNTA( memory mem ) %{
11923 match(PrefetchWrite mem);
11924 ins_cost(125);
11925 format %{ "pref $mem\t# Prefetch to non-temporal cache for write @ prefetchwNTA" %}
11926 ins_encode %{
11927 int base = $mem$$base;
11928 int index = $mem$$index;
11929 int scale = $mem$$scale;
11930 int disp = $mem$$disp;
11932 if( index != 0 ) {
11933 if (scale == 0) {
11934 __ daddu(AT, as_Register(base), as_Register(index));
11935 } else {
11936 __ dsll(AT, as_Register(index), scale);
11937 __ daddu(AT, as_Register(base), AT);
11938 }
11939 } else {
11940 __ move(AT, as_Register(base));
11941 }
11942 if( Assembler::is_simm16(disp) ) {
11943 __ daddiu(AT, as_Register(base), disp);
11944 __ daddiu(AT, AT, disp);
11945 } else {
11946 __ move(T9, disp);
11947 __ daddu(AT, as_Register(base), T9);
11948 }
11949 __ pref(1, AT, 0); //hint: 1:store
11950 %}
11951 ins_pipe(pipe_slow);
11952 %}
11954 // Prefetch instructions for allocation.
// Allocation prefetch: instead of a pref instruction, this touches the
// target line by loading a byte into R0 (a discarded load), using gslbx
// on Loongson to fold base+index+disp into one instruction.
// NOTE(review): the non-Loongson address arithmetic uses the 32-bit addu
// for what are 64-bit addresses — presumably tolerated on this port;
// verify against the other memory rules, which use daddu.
11956 instruct prefetchAllocNTA( memory mem ) %{
11957 match(PrefetchAllocation mem);
11958 ins_cost(125);
11959 format %{ "pref $mem\t# Prefetch allocation @ prefetchAllocNTA" %}
11960 ins_encode %{
11961 int base = $mem$$base;
11962 int index = $mem$$index;
11963 int scale = $mem$$scale;
11964 int disp = $mem$$disp;
11966 Register dst = R0;
11968 if( index != 0 ) {
11969 if( Assembler::is_simm16(disp) ) {
11970 if( UseLoongsonISA ) {
11971 if (scale == 0) {
11972 __ gslbx(dst, as_Register(base), as_Register(index), disp);
11973 } else {
11974 __ dsll(AT, as_Register(index), scale);
11975 __ gslbx(dst, as_Register(base), AT, disp);
11976 }
11977 } else {
11978 if (scale == 0) {
11979 __ addu(AT, as_Register(base), as_Register(index));
11980 } else {
11981 __ dsll(AT, as_Register(index), scale);
11982 __ addu(AT, as_Register(base), AT);
11983 }
11984 __ lb(dst, AT, disp);
11985 }
11986 } else {
11987 if (scale == 0) {
11988 __ addu(AT, as_Register(base), as_Register(index));
11989 } else {
11990 __ dsll(AT, as_Register(index), scale);
11991 __ addu(AT, as_Register(base), AT);
11992 }
11993 __ move(T9, disp);
11994 if( UseLoongsonISA ) {
11995 __ gslbx(dst, AT, T9, 0);
11996 } else {
11997 __ addu(AT, AT, T9);
11998 __ lb(dst, AT, 0);
11999 }
12000 }
12001 } else {
12002 if( Assembler::is_simm16(disp) ) {
12003 __ lb(dst, as_Register(base), disp);
12004 } else {
12005 __ move(T9, disp);
12006 if( UseLoongsonISA ) {
12007 __ gslbx(dst, as_Register(base), T9, 0);
12008 } else {
12009 __ addu(AT, as_Register(base), T9);
12010 __ lb(dst, AT, 0);
12011 }
12012 }
12013 }
12014 %}
12015 ins_pipe(pipe_slow);
12016 %}
12019 // Call runtime without safepoint
// Leaf runtime call (no safepoint, no oop-map); encoding shared with
// CallLeafNoFPDirect via the Java_To_Runtime enc_class.
12020 instruct CallLeafDirect(method meth) %{
12021 match(CallLeaf);
12022 effect(USE meth);
12024 ins_cost(300);
12025 format %{ "CALL_LEAF,runtime #@CallLeafDirect " %}
12026 ins_encode(Java_To_Runtime(meth));
12027 ins_pipe( pipe_slow );
12028 ins_pc_relative(1);
12029 ins_alignment(16);
12030 %}
12032 // Load Char (16bit unsigned)
12033 instruct loadUS(mRegI dst, memory mem) %{
12034 match(Set dst (LoadUS mem));
12036 ins_cost(125);
12037 format %{ "loadUS $dst,$mem @ loadC" %}
12038 ins_encode(load_C_enc(dst, mem));
12039 ins_pipe( ialu_loadI );
12040 %}
// Load an unsigned 16-bit char and widen it to long in one matched
// pattern (ConvI2L of LoadUS).  The same encoding as loadUS is reused:
// the zero-extended load already yields a valid 64-bit value, so the
// int->long conversion costs nothing.
instruct loadUS_convI2L(mRegL dst, memory mem) %{
  match(Set dst (ConvI2L (LoadUS mem)));

  ins_cost(125);
  format %{ "loadUS $dst,$mem @ loadUS_convI2L" %}
  ins_encode(load_C_enc(dst, mem));
  ins_pipe( ialu_loadI );
%}
// Store Char (16bit unsigned)
// Store the low 16 bits of an int register to memory.
instruct storeC(memory mem, mRegI src) %{
  match(Set mem (StoreC mem src));

  ins_cost(125);
  format %{ "storeC $src, $mem @ storeC" %}
  ins_encode(store_C_reg_enc(mem, src));
  // NOTE(review): pipe class is ialu_loadI for a store; this mirrors the
  // neighbouring store instructs in this file — verify it is intentional.
  ins_pipe( ialu_loadI );
%}
// Store a constant-zero char.  Matching StoreC of immI0 lets the encoder
// (store_C0_enc) avoid materializing the zero in a scratch register —
// presumably it stores R0 directly; confirm in the enc_class definition.
instruct storeC0(memory mem, immI0 zero) %{
  match(Set mem (StoreC mem zero));

  ins_cost(125);
  format %{ "storeC $zero, $mem @ storeC0" %}
  ins_encode(store_C0_enc(mem));
  ins_pipe( ialu_loadI );
%}
// Load the float constant +0.0f.
instruct loadConF0(regF dst, immF0 zero) %{
  match(Set dst zero);
  ins_cost(100);

  format %{ "mov $dst, zero @ loadConF0\n"%}
  ins_encode %{
    FloatRegister dst = $dst$$FloatRegister;

    // +0.0f is an all-zero bit pattern, so moving integer R0 into the
    // FPU register materializes the constant without a memory load.
    __ mtc1(R0, dst);
  %}
  ins_pipe( fpu_loadF );
%}
// Load a float constant from the per-method constant table.
instruct loadConF(regF dst, immF src) %{
  match(Set dst src);
  ins_cost(125);

  format %{ "lwc1 $dst, $constantoffset[$constanttablebase] # load FLOAT $src from table @ loadConF" %}
  ins_encode %{
    int con_offset = $constantoffset($src);

    if (Assembler::is_simm16(con_offset)) {
      // Offset fits the 16-bit displacement of lwc1.
      __ lwc1($dst$$FloatRegister, $constanttablebase, con_offset);
    } else {
      // Offset too large for a 16-bit displacement: build it in AT first.
      __ set64(AT, con_offset);
      if (UseLoongsonISA) {
        // Loongson indexed FP load avoids the explicit address add.
        __ gslwxc1($dst$$FloatRegister, $constanttablebase, AT, 0);
      } else {
        __ daddu(AT, $constanttablebase, AT);
        __ lwc1($dst$$FloatRegister, AT, 0);
      }
    }
  %}
  ins_pipe( fpu_loadF );
%}
// Load the double constant +0.0d.
instruct loadConD0(regD dst, immD0 zero) %{
  match(Set dst zero);
  ins_cost(100);

  format %{ "mov $dst, zero @ loadConD0"%}
  ins_encode %{
    FloatRegister dst = as_FloatRegister($dst$$reg);

    // +0.0d is an all-zero bit pattern; dmtc1 moves the 64-bit zero from
    // R0 straight into the FPU register.
    __ dmtc1(R0, dst);
  %}
  ins_pipe( fpu_loadF );
%}
// Load a double constant from the per-method constant table.
instruct loadConD(regD dst, immD src) %{
  match(Set dst src);
  ins_cost(125);

  format %{ "ldc1 $dst, $constantoffset[$constanttablebase] # load DOUBLE $src from table @ loadConD" %}
  ins_encode %{
    int con_offset = $constantoffset($src);

    if (Assembler::is_simm16(con_offset)) {
      // Offset fits the 16-bit displacement of ldc1.
      __ ldc1($dst$$FloatRegister, $constanttablebase, con_offset);
    } else {
      // Offset too large for a 16-bit displacement: build it in AT first.
      __ set64(AT, con_offset);
      if (UseLoongsonISA) {
        // Loongson indexed FP load avoids the explicit address add.
        __ gsldxc1($dst$$FloatRegister, $constanttablebase, AT, 0);
      } else {
        __ daddu(AT, $constanttablebase, AT);
        __ ldc1($dst$$FloatRegister, AT, 0);
      }
    }
  %}
  ins_pipe( fpu_loadF );
%}
// Store register Float value (it is faster than store from FPU register)
// Store a float register to memory via store_F_reg_enc.
instruct storeF_reg( memory mem, regF src) %{
  match(Set mem (StoreF mem src));

  ins_cost(50);
  format %{ "store $mem, $src\t# store float @ storeF_reg" %}
  ins_encode(store_F_reg_enc(mem, src));
  ins_pipe( fpu_storeF );
%}
// Store float 0.0 to memory.  Since +0.0f is an all-zero bit pattern, the
// encoder stores integer R0 with sw and bypasses the FPU entirely.
// Clobbers AT and T9.
instruct storeF_imm0( memory mem, immF0 zero) %{
  match(Set mem (StoreF mem zero));

  ins_cost(40);
  format %{ "store $mem, zero\t# store float @ storeF_imm0" %}
  ins_encode %{
    int base  = $mem$$base;
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp  = $mem$$disp;

    // NOTE(review): the address arithmetic below mixes addu and daddu;
    // on MIPS64 addu sign-extends the low 32 bits of the sum — confirm
    // the addresses here are known to fit, or use daddu throughout.
    if( index != 0 ) {
      // AT = base + (index << scale)
      if(scale != 0) {
        __ dsll(T9, as_Register(index), scale);
        __ addu(AT, as_Register(base), T9);
      } else {
        __ daddu(AT, as_Register(base), as_Register(index));
      }
      if( Assembler::is_simm16(disp) ) {
        __ sw(R0, AT, disp);
      } else {
        // Displacement does not fit 16 bits: add it explicitly.
        __ move(T9, disp);
        __ addu(AT, AT, T9);
        __ sw(R0, AT, 0);
      }
    } else {
      if( Assembler::is_simm16(disp) ) {
        __ sw(R0, as_Register(base), disp);
      } else {
        __ move(T9, disp);
        __ addu(AT, as_Register(base), T9);
        __ sw(R0, AT, 0);
      }
    }
  %}
  ins_pipe( ialu_storeI );
%}
// Load Double
// 64-bit FP load into a double register via load_D_enc.
instruct loadD(regD dst, memory mem) %{
  match(Set dst (LoadD mem));

  ins_cost(150);
  format %{ "loadD $dst, $mem #@loadD" %}
  ins_encode(load_D_enc(dst, mem));
  ins_pipe( ialu_loadI );
%}
// Load Double - UNaligned
// Currently emits the same code as the aligned loadD (load_D_enc); the
// higher cost discourages the matcher from preferring it.
instruct loadD_unaligned(regD dst, memory mem ) %{
  match(Set dst (LoadD_unaligned mem));
  ins_cost(250);
  // FIXME: Jin: Need more effective ldl/ldr
  format %{ "loadD_unaligned $dst, $mem #@loadD_unaligned" %}
  ins_encode(load_D_enc(dst, mem));
  ins_pipe( ialu_loadI );
%}
// Store a double register to memory via store_D_reg_enc.
instruct storeD_reg( memory mem, regD src) %{
  match(Set mem (StoreD mem src));

  ins_cost(50);
  // Fixed the format comment: this instruct stores a double, not a float.
  format %{ "store $mem, $src\t# store double @ storeD_reg" %}
  ins_encode(store_D_reg_enc(mem, src));
  ins_pipe( fpu_storeF );
%}
// Store double 0.0 to memory.  Materializes 0.0 in F30 (mtc1 of R0 then
// cvt_d_w — the cvt is redundant for the all-zero pattern but harmless)
// and stores it with sdc1.  Clobbers AT, T9 and F30.
instruct storeD_imm0( memory mem, immD0 zero) %{
  match(Set mem (StoreD mem zero));

  ins_cost(40);
  // Fixed the format comment: this instruct stores a double, not a float.
  format %{ "store $mem, zero\t# store double @ storeD_imm0" %}
  ins_encode %{
    int base  = $mem$$base;
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp  = $mem$$disp;

    __ mtc1(R0, F30);
    __ cvt_d_w(F30, F30);

    // NOTE(review): address adds mix addu/daddu as in storeF_imm0 —
    // confirm addresses fit in 32 bits or use daddu throughout.
    if( index != 0 ) {
      // AT = base + (index << scale)
      if(scale != 0) {
        __ dsll(T9, as_Register(index), scale);
        __ addu(AT, as_Register(base), T9);
      } else {
        __ daddu(AT, as_Register(base), as_Register(index));
      }
      if( Assembler::is_simm16(disp) ) {
        __ sdc1(F30, AT, disp);
      } else {
        __ move(T9, disp);
        __ addu(AT, AT, T9);
        __ sdc1(F30, AT, 0);
      }
    } else {
      if( Assembler::is_simm16(disp) ) {
        __ sdc1(F30, as_Register(base), disp);
      } else {
        __ move(T9, disp);
        __ addu(AT, as_Register(base), T9);
        __ sdc1(F30, AT, 0);
      }
    }
  %}
  ins_pipe( ialu_storeI );
%}
// Load an int from a stack slot (SP-relative, 16-bit displacement only).
instruct loadSSI(mRegI dst, stackSlotI src)
%{
  match(Set dst src);

  ins_cost(125);
  format %{ "lw $dst, $src\t# int stk @ loadSSI" %}
  ins_encode %{
    guarantee( Assembler::is_simm16($src$$disp), "disp too long (loadSSI) !");
    __ lw($dst$$Register, SP, $src$$disp);
  %}
  ins_pipe(ialu_loadI);
%}
// Store an int to a stack slot (SP-relative, 16-bit displacement only).
instruct storeSSI(stackSlotI dst, mRegI src)
%{
  match(Set dst src);

  ins_cost(100);
  format %{ "sw $dst, $src\t# int stk @ storeSSI" %}
  ins_encode %{
    guarantee( Assembler::is_simm16($dst$$disp), "disp too long (storeSSI) !");
    __ sw($src$$Register, SP, $dst$$disp);
  %}
  ins_pipe(ialu_storeI);
%}
// Load a long from a stack slot (SP-relative, 16-bit displacement only).
instruct loadSSL(mRegL dst, stackSlotL src)
%{
  match(Set dst src);

  ins_cost(125);
  format %{ "ld $dst, $src\t# long stk @ loadSSL" %}
  ins_encode %{
    guarantee( Assembler::is_simm16($src$$disp), "disp too long (loadSSL) !");
    __ ld($dst$$Register, SP, $src$$disp);
  %}
  ins_pipe(ialu_loadI);
%}
// Store a long to a stack slot (SP-relative, 16-bit displacement only).
instruct storeSSL(stackSlotL dst, mRegL src)
%{
  match(Set dst src);

  ins_cost(100);
  format %{ "sd $dst, $src\t# long stk @ storeSSL" %}
  ins_encode %{
    guarantee( Assembler::is_simm16($dst$$disp), "disp too long (storeSSL) !");
    __ sd($src$$Register, SP, $dst$$disp);
  %}
  ins_pipe(ialu_storeI);
%}
// Load a pointer from a stack slot (SP-relative, 16-bit displacement only).
instruct loadSSP(mRegP dst, stackSlotP src)
%{
  match(Set dst src);

  ins_cost(125);
  format %{ "ld $dst, $src\t# ptr stk @ loadSSP" %}
  ins_encode %{
    guarantee( Assembler::is_simm16($src$$disp), "disp too long (loadSSP) !");
    __ ld($dst$$Register, SP, $src$$disp);
  %}
  ins_pipe(ialu_loadI);
%}
// Store a pointer to a stack slot (SP-relative, 16-bit displacement only).
instruct storeSSP(stackSlotP dst, mRegP src)
%{
  match(Set dst src);

  ins_cost(100);
  format %{ "sd $dst, $src\t# ptr stk @ storeSSP" %}
  ins_encode %{
    guarantee( Assembler::is_simm16($dst$$disp), "disp too long (storeSSP) !");
    __ sd($src$$Register, SP, $dst$$disp);
  %}
  ins_pipe(ialu_storeI);
%}
// Load a float from a stack slot (SP-relative, 16-bit displacement only).
instruct loadSSF(regF dst, stackSlotF src)
%{
  match(Set dst src);

  ins_cost(125);
  format %{ "lwc1 $dst, $src\t# float stk @ loadSSF" %}
  ins_encode %{
    guarantee( Assembler::is_simm16($src$$disp), "disp too long (loadSSF) !");
    __ lwc1($dst$$FloatRegister, SP, $src$$disp);
  %}
  ins_pipe(ialu_loadI);
%}
// Store a float to a stack slot (SP-relative, 16-bit displacement only).
instruct storeSSF(stackSlotF dst, regF src)
%{
  match(Set dst src);

  ins_cost(100);
  format %{ "swc1 $dst, $src\t# float stk @ storeSSF" %}
  ins_encode %{
    guarantee( Assembler::is_simm16($dst$$disp), "disp too long (storeSSF) !");
    __ swc1($src$$FloatRegister, SP, $dst$$disp);
  %}
  ins_pipe(fpu_storeF);
%}
// Use the same format since predicate() can not be used here.
// Load a double from a stack slot (SP-relative, 16-bit displacement only).
instruct loadSSD(regD dst, stackSlotD src)
%{
  match(Set dst src);

  ins_cost(125);
  format %{ "ldc1 $dst, $src\t# double stk @ loadSSD" %}
  ins_encode %{
    guarantee( Assembler::is_simm16($src$$disp), "disp too long (loadSSD) !");
    __ ldc1($dst$$FloatRegister, SP, $src$$disp);
  %}
  ins_pipe(ialu_loadI);
%}
// Store a double to a stack slot (SP-relative, 16-bit displacement only).
instruct storeSSD(stackSlotD dst, regD src)
%{
  match(Set dst src);

  ins_cost(100);
  format %{ "sdc1 $dst, $src\t# double stk @ storeSSD" %}
  ins_encode %{
    guarantee( Assembler::is_simm16($dst$$disp), "disp too long (storeSSD) !");
    __ sdc1($src$$FloatRegister, SP, $dst$$disp);
  %}
  ins_pipe(fpu_storeF);
%}
// Fast-path monitor enter.  Delegates to MacroAssembler::fast_lock; the
// result is reflected in the flags register operand.  box is destroyed
// (USE_KILL) and tmp/scr are scratch.
instruct cmpFastLock( FlagsReg cr, mRegP object, s0_RegP box, mRegI tmp, mRegP scr) %{
  match( Set cr (FastLock object box) );
  effect( TEMP tmp, TEMP scr, USE_KILL box );
  ins_cost(300);
  format %{ "FASTLOCK $cr $object, $box, $tmp #@ cmpFastLock" %}
  ins_encode %{
    __ fast_lock($object$$Register, $box$$Register, $tmp$$Register, $scr$$Register);
  %}

  ins_pipe( pipe_slow );
  ins_pc_relative(1);
%}
// Fast-path monitor exit.  Delegates to MacroAssembler::fast_unlock;
// box is destroyed (USE_KILL) and tmp is scratch.
instruct cmpFastUnlock( FlagsReg cr, mRegP object, s0_RegP box, mRegP tmp ) %{
  match( Set cr (FastUnlock object box) );
  effect( TEMP tmp, USE_KILL box );
  ins_cost(300);
  format %{ "FASTUNLOCK $object, $box, $tmp #@cmpFastUnlock" %}
  ins_encode %{
    __ fast_unlock($object$$Register, $box$$Register, $tmp$$Register);
  %}

  ins_pipe( pipe_slow );
  ins_pc_relative(1);
%}
// Store CMS card-mark Immediate
// Byte store of an immediate used for GC card marking; the _sync variant
// of the encoding provides the ordering the card-mark requires.
instruct storeImmCM(memory mem, immI8 src) %{
  match(Set mem (StoreCM mem src));

  ins_cost(150);
  format %{ "MOV8 $mem,$src\t! CMS card-mark imm0" %}
  // opcode(0xC6);
  ins_encode(store_B_immI_enc_sync(mem, src));
  ins_pipe( ialu_storeI );
%}
// Die now
// Matches the Halt node: control must never reach this point at runtime.
instruct ShouldNotReachHere( )
%{
  match(Halt);
  ins_cost(300);

  // Use the following format syntax
  format %{ "ILLTRAP ;#@ShouldNotReachHere" %}
  ins_encode %{
    // Here we should emit illtrap !
    // Fixed misspelled diagnostic ("in ShoudNotReachHere").
    __ stop("in ShouldNotReachHere");
  %}
  ins_pipe( pipe_jump );
%}
// Load effective address: base + 8-bit displacement (narrow-oop form).
// Only valid when narrow oops are unshifted (predicate), so the address
// arithmetic is a single daddiu.
instruct leaP8Narrow(mRegP dst, indOffset8Narrow mem)
%{
  predicate(Universe::narrow_oop_shift() == 0);
  match(Set dst mem);

  ins_cost(110);
  format %{ "leaq $dst, $mem\t# ptr off8narrow @ leaP8Narrow" %}
  ins_encode %{
    Register dst  = $dst$$Register;
    Register base = as_Register($mem$$base);
    int      disp = $mem$$disp;

    __ daddiu(dst, base, disp);
  %}
  ins_pipe( ialu_regI_imm16 );
%}
// Load effective address: base + (index << scale) + disp.  Clobbers AT.
instruct leaPPosIdxScaleOff8(mRegP dst, basePosIndexScaleOffset8 mem)
%{
  match(Set dst mem);

  ins_cost(110);
  format %{ "leaq $dst, $mem\t# @ PosIdxScaleOff8" %}
  ins_encode %{
    Register dst   = $dst$$Register;
    Register base  = as_Register($mem$$base);
    Register index = as_Register($mem$$index);
    int      scale = $mem$$scale;
    int      disp  = $mem$$disp;

    if (scale == 0) {
      __ daddu(AT, base, index);
      __ daddiu(dst, AT, disp);
    } else {
      // Scale the index first, then add base and displacement.
      __ dsll(AT, index, scale);
      __ daddu(AT, base, AT);
      __ daddiu(dst, AT, disp);
    }
  %}

  ins_pipe( ialu_regI_imm16 );
%}
// Load effective address: base + (index << scale), no displacement.
// Clobbers AT in the scaled case.
instruct leaPIdxScale(mRegP dst, indIndexScale mem)
%{
  match(Set dst mem);

  ins_cost(110);
  format %{ "leaq $dst, $mem\t# @ leaPIdxScale" %}
  ins_encode %{
    Register dst   = $dst$$Register;
    Register base  = as_Register($mem$$base);
    Register index = as_Register($mem$$index);
    int      scale = $mem$$scale;

    if (scale == 0) {
      __ daddu(dst, base, index);
    } else {
      __ dsll(AT, index, scale);
      __ daddu(dst, base, AT);
    }
  %}

  ins_pipe( ialu_regI_imm16 );
%}
// Jump Direct Conditional - Label defines a relative address from Jcc+1
// Back-branch of a counted loop comparing two int registers.  Clobbers AT
// for the relational cases; always fills the branch delay slot with a nop.
instruct jmpLoopEnd(cmpOp cop, mRegI src1, mRegI src2, label labl) %{
  match(CountedLoopEnd cop (CmpI src1 src2));
  effect(USE labl);

  ins_cost(300);
  format %{ "J$cop $src1, $src2, $labl\t# Loop end @ jmpLoopEnd" %}
  ins_encode %{
    Register op1 = $src1$$Register;
    Register op2 = $src2$$Register;
    Label &L = *($labl$$label);
    int flag = $cop$$cmpcode;

    // NOTE(review): "if (&L)" tests whether $labl$$label was NULL through
    // a reference (technically UB, but used consistently in this port);
    // a null label emits the branch with a 0 offset instead.
    // NOTE(review): the 'above'/'below' cases use signed slt — confirm
    // the cmpcode here never encodes a genuinely unsigned comparison.
    switch(flag)
    {
      case 0x01: //equal
        if (&L)
          __ beq(op1, op2, L);
        else
          __ beq(op1, op2, (int)0);
        break;
      case 0x02: //not_equal
        if (&L)
          __ bne(op1, op2, L);
        else
          __ bne(op1, op2, (int)0);
        break;
      case 0x03: //above
        __ slt(AT, op2, op1);
        if(&L)
          __ bne(AT, R0, L);
        else
          __ bne(AT, R0, (int)0);
        break;
      case 0x04: //above_equal
        __ slt(AT, op1, op2);
        if(&L)
          __ beq(AT, R0, L);
        else
          __ beq(AT, R0, (int)0);
        break;
      case 0x05: //below
        __ slt(AT, op1, op2);
        if(&L)
          __ bne(AT, R0, L);
        else
          __ bne(AT, R0, (int)0);
        break;
      case 0x06: //below_equal
        __ slt(AT, op2, op1);
        if(&L)
          __ beq(AT, R0, L);
        else
          __ beq(AT, R0, (int)0);
        break;
      default:
        Unimplemented();
    }
    // Fill the branch delay slot.
    __ nop();
  %}
  ins_pipe( pipe_jump );
  ins_pc_relative(1);
%}
// Counted-loop back-branch against a 16-bit immediate.  Computes
// AT = src1 - src2 with a single addiu, then branches on the sign/zero of
// AT — avoiding a separate compare.  Clobbers AT; fills the delay slot.
instruct jmpLoopEnd_reg_imm16_sub(cmpOp cop, mRegI src1, immI16_sub src2, label labl) %{
  match(CountedLoopEnd cop (CmpI src1 src2));
  effect(USE labl);

  ins_cost(250);
  format %{ "J$cop $src1, $src2, $labl\t# Loop end @ jmpLoopEnd_reg_imm16_sub" %}
  ins_encode %{
    Register op1 = $src1$$Register;
    int op2 = $src2$$constant;
    Label &L = *($labl$$label);
    int flag = $cop$$cmpcode;

    // AT = op1 - op2 (immI16_sub guarantees -op2 fits the addiu immediate).
    __ addiu32(AT, op1, -1 * op2);

    // NOTE(review): "if (&L)" null-label checks as in jmpLoopEnd.
    switch(flag)
    {
      case 0x01: //equal
        if (&L)
          __ beq(AT, R0, L);
        else
          __ beq(AT, R0, (int)0);
        break;
      case 0x02: //not_equal
        if (&L)
          __ bne(AT, R0, L);
        else
          __ bne(AT, R0, (int)0);
        break;
      case 0x03: //above
        if(&L)
          __ bgtz(AT, L);
        else
          __ bgtz(AT, (int)0);
        break;
      case 0x04: //above_equal
        if(&L)
          __ bgez(AT, L);
        else
          __ bgez(AT,(int)0);
        break;
      case 0x05: //below
        if(&L)
          __ bltz(AT, L);
        else
          __ bltz(AT, (int)0);
        break;
      case 0x06: //below_equal
        if(&L)
          __ blez(AT, L);
        else
          __ blez(AT, (int)0);
        break;
      default:
        Unimplemented();
    }
    // Fill the branch delay slot.
    __ nop();
  %}
  ins_pipe( pipe_jump );
  ins_pc_relative(1);
%}
12643 /*
12644 // Jump Direct Conditional - Label defines a relative address from Jcc+1
12645 instruct jmpLoopEndU(cmpOpU cop, eFlagsRegU cmp, label labl) %{
12646 match(CountedLoopEnd cop cmp);
12647 effect(USE labl);
12649 ins_cost(300);
12650 format %{ "J$cop,u $labl\t# Loop end" %}
12651 size(6);
12652 opcode(0x0F, 0x80);
12653 ins_encode( Jcc( cop, labl) );
12654 ins_pipe( pipe_jump );
12655 ins_pc_relative(1);
12656 %}
12658 instruct jmpLoopEndUCF(cmpOpUCF cop, eFlagsRegUCF cmp, label labl) %{
12659 match(CountedLoopEnd cop cmp);
12660 effect(USE labl);
12662 ins_cost(200);
12663 format %{ "J$cop,u $labl\t# Loop end" %}
12664 opcode(0x0F, 0x80);
12665 ins_encode( Jcc( cop, labl) );
12666 ins_pipe( pipe_jump );
12667 ins_pc_relative(1);
12668 %}
12669 */
// This match pattern is created for StoreIConditional since I cannot match IfNode without a RegFlags! fujie 2012/07/17
// Conditional jump on the pseudo flags register.  The flag value lives in
// AT (see storeIConditional: 0xFF on success, 0 on failure), which is why
// the branch sense is inverted relative to the cmpcode (equal -> bne).
instruct jmpCon_flags(cmpOp cop, FlagsReg cr, label labl) %{
  match(If cop cr);
  effect(USE labl);

  ins_cost(300);
  format %{ "J$cop $labl #mips uses AT as eflag @jmpCon_flags" %}

  ins_encode %{
    Label &L = *($labl$$label);
    // NOTE(review): "if (&L)" null-label checks as in jmpLoopEnd.
    switch($cop$$cmpcode)
    {
      case 0x01: //equal
        if (&L)
          __ bne(AT, R0, L);
        else
          __ bne(AT, R0, (int)0);
        break;
      case 0x02: //not equal
        if (&L)
          __ beq(AT, R0, L);
        else
          __ beq(AT, R0, (int)0);
        break;
      default:
        Unimplemented();
    }
    // Fill the branch delay slot.
    __ nop();
  %}

  ins_pipe( pipe_jump );
  ins_pc_relative(1);
%}
// ============================================================================
// The 2nd slow-half of a subtype check. Scan the subklass's 2ndary superklass
// array for an instance of the superklass. Set a hidden internal cache on a
// hit (cache is checked with exposed code in gen_subtype_check()). Return
// NZ for a miss or zero for a hit. The encoding ALSO sets flags.
instruct partialSubtypeCheck( mRegP result, no_T8_mRegP sub, no_T8_mRegP super, mT8RegI tmp ) %{
  match(Set result (PartialSubtypeCheck sub super));
  effect(KILL tmp);
  ins_cost(1100); // slightly larger than the next version
  format %{ "partialSubtypeCheck result=$result, sub=$sub, super=$super, tmp=$tmp " %}

  // The scan loop lives in enc_PartialSubtypeCheck; sub/super must avoid
  // T8 because tmp is pinned to T8 (mT8RegI).
  ins_encode( enc_PartialSubtypeCheck(result, sub, super, tmp) );
  ins_pipe( pipe_slow );
%}
// Conditional-store of an int value.
// ZF flag is set on success, reset otherwise. Implemented with a CMPXCHG on Intel.
// On MIPS this is an ll/sc retry loop; AT is left as 0xFF on success and 0
// on failure (consumed by jmpCon_flags).  Indexed addressing unsupported.
instruct storeIConditional( memory mem, mRegI oldval, mRegI newval, FlagsReg cr ) %{
  match(Set cr (StoreIConditional mem (Binary oldval newval)));
  // effect(KILL oldval);
  format %{ "CMPXCHG $newval, $mem, $oldval \t# @storeIConditional" %}

  ins_encode %{
    Register oldval = $oldval$$Register;
    Register newval = $newval$$Register;
    Address addr(as_Register($mem$$base), $mem$$disp);
    Label again, failure;

    // int base = $mem$$base;
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;

    guarantee(Assembler::is_simm16(disp), "");

    if( index != 0 ) {
      __ stop("in storeIConditional: index != 0");
    } else {
      __ bind(again);
      if(UseSyncLevel <= 1000) __ sync();
      __ ll(AT, addr);
      __ bne(AT, oldval, failure);
      __ delayed()->addu(AT, R0, R0);    // delay slot: AT = 0 (failure result)

      __ addu(AT, newval, R0);
      __ sc(AT, addr);                   // sc writes its success flag into AT
      __ beq(AT, R0, again);             // retry if store-conditional failed
      __ delayed()->addiu(AT, R0, 0xFF); // delay slot: AT = 0xFF (success result)
      __ bind(failure);
      __ sync();
    }
  %}

  ins_pipe( long_memory_op );
%}
// Conditional-store of a long value.
// ZF flag is set on success, reset otherwise. Implemented with a CMPXCHG.
// Indexed addressing is unsupported; oldval is destroyed by cmpxchg.
instruct storeLConditional(memory mem, t2RegL oldval, mRegL newval, FlagsReg cr )
%{
  match(Set cr (StoreLConditional mem (Binary oldval newval)));
  effect(KILL oldval);

  format %{ "cmpxchg $mem, $newval\t# If $oldval == $mem then store $newval into $mem" %}
  ins_encode%{
    Register oldval = $oldval$$Register;
    Register newval = $newval$$Register;
    Address addr((Register)$mem$$base, $mem$$disp);

    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;

    guarantee(Assembler::is_simm16(disp), "");

    if( index != 0 ) {
      // Fixed copy-pasted diagnostic: it previously said "storeIConditional".
      __ stop("in storeLConditional: index != 0");
    } else {
      __ cmpxchg(newval, addr, oldval);
    }
  %}
  ins_pipe( long_memory_op );
%}
// 32-bit compare-and-swap.  cmpxchg32 leaves its result in AT (presumably
// 1 on success, 0 on failure — confirm in macroAssembler), which is copied
// into $res.  oldval is destroyed.
instruct compareAndSwapI( mRegI res, mRegP mem_ptr, mS2RegI oldval, mRegI newval) %{
  match(Set res (CompareAndSwapI mem_ptr (Binary oldval newval)));
  effect(KILL oldval);
  // match(CompareAndSwapI mem_ptr (Binary oldval newval));
  format %{ "CMPXCHG $newval, [$mem_ptr], $oldval @ compareAndSwapI\n\t"
            "MOV $res, 1 @ compareAndSwapI\n\t"
            "BNE AT, R0 @ compareAndSwapI\n\t"
            "MOV $res, 0 @ compareAndSwapI\n"
            "L:" %}
  ins_encode %{
    Register newval = $newval$$Register;
    Register oldval = $oldval$$Register;
    Register res = $res$$Register;
    Address addr($mem_ptr$$Register, 0);
    Label L;

    __ cmpxchg32(newval, addr, oldval);
    __ move(res, AT);
  %}
  ins_pipe( long_memory_op );
%}
//FIXME:
// 64-bit (pointer) compare-and-swap; result flag is taken from AT after
// cmpxchg.  oldval is destroyed.
instruct compareAndSwapP( mRegI res, mRegP mem_ptr, s2_RegP oldval, mRegP newval) %{
  match(Set res (CompareAndSwapP mem_ptr (Binary oldval newval)));
  effect(KILL oldval);
  format %{ "CMPXCHG $newval, [$mem_ptr], $oldval @ compareAndSwapP\n\t"
            "MOV $res, AT @ compareAndSwapP\n\t"
            "L:" %}
  ins_encode %{
    Register newval = $newval$$Register;
    Register oldval = $oldval$$Register;
    Register res = $res$$Register;
    Address addr($mem_ptr$$Register, 0);
    Label L;

    __ cmpxchg(newval, addr, oldval);
    __ move(res, AT);
  %}
  ins_pipe( long_memory_op );
%}
// Narrow-oop (32-bit) compare-and-swap.  oldval is first sign-extended
// (sll ...,0) to match ll/sc semantics, then cmpxchg32 is used as in
// compareAndSwapI.  oldval is destroyed.
instruct compareAndSwapN( mRegI res, mRegP mem_ptr, t2_RegN oldval, mRegN newval) %{
  match(Set res (CompareAndSwapN mem_ptr (Binary oldval newval)));
  effect(KILL oldval);
  format %{ "CMPXCHG $newval, [$mem_ptr], $oldval @ compareAndSwapN\n\t"
            "MOV $res, AT @ compareAndSwapN\n\t"
            "L:" %}
  ins_encode %{
    Register newval = $newval$$Register;
    Register oldval = $oldval$$Register;
    Register res = $res$$Register;
    Address addr($mem_ptr$$Register, 0);
    Label L;

    /* 2013/7/19 Jin: cmpxchg32 is implemented with ll/sc, which will do sign extension.
     *      Thus, we should extend oldval's sign for correct comparision.
     */
    __ sll(oldval, oldval, 0);

    __ cmpxchg32(newval, addr, oldval);
    __ move(res, AT);
  %}
  ins_pipe( long_memory_op );
%}
12858 //----------Max and Min--------------------------------------------------------
12859 // Min Instructions
12860 ////
12861 // *** Min and Max using the conditional move are slower than the
12862 // *** branch version on a Pentium III.
12863 // // Conditional move for min
12864 //instruct cmovI_reg_lt( eRegI op2, eRegI op1, eFlagsReg cr ) %{
12865 // effect( USE_DEF op2, USE op1, USE cr );
12866 // format %{ "CMOVlt $op2,$op1\t! min" %}
12867 // opcode(0x4C,0x0F);
12868 // ins_encode( OpcS, OpcP, RegReg( op2, op1 ) );
12869 // ins_pipe( pipe_cmov_reg );
12870 //%}
12871 //
12872 //// Min Register with Register (P6 version)
12873 //instruct minI_eReg_p6( eRegI op1, eRegI op2 ) %{
12874 // predicate(VM_Version::supports_cmov() );
12875 // match(Set op2 (MinI op1 op2));
12876 // ins_cost(200);
12877 // expand %{
12878 // eFlagsReg cr;
12879 // compI_eReg(cr,op1,op2);
12880 // cmovI_reg_lt(op2,op1,cr);
12881 // %}
12882 //%}
// Min Register with Register (generic version)
// dst = min(dst, src) via slt + conditional move.  Clobbers AT.
instruct minI_Reg_Reg(mRegI dst, mRegI src) %{
  match(Set dst (MinI dst src));
  //effect(KILL flags);
  ins_cost(80);

  format %{ "MIN $dst, $src @minI_Reg_Reg" %}
  ins_encode %{
    Register dst = $dst$$Register;
    Register src = $src$$Register;

    // AT = (src < dst); if so, move src into dst.
    __ slt(AT, src, dst);
    __ movn(dst, src, AT);
  %}

  ins_pipe( pipe_slow );
%}
12903 // Max Register with Register
12904 // *** Min and Max using the conditional move are slower than the
12905 // *** branch version on a Pentium III.
12906 // // Conditional move for max
12907 //instruct cmovI_reg_gt( eRegI op2, eRegI op1, eFlagsReg cr ) %{
12908 // effect( USE_DEF op2, USE op1, USE cr );
12909 // format %{ "CMOVgt $op2,$op1\t! max" %}
12910 // opcode(0x4F,0x0F);
12911 // ins_encode( OpcS, OpcP, RegReg( op2, op1 ) );
12912 // ins_pipe( pipe_cmov_reg );
12913 //%}
12914 //
12915 // // Max Register with Register (P6 version)
12916 //instruct maxI_eReg_p6( eRegI op1, eRegI op2 ) %{
12917 // predicate(VM_Version::supports_cmov() );
12918 // match(Set op2 (MaxI op1 op2));
12919 // ins_cost(200);
12920 // expand %{
12921 // eFlagsReg cr;
12922 // compI_eReg(cr,op1,op2);
12923 // cmovI_reg_gt(op2,op1,cr);
12924 // %}
12925 //%}
// Max Register with Register (generic version)
// dst = max(dst, src) via slt + conditional move.  Clobbers AT.
instruct maxI_Reg_Reg(mRegI dst, mRegI src) %{
  match(Set dst (MaxI dst src));
  ins_cost(80);

  format %{ "MAX $dst, $src @maxI_Reg_Reg" %}

  ins_encode %{
    Register dst = $dst$$Register;
    Register src = $src$$Register;

    // AT = (dst < src); if so, move src into dst.
    __ slt(AT, dst, src);
    __ movn(dst, src, AT);
  %}

  ins_pipe( pipe_slow );
%}
// dst = max(dst, 0): clamp negative values to zero.  Clobbers AT.
instruct maxI_Reg_zero(mRegI dst, immI0 zero) %{
  match(Set dst (MaxI dst zero));
  ins_cost(50);

  format %{ "MAX $dst, 0 @maxI_Reg_zero" %}

  ins_encode %{
    Register dst = $dst$$Register;

    // AT = (dst < 0); if so, replace dst with 0.
    __ slt(AT, dst, R0);
    __ movn(dst, R0, AT);
  %}

  ins_pipe( pipe_slow );
%}
// Zero-extend the low 32 bits of a long (AndL with 0xFFFFFFFF) using a
// single dext bit-field extract.
instruct zerox_long_reg_reg(mRegL dst, mRegL src, immL_32bits mask)
%{
  match(Set dst (AndL src mask));

  format %{ "movl $dst, $src\t# zero-extend long @ zerox_long_reg_reg" %}
  ins_encode %{
    Register dst = $dst$$Register;
    Register src = $src$$Register;

    __ dext(dst, src, 0, 32);
  %}
  ins_pipe(ialu_regI_regI);
%}
// Combine two ints into one long: src1 supplies the low 32 bits and src2
// the high 32 bits (OrL of masked ConvI2L with ConvI2L shifted by 32).
instruct combine_i2l(mRegL dst, mRegI src1, immL_32bits mask, mRegI src2, immI_32 shift32)
%{
  match(Set dst (OrL (AndL (ConvI2L src1) mask) (LShiftL (ConvI2L src2) shift32)));

  format %{ "combine_i2l $dst, $src2(H), $src1(L) @ combine_i2l" %}
  ins_encode %{
    Register dst  = $dst$$Register;
    Register src1 = $src1$$Register;
    Register src2 = $src2$$Register;

    if (src1 == dst) {
      // Low half already in place; insert src2 into the high half.
      __ dinsu(dst, src2, 32, 32);
    } else if (src2 == dst) {
      // Shift the high half into position, then insert the low half.
      __ dsll32(dst, dst, 0);
      __ dins(dst, src1, 0, 32);
    } else {
      // Distinct registers: extract low half, then insert high half.
      __ dext(dst, src1, 0, 32);
      __ dinsu(dst, src2, 32, 32);
    }
  %}
  ins_pipe(ialu_regI_regI);
%}
// Zero-extend convert int to long
// (AndL (ConvI2L src) 0xFFFFFFFF) collapses to a single dext.
instruct convI2L_reg_reg_zex(mRegL dst, mRegI src, immL_32bits mask)
%{
  match(Set dst (AndL (ConvI2L src) mask));

  format %{ "movl $dst, $src\t# i2l zero-extend @ convI2L_reg_reg_zex" %}
  ins_encode %{
    Register dst = $dst$$Register;
    Register src = $src$$Register;

    __ dext(dst, src, 0, 32);
  %}
  ins_pipe(ialu_regI_regI);
%}
// Long -> int -> long round trip with a 32-bit mask is just a zero-extend
// of the low 32 bits: one dext.
instruct convL2I2L_reg_reg_zex(mRegL dst, mRegL src, immL_32bits mask)
%{
  match(Set dst (AndL (ConvI2L (ConvL2I src)) mask));

  format %{ "movl $dst, $src\t# i2l zero-extend @ convL2I2L_reg_reg_zex" %}
  ins_encode %{
    Register dst = $dst$$Register;
    Register src = $src$$Register;

    __ dext(dst, src, 0, 32);
  %}
  ins_pipe(ialu_regI_regI);
%}
// Match loading integer and casting it to unsigned int in long register.
// LoadI + ConvI2L + AndL 0xffffffff.
// Encoded with load_N_enc — presumably a zero-extending 32-bit load (lwu),
// shared with narrow-oop loads; confirm in the enc_class definition.
instruct loadUI2L_rmask(mRegL dst, memory mem, immL_32bits mask) %{
  match(Set dst (AndL (ConvI2L (LoadI mem)) mask));

  format %{ "lwu $dst, $mem \t// zero-extend to long @ loadUI2L_rmask" %}
  ins_encode (load_N_enc(dst, mem));
  ins_pipe(ialu_loadI);
%}
// Same as loadUI2L_rmask, with the mask as the left operand of AndL.
instruct loadUI2L_lmask(mRegL dst, memory mem, immL_32bits mask) %{
  match(Set dst (AndL mask (ConvI2L (LoadI mem))));

  format %{ "lwu $dst, $mem \t// zero-extend to long @ loadUI2L_lmask" %}
  ins_encode (load_N_enc(dst, mem));
  ins_pipe(ialu_loadI);
%}
// ============================================================================
// Safepoint Instruction
// Read the polling page: the load faults when a safepoint is pending,
// trapping into the VM's safepoint handler.  Clobbers AT.
instruct safePoint_poll(mRegP poll) %{
  match(SafePoint poll);
  effect(USE poll);

  ins_cost(125);
  format %{ "Safepoint @ [$poll] : poll for GC @ safePoint_poll" %}

  ins_encode %{
    Register poll_reg = $poll$$Register;

    __ block_comment("Safepoint:");
    // Record the poll relocation so the VM can identify this load.
    __ relocate(relocInfo::poll_type);
    __ lw(AT, poll_reg, 0);
  %}

  ins_pipe( ialu_storeI );
%}
//----------Arithmetic Conversion Instructions---------------------------------

// RoundFloat is a no-op here: the empty encoding emits no code, the
// instruct exists only to consume the RoundFloat node.
instruct roundFloat_nop(regF dst)
%{
  match(Set dst (RoundFloat dst));

  ins_cost(0);
  ins_encode();
  ins_pipe(empty);
%}
// RoundDouble is likewise a no-op: empty encoding, zero cost.
instruct roundDouble_nop(regD dst)
%{
  match(Set dst (RoundDouble dst));

  ins_cost(0);
  ins_encode();
  ins_pipe(empty);
%}
//---------- Zeros Count Instructions ------------------------------------------
// CountLeadingZerosINode CountTrailingZerosINode

// Count leading zeros of an int with the hardware clz instruction;
// guarded by the matching VM flag.
instruct countLeadingZerosI(mRegI dst, mRegI src) %{
  predicate(UseCountLeadingZerosInstruction);
  match(Set dst (CountLeadingZerosI src));

  format %{ "clz $dst, $src\t# count leading zeros (int)" %}
  ins_encode %{
    __ clz($dst$$Register, $src$$Register);
  %}
  ins_pipe( ialu_regL_regL );
%}
// Count leading zeros of a long with the 64-bit dclz instruction.
instruct countLeadingZerosL(mRegI dst, mRegL src) %{
  predicate(UseCountLeadingZerosInstruction);
  match(Set dst (CountLeadingZerosL src));

  format %{ "dclz $dst, $src\t# count leading zeros (long)" %}
  ins_encode %{
    __ dclz($dst$$Register, $src$$Register);
  %}
  ins_pipe( ialu_regL_regL );
%}
// Count trailing zeros of an int.
instruct countTrailingZerosI(mRegI dst, mRegI src) %{
  predicate(UseCountTrailingZerosInstruction);
  match(Set dst (CountTrailingZerosI src));

  format %{ "ctz $dst, $src\t# count trailing zeros (int)" %}
  ins_encode %{
    // ctz and dctz are Loongson (GS) extension instructions.
    __ ctz($dst$$Register, $src$$Register);
  %}
  ins_pipe( ialu_regL_regL );
%}
// Count trailing zeros of a long with the Loongson dctz instruction.
instruct countTrailingZerosL(mRegI dst, mRegL src) %{
  predicate(UseCountTrailingZerosInstruction);
  match(Set dst (CountTrailingZerosL src));

  // Fixed the format mnemonic: the emitted instruction is dctz, not "dcto".
  format %{ "dctz $dst, $src\t# count trailing zeros (long)" %}
  ins_encode %{
    __ dctz($dst$$Register, $src$$Register);
  %}
  ins_pipe( ialu_regL_regL );
%}
// ====================VECTOR INSTRUCTIONS=====================================

// Load vectors (8 bytes long)
// An 8-byte vector lives in a double FP register; reuse the double load.
instruct loadV8(vecD dst, memory mem) %{
  predicate(n->as_LoadVector()->memory_size() == 8);
  match(Set dst (LoadVector mem));
  ins_cost(125);
  format %{ "load $dst, $mem\t! load vector (8 bytes)" %}
  ins_encode(load_D_enc(dst, mem));
  ins_pipe( fpu_loadF );
%}
// Store vectors (8 bytes long)
// Reuses the double store encoding for an 8-byte vector register.
instruct storeV8(memory mem, vecD src) %{
  predicate(n->as_StoreVector()->memory_size() == 8);
  match(Set mem (StoreVector mem src));
  ins_cost(145);
  format %{ "store $mem, $src\t! store vector (8 bytes)" %}
  ins_encode(store_D_reg_enc(mem, src));
  ins_pipe( fpu_storeF );
%}
// Replicate an int register into all 8 byte lanes of a 64-bit vector:
// replv_ob broadcasts in AT, dmtc1 moves it into the FP vector register.
// Clobbers AT.
instruct Repl8B(vecD dst, mRegI src) %{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (ReplicateB src));
  format %{ "replv_ob AT, $src\n\t"
            "dmtc1 AT, $dst\t! replicate8B" %}
  ins_encode %{
    __ replv_ob(AT, $src$$Register);
    __ dmtc1(AT, $dst$$FloatRegister);
  %}
  ins_pipe( pipe_mtc1 );
%}
// Replicate an immediate into all 8 byte lanes.  Clobbers AT.
// NOTE(review): unlike Repl4S_imm there is no immediate-range check before
// repl_ob — confirm repl_ob accepts the full immI range, or that the
// matcher restricts it.
instruct Repl8B_imm(vecD dst, immI con) %{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (ReplicateB con));
  format %{ "repl_ob AT, [$con]\n\t"
            "dmtc1 AT, $dst,0x00\t! replicate8B($con)" %}
  ins_encode %{
    int val = $con$$constant;
    __ repl_ob(AT, val);
    __ dmtc1(AT, $dst$$FloatRegister);
  %}
  ins_pipe( pipe_mtc1 );
%}
// Replicate zero into all 8 byte lanes: a single dmtc1 of R0 suffices.
instruct Repl8B_zero(vecD dst, immI0 zero) %{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (ReplicateB zero));
  format %{ "dmtc1 R0, $dst\t! replicate8B zero" %}
  ins_encode %{
    __ dmtc1(R0, $dst$$FloatRegister);
  %}
  ins_pipe( pipe_mtc1 );
%}
// Replicate byte -1: all-ones 64-bit vector, built as nor(R0, R0) in AT.
instruct Repl8B_M1(vecD dst, immI_M1 M1) %{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (ReplicateB M1));
  format %{ "dmtc1 -1, $dst\t! replicate8B -1" %}
  ins_encode %{
    // nor AT, R0, R0 yields 0xFFFFFFFFFFFFFFFF.
    __ nor(AT, R0, R0);
    __ dmtc1(AT, $dst$$FloatRegister);
  %}
  ins_pipe( pipe_mtc1 );
%}
// Replicate a short from a GP register into all 4 halfword lanes of vecD.
instruct Repl4S(vecD dst, mRegI src) %{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (ReplicateS src));
  format %{ "replv_qh AT, $src\n\t"
            "dmtc1 AT, $dst\t! replicate4S" %}
  ins_encode %{
    // replv_qh presumably broadcasts the low halfword of $src across all
    // 4 halfword lanes (Loongson MMI) — verify against the MMI manual.
    __ replv_qh(AT, $src$$Register);
    __ dmtc1(AT, $dst$$FloatRegister);
  %}
  ins_pipe( pipe_mtc1 );
%}
// Replicate an immediate short constant into all 4 halfword lanes of vecD.
// Small constants use the immediate form; larger ones are materialized in
// AT first and then broadcast.
instruct Repl4S_imm(vecD dst, immI con) %{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (ReplicateS con));
  format %{ "replv_qh AT, [$con]\n\t"
            "dmtc1 AT, $dst\t! replicate4S($con)" %}
  ins_encode %{
    int val = $con$$constant;
    if ( Assembler::is_simm(val, 10)) {
      //repl_qh supports 10 bits immediate
      __ repl_qh(AT, val);
    } else {
      // Constant too wide for the immediate form: load it into AT,
      // then broadcast the register.
      __ li32(AT, val);
      __ replv_qh(AT, AT);
    }
    __ dmtc1(AT, $dst$$FloatRegister);
  %}
  ins_pipe( pipe_mtc1 );
%}
// Replicate short zero: all-zero 64-bit vector via a direct move of R0.
instruct Repl4S_zero(vecD dst, immI0 zero) %{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (ReplicateS zero));
  format %{ "dmtc1 R0, $dst\t! replicate4S zero" %}
  ins_encode %{
    __ dmtc1(R0, $dst$$FloatRegister);
  %}
  ins_pipe( pipe_mtc1 );
%}
// Replicate short -1: all-ones 64-bit vector, built as nor(R0, R0) in AT.
instruct Repl4S_M1(vecD dst, immI_M1 M1) %{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (ReplicateS M1));
  format %{ "dmtc1 -1, $dst\t! replicate4S -1" %}
  ins_encode %{
    // nor AT, R0, R0 yields 0xFFFFFFFFFFFFFFFF.
    __ nor(AT, R0, R0);
    __ dmtc1(AT, $dst$$FloatRegister);
  %}
  ins_pipe( pipe_mtc1 );
%}
13255 // Replicate integer (4 byte) scalar to be vector
// Replicate integer (4 byte) scalar to be vector
// Builds the pair in AT: dins places the 32-bit source in the low half,
// dinsu duplicates it into the high half, then the result is moved to FP.
instruct Repl2I(vecD dst, mRegI src) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (ReplicateI src));
  format %{ "dins AT, $src, 0, 32\n\t"
            "dinsu AT, $src, 32, 32\n\t"
            "dmtc1 AT, $dst\t! replicate2I" %}
  ins_encode %{
    // Insert src into AT bits [31:0].
    __ dins(AT, $src$$Register, 0, 32);
    // Insert src into AT bits [63:32] (dinsu targets the upper doubleword).
    __ dinsu(AT, $src$$Register, 32, 32);
    __ dmtc1(AT, $dst$$FloatRegister);
  %}
  ins_pipe( pipe_mtc1 );
%}
13270 // Replicate integer (4 byte) scalar immediate to be vector by loading from const table.
// Replicate integer (4 byte) scalar immediate to be vector by loading from const table.
instruct Repl2I_imm(vecD dst, immI con, mA7RegI tmp) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (ReplicateI con));
  // NOTE(review): tmp (A7) is declared KILLed but the encoding below only
  // uses AT — confirm whether the tmp operand is still required.
  effect(KILL tmp);
  format %{ "li32 AT, [$con], 32\n\t"
            "replv_pw AT, AT\n\t"
            "dmtc1 AT, $dst\t! replicate2I($con)" %}
  ins_encode %{
    int val = $con$$constant;
    // Materialize the 32-bit constant, then broadcast it to both word lanes.
    __ li32(AT, val);
    __ replv_pw(AT, AT);
    __ dmtc1(AT, $dst$$FloatRegister);
  %}
  ins_pipe( pipe_mtc1 );
%}
13287 // Replicate integer (4 byte) scalar zero to be vector
// Replicate integer (4 byte) scalar zero to be vector
instruct Repl2I_zero(vecD dst, immI0 zero) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (ReplicateI zero));
  format %{ "dmtc1 R0, $dst\t! replicate2I zero" %}
  ins_encode %{
    // All-zero vector is just R0 moved to the FP register.
    __ dmtc1(R0, $dst$$FloatRegister);
  %}
  ins_pipe( pipe_mtc1 );
%}
13298 // Replicate integer (4 byte) scalar -1 to be vector
// Replicate integer (4 byte) scalar -1 to be vector
instruct Repl2I_M1(vecD dst, immI_M1 M1) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (ReplicateI M1));
  format %{ "dmtc1 -1, $dst\t! replicate2I -1, use AT" %}
  ins_encode %{
    // nor AT, R0, R0 yields 0xFFFFFFFFFFFFFFFF.
    __ nor(AT, R0, R0);
    __ dmtc1(AT, $dst$$FloatRegister);
  %}
  ins_pipe( pipe_mtc1 );
%}
13310 // Replicate float (4 byte) scalar to be vector
// Replicate float (4 byte) scalar to be vector
// cvt.ps.s packs two singles into one paired-single register; using the
// same source for both operands broadcasts it to both lanes.
instruct Repl2F(vecD dst, regF src) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (ReplicateF src));
  format %{ "cvt.ps $dst, $src, $src\t! replicate2F" %}
  ins_encode %{
    __ cvt_ps_s($dst$$FloatRegister, $src$$FloatRegister, $src$$FloatRegister);
  %}
  ins_pipe( pipe_slow );
%}
13321 // Replicate float (4 byte) scalar zero to be vector
// Replicate float (4 byte) scalar zero to be vector
// +0.0f in both lanes is the all-zero bit pattern, so moving R0 suffices.
instruct Repl2F_zero(vecD dst, immF0 zero) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (ReplicateF zero));
  format %{ "dmtc1 R0, $dst\t! replicate2F zero" %}
  ins_encode %{
    __ dmtc1(R0, $dst$$FloatRegister);
  %}
  ins_pipe( pipe_mtc1 );
%}
13333 // ====================VECTOR ARITHMETIC=======================================
13335 // --------------------------------- ADD --------------------------------------
13337 // Floats vector add
// Floats vector add, two-operand form (dst += src) using paired singles.
instruct vadd2F(vecD dst, vecD src) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (AddVF dst src));
  format %{ "add.ps $dst,$src\t! add packed2F" %}
  ins_encode %{
    __ add_ps($dst$$FloatRegister, $dst$$FloatRegister, $src$$FloatRegister);
  %}
  // NOTE(review): pipelined as pipe_slow while the 3-operand variant uses
  // fpu_regF_regF — confirm which pipe class is intended.
  ins_pipe( pipe_slow );
%}
// Floats vector add, three-operand form (dst = src1 + src2).
instruct vadd2F3(vecD dst, vecD src1, vecD src2) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (AddVF src1 src2));
  format %{ "add.ps $dst,$src1,$src2\t! add packed2F" %}
  ins_encode %{
    __ add_ps($dst$$FloatRegister, $src1$$FloatRegister, $src2$$FloatRegister);
  %}
  ins_pipe( fpu_regF_regF );
%}
13358 // --------------------------------- SUB --------------------------------------
13360 // Floats vector sub
// Floats vector sub, two-operand form (dst -= src) using paired singles.
instruct vsub2F(vecD dst, vecD src) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (SubVF dst src));
  format %{ "sub.ps $dst,$src\t! sub packed2F" %}
  ins_encode %{
    __ sub_ps($dst$$FloatRegister, $dst$$FloatRegister, $src$$FloatRegister);
  %}
  ins_pipe( fpu_regF_regF );
%}
13371 // --------------------------------- MUL --------------------------------------
13373 // Floats vector mul
// Floats vector mul, two-operand form (dst *= src) using paired singles.
instruct vmul2F(vecD dst, vecD src) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (MulVF dst src));
  format %{ "mul.ps $dst, $src\t! mul packed2F" %}
  ins_encode %{
    __ mul_ps($dst$$FloatRegister, $dst$$FloatRegister, $src$$FloatRegister);
  %}
  ins_pipe( fpu_regF_regF );
%}
// Floats vector mul, three-operand form (dst = src1 * src2).
instruct vmul2F3(vecD dst, vecD src1, vecD src2) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (MulVF src1 src2));
  format %{ "mul.ps $dst, $src1, $src2\t! mul packed2F" %}
  ins_encode %{
    __ mul_ps($dst$$FloatRegister, $src1$$FloatRegister, $src2$$FloatRegister);
  %}
  ins_pipe( fpu_regF_regF );
%}
13394 // --------------------------------- DIV --------------------------------------
// MIPS does not have div.ps
13398 //----------PEEPHOLE RULES-----------------------------------------------------
13399 // These must follow all instruction definitions as they use the names
13400 // defined in the instructions definitions.
13401 //
// peepmatch ( root_instr_name [preceding_instruction]* );
13403 //
13404 // peepconstraint %{
13405 // (instruction_number.operand_name relational_op instruction_number.operand_name
13406 // [, ...] );
13407 // // instruction numbers are zero-based using left to right order in peepmatch
13408 //
13409 // peepreplace ( instr_name ( [instruction_number.operand_name]* ) );
13410 // // provide an instruction_number.operand_name for each operand that appears
13411 // // in the replacement instruction's match rule
13412 //
13413 // ---------VM FLAGS---------------------------------------------------------
13414 //
13415 // All peephole optimizations can be turned off using -XX:-OptoPeephole
13416 //
13417 // Each peephole rule is given an identifying number starting with zero and
13418 // increasing by one in the order seen by the parser. An individual peephole
13419 // can be enabled, and all others disabled, by using -XX:OptoPeepholeAt=#
13420 // on the command-line.
13421 //
13422 // ---------CURRENT LIMITATIONS----------------------------------------------
13423 //
13424 // Only match adjacent instructions in same basic block
13425 // Only equality constraints
13426 // Only constraints between operands, not (0.dest_reg == EAX_enc)
13427 // Only one replacement instruction
13428 //
13429 // ---------EXAMPLE----------------------------------------------------------
13430 //
13431 // // pertinent parts of existing instructions in architecture description
13432 // instruct movI(eRegI dst, eRegI src) %{
13433 // match(Set dst (CopyI src));
13434 // %}
13435 //
13436 // instruct incI_eReg(eRegI dst, immI1 src, eFlagsReg cr) %{
13437 // match(Set dst (AddI dst src));
13438 // effect(KILL cr);
13439 // %}
13440 //
13441 // // Change (inc mov) to lea
13442 // peephole %{
// // increment preceded by register-register move
13444 // peepmatch ( incI_eReg movI );
13445 // // require that the destination register of the increment
13446 // // match the destination register of the move
13447 // peepconstraint ( 0.dst == 1.dst );
13448 // // construct a replacement instruction that sets
13449 // // the destination to ( move's source register + one )
13450 // peepreplace ( leaI_eReg_immI( 0.dst 1.src 0.src ) );
13451 // %}
13452 //
13453 // Implementation no longer uses movX instructions since
13454 // machine-independent system no longer uses CopyX nodes.
13455 //
13456 // peephole %{
13457 // peepmatch ( incI_eReg movI );
13458 // peepconstraint ( 0.dst == 1.dst );
13459 // peepreplace ( leaI_eReg_immI( 0.dst 1.src 0.src ) );
13460 // %}
13461 //
13462 // peephole %{
13463 // peepmatch ( decI_eReg movI );
13464 // peepconstraint ( 0.dst == 1.dst );
13465 // peepreplace ( leaI_eReg_immI( 0.dst 1.src 0.src ) );
13466 // %}
13467 //
13468 // peephole %{
13469 // peepmatch ( addI_eReg_imm movI );
13470 // peepconstraint ( 0.dst == 1.dst );
13471 // peepreplace ( leaI_eReg_immI( 0.dst 1.src 0.src ) );
13472 // %}
13473 //
13474 // peephole %{
13475 // peepmatch ( addP_eReg_imm movP );
13476 // peepconstraint ( 0.dst == 1.dst );
13477 // peepreplace ( leaP_eReg_immI( 0.dst 1.src 0.src ) );
13478 // %}
13480 // // Change load of spilled value to only a spill
13481 // instruct storeI(memory mem, eRegI src) %{
13482 // match(Set mem (StoreI mem src));
13483 // %}
13484 //
13485 // instruct loadI(eRegI dst, memory mem) %{
13486 // match(Set dst (LoadI mem));
13487 // %}
13488 //
13489 //peephole %{
13490 // peepmatch ( loadI storeI );
13491 // peepconstraint ( 1.src == 0.dst, 1.mem == 0.mem );
13492 // peepreplace ( storeI( 1.mem 1.mem 1.src ) );
13493 //%}
13495 //----------SMARTSPILL RULES---------------------------------------------------
13496 // These must follow all instruction definitions as they use the names
13497 // defined in the instructions definitions.