Mon, 22 May 2017 08:01:12 -0400
Remove unnecessary guarantees in mips_64.ad
1 //
2 // Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved.
3 // Copyright (c) 2015, 2016, Loongson Technology. All rights reserved.
4 // DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5 //
6 // This code is free software; you can redistribute it and/or modify it
7 // under the terms of the GNU General Public License version 2 only, as
8 // published by the Free Software Foundation.
9 //
10 // This code is distributed in the hope that it will be useful, but WITHOUT
11 // ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 // FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
13 // version 2 for more details (a copy is included in the LICENSE file that
14 // accompanied this code).
15 //
16 // You should have received a copy of the GNU General Public License version
17 // 2 along with this work; if not, write to the Free Software Foundation,
18 // Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
19 //
20 // Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
21 // or visit www.oracle.com if you need additional information or have any
22 // questions.
23 //
24 //
26 // GodSon3 Architecture Description File
28 //----------REGISTER DEFINITION BLOCK------------------------------------------
29 // This information is used by the matcher and the register allocator to
30 // describe individual registers and classes of registers within the target
// architecture.
33 // format:
34 // reg_def name (call convention, c-call convention, ideal type, encoding);
35 // call convention :
36 // NS = No-Save
37 // SOC = Save-On-Call
38 // SOE = Save-On-Entry
39 // AS = Always-Save
40 // ideal type :
41 // see opto/opcodes.hpp for more info
42 // reg_class name (reg, ...);
43 // alloc_class name (reg, ...);
44 register %{
46 // General Registers
47 // Integer Registers
48 reg_def R0 ( NS, NS, Op_RegI, 0, VMRegImpl::Bad());
49 reg_def AT ( NS, NS, Op_RegI, 1, AT->as_VMReg());
50 reg_def AT_H ( NS, NS, Op_RegI, 1, AT->as_VMReg()->next());
51 reg_def V0 (SOC, SOC, Op_RegI, 2, V0->as_VMReg());
52 reg_def V0_H (SOC, SOC, Op_RegI, 2, V0->as_VMReg()->next());
53 reg_def V1 (SOC, SOC, Op_RegI, 3, V1->as_VMReg());
54 reg_def V1_H (SOC, SOC, Op_RegI, 3, V1->as_VMReg()->next());
55 reg_def A0 (SOC, SOC, Op_RegI, 4, A0->as_VMReg());
56 reg_def A0_H (SOC, SOC, Op_RegI, 4, A0->as_VMReg()->next());
57 reg_def A1 (SOC, SOC, Op_RegI, 5, A1->as_VMReg());
58 reg_def A1_H (SOC, SOC, Op_RegI, 5, A1->as_VMReg()->next());
59 reg_def A2 (SOC, SOC, Op_RegI, 6, A2->as_VMReg());
60 reg_def A2_H (SOC, SOC, Op_RegI, 6, A2->as_VMReg()->next());
61 reg_def A3 (SOC, SOC, Op_RegI, 7, A3->as_VMReg());
62 reg_def A3_H (SOC, SOC, Op_RegI, 7, A3->as_VMReg()->next());
63 reg_def A4 (SOC, SOC, Op_RegI, 8, A4->as_VMReg());
64 reg_def A4_H (SOC, SOC, Op_RegI, 8, A4->as_VMReg()->next());
65 reg_def A5 (SOC, SOC, Op_RegI, 9, A5->as_VMReg());
66 reg_def A5_H (SOC, SOC, Op_RegI, 9, A5->as_VMReg()->next());
67 reg_def A6 (SOC, SOC, Op_RegI, 10, A6->as_VMReg());
68 reg_def A6_H (SOC, SOC, Op_RegI, 10, A6->as_VMReg()->next());
69 reg_def A7 (SOC, SOC, Op_RegI, 11, A7->as_VMReg());
70 reg_def A7_H (SOC, SOC, Op_RegI, 11, A7->as_VMReg()->next());
71 reg_def T0 (SOC, SOC, Op_RegI, 12, T0->as_VMReg());
72 reg_def T0_H (SOC, SOC, Op_RegI, 12, T0->as_VMReg()->next());
73 reg_def T1 (SOC, SOC, Op_RegI, 13, T1->as_VMReg());
74 reg_def T1_H (SOC, SOC, Op_RegI, 13, T1->as_VMReg()->next());
75 reg_def T2 (SOC, SOC, Op_RegI, 14, T2->as_VMReg());
76 reg_def T2_H (SOC, SOC, Op_RegI, 14, T2->as_VMReg()->next());
77 reg_def T3 (SOC, SOC, Op_RegI, 15, T3->as_VMReg());
78 reg_def T3_H (SOC, SOC, Op_RegI, 15, T3->as_VMReg()->next());
79 reg_def S0 (SOC, SOE, Op_RegI, 16, S0->as_VMReg());
80 reg_def S0_H (SOC, SOE, Op_RegI, 16, S0->as_VMReg()->next());
81 reg_def S1 (SOC, SOE, Op_RegI, 17, S1->as_VMReg());
82 reg_def S1_H (SOC, SOE, Op_RegI, 17, S1->as_VMReg()->next());
83 reg_def S2 (SOC, SOE, Op_RegI, 18, S2->as_VMReg());
84 reg_def S2_H (SOC, SOE, Op_RegI, 18, S2->as_VMReg()->next());
85 reg_def S3 (SOC, SOE, Op_RegI, 19, S3->as_VMReg());
86 reg_def S3_H (SOC, SOE, Op_RegI, 19, S3->as_VMReg()->next());
87 reg_def S4 (SOC, SOE, Op_RegI, 20, S4->as_VMReg());
88 reg_def S4_H (SOC, SOE, Op_RegI, 20, S4->as_VMReg()->next());
89 reg_def S5 (SOC, SOE, Op_RegI, 21, S5->as_VMReg());
90 reg_def S5_H (SOC, SOE, Op_RegI, 21, S5->as_VMReg()->next());
91 reg_def S6 (SOC, SOE, Op_RegI, 22, S6->as_VMReg());
92 reg_def S6_H (SOC, SOE, Op_RegI, 22, S6->as_VMReg()->next());
93 reg_def S7 (SOC, SOE, Op_RegI, 23, S7->as_VMReg());
94 reg_def S7_H (SOC, SOE, Op_RegI, 23, S7->as_VMReg()->next());
95 reg_def T8 (SOC, SOC, Op_RegI, 24, T8->as_VMReg());
96 reg_def T8_H (SOC, SOC, Op_RegI, 24, T8->as_VMReg()->next());
97 reg_def T9 (SOC, SOC, Op_RegI, 25, T9->as_VMReg());
98 reg_def T9_H (SOC, SOC, Op_RegI, 25, T9->as_VMReg()->next());
100 // Special Registers
101 reg_def K0 ( NS, NS, Op_RegI, 26, K0->as_VMReg());
102 reg_def K1 ( NS, NS, Op_RegI, 27, K1->as_VMReg());
103 reg_def GP ( NS, NS, Op_RegI, 28, GP->as_VMReg());
104 reg_def GP_H ( NS, NS, Op_RegI, 28, GP->as_VMReg()->next());
105 reg_def SP ( NS, NS, Op_RegI, 29, SP->as_VMReg());
106 reg_def SP_H ( NS, NS, Op_RegI, 29, SP->as_VMReg()->next());
107 reg_def FP ( NS, NS, Op_RegI, 30, FP->as_VMReg());
108 reg_def FP_H ( NS, NS, Op_RegI, 30, FP->as_VMReg()->next());
109 reg_def RA ( NS, NS, Op_RegI, 31, RA->as_VMReg());
110 reg_def RA_H ( NS, NS, Op_RegI, 31, RA->as_VMReg()->next());
112 // Floating registers.
113 reg_def F0 ( SOC, SOC, Op_RegF, 0, F0->as_VMReg());
114 reg_def F0_H ( SOC, SOC, Op_RegF, 0, F0->as_VMReg()->next());
115 reg_def F1 ( SOC, SOC, Op_RegF, 1, F1->as_VMReg());
116 reg_def F1_H ( SOC, SOC, Op_RegF, 1, F1->as_VMReg()->next());
117 reg_def F2 ( SOC, SOC, Op_RegF, 2, F2->as_VMReg());
118 reg_def F2_H ( SOC, SOC, Op_RegF, 2, F2->as_VMReg()->next());
119 reg_def F3 ( SOC, SOC, Op_RegF, 3, F3->as_VMReg());
120 reg_def F3_H ( SOC, SOC, Op_RegF, 3, F3->as_VMReg()->next());
121 reg_def F4 ( SOC, SOC, Op_RegF, 4, F4->as_VMReg());
122 reg_def F4_H ( SOC, SOC, Op_RegF, 4, F4->as_VMReg()->next());
123 reg_def F5 ( SOC, SOC, Op_RegF, 5, F5->as_VMReg());
124 reg_def F5_H ( SOC, SOC, Op_RegF, 5, F5->as_VMReg()->next());
125 reg_def F6 ( SOC, SOC, Op_RegF, 6, F6->as_VMReg());
126 reg_def F6_H ( SOC, SOC, Op_RegF, 6, F6->as_VMReg()->next());
127 reg_def F7 ( SOC, SOC, Op_RegF, 7, F7->as_VMReg());
128 reg_def F7_H ( SOC, SOC, Op_RegF, 7, F7->as_VMReg()->next());
129 reg_def F8 ( SOC, SOC, Op_RegF, 8, F8->as_VMReg());
130 reg_def F8_H ( SOC, SOC, Op_RegF, 8, F8->as_VMReg()->next());
131 reg_def F9 ( SOC, SOC, Op_RegF, 9, F9->as_VMReg());
132 reg_def F9_H ( SOC, SOC, Op_RegF, 9, F9->as_VMReg()->next());
133 reg_def F10 ( SOC, SOC, Op_RegF, 10, F10->as_VMReg());
134 reg_def F10_H ( SOC, SOC, Op_RegF, 10, F10->as_VMReg()->next());
135 reg_def F11 ( SOC, SOC, Op_RegF, 11, F11->as_VMReg());
136 reg_def F11_H ( SOC, SOC, Op_RegF, 11, F11->as_VMReg()->next());
137 reg_def F12 ( SOC, SOC, Op_RegF, 12, F12->as_VMReg());
138 reg_def F12_H ( SOC, SOC, Op_RegF, 12, F12->as_VMReg()->next());
139 reg_def F13 ( SOC, SOC, Op_RegF, 13, F13->as_VMReg());
140 reg_def F13_H ( SOC, SOC, Op_RegF, 13, F13->as_VMReg()->next());
141 reg_def F14 ( SOC, SOC, Op_RegF, 14, F14->as_VMReg());
142 reg_def F14_H ( SOC, SOC, Op_RegF, 14, F14->as_VMReg()->next());
143 reg_def F15 ( SOC, SOC, Op_RegF, 15, F15->as_VMReg());
144 reg_def F15_H ( SOC, SOC, Op_RegF, 15, F15->as_VMReg()->next());
145 reg_def F16 ( SOC, SOC, Op_RegF, 16, F16->as_VMReg());
146 reg_def F16_H ( SOC, SOC, Op_RegF, 16, F16->as_VMReg()->next());
147 reg_def F17 ( SOC, SOC, Op_RegF, 17, F17->as_VMReg());
148 reg_def F17_H ( SOC, SOC, Op_RegF, 17, F17->as_VMReg()->next());
149 reg_def F18 ( SOC, SOC, Op_RegF, 18, F18->as_VMReg());
150 reg_def F18_H ( SOC, SOC, Op_RegF, 18, F18->as_VMReg()->next());
151 reg_def F19 ( SOC, SOC, Op_RegF, 19, F19->as_VMReg());
152 reg_def F19_H ( SOC, SOC, Op_RegF, 19, F19->as_VMReg()->next());
153 reg_def F20 ( SOC, SOC, Op_RegF, 20, F20->as_VMReg());
154 reg_def F20_H ( SOC, SOC, Op_RegF, 20, F20->as_VMReg()->next());
155 reg_def F21 ( SOC, SOC, Op_RegF, 21, F21->as_VMReg());
156 reg_def F21_H ( SOC, SOC, Op_RegF, 21, F21->as_VMReg()->next());
157 reg_def F22 ( SOC, SOC, Op_RegF, 22, F22->as_VMReg());
158 reg_def F22_H ( SOC, SOC, Op_RegF, 22, F22->as_VMReg()->next());
159 reg_def F23 ( SOC, SOC, Op_RegF, 23, F23->as_VMReg());
160 reg_def F23_H ( SOC, SOC, Op_RegF, 23, F23->as_VMReg()->next());
161 reg_def F24 ( SOC, SOC, Op_RegF, 24, F24->as_VMReg());
162 reg_def F24_H ( SOC, SOC, Op_RegF, 24, F24->as_VMReg()->next());
163 reg_def F25 ( SOC, SOC, Op_RegF, 25, F25->as_VMReg());
164 reg_def F25_H ( SOC, SOC, Op_RegF, 25, F25->as_VMReg()->next());
165 reg_def F26 ( SOC, SOC, Op_RegF, 26, F26->as_VMReg());
166 reg_def F26_H ( SOC, SOC, Op_RegF, 26, F26->as_VMReg()->next());
167 reg_def F27 ( SOC, SOC, Op_RegF, 27, F27->as_VMReg());
168 reg_def F27_H ( SOC, SOC, Op_RegF, 27, F27->as_VMReg()->next());
169 reg_def F28 ( SOC, SOC, Op_RegF, 28, F28->as_VMReg());
170 reg_def F28_H ( SOC, SOC, Op_RegF, 28, F28->as_VMReg()->next());
171 reg_def F29 ( SOC, SOC, Op_RegF, 29, F29->as_VMReg());
172 reg_def F29_H ( SOC, SOC, Op_RegF, 29, F29->as_VMReg()->next());
173 reg_def F30 ( SOC, SOC, Op_RegF, 30, F30->as_VMReg());
174 reg_def F30_H ( SOC, SOC, Op_RegF, 30, F30->as_VMReg()->next());
175 reg_def F31 ( SOC, SOC, Op_RegF, 31, F31->as_VMReg());
176 reg_def F31_H ( SOC, SOC, Op_RegF, 31, F31->as_VMReg()->next());
179 // ----------------------------
180 // Special Registers
181 // Condition Codes Flag Registers
182 reg_def MIPS_FLAG (SOC, SOC, Op_RegFlags, 1, as_Register(1)->as_VMReg());
183 //S6 is used for get_thread(S6)
// S5 is used for the heapbase of compressed oops
// Allocation order for the general-purpose register chunk.
// NOTE: a comma was missing after "GP, GP_H" in the original list.
alloc_class chunk0(
                     S7, S7_H,
                     S0, S0_H,
                     S1, S1_H,
                     S2, S2_H,
                     S4, S4_H,
                     S5, S5_H,
                     S6, S6_H,
                     S3, S3_H,
                     T2, T2_H,
                     T3, T3_H,
                     T8, T8_H,
                     T9, T9_H,
                     T1, T1_H, // inline_cache_reg
                     V1, V1_H,
                     A7, A7_H,
                     A6, A6_H,
                     A5, A5_H,
                     A4, A4_H,
                     V0, V0_H,
                     A3, A3_H,
                     A2, A2_H,
                     A1, A1_H,
                     A0, A0_H,
                     T0, T0_H,
                     GP, GP_H,
                     RA, RA_H,
                     SP, SP_H,   // stack_pointer
                     FP, FP_H    // frame_pointer
                   );
216 alloc_class chunk1( F0, F0_H,
217 F1, F1_H,
218 F2, F2_H,
219 F3, F3_H,
220 F4, F4_H,
221 F5, F5_H,
222 F6, F6_H,
223 F7, F7_H,
224 F8, F8_H,
225 F9, F9_H,
226 F10, F10_H,
227 F11, F11_H,
228 F20, F20_H,
229 F21, F21_H,
230 F22, F22_H,
231 F23, F23_H,
232 F24, F24_H,
233 F25, F25_H,
234 F26, F26_H,
235 F27, F27_H,
236 F28, F28_H,
237 F19, F19_H,
238 F18, F18_H,
239 F17, F17_H,
240 F16, F16_H,
241 F15, F15_H,
242 F14, F14_H,
243 F13, F13_H,
244 F12, F12_H,
245 F29, F29_H,
246 F30, F30_H,
247 F31, F31_H);
249 alloc_class chunk2(MIPS_FLAG);
251 reg_class s_reg( S0, S1, S2, S3, S4, S5, S6, S7 );
252 reg_class s0_reg( S0 );
253 reg_class s1_reg( S1 );
254 reg_class s2_reg( S2 );
255 reg_class s3_reg( S3 );
256 reg_class s4_reg( S4 );
257 reg_class s5_reg( S5 );
258 reg_class s6_reg( S6 );
259 reg_class s7_reg( S7 );
261 reg_class t_reg( T0, T1, T2, T3, T8, T9 );
262 reg_class t0_reg( T0 );
263 reg_class t1_reg( T1 );
264 reg_class t2_reg( T2 );
265 reg_class t3_reg( T3 );
266 reg_class t8_reg( T8 );
267 reg_class t9_reg( T9 );
269 reg_class a_reg( A0, A1, A2, A3, A4, A5, A6, A7 );
270 reg_class a0_reg( A0 );
271 reg_class a1_reg( A1 );
272 reg_class a2_reg( A2 );
273 reg_class a3_reg( A3 );
274 reg_class a4_reg( A4 );
275 reg_class a5_reg( A5 );
276 reg_class a6_reg( A6 );
277 reg_class a7_reg( A7 );
279 reg_class v0_reg( V0 );
280 reg_class v1_reg( V1 );
282 reg_class sp_reg( SP, SP_H );
283 reg_class fp_reg( FP, FP_H );
285 reg_class mips_flags(MIPS_FLAG);
287 reg_class v0_long_reg( V0, V0_H );
288 reg_class v1_long_reg( V1, V1_H );
289 reg_class a0_long_reg( A0, A0_H );
290 reg_class a1_long_reg( A1, A1_H );
291 reg_class a2_long_reg( A2, A2_H );
292 reg_class a3_long_reg( A3, A3_H );
293 reg_class a4_long_reg( A4, A4_H );
294 reg_class a5_long_reg( A5, A5_H );
295 reg_class a6_long_reg( A6, A6_H );
296 reg_class a7_long_reg( A7, A7_H );
297 reg_class t0_long_reg( T0, T0_H );
298 reg_class t1_long_reg( T1, T1_H );
299 reg_class t2_long_reg( T2, T2_H );
300 reg_class t3_long_reg( T3, T3_H );
301 reg_class t8_long_reg( T8, T8_H );
302 reg_class t9_long_reg( T9, T9_H );
303 reg_class s0_long_reg( S0, S0_H );
304 reg_class s1_long_reg( S1, S1_H );
305 reg_class s2_long_reg( S2, S2_H );
306 reg_class s3_long_reg( S3, S3_H );
307 reg_class s4_long_reg( S4, S4_H );
308 reg_class s5_long_reg( S5, S5_H );
309 reg_class s6_long_reg( S6, S6_H );
310 reg_class s7_long_reg( S7, S7_H );
312 reg_class int_reg( S7, S0, S1, S2, S4, S3, T8, T2, T3, T1, V1, A7, A6, A5, A4, V0, A3, A2, A1, A0, T0 );
314 reg_class no_Ax_int_reg( S7, S0, S1, S2, S4, S3, T8, T2, T3, T1, V1, V0, T0 );
316 reg_class p_reg(
317 S7, S7_H,
318 S0, S0_H,
319 S1, S1_H,
320 S2, S2_H,
321 S4, S4_H,
322 S3, S3_H,
323 T8, T8_H,
324 T2, T2_H,
325 T3, T3_H,
326 T1, T1_H,
327 A7, A7_H,
328 A6, A6_H,
329 A5, A5_H,
330 A4, A4_H,
331 A3, A3_H,
332 A2, A2_H,
333 A1, A1_H,
334 A0, A0_H,
335 T0, T0_H
336 );
338 reg_class no_T8_p_reg(
339 S7, S7_H,
340 S0, S0_H,
341 S1, S1_H,
342 S2, S2_H,
343 S4, S4_H,
344 S3, S3_H,
345 T2, T2_H,
346 T3, T3_H,
347 T1, T1_H,
348 A7, A7_H,
349 A6, A6_H,
350 A5, A5_H,
351 A4, A4_H,
352 A3, A3_H,
353 A2, A2_H,
354 A1, A1_H,
355 A0, A0_H,
356 T0, T0_H
357 );
359 reg_class long_reg(
360 S7, S7_H,
361 S0, S0_H,
362 S1, S1_H,
363 S2, S2_H,
364 S4, S4_H,
365 S3, S3_H,
366 T8, T8_H,
367 T2, T2_H,
368 T3, T3_H,
369 T1, T1_H,
370 A7, A7_H,
371 A6, A6_H,
372 A5, A5_H,
373 A4, A4_H,
374 A3, A3_H,
375 A2, A2_H,
376 A1, A1_H,
377 A0, A0_H,
378 T0, T0_H
379 );
382 // Floating point registers.
383 // 2012/8/23 Fu: F30/F31 are used as temporary registers in D2I
384 // 2016/12/1 aoqi: F31 are not used as temporary registers in D2I
// Allocatable single-precision float registers (F30 excluded: used as a
// temporary in D2I per the note above).  Fixed missing comma between F17 and F18.
reg_class flt_reg( F0, F1, F2, F3, F4, F5, F6, F7, F8, F9, F10, F11, F12, F13, F14, F15, F16, F17, F18, F19, F20, F21, F22, F23, F24, F25, F26, F27, F28, F29, F31);
386 reg_class dbl_reg( F0, F0_H,
387 F1, F1_H,
388 F2, F2_H,
389 F3, F3_H,
390 F4, F4_H,
391 F5, F5_H,
392 F6, F6_H,
393 F7, F7_H,
394 F8, F8_H,
395 F9, F9_H,
396 F10, F10_H,
397 F11, F11_H,
398 F12, F12_H,
399 F13, F13_H,
400 F14, F14_H,
401 F15, F15_H,
402 F16, F16_H,
403 F17, F17_H,
404 F18, F18_H,
405 F19, F19_H,
406 F20, F20_H,
407 F21, F21_H,
408 F22, F22_H,
409 F23, F23_H,
410 F24, F24_H,
411 F25, F25_H,
412 F26, F26_H,
413 F27, F27_H,
414 F28, F28_H,
415 F29, F29_H,
416 F31, F31_H);
418 reg_class flt_arg0( F12 );
419 reg_class dbl_arg0( F12, F12_H );
420 reg_class dbl_arg1( F14, F14_H );
422 %}
424 //----------DEFINITION BLOCK---------------------------------------------------
425 // Define name --> value mappings to inform the ADLC of an integer valued name
426 // Current support includes integer values in the range [0, 0x7FFFFFFF]
427 // Format:
428 // int_def <name> ( <int_value>, <expression>);
429 // Generated Code in ad_<arch>.hpp
430 // #define <name> (<expression>)
431 // // value == <int_value>
432 // Generated code in ad_<arch>.cpp adlc_verification()
433 // assert( <name> == <int_value>, "Expect (<expression>) to equal <int_value>");
434 //
definitions %{
  // Cost-model constants consumed by the instruction rules below.
  int_def DEFAULT_COST      (    100,     100);
  int_def HUGE_COST         (1000000, 1000000);

  // Memory refs are twice as expensive as run-of-the-mill.
  int_def MEMORY_REF_COST   (    200, DEFAULT_COST * 2);

  // Branches are even more expensive.
  int_def BRANCH_COST       (    300, DEFAULT_COST * 3);
  // we use jr instruction to construct call, so more expensive
  // by yjl 2/28/2006
  int_def CALL_COST         (    500, DEFAULT_COST * 5);
/*
  int_def EQUAL             (   1,   1  );
  int_def NOT_EQUAL         (   2,   2  );
  int_def GREATER           (   3,   3  );
  int_def GREATER_EQUAL     (   4,   4  );
  int_def LESS              (   5,   5  );
  int_def LESS_EQUAL        (   6,   6  );
*/
%}
459 //----------SOURCE BLOCK-------------------------------------------------------
460 // This is a block of C++ code which provides values, functions, and
461 // definitions necessary in the rest of the architecture description
463 source_hpp %{
464 // Header information of the source block.
465 // Method declarations/definitions which are used outside
466 // the ad-scope can conveniently be defined here.
467 //
468 // To keep related declarations/definitions/uses close together,
469 // we switch between source %{ }% and source_hpp %{ }% freely as needed.
// Stub-size bookkeeping used by Compile::shorten_branches.
class CallStubImpl {

  //--------------------------------------------------------------
  //---<  Used for optimization in Compile::shorten_branches  >---
  //--------------------------------------------------------------

 public:
  // Size of call trampoline stub.
  // This port emits no call trampolines, so the size is zero.
  static uint size_call_trampoline() {
    return 0; // no call trampolines on this platform
  }

  // Number of relocations needed by a call trampoline stub.
  static uint reloc_call_trampoline() {
    return 0; // no call trampolines on this platform
  }
};
// Emission and sizing of the exception and deoptimization handler stubs.
class HandlerImpl {

 public:

  static int emit_exception_handler(CodeBuffer &cbuf);
  static int emit_deopt_handler(CodeBuffer& cbuf);

  static uint size_exception_handler() {
    // NativeCall instruction size is the same as NativeJump.
    // The exception handler starts out as a jump and can be patched to
    // a call by deoptimization. (4932387)
    // Note that this value is also credited (in output.cpp) to
    // the size of the code section.
    // return NativeJump::instruction_size;
    int size = NativeCall::instruction_size;
    return round_to(size, 16);   // keep the stub 16-byte aligned
  }

#ifdef _LP64
  static uint size_deopt_handler() {
    int size = NativeCall::instruction_size;
    return round_to(size, 16);   // keep the stub 16-byte aligned
  }
#else
  static uint size_deopt_handler() {
    // NativeCall instruction size is the same as NativeJump.
    // The exception handler starts out as a jump and can be patched to
    // a call by deoptimization. (4932387)
    // Note that this value is also credited (in output.cpp) to
    // the size of the code section.
    return 5 + NativeJump::instruction_size; // pushl(); jmp;
  }
#endif
};
524 %} // end source_hpp
526 source %{
528 #define NO_INDEX 0
529 #define RELOC_IMM64 Assembler::imm_operand
530 #define RELOC_DISP32 Assembler::disp32_operand
533 #define __ _masm.
// Emit exception handler code.
// Stuff framesize into a register and call a VM stub routine.
// Returns the offset of the handler within the stub section,
// or 0 if the code buffer could not be expanded.
int HandlerImpl::emit_exception_handler(CodeBuffer& cbuf) {
  // Note that the code buffer's insts_mark is always relative to insts.
  // That's why we must use the macroassembler to generate a handler.
  MacroAssembler _masm(&cbuf);
  address base =
  __ start_a_stub(size_exception_handler());
  if (base == NULL)  return 0;  // CodeBuffer::expand failed
  int offset = __ offset();

  __ block_comment("; emit_exception_handler");

  cbuf.set_insts_mark();
  __ relocate(relocInfo::runtime_call_type);
  // Patchable jump so the target survives relocation of the stub.
  __ patchable_jump((address)OptoRuntime::exception_blob()->entry_point());
  __ align(16);
  // Emitted code must fit within the size promised by size_exception_handler().
  assert(__ offset() - offset <= (int) size_exception_handler(), "overflow");
  __ end_a_stub();
  return offset;
}
// Emit deopt handler code.
// Emits a patchable call to the deopt blob's unpack entry; returns the
// handler's offset within the stub section, or 0 on buffer expansion failure.
int HandlerImpl::emit_deopt_handler(CodeBuffer& cbuf) {
  // Note that the code buffer's insts_mark is always relative to insts.
  // That's why we must use the macroassembler to generate a handler.
  MacroAssembler _masm(&cbuf);
  address base =
  __ start_a_stub(size_deopt_handler());

  // FIXME
  if (base == NULL)  return 0;  // CodeBuffer::expand failed
  int offset = __ offset();

  __ block_comment("; emit_deopt_handler");

  cbuf.set_insts_mark();
  __ relocate(relocInfo::runtime_call_type);
  __ patchable_call(SharedRuntime::deopt_blob()->unpack());
  __ align(16);
  // Emitted code must fit within the size promised by size_deopt_handler().
  assert(__ offset() - offset <= (int) size_deopt_handler(), "overflow");
  __ end_a_stub();
  return offset;
}
582 const bool Matcher::match_rule_supported(int opcode) {
583 if (!has_match_rule(opcode))
584 return false;
586 switch (opcode) {
587 //Op_CountLeadingZerosI Op_CountLeadingZerosL can be deleted, all MIPS CPUs support clz & dclz.
588 case Op_CountLeadingZerosI:
589 case Op_CountLeadingZerosL:
590 if (!UseCountLeadingZerosInstruction)
591 return false;
592 break;
593 case Op_CountTrailingZerosI:
594 case Op_CountTrailingZerosL:
595 if (!UseCountTrailingZerosInstruction)
596 return false;
597 break;
598 }
600 return true; // Per default match rules are supported.
601 }
//FIXME
// emit call stub, compiled java to interpreter
void emit_java_to_interp(CodeBuffer &cbuf ) {
  // Stub is fixed up when the corresponding call is converted from calling
  // compiled code to calling interpreted code.
  // mov rbx,0
  // jmp -1

  address mark = cbuf.insts_mark();  // get mark within main instrs section

  // Note that the code buffer's insts_mark is always relative to insts.
  // That's why we must use the macroassembler to generate a stub.
  MacroAssembler _masm(&cbuf);

  address base =
  __ start_a_stub(Compile::MAX_stubs_size);
  if (base == NULL)  return;  // CodeBuffer::expand failed
  // static stub relocation stores the instruction address of the call
  __ relocate(static_stub_Relocation::spec(mark), 0);
  // static stub relocation also tags the methodOop in the code-stream.
  // The zero is a placeholder patched with the real Method* on resolution.
  __ patchable_set48(S3, (long)0);
  // This is recognized as unresolved by relocs/nativeInst/ic code
  __ relocate(relocInfo::runtime_call_type);

  cbuf.set_insts_mark();
  address call_pc = (address)-1;   // placeholder target, patched later
  __ patchable_jump(call_pc);
  __ align(16);
  __ end_a_stub();
  // Update current stubs pointer and restore code_end.
}
// size of call stub, compiled java to interpreter
639 uint size_java_to_interp() {
640 int size = 4 * 4 + NativeCall::instruction_size; // sizeof(li48) + NativeCall::instruction_size
641 return round_to(size, 16);
642 }
// relocation entries for call stub, compiled java to interpreter
// Budget covers relocations emitted in emit_java_to_interp plus those
// of the corresponding Java_Static_Call site.
uint reloc_java_to_interp() {
  return 16; // in emit_java_to_interp + in Java_Static_Call
}
649 bool Matcher::is_short_branch_offset(int rule, int br_size, int offset) {
650 if( Assembler::is_simm16(offset) ) return true;
651 else {
652 assert(false, "Not implemented yet !" );
653 Unimplemented();
654 }
655 }
// No additional cost for CMOVL.
const int Matcher::long_cmove_cost() { return 0; }

// Conditional float/double moves are reported as unprofitable so the
// matcher will not form CMOVF/CMOVD.
// NOTE(review): the "SSE2" wording below was inherited from the x86 file.
// No CMOVF/CMOVD with SSE2
const int Matcher::float_cmove_cost() { return ConditionalMoveLimit; }

// Does the CPU require late expand (see block.cpp for description of late expand)?
const bool Matcher::require_postalloc_expand = false;

// Should the Matcher clone shifts on addressing modes, expecting them
// to be subsumed into complex addressing expressions or compute them
// into registers?  True for Intel but false for most RISCs
const bool Matcher::clone_shift_expressions = false;

// Do we need to mask the count passed to shift instructions or does
// the cpu only look at the lower 5/6 bits anyway?
const bool Matcher::need_masked_shift_count = false;

// Compressed-oop addressing: do not fold the decode into the address.
bool Matcher::narrow_oop_use_complex_address() {
  NOT_LP64(ShouldNotCallThis());
  assert(UseCompressedOops, "only for compressed oops code");
  return false;
}

// Compressed-klass addressing: do not fold the decode into the address.
bool Matcher::narrow_klass_use_complex_address() {
  NOT_LP64(ShouldNotCallThis());
  assert(UseCompressedClassPointers, "only for compressed klass code");
  return false;
}

// This is UltraSparc specific, true just means we have fast l2f conversion
const bool Matcher::convL2FSupported(void) {
  return true;
}
// Max vector size in bytes. 0 if not supported.
const int Matcher::vector_width_in_bytes(BasicType bt) {
  if (MaxVectorSize == 0)
    return 0;
  assert(MaxVectorSize == 8, "");   // only 8-byte (64-bit) vectors supported
  return 8;
}

// Vector ideal reg: 8-byte vectors live in Op_VecD registers.
const int Matcher::vector_ideal_reg(int size) {
  assert(MaxVectorSize == 8, "");
  switch(size) {
    case 8: return Op_VecD;
  }
  ShouldNotReachHere();
  return 0;
}

// Only lowest bits of xmm reg are used for vector shift count.
// NOTE(review): "xmm" wording inherited from x86; vector shifts are not
// supported on this port at all, hence the fatal().
const int Matcher::vector_shift_count_ideal_reg(int size) {
  fatal("vector shift is not supported");
  return Node::NotAMachineReg;
}

// Limits on vector size (number of elements) loaded into vector.
const int Matcher::max_vector_size(const BasicType bt) {
  assert(is_java_primitive(bt), "only primitive type vectors");
  return vector_width_in_bytes(bt)/type2aelembytes(bt);
}

const int Matcher::min_vector_size(const BasicType bt) {
  return max_vector_size(bt); // Same as max.
}

// MIPS supports misaligned vectors store/load? FIXME
const bool Matcher::misaligned_vectors_ok() {
  return false;
  //return !AlignVector; // can be changed by flag
}
// Register for DIVI projection of divmodI.
// NOTE(review): unreachable on this port (ShouldNotReachHere) -- confirm
// that fused div/mod nodes are never matched here.
RegMask Matcher::divI_proj_mask() {
  ShouldNotReachHere();
  return RegMask();
}

// Register for MODI projection of divmodI
RegMask Matcher::modI_proj_mask() {
  ShouldNotReachHere();
  return RegMask();
}

// Register for DIVL projection of divmodL
RegMask Matcher::divL_proj_mask() {
  ShouldNotReachHere();
  return RegMask();
}

// Map an allocator register number to an FPU stack offset.
int Matcher::regnum_to_fpu_offset(int regnum) {
  return regnum - 32; // The FP registers are in the second chunk
}

const bool Matcher::isSimpleConstant64(jlong value) {
  // Will one (StoreL ConL) be cheaper than two (StoreI ConI)?.
  return true;
}
762 // Return whether or not this register is ever used as an argument. This
763 // function is used on startup to build the trampoline stubs in generateOptoStub.
764 // Registers not mentioned will be killed by the VM call in the trampoline, and
765 // arguments in those registers not be available to the callee.
766 bool Matcher::can_be_java_arg( int reg ) {
767 /* Refer to: [sharedRuntime_mips_64.cpp] SharedRuntime::java_calling_convention() */
768 if ( reg == T0_num || reg == T0_H_num
769 || reg == A0_num || reg == A0_H_num
770 || reg == A1_num || reg == A1_H_num
771 || reg == A2_num || reg == A2_H_num
772 || reg == A3_num || reg == A3_H_num
773 || reg == A4_num || reg == A4_H_num
774 || reg == A5_num || reg == A5_H_num
775 || reg == A6_num || reg == A6_H_num
776 || reg == A7_num || reg == A7_H_num )
777 return true;
779 if ( reg == F12_num || reg == F12_H_num
780 || reg == F13_num || reg == F13_H_num
781 || reg == F14_num || reg == F14_H_num
782 || reg == F15_num || reg == F15_H_num
783 || reg == F16_num || reg == F16_H_num
784 || reg == F17_num || reg == F17_H_num
785 || reg == F18_num || reg == F18_H_num
786 || reg == F19_num || reg == F19_H_num )
787 return true;
789 return false;
790 }
// A register is spillable as an argument exactly when it can carry a Java argument.
bool Matcher::is_spillable_arg( int reg ) {
  return can_be_java_arg(reg);
}
// No hand-written assembly path for long division by a constant.
bool Matcher::use_asm_for_ldiv_by_con( jlong divisor ) {
  return false;
}

// Register for MODL projection of divmodL
// NOTE(review): unreachable on this port (ShouldNotReachHere).
RegMask Matcher::modL_proj_mask() {
  ShouldNotReachHere();
  return RegMask();
}

// SP-save mask used around method-handle invokes: the frame pointer register.
const RegMask Matcher::method_handle_invoke_SP_save_mask() {
  return FP_REG_mask();
}

// MIPS doesn't support AES intrinsics
const bool Matcher::pass_original_key_for_aes() {
  return false;
}
// Padding (bytes) to insert before this call so the emitted sequence
// starts at the required alignment.  The call sequence is:
//   lui
//   ori
//   dsll
//   ori
//
//   jalr
//   nop
int CallLeafNoFPDirectNode::compute_padding(int current_offset) const {
  return round_to(current_offset, alignment_required()) - current_offset;
}

// Same padding computation for leaf direct calls (same call sequence:
// lui/ori/dsll/ori then jalr/nop).
int CallLeafDirectNode::compute_padding(int current_offset) const {
  return round_to(current_offset, alignment_required()) - current_offset;
}

// Same padding computation for runtime direct calls (same call sequence:
// lui/ori/dsll/ori then jalr/nop).
int CallRuntimeDirectNode::compute_padding(int current_offset) const {
  return round_to(current_offset, alignment_required()) - current_offset;
}
// If CPU can load and store mis-aligned doubles directly then no fixup is
// needed.  Else we split the double into 2 integer pieces and move it
// piece-by-piece.  Only happens when passing doubles into C code as the
// Java calling convention forces doubles to be aligned.
const bool Matcher::misaligned_doubles_ok = false;
// Do floats take an entire double register or just half?
//const bool Matcher::float_in_double = true;
bool Matcher::float_in_double() { return false; }
// Threshold size for cleararray.
const int Matcher::init_array_short_size = 8 * BytesPerLong;
// Do ints take an entire long register or just half?
const bool Matcher::int_in_long = true;
// Is it better to copy float constants, or load them directly from memory?
// Intel can load a float constant from a direct address, requiring no
// extra registers.  Most RISCs will have to materialize an address into a
// register first, so they would do better to copy the constant from stack.
const bool Matcher::rematerialize_float_constants = false;
// Advertise here if the CPU requires explicit rounding operations
// to implement the UseStrictFP mode.
const bool Matcher::strict_fp_requires_explicit_rounding = false;
// The ecx parameter to rep stos for the ClearArray node is in dwords.
// NOTE(review): "ecx"/"rep stos" wording inherited from the x86 file.
const bool Matcher::init_array_count_is_in_bytes = false;


// Indicate if the safepoint node needs the polling page as an input.
// NOTE(review): the original comment claimed MIPS needs it (no absolute
// addressing), yet the function returns false -- confirm which is intended.
bool SafePointNode::needs_polling_address_input() {
  return false;
}
// !!!!! Special hack to get all type of calls to specify the byte offset
// from the start of the call to the point where the return address
// will point.
int MachCallStaticJavaNode::ret_addr_offset() {
  // 6 instructions of 4 bytes each (6 * 4 == 24):
  //   lui
  //   ori
  //   nop
  //   nop
  //   jalr
  //   nop
  return 24;
}

int MachCallDynamicJavaNode::ret_addr_offset() {
  // 4 instructions to materialize the inline-cache klass, then a
  // 6-instruction call sequence:
  //   lui  IC_Klass,
  //   ori  IC_Klass,
  //   dsll IC_Klass
  //   ori  IC_Klass
  //
  //   lui  T9
  //   ori  T9
  //   nop
  //   nop
  //   jalr T9
  //   nop
  return 4 * 4 + 4 * 6;
}
909 //=============================================================================
911 // Figure out which register class each belongs in: rc_int, rc_float, rc_stack
912 enum RC { rc_bad, rc_int, rc_float, rc_stack };
913 static enum RC rc_class( OptoReg::Name reg ) {
914 if( !OptoReg::is_valid(reg) ) return rc_bad;
915 if (OptoReg::is_stack(reg)) return rc_stack;
916 VMReg r = OptoReg::as_VMReg(reg);
917 if (r->is_Register()) return rc_int;
918 assert(r->is_FloatRegister(), "must be");
919 return rc_float;
920 }
// Emit, size, or pretty-print the code for a spill copy between any
// combination of {stack slot, GPR, FPR}.  Exactly one mode is active per
// call:
//   - cbuf != NULL            : emit instructions into cbuf
//   - cbuf == NULL, !do_size  : print an assembly listing to st
//   - cbuf == NULL, do_size   : only compute the byte size
// Returns the size in bytes of the emitted sequence (each MIPS
// instruction is 4 bytes; the printed listing and the size accounting
// must stay in sync with the emitted code).
uint MachSpillCopyNode::implementation( CodeBuffer *cbuf, PhaseRegAlloc *ra_, bool do_size, outputStream* st ) const {
  // Get registers to move.
  OptoReg::Name src_second = ra_->get_reg_second(in(1));
  OptoReg::Name src_first = ra_->get_reg_first(in(1));
  OptoReg::Name dst_second = ra_->get_reg_second(this );
  OptoReg::Name dst_first = ra_->get_reg_first(this );

  enum RC src_second_rc = rc_class(src_second);
  enum RC src_first_rc = rc_class(src_first);
  enum RC dst_second_rc = rc_class(dst_second);
  enum RC dst_first_rc = rc_class(dst_first);

  assert(OptoReg::is_valid(src_first) && OptoReg::is_valid(dst_first), "must move at least 1 register" );

  // Generate spill code!
  int size = 0;

  if( src_first == dst_first && src_second == dst_second )
    return 0;            // Self copy, no move

  // A 64-bit value is recognized as an aligned, adjacent (first, second)
  // register pair; anything else is treated as a 32-bit move.
  if (src_first_rc == rc_stack) {
    // mem ->
    if (dst_first_rc == rc_stack) {
      // mem -> mem, bounced through scratch register AT
      assert(src_second != dst_first, "overlap");
      if ((src_first & 1) == 0 && src_first + 1 == src_second &&
          (dst_first & 1) == 0 && dst_first + 1 == dst_second) {
        // 64-bit
        int src_offset = ra_->reg2offset(src_first);
        int dst_offset = ra_->reg2offset(dst_first);
        if (cbuf) {
          MacroAssembler _masm(cbuf);
          __ ld(AT, Address(SP, src_offset));
          __ sd(AT, Address(SP, dst_offset));
#ifndef PRODUCT
        } else {
          if(!do_size){
            if (size != 0) st->print("\n\t");
            st->print("ld AT, [SP + #%d]\t# 64-bit mem-mem spill 1\n\t"
                      "sd AT, [SP + #%d]",
                      src_offset, dst_offset);
          }
#endif
        }
        size += 8;
      } else {
        // 32-bit
        assert(!((src_first & 1) == 0 && src_first + 1 == src_second), "no transform");
        assert(!((dst_first & 1) == 0 && dst_first + 1 == dst_second), "no transform");
        // No pushl/popl, so:
        int src_offset = ra_->reg2offset(src_first);
        int dst_offset = ra_->reg2offset(dst_first);
        if (cbuf) {
          MacroAssembler _masm(cbuf);
          __ lw(AT, Address(SP, src_offset));
          __ sw(AT, Address(SP, dst_offset));
#ifndef PRODUCT
        } else {
          if(!do_size){
            if (size != 0) st->print("\n\t");
            st->print("lw AT, [SP + #%d] spill 2\n\t"
                      "sw AT, [SP + #%d]\n\t",
                      src_offset, dst_offset);
          }
#endif
        }
        size += 8;
      }
      return size;
    } else if (dst_first_rc == rc_int) {
      // mem -> gpr
      if ((src_first & 1) == 0 && src_first + 1 == src_second &&
          (dst_first & 1) == 0 && dst_first + 1 == dst_second) {
        // 64-bit
        int offset = ra_->reg2offset(src_first);
        if (cbuf) {
          MacroAssembler _masm(cbuf);
          __ ld(as_Register(Matcher::_regEncode[dst_first]), Address(SP, offset));
#ifndef PRODUCT
        } else {
          if(!do_size){
            if (size != 0) st->print("\n\t");
            st->print("ld %s, [SP + #%d]\t# spill 3",
                      Matcher::regName[dst_first],
                      offset);
          }
#endif
        }
        size += 4;
      } else {
        // 32-bit
        assert(!((src_first & 1) == 0 && src_first + 1 == src_second), "no transform");
        assert(!((dst_first & 1) == 0 && dst_first + 1 == dst_second), "no transform");
        int offset = ra_->reg2offset(src_first);
        if (cbuf) {
          MacroAssembler _masm(cbuf);
          // Sign-extend true ints; zero-extend (lwu) everything else,
          // e.g. narrow oops.
          if (this->ideal_reg() == Op_RegI)
            __ lw(as_Register(Matcher::_regEncode[dst_first]), Address(SP, offset));
          else
            __ lwu(as_Register(Matcher::_regEncode[dst_first]), Address(SP, offset));
#ifndef PRODUCT
        } else {
          if(!do_size){
            if (size != 0) st->print("\n\t");
            if (this->ideal_reg() == Op_RegI)
              st->print("lw %s, [SP + #%d]\t# spill 4",
                        Matcher::regName[dst_first],
                        offset);
            else
              st->print("lwu %s, [SP + #%d]\t# spill 5",
                        Matcher::regName[dst_first],
                        offset);
          }
#endif
        }
        size += 4;
      }
      return size;
    } else if (dst_first_rc == rc_float) {
      // mem-> xmm
      if ((src_first & 1) == 0 && src_first + 1 == src_second &&
          (dst_first & 1) == 0 && dst_first + 1 == dst_second) {
        // 64-bit
        int offset = ra_->reg2offset(src_first);
        if (cbuf) {
          MacroAssembler _masm(cbuf);
          __ ldc1( as_FloatRegister(Matcher::_regEncode[dst_first]), Address(SP, offset));
#ifndef PRODUCT
        } else {
          if(!do_size){
            if (size != 0) st->print("\n\t");
            st->print("ldc1 %s, [SP + #%d]\t# spill 6",
                      Matcher::regName[dst_first],
                      offset);
          }
#endif
        }
        size += 4;
      } else {
        // 32-bit
        assert(!((src_first & 1) == 0 && src_first + 1 == src_second), "no transform");
        assert(!((dst_first & 1) == 0 && dst_first + 1 == dst_second), "no transform");
        int offset = ra_->reg2offset(src_first);
        if (cbuf) {
          MacroAssembler _masm(cbuf);
          __ lwc1( as_FloatRegister(Matcher::_regEncode[dst_first]), Address(SP, offset));
#ifndef PRODUCT
        } else {
          if(!do_size){
            if (size != 0) st->print("\n\t");
            st->print("lwc1 %s, [SP + #%d]\t# spill 7",
                      Matcher::regName[dst_first],
                      offset);
          }
#endif
        }
        size += 4;
      }
      return size;
    }
  } else if (src_first_rc == rc_int) {
    // gpr ->
    if (dst_first_rc == rc_stack) {
      // gpr -> mem
      if ((src_first & 1) == 0 && src_first + 1 == src_second &&
          (dst_first & 1) == 0 && dst_first + 1 == dst_second) {
        // 64-bit
        int offset = ra_->reg2offset(dst_first);
        if (cbuf) {
          MacroAssembler _masm(cbuf);
          __ sd(as_Register(Matcher::_regEncode[src_first]), Address(SP, offset));
#ifndef PRODUCT
        } else {
          if(!do_size){
            if (size != 0) st->print("\n\t");
            st->print("sd %s, [SP + #%d] # spill 8",
                      Matcher::regName[src_first],
                      offset);
          }
#endif
        }
        size += 4;
      } else {
        // 32-bit
        assert(!((src_first & 1) == 0 && src_first + 1 == src_second), "no transform");
        assert(!((dst_first & 1) == 0 && dst_first + 1 == dst_second), "no transform");
        int offset = ra_->reg2offset(dst_first);
        if (cbuf) {
          MacroAssembler _masm(cbuf);
          __ sw(as_Register(Matcher::_regEncode[src_first]), Address(SP, offset));
#ifndef PRODUCT
        } else {
          if(!do_size){
            if (size != 0) st->print("\n\t");
            st->print("sw %s, [SP + #%d]\t# spill 9",
                      Matcher::regName[src_first], offset);
          }
#endif
        }
        size += 4;
      }
      return size;
    } else if (dst_first_rc == rc_int) {
      // gpr -> gpr
      if ((src_first & 1) == 0 && src_first + 1 == src_second &&
          (dst_first & 1) == 0 && dst_first + 1 == dst_second) {
        // 64-bit
        if (cbuf) {
          MacroAssembler _masm(cbuf);
          __ move(as_Register(Matcher::_regEncode[dst_first]),
                  as_Register(Matcher::_regEncode[src_first]));
#ifndef PRODUCT
        } else {
          if(!do_size){
            if (size != 0) st->print("\n\t");
            st->print("move(64bit) %s <-- %s\t# spill 10",
                      Matcher::regName[dst_first],
                      Matcher::regName[src_first]);
          }
#endif
        }
        size += 4;
        return size;
      } else {
        // 32-bit
        assert(!((src_first & 1) == 0 && src_first + 1 == src_second), "no transform");
        assert(!((dst_first & 1) == 0 && dst_first + 1 == dst_second), "no transform");
        if (cbuf) {
          MacroAssembler _masm(cbuf);
          // Op_RegI keeps 32-bit semantics; other 32-bit values are
          // copied with a plain 64-bit add of R0.
          if (this->ideal_reg() == Op_RegI)
            __ move_u32(as_Register(Matcher::_regEncode[dst_first]), as_Register(Matcher::_regEncode[src_first]));
          else
            __ daddu(as_Register(Matcher::_regEncode[dst_first]), as_Register(Matcher::_regEncode[src_first]), R0);

#ifndef PRODUCT
        } else {
          if(!do_size){
            if (size != 0) st->print("\n\t");
            st->print("move(32-bit) %s <-- %s\t# spill 11",
                      Matcher::regName[dst_first],
                      Matcher::regName[src_first]);
          }
#endif
        }
        size += 4;
        return size;
      }
    } else if (dst_first_rc == rc_float) {
      // gpr -> xmm
      if ((src_first & 1) == 0 && src_first + 1 == src_second &&
          (dst_first & 1) == 0 && dst_first + 1 == dst_second) {
        // 64-bit
        if (cbuf) {
          MacroAssembler _masm(cbuf);
          __ dmtc1(as_Register(Matcher::_regEncode[src_first]), as_FloatRegister(Matcher::_regEncode[dst_first]));
#ifndef PRODUCT
        } else {
          if(!do_size){
            if (size != 0) st->print("\n\t");
            // NOTE(review): listing prints dst before src, but the MIPS
            // "dmtc1 rt, fs" form names the GPR source first -- the
            // printed operand order looks swapped; confirm intent.
            st->print("dmtc1 %s, %s\t# spill 12",
                      Matcher::regName[dst_first],
                      Matcher::regName[src_first]);
          }
#endif
        }
        size += 4;
      } else {
        // 32-bit
        assert(!((src_first & 1) == 0 && src_first + 1 == src_second), "no transform");
        assert(!((dst_first & 1) == 0 && dst_first + 1 == dst_second), "no transform");
        if (cbuf) {
          MacroAssembler _masm(cbuf);
          __ mtc1( as_Register(Matcher::_regEncode[src_first]), as_FloatRegister(Matcher::_regEncode[dst_first]) );
#ifndef PRODUCT
        } else {
          if(!do_size){
            if (size != 0) st->print("\n\t");
            // NOTE(review): same possible operand-order swap as spill 12.
            st->print("mtc1 %s, %s\t# spill 13",
                      Matcher::regName[dst_first],
                      Matcher::regName[src_first]);
          }
#endif
        }
        size += 4;
      }
      return size;
    }
  } else if (src_first_rc == rc_float) {
    // xmm ->
    if (dst_first_rc == rc_stack) {
      // xmm -> mem
      if ((src_first & 1) == 0 && src_first + 1 == src_second &&
          (dst_first & 1) == 0 && dst_first + 1 == dst_second) {
        // 64-bit
        int offset = ra_->reg2offset(dst_first);
        if (cbuf) {
          MacroAssembler _masm(cbuf);
          __ sdc1( as_FloatRegister(Matcher::_regEncode[src_first]), Address(SP, offset) );
#ifndef PRODUCT
        } else {
          if(!do_size){
            if (size != 0) st->print("\n\t");
            st->print("sdc1 %s, [SP + #%d]\t# spill 14",
                      Matcher::regName[src_first],
                      offset);
          }
#endif
        }
        size += 4;
      } else {
        // 32-bit
        assert(!((src_first & 1) == 0 && src_first + 1 == src_second), "no transform");
        assert(!((dst_first & 1) == 0 && dst_first + 1 == dst_second), "no transform");
        int offset = ra_->reg2offset(dst_first);
        if (cbuf) {
          MacroAssembler _masm(cbuf);
          __ swc1(as_FloatRegister(Matcher::_regEncode[src_first]), Address(SP, offset));
#ifndef PRODUCT
        } else {
          if(!do_size){
            if (size != 0) st->print("\n\t");
            st->print("swc1 %s, [SP + #%d]\t# spill 15",
                      Matcher::regName[src_first],
                      offset);
          }
#endif
        }
        size += 4;
      }
      return size;
    } else if (dst_first_rc == rc_int) {
      // xmm -> gpr
      if ((src_first & 1) == 0 && src_first + 1 == src_second &&
          (dst_first & 1) == 0 && dst_first + 1 == dst_second) {
        // 64-bit
        if (cbuf) {
          MacroAssembler _masm(cbuf);
          __ dmfc1( as_Register(Matcher::_regEncode[dst_first]), as_FloatRegister(Matcher::_regEncode[src_first]));
#ifndef PRODUCT
        } else {
          if(!do_size){
            if (size != 0) st->print("\n\t");
            st->print("dmfc1 %s, %s\t# spill 16",
                      Matcher::regName[dst_first],
                      Matcher::regName[src_first]);
          }
#endif
        }
        size += 4;
      } else {
        // 32-bit
        assert(!((src_first & 1) == 0 && src_first + 1 == src_second), "no transform");
        assert(!((dst_first & 1) == 0 && dst_first + 1 == dst_second), "no transform");
        if (cbuf) {
          MacroAssembler _masm(cbuf);
          __ mfc1( as_Register(Matcher::_regEncode[dst_first]), as_FloatRegister(Matcher::_regEncode[src_first]));
#ifndef PRODUCT
        } else {
          if(!do_size){
            if (size != 0) st->print("\n\t");
            st->print("mfc1 %s, %s\t# spill 17",
                      Matcher::regName[dst_first],
                      Matcher::regName[src_first]);
          }
#endif
        }
        size += 4;
      }
      return size;
    } else if (dst_first_rc == rc_float) {
      // xmm -> xmm
      if ((src_first & 1) == 0 && src_first + 1 == src_second &&
          (dst_first & 1) == 0 && dst_first + 1 == dst_second) {
        // 64-bit
        if (cbuf) {
          MacroAssembler _masm(cbuf);
          __ mov_d( as_FloatRegister(Matcher::_regEncode[dst_first]), as_FloatRegister(Matcher::_regEncode[src_first]));
#ifndef PRODUCT
        } else {
          if(!do_size){
            if (size != 0) st->print("\n\t");
            st->print("mov_d %s <-- %s\t# spill 18",
                      Matcher::regName[dst_first],
                      Matcher::regName[src_first]);
          }
#endif
        }
        size += 4;
      } else {
        // 32-bit
        assert(!((src_first & 1) == 0 && src_first + 1 == src_second), "no transform");
        assert(!((dst_first & 1) == 0 && dst_first + 1 == dst_second), "no transform");
        if (cbuf) {
          MacroAssembler _masm(cbuf);
          __ mov_s( as_FloatRegister(Matcher::_regEncode[dst_first]), as_FloatRegister(Matcher::_regEncode[src_first]));
#ifndef PRODUCT
        } else {
          if(!do_size){
            if (size != 0) st->print("\n\t");
            st->print("mov_s %s <-- %s\t# spill 19",
                      Matcher::regName[dst_first],
                      Matcher::regName[src_first]);
          }
#endif
        }
        size += 4;
      }
      return size;
    }
  }

  // Every legal class combination returned above; anything else is a bug.
  assert(0," foo ");
  Unimplemented();
  return size;

}
1339 #ifndef PRODUCT
// Debug listing of a spill copy: run implementation() in print-only mode
// (no CodeBuffer, do_size == false).
void MachSpillCopyNode::format( PhaseRegAlloc *ra_, outputStream* st ) const {
  implementation( NULL, ra_, false, st );
}
1343 #endif
// Emit the spill-copy instructions into the code buffer.
void MachSpillCopyNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
  implementation( &cbuf, ra_, false, NULL );
}
// Size in bytes of the spill copy: run implementation() in size-only mode.
uint MachSpillCopyNode::size(PhaseRegAlloc *ra_) const {
  return implementation( NULL, ra_, true, NULL );
}
1353 //=============================================================================
1354 #
1356 #ifndef PRODUCT
// Debug listing for a breakpoint node.
void MachBreakpointNode::format( PhaseRegAlloc *, outputStream* st ) const {
  st->print("INT3");
}
1360 #endif
// Emit the breakpoint trap instruction.
void MachBreakpointNode::emit(CodeBuffer &cbuf, PhaseRegAlloc* ra_) const {
  MacroAssembler _masm(&cbuf);
  __ int3();
}
// Size of the breakpoint node; fall back to the generic (emit-and-measure)
// computation.
uint MachBreakpointNode::size(PhaseRegAlloc* ra_) const {
  return MachNode::size(ra_);
}
1372 //=============================================================================
1373 #ifndef PRODUCT
1374 void MachEpilogNode::format( PhaseRegAlloc *ra_, outputStream* st ) const {
1375 Compile *C = ra_->C;
1376 int framesize = C->frame_size_in_bytes();
1378 assert((framesize & (StackAlignmentInBytes-1)) == 0, "frame size not aligned");
1380 st->print("daddiu SP, SP, %d # Rlease stack @ MachEpilogNode",framesize);
1381 st->cr(); st->print("\t");
1382 if (UseLoongsonISA) {
1383 st->print("gslq RA, FP, SP, %d # Restore FP & RA @ MachEpilogNode", -wordSize*2);
1384 } else {
1385 st->print("ld RA, SP, %d # Restore RA @ MachEpilogNode", -wordSize);
1386 st->cr(); st->print("\t");
1387 st->print("ld FP, SP, %d # Restore FP @ MachEpilogNode", -wordSize*2);
1388 }
1390 if( do_polling() && C->is_method_compilation() ) {
1391 st->print("Poll Safepoint # MachEpilogNode");
1392 }
1393 }
1394 #endif
// Emit the method epilogue: release the frame, restore RA and FP from
// below the (already popped) SP, and poll the safepoint page on return
// from method compilations.
void MachEpilogNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
  Compile *C = ra_->C;
  MacroAssembler _masm(&cbuf);
  int framesize = C->frame_size_in_bytes();

  assert((framesize & (StackAlignmentInBytes-1)) == 0, "frame size not aligned");

  // Pop the frame first; RA/FP then live at negative offsets from SP.
  __ daddiu(SP, SP, framesize);

  if (UseLoongsonISA) {
    // Loongson load-pair restores RA and FP in one instruction.
    __ gslq(RA, FP, SP, -wordSize*2);
  } else {
    __ ld(RA, SP, -wordSize );
    __ ld(FP, SP, -wordSize*2 );
  }

  if( do_polling() && C->is_method_compilation() ) {
    // Read the polling page; a protected page turns this load into a
    // safepoint trap.
    __ set64(AT, (long)os::get_polling_page());
    __ relocate(relocInfo::poll_return_type);
    __ lw(AT, AT, 0);
  }
}
// Epilogue size varies (Loongson ISA, polling); compute it the generic
// way by emitting into a scratch buffer.
uint MachEpilogNode::size(PhaseRegAlloc *ra_) const {
  return MachNode::size(ra_); // too many variables; just compute it the hard way
}
// Upper bound on relocation entries the epilogue may create.
int MachEpilogNode::reloc() const {
  return 0; // a large enough number
}
// Use the default pipeline description for the epilogue.
const Pipeline * MachEpilogNode::pipeline() const {
  return MachNode::pipeline_class();
}
1431 int MachEpilogNode::safepoint_offset() const { return 0; }
1433 //=============================================================================
1435 #ifndef PRODUCT
// Debug listing: show the stack address (SP + offset of the box lock
// slot) being materialized into the allocated register.
void BoxLockNode::format( PhaseRegAlloc *ra_, outputStream* st ) const {
  int offset = ra_->reg2offset(in_RegMask(0).find_first_elem());
  int reg = ra_->get_reg_first(this);
  st->print("ADDI %s, SP, %d @BoxLockNode",Matcher::regName[reg],offset);
}
1441 #endif
// BoxLock emits exactly one 4-byte instruction (see BoxLockNode::emit).
uint BoxLockNode::size(PhaseRegAlloc *ra_) const {
  return 4;
}
// Materialize the stack address of the box-lock slot into the allocated
// register: reg = SP + offset.
void BoxLockNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
  MacroAssembler _masm(&cbuf);
  int offset = ra_->reg2offset(in_RegMask(0).find_first_elem());
  int reg = ra_->get_encode(this);

  // NOTE(review): addi takes a 16-bit signed immediate and traps on
  // overflow; assumes the frame offset fits in simm16 -- confirm for
  // very large frames.
  __ addi(as_Register(reg), SP, offset);
}
1471 //static int sizeof_FFree_Float_Stack_All = -1;
// Offset from the start of a runtime call to the return address: one
// full native call sequence (lui/ori/dsll/ori/jalr/nop = 6 insts).
int MachCallRuntimeNode::ret_addr_offset() {
  //lui
  //ori
  //dsll
  //ori
  //jalr
  //nop
  // Keep in sync with the native call layout.
  assert(NativeCall::instruction_size == 24, "in MachCallRuntimeNode::ret_addr_offset()");
  return NativeCall::instruction_size;
}
1489 //=============================================================================
1490 #ifndef PRODUCT
// Debug listing: _count nops, 4 bytes each.
void MachNopNode::format( PhaseRegAlloc *, outputStream* st ) const {
  st->print("NOP \t# %d bytes pad for loops and calls", 4 * _count);
}
1494 #endif
1496 void MachNopNode::emit(CodeBuffer &cbuf, PhaseRegAlloc * ) const {
1497 MacroAssembler _masm(&cbuf);
1498 int i = 0;
1499 for(i = 0; i < _count; i++)
1500 __ nop();
1501 }
// Byte size of the pad: one 4-byte nop per count.
uint MachNopNode::size(PhaseRegAlloc *) const {
  return 4 * _count;
}
// Use the default pipeline description for nops.
const Pipeline* MachNopNode::pipeline() const {
  return MachNode::pipeline_class();
}
1510 //=============================================================================
1512 //=============================================================================
1513 #ifndef PRODUCT
// Debug listing of the unverified entry point: inline-cache check that
// jumps to the IC-miss stub on klass mismatch.  Mirrors MachUEPNode::emit.
void MachUEPNode::format( PhaseRegAlloc *ra_, outputStream* st ) const {
  st->print_cr("load_klass(T9, T0)");
  st->print_cr("\tbeq(T9, iCache, L)");
  st->print_cr("\tnop");
  st->print_cr("\tjmp(SharedRuntime::get_ic_miss_stub(), relocInfo::runtime_call_type)");
  st->print_cr("\tnop");
  st->print_cr("\tnop");
  st->print_cr(" L:");
}
1523 #endif
// Emit the unverified entry point: compare the receiver's klass (in T0)
// against the inline-cache klass; on mismatch, jump to the IC-miss stub.
// The verified entry point that follows must stay 8-byte aligned so
// NativeJump::patch_verified_entry() can patch it.
void MachUEPNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
  MacroAssembler _masm(&cbuf);
#ifdef ASSERT
  //uint code_size = cbuf.code_size();
#endif
  int ic_reg = Matcher::inline_cache_reg_encode();
  Label L;
  Register receiver = T0;
  Register iCache = as_Register(ic_reg);
  __ load_klass(T9, receiver);
  __ beq(T9, iCache, L);
  __ nop();    // branch delay slot

  __ relocate(relocInfo::runtime_call_type);
  __ patchable_jump((address)SharedRuntime::get_ic_miss_stub());

  /* WARNING these NOPs are critical so that verified entry point is properly
   * 8 bytes aligned for patching by NativeJump::patch_verified_entry() */
  __ align(CodeEntryAlignment);
  __ bind(L);
}
// Size varies with alignment padding; compute it the generic way.
uint MachUEPNode::size(PhaseRegAlloc *ra_) const {
  return MachNode::size(ra_);
}
1554 //=============================================================================
1556 const RegMask& MachConstantBaseNode::_out_RegMask = P_REG_mask();
// The table base is addressed absolutely on this target, so no offset.
int Compile::ConstantTable::calculate_table_base_offset() const {
  return 0; // absolute addressing, no offset
}
1562 bool MachConstantBaseNode::requires_postalloc_expand() const { return false; }
// Never called: requires_postalloc_expand() returns false.
void MachConstantBaseNode::postalloc_expand(GrowableArray <Node *> *nodes, PhaseRegAlloc *ra_) {
  ShouldNotReachHere();
}
// Materialize the absolute address of the constant-table base into the
// allocated register, with an internal-pc relocation so the address is
// fixed up when the code moves.
void MachConstantBaseNode::emit(CodeBuffer& cbuf, PhaseRegAlloc* ra_) const {
  Compile* C = ra_->C;
  Compile::ConstantTable& constant_table = C->constant_table();
  MacroAssembler _masm(&cbuf);

  Register Rtoc = as_Register(ra_->get_encode(this));
  CodeSection* consts_section = __ code()->consts();
  int consts_size = consts_section->align_at_start(consts_section->size());
  assert(constant_table.size() == consts_size, "must be equal");

  // Nothing to materialize if the method has no constants.
  if (consts_section->size()) {
    // Materialize the constant table base.
    address baseaddr = consts_section->start() + -(constant_table.table_base_offset());
    __ relocate(relocInfo::internal_pc_type);
    __ patchable_set48(Rtoc, (long)baseaddr);
  }
}
// Worst-case size: patchable_set48 always occupies 4 instructions.
uint MachConstantBaseNode::size(PhaseRegAlloc* ra_) const {
  // patchable_set48 (4 insts)
  return 4 * 4;
}
1591 #ifndef PRODUCT
// Debug listing of the constant-table base materialization.
void MachConstantBaseNode::format(PhaseRegAlloc* ra_, outputStream* st) const {
  Register r = as_Register(ra_->get_encode(this));
  st->print("patchable_set48 %s, &constanttable (constant table base) @ MachConstantBaseNode", r->name());
}
1596 #endif
1599 //=============================================================================
1600 #ifndef PRODUCT
// Debug listing of the method prologue: optional stack bang, save RA/FP
// below SP (single gssq on Loongson), establish FP, then allocate the
// frame.  Must mirror MachPrologNode::emit.
void MachPrologNode::format( PhaseRegAlloc *ra_, outputStream* st ) const {
  Compile* C = ra_->C;

  int framesize = C->frame_size_in_bytes();
  int bangsize = C->bang_size_in_bytes();
  assert((framesize & (StackAlignmentInBytes-1)) == 0, "frame size not aligned");

  // Calls to C2R adapters often do not accept exceptional returns.
  // We require that their callers must bang for them. But be careful, because
  // some VM calls (such as call site linkage) can use several kilobytes of
  // stack. But the stack safety zone should account for that.
  // See bugs 4446381, 4468289, 4497237.
  if (C->need_stack_bang(bangsize)) {
    st->print_cr("# stack bang"); st->print("\t");
  }
  if (UseLoongsonISA) {
    st->print("gssq RA, FP, %d(SP) @ MachPrologNode\n\t", -wordSize*2);
  } else {
    st->print("sd RA, %d(SP) @ MachPrologNode\n\t", -wordSize);
    st->print("sd FP, %d(SP) @ MachPrologNode\n\t", -wordSize*2);
  }
  st->print("daddiu FP, SP, -%d \n\t", wordSize*2);
  st->print("daddiu SP, SP, -%d \t",framesize);
}
1625 #endif
// Emit the method prologue: bang the stack if required, save RA/FP below
// SP, establish the new FP, allocate the frame, and leave two nops at the
// frame-complete point for NativeJump::patch_verified_entry().
void MachPrologNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
  Compile* C = ra_->C;
  MacroAssembler _masm(&cbuf);

  int framesize = C->frame_size_in_bytes();
  int bangsize = C->bang_size_in_bytes();

  assert((framesize & (StackAlignmentInBytes-1)) == 0, "frame size not aligned");

  if (C->need_stack_bang(bangsize)) {
    __ generate_stack_overflow_check(bangsize);
  }

  if (UseLoongsonISA) {
    // Loongson store-pair saves RA and FP in one instruction.
    __ gssq(RA, FP, SP, -wordSize*2);
  } else {
    __ sd(RA, SP, -wordSize);
    __ sd(FP, SP, -wordSize*2);
  }
  __ daddiu(FP, SP, -wordSize*2);
  __ daddiu(SP, SP, -framesize);
  // Make enough room for patch_verified_entry() (2013.10.22 Jin).
  __ nop();
  __ nop();

  C->set_frame_complete(cbuf.insts_size());
  if (C->has_mach_constant_base_node()) {
    // NOTE: We set the table base offset here because users might be
    // emitted before MachConstantBaseNode.
    Compile::ConstantTable& constant_table = C->constant_table();
    constant_table.set_table_base_offset(constant_table.calculate_table_base_offset());
  }

}
// Prologue size varies (stack bang, Loongson ISA); compute it generically.
uint MachPrologNode::size(PhaseRegAlloc *ra_) const {
  return MachNode::size(ra_); // too many variables; just compute it the hard way
}
// Upper bound on relocation entries the prologue may create.
int MachPrologNode::reloc() const {
  return 0; // a large enough number
}
1671 %}
1673 //----------ENCODING BLOCK-----------------------------------------------------
1674 // This block specifies the encoding classes used by the compiler to output
1675 // byte streams. Encoding classes generate functions which are called by
1676 // Machine Instruction Nodes in order to generate the bit encoding of the
1677 // instruction. Operands specify their base encoding interface with the
1678 // interface keyword. There are currently supported four interfaces,
1679 // REG_INTER, CONST_INTER, MEMORY_INTER, & COND_INTER. REG_INTER causes an
1680 // operand to generate a function which returns its register number when
1681 // queried. CONST_INTER causes an operand to generate a function which
1682 // returns the value of the constant when queried. MEMORY_INTER causes an
1683 // operand to generate four functions which return the Base Register, the
1684 // Index Register, the Scale Value, and the Offset Value of the operand when
1685 // queried. COND_INTER causes an operand to generate six functions which
1686 // return the encoding code (ie - encoding bits for the instruction)
1687 // associated with each basic boolean condition for a conditional instruction.
1688 // Instructions specify two basic values for encoding. They use the
1689 // ins_encode keyword to specify their encoding class (which must be one of
1690 // the class names specified in the encoding block), and they use the
1691 // opcode keyword to specify, in order, their primary, secondary, and
1692 // tertiary opcode. Only the opcode sections which a particular instruction
1693 // needs for encoding need to be specified.
1694 encode %{
1696 //Load byte signed
1697 enc_class load_B_enc (mRegI dst, memory mem) %{
1698 MacroAssembler _masm(&cbuf);
1699 int dst = $dst$$reg;
1700 int base = $mem$$base;
1701 int index = $mem$$index;
1702 int scale = $mem$$scale;
1703 int disp = $mem$$disp;
1705 if( index != 0 ) {
1706 if( Assembler::is_simm16(disp) ) {
1707 if( UseLoongsonISA ) {
1708 if (scale == 0) {
1709 __ gslbx(as_Register(dst), as_Register(base), as_Register(index), disp);
1710 } else {
1711 __ dsll(AT, as_Register(index), scale);
1712 __ gslbx(as_Register(dst), as_Register(base), AT, disp);
1713 }
1714 } else {
1715 if (scale == 0) {
1716 __ addu(AT, as_Register(base), as_Register(index));
1717 } else {
1718 __ dsll(AT, as_Register(index), scale);
1719 __ addu(AT, as_Register(base), AT);
1720 }
1721 __ lb(as_Register(dst), AT, disp);
1722 }
1723 } else {
1724 if (scale == 0) {
1725 __ addu(AT, as_Register(base), as_Register(index));
1726 } else {
1727 __ dsll(AT, as_Register(index), scale);
1728 __ addu(AT, as_Register(base), AT);
1729 }
1730 __ move(T9, disp);
1731 if( UseLoongsonISA ) {
1732 __ gslbx(as_Register(dst), AT, T9, 0);
1733 } else {
1734 __ addu(AT, AT, T9);
1735 __ lb(as_Register(dst), AT, 0);
1736 }
1737 }
1738 } else {
1739 if( Assembler::is_simm16(disp) ) {
1740 __ lb(as_Register(dst), as_Register(base), disp);
1741 } else {
1742 __ move(T9, disp);
1743 if( UseLoongsonISA ) {
1744 __ gslbx(as_Register(dst), as_Register(base), T9, 0);
1745 } else {
1746 __ addu(AT, as_Register(base), T9);
1747 __ lb(as_Register(dst), AT, 0);
1748 }
1749 }
1750 }
1751 %}
1753 //Load byte unsigned
  // Load byte unsigned: dst = zero_extend(*(base + index<<scale + disp)).
  enc_class load_UB_enc (mRegI dst, memory mem) %{
    MacroAssembler _masm(&cbuf);
    int dst = $dst$$reg;
    int base = $mem$$base;
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;

    if( index != 0 ) {
      // Fold base + index<<scale into AT first.
      if (scale == 0) {
        __ daddu(AT, as_Register(base), as_Register(index));
      } else {
        __ dsll(AT, as_Register(index), scale);
        __ daddu(AT, as_Register(base), AT);
      }
      if( Assembler::is_simm16(disp) ) {
        __ lbu(as_Register(dst), AT, disp);
      } else {
        // Displacement too large for the lbu immediate: add it via T9.
        __ move(T9, disp);
        __ daddu(AT, AT, T9);
        __ lbu(as_Register(dst), AT, 0);
      }
    } else {
      if( Assembler::is_simm16(disp) ) {
        __ lbu(as_Register(dst), as_Register(base), disp);
      } else {
        __ move(T9, disp);
        __ daddu(AT, as_Register(base), T9);
        __ lbu(as_Register(dst), AT, 0);
      }
    }
  %}
  // Store byte from register: *(base + index<<scale + disp) = src (low 8 bits).
  // NOTE(review): address arithmetic here uses addu, which on MIPS64
  // sign-extends the 32-bit sum; load_UB_enc and store_B_immI_enc use
  // daddu.  Looks unsafe for addresses above 2 GB -- confirm.
  enc_class store_B_reg_enc (memory mem, mRegI src) %{
    MacroAssembler _masm(&cbuf);
    int src = $src$$reg;
    int base = $mem$$base;
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;

    if( index != 0 ) {
      if (scale == 0) {
        // gssbx only takes an 8-bit immediate displacement.
        if( Assembler::is_simm(disp, 8) ) {
          if (UseLoongsonISA) {
            __ gssbx(as_Register(src), as_Register(base), as_Register(index), disp);
          } else {
            __ addu(AT, as_Register(base), as_Register(index));
            __ sb(as_Register(src), AT, disp);
          }
        } else if( Assembler::is_simm16(disp) ) {
          __ addu(AT, as_Register(base), as_Register(index));
          __ sb(as_Register(src), AT, disp);
        } else {
          __ addu(AT, as_Register(base), as_Register(index));
          __ move(T9, disp);
          if (UseLoongsonISA) {
            __ gssbx(as_Register(src), AT, T9, 0);
          } else {
            __ addu(AT, AT, T9);
            __ sb(as_Register(src), AT, 0);
          }
        }
      } else {
        // Scaled index: shift first, then combine with base/displacement.
        __ dsll(AT, as_Register(index), scale);
        if( Assembler::is_simm(disp, 8) ) {
          if (UseLoongsonISA) {
            __ gssbx(as_Register(src), AT, as_Register(base), disp);
          } else {
            __ addu(AT, as_Register(base), AT);
            __ sb(as_Register(src), AT, disp);
          }
        } else if( Assembler::is_simm16(disp) ) {
          __ addu(AT, as_Register(base), AT);
          __ sb(as_Register(src), AT, disp);
        } else {
          __ addu(AT, as_Register(base), AT);
          __ move(T9, disp);
          if (UseLoongsonISA) {
            __ gssbx(as_Register(src), AT, T9, 0);
          } else {
            __ addu(AT, AT, T9);
            __ sb(as_Register(src), AT, 0);
          }
        }
      }
    } else {
      // No index register.
      if( Assembler::is_simm16(disp) ) {
        __ sb(as_Register(src), as_Register(base), disp);
      } else {
        __ move(T9, disp);
        if (UseLoongsonISA) {
          __ gssbx(as_Register(src), as_Register(base), T9, 0);
        } else {
          __ addu(AT, as_Register(base), T9);
          __ sb(as_Register(src), AT, 0);
        }
      }
    }
  %}
  // Store immediate byte: *(base + index<<scale + disp) = value.
  // value == 0 is special-cased to store R0 directly; otherwise the
  // immediate is materialized in a scratch register first.
  enc_class store_B_immI_enc (memory mem, immI8 src) %{
    MacroAssembler _masm(&cbuf);
    int base = $mem$$base;
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;
    int value = $src$$constant;

    if( index != 0 ) {
      if (!UseLoongsonISA) {
        // Plain MIPS: compute base + index<<scale into AT, then sb.
        if (scale == 0) {
          __ daddu(AT, as_Register(base), as_Register(index));
        } else {
          __ dsll(AT, as_Register(index), scale);
          __ daddu(AT, as_Register(base), AT);
        }
        if( Assembler::is_simm16(disp) ) {
          if (value == 0) {
            __ sb(R0, AT, disp);
          } else {
            __ move(T9, value);
            __ sb(T9, AT, disp);
          }
        } else {
          if (value == 0) {
            __ move(T9, disp);
            __ daddu(AT, AT, T9);
            __ sb(R0, AT, 0);
          } else {
            __ move(T9, disp);
            __ daddu(AT, AT, T9);
            __ move(T9, value);
            __ sb(T9, AT, 0);
          }
        }
      } else {
        // Loongson: use gssbx where the displacement fits (simm8).
        if (scale == 0) {
          if( Assembler::is_simm(disp, 8) ) {
            if (value == 0) {
              __ gssbx(R0, as_Register(base), as_Register(index), disp);
            } else {
              __ move(T9, value);
              __ gssbx(T9, as_Register(base), as_Register(index), disp);
            }
          } else if( Assembler::is_simm16(disp) ) {
            __ daddu(AT, as_Register(base), as_Register(index));
            if (value == 0) {
              __ sb(R0, AT, disp);
            } else {
              __ move(T9, value);
              __ sb(T9, AT, disp);
            }
          } else {
            if (value == 0) {
              __ daddu(AT, as_Register(base), as_Register(index));
              __ move(T9, disp);
              __ gssbx(R0, AT, T9, 0);
            } else {
              // Both T9 and AT are needed: AT = base + disp, index stays
              // as the gssbx index operand.
              __ move(AT, disp);
              __ move(T9, value);
              __ daddu(AT, as_Register(base), AT);
              __ gssbx(T9, AT, as_Register(index), 0);
            }
          }

        } else {

          if( Assembler::is_simm(disp, 8) ) {
            __ dsll(AT, as_Register(index), scale);
            if (value == 0) {
              __ gssbx(R0, as_Register(base), AT, disp);
            } else {
              __ move(T9, value);
              __ gssbx(T9, as_Register(base), AT, disp);
            }
          } else if( Assembler::is_simm16(disp) ) {
            __ dsll(AT, as_Register(index), scale);
            __ daddu(AT, as_Register(base), AT);
            if (value == 0) {
              __ sb(R0, AT, disp);
            } else {
              __ move(T9, value);
              __ sb(T9, AT, disp);
            }
          } else {
            __ dsll(AT, as_Register(index), scale);
            if (value == 0) {
              __ daddu(AT, as_Register(base), AT);
              __ move(T9, disp);
              __ gssbx(R0, AT, T9, 0);
            } else {
              // AT = index<<scale + disp; base is the gssbx base operand.
              __ move(T9, disp);
              __ daddu(AT, AT, T9);
              __ move(T9, value);
              __ gssbx(T9, as_Register(base), AT, 0);
            }
          }
        }
      }
    } else {
      // No index register.
      if( Assembler::is_simm16(disp) ) {
        if (value == 0) {
          __ sb(R0, as_Register(base), disp);
        } else {
          __ move(AT, value);
          __ sb(AT, as_Register(base), disp);
        }
      } else {
        if (value == 0) {
          __ move(T9, disp);
          if (UseLoongsonISA) {
            __ gssbx(R0, as_Register(base), T9, 0);
          } else {
            __ daddu(AT, as_Register(base), T9);
            __ sb(R0, AT, 0);
          }
        } else {
          __ move(T9, disp);
          if (UseLoongsonISA) {
            __ move(AT, value);
            __ gssbx(AT, as_Register(base), T9, 0);
          } else {
            __ daddu(AT, as_Register(base), T9);
            __ move(T9, value);
            __ sb(T9, AT, 0);
          }
        }
      }
    }
  %}
  // Store an immediate byte to memory, followed by a memory barrier (sync).
  // Address = base + (index << scale) + disp.  Instruction selection:
  //   - Loongson ISA + disp fits simm8  -> indexed gssbx
  //   - disp fits simm16                -> plain sb off a computed base in AT
  //   - otherwise                       -> materialize disp in a scratch reg
  // AT and T9 are the only scratch registers used; T9 may be reused for both
  // the displacement and the value, so the order of the moves below matters.
  enc_class store_B_immI_enc_sync (memory mem, immI8 src) %{
    MacroAssembler _masm(&cbuf);
    int base = $mem$$base;
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;
    int value = $src$$constant;

    if( index != 0 ) {
      if ( UseLoongsonISA ) {
        if ( Assembler::is_simm(disp,8) ) {
          // Indexed store with an 8-bit immediate offset.
          if ( scale == 0 ) {
            if ( value == 0 ) {
              // Zero store: reuse the hardwired-zero register R0.
              __ gssbx(R0, as_Register(base), as_Register(index), disp);
            } else {
              __ move(AT, value);
              __ gssbx(AT, as_Register(base), as_Register(index), disp);
            }
          } else {
            __ dsll(AT, as_Register(index), scale);
            if ( value == 0 ) {
              __ gssbx(R0, as_Register(base), AT, disp);
            } else {
              __ move(T9, value);
              __ gssbx(T9, as_Register(base), AT, disp);
            }
          }
        } else if ( Assembler::is_simm16(disp) ) {
          // disp fits the 16-bit sb offset: fold base+index into AT.
          if ( scale == 0 ) {
            __ daddu(AT, as_Register(base), as_Register(index));
            if ( value == 0 ){
              __ sb(R0, AT, disp);
            } else {
              __ move(T9, value);
              __ sb(T9, AT, disp);
            }
          } else {
            __ dsll(AT, as_Register(index), scale);
            __ daddu(AT, as_Register(base), AT);
            if ( value == 0 ) {
              __ sb(R0, AT, disp);
            } else {
              __ move(T9, value);
              __ sb(T9, AT, disp);
            }
          }
        } else {
          // Large displacement: materialize it and fold it into the index.
          if ( scale == 0 ) {
            __ move(AT, disp);
            __ daddu(AT, as_Register(index), AT);
            if ( value == 0 ) {
              __ gssbx(R0, as_Register(base), AT, 0);
            } else {
              __ move(T9, value);
              __ gssbx(T9, as_Register(base), AT, 0);
            }
          } else {
            __ dsll(AT, as_Register(index), scale);
            __ move(T9, disp);
            __ daddu(AT, AT, T9);
            if ( value == 0 ) {
              __ gssbx(R0, as_Register(base), AT, 0);
            } else {
              // T9 is free again here: disp is already folded into AT.
              __ move(T9, value);
              __ gssbx(T9, as_Register(base), AT, 0);
            }
          }
        }
      } else { //not use loongson isa
        // Plain MIPS path: compute base + (index << scale) into AT first.
        if (scale == 0) {
          __ daddu(AT, as_Register(base), as_Register(index));
        } else {
          __ dsll(AT, as_Register(index), scale);
          __ daddu(AT, as_Register(base), AT);
        }
        if( Assembler::is_simm16(disp) ) {
          if (value == 0) {
            __ sb(R0, AT, disp);
          } else {
            __ move(T9, value);
            __ sb(T9, AT, disp);
          }
        } else {
          if (value == 0) {
            __ move(T9, disp);
            __ daddu(AT, AT, T9);
            __ sb(R0, AT, 0);
          } else {
            __ move(T9, disp);
            __ daddu(AT, AT, T9);
            __ move(T9, value);
            __ sb(T9, AT, 0);
          }
        }
      }
    } else {
      // No index register: address is base + disp.
      if ( UseLoongsonISA ){
        if ( Assembler::is_simm16(disp) ){
          if ( value == 0 ) {
            __ sb(R0, as_Register(base), disp);
          } else {
            __ move(AT, value);
            __ sb(AT, as_Register(base), disp);
          }
        } else {
          __ move(AT, disp);
          if ( value == 0 ) {
            __ gssbx(R0, as_Register(base), AT, 0);
          } else {
            __ move(T9, value);
            __ gssbx(T9, as_Register(base), AT, 0);
          }
        }
      } else {
        if( Assembler::is_simm16(disp) ) {
          if (value == 0) {
            __ sb(R0, as_Register(base), disp);
          } else {
            __ move(AT, value);
            __ sb(AT, as_Register(base), disp);
          }
        } else {
          if (value == 0) {
            __ move(T9, disp);
            __ daddu(AT, as_Register(base), T9);
            __ sb(R0, AT, 0);
          } else {
            __ move(T9, disp);
            __ daddu(AT, as_Register(base), T9);
            __ move(T9, value);
            __ sb(T9, AT, 0);
          }
        }
      }
    }

    // Memory barrier: this is the "_sync" variant (volatile store semantics).
    __ sync();
  %}
2127 // Load Short (16bit signed)
  // Load a signed 16-bit short into dst from base + (index << scale) + disp.
  // Uses Loongson indexed load (gslhx) when available and the displacement
  // fits, otherwise falls back to lh with the address folded into AT.
  enc_class load_S_enc (mRegI dst, memory mem) %{
    MacroAssembler _masm(&cbuf);
    int dst = $dst$$reg;
    int base = $mem$$base;
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;

    if( index != 0 ) {
      if ( UseLoongsonISA ) {
        if ( Assembler::is_simm(disp, 8) ) {
          // gslhx takes an 8-bit immediate offset.
          if (scale == 0) {
            __ gslhx(as_Register(dst), as_Register(base), as_Register(index), disp);
          } else {
            __ dsll(AT, as_Register(index), scale);
            __ gslhx(as_Register(dst), as_Register(base), AT, disp);
          }
        } else if ( Assembler::is_simm16(disp) ) {
          // lh takes a 16-bit offset; fold base+index into AT.
          if (scale == 0) {
            __ daddu(AT, as_Register(base), as_Register(index));
            __ lh(as_Register(dst), AT, disp);
          } else {
            __ dsll(AT, as_Register(index), scale);
            __ daddu(AT, as_Register(base), AT);
            __ lh(as_Register(dst), AT, disp);
          }
        } else {
          // Large displacement: fold disp into the scaled index.
          if (scale == 0) {
            __ move(AT, disp);
            __ daddu(AT, as_Register(index), AT);
            __ gslhx(as_Register(dst), as_Register(base), AT, 0);
          } else {
            __ dsll(AT, as_Register(index), scale);
            __ move(T9, disp);
            __ daddu(AT, AT, T9);
            __ gslhx(as_Register(dst), as_Register(base), AT, 0);
          }
        }
      } else { // not use loongson isa
        if (scale == 0) {
          __ daddu(AT, as_Register(base), as_Register(index));
        } else {
          __ dsll(AT, as_Register(index), scale);
          __ daddu(AT, as_Register(base), AT);
        }
        if( Assembler::is_simm16(disp) ) {
          __ lh(as_Register(dst), AT, disp);
        } else {
          __ move(T9, disp);
          __ daddu(AT, AT, T9);
          __ lh(as_Register(dst), AT, 0);
        }
      }
    } else { // index is 0
      if ( UseLoongsonISA ) {
        if ( Assembler::is_simm16(disp) ) {
          __ lh(as_Register(dst), as_Register(base), disp);
        } else {
          __ move(T9, disp);
          __ gslhx(as_Register(dst), as_Register(base), T9, 0);
        }
      } else { //not use loongson isa
        if( Assembler::is_simm16(disp) ) {
          __ lh(as_Register(dst), as_Register(base), disp);
        } else {
          __ move(T9, disp);
          __ daddu(AT, as_Register(base), T9);
          __ lh(as_Register(dst), AT, 0);
        }
      }
    }
  %}
2201 // Load Char (16bit unsigned)
2202 enc_class load_C_enc (mRegI dst, memory mem) %{
2203 MacroAssembler _masm(&cbuf);
2204 int dst = $dst$$reg;
2205 int base = $mem$$base;
2206 int index = $mem$$index;
2207 int scale = $mem$$scale;
2208 int disp = $mem$$disp;
2210 if( index != 0 ) {
2211 if (scale == 0) {
2212 __ daddu(AT, as_Register(base), as_Register(index));
2213 } else {
2214 __ dsll(AT, as_Register(index), scale);
2215 __ daddu(AT, as_Register(base), AT);
2216 }
2217 if( Assembler::is_simm16(disp) ) {
2218 __ lhu(as_Register(dst), AT, disp);
2219 } else {
2220 __ move(T9, disp);
2221 __ addu(AT, AT, T9);
2222 __ lhu(as_Register(dst), AT, 0);
2223 }
2224 } else {
2225 if( Assembler::is_simm16(disp) ) {
2226 __ lhu(as_Register(dst), as_Register(base), disp);
2227 } else {
2228 __ move(T9, disp);
2229 __ daddu(AT, as_Register(base), T9);
2230 __ lhu(as_Register(dst), AT, 0);
2231 }
2232 }
2233 %}
2235 // Store Char (16bit unsigned)
  // Store the low 16 bits of src to base + (index << scale) + disp.
  // NOTE(review): the index paths below use the 32-bit `addu` for address
  // arithmetic while most sibling encodings use `daddu`; presumably unsafe
  // for addresses outside the low 2GB on MIPS64 — verify and unify.
  enc_class store_C_reg_enc (memory mem, mRegI src) %{
    MacroAssembler _masm(&cbuf);
    int src = $src$$reg;
    int base = $mem$$base;
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;

    if( index != 0 ) {
      if( Assembler::is_simm16(disp) ) {
        if( UseLoongsonISA && Assembler::is_simm(disp, 8) ) {
          // Loongson indexed store with an 8-bit immediate offset.
          if (scale == 0) {
            __ gsshx(as_Register(src), as_Register(base), as_Register(index), disp);
          } else {
            __ dsll(AT, as_Register(index), scale);
            __ gsshx(as_Register(src), as_Register(base), AT, disp);
          }
        } else {
          if (scale == 0) {
            __ addu(AT, as_Register(base), as_Register(index));
          } else {
            __ dsll(AT, as_Register(index), scale);
            __ addu(AT, as_Register(base), AT);
          }
          __ sh(as_Register(src), AT, disp);
        }
      } else {
        // Large displacement: materialize it in T9.
        if (scale == 0) {
          __ addu(AT, as_Register(base), as_Register(index));
        } else {
          __ dsll(AT, as_Register(index), scale);
          __ addu(AT, as_Register(base), AT);
        }
        __ move(T9, disp);
        if( UseLoongsonISA ) {
          __ gsshx(as_Register(src), AT, T9, 0);
        } else {
          __ addu(AT, AT, T9);
          __ sh(as_Register(src), AT, 0);
        }
      }
    } else {
      // No index register: address is base + disp.
      if( Assembler::is_simm16(disp) ) {
        __ sh(as_Register(src), as_Register(base), disp);
      } else {
        __ move(T9, disp);
        if( UseLoongsonISA ) {
          __ gsshx(as_Register(src), as_Register(base), T9, 0);
        } else {
          __ addu(AT, as_Register(base), T9);
          __ sh(as_Register(src), AT, 0);
        }
      }
    }
  %}
  // Store a zero 16-bit char (via R0) to base + (index << scale) + disp.
  // NOTE(review): uses 32-bit `addu` for address arithmetic where siblings
  // use `daddu` — verify for addresses outside the low 2GB on MIPS64.
  enc_class store_C0_enc (memory mem) %{
    MacroAssembler _masm(&cbuf);
    int base = $mem$$base;
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;

    if( index != 0 ) {
      if( Assembler::is_simm16(disp) ) {
        if( UseLoongsonISA && Assembler::is_simm(disp, 8) ) {
          // Loongson indexed store with an 8-bit immediate offset.
          if (scale == 0) {
            __ gsshx(R0, as_Register(base), as_Register(index), disp);
          } else {
            __ dsll(AT, as_Register(index), scale);
            __ gsshx(R0, as_Register(base), AT, disp);
          }
        } else {
          if (scale == 0) {
            __ addu(AT, as_Register(base), as_Register(index));
          } else {
            __ dsll(AT, as_Register(index), scale);
            __ addu(AT, as_Register(base), AT);
          }
          __ sh(R0, AT, disp);
        }
      } else {
        // Large displacement: materialize it in T9.
        if (scale == 0) {
          __ addu(AT, as_Register(base), as_Register(index));
        } else {
          __ dsll(AT, as_Register(index), scale);
          __ addu(AT, as_Register(base), AT);
        }
        __ move(T9, disp);
        if( UseLoongsonISA ) {
          __ gsshx(R0, AT, T9, 0);
        } else {
          __ addu(AT, AT, T9);
          __ sh(R0, AT, 0);
        }
      }
    } else {
      // No index register: address is base + disp.
      if( Assembler::is_simm16(disp) ) {
        __ sh(R0, as_Register(base), disp);
      } else {
        __ move(T9, disp);
        if( UseLoongsonISA ) {
          __ gsshx(R0, as_Register(base), T9, 0);
        } else {
          __ addu(AT, as_Register(base), T9);
          __ sh(R0, AT, 0);
        }
      }
    }
  %}
  // Load a signed 32-bit int into dst from base + (index << scale) + disp.
  // NOTE(review): uses 32-bit `addu` for address arithmetic where siblings
  // use `daddu` — verify for addresses outside the low 2GB on MIPS64.
  enc_class load_I_enc (mRegI dst, memory mem) %{
    MacroAssembler _masm(&cbuf);
    int dst = $dst$$reg;
    int base = $mem$$base;
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;

    if( index != 0 ) {
      if( Assembler::is_simm16(disp) ) {
        if( UseLoongsonISA && Assembler::is_simm(disp, 8) ) {
          // Loongson indexed load with an 8-bit immediate offset.
          if (scale == 0) {
            __ gslwx(as_Register(dst), as_Register(base), as_Register(index), disp);
          } else {
            __ dsll(AT, as_Register(index), scale);
            __ gslwx(as_Register(dst), as_Register(base), AT, disp);
          }
        } else {
          if (scale == 0) {
            __ addu(AT, as_Register(base), as_Register(index));
          } else {
            __ dsll(AT, as_Register(index), scale);
            __ addu(AT, as_Register(base), AT);
          }
          __ lw(as_Register(dst), AT, disp);
        }
      } else {
        // Large displacement: materialize it in T9.
        if (scale == 0) {
          __ addu(AT, as_Register(base), as_Register(index));
        } else {
          __ dsll(AT, as_Register(index), scale);
          __ addu(AT, as_Register(base), AT);
        }
        __ move(T9, disp);
        if( UseLoongsonISA ) {
          __ gslwx(as_Register(dst), AT, T9, 0);
        } else {
          __ addu(AT, AT, T9);
          __ lw(as_Register(dst), AT, 0);
        }
      }
    } else {
      // No index register: address is base + disp.
      if( Assembler::is_simm16(disp) ) {
        __ lw(as_Register(dst), as_Register(base), disp);
      } else {
        __ move(T9, disp);
        if( UseLoongsonISA ) {
          __ gslwx(as_Register(dst), as_Register(base), T9, 0);
        } else {
          __ addu(AT, as_Register(base), T9);
          __ lw(as_Register(dst), AT, 0);
        }
      }
    }
  %}
  // Store a 32-bit int from src to base + (index << scale) + disp.
  // NOTE(review): uses 32-bit `addu` for address arithmetic where siblings
  // use `daddu` — verify for addresses outside the low 2GB on MIPS64.
  enc_class store_I_reg_enc (memory mem, mRegI src) %{
    MacroAssembler _masm(&cbuf);
    int src = $src$$reg;
    int base = $mem$$base;
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;

    if( index != 0 ) {
      if( Assembler::is_simm16(disp) ) {
        if( UseLoongsonISA && Assembler::is_simm(disp, 8) ) {
          // Loongson indexed store with an 8-bit immediate offset.
          if (scale == 0) {
            __ gsswx(as_Register(src), as_Register(base), as_Register(index), disp);
          } else {
            __ dsll(AT, as_Register(index), scale);
            __ gsswx(as_Register(src), as_Register(base), AT, disp);
          }
        } else {
          if (scale == 0) {
            __ addu(AT, as_Register(base), as_Register(index));
          } else {
            __ dsll(AT, as_Register(index), scale);
            __ addu(AT, as_Register(base), AT);
          }
          __ sw(as_Register(src), AT, disp);
        }
      } else {
        // Large displacement: materialize it in T9.
        if (scale == 0) {
          __ addu(AT, as_Register(base), as_Register(index));
        } else {
          __ dsll(AT, as_Register(index), scale);
          __ addu(AT, as_Register(base), AT);
        }
        __ move(T9, disp);
        if( UseLoongsonISA ) {
          __ gsswx(as_Register(src), AT, T9, 0);
        } else {
          __ addu(AT, AT, T9);
          __ sw(as_Register(src), AT, 0);
        }
      }
    } else {
      // No index register: address is base + disp.
      if( Assembler::is_simm16(disp) ) {
        __ sw(as_Register(src), as_Register(base), disp);
      } else {
        __ move(T9, disp);
        if( UseLoongsonISA ) {
          __ gsswx(as_Register(src), as_Register(base), T9, 0);
        } else {
          __ addu(AT, as_Register(base), T9);
          __ sw(as_Register(src), AT, 0);
        }
      }
    }
  %}
  // Store an immediate 32-bit int to base + (index << scale) + disp.
  // Zero values are stored straight from R0; non-zero values are first
  // materialized in a scratch register (T9 or AT).  T9 may hold the
  // displacement and then be re-used for the value, so statement order
  // below is significant.
  enc_class store_I_immI_enc (memory mem, immI src) %{
    MacroAssembler _masm(&cbuf);
    int base = $mem$$base;
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;
    int value = $src$$constant;

    if( index != 0 ) {
      if ( UseLoongsonISA ) {
        if ( Assembler::is_simm(disp, 8) ) {
          // Indexed gsswx with an 8-bit immediate offset.
          if ( scale == 0 ) {
            if ( value == 0 ) {
              __ gsswx(R0, as_Register(base), as_Register(index), disp);
            } else {
              __ move(T9, value);
              __ gsswx(T9, as_Register(base), as_Register(index), disp);
            }
          } else {
            __ dsll(AT, as_Register(index), scale);
            if ( value == 0 ) {
              __ gsswx(R0, as_Register(base), AT, disp);
            } else {
              __ move(T9, value);
              __ gsswx(T9, as_Register(base), AT, disp);
            }
          }
        } else if ( Assembler::is_simm16(disp) ) {
          // 16-bit displacement: fold base+index into AT, use plain sw.
          if ( scale == 0 ) {
            __ daddu(AT, as_Register(base), as_Register(index));
            if ( value == 0 ) {
              __ sw(R0, AT, disp);
            } else {
              __ move(T9, value);
              __ sw(T9, AT, disp);
            }
          } else {
            __ dsll(AT, as_Register(index), scale);
            __ daddu(AT, as_Register(base), AT);
            if ( value == 0 ) {
              __ sw(R0, AT, disp);
            } else {
              __ move(T9, value);
              __ sw(T9, AT, disp);
            }
          }
        } else {
          // Large displacement: fold disp into the (scaled) index first.
          if ( scale == 0 ) {
            __ move(T9, disp);
            __ daddu(AT, as_Register(index), T9);
            if ( value ==0 ) {
              __ gsswx(R0, as_Register(base), AT, 0);
            } else {
              __ move(T9, value);
              __ gsswx(T9, as_Register(base), AT, 0);
            }
          } else {
            __ dsll(AT, as_Register(index), scale);
            __ move(T9, disp);
            __ daddu(AT, AT, T9);
            if ( value == 0 ) {
              __ gsswx(R0, as_Register(base), AT, 0);
            } else {
              __ move(T9, value);
              __ gsswx(T9, as_Register(base), AT, 0);
            }
          }
        }
      } else { //not use loongson isa
        if (scale == 0) {
          __ daddu(AT, as_Register(base), as_Register(index));
        } else {
          __ dsll(AT, as_Register(index), scale);
          __ daddu(AT, as_Register(base), AT);
        }
        if( Assembler::is_simm16(disp) ) {
          if (value == 0) {
            __ sw(R0, AT, disp);
          } else {
            __ move(T9, value);
            __ sw(T9, AT, disp);
          }
        } else {
          if (value == 0) {
            __ move(T9, disp);
            __ daddu(AT, AT, T9);
            __ sw(R0, AT, 0);
          } else {
            __ move(T9, disp);
            __ daddu(AT, AT, T9);
            __ move(T9, value);
            __ sw(T9, AT, 0);
          }
        }
      }
    } else {
      // No index register: address is base + disp.
      if ( UseLoongsonISA ) {
        if ( Assembler::is_simm16(disp) ) {
          if ( value == 0 ) {
            __ sw(R0, as_Register(base), disp);
          } else {
            __ move(AT, value);
            __ sw(AT, as_Register(base), disp);
          }
        } else {
          __ move(T9, disp);
          if ( value == 0 ) {
            __ gsswx(R0, as_Register(base), T9, 0);
          } else {
            __ move(AT, value);
            __ gsswx(AT, as_Register(base), T9, 0);
          }
        }
      } else {
        if( Assembler::is_simm16(disp) ) {
          if (value == 0) {
            __ sw(R0, as_Register(base), disp);
          } else {
            __ move(AT, value);
            __ sw(AT, as_Register(base), disp);
          }
        } else {
          if (value == 0) {
            __ move(T9, disp);
            __ daddu(AT, as_Register(base), T9);
            __ sw(R0, AT, 0);
          } else {
            __ move(T9, disp);
            __ daddu(AT, as_Register(base), T9);
            __ move(T9, value);
            __ sw(T9, AT, 0);
          }
        }
      }
    }
  %}
2596 enc_class load_N_enc (mRegN dst, memory mem) %{
2597 MacroAssembler _masm(&cbuf);
2598 int dst = $dst$$reg;
2599 int base = $mem$$base;
2600 int index = $mem$$index;
2601 int scale = $mem$$scale;
2602 int disp = $mem$$disp;
2603 relocInfo::relocType disp_reloc = $mem->disp_reloc();
2604 assert(disp_reloc == relocInfo::none, "cannot have disp");
2606 if( index != 0 ) {
2607 if (scale == 0) {
2608 __ daddu(AT, as_Register(base), as_Register(index));
2609 } else {
2610 __ dsll(AT, as_Register(index), scale);
2611 __ daddu(AT, as_Register(base), AT);
2612 }
2613 if( Assembler::is_simm16(disp) ) {
2614 __ lwu(as_Register(dst), AT, disp);
2615 } else {
2616 __ set64(T9, disp);
2617 __ daddu(AT, AT, T9);
2618 __ lwu(as_Register(dst), AT, 0);
2619 }
2620 } else {
2621 if( Assembler::is_simm16(disp) ) {
2622 __ lwu(as_Register(dst), as_Register(base), disp);
2623 } else {
2624 __ set64(T9, disp);
2625 __ daddu(AT, as_Register(base), T9);
2626 __ lwu(as_Register(dst), AT, 0);
2627 }
2628 }
2630 %}
  // Load a 64-bit pointer into dst from base + (index << scale) + disp.
  // The memory operand must carry no displacement relocation.  Note this
  // encoding tests `scale != 0` first, unlike most siblings which test
  // `scale == 0` — emitted sequences are equivalent.
  enc_class load_P_enc (mRegP dst, memory mem) %{
    MacroAssembler _masm(&cbuf);
    int dst = $dst$$reg;
    int base = $mem$$base;
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;
    relocInfo::relocType disp_reloc = $mem->disp_reloc();
    assert(disp_reloc == relocInfo::none, "cannot have disp");

    if( index != 0 ) {
      if ( UseLoongsonISA ) {
        if ( Assembler::is_simm(disp, 8) ) {
          // Indexed gsldx with an 8-bit immediate offset.
          if ( scale != 0 ) {
            __ dsll(AT, as_Register(index), scale);
            __ gsldx(as_Register(dst), as_Register(base), AT, disp);
          } else {
            __ gsldx(as_Register(dst), as_Register(base), as_Register(index), disp);
          }
        } else if ( Assembler::is_simm16(disp) ){
          // 16-bit displacement: fold base+index into AT, use plain ld.
          if ( scale != 0 ) {
            __ dsll(AT, as_Register(index), scale);
            __ daddu(AT, AT, as_Register(base));
          } else {
            __ daddu(AT, as_Register(index), as_Register(base));
          }
          __ ld(as_Register(dst), AT, disp);
        } else {
          // Large displacement: fold disp into the (scaled) index.
          if ( scale != 0 ) {
            __ dsll(AT, as_Register(index), scale);
            __ move(T9, disp);
            __ daddu(AT, AT, T9);
          } else {
            __ move(T9, disp);
            __ daddu(AT, as_Register(index), T9);
          }
          __ gsldx(as_Register(dst), as_Register(base), AT, 0);
        }
      } else { //not use loongson isa
        if (scale == 0) {
          __ daddu(AT, as_Register(base), as_Register(index));
        } else {
          __ dsll(AT, as_Register(index), scale);
          __ daddu(AT, as_Register(base), AT);
        }
        if( Assembler::is_simm16(disp) ) {
          __ ld(as_Register(dst), AT, disp);
        } else {
          __ set64(T9, disp);
          __ daddu(AT, AT, T9);
          __ ld(as_Register(dst), AT, 0);
        }
      }
    } else {
      // No index register: address is base + disp.
      if ( UseLoongsonISA ) {
        if ( Assembler::is_simm16(disp) ){
          __ ld(as_Register(dst), as_Register(base), disp);
        } else {
          __ set64(T9, disp);
          __ gsldx(as_Register(dst), as_Register(base), T9, 0);
        }
      } else { //not use loongson isa
        if( Assembler::is_simm16(disp) ) {
          __ ld(as_Register(dst), as_Register(base), disp);
        } else {
          __ set64(T9, disp);
          __ daddu(AT, as_Register(base), T9);
          __ ld(as_Register(dst), AT, 0);
        }
      }
    }
    // if( disp_reloc != relocInfo::none) __ ld(as_Register(dst), as_Register(dst), 0);
  %}
  // Store a 64-bit pointer from src to base + (index << scale) + disp.
  // Uses Loongson indexed store (gssdx) when available; otherwise folds
  // the address into AT and uses sd.  AT and T9 are scratch.
  enc_class store_P_reg_enc (memory mem, mRegP src) %{
    MacroAssembler _masm(&cbuf);
    int src = $src$$reg;
    int base = $mem$$base;
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;

    if( index != 0 ) {
      if ( UseLoongsonISA ){
        if ( Assembler::is_simm(disp, 8) ) {
          // Indexed gssdx with an 8-bit immediate offset.
          if ( scale == 0 ) {
            __ gssdx(as_Register(src), as_Register(base), as_Register(index), disp);
          } else {
            __ dsll(AT, as_Register(index), scale);
            __ gssdx(as_Register(src), as_Register(base), AT, disp);
          }
        } else if ( Assembler::is_simm16(disp) ) {
          // 16-bit displacement: fold base+index into AT, use plain sd.
          if ( scale == 0 ) {
            __ daddu(AT, as_Register(base), as_Register(index));
          } else {
            __ dsll(AT, as_Register(index), scale);
            __ daddu(AT, as_Register(base), AT);
          }
          __ sd(as_Register(src), AT, disp);
        } else {
          // Large displacement: fold disp into the (scaled) index.
          if ( scale == 0 ) {
            __ move(T9, disp);
            __ daddu(AT, as_Register(index), T9);
          } else {
            __ dsll(AT, as_Register(index), scale);
            __ move(T9, disp);
            __ daddu(AT, AT, T9);
          }
          __ gssdx(as_Register(src), as_Register(base), AT, 0);
        }
      } else { //not use loongson isa
        if (scale == 0) {
          __ daddu(AT, as_Register(base), as_Register(index));
        } else {
          __ dsll(AT, as_Register(index), scale);
          __ daddu(AT, as_Register(base), AT);
        }
        if( Assembler::is_simm16(disp) ) {
          __ sd(as_Register(src), AT, disp);
        } else {
          __ move(T9, disp);
          __ daddu(AT, AT, T9);
          __ sd(as_Register(src), AT, 0);
        }
      }
    } else {
      // No index register: address is base + disp.
      if ( UseLoongsonISA ) {
        if ( Assembler::is_simm16(disp) ) {
          __ sd(as_Register(src), as_Register(base), disp);
        } else {
          __ move(T9, disp);
          __ gssdx(as_Register(src), as_Register(base), T9, 0);
        }
      } else {
        if( Assembler::is_simm16(disp) ) {
          __ sd(as_Register(src), as_Register(base), disp);
        } else {
          __ move(T9, disp);
          __ daddu(AT, as_Register(base), T9);
          __ sd(as_Register(src), AT, 0);
        }
      }
    }
  %}
  // Store a narrow (compressed) oop from src: a 32-bit store (sw/gsswx)
  // to base + (index << scale) + disp.  Structure mirrors store_P_reg_enc.
  enc_class store_N_reg_enc (memory mem, mRegN src) %{
    MacroAssembler _masm(&cbuf);
    int src = $src$$reg;
    int base = $mem$$base;
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;

    if( index != 0 ) {
      if ( UseLoongsonISA ){
        if ( Assembler::is_simm(disp, 8) ) {
          // Indexed gsswx with an 8-bit immediate offset.
          if ( scale == 0 ) {
            __ gsswx(as_Register(src), as_Register(base), as_Register(index), disp);
          } else {
            __ dsll(AT, as_Register(index), scale);
            __ gsswx(as_Register(src), as_Register(base), AT, disp);
          }
        } else if ( Assembler::is_simm16(disp) ) {
          // 16-bit displacement: fold base+index into AT, use plain sw.
          if ( scale == 0 ) {
            __ daddu(AT, as_Register(base), as_Register(index));
          } else {
            __ dsll(AT, as_Register(index), scale);
            __ daddu(AT, as_Register(base), AT);
          }
          __ sw(as_Register(src), AT, disp);
        } else {
          // Large displacement: fold disp into the (scaled) index.
          if ( scale == 0 ) {
            __ move(T9, disp);
            __ daddu(AT, as_Register(index), T9);
          } else {
            __ dsll(AT, as_Register(index), scale);
            __ move(T9, disp);
            __ daddu(AT, AT, T9);
          }
          __ gsswx(as_Register(src), as_Register(base), AT, 0);
        }
      } else { //not use loongson isa
        if (scale == 0) {
          __ daddu(AT, as_Register(base), as_Register(index));
        } else {
          __ dsll(AT, as_Register(index), scale);
          __ daddu(AT, as_Register(base), AT);
        }
        if( Assembler::is_simm16(disp) ) {
          __ sw(as_Register(src), AT, disp);
        } else {
          __ move(T9, disp);
          __ daddu(AT, AT, T9);
          __ sw(as_Register(src), AT, 0);
        }
      }
    } else {
      // No index register: address is base + disp.
      if ( UseLoongsonISA ) {
        if ( Assembler::is_simm16(disp) ) {
          __ sw(as_Register(src), as_Register(base), disp);
        } else {
          __ move(T9, disp);
          __ gsswx(as_Register(src), as_Register(base), T9, 0);
        }
      } else {
        if( Assembler::is_simm16(disp) ) {
          __ sw(as_Register(src), as_Register(base), disp);
        } else {
          __ move(T9, disp);
          __ daddu(AT, as_Register(base), T9);
          __ sw(as_Register(src), AT, 0);
        }
      }
    }
  %}
  // Store a NULL pointer (64-bit zero via R0) to base + (index << scale) + disp.
  // Branches first on scale, then on displacement size; the Loongson gssdx
  // form is used when the displacement fits simm8 (or disp is in a register).
  enc_class store_P_immP0_enc (memory mem) %{
    MacroAssembler _masm(&cbuf);
    int base = $mem$$base;
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;

    if( index != 0 ) {
      if (scale == 0) {
        if( Assembler::is_simm16(disp) ) {
          if (UseLoongsonISA && Assembler::is_simm(disp, 8)) {
            __ gssdx(R0, as_Register(base), as_Register(index), disp);
          } else {
            __ daddu(AT, as_Register(base), as_Register(index));
            __ sd(R0, AT, disp);
          }
        } else {
          // Large displacement: materialize it in T9.
          __ daddu(AT, as_Register(base), as_Register(index));
          __ move(T9, disp);
          if(UseLoongsonISA) {
            __ gssdx(R0, AT, T9, 0);
          } else {
            __ daddu(AT, AT, T9);
            __ sd(R0, AT, 0);
          }
        }
      } else {
        // Scaled index: shift into AT first.
        __ dsll(AT, as_Register(index), scale);
        if( Assembler::is_simm16(disp) ) {
          if (UseLoongsonISA && Assembler::is_simm(disp, 8)) {
            __ gssdx(R0, as_Register(base), AT, disp);
          } else {
            __ daddu(AT, as_Register(base), AT);
            __ sd(R0, AT, disp);
          }
        } else {
          __ daddu(AT, as_Register(base), AT);
          __ move(T9, disp);
          if (UseLoongsonISA) {
            __ gssdx(R0, AT, T9, 0);
          } else {
            __ daddu(AT, AT, T9);
            __ sd(R0, AT, 0);
          }
        }
      }
    } else {
      // No index register: address is base + disp.
      if( Assembler::is_simm16(disp) ) {
        __ sd(R0, as_Register(base), disp);
      } else {
        __ move(T9, disp);
        if (UseLoongsonISA) {
          __ gssdx(R0, as_Register(base), T9, 0);
        } else {
          __ daddu(AT, as_Register(base), T9);
          __ sd(R0, AT, 0);
        }
      }
    }
  %}
  // Store an immediate 31-bit pointer constant to base + (index << scale) + disp.
  // Zero is stored from R0; a non-zero value is materialized in a scratch
  // register (T9/AT) first.  Plain sd only — no Loongson indexed form here.
  enc_class store_P_immP_enc (memory mem, immP31 src) %{
    MacroAssembler _masm(&cbuf);
    int base = $mem$$base;
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;
    long value = $src$$constant;

    if( index != 0 ) {
      // Fold base + (index << scale) into AT.
      if (scale == 0) {
        __ daddu(AT, as_Register(base), as_Register(index));
      } else {
        __ dsll(AT, as_Register(index), scale);
        __ daddu(AT, as_Register(base), AT);
      }
      if( Assembler::is_simm16(disp) ) {
        if (value == 0) {
          __ sd(R0, AT, disp);
        } else {
          __ move(T9, value);
          __ sd(T9, AT, disp);
        }
      } else {
        // T9 carries disp, then is re-used for the value.
        if (value == 0) {
          __ move(T9, disp);
          __ daddu(AT, AT, T9);
          __ sd(R0, AT, 0);
        } else {
          __ move(T9, disp);
          __ daddu(AT, AT, T9);
          __ move(T9, value);
          __ sd(T9, AT, 0);
        }
      }
    } else {
      // No index register: address is base + disp.
      if( Assembler::is_simm16(disp) ) {
        if (value == 0) {
          __ sd(R0, as_Register(base), disp);
        } else {
          __ move(AT, value);
          __ sd(AT, as_Register(base), disp);
        }
      } else {
        if (value == 0) {
          __ move(T9, disp);
          __ daddu(AT, as_Register(base), T9);
          __ sd(R0, AT, 0);
        } else {
          __ move(T9, disp);
          __ daddu(AT, as_Register(base), T9);
          __ move(T9, value);
          __ sd(T9, AT, 0);
        }
      }
    }
  %}
2967 enc_class storeImmN0_enc(memory mem, ImmN0 src) %{
2968 MacroAssembler _masm(&cbuf);
2969 int base = $mem$$base;
2970 int index = $mem$$index;
2971 int scale = $mem$$scale;
2972 int disp = $mem$$disp;
2974 if(index!=0){
2975 if (scale == 0) {
2976 __ daddu(AT, as_Register(base), as_Register(index));
2977 } else {
2978 __ dsll(AT, as_Register(index), scale);
2979 __ daddu(AT, as_Register(base), AT);
2980 }
2982 if( Assembler::is_simm16(disp) ) {
2983 __ sw(R0, AT, disp);
2984 } else {
2985 __ move(T9, disp);
2986 __ daddu(AT, AT, T9);
2987 __ sw(R0, AT, 0);
2988 }
2989 }
2990 else {
2991 if( Assembler::is_simm16(disp) ) {
2992 __ sw(R0, as_Register(base), disp);
2993 } else {
2994 __ move(T9, disp);
2995 __ daddu(AT, as_Register(base), T9);
2996 __ sw(R0, AT, 0);
2997 }
2998 }
2999 %}
3001 enc_class storeImmN_enc (memory mem, immN src) %{
3002 MacroAssembler _masm(&cbuf);
3003 int base = $mem$$base;
3004 int index = $mem$$index;
3005 int scale = $mem$$scale;
3006 int disp = $mem$$disp;
3007 long * value = (long *)$src$$constant;
3009 if (value == NULL) {
3010 guarantee(Assembler::is_simm16(disp), "FIXME: disp is not simm16!");
3011 if (index == 0) {
3012 __ sw(R0, as_Register(base), disp);
3013 } else {
3014 if (scale == 0) {
3015 __ daddu(AT, as_Register(base), as_Register(index));
3016 } else {
3017 __ dsll(AT, as_Register(index), scale);
3018 __ daddu(AT, as_Register(base), AT);
3019 }
3020 __ sw(R0, AT, disp);
3021 }
3023 return;
3024 }
3026 int oop_index = __ oop_recorder()->find_index((jobject)value);
3027 RelocationHolder rspec = oop_Relocation::spec(oop_index);
3029 if (index != 0) {
3030 if (scale == 0) {
3031 __ daddu(AT, as_Register(base), as_Register(index));
3032 } else {
3033 __ dsll(AT, as_Register(index), scale);
3034 __ daddu(AT, as_Register(base), AT);
3035 }
3036 if( Assembler::is_simm16(disp) ) {
3037 if(rspec.type() != relocInfo::none) {
3038 __ relocate(rspec, Assembler::narrow_oop_operand);
3039 __ patchable_set48(T9, oop_index);
3040 } else {
3041 __ set64(T9, oop_index);
3042 }
3043 __ sw(T9, AT, disp);
3044 } else {
3045 __ move(T9, disp);
3046 __ addu(AT, AT, T9);
3048 if(rspec.type() != relocInfo::none) {
3049 __ relocate(rspec, Assembler::narrow_oop_operand);
3050 __ patchable_set48(T9, oop_index);
3051 } else {
3052 __ set64(T9, oop_index);
3053 }
3054 __ sw(T9, AT, 0);
3055 }
3056 }
3057 else {
3058 if( Assembler::is_simm16(disp) ) {
3059 if($src->constant_reloc() != relocInfo::none) {
3060 __ relocate(rspec, Assembler::narrow_oop_operand);
3061 __ patchable_set48(T9, oop_index);
3062 } else {
3063 __ set64(T9, oop_index);
3064 }
3065 __ sw(T9, as_Register(base), disp);
3066 } else {
3067 __ move(T9, disp);
3068 __ daddu(AT, as_Register(base), T9);
3070 if($src->constant_reloc() != relocInfo::none){
3071 __ relocate(rspec, Assembler::narrow_oop_operand);
3072 __ patchable_set48(T9, oop_index);
3073 } else {
3074 __ set64(T9, oop_index);
3075 }
3076 __ sw(T9, AT, 0);
3077 }
3078 }
3079 %}
  // Store an immediate narrow (compressed) Klass pointer to
  // base + (index << scale) + disp.  The Klass is registered with the
  // OopRecorder; when a relocation is required, a patchable 48-bit constant
  // is emitted, otherwise set64 loads the encoded value directly.
  enc_class storeImmNKlass_enc (memory mem, immNKlass src) %{
    MacroAssembler _masm(&cbuf);

    assert (UseCompressedOops, "should only be used for compressed headers");
    assert (__ oop_recorder() != NULL, "this assembler needs an OopRecorder");

    int base = $mem$$base;
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;
    long value = $src$$constant;

    int klass_index = __ oop_recorder()->find_index((Klass*)value);
    RelocationHolder rspec = metadata_Relocation::spec(klass_index);
    long narrowp = Klass::encode_klass((Klass*)value);

    if(index!=0){
      // Fold base + (index << scale) into AT.
      if (scale == 0) {
        __ daddu(AT, as_Register(base), as_Register(index));
      } else {
        __ dsll(AT, as_Register(index), scale);
        __ daddu(AT, as_Register(base), AT);
      }

      if( Assembler::is_simm16(disp) ) {
        if(rspec.type() != relocInfo::none){
          // Patchable constant, fixed up via metadata relocation.
          __ relocate(rspec, Assembler::narrow_oop_operand);
          __ patchable_set48(T9, narrowp);
        } else {
          __ set64(T9, narrowp);
        }
        __ sw(T9, AT, disp);
      } else {
        __ move(T9, disp);
        __ daddu(AT, AT, T9);

        if(rspec.type() != relocInfo::none){
          __ relocate(rspec, Assembler::narrow_oop_operand);
          __ patchable_set48(T9, narrowp);
        } else {
          __ set64(T9, narrowp);
        }

        __ sw(T9, AT, 0);
      }
    } else {
      // No index register: address is base + disp.
      if( Assembler::is_simm16(disp) ) {
        if(rspec.type() != relocInfo::none){
          __ relocate(rspec, Assembler::narrow_oop_operand);
          __ patchable_set48(T9, narrowp);
        }
        else {
          __ set64(T9, narrowp);
        }
        __ sw(T9, as_Register(base), disp);
      } else {
        __ move(T9, disp);
        __ daddu(AT, as_Register(base), T9);

        if(rspec.type() != relocInfo::none){
          __ relocate(rspec, Assembler::narrow_oop_operand);
          __ patchable_set48(T9, narrowp);
        } else {
          __ set64(T9, narrowp);
        }
        __ sw(T9, AT, 0);
      }
    }
  %}
  // Load a 64-bit long into dst from base + (index << scale) + disp.
  // A dummy lb from the base register is emitted first so a NULL base
  // faults at a known pc (implicit null check).
  enc_class load_L_enc (mRegL dst, memory mem) %{
    MacroAssembler _masm(&cbuf);
    int base = $mem$$base;
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;
    Register dst_reg = as_Register($dst$$reg);

    // For implicit null check
    __ lb(AT, as_Register(base), 0);

    if( index != 0 ) {
      // Fold base + (index << scale) into AT.
      if (scale == 0) {
        __ daddu(AT, as_Register(base), as_Register(index));
      } else {
        __ dsll(AT, as_Register(index), scale);
        __ daddu(AT, as_Register(base), AT);
      }
      if( Assembler::is_simm16(disp) ) {
        __ ld(dst_reg, AT, disp);
      } else {
        __ move(T9, disp);
        __ daddu(AT, AT, T9);
        __ ld(dst_reg, AT, 0);
      }
    } else {
      // No index register: address is base + disp.
      if( Assembler::is_simm16(disp) ) {
        __ ld(dst_reg, as_Register(base), disp);
      } else {
        __ move(T9, disp);
        __ daddu(AT, as_Register(base), T9);
        __ ld(dst_reg, AT, 0);
      }
    }
  %}
  enc_class store_L_reg_enc (memory mem, mRegL src) %{
    // Store the 64-bit long in $src to memory operand $mem.
    // Mirrors load_L_enc's addressing: [base + index<<scale + disp] or
    // [base + disp], with AT as the address scratch and T9 used to
    // materialize a displacement that exceeds the signed 16-bit field.
    MacroAssembler _masm(&cbuf);
    int base = $mem$$base;
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;
    Register src_reg = as_Register($src$$reg);

    if( index != 0 ) {
      if (scale == 0) {
        __ daddu(AT, as_Register(base), as_Register(index));
      } else {
        __ dsll(AT, as_Register(index), scale);
        __ daddu(AT, as_Register(base), AT);
      }
      if( Assembler::is_simm16(disp) ) {
        __ sd(src_reg, AT, disp);
      } else {
        // disp too large for the 16-bit field: add it via T9.
        __ move(T9, disp);
        __ daddu(AT, AT, T9);
        __ sd(src_reg, AT, 0);
      }
    } else {
      if( Assembler::is_simm16(disp) ) {
        __ sd(src_reg, as_Register(base), disp);
      } else {
        __ move(T9, disp);
        __ daddu(AT, as_Register(base), T9);
        __ sd(src_reg, AT, 0);
      }
    }
  %}
3220 enc_class store_L_immL0_enc (memory mem, immL0 src) %{
3221 MacroAssembler _masm(&cbuf);
3222 int base = $mem$$base;
3223 int index = $mem$$index;
3224 int scale = $mem$$scale;
3225 int disp = $mem$$disp;
3227 if( index != 0 ) {
3228 // For implicit null check
3229 __ lb(AT, as_Register(base), 0);
3231 if (scale == 0) {
3232 __ daddu(AT, as_Register(base), as_Register(index));
3233 } else {
3234 __ dsll(AT, as_Register(index), scale);
3235 __ daddu(AT, as_Register(base), AT);
3236 }
3237 if( Assembler::is_simm16(disp) ) {
3238 __ sd(R0, AT, disp);
3239 } else {
3240 __ move(T9, disp);
3241 __ addu(AT, AT, T9);
3242 __ sd(R0, AT, 0);
3243 }
3244 } else {
3245 if( Assembler::is_simm16(disp) ) {
3246 __ sd(R0, as_Register(base), disp);
3247 } else {
3248 __ move(T9, disp);
3249 __ addu(AT, as_Register(base), T9);
3250 __ sd(R0, AT, 0);
3251 }
3252 }
3253 %}
3255 enc_class store_L_immL_enc (memory mem, immL src) %{
3256 MacroAssembler _masm(&cbuf);
3257 int base = $mem$$base;
3258 int index = $mem$$index;
3259 int scale = $mem$$scale;
3260 int disp = $mem$$disp;
3261 long imm = $src$$constant;
3263 if( index != 0 ) {
3264 if (scale == 0) {
3265 __ daddu(AT, as_Register(base), as_Register(index));
3266 } else {
3267 __ dsll(AT, as_Register(index), scale);
3268 __ daddu(AT, as_Register(base), AT);
3269 }
3270 if( Assembler::is_simm16(disp) ) {
3271 __ set64(T9, imm);
3272 __ sd(T9, AT, disp);
3273 } else {
3274 __ move(T9, disp);
3275 __ addu(AT, AT, T9);
3276 __ set64(T9, imm);
3277 __ sd(T9, AT, 0);
3278 }
3279 } else {
3280 if( Assembler::is_simm16(disp) ) {
3281 __ move(AT, as_Register(base));
3282 __ set64(T9, imm);
3283 __ sd(T9, AT, disp);
3284 } else {
3285 __ move(T9, disp);
3286 __ addu(AT, as_Register(base), T9);
3287 __ set64(T9, imm);
3288 __ sd(T9, AT, 0);
3289 }
3290 }
3291 %}
  enc_class load_F_enc (regF dst, memory mem) %{
    // Load a single-precision float from memory operand $mem into $dst.
    // When the Loongson extended ISA is available, the indexed FP load
    // gslwxc1 (base + index + small disp in one instruction) is preferred;
    // otherwise the address is composed in AT and a plain lwc1 is used.
    MacroAssembler _masm(&cbuf);
    int base = $mem$$base;
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;
    FloatRegister dst = $dst$$FloatRegister;

    if( index != 0 ) {
      if( Assembler::is_simm16(disp) ) {
        if( UseLoongsonISA && Assembler::is_simm(disp, 8) ) {
          // gslwxc1 takes only an 8-bit displacement.
          if (scale == 0) {
            __ gslwxc1(dst, as_Register(base), as_Register(index), disp);
          } else {
            __ dsll(AT, as_Register(index), scale);
            __ gslwxc1(dst, as_Register(base), AT, disp);
          }
        } else {
          if (scale == 0) {
            __ daddu(AT, as_Register(base), as_Register(index));
          } else {
            __ dsll(AT, as_Register(index), scale);
            __ daddu(AT, as_Register(base), AT);
          }
          __ lwc1(dst, AT, disp);
        }
      } else {
        // Displacement does not fit in 16 bits: materialize it in T9.
        if (scale == 0) {
          __ daddu(AT, as_Register(base), as_Register(index));
        } else {
          __ dsll(AT, as_Register(index), scale);
          __ daddu(AT, as_Register(base), AT);
        }
        __ move(T9, disp);
        if( UseLoongsonISA ) {
          __ gslwxc1(dst, AT, T9, 0);
        } else {
          __ daddu(AT, AT, T9);
          __ lwc1(dst, AT, 0);
        }
      }
    } else {
      if( Assembler::is_simm16(disp) ) {
        __ lwc1(dst, as_Register(base), disp);
      } else {
        __ move(T9, disp);
        if( UseLoongsonISA ) {
          __ gslwxc1(dst, as_Register(base), T9, 0);
        } else {
          __ daddu(AT, as_Register(base), T9);
          __ lwc1(dst, AT, 0);
        }
      }
    }
  %}
3349 enc_class store_F_reg_enc (memory mem, regF src) %{
3350 MacroAssembler _masm(&cbuf);
3351 int base = $mem$$base;
3352 int index = $mem$$index;
3353 int scale = $mem$$scale;
3354 int disp = $mem$$disp;
3355 FloatRegister src = $src$$FloatRegister;
3357 if( index != 0 ) {
3358 if( Assembler::is_simm16(disp) ) {
3359 if( UseLoongsonISA && Assembler::is_simm(disp, 8) ) {
3360 if (scale == 0) {
3361 __ gsswxc1(src, as_Register(base), as_Register(index), disp);
3362 } else {
3363 __ dsll(AT, as_Register(index), scale);
3364 __ gsswxc1(src, as_Register(base), AT, disp);
3365 }
3366 } else {
3367 if (scale == 0) {
3368 __ daddu(AT, as_Register(base), as_Register(index));
3369 } else {
3370 __ dsll(AT, as_Register(index), scale);
3371 __ daddu(AT, as_Register(base), AT);
3372 }
3373 __ swc1(src, AT, disp);
3374 }
3375 } else {
3376 if (scale == 0) {
3377 __ daddu(AT, as_Register(base), as_Register(index));
3378 } else {
3379 __ dsll(AT, as_Register(index), scale);
3380 __ daddu(AT, as_Register(base), AT);
3381 }
3382 __ move(T9, disp);
3383 if( UseLoongsonISA ) {
3384 __ gsswxc1(src, AT, T9, 0);
3385 } else {
3386 __ daddu(AT, AT, T9);
3387 __ swc1(src, AT, 0);
3388 }
3389 }
3390 } else {
3391 if( Assembler::is_simm16(disp) ) {
3392 __ swc1(src, as_Register(base), disp);
3393 } else {
3394 __ move(T9, disp);
3395 if( UseLoongsonISA ) {
3396 __ gslwxc1(src, as_Register(base), T9, 0);
3397 } else {
3398 __ daddu(AT, as_Register(base), T9);
3399 __ swc1(src, AT, 0);
3400 }
3401 }
3402 }
3403 %}
3405 enc_class load_D_enc (regD dst, memory mem) %{
3406 MacroAssembler _masm(&cbuf);
3407 int base = $mem$$base;
3408 int index = $mem$$index;
3409 int scale = $mem$$scale;
3410 int disp = $mem$$disp;
3411 FloatRegister dst_reg = as_FloatRegister($dst$$reg);
3413 if( index != 0 ) {
3414 if( Assembler::is_simm16(disp) ) {
3415 if( UseLoongsonISA && Assembler::is_simm(disp, 8) ) {
3416 if (scale == 0) {
3417 __ gsldxc1(dst_reg, as_Register(base), as_Register(index), disp);
3418 } else {
3419 __ dsll(AT, as_Register(index), scale);
3420 __ gsldxc1(dst_reg, as_Register(base), AT, disp);
3421 }
3422 } else {
3423 if (scale == 0) {
3424 __ daddu(AT, as_Register(base), as_Register(index));
3425 } else {
3426 __ dsll(AT, as_Register(index), scale);
3427 __ daddu(AT, as_Register(base), AT);
3428 }
3429 __ ldc1(dst_reg, AT, disp);
3430 }
3431 } else {
3432 if (scale == 0) {
3433 __ daddu(AT, as_Register(base), as_Register(index));
3434 } else {
3435 __ dsll(AT, as_Register(index), scale);
3436 __ daddu(AT, as_Register(base), AT);
3437 }
3438 __ move(T9, disp);
3439 if( UseLoongsonISA ) {
3440 __ gsldxc1(dst_reg, AT, T9, 0);
3441 } else {
3442 __ addu(AT, AT, T9);
3443 __ ldc1(dst_reg, AT, 0);
3444 }
3445 }
3446 } else {
3447 if( Assembler::is_simm16(disp) ) {
3448 __ ldc1(dst_reg, as_Register(base), disp);
3449 } else {
3450 __ move(T9, disp);
3451 if( UseLoongsonISA ) {
3452 __ gsldxc1(dst_reg, as_Register(base), T9, 0);
3453 } else {
3454 __ addu(AT, as_Register(base), T9);
3455 __ ldc1(dst_reg, AT, 0);
3456 }
3457 }
3458 }
3459 %}
3461 enc_class store_D_reg_enc (memory mem, regD src) %{
3462 MacroAssembler _masm(&cbuf);
3463 int base = $mem$$base;
3464 int index = $mem$$index;
3465 int scale = $mem$$scale;
3466 int disp = $mem$$disp;
3467 FloatRegister src_reg = as_FloatRegister($src$$reg);
3469 if( index != 0 ) {
3470 if( Assembler::is_simm16(disp) ) {
3471 if( UseLoongsonISA && Assembler::is_simm(disp, 8) ) {
3472 if (scale == 0) {
3473 __ gssdxc1(src_reg, as_Register(base), as_Register(index), disp);
3474 } else {
3475 __ dsll(AT, as_Register(index), scale);
3476 __ gssdxc1(src_reg, as_Register(base), AT, disp);
3477 }
3478 } else {
3479 if (scale == 0) {
3480 __ daddu(AT, as_Register(base), as_Register(index));
3481 } else {
3482 __ dsll(AT, as_Register(index), scale);
3483 __ daddu(AT, as_Register(base), AT);
3484 }
3485 __ sdc1(src_reg, AT, disp);
3486 }
3487 } else {
3488 if (scale == 0) {
3489 __ daddu(AT, as_Register(base), as_Register(index));
3490 } else {
3491 __ dsll(AT, as_Register(index), scale);
3492 __ daddu(AT, as_Register(base), AT);
3493 }
3494 __ move(T9, disp);
3495 if( UseLoongsonISA ) {
3496 __ gssdxc1(src_reg, AT, T9, 0);
3497 } else {
3498 __ addu(AT, AT, T9);
3499 __ sdc1(src_reg, AT, 0);
3500 }
3501 }
3502 } else {
3503 if( Assembler::is_simm16(disp) ) {
3504 __ sdc1(src_reg, as_Register(base), disp);
3505 } else {
3506 __ move(T9, disp);
3507 if( UseLoongsonISA ) {
3508 __ gssdxc1(src_reg, as_Register(base), T9, 0);
3509 } else {
3510 __ addu(AT, as_Register(base), T9);
3511 __ sdc1(src_reg, AT, 0);
3512 }
3513 }
3514 }
3515 %}
  enc_class Java_To_Runtime (method meth) %{ // CALL Java_To_Runtime, Java_To_Runtime_Leaf
    // Emit a patchable call to a runtime entry point, tagged with a
    // runtime_call relocation attached at the instruction starting address.
    MacroAssembler _masm(&cbuf);
    __ block_comment("Java_To_Runtime");
    // Mark the starting address so the relocation records this call site.
    cbuf.set_insts_mark();
    __ relocate(relocInfo::runtime_call_type);
    __ patchable_call((address)$meth$$method);
  %}
  enc_class Java_Static_Call (method meth) %{ // JAVA STATIC CALL
    // CALL to fixup routine.  Fixup routine uses ScopeDesc info to determine
    // who we intended to call.
    MacroAssembler _masm(&cbuf);
    cbuf.set_insts_mark();

    // Choose the relocation type: a runtime call when no resolved _method,
    // an optimized-virtual call when the compiler devirtualized it,
    // otherwise a plain static Java call.
    if ( !_method ) {
      __ relocate(relocInfo::runtime_call_type);
    } else if(_optimized_virtual) {
      __ relocate(relocInfo::opt_virtual_call_type);
    } else {
      __ relocate(relocInfo::static_call_type);
    }

    __ patchable_call((address)($meth$$method));
    if( _method ) { // Emit stub for static call
      emit_java_to_interp(cbuf);
    }
  %}
3548 /*
3549 * [Ref: LIR_Assembler::ic_call() ]
3550 */
3551 enc_class Java_Dynamic_Call (method meth) %{ // JAVA DYNAMIC CALL
3552 MacroAssembler _masm(&cbuf);
3553 __ block_comment("Java_Dynamic_Call");
3554 __ ic_call((address)$meth$$method);
3555 %}
  enc_class Set_Flags_After_Fast_Lock_Unlock(FlagsReg cr) %{
    // Materialize a flags value from AT:
    //   flags = 0           if AT == 0
    //   flags = 0xFFFFFFFF  otherwise
    // NOTE(review): assumes AT carries the outcome of the preceding
    // fast lock/unlock sequence -- confirm against those encodings.
    Register flags = $cr$$Register;
    Label L;

    MacroAssembler _masm(&cbuf);

    __ addu(flags, R0, R0);        // flags = 0
    __ beq(AT, R0, L);
    __ delayed()->nop();           // branch delay slot
    __ move(flags, 0xFFFFFFFF);    // AT != 0: flags = all ones
    __ bind(L);
  %}
  enc_class enc_PartialSubtypeCheck(mRegP result, mRegP sub, mRegP super, mRegI tmp) %{
    // Slow-path subtype check via check_klass_subtype_slow_path:
    // sets result = 0 when sub is a subtype of super, 1 on a miss.
    Register result = $result$$Register;
    Register sub = $sub$$Register;
    Register super = $super$$Register;
    Register length = $tmp$$Register;
    Register tmp = T9;
    Label miss;

    /* 2012/9/28 Jin: result may be the same as sub
     *    47c   B40: #  B21 B41 <- B20  Freq: 0.155379
     *    47c     partialSubtypeCheck result=S1, sub=S1, super=S3, length=S0
     *    4bc     mov   S2, NULL #@loadConP
     *    4c0     beq   S1, S2, B21 #@branchConP  P=0.999999 C=-1.000000
     */
    MacroAssembler _masm(&cbuf);
    Label done;
    __ check_klass_subtype_slow_path(sub, super, length, tmp,
                                     NULL, &miss,
                                     /*set_cond_codes:*/ true);
    /* 2013/7/22 Jin: Refer to X86_64's RDI: 0 means "is a subtype" */
    __ move(result, 0);
    __ b(done);
    __ nop();

    __ bind(miss);
    __ move(result, 1);
    __ bind(done);
  %}
3600 %}
3603 //---------MIPS FRAME--------------------------------------------------------------
3604 // Definition of frame structure and management information.
3605 //
3606 // S T A C K L A Y O U T Allocators stack-slot number
3607 // | (to get allocators register number
3608 // G Owned by | | v add SharedInfo::stack0)
3609 // r CALLER | |
3610 // o | +--------+ pad to even-align allocators stack-slot
3611 // w V | pad0 | numbers; owned by CALLER
3612 // t -----------+--------+----> Matcher::_in_arg_limit, unaligned
3613 // h ^ | in | 5
3614 // | | args | 4 Holes in incoming args owned by SELF
3615 // | | old | | 3
3616 // | | SP-+--------+----> Matcher::_old_SP, even aligned
3617 // v | | ret | 3 return address
3618 // Owned by +--------+
3619 // Self | pad2 | 2 pad to align old SP
3620 // | +--------+ 1
3621 // | | locks | 0
3622 // | +--------+----> SharedInfo::stack0, even aligned
3623 // | | pad1 | 11 pad to align new SP
3624 // | +--------+
3625 // | | | 10
3626 // | | spills | 9 spills
3627 // V | | 8 (pad0 slot for callee)
3628 // -----------+--------+----> Matcher::_out_arg_limit, unaligned
3629 // ^ | out | 7
3630 // | | args | 6 Holes in outgoing args owned by CALLEE
3631 // Owned by new | |
3632 // Callee SP-+--------+----> Matcher::_new_SP, even aligned
3633 // | |
3634 //
3635 // Note 1: Only region 8-11 is determined by the allocator. Region 0-5 is
3636 // known from SELF's arguments and the Java calling convention.
3637 // Region 6-7 is determined per call site.
3638 // Note 2: If the calling convention leaves holes in the incoming argument
3639 // area, those holes are owned by SELF. Holes in the outgoing area
// are owned by the CALLEE. Holes should not be necessary in the
3641 // incoming area, as the Java calling convention is completely under
3642 // the control of the AD file. Doubles can be sorted and packed to
// avoid holes. Holes in the outgoing arguments may be necessary for
3644 // varargs C calling conventions.
3645 // Note 3: Region 0-3 is even aligned, with pad2 as needed. Region 3-5 is
3646 // even aligned with pad0 as needed.
3647 // Region 6 is even aligned. Region 6-7 is NOT even aligned;
3648 // region 6-11 is even aligned; it may be padded out more so that
3649 // the region from SP to FP meets the minimum stack alignment.
3650 // Note 4: For I2C adapters, the incoming FP may not meet the minimum stack
3651 // alignment. Region 11, pad1, may be dynamically extended so that
3652 // SP meets the minimum alignment.
frame %{
  // The stack grows toward lower addresses.
  stack_direction(TOWARDS_LOW);

  // These two registers define part of the calling convention
  // between compiled code and the interpreter.
  // SEE StartI2CNode::calling_convention & StartC2INode::calling_convention
  // & StartOSRNode::calling_convention for more information.

  inline_cache_reg(T1);                // Inline Cache Register
  interpreter_method_oop_reg(S3);      // Method Oop Register when calling interpreter
  /*
  inline_cache_reg(T1);                // Inline Cache Register or methodOop for I2C
  interpreter_arg_ptr_reg(A0);         // Argument pointer for I2C adapters
  */

  // Optional: name the operand used by cisc-spilling to access [stack_pointer + offset]
  cisc_spilling_operand_name(indOffset32);

  // Number of stack slots consumed by locking an object
  // (generates Compile::sync_stack_slots)
#ifdef _LP64
  sync_stack_slots(2);
#else
  sync_stack_slots(1);
#endif

  frame_pointer(SP);

  // Interpreter stores its frame pointer in a register which is
  // stored to the stack by I2CAdaptors.
  // I2CAdaptors convert from interpreted java to compiled java.
  interpreter_frame_pointer(FP);

  // generates Matcher::stack_alignment
  stack_alignment(StackAlignmentInBytes);

  // Number of stack slots between incoming argument block and the start of
  // a new frame.  The PROLOG must add this many slots to the stack.  The
  // EPILOG must remove this many slots.
  // (generates Matcher::in_preserve_stack_slots)
  //in_preserve_stack_slots(VerifyStackAtCalls + 2); //Now VerifyStackAtCalls is defined as false ! Leave one stack slot for ra and fp
  in_preserve_stack_slots(4); //Now VerifyStackAtCalls is defined as false ! Leave two stack slots for ra and fp

  // Number of outgoing stack slots killed above the out_preserve_stack_slots
  // for calls to C.  Supports the var-args backing area for register parms.
  varargs_C_out_slots_killed(0);

  // The after-PROLOG location of the return address.  Location of
  // return address specifies a type (REG or STACK) and a number
  // representing the register number (i.e. - use a register name) or
  // stack slot.
  // Ret Addr is on stack in slot 0 if no locks or verification or alignment.
  // Otherwise, it is above the locks and verification slot and alignment word
  //return_addr(STACK -1+ round_to(1+VerifyStackAtCalls+Compile::current()->sync()*Compile::current()->sync_stack_slots(),WordsPerLong));
  return_addr(REG RA);

  // Body of function which returns an integer array locating
  // arguments either in registers or in stack slots.  Passed an array
  // of ideal registers called "sig" and a "length" count.  Stack-slot
  // offsets are based on outgoing arguments, i.e. a CALLER setting up
  // arguments for a CALLEE.  Incoming stack arguments are
  // automatically biased by the preserve_stack_slots field above.
  // (generates Matcher::calling_convention(OptoRegPair *sig, uint length, bool is_outgoing);
  //  StartNode::calling_convention calls this.)
  calling_convention %{
    SharedRuntime::java_calling_convention(sig_bt, regs, length, false);
  %}

  // Same shape as above, but for calls from compiled code into native (C)
  // code.  SEE CallRuntimeNode::calling_convention for more information.
  c_calling_convention %{
    (void) SharedRuntime::c_calling_convention(sig_bt, regs, /*regs2=*/NULL, length);
  %}

  // Location of C & interpreter return values.
  // Register(s) contain(s) return value for Op_StartI2C and Op_StartOSR.
  // SEE Matcher::match.
  c_return_value %{
    assert( ideal_reg >= Op_RegI && ideal_reg <= Op_RegL, "only return normal values" );
    /* -- , -- , Op_RegN, Op_RegI, Op_RegP, Op_RegF, Op_RegD, Op_RegL */
    static int lo[Op_RegL+1] = { 0, 0, V0_num, V0_num, V0_num, F0_num, F0_num, V0_num };
    static int hi[Op_RegL+1] = { 0, 0, OptoReg::Bad, OptoReg::Bad, V0_H_num, OptoReg::Bad, F0_H_num, V0_H_num };
    return OptoRegPair(hi[ideal_reg],lo[ideal_reg]);
  %}

  // Location of compiled Java return values.
  // Register(s) contain(s) return value for Op_StartC2I and Op_Start.
  // SEE Matcher::match.
  return_value %{
    assert( ideal_reg >= Op_RegI && ideal_reg <= Op_RegL, "only return normal values" );
    /* -- , -- , Op_RegN, Op_RegI, Op_RegP, Op_RegF, Op_RegD, Op_RegL */
    static int lo[Op_RegL+1] = { 0, 0, V0_num, V0_num, V0_num, F0_num, F0_num, V0_num };
    static int hi[Op_RegL+1] = { 0, 0, OptoReg::Bad, OptoReg::Bad, V0_H_num, OptoReg::Bad, F0_H_num, V0_H_num};
    return OptoRegPair(hi[ideal_reg],lo[ideal_reg]);
  %}

%}
//----------ATTRIBUTES---------------------------------------------------------
//----------Operand Attributes-------------------------------------------------
op_attrib op_cost(0);            // Required cost attribute

//----------Instruction Attributes---------------------------------------------
ins_attrib ins_cost(100);        // Required cost attribute
ins_attrib ins_size(32);         // Required size attribute (in bits)
ins_attrib ins_pc_relative(0);   // Required PC Relative flag
ins_attrib ins_short_branch(0);  // Required flag: is this instruction a
                                 // non-matching short branch variant of some
                                 // long branch?
ins_attrib ins_alignment(4);     // Required alignment attribute (must be a power of 2)
                                 // specifies the alignment that some part of the instruction (not
                                 // necessarily the start) requires.  If > 1, a compute_padding()
                                 // function must be provided for the instruction
3786 //----------OPERANDS-----------------------------------------------------------
3787 // Operand definitions must precede instruction definitions for correct parsing
3788 // in the ADLC because operands constitute user defined types which are used in
3789 // instruction definitions.
// Vectors
// 64-bit vector operand, allocated in the double FP register class.
operand vecD() %{
  constraint(ALLOC_IN_RC(dbl_reg));
  match(VecD);

  format %{ %}
  interface(REG_INTER);
%}

// Flags register, used as output of compare instructions
operand FlagsReg() %{
  constraint(ALLOC_IN_RC(mips_flags));
  match(RegFlags);

  format %{ "EFLAGS" %}
  interface(REG_INTER);
%}
//----------Simple Operands----------------------------------------------------
//TODO: Should we need to define some more special immediate number ?
// Immediate Operands
// Integer Immediate
operand immI() %{
  match(ConI);
  //TODO: should not match immI8 here LEE
  match(immI8);

  op_cost(20);
  format %{ %}
  interface(CONST_INTER);
%}

// Long Immediate 8-bit
operand immL8()
%{
  predicate(-0x80L <= n->get_long() && n->get_long() < 0x80L);
  match(ConL);

  op_cost(5);
  format %{ %}
  interface(CONST_INTER);
%}

// Constant for test vs zero
operand immI0() %{
  predicate(n->get_int() == 0);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Constant for increment
operand immI1() %{
  predicate(n->get_int() == 1);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Constant for decrement
operand immI_M1() %{
  predicate(n->get_int() == -1);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Integer maximum (2^31 - 1)
operand immI_MaxI() %{
  predicate(n->get_int() == 2147483647);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
// Valid scale values for addressing modes
operand immI2() %{
  predicate(0 <= n->get_int() && (n->get_int() <= 3));
  match(ConI);

  format %{ %}
  interface(CONST_INTER);
%}

// 8-bit signed integer immediate
operand immI8() %{
  predicate((-128 <= n->get_int()) && (n->get_int() <= 127));
  match(ConI);

  op_cost(5);
  format %{ %}
  interface(CONST_INTER);
%}

// 16-bit signed integer immediate
operand immI16() %{
  predicate((-32768 <= n->get_int()) && (n->get_int() <= 32767));
  match(ConI);

  op_cost(10);
  format %{ %}
  interface(CONST_INTER);
%}

// Constant for long shifts
operand immI_32() %{
  predicate( n->get_int() == 32 );
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
operand immI_63() %{
  predicate( n->get_int() == 63 );
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Shift amount in the 32-bit range [0, 31]
operand immI_0_31() %{
  predicate( n->get_int() >= 0 && n->get_int() <= 31 );
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Operand for non-negative integer mask
operand immI_nonneg_mask() %{
  predicate( (n->get_int() >= 0) && (Assembler::is_int_mask(n->get_int()) != -1) );
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Shift amount in the 64-bit range [32, 63]
operand immI_32_63() %{
  predicate( n->get_int() >= 32 && n->get_int() <= 63 );
  match(ConI);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}

// 16-bit immediate whose negation also fits in a signed 16-bit field
operand immI16_sub() %{
  predicate((-32767 <= n->get_int()) && (n->get_int() <= 32768));
  match(ConI);

  op_cost(10);
  format %{ %}
  interface(CONST_INTER);
%}

operand immI_0_32767() %{
  predicate( n->get_int() >= 0 && n->get_int() <= 32767 );
  match(ConI);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}

operand immI_0_65535() %{
  predicate( n->get_int() >= 0 && n->get_int() <= 65535 );
  match(ConI);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}
operand immI_1() %{
  predicate( n->get_int() == 1 );
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

operand immI_2() %{
  predicate( n->get_int() == 2 );
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

operand immI_3() %{
  predicate( n->get_int() == 3 );
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

operand immI_7() %{
  predicate( n->get_int() == 7 );
  match(ConI);

  format %{ %}
  interface(CONST_INTER);
%}

// Immediates for special shifts (sign extend)

// Constants for increment
operand immI_16() %{
  predicate( n->get_int() == 16 );
  match(ConI);

  format %{ %}
  interface(CONST_INTER);
%}

operand immI_24() %{
  predicate( n->get_int() == 24 );
  match(ConI);

  format %{ %}
  interface(CONST_INTER);
%}

// Constant for byte-wide masking
operand immI_255() %{
  predicate( n->get_int() == 255 );
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Constant for short-wide (16-bit) masking
operand immI_65535() %{
  predicate( n->get_int() == 65535 );
  match(ConI);

  op_cost(5);
  format %{ %}
  interface(CONST_INTER);
%}

operand immI_65536() %{
  predicate( n->get_int() == 65536 );
  match(ConI);

  op_cost(5);
  format %{ %}
  interface(CONST_INTER);
%}

operand immI_M65536() %{
  predicate( n->get_int() == -65536 );
  match(ConI);

  op_cost(5);
  format %{ %}
  interface(CONST_INTER);
%}
// Pointer Immediate
operand immP() %{
  match(ConP);

  op_cost(10);
  format %{ %}
  interface(CONST_INTER);
%}

// Pointer immediate with no relocation that fits in the low 31 bits
operand immP31()
%{
  predicate(n->as_Type()->type()->reloc() == relocInfo::none
            && (n->get_ptr() >> 31) == 0);
  match(ConP);

  op_cost(5);
  format %{ %}
  interface(CONST_INTER);
%}

// NULL Pointer Immediate
operand immP0() %{
  predicate( n->get_ptr() == 0 );
  match(ConP);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}

// Pointer Immediate: 64-bit
operand immP_set() %{
  match(ConP);

  op_cost(5);
  // formats are generated automatically for constants and base registers
  format %{ %}
  interface(CONST_INTER);
%}

// Pointer Immediate: 64-bit
// Matches an oop, or a pointer expensive to materialize
// (insts_for_set64 > 3); complement of immP_no_oop_cheap.
operand immP_load() %{
  predicate(n->bottom_type()->isa_oop_ptr() || (MacroAssembler::insts_for_set64(n->get_ptr()) > 3));
  match(ConP);

  op_cost(5);
  // formats are generated automatically for constants and base registers
  format %{ %}
  interface(CONST_INTER);
%}

// Pointer Immediate: 64-bit
// Non-oop pointer cheap to materialize (insts_for_set64 <= 3).
operand immP_no_oop_cheap() %{
  predicate(!n->bottom_type()->isa_oop_ptr() && (MacroAssembler::insts_for_set64(n->get_ptr()) <= 3));
  match(ConP);

  op_cost(5);
  // formats are generated automatically for constants and base registers
  format %{ %}
  interface(CONST_INTER);
%}

// Pointer for polling page
operand immP_poll() %{
  predicate(n->get_ptr() != 0 && n->get_ptr() == (intptr_t)os::get_polling_page());
  match(ConP);
  op_cost(5);

  format %{ %}
  interface(CONST_INTER);
%}

// Narrow (compressed) Pointer Immediate
operand immN() %{
  match(ConN);

  op_cost(10);
  format %{ %}
  interface(CONST_INTER);
%}

// Narrow (compressed) Klass Immediate
operand immNKlass() %{
  match(ConNKlass);

  op_cost(10);
  format %{ %}
  interface(CONST_INTER);
%}

// NULL narrow Pointer Immediate
operand immN0() %{
  predicate(n->get_narrowcon() == 0);
  match(ConN);

  op_cost(5);
  format %{ %}
  interface(CONST_INTER);
%}
// Long Immediate
operand immL() %{
  match(ConL);

  op_cost(20);
  format %{ %}
  interface(CONST_INTER);
%}

// Long Immediate zero
operand immL0() %{
  predicate( n->get_long() == 0L );
  match(ConL);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}

operand immL7() %{
  predicate( n->get_long() == 7L );
  match(ConL);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}

operand immL_M1() %{
  predicate( n->get_long() == -1L );
  match(ConL);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}

// mask with bit 0..2 zero (-8 = ...11111000)
operand immL_M8() %{
  predicate( n->get_long() == -8L );
  match(ConL);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}

// mask with bit 2 zero
operand immL_M5() %{
  predicate( n->get_long() == -5L );
  match(ConL);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}

// mask with bit 1..2 zero
operand immL_M7() %{
  predicate( n->get_long() == -7L );
  match(ConL);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}

// mask with bit 0..1 zero
operand immL_M4() %{
  predicate( n->get_long() == -4L );
  match(ConL);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}

// mask with bit 3..6 zero
operand immL_M121() %{
  predicate( n->get_long() == -121L );
  match(ConL);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}

// Long immediate from 0 to 127.
// Used for a shorter form of long mul by 10.
operand immL_127() %{
  predicate((0 <= n->get_long()) && (n->get_long() <= 127));
  match(ConL);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}

// Operand for non-negative long mask
operand immL_nonneg_mask() %{
  predicate( (n->get_long() >= 0) && (Assembler::is_jlong_mask(n->get_long()) != -1) );
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
operand immL_0_65535() %{
  predicate( n->get_long() >= 0 && n->get_long() <= 65535 );
  match(ConL);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}

// Long Immediate: cheap (materialize in <= 3 instructions)
operand immL_cheap() %{
  predicate(MacroAssembler::insts_for_set64(n->get_long()) <= 3);
  match(ConL);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}

// Long Immediate: expensive (materialize in > 3 instructions)
operand immL_expensive() %{
  predicate(MacroAssembler::insts_for_set64(n->get_long()) > 3);
  match(ConL);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}

// 16-bit signed long immediate
operand immL16() %{
  predicate((-32768 <= n->get_long()) && (n->get_long() <= 32767));
  match(ConL);

  op_cost(10);
  format %{ %}
  interface(CONST_INTER);
%}

// Long immediate whose negation also fits in a signed 16-bit field
operand immL16_sub() %{
  predicate((-32767 <= n->get_long()) && (n->get_long() <= 32768));
  match(ConL);

  op_cost(10);
  format %{ %}
  interface(CONST_INTER);
%}

// Long Immediate: low 32-bit mask
operand immL_32bits() %{
  predicate(n->get_long() == 0xFFFFFFFFL);
  match(ConL);
  op_cost(20);

  format %{ %}
  interface(CONST_INTER);
%}

// Long Immediate 32-bit signed
operand immL32()
%{
  predicate(n->get_long() == (int) (n->get_long()));
  match(ConL);

  op_cost(15);
  format %{ %}
  interface(CONST_INTER);
%}
// single-precision floating-point zero (compared bit-wise via jint_cast,
// so -0.0f does not match)
operand immF0() %{
  predicate(jint_cast(n->getf()) == 0);
  match(ConF);

  op_cost(5);
  format %{ %}
  interface(CONST_INTER);
%}

// single-precision floating-point immediate
operand immF() %{
  match(ConF);

  op_cost(20);
  format %{ %}
  interface(CONST_INTER);
%}

// double-precision floating-point zero (bit-wise comparison via jlong_cast)
operand immD0() %{
  predicate(jlong_cast(n->getd()) == 0);
  match(ConD);

  op_cost(5);
  format %{ %}
  interface(CONST_INTER);
%}

// double-precision floating-point immediate
operand immD() %{
  match(ConD);

  op_cost(20);
  format %{ %}
  interface(CONST_INTER);
%}
// Register Operands
// Integer Register
operand mRegI() %{
  constraint(ALLOC_IN_RC(int_reg));
  match(RegI);

  format %{ %}
  interface(REG_INTER);
%}

// Integer register, excluding the registers of the no_Ax_int_reg class
operand no_Ax_mRegI() %{
  constraint(ALLOC_IN_RC(no_Ax_int_reg));
  match(RegI);
  match(mRegI);

  format %{ %}
  interface(REG_INTER);
%}

// Fixed-register operands S0..S7: each pins its value to one specific
// saved register so instructions can name an exact register.
operand mS0RegI() %{
  constraint(ALLOC_IN_RC(s0_reg));
  match(RegI);
  match(mRegI);

  format %{ "S0" %}
  interface(REG_INTER);
%}

operand mS1RegI() %{
  constraint(ALLOC_IN_RC(s1_reg));
  match(RegI);
  match(mRegI);

  format %{ "S1" %}
  interface(REG_INTER);
%}

operand mS2RegI() %{
  constraint(ALLOC_IN_RC(s2_reg));
  match(RegI);
  match(mRegI);

  format %{ "S2" %}
  interface(REG_INTER);
%}

operand mS3RegI() %{
  constraint(ALLOC_IN_RC(s3_reg));
  match(RegI);
  match(mRegI);

  format %{ "S3" %}
  interface(REG_INTER);
%}

operand mS4RegI() %{
  constraint(ALLOC_IN_RC(s4_reg));
  match(RegI);
  match(mRegI);

  format %{ "S4" %}
  interface(REG_INTER);
%}

operand mS5RegI() %{
  constraint(ALLOC_IN_RC(s5_reg));
  match(RegI);
  match(mRegI);

  format %{ "S5" %}
  interface(REG_INTER);
%}

operand mS6RegI() %{
  constraint(ALLOC_IN_RC(s6_reg));
  match(RegI);
  match(mRegI);

  format %{ "S6" %}
  interface(REG_INTER);
%}

operand mS7RegI() %{
  constraint(ALLOC_IN_RC(s7_reg));
  match(RegI);
  match(mRegI);

  format %{ "S7" %}
  interface(REG_INTER);
%}
4471 operand mT0RegI() %{
4472 constraint(ALLOC_IN_RC(t0_reg));
4473 match(RegI);
4474 match(mRegI);
4476 format %{ "T0" %}
4477 interface(REG_INTER);
4478 %}
4480 operand mT1RegI() %{
4481 constraint(ALLOC_IN_RC(t1_reg));
4482 match(RegI);
4483 match(mRegI);
4485 format %{ "T1" %}
4486 interface(REG_INTER);
4487 %}
4489 operand mT2RegI() %{
4490 constraint(ALLOC_IN_RC(t2_reg));
4491 match(RegI);
4492 match(mRegI);
4494 format %{ "T2" %}
4495 interface(REG_INTER);
4496 %}
4498 operand mT3RegI() %{
4499 constraint(ALLOC_IN_RC(t3_reg));
4500 match(RegI);
4501 match(mRegI);
4503 format %{ "T3" %}
4504 interface(REG_INTER);
4505 %}
4507 operand mT8RegI() %{
4508 constraint(ALLOC_IN_RC(t8_reg));
4509 match(RegI);
4510 match(mRegI);
4512 format %{ "T8" %}
4513 interface(REG_INTER);
4514 %}
4516 operand mT9RegI() %{
4517 constraint(ALLOC_IN_RC(t9_reg));
4518 match(RegI);
4519 match(mRegI);
4521 format %{ "T9" %}
4522 interface(REG_INTER);
4523 %}
4525 operand mA0RegI() %{
4526 constraint(ALLOC_IN_RC(a0_reg));
4527 match(RegI);
4528 match(mRegI);
4530 format %{ "A0" %}
4531 interface(REG_INTER);
4532 %}
4534 operand mA1RegI() %{
4535 constraint(ALLOC_IN_RC(a1_reg));
4536 match(RegI);
4537 match(mRegI);
4539 format %{ "A1" %}
4540 interface(REG_INTER);
4541 %}
4543 operand mA2RegI() %{
4544 constraint(ALLOC_IN_RC(a2_reg));
4545 match(RegI);
4546 match(mRegI);
4548 format %{ "A2" %}
4549 interface(REG_INTER);
4550 %}
4552 operand mA3RegI() %{
4553 constraint(ALLOC_IN_RC(a3_reg));
4554 match(RegI);
4555 match(mRegI);
4557 format %{ "A3" %}
4558 interface(REG_INTER);
4559 %}
4561 operand mA4RegI() %{
4562 constraint(ALLOC_IN_RC(a4_reg));
4563 match(RegI);
4564 match(mRegI);
4566 format %{ "A4" %}
4567 interface(REG_INTER);
4568 %}
4570 operand mA5RegI() %{
4571 constraint(ALLOC_IN_RC(a5_reg));
4572 match(RegI);
4573 match(mRegI);
4575 format %{ "A5" %}
4576 interface(REG_INTER);
4577 %}
4579 operand mA6RegI() %{
4580 constraint(ALLOC_IN_RC(a6_reg));
4581 match(RegI);
4582 match(mRegI);
4584 format %{ "A6" %}
4585 interface(REG_INTER);
4586 %}
4588 operand mA7RegI() %{
4589 constraint(ALLOC_IN_RC(a7_reg));
4590 match(RegI);
4591 match(mRegI);
4593 format %{ "A7" %}
4594 interface(REG_INTER);
4595 %}
4597 operand mV0RegI() %{
4598 constraint(ALLOC_IN_RC(v0_reg));
4599 match(RegI);
4600 match(mRegI);
4602 format %{ "V0" %}
4603 interface(REG_INTER);
4604 %}
4606 operand mV1RegI() %{
4607 constraint(ALLOC_IN_RC(v1_reg));
4608 match(RegI);
4609 match(mRegI);
4611 format %{ "V1" %}
4612 interface(REG_INTER);
4613 %}
4615 operand mRegN() %{
4616 constraint(ALLOC_IN_RC(int_reg));
4617 match(RegN);
4619 format %{ %}
4620 interface(REG_INTER);
4621 %}
4623 operand t0_RegN() %{
4624 constraint(ALLOC_IN_RC(t0_reg));
4625 match(RegN);
4626 match(mRegN);
4628 format %{ %}
4629 interface(REG_INTER);
4630 %}
4632 operand t1_RegN() %{
4633 constraint(ALLOC_IN_RC(t1_reg));
4634 match(RegN);
4635 match(mRegN);
4637 format %{ %}
4638 interface(REG_INTER);
4639 %}
4641 operand t2_RegN() %{
4642 constraint(ALLOC_IN_RC(t2_reg));
4643 match(RegN);
4644 match(mRegN);
4646 format %{ %}
4647 interface(REG_INTER);
4648 %}
4650 operand t3_RegN() %{
4651 constraint(ALLOC_IN_RC(t3_reg));
4652 match(RegN);
4653 match(mRegN);
4655 format %{ %}
4656 interface(REG_INTER);
4657 %}
4659 operand t8_RegN() %{
4660 constraint(ALLOC_IN_RC(t8_reg));
4661 match(RegN);
4662 match(mRegN);
4664 format %{ %}
4665 interface(REG_INTER);
4666 %}
4668 operand t9_RegN() %{
4669 constraint(ALLOC_IN_RC(t9_reg));
4670 match(RegN);
4671 match(mRegN);
4673 format %{ %}
4674 interface(REG_INTER);
4675 %}
4677 operand a0_RegN() %{
4678 constraint(ALLOC_IN_RC(a0_reg));
4679 match(RegN);
4680 match(mRegN);
4682 format %{ %}
4683 interface(REG_INTER);
4684 %}
4686 operand a1_RegN() %{
4687 constraint(ALLOC_IN_RC(a1_reg));
4688 match(RegN);
4689 match(mRegN);
4691 format %{ %}
4692 interface(REG_INTER);
4693 %}
4695 operand a2_RegN() %{
4696 constraint(ALLOC_IN_RC(a2_reg));
4697 match(RegN);
4698 match(mRegN);
4700 format %{ %}
4701 interface(REG_INTER);
4702 %}
4704 operand a3_RegN() %{
4705 constraint(ALLOC_IN_RC(a3_reg));
4706 match(RegN);
4707 match(mRegN);
4709 format %{ %}
4710 interface(REG_INTER);
4711 %}
4713 operand a4_RegN() %{
4714 constraint(ALLOC_IN_RC(a4_reg));
4715 match(RegN);
4716 match(mRegN);
4718 format %{ %}
4719 interface(REG_INTER);
4720 %}
4722 operand a5_RegN() %{
4723 constraint(ALLOC_IN_RC(a5_reg));
4724 match(RegN);
4725 match(mRegN);
4727 format %{ %}
4728 interface(REG_INTER);
4729 %}
4731 operand a6_RegN() %{
4732 constraint(ALLOC_IN_RC(a6_reg));
4733 match(RegN);
4734 match(mRegN);
4736 format %{ %}
4737 interface(REG_INTER);
4738 %}
4740 operand a7_RegN() %{
4741 constraint(ALLOC_IN_RC(a7_reg));
4742 match(RegN);
4743 match(mRegN);
4745 format %{ %}
4746 interface(REG_INTER);
4747 %}
4749 operand s0_RegN() %{
4750 constraint(ALLOC_IN_RC(s0_reg));
4751 match(RegN);
4752 match(mRegN);
4754 format %{ %}
4755 interface(REG_INTER);
4756 %}
4758 operand s1_RegN() %{
4759 constraint(ALLOC_IN_RC(s1_reg));
4760 match(RegN);
4761 match(mRegN);
4763 format %{ %}
4764 interface(REG_INTER);
4765 %}
4767 operand s2_RegN() %{
4768 constraint(ALLOC_IN_RC(s2_reg));
4769 match(RegN);
4770 match(mRegN);
4772 format %{ %}
4773 interface(REG_INTER);
4774 %}
4776 operand s3_RegN() %{
4777 constraint(ALLOC_IN_RC(s3_reg));
4778 match(RegN);
4779 match(mRegN);
4781 format %{ %}
4782 interface(REG_INTER);
4783 %}
4785 operand s4_RegN() %{
4786 constraint(ALLOC_IN_RC(s4_reg));
4787 match(RegN);
4788 match(mRegN);
4790 format %{ %}
4791 interface(REG_INTER);
4792 %}
4794 operand s5_RegN() %{
4795 constraint(ALLOC_IN_RC(s5_reg));
4796 match(RegN);
4797 match(mRegN);
4799 format %{ %}
4800 interface(REG_INTER);
4801 %}
4803 operand s6_RegN() %{
4804 constraint(ALLOC_IN_RC(s6_reg));
4805 match(RegN);
4806 match(mRegN);
4808 format %{ %}
4809 interface(REG_INTER);
4810 %}
4812 operand s7_RegN() %{
4813 constraint(ALLOC_IN_RC(s7_reg));
4814 match(RegN);
4815 match(mRegN);
4817 format %{ %}
4818 interface(REG_INTER);
4819 %}
4821 operand v0_RegN() %{
4822 constraint(ALLOC_IN_RC(v0_reg));
4823 match(RegN);
4824 match(mRegN);
4826 format %{ %}
4827 interface(REG_INTER);
4828 %}
4830 operand v1_RegN() %{
4831 constraint(ALLOC_IN_RC(v1_reg));
4832 match(RegN);
4833 match(mRegN);
4835 format %{ %}
4836 interface(REG_INTER);
4837 %}
4839 // Pointer Register
4840 operand mRegP() %{
4841 constraint(ALLOC_IN_RC(p_reg));
4842 match(RegP);
4844 format %{ %}
4845 interface(REG_INTER);
4846 %}
4848 operand no_T8_mRegP() %{
// Pointer register operand excluding T8 — presumably because some
// rules use T8 as scratch; note t8_RegP below is the only per-register
// pointer operand that does not also match this class.
4849 constraint(ALLOC_IN_RC(no_T8_p_reg));
4850 match(RegP);
4851 match(mRegP);
4853 format %{ %}
4854 interface(REG_INTER);
4855 %}
4857 operand s0_RegP()
4858 %{
4859 constraint(ALLOC_IN_RC(s0_long_reg));
4860 match(RegP);
4861 match(mRegP);
4862 match(no_T8_mRegP);
4864 format %{ %}
4865 interface(REG_INTER);
4866 %}
4868 operand s1_RegP()
4869 %{
4870 constraint(ALLOC_IN_RC(s1_long_reg));
4871 match(RegP);
4872 match(mRegP);
4873 match(no_T8_mRegP);
4875 format %{ %}
4876 interface(REG_INTER);
4877 %}
4879 operand s2_RegP()
4880 %{
4881 constraint(ALLOC_IN_RC(s2_long_reg));
4882 match(RegP);
4883 match(mRegP);
4884 match(no_T8_mRegP);
4886 format %{ %}
4887 interface(REG_INTER);
4888 %}
4890 operand s3_RegP()
4891 %{
4892 constraint(ALLOC_IN_RC(s3_long_reg));
4893 match(RegP);
4894 match(mRegP);
4895 match(no_T8_mRegP);
4897 format %{ %}
4898 interface(REG_INTER);
4899 %}
4901 operand s4_RegP()
4902 %{
4903 constraint(ALLOC_IN_RC(s4_long_reg));
4904 match(RegP);
4905 match(mRegP);
4906 match(no_T8_mRegP);
4908 format %{ %}
4909 interface(REG_INTER);
4910 %}
4912 operand s5_RegP()
4913 %{
4914 constraint(ALLOC_IN_RC(s5_long_reg));
4915 match(RegP);
4916 match(mRegP);
4917 match(no_T8_mRegP);
4919 format %{ %}
4920 interface(REG_INTER);
4921 %}
4923 operand s6_RegP()
4924 %{
4925 constraint(ALLOC_IN_RC(s6_long_reg));
4926 match(RegP);
4927 match(mRegP);
4928 match(no_T8_mRegP);
4930 format %{ %}
4931 interface(REG_INTER);
4932 %}
4934 operand s7_RegP()
4935 %{
4936 constraint(ALLOC_IN_RC(s7_long_reg));
4937 match(RegP);
4938 match(mRegP);
4939 match(no_T8_mRegP);
4941 format %{ %}
4942 interface(REG_INTER);
4943 %}
4945 operand t0_RegP()
4946 %{
4947 constraint(ALLOC_IN_RC(t0_long_reg));
4948 match(RegP);
4949 match(mRegP);
4950 match(no_T8_mRegP);
4952 format %{ %}
4953 interface(REG_INTER);
4954 %}
4956 operand t1_RegP()
4957 %{
4958 constraint(ALLOC_IN_RC(t1_long_reg));
4959 match(RegP);
4960 match(mRegP);
4961 match(no_T8_mRegP);
4963 format %{ %}
4964 interface(REG_INTER);
4965 %}
4967 operand t2_RegP()
4968 %{
4969 constraint(ALLOC_IN_RC(t2_long_reg));
4970 match(RegP);
4971 match(mRegP);
4972 match(no_T8_mRegP);
4974 format %{ %}
4975 interface(REG_INTER);
4976 %}
4978 operand t3_RegP()
4979 %{
4980 constraint(ALLOC_IN_RC(t3_long_reg));
4981 match(RegP);
4982 match(mRegP);
4983 match(no_T8_mRegP);
4985 format %{ %}
4986 interface(REG_INTER);
4987 %}
4989 operand t8_RegP()
4990 %{
4991 constraint(ALLOC_IN_RC(t8_long_reg));
4992 match(RegP);
4993 match(mRegP);
4995 format %{ %}
4996 interface(REG_INTER);
4997 %}
4999 operand t9_RegP()
5000 %{
5001 constraint(ALLOC_IN_RC(t9_long_reg));
5002 match(RegP);
5003 match(mRegP);
5004 match(no_T8_mRegP);
5006 format %{ %}
5007 interface(REG_INTER);
5008 %}
5010 operand a0_RegP()
5011 %{
5012 constraint(ALLOC_IN_RC(a0_long_reg));
5013 match(RegP);
5014 match(mRegP);
5015 match(no_T8_mRegP);
5017 format %{ %}
5018 interface(REG_INTER);
5019 %}
5021 operand a1_RegP()
5022 %{
5023 constraint(ALLOC_IN_RC(a1_long_reg));
5024 match(RegP);
5025 match(mRegP);
5026 match(no_T8_mRegP);
5028 format %{ %}
5029 interface(REG_INTER);
5030 %}
5032 operand a2_RegP()
5033 %{
5034 constraint(ALLOC_IN_RC(a2_long_reg));
5035 match(RegP);
5036 match(mRegP);
5037 match(no_T8_mRegP);
5039 format %{ %}
5040 interface(REG_INTER);
5041 %}
5043 operand a3_RegP()
5044 %{
5045 constraint(ALLOC_IN_RC(a3_long_reg));
5046 match(RegP);
5047 match(mRegP);
5048 match(no_T8_mRegP);
5050 format %{ %}
5051 interface(REG_INTER);
5052 %}
5054 operand a4_RegP()
5055 %{
5056 constraint(ALLOC_IN_RC(a4_long_reg));
5057 match(RegP);
5058 match(mRegP);
5059 match(no_T8_mRegP);
5061 format %{ %}
5062 interface(REG_INTER);
5063 %}
5066 operand a5_RegP()
5067 %{
5068 constraint(ALLOC_IN_RC(a5_long_reg));
5069 match(RegP);
5070 match(mRegP);
5071 match(no_T8_mRegP);
5073 format %{ %}
5074 interface(REG_INTER);
5075 %}
5077 operand a6_RegP()
5078 %{
5079 constraint(ALLOC_IN_RC(a6_long_reg));
5080 match(RegP);
5081 match(mRegP);
5082 match(no_T8_mRegP);
5084 format %{ %}
5085 interface(REG_INTER);
5086 %}
5088 operand a7_RegP()
5089 %{
5090 constraint(ALLOC_IN_RC(a7_long_reg));
5091 match(RegP);
5092 match(mRegP);
5093 match(no_T8_mRegP);
5095 format %{ %}
5096 interface(REG_INTER);
5097 %}
5099 operand v0_RegP()
5100 %{
5101 constraint(ALLOC_IN_RC(v0_long_reg));
5102 match(RegP);
5103 match(mRegP);
5104 match(no_T8_mRegP);
5106 format %{ %}
5107 interface(REG_INTER);
5108 %}
5110 operand v1_RegP()
5111 %{
5112 constraint(ALLOC_IN_RC(v1_long_reg));
5113 match(RegP);
5114 match(mRegP);
5115 match(no_T8_mRegP);
5117 format %{ %}
5118 interface(REG_INTER);
5119 %}
5121 /*
5122 operand mSPRegP(mRegP reg) %{
5123 constraint(ALLOC_IN_RC(sp_reg));
5124 match(reg);
5126 format %{ "SP" %}
5127 interface(REG_INTER);
5128 %}
5130 operand mFPRegP(mRegP reg) %{
5131 constraint(ALLOC_IN_RC(fp_reg));
5132 match(reg);
5134 format %{ "FP" %}
5135 interface(REG_INTER);
5136 %}
5137 */
5139 operand mRegL() %{
5140 constraint(ALLOC_IN_RC(long_reg));
5141 match(RegL);
5143 format %{ %}
5144 interface(REG_INTER);
5145 %}
5147 operand v0RegL() %{
5148 constraint(ALLOC_IN_RC(v0_long_reg));
5149 match(RegL);
5150 match(mRegL);
5152 format %{ %}
5153 interface(REG_INTER);
5154 %}
5156 operand v1RegL() %{
5157 constraint(ALLOC_IN_RC(v1_long_reg));
5158 match(RegL);
5159 match(mRegL);
5161 format %{ %}
5162 interface(REG_INTER);
5163 %}
5165 operand a0RegL() %{
5166 constraint(ALLOC_IN_RC(a0_long_reg));
5167 match(RegL);
5168 match(mRegL);
5170 format %{ "A0" %}
5171 interface(REG_INTER);
5172 %}
5174 operand a1RegL() %{
5175 constraint(ALLOC_IN_RC(a1_long_reg));
5176 match(RegL);
5177 match(mRegL);
5179 format %{ %}
5180 interface(REG_INTER);
5181 %}
5183 operand a2RegL() %{
5184 constraint(ALLOC_IN_RC(a2_long_reg));
5185 match(RegL);
5186 match(mRegL);
5188 format %{ %}
5189 interface(REG_INTER);
5190 %}
5192 operand a3RegL() %{
5193 constraint(ALLOC_IN_RC(a3_long_reg));
5194 match(RegL);
5195 match(mRegL);
5197 format %{ %}
5198 interface(REG_INTER);
5199 %}
5201 operand t0RegL() %{
5202 constraint(ALLOC_IN_RC(t0_long_reg));
5203 match(RegL);
5204 match(mRegL);
5206 format %{ %}
5207 interface(REG_INTER);
5208 %}
5210 operand t1RegL() %{
5211 constraint(ALLOC_IN_RC(t1_long_reg));
5212 match(RegL);
5213 match(mRegL);
5215 format %{ %}
5216 interface(REG_INTER);
5217 %}
5219 operand t2RegL() %{
5220 constraint(ALLOC_IN_RC(t2_long_reg));
5221 match(RegL);
5222 match(mRegL);
5224 format %{ %}
5225 interface(REG_INTER);
5226 %}
5228 operand t3RegL() %{
5229 constraint(ALLOC_IN_RC(t3_long_reg));
5230 match(RegL);
5231 match(mRegL);
5233 format %{ %}
5234 interface(REG_INTER);
5235 %}
5237 operand t8RegL() %{
5238 constraint(ALLOC_IN_RC(t8_long_reg));
5239 match(RegL);
5240 match(mRegL);
5242 format %{ %}
5243 interface(REG_INTER);
5244 %}
5246 operand a4RegL() %{
5247 constraint(ALLOC_IN_RC(a4_long_reg));
5248 match(RegL);
5249 match(mRegL);
5251 format %{ %}
5252 interface(REG_INTER);
5253 %}
5255 operand a5RegL() %{
5256 constraint(ALLOC_IN_RC(a5_long_reg));
5257 match(RegL);
5258 match(mRegL);
5260 format %{ %}
5261 interface(REG_INTER);
5262 %}
5264 operand a6RegL() %{
5265 constraint(ALLOC_IN_RC(a6_long_reg));
5266 match(RegL);
5267 match(mRegL);
5269 format %{ %}
5270 interface(REG_INTER);
5271 %}
5273 operand a7RegL() %{
5274 constraint(ALLOC_IN_RC(a7_long_reg));
5275 match(RegL);
5276 match(mRegL);
5278 format %{ %}
5279 interface(REG_INTER);
5280 %}
5282 operand s0RegL() %{
5283 constraint(ALLOC_IN_RC(s0_long_reg));
5284 match(RegL);
5285 match(mRegL);
5287 format %{ %}
5288 interface(REG_INTER);
5289 %}
5291 operand s1RegL() %{
5292 constraint(ALLOC_IN_RC(s1_long_reg));
5293 match(RegL);
5294 match(mRegL);
5296 format %{ %}
5297 interface(REG_INTER);
5298 %}
5300 operand s2RegL() %{
5301 constraint(ALLOC_IN_RC(s2_long_reg));
5302 match(RegL);
5303 match(mRegL);
5305 format %{ %}
5306 interface(REG_INTER);
5307 %}
5309 operand s3RegL() %{
5310 constraint(ALLOC_IN_RC(s3_long_reg));
5311 match(RegL);
5312 match(mRegL);
5314 format %{ %}
5315 interface(REG_INTER);
5316 %}
5318 operand s4RegL() %{
5319 constraint(ALLOC_IN_RC(s4_long_reg));
5320 match(RegL);
5321 match(mRegL);
5323 format %{ %}
5324 interface(REG_INTER);
5325 %}
5327 operand s7RegL() %{
5328 constraint(ALLOC_IN_RC(s7_long_reg));
5329 match(RegL);
5330 match(mRegL);
5332 format %{ %}
5333 interface(REG_INTER);
5334 %}
5336 // Floating register operands
5337 operand regF() %{
5338 constraint(ALLOC_IN_RC(flt_reg));
5339 match(RegF);
5341 format %{ %}
5342 interface(REG_INTER);
5343 %}
5345 //Double Precision Floating register operands
5346 operand regD() %{
5347 constraint(ALLOC_IN_RC(dbl_reg));
5348 match(RegD);
5350 format %{ %}
5351 interface(REG_INTER);
5352 %}
5354 //----------Memory Operands----------------------------------------------------
5355 // Indirect Memory Operand
5356 operand indirect(mRegP reg) %{
5357 constraint(ALLOC_IN_RC(p_reg));
5358 match(reg);
5360 format %{ "[$reg] @ indirect" %}
5361 interface(MEMORY_INTER) %{
5362 base($reg);
5363 index(0x0); /* NO_INDEX */
5364 scale(0x0);
5365 disp(0x0);
5366 %}
5367 %}
5369 // Indirect Memory Plus Short Offset Operand
5370 operand indOffset8(mRegP reg, immL8 off)
5371 %{
5372 constraint(ALLOC_IN_RC(p_reg));
5373 match(AddP reg off);
5375 op_cost(10);
5376 format %{ "[$reg + $off (8-bit)] @ indOffset8" %}
5377 interface(MEMORY_INTER) %{
5378 base($reg);
5379 index(0x0); /* NO_INDEX */
5380 scale(0x0);
5381 disp($off);
5382 %}
5383 %}
5385 // Indirect Memory Times Scale Plus Index Register
5386 operand indIndexScale(mRegP reg, mRegL lreg, immI2 scale)
5387 %{
5388 constraint(ALLOC_IN_RC(p_reg));
5389 match(AddP reg (LShiftL lreg scale));
5391 op_cost(10);
5392 format %{"[$reg + $lreg << $scale] @ indIndexScale" %}
5393 interface(MEMORY_INTER) %{
5394 base($reg);
5395 index($lreg);
5396 scale($scale);
5397 disp(0x0);
5398 %}
5399 %}
5402 // [base + index + offset]
5403 operand baseIndexOffset8(mRegP base, mRegL index, immL8 off)
5404 %{
5405 constraint(ALLOC_IN_RC(p_reg));
5406 op_cost(5);
5407 match(AddP (AddP base index) off);
5409 format %{ "[$base + $index + $off (8-bit)] @ baseIndexOffset8" %}
5410 interface(MEMORY_INTER) %{
5411 base($base);
5412 index($index);
5413 scale(0x0);
5414 disp($off);
5415 %}
5416 %}
5418 // [base + index + offset]
5419 operand baseIndexOffset8_convI2L(mRegP base, mRegI index, immL8 off)
5420 %{
5421 constraint(ALLOC_IN_RC(p_reg));
5422 op_cost(5);
5423 match(AddP (AddP base (ConvI2L index)) off);
5425 format %{ "[$base + $index + $off (8-bit)] @ baseIndexOffset8_convI2L" %}
5426 interface(MEMORY_INTER) %{
5427 base($base);
5428 index($index);
5429 scale(0x0);
5430 disp($off);
5431 %}
5432 %}
5434 // Indirect Memory Times Scale Plus Index Register Plus Offset Operand
5435 operand indIndexScaleOffset8(mRegP reg, immL8 off, mRegL lreg, immI2 scale)
5436 %{
5437 constraint(ALLOC_IN_RC(p_reg));
5438 match(AddP (AddP reg (LShiftL lreg scale)) off);
5440 op_cost(10);
5441 format %{"[$reg + $off + $lreg << $scale] @ indIndexScaleOffset8" %}
5442 interface(MEMORY_INTER) %{
5443 base($reg);
5444 index($lreg);
5445 scale($scale);
5446 disp($off);
5447 %}
5448 %}
5450 operand indIndexScaleOffset8_convI2L(mRegP reg, immL8 off, mRegI ireg, immI2 scale)
5451 %{
5452 constraint(ALLOC_IN_RC(p_reg));
5453 match(AddP (AddP reg (LShiftL (ConvI2L ireg) scale)) off);
5455 op_cost(10);
5456 format %{"[$reg + $off + $ireg << $scale] @ indIndexScaleOffset8_convI2L" %}
5457 interface(MEMORY_INTER) %{
5458 base($reg);
5459 index($ireg);
5460 scale($scale);
5461 disp($off);
5462 %}
5463 %}
5465 // [base + index<<scale + offset]
5466 operand basePosIndexScaleOffset8(mRegP base, mRegI index, immL8 off, immI_0_31 scale)
5467 %{
5468 constraint(ALLOC_IN_RC(p_reg));
5469 //predicate(n->in(2)->in(3)->in(1)->as_Type()->type()->is_long()->_lo >= 0);
5470 op_cost(10);
5471 match(AddP (AddP base (LShiftL (ConvI2L index) scale)) off);
5473 format %{ "[$base + $index << $scale + $off (8-bit)] @ basePosIndexScaleOffset8" %}
5474 interface(MEMORY_INTER) %{
5475 base($base);
5476 index($index);
5477 scale($scale);
5478 disp($off);
5479 %}
5480 %}
5482 // Indirect Memory Times Scale Plus Index Register Plus Offset Operand
5483 operand indIndexScaleOffsetNarrow(mRegN reg, immL8 off, mRegL lreg, immI2 scale)
5484 %{
5485 predicate(Universe::narrow_oop_shift() == 0);
5486 constraint(ALLOC_IN_RC(p_reg));
5487 match(AddP (AddP (DecodeN reg) (LShiftL lreg scale)) off);
5489 op_cost(10);
5490 format %{"[$reg + $off + $lreg << $scale] @ indIndexScaleOffsetNarrow" %}
5491 interface(MEMORY_INTER) %{
5492 base($reg);
5493 index($lreg);
5494 scale($scale);
5495 disp($off);
5496 %}
5497 %}
5499 // [base + index<<scale + offset] for compressed Oops
5500 operand indPosIndexI2LScaleOffset8Narrow(mRegN base, mRegI index, immL8 off, immI_0_31 scale)
5501 %{
5502 constraint(ALLOC_IN_RC(p_reg));
5503 //predicate(Universe::narrow_oop_shift() == 0 && n->in(2)->in(3)->in(1)->as_Type()->type()->is_long()->_lo >= 0);
5504 predicate(Universe::narrow_oop_shift() == 0);
5505 op_cost(10);
5506 match(AddP (AddP (DecodeN base) (LShiftL (ConvI2L index) scale)) off);
5508 format %{ "[$base + $index << $scale + $off (8-bit)] @ indPosIndexI2LScaleOffset8Narrow" %}
5509 interface(MEMORY_INTER) %{
5510 base($base);
5511 index($index);
5512 scale($scale);
5513 disp($off);
5514 %}
5515 %}
5517 //FIXME: I think it's better to limit the immI to be 16-bit at most!
5518 // Indirect Memory Plus Long Offset Operand
5519 operand indOffset32(mRegP reg, immL32 off) %{
5520 constraint(ALLOC_IN_RC(p_reg));
5521 op_cost(20);
5522 match(AddP reg off);
5524 format %{ "[$reg + $off (32-bit)] @ indOffset32" %}
5525 interface(MEMORY_INTER) %{
5526 base($reg);
5527 index(0x0); /* NO_INDEX */
5528 scale(0x0);
5529 disp($off);
5530 %}
5531 %}
5533 // Indirect Memory Plus Index Register
5534 operand indIndex(mRegP addr, mRegL index) %{
5535 constraint(ALLOC_IN_RC(p_reg));
5536 match(AddP addr index);
5538 op_cost(20);
5539 format %{"[$addr + $index] @ indIndex" %}
5540 interface(MEMORY_INTER) %{
5541 base($addr);
5542 index($index);
5543 scale(0x0);
5544 disp(0x0);
5545 %}
5546 %}
5548 operand indirectNarrowKlass(mRegN reg)
5549 %{
5550 predicate(Universe::narrow_klass_shift() == 0);
5551 constraint(ALLOC_IN_RC(p_reg));
5552 op_cost(10);
5553 match(DecodeNKlass reg);
5555 format %{ "[$reg] @ indirectNarrowKlass" %}
5556 interface(MEMORY_INTER) %{
5557 base($reg);
5558 index(0x0);
5559 scale(0x0);
5560 disp(0x0);
5561 %}
5562 %}
5564 operand indOffset8NarrowKlass(mRegN reg, immL8 off)
5565 %{
5566 predicate(Universe::narrow_klass_shift() == 0);
5567 constraint(ALLOC_IN_RC(p_reg));
5568 op_cost(10);
5569 match(AddP (DecodeNKlass reg) off);
5571 format %{ "[$reg + $off (8-bit)] @ indOffset8NarrowKlass" %}
5572 interface(MEMORY_INTER) %{
5573 base($reg);
5574 index(0x0);
5575 scale(0x0);
5576 disp($off);
5577 %}
5578 %}
5580 operand indOffset32NarrowKlass(mRegN reg, immL32 off)
5581 %{
5582 predicate(Universe::narrow_klass_shift() == 0);
5583 constraint(ALLOC_IN_RC(p_reg));
5584 op_cost(10);
5585 match(AddP (DecodeNKlass reg) off);
5587 format %{ "[$reg + $off (32-bit)] @ indOffset32NarrowKlass" %}
5588 interface(MEMORY_INTER) %{
5589 base($reg);
5590 index(0x0);
5591 scale(0x0);
5592 disp($off);
5593 %}
5594 %}
5596 operand indIndexOffsetNarrowKlass(mRegN reg, mRegL lreg, immL32 off)
5597 %{
5598 predicate(Universe::narrow_klass_shift() == 0);
5599 constraint(ALLOC_IN_RC(p_reg));
5600 match(AddP (AddP (DecodeNKlass reg) lreg) off);
5602 op_cost(10);
5603 format %{"[$reg + $off + $lreg] @ indIndexOffsetNarrowKlass" %}
5604 interface(MEMORY_INTER) %{
5605 base($reg);
5606 index($lreg);
5607 scale(0x0);
5608 disp($off);
5609 %}
5610 %}
5612 operand indIndexNarrowKlass(mRegN reg, mRegL lreg)
5613 %{
5614 predicate(Universe::narrow_klass_shift() == 0);
5615 constraint(ALLOC_IN_RC(p_reg));
5616 match(AddP (DecodeNKlass reg) lreg);
5618 op_cost(10);
5619 format %{"[$reg + $lreg] @ indIndexNarrowKlass" %}
5620 interface(MEMORY_INTER) %{
5621 base($reg);
5622 index($lreg);
5623 scale(0x0);
5624 disp(0x0);
5625 %}
5626 %}
5628 // Indirect Memory Operand
5629 operand indirectNarrow(mRegN reg)
5630 %{
5631 predicate(Universe::narrow_oop_shift() == 0);
5632 constraint(ALLOC_IN_RC(p_reg));
5633 op_cost(10);
5634 match(DecodeN reg);
5636 format %{ "[$reg] @ indirectNarrow" %}
5637 interface(MEMORY_INTER) %{
5638 base($reg);
5639 index(0x0);
5640 scale(0x0);
5641 disp(0x0);
5642 %}
5643 %}
5645 // Indirect Memory Plus Short Offset Operand
5646 operand indOffset8Narrow(mRegN reg, immL8 off)
5647 %{
5648 predicate(Universe::narrow_oop_shift() == 0);
5649 constraint(ALLOC_IN_RC(p_reg));
5650 op_cost(10);
5651 match(AddP (DecodeN reg) off);
5653 format %{ "[$reg + $off (8-bit)] @ indOffset8Narrow" %}
5654 interface(MEMORY_INTER) %{
5655 base($reg);
5656 index(0x0);
5657 scale(0x0);
5658 disp($off);
5659 %}
5660 %}
5662 // Indirect Memory Plus Index Register Plus Offset Operand
5663 operand indIndexOffset8Narrow(mRegN reg, mRegL lreg, immL8 off)
5664 %{
5665 predicate(Universe::narrow_oop_shift() == 0);
5666 constraint(ALLOC_IN_RC(p_reg));
5667 match(AddP (AddP (DecodeN reg) lreg) off);
5669 op_cost(10);
5670 format %{"[$reg + $off + $lreg] @ indIndexOffset8Narrow" %}
5671 interface(MEMORY_INTER) %{
5672 base($reg);
5673 index($lreg);
5674 scale(0x0);
5675 disp($off);
5676 %}
5677 %}
5679 //----------Load Long Memory Operands------------------------------------------
5680 // The load-long idiom will use its address expression again after loading
5681 // the first word of the long. If the load-long destination overlaps with
5682 // registers used in the addressing expression, the 2nd half will be loaded
5683 // from a clobbered address. Fix this by requiring that load-long use
5684 // address registers that do not overlap with the load-long target.
5686 // load-long support
5687 operand load_long_RegP() %{
5688 constraint(ALLOC_IN_RC(p_reg));
5689 match(RegP);
5690 match(mRegP);
5691 op_cost(100);
5692 format %{ %}
5693 interface(REG_INTER);
5694 %}
5696 // Indirect Memory Operand Long
5697 operand load_long_indirect(load_long_RegP reg) %{
5698 constraint(ALLOC_IN_RC(p_reg));
5699 match(reg);
5701 format %{ "[$reg]" %}
5702 interface(MEMORY_INTER) %{
5703 base($reg);
5704 index(0x0);
5705 scale(0x0);
5706 disp(0x0);
5707 %}
5708 %}
5710 // Indirect Memory Plus Long Offset Operand
5711 operand load_long_indOffset32(load_long_RegP reg, immL32 off) %{
5712 match(AddP reg off);
5714 format %{ "[$reg + $off]" %}
5715 interface(MEMORY_INTER) %{
5716 base($reg);
5717 index(0x0);
5718 scale(0x0);
5719 disp($off);
5720 %}
5721 %}
5723 //----------Conditional Branch Operands----------------------------------------
5724 // Comparison Op - This is the operation of the comparison, and is limited to
5725 // the following set of codes:
5726 // L (<), LE (<=), G (>), GE (>=), E (==), NE (!=)
5727 //
5728 // Other attributes of the comparison, such as unsignedness, are specified
5729 // by the comparison instruction that sets a condition code flags register.
5730 // That result is represented by a flags operand whose subtype is appropriate
5731 // to the unsignedness (etc.) of the comparison.
5732 //
5733 // Later, the instruction which matches both the Comparison Op (a Bool) and
5734 // the flags (produced by the Cmp) specifies the coding of the comparison op
5735 // by matching a specific subtype of Bool operand below, such as cmpOpU.
5737 // Comparison Code
5738 operand cmpOp() %{
// Signed comparison condition operand. The hex values are internal
// condition codes decoded by the instruct rules that take a cmpOp;
// they are identical to cmpOpU's — the two operands exist so signed
// and unsigned rules can match distinct operand types.
5739 match(Bool);
5741 format %{ "" %}
5742 interface(COND_INTER) %{
5743 equal(0x01);
5744 not_equal(0x02);
5745 greater(0x03);
5746 greater_equal(0x04);
5747 less(0x05);
5748 less_equal(0x06);
5749 overflow(0x7);
5750 no_overflow(0x8);
5751 %}
5752 %}
5755 // Comparison Code
5756 // Comparison Code, unsigned compare. Used by FP also, with
5757 // C2 (unordered) turned into GT or LT already. The other bits
5758 // C0 and C3 are turned into Carry & Zero flags.
5759 operand cmpOpU() %{
// Unsigned comparison condition operand. Encodings are the same as
// cmpOp's; the separate operand type lets unsigned (and FP) rules
// select different instruction emissions for the same Bool node.
5760 match(Bool);
5762 format %{ "" %}
5763 interface(COND_INTER) %{
5764 equal(0x01);
5765 not_equal(0x02);
5766 greater(0x03);
5767 greater_equal(0x04);
5768 less(0x05);
5769 less_equal(0x06);
5770 overflow(0x7);
5771 no_overflow(0x8);
5772 %}
5773 %}
5775 /*
5776 // Comparison Code, unsigned compare. Used by FP also, with
5777 // C2 (unordered) turned into GT or LT already. The other bits
5778 // C0 and C3 are turned into Carry & Zero flags.
5779 operand cmpOpU() %{
5780 match(Bool);
5782 format %{ "" %}
5783 interface(COND_INTER) %{
5784 equal(0x4);
5785 not_equal(0x5);
5786 less(0x2);
5787 greater_equal(0x3);
5788 less_equal(0x6);
5789 greater(0x7);
5790 %}
5791 %}
5792 */
5793 /*
5794 // Comparison Code for FP conditional move
5795 operand cmpOp_fcmov() %{
5796 match(Bool);
5798 format %{ "" %}
5799 interface(COND_INTER) %{
5800 equal (0x01);
5801 not_equal (0x02);
5802 greater (0x03);
5803 greater_equal(0x04);
5804 less (0x05);
5805 less_equal (0x06);
5806 %}
5807 %}
5809 // Comparison Code used in long compares
5810 operand cmpOp_commute() %{
5811 match(Bool);
5813 format %{ "" %}
5814 interface(COND_INTER) %{
5815 equal(0x4);
5816 not_equal(0x5);
5817 less(0xF);
5818 greater_equal(0xE);
5819 less_equal(0xD);
5820 greater(0xC);
5821 %}
5822 %}
5823 */
5825 //----------Special Memory Operands--------------------------------------------
5826 // Stack Slot Operand - This operand is used for loading and storing temporary
5827 // values on the stack where a match requires a value to
5828 // flow through memory.
5829 operand stackSlotP(sRegP reg) %{
5830 constraint(ALLOC_IN_RC(stack_slots));
5831 // No match rule because this operand is only generated in matching
5832 op_cost(50);
5833 format %{ "[$reg]" %}
5834 interface(MEMORY_INTER) %{
5835 base(0x1d); // SP
5836 index(0x0); // No Index
5837 scale(0x0); // No Scale
5838 disp($reg); // Stack Offset
5839 %}
5840 %}
5842 operand stackSlotI(sRegI reg) %{
5843 constraint(ALLOC_IN_RC(stack_slots));
5844 // No match rule because this operand is only generated in matching
5845 op_cost(50);
5846 format %{ "[$reg]" %}
5847 interface(MEMORY_INTER) %{
5848 base(0x1d); // SP
5849 index(0x0); // No Index
5850 scale(0x0); // No Scale
5851 disp($reg); // Stack Offset
5852 %}
5853 %}
5855 operand stackSlotF(sRegF reg) %{
5856 constraint(ALLOC_IN_RC(stack_slots));
5857 // No match rule because this operand is only generated in matching
5858 op_cost(50);
5859 format %{ "[$reg]" %}
5860 interface(MEMORY_INTER) %{
5861 base(0x1d); // SP
5862 index(0x0); // No Index
5863 scale(0x0); // No Scale
5864 disp($reg); // Stack Offset
5865 %}
5866 %}
5868 operand stackSlotD(sRegD reg) %{
5869 constraint(ALLOC_IN_RC(stack_slots));
5870 // No match rule because this operand is only generated in matching
5871 op_cost(50);
5872 format %{ "[$reg]" %}
5873 interface(MEMORY_INTER) %{
5874 base(0x1d); // SP
5875 index(0x0); // No Index
5876 scale(0x0); // No Scale
5877 disp($reg); // Stack Offset
5878 %}
5879 %}
5881 operand stackSlotL(sRegL reg) %{
5882 constraint(ALLOC_IN_RC(stack_slots));
5883 // No match rule because this operand is only generated in matching
5884 op_cost(50);
5885 format %{ "[$reg]" %}
5886 interface(MEMORY_INTER) %{
5887 base(0x1d); // SP
5888 index(0x0); // No Index
5889 scale(0x0); // No Scale
5890 disp($reg); // Stack Offset
5891 %}
5892 %}
5895 //------------------------OPERAND CLASSES--------------------------------------
5896 //opclass memory( direct, indirect, indOffset16, indOffset32, indOffset32X, indIndexOffset );
5897 opclass memory( indirect, indirectNarrow, indOffset8, indOffset32, indIndex, indIndexScale, load_long_indirect, load_long_indOffset32, baseIndexOffset8, baseIndexOffset8_convI2L, indIndexScaleOffset8, indIndexScaleOffset8_convI2L, basePosIndexScaleOffset8, indIndexScaleOffsetNarrow, indPosIndexI2LScaleOffset8Narrow, indOffset8Narrow, indIndexOffset8Narrow);
5900 //----------PIPELINE-----------------------------------------------------------
5901 // Rules which define the behavior of the target architectures pipeline.
// Pipeline model for the GS (Loongson/GodSon) core: a 6-stage in-order
// description (IF ID RD CA WB CM) used by the instruction scheduler.
// Latencies below are approximate (NOTE(review): tuning values — confirm
// against the actual core's documented latencies).
pipeline %{

//----------ATTRIBUTES---------------------------------------------------------
attributes %{
  fixed_size_instructions;         // Fixed size instructions
  branch_has_delay_slot;           // branch have delay slot in gs2
  max_instructions_per_bundle = 1; // 1 instruction per bundle
  max_bundles_per_cycle = 4;       // Up to 4 bundles per cycle
  bundle_unit_size=4;
  instruction_unit_size = 4;       // An instruction is 4 bytes long
  instruction_fetch_unit_size = 16;// The processor fetches one line
  instruction_fetch_units = 1;     // of 16 bytes

  // List of nop instructions
  nops( MachNop );
%}

//----------RESOURCES----------------------------------------------------------
// Resources are the functional units available to the machine:
// 4 decoders, 2 integer ALUs, 2 FPUs, one memory port, one branch unit.
resources(D1, D2, D3, D4, DECODE = D1 | D2 | D3| D4, ALU1, ALU2, ALU = ALU1 | ALU2, FPU1, FPU2, FPU = FPU1 | FPU2, MEM, BR);

//----------PIPELINE DESCRIPTION-----------------------------------------------
// Pipeline Description specifies the stages in the machine's pipeline

// IF: fetch
// ID: decode
// RD: read
// CA: caculate
// WB: write back
// CM: commit

pipe_desc(IF, ID, RD, CA, WB, CM);

//----------PIPELINE CLASSES---------------------------------------------------
// Pipeline Classes describe the stages in which input and output are
// referenced by the hardware pipeline.

//No.1 Integer ALU reg-reg operation : dst <-- reg1 op reg2
pipe_class ialu_regI_regI(mRegI dst, mRegI src1, mRegI src2) %{
  single_instruction;
  src1   : RD(read);
  src2   : RD(read);
  dst    : WB(write)+1;
  DECODE : ID;
  ALU    : CA;
%}

//No.19 Integer mult operation : dst <-- reg1 mult reg2
pipe_class ialu_mult(mRegI dst, mRegI src1, mRegI src2) %{
  src1   : RD(read);
  src2   : RD(read);
  dst    : WB(write)+5;   // multiply result available 5 cycles late
  DECODE : ID;
  ALU2   : CA;
%}

// Long multiply: higher latency than 32-bit multiply.
pipe_class mulL_reg_reg(mRegL dst, mRegL src1, mRegL src2) %{
  src1   : RD(read);
  src2   : RD(read);
  dst    : WB(write)+10;
  DECODE : ID;
  ALU2   : CA;
%}

//No.19 Integer div operation : dst <-- reg1 div reg2
pipe_class ialu_div(mRegI dst, mRegI src1, mRegI src2) %{
  src1   : RD(read);
  src2   : RD(read);
  dst    : WB(write)+10;
  DECODE : ID;
  ALU2   : CA;
%}

//No.19 Integer mod operation : dst <-- reg1 mod reg2
pipe_class ialu_mod(mRegI dst, mRegI src1, mRegI src2) %{
  instruction_count(2);
  src1   : RD(read);
  src2   : RD(read);
  dst    : WB(write)+10;
  DECODE : ID;
  ALU2   : CA;
%}

//No.15 Long ALU reg-reg operation : dst <-- reg1 op reg2
pipe_class ialu_regL_regL(mRegL dst, mRegL src1, mRegL src2) %{
  instruction_count(2);
  src1   : RD(read);
  src2   : RD(read);
  dst    : WB(write);
  DECODE : ID;
  ALU    : CA;
%}

//No.18 Long ALU reg-imm16 operation : dst <-- reg1 op imm16
pipe_class ialu_regL_imm16(mRegL dst, mRegL src) %{
  instruction_count(2);
  src    : RD(read);
  dst    : WB(write);
  DECODE : ID;
  ALU    : CA;
%}

//no.16 load Long from memory :
pipe_class ialu_loadL(mRegL dst, memory mem) %{
  instruction_count(2);
  mem    : RD(read);
  dst    : WB(write)+5;   // load-use latency
  DECODE : ID;
  MEM    : RD;
%}

//No.17 Store Long to Memory :
pipe_class ialu_storeL(mRegL src, memory mem) %{
  instruction_count(2);
  mem    : RD(read);
  src    : RD(read);
  DECODE : ID;
  MEM    : RD;
%}

//No.2 Integer ALU reg-imm16 operation : dst <-- reg1 op imm16
pipe_class ialu_regI_imm16(mRegI dst, mRegI src) %{
  single_instruction;
  src    : RD(read);
  dst    : WB(write);
  DECODE : ID;
  ALU    : CA;
%}

//No.3 Integer move operation : dst <-- reg
pipe_class ialu_regI_mov(mRegI dst, mRegI src) %{
  src    : RD(read);
  dst    : WB(write);
  DECODE : ID;
  ALU    : CA;
%}

//No.4 No instructions : do nothing
pipe_class empty( ) %{
  instruction_count(0);
%}

//No.5 UnConditional branch :
pipe_class pipe_jump( label labl ) %{
  multiple_bundles;
  DECODE : ID;
  BR     : RD;
%}

//No.6 ALU Conditional branch :
pipe_class pipe_alu_branch(mRegI src1, mRegI src2, label labl ) %{
  multiple_bundles;
  src1   : RD(read);
  src2   : RD(read);
  DECODE : ID;
  BR     : RD;
%}

//no.7 load integer from memory :
pipe_class ialu_loadI(mRegI dst, memory mem) %{
  mem    : RD(read);
  dst    : WB(write)+3;   // load-use latency
  DECODE : ID;
  MEM    : RD;
%}

//No.8 Store Integer to Memory :
pipe_class ialu_storeI(mRegI src, memory mem) %{
  mem    : RD(read);
  src    : RD(read);
  DECODE : ID;
  MEM    : RD;
%}

//No.10 Floating FPU reg-reg operation : dst <-- reg1 op reg2
pipe_class fpu_regF_regF(regF dst, regF src1, regF src2) %{
  src1   : RD(read);
  src2   : RD(read);
  dst    : WB(write);
  DECODE : ID;
  FPU    : CA;
%}

//No.22 Floating div operation : dst <-- reg1 div reg2
pipe_class fpu_div(regF dst, regF src1, regF src2) %{
  src1   : RD(read);
  src2   : RD(read);
  dst    : WB(write);
  DECODE : ID;
  FPU2   : CA;
%}

// int -> double conversion on FPU1.
pipe_class fcvt_I2D(regD dst, mRegI src) %{
  src    : RD(read);
  dst    : WB(write);
  DECODE : ID;
  FPU1   : CA;
%}

// double -> int conversion on FPU1.
pipe_class fcvt_D2I(mRegI dst, regD src) %{
  src    : RD(read);
  dst    : WB(write);
  DECODE : ID;
  FPU1   : CA;
%}

// FPR -> GPR move (mfc1) goes through the memory port in this model.
pipe_class pipe_mfc1(mRegI dst, regD src) %{
  src    : RD(read);
  dst    : WB(write);
  DECODE : ID;
  MEM    : RD;
%}

// GPR -> FPR move (mtc1).
pipe_class pipe_mtc1(regD dst, mRegI src) %{
  src    : RD(read);
  dst    : WB(write);
  DECODE : ID;
  MEM    : RD(5);
%}

//No.23 Floating sqrt operation : dst <-- reg1 sqrt reg2
pipe_class fpu_sqrt(regF dst, regF src1, regF src2) %{
  multiple_bundles;
  src1   : RD(read);
  src2   : RD(read);
  dst    : WB(write);
  DECODE : ID;
  FPU2   : CA;
%}

//No.11 Load Floating from Memory :
pipe_class fpu_loadF(regF dst, memory mem) %{
  instruction_count(1);
  mem    : RD(read);
  dst    : WB(write)+3;
  DECODE : ID;
  MEM    : RD;
%}

//No.12 Store Floating to Memory :
pipe_class fpu_storeF(regF src, memory mem) %{
  instruction_count(1);
  mem    : RD(read);
  src    : RD(read);
  DECODE : ID;
  MEM    : RD;
%}

//No.13 FPU Conditional branch :
pipe_class pipe_fpu_branch(regF src1, regF src2, label labl ) %{
  multiple_bundles;
  src1   : RD(read);
  src2   : RD(read);
  DECODE : ID;
  BR     : RD;
%}

//No.14 Floating FPU reg operation : dst <-- op reg
pipe_class fpu1_regF(regF dst, regF src) %{
  src    : RD(read);
  dst    : WB(write);
  DECODE : ID;
  FPU    : CA;
%}

// Multi-instruction memory sequence (e.g. atomics); serializes the pipe.
pipe_class long_memory_op() %{
  instruction_count(10); multiple_bundles; force_serialization;
  fixed_latency(30);
%}

// Call through the branch unit.
pipe_class simple_call() %{
  instruction_count(10); multiple_bundles; force_serialization;
  fixed_latency(200);
  BR : RD;
%}

// Generic call; no specific unit reservation.
pipe_class call() %{
  instruction_count(10); multiple_bundles; force_serialization;
  fixed_latency(200);
%}

//FIXME:
//No.9 Piple slow : for multi-instructions
pipe_class pipe_slow( ) %{
  instruction_count(20);
  force_serialization;
  multiple_bundles;
  fixed_latency(50);
%}

%}
6200 //----------INSTRUCTIONS-------------------------------------------------------
6201 //
6202 // match -- States which machine-independent subtree may be replaced
6203 // by this instruction.
6204 // ins_cost -- The estimated cost of this instruction is used by instruction
6205 // selection to identify a minimum cost tree of machine
6206 // instructions that matches a tree of machine-independent
6207 // instructions.
6208 // format -- A string providing the disassembly for this instruction.
6209 // The value of an instruction's operand may be inserted
6210 // by referring to it with a '$' prefix.
6211 // opcode -- Three instruction opcodes may be provided. These are referred
6212 // to within an encode class as $primary, $secondary, and $tertiary
6213 // respectively. The primary opcode is commonly used to
6214 // indicate the type of machine instruction, while secondary
6215 // and tertiary are often used for prefix options or addressing
6216 // modes.
6217 // ins_encode -- A list of encode classes with parameters. The encode class
6218 // name must have been defined in an 'enc_class' specification
6219 // in the encode section of the architecture description.
6222 // Load Integer
// Load Integer: 32-bit sign-extending load (lw).
instruct loadI(mRegI dst, memory mem) %{
  match(Set dst (LoadI mem));

  ins_cost(125);
  format %{ "lw    $dst, $mem #@loadI" %}
  ins_encode (load_I_enc(dst, mem));
  ins_pipe( ialu_loadI );
%}
// Load Integer with implicit sign-extension to long: lw already
// sign-extends on MIPS64, so ConvI2L folds into the load for free.
instruct loadI_convI2L(mRegL dst, memory mem) %{
  match(Set dst (ConvI2L (LoadI mem)));

  ins_cost(125);
  format %{ "lw    $dst, $mem #@loadI_convI2L" %}
  ins_encode (load_I_enc(dst, mem));
  ins_pipe( ialu_loadI );
%}
6241 // Load Integer (32 bit signed) to Byte (8 bit signed)
// Load Integer (32 bit signed) to Byte (8 bit signed):
// (x << 24) >> 24 narrows to a signed byte, so just emit lb.
instruct loadI2B(mRegI dst, memory mem, immI_24 twentyfour) %{
  match(Set dst (RShiftI (LShiftI (LoadI mem) twentyfour) twentyfour));

  ins_cost(125);
  format %{ "lb  $dst, $mem\t# int -> byte #@loadI2B" %}
  ins_encode(load_B_enc(dst, mem));
  ins_pipe(ialu_loadI);
%}
6251 // Load Integer (32 bit signed) to Unsigned Byte (8 bit UNsigned)
// Load Integer (32 bit signed) to Unsigned Byte (8 bit UNsigned):
// (x & 0xFF) is a zero-extending byte load, so just emit lbu.
instruct loadI2UB(mRegI dst, memory mem, immI_255 mask) %{
  match(Set dst (AndI (LoadI mem) mask));

  ins_cost(125);
  format %{ "lbu  $dst, $mem\t# int -> ubyte #@loadI2UB" %}
  ins_encode(load_UB_enc(dst, mem));
  ins_pipe(ialu_loadI);
%}
6261 // Load Integer (32 bit signed) to Short (16 bit signed)
// Load Integer (32 bit signed) to Short (16 bit signed):
// (x << 16) >> 16 narrows to a signed short, so just emit lh.
instruct loadI2S(mRegI dst, memory mem, immI_16 sixteen) %{
  match(Set dst (RShiftI (LShiftI (LoadI mem) sixteen) sixteen));

  ins_cost(125);
  format %{ "lh  $dst, $mem\t# int -> short #@loadI2S" %}
  ins_encode(load_S_enc(dst, mem));
  ins_pipe(ialu_loadI);
%}
6271 // Load Integer (32 bit signed) to Unsigned Short/Char (16 bit UNsigned)
// Load Integer (32 bit signed) to Unsigned Short/Char (16 bit UNsigned):
// (x & 0xFFFF) is a zero-extending halfword load, so just emit lhu.
instruct loadI2US(mRegI dst, memory mem, immI_65535 mask) %{
  match(Set dst (AndI (LoadI mem) mask));

  ins_cost(125);
  format %{ "lhu  $dst, $mem\t# int -> ushort/char #@loadI2US" %}
  ins_encode(load_C_enc(dst, mem));
  ins_pipe(ialu_loadI);
%}
6281 // Load Long.
// Load Long: 64-bit load (ld).  A 64-bit load is naturally atomic here,
// so no atomic-access predicate is needed (left commented for reference).
instruct loadL(mRegL dst, memory mem) %{
//  predicate(!((LoadLNode*)n)->require_atomic_access());
  match(Set dst (LoadL mem));

  ins_cost(250);
  format %{ "ld    $dst, $mem #@loadL" %}
  ins_encode(load_L_enc(dst, mem));
  ins_pipe( ialu_loadL );
%}
6292 // Load Long - UNaligned
// Load Long - UNaligned.  Currently emitted as a plain ld; see FIXME.
instruct loadL_unaligned(mRegL dst, memory mem) %{
  match(Set dst (LoadL_unaligned mem));

  // FIXME: Jin: Need more effective ldl/ldr
  ins_cost(450);
  format %{ "ld    $dst, $mem #@loadL_unaligned\n\t" %}
  ins_encode(load_L_enc(dst, mem));
  ins_pipe( ialu_loadL );
%}
6303 // Store Long
// Store Long: 64-bit store from a register (sd).
instruct storeL_reg(memory mem, mRegL src) %{
  match(Set mem (StoreL mem src));

  ins_cost(200);
  format %{ "sd    $mem,   $src #@storeL_reg\n" %}
  ins_encode(store_L_reg_enc(mem, src));
  ins_pipe( ialu_storeL );
%}
// Store Long zero: store the hardware zero register directly (cheaper
// than materializing the constant first, hence the lower cost).
instruct storeL_immL0(memory mem, immL0 zero) %{
  match(Set mem (StoreL mem zero));

  ins_cost(180);
  format %{ "sd    zero, $mem #@storeL_immL0" %}
  ins_encode(store_L_immL0_enc(mem, zero));
  ins_pipe( ialu_storeL );
%}
// Store Long immediate: materialize the constant, then sd.
instruct storeL_imm(memory mem, immL src) %{
  match(Set mem (StoreL mem src));

  ins_cost(200);
  format %{ "sd    $src, $mem #@storeL_imm" %}
  ins_encode(store_L_immL_enc(mem, src));
  ins_pipe( ialu_storeL );
%}
6331 // Load Compressed Pointer
// Load Compressed Pointer: 32-bit zero-extending load (lwu).
instruct loadN(mRegN dst, memory mem)
%{
   match(Set dst (LoadN mem));

   ins_cost(125); // XXX
   format %{ "lwu    $dst, $mem\t# compressed ptr @ loadN" %}
   ins_encode (load_N_enc(dst, mem));
   ins_pipe( ialu_loadI ); // XXX
%}
// Load and decode a compressed pointer in one instruction.  Valid only
// when decoding is the identity (zero heap base, zero shift).
instruct loadN2P(mRegP dst, memory mem)
%{
   match(Set dst (DecodeN (LoadN mem)));
   predicate(Universe::narrow_oop_base() == NULL && Universe::narrow_oop_shift() == 0);

   ins_cost(125); // XXX
   format %{ "lwu    $dst, $mem\t# @ loadN2P" %}
   ins_encode (load_N_enc(dst, mem));
   ins_pipe( ialu_loadI ); // XXX
%}
6353 // Load Pointer
// Load Pointer: 64-bit load (ld).
instruct loadP(mRegP dst, memory mem) %{
  match(Set dst (LoadP mem));

  ins_cost(125);
  format %{ "ld    $dst, $mem #@loadP" %}
  ins_encode (load_P_enc(dst, mem));
  ins_pipe( ialu_loadI );
%}
6363 // Load Klass Pointer
// Load Klass Pointer: full-width klass pointer load (same encoding as loadP).
instruct loadKlass(mRegP dst, memory mem) %{
  match(Set dst (LoadKlass mem));

  ins_cost(125);
  format %{ "MOV    $dst,$mem @ loadKlass" %}
  ins_encode (load_P_enc(dst, mem));
  ins_pipe( ialu_loadI );
%}
6373 // Load narrow Klass Pointer
// Load narrow Klass Pointer: 32-bit zero-extending load (lwu).
instruct loadNKlass(mRegN dst, memory mem)
%{
   match(Set dst (LoadNKlass mem));

   ins_cost(125); // XXX
   format %{ "lwu    $dst, $mem\t# compressed klass ptr @ loadNKlass" %}
   ins_encode (load_N_enc(dst, mem));
   ins_pipe( ialu_loadI ); // XXX
%}
// Load and decode a compressed klass pointer in one instruction.  Valid
// only when decoding is the identity (zero klass base, zero shift).
instruct loadN2PKlass(mRegP dst, memory mem)
%{
   match(Set dst (DecodeNKlass (LoadNKlass mem)));
   predicate(Universe::narrow_klass_base() == NULL && Universe::narrow_klass_shift() == 0);

   ins_cost(125); // XXX
   format %{ "lwu    $dst, $mem\t# compressed klass ptr @ loadN2PKlass" %}
   ins_encode (load_N_enc(dst, mem));
   ins_pipe( ialu_loadI ); // XXX
%}
6395 // Load Constant
// Load Constant int: materialize a 32-bit immediate into a register.
instruct loadConI(mRegI dst, immI src) %{
  match(Set dst src);

  ins_cost(150);
  format %{ "mov    $dst, $src #@loadConI" %}
  ins_encode %{
    Register dst = $dst$$Register;
    int    value = $src$$constant;
    __ move(dst, value);
  %}
  ins_pipe( ialu_regI_regI );
%}
// Load Constant long: materialize an arbitrary 64-bit immediate
// (set64 picks the shortest instruction sequence for the value).
instruct loadConL_set64(mRegL dst, immL src) %{
  match(Set dst src);
  ins_cost(120);
  format %{ "li   $dst, $src @ loadConL_set64" %}
  ins_encode %{
    __ set64($dst$$Register, $src$$constant);
  %}
  ins_pipe(ialu_regL_regL);
%}
6420 /*
6421 // Load long value from constant table (predicated by immL_expensive).
6422 instruct loadConL_load(mRegL dst, immL_expensive src) %{
6423 match(Set dst src);
6424 ins_cost(150);
6425 format %{ "ld $dst, $constantoffset[$constanttablebase] # load long $src from table @ loadConL_ldx" %}
6426 ins_encode %{
6427 int con_offset = $constantoffset($src);
6429 if (Assembler::is_simm16(con_offset)) {
6430 __ ld($dst$$Register, $constanttablebase, con_offset);
6431 } else {
6432 __ set64(AT, con_offset);
6433 if (UseLoongsonISA) {
6434 __ gsldx($dst$$Register, $constanttablebase, AT, 0);
6435 } else {
6436 __ daddu(AT, $constanttablebase, AT);
6437 __ ld($dst$$Register, AT, 0);
6438 }
6439 }
6440 %}
6441 ins_pipe(ialu_loadI);
6442 %}
6443 */
// Load Constant long that fits in a signed 16-bit immediate: a single
// daddiu from the zero register (cheaper than the general set64 path).
instruct loadConL16(mRegL dst, immL16 src) %{
  match(Set dst src);
  ins_cost(105);
  format %{ "mov    $dst, $src #@loadConL16" %}
  ins_encode %{
    Register dst_reg = as_Register($dst$$reg);
    int      value   = $src$$constant;
    __ daddiu(dst_reg, R0, value);
  %}
  ins_pipe( ialu_regL_regL );
%}
// Load Constant long zero: copy the hardware zero register.
instruct loadConL0(mRegL dst, immL0 src) %{
  match(Set dst src);
  ins_cost(100);
  format %{ "mov    $dst, zero #@loadConL0" %}
  ins_encode %{
    Register dst_reg = as_Register($dst$$reg);
    __ daddu(dst_reg, R0, R0);
  %}
  ins_pipe( ialu_regL_regL );
%}
6469 // Load Range
// Load Range: array length load (an int field), same encoding as loadI.
instruct loadRange(mRegI dst, memory mem) %{
  match(Set dst (LoadRange mem));

  ins_cost(125);
  format %{ "MOV    $dst,$mem @ loadRange" %}
  ins_encode(load_I_enc(dst, mem));
  ins_pipe( ialu_loadI );
%}
// Store Pointer: 64-bit store from a register (sd).
instruct storeP(memory mem, mRegP src ) %{
  match(Set mem (StoreP mem src));

  ins_cost(125);
  format %{ "sd    $src, $mem #@storeP" %}
  ins_encode(store_P_reg_enc(mem, src));
  ins_pipe( ialu_storeI );
%}
6489 // Store NULL Pointer, mark word, or other simple pointer constant.
// Store NULL Pointer, mark word, or other simple pointer constant.
// Zero case: store the hardware zero register directly.
instruct storeImmP0(memory mem, immP0 zero) %{
  match(Set mem (StoreP mem zero));

  ins_cost(125);
  format %{ "mov    $mem, $zero #@storeImmP0" %}
  ins_encode(store_P_immP0_enc(mem));
  ins_pipe( ialu_storeI );
%}
6499 // Store NULL Pointer, mark word, or other simple pointer constant.
// Store NULL Pointer, mark word, or other simple pointer constant
// (31-bit immediate): materialize then store.
instruct storeImmP(memory mem, immP31 src) %{
  match(Set mem (StoreP mem src));

  ins_cost(150);
  format %{ "mov    $mem, $src #@storeImmP" %}
  ins_encode(store_P_immP_enc(mem, src));
  ins_pipe( ialu_storeI );
%}
6509 // Store Byte Immediate
// Store Byte Immediate: materialize an 8-bit constant then sb.
instruct storeImmB(memory mem, immI8 src) %{
  match(Set mem (StoreB mem src));

  ins_cost(150);
  format %{ "movb   $mem, $src #@storeImmB" %}
  ins_encode(store_B_immI_enc(mem, src));
  ins_pipe( ialu_storeI );
%}
6519 // Store Compressed Pointer
// Store Compressed Pointer: 32-bit store (sw) of an already-encoded oop.
instruct storeN(memory mem, mRegN src)
%{
  match(Set mem (StoreN mem src));

  ins_cost(125); // XXX
  format %{ "sw    $mem, $src\t# compressed ptr @ storeN" %}
  ins_encode(store_N_reg_enc(mem, src));
  ins_pipe( ialu_storeI );
%}
// Encode-and-store a pointer as a compressed oop in one instruction.
// Valid only when encoding is the identity (zero heap base, zero shift).
instruct storeP2N(memory mem, mRegP src)
%{
  match(Set mem (StoreN mem (EncodeP src)));
  predicate(Universe::narrow_oop_base() == NULL && Universe::narrow_oop_shift() == 0);

  ins_cost(125); // XXX
  format %{ "sw    $mem, $src\t# @ storeP2N" %}
  ins_encode(store_N_reg_enc(mem, src));
  ins_pipe( ialu_storeI );
%}
// Store Compressed Klass Pointer: 32-bit store (sw).
instruct storeNKlass(memory mem, mRegN src)
%{
  match(Set mem (StoreNKlass mem src));

  ins_cost(125); // XXX
  format %{ "sw    $mem, $src\t# compressed klass ptr @ storeNKlass" %}
  ins_encode(store_N_reg_enc(mem, src));
  ins_pipe( ialu_storeI );
%}
// Encode-and-store a klass pointer as a compressed klass in one
// instruction.  Valid only when encoding is the identity.
instruct storeP2NKlass(memory mem, mRegP src)
%{
  match(Set mem (StoreNKlass mem (EncodePKlass src)));
  predicate(Universe::narrow_klass_base() == NULL && Universe::narrow_klass_shift() == 0);

  ins_cost(125); // XXX
  format %{ "sw    $mem, $src\t# @ storeP2NKlass" %}
  ins_encode(store_N_reg_enc(mem, src));
  ins_pipe( ialu_storeI );
%}
// Store compressed NULL: a compressed NULL encodes as 0, so store zero.
instruct storeImmN0(memory mem, immN0 zero)
%{
  match(Set mem (StoreN mem zero));

  ins_cost(125); // XXX
  format %{ "storeN0    zero, $mem\t# compressed ptr" %}
  ins_encode(storeImmN0_enc(mem, zero));
  ins_pipe( ialu_storeI );
%}
// Store compressed oop constant: materialize the narrow oop, then sw.
instruct storeImmN(memory mem, immN src)
%{
  match(Set mem (StoreN mem src));

  ins_cost(150);
  format %{ "storeImmN    $mem, $src\t# compressed ptr @ storeImmN" %}
  ins_encode(storeImmN_enc(mem, src));
  ins_pipe( ialu_storeI );
%}
// Store compressed klass constant: materialize the narrow klass, then sw.
instruct storeImmNKlass(memory mem, immNKlass src)
%{
  match(Set mem (StoreNKlass mem src));

  ins_cost(150); // XXX
  format %{ "sw    $mem, $src\t# compressed klass ptr @ storeImmNKlass" %}
  ins_encode(storeImmNKlass_enc(mem, src));
  ins_pipe( ialu_storeI );
%}
6592 // Store Byte
// Store Byte from a register (sb).
instruct storeB(memory mem, mRegI src) %{
  match(Set mem (StoreB mem src));

  ins_cost(125);
  format %{ "sb    $src, $mem #@storeB" %}
  ins_encode(store_B_reg_enc(mem, src));
  ins_pipe( ialu_storeI );
%}
// Store Byte of a long: sb only writes the low 8 bits, so the
// ConvL2I truncation folds into the store for free.
instruct storeB_convL2I(memory mem, mRegL src) %{
  match(Set mem (StoreB mem (ConvL2I src)));

  ins_cost(125);
  format %{ "sb    $src, $mem #@storeB_convL2I" %}
  ins_encode(store_B_reg_enc(mem, src));
  ins_pipe( ialu_storeI );
%}
6611 // Load Byte (8bit signed)
// Load Byte (8bit signed): lb.
instruct loadB(mRegI dst, memory mem) %{
  match(Set dst (LoadB mem));

  ins_cost(125);
  format %{ "lb   $dst, $mem #@loadB" %}
  ins_encode(load_B_enc(dst, mem));
  ins_pipe( ialu_loadI );
%}
// Load Byte sign-extended to long: lb sign-extends to 64 bits,
// so the ConvI2L folds into the load.
instruct loadB_convI2L(mRegL dst, memory mem) %{
  match(Set dst (ConvI2L (LoadB mem)));

  ins_cost(125);
  format %{ "lb   $dst, $mem #@loadB_convI2L" %}
  ins_encode(load_B_enc(dst, mem));
  ins_pipe( ialu_loadI );
%}
6630 // Load Byte (8bit UNsigned)
// Load Byte (8bit UNsigned): lbu.
instruct loadUB(mRegI dst, memory mem) %{
  match(Set dst (LoadUB mem));

  ins_cost(125);
  format %{ "lbu   $dst, $mem #@loadUB" %}
  ins_encode(load_UB_enc(dst, mem));
  ins_pipe( ialu_loadI );
%}
// Load unsigned Byte zero-extended to long: lbu zero-extends to 64 bits,
// so the ConvI2L folds into the load.
instruct loadUB_convI2L(mRegL dst, memory mem) %{
  match(Set dst (ConvI2L (LoadUB mem)));

  ins_cost(125);
  format %{ "lbu   $dst, $mem #@loadUB_convI2L" %}
  ins_encode(load_UB_enc(dst, mem));
  ins_pipe( ialu_loadI );
%}
6649 // Load Short (16bit signed)
// Load Short (16bit signed): lh.
instruct loadS(mRegI dst, memory mem) %{
  match(Set dst (LoadS mem));

  ins_cost(125);
  format %{ "lh   $dst, $mem #@loadS" %}
  ins_encode(load_S_enc(dst, mem));
  ins_pipe( ialu_loadI );
%}
6659 // Load Short (16 bit signed) to Byte (8 bit signed)
// Load Short (16 bit signed) to Byte (8 bit signed):
// (x << 24) >> 24 narrows to a signed byte, so just emit lb.
instruct loadS2B(mRegI dst, memory mem, immI_24 twentyfour) %{
  match(Set dst (RShiftI (LShiftI (LoadS mem) twentyfour) twentyfour));

  ins_cost(125);
  format %{ "lb   $dst, $mem\t# short -> byte #@loadS2B" %}
  ins_encode(load_B_enc(dst, mem));
  ins_pipe(ialu_loadI);
%}
// Load Short sign-extended to long: lh sign-extends to 64 bits,
// so the ConvI2L folds into the load.
instruct loadS_convI2L(mRegL dst, memory mem) %{
  match(Set dst (ConvI2L (LoadS mem)));

  ins_cost(125);
  format %{ "lh   $dst, $mem #@loadS_convI2L" %}
  ins_encode(load_S_enc(dst, mem));
  ins_pipe( ialu_loadI );
%}
6678 // Store Integer Immediate
// Store Integer Immediate: materialize the constant, then sw.
instruct storeImmI(memory mem, immI src) %{
  match(Set mem (StoreI mem src));

  ins_cost(150);
  format %{ "mov    $mem, $src #@storeImmI" %}
  ins_encode(store_I_immI_enc(mem, src));
  ins_pipe( ialu_storeI );
%}
6688 // Store Integer
// Store Integer from a register (sw).
instruct storeI(memory mem, mRegI src) %{
  match(Set mem (StoreI mem src));

  ins_cost(125);
  format %{ "sw    $mem, $src #@storeI" %}
  ins_encode(store_I_reg_enc(mem, src));
  ins_pipe( ialu_storeI );
%}
// Store Integer slice of a long: sw only writes the low 32 bits,
// so the ConvL2I truncation folds into the store for free.
instruct storeI_convL2I(memory mem, mRegL src) %{
  match(Set mem (StoreI mem (ConvL2I src)));

  ins_cost(125);
  format %{ "sw    $mem, $src #@storeI_convL2I" %}
  ins_encode(store_I_reg_enc(mem, src));
  ins_pipe( ialu_storeI );
%}
6707 // Load Float
// Load Float from memory into an FPU register.
instruct loadF(regF dst, memory mem) %{
  match(Set dst (LoadF mem));

  ins_cost(150);
  format %{ "loadF $dst, $mem #@loadF" %}
  ins_encode(load_F_enc(dst, mem));
  ins_pipe( ialu_loadI );
%}
// Load Constant pointer (general case).  Oop and metadata constants need
// a relocation entry and a fixed-length patchable sequence so the GC /
// class redefinition can rewrite the embedded address later; plain
// addresses use the cheaper set64.
instruct loadConP_general(mRegP dst, immP src) %{
  match(Set dst src);

  ins_cost(120);
  format %{ "li   $dst, $src #@loadConP_general" %}

  ins_encode %{
    Register dst = $dst$$Register;
    long* value = (long*)$src$$constant;

    if($src->constant_reloc() == relocInfo::metadata_type){
      int klass_index = __ oop_recorder()->find_index((Klass*)value);
      RelocationHolder rspec = metadata_Relocation::spec(klass_index);

      __ relocate(rspec);
      // patchable_set48 keeps a fixed instruction count so the address
      // can be patched in place later.
      __ patchable_set48(dst, (long)value);
    }else if($src->constant_reloc() == relocInfo::oop_type){
      int oop_index = __ oop_recorder()->find_index((jobject)value);
      RelocationHolder rspec = oop_Relocation::spec(oop_index);

      __ relocate(rspec);
      __ patchable_set48(dst, (long)value);
    } else if ($src->constant_reloc() == relocInfo::none) {
      __ set64(dst, (long)value);
    }
    // NOTE(review): any other reloc type emits nothing here — presumably
    // unreachable for immP constants; confirm.
  %}

  ins_pipe( ialu_regI_regI );
%}
6747 /*
6748 instruct loadConP_load(mRegP dst, immP_load src) %{
6749 match(Set dst src);
6751 ins_cost(100);
6752 format %{ "ld $dst, [$constanttablebase + $constantoffset] load from constant table: ptr=$src @ loadConP_load" %}
6754 ins_encode %{
6756 int con_offset = $constantoffset($src);
6758 if (Assembler::is_simm16(con_offset)) {
6759 __ ld($dst$$Register, $constanttablebase, con_offset);
6760 } else {
6761 __ set64(AT, con_offset);
6762 if (UseLoongsonISA) {
6763 __ gsldx($dst$$Register, $constanttablebase, AT, 0);
6764 } else {
6765 __ daddu(AT, $constanttablebase, AT);
6766 __ ld($dst$$Register, AT, 0);
6767 }
6768 }
6769 %}
6771 ins_pipe(ialu_loadI);
6772 %}
6773 */
// Load Constant pointer that is neither an oop nor expensive to build:
// no relocation needed, plain set64.
instruct loadConP_no_oop_cheap(mRegP dst, immP_no_oop_cheap src) %{
  match(Set dst src);

  ins_cost(80);
  format %{ "li   $dst, $src @ loadConP_no_oop_cheap" %}

  ins_encode %{
    __ set64($dst$$Register, $src$$constant);
  %}

  ins_pipe(ialu_regI_regI);
%}
// Load the safepoint polling page address.
instruct loadConP_poll(mRegP dst, immP_poll src) %{
  match(Set dst src);

  ins_cost(50);
  format %{ "li   $dst, $src #@loadConP_poll" %}

  ins_encode %{
    Register dst = $dst$$Register;
    intptr_t value = (intptr_t)$src$$constant;

    __ set64(dst, (jlong)value);
  %}

  ins_pipe( ialu_regI_regI );
%}
// Load NULL pointer constant: copy the hardware zero register.
instruct loadConP0(mRegP dst, immP0 src)
%{
  match(Set dst src);

  ins_cost(50);
  format %{ "mov    $dst, R0\t# ptr" %}
  ins_encode %{
    Register dst_reg = $dst$$Register;
    __ daddu(dst_reg, R0, R0);
  %}
  ins_pipe( ialu_regI_regI );
%}
// Load compressed NULL constant: a narrow NULL encodes as 0.
instruct loadConN0(mRegN dst, immN0 src) %{
  match(Set dst src);
  format %{ "move    $dst, R0\t# compressed NULL ptr" %}
  ins_encode %{
    __ move($dst$$Register, R0);
  %}
  ins_pipe( ialu_regI_regI );
%}
// Load compressed oop constant: emits a relocatable narrow-oop sequence.
instruct loadConN(mRegN dst, immN src) %{
  match(Set dst src);

  ins_cost(125);
  format %{ "li    $dst, $src\t# compressed ptr @ loadConN" %}
  ins_encode %{
    Register dst = $dst$$Register;
    __ set_narrow_oop(dst, (jobject)$src$$constant);
  %}
  ins_pipe( ialu_regI_regI ); // XXX
%}
// Load compressed klass constant: emits a relocatable narrow-klass sequence.
instruct loadConNKlass(mRegN dst, immNKlass src) %{
  match(Set dst src);

  ins_cost(125);
  format %{ "li    $dst, $src\t# compressed klass ptr @ loadConNKlass" %}
  ins_encode %{
    Register dst = $dst$$Register;
    __ set_narrow_klass(dst, (Klass*)$src$$constant);
  %}
  ins_pipe( ialu_regI_regI ); // XXX
%}
6851 //FIXME
6852 // Tail Call; Jump from runtime stub to Java code.
6853 // Also known as an 'interprocedural jump'.
6854 // Target of jump will eventually return to caller.
6855 // TailJump below removes the return address.
// Tail Call; Jump from runtime stub to Java code.
// Also known as an 'interprocedural jump'.
// Target of jump will eventually return to caller.
// TailJump below removes the return address.
instruct TailCalljmpInd(mRegP jump_target, mRegP method_oop) %{
  match(TailCall jump_target method_oop );
  ins_cost(300);
  format %{ "JMP    $jump_target \t# @TailCalljmpInd" %}

  ins_encode %{
    Register target = $jump_target$$Register;
    Register    oop = $method_oop$$Register;

    // 2012/10/12 Jin: RA will be used in generate_forward_exception()
    __ push(RA);

    // S3 carries the method oop across the jump (callee convention).
    __ move(S3, oop);
    __ jr(target);
    __ nop();   // branch delay slot
  %}

  ins_pipe( pipe_jump );
%}
6876 // Create exception oop: created by stack-crawling runtime code.
6877 // Created exception is now available to this handler, and is setup
6878 // just prior to jumping to this handler. No code emitted.
// Create exception oop: created by stack-crawling runtime code.
// Created exception is now available to this handler, and is setup
// just prior to jumping to this handler. No code emitted.
instruct CreateException( a0_RegP ex_oop )
%{
  match(Set ex_oop (CreateEx));

  // use the following format syntax
  format %{ "# exception oop is in A0; no code emitted @CreateException" %}
  ins_encode %{
    // Jin: X86 leaves this function empty
    __ block_comment("CreateException is empty in X86/MIPS");
  %}
  ins_pipe( empty );
//  ins_pipe( pipe_jump );
%}
6894 /* 2012/9/14 Jin: The mechanism of exception handling is clear now.
6896 - Common try/catch:
6897 2012/9/14 Jin: [stubGenerator_mips.cpp] generate_forward_exception()
6898 |- V0, V1 are created
6899 |- T9 <= SharedRuntime::exception_handler_for_return_address
6900 `- jr T9
6901 `- the caller's exception_handler
6902 `- jr OptoRuntime::exception_blob
6903 `- here
6904 - Rethrow(e.g. 'unwind'):
6905 * The callee:
6906 |- an exception is triggered during execution
6907 `- exits the callee method through RethrowException node
6908 |- The callee pushes exception_oop(T0) and exception_pc(RA)
6909 `- The callee jumps to OptoRuntime::rethrow_stub()
6910 * In OptoRuntime::rethrow_stub:
6911 |- The VM calls _rethrow_Java to determine the return address in the caller method
6912 `- exits the stub with tailjmpInd
6913 |- pops exception_oop(V0) and exception_pc(V1)
6914 `- jumps to the return address(usually an exception_handler)
6915 * The caller:
6916 `- continues processing the exception_blob with V0/V1
6917 */
6919 /*
6920 Disassembling OptoRuntime::rethrow_stub()
6922 ; locals
6923 0x2d3bf320: addiu sp, sp, 0xfffffff8
6924 0x2d3bf324: sw ra, 0x4(sp)
6925 0x2d3bf328: sw fp, 0x0(sp)
6926 0x2d3bf32c: addu fp, sp, zero
6927 0x2d3bf330: addiu sp, sp, 0xfffffff0
6928 0x2d3bf334: sw ra, 0x8(sp)
6929 0x2d3bf338: sw t0, 0x4(sp)
6930 0x2d3bf33c: sw sp, 0x0(sp)
6932 ; get_thread(S2)
6933 0x2d3bf340: addu s2, sp, zero
6934 0x2d3bf344: srl s2, s2, 12
6935 0x2d3bf348: sll s2, s2, 2
6936 0x2d3bf34c: lui at, 0x2c85
6937 0x2d3bf350: addu at, at, s2
6938 0x2d3bf354: lw s2, 0xffffcc80(at)
6940 0x2d3bf358: lw s0, 0x0(sp)
6941 0x2d3bf35c: sw s0, 0x118(s2) // last_sp -> threa
6942 0x2d3bf360: sw s2, 0xc(sp)
6944 ; OptoRuntime::rethrow_C(oopDesc* exception, JavaThread* thread, address ret_pc)
6945 0x2d3bf364: lw a0, 0x4(sp)
6946 0x2d3bf368: lw a1, 0xc(sp)
6947 0x2d3bf36c: lw a2, 0x8(sp)
6948 ;; Java_To_Runtime
6949 0x2d3bf370: lui t9, 0x2c34
6950 0x2d3bf374: addiu t9, t9, 0xffff8a48
6951 0x2d3bf378: jalr t9
6952 0x2d3bf37c: nop
6954 0x2d3bf380: addu s3, v0, zero ; S3: SharedRuntime::raw_exception_handler_for_return_address()
6956 0x2d3bf384: lw s0, 0xc(sp)
6957 0x2d3bf388: sw zero, 0x118(s0)
6958 0x2d3bf38c: sw zero, 0x11c(s0)
6959 0x2d3bf390: lw s1, 0x144(s0) ; ex_oop: S1
6960 0x2d3bf394: addu s2, s0, zero
6961 0x2d3bf398: sw zero, 0x144(s2)
6962 0x2d3bf39c: lw s0, 0x4(s2)
6963 0x2d3bf3a0: addiu s4, zero, 0x0
6964 0x2d3bf3a4: bne s0, s4, 0x2d3bf3d4
6965 0x2d3bf3a8: nop
6966 0x2d3bf3ac: addiu sp, sp, 0x10
6967 0x2d3bf3b0: addiu sp, sp, 0x8
6968 0x2d3bf3b4: lw ra, 0xfffffffc(sp)
6969 0x2d3bf3b8: lw fp, 0xfffffff8(sp)
6970 0x2d3bf3bc: lui at, 0x2b48
6971 0x2d3bf3c0: lw at, 0x100(at)
6973 ; tailjmpInd: Restores exception_oop & exception_pc
6974 0x2d3bf3c4: addu v1, ra, zero
6975 0x2d3bf3c8: addu v0, s1, zero
6976 0x2d3bf3cc: jr s3
6977 0x2d3bf3d0: nop
6978 ; Exception:
6979 0x2d3bf3d4: lui s1, 0x2cc8 ; generate_forward_exception()
6980 0x2d3bf3d8: addiu s1, s1, 0x40
6981 0x2d3bf3dc: addiu s2, zero, 0x0
6982 0x2d3bf3e0: addiu sp, sp, 0x10
6983 0x2d3bf3e4: addiu sp, sp, 0x8
6984 0x2d3bf3e8: lw ra, 0xfffffffc(sp)
6985 0x2d3bf3ec: lw fp, 0xfffffff8(sp)
6986 0x2d3bf3f0: lui at, 0x2b48
6987 0x2d3bf3f4: lw at, 0x100(at)
6988 ; TailCalljmpInd
6989 __ push(RA); ; to be used in generate_forward_exception()
6990 0x2d3bf3f8: addu t7, s2, zero
6991 0x2d3bf3fc: jr s1
6992 0x2d3bf400: nop
6993 */
6994 // Rethrow exception:
6995 // The exception oop will come in the first argument position.
6996 // Then JUMP (not call) to the rethrow stub code.
// Rethrow exception:
// The exception oop will come in the first argument position.
// Then JUMP (not call) to the rethrow stub code.
instruct RethrowException()
%{
  match(Rethrow);

  // use the following format syntax
  format %{ "JMP    rethrow_stub #@RethrowException" %}
  ins_encode %{
    __ block_comment("@ RethrowException");

    // Record a runtime-call relocation so the jump target can be found
    // and patched by the code cache machinery.
    cbuf.set_insts_mark();
    cbuf.relocate(cbuf.insts_mark(), runtime_call_Relocation::spec());

    // call OptoRuntime::rethrow_stub to get the exception handler in parent method
    __ patchable_jump((address)OptoRuntime::rethrow_stub());
  %}
  ins_pipe( pipe_jump );
%}
// Conditional branch: pointer compared against NULL (EQ/NE only; the
// unsigned above/below forms are meaningless against NULL and were never
// enabled).  Emits beq/bne against the hardware zero register followed by
// a delay-slot nop.
//
// Fix: the original bound `Label &L = *($labl$$label)` and then tested
// `if (&L)`.  The address of a C++ reference is never NULL, so that test
// was always true (dead else-arm), and the dereference itself is undefined
// behavior when the label pointer is NULL.  Test the pointer before
// dereferencing instead; the emitted code for a bound label is unchanged.
instruct branchConP_zero(cmpOpU cmp, mRegP op1, immP0 zero, label labl) %{
  match(If cmp (CmpP op1 zero));
  effect(USE labl);

  ins_cost(180);
  format %{ "b$cmp   $op1, R0, $labl #@branchConP_zero" %}

  ins_encode %{
    Register op1 = $op1$$Register;
    Register op2 = R0;
    Label*     L = $labl$$label;      // may be NULL for a not-yet-bound target
    int     flag = $cmp$$cmpcode;

    switch(flag)
    {
      case 0x01: //equal
        if (L != NULL)
          __ beq(op1, op2, *L);
        else
          __ beq(op1, op2, (int)0);   // offset patched later
        break;
      case 0x02: //not_equal
        if (L != NULL)
          __ bne(op1, op2, *L);
        else
          __ bne(op1, op2, (int)0);   // offset patched later
        break;
      default:
        Unimplemented();
    }
    __ nop();   // branch delay slot
  %}

  ins_pc_relative(1);
  ins_pipe( pipe_alu_branch );
%}
7082 instruct branchConN2P_zero(cmpOpU cmp, mRegN op1, immP0 zero, label labl) %{
7083 match(If cmp (CmpP (DecodeN op1) zero));
7084 predicate(Universe::narrow_oop_base() == NULL && Universe::narrow_oop_shift() == 0);
7085 effect(USE labl);
7087 ins_cost(180);
7088 format %{ "b$cmp $op1, R0, $labl #@branchConN2P_zero" %}
7090 ins_encode %{
7091 Register op1 = $op1$$Register;
7092 Register op2 = R0;
7093 Label &L = *($labl$$label);
7094 int flag = $cmp$$cmpcode;
7096 switch(flag)
7097 {
7098 case 0x01: //equal
7099 if (&L)
7100 __ beq(op1, op2, L);
7101 else
7102 __ beq(op1, op2, (int)0);
7103 break;
7104 case 0x02: //not_equal
7105 if (&L)
7106 __ bne(op1, op2, L);
7107 else
7108 __ bne(op1, op2, (int)0);
7109 break;
7110 default:
7111 Unimplemented();
7112 }
7113 __ nop();
7114 %}
7116 ins_pc_relative(1);
7117 ins_pipe( pipe_alu_branch );
7118 %}
7121 instruct branchConP(cmpOpU cmp, mRegP op1, mRegP op2, label labl) %{
7122 match(If cmp (CmpP op1 op2));
7123 // predicate(can_branch_register(_kids[0]->_leaf, _kids[1]->_leaf));
7124 effect(USE labl);
7126 ins_cost(200);
7127 format %{ "b$cmp $op1, $op2, $labl #@branchConP" %}
7129 ins_encode %{
7130 Register op1 = $op1$$Register;
7131 Register op2 = $op2$$Register;
7132 Label &L = *($labl$$label);
7133 int flag = $cmp$$cmpcode;
7135 switch(flag)
7136 {
7137 case 0x01: //equal
7138 if (&L)
7139 __ beq(op1, op2, L);
7140 else
7141 __ beq(op1, op2, (int)0);
7142 break;
7143 case 0x02: //not_equal
7144 if (&L)
7145 __ bne(op1, op2, L);
7146 else
7147 __ bne(op1, op2, (int)0);
7148 break;
7149 case 0x03: //above
7150 __ sltu(AT, op2, op1);
7151 if(&L)
7152 __ bne(R0, AT, L);
7153 else
7154 __ bne(R0, AT, (int)0);
7155 break;
7156 case 0x04: //above_equal
7157 __ sltu(AT, op1, op2);
7158 if(&L)
7159 __ beq(AT, R0, L);
7160 else
7161 __ beq(AT, R0, (int)0);
7162 break;
7163 case 0x05: //below
7164 __ sltu(AT, op1, op2);
7165 if(&L)
7166 __ bne(R0, AT, L);
7167 else
7168 __ bne(R0, AT, (int)0);
7169 break;
7170 case 0x06: //below_equal
7171 __ sltu(AT, op2, op1);
7172 if(&L)
7173 __ beq(AT, R0, L);
7174 else
7175 __ beq(AT, R0, (int)0);
7176 break;
7177 default:
7178 Unimplemented();
7179 }
7180 __ nop();
7181 %}
7183 ins_pc_relative(1);
7184 ins_pipe( pipe_alu_branch );
7185 %}
7187 instruct cmpN_null_branch(cmpOp cmp, mRegN op1, immN0 null, label labl) %{
7188 match(If cmp (CmpN op1 null));
7189 effect(USE labl);
7191 ins_cost(180);
7192 format %{ "CMP $op1,0\t! compressed ptr\n\t"
7193 "BP$cmp $labl @ cmpN_null_branch" %}
7194 ins_encode %{
7195 Register op1 = $op1$$Register;
7196 Register op2 = R0;
7197 Label &L = *($labl$$label);
7198 int flag = $cmp$$cmpcode;
7200 switch(flag)
7201 {
7202 case 0x01: //equal
7203 if (&L)
7204 __ beq(op1, op2, L);
7205 else
7206 __ beq(op1, op2, (int)0);
7207 break;
7208 case 0x02: //not_equal
7209 if (&L)
7210 __ bne(op1, op2, L);
7211 else
7212 __ bne(op1, op2, (int)0);
7213 break;
7214 default:
7215 Unimplemented();
7216 }
7217 __ nop();
7218 %}
7219 //TODO: pipe_branchP or create pipe_branchN LEE
7220 ins_pc_relative(1);
7221 ins_pipe( pipe_alu_branch );
7222 %}
7224 instruct cmpN_reg_branch(cmpOp cmp, mRegN op1, mRegN op2, label labl) %{
7225 match(If cmp (CmpN op1 op2));
7226 effect(USE labl);
7228 ins_cost(180);
7229 format %{ "CMP $op1,$op2\t! compressed ptr\n\t"
7230 "BP$cmp $labl" %}
7231 ins_encode %{
7232 Register op1_reg = $op1$$Register;
7233 Register op2_reg = $op2$$Register;
7234 Label &L = *($labl$$label);
7235 int flag = $cmp$$cmpcode;
7237 switch(flag)
7238 {
7239 case 0x01: //equal
7240 if (&L)
7241 __ beq(op1_reg, op2_reg, L);
7242 else
7243 __ beq(op1_reg, op2_reg, (int)0);
7244 break;
7245 case 0x02: //not_equal
7246 if (&L)
7247 __ bne(op1_reg, op2_reg, L);
7248 else
7249 __ bne(op1_reg, op2_reg, (int)0);
7250 break;
7251 case 0x03: //above
7252 __ sltu(AT, op2_reg, op1_reg);
7253 if(&L)
7254 __ bne(R0, AT, L);
7255 else
7256 __ bne(R0, AT, (int)0);
7257 break;
7258 case 0x04: //above_equal
7259 __ sltu(AT, op1_reg, op2_reg);
7260 if(&L)
7261 __ beq(AT, R0, L);
7262 else
7263 __ beq(AT, R0, (int)0);
7264 break;
7265 case 0x05: //below
7266 __ sltu(AT, op1_reg, op2_reg);
7267 if(&L)
7268 __ bne(R0, AT, L);
7269 else
7270 __ bne(R0, AT, (int)0);
7271 break;
7272 case 0x06: //below_equal
7273 __ sltu(AT, op2_reg, op1_reg);
7274 if(&L)
7275 __ beq(AT, R0, L);
7276 else
7277 __ beq(AT, R0, (int)0);
7278 break;
7279 default:
7280 Unimplemented();
7281 }
7282 __ nop();
7283 %}
7284 ins_pc_relative(1);
7285 ins_pipe( pipe_alu_branch );
7286 %}
7288 instruct branchConIU_reg_reg(cmpOpU cmp, mRegI src1, mRegI src2, label labl) %{
7289 match( If cmp (CmpU src1 src2) );
7290 effect(USE labl);
7291 format %{ "BR$cmp $src1, $src2, $labl #@branchConIU_reg_reg" %}
7293 ins_encode %{
7294 Register op1 = $src1$$Register;
7295 Register op2 = $src2$$Register;
7296 Label &L = *($labl$$label);
7297 int flag = $cmp$$cmpcode;
7299 switch(flag)
7300 {
7301 case 0x01: //equal
7302 if (&L)
7303 __ beq(op1, op2, L);
7304 else
7305 __ beq(op1, op2, (int)0);
7306 break;
7307 case 0x02: //not_equal
7308 if (&L)
7309 __ bne(op1, op2, L);
7310 else
7311 __ bne(op1, op2, (int)0);
7312 break;
7313 case 0x03: //above
7314 __ sltu(AT, op2, op1);
7315 if(&L)
7316 __ bne(AT, R0, L);
7317 else
7318 __ bne(AT, R0, (int)0);
7319 break;
7320 case 0x04: //above_equal
7321 __ sltu(AT, op1, op2);
7322 if(&L)
7323 __ beq(AT, R0, L);
7324 else
7325 __ beq(AT, R0, (int)0);
7326 break;
7327 case 0x05: //below
7328 __ sltu(AT, op1, op2);
7329 if(&L)
7330 __ bne(AT, R0, L);
7331 else
7332 __ bne(AT, R0, (int)0);
7333 break;
7334 case 0x06: //below_equal
7335 __ sltu(AT, op2, op1);
7336 if(&L)
7337 __ beq(AT, R0, L);
7338 else
7339 __ beq(AT, R0, (int)0);
7340 break;
7341 default:
7342 Unimplemented();
7343 }
7344 __ nop();
7345 %}
7347 ins_pc_relative(1);
7348 ins_pipe( pipe_alu_branch );
7349 %}
7352 instruct branchConIU_reg_imm(cmpOpU cmp, mRegI src1, immI src2, label labl) %{
7353 match( If cmp (CmpU src1 src2) );
7354 effect(USE labl);
7355 format %{ "BR$cmp $src1, $src2, $labl #@branchConIU_reg_imm" %}
7357 ins_encode %{
7358 Register op1 = $src1$$Register;
7359 int val = $src2$$constant;
7360 Label &L = *($labl$$label);
7361 int flag = $cmp$$cmpcode;
7363 __ move(AT, val);
7364 switch(flag)
7365 {
7366 case 0x01: //equal
7367 if (&L)
7368 __ beq(op1, AT, L);
7369 else
7370 __ beq(op1, AT, (int)0);
7371 break;
7372 case 0x02: //not_equal
7373 if (&L)
7374 __ bne(op1, AT, L);
7375 else
7376 __ bne(op1, AT, (int)0);
7377 break;
7378 case 0x03: //above
7379 __ sltu(AT, AT, op1);
7380 if(&L)
7381 __ bne(R0, AT, L);
7382 else
7383 __ bne(R0, AT, (int)0);
7384 break;
7385 case 0x04: //above_equal
7386 __ sltu(AT, op1, AT);
7387 if(&L)
7388 __ beq(AT, R0, L);
7389 else
7390 __ beq(AT, R0, (int)0);
7391 break;
7392 case 0x05: //below
7393 __ sltu(AT, op1, AT);
7394 if(&L)
7395 __ bne(R0, AT, L);
7396 else
7397 __ bne(R0, AT, (int)0);
7398 break;
7399 case 0x06: //below_equal
7400 __ sltu(AT, AT, op1);
7401 if(&L)
7402 __ beq(AT, R0, L);
7403 else
7404 __ beq(AT, R0, (int)0);
7405 break;
7406 default:
7407 Unimplemented();
7408 }
7409 __ nop();
7410 %}
7412 ins_pc_relative(1);
7413 ins_pipe( pipe_alu_branch );
7414 %}
7416 instruct branchConI_reg_reg(cmpOp cmp, mRegI src1, mRegI src2, label labl) %{
7417 match( If cmp (CmpI src1 src2) );
7418 effect(USE labl);
7419 format %{ "BR$cmp $src1, $src2, $labl #@branchConI_reg_reg" %}
7421 ins_encode %{
7422 Register op1 = $src1$$Register;
7423 Register op2 = $src2$$Register;
7424 Label &L = *($labl$$label);
7425 int flag = $cmp$$cmpcode;
7427 switch(flag)
7428 {
7429 case 0x01: //equal
7430 if (&L)
7431 __ beq(op1, op2, L);
7432 else
7433 __ beq(op1, op2, (int)0);
7434 break;
7435 case 0x02: //not_equal
7436 if (&L)
7437 __ bne(op1, op2, L);
7438 else
7439 __ bne(op1, op2, (int)0);
7440 break;
7441 case 0x03: //above
7442 __ slt(AT, op2, op1);
7443 if(&L)
7444 __ bne(R0, AT, L);
7445 else
7446 __ bne(R0, AT, (int)0);
7447 break;
7448 case 0x04: //above_equal
7449 __ slt(AT, op1, op2);
7450 if(&L)
7451 __ beq(AT, R0, L);
7452 else
7453 __ beq(AT, R0, (int)0);
7454 break;
7455 case 0x05: //below
7456 __ slt(AT, op1, op2);
7457 if(&L)
7458 __ bne(R0, AT, L);
7459 else
7460 __ bne(R0, AT, (int)0);
7461 break;
7462 case 0x06: //below_equal
7463 __ slt(AT, op2, op1);
7464 if(&L)
7465 __ beq(AT, R0, L);
7466 else
7467 __ beq(AT, R0, (int)0);
7468 break;
7469 default:
7470 Unimplemented();
7471 }
7472 __ nop();
7473 %}
7475 ins_pc_relative(1);
7476 ins_pipe( pipe_alu_branch );
7477 %}
7479 instruct branchConI_reg_imm0(cmpOp cmp, mRegI src1, immI0 src2, label labl) %{
7480 match( If cmp (CmpI src1 src2) );
7481 effect(USE labl);
7482 ins_cost(170);
7483 format %{ "BR$cmp $src1, $src2, $labl #@branchConI_reg_imm0" %}
7485 ins_encode %{
7486 Register op1 = $src1$$Register;
7487 // int val = $src2$$constant;
7488 Label &L = *($labl$$label);
7489 int flag = $cmp$$cmpcode;
7491 //__ move(AT, val);
7492 switch(flag)
7493 {
7494 case 0x01: //equal
7495 if (&L)
7496 __ beq(op1, R0, L);
7497 else
7498 __ beq(op1, R0, (int)0);
7499 break;
7500 case 0x02: //not_equal
7501 if (&L)
7502 __ bne(op1, R0, L);
7503 else
7504 __ bne(op1, R0, (int)0);
7505 break;
7506 case 0x03: //greater
7507 if(&L)
7508 __ bgtz(op1, L);
7509 else
7510 __ bgtz(op1, (int)0);
7511 break;
7512 case 0x04: //greater_equal
7513 if(&L)
7514 __ bgez(op1, L);
7515 else
7516 __ bgez(op1, (int)0);
7517 break;
7518 case 0x05: //less
7519 if(&L)
7520 __ bltz(op1, L);
7521 else
7522 __ bltz(op1, (int)0);
7523 break;
7524 case 0x06: //less_equal
7525 if(&L)
7526 __ blez(op1, L);
7527 else
7528 __ blez(op1, (int)0);
7529 break;
7530 default:
7531 Unimplemented();
7532 }
7533 __ nop();
7534 %}
7536 ins_pc_relative(1);
7537 ins_pipe( pipe_alu_branch );
7538 %}
7541 instruct branchConI_reg_imm(cmpOp cmp, mRegI src1, immI src2, label labl) %{
7542 match( If cmp (CmpI src1 src2) );
7543 effect(USE labl);
7544 ins_cost(200);
7545 format %{ "BR$cmp $src1, $src2, $labl #@branchConI_reg_imm" %}
7547 ins_encode %{
7548 Register op1 = $src1$$Register;
7549 int val = $src2$$constant;
7550 Label &L = *($labl$$label);
7551 int flag = $cmp$$cmpcode;
7553 __ move(AT, val);
7554 switch(flag)
7555 {
7556 case 0x01: //equal
7557 if (&L)
7558 __ beq(op1, AT, L);
7559 else
7560 __ beq(op1, AT, (int)0);
7561 break;
7562 case 0x02: //not_equal
7563 if (&L)
7564 __ bne(op1, AT, L);
7565 else
7566 __ bne(op1, AT, (int)0);
7567 break;
7568 case 0x03: //greater
7569 __ slt(AT, AT, op1);
7570 if(&L)
7571 __ bne(R0, AT, L);
7572 else
7573 __ bne(R0, AT, (int)0);
7574 break;
7575 case 0x04: //greater_equal
7576 __ slt(AT, op1, AT);
7577 if(&L)
7578 __ beq(AT, R0, L);
7579 else
7580 __ beq(AT, R0, (int)0);
7581 break;
7582 case 0x05: //less
7583 __ slt(AT, op1, AT);
7584 if(&L)
7585 __ bne(R0, AT, L);
7586 else
7587 __ bne(R0, AT, (int)0);
7588 break;
7589 case 0x06: //less_equal
7590 __ slt(AT, AT, op1);
7591 if(&L)
7592 __ beq(AT, R0, L);
7593 else
7594 __ beq(AT, R0, (int)0);
7595 break;
7596 default:
7597 Unimplemented();
7598 }
7599 __ nop();
7600 %}
7602 ins_pc_relative(1);
7603 ins_pipe( pipe_alu_branch );
7604 %}
7606 instruct branchConIU_reg_imm0(cmpOpU cmp, mRegI src1, immI0 zero, label labl) %{
7607 match( If cmp (CmpU src1 zero) );
7608 effect(USE labl);
7609 format %{ "BR$cmp $src1, zero, $labl #@branchConIU_reg_imm0" %}
7611 ins_encode %{
7612 Register op1 = $src1$$Register;
7613 Label &L = *($labl$$label);
7614 int flag = $cmp$$cmpcode;
7616 switch(flag)
7617 {
7618 case 0x01: //equal
7619 if (&L)
7620 __ beq(op1, R0, L);
7621 else
7622 __ beq(op1, R0, (int)0);
7623 break;
7624 case 0x02: //not_equal
7625 if (&L)
7626 __ bne(op1, R0, L);
7627 else
7628 __ bne(op1, R0, (int)0);
7629 break;
7630 case 0x03: //above
7631 if(&L)
7632 __ bne(R0, op1, L);
7633 else
7634 __ bne(R0, op1, (int)0);
7635 break;
7636 case 0x04: //above_equal
7637 if(&L)
7638 __ beq(R0, R0, L);
7639 else
7640 __ beq(R0, R0, (int)0);
7641 break;
7642 case 0x05: //below
7643 return;
7644 break;
7645 case 0x06: //below_equal
7646 if(&L)
7647 __ beq(op1, R0, L);
7648 else
7649 __ beq(op1, R0, (int)0);
7650 break;
7651 default:
7652 Unimplemented();
7653 }
7654 __ nop();
7655 %}
7657 ins_pc_relative(1);
7658 ins_pipe( pipe_alu_branch );
7659 %}
7662 instruct branchConIU_reg_immI16(cmpOpU cmp, mRegI src1, immI16 src2, label labl) %{
7663 match( If cmp (CmpU src1 src2) );
7664 effect(USE labl);
7665 ins_cost(180);
7666 format %{ "BR$cmp $src1, $src2, $labl #@branchConIU_reg_immI16" %}
7668 ins_encode %{
7669 Register op1 = $src1$$Register;
7670 int val = $src2$$constant;
7671 Label &L = *($labl$$label);
7672 int flag = $cmp$$cmpcode;
7674 switch(flag)
7675 {
7676 case 0x01: //equal
7677 __ move(AT, val);
7678 if (&L)
7679 __ beq(op1, AT, L);
7680 else
7681 __ beq(op1, AT, (int)0);
7682 break;
7683 case 0x02: //not_equal
7684 __ move(AT, val);
7685 if (&L)
7686 __ bne(op1, AT, L);
7687 else
7688 __ bne(op1, AT, (int)0);
7689 break;
7690 case 0x03: //above
7691 __ move(AT, val);
7692 __ sltu(AT, AT, op1);
7693 if(&L)
7694 __ bne(R0, AT, L);
7695 else
7696 __ bne(R0, AT, (int)0);
7697 break;
7698 case 0x04: //above_equal
7699 __ sltiu(AT, op1, val);
7700 if(&L)
7701 __ beq(AT, R0, L);
7702 else
7703 __ beq(AT, R0, (int)0);
7704 break;
7705 case 0x05: //below
7706 __ sltiu(AT, op1, val);
7707 if(&L)
7708 __ bne(R0, AT, L);
7709 else
7710 __ bne(R0, AT, (int)0);
7711 break;
7712 case 0x06: //below_equal
7713 __ move(AT, val);
7714 __ sltu(AT, AT, op1);
7715 if(&L)
7716 __ beq(AT, R0, L);
7717 else
7718 __ beq(AT, R0, (int)0);
7719 break;
7720 default:
7721 Unimplemented();
7722 }
7723 __ nop();
7724 %}
7726 ins_pc_relative(1);
7727 ins_pipe( pipe_alu_branch );
7728 %}
7731 instruct branchConL_regL_regL(cmpOp cmp, mRegL src1, mRegL src2, label labl) %{
7732 match( If cmp (CmpL src1 src2) );
7733 effect(USE labl);
7734 format %{ "BR$cmp $src1, $src2, $labl #@branchConL_regL_regL" %}
7735 ins_cost(250);
7737 ins_encode %{
7738 Register opr1_reg = as_Register($src1$$reg);
7739 Register opr2_reg = as_Register($src2$$reg);
7741 Label &target = *($labl$$label);
7742 int flag = $cmp$$cmpcode;
7744 switch(flag)
7745 {
7746 case 0x01: //equal
7747 if (&target)
7748 __ beq(opr1_reg, opr2_reg, target);
7749 else
7750 __ beq(opr1_reg, opr2_reg, (int)0);
7751 __ delayed()->nop();
7752 break;
7754 case 0x02: //not_equal
7755 if(&target)
7756 __ bne(opr1_reg, opr2_reg, target);
7757 else
7758 __ bne(opr1_reg, opr2_reg, (int)0);
7759 __ delayed()->nop();
7760 break;
7762 case 0x03: //greater
7763 __ slt(AT, opr2_reg, opr1_reg);
7764 if(&target)
7765 __ bne(AT, R0, target);
7766 else
7767 __ bne(AT, R0, (int)0);
7768 __ delayed()->nop();
7769 break;
7771 case 0x04: //greater_equal
7772 __ slt(AT, opr1_reg, opr2_reg);
7773 if(&target)
7774 __ beq(AT, R0, target);
7775 else
7776 __ beq(AT, R0, (int)0);
7777 __ delayed()->nop();
7779 break;
7781 case 0x05: //less
7782 __ slt(AT, opr1_reg, opr2_reg);
7783 if(&target)
7784 __ bne(AT, R0, target);
7785 else
7786 __ bne(AT, R0, (int)0);
7787 __ delayed()->nop();
7789 break;
7791 case 0x06: //less_equal
7792 __ slt(AT, opr2_reg, opr1_reg);
7794 if(&target)
7795 __ beq(AT, R0, target);
7796 else
7797 __ beq(AT, R0, (int)0);
7798 __ delayed()->nop();
7800 break;
7802 default:
7803 Unimplemented();
7804 }
7805 %}
7808 ins_pc_relative(1);
7809 ins_pipe( pipe_alu_branch );
7810 %}
7812 instruct branchConL_reg_immL16_sub(cmpOp cmp, mRegL src1, immL16_sub src2, label labl) %{
7813 match( If cmp (CmpL src1 src2) );
7814 effect(USE labl);
7815 ins_cost(180);
7816 format %{ "BR$cmp $src1, $src2, $labl #@branchConL_reg_immL16_sub" %}
7818 ins_encode %{
7819 Register op1 = $src1$$Register;
7820 int val = $src2$$constant;
7821 Label &L = *($labl$$label);
7822 int flag = $cmp$$cmpcode;
7824 __ daddiu(AT, op1, -1 * val);
7825 switch(flag)
7826 {
7827 case 0x01: //equal
7828 if (&L)
7829 __ beq(R0, AT, L);
7830 else
7831 __ beq(R0, AT, (int)0);
7832 break;
7833 case 0x02: //not_equal
7834 if (&L)
7835 __ bne(R0, AT, L);
7836 else
7837 __ bne(R0, AT, (int)0);
7838 break;
7839 case 0x03: //greater
7840 if(&L)
7841 __ bgtz(AT, L);
7842 else
7843 __ bgtz(AT, (int)0);
7844 break;
7845 case 0x04: //greater_equal
7846 if(&L)
7847 __ bgez(AT, L);
7848 else
7849 __ bgez(AT, (int)0);
7850 break;
7851 case 0x05: //less
7852 if(&L)
7853 __ bltz(AT, L);
7854 else
7855 __ bltz(AT, (int)0);
7856 break;
7857 case 0x06: //less_equal
7858 if(&L)
7859 __ blez(AT, L);
7860 else
7861 __ blez(AT, (int)0);
7862 break;
7863 default:
7864 Unimplemented();
7865 }
7866 __ nop();
7867 %}
7869 ins_pc_relative(1);
7870 ins_pipe( pipe_alu_branch );
7871 %}
7874 instruct branchConI_reg_imm16_sub(cmpOp cmp, mRegI src1, immI16_sub src2, label labl) %{
7875 match( If cmp (CmpI src1 src2) );
7876 effect(USE labl);
7877 ins_cost(180);
7878 format %{ "BR$cmp $src1, $src2, $labl #@branchConI_reg_imm16_sub" %}
7880 ins_encode %{
7881 Register op1 = $src1$$Register;
7882 int val = $src2$$constant;
7883 Label &L = *($labl$$label);
7884 int flag = $cmp$$cmpcode;
7886 __ addiu32(AT, op1, -1 * val);
7887 switch(flag)
7888 {
7889 case 0x01: //equal
7890 if (&L)
7891 __ beq(R0, AT, L);
7892 else
7893 __ beq(R0, AT, (int)0);
7894 break;
7895 case 0x02: //not_equal
7896 if (&L)
7897 __ bne(R0, AT, L);
7898 else
7899 __ bne(R0, AT, (int)0);
7900 break;
7901 case 0x03: //greater
7902 if(&L)
7903 __ bgtz(AT, L);
7904 else
7905 __ bgtz(AT, (int)0);
7906 break;
7907 case 0x04: //greater_equal
7908 if(&L)
7909 __ bgez(AT, L);
7910 else
7911 __ bgez(AT, (int)0);
7912 break;
7913 case 0x05: //less
7914 if(&L)
7915 __ bltz(AT, L);
7916 else
7917 __ bltz(AT, (int)0);
7918 break;
7919 case 0x06: //less_equal
7920 if(&L)
7921 __ blez(AT, L);
7922 else
7923 __ blez(AT, (int)0);
7924 break;
7925 default:
7926 Unimplemented();
7927 }
7928 __ nop();
7929 %}
7931 ins_pc_relative(1);
7932 ins_pipe( pipe_alu_branch );
7933 %}
7935 instruct branchConL_regL_immL0(cmpOp cmp, mRegL src1, immL0 zero, label labl) %{
7936 match( If cmp (CmpL src1 zero) );
7937 effect(USE labl);
7938 format %{ "BR$cmp $src1, zero, $labl #@branchConL_regL_immL0" %}
7939 ins_cost(150);
7941 ins_encode %{
7942 Register opr1_reg = as_Register($src1$$reg);
7943 Label &target = *($labl$$label);
7944 int flag = $cmp$$cmpcode;
7946 switch(flag)
7947 {
7948 case 0x01: //equal
7949 if (&target)
7950 __ beq(opr1_reg, R0, target);
7951 else
7952 __ beq(opr1_reg, R0, int(0));
7953 break;
7955 case 0x02: //not_equal
7956 if(&target)
7957 __ bne(opr1_reg, R0, target);
7958 else
7959 __ bne(opr1_reg, R0, (int)0);
7960 break;
7962 case 0x03: //greater
7963 if(&target)
7964 __ bgtz(opr1_reg, target);
7965 else
7966 __ bgtz(opr1_reg, (int)0);
7967 break;
7969 case 0x04: //greater_equal
7970 if(&target)
7971 __ bgez(opr1_reg, target);
7972 else
7973 __ bgez(opr1_reg, (int)0);
7974 break;
7976 case 0x05: //less
7977 __ slt(AT, opr1_reg, R0);
7978 if(&target)
7979 __ bne(AT, R0, target);
7980 else
7981 __ bne(AT, R0, (int)0);
7982 break;
7984 case 0x06: //less_equal
7985 if (&target)
7986 __ blez(opr1_reg, target);
7987 else
7988 __ blez(opr1_reg, int(0));
7989 break;
7991 default:
7992 Unimplemented();
7993 }
7994 __ delayed()->nop();
7995 %}
7998 ins_pc_relative(1);
7999 ins_pipe( pipe_alu_branch );
8000 %}
8003 //FIXME
8004 instruct branchConF_reg_reg(cmpOp cmp, regF src1, regF src2, label labl) %{
8005 match( If cmp (CmpF src1 src2) );
8006 effect(USE labl);
8007 format %{ "BR$cmp $src1, $src2, $labl #@branchConF_reg_reg" %}
8009 ins_encode %{
8010 FloatRegister reg_op1 = $src1$$FloatRegister;
8011 FloatRegister reg_op2 = $src2$$FloatRegister;
8012 Label &L = *($labl$$label);
8013 int flag = $cmp$$cmpcode;
8015 switch(flag)
8016 {
8017 case 0x01: //equal
8018 __ c_eq_s(reg_op1, reg_op2);
8019 if (&L)
8020 __ bc1t(L);
8021 else
8022 __ bc1t((int)0);
8023 break;
8024 case 0x02: //not_equal
8025 __ c_eq_s(reg_op1, reg_op2);
8026 if (&L)
8027 __ bc1f(L);
8028 else
8029 __ bc1f((int)0);
8030 break;
8031 case 0x03: //greater
8032 __ c_ule_s(reg_op1, reg_op2);
8033 if(&L)
8034 __ bc1f(L);
8035 else
8036 __ bc1f((int)0);
8037 break;
8038 case 0x04: //greater_equal
8039 __ c_ult_s(reg_op1, reg_op2);
8040 if(&L)
8041 __ bc1f(L);
8042 else
8043 __ bc1f((int)0);
8044 break;
8045 case 0x05: //less
8046 __ c_ult_s(reg_op1, reg_op2);
8047 if(&L)
8048 __ bc1t(L);
8049 else
8050 __ bc1t((int)0);
8051 break;
8052 case 0x06: //less_equal
8053 __ c_ule_s(reg_op1, reg_op2);
8054 if(&L)
8055 __ bc1t(L);
8056 else
8057 __ bc1t((int)0);
8058 break;
8059 default:
8060 Unimplemented();
8061 }
8062 __ nop();
8063 %}
8065 ins_pc_relative(1);
8066 ins_pipe(pipe_slow);
8067 %}
8069 instruct branchConD_reg_reg(cmpOp cmp, regD src1, regD src2, label labl) %{
8070 match( If cmp (CmpD src1 src2) );
8071 effect(USE labl);
8072 format %{ "BR$cmp $src1, $src2, $labl #@branchConD_reg_reg" %}
8074 ins_encode %{
8075 FloatRegister reg_op1 = $src1$$FloatRegister;
8076 FloatRegister reg_op2 = $src2$$FloatRegister;
8077 Label &L = *($labl$$label);
8078 int flag = $cmp$$cmpcode;
8080 switch(flag)
8081 {
8082 case 0x01: //equal
8083 __ c_eq_d(reg_op1, reg_op2);
8084 if (&L)
8085 __ bc1t(L);
8086 else
8087 __ bc1t((int)0);
8088 break;
8089 case 0x02: //not_equal
8090 //2016/4/19 aoqi: c_ueq_d cannot distinguish NaN from equal. Double.isNaN(Double) is implemented by 'f != f', so the use of c_ueq_d causes bugs.
8091 __ c_eq_d(reg_op1, reg_op2);
8092 if (&L)
8093 __ bc1f(L);
8094 else
8095 __ bc1f((int)0);
8096 break;
8097 case 0x03: //greater
8098 __ c_ule_d(reg_op1, reg_op2);
8099 if(&L)
8100 __ bc1f(L);
8101 else
8102 __ bc1f((int)0);
8103 break;
8104 case 0x04: //greater_equal
8105 __ c_ult_d(reg_op1, reg_op2);
8106 if(&L)
8107 __ bc1f(L);
8108 else
8109 __ bc1f((int)0);
8110 break;
8111 case 0x05: //less
8112 __ c_ult_d(reg_op1, reg_op2);
8113 if(&L)
8114 __ bc1t(L);
8115 else
8116 __ bc1t((int)0);
8117 break;
8118 case 0x06: //less_equal
8119 __ c_ule_d(reg_op1, reg_op2);
8120 if(&L)
8121 __ bc1t(L);
8122 else
8123 __ bc1t((int)0);
8124 break;
8125 default:
8126 Unimplemented();
8127 }
8128 __ nop();
8129 %}
8131 ins_pc_relative(1);
8132 ins_pipe(pipe_slow);
8133 %}
8136 // Call Runtime Instruction
// Call Runtime Instruction
// Direct call into the VM runtime; encoding is shared via Java_To_Runtime.
instruct CallRuntimeDirect(method meth) %{
  match(CallRuntime );
  effect(USE meth);

  ins_cost(300);
  format %{ "CALL,runtime #@CallRuntimeDirect" %}
  ins_encode( Java_To_Runtime( meth ) );
  ins_pipe( pipe_slow );
  ins_alignment(16);
%}
8150 //------------------------MemBar Instructions-------------------------------
8151 //Memory barrier flavors
// MemBarAcquire: empty encoding on this port (size(0), no instructions).
instruct membar_acquire() %{
  match(MemBarAcquire);
  ins_cost(0);

  size(0);
  format %{ "MEMBAR-acquire (empty) @ membar_acquire" %}
  ins_encode();
  ins_pipe(empty);
%}
// LoadFence: emits a full SYNC barrier.
instruct load_fence() %{
  match(LoadFence);
  ins_cost(400);

  format %{ "MEMBAR @ load_fence" %}
  ins_encode %{
    __ sync();
  %}
  ins_pipe(pipe_slow);
%}
// MemBarAcquireLock: empty — the acquire is part of the CAS in the
// preceding FastLock (per the format string below).
instruct membar_acquire_lock()
%{
  match(MemBarAcquireLock);
  ins_cost(0);

  size(0);
  format %{ "MEMBAR-acquire (acquire as part of CAS in prior FastLock so empty encoding) @ membar_acquire_lock" %}
  ins_encode();
  ins_pipe(empty);
%}
// MemBarRelease: emits a full SYNC barrier.
instruct membar_release() %{
  match(MemBarRelease);
  ins_cost(400);

  format %{ "MEMBAR-release @ membar_release" %}

  ins_encode %{
    // Attention: DO NOT DELETE THIS GUY!
    __ sync();
  %}

  ins_pipe(pipe_slow);
%}
// StoreFence: emits a full SYNC barrier.
instruct store_fence() %{
  match(StoreFence);
  ins_cost(400);

  format %{ "MEMBAR @ store_fence" %}

  ins_encode %{
    __ sync();
  %}

  ins_pipe(pipe_slow);
%}
// MemBarReleaseLock: empty — the release is part of the preceding
// FastUnlock (per the format string below).
instruct membar_release_lock()
%{
  match(MemBarReleaseLock);
  ins_cost(0);

  size(0);
  format %{ "MEMBAR-release-lock (release in FastUnlock so empty) @ membar_release_lock" %}
  ins_encode();
  ins_pipe(empty);
%}
// MemBarVolatile: SYNC on MP systems; elided entirely on a single CPU.
instruct membar_volatile() %{
  match(MemBarVolatile);
  ins_cost(400);

  format %{ "MEMBAR-volatile" %}
  ins_encode %{
    if( !os::is_MP() ) return;     // Not needed on single CPU
    __ sync();

  %}
  ins_pipe(pipe_slow);
%}
// MemBarVolatile that a prior store-load barrier already covers
// (Matcher::post_store_load_barrier) — matched at zero cost, emits nothing.
instruct unnecessary_membar_volatile() %{
  match(MemBarVolatile);
  predicate(Matcher::post_store_load_barrier(n));
  ins_cost(0);

  size(0);
  format %{ "MEMBAR-volatile (unnecessary so empty encoding) @ unnecessary_membar_volatile" %}
  ins_encode( );
  ins_pipe(empty);
%}
// MemBarStoreStore: empty encoding on this port (size(0), no instructions).
instruct membar_storestore() %{
  match(MemBarStoreStore);

  ins_cost(0);
  size(0);
  format %{ "MEMBAR-storestore (empty encoding) @ membar_storestore" %}
  ins_encode( );
  ins_pipe(empty);
%}
8258 //----------Move Instructions--------------------------------------------------
// CastX2P: reinterpret a long as a pointer — just a register move,
// elided when the allocator assigned src and dst the same register.
instruct castX2P(mRegP dst, mRegL src) %{
  match(Set dst (CastX2P src));
  format %{ "castX2P $dst, $src @ castX2P" %}
  ins_encode %{
    Register src = $src$$Register;
    Register dst = $dst$$Register;

    if(src != dst)
      __ move(dst, src);
  %}
  ins_cost(10);
  ins_pipe( ialu_regI_mov );
%}
// CastP2X: reinterpret a pointer as a long — just a register move,
// elided when the allocator assigned src and dst the same register.
instruct castP2X(mRegL dst, mRegP src ) %{
  match(Set dst (CastP2X src));

  format %{ "mov $dst, $src\t #@castP2X" %}
  ins_encode %{
    Register src = $src$$Register;
    Register dst = $dst$$Register;

    if(src != dst)
      __ move(dst, src);
  %}
  ins_pipe( ialu_regI_mov );
%}
// MoveF2I: raw 32-bit bit copy from an FPU register to a GPR via MFC1.
instruct MoveF2I_reg_reg(mRegI dst, regF src) %{
  match(Set dst (MoveF2I src));
  effect(DEF dst, USE src);
  ins_cost(85);
  format %{ "MoveF2I $dst, $src @ MoveF2I_reg_reg" %}
  ins_encode %{
    Register dst = as_Register($dst$$reg);
    FloatRegister src = as_FloatRegister($src$$reg);

    __ mfc1(dst, src);
  %}
  ins_pipe( pipe_slow );
%}
// MoveI2F: raw 32-bit bit copy from a GPR to an FPU register via MTC1.
instruct MoveI2F_reg_reg(regF dst, mRegI src) %{
  match(Set dst (MoveI2F src));
  effect(DEF dst, USE src);
  ins_cost(85);
  format %{ "MoveI2F $dst, $src @ MoveI2F_reg_reg" %}
  ins_encode %{
    Register src = as_Register($src$$reg);
    FloatRegister dst = as_FloatRegister($dst$$reg);

    __ mtc1(src, dst);
  %}
  ins_pipe( pipe_slow );
%}
// MoveD2L: raw 64-bit bit copy from an FPU register to a GPR via DMFC1.
instruct MoveD2L_reg_reg(mRegL dst, regD src) %{
  match(Set dst (MoveD2L src));
  effect(DEF dst, USE src);
  ins_cost(85);
  format %{ "MoveD2L $dst, $src @ MoveD2L_reg_reg" %}
  ins_encode %{
    Register dst = as_Register($dst$$reg);
    FloatRegister src = as_FloatRegister($src$$reg);

    __ dmfc1(dst, src);
  %}
  ins_pipe( pipe_slow );
%}
// MoveL2D: raw 64-bit bit copy from a GPR to an FPU register via DMTC1.
instruct MoveL2D_reg_reg(regD dst, mRegL src) %{
  match(Set dst (MoveL2D src));
  effect(DEF dst, USE src);
  ins_cost(85);
  format %{ "MoveL2D $dst, $src @ MoveL2D_reg_reg" %}
  ins_encode %{
    FloatRegister dst = as_FloatRegister($dst$$reg);
    Register src = as_Register($src$$reg);

    __ dmtc1(src, dst);
  %}
  ins_pipe( pipe_slow );
%}
8343 //----------Conditional Move---------------------------------------------------
8344 // Conditional move
// Conditional move of an int selected by a signed int compare.
// Pattern: compute the condition into AT (subu32 difference, or slt),
// then MOVZ (move if AT == 0) / MOVN (move if AT != 0) branch-free.
instruct cmovI_cmpI_reg_reg(mRegI dst, mRegI src, mRegI tmp1, mRegI tmp2, cmpOp cop ) %{
  match(Set dst (CMoveI (Binary cop (CmpI tmp1 tmp2)) (Binary dst src)));
  ins_cost(80);
  format %{
             "CMP$cop $tmp1, $tmp2\t @cmovI_cmpI_reg_reg\n"
             "\tCMOV $dst,$src \t @cmovI_cmpI_reg_reg"
         %}

  ins_encode %{
    Register op1 = $tmp1$$Register;
    Register op2 = $tmp2$$Register;
    Register dst = $dst$$Register;
    Register src = $src$$Register;
    int     flag = $cop$$cmpcode;

    switch(flag)
    {
      case 0x01: //equal
        __ subu32(AT, op1, op2);   // AT == 0 iff op1 == op2
        __ movz(dst, src, AT);
        break;

      case 0x02: //not_equal
        __ subu32(AT, op1, op2);
        __ movn(dst, src, AT);
        break;

      case 0x03: //great
        __ slt(AT, op2, op1);      // signed compare into AT
        __ movn(dst, src, AT);
        break;

      case 0x04: //great_equal
        __ slt(AT, op1, op2);
        __ movz(dst, src, AT);
        break;

      case 0x05: //less
        __ slt(AT, op1, op2);
        __ movn(dst, src, AT);
        break;

      case 0x06: //less_equal
        __ slt(AT, op2, op1);
        __ movz(dst, src, AT);
        break;

      default:
        Unimplemented();
    }
  %}

  ins_pipe( pipe_slow );
%}
// Conditional move of an int selected by an unsigned pointer compare.
// Condition goes into AT (64-bit subu for eq/ne, sltu for ordered cases),
// then MOVZ/MOVN performs the branch-free select.
instruct cmovI_cmpP_reg_reg(mRegI dst, mRegI src, mRegP tmp1, mRegP tmp2, cmpOpU cop ) %{
  match(Set dst (CMoveI (Binary cop (CmpP tmp1 tmp2)) (Binary dst src)));
  ins_cost(80);
  format %{
             "CMPU$cop $tmp1,$tmp2\t @cmovI_cmpP_reg_reg\n\t"
             "CMOV $dst,$src\t @cmovI_cmpP_reg_reg"
         %}
  ins_encode %{
    Register op1 = $tmp1$$Register;
    Register op2 = $tmp2$$Register;
    Register dst = $dst$$Register;
    Register src = $src$$Register;
    int     flag = $cop$$cmpcode;

    switch(flag)
    {
      case 0x01: //equal
        __ subu(AT, op1, op2);     // full-width subtract for pointers
        __ movz(dst, src, AT);
        break;

      case 0x02: //not_equal
        __ subu(AT, op1, op2);
        __ movn(dst, src, AT);
        break;

      case 0x03: //above
        __ sltu(AT, op2, op1);
        __ movn(dst, src, AT);
        break;

      case 0x04: //above_equal
        __ sltu(AT, op1, op2);
        __ movz(dst, src, AT);
        break;

      case 0x05: //below
        __ sltu(AT, op1, op2);
        __ movn(dst, src, AT);
        break;

      case 0x06: //below_equal
        __ sltu(AT, op2, op1);
        __ movz(dst, src, AT);
        break;

      default:
        Unimplemented();
    }
  %}

  ins_pipe( pipe_slow );
%}
// Conditional move of an int selected by an unsigned narrow-oop compare.
// 32-bit subu32 for eq/ne (narrow oops are 32-bit), sltu for ordered cases;
// MOVZ/MOVN performs the branch-free select on AT.
instruct cmovI_cmpN_reg_reg(mRegI dst, mRegI src, mRegN tmp1, mRegN tmp2, cmpOpU cop ) %{
  match(Set dst (CMoveI (Binary cop (CmpN tmp1 tmp2)) (Binary dst src)));
  ins_cost(80);
  format %{
             "CMPU$cop $tmp1,$tmp2\t @cmovI_cmpN_reg_reg\n\t"
             "CMOV $dst,$src\t @cmovI_cmpN_reg_reg"
         %}
  ins_encode %{
    Register op1 = $tmp1$$Register;
    Register op2 = $tmp2$$Register;
    Register dst = $dst$$Register;
    Register src = $src$$Register;
    int     flag = $cop$$cmpcode;

    switch(flag)
    {
      case 0x01: //equal
        __ subu32(AT, op1, op2);
        __ movz(dst, src, AT);
        break;

      case 0x02: //not_equal
        __ subu32(AT, op1, op2);
        __ movn(dst, src, AT);
        break;

      case 0x03: //above
        __ sltu(AT, op2, op1);
        __ movn(dst, src, AT);
        break;

      case 0x04: //above_equal
        __ sltu(AT, op1, op2);
        __ movz(dst, src, AT);
        break;

      case 0x05: //below
        __ sltu(AT, op1, op2);
        __ movn(dst, src, AT);
        break;

      case 0x06: //below_equal
        __ sltu(AT, op2, op1);
        __ movz(dst, src, AT);
        break;

      default:
        Unimplemented();
    }
  %}

  ins_pipe( pipe_slow );
%}
// Conditional move of a pointer selected by an unsigned narrow-oop compare.
// Same AT + MOVZ/MOVN pattern as cmovI_cmpN_reg_reg.
instruct cmovP_cmpN_reg_reg(mRegP dst, mRegP src, mRegN tmp1, mRegN tmp2, cmpOpU cop ) %{
  match(Set dst (CMoveP (Binary cop (CmpN tmp1 tmp2)) (Binary dst src)));
  ins_cost(80);
  format %{
             "CMPU$cop $tmp1,$tmp2\t @cmovP_cmpN_reg_reg\n\t"
             "CMOV $dst,$src\t @cmovP_cmpN_reg_reg"
         %}
  ins_encode %{
    Register op1 = $tmp1$$Register;
    Register op2 = $tmp2$$Register;
    Register dst = $dst$$Register;
    Register src = $src$$Register;
    int     flag = $cop$$cmpcode;

    switch(flag)
    {
      case 0x01: //equal
        __ subu32(AT, op1, op2);
        __ movz(dst, src, AT);
        break;

      case 0x02: //not_equal
        __ subu32(AT, op1, op2);
        __ movn(dst, src, AT);
        break;

      case 0x03: //above
        __ sltu(AT, op2, op1);
        __ movn(dst, src, AT);
        break;

      case 0x04: //above_equal
        __ sltu(AT, op1, op2);
        __ movz(dst, src, AT);
        break;

      case 0x05: //below
        __ sltu(AT, op1, op2);
        __ movn(dst, src, AT);
        break;

      case 0x06: //below_equal
        __ sltu(AT, op2, op1);
        __ movz(dst, src, AT);
        break;

      default:
        Unimplemented();
    }
  %}

  ins_pipe( pipe_slow );
%}
// Conditional move of a narrow-oop register predicated on an unsigned compare of
// two full-width pointer registers: if cop(tmp1, tmp2) then dst = src.
// Pointers are 64-bit, hence the full-width subu (equality) and sltu (ordering).
// Clobbers AT.
instruct cmovN_cmpP_reg_reg(mRegN dst, mRegN src, mRegP tmp1, mRegP tmp2, cmpOpU cop ) %{
  match(Set dst (CMoveN (Binary cop (CmpP tmp1 tmp2)) (Binary dst src)));
  ins_cost(80);
  format %{
           "CMPU$cop $tmp1,$tmp2\t @cmovN_cmpP_reg_reg\n\t"
           "CMOV $dst,$src\t @cmovN_cmpP_reg_reg"
         %}
  ins_encode %{
    Register op1 = $tmp1$$Register;
    Register op2 = $tmp2$$Register;
    Register dst = $dst$$Register;
    Register src = $src$$Register;
    int     flag = $cop$$cmpcode;

    switch(flag)
    {
      case 0x01: //equal
        __ subu(AT, op1, op2);     // AT == 0 iff op1 == op2
        __ movz(dst, src, AT);
        break;

      case 0x02: //not_equal
        __ subu(AT, op1, op2);
        __ movn(dst, src, AT);
        break;

      case 0x03: //above
        __ sltu(AT, op2, op1);
        __ movn(dst, src, AT);
        break;

      case 0x04: //above_equal
        __ sltu(AT, op1, op2);
        __ movz(dst, src, AT);
        break;

      case 0x05: //below
        __ sltu(AT, op1, op2);
        __ movn(dst, src, AT);
        break;

      case 0x06: //below_equal
        __ sltu(AT, op2, op1);
        __ movz(dst, src, AT);
        break;

      default:
        Unimplemented();
    }
  %}

  ins_pipe( pipe_slow );
%}
// Conditional move of a pointer register predicated on a double-FP compare:
// if cop(tmp1, tmp2) then dst = src.  Each c.cond.d sets the FP condition bit,
// then movt/movf move on that bit being set/clear.
// NOTE(review): the ordered/unordered variants (c_ole vs c_ule, c_olt vs c_ult)
// encode the desired NaN behavior per branch sense — confirm against
// branchConD_reg_reg if this ever changes.  Clobbers the FP condition flag.
instruct cmovP_cmpD_reg_reg(mRegP dst, mRegP src, regD tmp1, regD tmp2, cmpOp cop ) %{
  match(Set dst (CMoveP (Binary cop (CmpD tmp1 tmp2)) (Binary dst src)));
  ins_cost(80);
  format %{
           "CMP$cop $tmp1, $tmp2\t @cmovP_cmpD_reg_reg\n"
           "\tCMOV $dst,$src \t @cmovP_cmpD_reg_reg"
         %}
  ins_encode %{
    FloatRegister reg_op1 = as_FloatRegister($tmp1$$reg);
    FloatRegister reg_op2 = as_FloatRegister($tmp2$$reg);
    Register dst = as_Register($dst$$reg);
    Register src = as_Register($src$$reg);

    int     flag = $cop$$cmpcode;

    switch(flag)
    {
      case 0x01: //equal
        __ c_eq_d(reg_op1, reg_op2);
        __ movt(dst, src);
        break;
      case 0x02: //not_equal
        __ c_eq_d(reg_op1, reg_op2);
        __ movf(dst, src);
        break;
      case 0x03: //greater
        __ c_ole_d(reg_op1, reg_op2);
        __ movf(dst, src);
        break;
      case 0x04: //greater_equal
        __ c_olt_d(reg_op1, reg_op2);
        __ movf(dst, src);
        break;
      case 0x05: //less
        __ c_ult_d(reg_op1, reg_op2);
        __ movt(dst, src);
        break;
      case 0x06: //less_equal
        __ c_ule_d(reg_op1, reg_op2);
        __ movt(dst, src);
        break;
      default:
        Unimplemented();
    }
  %}

  ins_pipe( pipe_slow );
%}
// Conditional move of a narrow-oop register predicated on an unsigned compare of
// two narrow-oop registers: if cop(tmp1, tmp2) then dst = src.
// Narrow oops are 32-bit zero-extended: subu32 for equality, sltu for ordering.
// Clobbers AT.
instruct cmovN_cmpN_reg_reg(mRegN dst, mRegN src, mRegN tmp1, mRegN tmp2, cmpOpU cop ) %{
  match(Set dst (CMoveN (Binary cop (CmpN tmp1 tmp2)) (Binary dst src)));
  ins_cost(80);
  format %{
           "CMPU$cop $tmp1,$tmp2\t @cmovN_cmpN_reg_reg\n\t"
           "CMOV $dst,$src\t @cmovN_cmpN_reg_reg"
         %}
  ins_encode %{
    Register op1 = $tmp1$$Register;
    Register op2 = $tmp2$$Register;
    Register dst = $dst$$Register;
    Register src = $src$$Register;
    int     flag = $cop$$cmpcode;

    switch(flag)
    {
      case 0x01: //equal
        __ subu32(AT, op1, op2);
        __ movz(dst, src, AT);
        break;

      case 0x02: //not_equal
        __ subu32(AT, op1, op2);
        __ movn(dst, src, AT);
        break;

      case 0x03: //above
        __ sltu(AT, op2, op1);
        __ movn(dst, src, AT);
        break;

      case 0x04: //above_equal
        __ sltu(AT, op1, op2);
        __ movz(dst, src, AT);
        break;

      case 0x05: //below
        __ sltu(AT, op1, op2);
        __ movn(dst, src, AT);
        break;

      case 0x06: //below_equal
        __ sltu(AT, op2, op1);
        __ movz(dst, src, AT);
        break;

      default:
        Unimplemented();
    }
  %}

  ins_pipe( pipe_slow );
%}
// Conditional move of an int register predicated on an unsigned 32-bit compare:
// if cop(tmp1, tmp2) then dst = src.
// Ints live sign-extended in 64-bit registers; sign extension preserves the
// 32-bit unsigned order under a 64-bit sltu, and the subu difference is zero
// iff the operands are equal, so both tests are valid.  Clobbers AT.
instruct cmovI_cmpU_reg_reg(mRegI dst, mRegI src, mRegI tmp1, mRegI tmp2, cmpOpU cop ) %{
  match(Set dst (CMoveI (Binary cop (CmpU tmp1 tmp2)) (Binary dst src)));
  ins_cost(80);
  format %{
           "CMPU$cop $tmp1,$tmp2\t @cmovI_cmpU_reg_reg\n\t"
           "CMOV $dst,$src\t @cmovI_cmpU_reg_reg"
         %}
  ins_encode %{
    Register op1 = $tmp1$$Register;
    Register op2 = $tmp2$$Register;
    Register dst = $dst$$Register;
    Register src = $src$$Register;
    int     flag = $cop$$cmpcode;

    switch(flag)
    {
      case 0x01: //equal
        __ subu(AT, op1, op2);
        __ movz(dst, src, AT);
        break;

      case 0x02: //not_equal
        __ subu(AT, op1, op2);
        __ movn(dst, src, AT);
        break;

      case 0x03: //above
        __ sltu(AT, op2, op1);
        __ movn(dst, src, AT);
        break;

      case 0x04: //above_equal
        __ sltu(AT, op1, op2);
        __ movz(dst, src, AT);
        break;

      case 0x05: //below
        __ sltu(AT, op1, op2);
        __ movn(dst, src, AT);
        break;

      case 0x06: //below_equal
        __ sltu(AT, op2, op1);
        __ movz(dst, src, AT);
        break;

      default:
        Unimplemented();
    }
  %}

  ins_pipe( pipe_slow );
%}
// Conditional move of an int register predicated on a signed 64-bit (long)
// compare: if cop(tmp1, tmp2) then dst = src.
// slt performs the signed ordering; the subu difference is zero iff equal
// (overflow cannot make it zero for unequal operands).  Clobbers AT.
instruct cmovI_cmpL_reg_reg(mRegI dst, mRegI src, mRegL tmp1, mRegL tmp2, cmpOp cop ) %{
  match(Set dst (CMoveI (Binary cop (CmpL tmp1 tmp2)) (Binary dst src)));
  ins_cost(80);
  format %{
           "CMP$cop $tmp1, $tmp2\t @cmovI_cmpL_reg_reg\n"
           "\tCMOV $dst,$src \t @cmovI_cmpL_reg_reg"
         %}
  ins_encode %{
    Register opr1 = as_Register($tmp1$$reg);
    Register opr2 = as_Register($tmp2$$reg);
    Register dst  = $dst$$Register;
    Register src  = $src$$Register;
    int     flag  = $cop$$cmpcode;

    switch(flag)
    {
      case 0x01: //equal
        __ subu(AT, opr1, opr2);
        __ movz(dst, src, AT);
        break;

      case 0x02: //not_equal
        __ subu(AT, opr1, opr2);
        __ movn(dst, src, AT);
        break;

      case 0x03: //greater
        __ slt(AT, opr2, opr1);
        __ movn(dst, src, AT);
        break;

      case 0x04: //greater_equal
        __ slt(AT, opr1, opr2);
        __ movz(dst, src, AT);
        break;

      case 0x05: //less
        __ slt(AT, opr1, opr2);
        __ movn(dst, src, AT);
        break;

      case 0x06: //less_equal
        __ slt(AT, opr2, opr1);
        __ movz(dst, src, AT);
        break;

      default:
        Unimplemented();
    }
  %}

  ins_pipe( pipe_slow );
%}
// Conditional move of a pointer register predicated on a signed 64-bit (long)
// compare: if cop(tmp1, tmp2) then dst = src.  Same scheme as
// cmovI_cmpL_reg_reg: slt for ordering, subu-is-zero for equality.  Clobbers AT.
instruct cmovP_cmpL_reg_reg(mRegP dst, mRegP src, mRegL tmp1, mRegL tmp2, cmpOp cop ) %{
  match(Set dst (CMoveP (Binary cop (CmpL tmp1 tmp2)) (Binary dst src)));
  ins_cost(80);
  format %{
           "CMP$cop $tmp1, $tmp2\t @cmovP_cmpL_reg_reg\n"
           "\tCMOV $dst,$src \t @cmovP_cmpL_reg_reg"
         %}
  ins_encode %{
    Register opr1 = as_Register($tmp1$$reg);
    Register opr2 = as_Register($tmp2$$reg);
    Register dst  = $dst$$Register;
    Register src  = $src$$Register;
    int     flag  = $cop$$cmpcode;

    switch(flag)
    {
      case 0x01: //equal
        __ subu(AT, opr1, opr2);
        __ movz(dst, src, AT);
        break;

      case 0x02: //not_equal
        __ subu(AT, opr1, opr2);
        __ movn(dst, src, AT);
        break;

      case 0x03: //greater
        __ slt(AT, opr2, opr1);
        __ movn(dst, src, AT);
        break;

      case 0x04: //greater_equal
        __ slt(AT, opr1, opr2);
        __ movz(dst, src, AT);
        break;

      case 0x05: //less
        __ slt(AT, opr1, opr2);
        __ movn(dst, src, AT);
        break;

      case 0x06: //less_equal
        __ slt(AT, opr2, opr1);
        __ movz(dst, src, AT);
        break;

      default:
        Unimplemented();
    }
  %}

  ins_pipe( pipe_slow );
%}
// Conditional move of an int register predicated on a double-FP compare:
// if cop(tmp1, tmp2) then dst = src.  c.cond.d sets the FP condition bit;
// movt/movf move on that bit set/clear.  Clobbers the FP condition flag.
instruct cmovI_cmpD_reg_reg(mRegI dst, mRegI src, regD tmp1, regD tmp2, cmpOp cop ) %{
  match(Set dst (CMoveI (Binary cop (CmpD tmp1 tmp2)) (Binary dst src)));
  ins_cost(80);
  format %{
           "CMP$cop $tmp1, $tmp2\t @cmovI_cmpD_reg_reg\n"
           "\tCMOV $dst,$src \t @cmovI_cmpD_reg_reg"
         %}
  ins_encode %{
    FloatRegister reg_op1 = as_FloatRegister($tmp1$$reg);
    FloatRegister reg_op2 = as_FloatRegister($tmp2$$reg);
    Register dst = as_Register($dst$$reg);
    Register src = as_Register($src$$reg);

    int     flag = $cop$$cmpcode;

    switch(flag)
    {
      case 0x01: //equal
        __ c_eq_d(reg_op1, reg_op2);
        __ movt(dst, src);
        break;
      case 0x02: //not_equal
        // 2016/4/19 aoqi: mirrors the fix applied in branchConD_reg_reg
        // (use c_eq + movf rather than an unordered-inclusive condition).
        __ c_eq_d(reg_op1, reg_op2);
        __ movf(dst, src);
        break;
      case 0x03: //greater
        __ c_ole_d(reg_op1, reg_op2);
        __ movf(dst, src);
        break;
      case 0x04: //greater_equal
        __ c_olt_d(reg_op1, reg_op2);
        __ movf(dst, src);
        break;
      case 0x05: //less
        __ c_ult_d(reg_op1, reg_op2);
        __ movt(dst, src);
        break;
      case 0x06: //less_equal
        __ c_ule_d(reg_op1, reg_op2);
        __ movt(dst, src);
        break;
      default:
        Unimplemented();
    }
  %}

  ins_pipe( pipe_slow );
%}
// Conditional move of a pointer register predicated on an unsigned compare of
// two full-width pointer registers: if cop(tmp1, tmp2) then dst = src.
// 64-bit subu for equality, sltu for the unsigned ordering.  Clobbers AT.
instruct cmovP_cmpP_reg_reg(mRegP dst, mRegP src, mRegP tmp1, mRegP tmp2, cmpOpU cop ) %{
  match(Set dst (CMoveP (Binary cop (CmpP tmp1 tmp2)) (Binary dst src)));
  ins_cost(80);
  format %{
           "CMPU$cop $tmp1,$tmp2\t @cmovP_cmpP_reg_reg\n\t"
           "CMOV $dst,$src\t @cmovP_cmpP_reg_reg"
         %}
  ins_encode %{
    Register op1 = $tmp1$$Register;
    Register op2 = $tmp2$$Register;
    Register dst = $dst$$Register;
    Register src = $src$$Register;
    int     flag = $cop$$cmpcode;

    switch(flag)
    {
      case 0x01: //equal
        __ subu(AT, op1, op2);
        __ movz(dst, src, AT);
        break;

      case 0x02: //not_equal
        __ subu(AT, op1, op2);
        __ movn(dst, src, AT);
        break;

      case 0x03: //above
        __ sltu(AT, op2, op1);
        __ movn(dst, src, AT);
        break;

      case 0x04: //above_equal
        __ sltu(AT, op1, op2);
        __ movz(dst, src, AT);
        break;

      case 0x05: //below
        __ sltu(AT, op1, op2);
        __ movn(dst, src, AT);
        break;

      case 0x06: //below_equal
        __ sltu(AT, op2, op1);
        __ movz(dst, src, AT);
        break;

      default:
        Unimplemented();
    }
  %}

  ins_pipe( pipe_slow );
%}
// Conditional move of a pointer register predicated on a signed 32-bit (int)
// compare: if cop(tmp1, tmp2) then dst = src.
// slt is a signed compare, so the cmpOp codes here mean greater/less (the old
// "above/below" labels were unsigned terminology and misleading).  Clobbers AT.
instruct cmovP_cmpI_reg_reg(mRegP dst, mRegP src, mRegI tmp1, mRegI tmp2, cmpOp cop ) %{
  match(Set dst (CMoveP (Binary cop (CmpI tmp1 tmp2)) (Binary dst src)));
  ins_cost(80);
  format %{
           "CMP$cop $tmp1,$tmp2\t @cmovP_cmpI_reg_reg\n\t"
           "CMOV $dst,$src\t @cmovP_cmpI_reg_reg"
         %}
  ins_encode %{
    Register op1 = $tmp1$$Register;
    Register op2 = $tmp2$$Register;
    Register dst = $dst$$Register;
    Register src = $src$$Register;
    int     flag = $cop$$cmpcode;

    switch(flag)
    {
      case 0x01: //equal
        __ subu32(AT, op1, op2);
        __ movz(dst, src, AT);
        break;

      case 0x02: //not_equal
        __ subu32(AT, op1, op2);
        __ movn(dst, src, AT);
        break;

      case 0x03: //greater (signed)
        __ slt(AT, op2, op1);
        __ movn(dst, src, AT);
        break;

      case 0x04: //greater_equal (signed)
        __ slt(AT, op1, op2);
        __ movz(dst, src, AT);
        break;

      case 0x05: //less (signed)
        __ slt(AT, op1, op2);
        __ movn(dst, src, AT);
        break;

      case 0x06: //less_equal (signed)
        __ slt(AT, op2, op1);
        __ movz(dst, src, AT);
        break;

      default:
        Unimplemented();
    }
  %}

  ins_pipe( pipe_slow );
%}
// Conditional move of a narrow-oop register predicated on a signed 32-bit (int)
// compare: if cop(tmp1, tmp2) then dst = src.
// slt is signed, so the cmpOp codes mean greater/less (old "above/below"
// labels were misleading unsigned terminology).  Clobbers AT.
instruct cmovN_cmpI_reg_reg(mRegN dst, mRegN src, mRegI tmp1, mRegI tmp2, cmpOp cop ) %{
  match(Set dst (CMoveN (Binary cop (CmpI tmp1 tmp2)) (Binary dst src)));
  ins_cost(80);
  format %{
           "CMP$cop $tmp1,$tmp2\t @cmovN_cmpI_reg_reg\n\t"
           "CMOV $dst,$src\t @cmovN_cmpI_reg_reg"
         %}
  ins_encode %{
    Register op1 = $tmp1$$Register;
    Register op2 = $tmp2$$Register;
    Register dst = $dst$$Register;
    Register src = $src$$Register;
    int     flag = $cop$$cmpcode;

    switch(flag)
    {
      case 0x01: //equal
        __ subu32(AT, op1, op2);
        __ movz(dst, src, AT);
        break;

      case 0x02: //not_equal
        __ subu32(AT, op1, op2);
        __ movn(dst, src, AT);
        break;

      case 0x03: //greater (signed)
        __ slt(AT, op2, op1);
        __ movn(dst, src, AT);
        break;

      case 0x04: //greater_equal (signed)
        __ slt(AT, op1, op2);
        __ movz(dst, src, AT);
        break;

      case 0x05: //less (signed)
        __ slt(AT, op1, op2);
        __ movn(dst, src, AT);
        break;

      case 0x06: //less_equal (signed)
        __ slt(AT, op2, op1);
        __ movz(dst, src, AT);
        break;

      default:
        Unimplemented();
    }
  %}

  ins_pipe( pipe_slow );
%}
// Conditional move of a long register predicated on a signed 32-bit (int)
// compare: if cop(tmp1, tmp2) then dst = src.
// subu32 for equality, signed slt for ordering.  Clobbers AT.
instruct cmovL_cmpI_reg_reg(mRegL dst, mRegL src, mRegI tmp1, mRegI tmp2, cmpOp cop ) %{
  match(Set dst (CMoveL (Binary cop (CmpI tmp1 tmp2)) (Binary dst src)));
  ins_cost(80);
  format %{
           "CMP$cop $tmp1, $tmp2\t @cmovL_cmpI_reg_reg\n"
           "\tCMOV $dst,$src \t @cmovL_cmpI_reg_reg"
         %}

  ins_encode %{
    Register op1 = $tmp1$$Register;
    Register op2 = $tmp2$$Register;
    Register dst = as_Register($dst$$reg);
    Register src = as_Register($src$$reg);
    int     flag = $cop$$cmpcode;

    switch(flag)
    {
      case 0x01: //equal
        __ subu32(AT, op1, op2);
        __ movz(dst, src, AT);
        break;

      case 0x02: //not_equal
        __ subu32(AT, op1, op2);
        __ movn(dst, src, AT);
        break;

      case 0x03: //greater
        __ slt(AT, op2, op1);
        __ movn(dst, src, AT);
        break;

      case 0x04: //greater_equal
        __ slt(AT, op1, op2);
        __ movz(dst, src, AT);
        break;

      case 0x05: //less
        __ slt(AT, op1, op2);
        __ movn(dst, src, AT);
        break;

      case 0x06: //less_equal
        __ slt(AT, op2, op1);
        __ movz(dst, src, AT);
        break;

      default:
        Unimplemented();
    }
  %}

  ins_pipe( pipe_slow );
%}
// Conditional move of a long register predicated on a signed 64-bit (long)
// compare: if cop(tmp1, tmp2) then dst = src.
// slt for the signed ordering; subu difference is zero iff equal.  Clobbers AT.
instruct cmovL_cmpL_reg_reg(mRegL dst, mRegL src, mRegL tmp1, mRegL tmp2, cmpOp cop ) %{
  match(Set dst (CMoveL (Binary cop (CmpL tmp1 tmp2)) (Binary dst src)));
  ins_cost(80);
  format %{
           "CMP$cop $tmp1, $tmp2\t @cmovL_cmpL_reg_reg\n"
           "\tCMOV $dst,$src \t @cmovL_cmpL_reg_reg"
         %}
  ins_encode %{
    Register opr1 = as_Register($tmp1$$reg);
    Register opr2 = as_Register($tmp2$$reg);
    Register dst  = as_Register($dst$$reg);
    Register src  = as_Register($src$$reg);
    int     flag  = $cop$$cmpcode;

    switch(flag)
    {
      case 0x01: //equal
        __ subu(AT, opr1, opr2);
        __ movz(dst, src, AT);
        break;

      case 0x02: //not_equal
        __ subu(AT, opr1, opr2);
        __ movn(dst, src, AT);
        break;

      case 0x03: //greater
        __ slt(AT, opr2, opr1);
        __ movn(dst, src, AT);
        break;

      case 0x04: //greater_equal
        __ slt(AT, opr1, opr2);
        __ movz(dst, src, AT);
        break;

      case 0x05: //less
        __ slt(AT, opr1, opr2);
        __ movn(dst, src, AT);
        break;

      case 0x06: //less_equal
        __ slt(AT, opr2, opr1);
        __ movz(dst, src, AT);
        break;

      default:
        Unimplemented();
    }
  %}

  ins_pipe( pipe_slow );
%}
// Conditional move of a long register predicated on an unsigned compare of
// two narrow-oop registers: if cop(tmp1, tmp2) then dst = src.
// subu32 for 32-bit equality, sltu for the unsigned ordering.  Clobbers AT.
instruct cmovL_cmpN_reg_reg(mRegL dst, mRegL src, mRegN tmp1, mRegN tmp2, cmpOpU cop ) %{
  match(Set dst (CMoveL (Binary cop (CmpN tmp1 tmp2)) (Binary dst src)));
  ins_cost(80);
  format %{
           "CMPU$cop $tmp1,$tmp2\t @cmovL_cmpN_reg_reg\n\t"
           "CMOV $dst,$src\t @cmovL_cmpN_reg_reg"
         %}
  ins_encode %{
    Register op1 = $tmp1$$Register;
    Register op2 = $tmp2$$Register;
    Register dst = $dst$$Register;
    Register src = $src$$Register;
    int     flag = $cop$$cmpcode;

    switch(flag)
    {
      case 0x01: //equal
        __ subu32(AT, op1, op2);
        __ movz(dst, src, AT);
        break;

      case 0x02: //not_equal
        __ subu32(AT, op1, op2);
        __ movn(dst, src, AT);
        break;

      case 0x03: //above
        __ sltu(AT, op2, op1);
        __ movn(dst, src, AT);
        break;

      case 0x04: //above_equal
        __ sltu(AT, op1, op2);
        __ movz(dst, src, AT);
        break;

      case 0x05: //below
        __ sltu(AT, op1, op2);
        __ movn(dst, src, AT);
        break;

      case 0x06: //below_equal
        __ sltu(AT, op2, op1);
        __ movz(dst, src, AT);
        break;

      default:
        Unimplemented();
    }
  %}

  ins_pipe( pipe_slow );
%}
// Conditional move of a long register predicated on a double-FP compare:
// if cop(tmp1, tmp2) then dst = src.  c.cond.d sets the FP condition bit;
// movt/movf act on that bit.  Clobbers the FP condition flag.
instruct cmovL_cmpD_reg_reg(mRegL dst, mRegL src, regD tmp1, regD tmp2, cmpOp cop ) %{
  match(Set dst (CMoveL (Binary cop (CmpD tmp1 tmp2)) (Binary dst src)));
  ins_cost(80);
  format %{
           "CMP$cop $tmp1, $tmp2\t @cmovL_cmpD_reg_reg\n"
           "\tCMOV $dst,$src \t @cmovL_cmpD_reg_reg"
         %}
  ins_encode %{
    FloatRegister reg_op1 = as_FloatRegister($tmp1$$reg);
    FloatRegister reg_op2 = as_FloatRegister($tmp2$$reg);
    Register dst = as_Register($dst$$reg);
    Register src = as_Register($src$$reg);

    int     flag = $cop$$cmpcode;

    switch(flag)
    {
      case 0x01: //equal
        __ c_eq_d(reg_op1, reg_op2);
        __ movt(dst, src);
        break;
      case 0x02: //not_equal
        __ c_eq_d(reg_op1, reg_op2);
        __ movf(dst, src);
        break;
      case 0x03: //greater
        __ c_ole_d(reg_op1, reg_op2);
        __ movf(dst, src);
        break;
      case 0x04: //greater_equal
        __ c_olt_d(reg_op1, reg_op2);
        __ movf(dst, src);
        break;
      case 0x05: //less
        __ c_ult_d(reg_op1, reg_op2);
        __ movt(dst, src);
        break;
      case 0x06: //less_equal
        __ c_ule_d(reg_op1, reg_op2);
        __ movt(dst, src);
        break;
      default:
        Unimplemented();
    }
  %}

  ins_pipe( pipe_slow );
%}
// Conditional move of a double register predicated on a double-FP compare:
// if cop(tmp1, tmp2) then dst = src, using the FP conditional moves
// movt_d/movf_d on the FP condition bit.  Clobbers the FP condition flag.
instruct cmovD_cmpD_reg_reg(regD dst, regD src, regD tmp1, regD tmp2, cmpOp cop ) %{
  match(Set dst (CMoveD (Binary cop (CmpD tmp1 tmp2)) (Binary dst src)));
  ins_cost(200);
  format %{
           "CMP$cop $tmp1, $tmp2\t @cmovD_cmpD_reg_reg\n"
           "\tCMOV $dst,$src \t @cmovD_cmpD_reg_reg"
         %}
  ins_encode %{
    FloatRegister reg_op1 = as_FloatRegister($tmp1$$reg);
    FloatRegister reg_op2 = as_FloatRegister($tmp2$$reg);
    FloatRegister dst = as_FloatRegister($dst$$reg);
    FloatRegister src = as_FloatRegister($src$$reg);

    int     flag = $cop$$cmpcode;

    switch(flag)
    {
      case 0x01: //equal
        __ c_eq_d(reg_op1, reg_op2);
        __ movt_d(dst, src);
        break;
      case 0x02: //not_equal
        __ c_eq_d(reg_op1, reg_op2);
        __ movf_d(dst, src);
        break;
      case 0x03: //greater
        __ c_ole_d(reg_op1, reg_op2);
        __ movf_d(dst, src);
        break;
      case 0x04: //greater_equal
        __ c_olt_d(reg_op1, reg_op2);
        __ movf_d(dst, src);
        break;
      case 0x05: //less
        __ c_ult_d(reg_op1, reg_op2);
        __ movt_d(dst, src);
        break;
      case 0x06: //less_equal
        __ c_ule_d(reg_op1, reg_op2);
        __ movt_d(dst, src);
        break;
      default:
        Unimplemented();
    }
  %}

  ins_pipe( pipe_slow );
%}
// Conditional move of a float register predicated on a signed 32-bit (int)
// compare.  There is no GPR-conditioned FP move, so this is done with a
// branch around the mov_s: branch taken (condition false) skips the move.
// Each branch's nop fills the delay slot.  Clobbers AT.
instruct cmovF_cmpI_reg_reg(regF dst, regF src, mRegI tmp1, mRegI tmp2, cmpOp cop ) %{
  match(Set dst (CMoveF (Binary cop (CmpI tmp1 tmp2)) (Binary dst src)));
  ins_cost(200);
  format %{
           "CMP$cop $tmp1, $tmp2\t @cmovF_cmpI_reg_reg\n"
           "\tCMOV $dst, $src \t @cmovF_cmpI_reg_reg"
         %}

  ins_encode %{
    Register op1 = $tmp1$$Register;
    Register op2 = $tmp2$$Register;
    FloatRegister dst = as_FloatRegister($dst$$reg);
    FloatRegister src = as_FloatRegister($src$$reg);
    int     flag = $cop$$cmpcode;
    Label      L;

    switch(flag)
    {
      case 0x01: //equal
        __ bne(op1, op2, L);     // skip the move unless op1 == op2
        __ nop();
        __ mov_s(dst, src);
        __ bind(L);
        break;
      case 0x02: //not_equal
        __ beq(op1, op2, L);
        __ nop();
        __ mov_s(dst, src);
        __ bind(L);
        break;
      case 0x03: //greater (signed)
        __ slt(AT, op2, op1);
        __ beq(AT, R0, L);
        __ nop();
        __ mov_s(dst, src);
        __ bind(L);
        break;
      case 0x04: //greater_equal (signed)
        __ slt(AT, op1, op2);
        __ bne(AT, R0, L);
        __ nop();
        __ mov_s(dst, src);
        __ bind(L);
        break;
      case 0x05: //less (signed)
        __ slt(AT, op1, op2);
        __ beq(AT, R0, L);
        __ nop();
        __ mov_s(dst, src);
        __ bind(L);
        break;
      case 0x06: //less_equal (signed)
        __ slt(AT, op2, op1);
        __ bne(AT, R0, L);
        __ nop();
        __ mov_s(dst, src);
        __ bind(L);
        break;
      default:
        Unimplemented();
    }
  %}

  ins_pipe( pipe_slow );
%}
// Conditional move of a double register predicated on a signed 32-bit (int)
// compare.  As with cmovF_cmpI_reg_reg, implemented as a branch around a
// mov_d; the nop fills the branch delay slot.  Clobbers AT.
instruct cmovD_cmpI_reg_reg(regD dst, regD src, mRegI tmp1, mRegI tmp2, cmpOp cop ) %{
  match(Set dst (CMoveD (Binary cop (CmpI tmp1 tmp2)) (Binary dst src)));
  ins_cost(200);
  format %{
           "CMP$cop $tmp1, $tmp2\t @cmovD_cmpI_reg_reg\n"
           "\tCMOV $dst, $src \t @cmovD_cmpI_reg_reg"
         %}

  ins_encode %{
    Register op1 = $tmp1$$Register;
    Register op2 = $tmp2$$Register;
    FloatRegister dst = as_FloatRegister($dst$$reg);
    FloatRegister src = as_FloatRegister($src$$reg);
    int     flag = $cop$$cmpcode;
    Label      L;

    switch(flag)
    {
      case 0x01: //equal
        __ bne(op1, op2, L);
        __ nop();
        __ mov_d(dst, src);
        __ bind(L);
        break;
      case 0x02: //not_equal
        __ beq(op1, op2, L);
        __ nop();
        __ mov_d(dst, src);
        __ bind(L);
        break;
      case 0x03: //greater (signed)
        __ slt(AT, op2, op1);
        __ beq(AT, R0, L);
        __ nop();
        __ mov_d(dst, src);
        __ bind(L);
        break;
      case 0x04: //greater_equal (signed)
        __ slt(AT, op1, op2);
        __ bne(AT, R0, L);
        __ nop();
        __ mov_d(dst, src);
        __ bind(L);
        break;
      case 0x05: //less (signed)
        __ slt(AT, op1, op2);
        __ beq(AT, R0, L);
        __ nop();
        __ mov_d(dst, src);
        __ bind(L);
        break;
      case 0x06: //less_equal (signed)
        __ slt(AT, op2, op1);
        __ bne(AT, R0, L);
        __ nop();
        __ mov_d(dst, src);
        __ bind(L);
        break;
      default:
        Unimplemented();
    }
  %}

  ins_pipe( pipe_slow );
%}
// Conditional move of a double register predicated on a pointer compare,
// implemented as a branch around a mov_d.  Clobbers AT.
// NOTE(review): the ordering cases use the *signed* slt on pointer values,
// while every other CmpP rule in this file uses cmpOpU/sltu.  This is only
// safe if C2 never emits ordered (lt/gt) pointer compares through this rule
// (eq/ne do not depend on signedness) — confirm before relying on 0x03-0x06.
instruct cmovD_cmpP_reg_reg(regD dst, regD src, mRegP tmp1, mRegP tmp2, cmpOp cop ) %{
  match(Set dst (CMoveD (Binary cop (CmpP tmp1 tmp2)) (Binary dst src)));
  ins_cost(200);
  format %{
           "CMP$cop $tmp1, $tmp2\t @cmovD_cmpP_reg_reg\n"
           "\tCMOV $dst, $src \t @cmovD_cmpP_reg_reg"
         %}

  ins_encode %{
    Register op1 = $tmp1$$Register;
    Register op2 = $tmp2$$Register;
    FloatRegister dst = as_FloatRegister($dst$$reg);
    FloatRegister src = as_FloatRegister($src$$reg);
    int     flag = $cop$$cmpcode;
    Label      L;

    switch(flag)
    {
      case 0x01: //equal
        __ bne(op1, op2, L);
        __ nop();
        __ mov_d(dst, src);
        __ bind(L);
        break;
      case 0x02: //not_equal
        __ beq(op1, op2, L);
        __ nop();
        __ mov_d(dst, src);
        __ bind(L);
        break;
      case 0x03: //greater
        __ slt(AT, op2, op1);
        __ beq(AT, R0, L);
        __ nop();
        __ mov_d(dst, src);
        __ bind(L);
        break;
      case 0x04: //greater_equal
        __ slt(AT, op1, op2);
        __ bne(AT, R0, L);
        __ nop();
        __ mov_d(dst, src);
        __ bind(L);
        break;
      case 0x05: //less
        __ slt(AT, op1, op2);
        __ beq(AT, R0, L);
        __ nop();
        __ mov_d(dst, src);
        __ bind(L);
        break;
      case 0x06: //less_equal
        __ slt(AT, op2, op1);
        __ bne(AT, R0, L);
        __ nop();
        __ mov_d(dst, src);
        __ bind(L);
        break;
      default:
        Unimplemented();
    }
  %}

  ins_pipe( pipe_slow );
%}
//FIXME
// Conditional move of an int register predicated on a single-FP compare:
// if cop(tmp1, tmp2) then dst = src.  c.cond.s sets the FP condition bit;
// movt/movf act on that bit.  Clobbers the FP condition flag.
instruct cmovI_cmpF_reg_reg(mRegI dst, mRegI src, regF tmp1, regF tmp2, cmpOp cop ) %{
  match(Set dst (CMoveI (Binary cop (CmpF tmp1 tmp2)) (Binary dst src)));
  ins_cost(80);
  format %{
           "CMP$cop $tmp1, $tmp2\t @cmovI_cmpF_reg_reg\n"
           "\tCMOV $dst,$src \t @cmovI_cmpF_reg_reg"
         %}

  ins_encode %{
    FloatRegister reg_op1 = $tmp1$$FloatRegister;
    FloatRegister reg_op2 = $tmp2$$FloatRegister;
    Register dst = $dst$$Register;
    Register src = $src$$Register;
    int     flag = $cop$$cmpcode;

    switch(flag)
    {
      case 0x01: //equal
        __ c_eq_s(reg_op1, reg_op2);
        __ movt(dst, src);
        break;
      case 0x02: //not_equal
        __ c_eq_s(reg_op1, reg_op2);
        __ movf(dst, src);
        break;
      case 0x03: //greater
        __ c_ole_s(reg_op1, reg_op2);
        __ movf(dst, src);
        break;
      case 0x04: //greater_equal
        __ c_olt_s(reg_op1, reg_op2);
        __ movf(dst, src);
        break;
      case 0x05: //less
        __ c_ult_s(reg_op1, reg_op2);
        __ movt(dst, src);
        break;
      case 0x06: //less_equal
        __ c_ule_s(reg_op1, reg_op2);
        __ movt(dst, src);
        break;
      default:
        Unimplemented();
    }
  %}
  ins_pipe( pipe_slow );
%}
// Conditional move of a float register predicated on a single-FP compare:
// if cop(tmp1, tmp2) then dst = src, using the FP conditional moves
// movt_s/movf_s on the FP condition bit.  Clobbers the FP condition flag.
instruct cmovF_cmpF_reg_reg(regF dst, regF src, regF tmp1, regF tmp2, cmpOp cop ) %{
  match(Set dst (CMoveF (Binary cop (CmpF tmp1 tmp2)) (Binary dst src)));
  ins_cost(200);
  format %{
           "CMP$cop $tmp1, $tmp2\t @cmovF_cmpF_reg_reg\n"
           "\tCMOV $dst,$src \t @cmovF_cmpF_reg_reg"
         %}

  ins_encode %{
    FloatRegister reg_op1 = $tmp1$$FloatRegister;
    FloatRegister reg_op2 = $tmp2$$FloatRegister;
    FloatRegister dst = $dst$$FloatRegister;
    FloatRegister src = $src$$FloatRegister;
    int    flag = $cop$$cmpcode;

    switch(flag)
    {
      case 0x01: //equal
        __ c_eq_s(reg_op1, reg_op2);
        __ movt_s(dst, src);
        break;
      case 0x02: //not_equal
        __ c_eq_s(reg_op1, reg_op2);
        __ movf_s(dst, src);
        break;
      case 0x03: //greater
        __ c_ole_s(reg_op1, reg_op2);
        __ movf_s(dst, src);
        break;
      case 0x04: //greater_equal
        __ c_olt_s(reg_op1, reg_op2);
        __ movf_s(dst, src);
        break;
      case 0x05: //less
        __ c_ult_s(reg_op1, reg_op2);
        __ movt_s(dst, src);
        break;
      case 0x06: //less_equal
        __ c_ule_s(reg_op1, reg_op2);
        __ movt_s(dst, src);
        break;
      default:
        Unimplemented();
    }
  %}
  ins_pipe( pipe_slow );
%}
9654 // Manifest a CmpL result in an integer register. Very painful.
9655 // This is the test to avoid.
9656 instruct cmpL3_reg_reg(mRegI dst, mRegL src1, mRegL src2) %{
9657 match(Set dst (CmpL3 src1 src2));
9658 ins_cost(1000);
9659 format %{ "cmpL3 $dst, $src1, $src2 @ cmpL3_reg_reg" %}
9660 ins_encode %{
9661 Register opr1 = as_Register($src1$$reg);
9662 Register opr2 = as_Register($src2$$reg);
9663 Register dst = as_Register($dst$$reg);
9665 Label Done;
9667 __ subu(AT, opr1, opr2);
9668 __ bltz(AT, Done);
9669 __ delayed()->daddiu(dst, R0, -1);
9671 __ move(dst, 1);
9672 __ movz(dst, R0, AT);
9674 __ bind(Done);
9675 %}
9676 ins_pipe( pipe_slow );
9677 %}
//
// Manifest a CmpF result in an integer register:
// less_rsult     = -1
// greater_result =  1
// equal_result   =  0
// nan_result     = -1   (c_ult is true for unordered, so NaN falls into -1)
//
// Clobbers AT and the FP condition flag.
instruct cmpF3_reg_reg(mRegI dst, regF src1, regF src2) %{
  match(Set dst (CmpF3 src1 src2));
  ins_cost(1000);
  format %{ "cmpF3 $dst, $src1, $src2 @ cmpF3_reg_reg" %}
  ins_encode %{
    FloatRegister src1 = as_FloatRegister($src1$$reg);
    FloatRegister src2 = as_FloatRegister($src2$$reg);
    Register dst = as_Register($dst$$reg);

    Label Done;

    // less-than or unordered -> -1 (set in the branch delay slot)
    __ c_ult_s(src1, src2);
    __ bc1t(Done);
    __ delayed()->daddiu(dst, R0, -1);

    // here: greater or equal; start with 1, zero it on equality
    __ c_eq_s(src1, src2);
    __ move(dst, 1);
    __ movt(dst, R0);

    __ bind(Done);
  %}
  ins_pipe( pipe_slow );
%}
// Manifest a CmpD result in an integer register; same contract as
// cmpF3_reg_reg (-1 less or NaN, 0 equal, +1 greater), double precision.
// Clobbers AT and the FP condition flag.
instruct cmpD3_reg_reg(mRegI dst, regD src1, regD src2) %{
  match(Set dst (CmpD3 src1 src2));
  ins_cost(1000);
  format %{ "cmpD3 $dst, $src1, $src2 @ cmpD3_reg_reg" %}
  ins_encode %{
    FloatRegister src1 = as_FloatRegister($src1$$reg);
    FloatRegister src2 = as_FloatRegister($src2$$reg);
    Register dst = as_Register($dst$$reg);

    Label Done;

    // less-than or unordered -> -1 (set in the branch delay slot)
    __ c_ult_d(src1, src2);
    __ bc1t(Done);
    __ delayed()->daddiu(dst, R0, -1);

    // here: greater or equal; start with 1, zero it on equality
    __ c_eq_d(src1, src2);
    __ move(dst, 1);
    __ movt(dst, R0);

    __ bind(Done);
  %}
  ins_pipe( pipe_slow );
%}
// Zero-fill an array region.  cnt is the number of doublewords (the loop
// stores one 8-byte zero and advances wordSize per decrement, matching the
// format string "Clear doublewords"); base is the starting address.
// (The old comment claimed cnt was a byte count, which contradicted the code.)
// Clobbers AT (running store pointer) and T9 (remaining count).
instruct clear_array(mRegL cnt, mRegP base, Universe dummy) %{
  match(Set dummy (ClearArray cnt base));
  format %{ "CLEAR_ARRAY base = $base, cnt = $cnt # Clear doublewords" %}
  ins_encode %{
    Register base = $base$$Register;
    Register num  = $cnt$$Register;
    Label Loop, done;

    __ beq(num, R0, done);            // nothing to do for cnt == 0
    __ delayed()->daddu(AT, base, R0);  // AT = base (delay slot)

    __ move(T9, num);                 /* T9 = words */

    __ bind(Loop);
    __ sd(R0, AT, 0);                 // store one zero doubleword
    __ daddi(T9, T9, -1);
    __ bne(T9, R0, Loop);
    __ delayed()->daddi(AT, AT, wordSize);  // advance pointer in delay slot

    __ bind(done);
  %}
  ins_pipe( pipe_slow );
%}
// Compare two UTF-16 char sequences (StrComp intrinsic).
// result < 0, == 0, > 0 as str1 sorts before, equal to, or after str2.
// If the shorter string is a prefix of the longer, the result is the length
// difference; otherwise it is the difference of the first mismatching chars.
// Kills str1/str2/cnt1/cnt2 (used as cursors/temps); clobbers AT.
instruct string_compare(a4_RegP str1, mA5RegI cnt1, a6_RegP str2, mA7RegI cnt2, no_Ax_mRegI result) %{
  match(Set result (StrComp (Binary str1 cnt1) (Binary str2 cnt2)));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2);

  format %{ "String Compare $str1[len: $cnt1], $str2[len: $cnt2] -> $result @ string_compare" %}
  ins_encode %{
    // Get the first character position in both strings
    // [8] char array, [12] offset, [16] count
    Register str1   = $str1$$Register;
    Register str2   = $str2$$Register;
    Register cnt1   = $cnt1$$Register;
    Register cnt2   = $cnt2$$Register;
    Register result = $result$$Register;

    Label L, Loop, haveResult, done;

    // compute the difference of lengths (in result) — this is the answer
    // when one string is a prefix of the other
    __ subu(result, cnt1, cnt2);

    // compute the shorter length (in cnt1)
    __ slt(AT, cnt2, cnt1);
    __ movn(cnt1, cnt2, AT);

    // Now the shorter length is in cnt1 and cnt2 can be used as a tmp register
    __ bind(Loop);                        // Loop begin
    __ beq(cnt1, R0, done);               // ran off the shorter string
    __ delayed()->lhu(AT, str1, 0);;      // load current char of str1 (delay slot)

    // compare current character
    __ lhu(cnt2, str2, 0);
    __ bne(AT, cnt2, haveResult);         // mismatch -> char difference
    __ delayed()->addi(str1, str1, 2);    // advance str1 (delay slot, runs either way)
    __ addi(str2, str2, 2);
    __ b(Loop);
    __ delayed()->addi(cnt1, cnt1, -1);   // Loop end

    __ bind(haveResult);
    __ subu(result, AT, cnt2);            // first differing chars decide

    __ bind(done);
  %}

  ins_pipe( pipe_slow );
%}
9804 // intrinsic optimization
9805 instruct string_equals(a4_RegP str1, a5_RegP str2, mA6RegI cnt, mA7RegI temp, no_Ax_mRegI result) %{
9806 match(Set result (StrEquals (Binary str1 str2) cnt));
9807 effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt, KILL temp);
9809 format %{ "String Equal $str1, $str2, len:$cnt tmp:$temp -> $result @ string_equals" %}
9810 ins_encode %{
9811 // Get the first character position in both strings
9812 // [8] char array, [12] offset, [16] count
9813 Register str1 = $str1$$Register;
9814 Register str2 = $str2$$Register;
9815 Register cnt = $cnt$$Register;
9816 Register tmp = $temp$$Register;
9817 Register result = $result$$Register;
9819 Label Loop, done;
9822 __ beq(str1, str2, done); // same char[] ?
9823 __ daddiu(result, R0, 1);
9825 __ bind(Loop); // Loop begin
9826 __ beq(cnt, R0, done);
9827 __ daddiu(result, R0, 1); // count == 0
9829 // compare current character
9830 __ lhu(AT, str1, 0);;
9831 __ lhu(tmp, str2, 0);
9832 __ bne(AT, tmp, done);
9833 __ delayed()->daddi(result, R0, 0);
9834 __ addi(str1, str1, 2);
9835 __ addi(str2, str2, 2);
9836 __ b(Loop);
9837 __ delayed()->addi(cnt, cnt, -1); // Loop end
9839 __ bind(done);
9840 %}
9842 ins_pipe( pipe_slow );
9843 %}
//----------Arithmetic Instructions-------------------------------------------
//----------Addition Instructions---------------------------------------------
// 32-bit integer add, register + register.  addu32 keeps the result
// sign-extended in the 64-bit register, as required for ints.
instruct addI_Reg_Reg(mRegI dst, mRegI src1, mRegI src2) %{
  match(Set dst (AddI src1 src2));

  format %{ "add $dst, $src1, $src2 #@addI_Reg_Reg" %}
  ins_encode %{
    Register dst  = $dst$$Register;
    Register src1 = $src1$$Register;
    Register src2 = $src2$$Register;
    __ addu32(dst, src1, src2);
  %}
  ins_pipe( ialu_regI_regI );
%}
// AddI (reg, immediate): fold the constant into addiu32 when it fits in
// simm16; otherwise materialize it in AT (clobbered) and add.
instruct addI_Reg_imm(mRegI dst, mRegI src1, immI src2) %{
  match(Set dst (AddI src1 src2));

  format %{ "add $dst, $src1, $src2 #@addI_Reg_imm" %}
  ins_encode %{
    int value = $src2$$constant;

    if (Assembler::is_simm16(value)) {
      __ addiu32($dst$$Register, $src1$$Register, value);
    } else {
      __ move(AT, value);
      __ addu32($dst$$Register, $src1$$Register, AT);
    }
  %}
  ins_pipe( ialu_regI_regI );
%}
// AddP (pointer + long): plain 64-bit addition.
instruct addP_reg_reg(mRegP dst, mRegP src1, mRegL src2) %{
  match(Set dst (AddP src1 src2));

  format %{ "dadd $dst, $src1, $src2 #@addP_reg_reg" %}

  ins_encode %{
    __ daddu($dst$$Register, $src1$$Register, $src2$$Register);
  %}

  ins_pipe( ialu_regI_regI );
%}
// AddP (pointer + int offset): ConvI2L is free here — assumes the port keeps
// int values sign-extended in 64-bit registers, so a plain daddu suffices.
instruct addP_reg_reg_convI2L(mRegP dst, mRegP src1, mRegI src2) %{
  match(Set dst (AddP src1 (ConvI2L src2)));

  format %{ "dadd $dst, $src1, $src2 #@addP_reg_reg_convI2L" %}

  ins_encode %{
    __ daddu($dst$$Register, $src1$$Register, $src2$$Register);
  %}

  ins_pipe( ialu_regI_regI );
%}
// AddP (pointer + long immediate): daddiu for simm16 offsets, otherwise
// build the 64-bit constant in AT (clobbered) and add.
instruct addP_reg_imm(mRegP dst, mRegP src1, immL src2) %{
  match(Set dst (AddP src1 src2));

  format %{ "daddi $dst, $src1, $src2 #@addP_reg_imm" %}
  ins_encode %{
    long offset = $src2$$constant;

    if (Assembler::is_simm16(offset)) {
      __ daddiu($dst$$Register, $src1$$Register, offset);
    } else {
      __ set64(AT, offset);
      __ daddu($dst$$Register, $src1$$Register, AT);
    }
  %}
  ins_pipe( ialu_regI_imm16 );
%}
9928 // Add Long Register with Register
// AddL (reg, reg): single 64-bit daddu.
instruct addL_Reg_Reg(mRegL dst, mRegL src1, mRegL src2) %{
  match(Set dst (AddL src1 src2));
  ins_cost(200);
  format %{ "ADD $dst, $src1, $src2 #@addL_Reg_Reg\t" %}

  ins_encode %{
    __ daddu($dst$$Register, $src1$$Register, $src2$$Register);
  %}

  ins_pipe( ialu_regL_regL );
%}
// AddL (reg, simm16 immediate): the constant fits the daddiu field directly.
instruct addL_Reg_imm(mRegL dst, mRegL src1, immL16 src2)
%{
  match(Set dst (AddL src1 src2));

  format %{ "ADD $dst, $src1, $src2 #@addL_Reg_imm " %}
  ins_encode %{
    __ daddiu($dst$$Register, $src1$$Register, (int) $src2$$constant);
  %}

  ins_pipe( ialu_regL_regL );
%}
// AddL (ConvI2L(int) + simm16 immediate). Assumes int registers are held
// sign-extended, so the conversion needs no instruction.
instruct addL_RegI2L_imm(mRegL dst, mRegI src1, immL16 src2)
%{
  match(Set dst (AddL (ConvI2L src1) src2));

  format %{ "ADD $dst, $src1, $src2 #@addL_RegI2L_imm " %}
  ins_encode %{
    __ daddiu($dst$$Register, $src1$$Register, (int) $src2$$constant);
  %}

  ins_pipe( ialu_regL_regL );
%}
// AddL (ConvI2L(int) + long): free conversion, single daddu.
instruct addL_RegI2L_Reg(mRegL dst, mRegI src1, mRegL src2) %{
  match(Set dst (AddL (ConvI2L src1) src2));
  ins_cost(200);
  format %{ "ADD $dst, $src1, $src2 #@addL_RegI2L_Reg\t" %}

  ins_encode %{
    __ daddu($dst$$Register, $src1$$Register, $src2$$Register);
  %}

  ins_pipe( ialu_regL_regL );
%}
// AddL of two int operands widened by ConvI2L: both conversions are free,
// single daddu.
instruct addL_RegI2L_RegI2L(mRegL dst, mRegI src1, mRegI src2) %{
  match(Set dst (AddL (ConvI2L src1) (ConvI2L src2)));
  ins_cost(200);
  format %{ "ADD $dst, $src1, $src2 #@addL_RegI2L_RegI2L\t" %}

  ins_encode %{
    __ daddu($dst$$Register, $src1$$Register, $src2$$Register);
  %}

  ins_pipe( ialu_regL_regL );
%}
// AddL (long + ConvI2L(int)): mirror of addL_RegI2L_Reg.
instruct addL_Reg_RegI2L(mRegL dst, mRegL src1, mRegI src2) %{
  match(Set dst (AddL src1 (ConvI2L src2)));
  ins_cost(200);
  format %{ "ADD $dst, $src1, $src2 #@addL_Reg_RegI2L\t" %}

  ins_encode %{
    __ daddu($dst$$Register, $src1$$Register, $src2$$Register);
  %}

  ins_pipe( ialu_regL_regL );
%}
10025 //----------Subtraction Instructions-------------------------------------------
10026 // Integer Subtraction Instructions
// SubI (reg, reg): single 32-bit subtract.
instruct subI_Reg_Reg(mRegI dst, mRegI src1, mRegI src2) %{
  match(Set dst (SubI src1 src2));
  ins_cost(100);

  format %{ "sub $dst, $src1, $src2 #@subI_Reg_Reg" %}
  ins_encode %{
    __ subu32($dst$$Register, $src1$$Register, $src2$$Register);
  %}
  ins_pipe( ialu_regI_regI );
%}
// SubI with an immediate whose negation fits simm16: emitted as an add of
// the negated constant (MIPS has no subi).
instruct subI_Reg_immI16_sub(mRegI dst, mRegI src1, immI16_sub src2) %{
  match(Set dst (SubI src1 src2));
  ins_cost(80);

  format %{ "sub $dst, $src1, $src2 #@subI_Reg_immI16_sub" %}
  ins_encode %{
    __ addiu32($dst$$Register, $src1$$Register, -1 * $src2$$constant);
  %}
  ins_pipe( ialu_regI_regI );
%}
// NegI (0 - src): subtract from the hard-wired zero register.
instruct negI_Reg(mRegI dst, immI0 zero, mRegI src) %{
  match(Set dst (SubI zero src));
  ins_cost(80);

  format %{ "neg $dst, $src #@negI_Reg" %}
  ins_encode %{
    __ subu32($dst$$Register, R0, $src$$Register);
  %}
  ins_pipe( ialu_regI_regI );
%}
// NegL (0 - src): 64-bit negate via subtraction from R0.
instruct negL_Reg(mRegL dst, immL0 zero, mRegL src) %{
  match(Set dst (SubL zero src));
  ins_cost(80);

  format %{ "neg $dst, $src #@negL_Reg" %}
  ins_encode %{
    __ subu($dst$$Register, R0, $src$$Register);
  %}
  ins_pipe( ialu_regI_regI );
%}
// SubL with an immediate whose negation fits simm16: add the negated value.
instruct subL_Reg_immL16_sub(mRegL dst, mRegL src1, immL16_sub src2) %{
  match(Set dst (SubL src1 src2));
  ins_cost(80);

  format %{ "sub $dst, $src1, $src2 #@subL_Reg_immL16_sub" %}
  ins_encode %{
    __ daddiu($dst$$Register, $src1$$Register, -1 * $src2$$constant);
  %}
  ins_pipe( ialu_regI_regI );
%}
10093 // Subtract Long Register with Register.
// SubL (reg, reg): single 64-bit subtract.
instruct subL_Reg_Reg(mRegL dst, mRegL src1, mRegL src2) %{
  match(Set dst (SubL src1 src2));
  ins_cost(100);
  format %{ "SubL $dst, $src1, $src2 @ subL_Reg_Reg" %}
  ins_encode %{
    __ subu($dst$$Register, $src1$$Register, $src2$$Register);
  %}
  ins_pipe( ialu_regL_regL );
%}
// SubL (long - ConvI2L(int)): widening is free, single subu.
instruct subL_Reg_RegI2L(mRegL dst, mRegL src1, mRegI src2) %{
  match(Set dst (SubL src1 (ConvI2L src2)));
  ins_cost(100);
  format %{ "SubL $dst, $src1, $src2 @ subL_Reg_RegI2L" %}
  ins_encode %{
    __ subu($dst$$Register, $src1$$Register, $src2$$Register);
  %}
  ins_pipe( ialu_regL_regL );
%}
// SubL (ConvI2L(int) - long): widening is free, single subu.
instruct subL_RegI2L_Reg(mRegL dst, mRegI src1, mRegL src2) %{
  match(Set dst (SubL (ConvI2L src1) src2));
  ins_cost(200);
  format %{ "SubL $dst, $src1, $src2 @ subL_RegI2L_Reg" %}
  ins_encode %{
    __ subu($dst$$Register, $src1$$Register, $src2$$Register);
  %}
  ins_pipe( ialu_regL_regL );
%}
// SubL of two widened ints: both ConvI2L nodes are free, single subu.
instruct subL_RegI2L_RegI2L(mRegL dst, mRegI src1, mRegI src2) %{
  match(Set dst (SubL (ConvI2L src1) (ConvI2L src2)));
  ins_cost(200);
  format %{ "SubL $dst, $src1, $src2 @ subL_RegI2L_RegI2L" %}
  ins_encode %{
    __ subu($dst$$Register, $src1$$Register, $src2$$Register);
  %}
  ins_pipe( ialu_regL_regL );
%}
10150 // Integer MOD with Register
// Integer MOD with Register: MIPS div leaves the remainder in HI.
// Cleanup: removed the dead 'if (0)' branch around gsmod — experiments on
// 2016.08.10 showed gsmod is slower than div + mfhi, so the generic
// sequence is used unconditionally (behavior unchanged).
instruct modI_Reg_Reg(mRegI dst, mRegI src1, mRegI src2) %{
  match(Set dst (ModI src1 src2));
  ins_cost(300);
  format %{ "modi $dst, $src1, $src2 @ modI_Reg_Reg" %}
  ins_encode %{
    Register dst = $dst$$Register;
    Register src1 = $src1$$Register;
    Register src2 = $src2$$Register;

    __ div(src1, src2);
    __ mfhi(dst);
  %}

  //ins_pipe( ialu_mod );
  ins_pipe( ialu_regI_regI );
%}
// ModL: 64-bit remainder. Uses the fused Loongson gsdmod when available;
// otherwise ddiv leaves the remainder in HI.
instruct modL_reg_reg(mRegL dst, mRegL src1, mRegL src2) %{
  match(Set dst (ModL src1 src2));
  format %{ "modL $dst, $src1, $src2 @modL_reg_reg" %}

  ins_encode %{
    if (UseLoongsonISA) {
      __ gsdmod($dst$$Register, $src1$$Register, $src2$$Register);
    } else {
      __ ddiv($src1$$Register, $src2$$Register);
      __ mfhi($dst$$Register);
    }
  %}
  ins_pipe( pipe_slow );
%}
// MulI (reg, reg): three-operand 32-bit multiply.
instruct mulI_Reg_Reg(mRegI dst, mRegI src1, mRegI src2) %{
  match(Set dst (MulI src1 src2));

  ins_cost(300);
  format %{ "mul $dst, $src1, $src2 @ mulI_Reg_Reg" %}
  ins_encode %{
    __ mul($dst$$Register, $src1$$Register, $src2$$Register);
  %}
  ins_pipe( ialu_mult );
%}
// Fused multiply-add for ints: dst = src1 * src2 + src3.
instruct maddI_Reg_Reg(mRegI dst, mRegI src1, mRegI src2, mRegI src3) %{
  match(Set dst (AddI (MulI src1 src2) src3));

  ins_cost(999);
  format %{ "madd $dst, $src1 * $src2 + $src3 #@maddI_Reg_Reg" %}
  ins_encode %{
    Register src1 = $src1$$Register;
    Register src2 = $src2$$Register;
    Register src3 = $src3$$Register;
    Register dst = $dst$$Register;

    // Seed LO with the addend; madd then accumulates HI:LO += src1 * src2.
    // HI is left stale (no mthi), but that is harmless: only the low 32
    // bits are read back via mflo, and the low word of the accumulation
    // does not depend on HI.
    __ mtlo(src3);
    __ madd(src1, src2);
    __ mflo(dst);
  %}
  ins_pipe( ialu_mult );
%}
// DivI: signed 32-bit division. The quotient ends up in LO (div) or is
// produced directly (gsdiv on Loongson).
instruct divI_Reg_Reg(mRegI dst, mRegI src1, mRegI src2) %{
  match(Set dst (DivI src1 src2));

  ins_cost(300);
  format %{ "div $dst, $src1, $src2 @ divI_Reg_Reg" %}
  ins_encode %{
    Register src1 = $src1$$Register;
    Register src2 = $src2$$Register;
    Register dst = $dst$$Register;

    /* 2012/4/21 Jin: In MIPS, div does not cause exception.
       We must trap an exception manually. */
    // teq traps (code 0x7) when src2 == 0 so the runtime can raise the
    // required ArithmeticException for division by zero.
    __ teq(R0, src2, 0x7);

    if (UseLoongsonISA) {
      __ gsdiv(dst, src1, src2);
    } else {
      __ div(src1, src2);

      // NOTE(review): the two nops look like a div -> mflo pipeline-hazard
      // workaround on non-Loongson cores — confirm against the target's
      // HI/LO access rules before removing.
      __ nop();
      __ nop();
      __ mflo(dst);
    }
  %}
  ins_pipe( ialu_mod );
%}
// DivF: single-precision FP division. No explicit zero check — FP division
// follows IEEE-754 semantics (Inf/NaN) rather than trapping.
instruct divF_Reg_Reg(regF dst, regF src1, regF src2) %{
  match(Set dst (DivF src1 src2));

  ins_cost(300);
  format %{ "divF $dst, $src1, $src2 @ divF_Reg_Reg" %}
  ins_encode %{
    __ div_s($dst$$FloatRegister, $src1$$FloatRegister, $src2$$FloatRegister);
  %}
  ins_pipe( pipe_slow );
%}
// DivD: double-precision FP division; IEEE-754 semantics, no manual trap.
instruct divD_Reg_Reg(regD dst, regD src1, regD src2) %{
  match(Set dst (DivD src1 src2));

  ins_cost(300);
  format %{ "divD $dst, $src1, $src2 @ divD_Reg_Reg" %}
  ins_encode %{
    __ div_d($dst$$FloatRegister, $src1$$FloatRegister, $src2$$FloatRegister);
  %}
  ins_pipe( pipe_slow );
%}
// MulL: 64-bit multiply. Fused gsdmult on Loongson, dmult + mflo otherwise.
instruct mulL_reg_reg(mRegL dst, mRegL src1, mRegL src2) %{
  match(Set dst (MulL src1 src2));
  format %{ "mulL $dst, $src1, $src2 @mulL_reg_reg" %}
  ins_encode %{
    if (UseLoongsonISA) {
      __ gsdmult($dst$$Register, $src1$$Register, $src2$$Register);
    } else {
      __ dmult($src1$$Register, $src2$$Register);
      __ mflo($dst$$Register);
    }
  %}
  ins_pipe( pipe_slow );
%}
// MulL (long * ConvI2L(int)): widening is free; same sequence as
// mulL_reg_reg.
instruct mulL_reg_regI2L(mRegL dst, mRegL src1, mRegI src2) %{
  match(Set dst (MulL src1 (ConvI2L src2)));
  format %{ "mulL $dst, $src1, $src2 @mulL_reg_regI2L" %}
  ins_encode %{
    if (UseLoongsonISA) {
      __ gsdmult($dst$$Register, $src1$$Register, $src2$$Register);
    } else {
      __ dmult($src1$$Register, $src2$$Register);
      __ mflo($dst$$Register);
    }
  %}
  ins_pipe( pipe_slow );
%}
// DivL: 64-bit division; quotient from gsddiv (Loongson) or ddiv + mflo.
// NOTE(review): unlike divI_Reg_Reg there is no teq zero-divisor trap here —
// presumably handled elsewhere; confirm.
instruct divL_reg_reg(mRegL dst, mRegL src1, mRegL src2) %{
  match(Set dst (DivL src1 src2));
  format %{ "divL $dst, $src1, $src2 @divL_reg_reg" %}

  ins_encode %{
    if (UseLoongsonISA) {
      __ gsddiv($dst$$Register, $src1$$Register, $src2$$Register);
    } else {
      __ ddiv($src1$$Register, $src2$$Register);
      __ mflo($dst$$Register);
    }
  %}
  ins_pipe( pipe_slow );
%}
// AddF: single-precision FP add.
instruct addF_reg_reg(regF dst, regF src1, regF src2) %{
  match(Set dst (AddF src1 src2));
  format %{ "AddF $dst, $src1, $src2 @addF_reg_reg" %}
  ins_encode %{
    __ add_s($dst$$FloatRegister, $src1$$FloatRegister, $src2$$FloatRegister);
  %}
  ins_pipe( fpu_regF_regF );
%}
// SubF: single-precision FP subtract.
instruct subF_reg_reg(regF dst, regF src1, regF src2) %{
  match(Set dst (SubF src1 src2));
  format %{ "SubF $dst, $src1, $src2 @subF_reg_reg" %}
  ins_encode %{
    __ sub_s($dst$$FloatRegister, $src1$$FloatRegister, $src2$$FloatRegister);
  %}
  ins_pipe( fpu_regF_regF );
%}
// AddD: double-precision FP add.
instruct addD_reg_reg(regD dst, regD src1, regD src2) %{
  match(Set dst (AddD src1 src2));
  format %{ "AddD $dst, $src1, $src2 @addD_reg_reg" %}
  ins_encode %{
    __ add_d($dst$$FloatRegister, $src1$$FloatRegister, $src2$$FloatRegister);
  %}
  ins_pipe( fpu_regF_regF );
%}
// SubD: double-precision FP subtract.
instruct subD_reg_reg(regD dst, regD src1, regD src2) %{
  match(Set dst (SubD src1 src2));
  format %{ "SubD $dst, $src1, $src2 @subD_reg_reg" %}
  ins_encode %{
    __ sub_d($dst$$FloatRegister, $src1$$FloatRegister, $src2$$FloatRegister);
  %}
  ins_pipe( fpu_regF_regF );
%}
// NegF: single-precision FP negate.
instruct negF_reg(regF dst, regF src) %{
  match(Set dst (NegF src));
  format %{ "negF $dst, $src @negF_reg" %}
  ins_encode %{
    __ neg_s($dst$$FloatRegister, $src$$FloatRegister);
  %}
  ins_pipe( fpu_regF_regF );
%}
// NegD: double-precision FP negate.
instruct negD_reg(regD dst, regD src) %{
  match(Set dst (NegD src));
  format %{ "negD $dst, $src @negD_reg" %}
  ins_encode %{
    __ neg_d($dst$$FloatRegister, $src$$FloatRegister);
  %}
  ins_pipe( fpu_regF_regF );
%}
// MulF: single-precision FP multiply.
instruct mulF_reg_reg(regF dst, regF src1, regF src2) %{
  match(Set dst (MulF src1 src2));
  format %{ "MULF $dst, $src1, $src2 @mulF_reg_reg" %}
  ins_encode %{
    __ mul_s($dst$$FloatRegister, $src1$$FloatRegister, $src2$$FloatRegister);
  %}
  ins_pipe( fpu_regF_regF );
%}
// Fused single-precision multiply-add: dst = src1 * src2 + src3.
instruct maddF_reg_reg(regF dst, regF src1, regF src2, regF src3) %{
  match(Set dst (AddF (MulF src1 src2) src3));
  // Effectively disabled by the huge cost, for compatibility reasons
  // (e.g. on the Loongson platform).
  ins_cost(44444);
  format %{ "maddF $dst, $src1, $src2, $src3 @maddF_reg_reg" %}
  ins_encode %{
    __ madd_s($dst$$FloatRegister, $src1$$FloatRegister,
              $src2$$FloatRegister, $src3$$FloatRegister);
  %}
  ins_pipe( fpu_regF_regF );
%}
10447 // Mul two double precision floating piont number
// MulD: double-precision FP multiply.
instruct mulD_reg_reg(regD dst, regD src1, regD src2) %{
  match(Set dst (MulD src1 src2));
  format %{ "MULD $dst, $src1, $src2 @mulD_reg_reg" %}
  ins_encode %{
    __ mul_d($dst$$FloatRegister, $src1$$FloatRegister, $src2$$FloatRegister);
  %}
  ins_pipe( fpu_regF_regF );
%}
// Fused double-precision multiply-add: dst = src1 * src2 + src3.
instruct maddD_reg_reg(regD dst, regD src1, regD src2, regD src3) %{
  match(Set dst (AddD (MulD src1 src2) src3));
  // Effectively disabled by the huge cost, for compatibility reasons
  // (e.g. on the Loongson platform).
  ins_cost(44444);
  format %{ "maddD $dst, $src1, $src2, $src3 @maddD_reg_reg" %}
  ins_encode %{
    __ madd_d($dst$$FloatRegister, $src1$$FloatRegister,
              $src2$$FloatRegister, $src3$$FloatRegister);
  %}
  ins_pipe( fpu_regF_regF );
%}
// AbsF: single-precision absolute value.
instruct absF_reg(regF dst, regF src) %{
  match(Set dst (AbsF src));
  ins_cost(100);
  format %{ "absF $dst, $src @absF_reg" %}
  ins_encode %{
    __ abs_s($dst$$FloatRegister, $src$$FloatRegister);
  %}
  ins_pipe( fpu_regF_regF );
%}
10491 // intrinsics for math_native.
10492 // AbsD SqrtD CosD SinD TanD LogD Log10D
// AbsD: double-precision absolute value.
instruct absD_reg(regD dst, regD src) %{
  match(Set dst (AbsD src));
  ins_cost(100);
  format %{ "absD $dst, $src @absD_reg" %}
  ins_encode %{
    __ abs_d($dst$$FloatRegister, $src$$FloatRegister);
  %}
  ins_pipe( fpu_regF_regF );
%}
// SqrtD: double-precision square root.
instruct sqrtD_reg(regD dst, regD src) %{
  match(Set dst (SqrtD src));
  ins_cost(100);
  format %{ "SqrtD $dst, $src @sqrtD_reg" %}
  ins_encode %{
    __ sqrt_d($dst$$FloatRegister, $src$$FloatRegister);
  %}
  ins_pipe( fpu_regF_regF );
%}
// Single-precision sqrt: folds the ConvF2D/SqrtD/ConvD2F round-trip the
// ideal graph produces for Math.sqrt on floats into one sqrt_s.
instruct sqrtF_reg(regF dst, regF src) %{
  match(Set dst (ConvD2F (SqrtD (ConvF2D src))));
  ins_cost(100);
  format %{ "SqrtF $dst, $src @sqrtF_reg" %}
  ins_encode %{
    __ sqrt_s($dst$$FloatRegister, $src$$FloatRegister);
  %}
  ins_pipe( fpu_regF_regF );
%}
10532 //----------------------------------Logical Instructions----------------------
10533 //__________________________________Integer Logical Instructions-------------
10535 //And Instuctions
10536 // And Register with Immediate
// AndI with an arbitrary immediate: materialize it in AT (clobbered), then
// and. Cheaper forms below handle 16-bit and contiguous-mask constants.
instruct andI_Reg_immI(mRegI dst, mRegI src1, immI src2) %{
  match(Set dst (AndI src1 src2));

  format %{ "and $dst, $src1, $src2 #@andI_Reg_immI" %}
  ins_encode %{
    __ move(AT, $src2$$constant);
    __ andr($dst$$Register, $src1$$Register, AT);
  %}
  ins_pipe( ialu_regI_regI );
%}
// AndI with an immediate in [0, 65535]: fits andi's zero-extended field.
instruct andI_Reg_imm_0_65535(mRegI dst, mRegI src1, immI_0_65535 src2) %{
  match(Set dst (AndI src1 src2));
  ins_cost(60);

  format %{ "and $dst, $src1, $src2 #@andI_Reg_imm_0_65535" %}
  ins_encode %{
    __ andi($dst$$Register, $src1$$Register, $src2$$constant);
  %}
  ins_pipe( ialu_regI_regI );
%}
// AndI with a contiguous low-bit mask ((1 << n) - 1): a single ext of the
// low n bits is equivalent to the and.
instruct andI_Reg_immI_nonneg_mask(mRegI dst, mRegI src1, immI_nonneg_mask mask) %{
  match(Set dst (AndI src1 mask));
  ins_cost(60);

  format %{ "and $dst, $src1, $mask #@andI_Reg_immI_nonneg_mask" %}
  ins_encode %{
    int width = Assembler::is_int_mask($mask$$constant);

    __ ext($dst$$Register, $src1$$Register, 0, width);
  %}
  ins_pipe( ialu_regI_regI );
%}
// AndL with a contiguous low-bit mask: 64-bit dext extract replaces the and.
instruct andL_Reg_immL_nonneg_mask(mRegL dst, mRegL src1, immL_nonneg_mask mask) %{
  match(Set dst (AndL src1 mask));
  ins_cost(60);

  format %{ "and $dst, $src1, $mask #@andL_Reg_immL_nonneg_mask" %}
  ins_encode %{
    int width = Assembler::is_jlong_mask($mask$$constant);

    __ dext($dst$$Register, $src1$$Register, 0, width);
  %}
  ins_pipe( ialu_regI_regI );
%}
// XorI with an immediate in [0, 65535]: single xori.
instruct xorI_Reg_imm_0_65535(mRegI dst, mRegI src1, immI_0_65535 src2) %{
  match(Set dst (XorI src1 src2));
  ins_cost(60);

  format %{ "xori $dst, $src1, $src2 #@xorI_Reg_imm_0_65535" %}
  ins_encode %{
    __ xori($dst$$Register, $src1$$Register, $src2$$constant);
  %}
  ins_pipe( ialu_regI_regI );
%}
// XorI with -1 (bitwise not): x ^ -1 == ~x == R0 | ~x, one gsorn on
// Loongson 3A2000+.
instruct xorI_Reg_immI_M1(mRegI dst, mRegI src1, immI_M1 M1) %{
  match(Set dst (XorI src1 M1));
  predicate(UseLoongsonISA && Use3A2000);
  ins_cost(60);

  format %{ "xor $dst, $src1, $M1 #@xorI_Reg_immI_M1" %}
  ins_encode %{
    __ gsorn($dst$$Register, R0, $src1$$Register);
  %}
  ins_pipe( ialu_regI_regI );
%}
// Not of a narrowed long: ConvL2I is free, so emit the same single gsorn.
instruct xorL2I_Reg_immI_M1(mRegI dst, mRegL src1, immI_M1 M1) %{
  match(Set dst (XorI (ConvL2I src1) M1));
  predicate(UseLoongsonISA && Use3A2000);
  ins_cost(60);

  format %{ "xor $dst, $src1, $M1 #@xorL2I_Reg_immI_M1" %}
  ins_encode %{
    __ gsorn($dst$$Register, R0, $src1$$Register);
  %}
  ins_pipe( ialu_regI_regI );
%}
// XorL with an immediate in [0, 65535]: single xori (zero-extended field).
instruct xorL_Reg_imm_0_65535(mRegL dst, mRegL src1, immL_0_65535 src2) %{
  match(Set dst (XorL src1 src2));
  ins_cost(60);

  format %{ "xori $dst, $src1, $src2 #@xorL_Reg_imm_0_65535" %}
  ins_encode %{
    __ xori($dst$$Register, $src1$$Register, $src2$$constant);
  %}
  ins_pipe( ialu_regI_regI );
%}
10657 /*
10658 instruct xorL_Reg_immL_M1(mRegL dst, mRegL src1, immL_M1 M1) %{
10659 match(Set dst (XorL src1 M1));
10660 predicate(UseLoongsonISA);
10661 ins_cost(60);
10663 format %{ "xor $dst, $src1, $M1 #@xorL_Reg_immL_M1" %}
10664 ins_encode %{
10665 Register dst = $dst$$Register;
10666 Register src = $src1$$Register;
10668 __ gsorn(dst, R0, src);
10669 %}
10670 ins_pipe( ialu_regI_regI );
10671 %}
10672 */
// (0xFF & LoadB): the zero-extending byte load already implies the mask,
// so emit a single lbu (via load_UB_enc).
// Fix: format text previously said "lhu" although this pattern emits a
// byte load, which made -XX:+PrintOptoAssembly output misleading.
instruct lbu_and_lmask(mRegI dst, memory mem, immI_255 mask) %{
  match(Set dst (AndI mask (LoadB mem)));
  ins_cost(60);

  format %{ "lbu $dst, $mem #@lbu_and_lmask" %}
  ins_encode(load_UB_enc(dst, mem));
  ins_pipe( ialu_loadI );
%}
// (LoadB & 0xFF): commuted form of lbu_and_lmask — same single lbu.
// Fix: format text previously said "lhu" although this pattern emits a
// byte load, which made -XX:+PrintOptoAssembly output misleading.
instruct lbu_and_rmask(mRegI dst, memory mem, immI_255 mask) %{
  match(Set dst (AndI (LoadB mem) mask));
  ins_cost(60);

  format %{ "lbu $dst, $mem #@lbu_and_rmask" %}
  ins_encode(load_UB_enc(dst, mem));
  ins_pipe( ialu_loadI );
%}
// AndI (reg, reg): single bitwise and.
instruct andI_Reg_Reg(mRegI dst, mRegI src1, mRegI src2) %{
  match(Set dst (AndI src1 src2));

  format %{ "and $dst, $src1, $src2 #@andI_Reg_Reg" %}
  ins_encode %{
    __ andr($dst$$Register, $src1$$Register, $src2$$Register);
  %}
  ins_pipe( ialu_regI_regI );
%}
// src1 & ~src2: fused and-not (gsandn) on Loongson 3A2000+.
instruct andnI_Reg_nReg(mRegI dst, mRegI src1, mRegI src2, immI_M1 M1) %{
  match(Set dst (AndI src1 (XorI src2 M1)));
  predicate(UseLoongsonISA && Use3A2000);

  format %{ "andn $dst, $src1, $src2 #@andnI_Reg_nReg" %}
  ins_encode %{
    __ gsandn($dst$$Register, $src1$$Register, $src2$$Register);
  %}
  ins_pipe( ialu_regI_regI );
%}
// src1 | ~src2: fused or-not (gsorn) on Loongson 3A2000+.
instruct ornI_Reg_nReg(mRegI dst, mRegI src1, mRegI src2, immI_M1 M1) %{
  match(Set dst (OrI src1 (XorI src2 M1)));
  predicate(UseLoongsonISA && Use3A2000);

  format %{ "orn $dst, $src1, $src2 #@ornI_Reg_nReg" %}
  ins_encode %{
    __ gsorn($dst$$Register, $src1$$Register, $src2$$Register);
  %}
  ins_pipe( ialu_regI_regI );
%}
// ~src1 & src2: commuted and-not — note the swapped operand order in the
// gsandn (it computes op2 & ~op3).
instruct andnI_nReg_Reg(mRegI dst, mRegI src1, mRegI src2, immI_M1 M1) %{
  match(Set dst (AndI (XorI src1 M1) src2));
  predicate(UseLoongsonISA && Use3A2000);

  format %{ "andn $dst, $src2, $src1 #@andnI_nReg_Reg" %}
  ins_encode %{
    __ gsandn($dst$$Register, $src2$$Register, $src1$$Register);
  %}
  ins_pipe( ialu_regI_regI );
%}
// ~src1 | src2: commuted or-not — swapped operand order in the gsorn.
instruct ornI_nReg_Reg(mRegI dst, mRegI src1, mRegI src2, immI_M1 M1) %{
  match(Set dst (OrI (XorI src1 M1) src2));
  predicate(UseLoongsonISA && Use3A2000);

  format %{ "orn $dst, $src2, $src1 #@ornI_nReg_Reg" %}
  ins_encode %{
    __ gsorn($dst$$Register, $src2$$Register, $src1$$Register);
  %}
  ins_pipe( ialu_regI_regI );
%}
10765 // And Long Register with Register
// AndL (reg, reg): single 64-bit and.
instruct andL_Reg_Reg(mRegL dst, mRegL src1, mRegL src2) %{
  match(Set dst (AndL src1 src2));
  format %{ "AND $dst, $src1, $src2 @ andL_Reg_Reg\n\t" %}
  ins_encode %{
    __ andr($dst$$Register, $src1$$Register, $src2$$Register);
  %}
  ins_pipe( ialu_regL_regL );
%}
// AndL (long & ConvI2L(int)): widening is free, single and.
instruct andL_Reg_Reg_convI2L(mRegL dst, mRegL src1, mRegI src2) %{
  match(Set dst (AndL src1 (ConvI2L src2)));
  format %{ "AND $dst, $src1, $src2 @ andL_Reg_Reg_convI2L\n\t" %}
  ins_encode %{
    __ andr($dst$$Register, $src1$$Register, $src2$$Register);
  %}
  ins_pipe( ialu_regL_regL );
%}
// AndL with an immediate in [0, 65535]: single andi.
instruct andL_Reg_imm_0_65535(mRegL dst, mRegL src1, immL_0_65535 src2) %{
  match(Set dst (AndL src1 src2));
  ins_cost(60);

  format %{ "and $dst, $src1, $src2 #@andL_Reg_imm_0_65535" %}
  ins_encode %{
    __ andi($dst$$Register, $src1$$Register, $src2$$constant);
  %}
  ins_pipe( ialu_regI_regI );
%}
// ConvL2I(AndL reg, imm16): the mask already clears the upper bits, so the
// narrowing is free and a single andi suffices.
instruct andL2I_Reg_imm_0_65535(mRegI dst, mRegL src1, immL_0_65535 src2) %{
  match(Set dst (ConvL2I (AndL src1 src2)));
  ins_cost(60);

  format %{ "and $dst, $src1, $src2 #@andL2I_Reg_imm_0_65535" %}
  ins_encode %{
    __ andi($dst$$Register, $src1$$Register, $src2$$constant);
  %}
  ins_pipe( ialu_regI_regI );
%}
10822 /*
10823 instruct andnL_Reg_nReg(mRegL dst, mRegL src1, mRegL src2, immL_M1 M1) %{
10824 match(Set dst (AndL src1 (XorL src2 M1)));
10825 predicate(UseLoongsonISA);
10827 format %{ "andn $dst, $src1, $src2 #@andnL_Reg_nReg" %}
10828 ins_encode %{
10829 Register dst = $dst$$Register;
10830 Register src1 = $src1$$Register;
10831 Register src2 = $src2$$Register;
10833 __ gsandn(dst, src1, src2);
10834 %}
10835 ins_pipe( ialu_regI_regI );
10836 %}
10837 */
10839 /*
10840 instruct ornL_Reg_nReg(mRegL dst, mRegL src1, mRegL src2, immL_M1 M1) %{
10841 match(Set dst (OrL src1 (XorL src2 M1)));
10842 predicate(UseLoongsonISA);
10844 format %{ "orn $dst, $src1, $src2 #@ornL_Reg_nReg" %}
10845 ins_encode %{
10846 Register dst = $dst$$Register;
10847 Register src1 = $src1$$Register;
10848 Register src2 = $src2$$Register;
10850 __ gsorn(dst, src1, src2);
10851 %}
10852 ins_pipe( ialu_regI_regI );
10853 %}
10854 */
10856 /*
10857 instruct andnL_nReg_Reg(mRegL dst, mRegL src1, mRegL src2, immL_M1 M1) %{
10858 match(Set dst (AndL (XorL src1 M1) src2));
10859 predicate(UseLoongsonISA);
10861 format %{ "andn $dst, $src2, $src1 #@andnL_nReg_Reg" %}
10862 ins_encode %{
10863 Register dst = $dst$$Register;
10864 Register src1 = $src1$$Register;
10865 Register src2 = $src2$$Register;
10867 __ gsandn(dst, src2, src1);
10868 %}
10869 ins_pipe( ialu_regI_regI );
10870 %}
10871 */
10873 /*
10874 instruct ornL_nReg_Reg(mRegL dst, mRegL src1, mRegL src2, immL_M1 M1) %{
10875 match(Set dst (OrL (XorL src1 M1) src2));
10876 predicate(UseLoongsonISA);
10878 format %{ "orn $dst, $src2, $src1 #@ornL_nReg_Reg" %}
10879 ins_encode %{
10880 Register dst = $dst$$Register;
10881 Register src1 = $src1$$Register;
10882 Register src2 = $src2$$Register;
10884 __ gsorn(dst, src2, src1);
10885 %}
10886 ins_pipe( ialu_regI_regI );
10887 %}
10888 */
// AndL with -8 (...11111000): clears bits [2:0] in place by inserting 3
// zero bits from R0 at position 0 — i.e. aligns $dst down to 8.
instruct andL_Reg_immL_M8(mRegL dst, immL_M8 M8) %{
  match(Set dst (AndL dst M8));
  ins_cost(60);

  format %{ "and $dst, $dst, $M8 #@andL_Reg_immL_M8" %}
  ins_encode %{
    Register dst = $dst$$Register;

    __ dins(dst, R0, 0, 3);
  %}
  ins_pipe( ialu_regI_regI );
%}
// AndL with -5 (...11111011): clears bit 2 in place (insert 1 zero bit at
// position 2 from R0).
instruct andL_Reg_immL_M5(mRegL dst, immL_M5 M5) %{
  match(Set dst (AndL dst M5));
  ins_cost(60);

  format %{ "and $dst, $dst, $M5 #@andL_Reg_immL_M5" %}
  ins_encode %{
    Register dst = $dst$$Register;

    __ dins(dst, R0, 2, 1);
  %}
  ins_pipe( ialu_regI_regI );
%}
// AndL with -7 (...11111001): clears bits [2:1] in place (insert 2 zero
// bits at position 1 from R0).
instruct andL_Reg_immL_M7(mRegL dst, immL_M7 M7) %{
  match(Set dst (AndL dst M7));
  ins_cost(60);

  format %{ "and $dst, $dst, $M7 #@andL_Reg_immL_M7" %}
  ins_encode %{
    Register dst = $dst$$Register;

    __ dins(dst, R0, 1, 2);
  %}
  ins_pipe( ialu_regI_regI );
%}
// AndL with -4 (...11111100): clears bits [1:0] in place — aligns $dst
// down to 4.
instruct andL_Reg_immL_M4(mRegL dst, immL_M4 M4) %{
  match(Set dst (AndL dst M4));
  ins_cost(60);

  format %{ "and $dst, $dst, $M4 #@andL_Reg_immL_M4" %}
  ins_encode %{
    Register dst = $dst$$Register;

    __ dins(dst, R0, 0, 2);
  %}
  ins_pipe( ialu_regI_regI );
%}
// AndL with -121 (...10000111): clears bits [6:3] in place (insert 4 zero
// bits at position 3 from R0).
instruct andL_Reg_immL_M121(mRegL dst, immL_M121 M121) %{
  match(Set dst (AndL dst M121));
  ins_cost(60);

  format %{ "and $dst, $dst, $M121 #@andL_Reg_immL_M121" %}
  ins_encode %{
    Register dst = $dst$$Register;

    __ dins(dst, R0, 3, 4);
  %}
  ins_pipe( ialu_regI_regI );
%}
10955 // Or Long Register with Register
// OrL (reg, reg): single 64-bit or.
instruct orL_Reg_Reg(mRegL dst, mRegL src1, mRegL src2) %{
  match(Set dst (OrL src1 src2));
  format %{ "OR $dst, $src1, $src2 @ orL_Reg_Reg\t" %}
  ins_encode %{
    __ orr($dst$$Register, $src1$$Register, $src2$$Register);
  %}
  ins_pipe( ialu_regL_regL );
%}
// OrL where the first input is a pointer reinterpreted as a long (CastP2X):
// the cast is a no-op at the register level, so this is a plain 64-bit OR.
10969 instruct orL_Reg_P2XReg(mRegL dst, mRegP src1, mRegL src2) %{
10970 match(Set dst (OrL (CastP2X src1) src2));
10971 format %{ "OR $dst, $src1, $src2 @ orL_Reg_P2XReg\t" %}
10972 ins_encode %{
10973 Register dst_reg = $dst$$Register;
10974 Register src1_reg = $src1$$Register;
10975 Register src2_reg = $src2$$Register;
10977 __ orr(dst_reg, src1_reg, src2_reg);
10978 %}
10979 ins_pipe( ialu_regL_regL );
10980 %}
10982 // Xor Long Register with Register
// 64-bit bitwise XOR; single `xor` instruction.
10983 instruct xorL_Reg_Reg(mRegL dst, mRegL src1, mRegL src2) %{
10984 match(Set dst (XorL src1 src2));
10985 format %{ "XOR $dst, $src1, $src2 @ xorL_Reg_Reg\t" %}
10986 ins_encode %{
10987 Register dst_reg = as_Register($dst$$reg);
10988 Register src1_reg = as_Register($src1$$reg);
10989 Register src2_reg = as_Register($src2$$reg);
10991 __ xorr(dst_reg, src1_reg, src2_reg);
10992 %}
10993 ins_pipe( ialu_regL_regL );
10994 %}
10996 // Shift Left by 8-bit immediate
// 32-bit shift left; MIPS sll encodes only the low 5 bits of the shift
// amount, matching Java's (shift & 0x1f) semantics for int shifts.
10997 instruct salI_Reg_imm(mRegI dst, mRegI src, immI8 shift) %{
10998 match(Set dst (LShiftI src shift));
11000 format %{ "SHL $dst, $src, $shift #@salI_Reg_imm" %}
11001 ins_encode %{
11002 Register src = $src$$Register;
11003 Register dst = $dst$$Register;
11004 int shamt = $shift$$constant;
11006 __ sll(dst, src, shamt);
11007 %}
11008 ins_pipe( ialu_regI_regI );
11009 %}
// Int shift-left of a long narrowed with ConvL2I. sll operates on the low
// 32 bits and sign-extends the result, so the explicit narrowing is folded
// into the shift itself.
11011 instruct salL2I_Reg_imm(mRegI dst, mRegL src, immI8 shift) %{
11012 match(Set dst (LShiftI (ConvL2I src) shift));
11014 format %{ "SHL $dst, $src, $shift #@salL2I_Reg_imm" %}
11015 ins_encode %{
11016 Register src = $src$$Register;
11017 Register dst = $dst$$Register;
11018 int shamt = $shift$$constant;
11020 __ sll(dst, src, shamt);
11021 %}
11022 ins_pipe( ialu_regI_regI );
11023 %}
// (src << 16) & 0xffff0000: the AND mask is redundant because sll already
// zeroes the low 16 bits, so a single shift suffices.
11025 instruct salI_Reg_imm_and_M65536(mRegI dst, mRegI src, immI_16 shift, immI_M65536 mask) %{
11026 match(Set dst (AndI (LShiftI src shift) mask));
11028 format %{ "SHL $dst, $src, $shift #@salI_Reg_imm_and_M65536" %}
11029 ins_encode %{
11030 Register src = $src$$Register;
11031 Register dst = $dst$$Register;
11033 __ sll(dst, src, 16);
11034 %}
11035 ins_pipe( ialu_regI_regI );
11036 %}
// ((int)(src & 7) << 16) >> 16: the i2s round-trip is a no-op because the
// value already fits in 3 bits, so the whole pattern collapses to andi.
11038 instruct land7_2_s(mRegI dst, mRegL src, immL7 seven, immI_16 sixteen)
11039 %{
11040 match(Set dst (RShiftI (LShiftI (ConvL2I (AndL src seven)) sixteen) sixteen));
11042 format %{ "andi $dst, $src, 7\t# @land7_2_s" %}
11043 ins_encode %{
11044 Register src = $src$$Register;
11045 Register dst = $dst$$Register;
11047 __ andi(dst, src, 7);
11048 %}
11049 ins_pipe(ialu_regI_regI);
11050 %}
// ((src1 | imm) << 16) >> 16 with imm in [0, 32767]: the sign-extension
// round-trip is folded away, leaving a single ori.
// NOTE(review): correctness presumably relies on C2 only emitting this i2s
// idiom when src1's high bits are benign — TODO confirm against matcher use.
11052 instruct ori2s(mRegI dst, mRegI src1, immI_0_32767 src2, immI_16 sixteen)
11053 %{
11054 match(Set dst (RShiftI (LShiftI (OrI src1 src2) sixteen) sixteen));
11056 format %{ "ori $dst, $src1, $src2\t# @ori2s" %}
11057 ins_encode %{
11058 Register src = $src1$$Register;
11059 int val = $src2$$constant;
11060 Register dst = $dst$$Register;
11062 __ ori(dst, src, val);
11063 %}
11064 ins_pipe(ialu_regI_regI);
11065 %}
11067 // Shift Left by 16, followed by Arithmetic Shift Right by 16.
11068 // This idiom is used by the compiler for the i2s bytecode; it reduces to
// a single sign-extend-halfword (seh) instruction.
11069 instruct i2s(mRegI dst, mRegI src, immI_16 sixteen)
11070 %{
11071 match(Set dst (RShiftI (LShiftI src sixteen) sixteen));
11073 format %{ "i2s $dst, $src\t# @i2s" %}
11074 ins_encode %{
11075 Register src = $src$$Register;
11076 Register dst = $dst$$Register;
11078 __ seh(dst, src);
11079 %}
11080 ins_pipe(ialu_regI_regI);
11081 %}
11083 // Shift Left by 24, followed by Arithmetic Shift Right by 24.
11084 // This idiom is used by the compiler for the i2b bytecode; it reduces to
// a single sign-extend-byte (seb) instruction.
11085 instruct i2b(mRegI dst, mRegI src, immI_24 twentyfour)
11086 %{
11087 match(Set dst (RShiftI (LShiftI src twentyfour) twentyfour));
11089 format %{ "i2b $dst, $src\t# @i2b" %}
11090 ins_encode %{
11091 Register src = $src$$Register;
11092 Register dst = $dst$$Register;
11094 __ seb(dst, src);
11095 %}
11096 ins_pipe(ialu_regI_regI);
11097 %}
// Same pattern as salL2I_Reg_imm above (LShiftI of ConvL2I); appears to be
// a duplicate rule — the matcher will simply pick one of them.
11100 instruct salI_RegL2I_imm(mRegI dst, mRegL src, immI8 shift) %{
11101 match(Set dst (LShiftI (ConvL2I src) shift));
11103 format %{ "SHL $dst, $src, $shift #@salI_RegL2I_imm" %}
11104 ins_encode %{
11105 Register src = $src$$Register;
11106 Register dst = $dst$$Register;
11107 int shamt = $shift$$constant;
11109 __ sll(dst, src, shamt);
11110 %}
11111 ins_pipe( ialu_regI_regI );
11112 %}
11114 // Shift Left by register
// sllv uses only the low 5 bits of the shift register, matching Java int
// shift semantics.
11115 instruct salI_Reg_Reg(mRegI dst, mRegI src, mRegI shift) %{
11116 match(Set dst (LShiftI src shift));
11118 format %{ "SHL $dst, $src, $shift #@salI_Reg_Reg" %}
11119 ins_encode %{
11120 Register src = $src$$Register;
11121 Register dst = $dst$$Register;
11122 Register shamt = $shift$$Register;
11123 __ sllv(dst, src, shamt);
11124 %}
11125 ins_pipe( ialu_regI_regI );
11126 %}
11129 // Shift Left Long
// 64-bit shift left by constant. dsll encodes shift amounts 0..31; amounts
// 32..63 need dsll32 (which shifts by sa+32), hence the split below.
11130 instruct salL_Reg_imm(mRegL dst, mRegL src, immI8 shift) %{
11131 //predicate(UseNewLongLShift);
11132 match(Set dst (LShiftL src shift));
11133 ins_cost(100);
11134 format %{ "salL $dst, $src, $shift @ salL_Reg_imm" %}
11135 ins_encode %{
11136 Register src_reg = as_Register($src$$reg);
11137 Register dst_reg = as_Register($dst$$reg);
11138 int shamt = $shift$$constant;
// Fast path for small non-negative amounts (is_simm(.,5) covers -16..15);
// otherwise reduce the amount mod 64 and pick dsll vs. dsll32.
11140 if (__ is_simm(shamt, 5))
11141 __ dsll(dst_reg, src_reg, shamt);
11142 else
11143 {
11144 int sa = Assembler::low(shamt, 6);
11145 if (sa < 32) {
11146 __ dsll(dst_reg, src_reg, sa);
11147 } else {
11148 __ dsll32(dst_reg, src_reg, sa - 32);
11149 }
11150 }
11151 %}
11152 ins_pipe( ialu_regL_regL );
11153 %}
// Long shift-left of an int widened with ConvI2L. No explicit widening is
// emitted; presumably ints are kept sign-extended in 64-bit registers per
// the MIPS64 ABI convention — TODO confirm.
11155 instruct salL_RegI2L_imm(mRegL dst, mRegI src, immI8 shift) %{
11156 //predicate(UseNewLongLShift);
11157 match(Set dst (LShiftL (ConvI2L src) shift));
11158 ins_cost(100);
11159 format %{ "salL $dst, $src, $shift @ salL_RegI2L_imm" %}
11160 ins_encode %{
11161 Register src_reg = as_Register($src$$reg);
11162 Register dst_reg = as_Register($dst$$reg);
11163 int shamt = $shift$$constant;
// Same dsll/dsll32 split as salL_Reg_imm.
11165 if (__ is_simm(shamt, 5))
11166 __ dsll(dst_reg, src_reg, shamt);
11167 else
11168 {
11169 int sa = Assembler::low(shamt, 6);
11170 if (sa < 32) {
11171 __ dsll(dst_reg, src_reg, sa);
11172 } else {
11173 __ dsll32(dst_reg, src_reg, sa - 32);
11174 }
11175 }
11176 %}
11177 ins_pipe( ialu_regL_regL );
11178 %}
11180 // Shift Left Long by register
// dsllv uses the low 6 bits of the shift register (Java long semantics).
11181 instruct salL_Reg_Reg(mRegL dst, mRegL src, mRegI shift) %{
11182 //predicate(UseNewLongLShift);
11183 match(Set dst (LShiftL src shift));
11184 ins_cost(100);
11185 format %{ "salL $dst, $src, $shift @ salL_Reg_Reg" %}
11186 ins_encode %{
11187 Register src_reg = as_Register($src$$reg);
11188 Register dst_reg = as_Register($dst$$reg);
11190 __ dsllv(dst_reg, src_reg, $shift$$Register);
11191 %}
11192 ins_pipe( ialu_regL_regL );
11193 %}
// Duplicate of salL_RegI2L_imm above (same match rule, same encoding with
// braces instead of bare if/else); the matcher will pick one of the two.
11195 instruct salL_convI2L_Reg_imm(mRegL dst, mRegI src, immI8 shift) %{
11196 match(Set dst (LShiftL (ConvI2L src) shift));
11197 ins_cost(100);
11198 format %{ "salL $dst, $src, $shift @ salL_convI2L_Reg_imm" %}
11199 ins_encode %{
11200 Register src_reg = as_Register($src$$reg);
11201 Register dst_reg = as_Register($dst$$reg);
11202 int shamt = $shift$$constant;
11204 if (__ is_simm(shamt, 5)) {
11205 __ dsll(dst_reg, src_reg, shamt);
11206 } else {
11207 int sa = Assembler::low(shamt, 6);
11208 if (sa < 32) {
11209 __ dsll(dst_reg, src_reg, sa);
11210 } else {
11211 __ dsll32(dst_reg, src_reg, sa - 32);
11212 }
11213 }
11214 %}
11215 ins_pipe( ialu_regL_regL );
11216 %}
11218 // Shift Right Long (arithmetic) by constant
// Note: unlike salL_Reg_imm, the amount is explicitly masked with 0x3f here.
11219 instruct sarL_Reg_imm(mRegL dst, mRegL src, immI8 shift) %{
11220 match(Set dst (RShiftL src shift));
11221 ins_cost(100);
11222 format %{ "sarL $dst, $src, $shift @ sarL_Reg_imm" %}
11223 ins_encode %{
11224 Register src_reg = as_Register($src$$reg);
11225 Register dst_reg = as_Register($dst$$reg);
11226 int shamt = ($shift$$constant & 0x3f);
// dsra handles 0..31; dsra32 handles 32..63 (shifts by sa+32-32 = sa).
11227 if (__ is_simm(shamt, 5))
11228 __ dsra(dst_reg, src_reg, shamt);
11229 else {
11230 int sa = Assembler::low(shamt, 6);
11231 if (sa < 32) {
11232 __ dsra(dst_reg, src_reg, sa);
11233 } else {
11234 __ dsra32(dst_reg, src_reg, sa - 32);
11235 }
11236 }
11237 %}
11238 ins_pipe( ialu_regL_regL );
11239 %}
// (int)(src >> shift) with shift in [32, 63]: dsra32 leaves a properly
// sign-extended 32-bit value, so no extra narrowing is needed.
11241 instruct sarL2I_Reg_immI_32_63(mRegI dst, mRegL src, immI_32_63 shift) %{
11242 match(Set dst (ConvL2I (RShiftL src shift)));
11243 ins_cost(100);
11244 format %{ "sarL $dst, $src, $shift @ sarL2I_Reg_immI_32_63" %}
11245 ins_encode %{
11246 Register src_reg = as_Register($src$$reg);
11247 Register dst_reg = as_Register($dst$$reg);
11248 int shamt = $shift$$constant;
11250 __ dsra32(dst_reg, src_reg, shamt - 32);
11251 %}
11252 ins_pipe( ialu_regL_regL );
11253 %}
11255 // Shift Right Long arithmetically by register
// dsrav uses the low 6 bits of the shift register.
11256 instruct sarL_Reg_Reg(mRegL dst, mRegL src, mRegI shift) %{
11257 //predicate(UseNewLongLShift);
11258 match(Set dst (RShiftL src shift));
11259 ins_cost(100);
11260 format %{ "sarL $dst, $src, $shift @ sarL_Reg_Reg" %}
11261 ins_encode %{
11262 Register src_reg = as_Register($src$$reg);
11263 Register dst_reg = as_Register($dst$$reg);
11265 __ dsrav(dst_reg, src_reg, $shift$$Register);
11266 %}
11267 ins_pipe( ialu_regL_regL );
11268 %}
11270 // Shift Right Long logically by register
// dsrlv uses the low 6 bits of the shift register.
11271 instruct slrL_Reg_Reg(mRegL dst, mRegL src, mRegI shift) %{
11272 match(Set dst (URShiftL src shift));
11273 ins_cost(100);
11274 format %{ "slrL $dst, $src, $shift @ slrL_Reg_Reg" %}
11275 ins_encode %{
11276 Register src_reg = as_Register($src$$reg);
11277 Register dst_reg = as_Register($dst$$reg);
11279 __ dsrlv(dst_reg, src_reg, $shift$$Register);
11280 %}
11281 ins_pipe( ialu_regL_regL );
11282 %}
// Logical right shift of a long by a constant in [0, 31]: single dsrl.
11284 instruct slrL_Reg_immI_0_31(mRegL dst, mRegL src, immI_0_31 shift) %{
11285 match(Set dst (URShiftL src shift));
11286 ins_cost(80);
11287 format %{ "slrL $dst, $src, $shift @ slrL_Reg_immI_0_31" %}
11288 ins_encode %{
11289 Register src_reg = as_Register($src$$reg);
11290 Register dst_reg = as_Register($dst$$reg);
11291 int shamt = $shift$$constant;
11293 __ dsrl(dst_reg, src_reg, shamt);
11294 %}
11295 ins_pipe( ialu_regL_regL );
11296 %}
// (int)(src >>> shift) & 0x7fffffff: extract 31 bits starting at `shift`
// with a single dext (zero-extended, so the int result is non-negative).
11298 instruct slrL_Reg_immI_0_31_and_max_int(mRegI dst, mRegL src, immI_0_31 shift, immI_MaxI max_int) %{
11299 match(Set dst (AndI (ConvL2I (URShiftL src shift)) max_int));
11300 ins_cost(80);
11301 format %{ "dext $dst, $src, $shift, 31 @ slrL_Reg_immI_0_31_and_max_int" %}
11302 ins_encode %{
11303 Register src_reg = as_Register($src$$reg);
11304 Register dst_reg = as_Register($dst$$reg);
11305 int shamt = $shift$$constant;
11307 __ dext(dst_reg, src_reg, shamt, 31);
11308 %}
11309 ins_pipe( ialu_regL_regL );
11310 %}
// Logical right shift of pointer bits (CastP2X is a register-level no-op).
11312 instruct slrL_P2XReg_immI_0_31(mRegL dst, mRegP src, immI_0_31 shift) %{
11313 match(Set dst (URShiftL (CastP2X src) shift));
11314 ins_cost(80);
11315 format %{ "slrL $dst, $src, $shift @ slrL_P2XReg_immI_0_31" %}
11316 ins_encode %{
11317 Register src_reg = as_Register($src$$reg);
11318 Register dst_reg = as_Register($dst$$reg);
11319 int shamt = $shift$$constant;
11321 __ dsrl(dst_reg, src_reg, shamt);
11322 %}
11323 ins_pipe( ialu_regL_regL );
11324 %}
// Logical right shift by a constant in [32, 63]: dsrl32 shifts by sa+32.
11326 instruct slrL_Reg_immI_32_63(mRegL dst, mRegL src, immI_32_63 shift) %{
11327 match(Set dst (URShiftL src shift));
11328 ins_cost(80);
11329 format %{ "slrL $dst, $src, $shift @ slrL_Reg_immI_32_63" %}
11330 ins_encode %{
11331 Register src_reg = as_Register($src$$reg);
11332 Register dst_reg = as_Register($dst$$reg);
11333 int shamt = $shift$$constant;
11335 __ dsrl32(dst_reg, src_reg, shamt - 32);
11336 %}
11337 ins_pipe( ialu_regL_regL );
11338 %}
// (int)(src >>> shift) with shift strictly greater than 32: the result has
// fewer than 31 significant bits, so no explicit sign-extension to int is
// required. The predicate excludes shift == 32, where bit 31 could be set.
11340 instruct slrL_Reg_immI_convL2I(mRegI dst, mRegL src, immI_32_63 shift) %{
11341 match(Set dst (ConvL2I (URShiftL src shift)));
11342 predicate(n->in(1)->in(2)->get_int() > 32);
11343 ins_cost(80);
11344 format %{ "slrL $dst, $src, $shift @ slrL_Reg_immI_convL2I" %}
11345 ins_encode %{
11346 Register src_reg = as_Register($src$$reg);
11347 Register dst_reg = as_Register($dst$$reg);
11348 int shamt = $shift$$constant;
11350 __ dsrl32(dst_reg, src_reg, shamt - 32);
11351 %}
11352 ins_pipe( ialu_regL_regL );
11353 %}
// Logical right shift of pointer bits by a constant in [32, 63].
11355 instruct slrL_P2XReg_immI_32_63(mRegL dst, mRegP src, immI_32_63 shift) %{
11356 match(Set dst (URShiftL (CastP2X src) shift));
11357 ins_cost(80);
11358 format %{ "slrL $dst, $src, $shift @ slrL_P2XReg_immI_32_63" %}
11359 ins_encode %{
11360 Register src_reg = as_Register($src$$reg);
11361 Register dst_reg = as_Register($dst$$reg);
11362 int shamt = $shift$$constant;
11364 __ dsrl32(dst_reg, src_reg, shamt - 32);
11365 %}
11366 ins_pipe( ialu_regL_regL );
11367 %}
11369 // Xor Instructions
11370 // Xor Register with Register
// 32-bit XOR; the trailing sll-by-0 re-canonicalizes the result as a
// sign-extended 32-bit value in the 64-bit register.
11371 instruct xorI_Reg_Reg(mRegI dst, mRegI src1, mRegI src2) %{
11372 match(Set dst (XorI src1 src2));
11374 format %{ "XOR $dst, $src1, $src2 #@xorI_Reg_Reg" %}
11376 ins_encode %{
11377 Register dst = $dst$$Register;
11378 Register src1 = $src1$$Register;
11379 Register src2 = $src2$$Register;
11380 __ xorr(dst, src1, src2);
11381 __ sll(dst, dst, 0); /* long -> int */
11382 %}
11384 ins_pipe( ialu_regI_regI );
11385 %}
11387 // Or Instructions
11388 // Or Register with Register
// 32-bit OR; no re-canonicalization needed — OR of two sign-extended
// 32-bit values is itself sign-extended.
11389 instruct orI_Reg_Reg(mRegI dst, mRegI src1, mRegI src2) %{
11390 match(Set dst (OrI src1 src2));
11392 format %{ "OR $dst, $src1, $src2 #@orI_Reg_Reg" %}
11393 ins_encode %{
11394 Register dst = $dst$$Register;
11395 Register src1 = $src1$$Register;
11396 Register src2 = $src2$$Register;
11397 __ orr(dst, src1, src2);
11398 %}
11400 ins_pipe( ialu_regI_regI );
11401 %}
// (src >>> rshift) | ((src & 1) << lshift), with rshift + lshift == 32:
// rotr by 1 puts bit 0 into bit 31 and shifts the rest right by 1; a
// further srl by (rshift - 1) then yields exactly the matched expression.
11403 instruct rotI_shr_logical_Reg(mRegI dst, mRegI src, immI_0_31 rshift, immI_0_31 lshift, immI_1 one) %{
11404 match(Set dst (OrI (URShiftI src rshift) (LShiftI (AndI src one) lshift)));
11405 predicate(32 == ((n->in(1)->in(2)->get_int() + n->in(2)->in(2)->get_int())));
11407 format %{ "rotr $dst, $src, 1 ...\n\t"
11408 "srl $dst, $dst, ($rshift-1) @ rotI_shr_logical_Reg" %}
11409 ins_encode %{
11410 Register dst = $dst$$Register;
11411 Register src = $src$$Register;
11412 int rshift = $rshift$$constant;
11414 __ rotr(dst, src, 1);
// Skip the srl when rshift == 1 (shift by zero would be a no-op).
11415 if (rshift - 1) {
11416 __ srl(dst, dst, rshift - 1);
11417 }
11418 %}
11420 ins_pipe( ialu_regI_regI );
11421 %}
// OrI with a pointer's bits (CastP2X). NOTE(review): dst/src1 are declared
// as mRegL although the rule matches OrI — presumably intentional since the
// OR is performed on the full 64-bit registers; verify against the matcher.
11423 instruct orI_Reg_castP2X(mRegL dst, mRegL src1, mRegP src2) %{
11424 match(Set dst (OrI src1 (CastP2X src2)));
11426 format %{ "OR $dst, $src1, $src2 #@orI_Reg_castP2X" %}
11427 ins_encode %{
11428 Register dst = $dst$$Register;
11429 Register src1 = $src1$$Register;
11430 Register src2 = $src2$$Register;
11431 __ orr(dst, src1, src2);
11432 %}
11434 ins_pipe( ialu_regI_regI );
11435 %}
11437 // Logical Shift Right by 8-bit immediate
// srl encodes only the low 5 bits of the shift amount (Java int semantics).
11438 instruct shr_logical_Reg_imm(mRegI dst, mRegI src, immI8 shift) %{
11439 match(Set dst (URShiftI src shift));
11440 // effect(KILL cr);
11442 format %{ "SRL $dst, $src, $shift #@shr_logical_Reg_imm" %}
11443 ins_encode %{
11444 Register src = $src$$Register;
11445 Register dst = $dst$$Register;
11446 int shift = $shift$$constant;
11448 __ srl(dst, src, shift);
11449 %}
11450 ins_pipe( ialu_regI_regI );
11451 %}
// (src >>> shift) & mask, where mask is a contiguous run of ones starting
// at bit 0: a single ext (bit-field extract) of `popcount(mask)` bits.
11453 instruct shr_logical_Reg_imm_nonneg_mask(mRegI dst, mRegI src, immI_0_31 shift, immI_nonneg_mask mask) %{
11454 match(Set dst (AndI (URShiftI src shift) mask));
11456 format %{ "ext $dst, $src, $shift, one-bits($mask) #@shr_logical_Reg_imm_nonneg_mask" %}
11457 ins_encode %{
11458 Register src = $src$$Register;
11459 Register dst = $dst$$Register;
11460 int pos = $shift$$constant;
// is_int_mask returns the width of the low-bit mask in bits.
11461 int size = Assembler::is_int_mask($mask$$constant);
11463 __ ext(dst, src, pos, size);
11464 %}
11465 ins_pipe( ialu_regI_regI );
11466 %}
// Rotate-left of an int: (dst << lshift) | (dst >>> rshift) with
// lshift + rshift == 0 (mod 32); implemented as rotate-right by rshift.
11468 instruct rolI_Reg_immI_0_31(mRegI dst, immI_0_31 lshift, immI_0_31 rshift)
11469 %{
11470 predicate(0 == ((n->in(1)->in(2)->get_int() + n->in(2)->in(2)->get_int()) & 0x1f));
11471 match(Set dst (OrI (LShiftI dst lshift) (URShiftI dst rshift)));
11473 ins_cost(100);
11474 format %{ "rotr $dst, $dst, $rshift #@rolI_Reg_immI_0_31" %}
11475 ins_encode %{
11476 Register dst = $dst$$Register;
11477 int sa = $rshift$$constant;
11479 __ rotr(dst, dst, sa);
11480 %}
11481 ins_pipe( ialu_regI_regI );
11482 %}
// Rotate-left of a long with the right-shift count in [0, 31]: drotr.
11484 instruct rolL_Reg_immI_0_31(mRegL dst, immI_32_63 lshift, immI_0_31 rshift)
11485 %{
11486 predicate(0 == ((n->in(1)->in(2)->get_int() + n->in(2)->in(2)->get_int()) & 0x3f));
11487 match(Set dst (OrL (LShiftL dst lshift) (URShiftL dst rshift)));
11489 ins_cost(100);
11490 format %{ "rotr $dst, $dst, $rshift #@rolL_Reg_immI_0_31" %}
11491 ins_encode %{
11492 Register dst = $dst$$Register;
11493 int sa = $rshift$$constant;
11495 __ drotr(dst, dst, sa);
11496 %}
11497 ins_pipe( ialu_regI_regI );
11498 %}
// Rotate-left of a long with the right-shift count in [32, 63]: drotr32
// rotates by (sa - 32) + 32.
11500 instruct rolL_Reg_immI_32_63(mRegL dst, immI_0_31 lshift, immI_32_63 rshift)
11501 %{
11502 predicate(0 == ((n->in(1)->in(2)->get_int() + n->in(2)->in(2)->get_int()) & 0x3f));
11503 match(Set dst (OrL (LShiftL dst lshift) (URShiftL dst rshift)));
11505 ins_cost(100);
11506 format %{ "rotr $dst, $dst, $rshift #@rolL_Reg_immI_32_63" %}
11507 ins_encode %{
11508 Register dst = $dst$$Register;
11509 int sa = $rshift$$constant;
11511 __ drotr32(dst, dst, sa - 32);
11512 %}
11513 ins_pipe( ialu_regI_regI );
11514 %}
// Rotate-right of an int (same operands as rolI but with the URShift as
// the first OR input): direct rotr by rshift.
11516 instruct rorI_Reg_immI_0_31(mRegI dst, immI_0_31 rshift, immI_0_31 lshift)
11517 %{
11518 predicate(0 == ((n->in(1)->in(2)->get_int() + n->in(2)->in(2)->get_int()) & 0x1f));
11519 match(Set dst (OrI (URShiftI dst rshift) (LShiftI dst lshift)));
11521 ins_cost(100);
11522 format %{ "rotr $dst, $dst, $rshift #@rorI_Reg_immI_0_31" %}
11523 ins_encode %{
11524 Register dst = $dst$$Register;
11525 int sa = $rshift$$constant;
11527 __ rotr(dst, dst, sa);
11528 %}
11529 ins_pipe( ialu_regI_regI );
11530 %}
// Rotate-right of a long with count in [0, 31]: drotr.
11532 instruct rorL_Reg_immI_0_31(mRegL dst, immI_0_31 rshift, immI_32_63 lshift)
11533 %{
11534 predicate(0 == ((n->in(1)->in(2)->get_int() + n->in(2)->in(2)->get_int()) & 0x3f));
11535 match(Set dst (OrL (URShiftL dst rshift) (LShiftL dst lshift)));
11537 ins_cost(100);
11538 format %{ "rotr $dst, $dst, $rshift #@rorL_Reg_immI_0_31" %}
11539 ins_encode %{
11540 Register dst = $dst$$Register;
11541 int sa = $rshift$$constant;
11543 __ drotr(dst, dst, sa);
11544 %}
11545 ins_pipe( ialu_regI_regI );
11546 %}
// Rotate-right of a long with count in [32, 63]: drotr32.
11548 instruct rorL_Reg_immI_32_63(mRegL dst, immI_32_63 rshift, immI_0_31 lshift)
11549 %{
11550 predicate(0 == ((n->in(1)->in(2)->get_int() + n->in(2)->in(2)->get_int()) & 0x3f));
11551 match(Set dst (OrL (URShiftL dst rshift) (LShiftL dst lshift)));
11553 ins_cost(100);
11554 format %{ "rotr $dst, $dst, $rshift #@rorL_Reg_immI_32_63" %}
11555 ins_encode %{
11556 Register dst = $dst$$Register;
11557 int sa = $rshift$$constant;
11559 __ drotr32(dst, dst, sa - 32);
11560 %}
11561 ins_pipe( ialu_regI_regI );
11562 %}
11564 // Logical Shift Right by register
// srlv uses the low 5 bits of the shift register.
11565 instruct shr_logical_Reg_Reg(mRegI dst, mRegI src, mRegI shift) %{
11566 match(Set dst (URShiftI src shift));
11568 format %{ "SRL $dst, $src, $shift #@shr_logical_Reg_Reg" %}
11569 ins_encode %{
11570 Register src = $src$$Register;
11571 Register dst = $dst$$Register;
11572 Register shift = $shift$$Register;
11573 __ srlv(dst, src, shift);
11574 %}
11575 ins_pipe( ialu_regI_regI );
11576 %}
// Arithmetic Shift Right of an int by constant: single sra.
11579 instruct shr_arith_Reg_imm(mRegI dst, mRegI src, immI8 shift) %{
11580 match(Set dst (RShiftI src shift));
11581 // effect(KILL cr);
11583 format %{ "SRA $dst, $src, $shift #@shr_arith_Reg_imm" %}
11584 ins_encode %{
11585 Register src = $src$$Register;
11586 Register dst = $dst$$Register;
11587 int shift = $shift$$constant;
11588 __ sra(dst, src, shift);
11589 %}
11590 ins_pipe( ialu_regI_regI );
11591 %}
// Arithmetic Shift Right of an int by register: srav (low 5 bits used).
11593 instruct shr_arith_Reg_Reg(mRegI dst, mRegI src, mRegI shift) %{
11594 match(Set dst (RShiftI src shift));
11595 // effect(KILL cr);
11597 format %{ "SRA $dst, $src, $shift #@shr_arith_Reg_Reg" %}
11598 ins_encode %{
11599 Register src = $src$$Register;
11600 Register dst = $dst$$Register;
11601 Register shift = $shift$$Register;
11602 __ srav(dst, src, shift);
11603 %}
11604 ins_pipe( ialu_regI_regI );
11605 %}
11607 //----------Convert Int to Boolean---------------------------------------------
// dst = (src != 0) ? 1 : 0, via daddiu(dst, 1) then movz (move-if-zero).
11609 instruct convI2B(mRegI dst, mRegI src) %{
11610 match(Set dst (Conv2B src));
11612 ins_cost(100);
11613 format %{ "convI2B $dst, $src @ convI2B" %}
11614 ins_encode %{
11615 Register dst = as_Register($dst$$reg);
11616 Register src = as_Register($src$$reg);
11618 if (dst != src) {
11619 __ daddiu(dst, R0, 1);
11620 __ movz(dst, R0, src);
11621 } else {
// dst aliases src: writing the constant 1 would clobber the value being
// tested, so stash src in the scratch register AT first.
11622 __ move(AT, src);
11623 __ daddiu(dst, R0, 1);
11624 __ movz(dst, R0, AT);
11625 }
11626 %}
11628 ins_pipe( ialu_regL_regL );
11629 %}
// i2l: sll-by-0 sign-extends the low 32 bits to 64 bits; skipped when
// dst == src since ints are kept in sign-extended canonical form already.
11631 instruct convI2L_reg( mRegL dst, mRegI src) %{
11632 match(Set dst (ConvI2L src));
11634 ins_cost(100);
11635 format %{ "SLL $dst, $src @ convI2L_reg\t" %}
11636 ins_encode %{
11637 Register dst = as_Register($dst$$reg);
11638 Register src = as_Register($src$$reg);
11640 if(dst != src) __ sll(dst, src, 0);
11641 %}
11642 ins_pipe( ialu_regL_regL );
11643 %}
// l2i: sll-by-0 truncates to 32 bits and sign-extends — the canonical
// MIPS64 way to narrow a long to an int.
11646 instruct convL2I_reg( mRegI dst, mRegL src ) %{
11647 match(Set dst (ConvL2I src));
11649 format %{ "MOV $dst, $src @ convL2I_reg" %}
11650 ins_encode %{
11651 Register dst = as_Register($dst$$reg);
11652 Register src = as_Register($src$$reg);
11654 __ sll(dst, src, 0);
11655 %}
11657 ins_pipe( ialu_regI_regI );
11658 %}
// i2l(l2i(src)): equivalent to sign-extending the low 32 bits of src.
11660 instruct convL2I2L_reg( mRegL dst, mRegL src ) %{
11661 match(Set dst (ConvI2L (ConvL2I src)));
11663 format %{ "sll $dst, $src, 0 @ convL2I2L_reg" %}
11664 ins_encode %{
11665 Register dst = as_Register($dst$$reg);
11666 Register src = as_Register($src$$reg);
11668 __ sll(dst, src, 0);
11669 %}
11671 ins_pipe( ialu_regI_regI );
11672 %}
// l2d: move the 64-bit integer into an FPU register, then convert
// long -> double in place (cvt.d.l).
11674 instruct convL2D_reg( regD dst, mRegL src ) %{
11675 match(Set dst (ConvL2D src));
11676 format %{ "convL2D $dst, $src @ convL2D_reg" %}
11677 ins_encode %{
11678 Register src = as_Register($src$$reg);
11679 FloatRegister dst = as_FloatRegister($dst$$reg);
11681 __ dmtc1(src, dst);
11682 __ cvt_d_l(dst, dst);
11683 %}
11685 ins_pipe( pipe_slow );
11686 %}
// d2l, fast path: trunc.l.d does the conversion; if the result equals
// max_long the hardware saturated (overflow or NaN), so fix up the Java
// semantics inline: NaN -> 0, too-negative -> min_long, else keep max_long.
// Branch delay slots are filled explicitly via delayed().
11689 instruct convD2L_reg_fast( mRegL dst, regD src ) %{
11690 match(Set dst (ConvD2L src));
11691 ins_cost(150);
11692 format %{ "convD2L $dst, $src @ convD2L_reg_fast" %}
11693 ins_encode %{
11694 Register dst = as_Register($dst$$reg);
11695 FloatRegister src = as_FloatRegister($src$$reg);
11697 Label Done;
11699 __ trunc_l_d(F30, src);
11700 // max_long: 0x7fffffffffffffff
11701 // __ set64(AT, 0x7fffffffffffffff);
// Build max_long in AT as (-1 >>> 1) — cheaper than a full 64-bit load.
11702 __ daddiu(AT, R0, -1);
11703 __ dsrl(AT, AT, 1);
11704 __ dmfc1(dst, F30);
// Common case: no saturation, result is already correct.
11706 __ bne(dst, AT, Done);
// Delay slot: zero F30 for the comparison below.
11707 __ delayed()->mtc1(R0, F30);
11709 __ cvt_d_w(F30, F30);
// src < 0.0 (unordered-or-less-than, true for NaN too)?
11710 __ c_ult_d(src, F30);
11711 __ bc1f(Done);
11712 __ delayed()->daddiu(T9, R0, -1);
11714 __ c_un_d(src, src); //NaN?
// dst = -1 - max_long = min_long for the negative-overflow case.
11715 __ subu(dst, T9, AT);
// NaN converts to 0 per JLS.
11716 __ movt(dst, R0);
11718 __ bind(Done);
11719 %}
11721 ins_pipe( pipe_slow );
11722 %}
// d2l, slow path: NaN -> 0 up front; then trunc.l.d, and if FCSR reports
// an exception (bit 16 — presumably the invalid-operation cause bit, TODO
// confirm against the MIPS FCSR layout) fall back to SharedRuntime::d2l.
11725 instruct convD2L_reg_slow( mRegL dst, regD src ) %{
11726 match(Set dst (ConvD2L src));
11727 ins_cost(250);
11728 format %{ "convD2L $dst, $src @ convD2L_reg_slow" %}
11729 ins_encode %{
11730 Register dst = as_Register($dst$$reg);
11731 FloatRegister src = as_FloatRegister($src$$reg);
11733 Label L;
11735 __ c_un_d(src, src); //NaN?
11736 __ bc1t(L);
11737 __ delayed();
// In the delay slot: dst = 0 (harmless on the fall-through path too).
11738 __ move(dst, R0);
11740 __ trunc_l_d(F30, src);
11741 __ cfc1(AT, 31);
11742 __ li(T9, 0x10000);
11743 __ andr(AT, AT, T9);
11744 __ beq(AT, R0, L);
// Delay slot: speculatively move the truncated result into dst.
11745 __ delayed()->dmfc1(dst, F30);
// Hardware flagged the conversion: redo it in the runtime stub.
11747 __ mov_d(F12, src);
11748 __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::d2l), 1);
11749 __ move(dst, V0);
11750 __ bind(L);
11751 %}
11753 ins_pipe( pipe_slow );
11754 %}
// f2i, fast path: trunc.w.s saturates to max_int on overflow/NaN; fix up
// NaN -> 0 via movt, then if the result is max_int use the sign bit of the
// original float to decide between max_int and min_int.
11757 instruct convF2I_reg_fast( mRegI dst, regF src ) %{
11758 match(Set dst (ConvF2I src));
11759 ins_cost(150);
11760 format %{ "convf2i $dst, $src @ convF2I_reg_fast" %}
11761 ins_encode %{
11762 Register dreg = $dst$$Register;
11763 FloatRegister fval = $src$$FloatRegister;
11764 Label L;
11766 __ trunc_w_s(F30, fval);
11767 __ move(AT, 0x7fffffff);
11768 __ mfc1(dreg, F30);
11769 __ c_un_s(fval, fval); //NaN?
11770 __ movt(dreg, R0);
// If not saturated to max_int, the result stands.
11772 __ bne(AT, dreg, L);
// Delay slot: T9 = 0x80000000 (min_int / sign-bit mask).
11773 __ delayed()->lui(T9, 0x8000);
// Inspect the raw sign bit of the float operand.
11775 __ mfc1(AT, fval);
11776 __ andr(AT, AT, T9);
// Negative overflow: replace max_int with min_int.
11778 __ movn(dreg, T9, AT);
11780 __ bind(L);
11782 %}
11784 ins_pipe( pipe_slow );
11785 %}
// f2i, slow path: NaN -> 0 first; then trunc.w.s, and on an FCSR-flagged
// conversion (bit 16 — presumably the invalid-operation cause bit, TODO
// confirm) call SharedRuntime::f2i, preserving V0 and the argument register
// around the leaf call.
11789 instruct convF2I_reg_slow( mRegI dst, regF src ) %{
11790 match(Set dst (ConvF2I src));
11791 ins_cost(250);
11792 format %{ "convf2i $dst, $src @ convF2I_reg_slow" %}
11793 ins_encode %{
11794 Register dreg = $dst$$Register;
11795 FloatRegister fval = $src$$FloatRegister;
11796 Label L;
11798 __ c_un_s(fval, fval); //NaN?
11799 __ bc1t(L);
11800 __ delayed();
11801 __ move(dreg, R0);
11803 __ trunc_w_s(F30, fval);
11805 /* Call SharedRuntime:f2i() to do valid convention */
11806 __ cfc1(AT, 31);
11807 __ li(T9, 0x10000);
11808 __ andr(AT, AT, T9);
11809 __ beq(AT, R0, L);
// Delay slot: speculatively take the hardware-converted result.
11810 __ delayed()->mfc1(dreg, F30);
11812 __ mov_s(F12, fval);
11814 /* 2014/01/08 Fu : This bug was found when running ezDS's control-panel.
11815 * J 982 C2 javax.swing.text.BoxView.layoutMajorAxis(II[I[I)V (283 bytes) @ 0x000000555c46aa74
11816 *
11817 * An interger array index has been assigned to V0, and then changed from 1 to Integer.MAX_VALUE.
11818 * V0 is corrupted during call_VM_leaf(), and should be preserved.
11819 */
11820 __ push(fval);
11821 if(dreg != V0) {
11822 __ push(V0);
11823 }
11824 __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::f2i), 1);
11825 if(dreg != V0) {
11826 __ move(dreg, V0);
11827 __ pop(V0);
11828 }
11829 __ pop(fval);
11830 __ bind(L);
11831 %}
11833 ins_pipe( pipe_slow );
11834 %}
// f2l, fast path: trunc.l.s saturates to max_long; fix NaN -> 0, then use
// the float's sign bit to turn a negative overflow into min_long
// (0x8000... built by lui + dsll32).
11837 instruct convF2L_reg_fast( mRegL dst, regF src ) %{
11838 match(Set dst (ConvF2L src));
11839 ins_cost(150);
11840 format %{ "convf2l $dst, $src @ convF2L_reg_fast" %}
11841 ins_encode %{
11842 Register dreg = $dst$$Register;
11843 FloatRegister fval = $src$$FloatRegister;
11844 Label L;
11846 __ trunc_l_s(F30, fval);
// AT = max_long, built as (-1 >>> 1).
11847 __ daddiu(AT, R0, -1);
11848 __ dsrl(AT, AT, 1);
11849 __ dmfc1(dreg, F30);
11850 __ c_un_s(fval, fval); //NaN?
11851 __ movt(dreg, R0);
11853 __ bne(AT, dreg, L);
// Delay slot: T9 = 0x80000000 (sign-bit mask for the 32-bit float).
11854 __ delayed()->lui(T9, 0x8000);
11856 __ mfc1(AT, fval);
11857 __ andr(AT, AT, T9);
// Promote the mask to min_long (bit 63) before the conditional move.
11859 __ dsll32(T9, T9, 0);
11860 __ movn(dreg, T9, AT);
11862 __ bind(L);
11863 %}
11865 ins_pipe( pipe_slow );
11866 %}
// f2l, slow path: NaN -> 0 first; then trunc.l.s, falling back to
// SharedRuntime::f2l when FCSR bit 16 (presumably the invalid-operation
// cause bit — TODO confirm) is raised by the conversion.
11869 instruct convF2L_reg_slow( mRegL dst, regF src ) %{
11870 match(Set dst (ConvF2L src));
11871 ins_cost(250);
11872 format %{ "convf2l $dst, $src @ convF2L_reg_slow" %}
11873 ins_encode %{
11874 Register dst = as_Register($dst$$reg);
11875 FloatRegister fval = $src$$FloatRegister;
11876 Label L;
11878 __ c_un_s(fval, fval); //NaN?
11879 __ bc1t(L);
11880 __ delayed();
11881 __ move(dst, R0);
11883 __ trunc_l_s(F30, fval);
11884 __ cfc1(AT, 31);
11885 __ li(T9, 0x10000);
11886 __ andr(AT, AT, T9);
11887 __ beq(AT, R0, L);
// Delay slot: speculatively take the hardware-converted result.
11888 __ delayed()->dmfc1(dst, F30);
11890 __ mov_s(F12, fval);
11891 __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::f2l), 1);
11892 __ move(dst, V0);
11893 __ bind(L);
11894 %}
11896 ins_pipe( pipe_slow );
11897 %}
// l2f: move long into FPU register, then cvt.s.l.
// (Label L below is declared but never used.)
11899 instruct convL2F_reg( regF dst, mRegL src ) %{
11900 match(Set dst (ConvL2F src));
11901 format %{ "convl2f $dst, $src @ convL2F_reg" %}
11902 ins_encode %{
11903 FloatRegister dst = $dst$$FloatRegister;
11904 Register src = as_Register($src$$reg);
11905 Label L;
11907 __ dmtc1(src, dst);
11908 __ cvt_s_l(dst, dst);
11909 %}
11911 ins_pipe( pipe_slow );
11912 %}
// i2f: move int into FPU register, then cvt.s.w.
11914 instruct convI2F_reg( regF dst, mRegI src ) %{
11915 match(Set dst (ConvI2F src));
11916 format %{ "convi2f $dst, $src @ convI2F_reg" %}
11917 ins_encode %{
11918 Register src = $src$$Register;
11919 FloatRegister dst = $dst$$FloatRegister;
11921 __ mtc1(src, dst);
11922 __ cvt_s_w(dst, dst);
11923 %}
11925 ins_pipe( fpu_regF_regF );
11926 %}
// CmpLTMask against 0: dst = (p < 0) ? -1 : 0, i.e. an arithmetic shift
// right by 31 replicates the sign bit across the word.
11928 instruct cmpLTMask_immI0( mRegI dst, mRegI p, immI0 zero ) %{
11929 match(Set dst (CmpLTMask p zero));
11930 ins_cost(100);
11932 format %{ "sra $dst, $p, 31 @ cmpLTMask_immI0" %}
11933 ins_encode %{
11934 Register src = $p$$Register;
11935 Register dst = $dst$$Register;
11937 __ sra(dst, src, 31);
11938 %}
11939 ins_pipe( pipe_slow );
11940 %}
// CmpLTMask: dst = (p < q) ? -1 : 0; slt yields 0/1, negation turns 1
// into all-ones.
11943 instruct cmpLTMask( mRegI dst, mRegI p, mRegI q ) %{
11944 match(Set dst (CmpLTMask p q));
11945 ins_cost(400);
11947 format %{ "cmpLTMask $dst, $p, $q @ cmpLTMask" %}
11948 ins_encode %{
11949 Register p = $p$$Register;
11950 Register q = $q$$Register;
11951 Register dst = $dst$$Register;
11953 __ slt(dst, p, q);
11954 __ subu(dst, R0, dst);
11955 %}
11956 ins_pipe( pipe_slow );
11957 %}
// Pointer-to-boolean: dst = (src != NULL) ? 1 : 0; same movz scheme as
// convI2B, including the AT detour when dst aliases src.
11959 instruct convP2B(mRegI dst, mRegP src) %{
11960 match(Set dst (Conv2B src));
11962 ins_cost(100);
11963 format %{ "convP2B $dst, $src @ convP2B" %}
11964 ins_encode %{
11965 Register dst = as_Register($dst$$reg);
11966 Register src = as_Register($src$$reg);
11968 if (dst != src) {
11969 __ daddiu(dst, R0, 1);
11970 __ movz(dst, R0, src);
11971 } else {
// dst aliases src: save the tested value in AT before overwriting it.
11972 __ move(AT, src);
11973 __ daddiu(dst, R0, 1);
11974 __ movz(dst, R0, AT);
11975 }
11976 %}
11978 ins_pipe( ialu_regL_regL );
11979 %}
// i2d: move int into FPU register, then cvt.d.w.
11982 instruct convI2D_reg_reg(regD dst, mRegI src) %{
11983 match(Set dst (ConvI2D src));
11984 format %{ "conI2D $dst, $src @convI2D_reg" %}
11985 ins_encode %{
11986 Register src = $src$$Register;
11987 FloatRegister dst = $dst$$FloatRegister;
11988 __ mtc1(src, dst);
11989 __ cvt_d_w(dst, dst);
11990 %}
11991 ins_pipe( fpu_regF_regF );
11992 %}
// f2d: widening conversion, single cvt.d.s (always exact).
11994 instruct convF2D_reg_reg(regD dst, regF src) %{
11995 match(Set dst (ConvF2D src));
11996 format %{ "convF2D $dst, $src\t# @convF2D_reg_reg" %}
11997 ins_encode %{
11998 FloatRegister dst = $dst$$FloatRegister;
11999 FloatRegister src = $src$$FloatRegister;
12001 __ cvt_d_s(dst, src);
12002 %}
12003 ins_pipe( fpu_regF_regF );
12004 %}
// d2f: narrowing conversion, single cvt.s.d (rounds per current mode).
12006 instruct convD2F_reg_reg(regF dst, regD src) %{
12007 match(Set dst (ConvD2F src));
12008 format %{ "convD2F $dst, $src\t# @convD2F_reg_reg" %}
12009 ins_encode %{
12010 FloatRegister dst = $dst$$FloatRegister;
12011 FloatRegister src = $src$$FloatRegister;
12013 __ cvt_s_d(dst, src);
12014 %}
12015 ins_pipe( fpu_regF_regF );
12016 %}
12019 // Convert a double to an int. If the double is a NAN, stuff a zero in instead.
// Fast path: trunc.w.d saturates to max_int on overflow/NaN; if the result
// is max_int, decide between max_int / min_int / 0 (NaN) inline.
12020 instruct convD2I_reg_reg_fast( mRegI dst, regD src ) %{
12021 match(Set dst (ConvD2I src));
12023 ins_cost(150);
12024 format %{ "convD2I $dst, $src\t# @ convD2I_reg_reg_fast" %}
12026 ins_encode %{
12027 FloatRegister src = $src$$FloatRegister;
12028 Register dst = $dst$$Register;
12030 Label Done;
12032 __ trunc_w_d(F30, src);
12033 // max_int: 2147483647
12034 __ move(AT, 0x7fffffff);
12035 __ mfc1(dst, F30);
// Common case: no saturation, result already correct.
12037 __ bne(dst, AT, Done);
// Delay slot: zero F30 for the sign test below.
12038 __ delayed()->mtc1(R0, F30);
12040 __ cvt_d_w(F30, F30);
// src < 0.0 (unordered-or-less-than; also true for NaN)?
12041 __ c_ult_d(src, F30);
12042 __ bc1f(Done);
12043 __ delayed()->addiu(T9, R0, -1);
12045 __ c_un_d(src, src); //NaN?
// dst = -1 - max_int = min_int for the negative-overflow case.
12046 __ subu32(dst, T9, AT);
// NaN converts to 0 per JLS.
12047 __ movt(dst, R0);
12049 __ bind(Done);
12050 %}
12051 ins_pipe( pipe_slow );
12052 %}
// d2i, slow path: trunc.w.d, then if FCSR bit 16 (presumably the
// invalid-operation cause bit — TODO confirm) is set, redo the conversion
// via SharedRuntime::d2i.
12055 instruct convD2I_reg_reg_slow( mRegI dst, regD src ) %{
12056 match(Set dst (ConvD2I src));
12058 ins_cost(250);
12059 format %{ "convD2I $dst, $src\t# @ convD2I_reg_reg_slow" %}
12061 ins_encode %{
12062 FloatRegister src = $src$$FloatRegister;
12063 Register dst = $dst$$Register;
12064 Label L;
12066 __ trunc_w_d(F30, src);
12067 __ cfc1(AT, 31);
12068 __ li(T9, 0x10000);
12069 __ andr(AT, AT, T9);
12070 __ beq(AT, R0, L);
// Delay slot: speculatively take the hardware-converted result.
12071 __ delayed()->mfc1(dst, F30);
12073 __ mov_d(F12, src);
12074 __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::d2i), 1);
12075 __ move(dst, V0);
12076 __ bind(L);
12078 %}
12079 ins_pipe( pipe_slow );
12080 %}
12082 // Convert oop pointer into compressed form
// General case (oop may be null) — delegates to the macro assembler's
// null-aware encode_heap_oop.
12083 instruct encodeHeapOop(mRegN dst, mRegP src) %{
12084 predicate(n->bottom_type()->make_ptr()->ptr() != TypePtr::NotNull);
12085 match(Set dst (EncodeP src));
12086 format %{ "encode_heap_oop $dst,$src" %}
12087 ins_encode %{
12088 Register src = $src$$Register;
12089 Register dst = $dst$$Register;
12091 __ encode_heap_oop(dst, src);
12092 %}
12093 ins_pipe( ialu_regL_regL );
12094 %}
// Compress an oop that the type system proves non-null — the null check
// in the encoding can be skipped.
12096 instruct encodeHeapOop_not_null(mRegN dst, mRegP src) %{
12097 predicate(n->bottom_type()->make_ptr()->ptr() == TypePtr::NotNull);
12098 match(Set dst (EncodeP src));
12099 format %{ "encode_heap_oop_not_null $dst,$src @ encodeHeapOop_not_null" %}
12100 ins_encode %{
12101 __ encode_heap_oop_not_null($dst$$Register, $src$$Register);
12102 %}
12103 ins_pipe( ialu_regL_regL );
12104 %}
12106 instruct decodeHeapOop(mRegP dst, mRegN src) %{
12107 predicate(n->bottom_type()->is_ptr()->ptr() != TypePtr::NotNull &&
12108 n->bottom_type()->is_ptr()->ptr() != TypePtr::Constant);
12109 match(Set dst (DecodeN src));
12110 format %{ "decode_heap_oop $dst,$src @ decodeHeapOop" %}
12111 ins_encode %{
12112 Register s = $src$$Register;
12113 Register d = $dst$$Register;
12115 __ decode_heap_oop(d, s);
12116 %}
12117 ins_pipe( ialu_regL_regL );
12118 %}
12120 instruct decodeHeapOop_not_null(mRegP dst, mRegN src) %{
12121 predicate(n->bottom_type()->is_ptr()->ptr() == TypePtr::NotNull ||
12122 n->bottom_type()->is_ptr()->ptr() == TypePtr::Constant);
12123 match(Set dst (DecodeN src));
12124 format %{ "decode_heap_oop_not_null $dst,$src @ decodeHeapOop_not_null" %}
12125 ins_encode %{
12126 Register s = $src$$Register;
12127 Register d = $dst$$Register;
12128 if (s != d) {
12129 __ decode_heap_oop_not_null(d, s);
12130 } else {
12131 __ decode_heap_oop_not_null(d);
12132 }
12133 %}
12134 ins_pipe( ialu_regL_regL );
12135 %}
12137 instruct encodeKlass_not_null(mRegN dst, mRegP src) %{
12138 match(Set dst (EncodePKlass src));
12139 format %{ "encode_heap_oop_not_null $dst,$src @ encodeKlass_not_null" %}
12140 ins_encode %{
12141 __ encode_klass_not_null($dst$$Register, $src$$Register);
12142 %}
12143 ins_pipe( ialu_regL_regL );
12144 %}
12146 instruct decodeKlass_not_null(mRegP dst, mRegN src) %{
12147 match(Set dst (DecodeNKlass src));
12148 format %{ "decode_heap_klass_not_null $dst,$src" %}
12149 ins_encode %{
12150 Register s = $src$$Register;
12151 Register d = $dst$$Register;
12152 if (s != d) {
12153 __ decode_klass_not_null(d, s);
12154 } else {
12155 __ decode_klass_not_null(d);
12156 }
12157 %}
12158 ins_pipe( ialu_regL_regL );
12159 %}
12161 //FIXME
// Materialize the current JavaThread* into a GP register. With OPT_THREAD
// the thread pointer is pinned in TREG; otherwise it is fetched via
// MacroAssembler::get_thread.
12162 instruct tlsLoadP(mRegP dst) %{
12163 match(Set dst (ThreadLocal));
12165 ins_cost(0);
12166 format %{ " get_thread in $dst #@tlsLoadP" %}
12167 ins_encode %{
12168 Register dst = $dst$$Register;
12169 #ifdef OPT_THREAD
12170 __ move(dst, TREG);
12171 #else
12172 __ get_thread(dst);
12173 #endif
12174 %}
12176 ins_pipe( ialu_loadI );
12177 %}
// CheckCastPP is a type-system-only node: no code is emitted.
// NOTE(review): the format string says "chekCastPP" — a typo in debug output
// only; left as-is here since this edit changes comments only.
12180 instruct checkCastPP( mRegP dst ) %{
12181 match(Set dst (CheckCastPP dst));
12183 format %{ "#checkcastPP of $dst (empty encoding) #@chekCastPP" %}
12184 ins_encode( /*empty encoding*/ );
12185 ins_pipe( empty );
12186 %}
// CastPP: pointer cast with no runtime effect; zero-size encoding.
12188 instruct castPP(mRegP dst)
12189 %{
12190 match(Set dst (CastPP dst));
12192 size(0);
12193 format %{ "# castPP of $dst" %}
12194 ins_encode(/* empty encoding */);
12195 ins_pipe(empty);
12196 %}
// CastII: integer cast with no runtime effect.
12198 instruct castII( mRegI dst ) %{
12199 match(Set dst (CastII dst));
12200 format %{ "#castII of $dst empty encoding" %}
12201 ins_encode( /*empty encoding*/ );
12202 ins_cost(0);
12203 ins_pipe( empty );
12204 %}
12206 // Return Instruction
12207 // Remove the return address & jump to it.
// jr RA plus the mandatory branch-delay-slot nop.
12208 instruct Ret() %{
12209 match(Return);
12210 format %{ "RET #@Ret" %}
12212 ins_encode %{
12213 __ jr(RA);
12214 __ nop();
12215 %}
12217 ins_pipe( pipe_jump );
12218 %}
12220 /*
12221 // For Loongson CPUs, jr seems too slow, so this rule shouldn't be imported.
12222 instruct jumpXtnd(mRegL switch_val) %{
12223 match(Jump switch_val);
12225 ins_cost(350);
12227 format %{ "load T9 <-- [$constanttablebase, $switch_val, $constantoffset] @ jumpXtnd\n\t"
12228 "jr T9\n\t"
12229 "nop" %}
12230 ins_encode %{
12231 Register table_base = $constanttablebase;
12232 int con_offset = $constantoffset;
12233 Register switch_reg = $switch_val$$Register;
12235 if (UseLoongsonISA) {
12236 if (Assembler::is_simm(con_offset, 8)) {
12237 __ gsldx(T9, table_base, switch_reg, con_offset);
12238 } else if (Assembler::is_simm16(con_offset)) {
12239 __ daddu(T9, table_base, switch_reg);
12240 __ ld(T9, T9, con_offset);
12241 } else {
12242 __ move(T9, con_offset);
12243 __ daddu(AT, table_base, switch_reg);
12244 __ gsldx(T9, AT, T9, 0);
12245 }
12246 } else {
12247 if (Assembler::is_simm16(con_offset)) {
12248 __ daddu(T9, table_base, switch_reg);
12249 __ ld(T9, T9, con_offset);
12250 } else {
12251 __ move(T9, con_offset);
12252 __ daddu(AT, table_base, switch_reg);
12253 __ daddu(AT, T9, AT);
12254 __ ld(T9, AT, 0);
12255 }
12256 }
12258 __ jr(T9);
12259 __ nop();
12261 %}
12262 ins_pipe(pipe_jump);
12263 %}
12264 */
12266 // Jump Direct - Label defines a relative address from JMP
// Unconditional goto. When the label pointer is null (target not yet bound
// in this pass) a placeholder branch with offset 0 is emitted instead; it is
// patched later. The nop fills the branch delay slot.
12267 instruct jmpDir(label labl) %{
12268 match(Goto);
12269 effect(USE labl);
12271 ins_cost(300);
12272 format %{ "JMP $labl #@jmpDir" %}
12274 ins_encode %{
12275 Label &L = *($labl$$label);
// &L tests whether the incoming label pointer was non-null.
12276 if(&L)
12277 __ b(L);
12278 else
12279 __ b(int(0));
12280 __ nop();
12281 %}
12283 ins_pipe( pipe_jump );
12284 ins_pc_relative(1);
12285 %}
12289 // Tail Jump; remove the return address; jump to target.
12290 // TailCall above leaves the return address around.
12291 // TailJump is used in only one place, the rethrow_Java stub (fancy_jump=2).
12292 // ex_oop (Exception Oop) is needed in %o0 at the jump. As there would be a
12293 // "restore" before this instruction (in Epilogue), we need to materialize it
12294 // in %i0.
12295 //FIXME
// Moves the exception oop into V0 and the (return-address) exception pc into
// V1 — the registers the exception-forwarding stubs expect — then jumps.
12296 instruct tailjmpInd(mRegP jump_target,mRegP ex_oop) %{
12297 match( TailJump jump_target ex_oop );
12298 ins_cost(200);
12299 format %{ "Jmp $jump_target ; ex_oop = $ex_oop #@tailjmpInd" %}
12300 ins_encode %{
12301 Register target = $jump_target$$Register;
12303 /* 2012/9/14 Jin: V0, V1 are indicated in:
12304 * [stubGenerator_mips.cpp] generate_forward_exception()
12305 * [runtime_mips.cpp] OptoRuntime::generate_exception_blob()
12306 */
12307 Register oop = $ex_oop$$Register;
12308 Register exception_oop = V0;
12309 Register exception_pc = V1;
12311 __ move(exception_pc, RA);
12312 __ move(exception_oop, oop);
12314 __ jr(target);
12315 __ nop();
12316 %}
12317 ins_pipe( pipe_jump );
12318 %}
12320 // ============================================================================
12321 // Procedure Call/Return Instructions
12322 // Call Java Static Instruction
12323 // Note: If this code changes, the corresponding ret_addr_offset() and
12324 // compute_padding() functions will have to be adjusted.
// Static Java call: encoding is delegated to the Java_Static_Call enc_class.
12325 instruct CallStaticJavaDirect(method meth) %{
12326 match(CallStaticJava);
12327 effect(USE meth);
12329 ins_cost(300);
12330 format %{ "CALL,static #@CallStaticJavaDirect " %}
12331 ins_encode( Java_Static_Call( meth ) );
12332 ins_pipe( pipe_slow );
12333 ins_pc_relative(1);
12334 %}
12336 // Call Java Dynamic Instruction
12337 // Note: If this code changes, the corresponding ret_addr_offset() and
12338 // compute_padding() functions will have to be adjusted.
// Virtual/interface call through the inline-cache protocol.
12339 instruct CallDynamicJavaDirect(method meth) %{
12340 match(CallDynamicJava);
12341 effect(USE meth);
12343 ins_cost(300);
12344 format %{"MOV IC_Klass, (oop)-1\n\t"
12345 "CallDynamic @ CallDynamicJavaDirect" %}
12346 ins_encode( Java_Dynamic_Call( meth ) );
12347 ins_pipe( pipe_slow );
12348 ins_pc_relative(1);
12349 %}
// Leaf runtime call that does not touch FP state; 16-byte aligned.
12351 instruct CallLeafNoFPDirect(method meth) %{
12352 match(CallLeafNoFP);
12353 effect(USE meth);
12355 ins_cost(300);
12356 format %{ "CALL_LEAF_NOFP,runtime " %}
12357 ins_encode(Java_To_Runtime(meth));
12358 ins_pipe( pipe_slow );
12359 ins_pc_relative(1);
12360 ins_alignment(16);
12361 %}
12363 // Prefetch instructions.
// Prefetch for read: computes the effective address of $mem into AT and
// issues `pref 0` (load hint). AT and T9 are scratch.
12365 instruct prefetchrNTA( memory mem ) %{
12366 match(PrefetchRead mem);
12367 ins_cost(125);
12369 format %{ "pref $mem\t# Prefetch into non-temporal cache for read @ prefetchrNTA" %}
12370 ins_encode %{
12371 int base = $mem$$base;
12372 int index = $mem$$index;
12373 int scale = $mem$$scale;
12374 int disp = $mem$$disp;
// AT = base + (index << scale), or just base when there is no index.
12376 if( index != 0 ) {
12377 if (scale == 0) {
12378 __ daddu(AT, as_Register(base), as_Register(index));
12379 } else {
12380 __ dsll(AT, as_Register(index), scale);
12381 __ daddu(AT, as_Register(base), AT);
12382 }
12383 } else {
12384 __ move(AT, as_Register(base));
12385 }
// Add the displacement to the address accumulated in AT.
// BUGFIX: the previous code restarted from `base` here (discarding the
// index contribution computed above) and added `disp` twice, so the
// prefetch targeted base + 2*disp instead of the operand's address.
12386 if( Assembler::is_simm16(disp) ) {
12388 __ daddiu(AT, AT, disp);
12389 } else {
12390 __ move(T9, disp);
12391 __ daddu(AT, AT, T9);
12392 }
12393 __ pref(0, AT, 0); //hint: 0:load
12394 %}
12395 ins_pipe(pipe_slow);
12396 %}
// Prefetch for write: same effective-address computation as prefetchrNTA,
// but issues `pref 1` (store hint). AT and T9 are scratch.
12398 instruct prefetchwNTA( memory mem ) %{
12399 match(PrefetchWrite mem);
12400 ins_cost(125);
12401 format %{ "pref $mem\t# Prefetch to non-temporal cache for write @ prefetchwNTA" %}
12402 ins_encode %{
12403 int base = $mem$$base;
12404 int index = $mem$$index;
12405 int scale = $mem$$scale;
12406 int disp = $mem$$disp;
// AT = base + (index << scale), or just base when there is no index.
12408 if( index != 0 ) {
12409 if (scale == 0) {
12410 __ daddu(AT, as_Register(base), as_Register(index));
12411 } else {
12412 __ dsll(AT, as_Register(index), scale);
12413 __ daddu(AT, as_Register(base), AT);
12414 }
12415 } else {
12416 __ move(AT, as_Register(base));
12417 }
// Add the displacement to the address accumulated in AT.
// BUGFIX: the previous code restarted from `base` here (discarding the
// index contribution) and added `disp` twice — same defect as the old
// prefetchrNTA — so the wrong cache line was prefetched.
12418 if( Assembler::is_simm16(disp) ) {
12420 __ daddiu(AT, AT, disp);
12421 } else {
12422 __ move(T9, disp);
12423 __ daddu(AT, AT, T9);
12424 }
12425 __ pref(1, AT, 0); //hint: 1:store
12426 %}
12427 ins_pipe(pipe_slow);
12428 %}
12430 // Prefetch instructions for allocation.
// Implements allocation prefetch as a load-byte into R0 (the hard-wired zero
// register), i.e. a load whose result is discarded but which pulls the line
// into cache. Uses Loongson gslbx (reg+reg+imm8 addressing) when available.
12432 instruct prefetchAllocNTA( memory mem ) %{
12433 match(PrefetchAllocation mem);
12434 ins_cost(125);
12435 format %{ "pref $mem\t# Prefetch allocation @ prefetchAllocNTA" %}
12436 ins_encode %{
12437 int base = $mem$$base;
12438 int index = $mem$$index;
12439 int scale = $mem$$scale;
12440 int disp = $mem$$disp;
// Loads into R0 are architectural no-ops value-wise; only the cache fill matters.
12442 Register dst = R0;
12444 if( index != 0 ) {
12445 if( Assembler::is_simm16(disp) ) {
12446 if( UseLoongsonISA ) {
12447 if (scale == 0) {
12448 __ gslbx(dst, as_Register(base), as_Register(index), disp);
12449 } else {
12450 __ dsll(AT, as_Register(index), scale);
12451 __ gslbx(dst, as_Register(base), AT, disp);
12452 }
12453 } else {
12454 if (scale == 0) {
12455 __ addu(AT, as_Register(base), as_Register(index));
12456 } else {
12457 __ dsll(AT, as_Register(index), scale);
12458 __ addu(AT, as_Register(base), AT);
12459 }
12460 __ lb(dst, AT, disp);
12461 }
12462 } else {
// Displacement does not fit simm16: materialize it in T9 first.
12463 if (scale == 0) {
12464 __ addu(AT, as_Register(base), as_Register(index));
12465 } else {
12466 __ dsll(AT, as_Register(index), scale);
12467 __ addu(AT, as_Register(base), AT);
12468 }
12469 __ move(T9, disp);
12470 if( UseLoongsonISA ) {
12471 __ gslbx(dst, AT, T9, 0);
12472 } else {
12473 __ addu(AT, AT, T9);
12474 __ lb(dst, AT, 0);
12475 }
12476 }
12477 } else {
// No index register: address is simply base + disp.
12478 if( Assembler::is_simm16(disp) ) {
12479 __ lb(dst, as_Register(base), disp);
12480 } else {
12481 __ move(T9, disp);
12482 if( UseLoongsonISA ) {
12483 __ gslbx(dst, as_Register(base), T9, 0);
12484 } else {
12485 __ addu(AT, as_Register(base), T9);
12486 __ lb(dst, AT, 0);
12487 }
12488 }
12489 }
12490 %}
12491 ins_pipe(pipe_slow);
12492 %}
12495 // Call runtime without safepoint
// Leaf runtime call (may use FP registers); 16-byte aligned call site.
12496 instruct CallLeafDirect(method meth) %{
12497 match(CallLeaf);
12498 effect(USE meth);
12500 ins_cost(300);
12501 format %{ "CALL_LEAF,runtime #@CallLeafDirect " %}
12502 ins_encode(Java_To_Runtime(meth));
12503 ins_pipe( pipe_slow );
12504 ins_pc_relative(1);
12505 ins_alignment(16);
12506 %}
12508 // Load Char (16bit unsigned)
// Zero-extending 16-bit load; encoding shared via load_C_enc.
12509 instruct loadUS(mRegI dst, memory mem) %{
12510 match(Set dst (LoadUS mem));
12512 ins_cost(125);
12513 format %{ "loadUS $dst,$mem @ loadC" %}
12514 ins_encode(load_C_enc(dst, mem));
12515 ins_pipe( ialu_loadI );
12516 %}
// Fused LoadUS + ConvI2L: the zero-extended 16-bit value is already a valid
// long, so the same encoding is reused.
12518 instruct loadUS_convI2L(mRegL dst, memory mem) %{
12519 match(Set dst (ConvI2L (LoadUS mem)));
12521 ins_cost(125);
12522 format %{ "loadUS $dst,$mem @ loadUS_convI2L" %}
12523 ins_encode(load_C_enc(dst, mem));
12524 ins_pipe( ialu_loadI );
12525 %}
12527 // Store Char (16bit unsigned)
12528 instruct storeC(memory mem, mRegI src) %{
12529 match(Set mem (StoreC mem src));
12531 ins_cost(125);
12532 format %{ "storeC $src, $mem @ storeC" %}
12533 ins_encode(store_C_reg_enc(mem, src));
12534 ins_pipe( ialu_loadI );
12535 %}
// Store of constant zero: uses R0 directly via store_C0_enc.
12537 instruct storeC0(memory mem, immI0 zero) %{
12538 match(Set mem (StoreC mem zero));
12540 ins_cost(125);
12541 format %{ "storeC $zero, $mem @ storeC0" %}
12542 ins_encode(store_C0_enc(mem));
12543 ins_pipe( ialu_loadI );
12544 %}
// Float constant 0.0f: move integer zero into the FP register (bit pattern
// of +0.0f is all zeros), avoiding a constant-table load.
12547 instruct loadConF0(regF dst, immF0 zero) %{
12548 match(Set dst zero);
12549 ins_cost(100);
12551 format %{ "mov $dst, zero @ loadConF0\n"%}
12552 ins_encode %{
12553 FloatRegister dst = $dst$$FloatRegister;
12555 __ mtc1(R0, dst);
12556 %}
12557 ins_pipe( fpu_loadF );
12558 %}
// General float constant: load from the constant table. Falls back to an
// explicit 64-bit offset in AT when the offset exceeds simm16; uses the
// Loongson fused indexed load when available.
12561 instruct loadConF(regF dst, immF src) %{
12562 match(Set dst src);
12563 ins_cost(125);
12565 format %{ "lwc1 $dst, $constantoffset[$constanttablebase] # load FLOAT $src from table @ loadConF" %}
12566 ins_encode %{
12567 int con_offset = $constantoffset($src);
12569 if (Assembler::is_simm16(con_offset)) {
12570 __ lwc1($dst$$FloatRegister, $constanttablebase, con_offset);
12571 } else {
12572 __ set64(AT, con_offset);
12573 if (UseLoongsonISA) {
12574 __ gslwxc1($dst$$FloatRegister, $constanttablebase, AT, 0);
12575 } else {
12576 __ daddu(AT, $constanttablebase, AT);
12577 __ lwc1($dst$$FloatRegister, AT, 0);
12578 }
12579 }
12580 %}
12581 ins_pipe( fpu_loadF );
12582 %}
// Double constant 0.0: 64-bit move of integer zero into the FP register.
12585 instruct loadConD0(regD dst, immD0 zero) %{
12586 match(Set dst zero);
12587 ins_cost(100);
12589 format %{ "mov $dst, zero @ loadConD0"%}
12590 ins_encode %{
12591 FloatRegister dst = as_FloatRegister($dst$$reg);
12593 __ dmtc1(R0, dst);
12594 %}
12595 ins_pipe( fpu_loadF );
12596 %}
// General double constant: constant-table load, mirroring loadConF.
12598 instruct loadConD(regD dst, immD src) %{
12599 match(Set dst src);
12600 ins_cost(125);
12602 format %{ "ldc1 $dst, $constantoffset[$constanttablebase] # load DOUBLE $src from table @ loadConD" %}
12603 ins_encode %{
12604 int con_offset = $constantoffset($src);
12606 if (Assembler::is_simm16(con_offset)) {
12607 __ ldc1($dst$$FloatRegister, $constanttablebase, con_offset);
12608 } else {
12609 __ set64(AT, con_offset);
12610 if (UseLoongsonISA) {
12611 __ gsldxc1($dst$$FloatRegister, $constanttablebase, AT, 0);
12612 } else {
12613 __ daddu(AT, $constanttablebase, AT);
12614 __ ldc1($dst$$FloatRegister, AT, 0);
12615 }
12616 }
12617 %}
12618 ins_pipe( fpu_loadF );
12619 %}
12621 // Store register Float value (it is faster than store from FPU register)
12622 instruct storeF_reg( memory mem, regF src) %{
12623 match(Set mem (StoreF mem src));
12625 ins_cost(50);
12626 format %{ "store $mem, $src\t# store float @ storeF_reg" %}
12627 ins_encode(store_F_reg_enc(mem, src));
12628 ins_pipe( fpu_storeF );
12629 %}
// Store of float constant 0.0f: stores R0 (integer zero) directly with sw /
// gsswx, since +0.0f has an all-zero bit pattern — no FP register needed.
// AT and T9 are scratch for address formation.
12631 instruct storeF_imm0( memory mem, immF0 zero) %{
12632 match(Set mem (StoreF mem zero));
12634 ins_cost(40);
12635 format %{ "store $mem, zero\t# store float @ storeF_imm0" %}
12636 ins_encode %{
12637 int base = $mem$$base;
12638 int index = $mem$$index;
12639 int scale = $mem$$scale;
12640 int disp = $mem$$disp;
12642 if( index != 0 ) {
// Loongson path: gsswx supports base+index+imm8 addressing directly.
12643 if ( UseLoongsonISA ) {
12644 if ( Assembler::is_simm(disp, 8) ) {
12645 if ( scale == 0 ) {
12646 __ gsswx(R0, as_Register(base), as_Register(index), disp);
12647 } else {
12648 __ dsll(T9, as_Register(index), scale);
12649 __ gsswx(R0, as_Register(base), T9, disp);
12650 }
12651 } else if ( Assembler::is_simm16(disp) ) {
12652 if ( scale == 0 ) {
12653 __ daddu(AT, as_Register(base), as_Register(index));
12654 } else {
12655 __ dsll(T9, as_Register(index), scale);
12656 __ daddu(AT, as_Register(base), T9);
12657 }
12658 __ sw(R0, AT, disp);
12659 } else {
// disp too large even for simm16: fold it into the index side.
12660 if ( scale == 0 ) {
12661 __ move(T9, disp);
12662 __ daddu(AT, as_Register(index), T9);
12663 __ gsswx(R0, as_Register(base), AT, 0);
12664 } else {
12665 __ dsll(T9, as_Register(index), scale);
12666 __ move(AT, disp);
12667 __ daddu(AT, AT, T9);
12668 __ gsswx(R0, as_Register(base), AT, 0);
12669 }
12670 }
12671 } else { //not use loongson isa
12672 if(scale != 0) {
12673 __ dsll(T9, as_Register(index), scale);
12674 __ daddu(AT, as_Register(base), T9);
12675 } else {
12676 __ daddu(AT, as_Register(base), as_Register(index));
12677 }
12678 if( Assembler::is_simm16(disp) ) {
12679 __ sw(R0, AT, disp);
12680 } else {
12681 __ move(T9, disp);
12682 __ daddu(AT, AT, T9);
12683 __ sw(R0, AT, 0);
12684 }
12685 }
12686 } else { //index is 0
12687 if ( UseLoongsonISA ) {
12688 if ( Assembler::is_simm16(disp) ) {
12689 __ sw(R0, as_Register(base), disp);
12690 } else {
12691 __ move(T9, disp);
12692 __ gsswx(R0, as_Register(base), T9, 0);
12693 }
12694 } else {
12695 if( Assembler::is_simm16(disp) ) {
12696 __ sw(R0, as_Register(base), disp);
12697 } else {
12698 __ move(T9, disp);
12699 __ daddu(AT, as_Register(base), T9);
12700 __ sw(R0, AT, 0);
12701 }
12702 }
12703 }
12704 %}
12705 ins_pipe( ialu_storeI );
12706 %}
12708 // Load Double
12709 instruct loadD(regD dst, memory mem) %{
12710 match(Set dst (LoadD mem));
12712 ins_cost(150);
12713 format %{ "loadD $dst, $mem #@loadD" %}
12714 ins_encode(load_D_enc(dst, mem));
12715 ins_pipe( ialu_loadI );
12716 %}
12718 // Load Double - UNaligned
// NOTE(review): uses the ordinary aligned load encoding; presumably relies
// on the hardware/trap handler tolerating the access — see FIXME below.
12719 instruct loadD_unaligned(regD dst, memory mem ) %{
12720 match(Set dst (LoadD_unaligned mem));
12721 ins_cost(250);
12722 // FIXME: Jin: Need more effective ldl/ldr
12723 format %{ "loadD_unaligned $dst, $mem #@loadD_unaligned" %}
12724 ins_encode(load_D_enc(dst, mem));
12725 ins_pipe( ialu_loadI );
12726 %}
12728 instruct storeD_reg( memory mem, regD src) %{
12729 match(Set mem (StoreD mem src));
12731 ins_cost(50);
12732 format %{ "store $mem, $src\t# store float @ storeD_reg" %}
12733 ins_encode(store_D_reg_enc(mem, src));
12734 ins_pipe( fpu_storeF );
12735 %}
// Store of double constant 0.0: materializes 0.0 in scratch FP register F30
// (mtc1 + cvt.d.w) and stores it with sdc1 / gssdxc1. AT and T9 are scratch
// for address formation.
12737 instruct storeD_imm0( memory mem, immD0 zero) %{
12738 match(Set mem (StoreD mem zero));
12740 ins_cost(40);
12741 format %{ "store $mem, zero\t# store float @ storeD_imm0" %}
12742 ins_encode %{
12743 int base = $mem$$base;
12744 int index = $mem$$index;
12745 int scale = $mem$$scale;
12746 int disp = $mem$$disp;
12748 __ mtc1(R0, F30);
12749 __ cvt_d_w(F30, F30);
12751 if( index != 0 ) {
// Loongson path: gssdxc1 supports base+index+imm8 addressing directly.
12752 if ( UseLoongsonISA ) {
12753 if ( Assembler::is_simm(disp, 8) ) {
12754 if (scale == 0) {
12755 __ gssdxc1(F30, as_Register(base), as_Register(index), disp);
12756 } else {
12757 __ dsll(T9, as_Register(index), scale);
12758 __ gssdxc1(F30, as_Register(base), T9, disp);
12759 }
12760 } else if ( Assembler::is_simm16(disp) ) {
12761 if (scale == 0) {
12762 __ daddu(AT, as_Register(base), as_Register(index));
12763 __ sdc1(F30, AT, disp);
12764 } else {
12765 __ dsll(T9, as_Register(index), scale);
12766 __ daddu(AT, as_Register(base), T9);
12767 __ sdc1(F30, AT, disp);
12768 }
12769 } else {
// disp too large even for simm16: fold it into the index side.
12770 if (scale == 0) {
12771 __ move(T9, disp);
12772 __ daddu(AT, as_Register(index), T9);
12773 __ gssdxc1(F30, as_Register(base), AT, 0);
12774 } else {
12775 __ move(T9, disp);
12776 __ dsll(AT, as_Register(index), scale);
12777 __ daddu(AT, AT, T9);
12778 __ gssdxc1(F30, as_Register(base), AT, 0);
12779 }
12780 }
12781 } else { // not use loongson isa
12782 if(scale != 0) {
12783 __ dsll(T9, as_Register(index), scale);
12784 __ daddu(AT, as_Register(base), T9);
12785 } else {
12786 __ daddu(AT, as_Register(base), as_Register(index));
12787 }
12788 if( Assembler::is_simm16(disp) ) {
12789 __ sdc1(F30, AT, disp);
12790 } else {
12791 __ move(T9, disp);
12792 __ daddu(AT, AT, T9);
12793 __ sdc1(F30, AT, 0);
12794 }
12795 }
12796 } else {// index is 0
12797 if ( UseLoongsonISA ) {
12798 if ( Assembler::is_simm16(disp) ) {
12799 __ sdc1(F30, as_Register(base), disp);
12800 } else {
12801 __ move(T9, disp);
12802 __ gssdxc1(F30, as_Register(base), T9, 0);
12803 }
12804 } else {
12805 if( Assembler::is_simm16(disp) ) {
12806 __ sdc1(F30, as_Register(base), disp);
12807 } else {
12808 __ move(T9, disp);
12809 __ daddu(AT, as_Register(base), T9);
12810 __ sdc1(F30, AT, 0);
12811 }
12812 }
12813 }
12814 %}
12815 ins_pipe( ialu_storeI );
12816 %}
// Stack-slot loads and stores (spill code). Stack-slot displacements are
// assigned by the register allocator and are bounded by the frame size, so
// they always fit in a signed 16-bit immediate; the per-instruction runtime
// guarantee(Assembler::is_simm16(...)) calls were unnecessary and have been
// removed.
12818 instruct loadSSI(mRegI dst, stackSlotI src)
12819 %{
12820 match(Set dst src);
12822 ins_cost(125);
12823 format %{ "lw $dst, $src\t# int stk @ loadSSI" %}
12824 ins_encode %{
12826 __ lw($dst$$Register, SP, $src$$disp);
12827 %}
12828 ins_pipe(ialu_loadI);
12829 %}
12831 instruct storeSSI(stackSlotI dst, mRegI src)
12832 %{
12833 match(Set dst src);
12835 ins_cost(100);
12836 format %{ "sw $dst, $src\t# int stk @ storeSSI" %}
12837 ins_encode %{
12839 __ sw($src$$Register, SP, $dst$$disp);
12840 %}
12841 ins_pipe(ialu_storeI);
12842 %}
12844 instruct loadSSL(mRegL dst, stackSlotL src)
12845 %{
12846 match(Set dst src);
12848 ins_cost(125);
12849 format %{ "ld $dst, $src\t# long stk @ loadSSL" %}
12850 ins_encode %{
12852 __ ld($dst$$Register, SP, $src$$disp);
12853 %}
12854 ins_pipe(ialu_loadI);
12855 %}
12857 instruct storeSSL(stackSlotL dst, mRegL src)
12858 %{
12859 match(Set dst src);
12861 ins_cost(100);
12862 format %{ "sd $dst, $src\t# long stk @ storeSSL" %}
12863 ins_encode %{
12865 __ sd($src$$Register, SP, $dst$$disp);
12866 %}
12867 ins_pipe(ialu_storeI);
12868 %}
12870 instruct loadSSP(mRegP dst, stackSlotP src)
12871 %{
12872 match(Set dst src);
12874 ins_cost(125);
12875 format %{ "ld $dst, $src\t# ptr stk @ loadSSP" %}
12876 ins_encode %{
12878 __ ld($dst$$Register, SP, $src$$disp);
12879 %}
12880 ins_pipe(ialu_loadI);
12881 %}
12883 instruct storeSSP(stackSlotP dst, mRegP src)
12884 %{
12885 match(Set dst src);
12887 ins_cost(100);
12888 format %{ "sd $dst, $src\t# ptr stk @ storeSSP" %}
12889 ins_encode %{
12891 __ sd($src$$Register, SP, $dst$$disp);
12892 %}
12893 ins_pipe(ialu_storeI);
12894 %}
12896 instruct loadSSF(regF dst, stackSlotF src)
12897 %{
12898 match(Set dst src);
12900 ins_cost(125);
12901 format %{ "lwc1 $dst, $src\t# float stk @ loadSSF" %}
12902 ins_encode %{
12904 __ lwc1($dst$$FloatRegister, SP, $src$$disp);
12905 %}
12906 ins_pipe(ialu_loadI);
12907 %}
12909 instruct storeSSF(stackSlotF dst, regF src)
12910 %{
12911 match(Set dst src);
12913 ins_cost(100);
12914 format %{ "swc1 $dst, $src\t# float stk @ storeSSF" %}
12915 ins_encode %{
12917 __ swc1($src$$FloatRegister, SP, $dst$$disp);
12918 %}
12919 ins_pipe(fpu_storeF);
12920 %}
12922 // Use the same format since predicate() can not be used here.
12923 instruct loadSSD(regD dst, stackSlotD src)
12924 %{
12925 match(Set dst src);
12927 ins_cost(125);
12928 format %{ "ldc1 $dst, $src\t# double stk @ loadSSD" %}
12929 ins_encode %{
12931 __ ldc1($dst$$FloatRegister, SP, $src$$disp);
12932 %}
12933 ins_pipe(ialu_loadI);
12934 %}
12936 instruct storeSSD(stackSlotD dst, regD src)
12937 %{
12938 match(Set dst src);
12940 ins_cost(100);
12941 format %{ "sdc1 $dst, $src\t# double stk @ storeSSD" %}
12942 ins_encode %{
12944 __ sdc1($src$$FloatRegister, SP, $dst$$disp);
12945 %}
12946 ins_pipe(fpu_storeF);
12947 %}
// Fast-path object monitor enter; heavy lifting is in MacroAssembler::fast_lock.
// box must live in S0 (s0_RegP) and is clobbered; tmp/scr are scratch.
12949 instruct cmpFastLock( FlagsReg cr, mRegP object, s0_RegP box, mRegI tmp, mRegP scr) %{
12950 match( Set cr (FastLock object box) );
12951 effect( TEMP tmp, TEMP scr, USE_KILL box );
12952 ins_cost(300);
12953 format %{ "FASTLOCK $cr $object, $box, $tmp #@ cmpFastLock" %}
12954 ins_encode %{
12955 __ fast_lock($object$$Register, $box$$Register, $tmp$$Register, $scr$$Register);
12956 %}
12958 ins_pipe( pipe_slow );
12959 ins_pc_relative(1);
12960 %}
// Fast-path object monitor exit; mirrors cmpFastLock.
12962 instruct cmpFastUnlock( FlagsReg cr, mRegP object, s0_RegP box, mRegP tmp ) %{
12963 match( Set cr (FastUnlock object box) );
12964 effect( TEMP tmp, USE_KILL box );
12965 ins_cost(300);
12966 format %{ "FASTUNLOCK $object, $box, $tmp #@cmpFastUnlock" %}
12967 ins_encode %{
12968 __ fast_unlock($object$$Register, $box$$Register, $tmp$$Register);
12969 %}
12971 ins_pipe( pipe_slow );
12972 ins_pc_relative(1);
12973 %}
12975 // Store CMS card-mark Immediate
// Byte store with ordering (store_B_immI_enc_sync) for the card table.
12976 instruct storeImmCM(memory mem, immI8 src) %{
12977 match(Set mem (StoreCM mem src));
12979 ins_cost(150);
12980 format %{ "MOV8 $mem,$src\t! CMS card-mark imm0" %}
12981 // opcode(0xC6);
12982 ins_encode(store_B_immI_enc_sync(mem, src));
12983 ins_pipe( ialu_storeI );
12984 %}
12986 // Die now
// Halt node: emitted on paths the compiler proves unreachable; stops the VM
// with a diagnostic message if ever executed.
12987 instruct ShouldNotReachHere( )
12988 %{
12989 match(Halt);
12990 ins_cost(300);
12992 // Use the following format syntax
12993 format %{ "ILLTRAP ;#@ShouldNotReachHere" %}
12994 ins_encode %{
12995 // Here we should emit illtrap !
// Fixed typo in the diagnostic message ("ShoudNotReachHere").
12997 __ stop("in ShouldNotReachHere");
12999 %}
13000 ins_pipe( pipe_jump );
13001 %}
// Address-computation (lea) instructs: materialize a memory operand's
// effective address into a pointer register without any memory access.
// base + small offset; only legal when narrow oops are unshifted.
13003 instruct leaP8Narrow(mRegP dst, indOffset8Narrow mem)
13004 %{
13005 predicate(Universe::narrow_oop_shift() == 0);
13006 match(Set dst mem);
13008 ins_cost(110);
13009 format %{ "leaq $dst, $mem\t# ptr off8narrow @ leaP8Narrow" %}
13010 ins_encode %{
13011 Register dst = $dst$$Register;
13012 Register base = as_Register($mem$$base);
13013 int disp = $mem$$disp;
13015 __ daddiu(dst, base, disp);
13016 %}
13017 ins_pipe( ialu_regI_imm16 );
13018 %}
// base + (index << scale) + disp, via scratch AT.
13020 instruct leaPPosIdxScaleOff8(mRegP dst, basePosIndexScaleOffset8 mem)
13021 %{
13022 match(Set dst mem);
13024 ins_cost(110);
13025 format %{ "leaq $dst, $mem\t# @ PosIdxScaleOff8" %}
13026 ins_encode %{
13027 Register dst = $dst$$Register;
13028 Register base = as_Register($mem$$base);
13029 Register index = as_Register($mem$$index);
13030 int scale = $mem$$scale;
13031 int disp = $mem$$disp;
13033 if (scale == 0) {
13034 __ daddu(AT, base, index);
13035 __ daddiu(dst, AT, disp);
13036 } else {
13037 __ dsll(AT, index, scale);
13038 __ daddu(AT, base, AT);
13039 __ daddiu(dst, AT, disp);
13040 }
13041 %}
13043 ins_pipe( ialu_regI_imm16 );
13044 %}
// base + (index << scale), no displacement.
13046 instruct leaPIdxScale(mRegP dst, indIndexScale mem)
13047 %{
13048 match(Set dst mem);
13050 ins_cost(110);
13051 format %{ "leaq $dst, $mem\t# @ leaPIdxScale" %}
13052 ins_encode %{
13053 Register dst = $dst$$Register;
13054 Register base = as_Register($mem$$base);
13055 Register index = as_Register($mem$$index);
13056 int scale = $mem$$scale;
13058 if (scale == 0) {
13059 __ daddu(dst, base, index);
13060 } else {
13061 __ dsll(AT, index, scale);
13062 __ daddu(dst, base, AT);
13063 }
13064 %}
13066 ins_pipe( ialu_regI_imm16 );
13067 %}
13069 // Jump Direct Conditional - Label defines a relative address from Jcc+1
// Counted-loop back-branch on a register/register integer compare.
// cmpcode selects the condition; slt + branch-on-AT synthesizes the ordered
// comparisons MIPS lacks as single branch instructions. A null label pointer
// means the target is unbound yet, so a 0-offset placeholder is emitted.
// NOTE(review): cases 0x03-0x06 use signed slt despite the above/below
// naming — presumably intentional for this port's cmpOp encoding; confirm
// against the cmpOp operand definition.
13070 instruct jmpLoopEnd(cmpOp cop, mRegI src1, mRegI src2, label labl) %{
13071 match(CountedLoopEnd cop (CmpI src1 src2));
13072 effect(USE labl);
13074 ins_cost(300);
13075 format %{ "J$cop $src1, $src2, $labl\t# Loop end @ jmpLoopEnd" %}
13076 ins_encode %{
13077 Register op1 = $src1$$Register;
13078 Register op2 = $src2$$Register;
13079 Label &L = *($labl$$label);
13080 int flag = $cop$$cmpcode;
13082 switch(flag)
13083 {
13084 case 0x01: //equal
13085 if (&L)
13086 __ beq(op1, op2, L);
13087 else
13088 __ beq(op1, op2, (int)0);
13089 break;
13090 case 0x02: //not_equal
13091 if (&L)
13092 __ bne(op1, op2, L);
13093 else
13094 __ bne(op1, op2, (int)0);
13095 break;
13096 case 0x03: //above
13097 __ slt(AT, op2, op1);
13098 if(&L)
13099 __ bne(AT, R0, L);
13100 else
13101 __ bne(AT, R0, (int)0);
13102 break;
13103 case 0x04: //above_equal
13104 __ slt(AT, op1, op2);
13105 if(&L)
13106 __ beq(AT, R0, L);
13107 else
13108 __ beq(AT, R0, (int)0);
13109 break;
13110 case 0x05: //below
13111 __ slt(AT, op1, op2);
13112 if(&L)
13113 __ bne(AT, R0, L);
13114 else
13115 __ bne(AT, R0, (int)0);
13116 break;
13117 case 0x06: //below_equal
13118 __ slt(AT, op2, op1);
13119 if(&L)
13120 __ beq(AT, R0, L);
13121 else
13122 __ beq(AT, R0, (int)0);
13123 break;
13124 default:
13125 Unimplemented();
13126 }
// Branch delay slot.
13127 __ nop();
13128 %}
13129 ins_pipe( pipe_jump );
13130 ins_pc_relative(1);
13131 %}
// Counted-loop back-branch against a 16-bit immediate: computes
// AT = src1 - src2 once, then branches on AT's sign/zero, which needs only
// the single-register branch forms (bgtz/bgez/bltz/blez).
// NOTE(review): the subtraction in AT can wrap for operands near INT_MIN/MAX;
// presumably the immI16_sub operand restricts src2 so this is safe — confirm
// against the operand definition.
13134 instruct jmpLoopEnd_reg_imm16_sub(cmpOp cop, mRegI src1, immI16_sub src2, label labl) %{
13135 match(CountedLoopEnd cop (CmpI src1 src2));
13136 effect(USE labl);
13138 ins_cost(250);
13139 format %{ "J$cop $src1, $src2, $labl\t# Loop end @ jmpLoopEnd_reg_imm16_sub" %}
13140 ins_encode %{
13141 Register op1 = $src1$$Register;
13142 int op2 = $src2$$constant;
13143 Label &L = *($labl$$label);
13144 int flag = $cop$$cmpcode;
13146 __ addiu32(AT, op1, -1 * op2);
13148 switch(flag)
13149 {
13150 case 0x01: //equal
13151 if (&L)
13152 __ beq(AT, R0, L);
13153 else
13154 __ beq(AT, R0, (int)0);
13155 break;
13156 case 0x02: //not_equal
13157 if (&L)
13158 __ bne(AT, R0, L);
13159 else
13160 __ bne(AT, R0, (int)0);
13161 break;
13162 case 0x03: //above
13163 if(&L)
13164 __ bgtz(AT, L);
13165 else
13166 __ bgtz(AT, (int)0);
13167 break;
13168 case 0x04: //above_equal
13169 if(&L)
13170 __ bgez(AT, L);
13171 else
13172 __ bgez(AT,(int)0);
13173 break;
13174 case 0x05: //below
13175 if(&L)
13176 __ bltz(AT, L);
13177 else
13178 __ bltz(AT, (int)0);
13179 break;
13180 case 0x06: //below_equal
13181 if(&L)
13182 __ blez(AT, L);
13183 else
13184 __ blez(AT, (int)0);
13185 break;
13186 default:
13187 Unimplemented();
13188 }
// Branch delay slot.
13189 __ nop();
13190 %}
13191 ins_pipe( pipe_jump );
13192 ins_pc_relative(1);
13193 %}
13196 /*
13197 // Jump Direct Conditional - Label defines a relative address from Jcc+1
13198 instruct jmpLoopEndU(cmpOpU cop, eFlagsRegU cmp, label labl) %{
13199 match(CountedLoopEnd cop cmp);
13200 effect(USE labl);
13202 ins_cost(300);
13203 format %{ "J$cop,u $labl\t# Loop end" %}
13204 size(6);
13205 opcode(0x0F, 0x80);
13206 ins_encode( Jcc( cop, labl) );
13207 ins_pipe( pipe_jump );
13208 ins_pc_relative(1);
13209 %}
13211 instruct jmpLoopEndUCF(cmpOpUCF cop, eFlagsRegUCF cmp, label labl) %{
13212 match(CountedLoopEnd cop cmp);
13213 effect(USE labl);
13215 ins_cost(200);
13216 format %{ "J$cop,u $labl\t# Loop end" %}
13217 opcode(0x0F, 0x80);
13218 ins_encode( Jcc( cop, labl) );
13219 ins_pipe( pipe_jump );
13220 ins_pc_relative(1);
13221 %}
13222 */
13224 // This match pattern is created for StoreIConditional since I cannot match IfNode without a RegFlags! fujie 2012/07/17
// Conditional branch on the synthetic flags register. On this port AT holds
// the flag value produced by storeIConditional (non-zero = success), which
// is why "equal" branches with bne and "not equal" with beq.
13225 instruct jmpCon_flags(cmpOp cop, FlagsReg cr, label labl) %{
13226 match(If cop cr);
13227 effect(USE labl);
13229 ins_cost(300);
13230 format %{ "J$cop $labl #mips uses AT as eflag @jmpCon_flags" %}
13232 ins_encode %{
13233 Label &L = *($labl$$label);
13234 switch($cop$$cmpcode)
13235 {
13236 case 0x01: //equal
13237 if (&L)
13238 __ bne(AT, R0, L);
13239 else
13240 __ bne(AT, R0, (int)0);
13241 break;
13242 case 0x02: //not equal
13243 if (&L)
13244 __ beq(AT, R0, L);
13245 else
13246 __ beq(AT, R0, (int)0);
13247 break;
13248 default:
13249 Unimplemented();
13250 }
// Branch delay slot.
13251 __ nop();
13252 %}
13254 ins_pipe( pipe_jump );
13255 ins_pc_relative(1);
13256 %}
13259 // ============================================================================
13260 // The 2nd slow-half of a subtype check. Scan the subklass's 2ndary superklass
13261 // array for an instance of the superklass. Set a hidden internal cache on a
13262 // hit (cache is checked with exposed code in gen_subtype_check()). Return
13263 // NZ for a miss or zero for a hit. The encoding ALSO sets flags.
// Encoding is delegated to the enc_PartialSubtypeCheck enc_class; T8 is
// reserved as the scratch register (hence the no_T8 register classes).
13264 instruct partialSubtypeCheck( mRegP result, no_T8_mRegP sub, no_T8_mRegP super, mT8RegI tmp ) %{
13265 match(Set result (PartialSubtypeCheck sub super));
13266 effect(KILL tmp);
13267 ins_cost(1100); // slightly larger than the next version
13268 format %{ "partialSubtypeCheck result=$result, sub=$sub, super=$super, tmp=$tmp " %}
13270 ins_encode( enc_PartialSubtypeCheck(result, sub, super, tmp) );
13271 ins_pipe( pipe_slow );
13272 %}
13275 // Conditional-store of an int value.
13276 // ZF flag is set on success, reset otherwise. Implemented with a CMPXCHG on Intel.
// MIPS implementation: ll/sc retry loop. On exit AT is 0xFF on success and 0
// on failure (set in the branch delay slots); jmpCon_flags branches on AT.
// Only base+disp addressing is supported — an index register is a compiler
// bug and traps via stop().
13277 instruct storeIConditional( memory mem, mRegI oldval, mRegI newval, FlagsReg cr ) %{
13278 match(Set cr (StoreIConditional mem (Binary oldval newval)));
13279 // effect(KILL oldval);
13280 format %{ "CMPXCHG $newval, $mem, $oldval \t# @storeIConditional" %}
13282 ins_encode %{
13283 Register oldval = $oldval$$Register;
13284 Register newval = $newval$$Register;
13285 Address addr(as_Register($mem$$base), $mem$$disp);
13286 Label again, failure;
13288 // int base = $mem$$base;
13289 int index = $mem$$index;
13290 int scale = $mem$$scale;
13291 int disp = $mem$$disp;
// ll/sc take a simm16 offset; a larger disp cannot be encoded here.
13293 guarantee(Assembler::is_simm16(disp), "");
13295 if( index != 0 ) {
13296 __ stop("in storeIConditional: index != 0");
13297 } else {
13298 __ bind(again);
// Pre-3A2000 Loongson parts need an explicit sync before ll.
13299 if(!Use3A2000) __ sync();
13300 __ ll(AT, addr);
13301 __ bne(AT, oldval, failure);
13302 __ delayed()->addu(AT, R0, R0);
13304 __ addu(AT, newval, R0);
13305 __ sc(AT, addr);
13306 __ beq(AT, R0, again);
13307 __ delayed()->addiu(AT, R0, 0xFF);
13308 __ bind(failure);
13309 __ sync();
13310 }
13311 %}
13313 ins_pipe( long_memory_op );
13314 %}
13316 // Conditional-store of a long value.
13317 // ZF flag is set on success, reset otherwise. Implemented with a CMPXCHG.
13318 instruct storeLConditional(memory mem, t2RegL oldval, mRegL newval, FlagsReg cr )
13319 %{
13320 match(Set cr (StoreLConditional mem (Binary oldval newval)));
13321 effect(KILL oldval);
13323 format %{ "cmpxchg $mem, $newval\t# If $oldval == $mem then store $newval into $mem" %}
13324 ins_encode%{
13325 Register oldval = $oldval$$Register;
13326 Register newval = $newval$$Register;
13327 Address addr((Register)$mem$$base, $mem$$disp);
13329 int index = $mem$$index;
13330 int scale = $mem$$scale;
13331 int disp = $mem$$disp;
13333 guarantee(Assembler::is_simm16(disp), "");
13335 if( index != 0 ) {
13336 __ stop("in storeIConditional: index != 0");
13337 } else {
13338 __ cmpxchg(newval, addr, oldval);
13339 }
13340 %}
13341 ins_pipe( long_memory_op );
13342 %}
// CAS of a 32-bit int. cmpxchg32 leaves its success flag in AT, which is
// copied to $res. NOTE(review): the format text below describes branches that
// the encoding no longer emits, and Label L is unused.
instruct compareAndSwapI( mRegI res, mRegP mem_ptr, mS2RegI oldval, mRegI newval) %{
  match(Set res (CompareAndSwapI mem_ptr (Binary oldval newval)));
  effect(KILL oldval);
  // match(CompareAndSwapI mem_ptr (Binary oldval newval));
  format %{ "CMPXCHG $newval, [$mem_ptr], $oldval @ compareAndSwapI\n\t"
            "MOV $res, 1 @ compareAndSwapI\n\t"
            "BNE AT, R0 @ compareAndSwapI\n\t"
            "MOV $res, 0 @ compareAndSwapI\n"
            "L:" %}
  ins_encode %{
    Register newval = $newval$$Register;
    Register oldval = $oldval$$Register;
    Register res = $res$$Register;
    Address addr($mem_ptr$$Register, 0);
    Label L;

    __ cmpxchg32(newval, addr, oldval);
    __ move(res, AT);  // AT holds the CAS success flag
  %}
  ins_pipe( long_memory_op );
%}
//FIXME:
// CAS of a 64-bit pointer. cmpxchg leaves its success flag in AT, which is
// copied to $res. NOTE(review): Label L is unused.
instruct compareAndSwapP( mRegI res, mRegP mem_ptr, s2_RegP oldval, mRegP newval) %{
  match(Set res (CompareAndSwapP mem_ptr (Binary oldval newval)));
  effect(KILL oldval);
  format %{ "CMPXCHG $newval, [$mem_ptr], $oldval @ compareAndSwapP\n\t"
            "MOV $res, AT @ compareAndSwapP\n\t"
            "L:" %}
  ins_encode %{
    Register newval = $newval$$Register;
    Register oldval = $oldval$$Register;
    Register res = $res$$Register;
    Address addr($mem_ptr$$Register, 0);
    Label L;

    __ cmpxchg(newval, addr, oldval);
    __ move(res, AT);  // AT holds the CAS success flag
  %}
  ins_pipe( long_memory_op );
%}
// CAS of a compressed (narrow) oop. NOTE(review): Label L is unused.
instruct compareAndSwapN( mRegI res, mRegP mem_ptr, t2_RegN oldval, mRegN newval) %{
  match(Set res (CompareAndSwapN mem_ptr (Binary oldval newval)));
  effect(KILL oldval);
  format %{ "CMPXCHG $newval, [$mem_ptr], $oldval @ compareAndSwapN\n\t"
            "MOV $res, AT @ compareAndSwapN\n\t"
            "L:" %}
  ins_encode %{
    Register newval = $newval$$Register;
    Register oldval = $oldval$$Register;
    Register res = $res$$Register;
    Address addr($mem_ptr$$Register, 0);
    Label L;

    /* 2013/7/19 Jin: cmpxchg32 is implemented with ll/sc, which will do sign extension.
     * Thus, we should extend oldval's sign for correct comparison.
     */
    __ sll(oldval, oldval, 0);  // sll reg, reg, 0 sign-extends the low 32 bits

    __ cmpxchg32(newval, addr, oldval);
    __ move(res, AT);  // AT holds the CAS success flag
  %}
  ins_pipe( long_memory_op );
%}
13411 //----------Max and Min--------------------------------------------------------
13412 // Min Instructions
13413 ////
13414 // *** Min and Max using the conditional move are slower than the
13415 // *** branch version on a Pentium III.
13416 // // Conditional move for min
13417 //instruct cmovI_reg_lt( eRegI op2, eRegI op1, eFlagsReg cr ) %{
13418 // effect( USE_DEF op2, USE op1, USE cr );
13419 // format %{ "CMOVlt $op2,$op1\t! min" %}
13420 // opcode(0x4C,0x0F);
13421 // ins_encode( OpcS, OpcP, RegReg( op2, op1 ) );
13422 // ins_pipe( pipe_cmov_reg );
13423 //%}
13424 //
13425 //// Min Register with Register (P6 version)
13426 //instruct minI_eReg_p6( eRegI op1, eRegI op2 ) %{
13427 // predicate(VM_Version::supports_cmov() );
13428 // match(Set op2 (MinI op1 op2));
13429 // ins_cost(200);
13430 // expand %{
13431 // eFlagsReg cr;
13432 // compI_eReg(cr,op1,op2);
13433 // cmovI_reg_lt(op2,op1,cr);
13434 // %}
13435 //%}
// Min Register with Register (generic version)
// dst = min(dst, src) via slt/movn, branch-free.
instruct minI_Reg_Reg(mRegI dst, mRegI src) %{
  match(Set dst (MinI dst src));
  //effect(KILL flags);
  ins_cost(80);

  format %{ "MIN $dst, $src @minI_Reg_Reg" %}
  ins_encode %{
    Register dst = $dst$$Register;
    Register src = $src$$Register;

    __ slt(AT, src, dst);   // AT = (src < dst) ? 1 : 0
    __ movn(dst, src, AT);  // if AT != 0: dst = src

  %}

  ins_pipe( pipe_slow );
%}
13456 // Max Register with Register
13457 // *** Min and Max using the conditional move are slower than the
13458 // *** branch version on a Pentium III.
13459 // // Conditional move for max
13460 //instruct cmovI_reg_gt( eRegI op2, eRegI op1, eFlagsReg cr ) %{
13461 // effect( USE_DEF op2, USE op1, USE cr );
13462 // format %{ "CMOVgt $op2,$op1\t! max" %}
13463 // opcode(0x4F,0x0F);
13464 // ins_encode( OpcS, OpcP, RegReg( op2, op1 ) );
13465 // ins_pipe( pipe_cmov_reg );
13466 //%}
13467 //
13468 // // Max Register with Register (P6 version)
13469 //instruct maxI_eReg_p6( eRegI op1, eRegI op2 ) %{
13470 // predicate(VM_Version::supports_cmov() );
13471 // match(Set op2 (MaxI op1 op2));
13472 // ins_cost(200);
13473 // expand %{
13474 // eFlagsReg cr;
13475 // compI_eReg(cr,op1,op2);
13476 // cmovI_reg_gt(op2,op1,cr);
13477 // %}
13478 //%}
// Max Register with Register (generic version)
// dst = max(dst, src) via slt/movn, branch-free.
instruct maxI_Reg_Reg(mRegI dst, mRegI src) %{
  match(Set dst (MaxI dst src));
  ins_cost(80);

  format %{ "MAX $dst, $src @maxI_Reg_Reg" %}

  ins_encode %{
    Register dst = $dst$$Register;
    Register src = $src$$Register;

    __ slt(AT, dst, src);   // AT = (dst < src) ? 1 : 0
    __ movn(dst, src, AT);  // if AT != 0: dst = src

  %}

  ins_pipe( pipe_slow );
%}
// dst = max(dst, 0): clamp negative values to zero, branch-free.
instruct maxI_Reg_zero(mRegI dst, immI0 zero) %{
  match(Set dst (MaxI dst zero));
  ins_cost(50);

  format %{ "MAX $dst, 0 @maxI_Reg_zero" %}

  ins_encode %{
    Register dst = $dst$$Register;

    __ slt(AT, dst, R0);   // AT = (dst < 0) ? 1 : 0
    __ movn(dst, R0, AT);  // if negative: dst = 0

  %}

  ins_pipe( pipe_slow );
%}
// Zero-extend the low 32 bits of a long: AndL with 0xFFFFFFFF folds to one dext.
instruct zerox_long_reg_reg(mRegL dst, mRegL src, immL_32bits mask)
%{
  match(Set dst (AndL src mask));

  format %{ "movl $dst, $src\t# zero-extend long @ zerox_long_reg_reg" %}
  ins_encode %{
    Register dst = $dst$$Register;
    Register src = $src$$Register;

    __ dext(dst, src, 0, 32);  // extract bits [0,32) of src, zero-filled
  %}
  ins_pipe(ialu_regI_regI);
%}
// Combine two ints into one long: src1 becomes the low 32 bits, src2 the high
// 32 bits. The three cases below handle register aliasing between dst and the
// sources so no input is clobbered before it is read.
instruct combine_i2l(mRegL dst, mRegI src1, immL_32bits mask, mRegI src2, immI_32 shift32)
%{
  match(Set dst (OrL (AndL (ConvI2L src1) mask) (LShiftL (ConvI2L src2) shift32)));

  format %{ "combine_i2l $dst, $src2(H), $src1(L) @ combine_i2l" %}
  ins_encode %{
    Register dst = $dst$$Register;
    Register src1 = $src1$$Register;
    Register src2 = $src2$$Register;

    if (src1 == dst) {
      // low half already in place: just insert src2 into the high half
      __ dinsu(dst, src2, 32, 32);
    } else if (src2 == dst) {
      // shift dst's own low word up to the high half, then insert src1 low
      __ dsll32(dst, dst, 0);
      __ dins(dst, src1, 0, 32);
    } else {
      // no aliasing: build low half then high half
      __ dext(dst, src1, 0, 32);
      __ dinsu(dst, src2, 32, 32);
    }
  %}
  ins_pipe(ialu_regI_regI);
%}
// Zero-extend convert int to long
// ConvI2L followed by AndL 0xFFFFFFFF folds to one dext.
instruct convI2L_reg_reg_zex(mRegL dst, mRegI src, immL_32bits mask)
%{
  match(Set dst (AndL (ConvI2L src) mask));

  format %{ "movl $dst, $src\t# i2l zero-extend @ convI2L_reg_reg_zex" %}
  ins_encode %{
    Register dst = $dst$$Register;
    Register src = $src$$Register;

    __ dext(dst, src, 0, 32);  // extract bits [0,32), zero-filled
  %}
  ins_pipe(ialu_regI_regI);
%}
// L2I then I2L then mask 0xFFFFFFFF: the round trip reduces to zero-extending
// the low 32 bits with one dext.
instruct convL2I2L_reg_reg_zex(mRegL dst, mRegL src, immL_32bits mask)
%{
  match(Set dst (AndL (ConvI2L (ConvL2I src)) mask));

  format %{ "movl $dst, $src\t# i2l zero-extend @ convL2I2L_reg_reg_zex" %}
  ins_encode %{
    Register dst = $dst$$Register;
    Register src = $src$$Register;

    __ dext(dst, src, 0, 32);  // extract bits [0,32), zero-filled
  %}
  ins_pipe(ialu_regI_regI);
%}
// Match loading integer and casting it to unsigned int in long register.
// LoadI + ConvI2L + AndL 0xffffffff. Emitted as a single lwu (zero-extending
// 32-bit load) via load_N_enc.
instruct loadUI2L_rmask(mRegL dst, memory mem, immL_32bits mask) %{
  match(Set dst (AndL (ConvI2L (LoadI mem)) mask));

  format %{ "lwu $dst, $mem \t// zero-extend to long @ loadUI2L_rmask" %}
  ins_encode (load_N_enc(dst, mem));
  ins_pipe(ialu_loadI);
%}
// Same as loadUI2L_rmask but with the mask on the left of the AndL.
instruct loadUI2L_lmask(mRegL dst, memory mem, immL_32bits mask) %{
  match(Set dst (AndL mask (ConvI2L (LoadI mem))));

  format %{ "lwu $dst, $mem \t// zero-extend to long @ loadUI2L_lmask" %}
  ins_encode (load_N_enc(dst, mem));
  ins_pipe(ialu_loadI);
%}
13601 // ============================================================================
13602 // Safepoint Instruction
13603 instruct safePoint_poll_reg(mRegP poll) %{
13604 match(SafePoint poll);
13605 predicate(false);
13606 effect(USE poll);
13608 ins_cost(125);
13609 format %{ "Safepoint @ [$poll] : poll for GC @ safePoint_poll_reg" %}
13611 ins_encode %{
13612 Register poll_reg = $poll$$Register;
13614 __ block_comment("Safepoint:");
13615 __ relocate(relocInfo::poll_type);
13616 __ lw(AT, poll_reg, 0);
13617 %}
13619 ins_pipe( ialu_storeI );
13620 %}
// Safepoint poll: materialize the polling-page address in T9 and load from it.
// The load traps when the VM arms the page, bringing the thread to a safepoint.
instruct safePoint_poll() %{
  match(SafePoint);

  ins_cost(105);
  format %{ "poll for GC @ safePoint_poll" %}

  ins_encode %{
    __ block_comment("Safepoint:");
    __ set64(T9, (long)os::get_polling_page());
    __ relocate(relocInfo::poll_type);
    __ lw(AT, T9, 0);
  %}

  ins_pipe( ialu_storeI );
%}
//----------Arithmetic Conversion Instructions---------------------------------

// RoundFloat needs no code on this target: zero cost, empty encoding.
instruct roundFloat_nop(regF dst)
%{
  match(Set dst (RoundFloat dst));

  ins_cost(0);
  ins_encode();
  ins_pipe(empty);
%}
// RoundDouble needs no code on this target: zero cost, empty encoding.
instruct roundDouble_nop(regD dst)
%{
  match(Set dst (RoundDouble dst));

  ins_cost(0);
  ins_encode();
  ins_pipe(empty);
%}
//---------- Zeros Count Instructions ------------------------------------------
// CountLeadingZerosINode CountTrailingZerosINode

// 32-bit leading-zero count, guarded by UseCountLeadingZerosInstruction.
instruct countLeadingZerosI(mRegI dst, mRegI src) %{
  predicate(UseCountLeadingZerosInstruction);
  match(Set dst (CountLeadingZerosI src));

  format %{ "clz $dst, $src\t# count leading zeros (int)" %}
  ins_encode %{
    __ clz($dst$$Register, $src$$Register);
  %}
  ins_pipe( ialu_regL_regL );
%}
// 64-bit leading-zero count, guarded by UseCountLeadingZerosInstruction.
instruct countLeadingZerosL(mRegI dst, mRegL src) %{
  predicate(UseCountLeadingZerosInstruction);
  match(Set dst (CountLeadingZerosL src));

  format %{ "dclz $dst, $src\t# count leading zeros (long)" %}
  ins_encode %{
    __ dclz($dst$$Register, $src$$Register);
  %}
  ins_pipe( ialu_regL_regL );
%}
// 32-bit trailing-zero count, guarded by UseCountTrailingZerosInstruction.
instruct countTrailingZerosI(mRegI dst, mRegI src) %{
  predicate(UseCountTrailingZerosInstruction);
  match(Set dst (CountTrailingZerosI src));

  format %{ "ctz $dst, $src\t# count trailing zeros (int)" %}
  ins_encode %{
    // ctz and dctz is gs instructions.
    __ ctz($dst$$Register, $src$$Register);
  %}
  ins_pipe( ialu_regL_regL );
%}
13694 instruct countTrailingZerosL(mRegI dst, mRegL src) %{
13695 predicate(UseCountTrailingZerosInstruction);
13696 match(Set dst (CountTrailingZerosL src));
13698 format %{ "dcto $dst, $src\t# count trailing zeros (long)" %}
13699 ins_encode %{
13700 __ dctz($dst$$Register, $src$$Register);
13701 %}
13702 ins_pipe( ialu_regL_regL );
13703 %}
13705 // ====================VECTOR INSTRUCTIONS=====================================
13707 // Load vectors (8 bytes long)
13708 instruct loadV8(vecD dst, memory mem) %{
13709 predicate(n->as_LoadVector()->memory_size() == 8);
13710 match(Set dst (LoadVector mem));
13711 ins_cost(125);
13712 format %{ "load $dst, $mem\t! load vector (8 bytes)" %}
13713 ins_encode(load_D_enc(dst, mem));
13714 ins_pipe( fpu_loadF );
13715 %}
// Store vectors (8 bytes long) -- reuses the 64-bit FP (double) store encoding.
instruct storeV8(memory mem, vecD src) %{
  predicate(n->as_StoreVector()->memory_size() == 8);
  match(Set mem (StoreVector mem src));
  ins_cost(145);
  format %{ "store $mem, $src\t! store vector (8 bytes)" %}
  ins_encode(store_D_reg_enc(mem, src));
  ins_pipe( fpu_storeF );
%}
// Replicate a byte scalar into all 8 lanes using the DSP replv_ob instruction
// (Loongson 3A2000+ only), then move the 64-bit result into the FP register.
instruct Repl8B_DSP(vecD dst, mRegI src) %{
  predicate(n->as_Vector()->length() == 8 && Use3A2000);
  match(Set dst (ReplicateB src));
  ins_cost(100);
  format %{ "replv_ob AT, $src\n\t"
            "dmtc1 AT, $dst\t! replicate8B" %}
  ins_encode %{
    __ replv_ob(AT, $src$$Register);
    __ dmtc1(AT, $dst$$FloatRegister);
  %}
  ins_pipe( pipe_mtc1 );
%}
// Replicate a byte scalar into all 8 lanes without DSP support: doubling the
// populated width with dins/dinsu (8 -> 16 -> 32 -> 64 bits) in AT.
instruct Repl8B(vecD dst, mRegI src) %{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (ReplicateB src));
  ins_cost(140);
  format %{ "move AT, $src\n\t"
            "dins AT, AT, 8, 8\n\t"
            "dins AT, AT, 16, 16\n\t"
            "dinsu AT, AT, 32, 32\n\t"
            "dmtc1 AT, $dst\t! replicate8B" %}
  ins_encode %{
    __ move(AT, $src$$Register);
    __ dins(AT, AT, 8, 8);     // byte -> halfword
    __ dins(AT, AT, 16, 16);   // halfword -> word
    __ dinsu(AT, AT, 32, 32);  // word -> doubleword
    __ dmtc1(AT, $dst$$FloatRegister);
  %}
  ins_pipe( pipe_mtc1 );
%}
// Replicate a byte immediate into all 8 lanes with the DSP repl_ob instruction
// (Loongson 3A2000+ only).
instruct Repl8B_imm_DSP(vecD dst, immI con) %{
  predicate(n->as_Vector()->length() == 8 && Use3A2000);
  match(Set dst (ReplicateB con));
  ins_cost(110);
  format %{ "repl_ob AT, [$con]\n\t"
            "dmtc1 AT, $dst,0x00\t! replicate8B($con)" %}
  ins_encode %{
    int val = $con$$constant;
    __ repl_ob(AT, val);
    __ dmtc1(AT, $dst$$FloatRegister);
  %}
  ins_pipe( pipe_mtc1 );
%}
// Replicate a byte immediate into all 8 lanes without DSP support, widening
// with dins/dinsu as in Repl8B.
instruct Repl8B_imm(vecD dst, immI con) %{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (ReplicateB con));
  ins_cost(150);
  format %{ "move AT, [$con]\n\t"
            "dins AT, AT, 8, 8\n\t"
            "dins AT, AT, 16, 16\n\t"
            "dinsu AT, AT, 32, 32\n\t"
            "dmtc1 AT, $dst,0x00\t! replicate8B($con)" %}
  ins_encode %{
    __ move(AT, $con$$constant);
    __ dins(AT, AT, 8, 8);     // byte -> halfword
    __ dins(AT, AT, 16, 16);   // halfword -> word
    __ dinsu(AT, AT, 32, 32);  // word -> doubleword
    __ dmtc1(AT, $dst$$FloatRegister);
  %}
  ins_pipe( pipe_mtc1 );
%}
// Replicate byte zero: a single dmtc1 of R0 fills all lanes with 0.
instruct Repl8B_zero(vecD dst, immI0 zero) %{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (ReplicateB zero));
  ins_cost(90);
  format %{ "dmtc1 R0, $dst\t! replicate8B zero" %}
  ins_encode %{
    __ dmtc1(R0, $dst$$FloatRegister);
  %}
  ins_pipe( pipe_mtc1 );
%}
// Replicate byte -1: nor(AT, R0, R0) produces all-ones, which is every lane -1.
instruct Repl8B_M1(vecD dst, immI_M1 M1) %{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (ReplicateB M1));
  ins_cost(80);
  format %{ "dmtc1 -1, $dst\t! replicate8B -1" %}
  ins_encode %{
    __ nor(AT, R0, R0);  // AT = ~0
    __ dmtc1(AT, $dst$$FloatRegister);
  %}
  ins_pipe( pipe_mtc1 );
%}
// Replicate a short scalar into all 4 lanes using the DSP replv_qh instruction
// (Loongson 3A2000+ only).
instruct Repl4S_DSP(vecD dst, mRegI src) %{
  predicate(n->as_Vector()->length() == 4 && Use3A2000);
  match(Set dst (ReplicateS src));
  ins_cost(100);
  format %{ "replv_qh AT, $src\n\t"
            "dmtc1 AT, $dst\t! replicate4S" %}
  ins_encode %{
    __ replv_qh(AT, $src$$Register);
    __ dmtc1(AT, $dst$$FloatRegister);
  %}
  ins_pipe( pipe_mtc1 );
%}
// Replicate a short scalar into all 4 lanes without DSP support: widening with
// dins/dinsu (16 -> 32 -> 64 bits) in AT.
instruct Repl4S(vecD dst, mRegI src) %{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (ReplicateS src));
  ins_cost(120);
  format %{ "move AT, $src \n\t"
            "dins AT, AT, 16, 16\n\t"
            "dinsu AT, AT, 32, 32\n\t"
            "dmtc1 AT, $dst\t! replicate4S" %}
  ins_encode %{
    __ move(AT, $src$$Register);
    __ dins(AT, AT, 16, 16);   // halfword -> word
    __ dinsu(AT, AT, 32, 32);  // word -> doubleword
    __ dmtc1(AT, $dst$$FloatRegister);
  %}
  ins_pipe( pipe_mtc1 );
%}
// Replicate a short immediate into all 4 lanes with DSP instructions
// (Loongson 3A2000+ only). Small immediates use repl_qh directly; larger ones
// are materialized with li32 and then replicated via replv_qh.
instruct Repl4S_imm_DSP(vecD dst, immI con) %{
  predicate(n->as_Vector()->length() == 4 && Use3A2000);
  match(Set dst (ReplicateS con));
  ins_cost(100);
  format %{ "replv_qh AT, [$con]\n\t"
            "dmtc1 AT, $dst\t! replicate4S($con)" %}
  ins_encode %{
    int val = $con$$constant;
    if ( Assembler::is_simm(val, 10)) {
      //repl_qh supports 10 bits immediate
      __ repl_qh(AT, val);
    } else {
      __ li32(AT, val);
      __ replv_qh(AT, AT);
    }
    __ dmtc1(AT, $dst$$FloatRegister);
  %}
  ins_pipe( pipe_mtc1 );
%}
// Replicate a short immediate into all 4 lanes without DSP support, widening
// with dins/dinsu as in Repl4S.
instruct Repl4S_imm(vecD dst, immI con) %{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (ReplicateS con));
  ins_cost(110);
  format %{ "move AT, [$con]\n\t"
            "dins AT, AT, 16, 16\n\t"
            "dinsu AT, AT, 32, 32\n\t"
            "dmtc1 AT, $dst\t! replicate4S($con)" %}
  ins_encode %{
    __ move(AT, $con$$constant);
    __ dins(AT, AT, 16, 16);   // halfword -> word
    __ dinsu(AT, AT, 32, 32);  // word -> doubleword
    __ dmtc1(AT, $dst$$FloatRegister);
  %}
  ins_pipe( pipe_mtc1 );
%}
// Replicate short zero: a single dmtc1 of R0 fills all lanes with 0.
instruct Repl4S_zero(vecD dst, immI0 zero) %{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (ReplicateS zero));
  format %{ "dmtc1 R0, $dst\t! replicate4S zero" %}
  ins_encode %{
    __ dmtc1(R0, $dst$$FloatRegister);
  %}
  ins_pipe( pipe_mtc1 );
%}
// Replicate short -1: nor(AT, R0, R0) produces all-ones, every lane -1.
instruct Repl4S_M1(vecD dst, immI_M1 M1) %{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (ReplicateS M1));
  format %{ "dmtc1 -1, $dst\t! replicate4S -1" %}
  ins_encode %{
    __ nor(AT, R0, R0);  // AT = ~0
    __ dmtc1(AT, $dst$$FloatRegister);
  %}
  ins_pipe( pipe_mtc1 );
%}
// Replicate integer (4 byte) scalar to be vector
// src's low word is inserted into both halves of AT, then moved to the FP reg.
instruct Repl2I(vecD dst, mRegI src) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (ReplicateI src));
  format %{ "dins AT, $src, 0, 32\n\t"
            "dinsu AT, $src, 32, 32\n\t"
            "dmtc1 AT, $dst\t! replicate2I" %}
  ins_encode %{
    __ dins(AT, $src$$Register, 0, 32);    // low lane
    __ dinsu(AT, $src$$Register, 32, 32);  // high lane
    __ dmtc1(AT, $dst$$FloatRegister);
  %}
  ins_pipe( pipe_mtc1 );
%}
13918 // Replicate integer (4 byte) scalar immediate to be vector by loading from const table.
13919 instruct Repl2I_imm(vecD dst, immI con, mA7RegI tmp) %{
13920 predicate(n->as_Vector()->length() == 2);
13921 match(Set dst (ReplicateI con));
13922 effect(KILL tmp);
13923 format %{ "li32 AT, [$con], 32\n\t"
13924 "dinsu AT, AT\n\t"
13925 "dmtc1 AT, $dst\t! replicate2I($con)" %}
13926 ins_encode %{
13927 int val = $con$$constant;
13928 __ li32(AT, val);
13929 __ dinsu(AT, AT, 32, 32);
13930 __ dmtc1(AT, $dst$$FloatRegister);
13931 %}
13932 ins_pipe( pipe_mtc1 );
13933 %}
// Replicate integer (4 byte) scalar zero to be vector
// A single dmtc1 of R0 fills both lanes with 0.
instruct Repl2I_zero(vecD dst, immI0 zero) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (ReplicateI zero));
  format %{ "dmtc1 R0, $dst\t! replicate2I zero" %}
  ins_encode %{
    __ dmtc1(R0, $dst$$FloatRegister);
  %}
  ins_pipe( pipe_mtc1 );
%}
// Replicate integer (4 byte) scalar -1 to be vector
// nor(AT, R0, R0) produces all-ones, which is -1 in both lanes.
instruct Repl2I_M1(vecD dst, immI_M1 M1) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (ReplicateI M1));
  format %{ "dmtc1 -1, $dst\t! replicate2I -1, use AT" %}
  ins_encode %{
    __ nor(AT, R0, R0);  // AT = ~0
    __ dmtc1(AT, $dst$$FloatRegister);
  %}
  ins_pipe( pipe_mtc1 );
%}
// Replicate float (4 byte) scalar to be vector
// cvt.ps packs the same single into both halves of a paired-single register.
instruct Repl2F(vecD dst, regF src) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (ReplicateF src));
  format %{ "cvt.ps $dst, $src, $src\t! replicate2F" %}
  ins_encode %{
    __ cvt_ps_s($dst$$FloatRegister, $src$$FloatRegister, $src$$FloatRegister);
  %}
  ins_pipe( pipe_slow );
%}
// Replicate float (4 byte) scalar zero to be vector
// A single dmtc1 of R0 fills both lanes with +0.0f.
instruct Repl2F_zero(vecD dst, immF0 zero) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (ReplicateF zero));
  format %{ "dmtc1 R0, $dst\t! replicate2F zero" %}
  ins_encode %{
    __ dmtc1(R0, $dst$$FloatRegister);
  %}
  ins_pipe( pipe_mtc1 );
%}
13981 // ====================VECTOR ARITHMETIC=======================================
13983 // --------------------------------- ADD --------------------------------------
// Floats vector add
// kernel does not have emulation of PS instructions yet, so PS instructions is disabled.
// Two-operand form: dst += src using the paired-single add.
instruct vadd2F(vecD dst, vecD src) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (AddVF dst src));
  format %{ "add.ps $dst,$src\t! add packed2F" %}
  ins_encode %{
    __ add_ps($dst$$FloatRegister, $dst$$FloatRegister, $src$$FloatRegister);
  %}
  ins_pipe( pipe_slow );
%}
// Three-operand form: dst = src1 + src2 using the paired-single add.
instruct vadd2F3(vecD dst, vecD src1, vecD src2) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (AddVF src1 src2));
  format %{ "add.ps $dst,$src1,$src2\t! add packed2F" %}
  ins_encode %{
    __ add_ps($dst$$FloatRegister, $src1$$FloatRegister, $src2$$FloatRegister);
  %}
  ins_pipe( fpu_regF_regF );
%}
14007 // --------------------------------- SUB --------------------------------------
// Floats vector sub
// Two-operand form: dst -= src using the paired-single subtract.
instruct vsub2F(vecD dst, vecD src) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (SubVF dst src));
  format %{ "sub.ps $dst,$src\t! sub packed2F" %}
  ins_encode %{
    __ sub_ps($dst$$FloatRegister, $dst$$FloatRegister, $src$$FloatRegister);
  %}
  ins_pipe( fpu_regF_regF );
%}
14020 // --------------------------------- MUL --------------------------------------
// Floats vector mul
// Two-operand form: dst *= src using the paired-single multiply.
instruct vmul2F(vecD dst, vecD src) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (MulVF dst src));
  format %{ "mul.ps $dst, $src\t! mul packed2F" %}
  ins_encode %{
    __ mul_ps($dst$$FloatRegister, $dst$$FloatRegister, $src$$FloatRegister);
  %}
  ins_pipe( fpu_regF_regF );
%}
// Three-operand form: dst = src1 * src2 using the paired-single multiply.
instruct vmul2F3(vecD dst, vecD src1, vecD src2) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (MulVF src1 src2));
  format %{ "mul.ps $dst, $src1, $src2\t! mul packed2F" %}
  ins_encode %{
    __ mul_ps($dst$$FloatRegister, $src1$$FloatRegister, $src2$$FloatRegister);
  %}
  ins_pipe( fpu_regF_regF );
%}
14043 // --------------------------------- DIV --------------------------------------
14044 // MIPS do not have div.ps
14046 // --------------------------------- MADD --------------------------------------
14047 // Floats vector madd
14048 //instruct vmadd2F(vecD dst, vecD src1, vecD src2, vecD src3) %{
14049 // predicate(n->as_Vector()->length() == 2);
14050 // match(Set dst (AddVF (MulVF src1 src2) src3));
14051 // ins_cost(50);
14052 // format %{ "madd.ps $dst, $src3, $src1, $src2\t! madd packed2F" %}
14053 // ins_encode %{
14054 // __ madd_ps($dst$$FloatRegister, $src3$$FloatRegister, $src1$$FloatRegister, $src2$$FloatRegister);
14055 // %}
14056 // ins_pipe( fpu_regF_regF );
14057 //%}
14060 //----------PEEPHOLE RULES-----------------------------------------------------
14061 // These must follow all instruction definitions as they use the names
14062 // defined in the instructions definitions.
14063 //
// peepmatch ( root_instr_name [preceding_instruction]* );
14065 //
14066 // peepconstraint %{
14067 // (instruction_number.operand_name relational_op instruction_number.operand_name
14068 // [, ...] );
14069 // // instruction numbers are zero-based using left to right order in peepmatch
14070 //
14071 // peepreplace ( instr_name ( [instruction_number.operand_name]* ) );
14072 // // provide an instruction_number.operand_name for each operand that appears
14073 // // in the replacement instruction's match rule
14074 //
14075 // ---------VM FLAGS---------------------------------------------------------
14076 //
14077 // All peephole optimizations can be turned off using -XX:-OptoPeephole
14078 //
14079 // Each peephole rule is given an identifying number starting with zero and
14080 // increasing by one in the order seen by the parser. An individual peephole
14081 // can be enabled, and all others disabled, by using -XX:OptoPeepholeAt=#
14082 // on the command-line.
14083 //
14084 // ---------CURRENT LIMITATIONS----------------------------------------------
14085 //
14086 // Only match adjacent instructions in same basic block
14087 // Only equality constraints
14088 // Only constraints between operands, not (0.dest_reg == EAX_enc)
14089 // Only one replacement instruction
14090 //
14091 // ---------EXAMPLE----------------------------------------------------------
14092 //
14093 // // pertinent parts of existing instructions in architecture description
14094 // instruct movI(eRegI dst, eRegI src) %{
14095 // match(Set dst (CopyI src));
14096 // %}
14097 //
14098 // instruct incI_eReg(eRegI dst, immI1 src, eFlagsReg cr) %{
14099 // match(Set dst (AddI dst src));
14100 // effect(KILL cr);
14101 // %}
14102 //
14103 // // Change (inc mov) to lea
14104 // peephole %{
// // increment preceded by register-register move
14106 // peepmatch ( incI_eReg movI );
14107 // // require that the destination register of the increment
14108 // // match the destination register of the move
14109 // peepconstraint ( 0.dst == 1.dst );
14110 // // construct a replacement instruction that sets
14111 // // the destination to ( move's source register + one )
14112 // peepreplace ( leaI_eReg_immI( 0.dst 1.src 0.src ) );
14113 // %}
14114 //
14115 // Implementation no longer uses movX instructions since
14116 // machine-independent system no longer uses CopyX nodes.
14117 //
14118 // peephole %{
14119 // peepmatch ( incI_eReg movI );
14120 // peepconstraint ( 0.dst == 1.dst );
14121 // peepreplace ( leaI_eReg_immI( 0.dst 1.src 0.src ) );
14122 // %}
14123 //
14124 // peephole %{
14125 // peepmatch ( decI_eReg movI );
14126 // peepconstraint ( 0.dst == 1.dst );
14127 // peepreplace ( leaI_eReg_immI( 0.dst 1.src 0.src ) );
14128 // %}
14129 //
14130 // peephole %{
14131 // peepmatch ( addI_eReg_imm movI );
14132 // peepconstraint ( 0.dst == 1.dst );
14133 // peepreplace ( leaI_eReg_immI( 0.dst 1.src 0.src ) );
14134 // %}
14135 //
14136 // peephole %{
14137 // peepmatch ( addP_eReg_imm movP );
14138 // peepconstraint ( 0.dst == 1.dst );
14139 // peepreplace ( leaP_eReg_immI( 0.dst 1.src 0.src ) );
14140 // %}
14142 // // Change load of spilled value to only a spill
14143 // instruct storeI(memory mem, eRegI src) %{
14144 // match(Set mem (StoreI mem src));
14145 // %}
14146 //
14147 // instruct loadI(eRegI dst, memory mem) %{
14148 // match(Set dst (LoadI mem));
14149 // %}
14150 //
14151 //peephole %{
14152 // peepmatch ( loadI storeI );
14153 // peepconstraint ( 1.src == 0.dst, 1.mem == 0.mem );
14154 // peepreplace ( storeI( 1.mem 1.mem 1.src ) );
14155 //%}
14157 //----------SMARTSPILL RULES---------------------------------------------------
14158 // These must follow all instruction definitions as they use the names
14159 // defined in the instructions definitions.