Wed, 17 May 2017 03:46:25 -0400
#5481 Vector optimization was not used by default.
Vector optimization of MIPS works in most cases, but cannot pass hotspot/test/compiler/6340864/TestFloatVect.java.
The reasons:
1. The kernel does not have emulation of PS instructions yet, so the emulation of PS instructions must be done in JVM, see JVM_handle_linux_signal.
2. It seems the gcc4.4.7 had some bug related to ucontext_t, which is used in signal handler to emulate PS instructions.
Use `java -XX:MaxVectorSize=8` to enable vector optimization.
1 //
2 // Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved.
3 // Copyright (c) 2015, 2016, Loongson Technology. All rights reserved.
4 // DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5 //
6 // This code is free software; you can redistribute it and/or modify it
7 // under the terms of the GNU General Public License version 2 only, as
8 // published by the Free Software Foundation.
9 //
10 // This code is distributed in the hope that it will be useful, but WITHOUT
11 // ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 // FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
13 // version 2 for more details (a copy is included in the LICENSE file that
14 // accompanied this code).
15 //
16 // You should have received a copy of the GNU General Public License version
17 // 2 along with this work; if not, write to the Free Software Foundation,
18 // Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
19 //
20 // Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
21 // or visit www.oracle.com if you need additional information or have any
22 // questions.
23 //
24 //
26 // GodSon3 Architecture Description File
28 //----------REGISTER DEFINITION BLOCK------------------------------------------
29 // This information is used by the matcher and the register allocator to
30 // describe individual registers and classes of registers within the target
31 // architecture.
33 // format:
34 // reg_def name (call convention, c-call convention, ideal type, encoding);
35 // call convention :
36 // NS = No-Save
37 // SOC = Save-On-Call
38 // SOE = Save-On-Entry
39 // AS = Always-Save
40 // ideal type :
41 // see opto/opcodes.hpp for more info
42 // reg_class name (reg, ...);
43 // alloc_class name (reg, ...);
44 register %{
46 // General Registers
47 // Integer Registers
48 reg_def R0 ( NS, NS, Op_RegI, 0, VMRegImpl::Bad());
49 reg_def AT ( NS, NS, Op_RegI, 1, AT->as_VMReg());
50 reg_def AT_H ( NS, NS, Op_RegI, 1, AT->as_VMReg()->next());
51 reg_def V0 (SOC, SOC, Op_RegI, 2, V0->as_VMReg());
52 reg_def V0_H (SOC, SOC, Op_RegI, 2, V0->as_VMReg()->next());
53 reg_def V1 (SOC, SOC, Op_RegI, 3, V1->as_VMReg());
54 reg_def V1_H (SOC, SOC, Op_RegI, 3, V1->as_VMReg()->next());
55 reg_def A0 (SOC, SOC, Op_RegI, 4, A0->as_VMReg());
56 reg_def A0_H (SOC, SOC, Op_RegI, 4, A0->as_VMReg()->next());
57 reg_def A1 (SOC, SOC, Op_RegI, 5, A1->as_VMReg());
58 reg_def A1_H (SOC, SOC, Op_RegI, 5, A1->as_VMReg()->next());
59 reg_def A2 (SOC, SOC, Op_RegI, 6, A2->as_VMReg());
60 reg_def A2_H (SOC, SOC, Op_RegI, 6, A2->as_VMReg()->next());
61 reg_def A3 (SOC, SOC, Op_RegI, 7, A3->as_VMReg());
62 reg_def A3_H (SOC, SOC, Op_RegI, 7, A3->as_VMReg()->next());
63 reg_def A4 (SOC, SOC, Op_RegI, 8, A4->as_VMReg());
64 reg_def A4_H (SOC, SOC, Op_RegI, 8, A4->as_VMReg()->next());
65 reg_def A5 (SOC, SOC, Op_RegI, 9, A5->as_VMReg());
66 reg_def A5_H (SOC, SOC, Op_RegI, 9, A5->as_VMReg()->next());
67 reg_def A6 (SOC, SOC, Op_RegI, 10, A6->as_VMReg());
68 reg_def A6_H (SOC, SOC, Op_RegI, 10, A6->as_VMReg()->next());
69 reg_def A7 (SOC, SOC, Op_RegI, 11, A7->as_VMReg());
70 reg_def A7_H (SOC, SOC, Op_RegI, 11, A7->as_VMReg()->next());
71 reg_def T0 (SOC, SOC, Op_RegI, 12, T0->as_VMReg());
72 reg_def T0_H (SOC, SOC, Op_RegI, 12, T0->as_VMReg()->next());
73 reg_def T1 (SOC, SOC, Op_RegI, 13, T1->as_VMReg());
74 reg_def T1_H (SOC, SOC, Op_RegI, 13, T1->as_VMReg()->next());
75 reg_def T2 (SOC, SOC, Op_RegI, 14, T2->as_VMReg());
76 reg_def T2_H (SOC, SOC, Op_RegI, 14, T2->as_VMReg()->next());
77 reg_def T3 (SOC, SOC, Op_RegI, 15, T3->as_VMReg());
78 reg_def T3_H (SOC, SOC, Op_RegI, 15, T3->as_VMReg()->next());
79 reg_def S0 (SOC, SOE, Op_RegI, 16, S0->as_VMReg());
80 reg_def S0_H (SOC, SOE, Op_RegI, 16, S0->as_VMReg()->next());
81 reg_def S1 (SOC, SOE, Op_RegI, 17, S1->as_VMReg());
82 reg_def S1_H (SOC, SOE, Op_RegI, 17, S1->as_VMReg()->next());
83 reg_def S2 (SOC, SOE, Op_RegI, 18, S2->as_VMReg());
84 reg_def S2_H (SOC, SOE, Op_RegI, 18, S2->as_VMReg()->next());
85 reg_def S3 (SOC, SOE, Op_RegI, 19, S3->as_VMReg());
86 reg_def S3_H (SOC, SOE, Op_RegI, 19, S3->as_VMReg()->next());
87 reg_def S4 (SOC, SOE, Op_RegI, 20, S4->as_VMReg());
88 reg_def S4_H (SOC, SOE, Op_RegI, 20, S4->as_VMReg()->next());
89 reg_def S5 (SOC, SOE, Op_RegI, 21, S5->as_VMReg());
90 reg_def S5_H (SOC, SOE, Op_RegI, 21, S5->as_VMReg()->next());
91 reg_def S6 (SOC, SOE, Op_RegI, 22, S6->as_VMReg());
92 reg_def S6_H (SOC, SOE, Op_RegI, 22, S6->as_VMReg()->next());
93 reg_def S7 (SOC, SOE, Op_RegI, 23, S7->as_VMReg());
94 reg_def S7_H (SOC, SOE, Op_RegI, 23, S7->as_VMReg()->next());
95 reg_def T8 (SOC, SOC, Op_RegI, 24, T8->as_VMReg());
96 reg_def T8_H (SOC, SOC, Op_RegI, 24, T8->as_VMReg()->next());
97 reg_def T9 (SOC, SOC, Op_RegI, 25, T9->as_VMReg());
98 reg_def T9_H (SOC, SOC, Op_RegI, 25, T9->as_VMReg()->next());
100 // Special Registers
101 reg_def K0 ( NS, NS, Op_RegI, 26, K0->as_VMReg());
102 reg_def K1 ( NS, NS, Op_RegI, 27, K1->as_VMReg());
103 reg_def GP ( NS, NS, Op_RegI, 28, GP->as_VMReg());
104 reg_def GP_H ( NS, NS, Op_RegI, 28, GP->as_VMReg()->next());
105 reg_def SP ( NS, NS, Op_RegI, 29, SP->as_VMReg());
106 reg_def SP_H ( NS, NS, Op_RegI, 29, SP->as_VMReg()->next());
107 reg_def FP ( NS, NS, Op_RegI, 30, FP->as_VMReg());
108 reg_def FP_H ( NS, NS, Op_RegI, 30, FP->as_VMReg()->next());
109 reg_def RA ( NS, NS, Op_RegI, 31, RA->as_VMReg());
110 reg_def RA_H ( NS, NS, Op_RegI, 31, RA->as_VMReg()->next());
112 // Floating registers.
113 reg_def F0 ( SOC, SOC, Op_RegF, 0, F0->as_VMReg());
114 reg_def F0_H ( SOC, SOC, Op_RegF, 0, F0->as_VMReg()->next());
115 reg_def F1 ( SOC, SOC, Op_RegF, 1, F1->as_VMReg());
116 reg_def F1_H ( SOC, SOC, Op_RegF, 1, F1->as_VMReg()->next());
117 reg_def F2 ( SOC, SOC, Op_RegF, 2, F2->as_VMReg());
118 reg_def F2_H ( SOC, SOC, Op_RegF, 2, F2->as_VMReg()->next());
119 reg_def F3 ( SOC, SOC, Op_RegF, 3, F3->as_VMReg());
120 reg_def F3_H ( SOC, SOC, Op_RegF, 3, F3->as_VMReg()->next());
121 reg_def F4 ( SOC, SOC, Op_RegF, 4, F4->as_VMReg());
122 reg_def F4_H ( SOC, SOC, Op_RegF, 4, F4->as_VMReg()->next());
123 reg_def F5 ( SOC, SOC, Op_RegF, 5, F5->as_VMReg());
124 reg_def F5_H ( SOC, SOC, Op_RegF, 5, F5->as_VMReg()->next());
125 reg_def F6 ( SOC, SOC, Op_RegF, 6, F6->as_VMReg());
126 reg_def F6_H ( SOC, SOC, Op_RegF, 6, F6->as_VMReg()->next());
127 reg_def F7 ( SOC, SOC, Op_RegF, 7, F7->as_VMReg());
128 reg_def F7_H ( SOC, SOC, Op_RegF, 7, F7->as_VMReg()->next());
129 reg_def F8 ( SOC, SOC, Op_RegF, 8, F8->as_VMReg());
130 reg_def F8_H ( SOC, SOC, Op_RegF, 8, F8->as_VMReg()->next());
131 reg_def F9 ( SOC, SOC, Op_RegF, 9, F9->as_VMReg());
132 reg_def F9_H ( SOC, SOC, Op_RegF, 9, F9->as_VMReg()->next());
133 reg_def F10 ( SOC, SOC, Op_RegF, 10, F10->as_VMReg());
134 reg_def F10_H ( SOC, SOC, Op_RegF, 10, F10->as_VMReg()->next());
135 reg_def F11 ( SOC, SOC, Op_RegF, 11, F11->as_VMReg());
136 reg_def F11_H ( SOC, SOC, Op_RegF, 11, F11->as_VMReg()->next());
137 reg_def F12 ( SOC, SOC, Op_RegF, 12, F12->as_VMReg());
138 reg_def F12_H ( SOC, SOC, Op_RegF, 12, F12->as_VMReg()->next());
139 reg_def F13 ( SOC, SOC, Op_RegF, 13, F13->as_VMReg());
140 reg_def F13_H ( SOC, SOC, Op_RegF, 13, F13->as_VMReg()->next());
141 reg_def F14 ( SOC, SOC, Op_RegF, 14, F14->as_VMReg());
142 reg_def F14_H ( SOC, SOC, Op_RegF, 14, F14->as_VMReg()->next());
143 reg_def F15 ( SOC, SOC, Op_RegF, 15, F15->as_VMReg());
144 reg_def F15_H ( SOC, SOC, Op_RegF, 15, F15->as_VMReg()->next());
145 reg_def F16 ( SOC, SOC, Op_RegF, 16, F16->as_VMReg());
146 reg_def F16_H ( SOC, SOC, Op_RegF, 16, F16->as_VMReg()->next());
147 reg_def F17 ( SOC, SOC, Op_RegF, 17, F17->as_VMReg());
148 reg_def F17_H ( SOC, SOC, Op_RegF, 17, F17->as_VMReg()->next());
149 reg_def F18 ( SOC, SOC, Op_RegF, 18, F18->as_VMReg());
150 reg_def F18_H ( SOC, SOC, Op_RegF, 18, F18->as_VMReg()->next());
151 reg_def F19 ( SOC, SOC, Op_RegF, 19, F19->as_VMReg());
152 reg_def F19_H ( SOC, SOC, Op_RegF, 19, F19->as_VMReg()->next());
153 reg_def F20 ( SOC, SOC, Op_RegF, 20, F20->as_VMReg());
154 reg_def F20_H ( SOC, SOC, Op_RegF, 20, F20->as_VMReg()->next());
155 reg_def F21 ( SOC, SOC, Op_RegF, 21, F21->as_VMReg());
156 reg_def F21_H ( SOC, SOC, Op_RegF, 21, F21->as_VMReg()->next());
157 reg_def F22 ( SOC, SOC, Op_RegF, 22, F22->as_VMReg());
158 reg_def F22_H ( SOC, SOC, Op_RegF, 22, F22->as_VMReg()->next());
159 reg_def F23 ( SOC, SOC, Op_RegF, 23, F23->as_VMReg());
160 reg_def F23_H ( SOC, SOC, Op_RegF, 23, F23->as_VMReg()->next());
161 reg_def F24 ( SOC, SOC, Op_RegF, 24, F24->as_VMReg());
162 reg_def F24_H ( SOC, SOC, Op_RegF, 24, F24->as_VMReg()->next());
163 reg_def F25 ( SOC, SOC, Op_RegF, 25, F25->as_VMReg());
164 reg_def F25_H ( SOC, SOC, Op_RegF, 25, F25->as_VMReg()->next());
165 reg_def F26 ( SOC, SOC, Op_RegF, 26, F26->as_VMReg());
166 reg_def F26_H ( SOC, SOC, Op_RegF, 26, F26->as_VMReg()->next());
167 reg_def F27 ( SOC, SOC, Op_RegF, 27, F27->as_VMReg());
168 reg_def F27_H ( SOC, SOC, Op_RegF, 27, F27->as_VMReg()->next());
169 reg_def F28 ( SOC, SOC, Op_RegF, 28, F28->as_VMReg());
170 reg_def F28_H ( SOC, SOC, Op_RegF, 28, F28->as_VMReg()->next());
171 reg_def F29 ( SOC, SOC, Op_RegF, 29, F29->as_VMReg());
172 reg_def F29_H ( SOC, SOC, Op_RegF, 29, F29->as_VMReg()->next());
173 reg_def F30 ( SOC, SOC, Op_RegF, 30, F30->as_VMReg());
174 reg_def F30_H ( SOC, SOC, Op_RegF, 30, F30->as_VMReg()->next());
175 reg_def F31 ( SOC, SOC, Op_RegF, 31, F31->as_VMReg());
176 reg_def F31_H ( SOC, SOC, Op_RegF, 31, F31->as_VMReg()->next());
179 // ----------------------------
180 // Special Registers
181 // Condition Codes Flag Registers
182 reg_def MIPS_FLAG (SOC, SOC, Op_RegFlags, 1, as_Register(1)->as_VMReg());
183 //S6 is used for get_thread(S6)
184 //S5 is used for the heap base of compressed oops
// Allocation class for the general-purpose (integer) registers.
// Order matters: the allocator prefers registers listed earlier, so the
// callee-saved S registers come first and SP/FP come last.
// Fixed: a missing ',' after "GP, GP_H" made the list syntactically broken.
185 alloc_class chunk0(
186 S7, S7_H,
187 S0, S0_H,
188 S1, S1_H,
189 S2, S2_H,
190 S4, S4_H,
191 S5, S5_H,
192 S6, S6_H,
193 S3, S3_H,
194 T2, T2_H,
195 T3, T3_H,
196 T8, T8_H,
197 T9, T9_H,
198 T1, T1_H, // inline_cache_reg
199 V1, V1_H,
200 A7, A7_H,
201 A6, A6_H,
202 A5, A5_H,
203 A4, A4_H,
204 V0, V0_H,
205 A3, A3_H,
206 A2, A2_H,
207 A1, A1_H,
208 A0, A0_H,
209 T0, T0_H,
210 GP, GP_H,
211 RA, RA_H,
212 SP, SP_H, // stack_pointer
213 FP, FP_H // frame_pointer
214 );
216 alloc_class chunk1( F0, F0_H,
217 F1, F1_H,
218 F2, F2_H,
219 F3, F3_H,
220 F4, F4_H,
221 F5, F5_H,
222 F6, F6_H,
223 F7, F7_H,
224 F8, F8_H,
225 F9, F9_H,
226 F10, F10_H,
227 F11, F11_H,
228 F20, F20_H,
229 F21, F21_H,
230 F22, F22_H,
231 F23, F23_H,
232 F24, F24_H,
233 F25, F25_H,
234 F26, F26_H,
235 F27, F27_H,
236 F28, F28_H,
237 F19, F19_H,
238 F18, F18_H,
239 F17, F17_H,
240 F16, F16_H,
241 F15, F15_H,
242 F14, F14_H,
243 F13, F13_H,
244 F12, F12_H,
245 F29, F29_H,
246 F30, F30_H,
247 F31, F31_H);
249 alloc_class chunk2(MIPS_FLAG);
251 reg_class s_reg( S0, S1, S2, S3, S4, S5, S6, S7 );
252 reg_class s0_reg( S0 );
253 reg_class s1_reg( S1 );
254 reg_class s2_reg( S2 );
255 reg_class s3_reg( S3 );
256 reg_class s4_reg( S4 );
257 reg_class s5_reg( S5 );
258 reg_class s6_reg( S6 );
259 reg_class s7_reg( S7 );
261 reg_class t_reg( T0, T1, T2, T3, T8, T9 );
262 reg_class t0_reg( T0 );
263 reg_class t1_reg( T1 );
264 reg_class t2_reg( T2 );
265 reg_class t3_reg( T3 );
266 reg_class t8_reg( T8 );
267 reg_class t9_reg( T9 );
269 reg_class a_reg( A0, A1, A2, A3, A4, A5, A6, A7 );
270 reg_class a0_reg( A0 );
271 reg_class a1_reg( A1 );
272 reg_class a2_reg( A2 );
273 reg_class a3_reg( A3 );
274 reg_class a4_reg( A4 );
275 reg_class a5_reg( A5 );
276 reg_class a6_reg( A6 );
277 reg_class a7_reg( A7 );
279 reg_class v0_reg( V0 );
280 reg_class v1_reg( V1 );
282 reg_class sp_reg( SP, SP_H );
283 reg_class fp_reg( FP, FP_H );
285 reg_class mips_flags(MIPS_FLAG);
287 reg_class v0_long_reg( V0, V0_H );
288 reg_class v1_long_reg( V1, V1_H );
289 reg_class a0_long_reg( A0, A0_H );
290 reg_class a1_long_reg( A1, A1_H );
291 reg_class a2_long_reg( A2, A2_H );
292 reg_class a3_long_reg( A3, A3_H );
293 reg_class a4_long_reg( A4, A4_H );
294 reg_class a5_long_reg( A5, A5_H );
295 reg_class a6_long_reg( A6, A6_H );
296 reg_class a7_long_reg( A7, A7_H );
297 reg_class t0_long_reg( T0, T0_H );
298 reg_class t1_long_reg( T1, T1_H );
299 reg_class t2_long_reg( T2, T2_H );
300 reg_class t3_long_reg( T3, T3_H );
301 reg_class t8_long_reg( T8, T8_H );
302 reg_class t9_long_reg( T9, T9_H );
303 reg_class s0_long_reg( S0, S0_H );
304 reg_class s1_long_reg( S1, S1_H );
305 reg_class s2_long_reg( S2, S2_H );
306 reg_class s3_long_reg( S3, S3_H );
307 reg_class s4_long_reg( S4, S4_H );
308 reg_class s5_long_reg( S5, S5_H );
309 reg_class s6_long_reg( S6, S6_H );
310 reg_class s7_long_reg( S7, S7_H );
312 reg_class int_reg( S7, S0, S1, S2, S4, S3, T8, T2, T3, T1, V1, A7, A6, A5, A4, V0, A3, A2, A1, A0, T0 );
314 reg_class no_Ax_int_reg( S7, S0, S1, S2, S4, S3, T8, T2, T3, T1, V1, V0, T0 );
316 reg_class p_reg(
317 S7, S7_H,
318 S0, S0_H,
319 S1, S1_H,
320 S2, S2_H,
321 S4, S4_H,
322 S3, S3_H,
323 T8, T8_H,
324 T2, T2_H,
325 T3, T3_H,
326 T1, T1_H,
327 A7, A7_H,
328 A6, A6_H,
329 A5, A5_H,
330 A4, A4_H,
331 A3, A3_H,
332 A2, A2_H,
333 A1, A1_H,
334 A0, A0_H,
335 T0, T0_H
336 );
338 reg_class no_T8_p_reg(
339 S7, S7_H,
340 S0, S0_H,
341 S1, S1_H,
342 S2, S2_H,
343 S4, S4_H,
344 S3, S3_H,
345 T2, T2_H,
346 T3, T3_H,
347 T1, T1_H,
348 A7, A7_H,
349 A6, A6_H,
350 A5, A5_H,
351 A4, A4_H,
352 A3, A3_H,
353 A2, A2_H,
354 A1, A1_H,
355 A0, A0_H,
356 T0, T0_H
357 );
359 reg_class long_reg(
360 S7, S7_H,
361 S0, S0_H,
362 S1, S1_H,
363 S2, S2_H,
364 S4, S4_H,
365 S3, S3_H,
366 T8, T8_H,
367 T2, T2_H,
368 T3, T3_H,
369 T1, T1_H,
370 A7, A7_H,
371 A6, A6_H,
372 A5, A5_H,
373 A4, A4_H,
374 A3, A3_H,
375 A2, A2_H,
376 A1, A1_H,
377 A0, A0_H,
378 T0, T0_H
379 );
382 // Floating point registers.
383 // 2012/8/23 Fu: F30/F31 are used as temporary registers in D2I
384 // 2016/12/1 aoqi: F31 are not used as temporary registers in D2I
// Single-precision float register class; F30 is excluded (temporary in D2I,
// see the note above).  Fixed: missing ',' between F17 and F18.
385 reg_class flt_reg( F0, F1, F2, F3, F4, F5, F6, F7, F8, F9, F10, F11, F12, F13, F14, F15, F16, F17, F18, F19, F20, F21, F22, F23, F24, F25, F26, F27, F28, F29, F31);
386 reg_class dbl_reg( F0, F0_H,
387 F1, F1_H,
388 F2, F2_H,
389 F3, F3_H,
390 F4, F4_H,
391 F5, F5_H,
392 F6, F6_H,
393 F7, F7_H,
394 F8, F8_H,
395 F9, F9_H,
396 F10, F10_H,
397 F11, F11_H,
398 F12, F12_H,
399 F13, F13_H,
400 F14, F14_H,
401 F15, F15_H,
402 F16, F16_H,
403 F17, F17_H,
404 F18, F18_H,
405 F19, F19_H,
406 F20, F20_H,
407 F21, F21_H,
408 F22, F22_H,
409 F23, F23_H,
410 F24, F24_H,
411 F25, F25_H,
412 F26, F26_H,
413 F27, F27_H,
414 F28, F28_H,
415 F29, F29_H,
416 F31, F31_H);
418 reg_class flt_arg0( F12 );
419 reg_class dbl_arg0( F12, F12_H );
420 reg_class dbl_arg1( F14, F14_H );
422 %}
424 //----------DEFINITION BLOCK---------------------------------------------------
425 // Define name --> value mappings to inform the ADLC of an integer valued name
426 // Current support includes integer values in the range [0, 0x7FFFFFFF]
427 // Format:
428 // int_def <name> ( <int_value>, <expression>);
429 // Generated Code in ad_<arch>.hpp
430 // #define <name> (<expression>)
431 // // value == <int_value>
432 // Generated code in ad_<arch>.cpp adlc_verification()
433 // assert( <name> == <int_value>, "Expect (<expression>) to equal <int_value>");
434 //
435 definitions %{
436 int_def DEFAULT_COST ( 100, 100);
437 int_def HUGE_COST (1000000, 1000000);
439 // Memory refs are twice as expensive as run-of-the-mill.
440 int_def MEMORY_REF_COST ( 200, DEFAULT_COST * 2);
442 // Branches are even more expensive.
443 int_def BRANCH_COST ( 300, DEFAULT_COST * 3);
444 // we use jr instruction to construct call, so more expensive
445 // by yjl 2/28/2006
446 int_def CALL_COST ( 500, DEFAULT_COST * 5);
447 /*
448 int_def EQUAL ( 1, 1 );
449 int_def NOT_EQUAL ( 2, 2 );
450 int_def GREATER ( 3, 3 );
451 int_def GREATER_EQUAL ( 4, 4 );
452 int_def LESS ( 5, 5 );
453 int_def LESS_EQUAL ( 6, 6 );
454 */
455 %}
459 //----------SOURCE BLOCK-------------------------------------------------------
460 // This is a block of C++ code which provides values, functions, and
461 // definitions necessary in the rest of the architecture description
463 source_hpp %{
464 // Header information of the source block.
465 // Method declarations/definitions which are used outside
466 // the ad-scope can conveniently be defined here.
467 //
468 // To keep related declarations/definitions/uses close together,
469 // we switch between source %{ }% and source_hpp %{ }% freely as needed.
// Trampoline-stub queries consulted by Compile::shorten_branches().
// This platform emits no call trampolines, so both answers are zero.
471 class CallStubImpl {
473 //--------------------------------------------------------------
474 //---< Used for optimization in Compile::shorten_branches >---
475 //--------------------------------------------------------------
477 public:
478 // Size of call trampoline stub.
479 static uint size_call_trampoline() {
480 return 0; // no call trampolines on this platform
481 }
483 // number of relocations needed by a call trampoline stub
484 static uint reloc_call_trampoline() {
485 return 0; // no call trampolines on this platform
486 }
487 };
// Sizes and emitters for the per-method exception and deopt handler stubs.
// The emit_* bodies live in the source %{}% block below.
489 class HandlerImpl {
491 public:
493 static int emit_exception_handler(CodeBuffer &cbuf);
494 static int emit_deopt_handler(CodeBuffer& cbuf);
496 static uint size_exception_handler() {
497 // NativeCall instruction size is the same as NativeJump.
498 // exception handler starts out as jump and can be patched to
499 // a call by deoptimization. (4932387)
500 // Note that this value is also credited (in output.cpp) to
501 // the size of the code section.
502 // return NativeJump::instruction_size;
503 int size = NativeCall::instruction_size;
504 return round_to(size, 16);
505 }
507 #ifdef _LP64
508 static uint size_deopt_handler() {
509 int size = NativeCall::instruction_size;
510 return round_to(size, 16);
511 }
512 #else
513 static uint size_deopt_handler() {
514 // NativeCall instruction size is the same as NativeJump.
515 // exception handler starts out as jump and can be patched to
516 // a call by deoptimization. (4932387)
517 // Note that this value is also credited (in output.cpp) to
518 // the size of the code section.
519 return 5 + NativeJump::instruction_size; // pushl(); jmp;
520 }
521 #endif
522 };
524 %} // end source_hpp
526 source %{
528 #define NO_INDEX 0
529 #define RELOC_IMM64 Assembler::imm_operand
530 #define RELOC_DISP32 Assembler::disp32_operand
533 #define __ _masm.
536 // Emit exception handler code.
537 // Stuff framesize into a register and call a VM stub routine.
// Emits the out-of-line exception-handler stub: a single patchable jump
// to the shared C2 exception blob.  Returns the stub's start offset in
// the stub section, or 0 when the code buffer could not be expanded.
538 int HandlerImpl::emit_exception_handler(CodeBuffer& cbuf) {
539 // Note that the code buffer's insts_mark is always relative to insts.
540 // That's why we must use the macroassembler to generate a handler.
541 MacroAssembler _masm(&cbuf);
542 address base =
543 __ start_a_stub(size_exception_handler());
544 if (base == NULL) return 0; // CodeBuffer::expand failed
545 int offset = __ offset();
547 __ block_comment("; emit_exception_handler");
549 cbuf.set_insts_mark();
550 __ relocate(relocInfo::runtime_call_type);
// patchable_jump emits a fixed-length sequence so the target can be
// re-patched later; size_exception_handler() accounts for it (16-aligned).
551 __ patchable_jump((address)OptoRuntime::exception_blob()->entry_point());
552 __ align(16);
553 assert(__ offset() - offset <= (int) size_exception_handler(), "overflow");
554 __ end_a_stub();
555 return offset;
556 }
558 // Emit deopt handler code.
// Emits the out-of-line deoptimization handler: a patchable call into
// SharedRuntime's deopt blob.  Returns the stub's start offset, or 0
// when the code buffer could not be expanded.
559 int HandlerImpl::emit_deopt_handler(CodeBuffer& cbuf) {
560 // Note that the code buffer's insts_mark is always relative to insts.
561 // That's why we must use the macroassembler to generate a handler.
562 MacroAssembler _masm(&cbuf);
563 address base =
564 __ start_a_stub(size_deopt_handler());
566 // FIXME
567 if (base == NULL) return 0; // CodeBuffer::expand failed
568 int offset = __ offset();
570 __ block_comment("; emit_deopt_handler");
572 cbuf.set_insts_mark();
573 __ relocate(relocInfo::runtime_call_type);
// A call (not a jump) so the deopt blob can find the return address.
574 __ patchable_call(SharedRuntime::deopt_blob()->unpack());
575 __ align(16);
576 assert(__ offset() - offset <= (int) size_deopt_handler(), "overflow");
577 __ end_a_stub();
578 return offset;
579 }
582 const bool Matcher::match_rule_supported(int opcode) {
583 if (!has_match_rule(opcode))
584 return false;
586 switch (opcode) {
587 //Op_CountLeadingZerosI Op_CountLeadingZerosL can be deleted, all MIPS CPUs support clz & dclz.
588 case Op_CountLeadingZerosI:
589 case Op_CountLeadingZerosL:
590 if (!UseCountLeadingZerosInstruction)
591 return false;
592 break;
593 case Op_CountTrailingZerosI:
594 case Op_CountTrailingZerosL:
595 if (!UseCountTrailingZerosInstruction)
596 return false;
597 break;
598 }
600 return true; // Per default match rules are supported.
601 }
603 //FIXME
604 // emit call stub, compiled java to interpreter
// Emits the static call stub that is patched when a compiled-to-compiled
// call is converted to call the interpreter instead.  The stub loads a
// (initially null) methodOop into S3 and jumps to a (initially -1)
// resolution target; both are fixed up later via the relocations below.
// NOTE(review): the "mov rbx,0 / jmp -1" sketch is x86 wording inherited
// from the original file; the MIPS sequence is patchable_set48 + jump.
605 void emit_java_to_interp(CodeBuffer &cbuf ) {
606 // Stub is fixed up when the corresponding call is converted from calling
607 // compiled code to calling interpreted code.
608 // mov rbx,0
609 // jmp -1
611 address mark = cbuf.insts_mark(); // get mark within main instrs section
613 // Note that the code buffer's insts_mark is always relative to insts.
614 // That's why we must use the macroassembler to generate a stub.
615 MacroAssembler _masm(&cbuf);
617 address base =
618 __ start_a_stub(Compile::MAX_stubs_size);
619 if (base == NULL) return; // CodeBuffer::expand failed
620 // static stub relocation stores the instruction address of the call
622 __ relocate(static_stub_Relocation::spec(mark), 0);
624 // static stub relocation also tags the methodOop in the code-stream.
625 __ patchable_set48(S3, (long)0);
626 // This is recognized as unresolved by relocs/nativeInst/ic code
628 __ relocate(relocInfo::runtime_call_type);
630 cbuf.set_insts_mark();
631 address call_pc = (address)-1;
632 __ patchable_jump(call_pc);
633 __ align(16);
634 __ end_a_stub();
635 // Update current stubs pointer and restore code_end.
636 }
638 // size of call stub, compiled java to interpreter
// Must cover everything emit_java_to_interp() emits: the 4-instruction
// li48 (patchable_set48) plus the patchable jump, rounded up to 16.
639 uint size_java_to_interp() {
640 int size = 4 * 4 + NativeCall::instruction_size; // sizeof(li48) + NativeCall::instruction_size
641 return round_to(size, 16);
642 }
644 // relocation entries for call stub, compiled java to interpreter
645 uint reloc_java_to_interp() {
646 return 16; // in emit_java_to_interp + in Java_Static_Call
647 }
// Returns true when 'offset' fits in the signed 16-bit displacement of a
// MIPS conditional branch.  Long-branch expansion is not implemented, so
// larger offsets stop the VM via assert/Unimplemented().
// Fixed: control could fall off the end of this value-returning function
// (undefined behavior if the compiler does not treat Unimplemented() as
// noreturn); an explicit return now closes the failure path.
649 bool Matcher::is_short_branch_offset(int rule, int br_size, int offset) {
650 if( Assembler::is_simm16(offset) ) return true;
651 else {
652 assert(false, "Not implemented yet !" );
653 Unimplemented();
654 }
  return false; // unreachable; silences missing-return warnings
655 }
658 // No additional cost for CMOVL.
659 const int Matcher::long_cmove_cost() { return 0; }
661 // No CMOVF/CMOVD with SSE2
// NOTE(review): the SSE2 remark above is inherited from the x86 AD file;
// the effective behavior is that float/double conditional moves are
// limited by ConditionalMoveLimit.
662 const int Matcher::float_cmove_cost() { return ConditionalMoveLimit; }
664 // Does the CPU require late expand (see block.cpp for description of late expand)?
665 const bool Matcher::require_postalloc_expand = false;
667 // Should the Matcher clone shifts on addressing modes, expecting them
668 // to be subsumed into complex addressing expressions or compute them
669 // into registers? True for Intel but false for most RISCs
670 const bool Matcher::clone_shift_expressions = false;
672 // Do we need to mask the count passed to shift instructions or does
673 // the cpu only look at the lower 5/6 bits anyway?
674 const bool Matcher::need_masked_shift_count = false;
// Compressed oops: the heap-base add/shift is not folded into complex
// addressing expressions on this platform.
676 bool Matcher::narrow_oop_use_complex_address() {
677 NOT_LP64(ShouldNotCallThis());
678 assert(UseCompressedOops, "only for compressed oops code");
679 return false;
680 }
682 bool Matcher::narrow_klass_use_complex_address() {
683 NOT_LP64(ShouldNotCallThis());
684 assert(UseCompressedClassPointers, "only for compressed klass code");
685 return false;
686 }
688 // This is UltraSparc specific, true just means we have fast l2f conversion
689 const bool Matcher::convL2FSupported(void) {
690 return true;
691 }
693 // Max vector size in bytes. 0 if not supported.
// Only 8-byte vectors are supported; vectorization is enabled with
// -XX:MaxVectorSize=8 (not on by default — see the change note for why).
694 const int Matcher::vector_width_in_bytes(BasicType bt) {
695 if (MaxVectorSize == 0)
696 return 0;
697 assert(MaxVectorSize == 8, "");
698 return 8;
699 }
701 // Vector ideal reg
// 8-byte vectors map to Op_VecD; any other size is a matcher bug.
702 const int Matcher::vector_ideal_reg(int size) {
703 assert(MaxVectorSize == 8, "");
704 switch(size) {
705 case 8: return Op_VecD;
706 }
707 ShouldNotReachHere();
708 return 0;
709 }
711 // Only lowest bits of xmm reg are used for vector shift count.
// NOTE(review): "xmm" is x86 wording; vector shifts are simply not
// supported on this platform, hence the fatal() below.
712 const int Matcher::vector_shift_count_ideal_reg(int size) {
713 fatal("vector shift is not supported");
714 return Node::NotAMachineReg;
715 }
717 // Limits on vector size (number of elements) loaded into vector.
718 const int Matcher::max_vector_size(const BasicType bt) {
719 assert(is_java_primitive(bt), "only primitive type vectors");
720 return vector_width_in_bytes(bt)/type2aelembytes(bt);
721 }
723 const int Matcher::min_vector_size(const BasicType bt) {
724 return max_vector_size(bt); // Same as max.
725 }
727 // MIPS supports misaligned vectors store/load? FIXME
728 const bool Matcher::misaligned_vectors_ok() {
729 return false;
730 //return !AlignVector; // can be changed by flag
731 }
733 // Register for DIVI projection of divmodI
// The fused DivMod nodes are not matched on this platform, so none of
// these projection masks should ever be requested.
734 RegMask Matcher::divI_proj_mask() {
735 ShouldNotReachHere();
736 return RegMask();
737 }
739 // Register for MODI projection of divmodI
740 RegMask Matcher::modI_proj_mask() {
741 ShouldNotReachHere();
742 return RegMask();
743 }
745 // Register for DIVL projection of divmodL
746 RegMask Matcher::divL_proj_mask() {
747 ShouldNotReachHere();
748 return RegMask();
749 }
// Map an OptoReg number to an FPU-stack offset.
751 int Matcher::regnum_to_fpu_offset(int regnum) {
752 return regnum - 32; // The FP registers are in the second chunk
753 }
756 const bool Matcher::isSimpleConstant64(jlong value) {
757 // Will one (StoreL ConL) be cheaper than two (StoreI ConI)?.
758 return true;
759 }
762 // Return whether or not this register is ever used as an argument. This
763 // function is used on startup to build the trampoline stubs in generateOptoStub.
764 // Registers not mentioned will be killed by the VM call in the trampoline, and
765 // arguments in those registers not be available to the callee.
766 bool Matcher::can_be_java_arg( int reg ) {
767 /* Refer to: [sharedRuntime_mips_64.cpp] SharedRuntime::java_calling_convention() */
// Integer argument registers: T0 and A0..A7 (and their high halves).
768 if ( reg == T0_num || reg == T0_H_num
769 || reg == A0_num || reg == A0_H_num
770 || reg == A1_num || reg == A1_H_num
771 || reg == A2_num || reg == A2_H_num
772 || reg == A3_num || reg == A3_H_num
773 || reg == A4_num || reg == A4_H_num
774 || reg == A5_num || reg == A5_H_num
775 || reg == A6_num || reg == A6_H_num
776 || reg == A7_num || reg == A7_H_num )
777 return true;
// Floating-point argument registers: F12..F19 (and their high halves).
779 if ( reg == F12_num || reg == F12_H_num
780 || reg == F13_num || reg == F13_H_num
781 || reg == F14_num || reg == F14_H_num
782 || reg == F15_num || reg == F15_H_num
783 || reg == F16_num || reg == F16_H_num
784 || reg == F17_num || reg == F17_H_num
785 || reg == F18_num || reg == F18_H_num
786 || reg == F19_num || reg == F19_H_num )
787 return true;
789 return false;
790 }
// Any register that can carry a Java argument may be spilled.
792 bool Matcher::is_spillable_arg( int reg ) {
793 return can_be_java_arg(reg);
794 }
// No hand-written assembler fast path for long division by constant.
796 bool Matcher::use_asm_for_ldiv_by_con( jlong divisor ) {
797 return false;
798 }
800 // Register for MODL projection of divmodL
// Like the other divmod projections above, never reached on this platform.
801 RegMask Matcher::modL_proj_mask() {
802 ShouldNotReachHere();
803 return RegMask();
804 }
// Register mask used to save SP across a MethodHandle invoke (FP here).
806 const RegMask Matcher::method_handle_invoke_SP_save_mask() {
807 return FP_REG_mask();
808 }
810 // MIPS doesn't support AES intrinsics
811 const bool Matcher::pass_original_key_for_aes() {
812 return false;
813 }
// Padding (in bytes) inserted before each direct-call node so the call
// sequence (sketched in the instruction comments below) starts at the
// node's required alignment.
815 int CallLeafNoFPDirectNode::compute_padding(int current_offset) const {
816 //lui
817 //ori
818 //dsll
819 //ori
821 //jalr
822 //nop
824 return round_to(current_offset, alignment_required()) - current_offset;
825 }
// Same sequence and padding rule as CallLeafNoFPDirect above.
827 int CallLeafDirectNode::compute_padding(int current_offset) const {
828 //lui
829 //ori
830 //dsll
831 //ori
833 //jalr
834 //nop
836 return round_to(current_offset, alignment_required()) - current_offset;
837 }
// Same sequence and padding rule as the leaf-call nodes above.
839 int CallRuntimeDirectNode::compute_padding(int current_offset) const {
840 //lui
841 //ori
842 //dsll
843 //ori
845 //jalr
846 //nop
848 return round_to(current_offset, alignment_required()) - current_offset;
849 }
851 // If CPU can load and store mis-aligned doubles directly then no fixup is
852 // needed. Else we split the double into 2 integer pieces and move it
853 // piece-by-piece. Only happens when passing doubles into C code as the
854 // Java calling convention forces doubles to be aligned.
855 const bool Matcher::misaligned_doubles_ok = false;
856 // Do floats take an entire double register or just half?
857 //const bool Matcher::float_in_double = true;
858 bool Matcher::float_in_double() { return false; }
859 // Threshold size for cleararray.
860 const int Matcher::init_array_short_size = 8 * BytesPerLong;
861 // Do ints take an entire long register or just half?
862 const bool Matcher::int_in_long = true;
863 // Is it better to copy float constants, or load them directly from memory?
864 // Intel can load a float constant from a direct address, requiring no
865 // extra registers. Most RISCs will have to materialize an address into a
866 // register first, so they would do better to copy the constant from stack.
867 const bool Matcher::rematerialize_float_constants = false;
868 // Advertise here if the CPU requires explicit rounding operations
869 // to implement the UseStrictFP mode.
870 const bool Matcher::strict_fp_requires_explicit_rounding = false;
871 // The ecx parameter to rep stos for the ClearArray node is in dwords.
// NOTE(review): "ecx"/"rep stos" is x86 wording; here the flag only means
// the ClearArray element count is in longwords, not bytes.
872 const bool Matcher::init_array_count_is_in_bytes = false;
875 // Indicate if the safepoint node needs the polling page as an input.
876 // Since MIPS doesn't have absolute addressing, it needs.
// NOTE(review): the comment above claims the polling page IS needed as an
// input, yet the method returns false — confirm which is intended.
877 bool SafePointNode::needs_polling_address_input() {
878 return false;
879 }
881 // !!!!! Special hack to get all type of calls to specify the byte offset
882 // from the start of the call to the point where the return address
883 // will point.
// Static call: 6 instructions x 4 bytes = 24 (sequence sketched below).
884 int MachCallStaticJavaNode::ret_addr_offset() {
885 //lui
886 //ori
887 //nop
888 //nop
889 //jalr
890 //nop
891 return 24;
892 }
// Dynamic call: 4-instruction inline-cache klass load plus the
// 6-instruction call sequence = 4*4 + 6*4 = 40 bytes.
894 int MachCallDynamicJavaNode::ret_addr_offset() {
895 //lui IC_Klass,
896 //ori IC_Klass,
897 //dsll IC_Klass
898 //ori IC_Klass
900 //lui T9
901 //ori T9
902 //nop
903 //nop
904 //jalr T9
905 //nop
906 return 4 * 4 + 4 * 6;
907 }
909 //=============================================================================
911 // Figure out which register class each belongs in: rc_int, rc_float, rc_stack
912 enum RC { rc_bad, rc_int, rc_float, rc_stack };
// Classify an allocator register name for spill-copy generation:
// invalid -> rc_bad, stack slot -> rc_stack, GPR -> rc_int, FPR -> rc_float.
913 static enum RC rc_class( OptoReg::Name reg ) {
914 if( !OptoReg::is_valid(reg) ) return rc_bad;
915 if (OptoReg::is_stack(reg)) return rc_stack;
916 VMReg r = OptoReg::as_VMReg(reg);
917 if (r->is_Register()) return rc_int;
918 assert(r->is_FloatRegister(), "must be");
919 return rc_float;
920 }
// Shared worker for MachSpillCopyNode::format/emit/size below.
// Depending on the arguments it emits code (cbuf != NULL), prints an
// assembly listing (cbuf == NULL, st != NULL, !do_size) or only computes
// the code size (do_size).  It handles a move between any combination of
// stack slot, GPR and FPR.  A (first, second) pair where 'first' is even
// and 'second' is the adjacent next slot is treated as one 64-bit value;
// anything else is moved as 32 bits.  Returns the size of the emitted
// code in bytes.
uint MachSpillCopyNode::implementation( CodeBuffer *cbuf, PhaseRegAlloc *ra_, bool do_size, outputStream* st ) const {
  // Get registers to move
  OptoReg::Name src_second = ra_->get_reg_second(in(1));
  OptoReg::Name src_first = ra_->get_reg_first(in(1));
  OptoReg::Name dst_second = ra_->get_reg_second(this );
  OptoReg::Name dst_first = ra_->get_reg_first(this );

  enum RC src_second_rc = rc_class(src_second);
  enum RC src_first_rc = rc_class(src_first);
  enum RC dst_second_rc = rc_class(dst_second);
  enum RC dst_first_rc = rc_class(dst_first);

  assert(OptoReg::is_valid(src_first) && OptoReg::is_valid(dst_first), "must move at least 1 register" );

  // Generate spill code!
  int size = 0;

  if( src_first == dst_first && src_second == dst_second )
    return 0;            // Self copy, no move

  if (src_first_rc == rc_stack) {
    // mem ->
    if (dst_first_rc == rc_stack) {
      // mem -> mem: bounce through scratch register AT.
      assert(src_second != dst_first, "overlap");
      if ((src_first & 1) == 0 && src_first + 1 == src_second &&
          (dst_first & 1) == 0 && dst_first + 1 == dst_second) {
        // 64-bit: one doubleword load + one doubleword store.
        int src_offset = ra_->reg2offset(src_first);
        int dst_offset = ra_->reg2offset(dst_first);
        if (cbuf) {
          MacroAssembler _masm(cbuf);
          __ ld(AT, Address(SP, src_offset));
          __ sd(AT, Address(SP, dst_offset));
#ifndef PRODUCT
        } else {
          if(!do_size){
            if (size != 0) st->print("\n\t");
            st->print("ld AT, [SP + #%d]\t# 64-bit mem-mem spill 1\n\t"
                      "sd AT, [SP + #%d]",
                      src_offset, dst_offset);
          }
#endif
        }
        size += 8;   // two 4-byte instructions
      } else {
        // 32-bit
        assert(!((src_first & 1) == 0 && src_first + 1 == src_second), "no transform");
        assert(!((dst_first & 1) == 0 && dst_first + 1 == dst_second), "no transform");
        // No pushl/popl, so:
        int src_offset = ra_->reg2offset(src_first);
        int dst_offset = ra_->reg2offset(dst_first);
        if (cbuf) {
          MacroAssembler _masm(cbuf);
          __ lw(AT, Address(SP, src_offset));
          __ sw(AT, Address(SP, dst_offset));
#ifndef PRODUCT
        } else {
          if(!do_size){
            if (size != 0) st->print("\n\t");
            st->print("lw AT, [SP + #%d] spill 2\n\t"
                      "sw AT, [SP + #%d]\n\t",
                      src_offset, dst_offset);
          }
#endif
        }
        size += 8;
      }
      return size;
    } else if (dst_first_rc == rc_int) {
      // mem -> gpr
      if ((src_first & 1) == 0 && src_first + 1 == src_second &&
          (dst_first & 1) == 0 && dst_first + 1 == dst_second) {
        // 64-bit
        int offset = ra_->reg2offset(src_first);
        if (cbuf) {
          MacroAssembler _masm(cbuf);
          __ ld(as_Register(Matcher::_regEncode[dst_first]), Address(SP, offset));
#ifndef PRODUCT
        } else {
          if(!do_size){
            if (size != 0) st->print("\n\t");
            st->print("ld %s, [SP + #%d]\t# spill 3",
                      Matcher::regName[dst_first],
                      offset);
          }
#endif
        }
        size += 4;
      } else {
        // 32-bit: lw sign-extends; lwu zero-extends for non-RegI values
        // (presumably narrow oops/pointers -- TODO confirm against matcher).
        assert(!((src_first & 1) == 0 && src_first + 1 == src_second), "no transform");
        assert(!((dst_first & 1) == 0 && dst_first + 1 == dst_second), "no transform");
        int offset = ra_->reg2offset(src_first);
        if (cbuf) {
          MacroAssembler _masm(cbuf);
          if (this->ideal_reg() == Op_RegI)
            __ lw(as_Register(Matcher::_regEncode[dst_first]), Address(SP, offset));
          else
            __ lwu(as_Register(Matcher::_regEncode[dst_first]), Address(SP, offset));
#ifndef PRODUCT
        } else {
          if(!do_size){
            if (size != 0) st->print("\n\t");
            if (this->ideal_reg() == Op_RegI)
              st->print("lw %s, [SP + #%d]\t# spill 4",
                        Matcher::regName[dst_first],
                        offset);
            else
              st->print("lwu %s, [SP + #%d]\t# spill 5",
                        Matcher::regName[dst_first],
                        offset);
          }
#endif
        }
        size += 4;
      }
      return size;
    } else if (dst_first_rc == rc_float) {
      // mem-> xmm  (FPR on MIPS; "xmm" naming inherited from the x86 file)
      if ((src_first & 1) == 0 && src_first + 1 == src_second &&
          (dst_first & 1) == 0 && dst_first + 1 == dst_second) {
        // 64-bit
        int offset = ra_->reg2offset(src_first);
        if (cbuf) {
          MacroAssembler _masm(cbuf);
          __ ldc1( as_FloatRegister(Matcher::_regEncode[dst_first]), Address(SP, offset));
#ifndef PRODUCT
        } else {
          if(!do_size){
            if (size != 0) st->print("\n\t");
            st->print("ldc1 %s, [SP + #%d]\t# spill 6",
                      Matcher::regName[dst_first],
                      offset);
          }
#endif
        }
        size += 4;
      } else {
        // 32-bit
        assert(!((src_first & 1) == 0 && src_first + 1 == src_second), "no transform");
        assert(!((dst_first & 1) == 0 && dst_first + 1 == dst_second), "no transform");
        int offset = ra_->reg2offset(src_first);
        if (cbuf) {
          MacroAssembler _masm(cbuf);
          __ lwc1( as_FloatRegister(Matcher::_regEncode[dst_first]), Address(SP, offset));
#ifndef PRODUCT
        } else {
          if(!do_size){
            if (size != 0) st->print("\n\t");
            st->print("lwc1 %s, [SP + #%d]\t# spill 7",
                      Matcher::regName[dst_first],
                      offset);
          }
#endif
        }
        size += 4;
      }
      return size;
    }
  } else if (src_first_rc == rc_int) {
    // gpr ->
    if (dst_first_rc == rc_stack) {
      // gpr -> mem
      if ((src_first & 1) == 0 && src_first + 1 == src_second &&
          (dst_first & 1) == 0 && dst_first + 1 == dst_second) {
        // 64-bit
        int offset = ra_->reg2offset(dst_first);
        if (cbuf) {
          MacroAssembler _masm(cbuf);
          __ sd(as_Register(Matcher::_regEncode[src_first]), Address(SP, offset));
#ifndef PRODUCT
        } else {
          if(!do_size){
            if (size != 0) st->print("\n\t");
            st->print("sd %s, [SP + #%d] # spill 8",
                      Matcher::regName[src_first],
                      offset);
          }
#endif
        }
        size += 4;
      } else {
        // 32-bit
        assert(!((src_first & 1) == 0 && src_first + 1 == src_second), "no transform");
        assert(!((dst_first & 1) == 0 && dst_first + 1 == dst_second), "no transform");
        int offset = ra_->reg2offset(dst_first);
        if (cbuf) {
          MacroAssembler _masm(cbuf);
          __ sw(as_Register(Matcher::_regEncode[src_first]), Address(SP, offset));
#ifndef PRODUCT
        } else {
          if(!do_size){
            if (size != 0) st->print("\n\t");
            st->print("sw %s, [SP + #%d]\t# spill 9",
                      Matcher::regName[src_first], offset);
          }
#endif
        }
        size += 4;
      }
      return size;
    } else if (dst_first_rc == rc_int) {
      // gpr -> gpr
      if ((src_first & 1) == 0 && src_first + 1 == src_second &&
          (dst_first & 1) == 0 && dst_first + 1 == dst_second) {
        // 64-bit
        if (cbuf) {
          MacroAssembler _masm(cbuf);
          __ move(as_Register(Matcher::_regEncode[dst_first]),
                  as_Register(Matcher::_regEncode[src_first]));
#ifndef PRODUCT
        } else {
          if(!do_size){
            if (size != 0) st->print("\n\t");
            st->print("move(64bit) %s <-- %s\t# spill 10",
                      Matcher::regName[dst_first],
                      Matcher::regName[src_first]);
          }
#endif
        }
        size += 4;
        return size;
      } else {
        // 32-bit: move_u32 zero-extends for RegI; otherwise a plain
        // 64-bit copy via daddu dst, src, R0.
        assert(!((src_first & 1) == 0 && src_first + 1 == src_second), "no transform");
        assert(!((dst_first & 1) == 0 && dst_first + 1 == dst_second), "no transform");
        if (cbuf) {
          MacroAssembler _masm(cbuf);
          if (this->ideal_reg() == Op_RegI)
            __ move_u32(as_Register(Matcher::_regEncode[dst_first]), as_Register(Matcher::_regEncode[src_first]));
          else
            __ daddu(as_Register(Matcher::_regEncode[dst_first]), as_Register(Matcher::_regEncode[src_first]), R0);

#ifndef PRODUCT
        } else {
          if(!do_size){
            if (size != 0) st->print("\n\t");
            st->print("move(32-bit) %s <-- %s\t# spill 11",
                      Matcher::regName[dst_first],
                      Matcher::regName[src_first]);
          }
#endif
        }
        size += 4;
        return size;
      }
    } else if (dst_first_rc == rc_float) {
      // gpr -> xmm
      if ((src_first & 1) == 0 && src_first + 1 == src_second &&
          (dst_first & 1) == 0 && dst_first + 1 == dst_second) {
        // 64-bit
        if (cbuf) {
          MacroAssembler _masm(cbuf);
          __ dmtc1(as_Register(Matcher::_regEncode[src_first]), as_FloatRegister(Matcher::_regEncode[dst_first]));
#ifndef PRODUCT
        } else {
          if(!do_size){
            if (size != 0) st->print("\n\t");
            // NOTE(review): the listing prints dst first, but the MIPS
            // dmtc1 assembly operand order is (src GPR, dst FPR) -- the
            // printed order looks swapped relative to real asm; confirm.
            st->print("dmtc1 %s, %s\t# spill 12",
                      Matcher::regName[dst_first],
                      Matcher::regName[src_first]);
          }
#endif
        }
        size += 4;
      } else {
        // 32-bit
        assert(!((src_first & 1) == 0 && src_first + 1 == src_second), "no transform");
        assert(!((dst_first & 1) == 0 && dst_first + 1 == dst_second), "no transform");
        if (cbuf) {
          MacroAssembler _masm(cbuf);
          __ mtc1( as_Register(Matcher::_regEncode[src_first]), as_FloatRegister(Matcher::_regEncode[dst_first]) );
#ifndef PRODUCT
        } else {
          if(!do_size){
            if (size != 0) st->print("\n\t");
            // NOTE(review): same operand-order caveat as spill 12 above.
            st->print("mtc1 %s, %s\t# spill 13",
                      Matcher::regName[dst_first],
                      Matcher::regName[src_first]);
          }
#endif
        }
        size += 4;
      }
      return size;
    }
  } else if (src_first_rc == rc_float) {
    // xmm ->
    if (dst_first_rc == rc_stack) {
      // xmm -> mem
      if ((src_first & 1) == 0 && src_first + 1 == src_second &&
          (dst_first & 1) == 0 && dst_first + 1 == dst_second) {
        // 64-bit
        int offset = ra_->reg2offset(dst_first);
        if (cbuf) {
          MacroAssembler _masm(cbuf);
          __ sdc1( as_FloatRegister(Matcher::_regEncode[src_first]), Address(SP, offset) );
#ifndef PRODUCT
        } else {
          if(!do_size){
            if (size != 0) st->print("\n\t");
            st->print("sdc1 %s, [SP + #%d]\t# spill 14",
                      Matcher::regName[src_first],
                      offset);
          }
#endif
        }
        size += 4;
      } else {
        // 32-bit
        assert(!((src_first & 1) == 0 && src_first + 1 == src_second), "no transform");
        assert(!((dst_first & 1) == 0 && dst_first + 1 == dst_second), "no transform");
        int offset = ra_->reg2offset(dst_first);
        if (cbuf) {
          MacroAssembler _masm(cbuf);
          __ swc1(as_FloatRegister(Matcher::_regEncode[src_first]), Address(SP, offset));
#ifndef PRODUCT
        } else {
          if(!do_size){
            if (size != 0) st->print("\n\t");
            st->print("swc1 %s, [SP + #%d]\t# spill 15",
                      Matcher::regName[src_first],
                      offset);
          }
#endif
        }
        size += 4;
      }
      return size;
    } else if (dst_first_rc == rc_int) {
      // xmm -> gpr
      if ((src_first & 1) == 0 && src_first + 1 == src_second &&
          (dst_first & 1) == 0 && dst_first + 1 == dst_second) {
        // 64-bit
        if (cbuf) {
          MacroAssembler _masm(cbuf);
          __ dmfc1( as_Register(Matcher::_regEncode[dst_first]), as_FloatRegister(Matcher::_regEncode[src_first]));
#ifndef PRODUCT
        } else {
          if(!do_size){
            if (size != 0) st->print("\n\t");
            st->print("dmfc1 %s, %s\t# spill 16",
                      Matcher::regName[dst_first],
                      Matcher::regName[src_first]);
          }
#endif
        }
        size += 4;
      } else {
        // 32-bit
        assert(!((src_first & 1) == 0 && src_first + 1 == src_second), "no transform");
        assert(!((dst_first & 1) == 0 && dst_first + 1 == dst_second), "no transform");
        if (cbuf) {
          MacroAssembler _masm(cbuf);
          __ mfc1( as_Register(Matcher::_regEncode[dst_first]), as_FloatRegister(Matcher::_regEncode[src_first]));
#ifndef PRODUCT
        } else {
          if(!do_size){
            if (size != 0) st->print("\n\t");
            st->print("mfc1 %s, %s\t# spill 17",
                      Matcher::regName[dst_first],
                      Matcher::regName[src_first]);
          }
#endif
        }
        size += 4;
      }
      return size;
    } else if (dst_first_rc == rc_float) {
      // xmm -> xmm
      if ((src_first & 1) == 0 && src_first + 1 == src_second &&
          (dst_first & 1) == 0 && dst_first + 1 == dst_second) {
        // 64-bit
        if (cbuf) {
          MacroAssembler _masm(cbuf);
          __ mov_d( as_FloatRegister(Matcher::_regEncode[dst_first]), as_FloatRegister(Matcher::_regEncode[src_first]));
#ifndef PRODUCT
        } else {
          if(!do_size){
            if (size != 0) st->print("\n\t");
            st->print("mov_d %s <-- %s\t# spill 18",
                      Matcher::regName[dst_first],
                      Matcher::regName[src_first]);
          }
#endif
        }
        size += 4;
      } else {
        // 32-bit
        assert(!((src_first & 1) == 0 && src_first + 1 == src_second), "no transform");
        assert(!((dst_first & 1) == 0 && dst_first + 1 == dst_second), "no transform");
        if (cbuf) {
          MacroAssembler _masm(cbuf);
          __ mov_s( as_FloatRegister(Matcher::_regEncode[dst_first]), as_FloatRegister(Matcher::_regEncode[src_first]));
#ifndef PRODUCT
        } else {
          if(!do_size){
            if (size != 0) st->print("\n\t");
            st->print("mov_s %s <-- %s\t# spill 19",
                      Matcher::regName[dst_first],
                      Matcher::regName[src_first]);
          }
#endif
        }
        size += 4;
      }
      return size;
    }
  }

  // Unhandled register-class combination: should never be reached.
  assert(0," foo ");
  Unimplemented();
  return size;   // not reached
}
#ifndef PRODUCT
// Print the spill copy (debug builds only); delegates to implementation()
// in print-only mode (cbuf == NULL, do_size == false).
void MachSpillCopyNode::format( PhaseRegAlloc *ra_, outputStream* st ) const {
  implementation( NULL, ra_, false, st );
}
#endif

// Emit the spill-copy code into the code buffer.
void MachSpillCopyNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
  implementation( &cbuf, ra_, false, NULL );
}

// Size of the spill copy in bytes; implementation() in sizing-only mode.
uint MachSpillCopyNode::size(PhaseRegAlloc *ra_) const {
  return implementation( NULL, ra_, true, NULL );
}
1353 //=============================================================================
1354 #
#ifndef PRODUCT
// Debug listing for a breakpoint node.  "INT3" is the x86 name kept from
// the original file; the MIPS emission below is the int3() pseudo.
void MachBreakpointNode::format( PhaseRegAlloc *, outputStream* st ) const {
  st->print("INT3");
}
#endif

// Emit a breakpoint trap.
void MachBreakpointNode::emit(CodeBuffer &cbuf, PhaseRegAlloc* ra_) const {
  MacroAssembler _masm(&cbuf);
  __ int3();
}

// Size in bytes: computed generically from the emitted code.
uint MachBreakpointNode::size(PhaseRegAlloc* ra_) const {
  return MachNode::size(ra_);
}
1372 //=============================================================================
1373 #ifndef PRODUCT
1374 void MachEpilogNode::format( PhaseRegAlloc *ra_, outputStream* st ) const {
1375 Compile *C = ra_->C;
1376 int framesize = C->frame_size_in_bytes();
1378 assert((framesize & (StackAlignmentInBytes-1)) == 0, "frame size not aligned");
1380 st->print("daddiu SP, SP, %d # Rlease stack @ MachEpilogNode",framesize);
1381 st->cr(); st->print("\t");
1382 if (UseLoongsonISA) {
1383 st->print("gslq RA, FP, SP, %d # Restore FP & RA @ MachEpilogNode", -wordSize*2);
1384 } else {
1385 st->print("ld RA, SP, %d # Restore RA @ MachEpilogNode", -wordSize);
1386 st->cr(); st->print("\t");
1387 st->print("ld FP, SP, %d # Restore FP @ MachEpilogNode", -wordSize*2);
1388 }
1390 if( do_polling() && C->is_method_compilation() ) {
1391 st->print("Poll Safepoint # MachEpilogNode");
1392 }
1393 }
1394 #endif
// Emit the method epilogue: pop the frame, restore RA and FP from below
// the (now-restored) SP, then poll the safepoint page if required.
void MachEpilogNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
  Compile *C = ra_->C;
  MacroAssembler _masm(&cbuf);
  int framesize = C->frame_size_in_bytes();

  assert((framesize & (StackAlignmentInBytes-1)) == 0, "frame size not aligned");

  // Release the frame first; RA/FP were saved relative to the caller SP,
  // i.e. at negative offsets from the restored SP (see MachPrologNode).
  __ daddiu(SP, SP, framesize);

  if (UseLoongsonISA) {
    // Pair-load RA and FP in a single Loongson gslq.
    __ gslq(RA, FP, SP, -wordSize*2);
  } else {
    __ ld(RA, SP, -wordSize );
    __ ld(FP, SP, -wordSize*2 );
  }

  if( do_polling() && C->is_method_compilation() ) {
    // Safepoint poll: load from the polling page; the relocation marks
    // this as a poll-return site for the signal handler.
    __ set64(AT, (long)os::get_polling_page());
    __ relocate(relocInfo::poll_return_type);
    __ lw(AT, AT, 0);
  }
}
// Epilogue size varies (Loongson vs. plain, polling); compute it generically.
uint MachEpilogNode::size(PhaseRegAlloc *ra_) const {
  return MachNode::size(ra_); // too many variables; just compute it the hard way
}

// Number of relocation entries the epilogue may need.
int MachEpilogNode::reloc() const {
  return 0; // a large enough number
}

const Pipeline * MachEpilogNode::pipeline() const {
  return MachNode::pipeline_class();
}

// Offset of the safepoint poll within the epilogue (none special here).
int MachEpilogNode::safepoint_offset() const { return 0; }
1433 //=============================================================================
#ifndef PRODUCT
// Debug listing: the box-lock node materializes the stack address of the
// monitor box (SP + offset) into its destination register.
void BoxLockNode::format( PhaseRegAlloc *ra_, outputStream* st ) const {
  int offset = ra_->reg2offset(in_RegMask(0).find_first_elem());
  int reg = ra_->get_reg_first(this);
  st->print("ADDI %s, SP, %d @BoxLockNode",Matcher::regName[reg],offset);
}
#endif

// One 4-byte addi instruction (see BoxLockNode::emit).
uint BoxLockNode::size(PhaseRegAlloc *ra_) const {
  return 4;
}
1448 void BoxLockNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
1449 MacroAssembler _masm(&cbuf);
1450 int offset = ra_->reg2offset(in_RegMask(0).find_first_elem());
1451 int reg = ra_->get_encode(this);
1453 __ addi(as_Register(reg), SP, offset);
1454 /*
1455 if( offset >= 128 ) {
1456 emit_opcode(cbuf, 0x8D); // LEA reg,[SP+offset]
1457 emit_rm(cbuf, 0x2, reg, 0x04);
1458 emit_rm(cbuf, 0x0, 0x04, SP_enc);
1459 emit_d32(cbuf, offset);
1460 }
1461 else {
1462 emit_opcode(cbuf, 0x8D); // LEA reg,[SP+offset]
1463 emit_rm(cbuf, 0x1, reg, 0x04);
1464 emit_rm(cbuf, 0x0, 0x04, SP_enc);
1465 emit_d8(cbuf, offset);
1466 }
1467 */
1468 }
//static int sizeof_FFree_Float_Stack_All = -1;

// Byte offset from the start of a runtime call to the return address:
// the full native call sequence (six 4-byte instructions, listed below).
int MachCallRuntimeNode::ret_addr_offset() {
  //lui
  //ori
  //dsll
  //ori
  //jalr
  //nop
  assert(NativeCall::instruction_size == 24, "in MachCallRuntimeNode::ret_addr_offset()");
  return NativeCall::instruction_size;
  // return 16;
}
1489 //=============================================================================
#ifndef PRODUCT
// Debug listing: _count nops, 4 bytes each.
void MachNopNode::format( PhaseRegAlloc *, outputStream* st ) const {
  st->print("NOP \t# %d bytes pad for loops and calls", 4 * _count);
}
#endif
1496 void MachNopNode::emit(CodeBuffer &cbuf, PhaseRegAlloc * ) const {
1497 MacroAssembler _masm(&cbuf);
1498 int i = 0;
1499 for(i = 0; i < _count; i++)
1500 __ nop();
1501 }
// _count nops at 4 bytes per instruction.
uint MachNopNode::size(PhaseRegAlloc *) const {
  return 4 * _count;
}
const Pipeline* MachNopNode::pipeline() const {
  return MachNode::pipeline_class();
}
1510 //=============================================================================
1512 //=============================================================================
#ifndef PRODUCT
// Debug listing for the unverified entry point (inline-cache klass check);
// mirrors MachUEPNode::emit below.
void MachUEPNode::format( PhaseRegAlloc *ra_, outputStream* st ) const {
  st->print_cr("load_klass(T9, T0)");
  st->print_cr("\tbeq(T9, iCache, L)");
  st->print_cr("\tnop");
  st->print_cr("\tjmp(SharedRuntime::get_ic_miss_stub(), relocInfo::runtime_call_type)");
  st->print_cr("\tnop");
  st->print_cr("\tnop");
  st->print_cr(" L:");
}
#endif
// Unverified entry point: compare the receiver's klass (loaded into T9)
// against the inline-cache klass register; on mismatch jump to the
// IC-miss stub, otherwise fall through to the verified entry point.
void MachUEPNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
  MacroAssembler _masm(&cbuf);
#ifdef ASSERT
  //uint code_size = cbuf.code_size();
#endif
  int ic_reg = Matcher::inline_cache_reg_encode();
  Label L;
  Register receiver = T0;       // calling convention: receiver in T0
  Register iCache = as_Register(ic_reg);
  __ load_klass(T9, receiver);
  __ beq(T9, iCache, L);
  __ nop();                     // branch delay slot

  __ relocate(relocInfo::runtime_call_type);
  __ patchable_jump((address)SharedRuntime::get_ic_miss_stub());

  /* WARNING these NOPs are critical so that verified entry point is properly
   * 8 bytes aligned for patching by NativeJump::patch_verified_entry() */
  __ align(CodeEntryAlignment);
  __ bind(L);
}
// UEP size depends on alignment padding; compute it generically.
uint MachUEPNode::size(PhaseRegAlloc *ra_) const {
  return MachNode::size(ra_);
}
1554 //=============================================================================
// The constant-table base may live in any pointer register.
const RegMask& MachConstantBaseNode::_out_RegMask = P_REG_mask();

// The table base points at the start of the constant section; entries are
// addressed with absolute materialization, so no bias is needed.
int Compile::ConstantTable::calculate_table_base_offset() const {
  return 0; // absolute addressing, no offset
}

// This port emits the base load directly; no post-allocation expansion.
bool MachConstantBaseNode::requires_postalloc_expand() const { return false; }
void MachConstantBaseNode::postalloc_expand(GrowableArray <Node *> *nodes, PhaseRegAlloc *ra_) {
  ShouldNotReachHere();
}
// Materialize the constant-table base address into the allocated register
// with a patchable 48-bit immediate load (relocated as internal_pc so it
// is fixed up when the code is moved).
void MachConstantBaseNode::emit(CodeBuffer& cbuf, PhaseRegAlloc* ra_) const {
  Compile* C = ra_->C;
  Compile::ConstantTable& constant_table = C->constant_table();
  MacroAssembler _masm(&cbuf);

  Register Rtoc = as_Register(ra_->get_encode(this));
  CodeSection* consts_section = __ code()->consts();
  int consts_size = consts_section->align_at_start(consts_section->size());
  assert(constant_table.size() == consts_size, "must be equal");

  if (consts_section->size()) {
    // Materialize the constant table base.
    // table_base_offset() is 0 here (see calculate_table_base_offset).
    address baseaddr = consts_section->start() + -(constant_table.table_base_offset());
    // RelocationHolder rspec = internal_word_Relocation::spec(baseaddr);
    __ relocate(relocInfo::internal_pc_type);
    __ patchable_set48(Rtoc, (long)baseaddr);
  }
}
// Fixed size: patchable_set48 always emits four 4-byte instructions.
uint MachConstantBaseNode::size(PhaseRegAlloc* ra_) const {
  // patchable_set48 (4 insts)
  return 4 * 4;
}

#ifndef PRODUCT
void MachConstantBaseNode::format(PhaseRegAlloc* ra_, outputStream* st) const {
  Register r = as_Register(ra_->get_encode(this));
  st->print("patchable_set48 %s, &constanttable (constant table base) @ MachConstantBaseNode", r->name());
}
#endif
1599 //=============================================================================
#ifndef PRODUCT
// Debug listing of the method prologue; mirrors MachPrologNode::emit:
// optional stack bang, save RA/FP below SP, set up FP, allocate the frame.
void MachPrologNode::format( PhaseRegAlloc *ra_, outputStream* st ) const {
  Compile* C = ra_->C;

  int framesize = C->frame_size_in_bytes();
  int bangsize = C->bang_size_in_bytes();
  assert((framesize & (StackAlignmentInBytes-1)) == 0, "frame size not aligned");

  // Calls to C2R adapters often do not accept exceptional returns.
  // We require that their callers must bang for them. But be careful, because
  // some VM calls (such as call site linkage) can use several kilobytes of
  // stack. But the stack safety zone should account for that.
  // See bugs 4446381, 4468289, 4497237.
  if (C->need_stack_bang(bangsize)) {
    st->print_cr("# stack bang"); st->print("\t");
  }
  if (UseLoongsonISA) {
    // Loongson pair-store saves RA and FP in one instruction.
    st->print("gssq RA, FP, %d(SP) @ MachPrologNode\n\t", -wordSize*2);
  } else {
    st->print("sd RA, %d(SP) @ MachPrologNode\n\t", -wordSize);
    st->print("sd FP, %d(SP) @ MachPrologNode\n\t", -wordSize*2);
  }
  st->print("daddiu FP, SP, -%d \n\t", wordSize*2);
  st->print("daddiu SP, SP, -%d \t",framesize);
}
#endif
// Emit the method prologue: optional stack-overflow bang, save RA/FP at
// negative offsets from the incoming SP, establish FP, allocate the frame,
// and leave room for verified-entry patching.
void MachPrologNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
  Compile* C = ra_->C;
  MacroAssembler _masm(&cbuf);

  int framesize = C->frame_size_in_bytes();
  int bangsize = C->bang_size_in_bytes();

  assert((framesize & (StackAlignmentInBytes-1)) == 0, "frame size not aligned");

  if (C->need_stack_bang(bangsize)) {
    __ generate_stack_overflow_check(bangsize);
  }

  if (UseLoongsonISA) {
    // Pair-store RA and FP with a single Loongson gssq.
    __ gssq(RA, FP, SP, -wordSize*2);
  } else {
    __ sd(RA, SP, -wordSize);
    __ sd(FP, SP, -wordSize*2);
  }
  __ daddiu(FP, SP, -wordSize*2);
  __ daddiu(SP, SP, -framesize);
  __ nop(); /* 2013.10.22 Jin: Make enough room for patch_verified_entry() */
  __ nop();

  C->set_frame_complete(cbuf.insts_size());
  if (C->has_mach_constant_base_node()) {
    // NOTE: We set the table base offset here because users might be
    // emitted before MachConstantBaseNode.
    Compile::ConstantTable& constant_table = C->constant_table();
    constant_table.set_table_base_offset(constant_table.calculate_table_base_offset());
  }

}
// Prologue size varies (bang, Loongson pair-store); compute it generically.
uint MachPrologNode::size(PhaseRegAlloc *ra_) const {
  return MachNode::size(ra_); // too many variables; just compute it the hard way
}

// Number of relocation entries the prologue may need.
int MachPrologNode::reloc() const {
  return 0; // a large enough number
}
1671 %}
1673 //----------ENCODING BLOCK-----------------------------------------------------
1674 // This block specifies the encoding classes used by the compiler to output
1675 // byte streams. Encoding classes generate functions which are called by
1676 // Machine Instruction Nodes in order to generate the bit encoding of the
1677 // instruction. Operands specify their base encoding interface with the
// interface keyword. There are currently four supported interfaces,
1679 // REG_INTER, CONST_INTER, MEMORY_INTER, & COND_INTER. REG_INTER causes an
1680 // operand to generate a function which returns its register number when
1681 // queried. CONST_INTER causes an operand to generate a function which
1682 // returns the value of the constant when queried. MEMORY_INTER causes an
1683 // operand to generate four functions which return the Base Register, the
1684 // Index Register, the Scale Value, and the Offset Value of the operand when
1685 // queried. COND_INTER causes an operand to generate six functions which
1686 // return the encoding code (ie - encoding bits for the instruction)
1687 // associated with each basic boolean condition for a conditional instruction.
1688 // Instructions specify two basic values for encoding. They use the
1689 // ins_encode keyword to specify their encoding class (which must be one of
1690 // the class names specified in the encoding block), and they use the
1691 // opcode keyword to specify, in order, their primary, secondary, and
1692 // tertiary opcode. Only the opcode sections which a particular instruction
1693 // needs for encoding need to be specified.
1694 encode %{
  //Load byte signed
  // Emits a sign-extending byte load from [base + index<<scale + disp].
  // Uses the Loongson gslbx (indexed load) when available; otherwise
  // composes the address in AT/T9.  disp outside simm16 is materialized
  // into T9 first.
  // NOTE(review): address arithmetic here uses the 32-bit addu while the
  // sibling load_UB_enc uses daddu -- presumably safe because the upper
  // address bits are known, but confirm on 64-bit heaps.
  enc_class load_B_enc (mRegI dst, memory mem) %{
    MacroAssembler _masm(&cbuf);
    int dst = $dst$$reg;
    int base = $mem$$base;
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;

    if( index != 0 ) {
      if( Assembler::is_simm16(disp) ) {
        if( UseLoongsonISA ) {
          if (scale == 0) {
            __ gslbx(as_Register(dst), as_Register(base), as_Register(index), disp);
          } else {
            __ dsll(AT, as_Register(index), scale);
            __ gslbx(as_Register(dst), as_Register(base), AT, disp);
          }
        } else {
          if (scale == 0) {
            __ addu(AT, as_Register(base), as_Register(index));
          } else {
            __ dsll(AT, as_Register(index), scale);
            __ addu(AT, as_Register(base), AT);
          }
          __ lb(as_Register(dst), AT, disp);
        }
      } else {
        // disp does not fit in simm16: materialize it into T9.
        if (scale == 0) {
          __ addu(AT, as_Register(base), as_Register(index));
        } else {
          __ dsll(AT, as_Register(index), scale);
          __ addu(AT, as_Register(base), AT);
        }
        __ move(T9, disp);
        if( UseLoongsonISA ) {
          __ gslbx(as_Register(dst), AT, T9, 0);
        } else {
          __ addu(AT, AT, T9);
          __ lb(as_Register(dst), AT, 0);
        }
      }
    } else {
      // No index register: plain base + disp addressing.
      if( Assembler::is_simm16(disp) ) {
        __ lb(as_Register(dst), as_Register(base), disp);
      } else {
        __ move(T9, disp);
        if( UseLoongsonISA ) {
          __ gslbx(as_Register(dst), as_Register(base), T9, 0);
        } else {
          __ addu(AT, as_Register(base), T9);
          __ lb(as_Register(dst), AT, 0);
        }
      }
    }
  %}
  //Load byte unsigned
  // Emits a zero-extending byte load (lbu) from [base + index<<scale + disp].
  // Address is composed in AT with 64-bit daddu; disp outside simm16 is
  // materialized into T9 first.
  enc_class load_UB_enc (mRegI dst, memory mem) %{
    MacroAssembler _masm(&cbuf);
    int dst = $dst$$reg;
    int base = $mem$$base;
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;

    if( index != 0 ) {
      // AT = base + (index << scale)
      if (scale == 0) {
        __ daddu(AT, as_Register(base), as_Register(index));
      } else {
        __ dsll(AT, as_Register(index), scale);
        __ daddu(AT, as_Register(base), AT);
      }
      if( Assembler::is_simm16(disp) ) {
        __ lbu(as_Register(dst), AT, disp);
      } else {
        __ move(T9, disp);
        __ daddu(AT, AT, T9);
        __ lbu(as_Register(dst), AT, 0);
      }
    } else {
      // No index register: plain base + disp addressing.
      if( Assembler::is_simm16(disp) ) {
        __ lbu(as_Register(dst), as_Register(base), disp);
      } else {
        __ move(T9, disp);
        __ daddu(AT, as_Register(base), T9);
        __ lbu(as_Register(dst), AT, 0);
      }
    }
  %}
  // Store a byte from a GPR to [base + index<<scale + disp].
  // Prefers the Loongson gssbx indexed store; its immediate is only 8 bits
  // (hence the is_simm(disp, 8) checks), so larger displacements fall back
  // to composing the address in AT/T9 and using sb.
  enc_class store_B_reg_enc (memory mem, mRegI src) %{
    MacroAssembler _masm(&cbuf);
    int src = $src$$reg;
    int base = $mem$$base;
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;

    if( index != 0 ) {
      if (scale == 0) {
        if( Assembler::is_simm(disp, 8) ) {
          if (UseLoongsonISA) {
            __ gssbx(as_Register(src), as_Register(base), as_Register(index), disp);
          } else {
            __ addu(AT, as_Register(base), as_Register(index));
            __ sb(as_Register(src), AT, disp);
          }
        } else if( Assembler::is_simm16(disp) ) {
          __ addu(AT, as_Register(base), as_Register(index));
          __ sb(as_Register(src), AT, disp);
        } else {
          // disp too large even for sb: materialize it into T9.
          __ addu(AT, as_Register(base), as_Register(index));
          __ move(T9, disp);
          if (UseLoongsonISA) {
            __ gssbx(as_Register(src), AT, T9, 0);
          } else {
            __ addu(AT, AT, T9);
            __ sb(as_Register(src), AT, 0);
          }
        }
      } else {
        // Scaled index: AT = index << scale, then same ladder as above.
        __ dsll(AT, as_Register(index), scale);
        if( Assembler::is_simm(disp, 8) ) {
          if (UseLoongsonISA) {
            __ gssbx(as_Register(src), AT, as_Register(base), disp);
          } else {
            __ addu(AT, as_Register(base), AT);
            __ sb(as_Register(src), AT, disp);
          }
        } else if( Assembler::is_simm16(disp) ) {
          __ addu(AT, as_Register(base), AT);
          __ sb(as_Register(src), AT, disp);
        } else {
          __ addu(AT, as_Register(base), AT);
          __ move(T9, disp);
          if (UseLoongsonISA) {
            __ gssbx(as_Register(src), AT, T9, 0);
          } else {
            __ addu(AT, AT, T9);
            __ sb(as_Register(src), AT, 0);
          }
        }
      }
    } else {
      // No index register: plain base + disp addressing.
      if( Assembler::is_simm16(disp) ) {
        __ sb(as_Register(src), as_Register(base), disp);
      } else {
        __ move(T9, disp);
        if (UseLoongsonISA) {
          __ gssbx(as_Register(src), as_Register(base), T9, 0);
        } else {
          __ addu(AT, as_Register(base), T9);
          __ sb(as_Register(src), AT, 0);
        }
      }
    }
  %}
  // Store a constant byte to [base + index<<scale + disp].
  // value == 0 is special-cased to store R0 directly (no temp needed);
  // otherwise the constant is materialized into T9 (or AT) first.
  // The Loongson path uses gssbx (8-bit immediate) when disp fits.
  enc_class store_B_immI_enc (memory mem, immI8 src) %{
    MacroAssembler _masm(&cbuf);
    int base = $mem$$base;
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;
    int value = $src$$constant;

    if( index != 0 ) {
      if (!UseLoongsonISA) {
        // Generic MIPS: compose the address in AT, then sb.
        if (scale == 0) {
          __ daddu(AT, as_Register(base), as_Register(index));
        } else {
          __ dsll(AT, as_Register(index), scale);
          __ daddu(AT, as_Register(base), AT);
        }
        if( Assembler::is_simm16(disp) ) {
          if (value == 0) {
            __ sb(R0, AT, disp);
          } else {
            __ move(T9, value);
            __ sb(T9, AT, disp);
          }
        } else {
          if (value == 0) {
            __ move(T9, disp);
            __ daddu(AT, AT, T9);
            __ sb(R0, AT, 0);
          } else {
            __ move(T9, disp);
            __ daddu(AT, AT, T9);
            __ move(T9, value);
            __ sb(T9, AT, 0);
          }
        }
      } else {
        // Loongson: gssbx where the 8-bit displacement allows it.
        if (scale == 0) {
          if( Assembler::is_simm(disp, 8) ) {
            if (value == 0) {
              __ gssbx(R0, as_Register(base), as_Register(index), disp);
            } else {
              __ move(T9, value);
              __ gssbx(T9, as_Register(base), as_Register(index), disp);
            }
          } else if( Assembler::is_simm16(disp) ) {
            __ daddu(AT, as_Register(base), as_Register(index));
            if (value == 0) {
              __ sb(R0, AT, disp);
            } else {
              __ move(T9, value);
              __ sb(T9, AT, disp);
            }
          } else {
            if (value == 0) {
              __ daddu(AT, as_Register(base), as_Register(index));
              __ move(T9, disp);
              __ gssbx(R0, AT, T9, 0);
            } else {
              // AT = base + disp, then index supplies the second operand.
              __ move(AT, disp);
              __ move(T9, value);
              __ daddu(AT, as_Register(base), AT);
              __ gssbx(T9, AT, as_Register(index), 0);
            }
          }

        } else {

          if( Assembler::is_simm(disp, 8) ) {
            __ dsll(AT, as_Register(index), scale);
            if (value == 0) {
              __ gssbx(R0, as_Register(base), AT, disp);
            } else {
              __ move(T9, value);
              __ gssbx(T9, as_Register(base), AT, disp);
            }
          } else if( Assembler::is_simm16(disp) ) {
            __ dsll(AT, as_Register(index), scale);
            __ daddu(AT, as_Register(base), AT);
            if (value == 0) {
              __ sb(R0, AT, disp);
            } else {
              __ move(T9, value);
              __ sb(T9, AT, disp);
            }
          } else {
            __ dsll(AT, as_Register(index), scale);
            if (value == 0) {
              __ daddu(AT, as_Register(base), AT);
              __ move(T9, disp);
              __ gssbx(R0, AT, T9, 0);
            } else {
              // AT = (index << scale) + disp; base is the gssbx base operand.
              __ move(T9, disp);
              __ daddu(AT, AT, T9);
              __ move(T9, value);
              __ gssbx(T9, as_Register(base), AT, 0);
            }
          }
        }
      }
    } else {
      // No index register: plain base + disp addressing.
      if( Assembler::is_simm16(disp) ) {
        if (value == 0) {
          __ sb(R0, as_Register(base), disp);
        } else {
          __ move(AT, value);
          __ sb(AT, as_Register(base), disp);
        }
      } else {
        if (value == 0) {
          __ move(T9, disp);
          if (UseLoongsonISA) {
            __ gssbx(R0, as_Register(base), T9, 0);
          } else {
            __ daddu(AT, as_Register(base), T9);
            __ sb(R0, AT, 0);
          }
        } else {
          __ move(T9, disp);
          if (UseLoongsonISA) {
            __ move(AT, value);
            __ gssbx(AT, as_Register(base), T9, 0);
          } else {
            __ daddu(AT, as_Register(base), T9);
            __ move(T9, value);
            __ sb(T9, AT, 0);
          }
        }
      }
    }
  %}
  // Store an immediate byte to memory, then emit a SYNC barrier (volatile
  // byte store).  Picks the shortest addressing sequence available:
  //   - Loongson gssbx: base + index_reg + simm8 in one instruction,
  //   - plain MIPS sb:  addr_reg + simm16,
  //   - otherwise disp is materialized into a scratch register first.
  // AT and T9 are clobbered as scratch registers throughout.
  enc_class store_B_immI_enc_sync (memory mem, immI8 src) %{
    MacroAssembler _masm(&cbuf);
    int base  = $mem$$base;
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp  = $mem$$disp;
    int value = $src$$constant;

    if( index != 0 ) {
      if ( UseLoongsonISA ) {
        if ( Assembler::is_simm(disp,8) ) {
          // disp fits in 8 bits: single gssbx (zero stores reuse R0).
          if ( scale == 0 ) {
            if ( value == 0 ) {
              __ gssbx(R0, as_Register(base), as_Register(index), disp);
            } else {
              __ move(AT, value);
              __ gssbx(AT, as_Register(base), as_Register(index), disp);
            }
          } else {
            __ dsll(AT, as_Register(index), scale);
            if ( value == 0 ) {
              __ gssbx(R0, as_Register(base), AT, disp);
            } else {
              __ move(T9, value);
              __ gssbx(T9, as_Register(base), AT, disp);
            }
          }
        } else if ( Assembler::is_simm16(disp) ) {
          // simm16 disp: fold base+index into AT, then sb with immediate.
          if ( scale == 0 ) {
            __ daddu(AT, as_Register(base), as_Register(index));
            if ( value == 0 ){
              __ sb(R0, AT, disp);
            } else {
              __ move(T9, value);
              __ sb(T9, AT, disp);
            }
          } else {
            __ dsll(AT, as_Register(index), scale);
            __ daddu(AT, as_Register(base), AT);
            if ( value == 0 ) {
              __ sb(R0, AT, disp);
            } else {
              __ move(T9, value);
              __ sb(T9, AT, disp);
            }
          }
        } else {
          // Large disp: materialize it, fold into the (scaled) index,
          // and let gssbx add the base.
          if ( scale == 0 ) {
            __ move(AT, disp);
            __ daddu(AT, as_Register(index), AT);
            if ( value == 0 ) {
              __ gssbx(R0, as_Register(base), AT, 0);
            } else {
              __ move(T9, value);
              __ gssbx(T9, as_Register(base), AT, 0);
            }
          } else {
            __ dsll(AT, as_Register(index), scale);
            __ move(T9, disp);
            __ daddu(AT, AT, T9);
            if ( value == 0 ) {
              __ gssbx(R0, as_Register(base), AT, 0);
            } else {
              __ move(T9, value);
              __ gssbx(T9, as_Register(base), AT, 0);
            }
          }
        }
      } else { //not use loongson isa
        // Generic MIPS path: AT = base + (index << scale), then sb.
        if (scale == 0) {
          __ daddu(AT, as_Register(base), as_Register(index));
        } else {
          __ dsll(AT, as_Register(index), scale);
          __ daddu(AT, as_Register(base), AT);
        }
        if( Assembler::is_simm16(disp) ) {
          if (value == 0) {
            __ sb(R0, AT, disp);
          } else {
            __ move(T9, value);
            __ sb(T9, AT, disp);
          }
        } else {
          if (value == 0) {
            __ move(T9, disp);
            __ daddu(AT, AT, T9);
            __ sb(R0, AT, 0);
          } else {
            __ move(T9, disp);
            __ daddu(AT, AT, T9);
            __ move(T9, value);   // T9 is dead after the add, reuse for value
            __ sb(T9, AT, 0);
          }
        }
      }
    } else {
      // No index register: address is base + disp.
      if ( UseLoongsonISA ){
        if ( Assembler::is_simm16(disp) ){
          if ( value == 0 ) {
            __ sb(R0, as_Register(base), disp);
          } else {
            __ move(AT, value);
            __ sb(AT, as_Register(base), disp);
          }
        } else {
          __ move(AT, disp);
          if ( value == 0 ) {
            __ gssbx(R0, as_Register(base), AT, 0);
          } else {
            __ move(T9, value);
            __ gssbx(T9, as_Register(base), AT, 0);
          }
        }
      } else {
        if( Assembler::is_simm16(disp) ) {
          if (value == 0) {
            __ sb(R0, as_Register(base), disp);
          } else {
            __ move(AT, value);
            __ sb(AT, as_Register(base), disp);
          }
        } else {
          if (value == 0) {
            __ move(T9, disp);
            __ daddu(AT, as_Register(base), T9);
            __ sb(R0, AT, 0);
          } else {
            __ move(T9, disp);
            __ daddu(AT, as_Register(base), T9);
            __ move(T9, value);
            __ sb(T9, AT, 0);
          }
        }
      }
    }

    // Memory barrier required by the volatile-store contract.
    __ sync();
  %}
  // Load Short (16bit signed)
  // Sign-extending halfword load.  Addressing strategy:
  //   - Loongson gslhx: base + index_reg + simm8 in one instruction,
  //   - lh: addr_reg + simm16,
  //   - otherwise disp is materialized into a scratch register (AT/T9,
  //     both clobbered).
  enc_class load_S_enc (mRegI dst, memory mem) %{
    MacroAssembler _masm(&cbuf);
    int dst = $dst$$reg;
    int base = $mem$$base;
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;

    if( index != 0 ) {
      if ( UseLoongsonISA ) {
        if ( Assembler::is_simm(disp, 8) ) {
          if (scale == 0) {
            __ gslhx(as_Register(dst), as_Register(base), as_Register(index), disp);
          } else {
            __ dsll(AT, as_Register(index), scale);
            __ gslhx(as_Register(dst), as_Register(base), AT, disp);
          }
        } else if ( Assembler::is_simm16(disp) ) {
          // AT = base + (index << scale), then lh with immediate disp.
          if (scale == 0) {
            __ daddu(AT, as_Register(base), as_Register(index));
            __ lh(as_Register(dst), AT, disp);
          } else {
            __ dsll(AT, as_Register(index), scale);
            __ daddu(AT, as_Register(base), AT);
            __ lh(as_Register(dst), AT, disp);
          }
        } else {
          // Large disp: fold it into the (scaled) index; gslhx adds base.
          if (scale == 0) {
            __ move(AT, disp);
            __ daddu(AT, as_Register(index), AT);
            __ gslhx(as_Register(dst), as_Register(base), AT, 0);
          } else {
            __ dsll(AT, as_Register(index), scale);
            __ move(T9, disp);
            __ daddu(AT, AT, T9);
            __ gslhx(as_Register(dst), as_Register(base), AT, 0);
          }
        }
      } else { // not use loongson isa
        if (scale == 0) {
          __ daddu(AT, as_Register(base), as_Register(index));
        } else {
          __ dsll(AT, as_Register(index), scale);
          __ daddu(AT, as_Register(base), AT);
        }
        if( Assembler::is_simm16(disp) ) {
          __ lh(as_Register(dst), AT, disp);
        } else {
          __ move(T9, disp);
          __ daddu(AT, AT, T9);
          __ lh(as_Register(dst), AT, 0);
        }
      }
    } else { // index is 0
      if ( UseLoongsonISA ) {
        if ( Assembler::is_simm16(disp) ) {
          __ lh(as_Register(dst), as_Register(base), disp);
        } else {
          __ move(T9, disp);
          __ gslhx(as_Register(dst), as_Register(base), T9, 0);
        }
      } else { //not use loongson isa
        if( Assembler::is_simm16(disp) ) {
          __ lh(as_Register(dst), as_Register(base), disp);
        } else {
          __ move(T9, disp);
          __ daddu(AT, as_Register(base), T9);
          __ lh(as_Register(dst), AT, 0);
        }
      }
    }
  %}
2201 // Load Char (16bit unsigned)
2202 enc_class load_C_enc (mRegI dst, memory mem) %{
2203 MacroAssembler _masm(&cbuf);
2204 int dst = $dst$$reg;
2205 int base = $mem$$base;
2206 int index = $mem$$index;
2207 int scale = $mem$$scale;
2208 int disp = $mem$$disp;
2210 if( index != 0 ) {
2211 if (scale == 0) {
2212 __ daddu(AT, as_Register(base), as_Register(index));
2213 } else {
2214 __ dsll(AT, as_Register(index), scale);
2215 __ daddu(AT, as_Register(base), AT);
2216 }
2217 if( Assembler::is_simm16(disp) ) {
2218 __ lhu(as_Register(dst), AT, disp);
2219 } else {
2220 __ move(T9, disp);
2221 __ addu(AT, AT, T9);
2222 __ lhu(as_Register(dst), AT, 0);
2223 }
2224 } else {
2225 if( Assembler::is_simm16(disp) ) {
2226 __ lhu(as_Register(dst), as_Register(base), disp);
2227 } else {
2228 __ move(T9, disp);
2229 __ daddu(AT, as_Register(base), T9);
2230 __ lhu(as_Register(dst), AT, 0);
2231 }
2232 }
2233 %}
2235 // Store Char (16bit unsigned)
2236 enc_class store_C_reg_enc (memory mem, mRegI src) %{
2237 MacroAssembler _masm(&cbuf);
2238 int src = $src$$reg;
2239 int base = $mem$$base;
2240 int index = $mem$$index;
2241 int scale = $mem$$scale;
2242 int disp = $mem$$disp;
2244 if( index != 0 ) {
2245 if( Assembler::is_simm16(disp) ) {
2246 if( UseLoongsonISA && Assembler::is_simm(disp, 8) ) {
2247 if (scale == 0) {
2248 __ gsshx(as_Register(src), as_Register(base), as_Register(index), disp);
2249 } else {
2250 __ dsll(AT, as_Register(index), scale);
2251 __ gsshx(as_Register(src), as_Register(base), AT, disp);
2252 }
2253 } else {
2254 if (scale == 0) {
2255 __ addu(AT, as_Register(base), as_Register(index));
2256 } else {
2257 __ dsll(AT, as_Register(index), scale);
2258 __ addu(AT, as_Register(base), AT);
2259 }
2260 __ sh(as_Register(src), AT, disp);
2261 }
2262 } else {
2263 if (scale == 0) {
2264 __ addu(AT, as_Register(base), as_Register(index));
2265 } else {
2266 __ dsll(AT, as_Register(index), scale);
2267 __ addu(AT, as_Register(base), AT);
2268 }
2269 __ move(T9, disp);
2270 if( UseLoongsonISA ) {
2271 __ gsshx(as_Register(src), AT, T9, 0);
2272 } else {
2273 __ addu(AT, AT, T9);
2274 __ sh(as_Register(src), AT, 0);
2275 }
2276 }
2277 } else {
2278 if( Assembler::is_simm16(disp) ) {
2279 __ sh(as_Register(src), as_Register(base), disp);
2280 } else {
2281 __ move(T9, disp);
2282 if( UseLoongsonISA ) {
2283 __ gsshx(as_Register(src), as_Register(base), T9, 0);
2284 } else {
2285 __ addu(AT, as_Register(base), T9);
2286 __ sh(as_Register(src), AT, 0);
2287 }
2288 }
2289 }
2290 %}
2292 enc_class store_C0_enc (memory mem) %{
2293 MacroAssembler _masm(&cbuf);
2294 int base = $mem$$base;
2295 int index = $mem$$index;
2296 int scale = $mem$$scale;
2297 int disp = $mem$$disp;
2299 if( index != 0 ) {
2300 if( Assembler::is_simm16(disp) ) {
2301 if( UseLoongsonISA && Assembler::is_simm(disp, 8) ) {
2302 if (scale == 0) {
2303 __ gsshx(R0, as_Register(base), as_Register(index), disp);
2304 } else {
2305 __ dsll(AT, as_Register(index), scale);
2306 __ gsshx(R0, as_Register(base), AT, disp);
2307 }
2308 } else {
2309 if (scale == 0) {
2310 __ addu(AT, as_Register(base), as_Register(index));
2311 } else {
2312 __ dsll(AT, as_Register(index), scale);
2313 __ addu(AT, as_Register(base), AT);
2314 }
2315 __ sh(R0, AT, disp);
2316 }
2317 } else {
2318 if (scale == 0) {
2319 __ addu(AT, as_Register(base), as_Register(index));
2320 } else {
2321 __ dsll(AT, as_Register(index), scale);
2322 __ addu(AT, as_Register(base), AT);
2323 }
2324 __ move(T9, disp);
2325 if( UseLoongsonISA ) {
2326 __ gsshx(R0, AT, T9, 0);
2327 } else {
2328 __ addu(AT, AT, T9);
2329 __ sh(R0, AT, 0);
2330 }
2331 }
2332 } else {
2333 if( Assembler::is_simm16(disp) ) {
2334 __ sh(R0, as_Register(base), disp);
2335 } else {
2336 __ move(T9, disp);
2337 if( UseLoongsonISA ) {
2338 __ gsshx(R0, as_Register(base), T9, 0);
2339 } else {
2340 __ addu(AT, as_Register(base), T9);
2341 __ sh(R0, AT, 0);
2342 }
2343 }
2344 }
2345 %}
2347 enc_class load_I_enc (mRegI dst, memory mem) %{
2348 MacroAssembler _masm(&cbuf);
2349 int dst = $dst$$reg;
2350 int base = $mem$$base;
2351 int index = $mem$$index;
2352 int scale = $mem$$scale;
2353 int disp = $mem$$disp;
2355 if( index != 0 ) {
2356 if( Assembler::is_simm16(disp) ) {
2357 if( UseLoongsonISA && Assembler::is_simm(disp, 8) ) {
2358 if (scale == 0) {
2359 __ gslwx(as_Register(dst), as_Register(base), as_Register(index), disp);
2360 } else {
2361 __ dsll(AT, as_Register(index), scale);
2362 __ gslwx(as_Register(dst), as_Register(base), AT, disp);
2363 }
2364 } else {
2365 if (scale == 0) {
2366 __ addu(AT, as_Register(base), as_Register(index));
2367 } else {
2368 __ dsll(AT, as_Register(index), scale);
2369 __ addu(AT, as_Register(base), AT);
2370 }
2371 __ lw(as_Register(dst), AT, disp);
2372 }
2373 } else {
2374 if (scale == 0) {
2375 __ addu(AT, as_Register(base), as_Register(index));
2376 } else {
2377 __ dsll(AT, as_Register(index), scale);
2378 __ addu(AT, as_Register(base), AT);
2379 }
2380 __ move(T9, disp);
2381 if( UseLoongsonISA ) {
2382 __ gslwx(as_Register(dst), AT, T9, 0);
2383 } else {
2384 __ addu(AT, AT, T9);
2385 __ lw(as_Register(dst), AT, 0);
2386 }
2387 }
2388 } else {
2389 if( Assembler::is_simm16(disp) ) {
2390 __ lw(as_Register(dst), as_Register(base), disp);
2391 } else {
2392 __ move(T9, disp);
2393 if( UseLoongsonISA ) {
2394 __ gslwx(as_Register(dst), as_Register(base), T9, 0);
2395 } else {
2396 __ addu(AT, as_Register(base), T9);
2397 __ lw(as_Register(dst), AT, 0);
2398 }
2399 }
2400 }
2401 %}
2403 enc_class store_I_reg_enc (memory mem, mRegI src) %{
2404 MacroAssembler _masm(&cbuf);
2405 int src = $src$$reg;
2406 int base = $mem$$base;
2407 int index = $mem$$index;
2408 int scale = $mem$$scale;
2409 int disp = $mem$$disp;
2411 if( index != 0 ) {
2412 if( Assembler::is_simm16(disp) ) {
2413 if( UseLoongsonISA && Assembler::is_simm(disp, 8) ) {
2414 if (scale == 0) {
2415 __ gsswx(as_Register(src), as_Register(base), as_Register(index), disp);
2416 } else {
2417 __ dsll(AT, as_Register(index), scale);
2418 __ gsswx(as_Register(src), as_Register(base), AT, disp);
2419 }
2420 } else {
2421 if (scale == 0) {
2422 __ addu(AT, as_Register(base), as_Register(index));
2423 } else {
2424 __ dsll(AT, as_Register(index), scale);
2425 __ addu(AT, as_Register(base), AT);
2426 }
2427 __ sw(as_Register(src), AT, disp);
2428 }
2429 } else {
2430 if (scale == 0) {
2431 __ addu(AT, as_Register(base), as_Register(index));
2432 } else {
2433 __ dsll(AT, as_Register(index), scale);
2434 __ addu(AT, as_Register(base), AT);
2435 }
2436 __ move(T9, disp);
2437 if( UseLoongsonISA ) {
2438 __ gsswx(as_Register(src), AT, T9, 0);
2439 } else {
2440 __ addu(AT, AT, T9);
2441 __ sw(as_Register(src), AT, 0);
2442 }
2443 }
2444 } else {
2445 if( Assembler::is_simm16(disp) ) {
2446 __ sw(as_Register(src), as_Register(base), disp);
2447 } else {
2448 __ move(T9, disp);
2449 if( UseLoongsonISA ) {
2450 __ gsswx(as_Register(src), as_Register(base), T9, 0);
2451 } else {
2452 __ addu(AT, as_Register(base), T9);
2453 __ sw(as_Register(src), AT, 0);
2454 }
2455 }
2456 }
2457 %}
  // Store an immediate int (32bit) to memory.  Zero values reuse R0 and
  // skip the value materialization.  AT and T9 are clobbered as scratch.
  //   - Loongson gsswx: base + index_reg + simm8 in one instruction,
  //   - sw: addr_reg + simm16,
  //   - otherwise disp is materialized into a scratch register first.
  enc_class store_I_immI_enc (memory mem, immI src) %{
    MacroAssembler _masm(&cbuf);
    int base = $mem$$base;
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;
    int value = $src$$constant;

    if( index != 0 ) {
      if ( UseLoongsonISA ) {
        if ( Assembler::is_simm(disp, 8) ) {
          // disp fits in 8 bits: single gsswx.
          if ( scale == 0 ) {
            if ( value == 0 ) {
              __ gsswx(R0, as_Register(base), as_Register(index), disp);
            } else {
              __ move(T9, value);
              __ gsswx(T9, as_Register(base), as_Register(index), disp);
            }
          } else {
            __ dsll(AT, as_Register(index), scale);
            if ( value == 0 ) {
              __ gsswx(R0, as_Register(base), AT, disp);
            } else {
              __ move(T9, value);
              __ gsswx(T9, as_Register(base), AT, disp);
            }
          }
        } else if ( Assembler::is_simm16(disp) ) {
          // simm16 disp: fold base+index into AT, then sw with immediate.
          if ( scale == 0 ) {
            __ daddu(AT, as_Register(base), as_Register(index));
            if ( value == 0 ) {
              __ sw(R0, AT, disp);
            } else {
              __ move(T9, value);
              __ sw(T9, AT, disp);
            }
          } else {
            __ dsll(AT, as_Register(index), scale);
            __ daddu(AT, as_Register(base), AT);
            if ( value == 0 ) {
              __ sw(R0, AT, disp);
            } else {
              __ move(T9, value);
              __ sw(T9, AT, disp);
            }
          }
        } else {
          // Large disp: fold it into the (scaled) index; gsswx adds base.
          if ( scale == 0 ) {
            __ move(T9, disp);
            __ daddu(AT, as_Register(index), T9);
            if ( value ==0 ) {
              __ gsswx(R0, as_Register(base), AT, 0);
            } else {
              __ move(T9, value);
              __ gsswx(T9, as_Register(base), AT, 0);
            }
          } else {
            __ dsll(AT, as_Register(index), scale);
            __ move(T9, disp);
            __ daddu(AT, AT, T9);
            if ( value == 0 ) {
              __ gsswx(R0, as_Register(base), AT, 0);
            } else {
              __ move(T9, value);
              __ gsswx(T9, as_Register(base), AT, 0);
            }
          }
        }
      } else { //not use loongson isa
        // Generic MIPS path: AT = base + (index << scale), then sw.
        if (scale == 0) {
          __ daddu(AT, as_Register(base), as_Register(index));
        } else {
          __ dsll(AT, as_Register(index), scale);
          __ daddu(AT, as_Register(base), AT);
        }
        if( Assembler::is_simm16(disp) ) {
          if (value == 0) {
            __ sw(R0, AT, disp);
          } else {
            __ move(T9, value);
            __ sw(T9, AT, disp);
          }
        } else {
          if (value == 0) {
            __ move(T9, disp);
            __ daddu(AT, AT, T9);
            __ sw(R0, AT, 0);
          } else {
            __ move(T9, disp);
            __ daddu(AT, AT, T9);
            __ move(T9, value);  // T9 is dead after the add, reuse for value
            __ sw(T9, AT, 0);
          }
        }
      }
    } else {
      // No index register: address is base + disp.
      if ( UseLoongsonISA ) {
        if ( Assembler::is_simm16(disp) ) {
          if ( value == 0 ) {
            __ sw(R0, as_Register(base), disp);
          } else {
            __ move(AT, value);
            __ sw(AT, as_Register(base), disp);
          }
        } else {
          __ move(T9, disp);
          if ( value == 0 ) {
            __ gsswx(R0, as_Register(base), T9, 0);
          } else {
            __ move(AT, value);
            __ gsswx(AT, as_Register(base), T9, 0);
          }
        }
      } else {
        if( Assembler::is_simm16(disp) ) {
          if (value == 0) {
            __ sw(R0, as_Register(base), disp);
          } else {
            __ move(AT, value);
            __ sw(AT, as_Register(base), disp);
          }
        } else {
          if (value == 0) {
            __ move(T9, disp);
            __ daddu(AT, as_Register(base), T9);
            __ sw(R0, AT, 0);
          } else {
            __ move(T9, disp);
            __ daddu(AT, as_Register(base), T9);
            __ move(T9, value);
            __ sw(T9, AT, 0);
          }
        }
      }
    }
  %}
  // Load narrow oop (32bit, zero-extended via lwu).  The decode to a full
  // oop, if any, is done by a separate node.  AT and T9 are clobbered.
  enc_class load_N_enc (mRegN dst, memory mem) %{
    MacroAssembler _masm(&cbuf);
    int dst = $dst$$reg;
    int base = $mem$$base;
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;
    relocInfo::relocType disp_reloc = $mem->disp_reloc();
    // A relocated displacement is not supported by this encoding.
    assert(disp_reloc == relocInfo::none, "cannot have disp");

    if( index != 0 ) {
      // AT = base + (index << scale)
      if (scale == 0) {
        __ daddu(AT, as_Register(base), as_Register(index));
      } else {
        __ dsll(AT, as_Register(index), scale);
        __ daddu(AT, as_Register(base), AT);
      }
      if( Assembler::is_simm16(disp) ) {
        __ lwu(as_Register(dst), AT, disp);
      } else {
        __ set64(T9, disp);
        __ daddu(AT, AT, T9);
        __ lwu(as_Register(dst), AT, 0);
      }
    } else {
      if( Assembler::is_simm16(disp) ) {
        __ lwu(as_Register(dst), as_Register(base), disp);
      } else {
        __ set64(T9, disp);
        __ daddu(AT, as_Register(base), T9);
        __ lwu(as_Register(dst), AT, 0);
      }
    }

  %}
  // Load pointer (64bit).  Addressing strategy mirrors the other loads:
  //   - Loongson gsldx: base + index_reg + simm8,
  //   - ld: addr_reg + simm16,
  //   - otherwise disp is materialized into a scratch register.
  // AT and T9 are clobbered as scratch registers.
  enc_class load_P_enc (mRegP dst, memory mem) %{
    MacroAssembler _masm(&cbuf);
    int dst = $dst$$reg;
    int base = $mem$$base;
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;
    relocInfo::relocType disp_reloc = $mem->disp_reloc();
    // A relocated displacement is not supported by this encoding.
    assert(disp_reloc == relocInfo::none, "cannot have disp");

    if( index != 0 ) {
      if ( UseLoongsonISA ) {
        if ( Assembler::is_simm(disp, 8) ) {
          if ( scale != 0 ) {
            __ dsll(AT, as_Register(index), scale);
            __ gsldx(as_Register(dst), as_Register(base), AT, disp);
          } else {
            __ gsldx(as_Register(dst), as_Register(base), as_Register(index), disp);
          }
        } else if ( Assembler::is_simm16(disp) ){
          // AT = base + (index << scale), then ld with immediate disp.
          if ( scale != 0 ) {
            __ dsll(AT, as_Register(index), scale);
            __ daddu(AT, AT, as_Register(base));
          } else {
            __ daddu(AT, as_Register(index), as_Register(base));
          }
          __ ld(as_Register(dst), AT, disp);
        } else {
          // Large disp: fold it into the (scaled) index; gsldx adds base.
          if ( scale != 0 ) {
            __ dsll(AT, as_Register(index), scale);
            __ move(T9, disp);
            __ daddu(AT, AT, T9);
          } else {
            __ move(T9, disp);
            __ daddu(AT, as_Register(index), T9);
          }
          __ gsldx(as_Register(dst), as_Register(base), AT, 0);
        }
      } else { //not use loongson isa
        if (scale == 0) {
          __ daddu(AT, as_Register(base), as_Register(index));
        } else {
          __ dsll(AT, as_Register(index), scale);
          __ daddu(AT, as_Register(base), AT);
        }
        if( Assembler::is_simm16(disp) ) {
          __ ld(as_Register(dst), AT, disp);
        } else {
          __ set64(T9, disp);
          __ daddu(AT, AT, T9);
          __ ld(as_Register(dst), AT, 0);
        }
      }
    } else {
      if ( UseLoongsonISA ) {
        if ( Assembler::is_simm16(disp) ){
          __ ld(as_Register(dst), as_Register(base), disp);
        } else {
          __ set64(T9, disp);
          __ gsldx(as_Register(dst), as_Register(base), T9, 0);
        }
      } else { //not use loongson isa
        if( Assembler::is_simm16(disp) ) {
          __ ld(as_Register(dst), as_Register(base), disp);
        } else {
          __ set64(T9, disp);
          __ daddu(AT, as_Register(base), T9);
          __ ld(as_Register(dst), AT, 0);
        }
      }
    }
    // if( disp_reloc != relocInfo::none) __ ld(as_Register(dst), as_Register(dst), 0);
  %}
  // Store pointer (64bit) from a register.  AT and T9 are clobbered.
  //   - Loongson gssdx: base + index_reg + simm8,
  //   - sd: addr_reg + simm16,
  //   - otherwise disp is materialized into a scratch register first.
  enc_class store_P_reg_enc (memory mem, mRegP src) %{
    MacroAssembler _masm(&cbuf);
    int src = $src$$reg;
    int base = $mem$$base;
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;

    if( index != 0 ) {
      if ( UseLoongsonISA ){
        if ( Assembler::is_simm(disp, 8) ) {
          if ( scale == 0 ) {
            __ gssdx(as_Register(src), as_Register(base), as_Register(index), disp);
          } else {
            __ dsll(AT, as_Register(index), scale);
            __ gssdx(as_Register(src), as_Register(base), AT, disp);
          }
        } else if ( Assembler::is_simm16(disp) ) {
          // AT = base + (index << scale), then sd with immediate disp.
          if ( scale == 0 ) {
            __ daddu(AT, as_Register(base), as_Register(index));
          } else {
            __ dsll(AT, as_Register(index), scale);
            __ daddu(AT, as_Register(base), AT);
          }
          __ sd(as_Register(src), AT, disp);
        } else {
          // Large disp: fold it into the (scaled) index; gssdx adds base.
          if ( scale == 0 ) {
            __ move(T9, disp);
            __ daddu(AT, as_Register(index), T9);
          } else {
            __ dsll(AT, as_Register(index), scale);
            __ move(T9, disp);
            __ daddu(AT, AT, T9);
          }
          __ gssdx(as_Register(src), as_Register(base), AT, 0);
        }
      } else { //not use loongson isa
        if (scale == 0) {
          __ daddu(AT, as_Register(base), as_Register(index));
        } else {
          __ dsll(AT, as_Register(index), scale);
          __ daddu(AT, as_Register(base), AT);
        }
        if( Assembler::is_simm16(disp) ) {
          __ sd(as_Register(src), AT, disp);
        } else {
          __ move(T9, disp);
          __ daddu(AT, AT, T9);
          __ sd(as_Register(src), AT, 0);
        }
      }
    } else {
      if ( UseLoongsonISA ) {
        if ( Assembler::is_simm16(disp) ) {
          __ sd(as_Register(src), as_Register(base), disp);
        } else {
          __ move(T9, disp);
          __ gssdx(as_Register(src), as_Register(base), T9, 0);
        }
      } else {
        if( Assembler::is_simm16(disp) ) {
          __ sd(as_Register(src), as_Register(base), disp);
        } else {
          __ move(T9, disp);
          __ daddu(AT, as_Register(base), T9);
          __ sd(as_Register(src), AT, 0);
        }
      }
    }
  %}
  // Store narrow oop (32bit) from a register; same structure as
  // store_P_reg_enc but word-sized (sw / gsswx).  AT and T9 are clobbered.
  enc_class store_N_reg_enc (memory mem, mRegN src) %{
    MacroAssembler _masm(&cbuf);
    int src = $src$$reg;
    int base = $mem$$base;
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;

    if( index != 0 ) {
      if ( UseLoongsonISA ){
        if ( Assembler::is_simm(disp, 8) ) {
          if ( scale == 0 ) {
            __ gsswx(as_Register(src), as_Register(base), as_Register(index), disp);
          } else {
            __ dsll(AT, as_Register(index), scale);
            __ gsswx(as_Register(src), as_Register(base), AT, disp);
          }
        } else if ( Assembler::is_simm16(disp) ) {
          // AT = base + (index << scale), then sw with immediate disp.
          if ( scale == 0 ) {
            __ daddu(AT, as_Register(base), as_Register(index));
          } else {
            __ dsll(AT, as_Register(index), scale);
            __ daddu(AT, as_Register(base), AT);
          }
          __ sw(as_Register(src), AT, disp);
        } else {
          // Large disp: fold it into the (scaled) index; gsswx adds base.
          if ( scale == 0 ) {
            __ move(T9, disp);
            __ daddu(AT, as_Register(index), T9);
          } else {
            __ dsll(AT, as_Register(index), scale);
            __ move(T9, disp);
            __ daddu(AT, AT, T9);
          }
          __ gsswx(as_Register(src), as_Register(base), AT, 0);
        }
      } else { //not use loongson isa
        if (scale == 0) {
          __ daddu(AT, as_Register(base), as_Register(index));
        } else {
          __ dsll(AT, as_Register(index), scale);
          __ daddu(AT, as_Register(base), AT);
        }
        if( Assembler::is_simm16(disp) ) {
          __ sw(as_Register(src), AT, disp);
        } else {
          __ move(T9, disp);
          __ daddu(AT, AT, T9);
          __ sw(as_Register(src), AT, 0);
        }
      }
    } else {
      if ( UseLoongsonISA ) {
        if ( Assembler::is_simm16(disp) ) {
          __ sw(as_Register(src), as_Register(base), disp);
        } else {
          __ move(T9, disp);
          __ gsswx(as_Register(src), as_Register(base), T9, 0);
        }
      } else {
        if( Assembler::is_simm16(disp) ) {
          __ sw(as_Register(src), as_Register(base), disp);
        } else {
          __ move(T9, disp);
          __ daddu(AT, as_Register(base), T9);
          __ sw(as_Register(src), AT, 0);
        }
      }
    }
  %}
  // Store a null pointer (64bit zero) to memory; stores R0 directly so no
  // value register is needed.  AT and T9 are clobbered as scratch.
  enc_class store_P_immP0_enc (memory mem) %{
    MacroAssembler _masm(&cbuf);
    int base = $mem$$base;
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;

    if( index != 0 ) {
      if (scale == 0) {
        if( Assembler::is_simm16(disp) ) {
          if (UseLoongsonISA && Assembler::is_simm(disp, 8)) {
            // Single-instruction Loongson store: base + index + simm8.
            __ gssdx(R0, as_Register(base), as_Register(index), disp);
          } else {
            __ daddu(AT, as_Register(base), as_Register(index));
            __ sd(R0, AT, disp);
          }
        } else {
          __ daddu(AT, as_Register(base), as_Register(index));
          __ move(T9, disp);
          if(UseLoongsonISA) {
            __ gssdx(R0, AT, T9, 0);
          } else {
            __ daddu(AT, AT, T9);
            __ sd(R0, AT, 0);
          }
        }
      } else {
        __ dsll(AT, as_Register(index), scale);
        if( Assembler::is_simm16(disp) ) {
          if (UseLoongsonISA && Assembler::is_simm(disp, 8)) {
            __ gssdx(R0, as_Register(base), AT, disp);
          } else {
            __ daddu(AT, as_Register(base), AT);
            __ sd(R0, AT, disp);
          }
        } else {
          __ daddu(AT, as_Register(base), AT);
          __ move(T9, disp);
          if (UseLoongsonISA) {
            __ gssdx(R0, AT, T9, 0);
          } else {
            __ daddu(AT, AT, T9);
            __ sd(R0, AT, 0);
          }
        }
      }
    } else {
      // No index register: address is base + disp.
      if( Assembler::is_simm16(disp) ) {
        __ sd(R0, as_Register(base), disp);
      } else {
        __ move(T9, disp);
        if (UseLoongsonISA) {
          __ gssdx(R0, as_Register(base), T9, 0);
        } else {
          __ daddu(AT, as_Register(base), T9);
          __ sd(R0, AT, 0);
        }
      }
    }
  %}
  // Store an immediate pointer constant (immP31, so the value fits the
  // materialization done by move).  Zero values reuse R0.
  // AT and T9 are clobbered as scratch registers.
  enc_class store_P_immP_enc (memory mem, immP31 src) %{
    MacroAssembler _masm(&cbuf);
    int base = $mem$$base;
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;
    long value = $src$$constant;

    if( index != 0 ) {
      // AT = base + (index << scale)
      if (scale == 0) {
        __ daddu(AT, as_Register(base), as_Register(index));
      } else {
        __ dsll(AT, as_Register(index), scale);
        __ daddu(AT, as_Register(base), AT);
      }
      if( Assembler::is_simm16(disp) ) {
        if (value == 0) {
          __ sd(R0, AT, disp);
        } else {
          __ move(T9, value);
          __ sd(T9, AT, disp);
        }
      } else {
        if (value == 0) {
          __ move(T9, disp);
          __ daddu(AT, AT, T9);
          __ sd(R0, AT, 0);
        } else {
          __ move(T9, disp);
          __ daddu(AT, AT, T9);
          __ move(T9, value);  // T9 is dead after the add, reuse for value
          __ sd(T9, AT, 0);
        }
      }
    } else {
      if( Assembler::is_simm16(disp) ) {
        if (value == 0) {
          __ sd(R0, as_Register(base), disp);
        } else {
          __ move(AT, value);
          __ sd(AT, as_Register(base), disp);
        }
      } else {
        if (value == 0) {
          __ move(T9, disp);
          __ daddu(AT, as_Register(base), T9);
          __ sd(R0, AT, 0);
        } else {
          __ move(T9, disp);
          __ daddu(AT, as_Register(base), T9);
          __ move(T9, value);
          __ sd(T9, AT, 0);
        }
      }
    }
  %}
  // Store a zero narrow oop (32bit zero word) to memory.
  // AT and T9 are clobbered as scratch registers.
  // NOTE(review): the operand class is spelled 'ImmN0' (capital I) while
  // sibling encodings use lower-case 'imm...' names — verify against the
  // operand definitions earlier in this file.
  enc_class storeImmN0_enc(memory mem, ImmN0 src) %{
    MacroAssembler _masm(&cbuf);
    int base = $mem$$base;
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;

    if(index!=0){
      // AT = base + (index << scale)
      if (scale == 0) {
        __ daddu(AT, as_Register(base), as_Register(index));
      } else {
        __ dsll(AT, as_Register(index), scale);
        __ daddu(AT, as_Register(base), AT);
      }

      if( Assembler::is_simm16(disp) ) {
        __ sw(R0, AT, disp);
      } else {
        __ move(T9, disp);
        __ daddu(AT, AT, T9);
        __ sw(R0, AT, 0);
      }
    }
    else {
      if( Assembler::is_simm16(disp) ) {
        __ sw(R0, as_Register(base), disp);
      } else {
        __ move(T9, disp);
        __ daddu(AT, as_Register(base), T9);
        __ sw(R0, AT, 0);
      }
    }
  %}
3001 enc_class storeImmN_enc (memory mem, immN src) %{
3002 MacroAssembler _masm(&cbuf);
3003 int base = $mem$$base;
3004 int index = $mem$$index;
3005 int scale = $mem$$scale;
3006 int disp = $mem$$disp;
3007 long * value = (long *)$src$$constant;
3009 if (value == NULL) {
3010 guarantee(Assembler::is_simm16(disp), "FIXME: disp is not simm16!");
3011 if (index == 0) {
3012 __ sw(R0, as_Register(base), disp);
3013 } else {
3014 if (scale == 0) {
3015 __ daddu(AT, as_Register(base), as_Register(index));
3016 } else {
3017 __ dsll(AT, as_Register(index), scale);
3018 __ daddu(AT, as_Register(base), AT);
3019 }
3020 __ sw(R0, AT, disp);
3021 }
3023 return;
3024 }
3026 int oop_index = __ oop_recorder()->find_index((jobject)value);
3027 RelocationHolder rspec = oop_Relocation::spec(oop_index);
3029 guarantee(scale == 0, "FIXME: scale is not zero !");
3030 guarantee(value != 0, "FIXME: value is zero !");
3032 if (index != 0) {
3033 if (scale == 0) {
3034 __ daddu(AT, as_Register(base), as_Register(index));
3035 } else {
3036 __ dsll(AT, as_Register(index), scale);
3037 __ daddu(AT, as_Register(base), AT);
3038 }
3039 if( Assembler::is_simm16(disp) ) {
3040 if(rspec.type() != relocInfo::none) {
3041 __ relocate(rspec, Assembler::narrow_oop_operand);
3042 __ patchable_set48(T9, oop_index);
3043 } else {
3044 __ set64(T9, oop_index);
3045 }
3046 __ sw(T9, AT, disp);
3047 } else {
3048 __ move(T9, disp);
3049 __ addu(AT, AT, T9);
3051 if(rspec.type() != relocInfo::none) {
3052 __ relocate(rspec, Assembler::narrow_oop_operand);
3053 __ patchable_set48(T9, oop_index);
3054 } else {
3055 __ set64(T9, oop_index);
3056 }
3057 __ sw(T9, AT, 0);
3058 }
3059 }
3060 else {
3061 if( Assembler::is_simm16(disp) ) {
3062 if($src->constant_reloc() != relocInfo::none) {
3063 __ relocate(rspec, Assembler::narrow_oop_operand);
3064 __ patchable_set48(T9, oop_index);
3065 } else {
3066 __ set64(T9, oop_index);
3067 }
3068 __ sw(T9, as_Register(base), disp);
3069 } else {
3070 __ move(T9, disp);
3071 __ daddu(AT, as_Register(base), T9);
3073 if($src->constant_reloc() != relocInfo::none){
3074 __ relocate(rspec, Assembler::narrow_oop_operand);
3075 __ patchable_set48(T9, oop_index);
3076 } else {
3077 __ set64(T9, oop_index);
3078 }
3079 __ sw(T9, AT, 0);
3080 }
3081 }
3082 %}
  // Store an immediate narrow klass.  The klass is recorded with the
  // OopRecorder; a metadata relocation plus patchable_set48 is emitted so
  // the encoded (narrow) klass word can be patched later.
  // AT and T9 are clobbered as scratch registers.
  enc_class storeImmNKlass_enc (memory mem, immNKlass src) %{
    MacroAssembler _masm(&cbuf);

    assert (UseCompressedOops, "should only be used for compressed headers");
    assert (__ oop_recorder() != NULL, "this assembler needs an OopRecorder");

    int base = $mem$$base;
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;
    long value = $src$$constant;

    int klass_index = __ oop_recorder()->find_index((Klass*)value);
    RelocationHolder rspec = metadata_Relocation::spec(klass_index);
    // The word actually stored is the compressed klass pointer.
    long narrowp = Klass::encode_klass((Klass*)value);

    if(index!=0){
      // AT = base + (index << scale)
      if (scale == 0) {
        __ daddu(AT, as_Register(base), as_Register(index));
      } else {
        __ dsll(AT, as_Register(index), scale);
        __ daddu(AT, as_Register(base), AT);
      }

      if( Assembler::is_simm16(disp) ) {
        if(rspec.type() != relocInfo::none){
          __ relocate(rspec, Assembler::narrow_oop_operand);
          __ patchable_set48(T9, narrowp);
        } else {
          __ set64(T9, narrowp);
        }
        __ sw(T9, AT, disp);
      } else {
        __ move(T9, disp);
        __ daddu(AT, AT, T9);

        if(rspec.type() != relocInfo::none){
          __ relocate(rspec, Assembler::narrow_oop_operand);
          __ patchable_set48(T9, narrowp);
        } else {
          __ set64(T9, narrowp);
        }

        __ sw(T9, AT, 0);
      }
    } else {
      if( Assembler::is_simm16(disp) ) {
        if(rspec.type() != relocInfo::none){
          __ relocate(rspec, Assembler::narrow_oop_operand);
          __ patchable_set48(T9, narrowp);
        }
        else {
          __ set64(T9, narrowp);
        }
        __ sw(T9, as_Register(base), disp);
      } else {
        __ move(T9, disp);
        __ daddu(AT, as_Register(base), T9);

        if(rspec.type() != relocInfo::none){
          __ relocate(rspec, Assembler::narrow_oop_operand);
          __ patchable_set48(T9, narrowp);
        } else {
          __ set64(T9, narrowp);
        }
        __ sw(T9, AT, 0);
      }
    }
  %}
  // Load Long (64bit).  A leading lb from the base register forces the
  // implicit null check before the address computation.
  // AT and T9 are clobbered as scratch registers.
  enc_class load_L_enc (mRegL dst, memory mem) %{
    MacroAssembler _masm(&cbuf);
    int base = $mem$$base;
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;
    Register dst_reg = as_Register($dst$$reg);

    // For implicit null check
    __ lb(AT, as_Register(base), 0);

    if( index != 0 ) {
      // AT = base + (index << scale)
      if (scale == 0) {
        __ daddu(AT, as_Register(base), as_Register(index));
      } else {
        __ dsll(AT, as_Register(index), scale);
        __ daddu(AT, as_Register(base), AT);
      }
      if( Assembler::is_simm16(disp) ) {
        __ ld(dst_reg, AT, disp);
      } else {
        __ move(T9, disp);
        __ daddu(AT, AT, T9);
        __ ld(dst_reg, AT, 0);
      }
    } else {
      if( Assembler::is_simm16(disp) ) {
        __ ld(dst_reg, as_Register(base), disp);
      } else {
        __ move(T9, disp);
        __ daddu(AT, as_Register(base), T9);
        __ ld(dst_reg, AT, 0);
      }
    }
  %}
  // Store a 64-bit long register to memory.
  // Effective address = base [+ index << scale] + disp; AT is used as the
  // address scratch register, T9 for out-of-range displacements.
  enc_class store_L_reg_enc (memory mem, mRegL src) %{
    MacroAssembler _masm(&cbuf);
    int base = $mem$$base;
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;
    Register src_reg = as_Register($src$$reg);

    if( index != 0 ) {
      if (scale == 0) {
        __ daddu(AT, as_Register(base), as_Register(index));
      } else {
        __ dsll(AT, as_Register(index), scale);
        __ daddu(AT, as_Register(base), AT);
      }
      if( Assembler::is_simm16(disp) ) {
        __ sd(src_reg, AT, disp);
      } else {
        // disp too large for the sd offset field: add it in via T9.
        __ move(T9, disp);
        __ daddu(AT, AT, T9);
        __ sd(src_reg, AT, 0);
      }
    } else {
      if( Assembler::is_simm16(disp) ) {
        __ sd(src_reg, as_Register(base), disp);
      } else {
        __ move(T9, disp);
        __ daddu(AT, as_Register(base), T9);
        __ sd(src_reg, AT, 0);
      }
    }
  %}
3223 enc_class store_L_immL0_enc (memory mem, immL0 src) %{
3224 MacroAssembler _masm(&cbuf);
3225 int base = $mem$$base;
3226 int index = $mem$$index;
3227 int scale = $mem$$scale;
3228 int disp = $mem$$disp;
3230 if( index != 0 ) {
3231 // For implicit null check
3232 __ lb(AT, as_Register(base), 0);
3234 if (scale == 0) {
3235 __ daddu(AT, as_Register(base), as_Register(index));
3236 } else {
3237 __ dsll(AT, as_Register(index), scale);
3238 __ daddu(AT, as_Register(base), AT);
3239 }
3240 if( Assembler::is_simm16(disp) ) {
3241 __ sd(R0, AT, disp);
3242 } else {
3243 __ move(T9, disp);
3244 __ addu(AT, AT, T9);
3245 __ sd(R0, AT, 0);
3246 }
3247 } else {
3248 if( Assembler::is_simm16(disp) ) {
3249 __ sd(R0, as_Register(base), disp);
3250 } else {
3251 __ move(T9, disp);
3252 __ addu(AT, as_Register(base), T9);
3253 __ sd(R0, AT, 0);
3254 }
3255 }
3256 %}
3258 enc_class store_L_immL_enc (memory mem, immL src) %{
3259 MacroAssembler _masm(&cbuf);
3260 int base = $mem$$base;
3261 int index = $mem$$index;
3262 int scale = $mem$$scale;
3263 int disp = $mem$$disp;
3264 long imm = $src$$constant;
3266 if( index != 0 ) {
3267 if (scale == 0) {
3268 __ daddu(AT, as_Register(base), as_Register(index));
3269 } else {
3270 __ dsll(AT, as_Register(index), scale);
3271 __ daddu(AT, as_Register(base), AT);
3272 }
3273 if( Assembler::is_simm16(disp) ) {
3274 __ set64(T9, imm);
3275 __ sd(T9, AT, disp);
3276 } else {
3277 __ move(T9, disp);
3278 __ addu(AT, AT, T9);
3279 __ set64(T9, imm);
3280 __ sd(T9, AT, 0);
3281 }
3282 } else {
3283 if( Assembler::is_simm16(disp) ) {
3284 __ move(AT, as_Register(base));
3285 __ set64(T9, imm);
3286 __ sd(T9, AT, disp);
3287 } else {
3288 __ move(T9, disp);
3289 __ addu(AT, as_Register(base), T9);
3290 __ set64(T9, imm);
3291 __ sd(T9, AT, 0);
3292 }
3293 }
3294 %}
  // Load a 32-bit float from memory into an FP register.
  // On Loongson, the indexed-load gslwxc1 is preferred when the displacement
  // fits its 8-bit immediate field; otherwise the address is summed into AT
  // and a plain lwc1 is used.
  enc_class load_F_enc (regF dst, memory mem) %{
    MacroAssembler _masm(&cbuf);
    int base = $mem$$base;
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;
    FloatRegister dst = $dst$$FloatRegister;

    if( index != 0 ) {
      if( Assembler::is_simm16(disp) ) {
        if( UseLoongsonISA && Assembler::is_simm(disp, 8) ) {
          // gslwxc1 folds base + index + simm8 disp into one instruction.
          if (scale == 0) {
            __ gslwxc1(dst, as_Register(base), as_Register(index), disp);
          } else {
            __ dsll(AT, as_Register(index), scale);
            __ gslwxc1(dst, as_Register(base), AT, disp);
          }
        } else {
          if (scale == 0) {
            __ daddu(AT, as_Register(base), as_Register(index));
          } else {
            __ dsll(AT, as_Register(index), scale);
            __ daddu(AT, as_Register(base), AT);
          }
          __ lwc1(dst, AT, disp);
        }
      } else {
        // Displacement exceeds simm16: base+index into AT, disp into T9.
        if (scale == 0) {
          __ daddu(AT, as_Register(base), as_Register(index));
        } else {
          __ dsll(AT, as_Register(index), scale);
          __ daddu(AT, as_Register(base), AT);
        }
        __ move(T9, disp);
        if( UseLoongsonISA ) {
          __ gslwxc1(dst, AT, T9, 0);
        } else {
          __ daddu(AT, AT, T9);
          __ lwc1(dst, AT, 0);
        }
      }
    } else {
      if( Assembler::is_simm16(disp) ) {
        __ lwc1(dst, as_Register(base), disp);
      } else {
        __ move(T9, disp);
        if( UseLoongsonISA ) {
          __ gslwxc1(dst, as_Register(base), T9, 0);
        } else {
          __ daddu(AT, as_Register(base), T9);
          __ lwc1(dst, AT, 0);
        }
      }
    }
  %}
3352 enc_class store_F_reg_enc (memory mem, regF src) %{
3353 MacroAssembler _masm(&cbuf);
3354 int base = $mem$$base;
3355 int index = $mem$$index;
3356 int scale = $mem$$scale;
3357 int disp = $mem$$disp;
3358 FloatRegister src = $src$$FloatRegister;
3360 if( index != 0 ) {
3361 if( Assembler::is_simm16(disp) ) {
3362 if( UseLoongsonISA && Assembler::is_simm(disp, 8) ) {
3363 if (scale == 0) {
3364 __ gsswxc1(src, as_Register(base), as_Register(index), disp);
3365 } else {
3366 __ dsll(AT, as_Register(index), scale);
3367 __ gsswxc1(src, as_Register(base), AT, disp);
3368 }
3369 } else {
3370 if (scale == 0) {
3371 __ daddu(AT, as_Register(base), as_Register(index));
3372 } else {
3373 __ dsll(AT, as_Register(index), scale);
3374 __ daddu(AT, as_Register(base), AT);
3375 }
3376 __ swc1(src, AT, disp);
3377 }
3378 } else {
3379 if (scale == 0) {
3380 __ daddu(AT, as_Register(base), as_Register(index));
3381 } else {
3382 __ dsll(AT, as_Register(index), scale);
3383 __ daddu(AT, as_Register(base), AT);
3384 }
3385 __ move(T9, disp);
3386 if( UseLoongsonISA ) {
3387 __ gsswxc1(src, AT, T9, 0);
3388 } else {
3389 __ daddu(AT, AT, T9);
3390 __ swc1(src, AT, 0);
3391 }
3392 }
3393 } else {
3394 if( Assembler::is_simm16(disp) ) {
3395 __ swc1(src, as_Register(base), disp);
3396 } else {
3397 __ move(T9, disp);
3398 if( UseLoongsonISA ) {
3399 __ gslwxc1(src, as_Register(base), T9, 0);
3400 } else {
3401 __ daddu(AT, as_Register(base), T9);
3402 __ swc1(src, AT, 0);
3403 }
3404 }
3405 }
3406 %}
3408 enc_class load_D_enc (regD dst, memory mem) %{
3409 MacroAssembler _masm(&cbuf);
3410 int base = $mem$$base;
3411 int index = $mem$$index;
3412 int scale = $mem$$scale;
3413 int disp = $mem$$disp;
3414 FloatRegister dst_reg = as_FloatRegister($dst$$reg);
3416 if( index != 0 ) {
3417 if( Assembler::is_simm16(disp) ) {
3418 if( UseLoongsonISA && Assembler::is_simm(disp, 8) ) {
3419 if (scale == 0) {
3420 __ gsldxc1(dst_reg, as_Register(base), as_Register(index), disp);
3421 } else {
3422 __ dsll(AT, as_Register(index), scale);
3423 __ gsldxc1(dst_reg, as_Register(base), AT, disp);
3424 }
3425 } else {
3426 if (scale == 0) {
3427 __ daddu(AT, as_Register(base), as_Register(index));
3428 } else {
3429 __ dsll(AT, as_Register(index), scale);
3430 __ daddu(AT, as_Register(base), AT);
3431 }
3432 __ ldc1(dst_reg, AT, disp);
3433 }
3434 } else {
3435 if (scale == 0) {
3436 __ daddu(AT, as_Register(base), as_Register(index));
3437 } else {
3438 __ dsll(AT, as_Register(index), scale);
3439 __ daddu(AT, as_Register(base), AT);
3440 }
3441 __ move(T9, disp);
3442 if( UseLoongsonISA ) {
3443 __ gsldxc1(dst_reg, AT, T9, 0);
3444 } else {
3445 __ addu(AT, AT, T9);
3446 __ ldc1(dst_reg, AT, 0);
3447 }
3448 }
3449 } else {
3450 if( Assembler::is_simm16(disp) ) {
3451 __ ldc1(dst_reg, as_Register(base), disp);
3452 } else {
3453 __ move(T9, disp);
3454 if( UseLoongsonISA ) {
3455 __ gsldxc1(dst_reg, as_Register(base), T9, 0);
3456 } else {
3457 __ addu(AT, as_Register(base), T9);
3458 __ ldc1(dst_reg, AT, 0);
3459 }
3460 }
3461 }
3462 %}
3464 enc_class store_D_reg_enc (memory mem, regD src) %{
3465 MacroAssembler _masm(&cbuf);
3466 int base = $mem$$base;
3467 int index = $mem$$index;
3468 int scale = $mem$$scale;
3469 int disp = $mem$$disp;
3470 FloatRegister src_reg = as_FloatRegister($src$$reg);
3472 if( index != 0 ) {
3473 if( Assembler::is_simm16(disp) ) {
3474 if( UseLoongsonISA && Assembler::is_simm(disp, 8) ) {
3475 if (scale == 0) {
3476 __ gssdxc1(src_reg, as_Register(base), as_Register(index), disp);
3477 } else {
3478 __ dsll(AT, as_Register(index), scale);
3479 __ gssdxc1(src_reg, as_Register(base), AT, disp);
3480 }
3481 } else {
3482 if (scale == 0) {
3483 __ daddu(AT, as_Register(base), as_Register(index));
3484 } else {
3485 __ dsll(AT, as_Register(index), scale);
3486 __ daddu(AT, as_Register(base), AT);
3487 }
3488 __ sdc1(src_reg, AT, disp);
3489 }
3490 } else {
3491 if (scale == 0) {
3492 __ daddu(AT, as_Register(base), as_Register(index));
3493 } else {
3494 __ dsll(AT, as_Register(index), scale);
3495 __ daddu(AT, as_Register(base), AT);
3496 }
3497 __ move(T9, disp);
3498 if( UseLoongsonISA ) {
3499 __ gssdxc1(src_reg, AT, T9, 0);
3500 } else {
3501 __ addu(AT, AT, T9);
3502 __ sdc1(src_reg, AT, 0);
3503 }
3504 }
3505 } else {
3506 if( Assembler::is_simm16(disp) ) {
3507 __ sdc1(src_reg, as_Register(base), disp);
3508 } else {
3509 __ move(T9, disp);
3510 if( UseLoongsonISA ) {
3511 __ gssdxc1(src_reg, as_Register(base), T9, 0);
3512 } else {
3513 __ addu(AT, as_Register(base), T9);
3514 __ sdc1(src_reg, AT, 0);
3515 }
3516 }
3517 }
3518 %}
  // Emit a patchable call from compiled Java code into the runtime
  // (used by both Java_To_Runtime and Java_To_Runtime_Leaf call nodes).
  enc_class Java_To_Runtime (method meth) %{ // CALL Java_To_Runtime, Java_To_Runtime_Leaf
    MacroAssembler _masm(&cbuf);
    // This is the instruction starting address for relocation info.
    __ block_comment("Java_To_Runtime");
    cbuf.set_insts_mark();
    __ relocate(relocInfo::runtime_call_type);

    __ patchable_call((address)$meth$$method);
  %}
  // Emit a Java static (or optimized-virtual) call. The relocation type
  // selects how the call site is later patched; a java-to-interpreter stub
  // is emitted for resolvable (non-runtime) targets.
  enc_class Java_Static_Call (method meth) %{ // JAVA STATIC CALL
    // CALL to fixup routine. Fixup routine uses ScopeDesc info to determine
    // who we intended to call.
    MacroAssembler _masm(&cbuf);
    cbuf.set_insts_mark();

    if ( !_method ) {
      // Target is a runtime stub, not a Java method.
      __ relocate(relocInfo::runtime_call_type);
    } else if(_optimized_virtual) {
      __ relocate(relocInfo::opt_virtual_call_type);
    } else {
      __ relocate(relocInfo::static_call_type);
    }

    __ patchable_call((address)($meth$$method));
    if( _method ) { // Emit stub for static call
      emit_java_to_interp(cbuf);
    }
  %}
  /*
   * Emit an inline-cache (virtual dispatch) call.
   * [Ref: LIR_Assembler::ic_call() ]
   */
  enc_class Java_Dynamic_Call (method meth) %{ // JAVA DYNAMIC CALL
    MacroAssembler _masm(&cbuf);
    __ block_comment("Java_Dynamic_Call");
    __ ic_call((address)$meth$$method);
  %}
  // Materialize a boolean flags value after a fast lock/unlock sequence:
  // flags := 0 if AT == 0 (success), otherwise 0xFFFFFFFF.
  enc_class Set_Flags_After_Fast_Lock_Unlock(FlagsReg cr) %{
    Register flags = $cr$$Register;
    Label L;

    MacroAssembler _masm(&cbuf);

    __ addu(flags, R0, R0);   // flags = 0
    __ beq(AT, R0, L);        // AT == 0: leave flags as 0
    __ delayed()->nop();      // branch delay slot
    __ move(flags, 0xFFFFFFFF);
    __ bind(L);
  %}
  // Slow-path subtype check: result := 0 when sub is a subtype of super,
  // 1 on a miss. length and T9 are used as scratch registers.
  enc_class enc_PartialSubtypeCheck(mRegP result, mRegP sub, mRegP super, mRegI tmp) %{
    Register result = $result$$Register;
    Register sub = $sub$$Register;
    Register super = $super$$Register;
    Register length = $tmp$$Register;
    Register tmp = T9;
    Label miss;

    /* 2012/9/28 Jin: result may be the same as sub
     *    47c   B40: #	B21 B41 <- B20  Freq: 0.155379
     *    47c   	partialSubtypeCheck result=S1, sub=S1, super=S3, length=S0
     *    4bc   	mov   S2, NULL #@loadConP
     *    4c0   	beq   S1, S2, B21 #@branchConP  P=0.999999 C=-1.000000
     */
    MacroAssembler _masm(&cbuf);
    Label done;
    // Falls through on hit, branches to miss on failure.
    __ check_klass_subtype_slow_path(sub, super, length, tmp,
                                     NULL, &miss,
                                     /*set_cond_codes:*/ true);
    /* 2013/7/22 Jin: Refer to X86_64's RDI */
    __ move(result, 0);   // hit: result = 0
    __ b(done);
    __ nop();             // branch delay slot

    __ bind(miss);
    __ move(result, 1);   // miss: result = 1
    __ bind(done);
  %}
3603 %}
3606 //---------MIPS FRAME--------------------------------------------------------------
3607 // Definition of frame structure and management information.
3608 //
3609 // S T A C K L A Y O U T Allocators stack-slot number
3610 // | (to get allocators register number
3611 // G Owned by | | v add SharedInfo::stack0)
3612 // r CALLER | |
3613 // o | +--------+ pad to even-align allocators stack-slot
3614 // w V | pad0 | numbers; owned by CALLER
3615 // t -----------+--------+----> Matcher::_in_arg_limit, unaligned
3616 // h ^ | in | 5
3617 // | | args | 4 Holes in incoming args owned by SELF
3618 // | | old | | 3
3619 // | | SP-+--------+----> Matcher::_old_SP, even aligned
3620 // v | | ret | 3 return address
3621 // Owned by +--------+
3622 // Self | pad2 | 2 pad to align old SP
3623 // | +--------+ 1
3624 // | | locks | 0
3625 // | +--------+----> SharedInfo::stack0, even aligned
3626 // | | pad1 | 11 pad to align new SP
3627 // | +--------+
3628 // | | | 10
3629 // | | spills | 9 spills
3630 // V | | 8 (pad0 slot for callee)
3631 // -----------+--------+----> Matcher::_out_arg_limit, unaligned
3632 // ^ | out | 7
3633 // | | args | 6 Holes in outgoing args owned by CALLEE
3634 // Owned by new | |
3635 // Callee SP-+--------+----> Matcher::_new_SP, even aligned
3636 // | |
3637 //
3638 // Note 1: Only region 8-11 is determined by the allocator. Region 0-5 is
3639 // known from SELF's arguments and the Java calling convention.
3640 // Region 6-7 is determined per call site.
3641 // Note 2: If the calling convention leaves holes in the incoming argument
3642 // area, those holes are owned by SELF. Holes in the outgoing area
//                        area, those holes are owned by SELF.  Holes in the outgoing area
//                        are owned by the CALLEE.  Holes should not be necessary in the
3644 // incoming area, as the Java calling convention is completely under
3645 // the control of the AD file. Doubles can be sorted and packed to
//                        avoid holes.  Holes in the outgoing arguments may be necessary for
3647 // varargs C calling conventions.
3648 // Note 3: Region 0-3 is even aligned, with pad2 as needed. Region 3-5 is
3649 // even aligned with pad0 as needed.
3650 // Region 6 is even aligned. Region 6-7 is NOT even aligned;
3651 // region 6-11 is even aligned; it may be padded out more so that
3652 // the region from SP to FP meets the minimum stack alignment.
3653 // Note 4: For I2C adapters, the incoming FP may not meet the minimum stack
3654 // alignment. Region 11, pad1, may be dynamically extended so that
3655 // SP meets the minimum alignment.
// Frame description: stack growth direction, special-purpose registers,
// alignment requirements, and the Java / C calling conventions used by the
// matcher and register allocator.
frame %{

  stack_direction(TOWARDS_LOW);

  // These two registers define part of the calling convention
  // between compiled code and the interpreter.
  // SEE StartI2CNode::calling_convention & StartC2INode::calling_convention & StartOSRNode::calling_convention
  // for more information. by yjl 3/16/2006

  inline_cache_reg(T1);                // Inline Cache Register
  interpreter_method_oop_reg(S3);      // Method Oop Register when calling interpreter
  /*
  inline_cache_reg(T1);                // Inline Cache Register or methodOop for I2C
  interpreter_arg_ptr_reg(A0);         // Argument pointer for I2C adapters
  */

  // Optional: name the operand used by cisc-spilling to access [stack_pointer + offset]
  cisc_spilling_operand_name(indOffset32);

  // Number of stack slots consumed by locking an object
  // generate Compile::sync_stack_slots
#ifdef _LP64
  sync_stack_slots(2);
#else
  sync_stack_slots(1);
#endif

  frame_pointer(SP);

  // Interpreter stores its frame pointer in a register which is
  // stored to the stack by I2CAdaptors.
  // I2CAdaptors convert from interpreted java to compiled java.

  interpreter_frame_pointer(FP);

  // generate Matcher::stack_alignment
  stack_alignment(StackAlignmentInBytes); //wordSize = sizeof(char*);

  // Number of stack slots between incoming argument block and the start of
  // a new frame.  The PROLOG must add this many slots to the stack.  The
  // EPILOG must remove this many slots.  Intel needs one slot for
  // return address.
  // generate Matcher::in_preserve_stack_slots
  //in_preserve_stack_slots(VerifyStackAtCalls + 2); //Now VerifyStackAtCalls is defined as false ! Leave one stack slot for ra and fp
  in_preserve_stack_slots(4); //Now VerifyStackAtCalls is defined as false ! Leave two stack slots for ra and fp

  // Number of outgoing stack slots killed above the out_preserve_stack_slots
  // for calls to C.  Supports the var-args backing area for register parms.
  varargs_C_out_slots_killed(0);

  // The after-PROLOG location of the return address.  Location of
  // return address specifies a type (REG or STACK) and a number
  // representing the register number (i.e. - use a register name) or
  // stack slot.
  // Ret Addr is on stack in slot 0 if no locks or verification or alignment.
  // Otherwise, it is above the locks and verification slot and alignment word
  //return_addr(STACK -1+ round_to(1+VerifyStackAtCalls+Compile::current()->sync()*Compile::current()->sync_stack_slots(),WordsPerLong));
  return_addr(REG RA);

  // Body of function which returns an integer array locating
  // arguments either in registers or in stack slots.  Passed an array
  // of ideal registers called "sig" and a "length" count.  Stack-slot
  // offsets are based on outgoing arguments, i.e. a CALLER setting up
  // arguments for a CALLEE.  Incoming stack arguments are
  // automatically biased by the preserve_stack_slots field above.

  // will generated to Matcher::calling_convention(OptoRegPair *sig, uint length, bool is_outgoing)
  // StartNode::calling_convention call this. by yjl 3/16/2006
  calling_convention %{
    SharedRuntime::java_calling_convention(sig_bt, regs, length, false);
  %}

  // Body of function which returns an integer array locating
  // arguments either in registers or in stack slots.  Passed an array
  // of ideal registers called "sig" and a "length" count.  Stack-slot
  // offsets are based on outgoing arguments, i.e. a CALLER setting up
  // arguments for a CALLEE.  Incoming stack arguments are
  // automatically biased by the preserve_stack_slots field above.

  // SEE CallRuntimeNode::calling_convention for more information. by yjl 3/16/2006
  c_calling_convention %{
    (void) SharedRuntime::c_calling_convention(sig_bt, regs, /*regs2=*/NULL, length);
  %}

  // Location of C & interpreter return values
  // register(s) contain(s) return value for Op_StartI2C and Op_StartOSR.
  // SEE Matcher::match. by yjl 3/16/2006
  c_return_value %{
    assert( ideal_reg >= Op_RegI && ideal_reg <= Op_RegL, "only return normal values" );
    /* -- , -- , Op_RegN, Op_RegI, Op_RegP, Op_RegF, Op_RegD, Op_RegL */
    static int lo[Op_RegL+1] = { 0, 0, V0_num, V0_num, V0_num, F0_num, F0_num, V0_num };
    static int hi[Op_RegL+1] = { 0, 0, OptoReg::Bad, OptoReg::Bad, V0_H_num, OptoReg::Bad, F0_H_num, V0_H_num };
    return OptoRegPair(hi[ideal_reg],lo[ideal_reg]);
  %}

  // Location of return values
  // register(s) contain(s) return value for Op_StartC2I and Op_Start.
  // SEE Matcher::match. by yjl 3/16/2006

  return_value %{
    assert( ideal_reg >= Op_RegI && ideal_reg <= Op_RegL, "only return normal values" );
    /* -- , -- , Op_RegN, Op_RegI, Op_RegP, Op_RegF, Op_RegD, Op_RegL */
    static int lo[Op_RegL+1] = { 0, 0, V0_num, V0_num, V0_num, F0_num, F0_num, V0_num };
    static int hi[Op_RegL+1] = { 0, 0, OptoReg::Bad, OptoReg::Bad, V0_H_num, OptoReg::Bad, F0_H_num, V0_H_num};
    return OptoRegPair(hi[ideal_reg],lo[ideal_reg]);
  %}

%}
//----------ATTRIBUTES---------------------------------------------------------
//----------Operand Attributes-------------------------------------------------
op_attrib op_cost(0);          // Required cost attribute

//----------Instruction Attributes---------------------------------------------
ins_attrib ins_cost(100);       // Required cost attribute
ins_attrib ins_size(32);        // Required size attribute (in bits)
ins_attrib ins_pc_relative(0);  // Required PC Relative flag
ins_attrib ins_short_branch(0); // Required flag: is this instruction a
                                // non-matching short branch variant of some
                                // long branch?
ins_attrib ins_alignment(4);    // Required alignment attribute (must be a power of 2)
                                // specifies the alignment that some part of the instruction (not
                                // necessarily the start) requires.  If > 1, a compute_padding()
                                // function must be provided for the instruction
//----------OPERANDS-----------------------------------------------------------
// Operand definitions must precede instruction definitions for correct parsing
// in the ADLC because operands constitute user defined types which are used in
// instruction definitions.

// Vectors
// 64-bit vector operand, allocated in the double-FP register class.
operand vecD() %{
  constraint(ALLOC_IN_RC(dbl_reg));
  match(VecD);

  format %{ %}
  interface(REG_INTER);
%}

// Flags register, used as output of compare instructions
operand FlagsReg() %{
  constraint(ALLOC_IN_RC(mips_flags));
  match(RegFlags);

  format %{ "EFLAGS" %}
  interface(REG_INTER);
%}
//----------Simple Operands----------------------------------------------------
//TODO: Should we need to define some more special immediate number ?
// Immediate Operands
// Integer Immediate (any 32-bit int constant)
operand immI() %{
  match(ConI);
  //TODO: should not match immI8 here LEE
  match(immI8);

  op_cost(20);
  format %{ %}
  interface(CONST_INTER);
%}

// Long Immediate 8-bit
operand immL8()
%{
  predicate(-0x80L <= n->get_long() && n->get_long() < 0x80L);
  match(ConL);

  op_cost(5);
  format %{ %}
  interface(CONST_INTER);
%}

// Constant for test vs zero
operand immI0() %{
  predicate(n->get_int() == 0);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Constant for increment
operand immI1() %{
  predicate(n->get_int() == 1);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Constant for decrement
operand immI_M1() %{
  predicate(n->get_int() == -1);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Largest signed 32-bit int (Integer.MAX_VALUE)
operand immI_MaxI() %{
  predicate(n->get_int() == 2147483647);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Valid scale values for addressing modes (0..3)
operand immI2() %{
  predicate(0 <= n->get_int() && (n->get_int() <= 3));
  match(ConI);

  format %{ %}
  interface(CONST_INTER);
%}

// Signed 8-bit int immediate
operand immI8() %{
  predicate((-128 <= n->get_int()) && (n->get_int() <= 127));
  match(ConI);

  op_cost(5);
  format %{ %}
  interface(CONST_INTER);
%}
// Signed 16-bit int immediate (fits MIPS simm16 fields)
operand immI16() %{
  predicate((-32768 <= n->get_int()) && (n->get_int() <= 32767));
  match(ConI);

  op_cost(10);
  format %{ %}
  interface(CONST_INTER);
%}

// Constant for long shifts
operand immI_32() %{
  predicate( n->get_int() == 32 );
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Maximum long shift amount
operand immI_63() %{
  predicate( n->get_int() == 63 );
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Valid 32-bit shift amount range
operand immI_0_31() %{
  predicate( n->get_int() >= 0 && n->get_int() <= 31 );
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Operand for non-negative integer mask (contiguous low-bit mask)
operand immI_nonneg_mask() %{
  predicate( (n->get_int() >= 0) && (Assembler::is_int_mask(n->get_int()) != -1) );
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Shift amounts selecting the 64-bit shift variants (dsll32 etc.)
operand immI_32_63() %{
  predicate( n->get_int() >= 32 && n->get_int() <= 63 );
  match(ConI);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}

// Negatable simm16: -imm still fits a signed 16-bit field (sub via addiu)
operand immI16_sub() %{
  predicate((-32767 <= n->get_int()) && (n->get_int() <= 32768));
  match(ConI);

  op_cost(10);
  format %{ %}
  interface(CONST_INTER);
%}

// Non-negative simm16
operand immI_0_32767() %{
  predicate( n->get_int() >= 0 && n->get_int() <= 32767 );
  match(ConI);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}

// Unsigned 16-bit int immediate (fits MIPS uimm16 fields, e.g. andi/ori)
operand immI_0_65535() %{
  predicate( n->get_int() >= 0 && n->get_int() <= 65535 );
  match(ConI);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}
// Integer constant 1
operand immI_1() %{
  predicate( n->get_int() == 1 );
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Integer constant 2
operand immI_2() %{
  predicate( n->get_int() == 2 );
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Integer constant 3
operand immI_3() %{
  predicate( n->get_int() == 3 );
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Integer constant 7
operand immI_7() %{
  predicate( n->get_int() == 7 );
  match(ConI);

  format %{ %}
  interface(CONST_INTER);
%}

// Immediates for special shifts (sign extend)

// Constants for increment
operand immI_16() %{
  predicate( n->get_int() == 16 );
  match(ConI);

  format %{ %}
  interface(CONST_INTER);
%}

// Shift amount for sign-extending a byte (32 - 8 = 24)
operand immI_24() %{
  predicate( n->get_int() == 24 );
  match(ConI);

  format %{ %}
  interface(CONST_INTER);
%}

// Constant for byte-wide masking
operand immI_255() %{
  predicate( n->get_int() == 255 );
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Constant for halfword-wide masking
operand immI_65535() %{
  predicate( n->get_int() == 65535 );
  match(ConI);

  op_cost(5);
  format %{ %}
  interface(CONST_INTER);
%}

// Constant 0x10000
operand immI_65536() %{
  predicate( n->get_int() == 65536 );
  match(ConI);

  op_cost(5);
  format %{ %}
  interface(CONST_INTER);
%}

// Constant -0x10000 (high-halfword mask)
operand immI_M65536() %{
  predicate( n->get_int() == -65536 );
  match(ConI);

  op_cost(5);
  format %{ %}
  interface(CONST_INTER);
%}
// Pointer Immediate (any pointer constant)
operand immP() %{
  match(ConP);

  op_cost(10);
  format %{ %}
  interface(CONST_INTER);
%}

// Non-relocatable pointer that fits in the low 31 bits
operand immP31()
%{
  predicate(n->as_Type()->type()->reloc() == relocInfo::none
            && (n->get_ptr() >> 31) == 0);
  match(ConP);

  op_cost(5);
  format %{ %}
  interface(CONST_INTER);
%}

// NULL Pointer Immediate
operand immP0() %{
  predicate( n->get_ptr() == 0 );
  match(ConP);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}

// Pointer Immediate: 64-bit, materialized with a set sequence
operand immP_set() %{
  match(ConP);

  op_cost(5);
  // formats are generated automatically for constants and base registers
  format %{ %}
  interface(CONST_INTER);
%}

// Pointer Immediate: 64-bit, better loaded from the constant table
// (oops, or constants needing more than 3 instructions to synthesize)
operand immP_load() %{
  predicate(n->bottom_type()->isa_oop_ptr() || (MacroAssembler::insts_for_set64(n->get_ptr()) > 3));
  match(ConP);

  op_cost(5);
  // formats are generated automatically for constants and base registers
  format %{ %}
  interface(CONST_INTER);
%}

// Pointer Immediate: 64-bit non-oop, cheap to synthesize (<= 3 instructions)
operand immP_no_oop_cheap() %{
  predicate(!n->bottom_type()->isa_oop_ptr() && (MacroAssembler::insts_for_set64(n->get_ptr()) <= 3));
  match(ConP);

  op_cost(5);
  // formats are generated automatically for constants and base registers
  format %{ %}
  interface(CONST_INTER);
%}

// Pointer for polling page
operand immP_poll() %{
  predicate(n->get_ptr() != 0 && n->get_ptr() == (intptr_t)os::get_polling_page());
  match(ConP);
  op_cost(5);

  format %{ %}
  interface(CONST_INTER);
%}

// Narrow (compressed) oop Pointer Immediate
operand immN() %{
  match(ConN);

  op_cost(10);
  format %{ %}
  interface(CONST_INTER);
%}

// Narrow (compressed) klass Pointer Immediate
operand immNKlass() %{
  match(ConNKlass);

  op_cost(10);
  format %{ %}
  interface(CONST_INTER);
%}

// NULL narrow Pointer Immediate
operand immN0() %{
  predicate(n->get_narrowcon() == 0);
  match(ConN);

  op_cost(5);
  format %{ %}
  interface(CONST_INTER);
%}
// Long Immediate (any 64-bit long constant)
operand immL() %{
  match(ConL);

  op_cost(20);
  format %{ %}
  interface(CONST_INTER);
%}

// Long Immediate zero
operand immL0() %{
  predicate( n->get_long() == 0L );
  match(ConL);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}

// Long constant 7
operand immL7() %{
  predicate( n->get_long() == 7L );
  match(ConL);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}

// Long constant -1 (all bits set)
operand immL_M1() %{
  predicate( n->get_long() == -1L );
  match(ConL);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}

// bit 0..2 zero
operand immL_M8() %{
  predicate( n->get_long() == -8L );
  match(ConL);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}

// bit 2 zero
operand immL_M5() %{
  predicate( n->get_long() == -5L );
  match(ConL);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}

// bit 1..2 zero
operand immL_M7() %{
  predicate( n->get_long() == -7L );
  match(ConL);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}

// bit 0..1 zero
operand immL_M4() %{
  predicate( n->get_long() == -4L );
  match(ConL);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}

// bit 3..6 zero
operand immL_M121() %{
  predicate( n->get_long() == -121L );
  match(ConL);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}

// Long immediate from 0 to 127.
// Used for a shorter form of long mul by 10.
operand immL_127() %{
  predicate((0 <= n->get_long()) && (n->get_long() <= 127));
  match(ConL);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}

// Operand for non-negative long mask (contiguous low-bit mask)
operand immL_nonneg_mask() %{
  predicate( (n->get_long() >= 0) && (Assembler::is_jlong_mask(n->get_long()) != -1) );
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Unsigned 16-bit long immediate (fits andi/ori fields)
operand immL_0_65535() %{
  predicate( n->get_long() >= 0 && n->get_long() <= 65535 );
  match(ConL);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}

// Long Immediate: cheap (materialize in <= 3 instructions)
operand immL_cheap() %{
  predicate(MacroAssembler::insts_for_set64(n->get_long()) <= 3);
  match(ConL);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}

// Long Immediate: expensive (materialize in > 3 instructions)
operand immL_expensive() %{
  predicate(MacroAssembler::insts_for_set64(n->get_long()) > 3);
  match(ConL);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}

// Signed 16-bit long immediate
operand immL16() %{
  predicate((-32768 <= n->get_long()) && (n->get_long() <= 32767));
  match(ConL);

  op_cost(10);
  format %{ %}
  interface(CONST_INTER);
%}

// Negatable simm16 long: -imm still fits a signed 16-bit field
operand immL16_sub() %{
  predicate((-32767 <= n->get_long()) && (n->get_long() <= 32768));
  match(ConL);

  op_cost(10);
  format %{ %}
  interface(CONST_INTER);
%}

// Long Immediate: low 32-bit mask
operand immL_32bits() %{
  predicate(n->get_long() == 0xFFFFFFFFL);
  match(ConL);
  op_cost(20);

  format %{ %}
  interface(CONST_INTER);
%}

// Long Immediate 32-bit signed
operand immL32()
%{
  predicate(n->get_long() == (int) (n->get_long()));
  match(ConL);

  op_cost(15);
  format %{ %}
  interface(CONST_INTER);
%}
// single-precision floating-point zero (matched by bit pattern, so -0.0f
// with a nonzero sign bit does not match)
operand immF0() %{
  predicate(jint_cast(n->getf()) == 0);
  match(ConF);

  op_cost(5);
  format %{ %}
  interface(CONST_INTER);
%}

// single-precision floating-point immediate
operand immF() %{
  match(ConF);

  op_cost(20);
  format %{ %}
  interface(CONST_INTER);
%}

// double-precision floating-point zero (matched by bit pattern)
operand immD0() %{
  predicate(jlong_cast(n->getd()) == 0);
  match(ConD);

  op_cost(5);
  format %{ %}
  interface(CONST_INTER);
%}

// double-precision floating-point immediate
operand immD() %{
  match(ConD);

  op_cost(20);
  format %{ %}
  interface(CONST_INTER);
%}
4382 // Register Operands
4383 // Integer Register
// General allocatable integer register operand (register class int_reg).
4384 operand mRegI() %{
4385 constraint(ALLOC_IN_RC(int_reg));
4386 match(RegI);
4388 format %{ %}
4389 interface(REG_INTER);
4390 %}
// Integer register excluding the "Ax" registers (class no_Ax_int_reg).
// The extra match(mRegI) lets the matcher use this operand wherever the
// general mRegI operand is expected; the same idiom is used by every
// fixed-register operand below, each of which pins allocation to one
// specific named register via its singleton register class.
4392 operand no_Ax_mRegI() %{
4393 constraint(ALLOC_IN_RC(no_Ax_int_reg));
4394 match(RegI);
4395 match(mRegI);
4397 format %{ %}
4398 interface(REG_INTER);
4399 %}
4401 operand mS0RegI() %{
4402 constraint(ALLOC_IN_RC(s0_reg));
4403 match(RegI);
4404 match(mRegI);
4406 format %{ "S0" %}
4407 interface(REG_INTER);
4408 %}
4410 operand mS1RegI() %{
4411 constraint(ALLOC_IN_RC(s1_reg));
4412 match(RegI);
4413 match(mRegI);
4415 format %{ "S1" %}
4416 interface(REG_INTER);
4417 %}
4419 operand mS2RegI() %{
4420 constraint(ALLOC_IN_RC(s2_reg));
4421 match(RegI);
4422 match(mRegI);
4424 format %{ "S2" %}
4425 interface(REG_INTER);
4426 %}
4428 operand mS3RegI() %{
4429 constraint(ALLOC_IN_RC(s3_reg));
4430 match(RegI);
4431 match(mRegI);
4433 format %{ "S3" %}
4434 interface(REG_INTER);
4435 %}
4437 operand mS4RegI() %{
4438 constraint(ALLOC_IN_RC(s4_reg));
4439 match(RegI);
4440 match(mRegI);
4442 format %{ "S4" %}
4443 interface(REG_INTER);
4444 %}
4446 operand mS5RegI() %{
4447 constraint(ALLOC_IN_RC(s5_reg));
4448 match(RegI);
4449 match(mRegI);
4451 format %{ "S5" %}
4452 interface(REG_INTER);
4453 %}
4455 operand mS6RegI() %{
4456 constraint(ALLOC_IN_RC(s6_reg));
4457 match(RegI);
4458 match(mRegI);
4460 format %{ "S6" %}
4461 interface(REG_INTER);
4462 %}
4464 operand mS7RegI() %{
4465 constraint(ALLOC_IN_RC(s7_reg));
4466 match(RegI);
4467 match(mRegI);
4469 format %{ "S7" %}
4470 interface(REG_INTER);
4471 %}
4474 operand mT0RegI() %{
4475 constraint(ALLOC_IN_RC(t0_reg));
4476 match(RegI);
4477 match(mRegI);
4479 format %{ "T0" %}
4480 interface(REG_INTER);
4481 %}
4483 operand mT1RegI() %{
4484 constraint(ALLOC_IN_RC(t1_reg));
4485 match(RegI);
4486 match(mRegI);
4488 format %{ "T1" %}
4489 interface(REG_INTER);
4490 %}
4492 operand mT2RegI() %{
4493 constraint(ALLOC_IN_RC(t2_reg));
4494 match(RegI);
4495 match(mRegI);
4497 format %{ "T2" %}
4498 interface(REG_INTER);
4499 %}
4501 operand mT3RegI() %{
4502 constraint(ALLOC_IN_RC(t3_reg));
4503 match(RegI);
4504 match(mRegI);
4506 format %{ "T3" %}
4507 interface(REG_INTER);
4508 %}
4510 operand mT8RegI() %{
4511 constraint(ALLOC_IN_RC(t8_reg));
4512 match(RegI);
4513 match(mRegI);
4515 format %{ "T8" %}
4516 interface(REG_INTER);
4517 %}
4519 operand mT9RegI() %{
4520 constraint(ALLOC_IN_RC(t9_reg));
4521 match(RegI);
4522 match(mRegI);
4524 format %{ "T9" %}
4525 interface(REG_INTER);
4526 %}
4528 operand mA0RegI() %{
4529 constraint(ALLOC_IN_RC(a0_reg));
4530 match(RegI);
4531 match(mRegI);
4533 format %{ "A0" %}
4534 interface(REG_INTER);
4535 %}
4537 operand mA1RegI() %{
4538 constraint(ALLOC_IN_RC(a1_reg));
4539 match(RegI);
4540 match(mRegI);
4542 format %{ "A1" %}
4543 interface(REG_INTER);
4544 %}
4546 operand mA2RegI() %{
4547 constraint(ALLOC_IN_RC(a2_reg));
4548 match(RegI);
4549 match(mRegI);
4551 format %{ "A2" %}
4552 interface(REG_INTER);
4553 %}
4555 operand mA3RegI() %{
4556 constraint(ALLOC_IN_RC(a3_reg));
4557 match(RegI);
4558 match(mRegI);
4560 format %{ "A3" %}
4561 interface(REG_INTER);
4562 %}
4564 operand mA4RegI() %{
4565 constraint(ALLOC_IN_RC(a4_reg));
4566 match(RegI);
4567 match(mRegI);
4569 format %{ "A4" %}
4570 interface(REG_INTER);
4571 %}
4573 operand mA5RegI() %{
4574 constraint(ALLOC_IN_RC(a5_reg));
4575 match(RegI);
4576 match(mRegI);
4578 format %{ "A5" %}
4579 interface(REG_INTER);
4580 %}
4582 operand mA6RegI() %{
4583 constraint(ALLOC_IN_RC(a6_reg));
4584 match(RegI);
4585 match(mRegI);
4587 format %{ "A6" %}
4588 interface(REG_INTER);
4589 %}
4591 operand mA7RegI() %{
4592 constraint(ALLOC_IN_RC(a7_reg));
4593 match(RegI);
4594 match(mRegI);
4596 format %{ "A7" %}
4597 interface(REG_INTER);
4598 %}
4600 operand mV0RegI() %{
4601 constraint(ALLOC_IN_RC(v0_reg));
4602 match(RegI);
4603 match(mRegI);
4605 format %{ "V0" %}
4606 interface(REG_INTER);
4607 %}
4609 operand mV1RegI() %{
4610 constraint(ALLOC_IN_RC(v1_reg));
4611 match(RegI);
4612 match(mRegI);
4614 format %{ "V1" %}
4615 interface(REG_INTER);
4616 %}
// Narrow (compressed) oop register operands: RegN values share the same
// int_reg allocatable class as plain ints. The fixed-register variants
// below mirror the mRegI family one-for-one; match(mRegN) lets each of
// them stand in for the general operand in instruction patterns.
4618 operand mRegN() %{
4619 constraint(ALLOC_IN_RC(int_reg));
4620 match(RegN);
4622 format %{ %}
4623 interface(REG_INTER);
4624 %}
4626 operand t0_RegN() %{
4627 constraint(ALLOC_IN_RC(t0_reg));
4628 match(RegN);
4629 match(mRegN);
4631 format %{ %}
4632 interface(REG_INTER);
4633 %}
4635 operand t1_RegN() %{
4636 constraint(ALLOC_IN_RC(t1_reg));
4637 match(RegN);
4638 match(mRegN);
4640 format %{ %}
4641 interface(REG_INTER);
4642 %}
4644 operand t2_RegN() %{
4645 constraint(ALLOC_IN_RC(t2_reg));
4646 match(RegN);
4647 match(mRegN);
4649 format %{ %}
4650 interface(REG_INTER);
4651 %}
4653 operand t3_RegN() %{
4654 constraint(ALLOC_IN_RC(t3_reg));
4655 match(RegN);
4656 match(mRegN);
4658 format %{ %}
4659 interface(REG_INTER);
4660 %}
4662 operand t8_RegN() %{
4663 constraint(ALLOC_IN_RC(t8_reg));
4664 match(RegN);
4665 match(mRegN);
4667 format %{ %}
4668 interface(REG_INTER);
4669 %}
4671 operand t9_RegN() %{
4672 constraint(ALLOC_IN_RC(t9_reg));
4673 match(RegN);
4674 match(mRegN);
4676 format %{ %}
4677 interface(REG_INTER);
4678 %}
4680 operand a0_RegN() %{
4681 constraint(ALLOC_IN_RC(a0_reg));
4682 match(RegN);
4683 match(mRegN);
4685 format %{ %}
4686 interface(REG_INTER);
4687 %}
4689 operand a1_RegN() %{
4690 constraint(ALLOC_IN_RC(a1_reg));
4691 match(RegN);
4692 match(mRegN);
4694 format %{ %}
4695 interface(REG_INTER);
4696 %}
4698 operand a2_RegN() %{
4699 constraint(ALLOC_IN_RC(a2_reg));
4700 match(RegN);
4701 match(mRegN);
4703 format %{ %}
4704 interface(REG_INTER);
4705 %}
4707 operand a3_RegN() %{
4708 constraint(ALLOC_IN_RC(a3_reg));
4709 match(RegN);
4710 match(mRegN);
4712 format %{ %}
4713 interface(REG_INTER);
4714 %}
4716 operand a4_RegN() %{
4717 constraint(ALLOC_IN_RC(a4_reg));
4718 match(RegN);
4719 match(mRegN);
4721 format %{ %}
4722 interface(REG_INTER);
4723 %}
4725 operand a5_RegN() %{
4726 constraint(ALLOC_IN_RC(a5_reg));
4727 match(RegN);
4728 match(mRegN);
4730 format %{ %}
4731 interface(REG_INTER);
4732 %}
4734 operand a6_RegN() %{
4735 constraint(ALLOC_IN_RC(a6_reg));
4736 match(RegN);
4737 match(mRegN);
4739 format %{ %}
4740 interface(REG_INTER);
4741 %}
4743 operand a7_RegN() %{
4744 constraint(ALLOC_IN_RC(a7_reg));
4745 match(RegN);
4746 match(mRegN);
4748 format %{ %}
4749 interface(REG_INTER);
4750 %}
4752 operand s0_RegN() %{
4753 constraint(ALLOC_IN_RC(s0_reg));
4754 match(RegN);
4755 match(mRegN);
4757 format %{ %}
4758 interface(REG_INTER);
4759 %}
4761 operand s1_RegN() %{
4762 constraint(ALLOC_IN_RC(s1_reg));
4763 match(RegN);
4764 match(mRegN);
4766 format %{ %}
4767 interface(REG_INTER);
4768 %}
4770 operand s2_RegN() %{
4771 constraint(ALLOC_IN_RC(s2_reg));
4772 match(RegN);
4773 match(mRegN);
4775 format %{ %}
4776 interface(REG_INTER);
4777 %}
4779 operand s3_RegN() %{
4780 constraint(ALLOC_IN_RC(s3_reg));
4781 match(RegN);
4782 match(mRegN);
4784 format %{ %}
4785 interface(REG_INTER);
4786 %}
4788 operand s4_RegN() %{
4789 constraint(ALLOC_IN_RC(s4_reg));
4790 match(RegN);
4791 match(mRegN);
4793 format %{ %}
4794 interface(REG_INTER);
4795 %}
4797 operand s5_RegN() %{
4798 constraint(ALLOC_IN_RC(s5_reg));
4799 match(RegN);
4800 match(mRegN);
4802 format %{ %}
4803 interface(REG_INTER);
4804 %}
4806 operand s6_RegN() %{
4807 constraint(ALLOC_IN_RC(s6_reg));
4808 match(RegN);
4809 match(mRegN);
4811 format %{ %}
4812 interface(REG_INTER);
4813 %}
4815 operand s7_RegN() %{
4816 constraint(ALLOC_IN_RC(s7_reg));
4817 match(RegN);
4818 match(mRegN);
4820 format %{ %}
4821 interface(REG_INTER);
4822 %}
4824 operand v0_RegN() %{
4825 constraint(ALLOC_IN_RC(v0_reg));
4826 match(RegN);
4827 match(mRegN);
4829 format %{ %}
4830 interface(REG_INTER);
4831 %}
4833 operand v1_RegN() %{
4834 constraint(ALLOC_IN_RC(v1_reg));
4835 match(RegN);
4836 match(mRegN);
4838 format %{ %}
4839 interface(REG_INTER);
4840 %}
4842 // Pointer Register
// General allocatable pointer register operand (class p_reg).
4843 operand mRegP() %{
4844 constraint(ALLOC_IN_RC(p_reg));
4845 match(RegP);
4847 format %{ %}
4848 interface(REG_INTER);
4849 %}
// Pointer register class that excludes T8. Every fixed-register pointer
// operand below except t8_RegP also lists match(no_T8_mRegP) so it can
// stand in for this operand; t8_RegP deliberately omits it, since T8 is
// the one register this class excludes.
4851 operand no_T8_mRegP() %{
4852 constraint(ALLOC_IN_RC(no_T8_p_reg));
4853 match(RegP);
4854 match(mRegP);
4856 format %{ %}
4857 interface(REG_INTER);
4858 %}
4860 operand s0_RegP()
4861 %{
4862 constraint(ALLOC_IN_RC(s0_long_reg));
4863 match(RegP);
4864 match(mRegP);
4865 match(no_T8_mRegP);
4867 format %{ %}
4868 interface(REG_INTER);
4869 %}
4871 operand s1_RegP()
4872 %{
4873 constraint(ALLOC_IN_RC(s1_long_reg));
4874 match(RegP);
4875 match(mRegP);
4876 match(no_T8_mRegP);
4878 format %{ %}
4879 interface(REG_INTER);
4880 %}
4882 operand s2_RegP()
4883 %{
4884 constraint(ALLOC_IN_RC(s2_long_reg));
4885 match(RegP);
4886 match(mRegP);
4887 match(no_T8_mRegP);
4889 format %{ %}
4890 interface(REG_INTER);
4891 %}
4893 operand s3_RegP()
4894 %{
4895 constraint(ALLOC_IN_RC(s3_long_reg));
4896 match(RegP);
4897 match(mRegP);
4898 match(no_T8_mRegP);
4900 format %{ %}
4901 interface(REG_INTER);
4902 %}
4904 operand s4_RegP()
4905 %{
4906 constraint(ALLOC_IN_RC(s4_long_reg));
4907 match(RegP);
4908 match(mRegP);
4909 match(no_T8_mRegP);
4911 format %{ %}
4912 interface(REG_INTER);
4913 %}
4915 operand s5_RegP()
4916 %{
4917 constraint(ALLOC_IN_RC(s5_long_reg));
4918 match(RegP);
4919 match(mRegP);
4920 match(no_T8_mRegP);
4922 format %{ %}
4923 interface(REG_INTER);
4924 %}
4926 operand s6_RegP()
4927 %{
4928 constraint(ALLOC_IN_RC(s6_long_reg));
4929 match(RegP);
4930 match(mRegP);
4931 match(no_T8_mRegP);
4933 format %{ %}
4934 interface(REG_INTER);
4935 %}
4937 operand s7_RegP()
4938 %{
4939 constraint(ALLOC_IN_RC(s7_long_reg));
4940 match(RegP);
4941 match(mRegP);
4942 match(no_T8_mRegP);
4944 format %{ %}
4945 interface(REG_INTER);
4946 %}
4948 operand t0_RegP()
4949 %{
4950 constraint(ALLOC_IN_RC(t0_long_reg));
4951 match(RegP);
4952 match(mRegP);
4953 match(no_T8_mRegP);
4955 format %{ %}
4956 interface(REG_INTER);
4957 %}
4959 operand t1_RegP()
4960 %{
4961 constraint(ALLOC_IN_RC(t1_long_reg));
4962 match(RegP);
4963 match(mRegP);
4964 match(no_T8_mRegP);
4966 format %{ %}
4967 interface(REG_INTER);
4968 %}
4970 operand t2_RegP()
4971 %{
4972 constraint(ALLOC_IN_RC(t2_long_reg));
4973 match(RegP);
4974 match(mRegP);
4975 match(no_T8_mRegP);
4977 format %{ %}
4978 interface(REG_INTER);
4979 %}
4981 operand t3_RegP()
4982 %{
4983 constraint(ALLOC_IN_RC(t3_long_reg));
4984 match(RegP);
4985 match(mRegP);
4986 match(no_T8_mRegP);
4988 format %{ %}
4989 interface(REG_INTER);
4990 %}
// T8: intentionally no match(no_T8_mRegP) — T8 is excluded from that class.
4992 operand t8_RegP()
4993 %{
4994 constraint(ALLOC_IN_RC(t8_long_reg));
4995 match(RegP);
4996 match(mRegP);
4998 format %{ %}
4999 interface(REG_INTER);
5000 %}
5002 operand t9_RegP()
5003 %{
5004 constraint(ALLOC_IN_RC(t9_long_reg));
5005 match(RegP);
5006 match(mRegP);
5007 match(no_T8_mRegP);
5009 format %{ %}
5010 interface(REG_INTER);
5011 %}
5013 operand a0_RegP()
5014 %{
5015 constraint(ALLOC_IN_RC(a0_long_reg));
5016 match(RegP);
5017 match(mRegP);
5018 match(no_T8_mRegP);
5020 format %{ %}
5021 interface(REG_INTER);
5022 %}
5024 operand a1_RegP()
5025 %{
5026 constraint(ALLOC_IN_RC(a1_long_reg));
5027 match(RegP);
5028 match(mRegP);
5029 match(no_T8_mRegP);
5031 format %{ %}
5032 interface(REG_INTER);
5033 %}
5035 operand a2_RegP()
5036 %{
5037 constraint(ALLOC_IN_RC(a2_long_reg));
5038 match(RegP);
5039 match(mRegP);
5040 match(no_T8_mRegP);
5042 format %{ %}
5043 interface(REG_INTER);
5044 %}
5046 operand a3_RegP()
5047 %{
5048 constraint(ALLOC_IN_RC(a3_long_reg));
5049 match(RegP);
5050 match(mRegP);
5051 match(no_T8_mRegP);
5053 format %{ %}
5054 interface(REG_INTER);
5055 %}
5057 operand a4_RegP()
5058 %{
5059 constraint(ALLOC_IN_RC(a4_long_reg));
5060 match(RegP);
5061 match(mRegP);
5062 match(no_T8_mRegP);
5064 format %{ %}
5065 interface(REG_INTER);
5066 %}
5069 operand a5_RegP()
5070 %{
5071 constraint(ALLOC_IN_RC(a5_long_reg));
5072 match(RegP);
5073 match(mRegP);
5074 match(no_T8_mRegP);
5076 format %{ %}
5077 interface(REG_INTER);
5078 %}
5080 operand a6_RegP()
5081 %{
5082 constraint(ALLOC_IN_RC(a6_long_reg));
5083 match(RegP);
5084 match(mRegP);
5085 match(no_T8_mRegP);
5087 format %{ %}
5088 interface(REG_INTER);
5089 %}
5091 operand a7_RegP()
5092 %{
5093 constraint(ALLOC_IN_RC(a7_long_reg));
5094 match(RegP);
5095 match(mRegP);
5096 match(no_T8_mRegP);
5098 format %{ %}
5099 interface(REG_INTER);
5100 %}
5102 operand v0_RegP()
5103 %{
5104 constraint(ALLOC_IN_RC(v0_long_reg));
5105 match(RegP);
5106 match(mRegP);
5107 match(no_T8_mRegP);
5109 format %{ %}
5110 interface(REG_INTER);
5111 %}
5113 operand v1_RegP()
5114 %{
5115 constraint(ALLOC_IN_RC(v1_long_reg));
5116 match(RegP);
5117 match(mRegP);
5118 match(no_T8_mRegP);
5120 format %{ %}
5121 interface(REG_INTER);
5122 %}
// Disabled SP/FP pointer operands, kept for reference.
5124 /*
5125 operand mSPRegP(mRegP reg) %{
5126 constraint(ALLOC_IN_RC(sp_reg));
5127 match(reg);
5129 format %{ "SP" %}
5130 interface(REG_INTER);
5131 %}
5133 operand mFPRegP(mRegP reg) %{
5134 constraint(ALLOC_IN_RC(fp_reg));
5135 match(reg);
5137 format %{ "FP" %}
5138 interface(REG_INTER);
5139 %}
5140 */
// Long register operands. Same pattern as the int/pointer families: a
// general mRegL plus fixed-register variants pinned via singleton classes.
// NOTE(review): unlike the other families, no s5RegL/s6RegL/t9RegL are
// defined here — presumably no instruction pattern needs them; confirm
// before relying on symmetry with the mRegI/mRegP families.
5142 operand mRegL() %{
5143 constraint(ALLOC_IN_RC(long_reg));
5144 match(RegL);
5146 format %{ %}
5147 interface(REG_INTER);
5148 %}
5150 operand v0RegL() %{
5151 constraint(ALLOC_IN_RC(v0_long_reg));
5152 match(RegL);
5153 match(mRegL);
5155 format %{ %}
5156 interface(REG_INTER);
5157 %}
5159 operand v1RegL() %{
5160 constraint(ALLOC_IN_RC(v1_long_reg));
5161 match(RegL);
5162 match(mRegL);
5164 format %{ %}
5165 interface(REG_INTER);
5166 %}
5168 operand a0RegL() %{
5169 constraint(ALLOC_IN_RC(a0_long_reg));
5170 match(RegL);
5171 match(mRegL);
5173 format %{ "A0" %}
5174 interface(REG_INTER);
5175 %}
5177 operand a1RegL() %{
5178 constraint(ALLOC_IN_RC(a1_long_reg));
5179 match(RegL);
5180 match(mRegL);
5182 format %{ %}
5183 interface(REG_INTER);
5184 %}
5186 operand a2RegL() %{
5187 constraint(ALLOC_IN_RC(a2_long_reg));
5188 match(RegL);
5189 match(mRegL);
5191 format %{ %}
5192 interface(REG_INTER);
5193 %}
5195 operand a3RegL() %{
5196 constraint(ALLOC_IN_RC(a3_long_reg));
5197 match(RegL);
5198 match(mRegL);
5200 format %{ %}
5201 interface(REG_INTER);
5202 %}
5204 operand t0RegL() %{
5205 constraint(ALLOC_IN_RC(t0_long_reg));
5206 match(RegL);
5207 match(mRegL);
5209 format %{ %}
5210 interface(REG_INTER);
5211 %}
5213 operand t1RegL() %{
5214 constraint(ALLOC_IN_RC(t1_long_reg));
5215 match(RegL);
5216 match(mRegL);
5218 format %{ %}
5219 interface(REG_INTER);
5220 %}
5222 operand t2RegL() %{
5223 constraint(ALLOC_IN_RC(t2_long_reg));
5224 match(RegL);
5225 match(mRegL);
5227 format %{ %}
5228 interface(REG_INTER);
5229 %}
5231 operand t3RegL() %{
5232 constraint(ALLOC_IN_RC(t3_long_reg));
5233 match(RegL);
5234 match(mRegL);
5236 format %{ %}
5237 interface(REG_INTER);
5238 %}
5240 operand t8RegL() %{
5241 constraint(ALLOC_IN_RC(t8_long_reg));
5242 match(RegL);
5243 match(mRegL);
5245 format %{ %}
5246 interface(REG_INTER);
5247 %}
5249 operand a4RegL() %{
5250 constraint(ALLOC_IN_RC(a4_long_reg));
5251 match(RegL);
5252 match(mRegL);
5254 format %{ %}
5255 interface(REG_INTER);
5256 %}
5258 operand a5RegL() %{
5259 constraint(ALLOC_IN_RC(a5_long_reg));
5260 match(RegL);
5261 match(mRegL);
5263 format %{ %}
5264 interface(REG_INTER);
5265 %}
5267 operand a6RegL() %{
5268 constraint(ALLOC_IN_RC(a6_long_reg));
5269 match(RegL);
5270 match(mRegL);
5272 format %{ %}
5273 interface(REG_INTER);
5274 %}
5276 operand a7RegL() %{
5277 constraint(ALLOC_IN_RC(a7_long_reg));
5278 match(RegL);
5279 match(mRegL);
5281 format %{ %}
5282 interface(REG_INTER);
5283 %}
5285 operand s0RegL() %{
5286 constraint(ALLOC_IN_RC(s0_long_reg));
5287 match(RegL);
5288 match(mRegL);
5290 format %{ %}
5291 interface(REG_INTER);
5292 %}
5294 operand s1RegL() %{
5295 constraint(ALLOC_IN_RC(s1_long_reg));
5296 match(RegL);
5297 match(mRegL);
5299 format %{ %}
5300 interface(REG_INTER);
5301 %}
5303 operand s2RegL() %{
5304 constraint(ALLOC_IN_RC(s2_long_reg));
5305 match(RegL);
5306 match(mRegL);
5308 format %{ %}
5309 interface(REG_INTER);
5310 %}
5312 operand s3RegL() %{
5313 constraint(ALLOC_IN_RC(s3_long_reg));
5314 match(RegL);
5315 match(mRegL);
5317 format %{ %}
5318 interface(REG_INTER);
5319 %}
5321 operand s4RegL() %{
5322 constraint(ALLOC_IN_RC(s4_long_reg));
5323 match(RegL);
5324 match(mRegL);
5326 format %{ %}
5327 interface(REG_INTER);
5328 %}
5330 operand s7RegL() %{
5331 constraint(ALLOC_IN_RC(s7_long_reg));
5332 match(RegL);
5333 match(mRegL);
5335 format %{ %}
5336 interface(REG_INTER);
5337 %}
5339 // Floating register operands
// Single-precision FP register (allocatable class flt_reg).
5340 operand regF() %{
5341 constraint(ALLOC_IN_RC(flt_reg));
5342 match(RegF);
5344 format %{ %}
5345 interface(REG_INTER);
5346 %}
5348 //Double Precision Floating register operands
// Double-precision FP register (allocatable class dbl_reg).
5349 operand regD() %{
5350 constraint(ALLOC_IN_RC(dbl_reg));
5351 match(RegD);
5353 format %{ %}
5354 interface(REG_INTER);
5355 %}
5357 //----------Memory Operands----------------------------------------------------
// Each memory operand decomposes an address expression (matched against
// AddP/LShiftL/ConvI2L/DecodeN trees) into the base/index/scale/disp
// quadruple exposed through MEMORY_INTER. The *Narrow variants accept a
// compressed-oop base and are guarded by narrow_oop_shift() == 0, i.e.
// they are only legal when narrow oops are unscaled; likewise the
// *NarrowKlass variants require narrow_klass_shift() == 0.
5358 // Indirect Memory Operand
5359 operand indirect(mRegP reg) %{
5360 constraint(ALLOC_IN_RC(p_reg));
5361 match(reg);
5363 format %{ "[$reg] @ indirect" %}
5364 interface(MEMORY_INTER) %{
5365 base($reg);
5366 index(0x0); /* NO_INDEX */
5367 scale(0x0);
5368 disp(0x0);
5369 %}
5370 %}
5372 // Indirect Memory Plus Short Offset Operand
5373 operand indOffset8(mRegP reg, immL8 off)
5374 %{
5375 constraint(ALLOC_IN_RC(p_reg));
5376 match(AddP reg off);
5378 op_cost(10);
5379 format %{ "[$reg + $off (8-bit)] @ indOffset8" %}
5380 interface(MEMORY_INTER) %{
5381 base($reg);
5382 index(0x0); /* NO_INDEX */
5383 scale(0x0);
5384 disp($off);
5385 %}
5386 %}
5388 // Indirect Memory Times Scale Plus Index Register
5389 operand indIndexScale(mRegP reg, mRegL lreg, immI2 scale)
5390 %{
5391 constraint(ALLOC_IN_RC(p_reg));
5392 match(AddP reg (LShiftL lreg scale));
5394 op_cost(10);
5395 format %{"[$reg + $lreg << $scale] @ indIndexScale" %}
5396 interface(MEMORY_INTER) %{
5397 base($reg);
5398 index($lreg);
5399 scale($scale);
5400 disp(0x0);
5401 %}
5402 %}
5405 // [base + index + offset]
5406 operand baseIndexOffset8(mRegP base, mRegL index, immL8 off)
5407 %{
5408 constraint(ALLOC_IN_RC(p_reg));
5409 op_cost(5);
5410 match(AddP (AddP base index) off);
5412 format %{ "[$base + $index + $off (8-bit)] @ baseIndexOffset8" %}
5413 interface(MEMORY_INTER) %{
5414 base($base);
5415 index($index);
5416 scale(0x0);
5417 disp($off);
5418 %}
5419 %}
5421 // [base + index + offset]
// Same shape as baseIndexOffset8 but the index arrives as an int that the
// ideal graph widens with ConvI2L.
5422 operand baseIndexOffset8_convI2L(mRegP base, mRegI index, immL8 off)
5423 %{
5424 constraint(ALLOC_IN_RC(p_reg));
5425 op_cost(5);
5426 match(AddP (AddP base (ConvI2L index)) off);
5428 format %{ "[$base + $index + $off (8-bit)] @ baseIndexOffset8_convI2L" %}
5429 interface(MEMORY_INTER) %{
5430 base($base);
5431 index($index);
5432 scale(0x0);
5433 disp($off);
5434 %}
5435 %}
5437 // Indirect Memory Times Scale Plus Index Register Plus Offset Operand
5438 operand indIndexScaleOffset8(mRegP reg, immL8 off, mRegL lreg, immI2 scale)
5439 %{
5440 constraint(ALLOC_IN_RC(p_reg));
5441 match(AddP (AddP reg (LShiftL lreg scale)) off);
5443 op_cost(10);
5444 format %{"[$reg + $off + $lreg << $scale] @ indIndexScaleOffset8" %}
5445 interface(MEMORY_INTER) %{
5446 base($reg);
5447 index($lreg);
5448 scale($scale);
5449 disp($off);
5450 %}
5451 %}
5453 operand indIndexScaleOffset8_convI2L(mRegP reg, immL8 off, mRegI ireg, immI2 scale)
5454 %{
5455 constraint(ALLOC_IN_RC(p_reg));
5456 match(AddP (AddP reg (LShiftL (ConvI2L ireg) scale)) off);
5458 op_cost(10);
5459 format %{"[$reg + $off + $ireg << $scale] @ indIndexScaleOffset8_convI2L" %}
5460 interface(MEMORY_INTER) %{
5461 base($reg);
5462 index($ireg);
5463 scale($scale);
5464 disp($off);
5465 %}
5466 %}
5468 // [base + index<<scale + offset]
// immI_0_31 allows the full shift range; the disabled predicate below
// would additionally have required a provably non-negative index.
5469 operand basePosIndexScaleOffset8(mRegP base, mRegI index, immL8 off, immI_0_31 scale)
5470 %{
5471 constraint(ALLOC_IN_RC(p_reg));
5472 //predicate(n->in(2)->in(3)->in(1)->as_Type()->type()->is_long()->_lo >= 0);
5473 op_cost(10);
5474 match(AddP (AddP base (LShiftL (ConvI2L index) scale)) off);
5476 format %{ "[$base + $index << $scale + $off (8-bit)] @ basePosIndexScaleOffset8" %}
5477 interface(MEMORY_INTER) %{
5478 base($base);
5479 index($index);
5480 scale($scale);
5481 disp($off);
5482 %}
5483 %}
5485 // Indirect Memory Times Scale Plus Index Register Plus Offset Operand
5486 operand indIndexScaleOffsetNarrow(mRegN reg, immL8 off, mRegL lreg, immI2 scale)
5487 %{
5488 predicate(Universe::narrow_oop_shift() == 0);
5489 constraint(ALLOC_IN_RC(p_reg));
5490 match(AddP (AddP (DecodeN reg) (LShiftL lreg scale)) off);
5492 op_cost(10);
5493 format %{"[$reg + $off + $lreg << $scale] @ indIndexScaleOffsetNarrow" %}
5494 interface(MEMORY_INTER) %{
5495 base($reg);
5496 index($lreg);
5497 scale($scale);
5498 disp($off);
5499 %}
5500 %}
5502 // [base + index<<scale + offset] for compressed Oops
5503 operand indPosIndexI2LScaleOffset8Narrow(mRegN base, mRegI index, immL8 off, immI_0_31 scale)
5504 %{
5505 constraint(ALLOC_IN_RC(p_reg));
5506 //predicate(Universe::narrow_oop_shift() == 0 && n->in(2)->in(3)->in(1)->as_Type()->type()->is_long()->_lo >= 0);
5507 predicate(Universe::narrow_oop_shift() == 0);
5508 op_cost(10);
5509 match(AddP (AddP (DecodeN base) (LShiftL (ConvI2L index) scale)) off);
5511 format %{ "[$base + $index << $scale + $off (8-bit)] @ indPosIndexI2LScaleOffset8Narrow" %}
5512 interface(MEMORY_INTER) %{
5513 base($base);
5514 index($index);
5515 scale($scale);
5516 disp($off);
5517 %}
5518 %}
5520 //FIXME: I think it's better to limit the immI to be 16-bit at most!
5521 // Indirect Memory Plus Long Offset Operand
5522 operand indOffset32(mRegP reg, immL32 off) %{
5523 constraint(ALLOC_IN_RC(p_reg));
5524 op_cost(20);
5525 match(AddP reg off);
5527 format %{ "[$reg + $off (32-bit)] @ indOffset32" %}
5528 interface(MEMORY_INTER) %{
5529 base($reg);
5530 index(0x0); /* NO_INDEX */
5531 scale(0x0);
5532 disp($off);
5533 %}
5534 %}
5536 // Indirect Memory Plus Index Register
5537 operand indIndex(mRegP addr, mRegL index) %{
5538 constraint(ALLOC_IN_RC(p_reg));
5539 match(AddP addr index);
5541 op_cost(20);
5542 format %{"[$addr + $index] @ indIndex" %}
5543 interface(MEMORY_INTER) %{
5544 base($addr);
5545 index($index);
5546 scale(0x0);
5547 disp(0x0);
5548 %}
5549 %}
// Compressed-klass addressing forms (DecodeNKlass base).
5551 operand indirectNarrowKlass(mRegN reg)
5552 %{
5553 predicate(Universe::narrow_klass_shift() == 0);
5554 constraint(ALLOC_IN_RC(p_reg));
5555 op_cost(10);
5556 match(DecodeNKlass reg);
5558 format %{ "[$reg] @ indirectNarrowKlass" %}
5559 interface(MEMORY_INTER) %{
5560 base($reg);
5561 index(0x0);
5562 scale(0x0);
5563 disp(0x0);
5564 %}
5565 %}
5567 operand indOffset8NarrowKlass(mRegN reg, immL8 off)
5568 %{
5569 predicate(Universe::narrow_klass_shift() == 0);
5570 constraint(ALLOC_IN_RC(p_reg));
5571 op_cost(10);
5572 match(AddP (DecodeNKlass reg) off);
5574 format %{ "[$reg + $off (8-bit)] @ indOffset8NarrowKlass" %}
5575 interface(MEMORY_INTER) %{
5576 base($reg);
5577 index(0x0);
5578 scale(0x0);
5579 disp($off);
5580 %}
5581 %}
// NOTE(review): same match rule as indOffset8NarrowKlass but with the
// wider immL32 offset; the matcher distinguishes them by the immediate
// operand type.
5583 operand indOffset32NarrowKlass(mRegN reg, immL32 off)
5584 %{
5585 predicate(Universe::narrow_klass_shift() == 0);
5586 constraint(ALLOC_IN_RC(p_reg));
5587 op_cost(10);
5588 match(AddP (DecodeNKlass reg) off);
5590 format %{ "[$reg + $off (32-bit)] @ indOffset32NarrowKlass" %}
5591 interface(MEMORY_INTER) %{
5592 base($reg);
5593 index(0x0);
5594 scale(0x0);
5595 disp($off);
5596 %}
5597 %}
5599 operand indIndexOffsetNarrowKlass(mRegN reg, mRegL lreg, immL32 off)
5600 %{
5601 predicate(Universe::narrow_klass_shift() == 0);
5602 constraint(ALLOC_IN_RC(p_reg));
5603 match(AddP (AddP (DecodeNKlass reg) lreg) off);
5605 op_cost(10);
5606 format %{"[$reg + $off + $lreg] @ indIndexOffsetNarrowKlass" %}
5607 interface(MEMORY_INTER) %{
5608 base($reg);
5609 index($lreg);
5610 scale(0x0);
5611 disp($off);
5612 %}
5613 %}
5615 operand indIndexNarrowKlass(mRegN reg, mRegL lreg)
5616 %{
5617 predicate(Universe::narrow_klass_shift() == 0);
5618 constraint(ALLOC_IN_RC(p_reg));
5619 match(AddP (DecodeNKlass reg) lreg);
5621 op_cost(10);
5622 format %{"[$reg + $lreg] @ indIndexNarrowKlass" %}
5623 interface(MEMORY_INTER) %{
5624 base($reg);
5625 index($lreg);
5626 scale(0x0);
5627 disp(0x0);
5628 %}
5629 %}
5631 // Indirect Memory Operand
5632 operand indirectNarrow(mRegN reg)
5633 %{
5634 predicate(Universe::narrow_oop_shift() == 0);
5635 constraint(ALLOC_IN_RC(p_reg));
5636 op_cost(10);
5637 match(DecodeN reg);
5639 format %{ "[$reg] @ indirectNarrow" %}
5640 interface(MEMORY_INTER) %{
5641 base($reg);
5642 index(0x0);
5643 scale(0x0);
5644 disp(0x0);
5645 %}
5646 %}
5648 // Indirect Memory Plus Short Offset Operand
5649 operand indOffset8Narrow(mRegN reg, immL8 off)
5650 %{
5651 predicate(Universe::narrow_oop_shift() == 0);
5652 constraint(ALLOC_IN_RC(p_reg));
5653 op_cost(10);
5654 match(AddP (DecodeN reg) off);
5656 format %{ "[$reg + $off (8-bit)] @ indOffset8Narrow" %}
5657 interface(MEMORY_INTER) %{
5658 base($reg);
5659 index(0x0);
5660 scale(0x0);
5661 disp($off);
5662 %}
5663 %}
5665 // Indirect Memory Plus Index Register Plus Offset Operand
5666 operand indIndexOffset8Narrow(mRegN reg, mRegL lreg, immL8 off)
5667 %{
5668 predicate(Universe::narrow_oop_shift() == 0);
5669 constraint(ALLOC_IN_RC(p_reg));
5670 match(AddP (AddP (DecodeN reg) lreg) off);
5672 op_cost(10);
5673 format %{"[$reg + $off + $lreg] @ indIndexOffset8Narrow" %}
5674 interface(MEMORY_INTER) %{
5675 base($reg);
5676 index($lreg);
5677 scale(0x0);
5678 disp($off);
5679 %}
5680 %}
5682 //----------Load Long Memory Operands------------------------------------------
5683 // The load-long idiom will use its address expression again after loading
5684 // the first word of the long. If the load-long destination overlaps with
5685 // registers used in the addressing expression, the 2nd half will be loaded
5686 // from a clobbered address. Fix this by requiring that load-long use
5687 // address registers that do not overlap with the load-long target.
5689 // load-long support
// High op_cost discourages the allocator from picking this over the
// ordinary pointer operands except where a pattern requires it.
5690 operand load_long_RegP() %{
5691 constraint(ALLOC_IN_RC(p_reg));
5692 match(RegP);
5693 match(mRegP);
5694 op_cost(100);
5695 format %{ %}
5696 interface(REG_INTER);
5697 %}
5699 // Indirect Memory Operand Long
5700 operand load_long_indirect(load_long_RegP reg) %{
5701 constraint(ALLOC_IN_RC(p_reg));
5702 match(reg);
5704 format %{ "[$reg]" %}
5705 interface(MEMORY_INTER) %{
5706 base($reg);
5707 index(0x0);
5708 scale(0x0);
5709 disp(0x0);
5710 %}
5711 %}
5713 // Indirect Memory Plus Long Offset Operand
5714 operand load_long_indOffset32(load_long_RegP reg, immL32 off) %{
5715 match(AddP reg off);
5717 format %{ "[$reg + $off]" %}
5718 interface(MEMORY_INTER) %{
5719 base($reg);
5720 index(0x0);
5721 scale(0x0);
5722 disp($off);
5723 %}
5724 %}
5726 //----------Conditional Branch Operands----------------------------------------
5727 // Comparison Op - This is the operation of the comparison, and is limited to
5728 // the following set of codes:
5729 // L (<), LE (<=), G (>), GE (>=), E (==), NE (!=)
5730 //
5731 // Other attributes of the comparison, such as unsignedness, are specified
5732 // by the comparison instruction that sets a condition code flags register.
5733 // That result is represented by a flags operand whose subtype is appropriate
5734 // to the unsignedness (etc.) of the comparison.
5735 //
5736 // Later, the instruction which matches both the Comparison Op (a Bool) and
5737 // the flags (produced by the Cmp) specifies the coding of the comparison op
5738 // by matching a specific subtype of Bool operand below, such as cmpOpU.
5740 // Comparison Code
5741 operand cmpOp() %{
5742 match(Bool);
5744 format %{ "" %}
5745 interface(COND_INTER) %{
5746 equal(0x01);
5747 not_equal(0x02);
5748 greater(0x03);
5749 greater_equal(0x04);
5750 less(0x05);
5751 less_equal(0x06);
5752 overflow(0x7);
5753 no_overflow(0x8);
5754 %}
5755 %}
5758 // Comparison Code
5759 // Comparison Code, unsigned compare. Used by FP also, with
5760 // C2 (unordered) turned into GT or LT already. The other bits
5761 // C0 and C3 are turned into Carry & Zero flags.
// NOTE(review): cmpOpU currently defines encodings identical to cmpOp;
// the signed/unsigned distinction is carried by the instruction patterns
// that match it, not by the encoding itself.
5762 operand cmpOpU() %{
5763 match(Bool);
5765 format %{ "" %}
5766 interface(COND_INTER) %{
5767 equal(0x01);
5768 not_equal(0x02);
5769 greater(0x03);
5770 greater_equal(0x04);
5771 less(0x05);
5772 less_equal(0x06);
5773 overflow(0x7);
5774 no_overflow(0x8);
5775 %}
5776 %}
// Disabled alternative encodings, kept for reference.
5778 /*
5779 // Comparison Code, unsigned compare. Used by FP also, with
5780 // C2 (unordered) turned into GT or LT already. The other bits
5781 // C0 and C3 are turned into Carry & Zero flags.
5782 operand cmpOpU() %{
5783 match(Bool);
5785 format %{ "" %}
5786 interface(COND_INTER) %{
5787 equal(0x4);
5788 not_equal(0x5);
5789 less(0x2);
5790 greater_equal(0x3);
5791 less_equal(0x6);
5792 greater(0x7);
5793 %}
5794 %}
5795 */
5796 /*
5797 // Comparison Code for FP conditional move
5798 operand cmpOp_fcmov() %{
5799 match(Bool);
5801 format %{ "" %}
5802 interface(COND_INTER) %{
5803 equal (0x01);
5804 not_equal (0x02);
5805 greater (0x03);
5806 greater_equal(0x04);
5807 less (0x05);
5808 less_equal (0x06);
5809 %}
5810 %}
5812 // Comparison Code used in long compares
5813 operand cmpOp_commute() %{
5814 match(Bool);
5816 format %{ "" %}
5817 interface(COND_INTER) %{
5818 equal(0x4);
5819 not_equal(0x5);
5820 less(0xF);
5821 greater_equal(0xE);
5822 less_equal(0xD);
5823 greater(0xC);
5824 %}
5825 %}
5826 */
5828 //----------Special Memory Operands--------------------------------------------
5829 // Stack Slot Operand - This operand is used for loading and storing temporary
5830 // values on the stack where a match requires a value to
5831 // flow through memory.
// All five variants address [SP + slot]: base 0x1d (= register 29, SP per
// the comments below), no index/scale, and the slot offset as displacement.
5832 operand stackSlotP(sRegP reg) %{
5833 constraint(ALLOC_IN_RC(stack_slots));
5834 // No match rule because this operand is only generated in matching
5835 op_cost(50);
5836 format %{ "[$reg]" %}
5837 interface(MEMORY_INTER) %{
5838 base(0x1d); // SP
5839 index(0x0); // No Index
5840 scale(0x0); // No Scale
5841 disp($reg); // Stack Offset
5842 %}
5843 %}
5845 operand stackSlotI(sRegI reg) %{
5846 constraint(ALLOC_IN_RC(stack_slots));
5847 // No match rule because this operand is only generated in matching
5848 op_cost(50);
5849 format %{ "[$reg]" %}
5850 interface(MEMORY_INTER) %{
5851 base(0x1d); // SP
5852 index(0x0); // No Index
5853 scale(0x0); // No Scale
5854 disp($reg); // Stack Offset
5855 %}
5856 %}
5858 operand stackSlotF(sRegF reg) %{
5859 constraint(ALLOC_IN_RC(stack_slots));
5860 // No match rule because this operand is only generated in matching
5861 op_cost(50);
5862 format %{ "[$reg]" %}
5863 interface(MEMORY_INTER) %{
5864 base(0x1d); // SP
5865 index(0x0); // No Index
5866 scale(0x0); // No Scale
5867 disp($reg); // Stack Offset
5868 %}
5869 %}
5871 operand stackSlotD(sRegD reg) %{
5872 constraint(ALLOC_IN_RC(stack_slots));
5873 // No match rule because this operand is only generated in matching
5874 op_cost(50);
5875 format %{ "[$reg]" %}
5876 interface(MEMORY_INTER) %{
5877 base(0x1d); // SP
5878 index(0x0); // No Index
5879 scale(0x0); // No Scale
5880 disp($reg); // Stack Offset
5881 %}
5882 %}
5884 operand stackSlotL(sRegL reg) %{
5885 constraint(ALLOC_IN_RC(stack_slots));
5886 // No match rule because this operand is only generated in matching
5887 op_cost(50);
5888 format %{ "[$reg]" %}
5889 interface(MEMORY_INTER) %{
5890 base(0x1d); // SP
5891 index(0x0); // No Index
5892 scale(0x0); // No Scale
5893 disp($reg); // Stack Offset
5894 %}
5895 %}
5898 //------------------------OPERAND CLASSES--------------------------------------
5899 //opclass memory( direct, indirect, indOffset16, indOffset32, indOffset32X, indIndexOffset );
// The umbrella "memory" operand class: instruction patterns written
// against `memory` accept any of the addressing forms defined above.
// New memory operands must be added here to be usable by those patterns.
5900 opclass memory( indirect, indirectNarrow, indOffset8, indOffset32, indIndex, indIndexScale, load_long_indirect, load_long_indOffset32, baseIndexOffset8, baseIndexOffset8_convI2L, indIndexScaleOffset8, indIndexScaleOffset8_convI2L, basePosIndexScaleOffset8, indIndexScaleOffsetNarrow, indPosIndexI2LScaleOffset8Narrow, indOffset8Narrow, indIndexOffset8Narrow);
//----------PIPELINE-----------------------------------------------------------
// Rules which define the behavior of the target architectures pipeline.

pipeline %{

//----------ATTRIBUTES---------------------------------------------------------
attributes %{
  fixed_size_instructions;          // Fixed size instructions
  branch_has_delay_slot;            // branches have a delay slot on GS2
  max_instructions_per_bundle = 1;  // 1 instruction per bundle
  max_bundles_per_cycle = 4;        // Up to 4 bundles per cycle
  bundle_unit_size=4;
  instruction_unit_size = 4;        // An instruction is 4 bytes long
  instruction_fetch_unit_size = 16; // The processor fetches one line
  instruction_fetch_units = 1;      // of 16 bytes

  // List of nop instructions
  nops( MachNop );
%}

//----------RESOURCES----------------------------------------------------------
// Resources are the functional units available to the machine
// 4 decoders, 2 integer ALUs, 2 FPUs, one memory port and one branch unit.
resources(D1, D2, D3, D4, DECODE = D1 | D2 | D3| D4, ALU1, ALU2, ALU = ALU1 | ALU2, FPU1, FPU2, FPU = FPU1 | FPU2, MEM, BR);

//----------PIPELINE DESCRIPTION-----------------------------------------------
// Pipeline Description specifies the stages in the machine's pipeline

// IF: fetch
// ID: decode
// RD: read
// CA: calculate
// WB: write back
// CM: commit

pipe_desc(IF, ID, RD, CA, WB, CM);

//----------PIPELINE CLASSES---------------------------------------------------
// Pipeline Classes describe the stages in which input and output are
// referenced by the hardware pipeline.

//No.1 Integer ALU reg-reg operation : dst <-- reg1 op reg2
pipe_class ialu_regI_regI(mRegI dst, mRegI src1, mRegI src2) %{
  single_instruction;
  src1 : RD(read);
  src2 : RD(read);
  dst  : WB(write)+1;
  DECODE : ID;
  ALU    : CA;
%}

//No.19 Integer mult operation : dst <-- reg1 mult reg2
// Multiply is only handled by the second ALU; result ready 5 cycles after WB.
pipe_class ialu_mult(mRegI dst, mRegI src1, mRegI src2) %{
  src1 : RD(read);
  src2 : RD(read);
  dst  : WB(write)+5;
  DECODE : ID;
  ALU2   : CA;
%}

// Long multiply; higher latency than the 32-bit multiply above.
pipe_class mulL_reg_reg(mRegL dst, mRegL src1, mRegL src2) %{
  src1 : RD(read);
  src2 : RD(read);
  dst  : WB(write)+10;
  DECODE : ID;
  ALU2   : CA;
%}

//No.19 Integer div operation : dst <-- reg1 div reg2
pipe_class ialu_div(mRegI dst, mRegI src1, mRegI src2) %{
  src1 : RD(read);
  src2 : RD(read);
  dst  : WB(write)+10;
  DECODE : ID;
  ALU2   : CA;
%}

//No.19 Integer mod operation : dst <-- reg1 mod reg2
pipe_class ialu_mod(mRegI dst, mRegI src1, mRegI src2) %{
  instruction_count(2);
  src1 : RD(read);
  src2 : RD(read);
  dst  : WB(write)+10;
  DECODE : ID;
  ALU2   : CA;
%}

//No.15 Long ALU reg-reg operation : dst <-- reg1 op reg2
pipe_class ialu_regL_regL(mRegL dst, mRegL src1, mRegL src2) %{
  instruction_count(2);
  src1 : RD(read);
  src2 : RD(read);
  dst  : WB(write);
  DECODE : ID;
  ALU    : CA;
%}

//No.18 Long ALU reg-imm16 operation : dst <-- reg1 op imm16
pipe_class ialu_regL_imm16(mRegL dst, mRegL src) %{
  instruction_count(2);
  src : RD(read);
  dst : WB(write);
  DECODE : ID;
  ALU    : CA;
%}

//no.16 load Long from memory :
pipe_class ialu_loadL(mRegL dst, memory mem) %{
  instruction_count(2);
  mem : RD(read);
  dst : WB(write)+5;
  DECODE : ID;
  MEM    : RD;
%}

//No.17 Store Long to Memory :
pipe_class ialu_storeL(mRegL src, memory mem) %{
  instruction_count(2);
  mem : RD(read);
  src : RD(read);
  DECODE : ID;
  MEM    : RD;
%}

//No.2 Integer ALU reg-imm16 operation : dst <-- reg1 op imm16
pipe_class ialu_regI_imm16(mRegI dst, mRegI src) %{
  single_instruction;
  src : RD(read);
  dst : WB(write);
  DECODE : ID;
  ALU    : CA;
%}

//No.3 Integer move operation : dst <-- reg
pipe_class ialu_regI_mov(mRegI dst, mRegI src) %{
  src : RD(read);
  dst : WB(write);
  DECODE : ID;
  ALU    : CA;
%}

//No.4 No instructions : do nothing
pipe_class empty( ) %{
  instruction_count(0);
%}

//No.5 UnConditional branch :
pipe_class pipe_jump( label labl ) %{
  multiple_bundles;
  DECODE : ID;
  BR     : RD;
%}

//No.6 ALU Conditional branch :
pipe_class pipe_alu_branch(mRegI src1, mRegI src2, label labl ) %{
  multiple_bundles;
  src1 : RD(read);
  src2 : RD(read);
  DECODE : ID;
  BR     : RD;
%}

//no.7 load integer from memory :
pipe_class ialu_loadI(mRegI dst, memory mem) %{
  mem : RD(read);
  dst : WB(write)+3;
  DECODE : ID;
  MEM    : RD;
%}

//No.8 Store Integer to Memory :
pipe_class ialu_storeI(mRegI src, memory mem) %{
  mem : RD(read);
  src : RD(read);
  DECODE : ID;
  MEM    : RD;
%}

//No.10 Floating FPU reg-reg operation : dst <-- reg1 op reg2
pipe_class fpu_regF_regF(regF dst, regF src1, regF src2) %{
  src1 : RD(read);
  src2 : RD(read);
  dst  : WB(write);
  DECODE : ID;
  FPU    : CA;
%}

//No.22 Floating div operation : dst <-- reg1 div reg2
pipe_class fpu_div(regF dst, regF src1, regF src2) %{
  src1 : RD(read);
  src2 : RD(read);
  dst  : WB(write);
  DECODE : ID;
  FPU2   : CA;
%}

// int -> double conversion; uses the first FPU only.
pipe_class fcvt_I2D(regD dst, mRegI src) %{
  src : RD(read);
  dst : WB(write);
  DECODE : ID;
  FPU1   : CA;
%}

// double -> int conversion; uses the first FPU only.
pipe_class fcvt_D2I(mRegI dst, regD src) %{
  src : RD(read);
  dst : WB(write);
  DECODE : ID;
  FPU1   : CA;
%}

// Move FPR -> GPR (mfc1); modeled on the memory port.
pipe_class pipe_mfc1(mRegI dst, regD src) %{
  src : RD(read);
  dst : WB(write);
  DECODE : ID;
  MEM    : RD;
%}

// Move GPR -> FPR (mtc1); memory port busy for 5 cycles.
pipe_class pipe_mtc1(regD dst, mRegI src) %{
  src : RD(read);
  dst : WB(write);
  DECODE : ID;
  MEM    : RD(5);
%}

//No.23 Floating sqrt operation : dst <-- reg1 sqrt reg2
pipe_class fpu_sqrt(regF dst, regF src1, regF src2) %{
  multiple_bundles;
  src1 : RD(read);
  src2 : RD(read);
  dst  : WB(write);
  DECODE : ID;
  FPU2   : CA;
%}

//No.11 Load Floating from Memory :
pipe_class fpu_loadF(regF dst, memory mem) %{
  instruction_count(1);
  mem : RD(read);
  dst : WB(write)+3;
  DECODE : ID;
  MEM    : RD;
%}

//No.12 Store Floating to Memory :
pipe_class fpu_storeF(regF src, memory mem) %{
  instruction_count(1);
  mem : RD(read);
  src : RD(read);
  DECODE : ID;
  MEM    : RD;
%}

//No.13 FPU Conditional branch :
pipe_class pipe_fpu_branch(regF src1, regF src2, label labl ) %{
  multiple_bundles;
  src1 : RD(read);
  src2 : RD(read);
  DECODE : ID;
  BR     : RD;
%}

//No.14 Floating FPU reg operation : dst <-- op reg
pipe_class fpu1_regF(regF dst, regF src) %{
  src : RD(read);
  dst : WB(write);
  DECODE : ID;
  FPU    : CA;
%}

// Serializing multi-instruction memory operation (e.g. atomics/barriers).
pipe_class long_memory_op() %{
  instruction_count(10); multiple_bundles; force_serialization;
  fixed_latency(30);
%}

// Call through the branch unit; pessimistic fixed latency.
pipe_class simple_call() %{
  instruction_count(10); multiple_bundles; force_serialization;
  fixed_latency(200);
  BR : RD;
%}

// Generic call; pessimistic fixed latency, no unit reserved.
pipe_class call() %{
  instruction_count(10); multiple_bundles; force_serialization;
  fixed_latency(200);
%}

//FIXME:
//No.9 Pipe slow : for multi-instructions
// Catch-all class for instruct bodies that expand to many instructions.
pipe_class pipe_slow( ) %{
  instruction_count(20);
  force_serialization;
  multiple_bundles;
  fixed_latency(50);
%}

%}
6203 //----------INSTRUCTIONS-------------------------------------------------------
6204 //
6205 // match -- States which machine-independent subtree may be replaced
6206 // by this instruction.
6207 // ins_cost -- The estimated cost of this instruction is used by instruction
6208 // selection to identify a minimum cost tree of machine
6209 // instructions that matches a tree of machine-independent
6210 // instructions.
6211 // format -- A string providing the disassembly for this instruction.
6212 // The value of an instruction's operand may be inserted
6213 // by referring to it with a '$' prefix.
6214 // opcode -- Three instruction opcodes may be provided. These are referred
6215 // to within an encode class as $primary, $secondary, and $tertiary
6216 // respectively. The primary opcode is commonly used to
6217 // indicate the type of machine instruction, while secondary
6218 // and tertiary are often used for prefix options or addressing
6219 // modes.
6220 // ins_encode -- A list of encode classes with parameters. The encode class
6221 // name must have been defined in an 'enc_class' specification
6222 // in the encode section of the architecture description.
// Load Integer
// 32-bit signed load (lw) from any 'memory' addressing mode.
instruct loadI(mRegI dst, memory mem) %{
  match(Set dst (LoadI mem));

  ins_cost(125);
  format %{ "lw $dst, $mem #@loadI" %}
  ins_encode (load_I_enc(dst, mem));
  ins_pipe( ialu_loadI );
%}
// Load Integer fused with sign-extension to long; lw already sign-extends
// to 64 bits on MIPS64, so the ConvI2L is free.
instruct loadI_convI2L(mRegL dst, memory mem) %{
  match(Set dst (ConvI2L (LoadI mem)));

  ins_cost(125);
  format %{ "lw $dst, $mem #@loadI_convI2L" %}
  ins_encode (load_I_enc(dst, mem));
  ins_pipe( ialu_loadI );
%}
// Load Integer (32 bit signed) to Byte (8 bit signed)
// Matches the (x << 24) >> 24 sign-extension idiom and emits a single lb.
instruct loadI2B(mRegI dst, memory mem, immI_24 twentyfour) %{
  match(Set dst (RShiftI (LShiftI (LoadI mem) twentyfour) twentyfour));

  ins_cost(125);
  format %{ "lb $dst, $mem\t# int -> byte #@loadI2B" %}
  ins_encode(load_B_enc(dst, mem));
  ins_pipe(ialu_loadI);
%}
// Load Integer (32 bit signed) to Unsigned Byte (8 bit UNsigned)
// Matches (load & 0xFF) and emits a single lbu.
instruct loadI2UB(mRegI dst, memory mem, immI_255 mask) %{
  match(Set dst (AndI (LoadI mem) mask));

  ins_cost(125);
  format %{ "lbu $dst, $mem\t# int -> ubyte #@loadI2UB" %}
  ins_encode(load_UB_enc(dst, mem));
  ins_pipe(ialu_loadI);
%}
// Load Integer (32 bit signed) to Short (16 bit signed)
// Matches the (x << 16) >> 16 sign-extension idiom and emits a single lh.
instruct loadI2S(mRegI dst, memory mem, immI_16 sixteen) %{
  match(Set dst (RShiftI (LShiftI (LoadI mem) sixteen) sixteen));

  ins_cost(125);
  format %{ "lh $dst, $mem\t# int -> short #@loadI2S" %}
  ins_encode(load_S_enc(dst, mem));
  ins_pipe(ialu_loadI);
%}
// Load Integer (32 bit signed) to Unsigned Short/Char (16 bit UNsigned)
// Matches (load & 0xFFFF) and emits a single lhu.
instruct loadI2US(mRegI dst, memory mem, immI_65535 mask) %{
  match(Set dst (AndI (LoadI mem) mask));

  ins_cost(125);
  format %{ "lhu $dst, $mem\t# int -> ushort/char #@loadI2US" %}
  ins_encode(load_C_enc(dst, mem));
  ins_pipe(ialu_loadI);
%}
// Load Long.
// 64-bit load (ld); a single ld is naturally atomic on MIPS64, so the
// atomic-access predicate below is not needed.
instruct loadL(mRegL dst, memory mem) %{
//  predicate(!((LoadLNode*)n)->require_atomic_access());
  match(Set dst (LoadL mem));

  ins_cost(250);
  format %{ "ld $dst, $mem #@loadL" %}
  ins_encode(load_L_enc(dst, mem));
  ins_pipe( ialu_loadL );
%}
// Load Long - UNaligned
// Currently emitted as a plain ld; priced higher to discourage matching.
instruct loadL_unaligned(mRegL dst, memory mem) %{
  match(Set dst (LoadL_unaligned mem));

  // FIXME: Jin: Need more effective ldl/ldr
  ins_cost(450);
  format %{ "ld $dst, $mem #@loadL_unaligned\n\t" %}
  ins_encode(load_L_enc(dst, mem));
  ins_pipe( ialu_loadL );
%}
// Store Long
// 64-bit store (sd) of a register value.
instruct storeL_reg(memory mem, mRegL src) %{
  match(Set mem (StoreL mem src));

  ins_cost(200);
  format %{ "sd $mem, $src #@storeL_reg\n" %}
  ins_encode(store_L_reg_enc(mem, src));
  ins_pipe( ialu_storeL );
%}
// Store long zero: uses the zero register directly, so it is cheaper
// than the generic immediate store below.
instruct storeL_immL0(memory mem, immL0 zero) %{
  match(Set mem (StoreL mem zero));

  ins_cost(180);
  format %{ "sd zero, $mem #@storeL_immL0" %}
  ins_encode(store_L_immL0_enc(mem, zero));
  ins_pipe( ialu_storeL );
%}
// Store an arbitrary long immediate (materialized into a scratch register
// by the encoding before the sd).
instruct storeL_imm(memory mem, immL src) %{
  match(Set mem (StoreL mem src));

  ins_cost(200);
  format %{ "sd $src, $mem #@storeL_imm" %}
  ins_encode(store_L_immL_enc(mem, src));
  ins_pipe( ialu_storeL );
%}
// Load Compressed Pointer
// 32-bit zero-extending load (lwu) of a narrow oop.
instruct loadN(mRegN dst, memory mem)
%{
   match(Set dst (LoadN mem));

   ins_cost(125); // XXX
   format %{ "lwu $dst, $mem\t# compressed ptr @ loadN" %}
   ins_encode (load_N_enc(dst, mem));
   ins_pipe( ialu_loadI ); // XXX
%}
// Load narrow oop and decode to a full pointer in one lwu; legal only when
// decoding is the identity (zero heap base, zero shift).
instruct loadN2P(mRegP dst, memory mem)
%{
   match(Set dst (DecodeN (LoadN mem)));
   predicate(Universe::narrow_oop_base() == NULL && Universe::narrow_oop_shift() == 0);

   ins_cost(125); // XXX
   format %{ "lwu $dst, $mem\t# @ loadN2P" %}
   ins_encode (load_N_enc(dst, mem));
   ins_pipe( ialu_loadI ); // XXX
%}
// Load Pointer
// 64-bit pointer load (ld).
instruct loadP(mRegP dst, memory mem) %{
  match(Set dst (LoadP mem));

  ins_cost(125);
  format %{ "ld $dst, $mem #@loadP" %}
  ins_encode (load_P_enc(dst, mem));
  ins_pipe( ialu_loadI );
%}
// Load Klass Pointer
// Full-width klass pointer load; same encoding as loadP.
instruct loadKlass(mRegP dst, memory mem) %{
  match(Set dst (LoadKlass mem));

  ins_cost(125);
  format %{ "MOV $dst,$mem @ loadKlass" %}
  ins_encode (load_P_enc(dst, mem));
  ins_pipe( ialu_loadI );
%}
// Load narrow Klass Pointer
// 32-bit zero-extending load (lwu) of a compressed klass pointer.
instruct loadNKlass(mRegN dst, memory mem)
%{
   match(Set dst (LoadNKlass mem));

   ins_cost(125); // XXX
   format %{ "lwu $dst, $mem\t# compressed klass ptr @ loadNKlass" %}
   ins_encode (load_N_enc(dst, mem));
   ins_pipe( ialu_loadI ); // XXX
%}
// Load narrow klass and decode in one lwu; legal only when decoding is the
// identity (zero klass base, zero shift).
instruct loadN2PKlass(mRegP dst, memory mem)
%{
   match(Set dst (DecodeNKlass (LoadNKlass mem)));
   predicate(Universe::narrow_klass_base() == NULL && Universe::narrow_klass_shift() == 0);

   ins_cost(125); // XXX
   format %{ "lwu $dst, $mem\t# compressed klass ptr @ loadN2PKlass" %}
   ins_encode (load_N_enc(dst, mem));
   ins_pipe( ialu_loadI ); // XXX
%}
// Load Constant
// Materialize a 32-bit integer constant into a register.
instruct loadConI(mRegI dst, immI src) %{
  match(Set dst src);

  ins_cost(150);
  format %{ "mov $dst, $src #@loadConI" %}
  ins_encode %{
    Register dst = $dst$$Register;
    int    value = $src$$constant;
    __ move(dst, value);
  %}
  ins_pipe( ialu_regI_regI );
%}
// Materialize an arbitrary 64-bit constant via the set64 macro
// (up to a several-instruction sequence).
instruct loadConL_set64(mRegL dst, immL src) %{
  match(Set dst src);
  ins_cost(120);
  format %{ "li $dst, $src @ loadConL_set64" %}
  ins_encode %{
    __ set64($dst$$Register, $src$$constant);
  %}
  ins_pipe(ialu_regL_regL);
%}
6423 /*
6424 // Load long value from constant table (predicated by immL_expensive).
6425 instruct loadConL_load(mRegL dst, immL_expensive src) %{
6426 match(Set dst src);
6427 ins_cost(150);
6428 format %{ "ld $dst, $constantoffset[$constanttablebase] # load long $src from table @ loadConL_ldx" %}
6429 ins_encode %{
6430 int con_offset = $constantoffset($src);
6432 if (Assembler::is_simm16(con_offset)) {
6433 __ ld($dst$$Register, $constanttablebase, con_offset);
6434 } else {
6435 __ set64(AT, con_offset);
6436 if (UseLoongsonISA) {
6437 __ gsldx($dst$$Register, $constanttablebase, AT, 0);
6438 } else {
6439 __ daddu(AT, $constanttablebase, AT);
6440 __ ld($dst$$Register, AT, 0);
6441 }
6442 }
6443 %}
6444 ins_pipe(ialu_loadI);
6445 %}
6446 */
// Materialize a long constant that fits in a signed 16-bit immediate with a
// single daddiu from the zero register; cheapest non-zero case.
instruct loadConL16(mRegL dst, immL16 src) %{
  match(Set dst src);
  ins_cost(105);
  format %{ "mov $dst, $src #@loadConL16" %}
  ins_encode %{
    Register dst_reg = as_Register($dst$$reg);
    int      value   = $src$$constant;
    __ daddiu(dst_reg, R0, value);
  %}
  ins_pipe( ialu_regL_regL );
%}
// Materialize long zero by copying the hardwired zero register.
instruct loadConL0(mRegL dst, immL0 src) %{
  match(Set dst src);
  ins_cost(100);
  format %{ "mov $dst, zero #@loadConL0" %}
  ins_encode %{
    Register dst_reg = as_Register($dst$$reg);
    __ daddu(dst_reg, R0, R0);
  %}
  ins_pipe( ialu_regL_regL );
%}
// Load Range
// Array length load; same encoding as a plain 32-bit integer load.
instruct loadRange(mRegI dst, memory mem) %{
  match(Set dst (LoadRange mem));

  ins_cost(125);
  format %{ "MOV $dst,$mem @ loadRange" %}
  ins_encode(load_I_enc(dst, mem));
  ins_pipe( ialu_loadI );
%}
// Store Pointer (64-bit sd) of a register value.
instruct storeP(memory mem, mRegP src ) %{
  match(Set mem (StoreP mem src));

  ins_cost(125);
  format %{ "sd $src, $mem #@storeP" %}
  ins_encode(store_P_reg_enc(mem, src));
  ins_pipe( ialu_storeI );
%}
// Store NULL Pointer, mark word, or other simple pointer constant.
// Zero case: stores the zero register directly.
instruct storeImmP0(memory mem, immP0 zero) %{
  match(Set mem (StoreP mem zero));

  ins_cost(125);
  format %{ "mov $mem, $zero #@storeImmP0" %}
  ins_encode(store_P_immP0_enc(mem));
  ins_pipe( ialu_storeI );
%}
// Store NULL Pointer, mark word, or other simple pointer constant.
// Non-zero 31-bit immediate case; the encoding materializes the constant.
instruct storeImmP(memory mem, immP31 src) %{
  match(Set mem (StoreP mem src));

  ins_cost(150);
  format %{ "mov $mem, $src #@storeImmP" %}
  ins_encode(store_P_immP_enc(mem, src));
  ins_pipe( ialu_storeI );
%}
// Store Byte Immediate
instruct storeImmB(memory mem, immI8 src) %{
  match(Set mem (StoreB mem src));

  ins_cost(150);
  format %{ "movb $mem, $src #@storeImmB" %}
  ins_encode(store_B_immI_enc(mem, src));
  ins_pipe( ialu_storeI );
%}
// Store Compressed Pointer
// 32-bit store (sw) of a narrow oop register.
instruct storeN(memory mem, mRegN src)
%{
  match(Set mem (StoreN mem src));

  ins_cost(125); // XXX
  format %{ "sw $mem, $src\t# compressed ptr @ storeN" %}
  ins_encode(store_N_reg_enc(mem, src));
  ins_pipe( ialu_storeI );
%}
// Encode a full pointer and store it as a narrow oop in one sw; legal only
// when encoding is the identity (zero heap base, zero shift).
instruct storeP2N(memory mem, mRegP src)
%{
  match(Set mem (StoreN mem (EncodeP src)));
  predicate(Universe::narrow_oop_base() == NULL && Universe::narrow_oop_shift() == 0);

  ins_cost(125); // XXX
  format %{ "sw $mem, $src\t# @ storeP2N" %}
  ins_encode(store_N_reg_enc(mem, src));
  ins_pipe( ialu_storeI );
%}
// Store a compressed klass pointer register (sw).
instruct storeNKlass(memory mem, mRegN src)
%{
  match(Set mem (StoreNKlass mem src));

  ins_cost(125); // XXX
  format %{ "sw $mem, $src\t# compressed klass ptr @ storeNKlass" %}
  ins_encode(store_N_reg_enc(mem, src));
  ins_pipe( ialu_storeI );
%}
// Encode a klass pointer and store it compressed in one sw; legal only when
// encoding is the identity (zero klass base, zero shift).
instruct storeP2NKlass(memory mem, mRegP src)
%{
  match(Set mem (StoreNKlass mem (EncodePKlass src)));
  predicate(Universe::narrow_klass_base() == NULL && Universe::narrow_klass_shift() == 0);

  ins_cost(125); // XXX
  format %{ "sw $mem, $src\t# @ storeP2NKlass" %}
  ins_encode(store_N_reg_enc(mem, src));
  ins_pipe( ialu_storeI );
%}
// Store narrow-oop NULL (zero register) to memory.
instruct storeImmN0(memory mem, immN0 zero)
%{
  match(Set mem (StoreN mem zero));

  ins_cost(125); // XXX
  format %{ "storeN0 zero, $mem\t# compressed ptr" %}
  ins_encode(storeImmN0_enc(mem, zero));
  ins_pipe( ialu_storeI );
%}
// Store a narrow-oop immediate; the encoding materializes the
// compressed constant before the sw.
instruct storeImmN(memory mem, immN src)
%{
  match(Set mem (StoreN mem src));

  ins_cost(150);
  format %{ "storeImmN $mem, $src\t# compressed ptr @ storeImmN" %}
  ins_encode(storeImmN_enc(mem, src));
  ins_pipe( ialu_storeI );
%}
// Store a compressed-klass immediate; the encoding materializes the
// compressed constant before the sw.
instruct storeImmNKlass(memory mem, immNKlass src)
%{
  match(Set mem (StoreNKlass mem src));

  ins_cost(150); // XXX
  format %{ "sw $mem, $src\t# compressed klass ptr @ storeImmNKlass" %}
  ins_encode(storeImmNKlass_enc(mem, src));
  ins_pipe( ialu_storeI );
%}
// Store Byte
instruct storeB(memory mem, mRegI src) %{
  match(Set mem (StoreB mem src));

  ins_cost(125);
  format %{ "sb $src, $mem #@storeB" %}
  ins_encode(store_B_reg_enc(mem, src));
  ins_pipe( ialu_storeI );
%}
// Store byte from a long source; sb only writes the low 8 bits, so the
// ConvL2I truncation is free.
instruct storeB_convL2I(memory mem, mRegL src) %{
  match(Set mem (StoreB mem (ConvL2I src)));

  ins_cost(125);
  format %{ "sb $src, $mem #@storeB_convL2I" %}
  ins_encode(store_B_reg_enc(mem, src));
  ins_pipe( ialu_storeI );
%}
// Load Byte (8bit signed)
instruct loadB(mRegI dst, memory mem) %{
  match(Set dst (LoadB mem));

  ins_cost(125);
  format %{ "lb $dst, $mem #@loadB" %}
  ins_encode(load_B_enc(dst, mem));
  ins_pipe( ialu_loadI );
%}
// Load signed byte fused with extension to long; lb sign-extends to the full
// register width, so the ConvI2L is free.
instruct loadB_convI2L(mRegL dst, memory mem) %{
  match(Set dst (ConvI2L (LoadB mem)));

  ins_cost(125);
  format %{ "lb $dst, $mem #@loadB_convI2L" %}
  ins_encode(load_B_enc(dst, mem));
  ins_pipe( ialu_loadI );
%}
// Load Byte (8bit UNsigned)
instruct loadUB(mRegI dst, memory mem) %{
  match(Set dst (LoadUB mem));

  ins_cost(125);
  format %{ "lbu $dst, $mem #@loadUB" %}
  ins_encode(load_UB_enc(dst, mem));
  ins_pipe( ialu_loadI );
%}
// Load unsigned byte fused with extension to long; lbu zero-extends to the
// full register width, so the ConvI2L is free.
instruct loadUB_convI2L(mRegL dst, memory mem) %{
  match(Set dst (ConvI2L (LoadUB mem)));

  ins_cost(125);
  format %{ "lbu $dst, $mem #@loadUB_convI2L" %}
  ins_encode(load_UB_enc(dst, mem));
  ins_pipe( ialu_loadI );
%}
// Load Short (16bit signed)
instruct loadS(mRegI dst, memory mem) %{
  match(Set dst (LoadS mem));

  ins_cost(125);
  format %{ "lh $dst, $mem #@loadS" %}
  ins_encode(load_S_enc(dst, mem));
  ins_pipe( ialu_loadI );
%}
// Load Short (16 bit signed) to Byte (8 bit signed)
// Matches the (x << 24) >> 24 idiom over a short load and emits a single lb.
instruct loadS2B(mRegI dst, memory mem, immI_24 twentyfour) %{
  match(Set dst (RShiftI (LShiftI (LoadS mem) twentyfour) twentyfour));

  ins_cost(125);
  format %{ "lb $dst, $mem\t# short -> byte #@loadS2B" %}
  ins_encode(load_B_enc(dst, mem));
  ins_pipe(ialu_loadI);
%}
// Load signed short fused with extension to long; lh sign-extends to the
// full register width, so the ConvI2L is free.
instruct loadS_convI2L(mRegL dst, memory mem) %{
  match(Set dst (ConvI2L (LoadS mem)));

  ins_cost(125);
  format %{ "lh $dst, $mem #@loadS_convI2L" %}
  ins_encode(load_S_enc(dst, mem));
  ins_pipe( ialu_loadI );
%}
// Store Integer Immediate
// The encoding materializes the constant into a scratch register first.
instruct storeImmI(memory mem, immI src) %{
  match(Set mem (StoreI mem src));

  ins_cost(150);
  format %{ "mov $mem, $src #@storeImmI" %}
  ins_encode(store_I_immI_enc(mem, src));
  ins_pipe( ialu_storeI );
%}
// Store Integer
instruct storeI(memory mem, mRegI src) %{
  match(Set mem (StoreI mem src));

  ins_cost(125);
  format %{ "sw $mem, $src #@storeI" %}
  ins_encode(store_I_reg_enc(mem, src));
  ins_pipe( ialu_storeI );
%}
// Store int from a long source; sw only writes the low 32 bits, so the
// ConvL2I truncation is free.
instruct storeI_convL2I(memory mem, mRegL src) %{
  match(Set mem (StoreI mem (ConvL2I src)));

  ins_cost(125);
  format %{ "sw $mem, $src #@storeI_convL2I" %}
  ins_encode(store_I_reg_enc(mem, src));
  ins_pipe( ialu_storeI );
%}
// Load Float
instruct loadF(regF dst, memory mem) %{
  match(Set dst (LoadF mem));

  ins_cost(150);
  format %{ "loadF $dst, $mem #@loadF" %}
  ins_encode(load_F_enc(dst, mem));
  ins_pipe( ialu_loadI );
%}
// Materialize a pointer constant, emitting relocation info when the constant
// is a metadata or oop reference so the GC / class unloading can patch it.
// Patchable (fixed-length) sequences are used for relocated constants.
instruct loadConP_general(mRegP dst, immP src) %{
  match(Set dst src);

  ins_cost(120);
  format %{ "li $dst, $src #@loadConP_general" %}

  ins_encode %{
    Register dst = $dst$$Register;
    long* value = (long*)$src$$constant;

    if($src->constant_reloc() == relocInfo::metadata_type){
      int klass_index = __ oop_recorder()->find_index((Klass*)value);
      RelocationHolder rspec = metadata_Relocation::spec(klass_index);

      __ relocate(rspec);
      __ patchable_set48(dst, (long)value);
    }else if($src->constant_reloc() == relocInfo::oop_type){
      int oop_index = __ oop_recorder()->find_index((jobject)value);
      RelocationHolder rspec = oop_Relocation::spec(oop_index);

      __ relocate(rspec);
      __ patchable_set48(dst, (long)value);
    } else if ($src->constant_reloc() == relocInfo::none) {
      // Plain (non-relocated) constant.
      // NOTE(review): no code is emitted for any other reloc type —
      // presumably unreachable here; confirm, or add a ShouldNotReachHere().
      __ set64(dst, (long)value);
    }
  %}

  ins_pipe( ialu_regI_regI );
%}
6750 /*
6751 instruct loadConP_load(mRegP dst, immP_load src) %{
6752 match(Set dst src);
6754 ins_cost(100);
6755 format %{ "ld $dst, [$constanttablebase + $constantoffset] load from constant table: ptr=$src @ loadConP_load" %}
6757 ins_encode %{
6759 int con_offset = $constantoffset($src);
6761 if (Assembler::is_simm16(con_offset)) {
6762 __ ld($dst$$Register, $constanttablebase, con_offset);
6763 } else {
6764 __ set64(AT, con_offset);
6765 if (UseLoongsonISA) {
6766 __ gsldx($dst$$Register, $constanttablebase, AT, 0);
6767 } else {
6768 __ daddu(AT, $constanttablebase, AT);
6769 __ ld($dst$$Register, AT, 0);
6770 }
6771 }
6772 %}
6774 ins_pipe(ialu_loadI);
6775 %}
6776 */
// Materialize a non-oop pointer constant that needs no relocation;
// cheaper than loadConP_general.
instruct loadConP_no_oop_cheap(mRegP dst, immP_no_oop_cheap src) %{
  match(Set dst src);

  ins_cost(80);
  format %{ "li $dst, $src @ loadConP_no_oop_cheap" %}

  ins_encode %{
    __ set64($dst$$Register, $src$$constant);
  %}

  ins_pipe(ialu_regI_regI);
%}
// Materialize the safepoint polling-page address.
instruct loadConP_poll(mRegP dst, immP_poll src) %{
  match(Set dst src);

  ins_cost(50);
  format %{ "li $dst, $src #@loadConP_poll" %}

  ins_encode %{
    Register dst = $dst$$Register;
    intptr_t value = (intptr_t)$src$$constant;

    __ set64(dst, (jlong)value);
  %}

  ins_pipe( ialu_regI_regI );
%}
// Materialize the NULL pointer by copying the hardwired zero register.
instruct loadConP0(mRegP dst, immP0 src)
%{
  match(Set dst src);

  ins_cost(50);
  format %{ "mov $dst, R0\t# ptr" %}
  ins_encode %{
    Register dst_reg = $dst$$Register;
    __ daddu(dst_reg, R0, R0);
  %}
  ins_pipe( ialu_regI_regI );
%}
// Materialize the compressed NULL pointer (zero) into a narrow-oop register.
instruct loadConN0(mRegN dst, immN0 src) %{
  match(Set dst src);
  format %{ "move $dst, R0\t# compressed NULL ptr" %}
  ins_encode %{
    __ move($dst$$Register, R0);
  %}
  ins_pipe( ialu_regI_regI );
%}
// Materialize a narrow-oop constant; set_narrow_oop records the oop
// relocation so the constant can be patched.
instruct loadConN(mRegN dst, immN src) %{
  match(Set dst src);

  ins_cost(125);
  format %{ "li $dst, $src\t# compressed ptr @ loadConN" %}
  ins_encode %{
    Register dst = $dst$$Register;
    __ set_narrow_oop(dst, (jobject)$src$$constant);
  %}
  ins_pipe( ialu_regI_regI ); // XXX
%}
// Materialize a compressed-klass constant; set_narrow_klass records the
// metadata relocation so the constant can be patched.
instruct loadConNKlass(mRegN dst, immNKlass src) %{
  match(Set dst src);

  ins_cost(125);
  format %{ "li $dst, $src\t# compressed klass ptr @ loadConNKlass" %}
  ins_encode %{
    Register dst = $dst$$Register;
    __ set_narrow_klass(dst, (Klass*)$src$$constant);
  %}
  ins_pipe( ialu_regI_regI ); // XXX
%}
//FIXME
// Tail Call; Jump from runtime stub to Java code.
// Also known as an 'interprocedural jump'.
// Target of jump will eventually return to caller.
// TailJump below removes the return address.
// The method oop is passed in S3; control transfers with jr + delay-slot nop.
instruct TailCalljmpInd(mRegP jump_target, mRegP method_oop) %{
  match(TailCall jump_target method_oop );
  ins_cost(300);
  format %{ "JMP $jump_target \t# @TailCalljmpInd" %}

  ins_encode %{
    Register target = $jump_target$$Register;
    Register oop = $method_oop$$Register;

    /* 2012/10/12 Jin: RA will be used in generate_forward_exception() */
    __ push(RA);

    __ move(S3, oop);
    __ jr(target);
    __ nop();
  %}

  ins_pipe( pipe_jump );
%}
// Create exception oop: created by stack-crawling runtime code.
// Created exception is now available to this handler, and is setup
// just prior to jumping to this handler. No code emitted.
instruct CreateException( a0_RegP ex_oop )
%{
  match(Set ex_oop (CreateEx));

  // use the following format syntax
  format %{ "# exception oop is in A0; no code emitted @CreateException" %}
  ins_encode %{
    /* Jin: X86 leaves this function empty */
    __ block_comment("CreateException is empty in X86/MIPS");
  %}
  ins_pipe( empty );
//  ins_pipe( pipe_jump );
%}
6897 /* 2012/9/14 Jin: The mechanism of exception handling is clear now.
6899 - Common try/catch:
6900 2012/9/14 Jin: [stubGenerator_mips.cpp] generate_forward_exception()
6901 |- V0, V1 are created
6902 |- T9 <= SharedRuntime::exception_handler_for_return_address
6903 `- jr T9
6904 `- the caller's exception_handler
6905 `- jr OptoRuntime::exception_blob
6906 `- here
6907 - Rethrow(e.g. 'unwind'):
6908 * The callee:
6909 |- an exception is triggered during execution
6910 `- exits the callee method through RethrowException node
6911 |- The callee pushes exception_oop(T0) and exception_pc(RA)
6912 `- The callee jumps to OptoRuntime::rethrow_stub()
6913 * In OptoRuntime::rethrow_stub:
6914 |- The VM calls _rethrow_Java to determine the return address in the caller method
6915 `- exits the stub with tailjmpInd
6916 |- pops exception_oop(V0) and exception_pc(V1)
6917 `- jumps to the return address(usually an exception_handler)
6918 * The caller:
6919 `- continues processing the exception_blob with V0/V1
6920 */
6922 /*
6923 Disassembling OptoRuntime::rethrow_stub()
6925 ; locals
6926 0x2d3bf320: addiu sp, sp, 0xfffffff8
6927 0x2d3bf324: sw ra, 0x4(sp)
6928 0x2d3bf328: sw fp, 0x0(sp)
6929 0x2d3bf32c: addu fp, sp, zero
6930 0x2d3bf330: addiu sp, sp, 0xfffffff0
6931 0x2d3bf334: sw ra, 0x8(sp)
6932 0x2d3bf338: sw t0, 0x4(sp)
6933 0x2d3bf33c: sw sp, 0x0(sp)
6935 ; get_thread(S2)
6936 0x2d3bf340: addu s2, sp, zero
6937 0x2d3bf344: srl s2, s2, 12
6938 0x2d3bf348: sll s2, s2, 2
6939 0x2d3bf34c: lui at, 0x2c85
6940 0x2d3bf350: addu at, at, s2
6941 0x2d3bf354: lw s2, 0xffffcc80(at)
6943 0x2d3bf358: lw s0, 0x0(sp)
0x2d3bf35c: sw s0, 0x118(s2)	// last_sp -> thread
6945 0x2d3bf360: sw s2, 0xc(sp)
6947 ; OptoRuntime::rethrow_C(oopDesc* exception, JavaThread* thread, address ret_pc)
6948 0x2d3bf364: lw a0, 0x4(sp)
6949 0x2d3bf368: lw a1, 0xc(sp)
6950 0x2d3bf36c: lw a2, 0x8(sp)
6951 ;; Java_To_Runtime
6952 0x2d3bf370: lui t9, 0x2c34
6953 0x2d3bf374: addiu t9, t9, 0xffff8a48
6954 0x2d3bf378: jalr t9
6955 0x2d3bf37c: nop
6957 0x2d3bf380: addu s3, v0, zero ; S3: SharedRuntime::raw_exception_handler_for_return_address()
6959 0x2d3bf384: lw s0, 0xc(sp)
6960 0x2d3bf388: sw zero, 0x118(s0)
6961 0x2d3bf38c: sw zero, 0x11c(s0)
6962 0x2d3bf390: lw s1, 0x144(s0) ; ex_oop: S1
6963 0x2d3bf394: addu s2, s0, zero
6964 0x2d3bf398: sw zero, 0x144(s2)
6965 0x2d3bf39c: lw s0, 0x4(s2)
6966 0x2d3bf3a0: addiu s4, zero, 0x0
6967 0x2d3bf3a4: bne s0, s4, 0x2d3bf3d4
6968 0x2d3bf3a8: nop
6969 0x2d3bf3ac: addiu sp, sp, 0x10
6970 0x2d3bf3b0: addiu sp, sp, 0x8
6971 0x2d3bf3b4: lw ra, 0xfffffffc(sp)
6972 0x2d3bf3b8: lw fp, 0xfffffff8(sp)
6973 0x2d3bf3bc: lui at, 0x2b48
6974 0x2d3bf3c0: lw at, 0x100(at)
6976 ; tailjmpInd: Restores exception_oop & exception_pc
6977 0x2d3bf3c4: addu v1, ra, zero
6978 0x2d3bf3c8: addu v0, s1, zero
6979 0x2d3bf3cc: jr s3
6980 0x2d3bf3d0: nop
6981 ; Exception:
6982 0x2d3bf3d4: lui s1, 0x2cc8 ; generate_forward_exception()
6983 0x2d3bf3d8: addiu s1, s1, 0x40
6984 0x2d3bf3dc: addiu s2, zero, 0x0
6985 0x2d3bf3e0: addiu sp, sp, 0x10
6986 0x2d3bf3e4: addiu sp, sp, 0x8
6987 0x2d3bf3e8: lw ra, 0xfffffffc(sp)
6988 0x2d3bf3ec: lw fp, 0xfffffff8(sp)
6989 0x2d3bf3f0: lui at, 0x2b48
6990 0x2d3bf3f4: lw at, 0x100(at)
6991 ; TailCalljmpInd
6992 __ push(RA); ; to be used in generate_forward_exception()
6993 0x2d3bf3f8: addu t7, s2, zero
6994 0x2d3bf3fc: jr s1
6995 0x2d3bf400: nop
6996 */
// Rethrow exception:
// The exception oop will come in the first argument position.
// Then JUMP (not call) to the rethrow stub code.
// A runtime-call relocation is recorded so the patchable jump target can be
// relocated with the code.
instruct RethrowException()
%{
  match(Rethrow);

  // use the following format syntax
  format %{ "JMP rethrow_stub #@RethrowException" %}
  ins_encode %{
    __ block_comment("@ RethrowException");

    cbuf.set_insts_mark();
    cbuf.relocate(cbuf.insts_mark(), runtime_call_Relocation::spec());

    // call OptoRuntime::rethrow_stub to get the exception handler in parent method
    __ patchable_jump((address)OptoRuntime::rethrow_stub());
  %}
  ins_pipe( pipe_jump );
%}
// Conditional branch comparing a pointer against NULL (the zero register).
// Only eq/ne are handled; the unsigned relational cases are disabled below.
instruct branchConP_zero(cmpOpU cmp, mRegP op1, immP0 zero, label labl) %{
  match(If cmp (CmpP op1 zero));
  effect(USE labl);

  ins_cost(180);
  format %{ "b$cmp $op1, R0, $labl #@branchConP_zero" %}

  ins_encode %{
    Register op1 = $op1$$Register;
    Register op2 = R0;
    Label    &L  = *($labl$$label);
    int     flag = $cmp$$cmpcode;

    // NOTE(review): '&L' tests whether the underlying label pointer
    // ($labl$$label) was NULL before the dereference above; in well-formed
    // C++ a reference is never null, so this relies on the compiler not
    // exploiting that UB — pattern used throughout this file.
    switch(flag)
    {
      case 0x01: //equal
        if (&L)
          __ beq(op1, op2, L);
        else
          __ beq(op1, op2, (int)0);
        break;
      case 0x02: //not_equal
        if (&L)
          __ bne(op1, op2, L);
        else
          __ bne(op1, op2, (int)0);
        break;
/*
      case 0x03: //above
        __ sltu(AT, op2, op1);
        if(&L)
          __ bne(R0, AT, L);
        else
          __ bne(R0, AT, (int)0);
        break;
      case 0x04: //above_equal
        __ sltu(AT, op1, op2);
        if(&L)
          __ beq(AT, R0, L);
        else
          __ beq(AT, R0, (int)0);
        break;
      case 0x05: //below
        __ sltu(AT, op1, op2);
        if(&L)
          __ bne(R0, AT, L);
        else
          __ bne(R0, AT, (int)0);
        break;
      case 0x06: //below_equal
        __ sltu(AT, op2, op1);
        if(&L)
          __ beq(AT, R0, L);
        else
          __ beq(AT, R0, (int)0);
        break;
*/
      default:
          Unimplemented();
    }
    // Fill the branch delay slot.
    __ nop();
  %}

  ins_pc_relative(1);
  ins_pipe( pipe_alu_branch );
%}
// Branch on a compressed-oop (narrow) pointer compared against NULL without
// decoding it first. Valid only when the heap base is 0 and the shift is 0,
// so the narrow value is zero exactly when the decoded pointer is NULL.
7085 instruct branchConN2P_zero(cmpOpU cmp, mRegN op1, immP0 zero, label labl) %{
7086 match(If cmp (CmpP (DecodeN op1) zero));
7087 predicate(Universe::narrow_oop_base() == NULL && Universe::narrow_oop_shift() == 0);
7088 effect(USE labl);
7090 ins_cost(180);
7091 format %{ "b$cmp $op1, R0, $labl #@branchConN2P_zero" %}
7093 ins_encode %{
7094 Register op1 = $op1$$Register;
7095 Register op2 = R0;
7096 Label &L = *($labl$$label);
// NOTE(review): `if (&L)` tests the address of a reference and can only be
// false via undefined behavior; consider testing $labl$$label directly.
7097 int flag = $cmp$$cmpcode;
7099 switch(flag)
7100 {
7101 case 0x01: //equal
7102 if (&L)
7103 __ beq(op1, op2, L);
7104 else
7105 __ beq(op1, op2, (int)0);
7106 break;
7107 case 0x02: //not_equal
7108 if (&L)
7109 __ bne(op1, op2, L);
7110 else
7111 __ bne(op1, op2, (int)0);
7112 break;
7113 default:
7114 Unimplemented();
7115 }
// Fill the branch delay slot.
7116 __ nop();
7117 %}
7119 ins_pc_relative(1);
7120 ins_pipe( pipe_alu_branch );
7121 %}
// Branch on an unsigned pointer-vs-pointer compare. eq/ne branch directly;
// the order relations (above/below/...) are synthesized with sltu into AT
// and then branched on AT.
7124 instruct branchConP(cmpOpU cmp, mRegP op1, mRegP op2, label labl) %{
7125 match(If cmp (CmpP op1 op2));
7126 // predicate(can_branch_register(_kids[0]->_leaf, _kids[1]->_leaf));
7127 effect(USE labl);
7129 ins_cost(200);
7130 format %{ "b$cmp $op1, $op2, $labl #@branchConP" %}
7132 ins_encode %{
7133 Register op1 = $op1$$Register;
7134 Register op2 = $op2$$Register;
7135 Label &L = *($labl$$label);
// NOTE(review): `if (&L)` tests the address of a reference and can only be
// false via undefined behavior; consider testing $labl$$label directly.
7136 int flag = $cmp$$cmpcode;
7138 switch(flag)
7139 {
7140 case 0x01: //equal
7141 if (&L)
7142 __ beq(op1, op2, L);
7143 else
7144 __ beq(op1, op2, (int)0);
7145 break;
7146 case 0x02: //not_equal
7147 if (&L)
7148 __ bne(op1, op2, L);
7149 else
7150 __ bne(op1, op2, (int)0);
7151 break;
7152 case 0x03: //above
7153 __ sltu(AT, op2, op1);
7154 if(&L)
7155 __ bne(R0, AT, L);
7156 else
7157 __ bne(R0, AT, (int)0);
7158 break;
7159 case 0x04: //above_equal
7160 __ sltu(AT, op1, op2);
7161 if(&L)
7162 __ beq(AT, R0, L);
7163 else
7164 __ beq(AT, R0, (int)0);
7165 break;
7166 case 0x05: //below
7167 __ sltu(AT, op1, op2);
7168 if(&L)
7169 __ bne(R0, AT, L);
7170 else
7171 __ bne(R0, AT, (int)0);
7172 break;
7173 case 0x06: //below_equal
7174 __ sltu(AT, op2, op1);
7175 if(&L)
7176 __ beq(AT, R0, L);
7177 else
7178 __ beq(AT, R0, (int)0);
7179 break;
7180 default:
7181 Unimplemented();
7182 }
// Fill the branch delay slot.
7183 __ nop();
7184 %}
7186 ins_pc_relative(1);
7187 ins_pipe( pipe_alu_branch );
7188 %}
// Branch on a compressed (narrow) oop compared against the narrow NULL.
// Only eq/ne are supported; a narrow oop is 0 exactly for NULL.
7190 instruct cmpN_null_branch(cmpOp cmp, mRegN op1, immN0 null, label labl) %{
7191 match(If cmp (CmpN op1 null));
7192 effect(USE labl);
7194 ins_cost(180);
7195 format %{ "CMP $op1,0\t! compressed ptr\n\t"
7196 "BP$cmp $labl @ cmpN_null_branch" %}
7197 ins_encode %{
7198 Register op1 = $op1$$Register;
7199 Register op2 = R0;
7200 Label &L = *($labl$$label);
// NOTE(review): `if (&L)` can only be false via undefined behavior;
// consider testing $labl$$label directly.
7201 int flag = $cmp$$cmpcode;
7203 switch(flag)
7204 {
7205 case 0x01: //equal
7206 if (&L)
7207 __ beq(op1, op2, L);
7208 else
7209 __ beq(op1, op2, (int)0);
7210 break;
7211 case 0x02: //not_equal
7212 if (&L)
7213 __ bne(op1, op2, L);
7214 else
7215 __ bne(op1, op2, (int)0);
7216 break;
7217 default:
7218 Unimplemented();
7219 }
// Fill the branch delay slot.
7220 __ nop();
7221 %}
7222 //TODO: pipe_branchP or create pipe_branchN LEE
7223 ins_pc_relative(1);
7224 ins_pipe( pipe_alu_branch );
7225 %}
// Branch on an unsigned compare of two compressed (narrow) oops. Order
// relations are synthesized with sltu into AT, like branchConP.
7227 instruct cmpN_reg_branch(cmpOp cmp, mRegN op1, mRegN op2, label labl) %{
7228 match(If cmp (CmpN op1 op2));
7229 effect(USE labl);
7231 ins_cost(180);
7232 format %{ "CMP $op1,$op2\t! compressed ptr\n\t"
7233 "BP$cmp $labl" %}
7234 ins_encode %{
7235 Register op1_reg = $op1$$Register;
7236 Register op2_reg = $op2$$Register;
7237 Label &L = *($labl$$label);
// NOTE(review): `if (&L)` can only be false via undefined behavior;
// consider testing $labl$$label directly.
7238 int flag = $cmp$$cmpcode;
7240 switch(flag)
7241 {
7242 case 0x01: //equal
7243 if (&L)
7244 __ beq(op1_reg, op2_reg, L);
7245 else
7246 __ beq(op1_reg, op2_reg, (int)0);
7247 break;
7248 case 0x02: //not_equal
7249 if (&L)
7250 __ bne(op1_reg, op2_reg, L);
7251 else
7252 __ bne(op1_reg, op2_reg, (int)0);
7253 break;
7254 case 0x03: //above
7255 __ sltu(AT, op2_reg, op1_reg);
7256 if(&L)
7257 __ bne(R0, AT, L);
7258 else
7259 __ bne(R0, AT, (int)0);
7260 break;
7261 case 0x04: //above_equal
7262 __ sltu(AT, op1_reg, op2_reg);
7263 if(&L)
7264 __ beq(AT, R0, L);
7265 else
7266 __ beq(AT, R0, (int)0);
7267 break;
7268 case 0x05: //below
7269 __ sltu(AT, op1_reg, op2_reg);
7270 if(&L)
7271 __ bne(R0, AT, L);
7272 else
7273 __ bne(R0, AT, (int)0);
7274 break;
7275 case 0x06: //below_equal
7276 __ sltu(AT, op2_reg, op1_reg);
7277 if(&L)
7278 __ beq(AT, R0, L);
7279 else
7280 __ beq(AT, R0, (int)0);
7281 break;
7282 case 0x06: //below_equal is not a duplicate; see switch above
7283 default:
7284 Unimplemented();
7285 }
// Fill the branch delay slot.
7285 __ nop();
7286 %}
7287 ins_pc_relative(1);
7288 ins_pipe( pipe_alu_branch );
7289 %}
// Branch on an unsigned int compare of two registers; order relations are
// synthesized with sltu into AT.
7291 instruct branchConIU_reg_reg(cmpOpU cmp, mRegI src1, mRegI src2, label labl) %{
7292 match( If cmp (CmpU src1 src2) );
7293 effect(USE labl);
7294 format %{ "BR$cmp $src1, $src2, $labl #@branchConIU_reg_reg" %}
7296 ins_encode %{
7297 Register op1 = $src1$$Register;
7298 Register op2 = $src2$$Register;
7299 Label &L = *($labl$$label);
// NOTE(review): `if (&L)` can only be false via undefined behavior;
// consider testing $labl$$label directly.
7300 int flag = $cmp$$cmpcode;
7302 switch(flag)
7303 {
7304 case 0x01: //equal
7305 if (&L)
7306 __ beq(op1, op2, L);
7307 else
7308 __ beq(op1, op2, (int)0);
7309 break;
7310 case 0x02: //not_equal
7311 if (&L)
7312 __ bne(op1, op2, L);
7313 else
7314 __ bne(op1, op2, (int)0);
7315 break;
7316 case 0x03: //above
7317 __ sltu(AT, op2, op1);
7318 if(&L)
7319 __ bne(AT, R0, L);
7320 else
7321 __ bne(AT, R0, (int)0);
7322 break;
7323 case 0x04: //above_equal
7324 __ sltu(AT, op1, op2);
7325 if(&L)
7326 __ beq(AT, R0, L);
7327 else
7328 __ beq(AT, R0, (int)0);
7329 break;
7330 case 0x05: //below
7331 __ sltu(AT, op1, op2);
7332 if(&L)
7333 __ bne(AT, R0, L);
7334 else
7335 __ bne(AT, R0, (int)0);
7336 break;
7337 case 0x06: //below_equal
7338 __ sltu(AT, op2, op1);
7339 if(&L)
7340 __ beq(AT, R0, L);
7341 else
7342 __ beq(AT, R0, (int)0);
7343 break;
7344 default:
7345 Unimplemented();
7346 }
// Fill the branch delay slot.
7347 __ nop();
7348 %}
7350 ins_pc_relative(1);
7351 ins_pipe( pipe_alu_branch );
7352 %}
// Branch on an unsigned int compare against an arbitrary immediate. The
// immediate is first materialized into AT; the order cases then clobber AT
// again with the sltu result before branching.
7355 instruct branchConIU_reg_imm(cmpOpU cmp, mRegI src1, immI src2, label labl) %{
7356 match( If cmp (CmpU src1 src2) );
7357 effect(USE labl);
7358 format %{ "BR$cmp $src1, $src2, $labl #@branchConIU_reg_imm" %}
7360 ins_encode %{
7361 Register op1 = $src1$$Register;
7362 int val = $src2$$constant;
7363 Label &L = *($labl$$label);
// NOTE(review): `if (&L)` can only be false via undefined behavior;
// consider testing $labl$$label directly.
7364 int flag = $cmp$$cmpcode;
7366 __ move(AT, val);
7367 switch(flag)
7368 {
7369 case 0x01: //equal
7370 if (&L)
7371 __ beq(op1, AT, L);
7372 else
7373 __ beq(op1, AT, (int)0);
7374 break;
7375 case 0x02: //not_equal
7376 if (&L)
7377 __ bne(op1, AT, L);
7378 else
7379 __ bne(op1, AT, (int)0);
7380 break;
7381 case 0x03: //above
7382 __ sltu(AT, AT, op1);
7383 if(&L)
7384 __ bne(R0, AT, L);
7385 else
7386 __ bne(R0, AT, (int)0);
7387 break;
7388 case 0x04: //above_equal
7389 __ sltu(AT, op1, AT);
7390 if(&L)
7391 __ beq(AT, R0, L);
7392 else
7393 __ beq(AT, R0, (int)0);
7394 break;
7395 case 0x05: //below
7396 __ sltu(AT, op1, AT);
7397 if(&L)
7398 __ bne(R0, AT, L);
7399 else
7400 __ bne(R0, AT, (int)0);
7401 break;
7402 case 0x06: //below_equal
7403 __ sltu(AT, AT, op1);
7404 if(&L)
7405 __ beq(AT, R0, L);
7406 else
7407 __ beq(AT, R0, (int)0);
7408 break;
7409 default:
7410 Unimplemented();
7411 }
// Fill the branch delay slot.
7412 __ nop();
7413 %}
7415 ins_pc_relative(1);
7416 ins_pipe( pipe_alu_branch );
7417 %}
// Branch on a signed int compare of two registers; order relations are
// synthesized with slt (signed set-on-less-than) into AT.
7419 instruct branchConI_reg_reg(cmpOp cmp, mRegI src1, mRegI src2, label labl) %{
7420 match( If cmp (CmpI src1 src2) );
7421 effect(USE labl);
7422 format %{ "BR$cmp $src1, $src2, $labl #@branchConI_reg_reg" %}
7424 ins_encode %{
7425 Register op1 = $src1$$Register;
7426 Register op2 = $src2$$Register;
7427 Label &L = *($labl$$label);
// NOTE(review): `if (&L)` can only be false via undefined behavior;
// consider testing $labl$$label directly.
7428 int flag = $cmp$$cmpcode;
7430 switch(flag)
7431 {
7432 case 0x01: //equal
7433 if (&L)
7434 __ beq(op1, op2, L);
7435 else
7436 __ beq(op1, op2, (int)0);
7437 break;
7438 case 0x02: //not_equal
7439 if (&L)
7440 __ bne(op1, op2, L);
7441 else
7442 __ bne(op1, op2, (int)0);
7443 break;
7444 case 0x03: //above
7445 __ slt(AT, op2, op1);
7446 if(&L)
7447 __ bne(R0, AT, L);
7448 else
7449 __ bne(R0, AT, (int)0);
7450 break;
7451 case 0x04: //above_equal
7452 __ slt(AT, op1, op2);
7453 if(&L)
7454 __ beq(AT, R0, L);
7455 else
7456 __ beq(AT, R0, (int)0);
7457 break;
7458 case 0x05: //below
7459 __ slt(AT, op1, op2);
7460 if(&L)
7461 __ bne(R0, AT, L);
7462 else
7463 __ bne(R0, AT, (int)0);
7464 break;
7465 case 0x06: //below_equal
7466 __ slt(AT, op2, op1);
7467 if(&L)
7468 __ beq(AT, R0, L);
7469 else
7470 __ beq(AT, R0, (int)0);
7471 break;
7472 default:
7473 Unimplemented();
7474 }
// Fill the branch delay slot.
7475 __ nop();
7476 %}
7478 ins_pc_relative(1);
7479 ins_pipe( pipe_alu_branch );
7480 %}
// Branch on a signed int compare against zero. Uses MIPS compare-with-zero
// branches (bgtz/bgez/bltz/blez) directly, so no AT scratch is needed.
7482 instruct branchConI_reg_imm0(cmpOp cmp, mRegI src1, immI0 src2, label labl) %{
7483 match( If cmp (CmpI src1 src2) );
7484 effect(USE labl);
7485 ins_cost(170);
7486 format %{ "BR$cmp $src1, $src2, $labl #@branchConI_reg_imm0" %}
7488 ins_encode %{
7489 Register op1 = $src1$$Register;
7490 // int val = $src2$$constant;
7491 Label &L = *($labl$$label);
// NOTE(review): `if (&L)` can only be false via undefined behavior;
// consider testing $labl$$label directly.
7492 int flag = $cmp$$cmpcode;
7494 //__ move(AT, val);
7495 switch(flag)
7496 {
7497 case 0x01: //equal
7498 if (&L)
7499 __ beq(op1, R0, L);
7500 else
7501 __ beq(op1, R0, (int)0);
7502 break;
7503 case 0x02: //not_equal
7504 if (&L)
7505 __ bne(op1, R0, L);
7506 else
7507 __ bne(op1, R0, (int)0);
7508 break;
7509 case 0x03: //greater
7510 if(&L)
7511 __ bgtz(op1, L);
7512 else
7513 __ bgtz(op1, (int)0);
7514 break;
7515 case 0x04: //greater_equal
7516 if(&L)
7517 __ bgez(op1, L);
7518 else
7519 __ bgez(op1, (int)0);
7520 break;
7521 case 0x05: //less
7522 if(&L)
7523 __ bltz(op1, L);
7524 else
7525 __ bltz(op1, (int)0);
7526 break;
7527 case 0x06: //less_equal
7528 if(&L)
7529 __ blez(op1, L);
7530 else
7531 __ blez(op1, (int)0);
7532 break;
7533 default:
7534 Unimplemented();
7535 }
// Fill the branch delay slot.
7536 __ nop();
7537 %}
7539 ins_pc_relative(1);
7540 ins_pipe( pipe_alu_branch );
7541 %}
// Branch on a signed int compare against an arbitrary immediate. The
// immediate is materialized into AT first; the order cases overwrite AT
// with the slt result before branching.
7544 instruct branchConI_reg_imm(cmpOp cmp, mRegI src1, immI src2, label labl) %{
7545 match( If cmp (CmpI src1 src2) );
7546 effect(USE labl);
7547 ins_cost(200);
7548 format %{ "BR$cmp $src1, $src2, $labl #@branchConI_reg_imm" %}
7550 ins_encode %{
7551 Register op1 = $src1$$Register;
7552 int val = $src2$$constant;
7553 Label &L = *($labl$$label);
// NOTE(review): `if (&L)` can only be false via undefined behavior;
// consider testing $labl$$label directly.
7554 int flag = $cmp$$cmpcode;
7556 __ move(AT, val);
7557 switch(flag)
7558 {
7559 case 0x01: //equal
7560 if (&L)
7561 __ beq(op1, AT, L);
7562 else
7563 __ beq(op1, AT, (int)0);
7564 break;
7565 case 0x02: //not_equal
7566 if (&L)
7567 __ bne(op1, AT, L);
7568 else
7569 __ bne(op1, AT, (int)0);
7570 break;
7571 case 0x03: //greater
7572 __ slt(AT, AT, op1);
7573 if(&L)
7574 __ bne(R0, AT, L);
7575 else
7576 __ bne(R0, AT, (int)0);
7577 break;
7578 case 0x04: //greater_equal
7579 __ slt(AT, op1, AT);
7580 if(&L)
7581 __ beq(AT, R0, L);
7582 else
7583 __ beq(AT, R0, (int)0);
7584 break;
7585 case 0x05: //less
7586 __ slt(AT, op1, AT);
7587 if(&L)
7588 __ bne(R0, AT, L);
7589 else
7590 __ bne(R0, AT, (int)0);
7591 break;
7592 case 0x06: //less_equal
7593 __ slt(AT, AT, op1);
7594 if(&L)
7595 __ beq(AT, R0, L);
7596 else
7597 __ beq(AT, R0, (int)0);
7598 break;
7599 default:
7600 Unimplemented();
7601 }
// Fill the branch delay slot.
7602 __ nop();
7603 %}
7605 ins_pc_relative(1);
7606 ins_pipe( pipe_alu_branch );
7607 %}
// Branch on an unsigned compare of $src1 against the constant zero.
// Several unsigned relations against 0 degenerate: 'above' is 'not equal',
// 'above_equal' is always taken, 'below' is never taken (no code at all is
// emitted for it), and 'below_equal' is 'equal'.
instruct branchConIU_reg_imm0(cmpOpU cmp, mRegI src1, immI0 zero, label labl) %{
  match( If cmp (CmpU src1 zero) );
  effect(USE labl);
  format %{ "BR$cmp $src1, zero, $labl #@branchConIU_reg_imm0" %}

  ins_encode %{
    Register op1 = $src1$$Register;
    // Test the raw label pointer. The previous code bound a reference via
    // `Label &L = *($labl$$label)` and then tested `if (&L)`, which is
    // undefined behavior when the operand label is NULL and tautologically
    // true otherwise.
    Label* L = $labl$$label;
    int flag = $cmp$$cmpcode;

    switch(flag) {
      case 0x01: //equal
        if (L != NULL)
          __ beq(op1, R0, *L);
        else
          __ beq(op1, R0, (int)0);
        break;
      case 0x02: //not_equal
        if (L != NULL)
          __ bne(op1, R0, *L);
        else
          __ bne(op1, R0, (int)0);
        break;
      case 0x03: //above: unsigned (x > 0) <==> (x != 0)
        if (L != NULL)
          __ bne(R0, op1, *L);
        else
          __ bne(R0, op1, (int)0);
        break;
      case 0x04: //above_equal: unsigned (x >= 0) always holds
        if (L != NULL)
          __ beq(R0, R0, *L);
        else
          __ beq(R0, R0, (int)0);
        break;
      case 0x05: //below: unsigned (x < 0) never holds.
        // Emit nothing - not even the delay-slot nop, since no branch was
        // generated. (The unreachable `break` after this return was removed.)
        return;
      case 0x06: //below_equal: unsigned (x <= 0) <==> (x == 0)
        if (L != NULL)
          __ beq(op1, R0, *L);
        else
          __ beq(op1, R0, (int)0);
        break;
      default:
        Unimplemented();
    }
    __ nop();   // branch delay slot
  %}

  ins_pc_relative(1);
  ins_pipe( pipe_alu_branch );
%}
// Branch on an unsigned int compare against a 16-bit immediate. Where the
// immediate fits sltiu's field (above_equal/below) it is used directly;
// the other cases materialize the constant into AT first.
7665 instruct branchConIU_reg_immI16(cmpOpU cmp, mRegI src1, immI16 src2, label labl) %{
7666 match( If cmp (CmpU src1 src2) );
7667 effect(USE labl);
7668 ins_cost(180);
7669 format %{ "BR$cmp $src1, $src2, $labl #@branchConIU_reg_immI16" %}
7671 ins_encode %{
7672 Register op1 = $src1$$Register;
7673 int val = $src2$$constant;
7674 Label &L = *($labl$$label);
// NOTE(review): `if (&L)` can only be false via undefined behavior;
// consider testing $labl$$label directly.
7675 int flag = $cmp$$cmpcode;
7677 switch(flag)
7678 {
7679 case 0x01: //equal
7680 __ move(AT, val);
7681 if (&L)
7682 __ beq(op1, AT, L);
7683 else
7684 __ beq(op1, AT, (int)0);
7685 break;
7686 case 0x02: //not_equal
7687 __ move(AT, val);
7688 if (&L)
7689 __ bne(op1, AT, L);
7690 else
7691 __ bne(op1, AT, (int)0);
7692 break;
7693 case 0x03: //above
7694 __ move(AT, val);
7695 __ sltu(AT, AT, op1);
7696 if(&L)
7697 __ bne(R0, AT, L);
7698 else
7699 __ bne(R0, AT, (int)0);
7700 break;
7701 case 0x04: //above_equal
7702 __ sltiu(AT, op1, val);
7703 if(&L)
7704 __ beq(AT, R0, L);
7705 else
7706 __ beq(AT, R0, (int)0);
7707 break;
7708 case 0x05: //below
7709 __ sltiu(AT, op1, val);
7710 if(&L)
7711 __ bne(R0, AT, L);
7712 else
7713 __ bne(R0, AT, (int)0);
7714 break;
7715 case 0x06: //below_equal
7716 __ move(AT, val);
7717 __ sltu(AT, AT, op1);
7718 if(&L)
7719 __ beq(AT, R0, L);
7720 else
7721 __ beq(AT, R0, (int)0);
7722 break;
7723 default:
7724 Unimplemented();
7725 }
// Fill the branch delay slot.
7726 __ nop();
7727 %}
7729 ins_pc_relative(1);
7730 ins_pipe( pipe_alu_branch );
7731 %}
// Branch on a signed long compare of two registers. Unlike the int forms,
// each case fills its own delay slot with `delayed()->nop()` instead of a
// shared trailing nop after the switch.
7734 instruct branchConL_regL_regL(cmpOp cmp, mRegL src1, mRegL src2, label labl) %{
7735 match( If cmp (CmpL src1 src2) );
7736 effect(USE labl);
7737 format %{ "BR$cmp $src1, $src2, $labl #@branchConL_regL_regL" %}
7738 ins_cost(250);
7740 ins_encode %{
7741 Register opr1_reg = as_Register($src1$$reg);
7742 Register opr2_reg = as_Register($src2$$reg);
7744 Label &target = *($labl$$label);
// NOTE(review): `if (&target)` can only be false via undefined behavior;
// consider testing $labl$$label directly.
7745 int flag = $cmp$$cmpcode;
7747 switch(flag)
7748 {
7749 case 0x01: //equal
7750 if (&target)
7751 __ beq(opr1_reg, opr2_reg, target);
7752 else
7753 __ beq(opr1_reg, opr2_reg, (int)0);
7754 __ delayed()->nop();
7755 break;
7757 case 0x02: //not_equal
7758 if(&target)
7759 __ bne(opr1_reg, opr2_reg, target);
7760 else
7761 __ bne(opr1_reg, opr2_reg, (int)0);
7762 __ delayed()->nop();
7763 break;
7765 case 0x03: //greater
7766 __ slt(AT, opr2_reg, opr1_reg);
7767 if(&target)
7768 __ bne(AT, R0, target);
7769 else
7770 __ bne(AT, R0, (int)0);
7771 __ delayed()->nop();
7772 break;
7774 case 0x04: //greater_equal
7775 __ slt(AT, opr1_reg, opr2_reg);
7776 if(&target)
7777 __ beq(AT, R0, target);
7778 else
7779 __ beq(AT, R0, (int)0);
7780 __ delayed()->nop();
7782 break;
7784 case 0x05: //less
7785 __ slt(AT, opr1_reg, opr2_reg);
7786 if(&target)
7787 __ bne(AT, R0, target);
7788 else
7789 __ bne(AT, R0, (int)0);
7790 __ delayed()->nop();
7792 break;
7794 case 0x06: //less_equal
7795 __ slt(AT, opr2_reg, opr1_reg);
7797 if(&target)
7798 __ beq(AT, R0, target);
7799 else
7800 __ beq(AT, R0, (int)0);
7801 __ delayed()->nop();
7803 break;
7805 default:
7806 Unimplemented();
7807 }
7808 %}
7811 ins_pc_relative(1);
7812 ins_pipe( pipe_alu_branch );
7813 %}
// Branch on a signed long compare against an immediate by computing
// AT = src1 - val (immL16_sub guarantees -val fits daddiu's 16-bit field)
// and then using MIPS sign-test branches against AT.
7815 instruct branchConL_reg_immL16_sub(cmpOp cmp, mRegL src1, immL16_sub src2, label labl) %{
7816 match( If cmp (CmpL src1 src2) );
7817 effect(USE labl);
7818 ins_cost(180);
7819 format %{ "BR$cmp $src1, $src2, $labl #@branchConL_reg_immL16_sub" %}
7821 ins_encode %{
7822 Register op1 = $src1$$Register;
7823 int val = $src2$$constant;
7824 Label &L = *($labl$$label);
// NOTE(review): `if (&L)` can only be false via undefined behavior;
// consider testing $labl$$label directly.
7825 int flag = $cmp$$cmpcode;
7827 __ daddiu(AT, op1, -1 * val);
7828 switch(flag)
7829 {
7830 case 0x01: //equal
7831 if (&L)
7832 __ beq(R0, AT, L);
7833 else
7834 __ beq(R0, AT, (int)0);
7835 break;
7836 case 0x02: //not_equal
7837 if (&L)
7838 __ bne(R0, AT, L);
7839 else
7840 __ bne(R0, AT, (int)0);
7841 break;
7842 case 0x03: //greater
7843 if(&L)
7844 __ bgtz(AT, L);
7845 else
7846 __ bgtz(AT, (int)0);
7847 break;
7848 case 0x04: //greater_equal
7849 if(&L)
7850 __ bgez(AT, L);
7851 else
7852 __ bgez(AT, (int)0);
7853 break;
7854 case 0x05: //less
7855 if(&L)
7856 __ bltz(AT, L);
7857 else
7858 __ bltz(AT, (int)0);
7859 break;
7860 case 0x06: //less_equal
7861 if(&L)
7862 __ blez(AT, L);
7863 else
7864 __ blez(AT, (int)0);
7865 break;
7866 default:
7867 Unimplemented();
7868 }
// Fill the branch delay slot.
7869 __ nop();
7870 %}
7872 ins_pc_relative(1);
7873 ins_pipe( pipe_alu_branch );
7874 %}
// Branch on a signed int compare against an immediate by computing
// AT = src1 - val (32-bit addiu32; immI16_sub guarantees -val fits) and
// then using MIPS sign-test branches against AT.
7877 instruct branchConI_reg_imm16_sub(cmpOp cmp, mRegI src1, immI16_sub src2, label labl) %{
7878 match( If cmp (CmpI src1 src2) );
7879 effect(USE labl);
7880 ins_cost(180);
7881 format %{ "BR$cmp $src1, $src2, $labl #@branchConI_reg_imm16_sub" %}
7883 ins_encode %{
7884 Register op1 = $src1$$Register;
7885 int val = $src2$$constant;
7886 Label &L = *($labl$$label);
// NOTE(review): `if (&L)` can only be false via undefined behavior;
// consider testing $labl$$label directly.
7887 int flag = $cmp$$cmpcode;
7889 __ addiu32(AT, op1, -1 * val);
7890 switch(flag)
7891 {
7892 case 0x01: //equal
7893 if (&L)
7894 __ beq(R0, AT, L);
7895 else
7896 __ beq(R0, AT, (int)0);
7897 break;
7898 case 0x02: //not_equal
7899 if (&L)
7900 __ bne(R0, AT, L);
7901 else
7902 __ bne(R0, AT, (int)0);
7903 break;
7904 case 0x03: //greater
7905 if(&L)
7906 __ bgtz(AT, L);
7907 else
7908 __ bgtz(AT, (int)0);
7909 break;
7910 case 0x04: //greater_equal
7911 if(&L)
7912 __ bgez(AT, L);
7913 else
7914 __ bgez(AT, (int)0);
7915 break;
7916 case 0x05: //less
7917 if(&L)
7918 __ bltz(AT, L);
7919 else
7920 __ bltz(AT, (int)0);
7921 break;
7922 case 0x06: //less_equal
7923 if(&L)
7924 __ blez(AT, L);
7925 else
7926 __ blez(AT, (int)0);
7927 break;
7928 default:
7929 Unimplemented();
7930 }
// Fill the branch delay slot.
7931 __ nop();
7932 %}
7934 ins_pc_relative(1);
7935 ins_pipe( pipe_alu_branch );
7936 %}
// Branch on a signed long compare against zero using the MIPS
// compare-with-zero branches where possible; the shared delay-slot nop is
// emitted once via delayed()->nop() after the switch.
7938 instruct branchConL_regL_immL0(cmpOp cmp, mRegL src1, immL0 zero, label labl) %{
7939 match( If cmp (CmpL src1 zero) );
7940 effect(USE labl);
7941 format %{ "BR$cmp $src1, zero, $labl #@branchConL_regL_immL0" %}
7942 ins_cost(150);
7944 ins_encode %{
7945 Register opr1_reg = as_Register($src1$$reg);
7946 Label &target = *($labl$$label);
// NOTE(review): `if (&target)` can only be false via undefined behavior;
// consider testing $labl$$label directly.
7947 int flag = $cmp$$cmpcode;
7949 switch(flag)
7950 {
7951 case 0x01: //equal
7952 if (&target)
7953 __ beq(opr1_reg, R0, target);
7954 else
7955 __ beq(opr1_reg, R0, int(0));
7956 break;
7958 case 0x02: //not_equal
7959 if(&target)
7960 __ bne(opr1_reg, R0, target);
7961 else
7962 __ bne(opr1_reg, R0, (int)0);
7963 break;
7965 case 0x03: //greater
7966 if(&target)
7967 __ bgtz(opr1_reg, target);
7968 else
7969 __ bgtz(opr1_reg, (int)0);
7970 break;
7972 case 0x04: //greater_equal
7973 if(&target)
7974 __ bgez(opr1_reg, target);
7975 else
7976 __ bgez(opr1_reg, (int)0);
7977 break;
7979 case 0x05: //less
// NOTE(review): bltz would express (x < 0) directly; the slt+bne pair is
// equivalent but one instruction longer - confirm before changing.
7980 __ slt(AT, opr1_reg, R0);
7981 if(&target)
7982 __ bne(AT, R0, target);
7983 else
7984 __ bne(AT, R0, (int)0);
7985 break;
7987 case 0x06: //less_equal
7988 if (&target)
7989 __ blez(opr1_reg, target);
7990 else
7991 __ blez(opr1_reg, int(0));
7992 break;
7994 default:
7995 Unimplemented();
7996 }
// Fill the branch delay slot.
7997 __ delayed()->nop();
7998 %}
8001 ins_pc_relative(1);
8002 ins_pipe( pipe_alu_branch );
8003 %}
8006 //FIXME
// Branch on a single-precision float compare. Uses FPU condition compares
// (c.eq.s / c.ult.s / c.ule.s) plus bc1t/bc1f. The unordered variants are
// chosen so that a NaN operand makes the ordered relations fall through,
// and equal/not_equal treat NaN as not-equal (c_eq + bc1f).
8007 instruct branchConF_reg_reg(cmpOp cmp, regF src1, regF src2, label labl) %{
8008 match( If cmp (CmpF src1 src2) );
8009 effect(USE labl);
8010 format %{ "BR$cmp $src1, $src2, $labl #@branchConF_reg_reg" %}
8012 ins_encode %{
8013 FloatRegister reg_op1 = $src1$$FloatRegister;
8014 FloatRegister reg_op2 = $src2$$FloatRegister;
8015 Label &L = *($labl$$label);
// NOTE(review): `if (&L)` can only be false via undefined behavior;
// consider testing $labl$$label directly.
8016 int flag = $cmp$$cmpcode;
8018 switch(flag)
8019 {
8020 case 0x01: //equal
8021 __ c_eq_s(reg_op1, reg_op2);
8022 if (&L)
8023 __ bc1t(L);
8024 else
8025 __ bc1t((int)0);
8026 break;
8027 case 0x02: //not_equal
8028 __ c_eq_s(reg_op1, reg_op2);
8029 if (&L)
8030 __ bc1f(L);
8031 else
8032 __ bc1f((int)0);
8033 break;
8034 case 0x03: //greater
8035 __ c_ule_s(reg_op1, reg_op2);
8036 if(&L)
8037 __ bc1f(L);
8038 else
8039 __ bc1f((int)0);
8040 break;
8041 case 0x04: //greater_equal
8042 __ c_ult_s(reg_op1, reg_op2);
8043 if(&L)
8044 __ bc1f(L);
8045 else
8046 __ bc1f((int)0);
8047 break;
8048 case 0x05: //less
8049 __ c_ult_s(reg_op1, reg_op2);
8050 if(&L)
8051 __ bc1t(L);
8052 else
8053 __ bc1t((int)0);
8054 break;
8055 case 0x06: //less_equal
8056 __ c_ule_s(reg_op1, reg_op2);
8057 if(&L)
8058 __ bc1t(L);
8059 else
8060 __ bc1t((int)0);
8061 break;
8062 default:
8063 Unimplemented();
8064 }
// Fill the branch delay slot.
8065 __ nop();
8066 %}
8068 ins_pc_relative(1);
8069 ins_pipe(pipe_slow);
8070 %}
// Branch on a double-precision float compare; mirrors branchConF_reg_reg
// but with the .d forms of the FPU condition compares.
8072 instruct branchConD_reg_reg(cmpOp cmp, regD src1, regD src2, label labl) %{
8073 match( If cmp (CmpD src1 src2) );
8074 effect(USE labl);
8075 format %{ "BR$cmp $src1, $src2, $labl #@branchConD_reg_reg" %}
8077 ins_encode %{
8078 FloatRegister reg_op1 = $src1$$FloatRegister;
8079 FloatRegister reg_op2 = $src2$$FloatRegister;
8080 Label &L = *($labl$$label);
// NOTE(review): `if (&L)` can only be false via undefined behavior;
// consider testing $labl$$label directly.
8081 int flag = $cmp$$cmpcode;
8083 switch(flag)
8084 {
8085 case 0x01: //equal
8086 __ c_eq_d(reg_op1, reg_op2);
8087 if (&L)
8088 __ bc1t(L);
8089 else
8090 __ bc1t((int)0);
8091 break;
8092 case 0x02: //not_equal
8093 //2016/4/19 aoqi: c_ueq_d cannot distinguish NaN from equal. Double.isNaN(Double) is implemented by 'f != f', so the use of c_ueq_d causes bugs.
8094 __ c_eq_d(reg_op1, reg_op2);
8095 if (&L)
8096 __ bc1f(L);
8097 else
8098 __ bc1f((int)0);
8099 break;
8100 case 0x03: //greater
8101 __ c_ule_d(reg_op1, reg_op2);
8102 if(&L)
8103 __ bc1f(L);
8104 else
8105 __ bc1f((int)0);
8106 break;
8107 case 0x04: //greater_equal
8108 __ c_ult_d(reg_op1, reg_op2);
8109 if(&L)
8110 __ bc1f(L);
8111 else
8112 __ bc1f((int)0);
8113 break;
8114 case 0x05: //less
8115 __ c_ult_d(reg_op1, reg_op2);
8116 if(&L)
8117 __ bc1t(L);
8118 else
8119 __ bc1t((int)0);
8120 break;
8121 case 0x06: //less_equal
8122 __ c_ule_d(reg_op1, reg_op2);
8123 if(&L)
8124 __ bc1t(L);
8125 else
8126 __ bc1t((int)0);
8127 break;
8128 default:
8129 Unimplemented();
8130 }
// Fill the branch delay slot.
8131 __ nop();
8132 %}
8134 ins_pc_relative(1);
8135 ins_pipe(pipe_slow);
8136 %}
8139 // Call Runtime Instruction
// Direct call into the VM runtime; the actual emission is delegated to the
// shared Java_To_Runtime encoding. Aligned to 16 bytes per ins_alignment.
8140 instruct CallRuntimeDirect(method meth) %{
8141 match(CallRuntime );
8142 effect(USE meth);
8144 ins_cost(300);
8145 format %{ "CALL,runtime #@CallRuntimeDirect" %}
8146 ins_encode( Java_To_Runtime( meth ) );
8147 ins_pipe( pipe_slow );
8148 ins_alignment(16);
8149 %}
8153 //------------------------MemBar Instructions-------------------------------
8154 //Memory barrier flavors
// Acquire barrier: emits no code (size 0) on this target - the encoding
// is deliberately empty.
8156 instruct membar_acquire() %{
8157 match(MemBarAcquire);
8158 ins_cost(0);
8160 size(0);
8161 format %{ "MEMBAR-acquire (empty) @ membar_acquire" %}
8162 ins_encode();
8163 ins_pipe(empty);
8164 %}
// LoadFence: emitted as a full MIPS `sync` instruction.
8166 instruct load_fence() %{
8167 match(LoadFence);
8168 ins_cost(400);
8170 format %{ "MEMBAR @ load_fence" %}
8171 ins_encode %{
8172 __ sync();
8173 %}
8174 ins_pipe(pipe_slow);
8175 %}
// Acquire-for-lock barrier: empty encoding; the acquire semantics are
// provided by the CAS in the preceding FastLock (see format string).
8177 instruct membar_acquire_lock()
8178 %{
8179 match(MemBarAcquireLock);
8180 ins_cost(0);
8182 size(0);
8183 format %{ "MEMBAR-acquire (acquire as part of CAS in prior FastLock so empty encoding) @ membar_acquire_lock" %}
8184 ins_encode();
8185 ins_pipe(empty);
8186 %}
// Release barrier: emitted as a full MIPS `sync` instruction.
8188 instruct membar_release() %{
8189 match(MemBarRelease);
8190 ins_cost(400);
8192 format %{ "MEMBAR-release @ membar_release" %}
8194 ins_encode %{
8195 // Attention: DO NOT DELETE THIS GUY!
8196 __ sync();
8197 %}
8199 ins_pipe(pipe_slow);
8200 %}
// StoreFence: emitted as a full MIPS `sync` instruction.
8202 instruct store_fence() %{
8203 match(StoreFence);
8204 ins_cost(400);
8206 format %{ "MEMBAR @ store_fence" %}
8208 ins_encode %{
8209 __ sync();
8210 %}
8212 ins_pipe(pipe_slow);
8213 %}
// Release-for-unlock barrier: empty encoding; the release semantics are
// provided inside FastUnlock (see format string).
8215 instruct membar_release_lock()
8216 %{
8217 match(MemBarReleaseLock);
8218 ins_cost(0);
8220 size(0);
8221 format %{ "MEMBAR-release-lock (release in FastUnlock so empty) @ membar_release_lock" %}
8222 ins_encode();
8223 ins_pipe(empty);
8224 %}
// Volatile barrier: full `sync`, skipped entirely on uniprocessor systems.
8227 instruct membar_volatile() %{
8228 match(MemBarVolatile);
8229 ins_cost(400);
8231 format %{ "MEMBAR-volatile" %}
8232 ins_encode %{
8233 if( !os::is_MP() ) return; // Not needed on single CPU
8234 __ sync();
8236 %}
8237 ins_pipe(pipe_slow);
8238 %}
// Volatile barrier elided when the matcher proves a prior store/load
// barrier already orders this access (post_store_load_barrier).
8240 instruct unnecessary_membar_volatile() %{
8241 match(MemBarVolatile);
8242 predicate(Matcher::post_store_load_barrier(n));
8243 ins_cost(0);
8245 size(0);
8246 format %{ "MEMBAR-volatile (unnecessary so empty encoding) @ unnecessary_membar_volatile" %}
8247 ins_encode( );
8248 ins_pipe(empty);
8249 %}
// StoreStore barrier: empty encoding (size 0) on this target.
8251 instruct membar_storestore() %{
8252 match(MemBarStoreStore);
8254 ins_cost(0);
8255 size(0);
8256 format %{ "MEMBAR-storestore (empty encoding) @ membar_storestore" %}
8257 ins_encode( );
8258 ins_pipe(empty);
8259 %}
//----------Move Instructions--------------------------------------------------
// Reinterpret a long value as a pointer. Pure register move; emits nothing
// when source and destination already coincide.
instruct castX2P(mRegP dst, mRegL src) %{
  match(Set dst (CastX2P src));
  format %{ "castX2P $dst, $src @ castX2P" %}
  ins_encode %{
    Register to   = $dst$$Register;
    Register from = $src$$Register;
    if (to != from) {
      __ move(to, from);
    }
  %}
  ins_cost(10);
  ins_pipe( ialu_regI_mov );
%}
// Reinterpret a pointer as a long. Pure register move; emits nothing when
// source and destination already coincide.
instruct castP2X(mRegL dst, mRegP src ) %{
  match(Set dst (CastP2X src));

  format %{ "mov $dst, $src\t #@castP2X" %}
  ins_encode %{
    Register to   = $dst$$Register;
    Register from = $src$$Register;
    if (to != from) {
      __ move(to, from);
    }
  %}
  ins_pipe( ialu_regI_mov );
%}
// Raw 32-bit bit copy from an FPU register into a GPR (mfc1); no value
// conversion is performed.
instruct MoveF2I_reg_reg(mRegI dst, regF src) %{
  match(Set dst (MoveF2I src));
  effect(DEF dst, USE src);
  ins_cost(85);
  format %{ "MoveF2I $dst, $src @ MoveF2I_reg_reg" %}
  ins_encode %{
    __ mfc1($dst$$Register, $src$$FloatRegister);
  %}
  ins_pipe( pipe_slow );
%}
// Raw 32-bit bit copy from a GPR into an FPU register (mtc1); no value
// conversion is performed.
instruct MoveI2F_reg_reg(regF dst, mRegI src) %{
  match(Set dst (MoveI2F src));
  effect(DEF dst, USE src);
  ins_cost(85);
  format %{ "MoveI2F $dst, $src @ MoveI2F_reg_reg" %}
  ins_encode %{
    __ mtc1($src$$Register, $dst$$FloatRegister);
  %}
  ins_pipe( pipe_slow );
%}
// Raw 64-bit bit copy from an FPU register into a GPR (dmfc1); no value
// conversion is performed.
instruct MoveD2L_reg_reg(mRegL dst, regD src) %{
  match(Set dst (MoveD2L src));
  effect(DEF dst, USE src);
  ins_cost(85);
  format %{ "MoveD2L $dst, $src @ MoveD2L_reg_reg" %}
  ins_encode %{
    __ dmfc1($dst$$Register, $src$$FloatRegister);
  %}
  ins_pipe( pipe_slow );
%}
// Raw 64-bit bit copy from a GPR into an FPU register (dmtc1); no value
// conversion is performed.
instruct MoveL2D_reg_reg(regD dst, mRegL src) %{
  match(Set dst (MoveL2D src));
  effect(DEF dst, USE src);
  ins_cost(85);
  format %{ "MoveL2D $dst, $src @ MoveL2D_reg_reg" %}
  ins_encode %{
    __ dmtc1($src$$Register, $dst$$FloatRegister);
  %}
  ins_pipe( pipe_slow );
%}
8346 //----------Conditional Move---------------------------------------------------
8347 // Conditional move
// dst = src when (tmp1 $cop tmp2) holds, using movz/movn conditioned on a
// compare result computed into AT (subu32 for eq/ne, slt for order).
8348 instruct cmovI_cmpI_reg_reg(mRegI dst, mRegI src, mRegI tmp1, mRegI tmp2, cmpOp cop ) %{
8349 match(Set dst (CMoveI (Binary cop (CmpI tmp1 tmp2)) (Binary dst src)));
8350 ins_cost(80);
8351 format %{
8352 "CMP$cop $tmp1, $tmp2\t @cmovI_cmpI_reg_reg\n"
8353 "\tCMOV $dst,$src \t @cmovI_cmpI_reg_reg"
8354 %}
8356 ins_encode %{
8357 Register op1 = $tmp1$$Register;
8358 Register op2 = $tmp2$$Register;
8359 Register dst = $dst$$Register;
8360 Register src = $src$$Register;
8361 int flag = $cop$$cmpcode;
8363 switch(flag)
8364 {
8365 case 0x01: //equal
8366 __ subu32(AT, op1, op2);
8367 __ movz(dst, src, AT);
8368 break;
8370 case 0x02: //not_equal
8371 __ subu32(AT, op1, op2);
8372 __ movn(dst, src, AT);
8373 break;
8375 case 0x03: //great
8376 __ slt(AT, op2, op1);
8377 __ movn(dst, src, AT);
8378 break;
8380 case 0x04: //great_equal
8381 __ slt(AT, op1, op2);
8382 __ movz(dst, src, AT);
8383 break;
8385 case 0x05: //less
8386 __ slt(AT, op1, op2);
8387 __ movn(dst, src, AT);
8388 break;
8390 case 0x06: //less_equal
8391 __ slt(AT, op2, op1);
8392 __ movz(dst, src, AT);
8393 break;
8395 default:
8396 Unimplemented();
8397 }
8398 %}
8400 ins_pipe( pipe_slow );
8401 %}
// dst = src when the unsigned pointer compare (tmp1 $cop tmp2) holds;
// eq/ne use a full-width subu, order relations use sltu, all into AT.
8403 instruct cmovI_cmpP_reg_reg(mRegI dst, mRegI src, mRegP tmp1, mRegP tmp2, cmpOpU cop ) %{
8404 match(Set dst (CMoveI (Binary cop (CmpP tmp1 tmp2)) (Binary dst src)));
8405 ins_cost(80);
8406 format %{
8407 "CMPU$cop $tmp1,$tmp2\t @cmovI_cmpP_reg_reg\n\t"
8408 "CMOV $dst,$src\t @cmovI_cmpP_reg_reg"
8409 %}
8410 ins_encode %{
8411 Register op1 = $tmp1$$Register;
8412 Register op2 = $tmp2$$Register;
8413 Register dst = $dst$$Register;
8414 Register src = $src$$Register;
8415 int flag = $cop$$cmpcode;
8417 switch(flag)
8418 {
8419 case 0x01: //equal
8420 __ subu(AT, op1, op2);
8421 __ movz(dst, src, AT);
8422 break;
8424 case 0x02: //not_equal
8425 __ subu(AT, op1, op2);
8426 __ movn(dst, src, AT);
8427 break;
8429 case 0x03: //above
8430 __ sltu(AT, op2, op1);
8431 __ movn(dst, src, AT);
8432 break;
8434 case 0x04: //above_equal
8435 __ sltu(AT, op1, op2);
8436 __ movz(dst, src, AT);
8437 break;
8439 case 0x05: //below
8440 __ sltu(AT, op1, op2);
8441 __ movn(dst, src, AT);
8442 break;
8444 case 0x06: //below_equal
8445 __ sltu(AT, op2, op1);
8446 __ movz(dst, src, AT);
8447 break;
8449 default:
8450 Unimplemented();
8451 }
8452 %}
8454 ins_pipe( pipe_slow );
8455 %}
// dst = src when the unsigned narrow-oop compare (tmp1 $cop tmp2) holds;
// eq/ne use 32-bit subu32 (narrow oops are 32-bit), order uses sltu.
8457 instruct cmovI_cmpN_reg_reg(mRegI dst, mRegI src, mRegN tmp1, mRegN tmp2, cmpOpU cop ) %{
8458 match(Set dst (CMoveI (Binary cop (CmpN tmp1 tmp2)) (Binary dst src)));
8459 ins_cost(80);
8460 format %{
8461 "CMPU$cop $tmp1,$tmp2\t @cmovI_cmpN_reg_reg\n\t"
8462 "CMOV $dst,$src\t @cmovI_cmpN_reg_reg"
8463 %}
8464 ins_encode %{
8465 Register op1 = $tmp1$$Register;
8466 Register op2 = $tmp2$$Register;
8467 Register dst = $dst$$Register;
8468 Register src = $src$$Register;
8469 int flag = $cop$$cmpcode;
8471 switch(flag)
8472 {
8473 case 0x01: //equal
8474 __ subu32(AT, op1, op2);
8475 __ movz(dst, src, AT);
8476 break;
8478 case 0x02: //not_equal
8479 __ subu32(AT, op1, op2);
8480 __ movn(dst, src, AT);
8481 break;
8483 case 0x03: //above
8484 __ sltu(AT, op2, op1);
8485 __ movn(dst, src, AT);
8486 break;
8488 case 0x04: //above_equal
8489 __ sltu(AT, op1, op2);
8490 __ movz(dst, src, AT);
8491 break;
8493 case 0x05: //below
8494 __ sltu(AT, op1, op2);
8495 __ movn(dst, src, AT);
8496 break;
8498 case 0x06: //below_equal
8499 __ sltu(AT, op2, op1);
8500 __ movz(dst, src, AT);
8501 break;
8503 default:
8504 Unimplemented();
8505 }
8506 %}
8508 ins_pipe( pipe_slow );
8509 %}
// Pointer conditional move keyed on an unsigned narrow-oop compare;
// same AT-based movz/movn scheme as cmovI_cmpN_reg_reg.
8511 instruct cmovP_cmpN_reg_reg(mRegP dst, mRegP src, mRegN tmp1, mRegN tmp2, cmpOpU cop ) %{
8512 match(Set dst (CMoveP (Binary cop (CmpN tmp1 tmp2)) (Binary dst src)));
8513 ins_cost(80);
8514 format %{
8515 "CMPU$cop $tmp1,$tmp2\t @cmovP_cmpN_reg_reg\n\t"
8516 "CMOV $dst,$src\t @cmovP_cmpN_reg_reg"
8517 %}
8518 ins_encode %{
8519 Register op1 = $tmp1$$Register;
8520 Register op2 = $tmp2$$Register;
8521 Register dst = $dst$$Register;
8522 Register src = $src$$Register;
8523 int flag = $cop$$cmpcode;
8525 switch(flag)
8526 {
8527 case 0x01: //equal
8528 __ subu32(AT, op1, op2);
8529 __ movz(dst, src, AT);
8530 break;
8532 case 0x02: //not_equal
8533 __ subu32(AT, op1, op2);
8534 __ movn(dst, src, AT);
8535 break;
8537 case 0x03: //above
8538 __ sltu(AT, op2, op1);
8539 __ movn(dst, src, AT);
8540 break;
8542 case 0x04: //above_equal
8543 __ sltu(AT, op1, op2);
8544 __ movz(dst, src, AT);
8545 break;
8547 case 0x05: //below
8548 __ sltu(AT, op1, op2);
8549 __ movn(dst, src, AT);
8550 break;
8552 case 0x06: //below_equal
8553 __ sltu(AT, op2, op1);
8554 __ movz(dst, src, AT);
8555 break;
8557 default:
8558 Unimplemented();
8559 }
8560 %}
8562 ins_pipe( pipe_slow );
8563 %}
// Conditional move of a narrow oop: dst = src iff (tmp1 <cop> tmp2), where
// tmp1/tmp2 are full-width pointers compared unsigned.  Uses the full-width
// subtract (subu) since the operands are 64-bit pointers.
instruct cmovN_cmpP_reg_reg(mRegN dst, mRegN src, mRegP tmp1, mRegP tmp2, cmpOpU cop ) %{
  match(Set dst (CMoveN (Binary cop (CmpP tmp1 tmp2)) (Binary dst src)));
  ins_cost(80);
  format %{
    "CMPU$cop $tmp1,$tmp2\t @cmovN_cmpP_reg_reg\n\t"
    "CMOV $dst,$src\t @cmovN_cmpP_reg_reg"
  %}
  ins_encode %{
    Register op1 = $tmp1$$Register;
    Register op2 = $tmp2$$Register;
    Register dst = $dst$$Register;
    Register src = $src$$Register;
    int flag = $cop$$cmpcode;

    switch(flag)
    {
      case 0x01: //equal
        __ subu(AT, op1, op2);     // full-width compare of pointers
        __ movz(dst, src, AT);
        break;

      case 0x02: //not_equal
        __ subu(AT, op1, op2);
        __ movn(dst, src, AT);
        break;

      case 0x03: //above
        __ sltu(AT, op2, op1);
        __ movn(dst, src, AT);
        break;

      case 0x04: //above_equal
        __ sltu(AT, op1, op2);
        __ movz(dst, src, AT);
        break;

      case 0x05: //below
        __ sltu(AT, op1, op2);
        __ movn(dst, src, AT);
        break;

      case 0x06: //below_equal
        __ sltu(AT, op2, op1);
        __ movz(dst, src, AT);
        break;

      default:
        Unimplemented();
    }
  %}

  ins_pipe( pipe_slow );
%}
// Conditional move of a pointer keyed on a double compare.  The FP compare
// sets the FPU condition flag; movt/movf move on flag true/false.  Compare
// choice encodes NaN handling: c_ole/c_olt are FALSE on unordered, while
// c_ult/c_ule are TRUE on unordered, so each inverted use keeps the
// NaN-means-condition-false semantics of the original branch.
instruct cmovP_cmpD_reg_reg(mRegP dst, mRegP src, regD tmp1, regD tmp2, cmpOp cop ) %{
  match(Set dst (CMoveP (Binary cop (CmpD tmp1 tmp2)) (Binary dst src)));
  ins_cost(80);
  format %{
    "CMP$cop $tmp1, $tmp2\t @cmovP_cmpD_reg_reg\n"
    "\tCMOV $dst,$src \t @cmovP_cmpD_reg_reg"
  %}
  ins_encode %{
    FloatRegister reg_op1 = as_FloatRegister($tmp1$$reg);
    FloatRegister reg_op2 = as_FloatRegister($tmp2$$reg);
    Register dst = as_Register($dst$$reg);
    Register src = as_Register($src$$reg);

    int flag = $cop$$cmpcode;

    switch(flag)
    {
      case 0x01: //equal
        __ c_eq_d(reg_op1, reg_op2);
        __ movt(dst, src);
        break;
      case 0x02: //not_equal
        __ c_eq_d(reg_op1, reg_op2);
        __ movf(dst, src);                  // !(op1 == op2)
        break;
      case 0x03: //greater
        __ c_ole_d(reg_op1, reg_op2);       // !(op1 <= op2), false on NaN
        __ movf(dst, src);
        break;
      case 0x04: //greater_equal
        __ c_olt_d(reg_op1, reg_op2);       // !(op1 < op2)
        __ movf(dst, src);
        break;
      case 0x05: //less
        __ c_ult_d(reg_op1, reg_op2);
        __ movt(dst, src);
        break;
      case 0x06: //less_equal
        __ c_ule_d(reg_op1, reg_op2);
        __ movt(dst, src);
        break;
      default:
        Unimplemented();
    }
  %}

  ins_pipe( pipe_slow );
%}
// Conditional move of a narrow oop keyed on an unsigned narrow-oop compare.
// Branchless movz/movn keyed on scratch register AT.
instruct cmovN_cmpN_reg_reg(mRegN dst, mRegN src, mRegN tmp1, mRegN tmp2, cmpOpU cop ) %{
  match(Set dst (CMoveN (Binary cop (CmpN tmp1 tmp2)) (Binary dst src)));
  ins_cost(80);
  format %{
    "CMPU$cop $tmp1,$tmp2\t @cmovN_cmpN_reg_reg\n\t"
    "CMOV $dst,$src\t @cmovN_cmpN_reg_reg"
  %}
  ins_encode %{
    Register op1 = $tmp1$$Register;
    Register op2 = $tmp2$$Register;
    Register dst = $dst$$Register;
    Register src = $src$$Register;
    int flag = $cop$$cmpcode;

    switch(flag)
    {
      case 0x01: //equal
        __ subu32(AT, op1, op2);
        __ movz(dst, src, AT);
        break;

      case 0x02: //not_equal
        __ subu32(AT, op1, op2);
        __ movn(dst, src, AT);
        break;

      case 0x03: //above
        __ sltu(AT, op2, op1);
        __ movn(dst, src, AT);
        break;

      case 0x04: //above_equal
        __ sltu(AT, op1, op2);
        __ movz(dst, src, AT);
        break;

      case 0x05: //below
        __ sltu(AT, op1, op2);
        __ movn(dst, src, AT);
        break;

      case 0x06: //below_equal
        __ sltu(AT, op2, op1);
        __ movz(dst, src, AT);
        break;

      default:
        Unimplemented();
    }
  %}

  ins_pipe( pipe_slow );
%}
// Conditional move of an int keyed on an unsigned int compare.
// The full-width subu is fine for the equality tests because int registers
// hold sign-extended values, so they are bit-equal iff the ints are equal.
instruct cmovI_cmpU_reg_reg(mRegI dst, mRegI src, mRegI tmp1, mRegI tmp2, cmpOpU cop ) %{
  match(Set dst (CMoveI (Binary cop (CmpU tmp1 tmp2)) (Binary dst src)));
  ins_cost(80);
  format %{
    "CMPU$cop $tmp1,$tmp2\t @cmovI_cmpU_reg_reg\n\t"
    "CMOV $dst,$src\t @cmovI_cmpU_reg_reg"
  %}
  ins_encode %{
    Register op1 = $tmp1$$Register;
    Register op2 = $tmp2$$Register;
    Register dst = $dst$$Register;
    Register src = $src$$Register;
    int flag = $cop$$cmpcode;

    switch(flag)
    {
      case 0x01: //equal
        __ subu(AT, op1, op2);
        __ movz(dst, src, AT);
        break;

      case 0x02: //not_equal
        __ subu(AT, op1, op2);
        __ movn(dst, src, AT);
        break;

      case 0x03: //above
        __ sltu(AT, op2, op1);
        __ movn(dst, src, AT);
        break;

      case 0x04: //above_equal
        __ sltu(AT, op1, op2);
        __ movz(dst, src, AT);
        break;

      case 0x05: //below
        __ sltu(AT, op1, op2);
        __ movn(dst, src, AT);
        break;

      case 0x06: //below_equal
        __ sltu(AT, op2, op1);
        __ movz(dst, src, AT);
        break;

      default:
        Unimplemented();
    }
  %}

  ins_pipe( pipe_slow );
%}
// Conditional move of an int keyed on a signed long compare.
// Equality uses a full-width subtract (zero iff equal; overflow is harmless
// for equality); ordering uses signed slt, which cannot overflow.
instruct cmovI_cmpL_reg_reg(mRegI dst, mRegI src, mRegL tmp1, mRegL tmp2, cmpOp cop ) %{
  match(Set dst (CMoveI (Binary cop (CmpL tmp1 tmp2)) (Binary dst src)));
  ins_cost(80);
  format %{
    "CMP$cop $tmp1, $tmp2\t @cmovI_cmpL_reg_reg\n"
    "\tCMOV $dst,$src \t @cmovI_cmpL_reg_reg"
  %}
  ins_encode %{
    Register opr1 = as_Register($tmp1$$reg);
    Register opr2 = as_Register($tmp2$$reg);
    Register dst = $dst$$Register;
    Register src = $src$$Register;
    int flag = $cop$$cmpcode;

    switch(flag)
    {
      case 0x01: //equal
        __ subu(AT, opr1, opr2);
        __ movz(dst, src, AT);
        break;

      case 0x02: //not_equal
        __ subu(AT, opr1, opr2);
        __ movn(dst, src, AT);
        break;

      case 0x03: //greater
        __ slt(AT, opr2, opr1);
        __ movn(dst, src, AT);
        break;

      case 0x04: //greater_equal
        __ slt(AT, opr1, opr2);
        __ movz(dst, src, AT);
        break;

      case 0x05: //less
        __ slt(AT, opr1, opr2);
        __ movn(dst, src, AT);
        break;

      case 0x06: //less_equal
        __ slt(AT, opr2, opr1);
        __ movz(dst, src, AT);
        break;

      default:
        Unimplemented();
    }
  %}

  ins_pipe( pipe_slow );
%}
// Conditional move of a pointer keyed on a signed long compare.
// Same scheme as cmovI_cmpL_reg_reg: subu for (in)equality, signed slt
// for the ordering cases.
instruct cmovP_cmpL_reg_reg(mRegP dst, mRegP src, mRegL tmp1, mRegL tmp2, cmpOp cop ) %{
  match(Set dst (CMoveP (Binary cop (CmpL tmp1 tmp2)) (Binary dst src)));
  ins_cost(80);
  format %{
    "CMP$cop $tmp1, $tmp2\t @cmovP_cmpL_reg_reg\n"
    "\tCMOV $dst,$src \t @cmovP_cmpL_reg_reg"
  %}
  ins_encode %{
    Register opr1 = as_Register($tmp1$$reg);
    Register opr2 = as_Register($tmp2$$reg);
    Register dst = $dst$$Register;
    Register src = $src$$Register;
    int flag = $cop$$cmpcode;

    switch(flag)
    {
      case 0x01: //equal
        __ subu(AT, opr1, opr2);
        __ movz(dst, src, AT);
        break;

      case 0x02: //not_equal
        __ subu(AT, opr1, opr2);
        __ movn(dst, src, AT);
        break;

      case 0x03: //greater
        __ slt(AT, opr2, opr1);
        __ movn(dst, src, AT);
        break;

      case 0x04: //greater_equal
        __ slt(AT, opr1, opr2);
        __ movz(dst, src, AT);
        break;

      case 0x05: //less
        __ slt(AT, opr1, opr2);
        __ movn(dst, src, AT);
        break;

      case 0x06: //less_equal
        __ slt(AT, opr2, opr1);
        __ movz(dst, src, AT);
        break;

      default:
        Unimplemented();
    }
  %}

  ins_pipe( pipe_slow );
%}
// Conditional move of an int keyed on a double compare.  See
// cmovP_cmpD_reg_reg for the rationale of the c_ole/c_olt vs c_ult/c_ule
// choices (they encode the NaN/unordered behavior).
instruct cmovI_cmpD_reg_reg(mRegI dst, mRegI src, regD tmp1, regD tmp2, cmpOp cop ) %{
  match(Set dst (CMoveI (Binary cop (CmpD tmp1 tmp2)) (Binary dst src)));
  ins_cost(80);
  format %{
    "CMP$cop $tmp1, $tmp2\t @cmovI_cmpD_reg_reg\n"
    "\tCMOV $dst,$src \t @cmovI_cmpD_reg_reg"
  %}
  ins_encode %{
    FloatRegister reg_op1 = as_FloatRegister($tmp1$$reg);
    FloatRegister reg_op2 = as_FloatRegister($tmp2$$reg);
    Register dst = as_Register($dst$$reg);
    Register src = as_Register($src$$reg);

    int flag = $cop$$cmpcode;

    switch(flag)
    {
      case 0x01: //equal
        __ c_eq_d(reg_op1, reg_op2);
        __ movt(dst, src);
        break;
      case 0x02: //not_equal
        // 2016/4/19 aoqi: See instruct branchConD_reg_reg.  The change in
        // branchConD_reg_reg fixed a bug; it seemed similar here, so the
        // same change (c_eq + movf instead of an inequality compare) was made.
        __ c_eq_d(reg_op1, reg_op2);
        __ movf(dst, src);
        break;
      case 0x03: //greater
        __ c_ole_d(reg_op1, reg_op2);
        __ movf(dst, src);
        break;
      case 0x04: //greater_equal
        __ c_olt_d(reg_op1, reg_op2);
        __ movf(dst, src);
        break;
      case 0x05: //less
        __ c_ult_d(reg_op1, reg_op2);
        __ movt(dst, src);
        break;
      case 0x06: //less_equal
        __ c_ule_d(reg_op1, reg_op2);
        __ movt(dst, src);
        break;
      default:
        Unimplemented();
    }
  %}

  ins_pipe( pipe_slow );
%}
// Conditional move of a pointer keyed on an unsigned pointer compare.
// Full-width subu/sltu since both operands are 64-bit pointers.
instruct cmovP_cmpP_reg_reg(mRegP dst, mRegP src, mRegP tmp1, mRegP tmp2, cmpOpU cop ) %{
  match(Set dst (CMoveP (Binary cop (CmpP tmp1 tmp2)) (Binary dst src)));
  ins_cost(80);
  format %{
    "CMPU$cop $tmp1,$tmp2\t @cmovP_cmpP_reg_reg\n\t"
    "CMOV $dst,$src\t @cmovP_cmpP_reg_reg"
  %}
  ins_encode %{
    Register op1 = $tmp1$$Register;
    Register op2 = $tmp2$$Register;
    Register dst = $dst$$Register;
    Register src = $src$$Register;
    int flag = $cop$$cmpcode;

    switch(flag)
    {
      case 0x01: //equal
        __ subu(AT, op1, op2);
        __ movz(dst, src, AT);
        break;

      case 0x02: //not_equal
        __ subu(AT, op1, op2);
        __ movn(dst, src, AT);
        break;

      case 0x03: //above
        __ sltu(AT, op2, op1);
        __ movn(dst, src, AT);
        break;

      case 0x04: //above_equal
        __ sltu(AT, op1, op2);
        __ movz(dst, src, AT);
        break;

      case 0x05: //below
        __ sltu(AT, op1, op2);
        __ movn(dst, src, AT);
        break;

      case 0x06: //below_equal
        __ sltu(AT, op2, op1);
        __ movz(dst, src, AT);
        break;

      default:
        Unimplemented();
    }
  %}

  ins_pipe( pipe_slow );
%}
// Conditional move of a pointer keyed on a SIGNED int compare (cmpOp, slt).
// The case comments previously said above/below (unsigned terminology),
// which contradicted the signed slt instructions; they are corrected below.
instruct cmovP_cmpI_reg_reg(mRegP dst, mRegP src, mRegI tmp1, mRegI tmp2, cmpOp cop ) %{
  match(Set dst (CMoveP (Binary cop (CmpI tmp1 tmp2)) (Binary dst src)));
  ins_cost(80);
  format %{
    "CMP$cop $tmp1,$tmp2\t @cmovP_cmpI_reg_reg\n\t"
    "CMOV $dst,$src\t @cmovP_cmpI_reg_reg"
  %}
  ins_encode %{
    Register op1 = $tmp1$$Register;
    Register op2 = $tmp2$$Register;
    Register dst = $dst$$Register;
    Register src = $src$$Register;
    int flag = $cop$$cmpcode;

    switch(flag)
    {
      case 0x01: //equal
        __ subu32(AT, op1, op2);
        __ movz(dst, src, AT);
        break;

      case 0x02: //not_equal
        __ subu32(AT, op1, op2);
        __ movn(dst, src, AT);
        break;

      case 0x03: //greater (signed)
        __ slt(AT, op2, op1);
        __ movn(dst, src, AT);
        break;

      case 0x04: //greater_equal (signed)
        __ slt(AT, op1, op2);
        __ movz(dst, src, AT);
        break;

      case 0x05: //less (signed)
        __ slt(AT, op1, op2);
        __ movn(dst, src, AT);
        break;

      case 0x06: //less_equal (signed)
        __ slt(AT, op2, op1);
        __ movz(dst, src, AT);
        break;

      default:
        Unimplemented();
    }
  %}

  ins_pipe( pipe_slow );
%}
// Conditional move of a narrow oop keyed on a SIGNED int compare (cmpOp,
// slt).  Case comments corrected from unsigned (above/below) to signed
// (greater/less) terminology to match the slt instructions.
instruct cmovN_cmpI_reg_reg(mRegN dst, mRegN src, mRegI tmp1, mRegI tmp2, cmpOp cop ) %{
  match(Set dst (CMoveN (Binary cop (CmpI tmp1 tmp2)) (Binary dst src)));
  ins_cost(80);
  format %{
    "CMP$cop $tmp1,$tmp2\t @cmovN_cmpI_reg_reg\n\t"
    "CMOV $dst,$src\t @cmovN_cmpI_reg_reg"
  %}
  ins_encode %{
    Register op1 = $tmp1$$Register;
    Register op2 = $tmp2$$Register;
    Register dst = $dst$$Register;
    Register src = $src$$Register;
    int flag = $cop$$cmpcode;

    switch(flag)
    {
      case 0x01: //equal
        __ subu32(AT, op1, op2);
        __ movz(dst, src, AT);
        break;

      case 0x02: //not_equal
        __ subu32(AT, op1, op2);
        __ movn(dst, src, AT);
        break;

      case 0x03: //greater (signed)
        __ slt(AT, op2, op1);
        __ movn(dst, src, AT);
        break;

      case 0x04: //greater_equal (signed)
        __ slt(AT, op1, op2);
        __ movz(dst, src, AT);
        break;

      case 0x05: //less (signed)
        __ slt(AT, op1, op2);
        __ movn(dst, src, AT);
        break;

      case 0x06: //less_equal (signed)
        __ slt(AT, op2, op1);
        __ movz(dst, src, AT);
        break;

      default:
        Unimplemented();
    }
  %}

  ins_pipe( pipe_slow );
%}
// Conditional move of a long keyed on a signed int compare.
// ("great" in the original case comments corrected to "greater".)
instruct cmovL_cmpI_reg_reg(mRegL dst, mRegL src, mRegI tmp1, mRegI tmp2, cmpOp cop ) %{
  match(Set dst (CMoveL (Binary cop (CmpI tmp1 tmp2)) (Binary dst src)));
  ins_cost(80);
  format %{
    "CMP$cop $tmp1, $tmp2\t @cmovL_cmpI_reg_reg\n"
    "\tCMOV $dst,$src \t @cmovL_cmpI_reg_reg"
  %}

  ins_encode %{
    Register op1 = $tmp1$$Register;
    Register op2 = $tmp2$$Register;
    Register dst = as_Register($dst$$reg);
    Register src = as_Register($src$$reg);
    int flag = $cop$$cmpcode;

    switch(flag)
    {
      case 0x01: //equal
        __ subu32(AT, op1, op2);
        __ movz(dst, src, AT);
        break;

      case 0x02: //not_equal
        __ subu32(AT, op1, op2);
        __ movn(dst, src, AT);
        break;

      case 0x03: //greater
        __ slt(AT, op2, op1);
        __ movn(dst, src, AT);
        break;

      case 0x04: //greater_equal
        __ slt(AT, op1, op2);
        __ movz(dst, src, AT);
        break;

      case 0x05: //less
        __ slt(AT, op1, op2);
        __ movn(dst, src, AT);
        break;

      case 0x06: //less_equal
        __ slt(AT, op2, op1);
        __ movz(dst, src, AT);
        break;

      default:
        Unimplemented();
    }
  %}

  ins_pipe( pipe_slow );
%}
// Conditional move of a long keyed on a signed long compare.
// subu for (in)equality (zero iff equal), signed slt for ordering.
instruct cmovL_cmpL_reg_reg(mRegL dst, mRegL src, mRegL tmp1, mRegL tmp2, cmpOp cop ) %{
  match(Set dst (CMoveL (Binary cop (CmpL tmp1 tmp2)) (Binary dst src)));
  ins_cost(80);
  format %{
    "CMP$cop $tmp1, $tmp2\t @cmovL_cmpL_reg_reg\n"
    "\tCMOV $dst,$src \t @cmovL_cmpL_reg_reg"
  %}
  ins_encode %{
    Register opr1 = as_Register($tmp1$$reg);
    Register opr2 = as_Register($tmp2$$reg);
    Register dst = as_Register($dst$$reg);
    Register src = as_Register($src$$reg);
    int flag = $cop$$cmpcode;

    switch(flag)
    {
      case 0x01: //equal
        __ subu(AT, opr1, opr2);
        __ movz(dst, src, AT);
        break;

      case 0x02: //not_equal
        __ subu(AT, opr1, opr2);
        __ movn(dst, src, AT);
        break;

      case 0x03: //greater
        __ slt(AT, opr2, opr1);
        __ movn(dst, src, AT);
        break;

      case 0x04: //greater_equal
        __ slt(AT, opr1, opr2);
        __ movz(dst, src, AT);
        break;

      case 0x05: //less
        __ slt(AT, opr1, opr2);
        __ movn(dst, src, AT);
        break;

      case 0x06: //less_equal
        __ slt(AT, opr2, opr1);
        __ movz(dst, src, AT);
        break;

      default:
        Unimplemented();
    }
  %}

  ins_pipe( pipe_slow );
%}
// Conditional move of a long keyed on an unsigned narrow-oop compare.
instruct cmovL_cmpN_reg_reg(mRegL dst, mRegL src, mRegN tmp1, mRegN tmp2, cmpOpU cop ) %{
  match(Set dst (CMoveL (Binary cop (CmpN tmp1 tmp2)) (Binary dst src)));
  ins_cost(80);
  format %{
    "CMPU$cop $tmp1,$tmp2\t @cmovL_cmpN_reg_reg\n\t"
    "CMOV $dst,$src\t @cmovL_cmpN_reg_reg"
  %}
  ins_encode %{
    Register op1 = $tmp1$$Register;
    Register op2 = $tmp2$$Register;
    Register dst = $dst$$Register;
    Register src = $src$$Register;
    int flag = $cop$$cmpcode;

    switch(flag)
    {
      case 0x01: //equal
        __ subu32(AT, op1, op2);   // narrow oops are 32-bit values
        __ movz(dst, src, AT);
        break;

      case 0x02: //not_equal
        __ subu32(AT, op1, op2);
        __ movn(dst, src, AT);
        break;

      case 0x03: //above
        __ sltu(AT, op2, op1);
        __ movn(dst, src, AT);
        break;

      case 0x04: //above_equal
        __ sltu(AT, op1, op2);
        __ movz(dst, src, AT);
        break;

      case 0x05: //below
        __ sltu(AT, op1, op2);
        __ movn(dst, src, AT);
        break;

      case 0x06: //below_equal
        __ sltu(AT, op2, op1);
        __ movz(dst, src, AT);
        break;

      default:
        Unimplemented();
    }
  %}

  ins_pipe( pipe_slow );
%}
// Conditional move of a long keyed on a double compare.  Same NaN-aware
// compare selection as cmovP_cmpD_reg_reg / cmovI_cmpD_reg_reg.
instruct cmovL_cmpD_reg_reg(mRegL dst, mRegL src, regD tmp1, regD tmp2, cmpOp cop ) %{
  match(Set dst (CMoveL (Binary cop (CmpD tmp1 tmp2)) (Binary dst src)));
  ins_cost(80);
  format %{
    "CMP$cop $tmp1, $tmp2\t @cmovL_cmpD_reg_reg\n"
    "\tCMOV $dst,$src \t @cmovL_cmpD_reg_reg"
  %}
  ins_encode %{
    FloatRegister reg_op1 = as_FloatRegister($tmp1$$reg);
    FloatRegister reg_op2 = as_FloatRegister($tmp2$$reg);
    Register dst = as_Register($dst$$reg);
    Register src = as_Register($src$$reg);

    int flag = $cop$$cmpcode;

    switch(flag)
    {
      case 0x01: //equal
        __ c_eq_d(reg_op1, reg_op2);
        __ movt(dst, src);
        break;
      case 0x02: //not_equal
        __ c_eq_d(reg_op1, reg_op2);
        __ movf(dst, src);
        break;
      case 0x03: //greater
        __ c_ole_d(reg_op1, reg_op2);
        __ movf(dst, src);
        break;
      case 0x04: //greater_equal
        __ c_olt_d(reg_op1, reg_op2);
        __ movf(dst, src);
        break;
      case 0x05: //less
        __ c_ult_d(reg_op1, reg_op2);
        __ movt(dst, src);
        break;
      case 0x06: //less_equal
        __ c_ule_d(reg_op1, reg_op2);
        __ movt(dst, src);
        break;
      default:
        Unimplemented();
    }
  %}

  ins_pipe( pipe_slow );
%}
// Conditional move of a double keyed on a double compare: FP-to-FP
// conditional moves (movt_d/movf_d) on the FPU condition flag.
instruct cmovD_cmpD_reg_reg(regD dst, regD src, regD tmp1, regD tmp2, cmpOp cop ) %{
  match(Set dst (CMoveD (Binary cop (CmpD tmp1 tmp2)) (Binary dst src)));
  ins_cost(200);
  format %{
    "CMP$cop $tmp1, $tmp2\t @cmovD_cmpD_reg_reg\n"
    "\tCMOV $dst,$src \t @cmovD_cmpD_reg_reg"
  %}
  ins_encode %{
    FloatRegister reg_op1 = as_FloatRegister($tmp1$$reg);
    FloatRegister reg_op2 = as_FloatRegister($tmp2$$reg);
    FloatRegister dst = as_FloatRegister($dst$$reg);
    FloatRegister src = as_FloatRegister($src$$reg);

    int flag = $cop$$cmpcode;

    switch(flag)
    {
      case 0x01: //equal
        __ c_eq_d(reg_op1, reg_op2);
        __ movt_d(dst, src);
        break;
      case 0x02: //not_equal
        __ c_eq_d(reg_op1, reg_op2);
        __ movf_d(dst, src);
        break;
      case 0x03: //greater
        __ c_ole_d(reg_op1, reg_op2);
        __ movf_d(dst, src);
        break;
      case 0x04: //greater_equal
        __ c_olt_d(reg_op1, reg_op2);
        __ movf_d(dst, src);
        break;
      case 0x05: //less
        __ c_ult_d(reg_op1, reg_op2);
        __ movt_d(dst, src);
        break;
      case 0x06: //less_equal
        __ c_ule_d(reg_op1, reg_op2);
        __ movt_d(dst, src);
        break;
      default:
        Unimplemented();
    }
  %}

  ins_pipe( pipe_slow );
%}
// Conditional move of a float keyed on a signed int compare.  There is no
// GPR-condition-to-FPR conditional move, so this uses a branch around the
// mov_s; each branch is followed by an explicit nop for the delay slot.
// ("great" in the original case comments corrected to "greater".)
instruct cmovF_cmpI_reg_reg(regF dst, regF src, mRegI tmp1, mRegI tmp2, cmpOp cop ) %{
  match(Set dst (CMoveF (Binary cop (CmpI tmp1 tmp2)) (Binary dst src)));
  ins_cost(200);
  format %{
    "CMP$cop $tmp1, $tmp2\t @cmovF_cmpI_reg_reg\n"
    "\tCMOV $dst, $src \t @cmovF_cmpI_reg_reg"
  %}

  ins_encode %{
    Register op1 = $tmp1$$Register;
    Register op2 = $tmp2$$Register;
    FloatRegister dst = as_FloatRegister($dst$$reg);
    FloatRegister src = as_FloatRegister($src$$reg);
    int flag = $cop$$cmpcode;
    Label L;

    switch(flag)
    {
      case 0x01: //equal
        __ bne(op1, op2, L);       // skip the move when condition fails
        __ nop();                  // branch delay slot
        __ mov_s(dst, src);
        __ bind(L);
        break;
      case 0x02: //not_equal
        __ beq(op1, op2, L);
        __ nop();
        __ mov_s(dst, src);
        __ bind(L);
        break;
      case 0x03: //greater
        __ slt(AT, op2, op1);
        __ beq(AT, R0, L);
        __ nop();
        __ mov_s(dst, src);
        __ bind(L);
        break;
      case 0x04: //greater_equal
        __ slt(AT, op1, op2);
        __ bne(AT, R0, L);
        __ nop();
        __ mov_s(dst, src);
        __ bind(L);
        break;
      case 0x05: //less
        __ slt(AT, op1, op2);
        __ beq(AT, R0, L);
        __ nop();
        __ mov_s(dst, src);
        __ bind(L);
        break;
      case 0x06: //less_equal
        __ slt(AT, op2, op1);
        __ bne(AT, R0, L);
        __ nop();
        __ mov_s(dst, src);
        __ bind(L);
        break;
      default:
        Unimplemented();
    }
  %}

  ins_pipe( pipe_slow );
%}
// Conditional move of a double keyed on a signed int compare.
// Branch-around form (see cmovF_cmpI_reg_reg); nop fills each delay slot.
// ("great" in the original case comments corrected to "greater".)
instruct cmovD_cmpI_reg_reg(regD dst, regD src, mRegI tmp1, mRegI tmp2, cmpOp cop ) %{
  match(Set dst (CMoveD (Binary cop (CmpI tmp1 tmp2)) (Binary dst src)));
  ins_cost(200);
  format %{
    "CMP$cop $tmp1, $tmp2\t @cmovD_cmpI_reg_reg\n"
    "\tCMOV $dst, $src \t @cmovD_cmpI_reg_reg"
  %}

  ins_encode %{
    Register op1 = $tmp1$$Register;
    Register op2 = $tmp2$$Register;
    FloatRegister dst = as_FloatRegister($dst$$reg);
    FloatRegister src = as_FloatRegister($src$$reg);
    int flag = $cop$$cmpcode;
    Label L;

    switch(flag)
    {
      case 0x01: //equal
        __ bne(op1, op2, L);
        __ nop();
        __ mov_d(dst, src);
        __ bind(L);
        break;
      case 0x02: //not_equal
        __ beq(op1, op2, L);
        __ nop();
        __ mov_d(dst, src);
        __ bind(L);
        break;
      case 0x03: //greater
        __ slt(AT, op2, op1);
        __ beq(AT, R0, L);
        __ nop();
        __ mov_d(dst, src);
        __ bind(L);
        break;
      case 0x04: //greater_equal
        __ slt(AT, op1, op2);
        __ bne(AT, R0, L);
        __ nop();
        __ mov_d(dst, src);
        __ bind(L);
        break;
      case 0x05: //less
        __ slt(AT, op1, op2);
        __ beq(AT, R0, L);
        __ nop();
        __ mov_d(dst, src);
        __ bind(L);
        break;
      case 0x06: //less_equal
        __ slt(AT, op2, op1);
        __ bne(AT, R0, L);
        __ nop();
        __ mov_d(dst, src);
        __ bind(L);
        break;
      default:
        Unimplemented();
    }
  %}

  ins_pipe( pipe_slow );
%}
// Conditional move of a double keyed on a pointer compare, branch-around form.
// NOTE(review): the ordering cases use SIGNED slt on pointer operands, while
// the other CmpP patterns in this file use unsigned sltu.  In practice C2
// should only generate eq/ne for pointer compares, but confirm the ordered
// cases are either unreachable or intentionally signed.
instruct cmovD_cmpP_reg_reg(regD dst, regD src, mRegP tmp1, mRegP tmp2, cmpOp cop ) %{
  match(Set dst (CMoveD (Binary cop (CmpP tmp1 tmp2)) (Binary dst src)));
  ins_cost(200);
  format %{
    "CMP$cop $tmp1, $tmp2\t @cmovD_cmpP_reg_reg\n"
    "\tCMOV $dst, $src \t @cmovD_cmpP_reg_reg"
  %}

  ins_encode %{
    Register op1 = $tmp1$$Register;
    Register op2 = $tmp2$$Register;
    FloatRegister dst = as_FloatRegister($dst$$reg);
    FloatRegister src = as_FloatRegister($src$$reg);
    int flag = $cop$$cmpcode;
    Label L;

    switch(flag)
    {
      case 0x01: //equal
        __ bne(op1, op2, L);
        __ nop();
        __ mov_d(dst, src);
        __ bind(L);
        break;
      case 0x02: //not_equal
        __ beq(op1, op2, L);
        __ nop();
        __ mov_d(dst, src);
        __ bind(L);
        break;
      case 0x03: //greater
        __ slt(AT, op2, op1);
        __ beq(AT, R0, L);
        __ nop();
        __ mov_d(dst, src);
        __ bind(L);
        break;
      case 0x04: //greater_equal
        __ slt(AT, op1, op2);
        __ bne(AT, R0, L);
        __ nop();
        __ mov_d(dst, src);
        __ bind(L);
        break;
      case 0x05: //less
        __ slt(AT, op1, op2);
        __ beq(AT, R0, L);
        __ nop();
        __ mov_d(dst, src);
        __ bind(L);
        break;
      case 0x06: //less_equal
        __ slt(AT, op2, op1);
        __ bne(AT, R0, L);
        __ nop();
        __ mov_d(dst, src);
        __ bind(L);
        break;
      default:
        Unimplemented();
    }
  %}

  ins_pipe( pipe_slow );
%}
//FIXME
// Conditional move of an int keyed on a float compare.  Single-precision
// analogue of cmovI_cmpD_reg_reg: c_ole/c_olt are false on unordered,
// c_ult/c_ule true on unordered, preserving NaN semantics under inversion.
instruct cmovI_cmpF_reg_reg(mRegI dst, mRegI src, regF tmp1, regF tmp2, cmpOp cop ) %{
  match(Set dst (CMoveI (Binary cop (CmpF tmp1 tmp2)) (Binary dst src)));
  ins_cost(80);
  format %{
    "CMP$cop $tmp1, $tmp2\t @cmovI_cmpF_reg_reg\n"
    "\tCMOV $dst,$src \t @cmovI_cmpF_reg_reg"
  %}

  ins_encode %{
    FloatRegister reg_op1 = $tmp1$$FloatRegister;
    FloatRegister reg_op2 = $tmp2$$FloatRegister;
    Register dst = $dst$$Register;
    Register src = $src$$Register;
    int flag = $cop$$cmpcode;

    switch(flag)
    {
      case 0x01: //equal
        __ c_eq_s(reg_op1, reg_op2);
        __ movt(dst, src);
        break;
      case 0x02: //not_equal
        __ c_eq_s(reg_op1, reg_op2);
        __ movf(dst, src);
        break;
      case 0x03: //greater
        __ c_ole_s(reg_op1, reg_op2);
        __ movf(dst, src);
        break;
      case 0x04: //greater_equal
        __ c_olt_s(reg_op1, reg_op2);
        __ movf(dst, src);
        break;
      case 0x05: //less
        __ c_ult_s(reg_op1, reg_op2);
        __ movt(dst, src);
        break;
      case 0x06: //less_equal
        __ c_ule_s(reg_op1, reg_op2);
        __ movt(dst, src);
        break;
      default:
        Unimplemented();
    }
  %}
  ins_pipe( pipe_slow );
%}
// Conditional move of a float keyed on a float compare: FP-to-FP
// conditional moves (movt_s/movf_s) on the FPU condition flag.
instruct cmovF_cmpF_reg_reg(regF dst, regF src, regF tmp1, regF tmp2, cmpOp cop ) %{
  match(Set dst (CMoveF (Binary cop (CmpF tmp1 tmp2)) (Binary dst src)));
  ins_cost(200);
  format %{
    "CMP$cop $tmp1, $tmp2\t @cmovF_cmpF_reg_reg\n"
    "\tCMOV $dst,$src \t @cmovF_cmpF_reg_reg"
  %}

  ins_encode %{
    FloatRegister reg_op1 = $tmp1$$FloatRegister;
    FloatRegister reg_op2 = $tmp2$$FloatRegister;
    FloatRegister dst = $dst$$FloatRegister;
    FloatRegister src = $src$$FloatRegister;
    int flag = $cop$$cmpcode;

    switch(flag)
    {
      case 0x01: //equal
        __ c_eq_s(reg_op1, reg_op2);
        __ movt_s(dst, src);
        break;
      case 0x02: //not_equal
        __ c_eq_s(reg_op1, reg_op2);
        __ movf_s(dst, src);
        break;
      case 0x03: //greater
        __ c_ole_s(reg_op1, reg_op2);
        __ movf_s(dst, src);
        break;
      case 0x04: //greater_equal
        __ c_olt_s(reg_op1, reg_op2);
        __ movf_s(dst, src);
        break;
      case 0x05: //less
        __ c_ult_s(reg_op1, reg_op2);
        __ movt_s(dst, src);
        break;
      case 0x06: //less_equal
        __ c_ule_s(reg_op1, reg_op2);
        __ movt_s(dst, src);
        break;
      default:
        Unimplemented();
    }
  %}
  ins_pipe( pipe_slow );
%}
// Manifest a CmpL result in an integer register. Very painful.
// This is the test to avoid.
instruct cmpL3_reg_reg(mRegI dst, mRegL src1, mRegL src2) %{
  match(Set dst (CmpL3 src1 src2));
  ins_cost(1000);
  format %{ "cmpL3 $dst, $src1, $src2 @ cmpL3_reg_reg" %}
  ins_encode %{
    // Three-way signed long compare:
    //   dst = -1 if src1 < src2, 0 if src1 == src2, +1 if src1 > src2.
    //
    // BUGFIX: the previous code derived the sign from 'src1 - src2'
    // (subu + bltz), which gives the wrong answer when the 64-bit
    // subtraction overflows — e.g. src1 == Long.MIN_VALUE, src2 == 1
    // yields a positive difference and reported src1 > src2.  Two signed
    // set-on-less-than results cannot overflow and also remove the branch.
    Register opr1 = as_Register($src1$$reg);
    Register opr2 = as_Register($src2$$reg);
    Register dst  = as_Register($dst$$reg);

    __ slt(AT, opr2, opr1);    // AT  = (src1 > src2) ? 1 : 0
    __ slt(dst, opr1, opr2);   // dst = (src1 < src2) ? 1 : 0
                               // (reads both operands before writing dst,
                               //  so dst may alias either source register)
    __ subu(dst, AT, dst);     // dst = gt - lt  ->  +1, 0 or -1
  %}
  ins_pipe( pipe_slow );
%}
//
// Three-way float compare, Java fcmpl semantics:
// less_result = -1
// greater_result = 1
// equal_result = 0
// nan_result = -1
//
instruct cmpF3_reg_reg(mRegI dst, regF src1, regF src2) %{
  match(Set dst (CmpF3 src1 src2));
  ins_cost(1000);
  format %{ "cmpF3 $dst, $src1, $src2 @ cmpF3_reg_reg" %}
  ins_encode %{
    FloatRegister src1 = as_FloatRegister($src1$$reg);
    FloatRegister src2 = as_FloatRegister($src2$$reg);
    Register dst = as_Register($dst$$reg);

    Label Done;

    // c_ult is true for (src1 < src2) OR unordered, so NaN falls into the
    // -1 case as required.  The delay-slot daddiu sets dst = -1 whether or
    // not the branch is taken; the fall-through path overwrites it.
    __ c_ult_s(src1, src2);
    __ bc1t(Done);
    __ delayed()->daddiu(dst, R0, -1);

    __ c_eq_s(src1, src2);
    __ move(dst, 1);           // provisional "greater"
    __ movt(dst, R0);          // equal: zero it

    __ bind(Done);
  %}
  ins_pipe( pipe_slow );
%}
// Three-way double compare; same scheme and NaN handling (-1) as
// cmpF3_reg_reg above, using the double-precision FP compares.
instruct cmpD3_reg_reg(mRegI dst, regD src1, regD src2) %{
  match(Set dst (CmpD3 src1 src2));
  ins_cost(1000);
  format %{ "cmpD3 $dst, $src1, $src2 @ cmpD3_reg_reg" %}
  ins_encode %{
    FloatRegister src1 = as_FloatRegister($src1$$reg);
    FloatRegister src2 = as_FloatRegister($src2$$reg);
    Register dst = as_Register($dst$$reg);

    Label Done;

    // less-than OR unordered -> -1 (set in the delay slot)
    __ c_ult_d(src1, src2);
    __ bc1t(Done);
    __ delayed()->daddiu(dst, R0, -1);

    __ c_eq_d(src1, src2);
    __ move(dst, 1);           // provisional "greater"
    __ movt(dst, R0);          // equal: zero it

    __ bind(Done);
  %}
  ins_pipe( pipe_slow );
%}
// Zero-fill an array region: stores one doubleword (sd of R0) per loop
// iteration, decrementing the count once per store.
// NOTE(review): the original comment claimed $cnt is "the number of bytes",
// but the loop clearly consumes it as a count of doublewords (one sd per
// decrement, pointer advanced by wordSize) — verify against the units of
// ClearArray's count operand.
instruct clear_array(mRegL cnt, mRegP base, Universe dummy) %{
  match(Set dummy (ClearArray cnt base));
  format %{ "CLEAR_ARRAY base = $base, cnt = $cnt # Clear doublewords" %}
  ins_encode %{
    // base points to the start of the array, num is the element count.
    Register base = $base$$Register;
    Register num = $cnt$$Register;
    Label Loop, done;

    __ beq(num, R0, done);              // nothing to do for a zero count
    __ delayed()->daddu(AT, base, R0);  // AT = running store pointer

    __ move(T9, num); /* T9 = words */

    __ bind(Loop);
    __ sd(R0, AT, 0);                   // store one zero doubleword
    __ daddi(T9, T9, -1);
    __ bne(T9, R0, Loop);
    __ delayed()->daddi(AT, AT, wordSize);  // advance pointer in delay slot

    __ bind(done);
  %}
  ins_pipe( pipe_slow );
%}
// Intrinsic for String.compareTo: lexicographic comparison of two UTF-16
// char sequences.  Result < 0, == 0 or > 0.  If one string is a prefix of
// the other, the result is the difference of the lengths; otherwise it is
// the difference of the first mismatching characters.
instruct string_compare(a4_RegP str1, mA5RegI cnt1, a6_RegP str2, mA7RegI cnt2, no_Ax_mRegI result) %{
  match(Set result (StrComp (Binary str1 cnt1) (Binary str2 cnt2)));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2);

  format %{ "String Compare $str1[len: $cnt1], $str2[len: $cnt2] -> $result @ string_compare" %}
  ins_encode %{
    // Get the first character position in both strings
    // [8] char array, [12] offset, [16] count
    Register str1 = $str1$$Register;
    Register str2 = $str2$$Register;
    Register cnt1 = $cnt1$$Register;
    Register cnt2 = $cnt2$$Register;
    Register result = $result$$Register;

    Label L, Loop, haveResult, done;

    // compute the and difference of lengths (in result)
    __ subu(result, cnt1, cnt2); // result holds the difference of two lengths

    // compute the shorter length (in cnt1)
    __ slt(AT, cnt2, cnt1);
    __ movn(cnt1, cnt2, AT);

    // Now the shorter length is in cnt1 and cnt2 can be used as a tmp register
    __ bind(Loop); // Loop begin
    __ beq(cnt1, R0, done);              // common prefix exhausted: result
                                         //   is already the length difference
    __ delayed()->lhu(AT, str1, 0);;     // load current char of str1 in slot

    // compare current character
    __ lhu(cnt2, str2, 0);
    __ bne(AT, cnt2, haveResult);
    __ delayed()->addi(str1, str1, 2);   // advance str1 (chars are 2 bytes)
    __ addi(str2, str2, 2);
    __ b(Loop);
    __ delayed()->addi(cnt1, cnt1, -1); // Loop end

    __ bind(haveResult);
    __ subu(result, AT, cnt2);           // difference of mismatching chars

    __ bind(done);
  %}

  ins_pipe( pipe_slow );
%}
// intrinsic optimization
// Intrinsic for String.equals: result = 1 when the two char sequences are
// identical (or share the same backing array), 0 on the first mismatch.
// NOTE(review): unlike string_compare above, several instructions following
// branches here carry no delayed() marker (e.g. the daddiu after the first
// beq) — confirm the assembler's delay-slot policy makes these sequences
// correct.
instruct string_equals(a4_RegP str1, a5_RegP str2, mA6RegI cnt, mA7RegI temp, no_Ax_mRegI result) %{
  match(Set result (StrEquals (Binary str1 str2) cnt));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt, KILL temp);

  format %{ "String Equal $str1, $str2, len:$cnt tmp:$temp -> $result @ string_equals" %}
  ins_encode %{
    // Get the first character position in both strings
    // [8] char array, [12] offset, [16] count
    Register str1 = $str1$$Register;
    Register str2 = $str2$$Register;
    Register cnt = $cnt$$Register;
    Register tmp = $temp$$Register;
    Register result = $result$$Register;

    Label Loop, done;

    __ beq(str1, str2, done); // same char[] ?
    __ daddiu(result, R0, 1); // assume equal until proven otherwise

    __ bind(Loop); // Loop begin
    __ beq(cnt, R0, done);
    __ daddiu(result, R0, 1); // count == 0

    // compare current character
    __ lhu(AT, str1, 0);;
    __ lhu(tmp, str2, 0);
    __ bne(AT, tmp, done);
    __ delayed()->daddi(result, R0, 0);  // mismatch: result = 0 in slot
    __ addi(str1, str1, 2);
    __ addi(str2, str2, 2);
    __ b(Loop);
    __ delayed()->addi(cnt, cnt, -1); // Loop end

    __ bind(done);
  %}

  ins_pipe( pipe_slow );
%}
9848 //----------Arithmetic Instructions-------------------------------------------
9849 //----------Addition Instructions---------------------------------------------
// 32-bit integer add, register + register.
instruct addI_Reg_Reg(mRegI dst, mRegI src1, mRegI src2) %{
  match(Set dst (AddI src1 src2));

  format %{ "add $dst, $src1, $src2 #@addI_Reg_Reg" %}
  ins_encode %{
    Register dst = $dst$$Register;
    Register src1 = $src1$$Register;
    Register src2 = $src2$$Register;
    __ addu32(dst, src1, src2);
  %}
  ins_pipe( ialu_regI_regI );
%}

// 32-bit integer add, register + immediate.  A single addiu32 is used when
// the constant fits a signed 16-bit field; otherwise the constant is first
// materialized into the scratch register AT.
instruct addI_Reg_imm(mRegI dst, mRegI src1, immI src2) %{
  match(Set dst (AddI src1 src2));

  format %{ "add $dst, $src1, $src2 #@addI_Reg_imm" %}
  ins_encode %{
    Register dst = $dst$$Register;
    Register src1 = $src1$$Register;
    int imm = $src2$$constant;

    if(Assembler::is_simm16(imm)) {
      __ addiu32(dst, src1, imm);
    } else {
      __ move(AT, imm);
      __ addu32(dst, src1, AT);
    }
  %}
  ins_pipe( ialu_regI_regI );
%}
// Pointer add: 64-bit pointer + 64-bit long offset.
instruct addP_reg_reg(mRegP dst, mRegP src1, mRegL src2) %{
  match(Set dst (AddP src1 src2));

  format %{ "dadd $dst, $src1, $src2 #@addP_reg_reg" %}

  ins_encode %{
    Register dst = $dst$$Register;
    Register src1 = $src1$$Register;
    Register src2 = $src2$$Register;
    __ daddu(dst, src1, src2);
  %}

  ins_pipe( ialu_regI_regI );
%}

// Pointer add with an int offset widened to long (the ConvI2L is folded
// away; daddu consumes the register directly).
instruct addP_reg_reg_convI2L(mRegP dst, mRegP src1, mRegI src2) %{
  match(Set dst (AddP src1 (ConvI2L src2)));

  format %{ "dadd $dst, $src1, $src2 #@addP_reg_reg_convI2L" %}

  ins_encode %{
    Register dst = $dst$$Register;
    Register src1 = $src1$$Register;
    Register src2 = $src2$$Register;
    __ daddu(dst, src1, src2);
  %}

  ins_pipe( ialu_regI_regI );
%}

// Pointer add, register + long immediate.  One daddiu when the constant is
// a signed 16-bit value; otherwise set64 loads it into AT first.
instruct addP_reg_imm(mRegP dst, mRegP src1, immL src2) %{
  match(Set dst (AddP src1 src2));

  format %{ "daddi $dst, $src1, $src2 #@addP_reg_imm" %}
  ins_encode %{
    Register src1 = $src1$$Register;
    long src2 = $src2$$constant;
    Register dst = $dst$$Register;

    if(Assembler::is_simm16(src2)) {
      __ daddiu(dst, src1, src2);
    } else {
      __ set64(AT, src2);
      __ daddu(dst, src1, AT);
    }
  %}
  ins_pipe( ialu_regI_imm16 );
%}
// Add Long Register with Register
instruct addL_Reg_Reg(mRegL dst, mRegL src1, mRegL src2) %{
  match(Set dst (AddL src1 src2));
  ins_cost(200);
  format %{ "ADD $dst, $src1, $src2 #@addL_Reg_Reg\t" %}

  ins_encode %{
    Register dst_reg = as_Register($dst$$reg);
    Register src1_reg = as_Register($src1$$reg);
    Register src2_reg = as_Register($src2$$reg);

    __ daddu(dst_reg, src1_reg, src2_reg);
  %}

  ins_pipe( ialu_regL_regL );
%}

// Long add, register + 16-bit signed immediate (immL16 guarantees the
// constant fits the daddiu immediate field).
instruct addL_Reg_imm(mRegL dst, mRegL src1, immL16 src2)
%{
  match(Set dst (AddL src1 src2));

  format %{ "ADD $dst, $src1, $src2 #@addL_Reg_imm " %}
  ins_encode %{
    Register dst_reg = as_Register($dst$$reg);
    Register src1_reg = as_Register($src1$$reg);
    int src2_imm = $src2$$constant;

    __ daddiu(dst_reg, src1_reg, src2_imm);
  %}

  ins_pipe( ialu_regL_regL );
%}

// Long add of a widened int and a 16-bit immediate; the ConvI2L is folded
// into the 64-bit daddiu.
instruct addL_RegI2L_imm(mRegL dst, mRegI src1, immL16 src2)
%{
  match(Set dst (AddL (ConvI2L src1) src2));

  format %{ "ADD $dst, $src1, $src2 #@addL_RegI2L_imm " %}
  ins_encode %{
    Register dst_reg = as_Register($dst$$reg);
    Register src1_reg = as_Register($src1$$reg);
    int src2_imm = $src2$$constant;

    __ daddiu(dst_reg, src1_reg, src2_imm);
  %}

  ins_pipe( ialu_regL_regL );
%}

// Long add with the left operand being a widened int.
instruct addL_RegI2L_Reg(mRegL dst, mRegI src1, mRegL src2) %{
  match(Set dst (AddL (ConvI2L src1) src2));
  ins_cost(200);
  format %{ "ADD $dst, $src1, $src2 #@addL_RegI2L_Reg\t" %}

  ins_encode %{
    Register dst_reg = as_Register($dst$$reg);
    Register src1_reg = as_Register($src1$$reg);
    Register src2_reg = as_Register($src2$$reg);

    __ daddu(dst_reg, src1_reg, src2_reg);
  %}

  ins_pipe( ialu_regL_regL );
%}

// Long add where both operands are widened ints.
instruct addL_RegI2L_RegI2L(mRegL dst, mRegI src1, mRegI src2) %{
  match(Set dst (AddL (ConvI2L src1) (ConvI2L src2)));
  ins_cost(200);
  format %{ "ADD $dst, $src1, $src2 #@addL_RegI2L_RegI2L\t" %}

  ins_encode %{
    Register dst_reg = as_Register($dst$$reg);
    Register src1_reg = as_Register($src1$$reg);
    Register src2_reg = as_Register($src2$$reg);

    __ daddu(dst_reg, src1_reg, src2_reg);
  %}

  ins_pipe( ialu_regL_regL );
%}

// Long add with the right operand being a widened int.
instruct addL_Reg_RegI2L(mRegL dst, mRegL src1, mRegI src2) %{
  match(Set dst (AddL src1 (ConvI2L src2)));
  ins_cost(200);
  format %{ "ADD $dst, $src1, $src2 #@addL_Reg_RegI2L\t" %}

  ins_encode %{
    Register dst_reg = as_Register($dst$$reg);
    Register src1_reg = as_Register($src1$$reg);
    Register src2_reg = as_Register($src2$$reg);

    __ daddu(dst_reg, src1_reg, src2_reg);
  %}

  ins_pipe( ialu_regL_regL );
%}
10028 //----------Subtraction Instructions-------------------------------------------
10029 // Integer Subtraction Instructions
// 32-bit integer subtract, register - register.
instruct subI_Reg_Reg(mRegI dst, mRegI src1, mRegI src2) %{
  match(Set dst (SubI src1 src2));
  ins_cost(100);

  format %{ "sub $dst, $src1, $src2 #@subI_Reg_Reg" %}
  ins_encode %{
    Register dst = $dst$$Register;
    Register src1 = $src1$$Register;
    Register src2 = $src2$$Register;
    __ subu32(dst, src1, src2);
  %}
  ins_pipe( ialu_regI_regI );
%}

// Subtract of an immediate, encoded as an add of the negated constant
// (immI16_sub guarantees -constant fits the addiu immediate field).
instruct subI_Reg_immI16_sub(mRegI dst, mRegI src1, immI16_sub src2) %{
  match(Set dst (SubI src1 src2));
  ins_cost(80);

  format %{ "sub $dst, $src1, $src2 #@subI_Reg_immI16_sub" %}
  ins_encode %{
    Register dst = $dst$$Register;
    Register src1 = $src1$$Register;
    __ addiu32(dst, src1, -1 * $src2$$constant);
  %}
  ins_pipe( ialu_regI_regI );
%}

// Integer negate: 0 - src.
instruct negI_Reg(mRegI dst, immI0 zero, mRegI src) %{
  match(Set dst (SubI zero src));
  ins_cost(80);

  format %{ "neg $dst, $src #@negI_Reg" %}
  ins_encode %{
    Register dst = $dst$$Register;
    Register src = $src$$Register;
    __ subu32(dst, R0, src);
  %}
  ins_pipe( ialu_regI_regI );
%}

// Long negate: 0 - src.
instruct negL_Reg(mRegL dst, immL0 zero, mRegL src) %{
  match(Set dst (SubL zero src));
  ins_cost(80);

  format %{ "neg $dst, $src #@negL_Reg" %}
  ins_encode %{
    Register dst = $dst$$Register;
    Register src = $src$$Register;
    __ subu(dst, R0, src);
  %}
  ins_pipe( ialu_regI_regI );
%}

// Long subtract of an immediate, encoded as a 64-bit add of the negated
// constant (immL16_sub guarantees -constant fits daddiu).
instruct subL_Reg_immL16_sub(mRegL dst, mRegL src1, immL16_sub src2) %{
  match(Set dst (SubL src1 src2));
  ins_cost(80);

  format %{ "sub $dst, $src1, $src2 #@subL_Reg_immL16_sub" %}
  ins_encode %{
    Register dst = $dst$$Register;
    Register src1 = $src1$$Register;
    __ daddiu(dst, src1, -1 * $src2$$constant);
  %}
  ins_pipe( ialu_regI_regI );
%}
10096 // Subtract Long Register with Register.
// Long subtract, register - register.
instruct subL_Reg_Reg(mRegL dst, mRegL src1, mRegL src2) %{
  match(Set dst (SubL src1 src2));
  ins_cost(100);
  format %{ "SubL $dst, $src1, $src2 @ subL_Reg_Reg" %}
  ins_encode %{
    Register dst = as_Register($dst$$reg);
    Register src1 = as_Register($src1$$reg);
    Register src2 = as_Register($src2$$reg);

    __ subu(dst, src1, src2);
  %}
  ins_pipe( ialu_regL_regL );
%}

// Long subtract with the right operand being a widened int (ConvI2L folded).
instruct subL_Reg_RegI2L(mRegL dst, mRegL src1, mRegI src2) %{
  match(Set dst (SubL src1 (ConvI2L src2)));
  ins_cost(100);
  format %{ "SubL $dst, $src1, $src2 @ subL_Reg_RegI2L" %}
  ins_encode %{
    Register dst = as_Register($dst$$reg);
    Register src1 = as_Register($src1$$reg);
    Register src2 = as_Register($src2$$reg);

    __ subu(dst, src1, src2);
  %}
  ins_pipe( ialu_regL_regL );
%}

// Long subtract with the left operand being a widened int.
instruct subL_RegI2L_Reg(mRegL dst, mRegI src1, mRegL src2) %{
  match(Set dst (SubL (ConvI2L src1) src2));
  ins_cost(200);
  format %{ "SubL $dst, $src1, $src2 @ subL_RegI2L_Reg" %}
  ins_encode %{
    Register dst = as_Register($dst$$reg);
    Register src1 = as_Register($src1$$reg);
    Register src2 = as_Register($src2$$reg);

    __ subu(dst, src1, src2);
  %}
  ins_pipe( ialu_regL_regL );
%}

// Long subtract where both operands are widened ints.
instruct subL_RegI2L_RegI2L(mRegL dst, mRegI src1, mRegI src2) %{
  match(Set dst (SubL (ConvI2L src1) (ConvI2L src2)));
  ins_cost(200);
  format %{ "SubL $dst, $src1, $src2 @ subL_RegI2L_RegI2L" %}
  ins_encode %{
    Register dst = as_Register($dst$$reg);
    Register src1 = as_Register($src1$$reg);
    Register src2 = as_Register($src2$$reg);

    __ subu(dst, src1, src2);
  %}
  ins_pipe( ialu_regL_regL );
%}
10153 // Integer MOD with Register
// Integer MOD with Register
// 32-bit remainder: div writes quotient to LO and remainder to HI; the
// remainder is read back with mfhi.
instruct modI_Reg_Reg(mRegI dst, mRegI src1, mRegI src2) %{
  match(Set dst (ModI src1 src2));
  ins_cost(300);
  format %{ "modi $dst, $src1, $src2 @ modI_Reg_Reg" %}
  ins_encode %{
    Register dst = $dst$$Register;
    Register src1 = $src1$$Register;
    Register src2 = $src2$$Register;

    // The Loongson gsmod path is intentionally disabled (kept for
    // reference) — see the 2016.08.10 note below.
    //if (UseLoongsonISA) {
    if (0) {
      // 2016.08.10
      // Experiments show that gsmod is slower that div+mfhi.
      // So I just disable it here.
      __ gsmod(dst, src1, src2);
    } else {
      __ div(src1, src2);
      __ mfhi(dst);
    }
  %}

  //ins_pipe( ialu_mod );
  ins_pipe( ialu_regI_regI );
%}

// 64-bit remainder: Loongson gsdmod when available, otherwise ddiv + mfhi.
instruct modL_reg_reg(mRegL dst, mRegL src1, mRegL src2) %{
  match(Set dst (ModL src1 src2));
  format %{ "modL $dst, $src1, $src2 @modL_reg_reg" %}

  ins_encode %{
    Register dst = as_Register($dst$$reg);
    Register op1 = as_Register($src1$$reg);
    Register op2 = as_Register($src2$$reg);

    if (UseLoongsonISA) {
      __ gsdmod(dst, op1, op2);
    } else {
      __ ddiv(op1, op2);
      __ mfhi(dst);
    }
  %}
  ins_pipe( pipe_slow );
%}
// 32-bit integer multiply, register * register.
instruct mulI_Reg_Reg(mRegI dst, mRegI src1, mRegI src2) %{
  match(Set dst (MulI src1 src2));

  ins_cost(300);
  format %{ "mul $dst, $src1, $src2 @ mulI_Reg_Reg" %}
  ins_encode %{
    Register src1 = $src1$$Register;
    Register src2 = $src2$$Register;
    Register dst = $dst$$Register;

    __ mul(dst, src1, src2);
  %}
  ins_pipe( ialu_mult );
%}

// Fused multiply-add: dst = src1 * src2 + src3, via the HI/LO accumulator.
// src3 is seeded into LO, madd accumulates the product into HI:LO, and the
// low 32 bits are read back with mflo.  HI is not seeded — only LO is read,
// so its stale contents do not affect the 32-bit result.
instruct maddI_Reg_Reg(mRegI dst, mRegI src1, mRegI src2, mRegI src3) %{
  match(Set dst (AddI (MulI src1 src2) src3));

  ins_cost(999);
  format %{ "madd $dst, $src1 * $src2 + $src3 #@maddI_Reg_Reg" %}
  ins_encode %{
    Register src1 = $src1$$Register;
    Register src2 = $src2$$Register;
    Register src3 = $src3$$Register;
    Register dst = $dst$$Register;

    __ mtlo(src3);
    __ madd(src1, src2);
    __ mflo(dst);
  %}
  ins_pipe( ialu_mult );
%}
// 32-bit integer divide.  A conditional trap substitutes for the hardware
// divide-by-zero exception MIPS lacks (see the in-line note).
instruct divI_Reg_Reg(mRegI dst, mRegI src1, mRegI src2) %{
  match(Set dst (DivI src1 src2));

  ins_cost(300);
  format %{ "div $dst, $src1, $src2 @ divI_Reg_Reg" %}
  ins_encode %{
    Register src1 = $src1$$Register;
    Register src2 = $src2$$Register;
    Register dst = $dst$$Register;

    /* 2012/4/21 Jin: In MIPS, div does not cause exception.
       We must trap an exception manually. */
    // teq traps (code 0x7) when src2 == R0, i.e. on division by zero.
    __ teq(R0, src2, 0x7);

    if (UseLoongsonISA) {
      __ gsdiv(dst, src1, src2);
    } else {
      __ div(src1, src2);

      // NOTE(review): the two nops look like HI/LO hazard padding before
      // mflo — confirm against the target pipeline's interlock rules.
      __ nop();
      __ nop();
      __ mflo(dst);
    }
  %}
  ins_pipe( ialu_mod );
%}

// Single-precision float divide.
instruct divF_Reg_Reg(regF dst, regF src1, regF src2) %{
  match(Set dst (DivF src1 src2));

  ins_cost(300);
  format %{ "divF $dst, $src1, $src2 @ divF_Reg_Reg" %}
  ins_encode %{
    FloatRegister src1 = $src1$$FloatRegister;
    FloatRegister src2 = $src2$$FloatRegister;
    FloatRegister dst = $dst$$FloatRegister;

    /* Here do we need to trap an exception manually ? */
    __ div_s(dst, src1, src2);
  %}
  ins_pipe( pipe_slow );
%}

// Double-precision float divide.
instruct divD_Reg_Reg(regD dst, regD src1, regD src2) %{
  match(Set dst (DivD src1 src2));

  ins_cost(300);
  format %{ "divD $dst, $src1, $src2 @ divD_Reg_Reg" %}
  ins_encode %{
    FloatRegister src1 = $src1$$FloatRegister;
    FloatRegister src2 = $src2$$FloatRegister;
    FloatRegister dst = $dst$$FloatRegister;

    /* Here do we need to trap an exception manually ? */
    __ div_d(dst, src1, src2);
  %}
  ins_pipe( pipe_slow );
%}
// 64-bit multiply: Loongson gsdmult when available, otherwise dmult + mflo
// (low 64 bits of the product).
instruct mulL_reg_reg(mRegL dst, mRegL src1, mRegL src2) %{
  match(Set dst (MulL src1 src2));
  format %{ "mulL $dst, $src1, $src2 @mulL_reg_reg" %}
  ins_encode %{
    Register dst = as_Register($dst$$reg);
    Register op1 = as_Register($src1$$reg);
    Register op2 = as_Register($src2$$reg);

    if (UseLoongsonISA) {
      __ gsdmult(dst, op1, op2);
    } else {
      __ dmult(op1, op2);
      __ mflo(dst);
    }
  %}
  ins_pipe( pipe_slow );
%}

// 64-bit multiply with the right operand being a widened int (ConvI2L
// folded into the 64-bit multiply).
instruct mulL_reg_regI2L(mRegL dst, mRegL src1, mRegI src2) %{
  match(Set dst (MulL src1 (ConvI2L src2)));
  format %{ "mulL $dst, $src1, $src2 @mulL_reg_regI2L" %}
  ins_encode %{
    Register dst = as_Register($dst$$reg);
    Register op1 = as_Register($src1$$reg);
    Register op2 = as_Register($src2$$reg);

    if (UseLoongsonISA) {
      __ gsdmult(dst, op1, op2);
    } else {
      __ dmult(op1, op2);
      __ mflo(dst);
    }
  %}
  ins_pipe( pipe_slow );
%}

// 64-bit divide: Loongson gsddiv when available, otherwise ddiv + mflo.
instruct divL_reg_reg(mRegL dst, mRegL src1, mRegL src2) %{
  match(Set dst (DivL src1 src2));
  format %{ "divL $dst, $src1, $src2 @divL_reg_reg" %}

  ins_encode %{
    Register dst = as_Register($dst$$reg);
    Register op1 = as_Register($src1$$reg);
    Register op2 = as_Register($src2$$reg);

    if (UseLoongsonISA) {
      __ gsddiv(dst, op1, op2);
    } else {
      __ ddiv(op1, op2);
      __ mflo(dst);
    }
  %}
  ins_pipe( pipe_slow );
%}
// Single-precision float add.
instruct addF_reg_reg(regF dst, regF src1, regF src2) %{
  match(Set dst (AddF src1 src2));
  format %{ "AddF $dst, $src1, $src2 @addF_reg_reg" %}
  ins_encode %{
    FloatRegister src1 = as_FloatRegister($src1$$reg);
    FloatRegister src2 = as_FloatRegister($src2$$reg);
    FloatRegister dst = as_FloatRegister($dst$$reg);

    __ add_s(dst, src1, src2);
  %}
  ins_pipe( fpu_regF_regF );
%}

// Single-precision float subtract.
instruct subF_reg_reg(regF dst, regF src1, regF src2) %{
  match(Set dst (SubF src1 src2));
  format %{ "SubF $dst, $src1, $src2 @subF_reg_reg" %}
  ins_encode %{
    FloatRegister src1 = as_FloatRegister($src1$$reg);
    FloatRegister src2 = as_FloatRegister($src2$$reg);
    FloatRegister dst = as_FloatRegister($dst$$reg);

    __ sub_s(dst, src1, src2);
  %}
  ins_pipe( fpu_regF_regF );
%}

// Double-precision float add.
instruct addD_reg_reg(regD dst, regD src1, regD src2) %{
  match(Set dst (AddD src1 src2));
  format %{ "AddD $dst, $src1, $src2 @addD_reg_reg" %}
  ins_encode %{
    FloatRegister src1 = as_FloatRegister($src1$$reg);
    FloatRegister src2 = as_FloatRegister($src2$$reg);
    FloatRegister dst = as_FloatRegister($dst$$reg);

    __ add_d(dst, src1, src2);
  %}
  ins_pipe( fpu_regF_regF );
%}

// Double-precision float subtract.
instruct subD_reg_reg(regD dst, regD src1, regD src2) %{
  match(Set dst (SubD src1 src2));
  format %{ "SubD $dst, $src1, $src2 @subD_reg_reg" %}
  ins_encode %{
    FloatRegister src1 = as_FloatRegister($src1$$reg);
    FloatRegister src2 = as_FloatRegister($src2$$reg);
    FloatRegister dst = as_FloatRegister($dst$$reg);

    __ sub_d(dst, src1, src2);
  %}
  ins_pipe( fpu_regF_regF );
%}

// Single-precision float negate.
instruct negF_reg(regF dst, regF src) %{
  match(Set dst (NegF src));
  format %{ "negF $dst, $src @negF_reg" %}
  ins_encode %{
    FloatRegister src = as_FloatRegister($src$$reg);
    FloatRegister dst = as_FloatRegister($dst$$reg);

    __ neg_s(dst, src);
  %}
  ins_pipe( fpu_regF_regF );
%}

// Double-precision float negate.
instruct negD_reg(regD dst, regD src) %{
  match(Set dst (NegD src));
  format %{ "negD $dst, $src @negD_reg" %}
  ins_encode %{
    FloatRegister src = as_FloatRegister($src$$reg);
    FloatRegister dst = as_FloatRegister($dst$$reg);

    __ neg_d(dst, src);
  %}
  ins_pipe( fpu_regF_regF );
%}
// Single-precision float multiply.
instruct mulF_reg_reg(regF dst, regF src1, regF src2) %{
  match(Set dst (MulF src1 src2));
  format %{ "MULF $dst, $src1, $src2 @mulF_reg_reg" %}
  ins_encode %{
    FloatRegister src1 = $src1$$FloatRegister;
    FloatRegister src2 = $src2$$FloatRegister;
    FloatRegister dst = $dst$$FloatRegister;

    __ mul_s(dst, src1, src2);
  %}
  ins_pipe( fpu_regF_regF );
%}

// Single-precision fused multiply-add: dst = src1 * src2 + src3.
// The very high ins_cost keeps the matcher from selecting it.
instruct maddF_reg_reg(regF dst, regF src1, regF src2, regF src3) %{
  match(Set dst (AddF (MulF src1 src2) src3));
  // For compatibility reason (e.g. on the Loongson platform), disable this guy.
  ins_cost(44444);
  format %{ "maddF $dst, $src1, $src2, $src3 @maddF_reg_reg" %}
  ins_encode %{
    FloatRegister src1 = $src1$$FloatRegister;
    FloatRegister src2 = $src2$$FloatRegister;
    FloatRegister src3 = $src3$$FloatRegister;
    FloatRegister dst = $dst$$FloatRegister;

    __ madd_s(dst, src1, src2, src3);
  %}
  ins_pipe( fpu_regF_regF );
%}

// Mul two double precision floating piont number
instruct mulD_reg_reg(regD dst, regD src1, regD src2) %{
  match(Set dst (MulD src1 src2));
  format %{ "MULD $dst, $src1, $src2 @mulD_reg_reg" %}
  ins_encode %{
    FloatRegister src1 = $src1$$FloatRegister;
    FloatRegister src2 = $src2$$FloatRegister;
    FloatRegister dst = $dst$$FloatRegister;

    __ mul_d(dst, src1, src2);
  %}
  ins_pipe( fpu_regF_regF );
%}

// Double-precision fused multiply-add: dst = src1 * src2 + src3.
// The very high ins_cost keeps the matcher from selecting it.
instruct maddD_reg_reg(regD dst, regD src1, regD src2, regD src3) %{
  match(Set dst (AddD (MulD src1 src2) src3));
  // For compatibility reason (e.g. on the Loongson platform), disable this guy.
  ins_cost(44444);
  format %{ "maddD $dst, $src1, $src2, $src3 @maddD_reg_reg" %}
  ins_encode %{
    FloatRegister src1 = $src1$$FloatRegister;
    FloatRegister src2 = $src2$$FloatRegister;
    FloatRegister src3 = $src3$$FloatRegister;
    FloatRegister dst = $dst$$FloatRegister;

    __ madd_d(dst, src1, src2, src3);
  %}
  ins_pipe( fpu_regF_regF );
%}
// Single-precision absolute value.
instruct absF_reg(regF dst, regF src) %{
  match(Set dst (AbsF src));
  ins_cost(100);
  format %{ "absF $dst, $src @absF_reg" %}
  ins_encode %{
    FloatRegister src = as_FloatRegister($src$$reg);
    FloatRegister dst = as_FloatRegister($dst$$reg);

    __ abs_s(dst, src);
  %}
  ins_pipe( fpu_regF_regF );
%}


// intrinsics for math_native.
// AbsD SqrtD CosD SinD TanD LogD Log10D

// Double-precision absolute value.
instruct absD_reg(regD dst, regD src) %{
  match(Set dst (AbsD src));
  ins_cost(100);
  format %{ "absD $dst, $src @absD_reg" %}
  ins_encode %{
    FloatRegister src = as_FloatRegister($src$$reg);
    FloatRegister dst = as_FloatRegister($dst$$reg);

    __ abs_d(dst, src);
  %}
  ins_pipe( fpu_regF_regF );
%}

// Double-precision square root.
instruct sqrtD_reg(regD dst, regD src) %{
  match(Set dst (SqrtD src));
  ins_cost(100);
  format %{ "SqrtD $dst, $src @sqrtD_reg" %}
  ins_encode %{
    FloatRegister src = as_FloatRegister($src$$reg);
    FloatRegister dst = as_FloatRegister($dst$$reg);

    __ sqrt_d(dst, src);
  %}
  ins_pipe( fpu_regF_regF );
%}

// Single-precision square root.  Matches the float sqrt idiom
// (float)Math.sqrt((double)f) — ConvD2F(SqrtD(ConvF2D src)) — and
// collapses it to a single sqrt_s.
instruct sqrtF_reg(regF dst, regF src) %{
  match(Set dst (ConvD2F (SqrtD (ConvF2D src))));
  ins_cost(100);
  format %{ "SqrtF $dst, $src @sqrtF_reg" %}
  ins_encode %{
    FloatRegister src = as_FloatRegister($src$$reg);
    FloatRegister dst = as_FloatRegister($dst$$reg);

    __ sqrt_s(dst, src);
  %}
  ins_pipe( fpu_regF_regF );
%}
10535 //----------------------------------Logical Instructions----------------------
10536 //__________________________________Integer Logical Instructions-------------
10538 //And Instuctions
10539 // And Register with Immediate
//And Instuctions
// And Register with Immediate
// General case: the full 32-bit constant is materialized into AT first.
instruct andI_Reg_immI(mRegI dst, mRegI src1, immI src2) %{
  match(Set dst (AndI src1 src2));

  format %{ "and $dst, $src1, $src2 #@andI_Reg_immI" %}
  ins_encode %{
    Register dst = $dst$$Register;
    Register src = $src1$$Register;
    int val = $src2$$constant;

    __ move(AT, val);
    __ andr(dst, src, AT);
  %}
  ins_pipe( ialu_regI_regI );
%}

// AND with a constant in [0, 65535]: fits andi's zero-extended 16-bit
// immediate, so no scratch register is needed.
instruct andI_Reg_imm_0_65535(mRegI dst, mRegI src1, immI_0_65535 src2) %{
  match(Set dst (AndI src1 src2));
  ins_cost(60);

  format %{ "and $dst, $src1, $src2 #@andI_Reg_imm_0_65535" %}
  ins_encode %{
    Register dst = $dst$$Register;
    Register src = $src1$$Register;
    int val = $src2$$constant;

    __ andi(dst, src, val);
  %}
  ins_pipe( ialu_regI_regI );
%}

// AND with a low-bit mask (2^k - 1): one ext (bit-field extract) of the
// low `size` bits, where size comes from is_int_mask.
instruct andI_Reg_immI_nonneg_mask(mRegI dst, mRegI src1, immI_nonneg_mask mask) %{
  match(Set dst (AndI src1 mask));
  ins_cost(60);

  format %{ "and $dst, $src1, $mask #@andI_Reg_immI_nonneg_mask" %}
  ins_encode %{
    Register dst = $dst$$Register;
    Register src = $src1$$Register;
    int size = Assembler::is_int_mask($mask$$constant);

    __ ext(dst, src, 0, size);
  %}
  ins_pipe( ialu_regI_regI );
%}

// Long AND with a low-bit mask (2^k - 1): one dext of the low `size` bits.
instruct andL_Reg_immL_nonneg_mask(mRegL dst, mRegL src1, immL_nonneg_mask mask) %{
  match(Set dst (AndL src1 mask));
  ins_cost(60);

  format %{ "and $dst, $src1, $mask #@andL_Reg_immL_nonneg_mask" %}
  ins_encode %{
    Register dst = $dst$$Register;
    Register src = $src1$$Register;
    int size = Assembler::is_jlong_mask($mask$$constant);

    __ dext(dst, src, 0, size);
  %}
  ins_pipe( ialu_regI_regI );
%}
// XOR with a constant in [0, 65535]: fits xori's zero-extended immediate.
instruct xorI_Reg_imm_0_65535(mRegI dst, mRegI src1, immI_0_65535 src2) %{
  match(Set dst (XorI src1 src2));
  ins_cost(60);

  format %{ "xori $dst, $src1, $src2 #@xorI_Reg_imm_0_65535" %}
  ins_encode %{
    Register dst = $dst$$Register;
    Register src = $src1$$Register;
    int val = $src2$$constant;

    __ xori(dst, src, val);
  %}
  ins_pipe( ialu_regI_regI );
%}

// Bitwise NOT (XOR with -1), implemented as Loongson orn with R0:
// dst = R0 | ~src = ~src.
instruct xorI_Reg_immI_M1(mRegI dst, mRegI src1, immI_M1 M1) %{
  match(Set dst (XorI src1 M1));
  predicate(UseLoongsonISA && Use3A2000);
  ins_cost(60);

  format %{ "xor $dst, $src1, $M1 #@xorI_Reg_immI_M1" %}
  ins_encode %{
    Register dst = $dst$$Register;
    Register src = $src1$$Register;

    __ gsorn(dst, R0, src);
  %}
  ins_pipe( ialu_regI_regI );
%}

// Bitwise NOT of a narrowed long (ConvL2I folded; only the low 32 bits of
// the result are observed by the int destination).
instruct xorL2I_Reg_immI_M1(mRegI dst, mRegL src1, immI_M1 M1) %{
  match(Set dst (XorI (ConvL2I src1) M1));
  predicate(UseLoongsonISA && Use3A2000);
  ins_cost(60);

  format %{ "xor $dst, $src1, $M1 #@xorL2I_Reg_immI_M1" %}
  ins_encode %{
    Register dst = $dst$$Register;
    Register src = $src1$$Register;

    __ gsorn(dst, R0, src);
  %}
  ins_pipe( ialu_regI_regI );
%}

// Long XOR with a constant in [0, 65535]: fits xori's immediate.
instruct xorL_Reg_imm_0_65535(mRegL dst, mRegL src1, immL_0_65535 src2) %{
  match(Set dst (XorL src1 src2));
  ins_cost(60);

  format %{ "xori $dst, $src1, $src2 #@xorL_Reg_imm_0_65535" %}
  ins_encode %{
    Register dst = $dst$$Register;
    Register src = $src1$$Register;
    int val = $src2$$constant;

    __ xori(dst, src, val);
  %}
  ins_pipe( ialu_regI_regI );
%}
10660 /*
10661 instruct xorL_Reg_immL_M1(mRegL dst, mRegL src1, immL_M1 M1) %{
10662 match(Set dst (XorL src1 M1));
10663 predicate(UseLoongsonISA);
10664 ins_cost(60);
10666 format %{ "xor $dst, $src1, $M1 #@xorL_Reg_immL_M1" %}
10667 ins_encode %{
10668 Register dst = $dst$$Register;
10669 Register src = $src1$$Register;
10671 __ gsorn(dst, R0, src);
10672 %}
10673 ins_pipe( ialu_regI_regI );
10674 %}
10675 */
10677 instruct lbu_and_lmask(mRegI dst, memory mem, immI_255 mask) %{
10678 match(Set dst (AndI mask (LoadB mem)));
10679 ins_cost(60);
10681 format %{ "lhu $dst, $mem #@lbu_and_lmask" %}
10682 ins_encode(load_UB_enc(dst, mem));
10683 ins_pipe( ialu_loadI );
10684 %}
10686 instruct lbu_and_rmask(mRegI dst, memory mem, immI_255 mask) %{
10687 match(Set dst (AndI (LoadB mem) mask));
10688 ins_cost(60);
10690 format %{ "lhu $dst, $mem #@lbu_and_rmask" %}
10691 ins_encode(load_UB_enc(dst, mem));
10692 ins_pipe( ialu_loadI );
10693 %}
// 32-bit AND, register & register.
instruct andI_Reg_Reg(mRegI dst, mRegI src1, mRegI src2) %{
  match(Set dst (AndI src1 src2));

  format %{ "and $dst, $src1, $src2 #@andI_Reg_Reg" %}
  ins_encode %{
    Register dst = $dst$$Register;
    Register src1 = $src1$$Register;
    Register src2 = $src2$$Register;
    __ andr(dst, src1, src2);
  %}
  ins_pipe( ialu_regI_regI );
%}

// src1 & ~src2, matched from (AndI src1 (XorI src2 -1)) and emitted as a
// single Loongson gsandn.
instruct andnI_Reg_nReg(mRegI dst, mRegI src1, mRegI src2, immI_M1 M1) %{
  match(Set dst (AndI src1 (XorI src2 M1)));
  predicate(UseLoongsonISA && Use3A2000);

  format %{ "andn $dst, $src1, $src2 #@andnI_Reg_nReg" %}
  ins_encode %{
    Register dst = $dst$$Register;
    Register src1 = $src1$$Register;
    Register src2 = $src2$$Register;

    __ gsandn(dst, src1, src2);
  %}
  ins_pipe( ialu_regI_regI );
%}

// src1 | ~src2, matched from (OrI src1 (XorI src2 -1)) and emitted as a
// single Loongson gsorn.
instruct ornI_Reg_nReg(mRegI dst, mRegI src1, mRegI src2, immI_M1 M1) %{
  match(Set dst (OrI src1 (XorI src2 M1)));
  predicate(UseLoongsonISA && Use3A2000);

  format %{ "orn $dst, $src1, $src2 #@ornI_Reg_nReg" %}
  ins_encode %{
    Register dst = $dst$$Register;
    Register src1 = $src1$$Register;
    Register src2 = $src2$$Register;

    __ gsorn(dst, src1, src2);
  %}
  ins_pipe( ialu_regI_regI );
%}

// Mirror form: (~src1) & src2 — operands swapped into gsandn(dst, src2, src1).
instruct andnI_nReg_Reg(mRegI dst, mRegI src1, mRegI src2, immI_M1 M1) %{
  match(Set dst (AndI (XorI src1 M1) src2));
  predicate(UseLoongsonISA && Use3A2000);

  format %{ "andn $dst, $src2, $src1 #@andnI_nReg_Reg" %}
  ins_encode %{
    Register dst = $dst$$Register;
    Register src1 = $src1$$Register;
    Register src2 = $src2$$Register;

    __ gsandn(dst, src2, src1);
  %}
  ins_pipe( ialu_regI_regI );
%}

// Mirror form: (~src1) | src2 — operands swapped into gsorn(dst, src2, src1).
instruct ornI_nReg_Reg(mRegI dst, mRegI src1, mRegI src2, immI_M1 M1) %{
  match(Set dst (OrI (XorI src1 M1) src2));
  predicate(UseLoongsonISA && Use3A2000);

  format %{ "orn $dst, $src2, $src1 #@ornI_nReg_Reg" %}
  ins_encode %{
    Register dst = $dst$$Register;
    Register src1 = $src1$$Register;
    Register src2 = $src2$$Register;

    __ gsorn(dst, src2, src1);
  %}
  ins_pipe( ialu_regI_regI );
%}
10768 // And Long Register with Register
// Long AND, register & register.
instruct andL_Reg_Reg(mRegL dst, mRegL src1, mRegL src2) %{
  match(Set dst (AndL src1 src2));
  format %{ "AND $dst, $src1, $src2 @ andL_Reg_Reg\n\t" %}
  ins_encode %{
    Register dst_reg = as_Register($dst$$reg);
    Register src1_reg = as_Register($src1$$reg);
    Register src2_reg = as_Register($src2$$reg);

    __ andr(dst_reg, src1_reg, src2_reg);
  %}
  ins_pipe( ialu_regL_regL );
%}

// Long AND with the right operand being a widened int (ConvI2L folded).
instruct andL_Reg_Reg_convI2L(mRegL dst, mRegL src1, mRegI src2) %{
  match(Set dst (AndL src1 (ConvI2L src2)));
  format %{ "AND $dst, $src1, $src2 @ andL_Reg_Reg_convI2L\n\t" %}
  ins_encode %{
    Register dst_reg = as_Register($dst$$reg);
    Register src1_reg = as_Register($src1$$reg);
    Register src2_reg = as_Register($src2$$reg);

    __ andr(dst_reg, src1_reg, src2_reg);
  %}
  ins_pipe( ialu_regL_regL );
%}

// Long AND with a constant in [0, 65535]: fits andi's zero-extended
// 16-bit immediate.
instruct andL_Reg_imm_0_65535(mRegL dst, mRegL src1, immL_0_65535 src2) %{
  match(Set dst (AndL src1 src2));
  ins_cost(60);

  format %{ "and $dst, $src1, $src2 #@andL_Reg_imm_0_65535" %}
  ins_encode %{
    Register dst = $dst$$Register;
    Register src = $src1$$Register;
    long val = $src2$$constant;

    __ andi(dst, src, val);
  %}
  ins_pipe( ialu_regI_regI );
%}

// (int)(src1 & imm16): the ConvL2I is free since andi already zeroes all
// upper bits.
instruct andL2I_Reg_imm_0_65535(mRegI dst, mRegL src1, immL_0_65535 src2) %{
  match(Set dst (ConvL2I (AndL src1 src2)));
  ins_cost(60);

  format %{ "and $dst, $src1, $src2 #@andL2I_Reg_imm_0_65535" %}
  ins_encode %{
    Register dst = $dst$$Register;
    Register src = $src1$$Register;
    long val = $src2$$constant;

    __ andi(dst, src, val);
  %}
  ins_pipe( ialu_regI_regI );
%}
10825 /*
10826 instruct andnL_Reg_nReg(mRegL dst, mRegL src1, mRegL src2, immL_M1 M1) %{
10827 match(Set dst (AndL src1 (XorL src2 M1)));
10828 predicate(UseLoongsonISA);
10830 format %{ "andn $dst, $src1, $src2 #@andnL_Reg_nReg" %}
10831 ins_encode %{
10832 Register dst = $dst$$Register;
10833 Register src1 = $src1$$Register;
10834 Register src2 = $src2$$Register;
10836 __ gsandn(dst, src1, src2);
10837 %}
10838 ins_pipe( ialu_regI_regI );
10839 %}
10840 */
10842 /*
10843 instruct ornL_Reg_nReg(mRegL dst, mRegL src1, mRegL src2, immL_M1 M1) %{
10844 match(Set dst (OrL src1 (XorL src2 M1)));
10845 predicate(UseLoongsonISA);
10847 format %{ "orn $dst, $src1, $src2 #@ornL_Reg_nReg" %}
10848 ins_encode %{
10849 Register dst = $dst$$Register;
10850 Register src1 = $src1$$Register;
10851 Register src2 = $src2$$Register;
10853 __ gsorn(dst, src1, src2);
10854 %}
10855 ins_pipe( ialu_regI_regI );
10856 %}
10857 */
10859 /*
10860 instruct andnL_nReg_Reg(mRegL dst, mRegL src1, mRegL src2, immL_M1 M1) %{
10861 match(Set dst (AndL (XorL src1 M1) src2));
10862 predicate(UseLoongsonISA);
10864 format %{ "andn $dst, $src2, $src1 #@andnL_nReg_Reg" %}
10865 ins_encode %{
10866 Register dst = $dst$$Register;
10867 Register src1 = $src1$$Register;
10868 Register src2 = $src2$$Register;
10870 __ gsandn(dst, src2, src1);
10871 %}
10872 ins_pipe( ialu_regI_regI );
10873 %}
10874 */
10876 /*
10877 instruct ornL_nReg_Reg(mRegL dst, mRegL src1, mRegL src2, immL_M1 M1) %{
10878 match(Set dst (OrL (XorL src1 M1) src2));
10879 predicate(UseLoongsonISA);
10881 format %{ "orn $dst, $src2, $src1 #@ornL_nReg_Reg" %}
10882 ins_encode %{
10883 Register dst = $dst$$Register;
10884 Register src1 = $src1$$Register;
10885 Register src2 = $src2$$Register;
10887 __ gsorn(dst, src2, src1);
10888 %}
10889 ins_pipe( ialu_regI_regI );
10890 %}
10891 */
// The following matchers implement AND with specific "all-ones except a
// small zero field" constants by inserting zero bits with dins
// (dins dst, R0, pos, size clears `size` bits starting at bit `pos`).
// Note these are two-operand forms: dst is both source and destination.

// dst &= -8 (…11111000): clear bits 0-2.
instruct andL_Reg_immL_M8(mRegL dst, immL_M8 M8) %{
  match(Set dst (AndL dst M8));
  ins_cost(60);

  format %{ "and $dst, $dst, $M8 #@andL_Reg_immL_M8" %}
  ins_encode %{
    Register dst = $dst$$Register;

    __ dins(dst, R0, 0, 3);
  %}
  ins_pipe( ialu_regI_regI );
%}

// dst &= -5 (…11111011): clear bit 2.
instruct andL_Reg_immL_M5(mRegL dst, immL_M5 M5) %{
  match(Set dst (AndL dst M5));
  ins_cost(60);

  format %{ "and $dst, $dst, $M5 #@andL_Reg_immL_M5" %}
  ins_encode %{
    Register dst = $dst$$Register;

    __ dins(dst, R0, 2, 1);
  %}
  ins_pipe( ialu_regI_regI );
%}

// dst &= -7 (…11111001): clear bits 1-2.
instruct andL_Reg_immL_M7(mRegL dst, immL_M7 M7) %{
  match(Set dst (AndL dst M7));
  ins_cost(60);

  format %{ "and $dst, $dst, $M7 #@andL_Reg_immL_M7" %}
  ins_encode %{
    Register dst = $dst$$Register;

    __ dins(dst, R0, 1, 2);
  %}
  ins_pipe( ialu_regI_regI );
%}

// dst &= -4 (…11111100): clear bits 0-1.
instruct andL_Reg_immL_M4(mRegL dst, immL_M4 M4) %{
  match(Set dst (AndL dst M4));
  ins_cost(60);

  format %{ "and $dst, $dst, $M4 #@andL_Reg_immL_M4" %}
  ins_encode %{
    Register dst = $dst$$Register;

    __ dins(dst, R0, 0, 2);
  %}
  ins_pipe( ialu_regI_regI );
%}

// dst &= -121 (…10000111): clear bits 3-6.
instruct andL_Reg_immL_M121(mRegL dst, immL_M121 M121) %{
  match(Set dst (AndL dst M121));
  ins_cost(60);

  format %{ "and $dst, $dst, $M121 #@andL_Reg_immL_M121" %}
  ins_encode %{
    Register dst = $dst$$Register;

    __ dins(dst, R0, 3, 4);
  %}
  ins_pipe( ialu_regI_regI );
%}
// Or Long Register with Register
instruct orL_Reg_Reg(mRegL dst, mRegL src1, mRegL src2) %{
  match(Set dst (OrL src1 src2));
  format %{ "OR $dst, $src1, $src2 @ orL_Reg_Reg\t" %}
  ins_encode %{
    // 64-bit bitwise OR of the two sources into dst.
    __ orr($dst$$Register, $src1$$Register, $src2$$Register);
  %}
  ins_pipe( ialu_regL_regL );
%}
instruct orL_Reg_P2XReg(mRegL dst, mRegP src1, mRegL src2) %{
  match(Set dst (OrL (CastP2X src1) src2));
  format %{ "OR $dst, $src1, $src2 @ orL_Reg_P2XReg\t" %}
  ins_encode %{
    // CastP2X is free at the machine level: the raw pointer bits are or'ed.
    __ orr($dst$$Register, $src1$$Register, $src2$$Register);
  %}
  ins_pipe( ialu_regL_regL );
%}
// Xor Long Register with Register
instruct xorL_Reg_Reg(mRegL dst, mRegL src1, mRegL src2) %{
  match(Set dst (XorL src1 src2));
  format %{ "XOR $dst, $src1, $src2 @ xorL_Reg_Reg\t" %}
  ins_encode %{
    // 64-bit bitwise exclusive-or.
    __ xorr($dst$$Register, $src1$$Register, $src2$$Register);
  %}
  ins_pipe( ialu_regL_regL );
%}
// Shift Left by 8-bit immediate
instruct salI_Reg_imm(mRegI dst, mRegI src, immI8 shift) %{
  match(Set dst (LShiftI src shift));

  format %{ "SHL $dst, $src, $shift #@salI_Reg_imm" %}
  ins_encode %{
    // 32-bit shift left; sll sign-extends its 32-bit result (MIPS64).
    __ sll($dst$$Register, $src$$Register, $shift$$constant);
  %}
  ins_pipe( ialu_regI_regI );
%}
instruct salL2I_Reg_imm(mRegI dst, mRegL src, immI8 shift) %{
  match(Set dst (LShiftI (ConvL2I src) shift));

  format %{ "SHL $dst, $src, $shift #@salL2I_Reg_imm" %}
  ins_encode %{
    Register src = $src$$Register;
    Register dst = $dst$$Register;
    int shamt = $shift$$constant;

    // No explicit ConvL2I is needed: sll operates on the low 32 bits and
    // sign-extends its 32-bit result, which is exactly L2I semantics.
    __ sll(dst, src, shamt);
  %}
  ins_pipe( ialu_regI_regI );
%}
instruct salI_Reg_imm_and_M65536(mRegI dst, mRegI src, immI_16 shift, immI_M65536 mask) %{
  match(Set dst (AndI (LShiftI src shift) mask));

  format %{ "SHL $dst, $src, $shift #@salI_Reg_imm_and_M65536" %}
  ins_encode %{
    Register src = $src$$Register;
    Register dst = $dst$$Register;

    // After shifting left by 16 the low 16 bits are already zero, so the
    // AndI with -65536 (0xffff0000) is implicit in the shift itself.
    __ sll(dst, src, 16);
  %}
  ins_pipe( ialu_regI_regI );
%}
instruct land7_2_s(mRegI dst, mRegL src, immL7 seven, immI_16 sixteen)
%{
  match(Set dst (RShiftI (LShiftI (ConvL2I (AndL src seven)) sixteen) sixteen));

  format %{ "andi $dst, $src, 7\t# @land7_2_s" %}
  ins_encode %{
    Register src = $src$$Register;
    Register dst = $dst$$Register;

    // (src & 7) fits in 3 bits, so the i2s-style <<16 >>16 pair and the
    // L2I conversion are all no-ops; a single andi yields the same value.
    __ andi(dst, src, 7);
  %}
  ins_pipe(ialu_regI_regI);
%}
instruct ori2s(mRegI dst, mRegI src1, immI_0_32767 src2, immI_16 sixteen)
%{
  match(Set dst (RShiftI (LShiftI (OrI src1 src2) sixteen) sixteen));

  format %{ "ori $dst, $src1, $src2\t# @ori2s" %}
  ins_encode %{
    Register src = $src1$$Register;
    int val = $src2$$constant;
    Register dst = $dst$$Register;

    // ((src1 | imm) << 16 >> 16) folded to a plain ori.
    // NOTE(review): the fold drops the short sign-extension of bit 15;
    // it looks correct only when bit 15 of (src1 | imm) is known clear --
    // confirm the intended value ranges upstream.
    __ ori(dst, src, val);
  %}
  ins_pipe(ialu_regI_regI);
%}
// Logical Shift Right by 16, followed by Arithmetic Shift Left by 16.
// This idiom is used by the compiler for the i2s bytecode.
instruct i2s(mRegI dst, mRegI src, immI_16 sixteen)
%{
  match(Set dst (RShiftI (LShiftI src sixteen) sixteen));

  format %{ "i2s $dst, $src\t# @i2s" %}
  ins_encode %{
    // seh (sign-extend halfword) replaces the shift pair in one instruction.
    __ seh($dst$$Register, $src$$Register);
  %}
  ins_pipe(ialu_regI_regI);
%}
// Logical Shift Right by 24, followed by Arithmetic Shift Left by 24.
// This idiom is used by the compiler for the i2b bytecode.
instruct i2b(mRegI dst, mRegI src, immI_24 twentyfour)
%{
  match(Set dst (RShiftI (LShiftI src twentyfour) twentyfour));

  format %{ "i2b $dst, $src\t# @i2b" %}
  ins_encode %{
    // seb (sign-extend byte) replaces the shift pair in one instruction.
    __ seb($dst$$Register, $src$$Register);
  %}
  ins_pipe(ialu_regI_regI);
%}
// Duplicate match rule of salL2I_Reg_imm; emits the same single sll.
instruct salI_RegL2I_imm(mRegI dst, mRegL src, immI8 shift) %{
  match(Set dst (LShiftI (ConvL2I src) shift));

  format %{ "SHL $dst, $src, $shift #@salI_RegL2I_imm" %}
  ins_encode %{
    Register src = $src$$Register;
    Register dst = $dst$$Register;
    int shamt = $shift$$constant;

    // sll works on the low 32 bits and sign-extends, subsuming ConvL2I.
    __ sll(dst, src, shamt);
  %}
  ins_pipe( ialu_regI_regI );
%}
// Shift Left by register amount
instruct salI_Reg_Reg(mRegI dst, mRegI src, mRegI shift) %{
  match(Set dst (LShiftI src shift));

  format %{ "SHL $dst, $src, $shift #@salI_Reg_Reg" %}
  ins_encode %{
    // sllv uses the low 5 bits of the count, matching Java int shifts.
    __ sllv($dst$$Register, $src$$Register, $shift$$Register);
  %}
  ins_pipe( ialu_regI_regI );
%}
// Shift Left Long
instruct salL_Reg_imm(mRegL dst, mRegL src, immI8 shift) %{
  //predicate(UseNewLongLShift);
  match(Set dst (LShiftL src shift));
  ins_cost(100);
  format %{ "salL $dst, $src, $shift @ salL_Reg_imm" %}
  ins_encode %{
    Register src_reg = as_Register($src$$reg);
    Register dst_reg = as_Register($dst$$reg);
    int shamt = $shift$$constant;

    // dsll's immediate field is 5 bits. Larger counts are reduced mod 64
    // (Java long shift semantics) and 32..63 use the dsll32 encoding.
    if (__ is_simm(shamt, 5))
      __ dsll(dst_reg, src_reg, shamt);
    else
    {
      int sa = Assembler::low(shamt, 6);
      if (sa < 32) {
        __ dsll(dst_reg, src_reg, sa);
      } else {
        __ dsll32(dst_reg, src_reg, sa - 32);
      }
    }
  %}
  ins_pipe( ialu_regL_regL );
%}
instruct salL_RegI2L_imm(mRegL dst, mRegI src, immI8 shift) %{
  //predicate(UseNewLongLShift);
  match(Set dst (LShiftL (ConvI2L src) shift));
  ins_cost(100);
  format %{ "salL $dst, $src, $shift @ salL_RegI2L_imm" %}
  ins_encode %{
    Register src_reg = as_Register($src$$reg);
    Register dst_reg = as_Register($dst$$reg);
    int shamt = $shift$$constant;

    // ConvI2L needs no code here: ints are held sign-extended in 64-bit
    // registers (see convI2L_reg). Counts >= 16 take the dsll/dsll32 split.
    if (__ is_simm(shamt, 5))
      __ dsll(dst_reg, src_reg, shamt);
    else
    {
      int sa = Assembler::low(shamt, 6);
      if (sa < 32) {
        __ dsll(dst_reg, src_reg, sa);
      } else {
        __ dsll32(dst_reg, src_reg, sa - 32);
      }
    }
  %}
  ins_pipe( ialu_regL_regL );
%}
// Shift Left Long by register amount
instruct salL_Reg_Reg(mRegL dst, mRegL src, mRegI shift) %{
  //predicate(UseNewLongLShift);
  match(Set dst (LShiftL src shift));
  ins_cost(100);
  format %{ "salL $dst, $src, $shift @ salL_Reg_Reg" %}
  ins_encode %{
    // dsllv uses the low 6 bits of the count (Java long shift semantics).
    __ dsllv($dst$$Register, $src$$Register, $shift$$Register);
  %}
  ins_pipe( ialu_regL_regL );
%}
// Duplicate match rule of salL_RegI2L_imm (braced control flow); the
// matcher may select either, they emit identical code.
instruct salL_convI2L_Reg_imm(mRegL dst, mRegI src, immI8 shift) %{
  match(Set dst (LShiftL (ConvI2L src) shift));
  ins_cost(100);
  format %{ "salL $dst, $src, $shift @ salL_convI2L_Reg_imm" %}
  ins_encode %{
    Register src_reg = as_Register($src$$reg);
    Register dst_reg = as_Register($dst$$reg);
    int shamt = $shift$$constant;

    // Counts 32..63 need the dsll32 encoding (immediate field is 5 bits).
    if (__ is_simm(shamt, 5)) {
      __ dsll(dst_reg, src_reg, shamt);
    } else {
      int sa = Assembler::low(shamt, 6);
      if (sa < 32) {
        __ dsll(dst_reg, src_reg, sa);
      } else {
        __ dsll32(dst_reg, src_reg, sa - 32);
      }
    }
  %}
  ins_pipe( ialu_regL_regL );
%}
// Shift Right Long
instruct sarL_Reg_imm(mRegL dst, mRegL src, immI8 shift) %{
  match(Set dst (RShiftL src shift));
  ins_cost(100);
  format %{ "sarL $dst, $src, $shift @ sarL_Reg_imm" %}
  ins_encode %{
    Register src_reg = as_Register($src$$reg);
    Register dst_reg = as_Register($dst$$reg);
    // Java long shifts use the count mod 64.
    int shamt = ($shift$$constant & 0x3f);
    if (__ is_simm(shamt, 5))
      __ dsra(dst_reg, src_reg, shamt);
    else {
      int sa = Assembler::low(shamt, 6);
      // dsra's immediate holds 0..31; dsra32 covers counts 32..63.
      if (sa < 32) {
        __ dsra(dst_reg, src_reg, sa);
      } else {
        __ dsra32(dst_reg, src_reg, sa - 32);
      }
    }
  %}
  ins_pipe( ialu_regL_regL );
%}
instruct sarL2I_Reg_immI_32_63(mRegI dst, mRegL src, immI_32_63 shift) %{
  match(Set dst (ConvL2I (RShiftL src shift)));
  ins_cost(100);
  format %{ "sarL $dst, $src, $shift @ sarL2I_Reg_immI_32_63" %}
  ins_encode %{
    Register src_reg = as_Register($src$$reg);
    Register dst_reg = as_Register($dst$$reg);
    int shamt = $shift$$constant;

    // An arithmetic shift by 32..63 already leaves a sign-extended 32-bit
    // value in the register, so the ConvL2I costs nothing extra.
    __ dsra32(dst_reg, src_reg, shamt - 32);
  %}
  ins_pipe( ialu_regL_regL );
%}
// Shift Right Long arithmetically
instruct sarL_Reg_Reg(mRegL dst, mRegL src, mRegI shift) %{
  //predicate(UseNewLongLShift);
  match(Set dst (RShiftL src shift));
  ins_cost(100);
  format %{ "sarL $dst, $src, $shift @ sarL_Reg_Reg" %}
  ins_encode %{
    // dsrav uses the low 6 bits of the count (Java long shift semantics).
    __ dsrav($dst$$Register, $src$$Register, $shift$$Register);
  %}
  ins_pipe( ialu_regL_regL );
%}
// Shift Right Long logically
instruct slrL_Reg_Reg(mRegL dst, mRegL src, mRegI shift) %{
  match(Set dst (URShiftL src shift));
  ins_cost(100);
  format %{ "slrL $dst, $src, $shift @ slrL_Reg_Reg" %}
  ins_encode %{
    // dsrlv uses the low 6 bits of the count (Java long shift semantics).
    __ dsrlv($dst$$Register, $src$$Register, $shift$$Register);
  %}
  ins_pipe( ialu_regL_regL );
%}
instruct slrL_Reg_immI_0_31(mRegL dst, mRegL src, immI_0_31 shift) %{
  match(Set dst (URShiftL src shift));
  ins_cost(80);
  format %{ "slrL $dst, $src, $shift @ slrL_Reg_immI_0_31" %}
  ins_encode %{
    // The count is statically 0..31, so a single dsrl suffices.
    __ dsrl($dst$$Register, $src$$Register, $shift$$constant);
  %}
  ins_pipe( ialu_regL_regL );
%}
instruct slrL_Reg_immI_0_31_and_max_int(mRegI dst, mRegL src, immI_0_31 shift, immI_MaxI max_int) %{
  match(Set dst (AndI (ConvL2I (URShiftL src shift)) max_int));
  ins_cost(80);
  format %{ "dext $dst, $src, $shift, 31 @ slrL_Reg_immI_0_31_and_max_int" %}
  ins_encode %{
    Register src_reg = as_Register($src$$reg);
    Register dst_reg = as_Register($dst$$reg);
    int shamt = $shift$$constant;

    // ((src >>> shift) & 0x7fffffff) is a 31-bit field extract; the result
    // is non-negative, so dext's zero-extension matches int semantics.
    __ dext(dst_reg, src_reg, shamt, 31);
  %}
  ins_pipe( ialu_regL_regL );
%}
instruct slrL_P2XReg_immI_0_31(mRegL dst, mRegP src, immI_0_31 shift) %{
  match(Set dst (URShiftL (CastP2X src) shift));
  ins_cost(80);
  format %{ "slrL $dst, $src, $shift @ slrL_P2XReg_immI_0_31" %}
  ins_encode %{
    // CastP2X is free; shift the raw pointer bits logically.
    __ dsrl($dst$$Register, $src$$Register, $shift$$constant);
  %}
  ins_pipe( ialu_regL_regL );
%}
instruct slrL_Reg_immI_32_63(mRegL dst, mRegL src, immI_32_63 shift) %{
  match(Set dst (URShiftL src shift));
  ins_cost(80);
  format %{ "slrL $dst, $src, $shift @ slrL_Reg_immI_32_63" %}
  ins_encode %{
    // Counts 32..63 need the dsrl32 encoding (immediate field is 5 bits).
    __ dsrl32($dst$$Register, $src$$Register, $shift$$constant - 32);
  %}
  ins_pipe( ialu_regL_regL );
%}
instruct slrL_Reg_immI_convL2I(mRegI dst, mRegL src, immI_32_63 shift) %{
  match(Set dst (ConvL2I (URShiftL src shift)));
  // Only counts > 32: the result then fits in 31 bits, so the logical
  // shift's zero-extension coincides with int sign-extension. At exactly
  // 32, bit 31 could be set and dsrl32 would not sign-extend it.
  predicate(n->in(1)->in(2)->get_int() > 32);
  ins_cost(80);
  format %{ "slrL $dst, $src, $shift @ slrL_Reg_immI_convL2I" %}
  ins_encode %{
    Register src_reg = as_Register($src$$reg);
    Register dst_reg = as_Register($dst$$reg);
    int shamt = $shift$$constant;

    __ dsrl32(dst_reg, src_reg, shamt - 32);
  %}
  ins_pipe( ialu_regL_regL );
%}
instruct slrL_P2XReg_immI_32_63(mRegL dst, mRegP src, immI_32_63 shift) %{
  match(Set dst (URShiftL (CastP2X src) shift));
  ins_cost(80);
  format %{ "slrL $dst, $src, $shift @ slrL_P2XReg_immI_32_63" %}
  ins_encode %{
    // CastP2X is free; counts 32..63 use the dsrl32 encoding.
    __ dsrl32($dst$$Register, $src$$Register, $shift$$constant - 32);
  %}
  ins_pipe( ialu_regL_regL );
%}
// Xor Instructions
// Xor Register with Register
instruct xorI_Reg_Reg(mRegI dst, mRegI src1, mRegI src2) %{
  match(Set dst (XorI src1 src2));

  format %{ "XOR $dst, $src1, $src2 #@xorI_Reg_Reg" %}

  ins_encode %{
    Register dst = $dst$$Register;
    Register src1 = $src1$$Register;
    Register src2 = $src2$$Register;
    __ xorr(dst, src1, src2);
    // Re-canonicalize to a sign-extended 32-bit value (sll with shamt 0).
    __ sll(dst, dst, 0); /* long -> int */
  %}

  ins_pipe( ialu_regI_regI );
%}
// Or Instructions
// Or Register with Register
instruct orI_Reg_Reg(mRegI dst, mRegI src1, mRegI src2) %{
  match(Set dst (OrI src1 src2));

  format %{ "OR $dst, $src1, $src2 #@orI_Reg_Reg" %}
  ins_encode %{
    // OR of two sign-extended ints stays sign-extended; no fix-up needed.
    __ orr($dst$$Register, $src1$$Register, $src2$$Register);
  %}

  ins_pipe( ialu_regI_regI );
%}
instruct rotI_shr_logical_Reg(mRegI dst, mRegI src, immI_0_31 rshift, immI_0_31 lshift, immI_1 one) %{
  match(Set dst (OrI (URShiftI src rshift) (LShiftI (AndI src one) lshift)));
  // Matches only when rshift + lshift == 32, i.e. bit 0 ends up just above
  // the logically shifted remainder.
  predicate(32 == ((n->in(1)->in(2)->get_int() + n->in(2)->in(2)->get_int())));

  format %{ "rotr $dst, $src, 1 ...\n\t"
            "srl $dst, $dst, ($rshift-1) @ rotI_shr_logical_Reg" %}
  ins_encode %{
    Register dst = $dst$$Register;
    Register src = $src$$Register;
    int rshift = $rshift$$constant;

    // Rotate right by 1 moves bit 0 to bit 31, then a logical shift by
    // rshift - 1 positions the rest; the srl is skipped when rshift == 1.
    __ rotr(dst, src, 1);
    if (rshift - 1) {
      __ srl(dst, dst, rshift - 1);
    }
  %}

  ins_pipe( ialu_regI_regI );
%}
// NOTE(review): despite the OrI node, the operands here are long/pointer
// registers -- presumably matching pointer-bit arithmetic; confirm intent.
instruct orI_Reg_castP2X(mRegL dst, mRegL src1, mRegP src2) %{
  match(Set dst (OrI src1 (CastP2X src2)));

  format %{ "OR $dst, $src1, $src2 #@orI_Reg_castP2X" %}
  ins_encode %{
    Register dst = $dst$$Register;
    Register src1 = $src1$$Register;
    Register src2 = $src2$$Register;
    // CastP2X is free: the raw pointer bits are or'ed directly.
    __ orr(dst, src1, src2);
  %}

  ins_pipe( ialu_regI_regI );
%}
// Logical Shift Right by 8-bit immediate
instruct shr_logical_Reg_imm(mRegI dst, mRegI src, immI8 shift) %{
  match(Set dst (URShiftI src shift));

  format %{ "SRL $dst, $src, $shift #@shr_logical_Reg_imm" %}
  ins_encode %{
    // 32-bit logical right shift by a constant count.
    __ srl($dst$$Register, $src$$Register, $shift$$constant);
  %}
  ins_pipe( ialu_regI_regI );
%}
instruct shr_logical_Reg_imm_nonneg_mask(mRegI dst, mRegI src, immI_0_31 shift, immI_nonneg_mask mask) %{
  match(Set dst (AndI (URShiftI src shift) mask));

  format %{ "ext $dst, $src, $shift, one-bits($mask) #@shr_logical_Reg_imm_nonneg_mask" %}
  ins_encode %{
    Register src = $src$$Register;
    Register dst = $dst$$Register;
    int pos = $shift$$constant;
    // mask is a contiguous low-bit mask; its popcount is the field width.
    int size = Assembler::is_int_mask($mask$$constant);

    // (src >>> pos) & mask == extract 'size' bits starting at 'pos'.
    __ ext(dst, src, pos, size);
  %}
  ins_pipe( ialu_regI_regI );
%}
instruct rolI_Reg_immI_0_31(mRegI dst, immI_0_31 lshift, immI_0_31 rshift)
%{
  // Rotate-left by lshift == rotate-right by rshift when they sum to 32.
  predicate(0 == ((n->in(1)->in(2)->get_int() + n->in(2)->in(2)->get_int()) & 0x1f));
  match(Set dst (OrI (LShiftI dst lshift) (URShiftI dst rshift)));

  ins_cost(100);
  format %{ "rotr $dst, $dst, $rshift #@rolI_Reg_immI_0_31" %}
  ins_encode %{
    Register dst = $dst$$Register;
    int sa = $rshift$$constant;

    __ rotr(dst, dst, sa);
  %}
  ins_pipe( ialu_regI_regI );
%}
instruct rolL_Reg_immI_0_31(mRegL dst, immI_32_63 lshift, immI_0_31 rshift)
%{
  // 64-bit rotate-left by lshift == rotate-right by rshift (sum == 64).
  predicate(0 == ((n->in(1)->in(2)->get_int() + n->in(2)->in(2)->get_int()) & 0x3f));
  match(Set dst (OrL (LShiftL dst lshift) (URShiftL dst rshift)));

  ins_cost(100);
  format %{ "rotr $dst, $dst, $rshift #@rolL_Reg_immI_0_31" %}
  ins_encode %{
    Register dst = $dst$$Register;
    int sa = $rshift$$constant;

    // rshift is 0..31, so the plain drotr encoding suffices.
    __ drotr(dst, dst, sa);
  %}
  ins_pipe( ialu_regI_regI );
%}
instruct rolL_Reg_immI_32_63(mRegL dst, immI_0_31 lshift, immI_32_63 rshift)
%{
  // 64-bit rotate-left by lshift == rotate-right by rshift (sum == 64).
  predicate(0 == ((n->in(1)->in(2)->get_int() + n->in(2)->in(2)->get_int()) & 0x3f));
  match(Set dst (OrL (LShiftL dst lshift) (URShiftL dst rshift)));

  ins_cost(100);
  format %{ "rotr $dst, $dst, $rshift #@rolL_Reg_immI_32_63" %}
  ins_encode %{
    Register dst = $dst$$Register;
    int sa = $rshift$$constant;

    // rshift is 32..63, so the drotr32 encoding carries the excess.
    __ drotr32(dst, dst, sa - 32);
  %}
  ins_pipe( ialu_regI_regI );
%}
instruct rorI_Reg_immI_0_31(mRegI dst, immI_0_31 rshift, immI_0_31 lshift)
%{
  // Same rotate identity as rolI, with the Or operands in the other order.
  predicate(0 == ((n->in(1)->in(2)->get_int() + n->in(2)->in(2)->get_int()) & 0x1f));
  match(Set dst (OrI (URShiftI dst rshift) (LShiftI dst lshift)));

  ins_cost(100);
  format %{ "rotr $dst, $dst, $rshift #@rorI_Reg_immI_0_31" %}
  ins_encode %{
    Register dst = $dst$$Register;
    int sa = $rshift$$constant;

    __ rotr(dst, dst, sa);
  %}
  ins_pipe( ialu_regI_regI );
%}
instruct rorL_Reg_immI_0_31(mRegL dst, immI_0_31 rshift, immI_32_63 lshift)
%{
  // 64-bit rotate-right; operand order of the Or is swapped vs rolL.
  predicate(0 == ((n->in(1)->in(2)->get_int() + n->in(2)->in(2)->get_int()) & 0x3f));
  match(Set dst (OrL (URShiftL dst rshift) (LShiftL dst lshift)));

  ins_cost(100);
  format %{ "rotr $dst, $dst, $rshift #@rorL_Reg_immI_0_31" %}
  ins_encode %{
    Register dst = $dst$$Register;
    int sa = $rshift$$constant;

    // rshift is 0..31, so the plain drotr encoding suffices.
    __ drotr(dst, dst, sa);
  %}
  ins_pipe( ialu_regI_regI );
%}
instruct rorL_Reg_immI_32_63(mRegL dst, immI_32_63 rshift, immI_0_31 lshift)
%{
  // 64-bit rotate-right with a count of 32..63 (drotr32 encoding).
  predicate(0 == ((n->in(1)->in(2)->get_int() + n->in(2)->in(2)->get_int()) & 0x3f));
  match(Set dst (OrL (URShiftL dst rshift) (LShiftL dst lshift)));

  ins_cost(100);
  format %{ "rotr $dst, $dst, $rshift #@rorL_Reg_immI_32_63" %}
  ins_encode %{
    Register dst = $dst$$Register;
    int sa = $rshift$$constant;

    __ drotr32(dst, dst, sa - 32);
  %}
  ins_pipe( ialu_regI_regI );
%}
// Logical Shift Right
instruct shr_logical_Reg_Reg(mRegI dst, mRegI src, mRegI shift) %{
  match(Set dst (URShiftI src shift));

  format %{ "SRL $dst, $src, $shift #@shr_logical_Reg_Reg" %}
  ins_encode %{
    // srlv uses the low 5 bits of the count, matching Java int shifts.
    __ srlv($dst$$Register, $src$$Register, $shift$$Register);
  %}
  ins_pipe( ialu_regI_regI );
%}
instruct shr_arith_Reg_imm(mRegI dst, mRegI src, immI8 shift) %{
  match(Set dst (RShiftI src shift));

  format %{ "SRA $dst, $src, $shift #@shr_arith_Reg_imm" %}
  ins_encode %{
    // 32-bit arithmetic right shift by a constant count.
    __ sra($dst$$Register, $src$$Register, $shift$$constant);
  %}
  ins_pipe( ialu_regI_regI );
%}
instruct shr_arith_Reg_Reg(mRegI dst, mRegI src, mRegI shift) %{
  match(Set dst (RShiftI src shift));

  format %{ "SRA $dst, $src, $shift #@shr_arith_Reg_Reg" %}
  ins_encode %{
    // srav uses the low 5 bits of the count, matching Java int shifts.
    __ srav($dst$$Register, $src$$Register, $shift$$Register);
  %}
  ins_pipe( ialu_regI_regI );
%}
//----------Convert Int to Boolean---------------------------------------------

instruct convI2B(mRegI dst, mRegI src) %{
  match(Set dst (Conv2B src));

  ins_cost(100);
  format %{ "convI2B $dst, $src @ convI2B" %}
  ins_encode %{
    Register dst = as_Register($dst$$reg);
    Register src = as_Register($src$$reg);

    // dst = (src != 0) ? 1 : 0, via movz (conditional move if zero).
    if (dst != src) {
      __ daddiu(dst, R0, 1);
      __ movz(dst, R0, src);
    } else {
      // dst aliases src: stash the source in AT before clobbering dst.
      __ move(AT, src);
      __ daddiu(dst, R0, 1);
      __ movz(dst, R0, AT);
    }
  %}

  ins_pipe( ialu_regL_regL );
%}
instruct convI2L_reg( mRegL dst, mRegI src) %{
  match(Set dst (ConvI2L src));

  ins_cost(100);
  format %{ "SLL $dst, $src @ convI2L_reg\t" %}
  ins_encode %{
    Register dst = as_Register($dst$$reg);
    Register src = as_Register($src$$reg);

    // sll with shamt 0 sign-extends the low 32 bits. When dst == src no
    // code is needed: int values are kept sign-extended in this port.
    if(dst != src) __ sll(dst, src, 0);
  %}
  ins_pipe( ialu_regL_regL );
%}
instruct convL2I_reg( mRegI dst, mRegL src ) %{
  match(Set dst (ConvL2I src));

  format %{ "MOV $dst, $src @ convL2I_reg" %}
  ins_encode %{
    // Truncate to 32 bits and sign-extend: sll with a zero shift amount.
    __ sll($dst$$Register, $src$$Register, 0);
  %}

  ins_pipe( ialu_regI_regI );
%}
instruct convL2I2L_reg( mRegL dst, mRegL src ) %{
  match(Set dst (ConvI2L (ConvL2I src)));

  format %{ "sll $dst, $src, 0 @ convL2I2L_reg" %}
  ins_encode %{
    // L2I followed by I2L collapses to one sign-extending truncation.
    __ sll($dst$$Register, $src$$Register, 0);
  %}

  ins_pipe( ialu_regI_regI );
%}
instruct convL2D_reg( regD dst, mRegL src ) %{
  match(Set dst (ConvL2D src));
  format %{ "convL2D $dst, $src @ convL2D_reg" %}
  ins_encode %{
    // Move the 64-bit integer into the FPU, then convert long -> double.
    __ dmtc1($src$$Register, $dst$$FloatRegister);
    __ cvt_d_l($dst$$FloatRegister, $dst$$FloatRegister);
  %}

  ins_pipe( pipe_slow );
%}
instruct convD2L_reg_fast( mRegL dst, regD src ) %{
  match(Set dst (ConvD2L src));
  ins_cost(150);
  format %{ "convD2L $dst, $src @ convD2L_reg_fast" %}
  ins_encode %{
    Register dst = as_Register($dst$$reg);
    FloatRegister src = as_FloatRegister($src$$reg);

    Label Done;

    // Truncate toward zero; out-of-range/NaN inputs yield max_long.
    __ trunc_l_d(F30, src);
    // max_long: 0x7fffffffffffffff
    // __ set64(AT, 0x7fffffffffffffff);
    __ daddiu(AT, R0, -1);
    __ dsrl(AT, AT, 1);
    __ dmfc1(dst, F30);

    // Fast path: any result other than max_long is already correct.
    __ bne(dst, AT, Done);
    __ delayed()->mtc1(R0, F30);

    // Saturated: compare src against 0.0 to pick min_long vs max_long.
    __ cvt_d_w(F30, F30);
    __ c_ult_d(src, F30);
    __ bc1f(Done);
    __ delayed()->daddiu(T9, R0, -1);

    // src < 0: dst = -1 - max_long == min_long; NaN (unordered) -> 0.
    __ c_un_d(src, src); //NaN?
    __ subu(dst, T9, AT);
    __ movt(dst, R0);

    __ bind(Done);
  %}

  ins_pipe( pipe_slow );
%}
instruct convD2L_reg_slow( mRegL dst, regD src ) %{
  match(Set dst (ConvD2L src));
  ins_cost(250);
  format %{ "convD2L $dst, $src @ convD2L_reg_slow" %}
  ins_encode %{
    Register dst = as_Register($dst$$reg);
    FloatRegister src = as_FloatRegister($src$$reg);

    Label L;

    // NaN converts to 0 (Java semantics); the move sits in the delay slot.
    __ c_un_d(src, src); //NaN?
    __ bc1t(L);
    __ delayed();
    __ move(dst, R0);

    __ trunc_l_d(F30, src);
    // FCSR bit 16 (invalid-operation cause) set => result is unreliable;
    // fall back to the SharedRuntime::d2l helper.
    __ cfc1(AT, 31);
    __ li(T9, 0x10000);
    __ andr(AT, AT, T9);
    __ beq(AT, R0, L);
    __ delayed()->dmfc1(dst, F30);

    __ mov_d(F12, src);
    __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::d2l), 1);
    __ move(dst, V0);
    __ bind(L);
  %}

  ins_pipe( pipe_slow );
%}
instruct convF2I_reg_fast( mRegI dst, regF src ) %{
  match(Set dst (ConvF2I src));
  ins_cost(150);
  format %{ "convf2i $dst, $src @ convF2I_reg_fast" %}
  ins_encode %{
    Register dreg = $dst$$Register;
    FloatRegister fval = $src$$FloatRegister;
    Label L;

    // Truncate toward zero; out-of-range/NaN inputs yield max_int.
    __ trunc_w_s(F30, fval);
    __ move(AT, 0x7fffffff);
    __ mfc1(dreg, F30);
    // NaN converts to 0 (Java semantics).
    __ c_un_s(fval, fval); //NaN?
    __ movt(dreg, R0);

    // Fast path: anything but max_int is already correct.
    __ bne(AT, dreg, L);
    __ delayed()->lui(T9, 0x8000);

    // Saturated: the sign bit of the source selects min_int (0x80000000).
    __ mfc1(AT, fval);
    __ andr(AT, AT, T9);

    __ movn(dreg, T9, AT);

    __ bind(L);

  %}

  ins_pipe( pipe_slow );
%}
instruct convF2I_reg_slow( mRegI dst, regF src ) %{
  match(Set dst (ConvF2I src));
  ins_cost(250);
  format %{ "convf2i $dst, $src @ convF2I_reg_slow" %}
  ins_encode %{
    Register dreg = $dst$$Register;
    FloatRegister fval = $src$$FloatRegister;
    Label L;

    // NaN converts to 0 (Java semantics); the move sits in the delay slot.
    __ c_un_s(fval, fval); //NaN?
    __ bc1t(L);
    __ delayed();
    __ move(dreg, R0);

    __ trunc_w_s(F30, fval);

    /* Call SharedRuntime:f2i() to do valid convention */
    // FCSR bit 16 (invalid-operation cause) set => fall back to runtime.
    __ cfc1(AT, 31);
    __ li(T9, 0x10000);
    __ andr(AT, AT, T9);
    __ beq(AT, R0, L);
    __ delayed()->mfc1(dreg, F30);

    __ mov_s(F12, fval);

    /* 2014/01/08 Fu : This bug was found when running ezDS's control-panel.
     *    J 982 C2 javax.swing.text.BoxView.layoutMajorAxis(II[I[I)V (283 bytes) @ 0x000000555c46aa74
     *
     * An interger array index has been assigned to V0, and then changed from 1 to Integer.MAX_VALUE.
     *    V0 is corrupted during call_VM_leaf(), and should be preserved.
     */
    __ push(fval);
    if(dreg != V0) {
      __ push(V0);
    }
    __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::f2i), 1);
    if(dreg != V0) {
      __ move(dreg, V0);
      __ pop(V0);
    }
    __ pop(fval);
    __ bind(L);
  %}

  ins_pipe( pipe_slow );
%}
instruct convF2L_reg_fast( mRegL dst, regF src ) %{
  match(Set dst (ConvF2L src));
  ins_cost(150);
  format %{ "convf2l $dst, $src @ convF2L_reg_fast" %}
  ins_encode %{
    Register dreg = $dst$$Register;
    FloatRegister fval = $src$$FloatRegister;
    Label L;

    // Truncate toward zero; out-of-range/NaN inputs yield max_long.
    __ trunc_l_s(F30, fval);
    // Build max_long (0x7fffffffffffffff) in AT.
    __ daddiu(AT, R0, -1);
    __ dsrl(AT, AT, 1);
    __ dmfc1(dreg, F30);
    // NaN converts to 0 (Java semantics).
    __ c_un_s(fval, fval); //NaN?
    __ movt(dreg, R0);

    // Fast path: anything but max_long is already correct.
    __ bne(AT, dreg, L);
    __ delayed()->lui(T9, 0x8000);

    // Saturated: a negative source selects min_long (0x8000... << 32).
    __ mfc1(AT, fval);
    __ andr(AT, AT, T9);

    __ dsll32(T9, T9, 0);
    __ movn(dreg, T9, AT);

    __ bind(L);
  %}

  ins_pipe( pipe_slow );
%}
instruct convF2L_reg_slow( mRegL dst, regF src ) %{
  match(Set dst (ConvF2L src));
  ins_cost(250);
  format %{ "convf2l $dst, $src @ convF2L_reg_slow" %}
  ins_encode %{
    Register dst = as_Register($dst$$reg);
    FloatRegister fval = $src$$FloatRegister;
    Label L;

    // NaN converts to 0 (Java semantics); the move sits in the delay slot.
    __ c_un_s(fval, fval); //NaN?
    __ bc1t(L);
    __ delayed();
    __ move(dst, R0);

    __ trunc_l_s(F30, fval);
    // FCSR bit 16 (invalid-operation cause) set => fall back to runtime.
    __ cfc1(AT, 31);
    __ li(T9, 0x10000);
    __ andr(AT, AT, T9);
    __ beq(AT, R0, L);
    __ delayed()->dmfc1(dst, F30);

    __ mov_s(F12, fval);
    __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::f2l), 1);
    __ move(dst, V0);
    __ bind(L);
  %}

  ins_pipe( pipe_slow );
%}
// Convert long to float: move the integer into the FPU and convert.
instruct convL2F_reg( regF dst, mRegL src ) %{
  match(Set dst (ConvL2F src));
  format %{ "convl2f $dst, $src @ convL2F_reg" %}
  ins_encode %{
    FloatRegister dst = $dst$$FloatRegister;
    Register src = as_Register($src$$reg);
    // (The previous version declared an unused 'Label L;' here - removed.)

    __ dmtc1(src, dst);
    __ cvt_s_l(dst, dst);
  %}

  ins_pipe( pipe_slow );
%}
instruct convI2F_reg( regF dst, mRegI src ) %{
  match(Set dst (ConvI2F src));
  format %{ "convi2f $dst, $src @ convI2F_reg" %}
  ins_encode %{
    // Move the int into the FPU, then convert word -> single.
    __ mtc1($src$$Register, $dst$$FloatRegister);
    __ cvt_s_w($dst$$FloatRegister, $dst$$FloatRegister);
  %}

  ins_pipe( fpu_regF_regF );
%}
instruct cmpLTMask_immI0( mRegI dst, mRegI p, immI0 zero ) %{
  match(Set dst (CmpLTMask p zero));
  ins_cost(100);

  format %{ "sra $dst, $p, 31 @ cmpLTMask_immI0" %}
  ins_encode %{
    // dst = (p < 0) ? -1 : 0 -- broadcast the sign bit across the word.
    __ sra($dst$$Register, $p$$Register, 31);
  %}
  ins_pipe( pipe_slow );
%}
instruct cmpLTMask( mRegI dst, mRegI p, mRegI q ) %{
  match(Set dst (CmpLTMask p q));
  ins_cost(400);

  format %{ "cmpLTMask $dst, $p, $q @ cmpLTMask" %}
  ins_encode %{
    // slt yields 0/1; negating turns that into the 0/-1 mask C2 expects.
    __ slt($dst$$Register, $p$$Register, $q$$Register);
    __ subu($dst$$Register, R0, $dst$$Register);
  %}
  ins_pipe( pipe_slow );
%}
instruct convP2B(mRegI dst, mRegP src) %{
  match(Set dst (Conv2B src));

  ins_cost(100);
  format %{ "convP2B $dst, $src @ convP2B" %}
  ins_encode %{
    Register dst = as_Register($dst$$reg);
    Register src = as_Register($src$$reg);

    // dst = (src != NULL) ? 1 : 0, via movz (conditional move if zero).
    if (dst != src) {
      __ daddiu(dst, R0, 1);
      __ movz(dst, R0, src);
    } else {
      // dst aliases src: stash the source in AT before clobbering dst.
      __ move(AT, src);
      __ daddiu(dst, R0, 1);
      __ movz(dst, R0, AT);
    }
  %}

  ins_pipe( ialu_regL_regL );
%}
instruct convI2D_reg_reg(regD dst, mRegI src) %{
  match(Set dst (ConvI2D src));
  format %{ "conI2D $dst, $src @convI2D_reg" %}
  ins_encode %{
    // Move the int into the FPU, then convert word -> double.
    __ mtc1($src$$Register, $dst$$FloatRegister);
    __ cvt_d_w($dst$$FloatRegister, $dst$$FloatRegister);
  %}
  ins_pipe( fpu_regF_regF );
%}
instruct convF2D_reg_reg(regD dst, regF src) %{
  match(Set dst (ConvF2D src));
  format %{ "convF2D $dst, $src\t# @convF2D_reg_reg" %}
  ins_encode %{
    // Widen single -> double in one FPU instruction.
    __ cvt_d_s($dst$$FloatRegister, $src$$FloatRegister);
  %}
  ins_pipe( fpu_regF_regF );
%}
instruct convD2F_reg_reg(regF dst, regD src) %{
  match(Set dst (ConvD2F src));
  format %{ "convD2F $dst, $src\t# @convD2F_reg_reg" %}
  ins_encode %{
    // Narrow double -> single in one FPU instruction.
    __ cvt_s_d($dst$$FloatRegister, $src$$FloatRegister);
  %}
  ins_pipe( fpu_regF_regF );
%}
// Convert a double to an int. If the double is a NAN, stuff a zero in instead.
instruct convD2I_reg_reg_fast( mRegI dst, regD src ) %{
  match(Set dst (ConvD2I src));

  ins_cost(150);
  format %{ "convD2I $dst, $src\t# @ convD2I_reg_reg_fast" %}

  ins_encode %{
    FloatRegister src = $src$$FloatRegister;
    Register dst = $dst$$Register;

    Label Done;

    // Truncate toward zero; out-of-range/NaN inputs yield max_int.
    __ trunc_w_d(F30, src);
    // max_int: 2147483647
    __ move(AT, 0x7fffffff);
    __ mfc1(dst, F30);

    // Fast path: any result other than max_int is already correct.
    __ bne(dst, AT, Done);
    __ delayed()->mtc1(R0, F30);

    // Saturated: compare src against 0.0 to pick min_int vs max_int.
    __ cvt_d_w(F30, F30);
    __ c_ult_d(src, F30);
    __ bc1f(Done);
    __ delayed()->addiu(T9, R0, -1);

    // src < 0: dst = -1 - max_int == min_int; NaN (unordered) -> 0.
    __ c_un_d(src, src); //NaN?
    __ subu32(dst, T9, AT);
    __ movt(dst, R0);

    __ bind(Done);
  %}
  ins_pipe( pipe_slow );
%}
instruct convD2I_reg_reg_slow( mRegI dst, regD src ) %{
  match(Set dst (ConvD2I src));

  ins_cost(250);
  format %{ "convD2I $dst, $src\t# @ convD2I_reg_reg_slow" %}

  ins_encode %{
    FloatRegister src = $src$$FloatRegister;
    Register dst = $dst$$Register;
    Label L;

    __ trunc_w_d(F30, src);
    // FCSR bit 16 (invalid-operation cause) set => result is unreliable;
    // fall back to the SharedRuntime::d2i helper.
    __ cfc1(AT, 31);
    __ li(T9, 0x10000);
    __ andr(AT, AT, T9);
    __ beq(AT, R0, L);
    __ delayed()->mfc1(dst, F30);

    __ mov_d(F12, src);
    __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::d2i), 1);
    __ move(dst, V0);
    __ bind(L);

  %}
  ins_pipe( pipe_slow );
%}
// Convert oop pointer into compressed form
instruct encodeHeapOop(mRegN dst, mRegP src) %{
  predicate(n->bottom_type()->make_ptr()->ptr() != TypePtr::NotNull);
  match(Set dst (EncodeP src));
  format %{ "encode_heap_oop $dst,$src" %}
  ins_encode %{
    // Null-tolerant variant: the macro handles a possibly-null src.
    __ encode_heap_oop($dst$$Register, $src$$Register);
  %}
  ins_pipe( ialu_regL_regL );
%}
12099 instruct encodeHeapOop_not_null(mRegN dst, mRegP src) %{
12100 predicate(n->bottom_type()->make_ptr()->ptr() == TypePtr::NotNull);
12101 match(Set dst (EncodeP src));
12102 format %{ "encode_heap_oop_not_null $dst,$src @ encodeHeapOop_not_null" %}
12103 ins_encode %{
12104 __ encode_heap_oop_not_null($dst$$Register, $src$$Register);
12105 %}
12106 ins_pipe( ialu_regL_regL );
12107 %}
12109 instruct decodeHeapOop(mRegP dst, mRegN src) %{
12110 predicate(n->bottom_type()->is_ptr()->ptr() != TypePtr::NotNull &&
12111 n->bottom_type()->is_ptr()->ptr() != TypePtr::Constant);
12112 match(Set dst (DecodeN src));
12113 format %{ "decode_heap_oop $dst,$src @ decodeHeapOop" %}
12114 ins_encode %{
12115 Register s = $src$$Register;
12116 Register d = $dst$$Register;
12118 __ decode_heap_oop(d, s);
12119 %}
12120 ins_pipe( ialu_regL_regL );
12121 %}
12123 instruct decodeHeapOop_not_null(mRegP dst, mRegN src) %{
12124 predicate(n->bottom_type()->is_ptr()->ptr() == TypePtr::NotNull ||
12125 n->bottom_type()->is_ptr()->ptr() == TypePtr::Constant);
12126 match(Set dst (DecodeN src));
12127 format %{ "decode_heap_oop_not_null $dst,$src @ decodeHeapOop_not_null" %}
12128 ins_encode %{
12129 Register s = $src$$Register;
12130 Register d = $dst$$Register;
12131 if (s != d) {
12132 __ decode_heap_oop_not_null(d, s);
12133 } else {
12134 __ decode_heap_oop_not_null(d);
12135 }
12136 %}
12137 ins_pipe( ialu_regL_regL );
12138 %}
// Compress a klass pointer (always non-null).
instruct encodeKlass_not_null(mRegN dst, mRegP src) %{
  match(Set dst (EncodePKlass src));
  // Fixed: format previously said "encode_heap_oop_not_null" (copy-paste
  // from the oop rule), which made PrintOptoAssembly output misleading.
  format %{ "encode_klass_not_null $dst,$src @ encodeKlass_not_null" %}
  ins_encode %{
    __ encode_klass_not_null($dst$$Register, $src$$Register);
  %}
  ins_pipe( ialu_regL_regL );
%}
// Expand a compressed klass pointer (always non-null).
instruct decodeKlass_not_null(mRegP dst, mRegN src) %{
  match(Set dst (DecodeNKlass src));
  format %{ "decode_heap_klass_not_null $dst,$src" %}
  ins_encode %{
    Register s = $src$$Register;
    Register d = $dst$$Register;
    // Cheaper in-place form when src and dst are the same register.
    if (s != d) {
      __ decode_klass_not_null(d, s);
    } else {
      __ decode_klass_not_null(d);
    }
  %}
  ins_pipe( ialu_regL_regL );
%}
//FIXME
// Materialize the current JavaThread* into dst.  With OPT_THREAD the
// thread pointer is pinned in register TREG, so this is a plain move;
// otherwise it falls back to the (slower) get_thread helper.
instruct tlsLoadP(mRegP dst) %{
  match(Set dst (ThreadLocal));

  ins_cost(0);
  format %{ " get_thread in $dst #@tlsLoadP" %}
  ins_encode %{
    Register dst = $dst$$Register;
#ifdef OPT_THREAD
    __ move(dst, TREG);
#else
    __ get_thread(dst);
#endif
  %}

  ins_pipe( ialu_loadI );
%}
// Type-system-only node: no code is emitted (dst is reused in place).
// Fixed the "chekCastPP" typo in the format string.
instruct checkCastPP( mRegP dst ) %{
  match(Set dst (CheckCastPP dst));

  format %{ "#checkcastPP of $dst (empty encoding) #@checkCastPP" %}
  ins_encode( /*empty encoding*/ );
  ins_pipe( empty );
%}
// Type-system-only cast of a pointer: zero-size, no code emitted.
instruct castPP(mRegP dst)
%{
  match(Set dst (CastPP dst));

  size(0);
  format %{ "# castPP of $dst" %}
  ins_encode(/* empty encoding */);
  ins_pipe(empty);
%}

// Type-system-only cast of an int: no code emitted.
instruct castII( mRegI dst ) %{
  match(Set dst (CastII dst));
  format %{ "#castII of $dst empty encoding" %}
  ins_encode( /*empty encoding*/ );
  ins_cost(0);
  ins_pipe( empty );
%}
// Return Instruction
// Remove the return address & jump to it.
instruct Ret() %{
  match(Return);
  format %{ "RET #@Ret" %}

  ins_encode %{
    // Jump through RA; the nop fills the branch delay slot.
    __ jr(RA);
    __ nop();
  %}

  ins_pipe( pipe_jump );
%}
12223 /*
12224 // For Loongson CPUs, jr seems too slow, so this rule shouldn't be imported.
12225 instruct jumpXtnd(mRegL switch_val) %{
12226 match(Jump switch_val);
12228 ins_cost(350);
12230 format %{ "load T9 <-- [$constanttablebase, $switch_val, $constantoffset] @ jumpXtnd\n\t"
12231 "jr T9\n\t"
12232 "nop" %}
12233 ins_encode %{
12234 Register table_base = $constanttablebase;
12235 int con_offset = $constantoffset;
12236 Register switch_reg = $switch_val$$Register;
12238 if (UseLoongsonISA) {
12239 if (Assembler::is_simm(con_offset, 8)) {
12240 __ gsldx(T9, table_base, switch_reg, con_offset);
12241 } else if (Assembler::is_simm16(con_offset)) {
12242 __ daddu(T9, table_base, switch_reg);
12243 __ ld(T9, T9, con_offset);
12244 } else {
12245 __ move(T9, con_offset);
12246 __ daddu(AT, table_base, switch_reg);
12247 __ gsldx(T9, AT, T9, 0);
12248 }
12249 } else {
12250 if (Assembler::is_simm16(con_offset)) {
12251 __ daddu(T9, table_base, switch_reg);
12252 __ ld(T9, T9, con_offset);
12253 } else {
12254 __ move(T9, con_offset);
12255 __ daddu(AT, table_base, switch_reg);
12256 __ daddu(AT, T9, AT);
12257 __ ld(T9, AT, 0);
12258 }
12259 }
12261 __ jr(T9);
12262 __ nop();
12264 %}
12265 ins_pipe(pipe_jump);
12266 %}
12267 */
// Jump Direct - Label defines a relative address from JMP
instruct jmpDir(label labl) %{
  match(Goto);
  effect(USE labl);

  ins_cost(300);
  format %{ "JMP $labl #@jmpDir" %}

  ins_encode %{
    Label &L = *($labl$$label);
    // A null label pointer means the target is not bound yet; emit the
    // branch with a zero offset to be patched later (pattern used
    // throughout this file).
    if(&L)
      __ b(L);
    else
      __ b(int(0));
    __ nop(); // delay slot
  %}

  ins_pipe( pipe_jump );
  ins_pc_relative(1);
%}
// Tail Jump; remove the return address; jump to target.
// TailCall above leaves the return address around.
// TailJump is used in only one place, the rethrow_Java stub (fancy_jump=2).
// ex_oop (Exception Oop) is needed in %o0 at the jump. As there would be a
// "restore" before this instruction (in Epilogue), we need to materialize it
// in %i0.
//FIXME
instruct tailjmpInd(mRegP jump_target,mRegP ex_oop) %{
  match( TailJump jump_target ex_oop );
  ins_cost(200);
  format %{ "Jmp $jump_target ; ex_oop = $ex_oop #@tailjmpInd" %}
  ins_encode %{
    Register target = $jump_target$$Register;

    /* 2012/9/14 Jin: V0, V1 are indicated in:
     *	[stubGenerator_mips.cpp] generate_forward_exception()
     *	[runtime_mips.cpp] OptoRuntime::generate_exception_blob()
     */
    // Calling convention of the exception path: oop in V0, throwing pc in V1.
    Register oop = $ex_oop$$Register;
    Register exception_oop = V0;
    Register exception_pc = V1;

    __ move(exception_pc, RA);
    __ move(exception_oop, oop);

    __ jr(target);
    __ nop(); // delay slot
  %}
  ins_pipe( pipe_jump );
%}
// ============================================================================
// Procedure Call/Return Instructions
// Call Java Static Instruction
// Note: If this code changes, the corresponding ret_addr_offset() and
// compute_padding() functions will have to be adjusted.
instruct CallStaticJavaDirect(method meth) %{
  match(CallStaticJava);
  effect(USE meth);

  ins_cost(300);
  format %{ "CALL,static #@CallStaticJavaDirect " %}
  ins_encode( Java_Static_Call( meth ) );
  ins_pipe( pipe_slow );
  ins_pc_relative(1);
%}

// Call Java Dynamic Instruction
// Note: If this code changes, the corresponding ret_addr_offset() and
// compute_padding() functions will have to be adjusted.
instruct CallDynamicJavaDirect(method meth) %{
  match(CallDynamicJava);
  effect(USE meth);

  ins_cost(300);
  format %{"MOV IC_Klass, (oop)-1\n\t"
           "CallDynamic @ CallDynamicJavaDirect" %}
  ins_encode( Java_Dynamic_Call( meth ) );
  ins_pipe( pipe_slow );
  ins_pc_relative(1);
%}

// Leaf call with no float arguments: no safepoint, no oop map.
instruct CallLeafNoFPDirect(method meth) %{
  match(CallLeafNoFP);
  effect(USE meth);

  ins_cost(300);
  format %{ "CALL_LEAF_NOFP,runtime " %}
  ins_encode(Java_To_Runtime(meth));
  ins_pipe( pipe_slow );
  ins_pc_relative(1);
  ins_alignment(16);
%}
// Prefetch instructions.

// Prefetch for read: compute the effective address into AT, then issue
// a pref with hint 0 (load).
instruct prefetchrNTA( memory mem ) %{
  match(PrefetchRead mem);
  ins_cost(125);

  format %{ "pref $mem\t# Prefetch into non-temporal cache for read @ prefetchrNTA" %}
  ins_encode %{
    int base = $mem$$base;
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;

    // AT = base [+ index << scale]
    if( index != 0 ) {
      if (scale == 0) {
        __ daddu(AT, as_Register(base), as_Register(index));
      } else {
        __ dsll(AT, as_Register(index), scale);
        __ daddu(AT, as_Register(base), AT);
      }
    } else {
      __ move(AT, as_Register(base));
    }
    // AT += disp.  (Bug fix: the old code re-read `base` here, which
    // discarded the index contribution and, in the simm16 branch, added
    // disp twice -- so the wrong cache line was prefetched.  pref is only
    // a hint, so this was a performance bug, not a correctness bug.)
    if( Assembler::is_simm16(disp) ) {
      __ daddiu(AT, AT, disp);
    } else {
      __ move(T9, disp);
      __ daddu(AT, AT, T9);
    }
    __ pref(0, AT, 0); //hint: 0:load
  %}
  ins_pipe(pipe_slow);
%}
// Prefetch for write: compute the effective address into AT, then issue
// a pref with hint 1 (store).
instruct prefetchwNTA( memory mem ) %{
  match(PrefetchWrite mem);
  ins_cost(125);
  format %{ "pref $mem\t# Prefetch to non-temporal cache for write @ prefetchwNTA" %}
  ins_encode %{
    int base = $mem$$base;
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;

    // AT = base [+ index << scale]
    if( index != 0 ) {
      if (scale == 0) {
        __ daddu(AT, as_Register(base), as_Register(index));
      } else {
        __ dsll(AT, as_Register(index), scale);
        __ daddu(AT, as_Register(base), AT);
      }
    } else {
      __ move(AT, as_Register(base));
    }
    // AT += disp.  (Bug fix: same defect as prefetchrNTA -- the old code
    // re-read `base` here, dropping the index term and double-adding disp
    // in the simm16 branch, so the hint targeted the wrong line.)
    if( Assembler::is_simm16(disp) ) {
      __ daddiu(AT, AT, disp);
    } else {
      __ move(T9, disp);
      __ daddu(AT, AT, T9);
    }
    __ pref(1, AT, 0); //hint: 1:store
  %}
  ins_pipe(pipe_slow);
%}
// Prefetch instructions for allocation.

// "Prefetch" for allocation: performed as a real byte load into R0 (the
// result is discarded) -- presumably to guarantee the line is actually
// brought into the cache; TODO confirm vs. using pref here.
instruct prefetchAllocNTA( memory mem ) %{
  match(PrefetchAllocation mem);
  ins_cost(125);
  format %{ "pref $mem\t# Prefetch allocation @ prefetchAllocNTA" %}
  ins_encode %{
    int base = $mem$$base;
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;

    Register dst = R0;

    if( index != 0 ) {
      if( Assembler::is_simm16(disp) ) {
        // Loongson gslbx folds base+index+disp into one instruction.
        if( UseLoongsonISA ) {
          if (scale == 0) {
            __ gslbx(dst, as_Register(base), as_Register(index), disp);
          } else {
            __ dsll(AT, as_Register(index), scale);
            __ gslbx(dst, as_Register(base), AT, disp);
          }
        } else {
          if (scale == 0) {
            __ addu(AT, as_Register(base), as_Register(index));
          } else {
            __ dsll(AT, as_Register(index), scale);
            __ addu(AT, as_Register(base), AT);
          }
          __ lb(dst, AT, disp);
        }
      } else {
        // disp does not fit in 16 bits: materialize it in T9 first.
        if (scale == 0) {
          __ addu(AT, as_Register(base), as_Register(index));
        } else {
          __ dsll(AT, as_Register(index), scale);
          __ addu(AT, as_Register(base), AT);
        }
        __ move(T9, disp);
        if( UseLoongsonISA ) {
          __ gslbx(dst, AT, T9, 0);
        } else {
          __ addu(AT, AT, T9);
          __ lb(dst, AT, 0);
        }
      }
    } else {
      // No index register: address is base + disp.
      if( Assembler::is_simm16(disp) ) {
        __ lb(dst, as_Register(base), disp);
      } else {
        __ move(T9, disp);
        if( UseLoongsonISA ) {
          __ gslbx(dst, as_Register(base), T9, 0);
        } else {
          __ addu(AT, as_Register(base), T9);
          __ lb(dst, AT, 0);
        }
      }
    }
  %}
  ins_pipe(pipe_slow);
%}
// Call runtime without safepoint
instruct CallLeafDirect(method meth) %{
  match(CallLeaf);
  effect(USE meth);

  ins_cost(300);
  format %{ "CALL_LEAF,runtime #@CallLeafDirect " %}
  ins_encode(Java_To_Runtime(meth));
  ins_pipe( pipe_slow );
  ins_pc_relative(1);
  ins_alignment(16);
%}
// Load Char (16bit unsigned)
instruct loadUS(mRegI dst, memory mem) %{
  match(Set dst (LoadUS mem));

  ins_cost(125);
  format %{ "loadUS $dst,$mem @ loadC" %}
  ins_encode(load_C_enc(dst, mem));
  ins_pipe( ialu_loadI );
%}

// Char load folded with int-to-long conversion: the zero-extending
// char load already produces the correct 64-bit value.
instruct loadUS_convI2L(mRegL dst, memory mem) %{
  match(Set dst (ConvI2L (LoadUS mem)));

  ins_cost(125);
  format %{ "loadUS $dst,$mem @ loadUS_convI2L" %}
  ins_encode(load_C_enc(dst, mem));
  ins_pipe( ialu_loadI );
%}

// Store Char (16bit unsigned)
instruct storeC(memory mem, mRegI src) %{
  match(Set mem (StoreC mem src));

  ins_cost(125);
  format %{ "storeC $src, $mem @ storeC" %}
  ins_encode(store_C_reg_enc(mem, src));
  ins_pipe( ialu_loadI );
%}

// Store of char constant zero: avoids occupying a source register.
instruct storeC0(memory mem, immI0 zero) %{
  match(Set mem (StoreC mem zero));

  ins_cost(125);
  format %{ "storeC $zero, $mem @ storeC0" %}
  ins_encode(store_C0_enc(mem));
  ins_pipe( ialu_loadI );
%}
// Float constant 0.0f: materialized by moving integer zero into the FPU
// register, cheaper than a constant-table load.
instruct loadConF0(regF dst, immF0 zero) %{
  match(Set dst zero);
  ins_cost(100);

  format %{ "mov $dst, zero @ loadConF0\n"%}
  ins_encode %{
    FloatRegister dst = $dst$$FloatRegister;

    __ mtc1(R0, dst);
  %}
  ins_pipe( fpu_loadF );
%}

// General float constant: loaded from the constant table.
instruct loadConF(regF dst, immF src) %{
  match(Set dst src);
  ins_cost(125);

  format %{ "lwc1 $dst, $constantoffset[$constanttablebase] # load FLOAT $src from table @ loadConF" %}
  ins_encode %{
    int con_offset = $constantoffset($src);

    if (Assembler::is_simm16(con_offset)) {
      __ lwc1($dst$$FloatRegister, $constanttablebase, con_offset);
    } else {
      // Offset out of 16-bit range: build it in AT first.
      __ set64(AT, con_offset);
      if (UseLoongsonISA) {
        __ gslwxc1($dst$$FloatRegister, $constanttablebase, AT, 0);
      } else {
        __ daddu(AT, $constanttablebase, AT);
        __ lwc1($dst$$FloatRegister, AT, 0);
      }
    }
  %}
  ins_pipe( fpu_loadF );
%}

// Double constant 0.0: all-zero bit pattern moved in via dmtc1.
instruct loadConD0(regD dst, immD0 zero) %{
  match(Set dst zero);
  ins_cost(100);

  format %{ "mov $dst, zero @ loadConD0"%}
  ins_encode %{
    FloatRegister dst = as_FloatRegister($dst$$reg);

    __ dmtc1(R0, dst);
  %}
  ins_pipe( fpu_loadF );
%}

// General double constant: loaded from the constant table.
instruct loadConD(regD dst, immD src) %{
  match(Set dst src);
  ins_cost(125);

  format %{ "ldc1 $dst, $constantoffset[$constanttablebase] # load DOUBLE $src from table @ loadConD" %}
  ins_encode %{
    int con_offset = $constantoffset($src);

    if (Assembler::is_simm16(con_offset)) {
      __ ldc1($dst$$FloatRegister, $constanttablebase, con_offset);
    } else {
      // Offset out of 16-bit range: build it in AT first.
      __ set64(AT, con_offset);
      if (UseLoongsonISA) {
        __ gsldxc1($dst$$FloatRegister, $constanttablebase, AT, 0);
      } else {
        __ daddu(AT, $constanttablebase, AT);
        __ ldc1($dst$$FloatRegister, AT, 0);
      }
    }
  %}
  ins_pipe( fpu_loadF );
%}
// Store register Float value (it is faster than store from FPU register)
instruct storeF_reg( memory mem, regF src) %{
  match(Set mem (StoreF mem src));

  ins_cost(50);
  format %{ "store $mem, $src\t# store float @ storeF_reg" %}
  ins_encode(store_F_reg_enc(mem, src));
  ins_pipe( fpu_storeF );
%}
// Store float constant 0.0f: since the bit pattern is all zeros, the
// store is done as an integer sw of R0 -- no FPU register needed.
instruct storeF_imm0( memory mem, immF0 zero) %{
  match(Set mem (StoreF mem zero));

  ins_cost(40);
  format %{ "store $mem, zero\t# store float @ storeF_imm0" %}
  ins_encode %{
    int base = $mem$$base;
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;

    if( index != 0 ) {
      if ( UseLoongsonISA ) {
        // gsswx takes base+index plus an 8-bit immediate in one instruction.
        if ( Assembler::is_simm(disp, 8) ) {
          if ( scale == 0 ) {
            __ gsswx(R0, as_Register(base), as_Register(index), disp);
          } else {
            __ dsll(T9, as_Register(index), scale);
            __ gsswx(R0, as_Register(base), T9, disp);
          }
        } else if ( Assembler::is_simm16(disp) ) {
          // disp fits sw's 16-bit immediate: fold it into the store.
          if ( scale == 0 ) {
            __ daddu(AT, as_Register(base), as_Register(index));
          } else {
            __ dsll(T9, as_Register(index), scale);
            __ daddu(AT, as_Register(base), T9);
          }
          __ sw(R0, AT, disp);
        } else {
          // Large disp: sum index+disp into AT, keep base for gsswx.
          if ( scale == 0 ) {
            __ move(T9, disp);
            __ daddu(AT, as_Register(index), T9);
            __ gsswx(R0, as_Register(base), AT, 0);
          } else {
            __ dsll(T9, as_Register(index), scale);
            __ move(AT, disp);
            __ daddu(AT, AT, T9);
            __ gsswx(R0, as_Register(base), AT, 0);
          }
        }
      } else { //not use loongson isa
        if(scale != 0) {
          __ dsll(T9, as_Register(index), scale);
          __ daddu(AT, as_Register(base), T9);
        } else {
          __ daddu(AT, as_Register(base), as_Register(index));
        }
        if( Assembler::is_simm16(disp) ) {
          __ sw(R0, AT, disp);
        } else {
          __ move(T9, disp);
          __ daddu(AT, AT, T9);
          __ sw(R0, AT, 0);
        }
      }
    } else { //index is 0
      if ( UseLoongsonISA ) {
        if ( Assembler::is_simm16(disp) ) {
          __ sw(R0, as_Register(base), disp);
        } else {
          __ move(T9, disp);
          __ gsswx(R0, as_Register(base), T9, 0);
        }
      } else {
        if( Assembler::is_simm16(disp) ) {
          __ sw(R0, as_Register(base), disp);
        } else {
          __ move(T9, disp);
          __ daddu(AT, as_Register(base), T9);
          __ sw(R0, AT, 0);
        }
      }
    }
  %}
  ins_pipe( ialu_storeI );
%}
// Load Double
instruct loadD(regD dst, memory mem) %{
  match(Set dst (LoadD mem));

  ins_cost(150);
  format %{ "loadD $dst, $mem #@loadD" %}
  ins_encode(load_D_enc(dst, mem));
  ins_pipe( ialu_loadI );
%}

// Load Double - UNaligned
instruct loadD_unaligned(regD dst, memory mem ) %{
  match(Set dst (LoadD_unaligned mem));
  ins_cost(250);
  // FIXME: Jin: Need more effective ldl/ldr
  format %{ "loadD_unaligned $dst, $mem #@loadD_unaligned" %}
  ins_encode(load_D_enc(dst, mem));
  ins_pipe( ialu_loadI );
%}

// Store Double from an FPU register.
instruct storeD_reg( memory mem, regD src) %{
  match(Set mem (StoreD mem src));

  ins_cost(50);
  format %{ "store $mem, $src\t# store float @ storeD_reg" %}
  ins_encode(store_D_reg_enc(mem, src));
  ins_pipe( fpu_storeF );
%}
// Store double constant 0.0: F30 is first set to 0.0 (mtc1 of R0 then
// int->double convert), then stored with sdc1/gssdxc1 depending on the
// addressing mode and ISA.
instruct storeD_imm0( memory mem, immD0 zero) %{
  match(Set mem (StoreD mem zero));

  ins_cost(40);
  format %{ "store $mem, zero\t# store float @ storeD_imm0" %}
  ins_encode %{
    int base = $mem$$base;
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;

    // F30 := 0.0 (scratch FPU register)
    __ mtc1(R0, F30);
    __ cvt_d_w(F30, F30);

    if( index != 0 ) {
      if ( UseLoongsonISA ) {
        // gssdxc1 folds base+index plus an 8-bit immediate.
        if ( Assembler::is_simm(disp, 8) ) {
          if (scale == 0) {
            __ gssdxc1(F30, as_Register(base), as_Register(index), disp);
          } else {
            __ dsll(T9, as_Register(index), scale);
            __ gssdxc1(F30, as_Register(base), T9, disp);
          }
        } else if ( Assembler::is_simm16(disp) ) {
          if (scale == 0) {
            __ daddu(AT, as_Register(base), as_Register(index));
            __ sdc1(F30, AT, disp);
          } else {
            __ dsll(T9, as_Register(index), scale);
            __ daddu(AT, as_Register(base), T9);
            __ sdc1(F30, AT, disp);
          }
        } else {
          // Large disp: sum index+disp into AT, keep base for gssdxc1.
          if (scale == 0) {
            __ move(T9, disp);
            __ daddu(AT, as_Register(index), T9);
            __ gssdxc1(F30, as_Register(base), AT, 0);
          } else {
            __ move(T9, disp);
            __ dsll(AT, as_Register(index), scale);
            __ daddu(AT, AT, T9);
            __ gssdxc1(F30, as_Register(base), AT, 0);
          }
        }
      } else { // not use loongson isa
        if(scale != 0) {
          __ dsll(T9, as_Register(index), scale);
          __ daddu(AT, as_Register(base), T9);
        } else {
          __ daddu(AT, as_Register(base), as_Register(index));
        }
        if( Assembler::is_simm16(disp) ) {
          __ sdc1(F30, AT, disp);
        } else {
          __ move(T9, disp);
          __ daddu(AT, AT, T9);
          __ sdc1(F30, AT, 0);
        }
      }
    } else {// index is 0
      if ( UseLoongsonISA ) {
        if ( Assembler::is_simm16(disp) ) {
          __ sdc1(F30, as_Register(base), disp);
        } else {
          __ move(T9, disp);
          __ gssdxc1(F30, as_Register(base), T9, 0);
        }
      } else {
        if( Assembler::is_simm16(disp) ) {
          __ sdc1(F30, as_Register(base), disp);
        } else {
          __ move(T9, disp);
          __ daddu(AT, as_Register(base), T9);
          __ sdc1(F30, AT, 0);
        }
      }
    }
  %}
  ins_pipe( ialu_storeI );
%}
// ----- Stack-slot moves -----
// All of these address the slot relative to SP; each guards that the
// displacement fits in the 16-bit immediate of the load/store.

// Load int from stack slot.
instruct loadSSI(mRegI dst, stackSlotI src)
%{
  match(Set dst src);

  ins_cost(125);
  format %{ "lw $dst, $src\t# int stk @ loadSSI" %}
  ins_encode %{
    guarantee( Assembler::is_simm16($src$$disp), "disp too long (loadSSI) !");
    __ lw($dst$$Register, SP, $src$$disp);
  %}
  ins_pipe(ialu_loadI);
%}

// Store int to stack slot.
instruct storeSSI(stackSlotI dst, mRegI src)
%{
  match(Set dst src);

  ins_cost(100);
  format %{ "sw $dst, $src\t# int stk @ storeSSI" %}
  ins_encode %{
    guarantee( Assembler::is_simm16($dst$$disp), "disp too long (storeSSI) !");
    __ sw($src$$Register, SP, $dst$$disp);
  %}
  ins_pipe(ialu_storeI);
%}

// Load long from stack slot.
instruct loadSSL(mRegL dst, stackSlotL src)
%{
  match(Set dst src);

  ins_cost(125);
  format %{ "ld $dst, $src\t# long stk @ loadSSL" %}
  ins_encode %{
    guarantee( Assembler::is_simm16($src$$disp), "disp too long (loadSSL) !");
    __ ld($dst$$Register, SP, $src$$disp);
  %}
  ins_pipe(ialu_loadI);
%}

// Store long to stack slot.
instruct storeSSL(stackSlotL dst, mRegL src)
%{
  match(Set dst src);

  ins_cost(100);
  format %{ "sd $dst, $src\t# long stk @ storeSSL" %}
  ins_encode %{
    guarantee( Assembler::is_simm16($dst$$disp), "disp too long (storeSSL) !");
    __ sd($src$$Register, SP, $dst$$disp);
  %}
  ins_pipe(ialu_storeI);
%}

// Load pointer from stack slot.
instruct loadSSP(mRegP dst, stackSlotP src)
%{
  match(Set dst src);

  ins_cost(125);
  format %{ "ld $dst, $src\t# ptr stk @ loadSSP" %}
  ins_encode %{
    guarantee( Assembler::is_simm16($src$$disp), "disp too long (loadSSP) !");
    __ ld($dst$$Register, SP, $src$$disp);
  %}
  ins_pipe(ialu_loadI);
%}

// Store pointer to stack slot.
instruct storeSSP(stackSlotP dst, mRegP src)
%{
  match(Set dst src);

  ins_cost(100);
  format %{ "sd $dst, $src\t# ptr stk @ storeSSP" %}
  ins_encode %{
    guarantee( Assembler::is_simm16($dst$$disp), "disp too long (storeSSP) !");
    __ sd($src$$Register, SP, $dst$$disp);
  %}
  ins_pipe(ialu_storeI);
%}

// Load float from stack slot.
instruct loadSSF(regF dst, stackSlotF src)
%{
  match(Set dst src);

  ins_cost(125);
  format %{ "lwc1 $dst, $src\t# float stk @ loadSSF" %}
  ins_encode %{
    guarantee( Assembler::is_simm16($src$$disp), "disp too long (loadSSF) !");
    __ lwc1($dst$$FloatRegister, SP, $src$$disp);
  %}
  ins_pipe(ialu_loadI);
%}

// Store float to stack slot.
instruct storeSSF(stackSlotF dst, regF src)
%{
  match(Set dst src);

  ins_cost(100);
  format %{ "swc1 $dst, $src\t# float stk @ storeSSF" %}
  ins_encode %{
    guarantee( Assembler::is_simm16($dst$$disp), "disp too long (storeSSF) !");
    __ swc1($src$$FloatRegister, SP, $dst$$disp);
  %}
  ins_pipe(fpu_storeF);
%}

// Use the same format since predicate() can not be used here.
instruct loadSSD(regD dst, stackSlotD src)
%{
  match(Set dst src);

  ins_cost(125);
  format %{ "ldc1 $dst, $src\t# double stk @ loadSSD" %}
  ins_encode %{
    guarantee( Assembler::is_simm16($src$$disp), "disp too long (loadSSD) !");
    __ ldc1($dst$$FloatRegister, SP, $src$$disp);
  %}
  ins_pipe(ialu_loadI);
%}

// Store double to stack slot.
instruct storeSSD(stackSlotD dst, regD src)
%{
  match(Set dst src);

  ins_cost(100);
  format %{ "sdc1 $dst, $src\t# double stk @ storeSSD" %}
  ins_encode %{
    guarantee( Assembler::is_simm16($dst$$disp), "disp too long (storeSSD) !");
    __ sdc1($src$$FloatRegister, SP, $dst$$disp);
  %}
  ins_pipe(fpu_storeF);
%}
// Inline fast-path monitor enter; the heavy lifting is in
// MacroAssembler::fast_lock.  box is USE_KILL: its register is clobbered.
instruct cmpFastLock( FlagsReg cr, mRegP object, s0_RegP box, mRegI tmp, mRegP scr) %{
  match( Set cr (FastLock object box) );
  effect( TEMP tmp, TEMP scr, USE_KILL box );
  ins_cost(300);
  format %{ "FASTLOCK $cr $object, $box, $tmp #@ cmpFastLock" %}
  ins_encode %{
    __ fast_lock($object$$Register, $box$$Register, $tmp$$Register, $scr$$Register);
  %}

  ins_pipe( pipe_slow );
  ins_pc_relative(1);
%}

// Inline fast-path monitor exit; see MacroAssembler::fast_unlock.
instruct cmpFastUnlock( FlagsReg cr, mRegP object, s0_RegP box, mRegP tmp ) %{
  match( Set cr (FastUnlock object box) );
  effect( TEMP tmp, USE_KILL box );
  ins_cost(300);
  format %{ "FASTUNLOCK $object, $box, $tmp #@cmpFastUnlock" %}
  ins_encode %{
    __ fast_unlock($object$$Register, $box$$Register, $tmp$$Register);
  %}

  ins_pipe( pipe_slow );
  ins_pc_relative(1);
%}
// Store CMS card-mark Immediate
// Byte store with a memory barrier (see store_B_immI_enc_sync), as
// required for card-table updates under CMS.
instruct storeImmCM(memory mem, immI8 src) %{
  match(Set mem (StoreCM mem src));

  ins_cost(150);
  format %{ "MOV8 $mem,$src\t! CMS card-mark imm0" %}
// opcode(0xC6);
  ins_encode(store_B_immI_enc_sync(mem, src));
  ins_pipe( ialu_storeI );
%}
// Die now
instruct ShouldNotReachHere( )
%{
  match(Halt);
  ins_cost(300);

  // Use the following format syntax
  format %{ "ILLTRAP ;#@ShouldNotReachHere" %}
  ins_encode %{
    // Here we should emit illtrap !
    // Fixed typo in the stop message ("ShoudNotReachHere").
    __ stop("in ShouldNotReachHere");

  %}
  ins_pipe( pipe_jump );
%}
// Address computation (lea): base + 8-bit displacement, valid only when
// compressed oops use no shift.
instruct leaP8Narrow(mRegP dst, indOffset8Narrow mem)
%{
  predicate(Universe::narrow_oop_shift() == 0);
  match(Set dst mem);

  ins_cost(110);
  format %{ "leaq $dst, $mem\t# ptr off8narrow @ leaP8Narrow" %}
  ins_encode %{
    Register dst = $dst$$Register;
    Register base = as_Register($mem$$base);
    int disp = $mem$$disp;

    __ daddiu(dst, base, disp);
  %}
  ins_pipe( ialu_regI_imm16 );
%}

// Address computation: base + (index << scale) + 8-bit displacement.
instruct leaPPosIdxScaleOff8(mRegP dst, basePosIndexScaleOffset8 mem)
%{
  match(Set dst mem);

  ins_cost(110);
  format %{ "leaq $dst, $mem\t# @ PosIdxScaleOff8" %}
  ins_encode %{
    Register dst = $dst$$Register;
    Register base = as_Register($mem$$base);
    Register index = as_Register($mem$$index);
    int scale = $mem$$scale;
    int disp = $mem$$disp;

    if (scale == 0) {
      __ daddu(AT, base, index);
      __ daddiu(dst, AT, disp);
    } else {
      __ dsll(AT, index, scale);
      __ daddu(AT, base, AT);
      __ daddiu(dst, AT, disp);
    }
  %}

  ins_pipe( ialu_regI_imm16 );
%}

// Address computation: base + (index << scale), no displacement.
instruct leaPIdxScale(mRegP dst, indIndexScale mem)
%{
  match(Set dst mem);

  ins_cost(110);
  format %{ "leaq $dst, $mem\t# @ leaPIdxScale" %}
  ins_encode %{
    Register dst = $dst$$Register;
    Register base = as_Register($mem$$base);
    Register index = as_Register($mem$$index);
    int scale = $mem$$scale;

    if (scale == 0) {
      __ daddu(dst, base, index);
    } else {
      __ dsll(AT, index, scale);
      __ daddu(dst, base, AT);
    }
  %}

  ins_pipe( ialu_regI_imm16 );
%}
// Jump Direct Conditional - Label defines a relative address from Jcc+1
instruct jmpLoopEnd(cmpOp cop, mRegI src1, mRegI src2, label labl) %{
  match(CountedLoopEnd cop (CmpI src1 src2));
  effect(USE labl);

  ins_cost(300);
  format %{ "J$cop $src1, $src2, $labl\t# Loop end @ jmpLoopEnd" %}
  ins_encode %{
    Register op1 = $src1$$Register;
    Register op2 = $src2$$Register;
    Label &L = *($labl$$label);
    int flag = $cop$$cmpcode;

    // For each condition: eq/ne map directly to beq/bne; the ordered
    // comparisons are synthesized with slt into AT, then a branch on AT.
    // An unbound label (&L == NULL) emits a zero offset to be patched later.
    switch(flag)
    {
      case 0x01: //equal
        if (&L)
          __ beq(op1, op2, L);
        else
          __ beq(op1, op2, (int)0);
        break;
      case 0x02: //not_equal
        if (&L)
          __ bne(op1, op2, L);
        else
          __ bne(op1, op2, (int)0);
        break;
      case 0x03: //above
        __ slt(AT, op2, op1);
        if(&L)
          __ bne(AT, R0, L);
        else
          __ bne(AT, R0, (int)0);
        break;
      case 0x04: //above_equal
        __ slt(AT, op1, op2);
        if(&L)
          __ beq(AT, R0, L);
        else
          __ beq(AT, R0, (int)0);
        break;
      case 0x05: //below
        __ slt(AT, op1, op2);
        if(&L)
          __ bne(AT, R0, L);
        else
          __ bne(AT, R0, (int)0);
        break;
      case 0x06: //below_equal
        __ slt(AT, op2, op1);
        if(&L)
          __ beq(AT, R0, L);
        else
          __ beq(AT, R0, (int)0);
        break;
      default:
        Unimplemented();
    }
    __ nop(); // branch delay slot
  %}
  ins_pipe( pipe_jump );
  ins_pc_relative(1);
%}
// Loop-end branch against a small constant: computes AT = src1 - src2
// once, then branches on AT's sign/zero -- one fewer instruction than
// materializing the constant for a register-register compare.
instruct jmpLoopEnd_reg_imm16_sub(cmpOp cop, mRegI src1, immI16_sub src2, label labl) %{
  match(CountedLoopEnd cop (CmpI src1 src2));
  effect(USE labl);

  ins_cost(250);
  format %{ "J$cop $src1, $src2, $labl\t# Loop end @ jmpLoopEnd_reg_imm16_sub" %}
  ins_encode %{
    Register op1 = $src1$$Register;
    int op2 = $src2$$constant;
    Label &L = *($labl$$label);
    int flag = $cop$$cmpcode;

    // AT = op1 - op2 (immI16_sub guarantees -op2 fits the immediate).
    __ addiu32(AT, op1, -1 * op2);

    switch(flag)
    {
      case 0x01: //equal
        if (&L)
          __ beq(AT, R0, L);
        else
          __ beq(AT, R0, (int)0);
        break;
      case 0x02: //not_equal
        if (&L)
          __ bne(AT, R0, L);
        else
          __ bne(AT, R0, (int)0);
        break;
      case 0x03: //above
        if(&L)
          __ bgtz(AT, L);
        else
          __ bgtz(AT, (int)0);
        break;
      case 0x04: //above_equal
        if(&L)
          __ bgez(AT, L);
        else
          __ bgez(AT,(int)0);
        break;
      case 0x05: //below
        if(&L)
          __ bltz(AT, L);
        else
          __ bltz(AT, (int)0);
        break;
      case 0x06: //below_equal
        if(&L)
          __ blez(AT, L);
        else
          __ blez(AT, (int)0);
        break;
      default:
        Unimplemented();
    }
    __ nop(); // branch delay slot
  %}
  ins_pipe( pipe_jump );
  ins_pc_relative(1);
%}
13199 /*
13200 // Jump Direct Conditional - Label defines a relative address from Jcc+1
13201 instruct jmpLoopEndU(cmpOpU cop, eFlagsRegU cmp, label labl) %{
13202 match(CountedLoopEnd cop cmp);
13203 effect(USE labl);
13205 ins_cost(300);
13206 format %{ "J$cop,u $labl\t# Loop end" %}
13207 size(6);
13208 opcode(0x0F, 0x80);
13209 ins_encode( Jcc( cop, labl) );
13210 ins_pipe( pipe_jump );
13211 ins_pc_relative(1);
13212 %}
13214 instruct jmpLoopEndUCF(cmpOpUCF cop, eFlagsRegUCF cmp, label labl) %{
13215 match(CountedLoopEnd cop cmp);
13216 effect(USE labl);
13218 ins_cost(200);
13219 format %{ "J$cop,u $labl\t# Loop end" %}
13220 opcode(0x0F, 0x80);
13221 ins_encode( Jcc( cop, labl) );
13222 ins_pipe( pipe_jump );
13223 ins_pc_relative(1);
13224 %}
13225 */
// This match pattern is created for StoreIConditional since I cannot match IfNode without a RegFlags! fujie 2012/07/17
// Branch on the pseudo flag register: the producing instruct (e.g.
// storeIConditional below) leaves success/failure in AT.  Note the
// inverted branch sense: "equal" tests AT != 0 -- presumably because the
// producer encodes "condition holds" as a nonzero AT; verify against the
// flag-producing rules before changing.
instruct jmpCon_flags(cmpOp cop, FlagsReg cr, label labl) %{
  match(If cop cr);
  effect(USE labl);

  ins_cost(300);
  format %{ "J$cop $labl #mips uses AT as eflag @jmpCon_flags" %}

  ins_encode %{
    Label &L = *($labl$$label);
    switch($cop$$cmpcode)
    {
      case 0x01: //equal
        if (&L)
          __ bne(AT, R0, L);
        else
          __ bne(AT, R0, (int)0);
        break;
      case 0x02: //not equal
        if (&L)
          __ beq(AT, R0, L);
        else
          __ beq(AT, R0, (int)0);
        break;
      default:
        Unimplemented();
    }
    __ nop(); // branch delay slot
  %}

  ins_pipe( pipe_jump );
  ins_pc_relative(1);
%}
// ============================================================================
// The 2nd slow-half of a subtype check. Scan the subklass's 2ndary superklass
// array for an instance of the superklass. Set a hidden internal cache on a
// hit (cache is checked with exposed code in gen_subtype_check()). Return
// NZ for a miss or zero for a hit. The encoding ALSO sets flags.
instruct partialSubtypeCheck( mRegP result, no_T8_mRegP sub, no_T8_mRegP super, mT8RegI tmp ) %{
  match(Set result (PartialSubtypeCheck sub super));
  // tmp is pinned to T8 (mT8RegI) and clobbered by the encoding.
  effect(KILL tmp);
  ins_cost(1100);  // slightly larger than the next version
  format %{ "partialSubtypeCheck result=$result, sub=$sub, super=$super, tmp=$tmp " %}

  ins_encode( enc_PartialSubtypeCheck(result, sub, super, tmp) );
  ins_pipe( pipe_slow );
%}
13278 // Conditional-store of an int value.
13279 // ZF flag is set on success, reset otherwise. Implemented with a CMPXCHG on Intel.
// Conditional-store of an int value using an ll/sc retry loop.
// On exit AT (the port's pseudo-flag) is 0xFF on success and 0 on failure,
// which jmpCon_flags then tests against R0.
instruct storeIConditional( memory mem, mRegI oldval, mRegI newval, FlagsReg cr ) %{
  match(Set cr (StoreIConditional mem (Binary oldval newval)));
  // effect(KILL oldval);
  format %{ "CMPXCHG $newval, $mem, $oldval \t# @storeIConditional" %}

  ins_encode %{
    Register oldval = $oldval$$Register;
    Register newval = $newval$$Register;
    Address addr(as_Register($mem$$base), $mem$$disp);
    Label again, failure;

    // int base = $mem$$base;
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;

    // ll/sc take a 16-bit signed displacement only.
    guarantee(Assembler::is_simm16(disp), "");

    if( index != 0 ) {
      // Indexed addressing is not supported by this encoding.
      __ stop("in storeIConditional: index != 0");
    } else {
      __ bind(again);
      // Leading sync needed on older cores; 3A2000 makes it unnecessary.
      if(!Use3A2000) __ sync();
      __ ll(AT, addr);
      __ bne(AT, oldval, failure);
      __ delayed()->addu(AT, R0, R0);   // delay slot: AT = 0 (failure flag)

      __ addu(AT, newval, R0);          // AT = newval for the store-conditional
      __ sc(AT, addr);                  // AT = 1 if the store succeeded, else 0
      __ beq(AT, R0, again);            // lost the reservation -> retry
      __ delayed()->addiu(AT, R0, 0xFF);// delay slot: AT = 0xFF (success flag)
      __ bind(failure);
      __ sync();                        // trailing barrier on both paths
    }
  %}

  ins_pipe( long_memory_op );
%}
13319 // Conditional-store of a long value.
13320 // ZF flag is set on success, reset otherwise. Implemented with a CMPXCHG.
13321 instruct storeLConditional(memory mem, t2RegL oldval, mRegL newval, FlagsReg cr )
13322 %{
13323 match(Set cr (StoreLConditional mem (Binary oldval newval)));
13324 effect(KILL oldval);
13326 format %{ "cmpxchg $mem, $newval\t# If $oldval == $mem then store $newval into $mem" %}
13327 ins_encode%{
13328 Register oldval = $oldval$$Register;
13329 Register newval = $newval$$Register;
13330 Address addr((Register)$mem$$base, $mem$$disp);
13332 int index = $mem$$index;
13333 int scale = $mem$$scale;
13334 int disp = $mem$$disp;
13336 guarantee(Assembler::is_simm16(disp), "");
13338 if( index != 0 ) {
13339 __ stop("in storeIConditional: index != 0");
13340 } else {
13341 __ cmpxchg(newval, addr, oldval);
13342 }
13343 %}
13344 ins_pipe( long_memory_op );
13345 %}
13348 instruct compareAndSwapI( mRegI res, mRegP mem_ptr, mS2RegI oldval, mRegI newval) %{
13349 match(Set res (CompareAndSwapI mem_ptr (Binary oldval newval)));
13350 effect(KILL oldval);
13351 // match(CompareAndSwapI mem_ptr (Binary oldval newval));
13352 format %{ "CMPXCHG $newval, [$mem_ptr], $oldval @ compareAndSwapI\n\t"
13353 "MOV $res, 1 @ compareAndSwapI\n\t"
13354 "BNE AT, R0 @ compareAndSwapI\n\t"
13355 "MOV $res, 0 @ compareAndSwapI\n"
13356 "L:" %}
13357 ins_encode %{
13358 Register newval = $newval$$Register;
13359 Register oldval = $oldval$$Register;
13360 Register res = $res$$Register;
13361 Address addr($mem_ptr$$Register, 0);
13362 Label L;
13364 __ cmpxchg32(newval, addr, oldval);
13365 __ move(res, AT);
13366 %}
13367 ins_pipe( long_memory_op );
13368 %}
13370 //FIXME:
13371 instruct compareAndSwapP( mRegI res, mRegP mem_ptr, s2_RegP oldval, mRegP newval) %{
13372 match(Set res (CompareAndSwapP mem_ptr (Binary oldval newval)));
13373 effect(KILL oldval);
13374 format %{ "CMPXCHG $newval, [$mem_ptr], $oldval @ compareAndSwapP\n\t"
13375 "MOV $res, AT @ compareAndSwapP\n\t"
13376 "L:" %}
13377 ins_encode %{
13378 Register newval = $newval$$Register;
13379 Register oldval = $oldval$$Register;
13380 Register res = $res$$Register;
13381 Address addr($mem_ptr$$Register, 0);
13382 Label L;
13384 __ cmpxchg(newval, addr, oldval);
13385 __ move(res, AT);
13386 %}
13387 ins_pipe( long_memory_op );
13388 %}
13390 instruct compareAndSwapN( mRegI res, mRegP mem_ptr, t2_RegN oldval, mRegN newval) %{
13391 match(Set res (CompareAndSwapN mem_ptr (Binary oldval newval)));
13392 effect(KILL oldval);
13393 format %{ "CMPXCHG $newval, [$mem_ptr], $oldval @ compareAndSwapN\n\t"
13394 "MOV $res, AT @ compareAndSwapN\n\t"
13395 "L:" %}
13396 ins_encode %{
13397 Register newval = $newval$$Register;
13398 Register oldval = $oldval$$Register;
13399 Register res = $res$$Register;
13400 Address addr($mem_ptr$$Register, 0);
13401 Label L;
13403 /* 2013/7/19 Jin: cmpxchg32 is implemented with ll/sc, which will do sign extension.
13404 * Thus, we should extend oldval's sign for correct comparision.
13405 */
13406 __ sll(oldval, oldval, 0);
13408 __ cmpxchg32(newval, addr, oldval);
13409 __ move(res, AT);
13410 %}
13411 ins_pipe( long_memory_op );
13412 %}
13414 //----------Max and Min--------------------------------------------------------
13415 // Min Instructions
13416 ////
13417 // *** Min and Max using the conditional move are slower than the
13418 // *** branch version on a Pentium III.
13419 // // Conditional move for min
13420 //instruct cmovI_reg_lt( eRegI op2, eRegI op1, eFlagsReg cr ) %{
13421 // effect( USE_DEF op2, USE op1, USE cr );
13422 // format %{ "CMOVlt $op2,$op1\t! min" %}
13423 // opcode(0x4C,0x0F);
13424 // ins_encode( OpcS, OpcP, RegReg( op2, op1 ) );
13425 // ins_pipe( pipe_cmov_reg );
13426 //%}
13427 //
13428 //// Min Register with Register (P6 version)
13429 //instruct minI_eReg_p6( eRegI op1, eRegI op2 ) %{
13430 // predicate(VM_Version::supports_cmov() );
13431 // match(Set op2 (MinI op1 op2));
13432 // ins_cost(200);
13433 // expand %{
13434 // eFlagsReg cr;
13435 // compI_eReg(cr,op1,op2);
13436 // cmovI_reg_lt(op2,op1,cr);
13437 // %}
13438 //%}
13440 // Min Register with Register (generic version)
13441 instruct minI_Reg_Reg(mRegI dst, mRegI src) %{
13442 match(Set dst (MinI dst src));
13443 //effect(KILL flags);
13444 ins_cost(80);
13446 format %{ "MIN $dst, $src @minI_Reg_Reg" %}
13447 ins_encode %{
13448 Register dst = $dst$$Register;
13449 Register src = $src$$Register;
13451 __ slt(AT, src, dst);
13452 __ movn(dst, src, AT);
13454 %}
13456 ins_pipe( pipe_slow );
13457 %}
13459 // Max Register with Register
13460 // *** Min and Max using the conditional move are slower than the
13461 // *** branch version on a Pentium III.
13462 // // Conditional move for max
13463 //instruct cmovI_reg_gt( eRegI op2, eRegI op1, eFlagsReg cr ) %{
13464 // effect( USE_DEF op2, USE op1, USE cr );
13465 // format %{ "CMOVgt $op2,$op1\t! max" %}
13466 // opcode(0x4F,0x0F);
13467 // ins_encode( OpcS, OpcP, RegReg( op2, op1 ) );
13468 // ins_pipe( pipe_cmov_reg );
13469 //%}
13470 //
13471 // // Max Register with Register (P6 version)
13472 //instruct maxI_eReg_p6( eRegI op1, eRegI op2 ) %{
13473 // predicate(VM_Version::supports_cmov() );
13474 // match(Set op2 (MaxI op1 op2));
13475 // ins_cost(200);
13476 // expand %{
13477 // eFlagsReg cr;
13478 // compI_eReg(cr,op1,op2);
13479 // cmovI_reg_gt(op2,op1,cr);
13480 // %}
13481 //%}
13483 // Max Register with Register (generic version)
13484 instruct maxI_Reg_Reg(mRegI dst, mRegI src) %{
13485 match(Set dst (MaxI dst src));
13486 ins_cost(80);
13488 format %{ "MAX $dst, $src @maxI_Reg_Reg" %}
13490 ins_encode %{
13491 Register dst = $dst$$Register;
13492 Register src = $src$$Register;
13494 __ slt(AT, dst, src);
13495 __ movn(dst, src, AT);
13497 %}
13499 ins_pipe( pipe_slow );
13500 %}
13502 instruct maxI_Reg_zero(mRegI dst, immI0 zero) %{
13503 match(Set dst (MaxI dst zero));
13504 ins_cost(50);
13506 format %{ "MAX $dst, 0 @maxI_Reg_zero" %}
13508 ins_encode %{
13509 Register dst = $dst$$Register;
13511 __ slt(AT, dst, R0);
13512 __ movn(dst, R0, AT);
13514 %}
13516 ins_pipe( pipe_slow );
13517 %}
13519 instruct zerox_long_reg_reg(mRegL dst, mRegL src, immL_32bits mask)
13520 %{
13521 match(Set dst (AndL src mask));
13523 format %{ "movl $dst, $src\t# zero-extend long @ zerox_long_reg_reg" %}
13524 ins_encode %{
13525 Register dst = $dst$$Register;
13526 Register src = $src$$Register;
13528 __ dext(dst, src, 0, 32);
13529 %}
13530 ins_pipe(ialu_regI_regI);
13531 %}
// Pack two ints into one long: dst = (src2 << 32) | zext(src1).
// The three cases below exist only to handle register aliasing between dst
// and the sources; all emit the same final value.
instruct combine_i2l(mRegL dst, mRegI src1, immL_32bits mask, mRegI src2, immI_32 shift32)
%{
  match(Set dst (OrL (AndL (ConvI2L src1) mask) (LShiftL (ConvI2L src2) shift32)));

  format %{ "combine_i2l $dst, $src2(H), $src1(L) @ combine_i2l" %}
  ins_encode %{
    Register dst = $dst$$Register;
    Register src1 = $src1$$Register;
    Register src2 = $src2$$Register;

    if (src1 == dst) {
      // Low word already in place; insert src2 into the high word.
      __ dinsu(dst, src2, 32, 32);
    } else if (src2 == dst) {
      // Move dst's low word up to the high half first, then insert src1 low.
      __ dsll32(dst, dst, 0);
      __ dins(dst, src1, 0, 32);
    } else {
      // No aliasing: build low word, then insert high word.
      __ dext(dst, src1, 0, 32);
      __ dinsu(dst, src2, 32, 32);
    }
  %}
  ins_pipe(ialu_regI_regI);
%}
13556 // Zero-extend convert int to long
13557 instruct convI2L_reg_reg_zex(mRegL dst, mRegI src, immL_32bits mask)
13558 %{
13559 match(Set dst (AndL (ConvI2L src) mask));
13561 format %{ "movl $dst, $src\t# i2l zero-extend @ convI2L_reg_reg_zex" %}
13562 ins_encode %{
13563 Register dst = $dst$$Register;
13564 Register src = $src$$Register;
13566 __ dext(dst, src, 0, 32);
13567 %}
13568 ins_pipe(ialu_regI_regI);
13569 %}
13571 instruct convL2I2L_reg_reg_zex(mRegL dst, mRegL src, immL_32bits mask)
13572 %{
13573 match(Set dst (AndL (ConvI2L (ConvL2I src)) mask));
13575 format %{ "movl $dst, $src\t# i2l zero-extend @ convL2I2L_reg_reg_zex" %}
13576 ins_encode %{
13577 Register dst = $dst$$Register;
13578 Register src = $src$$Register;
13580 __ dext(dst, src, 0, 32);
13581 %}
13582 ins_pipe(ialu_regI_regI);
13583 %}
13585 // Match loading integer and casting it to unsigned int in long register.
13586 // LoadI + ConvI2L + AndL 0xffffffff.
// LoadI + ConvI2L + AndL 0xffffffff folded into one zero-extending 32-bit
// load (format indicates lwu; load_N_enc is defined elsewhere in this file).
instruct loadUI2L_rmask(mRegL dst, memory mem, immL_32bits mask) %{
  match(Set dst (AndL (ConvI2L (LoadI mem)) mask));

  format %{ "lwu $dst, $mem \t// zero-extend to long @ loadUI2L_rmask" %}
  ins_encode (load_N_enc(dst, mem));
  ins_pipe(ialu_loadI);
%}
// Same as loadUI2L_rmask but with the mask on the left of the AndL
// (the matcher does not canonicalize operand order here).
instruct loadUI2L_lmask(mRegL dst, memory mem, immL_32bits mask) %{
  match(Set dst (AndL mask (ConvI2L (LoadI mem))));

  format %{ "lwu $dst, $mem \t// zero-extend to long @ loadUI2L_lmask" %}
  ins_encode (load_N_enc(dst, mem));
  ins_pipe(ialu_loadI);
%}
13604 // ============================================================================
13605 // Safepoint Instruction
// Safepoint poll with the polling-page address already in a register.
// Currently disabled via predicate(false) — safePoint_poll below is used.
instruct safePoint_poll_reg(mRegP poll) %{
  match(SafePoint poll);
  predicate(false);
  effect(USE poll);

  ins_cost(125);
  format %{ "Safepoint @ [$poll] : poll for GC @ safePoint_poll_reg" %}

  ins_encode %{
    Register poll_reg = $poll$$Register;

    __ block_comment("Safepoint:");
    __ relocate(relocInfo::poll_type);
    // Read from the polling page; the load faults when a safepoint is
    // pending (standard HotSpot polling mechanism).
    __ lw(AT, poll_reg, 0);
  %}

  ins_pipe( ialu_storeI );
%}
// Safepoint poll: materialize the polling-page address in T9 and read it.
// The relocated load faults when the VM arms the page for a safepoint.
instruct safePoint_poll() %{
  match(SafePoint);

  ins_cost(105);
  format %{ "poll for GC @ safePoint_poll" %}

  ins_encode %{
    __ block_comment("Safepoint:");
    __ set64(T9, (long)os::get_polling_page());
    __ relocate(relocInfo::poll_type);
    __ lw(AT, T9, 0);
  %}

  ins_pipe( ialu_storeI );
%}
13641 //----------Arithmetic Conversion Instructions---------------------------------
// RoundFloat is a no-op on this target: the empty encoding emits nothing
// (presumably because FP arithmetic already produces correctly-rounded
// 32-bit results here — no x87-style extended precision to strip).
instruct roundFloat_nop(regF dst)
%{
  match(Set dst (RoundFloat dst));

  ins_cost(0);
  ins_encode();
  ins_pipe(empty);
%}
// RoundDouble is likewise a no-op on this target (see roundFloat_nop).
instruct roundDouble_nop(regD dst)
%{
  match(Set dst (RoundDouble dst));

  ins_cost(0);
  ins_encode();
  ins_pipe(empty);
%}
13661 //---------- Zeros Count Instructions ------------------------------------------
13662 // CountLeadingZerosINode CountTrailingZerosINode
// Count leading zeros of an int with the hardware clz instruction.
instruct countLeadingZerosI(mRegI dst, mRegI src) %{
  predicate(UseCountLeadingZerosInstruction);
  match(Set dst (CountLeadingZerosI src));

  format %{ "clz $dst, $src\t# count leading zeros (int)" %}
  ins_encode %{
    __ clz($dst$$Register, $src$$Register);
  %}
  ins_pipe( ialu_regL_regL );
%}
// Count leading zeros of a long with the 64-bit dclz instruction.
instruct countLeadingZerosL(mRegI dst, mRegL src) %{
  predicate(UseCountLeadingZerosInstruction);
  match(Set dst (CountLeadingZerosL src));

  format %{ "dclz $dst, $src\t# count leading zeros (long)" %}
  ins_encode %{
    __ dclz($dst$$Register, $src$$Register);
  %}
  ins_pipe( ialu_regL_regL );
%}
// Count trailing zeros of an int.
instruct countTrailingZerosI(mRegI dst, mRegI src) %{
  predicate(UseCountTrailingZerosInstruction);
  match(Set dst (CountTrailingZerosI src));

  format %{ "ctz $dst, $src\t# count trailing zeros (int)" %}
  ins_encode %{
    // ctz and dctz are Godson (gs) extension instructions.
    __ ctz($dst$$Register, $src$$Register);
  %}
  ins_pipe( ialu_regL_regL );
%}
13697 instruct countTrailingZerosL(mRegI dst, mRegL src) %{
13698 predicate(UseCountTrailingZerosInstruction);
13699 match(Set dst (CountTrailingZerosL src));
13701 format %{ "dcto $dst, $src\t# count trailing zeros (long)" %}
13702 ins_encode %{
13703 __ dctz($dst$$Register, $src$$Register);
13704 %}
13705 ins_pipe( ialu_regL_regL );
13706 %}
13708 // ====================VECTOR INSTRUCTIONS=====================================
13710 // Load vectors (8 bytes long)
// Load an 8-byte vector through the double-precision load encoding
// (vecD lives in an FP register on this port).
instruct loadV8(vecD dst, memory mem) %{
  predicate(n->as_LoadVector()->memory_size() == 8);
  match(Set dst (LoadVector mem));
  ins_cost(125);
  format %{ "load $dst, $mem\t! load vector (8 bytes)" %}
  ins_encode(load_D_enc(dst, mem));
  ins_pipe( fpu_loadF );
%}
13720 // Store vectors (8 bytes long)
// Store an 8-byte vector through the double-precision store encoding.
instruct storeV8(memory mem, vecD src) %{
  predicate(n->as_StoreVector()->memory_size() == 8);
  match(Set mem (StoreVector mem src));
  ins_cost(145);
  format %{ "store $mem, $src\t! store vector (8 bytes)" %}
  ins_encode(store_D_reg_enc(mem, src));
  ins_pipe( fpu_storeF );
%}
// Replicate a byte into all 8 lanes using the Loongson DSP replv_ob
// instruction (cheaper than the generic dins chain; only on 3A2000).
instruct Repl8B_DSP(vecD dst, mRegI src) %{
  predicate(n->as_Vector()->length() == 8 && Use3A2000);
  match(Set dst (ReplicateB src));
  ins_cost(100);
  format %{ "replv_ob AT, $src\n\t"
            "dmtc1 AT, $dst\t! replicate8B" %}
  ins_encode %{
    __ replv_ob(AT, $src$$Register);
    __ dmtc1(AT, $dst$$FloatRegister);  // move the 64-bit pattern to the FPR
  %}
  ins_pipe( pipe_mtc1 );
%}
// Generic byte replication: double the pattern width with dins/dinsu
// (8 -> 16 -> 32 -> 64 bits), then move to the FP register.
instruct Repl8B(vecD dst, mRegI src) %{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (ReplicateB src));
  ins_cost(140);
  format %{ "move AT, $src\n\t"
            "dins AT, AT, 8, 8\n\t"
            "dins AT, AT, 16, 16\n\t"
            "dinsu AT, AT, 32, 32\n\t"
            "dmtc1 AT, $dst\t! replicate8B" %}
  ins_encode %{
    __ move(AT, $src$$Register);
    __ dins(AT, AT, 8, 8);      // bytes 0 -> 0..1
    __ dins(AT, AT, 16, 16);    // halves 0 -> 0..1
    __ dinsu(AT, AT, 32, 32);   // words 0 -> 0..1
    __ dmtc1(AT, $dst$$FloatRegister);
  %}
  ins_pipe( pipe_mtc1 );
%}
// Replicate a byte immediate into all 8 lanes with the DSP repl_ob
// instruction (3A2000 only).
instruct Repl8B_imm_DSP(vecD dst, immI con) %{
  predicate(n->as_Vector()->length() == 8 && Use3A2000);
  match(Set dst (ReplicateB con));
  ins_cost(110);
  format %{ "repl_ob AT, [$con]\n\t"
            "dmtc1 AT, $dst,0x00\t! replicate8B($con)" %}
  ins_encode %{
    int val = $con$$constant;
    __ repl_ob(AT, val);
    __ dmtc1(AT, $dst$$FloatRegister);
  %}
  ins_pipe( pipe_mtc1 );
%}
// Generic byte-immediate replication: same dins doubling chain as Repl8B,
// seeded from the constant.
instruct Repl8B_imm(vecD dst, immI con) %{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (ReplicateB con));
  ins_cost(150);
  format %{ "move AT, [$con]\n\t"
            "dins AT, AT, 8, 8\n\t"
            "dins AT, AT, 16, 16\n\t"
            "dinsu AT, AT, 32, 32\n\t"
            "dmtc1 AT, $dst,0x00\t! replicate8B($con)" %}
  ins_encode %{
    __ move(AT, $con$$constant);
    __ dins(AT, AT, 8, 8);
    __ dins(AT, AT, 16, 16);
    __ dinsu(AT, AT, 32, 32);
    __ dmtc1(AT, $dst$$FloatRegister);
  %}
  ins_pipe( pipe_mtc1 );
%}
// Replicate byte zero: just move R0 (hardwired zero) into the FPR.
instruct Repl8B_zero(vecD dst, immI0 zero) %{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (ReplicateB zero));
  ins_cost(90);
  format %{ "dmtc1 R0, $dst\t! replicate8B zero" %}
  ins_encode %{
    __ dmtc1(R0, $dst$$FloatRegister);
  %}
  ins_pipe( pipe_mtc1 );
%}
// Replicate byte -1: all-ones pattern built with nor(R0, R0).
instruct Repl8B_M1(vecD dst, immI_M1 M1) %{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (ReplicateB M1));
  ins_cost(80);
  format %{ "dmtc1 -1, $dst\t! replicate8B -1" %}
  ins_encode %{
    __ nor(AT, R0, R0);   // AT = ~0
    __ dmtc1(AT, $dst$$FloatRegister);
  %}
  ins_pipe( pipe_mtc1 );
%}
// Replicate a short into all 4 lanes using the DSP replv_qh instruction
// (3A2000 only).
instruct Repl4S_DSP(vecD dst, mRegI src) %{
  predicate(n->as_Vector()->length() == 4 && Use3A2000);
  match(Set dst (ReplicateS src));
  ins_cost(100);
  format %{ "replv_qh AT, $src\n\t"
            "dmtc1 AT, $dst\t! replicate4S" %}
  ins_encode %{
    __ replv_qh(AT, $src$$Register);
    __ dmtc1(AT, $dst$$FloatRegister);
  %}
  ins_pipe( pipe_mtc1 );
%}
// Generic short replication: double the pattern (16 -> 32 -> 64 bits).
instruct Repl4S(vecD dst, mRegI src) %{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (ReplicateS src));
  ins_cost(120);
  format %{ "move AT, $src \n\t"
            "dins AT, AT, 16, 16\n\t"
            "dinsu AT, AT, 32, 32\n\t"
            "dmtc1 AT, $dst\t! replicate4S" %}
  ins_encode %{
    __ move(AT, $src$$Register);
    __ dins(AT, AT, 16, 16);    // half 0 -> halves 0..1
    __ dinsu(AT, AT, 32, 32);   // word 0 -> words 0..1
    __ dmtc1(AT, $dst$$FloatRegister);
  %}
  ins_pipe( pipe_mtc1 );
%}
// Replicate a short immediate with DSP instructions (3A2000 only).
instruct Repl4S_imm_DSP(vecD dst, immI con) %{
  predicate(n->as_Vector()->length() == 4 && Use3A2000);
  match(Set dst (ReplicateS con));
  ins_cost(100);
  format %{ "replv_qh AT, [$con]\n\t"
            "dmtc1 AT, $dst\t! replicate4S($con)" %}
  ins_encode %{
    int val = $con$$constant;
    if ( Assembler::is_simm(val, 10)) {
      //repl_qh supports 10 bits immediate
      __ repl_qh(AT, val);
    } else {
      // Constant too wide for repl_qh: load it and replicate from register.
      __ li32(AT, val);
      __ replv_qh(AT, AT);
    }
    __ dmtc1(AT, $dst$$FloatRegister);
  %}
  ins_pipe( pipe_mtc1 );
%}
// Generic short-immediate replication via the dins doubling chain.
instruct Repl4S_imm(vecD dst, immI con) %{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (ReplicateS con));
  ins_cost(110);
  format %{ "move AT, [$con]\n\t"
            "dins AT, AT, 16, 16\n\t"
            "dinsu AT, AT, 32, 32\n\t"
            "dmtc1 AT, $dst\t! replicate4S($con)" %}
  ins_encode %{
    __ move(AT, $con$$constant);
    __ dins(AT, AT, 16, 16);
    __ dinsu(AT, AT, 32, 32);
    __ dmtc1(AT, $dst$$FloatRegister);
  %}
  ins_pipe( pipe_mtc1 );
%}
// Replicate short zero: move R0 straight into the FPR.
instruct Repl4S_zero(vecD dst, immI0 zero) %{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (ReplicateS zero));
  format %{ "dmtc1 R0, $dst\t! replicate4S zero" %}
  ins_encode %{
    __ dmtc1(R0, $dst$$FloatRegister);
  %}
  ins_pipe( pipe_mtc1 );
%}
// Replicate short -1: all-ones via nor(R0, R0).
instruct Repl4S_M1(vecD dst, immI_M1 M1) %{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (ReplicateS M1));
  format %{ "dmtc1 -1, $dst\t! replicate4S -1" %}
  ins_encode %{
    __ nor(AT, R0, R0);   // AT = ~0
    __ dmtc1(AT, $dst$$FloatRegister);
  %}
  ins_pipe( pipe_mtc1 );
%}
13906 // Replicate integer (4 byte) scalar to be vector
// Replicate an int into both 32-bit lanes: insert src into AT's low and
// high words (the two dins/dinsu together define all 64 bits of AT).
instruct Repl2I(vecD dst, mRegI src) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (ReplicateI src));
  format %{ "dins AT, $src, 0, 32\n\t"
            "dinsu AT, $src, 32, 32\n\t"
            "dmtc1 AT, $dst\t! replicate2I" %}
  ins_encode %{
    __ dins(AT, $src$$Register, 0, 32);    // low lane
    __ dinsu(AT, $src$$Register, 32, 32);  // high lane
    __ dmtc1(AT, $dst$$FloatRegister);
  %}
  ins_pipe( pipe_mtc1 );
%}
13921 // Replicate integer (4 byte) scalar immediate to be vector by loading from const table.
13922 instruct Repl2I_imm(vecD dst, immI con, mA7RegI tmp) %{
13923 predicate(n->as_Vector()->length() == 2);
13924 match(Set dst (ReplicateI con));
13925 effect(KILL tmp);
13926 format %{ "li32 AT, [$con], 32\n\t"
13927 "dinsu AT, AT\n\t"
13928 "dmtc1 AT, $dst\t! replicate2I($con)" %}
13929 ins_encode %{
13930 int val = $con$$constant;
13931 __ li32(AT, val);
13932 __ dinsu(AT, AT, 32, 32);
13933 __ dmtc1(AT, $dst$$FloatRegister);
13934 %}
13935 ins_pipe( pipe_mtc1 );
13936 %}
13938 // Replicate integer (4 byte) scalar zero to be vector
// Replicate int zero: move R0 straight into the FPR.
instruct Repl2I_zero(vecD dst, immI0 zero) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (ReplicateI zero));
  format %{ "dmtc1 R0, $dst\t! replicate2I zero" %}
  ins_encode %{
    __ dmtc1(R0, $dst$$FloatRegister);
  %}
  ins_pipe( pipe_mtc1 );
%}
13949 // Replicate integer (4 byte) scalar -1 to be vector
// Replicate int -1: all-ones via nor(R0, R0); clobbers AT.
instruct Repl2I_M1(vecD dst, immI_M1 M1) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (ReplicateI M1));
  format %{ "dmtc1 -1, $dst\t! replicate2I -1, use AT" %}
  ins_encode %{
    __ nor(AT, R0, R0);   // AT = ~0
    __ dmtc1(AT, $dst$$FloatRegister);
  %}
  ins_pipe( pipe_mtc1 );
%}
13961 // Replicate float (4 byte) scalar to be vector
// Replicate a float into both lanes using the paired-single conversion
// (cvt.ps.s packs two singles into one paired-single register).
instruct Repl2F(vecD dst, regF src) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (ReplicateF src));
  format %{ "cvt.ps $dst, $src, $src\t! replicate2F" %}
  ins_encode %{
    __ cvt_ps_s($dst$$FloatRegister, $src$$FloatRegister, $src$$FloatRegister);
  %}
  ins_pipe( pipe_slow );
%}
13972 // Replicate float (4 byte) scalar zero to be vector
// Replicate float zero: a 64-bit zero pattern is +0.0f in both lanes.
instruct Repl2F_zero(vecD dst, immF0 zero) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (ReplicateF zero));
  format %{ "dmtc1 R0, $dst\t! replicate2F zero" %}
  ins_encode %{
    __ dmtc1(R0, $dst$$FloatRegister);
  %}
  ins_pipe( pipe_mtc1 );
%}
13984 // ====================VECTOR ARITHMETIC=======================================
13986 // --------------------------------- ADD --------------------------------------
13988 // Floats vector add
13989 // kernel does not have emulation of PS instructions yet, so PS instructions is disabled.
// Packed 2xfloat add (two-operand form) using the paired-single add.ps.
// See the note above: add.ps requires kernel/JVM emulation support, so this
// path is only reached when vector optimization is enabled (MaxVectorSize=8).
instruct vadd2F(vecD dst, vecD src) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (AddVF dst src));
  format %{ "add.ps $dst,$src\t! add packed2F" %}
  ins_encode %{
    __ add_ps($dst$$FloatRegister, $dst$$FloatRegister, $src$$FloatRegister);
  %}
  ins_pipe( pipe_slow );
%}
// Packed 2xfloat add, three-operand form (dst need not alias a source).
instruct vadd2F3(vecD dst, vecD src1, vecD src2) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (AddVF src1 src2));
  format %{ "add.ps $dst,$src1,$src2\t! add packed2F" %}
  ins_encode %{
    __ add_ps($dst$$FloatRegister, $src1$$FloatRegister, $src2$$FloatRegister);
  %}
  ins_pipe( fpu_regF_regF );
%}
14010 // --------------------------------- SUB --------------------------------------
14012 // Floats vector sub
// Packed 2xfloat subtract using the paired-single sub.ps.
instruct vsub2F(vecD dst, vecD src) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (SubVF dst src));
  format %{ "sub.ps $dst,$src\t! sub packed2F" %}
  ins_encode %{
    __ sub_ps($dst$$FloatRegister, $dst$$FloatRegister, $src$$FloatRegister);
  %}
  ins_pipe( fpu_regF_regF );
%}
14023 // --------------------------------- MUL --------------------------------------
14025 // Floats vector mul
// Packed 2xfloat multiply (two-operand form) using mul.ps.
instruct vmul2F(vecD dst, vecD src) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (MulVF dst src));
  format %{ "mul.ps $dst, $src\t! mul packed2F" %}
  ins_encode %{
    __ mul_ps($dst$$FloatRegister, $dst$$FloatRegister, $src$$FloatRegister);
  %}
  ins_pipe( fpu_regF_regF );
%}
// Packed 2xfloat multiply, three-operand form (dst need not alias a source).
instruct vmul2F3(vecD dst, vecD src1, vecD src2) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (MulVF src1 src2));
  format %{ "mul.ps $dst, $src1, $src2\t! mul packed2F" %}
  ins_encode %{
    __ mul_ps($dst$$FloatRegister, $src1$$FloatRegister, $src2$$FloatRegister);
  %}
  ins_pipe( fpu_regF_regF );
%}
14046 // --------------------------------- DIV --------------------------------------
14047 // MIPS do not have div.ps
14049 // --------------------------------- MADD --------------------------------------
14050 // Floats vector madd
14051 //instruct vmadd2F(vecD dst, vecD src1, vecD src2, vecD src3) %{
14052 // predicate(n->as_Vector()->length() == 2);
14053 // match(Set dst (AddVF (MulVF src1 src2) src3));
14054 // ins_cost(50);
14055 // format %{ "madd.ps $dst, $src3, $src1, $src2\t! madd packed2F" %}
14056 // ins_encode %{
14057 // __ madd_ps($dst$$FloatRegister, $src3$$FloatRegister, $src1$$FloatRegister, $src2$$FloatRegister);
14058 // %}
14059 // ins_pipe( fpu_regF_regF );
14060 //%}
14063 //----------PEEPHOLE RULES-----------------------------------------------------
14064 // These must follow all instruction definitions as they use the names
14065 // defined in the instructions definitions.
14066 //
// peepmatch ( root_instr_name [preceding_instruction]* );
14068 //
14069 // peepconstraint %{
14070 // (instruction_number.operand_name relational_op instruction_number.operand_name
14071 // [, ...] );
14072 // // instruction numbers are zero-based using left to right order in peepmatch
14073 //
14074 // peepreplace ( instr_name ( [instruction_number.operand_name]* ) );
14075 // // provide an instruction_number.operand_name for each operand that appears
14076 // // in the replacement instruction's match rule
14077 //
14078 // ---------VM FLAGS---------------------------------------------------------
14079 //
14080 // All peephole optimizations can be turned off using -XX:-OptoPeephole
14081 //
14082 // Each peephole rule is given an identifying number starting with zero and
14083 // increasing by one in the order seen by the parser. An individual peephole
14084 // can be enabled, and all others disabled, by using -XX:OptoPeepholeAt=#
14085 // on the command-line.
14086 //
14087 // ---------CURRENT LIMITATIONS----------------------------------------------
14088 //
14089 // Only match adjacent instructions in same basic block
14090 // Only equality constraints
14091 // Only constraints between operands, not (0.dest_reg == EAX_enc)
14092 // Only one replacement instruction
14093 //
14094 // ---------EXAMPLE----------------------------------------------------------
14095 //
14096 // // pertinent parts of existing instructions in architecture description
14097 // instruct movI(eRegI dst, eRegI src) %{
14098 // match(Set dst (CopyI src));
14099 // %}
14100 //
14101 // instruct incI_eReg(eRegI dst, immI1 src, eFlagsReg cr) %{
14102 // match(Set dst (AddI dst src));
14103 // effect(KILL cr);
14104 // %}
14105 //
14106 // // Change (inc mov) to lea
14107 // peephole %{
// // increment preceded by register-register move
14109 // peepmatch ( incI_eReg movI );
14110 // // require that the destination register of the increment
14111 // // match the destination register of the move
14112 // peepconstraint ( 0.dst == 1.dst );
14113 // // construct a replacement instruction that sets
14114 // // the destination to ( move's source register + one )
14115 // peepreplace ( leaI_eReg_immI( 0.dst 1.src 0.src ) );
14116 // %}
14117 //
14118 // Implementation no longer uses movX instructions since
14119 // machine-independent system no longer uses CopyX nodes.
14120 //
14121 // peephole %{
14122 // peepmatch ( incI_eReg movI );
14123 // peepconstraint ( 0.dst == 1.dst );
14124 // peepreplace ( leaI_eReg_immI( 0.dst 1.src 0.src ) );
14125 // %}
14126 //
14127 // peephole %{
14128 // peepmatch ( decI_eReg movI );
14129 // peepconstraint ( 0.dst == 1.dst );
14130 // peepreplace ( leaI_eReg_immI( 0.dst 1.src 0.src ) );
14131 // %}
14132 //
14133 // peephole %{
14134 // peepmatch ( addI_eReg_imm movI );
14135 // peepconstraint ( 0.dst == 1.dst );
14136 // peepreplace ( leaI_eReg_immI( 0.dst 1.src 0.src ) );
14137 // %}
14138 //
14139 // peephole %{
14140 // peepmatch ( addP_eReg_imm movP );
14141 // peepconstraint ( 0.dst == 1.dst );
14142 // peepreplace ( leaP_eReg_immI( 0.dst 1.src 0.src ) );
14143 // %}
14145 // // Change load of spilled value to only a spill
14146 // instruct storeI(memory mem, eRegI src) %{
14147 // match(Set mem (StoreI mem src));
14148 // %}
14149 //
14150 // instruct loadI(eRegI dst, memory mem) %{
14151 // match(Set dst (LoadI mem));
14152 // %}
14153 //
14154 //peephole %{
14155 // peepmatch ( loadI storeI );
14156 // peepconstraint ( 1.src == 0.dst, 1.mem == 0.mem );
14157 // peepreplace ( storeI( 1.mem 1.mem 1.src ) );
14158 //%}
14160 //----------SMARTSPILL RULES---------------------------------------------------
14161 // These must follow all instruction definitions as they use the names
14162 // defined in the instructions definitions.