Sun, 05 Mar 2017 13:20:40 -0500
Adjust NativeCall for patchable_set48 on MIPS CPUs.
1 //
2 // Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved.
3 // Copyright (c) 2015, 2016, Loongson Technology. All rights reserved.
4 // DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5 //
6 // This code is free software; you can redistribute it and/or modify it
7 // under the terms of the GNU General Public License version 2 only, as
8 // published by the Free Software Foundation.
9 //
10 // This code is distributed in the hope that it will be useful, but WITHOUT
11 // ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 // FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
13 // version 2 for more details (a copy is included in the LICENSE file that
14 // accompanied this code).
15 //
16 // You should have received a copy of the GNU General Public License version
17 // 2 along with this work; if not, write to the Free Software Foundation,
18 // Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
19 //
20 // Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
21 // or visit www.oracle.com if you need additional information or have any
22 // questions.
23 //
24 //
26 // GodSon3 Architecture Description File
28 //----------REGISTER DEFINITION BLOCK------------------------------------------
29 // This information is used by the matcher and the register allocator to
30 // describe individual registers and classes of registers within the target
31 // architecture.
33 // format:
34 // reg_def name (call convention, c-call convention, ideal type, encoding);
35 // call convention :
36 // NS = No-Save
37 // SOC = Save-On-Call
38 // SOE = Save-On-Entry
39 // AS = Always-Save
40 // ideal type :
41 // see opto/opcodes.hpp for more info
42 // reg_class name (reg, ...);
43 // alloc_class name (reg, ...);
44 register %{
// Integer register file. Each 64-bit GPR is described as a pair of
// allocator slots: the base name is the low half and the _H entry is the
// upper half (VMReg ->next()), so long/pointer values occupy two slots.
// Save policy: SOC = save-on-call (caller-saved), SOE = save-on-entry
// (callee-saved), NS = no-save (never allocated across calls).
46 // General Registers
47 // Integer Registers
48 reg_def R0 ( NS, NS, Op_RegI, 0, VMRegImpl::Bad());
49 reg_def AT ( NS, NS, Op_RegI, 1, AT->as_VMReg());
50 reg_def AT_H ( NS, NS, Op_RegI, 1, AT->as_VMReg()->next());
51 reg_def V0 (SOC, SOC, Op_RegI, 2, V0->as_VMReg());
52 reg_def V0_H (SOC, SOC, Op_RegI, 2, V0->as_VMReg()->next());
53 reg_def V1 (SOC, SOC, Op_RegI, 3, V1->as_VMReg());
54 reg_def V1_H (SOC, SOC, Op_RegI, 3, V1->as_VMReg()->next());
55 reg_def A0 (SOC, SOC, Op_RegI, 4, A0->as_VMReg());
56 reg_def A0_H (SOC, SOC, Op_RegI, 4, A0->as_VMReg()->next());
57 reg_def A1 (SOC, SOC, Op_RegI, 5, A1->as_VMReg());
58 reg_def A1_H (SOC, SOC, Op_RegI, 5, A1->as_VMReg()->next());
59 reg_def A2 (SOC, SOC, Op_RegI, 6, A2->as_VMReg());
60 reg_def A2_H (SOC, SOC, Op_RegI, 6, A2->as_VMReg()->next());
61 reg_def A3 (SOC, SOC, Op_RegI, 7, A3->as_VMReg());
62 reg_def A3_H (SOC, SOC, Op_RegI, 7, A3->as_VMReg()->next());
63 reg_def A4 (SOC, SOC, Op_RegI, 8, A4->as_VMReg());
64 reg_def A4_H (SOC, SOC, Op_RegI, 8, A4->as_VMReg()->next());
65 reg_def A5 (SOC, SOC, Op_RegI, 9, A5->as_VMReg());
66 reg_def A5_H (SOC, SOC, Op_RegI, 9, A5->as_VMReg()->next());
67 reg_def A6 (SOC, SOC, Op_RegI, 10, A6->as_VMReg());
68 reg_def A6_H (SOC, SOC, Op_RegI, 10, A6->as_VMReg()->next());
69 reg_def A7 (SOC, SOC, Op_RegI, 11, A7->as_VMReg());
70 reg_def A7_H (SOC, SOC, Op_RegI, 11, A7->as_VMReg()->next());
71 reg_def T0 (SOC, SOC, Op_RegI, 12, T0->as_VMReg());
72 reg_def T0_H (SOC, SOC, Op_RegI, 12, T0->as_VMReg()->next());
73 reg_def T1 (SOC, SOC, Op_RegI, 13, T1->as_VMReg());
74 reg_def T1_H (SOC, SOC, Op_RegI, 13, T1->as_VMReg()->next());
75 reg_def T2 (SOC, SOC, Op_RegI, 14, T2->as_VMReg());
76 reg_def T2_H (SOC, SOC, Op_RegI, 14, T2->as_VMReg()->next());
77 reg_def T3 (SOC, SOC, Op_RegI, 15, T3->as_VMReg());
78 reg_def T3_H (SOC, SOC, Op_RegI, 15, T3->as_VMReg()->next());
79 reg_def S0 (SOC, SOE, Op_RegI, 16, S0->as_VMReg());
80 reg_def S0_H (SOC, SOE, Op_RegI, 16, S0->as_VMReg()->next());
81 reg_def S1 (SOC, SOE, Op_RegI, 17, S1->as_VMReg());
82 reg_def S1_H (SOC, SOE, Op_RegI, 17, S1->as_VMReg()->next());
83 reg_def S2 (SOC, SOE, Op_RegI, 18, S2->as_VMReg());
84 reg_def S2_H (SOC, SOE, Op_RegI, 18, S2->as_VMReg()->next());
85 reg_def S3 (SOC, SOE, Op_RegI, 19, S3->as_VMReg());
86 reg_def S3_H (SOC, SOE, Op_RegI, 19, S3->as_VMReg()->next());
87 reg_def S4 (SOC, SOE, Op_RegI, 20, S4->as_VMReg());
88 reg_def S4_H (SOC, SOE, Op_RegI, 20, S4->as_VMReg()->next());
89 reg_def S5 (SOC, SOE, Op_RegI, 21, S5->as_VMReg());
90 reg_def S5_H (SOC, SOE, Op_RegI, 21, S5->as_VMReg()->next());
91 reg_def S6 (SOC, SOE, Op_RegI, 22, S6->as_VMReg());
92 reg_def S6_H (SOC, SOE, Op_RegI, 22, S6->as_VMReg()->next());
93 reg_def S7 (SOC, SOE, Op_RegI, 23, S7->as_VMReg());
94 reg_def S7_H (SOC, SOE, Op_RegI, 23, S7->as_VMReg()->next());
95 reg_def T8 (SOC, SOC, Op_RegI, 24, T8->as_VMReg());
96 reg_def T8_H (SOC, SOC, Op_RegI, 24, T8->as_VMReg()->next());
97 reg_def T9 (SOC, SOC, Op_RegI, 25, T9->as_VMReg());
98 reg_def T9_H (SOC, SOC, Op_RegI, 25, T9->as_VMReg()->next());
100 // Special Registers
// NOTE(review): K0/K1 are defined without _H pairs, unlike every other
// GPR here — presumably because they are kernel-reserved and never
// allocated; confirm this asymmetry is intentional.
101 reg_def K0 ( NS, NS, Op_RegI, 26, K0->as_VMReg());
102 reg_def K1 ( NS, NS, Op_RegI, 27, K1->as_VMReg());
103 reg_def GP ( NS, NS, Op_RegI, 28, GP->as_VMReg());
104 reg_def GP_H ( NS, NS, Op_RegI, 28, GP->as_VMReg()->next());
105 reg_def SP ( NS, NS, Op_RegI, 29, SP->as_VMReg());
106 reg_def SP_H ( NS, NS, Op_RegI, 29, SP->as_VMReg()->next());
107 reg_def FP ( NS, NS, Op_RegI, 30, FP->as_VMReg());
108 reg_def FP_H ( NS, NS, Op_RegI, 30, FP->as_VMReg()->next());
109 reg_def RA ( NS, NS, Op_RegI, 31, RA->as_VMReg());
110 reg_def RA_H ( NS, NS, Op_RegI, 31, RA->as_VMReg()->next());
112 // Floating registers.
// FPU register file F0-F31, all caller-saved (SOC). As with the GPRs,
// the _H entry is the upper half of the 64-bit register (VMReg ->next()).
113 reg_def F0 ( SOC, SOC, Op_RegF, 0, F0->as_VMReg());
114 reg_def F0_H ( SOC, SOC, Op_RegF, 0, F0->as_VMReg()->next());
115 reg_def F1 ( SOC, SOC, Op_RegF, 1, F1->as_VMReg());
116 reg_def F1_H ( SOC, SOC, Op_RegF, 1, F1->as_VMReg()->next());
117 reg_def F2 ( SOC, SOC, Op_RegF, 2, F2->as_VMReg());
118 reg_def F2_H ( SOC, SOC, Op_RegF, 2, F2->as_VMReg()->next());
119 reg_def F3 ( SOC, SOC, Op_RegF, 3, F3->as_VMReg());
120 reg_def F3_H ( SOC, SOC, Op_RegF, 3, F3->as_VMReg()->next());
121 reg_def F4 ( SOC, SOC, Op_RegF, 4, F4->as_VMReg());
122 reg_def F4_H ( SOC, SOC, Op_RegF, 4, F4->as_VMReg()->next());
123 reg_def F5 ( SOC, SOC, Op_RegF, 5, F5->as_VMReg());
124 reg_def F5_H ( SOC, SOC, Op_RegF, 5, F5->as_VMReg()->next());
125 reg_def F6 ( SOC, SOC, Op_RegF, 6, F6->as_VMReg());
126 reg_def F6_H ( SOC, SOC, Op_RegF, 6, F6->as_VMReg()->next());
127 reg_def F7 ( SOC, SOC, Op_RegF, 7, F7->as_VMReg());
128 reg_def F7_H ( SOC, SOC, Op_RegF, 7, F7->as_VMReg()->next());
129 reg_def F8 ( SOC, SOC, Op_RegF, 8, F8->as_VMReg());
130 reg_def F8_H ( SOC, SOC, Op_RegF, 8, F8->as_VMReg()->next());
131 reg_def F9 ( SOC, SOC, Op_RegF, 9, F9->as_VMReg());
132 reg_def F9_H ( SOC, SOC, Op_RegF, 9, F9->as_VMReg()->next());
133 reg_def F10 ( SOC, SOC, Op_RegF, 10, F10->as_VMReg());
134 reg_def F10_H ( SOC, SOC, Op_RegF, 10, F10->as_VMReg()->next());
135 reg_def F11 ( SOC, SOC, Op_RegF, 11, F11->as_VMReg());
136 reg_def F11_H ( SOC, SOC, Op_RegF, 11, F11->as_VMReg()->next());
137 reg_def F12 ( SOC, SOC, Op_RegF, 12, F12->as_VMReg());
138 reg_def F12_H ( SOC, SOC, Op_RegF, 12, F12->as_VMReg()->next());
139 reg_def F13 ( SOC, SOC, Op_RegF, 13, F13->as_VMReg());
140 reg_def F13_H ( SOC, SOC, Op_RegF, 13, F13->as_VMReg()->next());
141 reg_def F14 ( SOC, SOC, Op_RegF, 14, F14->as_VMReg());
142 reg_def F14_H ( SOC, SOC, Op_RegF, 14, F14->as_VMReg()->next());
143 reg_def F15 ( SOC, SOC, Op_RegF, 15, F15->as_VMReg());
144 reg_def F15_H ( SOC, SOC, Op_RegF, 15, F15->as_VMReg()->next());
145 reg_def F16 ( SOC, SOC, Op_RegF, 16, F16->as_VMReg());
146 reg_def F16_H ( SOC, SOC, Op_RegF, 16, F16->as_VMReg()->next());
147 reg_def F17 ( SOC, SOC, Op_RegF, 17, F17->as_VMReg());
148 reg_def F17_H ( SOC, SOC, Op_RegF, 17, F17->as_VMReg()->next());
149 reg_def F18 ( SOC, SOC, Op_RegF, 18, F18->as_VMReg());
150 reg_def F18_H ( SOC, SOC, Op_RegF, 18, F18->as_VMReg()->next());
151 reg_def F19 ( SOC, SOC, Op_RegF, 19, F19->as_VMReg());
152 reg_def F19_H ( SOC, SOC, Op_RegF, 19, F19->as_VMReg()->next());
153 reg_def F20 ( SOC, SOC, Op_RegF, 20, F20->as_VMReg());
154 reg_def F20_H ( SOC, SOC, Op_RegF, 20, F20->as_VMReg()->next());
155 reg_def F21 ( SOC, SOC, Op_RegF, 21, F21->as_VMReg());
156 reg_def F21_H ( SOC, SOC, Op_RegF, 21, F21->as_VMReg()->next());
157 reg_def F22 ( SOC, SOC, Op_RegF, 22, F22->as_VMReg());
158 reg_def F22_H ( SOC, SOC, Op_RegF, 22, F22->as_VMReg()->next());
159 reg_def F23 ( SOC, SOC, Op_RegF, 23, F23->as_VMReg());
160 reg_def F23_H ( SOC, SOC, Op_RegF, 23, F23->as_VMReg()->next());
161 reg_def F24 ( SOC, SOC, Op_RegF, 24, F24->as_VMReg());
162 reg_def F24_H ( SOC, SOC, Op_RegF, 24, F24->as_VMReg()->next());
163 reg_def F25 ( SOC, SOC, Op_RegF, 25, F25->as_VMReg());
164 reg_def F25_H ( SOC, SOC, Op_RegF, 25, F25->as_VMReg()->next());
165 reg_def F26 ( SOC, SOC, Op_RegF, 26, F26->as_VMReg());
166 reg_def F26_H ( SOC, SOC, Op_RegF, 26, F26->as_VMReg()->next());
167 reg_def F27 ( SOC, SOC, Op_RegF, 27, F27->as_VMReg());
168 reg_def F27_H ( SOC, SOC, Op_RegF, 27, F27->as_VMReg()->next());
169 reg_def F28 ( SOC, SOC, Op_RegF, 28, F28->as_VMReg());
170 reg_def F28_H ( SOC, SOC, Op_RegF, 28, F28->as_VMReg()->next());
171 reg_def F29 ( SOC, SOC, Op_RegF, 29, F29->as_VMReg());
172 reg_def F29_H ( SOC, SOC, Op_RegF, 29, F29->as_VMReg()->next());
173 reg_def F30 ( SOC, SOC, Op_RegF, 30, F30->as_VMReg());
174 reg_def F30_H ( SOC, SOC, Op_RegF, 30, F30->as_VMReg()->next());
175 reg_def F31 ( SOC, SOC, Op_RegF, 31, F31->as_VMReg());
176 reg_def F31_H ( SOC, SOC, Op_RegF, 31, F31->as_VMReg()->next());
179 // ----------------------------
180 // Special Registers
181 // Condition Codes Flag Registers
182 reg_def MIPS_FLAG (SOC, SOC, Op_RegFlags, 1, as_Register(1)->as_VMReg());
183 //S6 is used for get_thread(S6)
184 //S5 is used for heapbase of compressed oop
185 alloc_class chunk0(
// Allocation order for integer registers: callee-saved S* registers
// first, then temporaries and argument registers; GP/RA/SP/FP are listed
// last (they are NS and never handed out by the allocator).
186 S7, S7_H,
187 S0, S0_H,
188 S1, S1_H,
189 S2, S2_H,
190 S4, S4_H,
191 S5, S5_H,
192 S6, S6_H,
193 S3, S3_H,
194 T2, T2_H,
195 T3, T3_H,
196 T8, T8_H,
197 T9, T9_H,
198 T1, T1_H, // inline_cache_reg
199 V1, V1_H,
200 A7, A7_H,
201 A6, A6_H,
202 A5, A5_H,
203 A4, A4_H,
204 V0, V0_H,
205 A3, A3_H,
206 A2, A2_H,
207 A1, A1_H,
208 A0, A0_H,
209 T0, T0_H,
210 GP, GP_H, // FIX: separator comma was missing before RA, RA_H
211 RA, RA_H,
212 SP, SP_H, // stack_pointer
213 FP, FP_H // frame_pointer
214 );
// Allocation order for the FPU chunk; chunk2 holds only the flags register.
216 alloc_class chunk1( F0, F0_H,
217 F1, F1_H,
218 F2, F2_H,
219 F3, F3_H,
220 F4, F4_H,
221 F5, F5_H,
222 F6, F6_H,
223 F7, F7_H,
224 F8, F8_H,
225 F9, F9_H,
226 F10, F10_H,
227 F11, F11_H,
228 F20, F20_H,
229 F21, F21_H,
230 F22, F22_H,
231 F23, F23_H,
232 F24, F24_H,
233 F25, F25_H,
234 F26, F26_H,
235 F27, F27_H,
236 F28, F28_H,
237 F19, F19_H,
238 F18, F18_H,
239 F17, F17_H,
240 F16, F16_H,
241 F15, F15_H,
242 F14, F14_H,
243 F13, F13_H,
244 F12, F12_H,
245 F29, F29_H,
246 F30, F30_H,
247 F31, F31_H);
249 alloc_class chunk2(MIPS_FLAG);
// Single-register and per-register-pair operand classes used by the
// instruction patterns below; *_long_reg classes pair the low/high halves.
251 reg_class s_reg( S0, S1, S2, S3, S4, S5, S6, S7 );
252 reg_class s0_reg( S0 );
253 reg_class s1_reg( S1 );
254 reg_class s2_reg( S2 );
255 reg_class s3_reg( S3 );
256 reg_class s4_reg( S4 );
257 reg_class s5_reg( S5 );
258 reg_class s6_reg( S6 );
259 reg_class s7_reg( S7 );
261 reg_class t_reg( T0, T1, T2, T3, T8, T9 );
262 reg_class t0_reg( T0 );
263 reg_class t1_reg( T1 );
264 reg_class t2_reg( T2 );
265 reg_class t3_reg( T3 );
266 reg_class t8_reg( T8 );
267 reg_class t9_reg( T9 );
269 reg_class a_reg( A0, A1, A2, A3, A4, A5, A6, A7 );
270 reg_class a0_reg( A0 );
271 reg_class a1_reg( A1 );
272 reg_class a2_reg( A2 );
273 reg_class a3_reg( A3 );
274 reg_class a4_reg( A4 );
275 reg_class a5_reg( A5 );
276 reg_class a6_reg( A6 );
277 reg_class a7_reg( A7 );
279 reg_class v0_reg( V0 );
280 reg_class v1_reg( V1 );
282 reg_class sp_reg( SP, SP_H );
283 reg_class fp_reg( FP, FP_H );
285 reg_class mips_flags(MIPS_FLAG);
287 reg_class v0_long_reg( V0, V0_H );
288 reg_class v1_long_reg( V1, V1_H );
289 reg_class a0_long_reg( A0, A0_H );
290 reg_class a1_long_reg( A1, A1_H );
291 reg_class a2_long_reg( A2, A2_H );
292 reg_class a3_long_reg( A3, A3_H );
293 reg_class a4_long_reg( A4, A4_H );
294 reg_class a5_long_reg( A5, A5_H );
295 reg_class a6_long_reg( A6, A6_H );
296 reg_class a7_long_reg( A7, A7_H );
297 reg_class t0_long_reg( T0, T0_H );
298 reg_class t1_long_reg( T1, T1_H );
299 reg_class t2_long_reg( T2, T2_H );
300 reg_class t3_long_reg( T3, T3_H );
301 reg_class t8_long_reg( T8, T8_H );
302 reg_class t9_long_reg( T9, T9_H );
303 reg_class s0_long_reg( S0, S0_H );
304 reg_class s1_long_reg( S1, S1_H );
305 reg_class s2_long_reg( S2, S2_H );
306 reg_class s3_long_reg( S3, S3_H );
307 reg_class s4_long_reg( S4, S4_H );
308 reg_class s5_long_reg( S5, S5_H );
309 reg_class s6_long_reg( S6, S6_H );
310 reg_class s7_long_reg( S7, S7_H );
// General int class. Note S5/S6 are excluded (heapbase / thread, see
// comments above) and so are T9 (scratch for calls) and V1 is included.
312 reg_class int_reg( S7, S0, S1, S2, S4, S3, T8, T2, T3, T1, V1, A7, A6, A5, A4, V0, A3, A2, A1, A0, T0 );
// Same as int_reg minus the argument registers (A0-A7).
314 reg_class no_Ax_int_reg( S7, S0, S1, S2, S4, S3, T8, T2, T3, T1, V1, V0, T0 );
// Pointer class: register pairs usable for 64-bit pointers; excludes V1.
316 reg_class p_reg(
317 S7, S7_H,
318 S0, S0_H,
319 S1, S1_H,
320 S2, S2_H,
321 S4, S4_H,
322 S3, S3_H,
323 T8, T8_H,
324 T2, T2_H,
325 T3, T3_H,
326 T1, T1_H,
327 A7, A7_H,
328 A6, A6_H,
329 A5, A5_H,
330 A4, A4_H,
331 A3, A3_H,
332 A2, A2_H,
333 A1, A1_H,
334 A0, A0_H,
335 T0, T0_H
336 );
// Pointer class with T8 removed (for patterns that clobber T8).
338 reg_class no_T8_p_reg(
339 S7, S7_H,
340 S0, S0_H,
341 S1, S1_H,
342 S2, S2_H,
343 S4, S4_H,
344 S3, S3_H,
345 T2, T2_H,
346 T3, T3_H,
347 T1, T1_H,
348 A7, A7_H,
349 A6, A6_H,
350 A5, A5_H,
351 A4, A4_H,
352 A3, A3_H,
353 A2, A2_H,
354 A1, A1_H,
355 A0, A0_H,
356 T0, T0_H
357 );
// Long class: same pair set as p_reg.
359 reg_class long_reg(
360 S7, S7_H,
361 S0, S0_H,
362 S1, S1_H,
363 S2, S2_H,
364 S4, S4_H,
365 S3, S3_H,
366 T8, T8_H,
367 T2, T2_H,
368 T3, T3_H,
369 T1, T1_H,
370 A7, A7_H,
371 A6, A6_H,
372 A5, A5_H,
373 A4, A4_H,
374 A3, A3_H,
375 A2, A2_H,
376 A1, A1_H,
377 A0, A0_H,
378 T0, T0_H
379 );
382 // Floating point registers.
383 // 2012/8/23 Fu: F30/F31 are used as temporary registers in D2I
384 // 2016/12/1 aoqi: F31 are not used as temporary registers in D2I
// Float class: all FPRs except F30 (reserved as a D2I temporary, see
// comments above). FIX: a separator comma was missing between F17 and F18.
385 reg_class flt_reg( F0, F1, F2, F3, F4, F5, F6, F7, F8, F9, F10, F11, F12, F13, F14, F15, F16, F17, F18, F19, F20, F21, F22, F23, F24, F25, F26, F27, F28, F29, F31);
// Double class: low/high pairs for the same set (F30 excluded).
386 reg_class dbl_reg( F0, F0_H,
387 F1, F1_H,
388 F2, F2_H,
389 F3, F3_H,
390 F4, F4_H,
391 F5, F5_H,
392 F6, F6_H,
393 F7, F7_H,
394 F8, F8_H,
395 F9, F9_H,
396 F10, F10_H,
397 F11, F11_H,
398 F12, F12_H,
399 F13, F13_H,
400 F14, F14_H,
401 F15, F15_H,
402 F16, F16_H,
403 F17, F17_H,
404 F18, F18_H,
405 F19, F19_H,
406 F20, F20_H,
407 F21, F21_H,
408 F22, F22_H,
409 F23, F23_H,
410 F24, F24_H,
411 F25, F25_H,
412 F26, F26_H,
413 F27, F27_H,
414 F28, F28_H,
415 F29, F29_H,
416 F31, F31_H);
// Argument classes: first/second FP arguments per the o32/n64 convention
// used by this port (F12, F14).
418 reg_class flt_arg0( F12 );
419 reg_class dbl_arg0( F12, F12_H );
420 reg_class dbl_arg1( F14, F14_H );
422 %}
424 //----------DEFINITION BLOCK---------------------------------------------------
425 // Define name --> value mappings to inform the ADLC of an integer valued name
426 // Current support includes integer values in the range [0, 0x7FFFFFFF]
427 // Format:
428 // int_def <name> ( <int_value>, <expression>);
429 // Generated Code in ad_<arch>.hpp
430 // #define <name> (<expression>)
431 // // value == <int_value>
432 // Generated code in ad_<arch>.cpp adlc_verification()
433 // assert( <name> == <int_value>, "Expect (<expression>) to equal <int_value>");
434 //
// Cost constants consumed by the matcher's instruction-selection costing.
435 definitions %{
436 int_def DEFAULT_COST ( 100, 100);
437 int_def HUGE_COST (1000000, 1000000);
439 // Memory refs are twice as expensive as run-of-the-mill.
440 int_def MEMORY_REF_COST ( 200, DEFAULT_COST * 2);
442 // Branches are even more expensive.
443 int_def BRANCH_COST ( 300, DEFAULT_COST * 3);
444 // we use jr instruction to construct call, so more expensive
445 // by yjl 2/28/2006
446 int_def CALL_COST ( 500, DEFAULT_COST * 5);
447 /*
448 int_def EQUAL ( 1, 1 );
449 int_def NOT_EQUAL ( 2, 2 );
450 int_def GREATER ( 3, 3 );
451 int_def GREATER_EQUAL ( 4, 4 );
452 int_def LESS ( 5, 5 );
453 int_def LESS_EQUAL ( 6, 6 );
454 */
455 %}
459 //----------SOURCE BLOCK-------------------------------------------------------
460 // This is a block of C++ code which provides values, functions, and
461 // definitions necessary in the rest of the architecture description
463 source_hpp %{
464 // Header information of the source block.
465 // Method declarations/definitions which are used outside
466 // the ad-scope can conveniently be defined here.
467 //
468 // To keep related declarations/definitions/uses close together,
469 // we switch between source %{ }% and source_hpp %{ }% freely as needed.
// This port emits no trampoline stubs, so both queries report zero;
// shorten_branches then never reserves trampoline space.
471 class CallStubImpl {
473 //--------------------------------------------------------------
474 //---< Used for optimization in Compile::shorten_branches >---
475 //--------------------------------------------------------------
477 public:
478 // Size of call trampoline stub.
479 static uint size_call_trampoline() {
480 return 0; // no call trampolines on this platform
481 }
483 // number of relocations needed by a call trampoline stub
484 static uint reloc_call_trampoline() {
485 return 0; // no call trampolines on this platform
486 }
487 };
// Sizing/emission interface for the exception and deopt handler stubs.
// Both handlers are emitted as a full NativeCall-sized sequence rounded
// up to 16 bytes so the patchable call site stays cache-line aligned.
489 class HandlerImpl {
491 public:
493 static int emit_exception_handler(CodeBuffer &cbuf);
494 static int emit_deopt_handler(CodeBuffer& cbuf);
496 static uint size_exception_handler() {
497 // NativeCall instruction size is the same as NativeJump.
498 // exception handler starts out as jump and can be patched to
499 // a call by deoptimization. (4932387)
500 // Note that this value is also credited (in output.cpp) to
501 // the size of the code section.
502 // return NativeJump::instruction_size;
503 int size = NativeCall::instruction_size;
504 return round_to(size, 16);
505 }
507 #ifdef _LP64
508 static uint size_deopt_handler() {
509 int size = NativeCall::instruction_size;
510 return round_to(size, 16);
511 }
512 #else
513 static uint size_deopt_handler() {
514 // NativeCall instruction size is the same as NativeJump.
515 // exception handler starts out as jump and can be patched to
516 // a call by deoptimization. (4932387)
517 // Note that this value is also credited (in output.cpp) to
518 // the size of the code section.
519 return 5 + NativeJump::instruction_size; // pushl(); jmp;
520 }
521 #endif
522 };
524 %} // end source_hpp
526 source %{
528 #define NO_INDEX 0
529 #define RELOC_IMM64 Assembler::imm_operand
530 #define RELOC_DISP32 Assembler::disp32_operand
533 #define __ _masm.
536 // Emit exception handler code.
537 // Stuff framesize into a register and call a VM stub routine.
// Emits: li48(T9, exception_blob entry) ; jr T9 ; nop, padded to 16 bytes.
// Returns the stub's offset in the code buffer, or 0 if the buffer could
// not be expanded. Uses jr (not jalr) — see the FIXME below for why.
538 int HandlerImpl::emit_exception_handler(CodeBuffer& cbuf) {
539 /*
540 // Note that the code buffer's insts_mark is always relative to insts.
541 // That's why we must use the macroassembler to generate a handler.
542 MacroAssembler _masm(&cbuf);
543 address base = __ start_a_stub(size_exception_handler());
544 if (base == NULL) return 0; // CodeBuffer::expand failed
545 int offset = __ offset();
546 __ jump(RuntimeAddress(OptoRuntime::exception_blob()->entry_point()));
547 assert(__ offset() - offset <= (int) size_exception_handler(), "overflow");
548 __ end_a_stub();
549 return offset;
550 */
551 // Note that the code buffer's insts_mark is always relative to insts.
552 // That's why we must use the macroassembler to generate a handler.
553 MacroAssembler _masm(&cbuf);
554 address base =
555 __ start_a_stub(size_exception_handler());
556 if (base == NULL) return 0; // CodeBuffer::expand failed
557 int offset = __ offset();
559 __ block_comment("; emit_exception_handler");
561 /* 2012/9/25 FIXME Jin: According to X86, we should use direct jump.
562 * * However, this will trigger an assert after the 40th method:
563 * *
564 * * 39 b java.lang.Throwable::<init> (25 bytes)
565 * * --- ns java.lang.Throwable::fillInStackTrace
566 * * 40 !b java.net.URLClassLoader::findClass (29 bytes)
567 * * /vm/opto/runtime.cpp, 900 , assert(caller.is_compiled_frame(),"must be")
568 * * 40 made not entrant (2) java.net.URLClassLoader::findClass (29 bytes)
569 * *
570 * * If we change from JR to JALR, the assert will disappear, but WebClient will
571 * * fail after the 403th method with unknown reason.
572 * */
573 __ li48(T9, (long)OptoRuntime::exception_blob()->entry_point());
574 __ jr(T9);
575 __ delayed()->nop();
576 __ align(16);
577 assert(__ offset() - offset <= (int) size_exception_handler(), "overflow");
578 __ end_a_stub();
579 return offset;
580 }
582 // Emit deopt handler code.
// Emits a patchable runtime call to the deopt blob's unpack entry:
// li48(T9, unpack) ; jalr T9 ; nop, padded to 16 bytes. jalr (not jr) is
// used here so the return address is captured for the deopt machinery.
// Returns the stub's offset, or 0 if the buffer could not be expanded.
583 int HandlerImpl::emit_deopt_handler(CodeBuffer& cbuf) {
584 // Note that the code buffer's insts_mark is always relative to insts.
585 // That's why we must use the macroassembler to generate a handler.
586 MacroAssembler _masm(&cbuf);
587 address base =
588 __ start_a_stub(size_deopt_handler());
590 // FIXME
591 if (base == NULL) return 0; // CodeBuffer::expand failed
592 int offset = __ offset();
594 __ block_comment("; emit_deopt_handler");
// Mark the call site so the relocation below is anchored to it.
596 cbuf.set_insts_mark();
597 __ relocate(relocInfo::runtime_call_type);
599 __ li48(T9, (long)SharedRuntime::deopt_blob()->unpack());
600 __ jalr(T9);
601 __ delayed()->nop();
602 __ align(16);
603 assert(__ offset() - offset <= (int) size_deopt_handler(), "overflow");
604 __ end_a_stub();
605 return offset;
606 }
609 const bool Matcher::match_rule_supported(int opcode) {
610 if (!has_match_rule(opcode))
611 return false;
613 switch (opcode) {
614 //Op_CountLeadingZerosI Op_CountLeadingZerosL can be deleted, all MIPS CPUs support clz & dclz.
615 case Op_CountLeadingZerosI:
616 case Op_CountLeadingZerosL:
617 if (!UseCountLeadingZerosInstruction)
618 return false;
619 break;
620 case Op_CountTrailingZerosI:
621 case Op_CountTrailingZerosL:
622 if (!UseCountTrailingZerosInstruction)
623 return false;
624 break;
625 }
627 return true; // Per default match rules are supported.
628 }
630 //FIXME
631 // emit call stub, compiled java to interpreter
// Emits the static-call stub: materialize the (initially null) methodOop
// into S3, then jump to an unresolved target (-1) via AT. Both halves are
// later patched when the call is resolved. The relocation/emission order
// below is load-bearing — do not reorder.
632 void emit_java_to_interp(CodeBuffer &cbuf ) {
633 // Stub is fixed up when the corresponding call is converted from calling
634 // compiled code to calling interpreted code.
635 // mov rbx,0
636 // jmp -1
638 address mark = cbuf.insts_mark(); // get mark within main instrs section
640 // Note that the code buffer's insts_mark is always relative to insts.
641 // That's why we must use the macroassembler to generate a stub.
642 MacroAssembler _masm(&cbuf);
644 address base =
645 __ start_a_stub(Compile::MAX_stubs_size);
646 if (base == NULL) return; // CodeBuffer::expand failed
647 // static stub relocation stores the instruction address of the call
649 __ relocate(static_stub_Relocation::spec(mark), 0);
651 /* 2012/10/29 Jin: Rmethod contains methodOop, it should be relocated for GC */
652 /*
653 int oop_index = __ oop_recorder()->allocate_index(NULL);
654 RelocationHolder rspec = oop_Relocation::spec(oop_index);
655 __ relocate(rspec);
656 */
658 // static stub relocation also tags the methodOop in the code-stream.
659 __ li48(S3, (long)0);
660 // This is recognized as unresolved by relocs/nativeInst/ic code
662 __ relocate(relocInfo::runtime_call_type);
664 cbuf.set_insts_mark();
665 address call_pc = (address)-1;
666 __ li48(AT, (long)call_pc);
667 __ jr(AT);
668 __ nop();
669 __ align(16);
670 __ end_a_stub();
671 // Update current stubs pointer and restore code_end.
672 }
674 // size of call stub, compiled java to interpreter
// li48(S3, methodOop) is 4 instructions, plus the unresolved NativeCall
// sequence; rounded up to the 16-byte stub alignment used above.
675 uint size_java_to_interp() {
676 int size = 4 * 4 + NativeCall::instruction_size; // sizeof(li48) + NativeCall::instruction_size
677 return round_to(size, 16);
678 }
680 // relocation entries for call stub, compiled java to interpreter
681 uint reloc_java_to_interp() {
682 return 16; // in emit_java_to_interp + in Java_Static_Call
683 }
685 bool Matcher::is_short_branch_offset(int rule, int br_size, int offset) {
686 if( Assembler::is_simm16(offset) ) return true;
687 else
688 {
689 assert(false, "Not implemented yet !" );
690 Unimplemented();
691 }
692 }
695 // No additional cost for CMOVL.
696 const int Matcher::long_cmove_cost() { return 0; }
698 // No CMOVF/CMOVD with SSE2
// NOTE(review): the SSE2 comment above is copied from the x86 port; the
// effect here is simply that FP cmove is disabled (ConditionalMoveLimit).
699 const int Matcher::float_cmove_cost() { return ConditionalMoveLimit; }
701 // Does the CPU require late expand (see block.cpp for description of late expand)?
702 const bool Matcher::require_postalloc_expand = false;
704 // Should the Matcher clone shifts on addressing modes, expecting them
705 // to be subsumed into complex addressing expressions or compute them
706 // into registers? True for Intel but false for most RISCs
707 const bool Matcher::clone_shift_expressions = false;
709 // Do we need to mask the count passed to shift instructions or does
710 // the cpu only look at the lower 5/6 bits anyway?
711 const bool Matcher::need_masked_shift_count = false;
// Narrow-oop/klass decode is never folded into an address expression.
713 bool Matcher::narrow_oop_use_complex_address() {
714 NOT_LP64(ShouldNotCallThis());
715 assert(UseCompressedOops, "only for compressed oops code");
716 return false;
717 }
719 bool Matcher::narrow_klass_use_complex_address() {
720 NOT_LP64(ShouldNotCallThis());
721 assert(UseCompressedClassPointers, "only for compressed klass code");
722 return false;
723 }
725 // This is UltraSparc specific, true just means we have fast l2f conversion
726 const bool Matcher::convL2FSupported(void) {
727 return true;
728 }
730 // Max vector size in bytes. 0 if not supported.
// Only 8-byte (double-word) vectors are modelled on this port.
731 const int Matcher::vector_width_in_bytes(BasicType bt) {
732 assert(MaxVectorSize == 8, "");
733 return 8;
734 }
736 // Vector ideal reg
737 const int Matcher::vector_ideal_reg(int size) {
738 assert(MaxVectorSize == 8, "");
739 switch(size) {
740 case 8: return Op_VecD;
741 }
742 ShouldNotReachHere();
743 return 0;
744 }
746 // Only lowest bits of xmm reg are used for vector shift count.
// Vector shifts are unsupported on this port — reaching here is fatal.
747 const int Matcher::vector_shift_count_ideal_reg(int size) {
748 fatal("vector shift is not supported");
749 return Node::NotAMachineReg;
750 }
752 // Limits on vector size (number of elements) loaded into vector.
753 const int Matcher::max_vector_size(const BasicType bt) {
754 assert(is_java_primitive(bt), "only primitive type vectors");
755 return vector_width_in_bytes(bt)/type2aelembytes(bt);
756 }
758 const int Matcher::min_vector_size(const BasicType bt) {
759 return max_vector_size(bt); // Same as max.
760 }
762 // MIPS supports misaligned vectors store/load? FIXME
763 const bool Matcher::misaligned_vectors_ok() {
764 return false;
765 //return !AlignVector; // can be changed by flag
766 }
// divmodI/divmodL fusion is not used on this port, so the projection
// masks must never be requested.
768 // Register for DIVI projection of divmodI
769 RegMask Matcher::divI_proj_mask() {
770 ShouldNotReachHere();
771 return RegMask();
772 }
774 // Register for MODI projection of divmodI
775 RegMask Matcher::modI_proj_mask() {
776 ShouldNotReachHere();
777 return RegMask();
778 }
780 // Register for DIVL projection of divmodL
781 RegMask Matcher::divL_proj_mask() {
782 ShouldNotReachHere();
783 return RegMask();
784 }
786 int Matcher::regnum_to_fpu_offset(int regnum) {
787 return regnum - 32; // The FP registers are in the second chunk
788 }
791 const bool Matcher::isSimpleConstant64(jlong value) {
792 // Will one (StoreL ConL) be cheaper than two (StoreI ConI)?.
793 return true;
794 }
797 // Return whether or not this register is ever used as an argument. This
798 // function is used on startup to build the trampoline stubs in generateOptoStub.
799 // Registers not mentioned will be killed by the VM call in the trampoline, and
800 // arguments in those registers not be available to the callee.
// Integer args: T0 plus A0-A7; FP args: F12-F19 (both halves of each).
801 bool Matcher::can_be_java_arg( int reg ) {
802 /* Refer to: [sharedRuntime_mips_64.cpp] SharedRuntime::java_calling_convention() */
803 if ( reg == T0_num || reg == T0_H_num
804 || reg == A0_num || reg == A0_H_num
805 || reg == A1_num || reg == A1_H_num
806 || reg == A2_num || reg == A2_H_num
807 || reg == A3_num || reg == A3_H_num
808 || reg == A4_num || reg == A4_H_num
809 || reg == A5_num || reg == A5_H_num
810 || reg == A6_num || reg == A6_H_num
811 || reg == A7_num || reg == A7_H_num )
812 return true;
814 if ( reg == F12_num || reg == F12_H_num
815 || reg == F13_num || reg == F13_H_num
816 || reg == F14_num || reg == F14_H_num
817 || reg == F15_num || reg == F15_H_num
818 || reg == F16_num || reg == F16_H_num
819 || reg == F17_num || reg == F17_H_num
820 || reg == F18_num || reg == F18_H_num
821 || reg == F19_num || reg == F19_H_num )
822 return true;
824 return false;
825 }
// Any argument register may also serve as a spill target.
827 bool Matcher::is_spillable_arg( int reg ) {
828 return can_be_java_arg(reg);
829 }
// No magic-number long division-by-constant expansion on this port.
831 bool Matcher::use_asm_for_ldiv_by_con( jlong divisor ) {
832 return false;
833 }
835 // Register for MODL projection of divmodL
836 RegMask Matcher::modL_proj_mask() {
837 ShouldNotReachHere();
838 return RegMask();
839 }
// Method-handle invokes preserve SP via the frame pointer register.
841 const RegMask Matcher::method_handle_invoke_SP_save_mask() {
842 return FP_REG_mask();
843 }
845 // MIPS doesn't support AES intrinsics
846 const bool Matcher::pass_original_key_for_aes() {
847 return false;
848 }
850 // The address of the call instruction needs to be 16-byte aligned to
851 // ensure that it does not span a cache line so that it can be patched.
// Each compute_padding() returns the number of filler bytes needed so the
// patchable call sequence (listed in the comments) starts 16-byte aligned.
853 int CallStaticJavaDirectNode::compute_padding(int current_offset) const {
854 //lui
855 //ori
856 //dsll
857 //ori
859 //jalr
860 //nop
862 return round_to(current_offset, alignment_required()) - current_offset;
863 }
865 // The address of the call instruction needs to be 16-byte aligned to
866 // ensure that it does not span a cache line so that it can be patched.
// The 4-instruction inline-cache load precedes the call, so it is added
// to the offset before aligning.
867 int CallDynamicJavaDirectNode::compute_padding(int current_offset) const {
868 //loadIC <--- skip
870 //lui
871 //ori
872 //dsll
873 //ori
875 //jalr
876 //nop
878 current_offset += 4 * 4;
879 return round_to(current_offset, alignment_required()) - current_offset;
880 }
882 int CallLeafNoFPDirectNode::compute_padding(int current_offset) const {
883 //lui
884 //ori
885 //dsll
886 //ori
888 //jalr
889 //nop
891 return round_to(current_offset, alignment_required()) - current_offset;
892 }
894 int CallLeafDirectNode::compute_padding(int current_offset) const {
895 //lui
896 //ori
897 //dsll
898 //ori
900 //jalr
901 //nop
903 return round_to(current_offset, alignment_required()) - current_offset;
904 }
906 int CallRuntimeDirectNode::compute_padding(int current_offset) const {
907 //lui
908 //ori
909 //dsll
910 //ori
912 //jalr
913 //nop
915 return round_to(current_offset, alignment_required()) - current_offset;
916 }
918 // If CPU can load and store mis-aligned doubles directly then no fixup is
919 // needed. Else we split the double into 2 integer pieces and move it
920 // piece-by-piece. Only happens when passing doubles into C code as the
921 // Java calling convention forces doubles to be aligned.
922 const bool Matcher::misaligned_doubles_ok = false;
923 // Do floats take an entire double register or just half?
924 //const bool Matcher::float_in_double = true;
925 bool Matcher::float_in_double() { return false; }
926 // Threshold size for cleararray.
927 const int Matcher::init_array_short_size = 8 * BytesPerLong;
928 // Do ints take an entire long register or just half?
929 const bool Matcher::int_in_long = true;
930 // Is it better to copy float constants, or load them directly from memory?
931 // Intel can load a float constant from a direct address, requiring no
932 // extra registers. Most RISCs will have to materialize an address into a
933 // register first, so they would do better to copy the constant from stack.
934 const bool Matcher::rematerialize_float_constants = false;
935 // Advertise here if the CPU requires explicit rounding operations
936 // to implement the UseStrictFP mode.
937 const bool Matcher::strict_fp_requires_explicit_rounding = false;
938 // The ecx parameter to rep stos for the ClearArray node is in dwords.
// NOTE(review): the "ecx/rep stos" comment above is an x86 leftover; on
// this port the flag only selects element-count vs byte-count semantics.
939 const bool Matcher::init_array_count_is_in_bytes = false;
942 // Indicate if the safepoint node needs the polling page as an input.
943 // Since MIPS doesn't have absolute addressing, it needs.
944 bool SafePointNode::needs_polling_address_input() {
945 return true;
946 }
// !!!!! Special hack to get all type of calls to specify the byte offset
// from the start of the call to the point where the return address
// will point.
int MachCallStaticJavaNode::ret_addr_offset() {
  assert(NativeCall::instruction_size == 24, "in MachCallStaticJavaNode::ret_addr_offset");
  // The call site is a patchable_set48 (4 instructions) followed by jalr
  // and its delay-slot nop, 24 bytes in total:
  //lui
  //ori
  //dsll
  //ori
  //jalr
  //nop
  return NativeCall::instruction_size;
}
int MachCallDynamicJavaNode::ret_addr_offset() {
  assert(NativeCall::instruction_size == 24, "in MachCallDynamicJavaNode::ret_addr_offset");
  // 16 bytes (4 instructions) to materialize the IC klass, plus the
  // 24-byte NativeCall (patchable_set48 + jalr + delay-slot nop),
  // 40 bytes in total:
  //lui IC_Klass,
  //ori IC_Klass,
  //dsll IC_Klass
  //ori IC_Klass
  //lui T9
  //ori T9
  //dsll T9
  //ori T9
  //jalr T9
  //nop
  return 4 * 4 + NativeCall::instruction_size;
}
979 //=============================================================================
981 // Figure out which register class each belongs in: rc_int, rc_float, rc_stack
982 enum RC { rc_bad, rc_int, rc_float, rc_stack };
983 static enum RC rc_class( OptoReg::Name reg ) {
984 if( !OptoReg::is_valid(reg) ) return rc_bad;
985 if (OptoReg::is_stack(reg)) return rc_stack;
986 VMReg r = OptoReg::as_VMReg(reg);
987 if (r->is_Register()) return rc_int;
988 assert(r->is_FloatRegister(), "must be");
989 return rc_float;
990 }
// Move a value between the locations chosen by the register allocator:
// gpr/fpr/stack-slot sources and destinations in any combination.
// Modes:
//   cbuf != NULL           : emit the instructions (AT is scratch for
//                            mem->mem copies).
//   cbuf == NULL, !do_size : print a textual form to 'st' (non-PRODUCT).
//   cbuf == NULL, do_size  : size-only computation.
// Returns the size in bytes of the (would-be) emitted code.
uint MachSpillCopyNode::implementation( CodeBuffer *cbuf, PhaseRegAlloc *ra_, bool do_size, outputStream* st ) const {
  // Get registers to move
  OptoReg::Name src_second = ra_->get_reg_second(in(1));
  OptoReg::Name src_first = ra_->get_reg_first(in(1));
  OptoReg::Name dst_second = ra_->get_reg_second(this );
  OptoReg::Name dst_first = ra_->get_reg_first(this );

  enum RC src_second_rc = rc_class(src_second);
  enum RC src_first_rc = rc_class(src_first);
  enum RC dst_second_rc = rc_class(dst_second);
  enum RC dst_first_rc = rc_class(dst_first);

  assert(OptoReg::is_valid(src_first) && OptoReg::is_valid(dst_first), "must move at least 1 register" );

  // Generate spill code!
  int size = 0;

  if( src_first == dst_first && src_second == dst_second )
    return 0; // Self copy, no move

  if (src_first_rc == rc_stack) {
    // mem ->
    if (dst_first_rc == rc_stack) {
      // mem -> mem
      assert(src_second != dst_first, "overlap");
      // An even first half with the adjacent slot as second half denotes
      // an aligned 64-bit value; everything else is moved 32 bits at a time.
      if ((src_first & 1) == 0 && src_first + 1 == src_second &&
          (dst_first & 1) == 0 && dst_first + 1 == dst_second) {
        // 64-bit
        int src_offset = ra_->reg2offset(src_first);
        int dst_offset = ra_->reg2offset(dst_first);
        if (cbuf) {
          MacroAssembler _masm(cbuf);
          __ ld(AT, Address(SP, src_offset));
          __ sd(AT, Address(SP, dst_offset));
#ifndef PRODUCT
        } else {
          if(!do_size){
            if (size != 0) st->print("\n\t");
            st->print("ld AT, [SP + #%d]\t# 64-bit mem-mem spill 1\n\t"
                      "sd AT, [SP + #%d]",
                      src_offset, dst_offset);
          }
#endif
        }
        size += 8;
      } else {
        // 32-bit
        assert(!((src_first & 1) == 0 && src_first + 1 == src_second), "no transform");
        assert(!((dst_first & 1) == 0 && dst_first + 1 == dst_second), "no transform");
        // No pushl/popl, so:
        int src_offset = ra_->reg2offset(src_first);
        int dst_offset = ra_->reg2offset(dst_first);
        if (cbuf) {
          MacroAssembler _masm(cbuf);
          __ lw(AT, Address(SP, src_offset));
          __ sw(AT, Address(SP, dst_offset));
#ifndef PRODUCT
        } else {
          if(!do_size){
            if (size != 0) st->print("\n\t");
            st->print("lw AT, [SP + #%d] spill 2\n\t"
                      "sw AT, [SP + #%d]\n\t",
                      src_offset, dst_offset);
          }
#endif
        }
        size += 8;
      }
      return size;
    } else if (dst_first_rc == rc_int) {
      // mem -> gpr
      if ((src_first & 1) == 0 && src_first + 1 == src_second &&
          (dst_first & 1) == 0 && dst_first + 1 == dst_second) {
        // 64-bit
        int offset = ra_->reg2offset(src_first);
        if (cbuf) {
          MacroAssembler _masm(cbuf);
          __ ld(as_Register(Matcher::_regEncode[dst_first]), Address(SP, offset));
#ifndef PRODUCT
        } else {
          if(!do_size){
            if (size != 0) st->print("\n\t");
            st->print("ld %s, [SP + #%d]\t# spill 3",
                      Matcher::regName[dst_first],
                      offset);
          }
#endif
        }
        size += 4;
      } else {
        // 32-bit
        assert(!((src_first & 1) == 0 && src_first + 1 == src_second), "no transform");
        assert(!((dst_first & 1) == 0 && dst_first + 1 == dst_second), "no transform");
        int offset = ra_->reg2offset(src_first);
        if (cbuf) {
          MacroAssembler _masm(cbuf);
          // Sign-extend for ints, zero-extend otherwise (e.g. narrow oops).
          if (this->ideal_reg() == Op_RegI)
            __ lw(as_Register(Matcher::_regEncode[dst_first]), Address(SP, offset));
          else
            __ lwu(as_Register(Matcher::_regEncode[dst_first]), Address(SP, offset));
#ifndef PRODUCT
        } else {
          if(!do_size){
            if (size != 0) st->print("\n\t");
            if (this->ideal_reg() == Op_RegI)
              st->print("lw %s, [SP + #%d]\t# spill 4",
                        Matcher::regName[dst_first],
                        offset);
            else
              st->print("lwu %s, [SP + #%d]\t# spill 5",
                        Matcher::regName[dst_first],
                        offset);
          }
#endif
        }
        size += 4;
      }
      return size;
    } else if (dst_first_rc == rc_float) {
      // mem-> xmm
      if ((src_first & 1) == 0 && src_first + 1 == src_second &&
          (dst_first & 1) == 0 && dst_first + 1 == dst_second) {
        // 64-bit
        int offset = ra_->reg2offset(src_first);
        if (cbuf) {
          MacroAssembler _masm(cbuf);
          __ ldc1( as_FloatRegister(Matcher::_regEncode[dst_first]), Address(SP, offset));
#ifndef PRODUCT
        } else {
          if(!do_size){
            if (size != 0) st->print("\n\t");
            st->print("ldc1 %s, [SP + #%d]\t# spill 6",
                      Matcher::regName[dst_first],
                      offset);
          }
#endif
        }
        size += 4;
      } else {
        // 32-bit
        assert(!((src_first & 1) == 0 && src_first + 1 == src_second), "no transform");
        assert(!((dst_first & 1) == 0 && dst_first + 1 == dst_second), "no transform");
        int offset = ra_->reg2offset(src_first);
        if (cbuf) {
          MacroAssembler _masm(cbuf);
          __ lwc1( as_FloatRegister(Matcher::_regEncode[dst_first]), Address(SP, offset));
#ifndef PRODUCT
        } else {
          if(!do_size){
            if (size != 0) st->print("\n\t");
            st->print("lwc1 %s, [SP + #%d]\t# spill 7",
                      Matcher::regName[dst_first],
                      offset);
          }
#endif
        }
        size += 4;
      }
      return size;
    }
  } else if (src_first_rc == rc_int) {
    // gpr ->
    if (dst_first_rc == rc_stack) {
      // gpr -> mem
      if ((src_first & 1) == 0 && src_first + 1 == src_second &&
          (dst_first & 1) == 0 && dst_first + 1 == dst_second) {
        // 64-bit
        int offset = ra_->reg2offset(dst_first);
        if (cbuf) {
          MacroAssembler _masm(cbuf);
          __ sd(as_Register(Matcher::_regEncode[src_first]), Address(SP, offset));
#ifndef PRODUCT
        } else {
          if(!do_size){
            if (size != 0) st->print("\n\t");
            st->print("sd %s, [SP + #%d] # spill 8",
                      Matcher::regName[src_first],
                      offset);
          }
#endif
        }
        size += 4;
      } else {
        // 32-bit
        assert(!((src_first & 1) == 0 && src_first + 1 == src_second), "no transform");
        assert(!((dst_first & 1) == 0 && dst_first + 1 == dst_second), "no transform");
        int offset = ra_->reg2offset(dst_first);
        if (cbuf) {
          MacroAssembler _masm(cbuf);
          __ sw(as_Register(Matcher::_regEncode[src_first]), Address(SP, offset));
#ifndef PRODUCT
        } else {
          if(!do_size){
            if (size != 0) st->print("\n\t");
            st->print("sw %s, [SP + #%d]\t# spill 9",
                      Matcher::regName[src_first], offset);
          }
#endif
        }
        size += 4;
      }
      return size;
    } else if (dst_first_rc == rc_int) {
      // gpr -> gpr
      if ((src_first & 1) == 0 && src_first + 1 == src_second &&
          (dst_first & 1) == 0 && dst_first + 1 == dst_second) {
        // 64-bit
        if (cbuf) {
          MacroAssembler _masm(cbuf);
          __ move(as_Register(Matcher::_regEncode[dst_first]),
                  as_Register(Matcher::_regEncode[src_first]));
#ifndef PRODUCT
        } else {
          if(!do_size){
            if (size != 0) st->print("\n\t");
            st->print("move(64bit) %s <-- %s\t# spill 10",
                      Matcher::regName[dst_first],
                      Matcher::regName[src_first]);
          }
#endif
        }
        size += 4;
        return size;
      } else {
        // 32-bit
        assert(!((src_first & 1) == 0 && src_first + 1 == src_second), "no transform");
        assert(!((dst_first & 1) == 0 && dst_first + 1 == dst_second), "no transform");
        if (cbuf) {
          MacroAssembler _masm(cbuf);
          // Ints get a 32-bit move; other 32-bit values are copied via
          // daddu with R0 (a full-width register copy).
          if (this->ideal_reg() == Op_RegI)
            __ move_u32(as_Register(Matcher::_regEncode[dst_first]), as_Register(Matcher::_regEncode[src_first]));
          else
            __ daddu(as_Register(Matcher::_regEncode[dst_first]), as_Register(Matcher::_regEncode[src_first]), R0);
#ifndef PRODUCT
        } else {
          if(!do_size){
            if (size != 0) st->print("\n\t");
            st->print("move(32-bit) %s <-- %s\t# spill 11",
                      Matcher::regName[dst_first],
                      Matcher::regName[src_first]);
          }
#endif
        }
        size += 4;
        return size;
      }
    } else if (dst_first_rc == rc_float) {
      // gpr -> xmm
      if ((src_first & 1) == 0 && src_first + 1 == src_second &&
          (dst_first & 1) == 0 && dst_first + 1 == dst_second) {
        // 64-bit
        if (cbuf) {
          MacroAssembler _masm(cbuf);
          __ dmtc1(as_Register(Matcher::_regEncode[src_first]), as_FloatRegister(Matcher::_regEncode[dst_first]));
#ifndef PRODUCT
        } else {
          if(!do_size){
            if (size != 0) st->print("\n\t");
            // NOTE(review): the emitted instruction is dmtc1 <gpr src>, <fpr dst>
            // (MIPS "dmtc1 rt, fs"), but this print lists dst before src —
            // confirm the intended debug format.
            st->print("dmtc1 %s, %s\t# spill 12",
                      Matcher::regName[dst_first],
                      Matcher::regName[src_first]);
          }
#endif
        }
        size += 4;
      } else {
        // 32-bit
        assert(!((src_first & 1) == 0 && src_first + 1 == src_second), "no transform");
        assert(!((dst_first & 1) == 0 && dst_first + 1 == dst_second), "no transform");
        if (cbuf) {
          MacroAssembler _masm(cbuf);
          __ mtc1( as_Register(Matcher::_regEncode[src_first]), as_FloatRegister(Matcher::_regEncode[dst_first]) );
#ifndef PRODUCT
        } else {
          if(!do_size){
            if (size != 0) st->print("\n\t");
            // NOTE(review): same operand-order question as spill 12 above.
            st->print("mtc1 %s, %s\t# spill 13",
                      Matcher::regName[dst_first],
                      Matcher::regName[src_first]);
          }
#endif
        }
        size += 4;
      }
      return size;
    }
  } else if (src_first_rc == rc_float) {
    // xmm ->
    if (dst_first_rc == rc_stack) {
      // xmm -> mem
      if ((src_first & 1) == 0 && src_first + 1 == src_second &&
          (dst_first & 1) == 0 && dst_first + 1 == dst_second) {
        // 64-bit
        int offset = ra_->reg2offset(dst_first);
        if (cbuf) {
          MacroAssembler _masm(cbuf);
          __ sdc1( as_FloatRegister(Matcher::_regEncode[src_first]), Address(SP, offset) );
#ifndef PRODUCT
        } else {
          if(!do_size){
            if (size != 0) st->print("\n\t");
            st->print("sdc1 %s, [SP + #%d]\t# spill 14",
                      Matcher::regName[src_first],
                      offset);
          }
#endif
        }
        size += 4;
      } else {
        // 32-bit
        assert(!((src_first & 1) == 0 && src_first + 1 == src_second), "no transform");
        assert(!((dst_first & 1) == 0 && dst_first + 1 == dst_second), "no transform");
        int offset = ra_->reg2offset(dst_first);
        if (cbuf) {
          MacroAssembler _masm(cbuf);
          __ swc1(as_FloatRegister(Matcher::_regEncode[src_first]), Address(SP, offset));
#ifndef PRODUCT
        } else {
          if(!do_size){
            if (size != 0) st->print("\n\t");
            st->print("swc1 %s, [SP + #%d]\t# spill 15",
                      Matcher::regName[src_first],
                      offset);
          }
#endif
        }
        size += 4;
      }
      return size;
    } else if (dst_first_rc == rc_int) {
      // xmm -> gpr
      if ((src_first & 1) == 0 && src_first + 1 == src_second &&
          (dst_first & 1) == 0 && dst_first + 1 == dst_second) {
        // 64-bit
        if (cbuf) {
          MacroAssembler _masm(cbuf);
          __ dmfc1( as_Register(Matcher::_regEncode[dst_first]), as_FloatRegister(Matcher::_regEncode[src_first]));
#ifndef PRODUCT
        } else {
          if(!do_size){
            if (size != 0) st->print("\n\t");
            st->print("dmfc1 %s, %s\t# spill 16",
                      Matcher::regName[dst_first],
                      Matcher::regName[src_first]);
          }
#endif
        }
        size += 4;
      } else {
        // 32-bit
        assert(!((src_first & 1) == 0 && src_first + 1 == src_second), "no transform");
        assert(!((dst_first & 1) == 0 && dst_first + 1 == dst_second), "no transform");
        if (cbuf) {
          MacroAssembler _masm(cbuf);
          __ mfc1( as_Register(Matcher::_regEncode[dst_first]), as_FloatRegister(Matcher::_regEncode[src_first]));
#ifndef PRODUCT
        } else {
          if(!do_size){
            if (size != 0) st->print("\n\t");
            st->print("mfc1 %s, %s\t# spill 17",
                      Matcher::regName[dst_first],
                      Matcher::regName[src_first]);
          }
#endif
        }
        size += 4;
      }
      return size;
    } else if (dst_first_rc == rc_float) {
      // xmm -> xmm
      if ((src_first & 1) == 0 && src_first + 1 == src_second &&
          (dst_first & 1) == 0 && dst_first + 1 == dst_second) {
        // 64-bit
        if (cbuf) {
          MacroAssembler _masm(cbuf);
          __ mov_d( as_FloatRegister(Matcher::_regEncode[dst_first]), as_FloatRegister(Matcher::_regEncode[src_first]));
#ifndef PRODUCT
        } else {
          if(!do_size){
            if (size != 0) st->print("\n\t");
            st->print("mov_d %s <-- %s\t# spill 18",
                      Matcher::regName[dst_first],
                      Matcher::regName[src_first]);
          }
#endif
        }
        size += 4;
      } else {
        // 32-bit
        assert(!((src_first & 1) == 0 && src_first + 1 == src_second), "no transform");
        assert(!((dst_first & 1) == 0 && dst_first + 1 == dst_second), "no transform");
        if (cbuf) {
          MacroAssembler _masm(cbuf);
          __ mov_s( as_FloatRegister(Matcher::_regEncode[dst_first]), as_FloatRegister(Matcher::_regEncode[src_first]));
#ifndef PRODUCT
        } else {
          if(!do_size){
            if (size != 0) st->print("\n\t");
            st->print("mov_s %s <-- %s\t# spill 19",
                      Matcher::regName[dst_first],
                      Matcher::regName[src_first]);
          }
#endif
        }
        size += 4;
      }
      return size;
    }
  }

  // Unhandled register-class combination — should never be reached.
  assert(0," foo ");
  Unimplemented();
  return size;
}
#ifndef PRODUCT
// Pretty-print the spill copy; delegates to implementation() in print mode.
void MachSpillCopyNode::format( PhaseRegAlloc *ra_, outputStream* st ) const {
  implementation( NULL, ra_, false, st );
}
#endif
// Emit the spill copy; delegates to implementation() in emit mode.
void MachSpillCopyNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
  implementation( &cbuf, ra_, false, NULL );
}
// Size in bytes of the spill copy; delegates to implementation() in
// size-only mode.
uint MachSpillCopyNode::size(PhaseRegAlloc *ra_) const {
  return implementation( NULL, ra_, true, NULL );
}
1423 //=============================================================================
1424 #
#ifndef PRODUCT
// Pretty-print the breakpoint node.
void MachBreakpointNode::format( PhaseRegAlloc *, outputStream* st ) const {
  st->print("INT3");
}
#endif
// Emit a breakpoint trap instruction.
void MachBreakpointNode::emit(CodeBuffer &cbuf, PhaseRegAlloc* ra_) const {
  MacroAssembler _masm(&cbuf);
  __ int3();
}
// Let MachNode compute the size from the emitted code.
uint MachBreakpointNode::size(PhaseRegAlloc* ra_) const {
  return MachNode::size(ra_);
}
1442 //=============================================================================
1443 #ifndef PRODUCT
1444 void MachEpilogNode::format( PhaseRegAlloc *ra_, outputStream* st ) const {
1445 Compile *C = ra_->C;
1446 int framesize = C->frame_size_in_bytes();
1448 assert((framesize & (StackAlignmentInBytes-1)) == 0, "frame size not aligned");
1450 st->print("daddiu SP, SP, %d # Rlease stack @ MachEpilogNode",framesize);
1451 st->cr(); st->print("\t");
1452 if (UseLoongsonISA) {
1453 st->print("gslq RA, FP, SP, %d # Restore FP & RA @ MachEpilogNode", -wordSize*2);
1454 } else {
1455 st->print("ld RA, SP, %d # Restore RA @ MachEpilogNode", -wordSize);
1456 st->cr(); st->print("\t");
1457 st->print("ld FP, SP, %d # Restore FP @ MachEpilogNode", -wordSize*2);
1458 }
1460 if( do_polling() && C->is_method_compilation() ) {
1461 st->print("Poll Safepoint # MachEpilogNode");
1462 }
1463 }
1464 #endif
// Emit the method epilog: release the frame, restore RA/FP and, for
// normal method compilations, read the polling page to take a return
// safepoint.
void MachEpilogNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
  Compile *C = ra_->C;
  MacroAssembler _masm(&cbuf);
  int framesize = C->frame_size_in_bytes();

  assert((framesize & (StackAlignmentInBytes-1)) == 0, "frame size not aligned");

  // Release the frame allocated by the prolog.
  __ daddiu(SP, SP, framesize);

  if (UseLoongsonISA) {
    // Load-pair restores RA and FP in a single instruction.
    __ gslq(RA, FP, SP, -wordSize*2);
  } else {
    __ ld(RA, SP, -wordSize );
    __ ld(FP, SP, -wordSize*2 );
  }

  /* 2012/11/19 Jin: The epilog in a RuntimeStub should not contain a safepoint */
  if( do_polling() && C->is_method_compilation() ) {
#ifndef OPT_SAFEPOINT
    __ set64(AT, (long)os::get_polling_page());
    __ relocate(relocInfo::poll_return_type);
    __ lw(AT, AT, 0);
#else
    __ lui(AT, Assembler::split_high((intptr_t)os::get_polling_page()));
    __ relocate(relocInfo::poll_return_type);
    __ lw(AT, AT, Assembler::split_low((intptr_t)os::get_polling_page()));
#endif
  }
}
// Size depends on UseLoongsonISA and polling; let MachNode measure it.
uint MachEpilogNode::size(PhaseRegAlloc *ra_) const {
  return MachNode::size(ra_); // too many variables; just compute it the hard way
}
// NOTE(review): the comment claims "a large enough number" but 0 is
// returned — presumably the epilog's poll relocation is accounted for
// elsewhere; confirm.
int MachEpilogNode::reloc() const {
  return 0; // a large enough number
}
// Use the generic pipeline description for the epilog.
const Pipeline * MachEpilogNode::pipeline() const {
  return MachNode::pipeline_class();
}
1508 int MachEpilogNode::safepoint_offset() const { return 0; }
1510 //=============================================================================
#ifndef PRODUCT
// Pretty-print the lock-box address computation (reg = SP + offset).
void BoxLockNode::format( PhaseRegAlloc *ra_, outputStream* st ) const {
  int offset = ra_->reg2offset(in_RegMask(0).find_first_elem());
  int reg = ra_->get_reg_first(this);
  st->print("ADDI %s, SP, %d @BoxLockNode",Matcher::regName[reg],offset);
}
#endif
// One 4-byte instruction (the addi emitted below).
// NOTE(review): assumes the frame offset always fits in a simm16
// immediate — confirm for very large frames.
uint BoxLockNode::size(PhaseRegAlloc *ra_) const {
  return 4;
}
// Materialize the stack address of the lock box into the allocated register.
void BoxLockNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
  MacroAssembler _masm(&cbuf);
  int offset = ra_->reg2offset(in_RegMask(0).find_first_elem());
  int reg = ra_->get_encode(this);

  // NOTE(review): addi is a 32-bit, overflow-trapping add with a simm16
  // immediate; SP holds a 64-bit pointer — presumably offsets stay small
  // and addresses stay in range, but consider daddiu. Confirm.
  __ addi(as_Register(reg), SP, offset);
  /* x86 leftovers, kept for reference:
  if( offset >= 128 ) {
    emit_opcode(cbuf, 0x8D); // LEA reg,[SP+offset]
    emit_rm(cbuf, 0x2, reg, 0x04);
    emit_rm(cbuf, 0x0, 0x04, SP_enc);
    emit_d32(cbuf, offset);
  }
  else {
    emit_opcode(cbuf, 0x8D); // LEA reg,[SP+offset]
    emit_rm(cbuf, 0x1, reg, 0x04);
    emit_rm(cbuf, 0x0, 0x04, SP_enc);
    emit_d8(cbuf, offset);
  }
  */
}
//static int sizeof_FFree_Float_Stack_All = -1;

int MachCallRuntimeNode::ret_addr_offset() {
  // Runtime call site: patchable_set48 (4 instructions) plus jalr and
  // its delay-slot nop, 24 bytes in total:
  //lui
  //ori
  //dsll
  //ori
  //jalr
  //nop
  assert(NativeCall::instruction_size == 24, "in MachCallRuntimeNode::ret_addr_offset()");
  return NativeCall::instruction_size;
}
1566 //=============================================================================
#ifndef PRODUCT
// Pretty-print the nop padding node.
void MachNopNode::format( PhaseRegAlloc *, outputStream* st ) const {
  st->print("NOP \t# %d bytes pad for loops and calls", 4 * _count);
}
#endif
1573 void MachNopNode::emit(CodeBuffer &cbuf, PhaseRegAlloc * ) const {
1574 MacroAssembler _masm(&cbuf);
1575 int i = 0;
1576 for(i = 0; i < _count; i++)
1577 __ nop();
1578 }
// Each nop is 4 bytes.
uint MachNopNode::size(PhaseRegAlloc *) const {
  return 4 * _count;
}
// Use the generic pipeline description for nops.
const Pipeline* MachNopNode::pipeline() const {
  return MachNode::pipeline_class();
}
1587 //=============================================================================
1589 //=============================================================================
#ifndef PRODUCT
// Pretty-print the Unverified Entry Point inline-cache check.
void MachUEPNode::format( PhaseRegAlloc *ra_, outputStream* st ) const {
  st->print_cr("load_klass(AT, T0)");
  st->print_cr("\tbeq(AT, iCache, L)");
  st->print_cr("\tnop");
  st->print_cr("\tjmp(SharedRuntime::get_ic_miss_stub(), relocInfo::runtime_call_type)");
  st->print_cr("\tnop");
  st->print_cr("\tnop");
  st->print_cr(" L:");
}
#endif
// Unverified Entry Point: compare the receiver's klass (receiver in T0)
// against the inline-cache klass and jump to the IC-miss stub on mismatch.
void MachUEPNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
  MacroAssembler _masm(&cbuf);
#ifdef ASSERT
  //uint code_size = cbuf.code_size();
#endif
  int ic_reg = Matcher::inline_cache_reg_encode();
  Label L;
  Register receiver = T0;
  Register iCache = as_Register(ic_reg);

  __ load_klass(AT, receiver);
  __ beq(AT, iCache, L);
  __ nop();   // branch delay slot

  // Mismatch: tail-call the IC miss stub via T9.
  __ relocate(relocInfo::runtime_call_type);
  __ li48(T9, (long)SharedRuntime::get_ic_miss_stub());
  __ jr(T9);
  __ nop();   // branch delay slot

  /* WARNING these NOPs are critical so that verified entry point is properly
     8 bytes aligned for patching by NativeJump::patch_verified_entry() */
  __ align(CodeEntryAlignment);
  __ bind(L);
}
// Size varies with alignment padding; let MachNode measure it.
uint MachUEPNode::size(PhaseRegAlloc *ra_) const {
  return MachNode::size(ra_);
}
1633 //=============================================================================
1635 const RegMask& MachConstantBaseNode::_out_RegMask = P_REG_mask();
// The constant table base is materialized as an absolute address
// (see MachConstantBaseNode::emit), so no extra offset is needed.
int Compile::ConstantTable::calculate_table_base_offset() const {
  return 0; // absolute addressing, no offset
}
1641 bool MachConstantBaseNode::requires_postalloc_expand() const { return false; }
// Never called: requires_postalloc_expand() returns false.
void MachConstantBaseNode::postalloc_expand(GrowableArray <Node *> *nodes, PhaseRegAlloc *ra_) {
  ShouldNotReachHere();
}
// Materialize the constant table base address into the allocated register
// with a patchable li48 under an internal_pc relocation.
void MachConstantBaseNode::emit(CodeBuffer& cbuf, PhaseRegAlloc* ra_) const {
  Compile* C = ra_->C;
  Compile::ConstantTable& constant_table = C->constant_table();
  MacroAssembler _masm(&cbuf);

  Register Rtoc = as_Register(ra_->get_encode(this));
  CodeSection* consts_section = __ code()->consts();
  int consts_size = consts_section->align_at_start(consts_section->size());
  assert(constant_table.size() == consts_size, "must be equal");

  if (consts_section->size()) {
    // Materialize the constant table base.
    address baseaddr = consts_section->start() + -(constant_table.table_base_offset());
    // RelocationHolder rspec = internal_word_Relocation::spec(baseaddr);
    __ relocate(relocInfo::internal_pc_type);
    __ li48(Rtoc, (long)baseaddr);
  }
}
// li48 expands to 4 instructions of 4 bytes each.
uint MachConstantBaseNode::size(PhaseRegAlloc* ra_) const {
  // li48 (4 insts)
  return 4 * 4;
}
#ifndef PRODUCT
// Pretty-print the constant table base materialization.
void MachConstantBaseNode::format(PhaseRegAlloc* ra_, outputStream* st) const {
  Register r = as_Register(ra_->get_encode(this));
  st->print("li48 %s, &constanttable (constant table base) @ MachConstantBaseNode", r->name());
}
#endif
1678 //=============================================================================
#ifndef PRODUCT
// Pretty-print the prolog: optional stack bang, save RA/FP, establish
// the new FP, allocate the frame.
void MachPrologNode::format( PhaseRegAlloc *ra_, outputStream* st ) const {
  Compile* C = ra_->C;

  int framesize = C->frame_size_in_bytes();
  int bangsize = C->bang_size_in_bytes();
  assert((framesize & (StackAlignmentInBytes-1)) == 0, "frame size not aligned");

  // Calls to C2R adapters often do not accept exceptional returns.
  // We require that their callers must bang for them. But be careful, because
  // some VM calls (such as call site linkage) can use several kilobytes of
  // stack. But the stack safety zone should account for that.
  // See bugs 4446381, 4468289, 4497237.
  if (C->need_stack_bang(bangsize)) {
    st->print_cr("# stack bang"); st->print("\t");
  }
  if (UseLoongsonISA) {
    st->print("gssq RA, FP, %d(SP) @ MachPrologNode\n\t", -wordSize*2);
  } else {
    st->print("sd RA, %d(SP) @ MachPrologNode\n\t", -wordSize);
    st->print("sd FP, %d(SP) @ MachPrologNode\n\t", -wordSize*2);
  }
  st->print("daddiu FP, SP, -%d \n\t", wordSize*2);
  st->print("daddiu SP, SP, -%d \t",framesize);
}
#endif
1707 void MachPrologNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
1708 Compile* C = ra_->C;
1709 MacroAssembler _masm(&cbuf);
1711 int framesize = C->frame_size_in_bytes();
1712 int bangsize = C->bang_size_in_bytes();
1714 // __ verified_entry(framesize, C->need_stack_bang(bangsize)?bangsize:0, false);
1716 assert((framesize & (StackAlignmentInBytes-1)) == 0, "frame size not aligned");
1718 if (C->need_stack_bang(framesize)) {
1719 __ generate_stack_overflow_check(framesize);
1720 }
1722 if (UseLoongsonISA) {
1723 __ gssq(RA, FP, SP, -wordSize*2);
1724 } else {
1725 __ sd(RA, SP, -wordSize);
1726 __ sd(FP, SP, -wordSize*2);
1727 }
1728 __ daddiu(FP, SP, -wordSize*2);
1729 __ daddiu(SP, SP, -framesize);
1730 __ nop(); /* 2013.10.22 Jin: Make enough room for patch_verified_entry() */
1731 __ nop();
1733 C->set_frame_complete(cbuf.insts_size());
1734 if (C->has_mach_constant_base_node()) {
1735 // NOTE: We set the table base offset here because users might be
1736 // emitted before MachConstantBaseNode.
1737 Compile::ConstantTable& constant_table = C->constant_table();
1738 constant_table.set_table_base_offset(constant_table.calculate_table_base_offset());
1739 }
1741 }
// Size depends on UseLoongsonISA and the stack-bang; let MachNode measure it.
uint MachPrologNode::size(PhaseRegAlloc *ra_) const {
  return MachNode::size(ra_); // too many variables; just compute it the hard way
}
// Worst-case relocation count for the prolog.
int MachPrologNode::reloc() const {
  return 0; // a large enough number
}
1753 %}
1755 //----------ENCODING BLOCK-----------------------------------------------------
1756 // This block specifies the encoding classes used by the compiler to output
1757 // byte streams. Encoding classes generate functions which are called by
1758 // Machine Instruction Nodes in order to generate the bit encoding of the
1759 // instruction. Operands specify their base encoding interface with the
// interface keyword. There are currently four supported interfaces:
1761 // REG_INTER, CONST_INTER, MEMORY_INTER, & COND_INTER. REG_INTER causes an
1762 // operand to generate a function which returns its register number when
1763 // queried. CONST_INTER causes an operand to generate a function which
1764 // returns the value of the constant when queried. MEMORY_INTER causes an
1765 // operand to generate four functions which return the Base Register, the
1766 // Index Register, the Scale Value, and the Offset Value of the operand when
1767 // queried. COND_INTER causes an operand to generate six functions which
1768 // return the encoding code (ie - encoding bits for the instruction)
1769 // associated with each basic boolean condition for a conditional instruction.
1770 // Instructions specify two basic values for encoding. They use the
1771 // ins_encode keyword to specify their encoding class (which must be one of
1772 // the class names specified in the encoding block), and they use the
1773 // opcode keyword to specify, in order, their primary, secondary, and
1774 // tertiary opcode. Only the opcode sections which a particular instruction
1775 // needs for encoding need to be specified.
1776 encode %{
1777 /*
1778 Alias:
1779 1044 b java.io.ObjectInputStream::readHandle (130 bytes)
1780 118 B14: # B19 B15 <- B13 Freq: 0.899955
1781 118 add S1, S2, V0 #@addP_reg_reg
1782 11c lb S0, [S1 + #-8257524] #@loadB
1783 120 BReq S0, #3, B19 #@branchConI_reg_imm P=0.100000 C=-1.000000
1784 */
  //Load byte signed
  // Emits lb (sign-extending byte load) from [base + (index << scale) + disp].
  // AT and T9 are scratch; on Loongson, gslbx fuses the indexed load.
  // NOTE(review): address arithmetic here uses addu (32-bit add) while
  // load_UB_enc below uses daddu — confirm 32-bit adds are safe for
  // 64-bit pointer arithmetic on this port.
  enc_class load_B_enc (mRegI dst, memory mem) %{
    MacroAssembler _masm(&cbuf);
    int dst = $dst$$reg;
    int base = $mem$$base;
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;

    if( index != 0 ) {
      if( Assembler::is_simm16(disp) ) {
        if( UseLoongsonISA ) {
          if (scale == 0) {
            __ gslbx(as_Register(dst), as_Register(base), as_Register(index), disp);
          } else {
            __ dsll(AT, as_Register(index), scale);
            __ gslbx(as_Register(dst), as_Register(base), AT, disp);
          }
        } else {
          if (scale == 0) {
            __ addu(AT, as_Register(base), as_Register(index));
          } else {
            __ dsll(AT, as_Register(index), scale);
            __ addu(AT, as_Register(base), AT);
          }
          __ lb(as_Register(dst), AT, disp);
        }
      } else {
        // Displacement does not fit simm16: materialize it in T9.
        if (scale == 0) {
          __ addu(AT, as_Register(base), as_Register(index));
        } else {
          __ dsll(AT, as_Register(index), scale);
          __ addu(AT, as_Register(base), AT);
        }
        __ move(T9, disp);
        if( UseLoongsonISA ) {
          __ gslbx(as_Register(dst), AT, T9, 0);
        } else {
          __ addu(AT, AT, T9);
          __ lb(as_Register(dst), AT, 0);
        }
      }
    } else {
      // No index register.
      if( Assembler::is_simm16(disp) ) {
        __ lb(as_Register(dst), as_Register(base), disp);
      } else {
        __ move(T9, disp);
        if( UseLoongsonISA ) {
          __ gslbx(as_Register(dst), as_Register(base), T9, 0);
        } else {
          __ addu(AT, as_Register(base), T9);
          __ lb(as_Register(dst), AT, 0);
        }
      }
    }
  %}
  //Load byte unsigned
  // Emits lbu (zero-extending byte load) from [base + (index << scale) + disp],
  // using AT/T9 as scratch when the displacement does not fit a simm16.
  enc_class load_UB_enc (mRegI dst, memory mem) %{
    MacroAssembler _masm(&cbuf);
    int dst = $dst$$reg;
    int base = $mem$$base;
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;

    if( index != 0 ) {
      // AT = base + (index << scale)
      if (scale == 0) {
        __ daddu(AT, as_Register(base), as_Register(index));
      } else {
        __ dsll(AT, as_Register(index), scale);
        __ daddu(AT, as_Register(base), AT);
      }
      if( Assembler::is_simm16(disp) ) {
        __ lbu(as_Register(dst), AT, disp);
      } else {
        __ move(T9, disp);
        __ daddu(AT, AT, T9);
        __ lbu(as_Register(dst), AT, 0);
      }
    } else {
      // No index register.
      if( Assembler::is_simm16(disp) ) {
        __ lbu(as_Register(dst), as_Register(base), disp);
      } else {
        __ move(T9, disp);
        __ daddu(AT, as_Register(base), T9);
        __ lbu(as_Register(dst), AT, 0);
      }
    }
  %}
  // Store byte from register to [base + (index << scale) + disp].
  // On Loongson, gssbx fuses the indexed store (its immediate is simm8);
  // otherwise AT/T9 are used to form the address.
  // NOTE(review): address arithmetic uses addu (32-bit add) — confirm it
  // is safe for 64-bit pointer arithmetic on this port (cf. daddu in
  // load_UB_enc).
  enc_class store_B_reg_enc (memory mem, mRegI src) %{
    MacroAssembler _masm(&cbuf);
    int src = $src$$reg;
    int base = $mem$$base;
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;

    if( index != 0 ) {
      if (scale == 0) {
        if( Assembler::is_simm(disp, 8) ) {
          if (UseLoongsonISA) {
            __ gssbx(as_Register(src), as_Register(base), as_Register(index), disp);
          } else {
            __ addu(AT, as_Register(base), as_Register(index));
            __ sb(as_Register(src), AT, disp);
          }
        } else if( Assembler::is_simm16(disp) ) {
          __ addu(AT, as_Register(base), as_Register(index));
          __ sb(as_Register(src), AT, disp);
        } else {
          __ addu(AT, as_Register(base), as_Register(index));
          __ move(T9, disp);
          if (UseLoongsonISA) {
            __ gssbx(as_Register(src), AT, T9, 0);
          } else {
            __ addu(AT, AT, T9);
            __ sb(as_Register(src), AT, 0);
          }
        }
      } else {
        __ dsll(AT, as_Register(index), scale);
        if( Assembler::is_simm(disp, 8) ) {
          if (UseLoongsonISA) {
            __ gssbx(as_Register(src), AT, as_Register(base), disp);
          } else {
            __ addu(AT, as_Register(base), AT);
            __ sb(as_Register(src), AT, disp);
          }
        } else if( Assembler::is_simm16(disp) ) {
          __ addu(AT, as_Register(base), AT);
          __ sb(as_Register(src), AT, disp);
        } else {
          __ addu(AT, as_Register(base), AT);
          __ move(T9, disp);
          if (UseLoongsonISA) {
            __ gssbx(as_Register(src), AT, T9, 0);
          } else {
            __ addu(AT, AT, T9);
            __ sb(as_Register(src), AT, 0);
          }
        }
      }
    } else {
      // No index register.
      if( Assembler::is_simm16(disp) ) {
        __ sb(as_Register(src), as_Register(base), disp);
      } else {
        __ move(T9, disp);
        if (UseLoongsonISA) {
          __ gssbx(as_Register(src), as_Register(base), T9, 0);
        } else {
          __ addu(AT, as_Register(base), T9);
          __ sb(as_Register(src), AT, 0);
        }
      }
    }
  %}
// Store an immediate byte ($src$$constant, 8-bit immediate) into memory
// operand $mem.  Addressing forms handled: [base+disp], [base+index+disp]
// and [base+(index<<scale)+disp].  AT and T9 are scratch registers.
// A zero value is stored directly from R0 so no constant needs to be
// materialized.  On Loongson CPUs (UseLoongsonISA) the fused indexed store
// gssbx(rt, rbase, rindex, simm8) folds the base+index add when the
// displacement fits in 8 signed bits.
enc_class store_B_immI_enc (memory mem, immI8 src) %{
  MacroAssembler _masm(&cbuf);
  int base = $mem$$base;
  int index = $mem$$index;
  int scale = $mem$$scale;
  int disp = $mem$$disp;
  int value = $src$$constant;

  if( index != 0 ) {
    if (!UseLoongsonISA) {
      // Generic MIPS path: form base + (index << scale) in AT, then store.
      if (scale == 0) {
        __ daddu(AT, as_Register(base), as_Register(index));
      } else {
        __ dsll(AT, as_Register(index), scale);
        __ daddu(AT, as_Register(base), AT);
      }
      if( Assembler::is_simm16(disp) ) {
        if (value == 0) {
          __ sb(R0, AT, disp);
        } else {
          __ move(T9, value);
          __ sb(T9, AT, disp);
        }
      } else {
        // Displacement does not fit the sb immediate: add it explicitly.
        if (value == 0) {
          __ move(T9, disp);
          __ daddu(AT, AT, T9);
          __ sb(R0, AT, 0);
        } else {
          __ move(T9, disp);
          __ daddu(AT, AT, T9);
          __ move(T9, value);
          __ sb(T9, AT, 0);
        }
      }
    } else {
      // Loongson path: prefer the fused indexed store gssbx.
      if (scale == 0) {
        if( Assembler::is_simm(disp, 8) ) {
          if (value == 0) {
            __ gssbx(R0, as_Register(base), as_Register(index), disp);
          } else {
            __ move(T9, value);
            __ gssbx(T9, as_Register(base), as_Register(index), disp);
          }
        } else if( Assembler::is_simm16(disp) ) {
          __ daddu(AT, as_Register(base), as_Register(index));
          if (value == 0) {
            __ sb(R0, AT, disp);
          } else {
            __ move(T9, value);
            __ sb(T9, AT, disp);
          }
        } else {
          if (value == 0) {
            __ daddu(AT, as_Register(base), as_Register(index));
            __ move(T9, disp);
            __ gssbx(R0, AT, T9, 0);
          } else {
            // Same effective address (base+index+disp) as the zero case,
            // just folded differently: AT = base + disp, gssbx adds index.
            __ move(AT, disp);
            __ move(T9, value);
            __ daddu(AT, as_Register(base), AT);
            __ gssbx(T9, AT, as_Register(index), 0);
          }
        }
      } else {
        if( Assembler::is_simm(disp, 8) ) {
          __ dsll(AT, as_Register(index), scale);
          if (value == 0) {
            __ gssbx(R0, as_Register(base), AT, disp);
          } else {
            __ move(T9, value);
            __ gssbx(T9, as_Register(base), AT, disp);
          }
        } else if( Assembler::is_simm16(disp) ) {
          __ dsll(AT, as_Register(index), scale);
          __ daddu(AT, as_Register(base), AT);
          if (value == 0) {
            __ sb(R0, AT, disp);
          } else {
            __ move(T9, value);
            __ sb(T9, AT, disp);
          }
        } else {
          __ dsll(AT, as_Register(index), scale);
          if (value == 0) {
            __ daddu(AT, as_Register(base), AT);
            __ move(T9, disp);
            __ gssbx(R0, AT, T9, 0);
          } else {
            // AT = (index << scale) + disp; gssbx adds base.
            __ move(T9, disp);
            __ daddu(AT, AT, T9);
            __ move(T9, value);
            __ gssbx(T9, as_Register(base), AT, 0);
          }
        }
      }
    }
  } else {
    // No index register: plain base + disp addressing.
    if( Assembler::is_simm16(disp) ) {
      if (value == 0) {
        __ sb(R0, as_Register(base), disp);
      } else {
        __ move(AT, value);
        __ sb(AT, as_Register(base), disp);
      }
    } else {
      if (value == 0) {
        __ move(T9, disp);
        if (UseLoongsonISA) {
          __ gssbx(R0, as_Register(base), T9, 0);
        } else {
          __ daddu(AT, as_Register(base), T9);
          __ sb(R0, AT, 0);
        }
      } else {
        __ move(T9, disp);
        if (UseLoongsonISA) {
          __ move(AT, value);
          __ gssbx(AT, as_Register(base), T9, 0);
        } else {
          __ daddu(AT, as_Register(base), T9);
          __ move(T9, value);
          __ sb(T9, AT, 0);
        }
      }
    }
  }
%}
// Store an immediate byte to memory followed by a full memory barrier.
// Identical addressing logic to store_B_immI_enc; the trailing sync()
// orders the store against subsequent memory operations (presumably for
// volatile-store semantics — confirm against the matching instruct rules).
// AT and T9 are scratch registers; a zero value is stored from R0.
enc_class store_B_immI_enc_sync (memory mem, immI8 src) %{
  MacroAssembler _masm(&cbuf);
  int base = $mem$$base;
  int index = $mem$$index;
  int scale = $mem$$scale;
  int disp = $mem$$disp;
  int value = $src$$constant;

  if( index != 0 ) {
    if ( UseLoongsonISA ) {
      // Loongson: fused indexed store gssbx when disp fits in 8 signed bits.
      if ( Assembler::is_simm(disp,8) ) {
        if ( scale == 0 ) {
          if ( value == 0 ) {
            __ gssbx(R0, as_Register(base), as_Register(index), disp);
          } else {
            __ move(AT, value);
            __ gssbx(AT, as_Register(base), as_Register(index), disp);
          }
        } else {
          __ dsll(AT, as_Register(index), scale);
          if ( value == 0 ) {
            __ gssbx(R0, as_Register(base), AT, disp);
          } else {
            __ move(T9, value);
            __ gssbx(T9, as_Register(base), AT, disp);
          }
        }
      } else if ( Assembler::is_simm16(disp) ) {
        if ( scale == 0 ) {
          __ daddu(AT, as_Register(base), as_Register(index));
          if ( value == 0 ){
            __ sb(R0, AT, disp);
          } else {
            __ move(T9, value);
            __ sb(T9, AT, disp);
          }
        } else {
          __ dsll(AT, as_Register(index), scale);
          __ daddu(AT, as_Register(base), AT);
          if ( value == 0 ) {
            __ sb(R0, AT, disp);
          } else {
            __ move(T9, value);
            __ sb(T9, AT, disp);
          }
        }
      } else {
        // Large displacement: fold index and disp into AT, gssbx adds base.
        if ( scale == 0 ) {
          __ move(AT, disp);
          __ daddu(AT, as_Register(index), AT);
          if ( value == 0 ) {
            __ gssbx(R0, as_Register(base), AT, 0);
          } else {
            __ move(T9, value);
            __ gssbx(T9, as_Register(base), AT, 0);
          }
        } else {
          __ dsll(AT, as_Register(index), scale);
          __ move(T9, disp);
          __ daddu(AT, AT, T9);
          if ( value == 0 ) {
            __ gssbx(R0, as_Register(base), AT, 0);
          } else {
            __ move(T9, value);
            __ gssbx(T9, as_Register(base), AT, 0);
          }
        }
      }
    } else { //not use loongson isa
      // Generic MIPS path: form base + (index << scale) in AT, then store.
      if (scale == 0) {
        __ daddu(AT, as_Register(base), as_Register(index));
      } else {
        __ dsll(AT, as_Register(index), scale);
        __ daddu(AT, as_Register(base), AT);
      }
      if( Assembler::is_simm16(disp) ) {
        if (value == 0) {
          __ sb(R0, AT, disp);
        } else {
          __ move(T9, value);
          __ sb(T9, AT, disp);
        }
      } else {
        if (value == 0) {
          __ move(T9, disp);
          __ daddu(AT, AT, T9);
          __ sb(R0, AT, 0);
        } else {
          __ move(T9, disp);
          __ daddu(AT, AT, T9);
          __ move(T9, value);
          __ sb(T9, AT, 0);
        }
      }
    }
  } else {
    // No index register.
    if ( UseLoongsonISA ){
      if ( Assembler::is_simm16(disp) ){
        if ( value == 0 ) {
          __ sb(R0, as_Register(base), disp);
        } else {
          __ move(AT, value);
          __ sb(AT, as_Register(base), disp);
        }
      } else {
        __ move(AT, disp);
        if ( value == 0 ) {
          __ gssbx(R0, as_Register(base), AT, 0);
        } else {
          __ move(T9, value);
          __ gssbx(T9, as_Register(base), AT, 0);
        }
      }
    } else {
      if( Assembler::is_simm16(disp) ) {
        if (value == 0) {
          __ sb(R0, as_Register(base), disp);
        } else {
          __ move(AT, value);
          __ sb(AT, as_Register(base), disp);
        }
      } else {
        if (value == 0) {
          __ move(T9, disp);
          __ daddu(AT, as_Register(base), T9);
          __ sb(R0, AT, 0);
        } else {
          __ move(T9, disp);
          __ daddu(AT, as_Register(base), T9);
          __ move(T9, value);
          __ sb(T9, AT, 0);
        }
      }
    }
  }

  // Full barrier after the store (this is the "_sync" variant).
  __ sync();
%}
// Load Short (16bit signed)
// Loads a sign-extended 16-bit value from $mem into $dst via lh.
// AT and T9 are scratch.  On Loongson CPUs the fused indexed load
// gslhx(rd, rbase, rindex, simm8) avoids the explicit address add.
enc_class load_S_enc (mRegI dst, memory mem) %{
  MacroAssembler _masm(&cbuf);
  int dst = $dst$$reg;
  int base = $mem$$base;
  int index = $mem$$index;
  int scale = $mem$$scale;
  int disp = $mem$$disp;

  if( index != 0 ) {
    if ( UseLoongsonISA ) {
      if ( Assembler::is_simm(disp, 8) ) {
        if (scale == 0) {
          __ gslhx(as_Register(dst), as_Register(base), as_Register(index), disp);
        } else {
          __ dsll(AT, as_Register(index), scale);
          __ gslhx(as_Register(dst), as_Register(base), AT, disp);
        }
      } else if ( Assembler::is_simm16(disp) ) {
        if (scale == 0) {
          __ daddu(AT, as_Register(base), as_Register(index));
          __ lh(as_Register(dst), AT, disp);
        } else {
          __ dsll(AT, as_Register(index), scale);
          __ daddu(AT, as_Register(base), AT);
          __ lh(as_Register(dst), AT, disp);
        }
      } else {
        // Large displacement: fold index and disp into AT, gslhx adds base.
        if (scale == 0) {
          __ move(AT, disp);
          __ daddu(AT, as_Register(index), AT);
          __ gslhx(as_Register(dst), as_Register(base), AT, 0);
        } else {
          __ dsll(AT, as_Register(index), scale);
          __ move(T9, disp);
          __ daddu(AT, AT, T9);
          __ gslhx(as_Register(dst), as_Register(base), AT, 0);
        }
      }
    } else { // not use loongson isa
      if (scale == 0) {
        __ daddu(AT, as_Register(base), as_Register(index));
      } else {
        __ dsll(AT, as_Register(index), scale);
        __ daddu(AT, as_Register(base), AT);
      }
      if( Assembler::is_simm16(disp) ) {
        __ lh(as_Register(dst), AT, disp);
      } else {
        __ move(T9, disp);
        __ daddu(AT, AT, T9);
        __ lh(as_Register(dst), AT, 0);
      }
    }
  } else { // index is 0
    if ( UseLoongsonISA ) {
      if ( Assembler::is_simm16(disp) ) {
        __ lh(as_Register(dst), as_Register(base), disp);
      } else {
        __ move(T9, disp);
        __ gslhx(as_Register(dst), as_Register(base), T9, 0);
      }
    } else { //not use loongson isa
      if( Assembler::is_simm16(disp) ) {
        __ lh(as_Register(dst), as_Register(base), disp);
      } else {
        __ move(T9, disp);
        __ daddu(AT, as_Register(base), T9);
        __ lh(as_Register(dst), AT, 0);
      }
    }
  }
%}
2290 // Load Char (16bit unsigned)
2291 enc_class load_C_enc (mRegI dst, memory mem) %{
2292 MacroAssembler _masm(&cbuf);
2293 int dst = $dst$$reg;
2294 int base = $mem$$base;
2295 int index = $mem$$index;
2296 int scale = $mem$$scale;
2297 int disp = $mem$$disp;
2299 if( index != 0 ) {
2300 if (scale == 0) {
2301 __ daddu(AT, as_Register(base), as_Register(index));
2302 } else {
2303 __ dsll(AT, as_Register(index), scale);
2304 __ daddu(AT, as_Register(base), AT);
2305 }
2306 if( Assembler::is_simm16(disp) ) {
2307 __ lhu(as_Register(dst), AT, disp);
2308 } else {
2309 __ move(T9, disp);
2310 __ addu(AT, AT, T9);
2311 __ lhu(as_Register(dst), AT, 0);
2312 }
2313 } else {
2314 if( Assembler::is_simm16(disp) ) {
2315 __ lhu(as_Register(dst), as_Register(base), disp);
2316 } else {
2317 __ move(T9, disp);
2318 __ daddu(AT, as_Register(base), T9);
2319 __ lhu(as_Register(dst), AT, 0);
2320 }
2321 }
2322 %}
// Store Char (16bit unsigned)
// Stores the low 16 bits of $src to $mem via sh (or the Loongson fused
// indexed store gsshx when the displacement fits in 8 signed bits).
// AT and T9 are scratch.
// NOTE(review): this encoding uses addu (32-bit add, sign-extended) for
// address arithmetic while sibling encodings (load_S_enc, load_N_enc,
// store_I_immI_enc) use daddu — confirm whether a 64-bit daddu is needed
// for addresses above 2^31.
enc_class store_C_reg_enc (memory mem, mRegI src) %{
  MacroAssembler _masm(&cbuf);
  int src = $src$$reg;
  int base = $mem$$base;
  int index = $mem$$index;
  int scale = $mem$$scale;
  int disp = $mem$$disp;

  if( index != 0 ) {
    if( Assembler::is_simm16(disp) ) {
      if( UseLoongsonISA && Assembler::is_simm(disp, 8) ) {
        if (scale == 0) {
          __ gsshx(as_Register(src), as_Register(base), as_Register(index), disp);
        } else {
          __ dsll(AT, as_Register(index), scale);
          __ gsshx(as_Register(src), as_Register(base), AT, disp);
        }
      } else {
        if (scale == 0) {
          __ addu(AT, as_Register(base), as_Register(index));
        } else {
          __ dsll(AT, as_Register(index), scale);
          __ addu(AT, as_Register(base), AT);
        }
        __ sh(as_Register(src), AT, disp);
      }
    } else {
      // Displacement does not fit 16 bits: materialize it in T9.
      if (scale == 0) {
        __ addu(AT, as_Register(base), as_Register(index));
      } else {
        __ dsll(AT, as_Register(index), scale);
        __ addu(AT, as_Register(base), AT);
      }
      __ move(T9, disp);
      if( UseLoongsonISA ) {
        __ gsshx(as_Register(src), AT, T9, 0);
      } else {
        __ addu(AT, AT, T9);
        __ sh(as_Register(src), AT, 0);
      }
    }
  } else {
    // No index register.
    if( Assembler::is_simm16(disp) ) {
      __ sh(as_Register(src), as_Register(base), disp);
    } else {
      __ move(T9, disp);
      if( UseLoongsonISA ) {
        __ gsshx(as_Register(src), as_Register(base), T9, 0);
      } else {
        __ addu(AT, as_Register(base), T9);
        __ sh(as_Register(src), AT, 0);
      }
    }
  }
%}
// Store a zero char (16 bits) to $mem: same addressing logic as
// store_C_reg_enc but the value comes from R0, so no constant is
// materialized.  AT and T9 are scratch.
// NOTE(review): address adds use addu (32-bit, sign-extended) — see the
// daddu-vs-addu note on store_C_reg_enc; confirm 64-bit safety.
enc_class store_C0_enc (memory mem) %{
  MacroAssembler _masm(&cbuf);
  int base = $mem$$base;
  int index = $mem$$index;
  int scale = $mem$$scale;
  int disp = $mem$$disp;

  if( index != 0 ) {
    if( Assembler::is_simm16(disp) ) {
      if( UseLoongsonISA && Assembler::is_simm(disp, 8) ) {
        if (scale == 0) {
          __ gsshx(R0, as_Register(base), as_Register(index), disp);
        } else {
          __ dsll(AT, as_Register(index), scale);
          __ gsshx(R0, as_Register(base), AT, disp);
        }
      } else {
        if (scale == 0) {
          __ addu(AT, as_Register(base), as_Register(index));
        } else {
          __ dsll(AT, as_Register(index), scale);
          __ addu(AT, as_Register(base), AT);
        }
        __ sh(R0, AT, disp);
      }
    } else {
      if (scale == 0) {
        __ addu(AT, as_Register(base), as_Register(index));
      } else {
        __ dsll(AT, as_Register(index), scale);
        __ addu(AT, as_Register(base), AT);
      }
      __ move(T9, disp);
      if( UseLoongsonISA ) {
        __ gsshx(R0, AT, T9, 0);
      } else {
        __ addu(AT, AT, T9);
        __ sh(R0, AT, 0);
      }
    }
  } else {
    if( Assembler::is_simm16(disp) ) {
      __ sh(R0, as_Register(base), disp);
    } else {
      __ move(T9, disp);
      if( UseLoongsonISA ) {
        __ gsshx(R0, as_Register(base), T9, 0);
      } else {
        __ addu(AT, as_Register(base), T9);
        __ sh(R0, AT, 0);
      }
    }
  }
%}
// Load a 32-bit int from $mem into $dst via lw (sign-extending on MIPS64),
// or the Loongson fused indexed load gslwx when the displacement fits in
// 8 signed bits.  AT and T9 are scratch.
// NOTE(review): address adds use addu (32-bit, sign-extended) rather than
// the daddu used by load_S_enc / load_N_enc — confirm 64-bit safety.
enc_class load_I_enc (mRegI dst, memory mem) %{
  MacroAssembler _masm(&cbuf);
  int dst = $dst$$reg;
  int base = $mem$$base;
  int index = $mem$$index;
  int scale = $mem$$scale;
  int disp = $mem$$disp;

  if( index != 0 ) {
    if( Assembler::is_simm16(disp) ) {
      if( UseLoongsonISA && Assembler::is_simm(disp, 8) ) {
        if (scale == 0) {
          __ gslwx(as_Register(dst), as_Register(base), as_Register(index), disp);
        } else {
          __ dsll(AT, as_Register(index), scale);
          __ gslwx(as_Register(dst), as_Register(base), AT, disp);
        }
      } else {
        if (scale == 0) {
          __ addu(AT, as_Register(base), as_Register(index));
        } else {
          __ dsll(AT, as_Register(index), scale);
          __ addu(AT, as_Register(base), AT);
        }
        __ lw(as_Register(dst), AT, disp);
      }
    } else {
      // Displacement does not fit 16 bits: materialize it in T9.
      if (scale == 0) {
        __ addu(AT, as_Register(base), as_Register(index));
      } else {
        __ dsll(AT, as_Register(index), scale);
        __ addu(AT, as_Register(base), AT);
      }
      __ move(T9, disp);
      if( UseLoongsonISA ) {
        __ gslwx(as_Register(dst), AT, T9, 0);
      } else {
        __ addu(AT, AT, T9);
        __ lw(as_Register(dst), AT, 0);
      }
    }
  } else {
    // No index register.
    if( Assembler::is_simm16(disp) ) {
      __ lw(as_Register(dst), as_Register(base), disp);
    } else {
      __ move(T9, disp);
      if( UseLoongsonISA ) {
        __ gslwx(as_Register(dst), as_Register(base), T9, 0);
      } else {
        __ addu(AT, as_Register(base), T9);
        __ lw(as_Register(dst), AT, 0);
      }
    }
  }
%}
// Store the 32-bit int in $src to $mem via sw, or the Loongson fused
// indexed store gsswx when the displacement fits in 8 signed bits.
// AT and T9 are scratch.
// NOTE(review): address adds use addu (32-bit, sign-extended) — see the
// daddu-vs-addu note on load_I_enc; confirm 64-bit safety.
enc_class store_I_reg_enc (memory mem, mRegI src) %{
  MacroAssembler _masm(&cbuf);
  int src = $src$$reg;
  int base = $mem$$base;
  int index = $mem$$index;
  int scale = $mem$$scale;
  int disp = $mem$$disp;

  if( index != 0 ) {
    if( Assembler::is_simm16(disp) ) {
      if( UseLoongsonISA && Assembler::is_simm(disp, 8) ) {
        if (scale == 0) {
          __ gsswx(as_Register(src), as_Register(base), as_Register(index), disp);
        } else {
          __ dsll(AT, as_Register(index), scale);
          __ gsswx(as_Register(src), as_Register(base), AT, disp);
        }
      } else {
        if (scale == 0) {
          __ addu(AT, as_Register(base), as_Register(index));
        } else {
          __ dsll(AT, as_Register(index), scale);
          __ addu(AT, as_Register(base), AT);
        }
        __ sw(as_Register(src), AT, disp);
      }
    } else {
      // Displacement does not fit 16 bits: materialize it in T9.
      if (scale == 0) {
        __ addu(AT, as_Register(base), as_Register(index));
      } else {
        __ dsll(AT, as_Register(index), scale);
        __ addu(AT, as_Register(base), AT);
      }
      __ move(T9, disp);
      if( UseLoongsonISA ) {
        __ gsswx(as_Register(src), AT, T9, 0);
      } else {
        __ addu(AT, AT, T9);
        __ sw(as_Register(src), AT, 0);
      }
    }
  } else {
    // No index register.
    if( Assembler::is_simm16(disp) ) {
      __ sw(as_Register(src), as_Register(base), disp);
    } else {
      __ move(T9, disp);
      if( UseLoongsonISA ) {
        __ gsswx(as_Register(src), as_Register(base), T9, 0);
      } else {
        __ addu(AT, as_Register(base), T9);
        __ sw(as_Register(src), AT, 0);
      }
    }
  }
%}
// Store a 32-bit immediate ($src$$constant) to $mem.  A zero value is
// stored directly from R0; otherwise the constant is materialized in a
// scratch register (T9 or AT) first.  On Loongson CPUs the fused indexed
// store gsswx is used when the displacement fits in 8 signed bits.
enc_class store_I_immI_enc (memory mem, immI src) %{
  MacroAssembler _masm(&cbuf);
  int base = $mem$$base;
  int index = $mem$$index;
  int scale = $mem$$scale;
  int disp = $mem$$disp;
  int value = $src$$constant;

  if( index != 0 ) {
    if ( UseLoongsonISA ) {
      if ( Assembler::is_simm(disp, 8) ) {
        if ( scale == 0 ) {
          if ( value == 0 ) {
            __ gsswx(R0, as_Register(base), as_Register(index), disp);
          } else {
            __ move(T9, value);
            __ gsswx(T9, as_Register(base), as_Register(index), disp);
          }
        } else {
          __ dsll(AT, as_Register(index), scale);
          if ( value == 0 ) {
            __ gsswx(R0, as_Register(base), AT, disp);
          } else {
            __ move(T9, value);
            __ gsswx(T9, as_Register(base), AT, disp);
          }
        }
      } else if ( Assembler::is_simm16(disp) ) {
        if ( scale == 0 ) {
          __ daddu(AT, as_Register(base), as_Register(index));
          if ( value == 0 ) {
            __ sw(R0, AT, disp);
          } else {
            __ move(T9, value);
            __ sw(T9, AT, disp);
          }
        } else {
          __ dsll(AT, as_Register(index), scale);
          __ daddu(AT, as_Register(base), AT);
          if ( value == 0 ) {
            __ sw(R0, AT, disp);
          } else {
            __ move(T9, value);
            __ sw(T9, AT, disp);
          }
        }
      } else {
        // Large displacement: fold index and disp into AT, gsswx adds base.
        if ( scale == 0 ) {
          __ move(T9, disp);
          __ daddu(AT, as_Register(index), T9);
          if ( value ==0 ) {
            __ gsswx(R0, as_Register(base), AT, 0);
          } else {
            __ move(T9, value);
            __ gsswx(T9, as_Register(base), AT, 0);
          }
        } else {
          __ dsll(AT, as_Register(index), scale);
          __ move(T9, disp);
          __ daddu(AT, AT, T9);
          if ( value == 0 ) {
            __ gsswx(R0, as_Register(base), AT, 0);
          } else {
            __ move(T9, value);
            __ gsswx(T9, as_Register(base), AT, 0);
          }
        }
      }
    } else { //not use loongson isa
      // Generic MIPS path: form base + (index << scale) in AT, then store.
      if (scale == 0) {
        __ daddu(AT, as_Register(base), as_Register(index));
      } else {
        __ dsll(AT, as_Register(index), scale);
        __ daddu(AT, as_Register(base), AT);
      }
      if( Assembler::is_simm16(disp) ) {
        if (value == 0) {
          __ sw(R0, AT, disp);
        } else {
          __ move(T9, value);
          __ sw(T9, AT, disp);
        }
      } else {
        if (value == 0) {
          __ move(T9, disp);
          __ daddu(AT, AT, T9);
          __ sw(R0, AT, 0);
        } else {
          __ move(T9, disp);
          __ daddu(AT, AT, T9);
          __ move(T9, value);
          __ sw(T9, AT, 0);
        }
      }
    }
  } else {
    // No index register.
    if ( UseLoongsonISA ) {
      if ( Assembler::is_simm16(disp) ) {
        if ( value == 0 ) {
          __ sw(R0, as_Register(base), disp);
        } else {
          __ move(AT, value);
          __ sw(AT, as_Register(base), disp);
        }
      } else {
        __ move(T9, disp);
        if ( value == 0 ) {
          __ gsswx(R0, as_Register(base), T9, 0);
        } else {
          __ move(AT, value);
          __ gsswx(AT, as_Register(base), T9, 0);
        }
      }
    } else {
      if( Assembler::is_simm16(disp) ) {
        if (value == 0) {
          __ sw(R0, as_Register(base), disp);
        } else {
          __ move(AT, value);
          __ sw(AT, as_Register(base), disp);
        }
      } else {
        if (value == 0) {
          __ move(T9, disp);
          __ daddu(AT, as_Register(base), T9);
          __ sw(R0, AT, 0);
        } else {
          __ move(T9, disp);
          __ daddu(AT, as_Register(base), T9);
          __ move(T9, value);
          __ sw(T9, AT, 0);
        }
      }
    }
  }
%}
// Load a narrow oop (32-bit compressed pointer) from $mem into $dst.
// lwu zero-extends, which is required for compressed-oop decoding.
// The memory operand must carry no displacement relocation (asserted).
// AT and T9 are scratch; li materializes the large displacement.
enc_class load_N_enc (mRegN dst, memory mem) %{
  MacroAssembler _masm(&cbuf);
  int dst = $dst$$reg;
  int base = $mem$$base;
  int index = $mem$$index;
  int scale = $mem$$scale;
  int disp = $mem$$disp;
  relocInfo::relocType disp_reloc = $mem->disp_reloc();
  assert(disp_reloc == relocInfo::none, "cannot have disp");

  if( index != 0 ) {
    // Form base + (index << scale) in AT, then load.
    if (scale == 0) {
      __ daddu(AT, as_Register(base), as_Register(index));
    } else {
      __ dsll(AT, as_Register(index), scale);
      __ daddu(AT, as_Register(base), AT);
    }
    if( Assembler::is_simm16(disp) ) {
      __ lwu(as_Register(dst), AT, disp);
    } else {
      __ li(T9, disp);
      __ daddu(AT, AT, T9);
      __ lwu(as_Register(dst), AT, 0);
    }
  } else {
    if( Assembler::is_simm16(disp) ) {
      __ lwu(as_Register(dst), as_Register(base), disp);
    } else {
      __ li(T9, disp);
      __ daddu(AT, as_Register(base), T9);
      __ lwu(as_Register(dst), AT, 0);
    }
  }

%}
// Load a 64-bit pointer from $mem into $dst via ld, or the Loongson fused
// indexed load gsldx when possible.  The memory operand must carry no
// displacement relocation (asserted).  AT and T9 are scratch.
enc_class load_P_enc (mRegP dst, memory mem) %{
  MacroAssembler _masm(&cbuf);
  int dst = $dst$$reg;
  int base = $mem$$base;
  int index = $mem$$index;
  int scale = $mem$$scale;
  int disp = $mem$$disp;
  relocInfo::relocType disp_reloc = $mem->disp_reloc();
  assert(disp_reloc == relocInfo::none, "cannot have disp");

  if( index != 0 ) {
    if ( UseLoongsonISA ) {
      if ( Assembler::is_simm(disp, 8) ) {
        if ( scale != 0 ) {
          __ dsll(AT, as_Register(index), scale);
          __ gsldx(as_Register(dst), as_Register(base), AT, disp);
        } else {
          __ gsldx(as_Register(dst), as_Register(base), as_Register(index), disp);
        }
      } else if ( Assembler::is_simm16(disp) ){
        if ( scale != 0 ) {
          __ dsll(AT, as_Register(index), scale);
          __ daddu(AT, AT, as_Register(base));
        } else {
          __ daddu(AT, as_Register(index), as_Register(base));
        }
        __ ld(as_Register(dst), AT, disp);
      } else {
        // Large displacement: fold index and disp into AT, gsldx adds base.
        if ( scale != 0 ) {
          __ dsll(AT, as_Register(index), scale);
          __ move(T9, disp);
          __ daddu(AT, AT, T9);
        } else {
          __ move(T9, disp);
          __ daddu(AT, as_Register(index), T9);
        }
        __ gsldx(as_Register(dst), as_Register(base), AT, 0);
      }
    } else { //not use loongson isa
      if (scale == 0) {
        __ daddu(AT, as_Register(base), as_Register(index));
      } else {
        __ dsll(AT, as_Register(index), scale);
        __ daddu(AT, as_Register(base), AT);
      }
      if( Assembler::is_simm16(disp) ) {
        __ ld(as_Register(dst), AT, disp);
      } else {
        __ li(T9, disp);
        __ daddu(AT, AT, T9);
        __ ld(as_Register(dst), AT, 0);
      }
    }
  } else {
    if ( UseLoongsonISA ) {
      if ( Assembler::is_simm16(disp) ){
        __ ld(as_Register(dst), as_Register(base), disp);
      } else {
        __ li(T9, disp);
        __ gsldx(as_Register(dst), as_Register(base), T9, 0);
      }
    } else { //not use loongson isa
      if( Assembler::is_simm16(disp) ) {
        __ ld(as_Register(dst), as_Register(base), disp);
      } else {
        __ li(T9, disp);
        __ daddu(AT, as_Register(base), T9);
        __ ld(as_Register(dst), AT, 0);
      }
    }
  }
  // if( disp_reloc != relocInfo::none) __ ld(as_Register(dst), as_Register(dst), 0);
%}
// Store the 64-bit pointer in $src to $mem via sd, or the Loongson fused
// indexed store gssdx when possible.  AT and T9 are scratch.
enc_class store_P_reg_enc (memory mem, mRegP src) %{
  MacroAssembler _masm(&cbuf);
  int src = $src$$reg;
  int base = $mem$$base;
  int index = $mem$$index;
  int scale = $mem$$scale;
  int disp = $mem$$disp;

  if( index != 0 ) {
    if ( UseLoongsonISA ){
      if ( Assembler::is_simm(disp, 8) ) {
        if ( scale == 0 ) {
          __ gssdx(as_Register(src), as_Register(base), as_Register(index), disp);
        } else {
          __ dsll(AT, as_Register(index), scale);
          __ gssdx(as_Register(src), as_Register(base), AT, disp);
        }
      } else if ( Assembler::is_simm16(disp) ) {
        if ( scale == 0 ) {
          __ daddu(AT, as_Register(base), as_Register(index));
        } else {
          __ dsll(AT, as_Register(index), scale);
          __ daddu(AT, as_Register(base), AT);
        }
        __ sd(as_Register(src), AT, disp);
      } else {
        // Large displacement: fold index and disp into AT, gssdx adds base.
        if ( scale == 0 ) {
          __ move(T9, disp);
          __ daddu(AT, as_Register(index), T9);
        } else {
          __ dsll(AT, as_Register(index), scale);
          __ move(T9, disp);
          __ daddu(AT, AT, T9);
        }
        __ gssdx(as_Register(src), as_Register(base), AT, 0);
      }
    } else { //not use loongson isa
      if (scale == 0) {
        __ daddu(AT, as_Register(base), as_Register(index));
      } else {
        __ dsll(AT, as_Register(index), scale);
        __ daddu(AT, as_Register(base), AT);
      }
      if( Assembler::is_simm16(disp) ) {
        __ sd(as_Register(src), AT, disp);
      } else {
        __ move(T9, disp);
        __ daddu(AT, AT, T9);
        __ sd(as_Register(src), AT, 0);
      }
    }
  } else {
    if ( UseLoongsonISA ) {
      if ( Assembler::is_simm16(disp) ) {
        __ sd(as_Register(src), as_Register(base), disp);
      } else {
        __ move(T9, disp);
        __ gssdx(as_Register(src), as_Register(base), T9, 0);
      }
    } else {
      if( Assembler::is_simm16(disp) ) {
        __ sd(as_Register(src), as_Register(base), disp);
      } else {
        __ move(T9, disp);
        __ daddu(AT, as_Register(base), T9);
        __ sd(as_Register(src), AT, 0);
      }
    }
  }
%}
// Store the narrow oop (32-bit compressed pointer) in $src to $mem.
// Narrow oops are 32 bits wide, hence sw / gsswx rather than sd.
// Structure mirrors store_P_reg_enc.  AT and T9 are scratch.
enc_class store_N_reg_enc (memory mem, mRegN src) %{
  MacroAssembler _masm(&cbuf);
  int src = $src$$reg;
  int base = $mem$$base;
  int index = $mem$$index;
  int scale = $mem$$scale;
  int disp = $mem$$disp;

  if( index != 0 ) {
    if ( UseLoongsonISA ){
      if ( Assembler::is_simm(disp, 8) ) {
        if ( scale == 0 ) {
          __ gsswx(as_Register(src), as_Register(base), as_Register(index), disp);
        } else {
          __ dsll(AT, as_Register(index), scale);
          __ gsswx(as_Register(src), as_Register(base), AT, disp);
        }
      } else if ( Assembler::is_simm16(disp) ) {
        if ( scale == 0 ) {
          __ daddu(AT, as_Register(base), as_Register(index));
        } else {
          __ dsll(AT, as_Register(index), scale);
          __ daddu(AT, as_Register(base), AT);
        }
        __ sw(as_Register(src), AT, disp);
      } else {
        // Large displacement: fold index and disp into AT, gsswx adds base.
        if ( scale == 0 ) {
          __ move(T9, disp);
          __ daddu(AT, as_Register(index), T9);
        } else {
          __ dsll(AT, as_Register(index), scale);
          __ move(T9, disp);
          __ daddu(AT, AT, T9);
        }
        __ gsswx(as_Register(src), as_Register(base), AT, 0);
      }
    } else { //not use loongson isa
      if (scale == 0) {
        __ daddu(AT, as_Register(base), as_Register(index));
      } else {
        __ dsll(AT, as_Register(index), scale);
        __ daddu(AT, as_Register(base), AT);
      }
      if( Assembler::is_simm16(disp) ) {
        __ sw(as_Register(src), AT, disp);
      } else {
        __ move(T9, disp);
        __ daddu(AT, AT, T9);
        __ sw(as_Register(src), AT, 0);
      }
    }
  } else {
    if ( UseLoongsonISA ) {
      if ( Assembler::is_simm16(disp) ) {
        __ sw(as_Register(src), as_Register(base), disp);
      } else {
        __ move(T9, disp);
        __ gsswx(as_Register(src), as_Register(base), T9, 0);
      }
    } else {
      if( Assembler::is_simm16(disp) ) {
        __ sw(as_Register(src), as_Register(base), disp);
      } else {
        __ move(T9, disp);
        __ daddu(AT, as_Register(base), T9);
        __ sw(as_Register(src), AT, 0);
      }
    }
  }
%}
// Store a null pointer (64-bit zero) to $mem: sd of R0 at
// base[+index<<scale]+disp, or Loongson gssdx when the displacement fits
// in 8 signed bits.  AT and T9 are scratch.
enc_class store_P_immP0_enc (memory mem) %{
  MacroAssembler _masm(&cbuf);
  int base = $mem$$base;
  int index = $mem$$index;
  int scale = $mem$$scale;
  int disp = $mem$$disp;

  if( index != 0 ) {
    if (scale == 0) {
      if( Assembler::is_simm16(disp) ) {
        if (UseLoongsonISA && Assembler::is_simm(disp, 8)) {
          __ gssdx(R0, as_Register(base), as_Register(index), disp);
        } else {
          __ daddu(AT, as_Register(base), as_Register(index));
          __ sd(R0, AT, disp);
        }
      } else {
        // Large displacement: materialize in T9.
        __ daddu(AT, as_Register(base), as_Register(index));
        __ move(T9, disp);
        if(UseLoongsonISA) {
          __ gssdx(R0, AT, T9, 0);
        } else {
          __ daddu(AT, AT, T9);
          __ sd(R0, AT, 0);
        }
      }
    } else {
      __ dsll(AT, as_Register(index), scale);
      if( Assembler::is_simm16(disp) ) {
        if (UseLoongsonISA && Assembler::is_simm(disp, 8)) {
          __ gssdx(R0, as_Register(base), AT, disp);
        } else {
          __ daddu(AT, as_Register(base), AT);
          __ sd(R0, AT, disp);
        }
      } else {
        __ daddu(AT, as_Register(base), AT);
        __ move(T9, disp);
        if (UseLoongsonISA) {
          __ gssdx(R0, AT, T9, 0);
        } else {
          __ daddu(AT, AT, T9);
          __ sd(R0, AT, 0);
        }
      }
    }
  } else {
    // No index register.
    if( Assembler::is_simm16(disp) ) {
      __ sd(R0, as_Register(base), disp);
    } else {
      __ move(T9, disp);
      if (UseLoongsonISA) {
        __ gssdx(R0, as_Register(base), T9, 0);
      } else {
        __ daddu(AT, as_Register(base), T9);
        __ sd(R0, AT, 0);
      }
    }
  }
%}
3000 enc_class storeImmN0_enc(memory mem, ImmN0 src) %{
3001 MacroAssembler _masm(&cbuf);
3002 int base = $mem$$base;
3003 int index = $mem$$index;
3004 int scale = $mem$$scale;
3005 int disp = $mem$$disp;
3007 if(index!=0){
3008 if (scale == 0) {
3009 __ daddu(AT, as_Register(base), as_Register(index));
3010 } else {
3011 __ dsll(AT, as_Register(index), scale);
3012 __ daddu(AT, as_Register(base), AT);
3013 }
3015 if( Assembler::is_simm16(disp) ) {
3016 __ sw(R0, AT, disp);
3017 } else {
3018 __ move(T9, disp);
3019 __ daddu(AT, AT, T9);
3020 __ sw(R0, AT, 0);
3021 }
3022 }
3023 else {
3024 if( Assembler::is_simm16(disp) ) {
3025 __ sw(R0, as_Register(base), disp);
3026 } else {
3027 __ move(T9, disp);
3028 __ daddu(AT, as_Register(base), T9);
3029 __ sw(R0, AT, 0);
3030 }
3031 }
3032 %}
// Load a 64-bit long from $mem into $dst via a single ld.
enc_class load_L_enc (mRegL dst, memory mem) %{
  MacroAssembler _masm(&cbuf);
  int base = $mem$$base;
  int index = $mem$$index;
  int scale = $mem$$scale;
  int disp = $mem$$disp;
  Register dst_reg = as_Register($dst$$reg);

  // Implicit null check (2013/03/27): $base may hold a null object, and
  // the server compiler records the exception offset at the first
  // instruction of this encoding, so probe the base register up front.
  __ lw(AT, as_Register(base), 0);

  // History (2012/10/04, seen in java.util.Arrays::sort1): the original
  // lw lo / lw hi pair at disp and disp+4 was replaced by one 64-bit ld.
  if( index != 0 ) {
    if (scale == 0) {
      __ daddu(AT, as_Register(base), as_Register(index));
    } else {
      __ dsll(AT, as_Register(index), scale);
      __ daddu(AT, as_Register(base), AT);
    }
    if( Assembler::is_simm16(disp) ) {
      __ ld(dst_reg, AT, disp);
    } else {
      __ move(T9, disp);
      __ daddu(AT, AT, T9);
      __ ld(dst_reg, AT, 0);
    }
  } else {
    if( Assembler::is_simm16(disp) ) {
      // NOTE(review): the move looks redundant — ld(dst_reg, base, disp)
      // would suffice; confirm nothing depends on the extra instruction
      // (e.g. a fixed code-size assumption) before simplifying.
      __ move(AT, as_Register(base));
      __ ld(dst_reg, AT, disp);
    } else {
      __ move(T9, disp);
      __ daddu(AT, as_Register(base), T9);
      __ ld(dst_reg, AT, 0);
    }
  }
%}
// Store the 64-bit long in $src to $mem via sd.  AT and T9 are scratch.
enc_class store_L_reg_enc (memory mem, mRegL src) %{
  MacroAssembler _masm(&cbuf);
  int base = $mem$$base;
  int index = $mem$$index;
  int scale = $mem$$scale;
  int disp = $mem$$disp;
  Register src_reg = as_Register($src$$reg);

  if( index != 0 ) {
    // Form base + (index << scale) in AT, then store.
    if (scale == 0) {
      __ daddu(AT, as_Register(base), as_Register(index));
    } else {
      __ dsll(AT, as_Register(index), scale);
      __ daddu(AT, as_Register(base), AT);
    }
    if( Assembler::is_simm16(disp) ) {
      __ sd(src_reg, AT, disp);
    } else {
      __ move(T9, disp);
      __ daddu(AT, AT, T9);
      __ sd(src_reg, AT, 0);
    }
  } else {
    if( Assembler::is_simm16(disp) ) {
      // NOTE(review): the move looks redundant — sd(src_reg, base, disp)
      // would suffice; confirm nothing depends on the extra instruction
      // before simplifying (load_L_enc has the same pattern).
      __ move(AT, as_Register(base));
      __ sd(src_reg, AT, disp);
    } else {
      __ move(T9, disp);
      __ daddu(AT, as_Register(base), T9);
      __ sd(src_reg, AT, 0);
    }
  }
%}
3123 enc_class store_L_immL0_enc (memory mem, immL0 src) %{
3124 MacroAssembler _masm(&cbuf);
3125 int base = $mem$$base;
3126 int index = $mem$$index;
3127 int scale = $mem$$scale;
3128 int disp = $mem$$disp;
3130 if( index != 0 ) {
3131 if (scale == 0) {
3132 __ daddu(AT, as_Register(base), as_Register(index));
3133 } else {
3134 __ dsll(AT, as_Register(index), scale);
3135 __ daddu(AT, as_Register(base), AT);
3136 }
3137 if( Assembler::is_simm16(disp) ) {
3138 __ sd(R0, AT, disp);
3139 } else {
3140 __ move(T9, disp);
3141 __ addu(AT, AT, T9);
3142 __ sd(R0, AT, 0);
3143 }
3144 } else {
3145 if( Assembler::is_simm16(disp) ) {
3146 __ move(AT, as_Register(base));
3147 __ sd(R0, AT, disp);
3148 } else {
3149 __ move(T9, disp);
3150 __ addu(AT, as_Register(base), T9);
3151 __ sd(R0, AT, 0);
3152 }
3153 }
3154 %}
  enc_class load_F_enc (regF dst, memory mem) %{
    // Load a single-precision float: dst = [base + (index << scale) + disp].
    // Prefers the Loongson indexed load (gslwxc1) when UseLoongsonISA is set
    // and the displacement fits its 8-bit immediate; otherwise forms the
    // address in AT (with T9 for a wide disp) and uses plain lwc1.
    MacroAssembler _masm(&cbuf);
    int base = $mem$$base;
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;
    FloatRegister dst = $dst$$FloatRegister;

    if( index != 0 ) {
      if( Assembler::is_simm16(disp) ) {
        if( UseLoongsonISA && Assembler::is_simm(disp, 8) ) {
          // disp fits the 8-bit gs* offset: single indexed load.
          if (scale == 0) {
            __ gslwxc1(dst, as_Register(base), as_Register(index), disp);
          } else {
            __ dsll(AT, as_Register(index), scale);
            __ gslwxc1(dst, as_Register(base), AT, disp);
          }
        } else {
          // Fold base + (index << scale) into AT, then load with 16-bit disp.
          if (scale == 0) {
            __ daddu(AT, as_Register(base), as_Register(index));
          } else {
            __ dsll(AT, as_Register(index), scale);
            __ daddu(AT, as_Register(base), AT);
          }
          __ lwc1(dst, AT, disp);
        }
      } else {
        // disp wider than 16 bits: materialize it in T9.
        if (scale == 0) {
          __ daddu(AT, as_Register(base), as_Register(index));
        } else {
          __ dsll(AT, as_Register(index), scale);
          __ daddu(AT, as_Register(base), AT);
        }
        __ move(T9, disp);
        if( UseLoongsonISA ) {
          __ gslwxc1(dst, AT, T9, 0);
        } else {
          __ daddu(AT, AT, T9);
          __ lwc1(dst, AT, 0);
        }
      }
    } else {
      // No index register: address is base + disp.
      if( Assembler::is_simm16(disp) ) {
        __ lwc1(dst, as_Register(base), disp);
      } else {
        __ move(T9, disp);
        if( UseLoongsonISA ) {
          __ gslwxc1(dst, as_Register(base), T9, 0);
        } else {
          __ daddu(AT, as_Register(base), T9);
          __ lwc1(dst, AT, 0);
        }
      }
    }
  %}
3212 enc_class store_F_reg_enc (memory mem, regF src) %{
3213 MacroAssembler _masm(&cbuf);
3214 int base = $mem$$base;
3215 int index = $mem$$index;
3216 int scale = $mem$$scale;
3217 int disp = $mem$$disp;
3218 FloatRegister src = $src$$FloatRegister;
3220 if( index != 0 ) {
3221 if( Assembler::is_simm16(disp) ) {
3222 if( UseLoongsonISA && Assembler::is_simm(disp, 8) ) {
3223 if (scale == 0) {
3224 __ gsswxc1(src, as_Register(base), as_Register(index), disp);
3225 } else {
3226 __ dsll(AT, as_Register(index), scale);
3227 __ gsswxc1(src, as_Register(base), AT, disp);
3228 }
3229 } else {
3230 if (scale == 0) {
3231 __ daddu(AT, as_Register(base), as_Register(index));
3232 } else {
3233 __ dsll(AT, as_Register(index), scale);
3234 __ daddu(AT, as_Register(base), AT);
3235 }
3236 __ swc1(src, AT, disp);
3237 }
3238 } else {
3239 if (scale == 0) {
3240 __ daddu(AT, as_Register(base), as_Register(index));
3241 } else {
3242 __ dsll(AT, as_Register(index), scale);
3243 __ daddu(AT, as_Register(base), AT);
3244 }
3245 __ move(T9, disp);
3246 if( UseLoongsonISA ) {
3247 __ gsswxc1(src, AT, T9, 0);
3248 } else {
3249 __ daddu(AT, AT, T9);
3250 __ swc1(src, AT, 0);
3251 }
3252 }
3253 } else {
3254 if( Assembler::is_simm16(disp) ) {
3255 __ swc1(src, as_Register(base), disp);
3256 } else {
3257 __ move(T9, disp);
3258 if( UseLoongsonISA ) {
3259 __ gslwxc1(src, as_Register(base), T9, 0);
3260 } else {
3261 __ daddu(AT, as_Register(base), T9);
3262 __ swc1(src, AT, 0);
3263 }
3264 }
3265 }
3266 %}
3268 enc_class load_D_enc (regD dst, memory mem) %{
3269 MacroAssembler _masm(&cbuf);
3270 int base = $mem$$base;
3271 int index = $mem$$index;
3272 int scale = $mem$$scale;
3273 int disp = $mem$$disp;
3274 FloatRegister dst_reg = as_FloatRegister($dst$$reg);
3276 if( index != 0 ) {
3277 if( Assembler::is_simm16(disp) ) {
3278 if( UseLoongsonISA && Assembler::is_simm(disp, 8) ) {
3279 if (scale == 0) {
3280 __ gsldxc1(dst_reg, as_Register(base), as_Register(index), disp);
3281 } else {
3282 __ dsll(AT, as_Register(index), scale);
3283 __ gsldxc1(dst_reg, as_Register(base), AT, disp);
3284 }
3285 } else {
3286 if (scale == 0) {
3287 __ daddu(AT, as_Register(base), as_Register(index));
3288 } else {
3289 __ dsll(AT, as_Register(index), scale);
3290 __ daddu(AT, as_Register(base), AT);
3291 }
3292 __ ldc1(dst_reg, AT, disp);
3293 }
3294 } else {
3295 if (scale == 0) {
3296 __ daddu(AT, as_Register(base), as_Register(index));
3297 } else {
3298 __ dsll(AT, as_Register(index), scale);
3299 __ daddu(AT, as_Register(base), AT);
3300 }
3301 __ move(T9, disp);
3302 if( UseLoongsonISA ) {
3303 __ gsldxc1(dst_reg, AT, T9, 0);
3304 } else {
3305 __ addu(AT, AT, T9);
3306 __ ldc1(dst_reg, AT, 0);
3307 }
3308 }
3309 } else {
3310 if( Assembler::is_simm16(disp) ) {
3311 __ ldc1(dst_reg, as_Register(base), disp);
3312 } else {
3313 __ move(T9, disp);
3314 if( UseLoongsonISA ) {
3315 __ gsldxc1(dst_reg, as_Register(base), T9, 0);
3316 } else {
3317 __ addu(AT, as_Register(base), T9);
3318 __ ldc1(dst_reg, AT, 0);
3319 }
3320 }
3321 }
3322 %}
3324 enc_class store_D_reg_enc (memory mem, regD src) %{
3325 MacroAssembler _masm(&cbuf);
3326 int base = $mem$$base;
3327 int index = $mem$$index;
3328 int scale = $mem$$scale;
3329 int disp = $mem$$disp;
3330 FloatRegister src_reg = as_FloatRegister($src$$reg);
3332 if( index != 0 ) {
3333 if( Assembler::is_simm16(disp) ) {
3334 if( UseLoongsonISA && Assembler::is_simm(disp, 8) ) {
3335 if (scale == 0) {
3336 __ gssdxc1(src_reg, as_Register(base), as_Register(index), disp);
3337 } else {
3338 __ dsll(AT, as_Register(index), scale);
3339 __ gssdxc1(src_reg, as_Register(base), AT, disp);
3340 }
3341 } else {
3342 if (scale == 0) {
3343 __ daddu(AT, as_Register(base), as_Register(index));
3344 } else {
3345 __ dsll(AT, as_Register(index), scale);
3346 __ daddu(AT, as_Register(base), AT);
3347 }
3348 __ sdc1(src_reg, AT, disp);
3349 }
3350 } else {
3351 if (scale == 0) {
3352 __ daddu(AT, as_Register(base), as_Register(index));
3353 } else {
3354 __ dsll(AT, as_Register(index), scale);
3355 __ daddu(AT, as_Register(base), AT);
3356 }
3357 __ move(T9, disp);
3358 if( UseLoongsonISA ) {
3359 __ gssdxc1(src_reg, AT, T9, 0);
3360 } else {
3361 __ addu(AT, AT, T9);
3362 __ sdc1(src_reg, AT, 0);
3363 }
3364 }
3365 } else {
3366 if( Assembler::is_simm16(disp) ) {
3367 __ sdc1(src_reg, as_Register(base), disp);
3368 } else {
3369 __ move(T9, disp);
3370 if( UseLoongsonISA ) {
3371 __ gssdxc1(src_reg, as_Register(base), T9, 0);
3372 } else {
3373 __ addu(AT, as_Register(base), T9);
3374 __ sdc1(src_reg, AT, 0);
3375 }
3376 }
3377 }
3378 %}
  enc_class Java_To_Runtime (method meth) %{ // CALL Java_To_Runtime, Java_To_Runtime_Leaf
    // Emit a call into the VM runtime.  The target address is materialized
    // in T9 with li48 — presumably a fixed-length sequence so the
    // runtime_call relocation recorded at the insts mark covers a
    // constant-size call site (TODO confirm against NativeCall).
    MacroAssembler _masm(&cbuf);
    // This is the instruction starting address for relocation info.
    __ block_comment("Java_To_Runtime");
    cbuf.set_insts_mark();
    __ relocate(relocInfo::runtime_call_type);

    __ li48(T9, (long)$meth$$method);
    __ jalr(T9);
    __ nop();   // branch-delay slot
  %}
3392 enc_class Java_Static_Call (method meth) %{ // JAVA STATIC CALL
3393 // CALL to fixup routine. Fixup routine uses ScopeDesc info to determine
3394 // who we intended to call.
3395 MacroAssembler _masm(&cbuf);
3396 cbuf.set_insts_mark();
3398 if ( !_method ) {
3399 __ relocate(relocInfo::runtime_call_type);
3400 } else if(_optimized_virtual) {
3401 __ relocate(relocInfo::opt_virtual_call_type);
3402 } else {
3403 __ relocate(relocInfo::static_call_type);
3404 }
3406 __ li(T9, $meth$$method);
3407 __ jalr(T9);
3408 __ nop();
3409 if( _method ) { // Emit stub for static call
3410 emit_java_to_interp(cbuf);
3411 }
3412 %}
  /*
   * [Ref: LIR_Assembler::ic_call() ]
   */
  enc_class Java_Dynamic_Call (method meth) %{ // JAVA DYNAMIC CALL
    // Inline-cache dispatch: ic_call() emits the IC-holder load and the
    // patchable call sequence (the C2 analogue of C1's LIR_Assembler::ic_call).
    MacroAssembler _masm(&cbuf);
    __ block_comment("Java_Dynamic_Call");
    __ ic_call((address)$meth$$method);
  %}
  enc_class Set_Flags_After_Fast_Lock_Unlock(FlagsReg cr) %{
    // Convert the lock/unlock result left in AT into a boolean flags value:
    // flags = 0 when AT == 0, otherwise flags = 0xFFFFFFFF.
    // NOTE(review): nothing in this encoding sets AT — it relies on the
    // preceding fast_lock/fast_unlock emission having left its result
    // there; confirm against the instructs that use this enc_class.
    Register flags = $cr$$Register;
    Label L;

    MacroAssembler _masm(&cbuf);

    __ addu(flags, R0, R0);       // flags = 0
    __ beq(AT, R0, L);
    __ delayed()->nop();          // branch-delay slot
    __ move(flags, 0xFFFFFFFF);   // AT != 0: flags = all-ones
    __ bind(L);
  %}
  enc_class enc_PartialSubtypeCheck(mRegP result, mRegP sub, mRegP super, mRegI tmp) %{
    // Slow-path subtype check: sets result = 0 when 'sub' is a subtype of
    // 'super', and result = 1 on a miss (mirrors the X86_64 RDI convention
    // noted below).  T9 is used as an extra scratch register.
    Register result = $result$$Register;
    Register sub = $sub$$Register;
    Register super = $super$$Register;
    Register length = $tmp$$Register;
    Register tmp = T9;
    Label miss;

    /* 2012/9/28 Jin: result may be the same as sub
     * 47c B40: # B21 B41 <- B20 Freq: 0.155379
     * 47c partialSubtypeCheck result=S1, sub=S1, super=S3, length=S0
     * 4bc mov S2, NULL #@loadConP
     * 4c0 beq S1, S2, B21 #@branchConP P=0.999999 C=-1.000000
     */
    MacroAssembler _masm(&cbuf);
    Label done;
    // On a hit, falls through; on a miss, branches to 'miss'.
    __ check_klass_subtype_slow_path(sub, super, length, tmp,
                                     NULL, &miss,
                                     /*set_cond_codes:*/ true);
    /* 2013/7/22 Jin: Refer to X86_64's RDI */
    __ move(result, 0);   // hit: result = 0
    __ b(done);
    __ nop();             // branch-delay slot

    __ bind(miss);
    __ move(result, 1);   // miss: result = 1
    __ bind(done);
  %}
3467 %}
3470 //---------MIPS FRAME--------------------------------------------------------------
3471 // Definition of frame structure and management information.
3472 //
3473 // S T A C K L A Y O U T Allocators stack-slot number
3474 // | (to get allocators register number
3475 // G Owned by | | v add SharedInfo::stack0)
3476 // r CALLER | |
3477 // o | +--------+ pad to even-align allocators stack-slot
3478 // w V | pad0 | numbers; owned by CALLER
3479 // t -----------+--------+----> Matcher::_in_arg_limit, unaligned
3480 // h ^ | in | 5
3481 // | | args | 4 Holes in incoming args owned by SELF
3482 // | | old | | 3
3483 // | | SP-+--------+----> Matcher::_old_SP, even aligned
3484 // v | | ret | 3 return address
3485 // Owned by +--------+
3486 // Self | pad2 | 2 pad to align old SP
3487 // | +--------+ 1
3488 // | | locks | 0
3489 // | +--------+----> SharedInfo::stack0, even aligned
3490 // | | pad1 | 11 pad to align new SP
3491 // | +--------+
3492 // | | | 10
3493 // | | spills | 9 spills
3494 // V | | 8 (pad0 slot for callee)
3495 // -----------+--------+----> Matcher::_out_arg_limit, unaligned
3496 // ^ | out | 7
3497 // | | args | 6 Holes in outgoing args owned by CALLEE
3498 // Owned by new | |
3499 // Callee SP-+--------+----> Matcher::_new_SP, even aligned
3500 // | |
3501 //
3502 // Note 1: Only region 8-11 is determined by the allocator. Region 0-5 is
3503 // known from SELF's arguments and the Java calling convention.
3504 // Region 6-7 is determined per call site.
3505 // Note 2: If the calling convention leaves holes in the incoming argument
3506 // area, those holes are owned by SELF. Holes in the outgoing area
// are owned by the CALLEE. Holes should not be necessary in the
3508 // incoming area, as the Java calling convention is completely under
3509 // the control of the AD file. Doubles can be sorted and packed to
// avoid holes. Holes in the outgoing arguments may be necessary for
3511 // varargs C calling conventions.
3512 // Note 3: Region 0-3 is even aligned, with pad2 as needed. Region 3-5 is
3513 // even aligned with pad0 as needed.
3514 // Region 6 is even aligned. Region 6-7 is NOT even aligned;
3515 // region 6-11 is even aligned; it may be padded out more so that
3516 // the region from SP to FP meets the minimum stack alignment.
3517 // Note 4: For I2C adapters, the incoming FP may not meet the minimum stack
3518 // alignment. Region 11, pad1, may be dynamically extended so that
3519 // SP meets the minimum alignment.
frame %{
  // Stack grows toward lower addresses on MIPS.
  stack_direction(TOWARDS_LOW);

  // These two registers define part of the calling convention
  // between compiled code and the interpreter.
  // SEE StartI2CNode::calling_convention & StartC2INode::calling_convention & StartOSRNode::calling_convention
  // for more information. by yjl 3/16/2006

  inline_cache_reg(T1); // Inline Cache Register
  interpreter_method_oop_reg(S3); // Method Oop Register when calling interpreter
  /*
  inline_cache_reg(T1); // Inline Cache Register or methodOop for I2C
  interpreter_arg_ptr_reg(A0); // Argument pointer for I2C adapters
  */

  // Optional: name the operand used by cisc-spilling to access [stack_pointer + offset]
  cisc_spilling_operand_name(indOffset32);

  // Number of stack slots consumed by locking an object
  // generate Compile::sync_stack_slots
#ifdef _LP64
  sync_stack_slots(2);
#else
  sync_stack_slots(1);
#endif

  frame_pointer(SP);

  // Interpreter stores its frame pointer in a register which is
  // stored to the stack by I2CAdaptors.
  // I2CAdaptors convert from interpreted java to compiled java.

  interpreter_frame_pointer(FP);

  // generate Matcher::stack_alignment
  stack_alignment(StackAlignmentInBytes); //wordSize = sizeof(char*);

  // Number of stack slots between incoming argument block and the start of
  // a new frame. The PROLOG must add this many slots to the stack. The
  // EPILOG must remove this many slots. Intel needs one slot for
  // return address.
  // generate Matcher::in_preserve_stack_slots
  //in_preserve_stack_slots(VerifyStackAtCalls + 2); //Now VerifyStackAtCalls is defined as false ! Leave one stack slot for ra and fp
  in_preserve_stack_slots(4); //Now VerifyStackAtCalls is defined as false ! Leave two stack slots for ra and fp

  // Number of outgoing stack slots killed above the out_preserve_stack_slots
  // for calls to C. Supports the var-args backing area for register parms.
  varargs_C_out_slots_killed(0);

  // The after-PROLOG location of the return address. Location of
  // return address specifies a type (REG or STACK) and a number
  // representing the register number (i.e. - use a register name) or
  // stack slot.
  // Ret Addr is on stack in slot 0 if no locks or verification or alignment.
  // Otherwise, it is above the locks and verification slot and alignment word
  //return_addr(STACK -1+ round_to(1+VerifyStackAtCalls+Compile::current()->sync()*Compile::current()->sync_stack_slots(),WordsPerLong));
  return_addr(REG RA);

  // Body of function which returns an integer array locating
  // arguments either in registers or in stack slots. Passed an array
  // of ideal registers called "sig" and a "length" count. Stack-slot
  // offsets are based on outgoing arguments, i.e. a CALLER setting up
  // arguments for a CALLEE. Incoming stack arguments are
  // automatically biased by the preserve_stack_slots field above.

  // will generated to Matcher::calling_convention(OptoRegPair *sig, uint length, bool is_outgoing)
  // StartNode::calling_convention call this. by yjl 3/16/2006
  calling_convention %{
    SharedRuntime::java_calling_convention(sig_bt, regs, length, false);
  %}

  // Body of function which returns an integer array locating
  // arguments either in registers or in stack slots. Passed an array
  // of ideal registers called "sig" and a "length" count. Stack-slot
  // offsets are based on outgoing arguments, i.e. a CALLER setting up
  // arguments for a CALLEE. Incoming stack arguments are
  // automatically biased by the preserve_stack_slots field above.

  // SEE CallRuntimeNode::calling_convention for more information. by yjl 3/16/2006
  c_calling_convention %{
    (void) SharedRuntime::c_calling_convention(sig_bt, regs, /*regs2=*/NULL, length);
  %}

  // Location of C & interpreter return values
  // register(s) contain(s) return value for Op_StartI2C and Op_StartOSR.
  // SEE Matcher::match. by yjl 3/16/2006
  // Integer/pointer results in V0 (V0_H for the high half of a long);
  // float/double results in F0 (F0_H for the high half of a double).
  c_return_value %{
    assert( ideal_reg >= Op_RegI && ideal_reg <= Op_RegL, "only return normal values" );
    /* -- , -- , Op_RegN, Op_RegI, Op_RegP, Op_RegF, Op_RegD, Op_RegL */
    static int lo[Op_RegL+1] = { 0, 0, V0_num, V0_num, V0_num, F0_num, F0_num, V0_num };
    static int hi[Op_RegL+1] = { 0, 0, OptoReg::Bad, OptoReg::Bad, V0_H_num, OptoReg::Bad, F0_H_num, V0_H_num };
    return OptoRegPair(hi[ideal_reg],lo[ideal_reg]);
  %}

  // Location of return values
  // register(s) contain(s) return value for Op_StartC2I and Op_Start.
  // SEE Matcher::match. by yjl 3/16/2006
  // Same register assignments as c_return_value above.

  return_value %{
    assert( ideal_reg >= Op_RegI && ideal_reg <= Op_RegL, "only return normal values" );
    /* -- , -- , Op_RegN, Op_RegI, Op_RegP, Op_RegF, Op_RegD, Op_RegL */
    static int lo[Op_RegL+1] = { 0, 0, V0_num, V0_num, V0_num, F0_num, F0_num, V0_num };
    static int hi[Op_RegL+1] = { 0, 0, OptoReg::Bad, OptoReg::Bad, V0_H_num, OptoReg::Bad, F0_H_num, V0_H_num};
    return OptoRegPair(hi[ideal_reg],lo[ideal_reg]);
  %}

%}
3637 //----------ATTRIBUTES---------------------------------------------------------
3638 //----------Operand Attributes-------------------------------------------------
3639 op_attrib op_cost(0); // Required cost attribute
3641 //----------Instruction Attributes---------------------------------------------
3642 ins_attrib ins_cost(100); // Required cost attribute
3643 ins_attrib ins_size(32); // Required size attribute (in bits)
3644 ins_attrib ins_pc_relative(0); // Required PC Relative flag
3645 ins_attrib ins_short_branch(0); // Required flag: is this instruction a
3646 // non-matching short branch variant of some
3647 // long branch?
3648 ins_attrib ins_alignment(4); // Required alignment attribute (must be a power of 2)
3649 // specifies the alignment that some part of the instruction (not
3650 // necessarily the start) requires. If > 1, a compute_padding()
3651 // function must be provided for the instruction
3653 //----------OPERANDS-----------------------------------------------------------
3654 // Operand definitions must precede instruction definitions for correct parsing
3655 // in the ADLC because operands constitute user defined types which are used in
3656 // instruction definitions.
3658 // Vectors
3659 operand vecD() %{
3660 constraint(ALLOC_IN_RC(dbl_reg));
3661 match(VecD);
3663 format %{ %}
3664 interface(REG_INTER);
3665 %}
3667 // Flags register, used as output of compare instructions
3668 operand FlagsReg() %{
3669 constraint(ALLOC_IN_RC(mips_flags));
3670 match(RegFlags);
3672 format %{ "EFLAGS" %}
3673 interface(REG_INTER);
3674 %}
3676 //----------Simple Operands----------------------------------------------------
3677 //TODO: Should we need to define some more special immediate number ?
3678 // Immediate Operands
3679 // Integer Immediate
3680 operand immI() %{
3681 match(ConI);
3682 //TODO: should not match immI8 here LEE
3683 match(immI8);
3685 op_cost(20);
3686 format %{ %}
3687 interface(CONST_INTER);
3688 %}
3690 // Long Immediate 8-bit
3691 operand immL8()
3692 %{
3693 predicate(-0x80L <= n->get_long() && n->get_long() < 0x80L);
3694 match(ConL);
3696 op_cost(5);
3697 format %{ %}
3698 interface(CONST_INTER);
3699 %}
3701 // Constant for test vs zero
3702 operand immI0() %{
3703 predicate(n->get_int() == 0);
3704 match(ConI);
3706 op_cost(0);
3707 format %{ %}
3708 interface(CONST_INTER);
3709 %}
3711 // Constant for increment
3712 operand immI1() %{
3713 predicate(n->get_int() == 1);
3714 match(ConI);
3716 op_cost(0);
3717 format %{ %}
3718 interface(CONST_INTER);
3719 %}
3721 // Constant for decrement
3722 operand immI_M1() %{
3723 predicate(n->get_int() == -1);
3724 match(ConI);
3726 op_cost(0);
3727 format %{ %}
3728 interface(CONST_INTER);
3729 %}
3731 operand immI_MaxI() %{
3732 predicate(n->get_int() == 2147483647);
3733 match(ConI);
3735 op_cost(0);
3736 format %{ %}
3737 interface(CONST_INTER);
3738 %}
3740 // Valid scale values for addressing modes
3741 operand immI2() %{
3742 predicate(0 <= n->get_int() && (n->get_int() <= 3));
3743 match(ConI);
3745 format %{ %}
3746 interface(CONST_INTER);
3747 %}
3749 operand immI8() %{
3750 predicate((-128 <= n->get_int()) && (n->get_int() <= 127));
3751 match(ConI);
3753 op_cost(5);
3754 format %{ %}
3755 interface(CONST_INTER);
3756 %}
3758 operand immI16() %{
3759 predicate((-32768 <= n->get_int()) && (n->get_int() <= 32767));
3760 match(ConI);
3762 op_cost(10);
3763 format %{ %}
3764 interface(CONST_INTER);
3765 %}
3767 // Constant for long shifts
3768 operand immI_32() %{
3769 predicate( n->get_int() == 32 );
3770 match(ConI);
3772 op_cost(0);
3773 format %{ %}
3774 interface(CONST_INTER);
3775 %}
3777 operand immI_63() %{
3778 predicate( n->get_int() == 63 );
3779 match(ConI);
3781 op_cost(0);
3782 format %{ %}
3783 interface(CONST_INTER);
3784 %}
3786 operand immI_0_31() %{
3787 predicate( n->get_int() >= 0 && n->get_int() <= 31 );
3788 match(ConI);
3790 op_cost(0);
3791 format %{ %}
3792 interface(CONST_INTER);
3793 %}
// Operand for non-negative integer mask
operand immI_nonneg_mask() %{
  // Non-negative int constant that is a contiguous low-bit mask
  // (2^k - 1); is_int_mask() returns -1 when the value is not such a
  // mask — presumably used to strength-reduce AND into a bit-extract
  // instruction.  TODO confirm against the instructs using this operand.
  predicate( (n->get_int() >= 0) && (Assembler::is_int_mask(n->get_int()) != -1) );
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
3805 operand immI_32_63() %{
3806 predicate( n->get_int() >= 32 && n->get_int() <= 63 );
3807 match(ConI);
3808 op_cost(0);
3810 format %{ %}
3811 interface(CONST_INTER);
3812 %}
operand immI16_sub() %{
  // 16-bit immediate for subtraction: the range [-32767, 32768] is the
  // negation of the simm16 range, so -n always fits a signed 16-bit
  // immediate (sub x, n can be emitted as add x, -n).
  predicate((-32767 <= n->get_int()) && (n->get_int() <= 32768));
  match(ConI);

  op_cost(10);
  format %{ %}
  interface(CONST_INTER);
%}
3823 operand immI_0_32767() %{
3824 predicate( n->get_int() >= 0 && n->get_int() <= 32767 );
3825 match(ConI);
3826 op_cost(0);
3828 format %{ %}
3829 interface(CONST_INTER);
3830 %}
3832 operand immI_0_65535() %{
3833 predicate( n->get_int() >= 0 && n->get_int() <= 65535 );
3834 match(ConI);
3835 op_cost(0);
3837 format %{ %}
3838 interface(CONST_INTER);
3839 %}
3841 operand immI_1() %{
3842 predicate( n->get_int() == 1 );
3843 match(ConI);
3845 op_cost(0);
3846 format %{ %}
3847 interface(CONST_INTER);
3848 %}
3850 operand immI_2() %{
3851 predicate( n->get_int() == 2 );
3852 match(ConI);
3854 op_cost(0);
3855 format %{ %}
3856 interface(CONST_INTER);
3857 %}
3859 operand immI_3() %{
3860 predicate( n->get_int() == 3 );
3861 match(ConI);
3863 op_cost(0);
3864 format %{ %}
3865 interface(CONST_INTER);
3866 %}
3868 operand immI_7() %{
3869 predicate( n->get_int() == 7 );
3870 match(ConI);
3872 format %{ %}
3873 interface(CONST_INTER);
3874 %}
3876 // Immediates for special shifts (sign extend)
3878 // Constants for increment
3879 operand immI_16() %{
3880 predicate( n->get_int() == 16 );
3881 match(ConI);
3883 format %{ %}
3884 interface(CONST_INTER);
3885 %}
3887 operand immI_24() %{
3888 predicate( n->get_int() == 24 );
3889 match(ConI);
3891 format %{ %}
3892 interface(CONST_INTER);
3893 %}
3895 // Constant for byte-wide masking
3896 operand immI_255() %{
3897 predicate( n->get_int() == 255 );
3898 match(ConI);
3900 op_cost(0);
3901 format %{ %}
3902 interface(CONST_INTER);
3903 %}
3905 operand immI_65535() %{
3906 predicate( n->get_int() == 65535 );
3907 match(ConI);
3909 op_cost(5);
3910 format %{ %}
3911 interface(CONST_INTER);
3912 %}
3914 operand immI_65536() %{
3915 predicate( n->get_int() == 65536 );
3916 match(ConI);
3918 op_cost(5);
3919 format %{ %}
3920 interface(CONST_INTER);
3921 %}
3923 operand immI_M65536() %{
3924 predicate( n->get_int() == -65536 );
3925 match(ConI);
3927 op_cost(5);
3928 format %{ %}
3929 interface(CONST_INTER);
3930 %}
3932 // Pointer Immediate
3933 operand immP() %{
3934 match(ConP);
3936 op_cost(10);
3937 format %{ %}
3938 interface(CONST_INTER);
3939 %}
3941 // NULL Pointer Immediate
3942 operand immP0() %{
3943 predicate( n->get_ptr() == 0 );
3944 match(ConP);
3945 op_cost(0);
3947 format %{ %}
3948 interface(CONST_INTER);
3949 %}
3951 // Pointer Immediate: 64-bit
3952 operand immP_set() %{
3953 match(ConP);
3955 op_cost(5);
3956 // formats are generated automatically for constants and base registers
3957 format %{ %}
3958 interface(CONST_INTER);
3959 %}
3961 // Pointer Immediate: 64-bit
operand immP_load() %{
  // 64-bit pointer constants that should be *loaded* rather than
  // synthesized inline (per the operand name): any oop pointer, or a
  // pointer whose inline materialization would take more than 3
  // instructions.  Complements immP_no_oop_cheap below.
  predicate(n->bottom_type()->isa_oop_ptr() || (MacroAssembler::insts_for_set64(n->get_ptr()) > 3));
  match(ConP);

  op_cost(5);
  // formats are generated automatically for constants and base registers
  format %{ %}
  interface(CONST_INTER);
%}
3972 // Pointer Immediate: 64-bit
operand immP_no_oop_cheap() %{
  // 64-bit non-oop pointer constants cheap to synthesize inline:
  // set64 can materialize the value in at most 3 instructions.
  // Exact complement of immP_load's predicate.
  predicate(!n->bottom_type()->isa_oop_ptr() && (MacroAssembler::insts_for_set64(n->get_ptr()) <= 3));
  match(ConP);

  op_cost(5);
  // formats are generated automatically for constants and base registers
  format %{ %}
  interface(CONST_INTER);
%}
3983 // Pointer for polling page
3984 operand immP_poll() %{
3985 predicate(n->get_ptr() != 0 && n->get_ptr() == (intptr_t)os::get_polling_page());
3986 match(ConP);
3987 op_cost(5);
3989 format %{ %}
3990 interface(CONST_INTER);
3991 %}
3993 // Pointer Immediate
3994 operand immN() %{
3995 match(ConN);
3997 op_cost(10);
3998 format %{ %}
3999 interface(CONST_INTER);
4000 %}
4002 operand immNKlass() %{
4003 match(ConNKlass);
4005 op_cost(10);
4006 format %{ %}
4007 interface(CONST_INTER);
4008 %}
4010 // NULL Pointer Immediate
4011 operand immN0() %{
4012 predicate(n->get_narrowcon() == 0);
4013 match(ConN);
4015 op_cost(5);
4016 format %{ %}
4017 interface(CONST_INTER);
4018 %}
4020 // Long Immediate
4021 operand immL() %{
4022 match(ConL);
4024 op_cost(20);
4025 format %{ %}
4026 interface(CONST_INTER);
4027 %}
4029 // Long Immediate zero
4030 operand immL0() %{
4031 predicate( n->get_long() == 0L );
4032 match(ConL);
4033 op_cost(0);
4035 format %{ %}
4036 interface(CONST_INTER);
4037 %}
4039 operand immL7() %{
4040 predicate( n->get_long() == 7L );
4041 match(ConL);
4042 op_cost(0);
4044 format %{ %}
4045 interface(CONST_INTER);
4046 %}
4048 operand immL_M1() %{
4049 predicate( n->get_long() == -1L );
4050 match(ConL);
4051 op_cost(0);
4053 format %{ %}
4054 interface(CONST_INTER);
4055 %}
4057 // bit 0..2 zero
4058 operand immL_M8() %{
4059 predicate( n->get_long() == -8L );
4060 match(ConL);
4061 op_cost(0);
4063 format %{ %}
4064 interface(CONST_INTER);
4065 %}
4067 // bit 2 zero
4068 operand immL_M5() %{
4069 predicate( n->get_long() == -5L );
4070 match(ConL);
4071 op_cost(0);
4073 format %{ %}
4074 interface(CONST_INTER);
4075 %}
4077 // bit 1..2 zero
4078 operand immL_M7() %{
4079 predicate( n->get_long() == -7L );
4080 match(ConL);
4081 op_cost(0);
4083 format %{ %}
4084 interface(CONST_INTER);
4085 %}
4087 // bit 0..1 zero
4088 operand immL_M4() %{
4089 predicate( n->get_long() == -4L );
4090 match(ConL);
4091 op_cost(0);
4093 format %{ %}
4094 interface(CONST_INTER);
4095 %}
4097 // bit 3..6 zero
4098 operand immL_M121() %{
4099 predicate( n->get_long() == -121L );
4100 match(ConL);
4101 op_cost(0);
4103 format %{ %}
4104 interface(CONST_INTER);
4105 %}
4107 // Long immediate from 0 to 127.
4108 // Used for a shorter form of long mul by 10.
4109 operand immL_127() %{
4110 predicate((0 <= n->get_long()) && (n->get_long() <= 127));
4111 match(ConL);
4112 op_cost(0);
4114 format %{ %}
4115 interface(CONST_INTER);
4116 %}
// Operand for non-negative long mask
operand immL_nonneg_mask() %{
  // Non-negative long constant that is a contiguous low-bit mask
  // (2^k - 1); is_jlong_mask() returns -1 for non-masks.  Long
  // counterpart of immI_nonneg_mask.
  predicate( (n->get_long() >= 0) && (Assembler::is_jlong_mask(n->get_long()) != -1) );
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
4128 operand immL_0_65535() %{
4129 predicate( n->get_long() >= 0 && n->get_long() <= 65535 );
4130 match(ConL);
4131 op_cost(0);
4133 format %{ %}
4134 interface(CONST_INTER);
4135 %}
4137 // Long Immediate: cheap (materialize in <= 3 instructions)
// Long Immediate: cheap (materialize in <= 3 instructions)
operand immL_cheap() %{
  // Long constants that set64 can synthesize inline in at most 3
  // instructions; complement of immL_expensive below.
  predicate(MacroAssembler::insts_for_set64(n->get_long()) <= 3);
  match(ConL);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}
4147 // Long Immediate: expensive (materialize in > 3 instructions)
operand immL_expensive() %{
  // Long constants needing more than 3 instructions to synthesize
  // inline; complement of immL_cheap above.
  predicate(MacroAssembler::insts_for_set64(n->get_long()) > 3);
  match(ConL);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}
4157 operand immL16() %{
4158 predicate((-32768 <= n->get_long()) && (n->get_long() <= 32767));
4159 match(ConL);
4161 op_cost(10);
4162 format %{ %}
4163 interface(CONST_INTER);
4164 %}
operand immL16_sub() %{
  // 16-bit long immediate for subtraction: range [-32767, 32768] is the
  // negation of the simm16 range, so -n always fits a signed 16-bit
  // immediate.  Long counterpart of immI16_sub.
  predicate((-32767 <= n->get_long()) && (n->get_long() <= 32768));
  match(ConL);

  op_cost(10);
  format %{ %}
  interface(CONST_INTER);
%}
4175 // Long Immediate: low 32-bit mask
4176 operand immL_32bits() %{
4177 predicate(n->get_long() == 0xFFFFFFFFL);
4178 match(ConL);
4179 op_cost(20);
4181 format %{ %}
4182 interface(CONST_INTER);
4183 %}
4185 // Long Immediate 32-bit signed
4186 operand immL32()
4187 %{
4188 predicate(n->get_long() == (int) (n->get_long()));
4189 match(ConL);
4191 op_cost(15);
4192 format %{ %}
4193 interface(CONST_INTER);
4194 %}
4197 //single-precision floating-point zero
4198 operand immF0() %{
4199 predicate(jint_cast(n->getf()) == 0);
4200 match(ConF);
4202 op_cost(5);
4203 format %{ %}
4204 interface(CONST_INTER);
4205 %}
4207 //single-precision floating-point immediate
4208 operand immF() %{
4209 match(ConF);
4211 op_cost(20);
4212 format %{ %}
4213 interface(CONST_INTER);
4214 %}
4216 //double-precision floating-point zero
4217 operand immD0() %{
4218 predicate(jlong_cast(n->getd()) == 0);
4219 match(ConD);
4221 op_cost(5);
4222 format %{ %}
4223 interface(CONST_INTER);
4224 %}
4226 //double-precision floating-point immediate
4227 operand immD() %{
4228 match(ConD);
4230 op_cost(20);
4231 format %{ %}
4232 interface(CONST_INTER);
4233 %}
4235 // Register Operands
4236 // Integer Register
4237 operand mRegI() %{
4238 constraint(ALLOC_IN_RC(int_reg));
4239 match(RegI);
4241 format %{ %}
4242 interface(REG_INTER);
4243 %}
4245 operand no_Ax_mRegI() %{
4246 constraint(ALLOC_IN_RC(no_Ax_int_reg));
4247 match(RegI);
4248 match(mRegI);
4250 format %{ %}
4251 interface(REG_INTER);
4252 %}
4254 operand mS0RegI() %{
4255 constraint(ALLOC_IN_RC(s0_reg));
4256 match(RegI);
4257 match(mRegI);
4259 format %{ "S0" %}
4260 interface(REG_INTER);
4261 %}
4263 operand mS1RegI() %{
4264 constraint(ALLOC_IN_RC(s1_reg));
4265 match(RegI);
4266 match(mRegI);
4268 format %{ "S1" %}
4269 interface(REG_INTER);
4270 %}
// Single-register integer operands. Each operand constrains an int (RegI)
// value to a register class containing exactly one machine register
// (e.g. s2_reg -> S2), so instruction patterns that need a value in one
// specific register can name it. Each also matches the generic mRegI so
// the matcher may narrow a generic int operand to the fixed register.
4272 operand mS2RegI() %{
4273 constraint(ALLOC_IN_RC(s2_reg));
4274 match(RegI);
4275 match(mRegI);
4277 format %{ "S2" %}
4278 interface(REG_INTER);
4279 %}
4281 operand mS3RegI() %{
4282 constraint(ALLOC_IN_RC(s3_reg));
4283 match(RegI);
4284 match(mRegI);
4286 format %{ "S3" %}
4287 interface(REG_INTER);
4288 %}
4290 operand mS4RegI() %{
4291 constraint(ALLOC_IN_RC(s4_reg));
4292 match(RegI);
4293 match(mRegI);
4295 format %{ "S4" %}
4296 interface(REG_INTER);
4297 %}
4299 operand mS5RegI() %{
4300 constraint(ALLOC_IN_RC(s5_reg));
4301 match(RegI);
4302 match(mRegI);
4304 format %{ "S5" %}
4305 interface(REG_INTER);
4306 %}
4308 operand mS6RegI() %{
4309 constraint(ALLOC_IN_RC(s6_reg));
4310 match(RegI);
4311 match(mRegI);
4313 format %{ "S6" %}
4314 interface(REG_INTER);
4315 %}
4317 operand mS7RegI() %{
4318 constraint(ALLOC_IN_RC(s7_reg));
4319 match(RegI);
4320 match(mRegI);
4322 format %{ "S7" %}
4323 interface(REG_INTER);
4324 %}
// Temporary registers T0-T3, T8, T9 as fixed int operands.
4327 operand mT0RegI() %{
4328 constraint(ALLOC_IN_RC(t0_reg));
4329 match(RegI);
4330 match(mRegI);
4332 format %{ "T0" %}
4333 interface(REG_INTER);
4334 %}
4336 operand mT1RegI() %{
4337 constraint(ALLOC_IN_RC(t1_reg));
4338 match(RegI);
4339 match(mRegI);
4341 format %{ "T1" %}
4342 interface(REG_INTER);
4343 %}
4345 operand mT2RegI() %{
4346 constraint(ALLOC_IN_RC(t2_reg));
4347 match(RegI);
4348 match(mRegI);
4350 format %{ "T2" %}
4351 interface(REG_INTER);
4352 %}
4354 operand mT3RegI() %{
4355 constraint(ALLOC_IN_RC(t3_reg));
4356 match(RegI);
4357 match(mRegI);
4359 format %{ "T3" %}
4360 interface(REG_INTER);
4361 %}
4363 operand mT8RegI() %{
4364 constraint(ALLOC_IN_RC(t8_reg));
4365 match(RegI);
4366 match(mRegI);
4368 format %{ "T8" %}
4369 interface(REG_INTER);
4370 %}
4372 operand mT9RegI() %{
4373 constraint(ALLOC_IN_RC(t9_reg));
4374 match(RegI);
4375 match(mRegI);
4377 format %{ "T9" %}
4378 interface(REG_INTER);
4379 %}
// Argument registers A0-A7 as fixed int operands.
4381 operand mA0RegI() %{
4382 constraint(ALLOC_IN_RC(a0_reg));
4383 match(RegI);
4384 match(mRegI);
4386 format %{ "A0" %}
4387 interface(REG_INTER);
4388 %}
4390 operand mA1RegI() %{
4391 constraint(ALLOC_IN_RC(a1_reg));
4392 match(RegI);
4393 match(mRegI);
4395 format %{ "A1" %}
4396 interface(REG_INTER);
4397 %}
4399 operand mA2RegI() %{
4400 constraint(ALLOC_IN_RC(a2_reg));
4401 match(RegI);
4402 match(mRegI);
4404 format %{ "A2" %}
4405 interface(REG_INTER);
4406 %}
4408 operand mA3RegI() %{
4409 constraint(ALLOC_IN_RC(a3_reg));
4410 match(RegI);
4411 match(mRegI);
4413 format %{ "A3" %}
4414 interface(REG_INTER);
4415 %}
4417 operand mA4RegI() %{
4418 constraint(ALLOC_IN_RC(a4_reg));
4419 match(RegI);
4420 match(mRegI);
4422 format %{ "A4" %}
4423 interface(REG_INTER);
4424 %}
4426 operand mA5RegI() %{
4427 constraint(ALLOC_IN_RC(a5_reg));
4428 match(RegI);
4429 match(mRegI);
4431 format %{ "A5" %}
4432 interface(REG_INTER);
4433 %}
4435 operand mA6RegI() %{
4436 constraint(ALLOC_IN_RC(a6_reg));
4437 match(RegI);
4438 match(mRegI);
4440 format %{ "A6" %}
4441 interface(REG_INTER);
4442 %}
4444 operand mA7RegI() %{
4445 constraint(ALLOC_IN_RC(a7_reg));
4446 match(RegI);
4447 match(mRegI);
4449 format %{ "A7" %}
4450 interface(REG_INTER);
4451 %}
// Return-value registers V0/V1 as fixed int operands.
4453 operand mV0RegI() %{
4454 constraint(ALLOC_IN_RC(v0_reg));
4455 match(RegI);
4456 match(mRegI);
4458 format %{ "V0" %}
4459 interface(REG_INTER);
4460 %}
4462 operand mV1RegI() %{
4463 constraint(ALLOC_IN_RC(v1_reg));
4464 match(RegI);
4465 match(mRegI);
4467 format %{ "V1" %}
4468 interface(REG_INTER);
4469 %}
// Narrow-oop (compressed pointer, RegN) register operands.
// mRegN is the generic form, allocatable from the whole int_reg class;
// the t*_/a*_/s*_/v*_RegN variants pin a narrow oop to one specific
// register and also match mRegN so the matcher can narrow generically.
4471 operand mRegN() %{
4472 constraint(ALLOC_IN_RC(int_reg));
4473 match(RegN);
4475 format %{ %}
4476 interface(REG_INTER);
4477 %}
4479 operand t0_RegN() %{
4480 constraint(ALLOC_IN_RC(t0_reg));
4481 match(RegN);
4482 match(mRegN);
4484 format %{ %}
4485 interface(REG_INTER);
4486 %}
4488 operand t1_RegN() %{
4489 constraint(ALLOC_IN_RC(t1_reg));
4490 match(RegN);
4491 match(mRegN);
4493 format %{ %}
4494 interface(REG_INTER);
4495 %}
4497 operand t2_RegN() %{
4498 constraint(ALLOC_IN_RC(t2_reg));
4499 match(RegN);
4500 match(mRegN);
4502 format %{ %}
4503 interface(REG_INTER);
4504 %}
4506 operand t3_RegN() %{
4507 constraint(ALLOC_IN_RC(t3_reg));
4508 match(RegN);
4509 match(mRegN);
4511 format %{ %}
4512 interface(REG_INTER);
4513 %}
4515 operand t8_RegN() %{
4516 constraint(ALLOC_IN_RC(t8_reg));
4517 match(RegN);
4518 match(mRegN);
4520 format %{ %}
4521 interface(REG_INTER);
4522 %}
4524 operand t9_RegN() %{
4525 constraint(ALLOC_IN_RC(t9_reg));
4526 match(RegN);
4527 match(mRegN);
4529 format %{ %}
4530 interface(REG_INTER);
4531 %}
4533 operand a0_RegN() %{
4534 constraint(ALLOC_IN_RC(a0_reg));
4535 match(RegN);
4536 match(mRegN);
4538 format %{ %}
4539 interface(REG_INTER);
4540 %}
4542 operand a1_RegN() %{
4543 constraint(ALLOC_IN_RC(a1_reg));
4544 match(RegN);
4545 match(mRegN);
4547 format %{ %}
4548 interface(REG_INTER);
4549 %}
4551 operand a2_RegN() %{
4552 constraint(ALLOC_IN_RC(a2_reg));
4553 match(RegN);
4554 match(mRegN);
4556 format %{ %}
4557 interface(REG_INTER);
4558 %}
4560 operand a3_RegN() %{
4561 constraint(ALLOC_IN_RC(a3_reg));
4562 match(RegN);
4563 match(mRegN);
4565 format %{ %}
4566 interface(REG_INTER);
4567 %}
4569 operand a4_RegN() %{
4570 constraint(ALLOC_IN_RC(a4_reg));
4571 match(RegN);
4572 match(mRegN);
4574 format %{ %}
4575 interface(REG_INTER);
4576 %}
4578 operand a5_RegN() %{
4579 constraint(ALLOC_IN_RC(a5_reg));
4580 match(RegN);
4581 match(mRegN);
4583 format %{ %}
4584 interface(REG_INTER);
4585 %}
4587 operand a6_RegN() %{
4588 constraint(ALLOC_IN_RC(a6_reg));
4589 match(RegN);
4590 match(mRegN);
4592 format %{ %}
4593 interface(REG_INTER);
4594 %}
4596 operand a7_RegN() %{
4597 constraint(ALLOC_IN_RC(a7_reg));
4598 match(RegN);
4599 match(mRegN);
4601 format %{ %}
4602 interface(REG_INTER);
4603 %}
4605 operand s0_RegN() %{
4606 constraint(ALLOC_IN_RC(s0_reg));
4607 match(RegN);
4608 match(mRegN);
4610 format %{ %}
4611 interface(REG_INTER);
4612 %}
4614 operand s1_RegN() %{
4615 constraint(ALLOC_IN_RC(s1_reg));
4616 match(RegN);
4617 match(mRegN);
4619 format %{ %}
4620 interface(REG_INTER);
4621 %}
4623 operand s2_RegN() %{
4624 constraint(ALLOC_IN_RC(s2_reg));
4625 match(RegN);
4626 match(mRegN);
4628 format %{ %}
4629 interface(REG_INTER);
4630 %}
4632 operand s3_RegN() %{
4633 constraint(ALLOC_IN_RC(s3_reg));
4634 match(RegN);
4635 match(mRegN);
4637 format %{ %}
4638 interface(REG_INTER);
4639 %}
4641 operand s4_RegN() %{
4642 constraint(ALLOC_IN_RC(s4_reg));
4643 match(RegN);
4644 match(mRegN);
4646 format %{ %}
4647 interface(REG_INTER);
4648 %}
4650 operand s5_RegN() %{
4651 constraint(ALLOC_IN_RC(s5_reg));
4652 match(RegN);
4653 match(mRegN);
4655 format %{ %}
4656 interface(REG_INTER);
4657 %}
4659 operand s6_RegN() %{
4660 constraint(ALLOC_IN_RC(s6_reg));
4661 match(RegN);
4662 match(mRegN);
4664 format %{ %}
4665 interface(REG_INTER);
4666 %}
4668 operand s7_RegN() %{
4669 constraint(ALLOC_IN_RC(s7_reg));
4670 match(RegN);
4671 match(mRegN);
4673 format %{ %}
4674 interface(REG_INTER);
4675 %}
4677 operand v0_RegN() %{
4678 constraint(ALLOC_IN_RC(v0_reg));
4679 match(RegN);
4680 match(mRegN);
4682 format %{ %}
4683 interface(REG_INTER);
4684 %}
4686 operand v1_RegN() %{
4687 constraint(ALLOC_IN_RC(v1_reg));
4688 match(RegN);
4689 match(mRegN);
4691 format %{ %}
4692 interface(REG_INTER);
4693 %}
4695 // Pointer Register
// mRegP is the generic pointer operand (class p_reg); no_T8_mRegP is the
// same but drawn from no_T8_p_reg, which by its name excludes register T8
// — presumably because T8 is reserved as a scratch register in some
// patterns (TODO confirm against the reg_class definitions earlier in
// this file). The per-register *_RegP operands pin a pointer to one
// specific register; each matches mRegP, and all except t8_RegP also
// match no_T8_mRegP (t8_RegP deliberately omits it, since T8 is the
// excluded register).
4696 operand mRegP() %{
4697 constraint(ALLOC_IN_RC(p_reg));
4698 match(RegP);
4700 format %{ %}
4701 interface(REG_INTER);
4702 %}
4704 operand no_T8_mRegP() %{
4705 constraint(ALLOC_IN_RC(no_T8_p_reg));
4706 match(RegP);
4707 match(mRegP);
4709 format %{ %}
4710 interface(REG_INTER);
4711 %}
4713 operand s0_RegP()
4714 %{
4715 constraint(ALLOC_IN_RC(s0_long_reg));
4716 match(RegP);
4717 match(mRegP);
4718 match(no_T8_mRegP);
4720 format %{ %}
4721 interface(REG_INTER);
4722 %}
4724 operand s1_RegP()
4725 %{
4726 constraint(ALLOC_IN_RC(s1_long_reg));
4727 match(RegP);
4728 match(mRegP);
4729 match(no_T8_mRegP);
4731 format %{ %}
4732 interface(REG_INTER);
4733 %}
4735 operand s2_RegP()
4736 %{
4737 constraint(ALLOC_IN_RC(s2_long_reg));
4738 match(RegP);
4739 match(mRegP);
4740 match(no_T8_mRegP);
4742 format %{ %}
4743 interface(REG_INTER);
4744 %}
4746 operand s3_RegP()
4747 %{
4748 constraint(ALLOC_IN_RC(s3_long_reg));
4749 match(RegP);
4750 match(mRegP);
4751 match(no_T8_mRegP);
4753 format %{ %}
4754 interface(REG_INTER);
4755 %}
4757 operand s4_RegP()
4758 %{
4759 constraint(ALLOC_IN_RC(s4_long_reg));
4760 match(RegP);
4761 match(mRegP);
4762 match(no_T8_mRegP);
4764 format %{ %}
4765 interface(REG_INTER);
4766 %}
4768 operand s5_RegP()
4769 %{
4770 constraint(ALLOC_IN_RC(s5_long_reg));
4771 match(RegP);
4772 match(mRegP);
4773 match(no_T8_mRegP);
4775 format %{ %}
4776 interface(REG_INTER);
4777 %}
4779 operand s6_RegP()
4780 %{
4781 constraint(ALLOC_IN_RC(s6_long_reg));
4782 match(RegP);
4783 match(mRegP);
4784 match(no_T8_mRegP);
4786 format %{ %}
4787 interface(REG_INTER);
4788 %}
4790 operand s7_RegP()
4791 %{
4792 constraint(ALLOC_IN_RC(s7_long_reg));
4793 match(RegP);
4794 match(mRegP);
4795 match(no_T8_mRegP);
4797 format %{ %}
4798 interface(REG_INTER);
4799 %}
4801 operand t0_RegP()
4802 %{
4803 constraint(ALLOC_IN_RC(t0_long_reg));
4804 match(RegP);
4805 match(mRegP);
4806 match(no_T8_mRegP);
4808 format %{ %}
4809 interface(REG_INTER);
4810 %}
4812 operand t1_RegP()
4813 %{
4814 constraint(ALLOC_IN_RC(t1_long_reg));
4815 match(RegP);
4816 match(mRegP);
4817 match(no_T8_mRegP);
4819 format %{ %}
4820 interface(REG_INTER);
4821 %}
4823 operand t2_RegP()
4824 %{
4825 constraint(ALLOC_IN_RC(t2_long_reg));
4826 match(RegP);
4827 match(mRegP);
4828 match(no_T8_mRegP);
4830 format %{ %}
4831 interface(REG_INTER);
4832 %}
4834 operand t3_RegP()
4835 %{
4836 constraint(ALLOC_IN_RC(t3_long_reg));
4837 match(RegP);
4838 match(mRegP);
4839 match(no_T8_mRegP);
4841 format %{ %}
4842 interface(REG_INTER);
4843 %}
// NOTE: t8_RegP intentionally does NOT match no_T8_mRegP — T8 is the
// register that operand class excludes.
4845 operand t8_RegP()
4846 %{
4847 constraint(ALLOC_IN_RC(t8_long_reg));
4848 match(RegP);
4849 match(mRegP);
4851 format %{ %}
4852 interface(REG_INTER);
4853 %}
4855 operand t9_RegP()
4856 %{
4857 constraint(ALLOC_IN_RC(t9_long_reg));
4858 match(RegP);
4859 match(mRegP);
4860 match(no_T8_mRegP);
4862 format %{ %}
4863 interface(REG_INTER);
4864 %}
4866 operand a0_RegP()
4867 %{
4868 constraint(ALLOC_IN_RC(a0_long_reg));
4869 match(RegP);
4870 match(mRegP);
4871 match(no_T8_mRegP);
4873 format %{ %}
4874 interface(REG_INTER);
4875 %}
4877 operand a1_RegP()
4878 %{
4879 constraint(ALLOC_IN_RC(a1_long_reg));
4880 match(RegP);
4881 match(mRegP);
4882 match(no_T8_mRegP);
4884 format %{ %}
4885 interface(REG_INTER);
4886 %}
4888 operand a2_RegP()
4889 %{
4890 constraint(ALLOC_IN_RC(a2_long_reg));
4891 match(RegP);
4892 match(mRegP);
4893 match(no_T8_mRegP);
4895 format %{ %}
4896 interface(REG_INTER);
4897 %}
4899 operand a3_RegP()
4900 %{
4901 constraint(ALLOC_IN_RC(a3_long_reg));
4902 match(RegP);
4903 match(mRegP);
4904 match(no_T8_mRegP);
4906 format %{ %}
4907 interface(REG_INTER);
4908 %}
4910 operand a4_RegP()
4911 %{
4912 constraint(ALLOC_IN_RC(a4_long_reg));
4913 match(RegP);
4914 match(mRegP);
4915 match(no_T8_mRegP);
4917 format %{ %}
4918 interface(REG_INTER);
4919 %}
4922 operand a5_RegP()
4923 %{
4924 constraint(ALLOC_IN_RC(a5_long_reg));
4925 match(RegP);
4926 match(mRegP);
4927 match(no_T8_mRegP);
4929 format %{ %}
4930 interface(REG_INTER);
4931 %}
4933 operand a6_RegP()
4934 %{
4935 constraint(ALLOC_IN_RC(a6_long_reg));
4936 match(RegP);
4937 match(mRegP);
4938 match(no_T8_mRegP);
4940 format %{ %}
4941 interface(REG_INTER);
4942 %}
4944 operand a7_RegP()
4945 %{
4946 constraint(ALLOC_IN_RC(a7_long_reg));
4947 match(RegP);
4948 match(mRegP);
4949 match(no_T8_mRegP);
4951 format %{ %}
4952 interface(REG_INTER);
4953 %}
4955 operand v0_RegP()
4956 %{
4957 constraint(ALLOC_IN_RC(v0_long_reg));
4958 match(RegP);
4959 match(mRegP);
4960 match(no_T8_mRegP);
4962 format %{ %}
4963 interface(REG_INTER);
4964 %}
4966 operand v1_RegP()
4967 %{
4968 constraint(ALLOC_IN_RC(v1_long_reg));
4969 match(RegP);
4970 match(mRegP);
4971 match(no_T8_mRegP);
4973 format %{ %}
4974 interface(REG_INTER);
4975 %}
// The SP/FP pseudo-operands below are disabled (commented out).
4977 /*
4978 operand mSPRegP(mRegP reg) %{
4979 constraint(ALLOC_IN_RC(sp_reg));
4980 match(reg);
4982 format %{ "SP" %}
4983 interface(REG_INTER);
4984 %}
4986 operand mFPRegP(mRegP reg) %{
4987 constraint(ALLOC_IN_RC(fp_reg));
4988 match(reg);
4990 format %{ "FP" %}
4991 interface(REG_INTER);
4992 %}
4993 */
// Long (64-bit, RegL) register operands. mRegL is the generic form
// (class long_reg); the per-register variants pin a long to a specific
// register's *_long_reg class and also match mRegL. Note the set is not
// exhaustive: t9, s5, s6 long operands are absent here (presumably those
// registers are reserved elsewhere — TODO confirm).
4995 operand mRegL() %{
4996 constraint(ALLOC_IN_RC(long_reg));
4997 match(RegL);
4999 format %{ %}
5000 interface(REG_INTER);
5001 %}
5003 operand v0RegL() %{
5004 constraint(ALLOC_IN_RC(v0_long_reg));
5005 match(RegL);
5006 match(mRegL);
5008 format %{ %}
5009 interface(REG_INTER);
5010 %}
5012 operand v1RegL() %{
5013 constraint(ALLOC_IN_RC(v1_long_reg));
5014 match(RegL);
5015 match(mRegL);
5017 format %{ %}
5018 interface(REG_INTER);
5019 %}
5021 operand a0RegL() %{
5022 constraint(ALLOC_IN_RC(a0_long_reg));
5023 match(RegL);
5024 match(mRegL);
5026 format %{ "A0" %}
5027 interface(REG_INTER);
5028 %}
5030 operand a1RegL() %{
5031 constraint(ALLOC_IN_RC(a1_long_reg));
5032 match(RegL);
5033 match(mRegL);
5035 format %{ %}
5036 interface(REG_INTER);
5037 %}
5039 operand a2RegL() %{
5040 constraint(ALLOC_IN_RC(a2_long_reg));
5041 match(RegL);
5042 match(mRegL);
5044 format %{ %}
5045 interface(REG_INTER);
5046 %}
5048 operand a3RegL() %{
5049 constraint(ALLOC_IN_RC(a3_long_reg));
5050 match(RegL);
5051 match(mRegL);
5053 format %{ %}
5054 interface(REG_INTER);
5055 %}
5057 operand t0RegL() %{
5058 constraint(ALLOC_IN_RC(t0_long_reg));
5059 match(RegL);
5060 match(mRegL);
5062 format %{ %}
5063 interface(REG_INTER);
5064 %}
5066 operand t1RegL() %{
5067 constraint(ALLOC_IN_RC(t1_long_reg));
5068 match(RegL);
5069 match(mRegL);
5071 format %{ %}
5072 interface(REG_INTER);
5073 %}
5075 operand t2RegL() %{
5076 constraint(ALLOC_IN_RC(t2_long_reg));
5077 match(RegL);
5078 match(mRegL);
5080 format %{ %}
5081 interface(REG_INTER);
5082 %}
5084 operand t3RegL() %{
5085 constraint(ALLOC_IN_RC(t3_long_reg));
5086 match(RegL);
5087 match(mRegL);
5089 format %{ %}
5090 interface(REG_INTER);
5091 %}
5093 operand t8RegL() %{
5094 constraint(ALLOC_IN_RC(t8_long_reg));
5095 match(RegL);
5096 match(mRegL);
5098 format %{ %}
5099 interface(REG_INTER);
5100 %}
5102 operand a4RegL() %{
5103 constraint(ALLOC_IN_RC(a4_long_reg));
5104 match(RegL);
5105 match(mRegL);
5107 format %{ %}
5108 interface(REG_INTER);
5109 %}
5111 operand a5RegL() %{
5112 constraint(ALLOC_IN_RC(a5_long_reg));
5113 match(RegL);
5114 match(mRegL);
5116 format %{ %}
5117 interface(REG_INTER);
5118 %}
5120 operand a6RegL() %{
5121 constraint(ALLOC_IN_RC(a6_long_reg));
5122 match(RegL);
5123 match(mRegL);
5125 format %{ %}
5126 interface(REG_INTER);
5127 %}
5129 operand a7RegL() %{
5130 constraint(ALLOC_IN_RC(a7_long_reg));
5131 match(RegL);
5132 match(mRegL);
5134 format %{ %}
5135 interface(REG_INTER);
5136 %}
5138 operand s0RegL() %{
5139 constraint(ALLOC_IN_RC(s0_long_reg));
5140 match(RegL);
5141 match(mRegL);
5143 format %{ %}
5144 interface(REG_INTER);
5145 %}
5147 operand s1RegL() %{
5148 constraint(ALLOC_IN_RC(s1_long_reg));
5149 match(RegL);
5150 match(mRegL);
5152 format %{ %}
5153 interface(REG_INTER);
5154 %}
5156 operand s2RegL() %{
5157 constraint(ALLOC_IN_RC(s2_long_reg));
5158 match(RegL);
5159 match(mRegL);
5161 format %{ %}
5162 interface(REG_INTER);
5163 %}
5165 operand s3RegL() %{
5166 constraint(ALLOC_IN_RC(s3_long_reg));
5167 match(RegL);
5168 match(mRegL);
5170 format %{ %}
5171 interface(REG_INTER);
5172 %}
5174 operand s4RegL() %{
5175 constraint(ALLOC_IN_RC(s4_long_reg));
5176 match(RegL);
5177 match(mRegL);
5179 format %{ %}
5180 interface(REG_INTER);
5181 %}
5183 operand s7RegL() %{
5184 constraint(ALLOC_IN_RC(s7_long_reg));
5185 match(RegL);
5186 match(mRegL);
5188 format %{ %}
5189 interface(REG_INTER);
5190 %}
5192 // Floating register operands
// Single-precision float operand, allocatable from the flt_reg class.
5193 operand regF() %{
5194 constraint(ALLOC_IN_RC(flt_reg));
5195 match(RegF);
5197 format %{ %}
5198 interface(REG_INTER);
5199 %}
5201 //Double Precision Floating register operands
// Double-precision float operand, allocatable from the dbl_reg class.
5202 operand regD() %{
5203 constraint(ALLOC_IN_RC(dbl_reg));
5204 match(RegD);
5206 format %{ %}
5207 interface(REG_INTER);
5208 %}
5210 //----------Memory Operands----------------------------------------------------
// Memory addressing operands. Each describes one address shape the
// matcher can fold into a load/store: a MEMORY_INTER interface supplies
// base register, index register, scale and displacement. The *Narrow /
// *NarrowKlass variants match through DecodeN / DecodeNKlass and are
// guarded by a predicate requiring a zero compressed-oop/klass shift,
// so the narrow register can be used directly as a base.
5211 // Indirect Memory Operand
5212 operand indirect(mRegP reg) %{
5213 constraint(ALLOC_IN_RC(p_reg));
5214 match(reg);
5216 format %{ "[$reg] @ indirect" %}
5217 interface(MEMORY_INTER) %{
5218 base($reg);
5219 index(0x0); /* NO_INDEX */
5220 scale(0x0);
5221 disp(0x0);
5222 %}
5223 %}
5225 // Indirect Memory Plus Short Offset Operand
5226 operand indOffset8(mRegP reg, immL8 off)
5227 %{
5228 constraint(ALLOC_IN_RC(p_reg));
5229 match(AddP reg off);
5231 format %{ "[$reg + $off (8-bit)] @ indOffset8" %}
5232 interface(MEMORY_INTER) %{
5233 base($reg);
5234 index(0x0); /* NO_INDEX */
5235 scale(0x0);
5236 disp($off);
5237 %}
5238 %}
5240 // Indirect Memory Times Scale Plus Index Register
5241 operand indIndexScale(mRegP reg, mRegL lreg, immI2 scale)
5242 %{
5243 constraint(ALLOC_IN_RC(p_reg));
5244 match(AddP reg (LShiftL lreg scale));
5246 op_cost(10);
5247 format %{"[$reg + $lreg << $scale] @ indIndexScale" %}
5248 interface(MEMORY_INTER) %{
5249 base($reg);
5250 index($lreg);
5251 scale($scale);
5252 disp(0x0);
5253 %}
5254 %}
5257 // [base + index + offset]
5258 operand baseIndexOffset8(mRegP base, mRegL index, immL8 off)
5259 %{
5260 constraint(ALLOC_IN_RC(p_reg));
5261 op_cost(5);
5262 match(AddP (AddP base index) off);
5264 format %{ "[$base + $index + $off (8-bit)] @ baseIndexOffset8" %}
5265 interface(MEMORY_INTER) %{
5266 base($base);
5267 index($index);
5268 scale(0x0);
5269 disp($off);
5270 %}
5271 %}
5273 // [base + index + offset]
// Same shape as baseIndexOffset8 but with an int index widened via ConvI2L.
5274 operand baseIndexOffset8_convI2L(mRegP base, mRegI index, immL8 off)
5275 %{
5276 constraint(ALLOC_IN_RC(p_reg));
5277 op_cost(5);
5278 match(AddP (AddP base (ConvI2L index)) off);
5280 format %{ "[$base + $index + $off (8-bit)] @ baseIndexOffset8_convI2L" %}
5281 interface(MEMORY_INTER) %{
5282 base($base);
5283 index($index);
5284 scale(0x0);
5285 disp($off);
5286 %}
5287 %}
5289 // Indirect Memory Times Scale Plus Index Register Plus Offset Operand
5290 operand indIndexScaleOffset8(mRegP reg, immL8 off, mRegL lreg, immI2 scale)
5291 %{
5292 constraint(ALLOC_IN_RC(p_reg));
5293 match(AddP (AddP reg (LShiftL lreg scale)) off);
5295 op_cost(10);
5296 format %{"[$reg + $off + $lreg << $scale] @ indIndexScaleOffset8" %}
5297 interface(MEMORY_INTER) %{
5298 base($reg);
5299 index($lreg);
5300 scale($scale);
5301 disp($off);
5302 %}
5303 %}
// As above, but with an int index widened via ConvI2L before scaling.
5305 operand indIndexScaleOffset8_convI2L(mRegP reg, immL8 off, mRegI ireg, immI2 scale)
5306 %{
5307 constraint(ALLOC_IN_RC(p_reg));
5308 match(AddP (AddP reg (LShiftL (ConvI2L ireg) scale)) off);
5310 op_cost(10);
5311 format %{"[$reg + $off + $ireg << $scale] @ indIndexScaleOffset8_convI2L" %}
5312 interface(MEMORY_INTER) %{
5313 base($reg);
5314 index($ireg);
5315 scale($scale);
5316 disp($off);
5317 %}
5318 %}
5320 // [base + index<<scale + offset]
5321 operand basePosIndexScaleOffset8(mRegP base, mRegI index, immL8 off, immI_0_31 scale)
5322 %{
5323 constraint(ALLOC_IN_RC(p_reg));
5324 //predicate(n->in(2)->in(3)->in(1)->as_Type()->type()->is_long()->_lo >= 0);
5325 op_cost(10);
5326 match(AddP (AddP base (LShiftL (ConvI2L index) scale)) off);
5328 format %{ "[$base + $index << $scale + $off (8-bit)] @ basePosIndexScaleOffset8" %}
5329 interface(MEMORY_INTER) %{
5330 base($base);
5331 index($index);
5332 scale($scale);
5333 disp($off);
5334 %}
5335 %}
5337 // Indirect Memory Times Scale Plus Index Register Plus Offset Operand
// Narrow-oop base variant; valid only when narrow oops need no shift.
5338 operand indIndexScaleOffsetNarrow(mRegN reg, immL8 off, mRegL lreg, immI2 scale)
5339 %{
5340 predicate(Universe::narrow_oop_shift() == 0);
5341 constraint(ALLOC_IN_RC(p_reg));
5342 match(AddP (AddP (DecodeN reg) (LShiftL lreg scale)) off);
5344 op_cost(10);
5345 format %{"[$reg + $off + $lreg << $scale] @ indIndexScaleOffsetNarrow" %}
5346 interface(MEMORY_INTER) %{
5347 base($reg);
5348 index($lreg);
5349 scale($scale);
5350 disp($off);
5351 %}
5352 %}
5354 // [base + index<<scale + offset] for compressed Oops
5355 operand indPosIndexI2LScaleOffset8Narrow(mRegN base, mRegI index, immL8 off, immI_0_31 scale)
5356 %{
5357 constraint(ALLOC_IN_RC(p_reg));
5358 //predicate(Universe::narrow_oop_shift() == 0 && n->in(2)->in(3)->in(1)->as_Type()->type()->is_long()->_lo >= 0);
5359 predicate(Universe::narrow_oop_shift() == 0);
5360 op_cost(10);
5361 match(AddP (AddP (DecodeN base) (LShiftL (ConvI2L index) scale)) off);
5363 format %{ "[$base + $index << $scale + $off (8-bit)] @ indPosIndexI2LScaleOffset8Narrow" %}
5364 interface(MEMORY_INTER) %{
5365 base($base);
5366 index($index);
5367 scale($scale);
5368 disp($off);
5369 %}
5370 %}
5372 //FIXME: I think it's better to limit the immI to be 16-bit at most!
5373 // Indirect Memory Plus Long Offset Operand
5374 operand indOffset32(mRegP reg, immL32 off) %{
5375 constraint(ALLOC_IN_RC(p_reg));
5376 op_cost(20);
5377 match(AddP reg off);
5379 format %{ "[$reg + $off (32-bit)] @ indOffset32" %}
5380 interface(MEMORY_INTER) %{
5381 base($reg);
5382 index(0x0); /* NO_INDEX */
5383 scale(0x0);
5384 disp($off);
5385 %}
5386 %}
5388 // Indirect Memory Plus Index Register
5389 operand indIndex(mRegP addr, mRegL index) %{
5390 constraint(ALLOC_IN_RC(p_reg));
5391 match(AddP addr index);
5393 op_cost(20);
5394 format %{"[$addr + $index] @ indIndex" %}
5395 interface(MEMORY_INTER) %{
5396 base($addr);
5397 index($index);
5398 scale(0x0);
5399 disp(0x0);
5400 %}
5401 %}
// Narrow-klass addressing shapes (DecodeNKlass), valid only when the
// compressed-klass shift is zero.
5403 operand indirectNarrowKlass(mRegN reg)
5404 %{
5405 predicate(Universe::narrow_klass_shift() == 0);
5406 constraint(ALLOC_IN_RC(p_reg));
5407 op_cost(10);
5408 match(DecodeNKlass reg);
5410 format %{ "[$reg] @ indirectNarrowKlass" %}
5411 interface(MEMORY_INTER) %{
5412 base($reg);
5413 index(0x0);
5414 scale(0x0);
5415 disp(0x0);
5416 %}
5417 %}
5419 operand indOffset8NarrowKlass(mRegN reg, immL8 off)
5420 %{
5421 predicate(Universe::narrow_klass_shift() == 0);
5422 constraint(ALLOC_IN_RC(p_reg));
5423 op_cost(10);
5424 match(AddP (DecodeNKlass reg) off);
5426 format %{ "[$reg + $off (8-bit)] @ indOffset8NarrowKlass" %}
5427 interface(MEMORY_INTER) %{
5428 base($reg);
5429 index(0x0);
5430 scale(0x0);
5431 disp($off);
5432 %}
5433 %}
5435 operand indOffset32NarrowKlass(mRegN reg, immL32 off)
5436 %{
5437 predicate(Universe::narrow_klass_shift() == 0);
5438 constraint(ALLOC_IN_RC(p_reg));
5439 op_cost(10);
5440 match(AddP (DecodeNKlass reg) off);
5442 format %{ "[$reg + $off (32-bit)] @ indOffset32NarrowKlass" %}
5443 interface(MEMORY_INTER) %{
5444 base($reg);
5445 index(0x0);
5446 scale(0x0);
5447 disp($off);
5448 %}
5449 %}
5451 operand indIndexOffsetNarrowKlass(mRegN reg, mRegL lreg, immL32 off)
5452 %{
5453 predicate(Universe::narrow_klass_shift() == 0);
5454 constraint(ALLOC_IN_RC(p_reg));
5455 match(AddP (AddP (DecodeNKlass reg) lreg) off);
5457 op_cost(10);
5458 format %{"[$reg + $off + $lreg] @ indIndexOffsetNarrowKlass" %}
5459 interface(MEMORY_INTER) %{
5460 base($reg);
5461 index($lreg);
5462 scale(0x0);
5463 disp($off);
5464 %}
5465 %}
5467 operand indIndexNarrowKlass(mRegN reg, mRegL lreg)
5468 %{
5469 predicate(Universe::narrow_klass_shift() == 0);
5470 constraint(ALLOC_IN_RC(p_reg));
5471 match(AddP (DecodeNKlass reg) lreg);
5473 op_cost(10);
5474 format %{"[$reg + $lreg] @ indIndexNarrowKlass" %}
5475 interface(MEMORY_INTER) %{
5476 base($reg);
5477 index($lreg);
5478 scale(0x0);
5479 disp(0x0);
5480 %}
5481 %}
5483 // Indirect Memory Operand
5484 operand indirectNarrow(mRegN reg)
5485 %{
5486 predicate(Universe::narrow_oop_shift() == 0);
5487 constraint(ALLOC_IN_RC(p_reg));
5488 op_cost(10);
5489 match(DecodeN reg);
5491 format %{ "[$reg] @ indirectNarrow" %}
5492 interface(MEMORY_INTER) %{
5493 base($reg);
5494 index(0x0);
5495 scale(0x0);
5496 disp(0x0);
5497 %}
5498 %}
5500 // Indirect Memory Plus Short Offset Operand
5501 operand indOffset8Narrow(mRegN reg, immL8 off)
5502 %{
5503 predicate(Universe::narrow_oop_shift() == 0);
5504 constraint(ALLOC_IN_RC(p_reg));
5505 op_cost(10);
5506 match(AddP (DecodeN reg) off);
5508 format %{ "[$reg + $off (8-bit)] @ indOffset8Narrow" %}
5509 interface(MEMORY_INTER) %{
5510 base($reg);
5511 index(0x0);
5512 scale(0x0);
5513 disp($off);
5514 %}
5515 %}
5517 // Indirect Memory Plus Index Register Plus Offset Operand
5518 operand indIndexOffset8Narrow(mRegN reg, mRegL lreg, immL8 off)
5519 %{
5520 predicate(Universe::narrow_oop_shift() == 0);
5521 constraint(ALLOC_IN_RC(p_reg));
5522 match(AddP (AddP (DecodeN reg) lreg) off);
5524 op_cost(10);
5525 format %{"[$reg + $off + $lreg] @ indIndexOffset8Narrow" %}
5526 interface(MEMORY_INTER) %{
5527 base($reg);
5528 index($lreg);
5529 scale(0x0);
5530 disp($off);
5531 %}
5532 %}
5534 //----------Load Long Memory Operands------------------------------------------
5535 // The load-long idiom will use its address expression again after loading
5536 // the first word of the long. If the load-long destination overlaps with
5537 // registers used in the addressing expression, the 2nd half will be loaded
5538 // from a clobbered address. Fix this by requiring that load-long use
5539 // address registers that do not overlap with the load-long target.
5541 // load-long support
// High op_cost discourages the allocator from preferring this operand.
5542 operand load_long_RegP() %{
5543 constraint(ALLOC_IN_RC(p_reg));
5544 match(RegP);
5545 match(mRegP);
5546 op_cost(100);
5547 format %{ %}
5548 interface(REG_INTER);
5549 %}
5551 // Indirect Memory Operand Long
5552 operand load_long_indirect(load_long_RegP reg) %{
5553 constraint(ALLOC_IN_RC(p_reg));
5554 match(reg);
5556 format %{ "[$reg]" %}
5557 interface(MEMORY_INTER) %{
5558 base($reg);
5559 index(0x0);
5560 scale(0x0);
5561 disp(0x0);
5562 %}
5563 %}
5565 // Indirect Memory Plus Long Offset Operand
5566 operand load_long_indOffset32(load_long_RegP reg, immL32 off) %{
5567 match(AddP reg off);
5569 format %{ "[$reg + $off]" %}
5570 interface(MEMORY_INTER) %{
5571 base($reg);
5572 index(0x0);
5573 scale(0x0);
5574 disp($off);
5575 %}
5576 %}
5578 //----------Conditional Branch Operands----------------------------------------
5579 // Comparison Op - This is the operation of the comparison, and is limited to
5580 // the following set of codes:
5581 // L (<), LE (<=), G (>), GE (>=), E (==), NE (!=)
5582 //
5583 // Other attributes of the comparison, such as unsignedness, are specified
5584 // by the comparison instruction that sets a condition code flags register.
5585 // That result is represented by a flags operand whose subtype is appropriate
5586 // to the unsignedness (etc.) of the comparison.
5587 //
5588 // Later, the instruction which matches both the Comparison Op (a Bool) and
5589 // the flags (produced by the Cmp) specifies the coding of the comparison op
5590 // by matching a specific subtype of Bool operand below, such as cmpOpU.
5592 // Comparison Code
// Encodings 0x01..0x08 are the port's own condition codes, consumed by
// the instruction encodings elsewhere in this file.
5593 operand cmpOp() %{
5594 match(Bool);
5596 format %{ "" %}
5597 interface(COND_INTER) %{
5598 equal(0x01);
5599 not_equal(0x02);
5600 greater(0x03);
5601 greater_equal(0x04);
5602 less(0x05);
5603 less_equal(0x06);
5604 overflow(0x7);
5605 no_overflow(0x8);
5606 %}
5607 %}
5610 // Comparison Code
5611 // Comparison Code, unsigned compare. Used by FP also, with
5612 // C2 (unordered) turned into GT or LT already. The other bits
5613 // C0 and C3 are turned into Carry & Zero flags.
// NOTE: currently identical to cmpOp — signedness is carried by the
// comparing instruction, not by this encoding.
5614 operand cmpOpU() %{
5615 match(Bool);
5617 format %{ "" %}
5618 interface(COND_INTER) %{
5619 equal(0x01);
5620 not_equal(0x02);
5621 greater(0x03);
5622 greater_equal(0x04);
5623 less(0x05);
5624 less_equal(0x06);
5625 overflow(0x7);
5626 no_overflow(0x8);
5627 %}
5628 %}
// The alternative encodings below are disabled (commented out).
5630 /*
5631 // Comparison Code, unsigned compare. Used by FP also, with
5632 // C2 (unordered) turned into GT or LT already. The other bits
5633 // C0 and C3 are turned into Carry & Zero flags.
5634 operand cmpOpU() %{
5635 match(Bool);
5637 format %{ "" %}
5638 interface(COND_INTER) %{
5639 equal(0x4);
5640 not_equal(0x5);
5641 less(0x2);
5642 greater_equal(0x3);
5643 less_equal(0x6);
5644 greater(0x7);
5645 %}
5646 %}
5647 */
5648 /*
5649 // Comparison Code for FP conditional move
5650 operand cmpOp_fcmov() %{
5651 match(Bool);
5653 format %{ "" %}
5654 interface(COND_INTER) %{
5655 equal (0x01);
5656 not_equal (0x02);
5657 greater (0x03);
5658 greater_equal(0x04);
5659 less (0x05);
5660 less_equal (0x06);
5661 %}
5662 %}
5664 // Comparison Code used in long compares
5665 operand cmpOp_commute() %{
5666 match(Bool);
5668 format %{ "" %}
5669 interface(COND_INTER) %{
5670 equal(0x4);
5671 not_equal(0x5);
5672 less(0xF);
5673 greater_equal(0xE);
5674 less_equal(0xD);
5675 greater(0xC);
5676 %}
5677 %}
5678 */
5680 //----------Special Memory Operands--------------------------------------------
5681 // Stack Slot Operand - This operand is used for loading and storing temporary
5682 // values on the stack where a match requires a value to
5683 // flow through memory.
// All five variants address [SP + slot_offset]; base 0x1d is the
// encoding of the stack pointer register (per the "// SP" annotation),
// and the operand itself supplies the displacement.
5684 operand stackSlotP(sRegP reg) %{
5685 constraint(ALLOC_IN_RC(stack_slots));
5686 // No match rule because this operand is only generated in matching
5687 op_cost(50);
5688 format %{ "[$reg]" %}
5689 interface(MEMORY_INTER) %{
5690 base(0x1d); // SP
5691 index(0x0); // No Index
5692 scale(0x0); // No Scale
5693 disp($reg); // Stack Offset
5694 %}
5695 %}
5697 operand stackSlotI(sRegI reg) %{
5698 constraint(ALLOC_IN_RC(stack_slots));
5699 // No match rule because this operand is only generated in matching
5700 op_cost(50);
5701 format %{ "[$reg]" %}
5702 interface(MEMORY_INTER) %{
5703 base(0x1d); // SP
5704 index(0x0); // No Index
5705 scale(0x0); // No Scale
5706 disp($reg); // Stack Offset
5707 %}
5708 %}
5710 operand stackSlotF(sRegF reg) %{
5711 constraint(ALLOC_IN_RC(stack_slots));
5712 // No match rule because this operand is only generated in matching
5713 op_cost(50);
5714 format %{ "[$reg]" %}
5715 interface(MEMORY_INTER) %{
5716 base(0x1d); // SP
5717 index(0x0); // No Index
5718 scale(0x0); // No Scale
5719 disp($reg); // Stack Offset
5720 %}
5721 %}
5723 operand stackSlotD(sRegD reg) %{
5724 constraint(ALLOC_IN_RC(stack_slots));
5725 // No match rule because this operand is only generated in matching
5726 op_cost(50);
5727 format %{ "[$reg]" %}
5728 interface(MEMORY_INTER) %{
5729 base(0x1d); // SP
5730 index(0x0); // No Index
5731 scale(0x0); // No Scale
5732 disp($reg); // Stack Offset
5733 %}
5734 %}
5736 operand stackSlotL(sRegL reg) %{
5737 constraint(ALLOC_IN_RC(stack_slots));
5738 // No match rule because this operand is only generated in matching
5739 op_cost(50);
5740 format %{ "[$reg]" %}
5741 interface(MEMORY_INTER) %{
5742 base(0x1d); // SP
5743 index(0x0); // No Index
5744 scale(0x0); // No Scale
5745 disp($reg); // Stack Offset
5746 %}
5747 %}
5750 //------------------------OPERAND CLASSES--------------------------------------
// The 'memory' opclass groups every addressing-mode operand above so that
// load/store instruct patterns can accept any of them via one name.
5751 //opclass memory( direct, indirect, indOffset16, indOffset32, indOffset32X, indIndexOffset );
5752 opclass memory( indirect, indirectNarrow, indOffset8, indOffset32, indIndex, indIndexScale, load_long_indirect, load_long_indOffset32, baseIndexOffset8, baseIndexOffset8_convI2L, indIndexScaleOffset8, indIndexScaleOffset8_convI2L, basePosIndexScaleOffset8, indIndexScaleOffsetNarrow, indPosIndexI2LScaleOffset8Narrow, indOffset8Narrow, indIndexOffset8Narrow);
5755 //----------PIPELINE-----------------------------------------------------------
5756 // Rules which define the behavior of the target architecture's pipeline.
5758 pipeline %{
5760 //----------ATTRIBUTES---------------------------------------------------------
// Global pipeline attributes for the scheduler: fixed 4-byte instructions,
// branch delay slots, one instruction per bundle, up to 4 bundles/cycle,
// 16-byte fetch lines.
5761 attributes %{
5762 fixed_size_instructions; // Fixed size instructions
5763 branch_has_delay_slot; // branches have a delay slot in gs2
5764 max_instructions_per_bundle = 1; // 1 instruction per bundle
5765 max_bundles_per_cycle = 4; // Up to 4 bundles per cycle
5766 bundle_unit_size=4;
5767 instruction_unit_size = 4; // An instruction is 4 bytes long
5768 instruction_fetch_unit_size = 16; // The processor fetches one line
5769 instruction_fetch_units = 1; // of 16 bytes
5771 // List of nop instructions
5772 nops( MachNop );
5773 %}
5775 //----------RESOURCES----------------------------------------------------------
5776 // Resources are the functional units available to the machine
// Four decoders, two integer ALUs, two FPUs, one memory port, one branch unit.
5778 resources(D1, D2, D3, D4, DECODE = D1 | D2 | D3| D4, ALU1, ALU2, ALU = ALU1 | ALU2, FPU1, FPU2, FPU = FPU1 | FPU2, MEM, BR);
5780 //----------PIPELINE DESCRIPTION-----------------------------------------------
5781 // Pipeline Description specifies the stages in the machine's pipeline
5783 // IF: fetch
5784 // ID: decode
5785 // RD: read
5786 // CA: calculate
5787 // WB: write back
5788 // CM: commit
5790 pipe_desc(IF, ID, RD, CA, WB, CM);
5793 //----------PIPELINE CLASSES---------------------------------------------------
5794 // Pipeline Classes describe the stages in which input and output are
5795 // referenced by the hardware pipeline.
//No.1 Integer ALU reg-reg operation : dst <-- reg1 op reg2
// Single-cycle ALU op; result available one cycle after write-back.
pipe_class ialu_regI_regI(mRegI dst, mRegI src1, mRegI src2) %{
    single_instruction;
    src1   : RD(read);
    src2   : RD(read);
    dst    : WB(write)+1;
    DECODE : ID;
    ALU    : CA;
%}

//No.19 Integer mult operation : dst <-- reg1 mult reg2
// Multiplies issue only on ALU2; result latency WB+5.
pipe_class ialu_mult(mRegI dst, mRegI src1, mRegI src2) %{
    src1   : RD(read);
    src2   : RD(read);
    dst    : WB(write)+5;
    DECODE : ID;
    ALU2   : CA;
%}

// Long multiply: same unit as the integer multiply, longer (WB+10) latency.
pipe_class mulL_reg_reg(mRegL dst, mRegL src1, mRegL src2) %{
    src1   : RD(read);
    src2   : RD(read);
    dst    : WB(write)+10;
    DECODE : ID;
    ALU2   : CA;
%}

//No.19 Integer div operation : dst <-- reg1 div reg2
// Divides issue only on ALU2; result latency WB+10.
pipe_class ialu_div(mRegI dst, mRegI src1, mRegI src2) %{
    src1   : RD(read);
    src2   : RD(read);
    dst    : WB(write)+10;
    DECODE : ID;
    ALU2   : CA;
%}

//No.19 Integer mod operation : dst <-- reg1 mod reg2
// Two instructions (divide plus remainder move), same latency as divide.
pipe_class ialu_mod(mRegI dst, mRegI src1, mRegI src2) %{
    instruction_count(2);
    src1   : RD(read);
    src2   : RD(read);
    dst    : WB(write)+10;
    DECODE : ID;
    ALU2   : CA;
%}
//No.15 Long ALU reg-reg operation : dst <-- reg1 op reg2
pipe_class ialu_regL_regL(mRegL dst, mRegL src1, mRegL src2) %{
    instruction_count(2);
    src1   : RD(read);
    src2   : RD(read);
    dst    : WB(write);
    DECODE : ID;
    ALU    : CA;
%}

//No.18 Long ALU reg-imm16 operation : dst <-- reg1 op imm16
pipe_class ialu_regL_imm16(mRegL dst, mRegL src) %{
    instruction_count(2);
    src    : RD(read);
    dst    : WB(write);
    DECODE : ID;
    ALU    : CA;
%}

//no.16 load Long from memory :
// Load has a WB+5 use latency; goes through the single memory port.
pipe_class ialu_loadL(mRegL dst, memory mem) %{
    instruction_count(2);
    mem    : RD(read);
    dst    : WB(write)+5;
    DECODE : ID;
    MEM    : RD;
%}

//No.17 Store Long to Memory :
pipe_class ialu_storeL(mRegL src, memory mem) %{
    instruction_count(2);
    mem    : RD(read);
    src    : RD(read);
    DECODE : ID;
    MEM    : RD;
%}

//No.2 Integer ALU reg-imm16 operation : dst <-- reg1 op imm16
pipe_class ialu_regI_imm16(mRegI dst, mRegI src) %{
    single_instruction;
    src    : RD(read);
    dst    : WB(write);
    DECODE : ID;
    ALU    : CA;
%}

//No.3 Integer move operation : dst <-- reg
pipe_class ialu_regI_mov(mRegI dst, mRegI src) %{
    src    : RD(read);
    dst    : WB(write);
    DECODE : ID;
    ALU    : CA;
%}

//No.4 No instructions : do nothing
pipe_class empty( ) %{
    instruction_count(0);
%}
//No.5 UnConditional branch :
pipe_class pipe_jump( label labl ) %{
    multiple_bundles;
    DECODE : ID;
    BR     : RD;
%}

//No.6 ALU Conditional branch :
pipe_class pipe_alu_branch(mRegI src1, mRegI src2, label labl ) %{
    multiple_bundles;
    src1   : RD(read);
    src2   : RD(read);
    DECODE : ID;
    BR     : RD;
%}

//no.7 load integer from memory :
// Integer load has a WB+3 use latency (shorter than the long load above).
pipe_class ialu_loadI(mRegI dst, memory mem) %{
    mem    : RD(read);
    dst    : WB(write)+3;
    DECODE : ID;
    MEM    : RD;
%}

//No.8 Store Integer to Memory :
pipe_class ialu_storeI(mRegI src, memory mem) %{
    mem    : RD(read);
    src    : RD(read);
    DECODE : ID;
    MEM    : RD;
%}
//No.10 Floating FPU reg-reg operation : dst <-- reg1 op reg2
pipe_class fpu_regF_regF(regF dst, regF src1, regF src2) %{
    src1   : RD(read);
    src2   : RD(read);
    dst    : WB(write);
    DECODE : ID;
    FPU    : CA;
%}

//No.22 Floating div operation : dst <-- reg1 div reg2
// Divides issue only on FPU2.
pipe_class fpu_div(regF dst, regF src1, regF src2) %{
    src1   : RD(read);
    src2   : RD(read);
    dst    : WB(write);
    DECODE : ID;
    FPU2   : CA;
%}

// int -> double conversion; issues on FPU1.
pipe_class fcvt_I2D(regD dst, mRegI src) %{
    src    : RD(read);
    dst    : WB(write);
    DECODE : ID;
    FPU1   : CA;
%}

// double -> int conversion; issues on FPU1.
pipe_class fcvt_D2I(mRegI dst, regD src) %{
    src    : RD(read);
    dst    : WB(write);
    DECODE : ID;
    FPU1   : CA;
%}

// Move from FP register to GPR (mfc1); modeled on the memory port.
pipe_class pipe_mfc1(mRegI dst, regD src) %{
    src    : RD(read);
    dst    : WB(write);
    DECODE : ID;
    MEM    : RD;
%}

// Move from GPR to FP register (mtc1); holds the memory port for 5 cycles.
pipe_class pipe_mtc1(regD dst, mRegI src) %{
    src    : RD(read);
    dst    : WB(write);
    DECODE : ID;
    MEM    : RD(5);
%}

//No.23 Floating sqrt operation : dst <-- reg1 sqrt reg2
pipe_class fpu_sqrt(regF dst, regF src1, regF src2) %{
    multiple_bundles;
    src1   : RD(read);
    src2   : RD(read);
    dst    : WB(write);
    DECODE : ID;
    FPU2   : CA;
%}
//No.11 Load Floating from Memory :
pipe_class fpu_loadF(regF dst, memory mem) %{
    instruction_count(1);
    mem    : RD(read);
    dst    : WB(write)+3;
    DECODE : ID;
    MEM    : RD;
%}

//No.12 Store Floating to Memory :
pipe_class fpu_storeF(regF src, memory mem) %{
    instruction_count(1);
    mem    : RD(read);
    src    : RD(read);
    DECODE : ID;
    MEM    : RD;
%}

//No.13 FPU Conditional branch :
pipe_class pipe_fpu_branch(regF src1, regF src2, label labl ) %{
    multiple_bundles;
    src1   : RD(read);
    src2   : RD(read);
    DECODE : ID;
    BR     : RD;
%}

//No.14 Floating FPU reg operation : dst <-- op reg
pipe_class fpu1_regF(regF dst, regF src) %{
    src    : RD(read);
    dst    : WB(write);
    DECODE : ID;
    FPU    : CA;
%}

// Multi-instruction memory sequence (e.g. atomics); serializes the pipeline.
pipe_class long_memory_op() %{
    instruction_count(10); multiple_bundles; force_serialization;
    fixed_latency(30);
%}

// Call that occupies the branch unit; large fixed latency, serializing.
pipe_class simple_call() %{
    instruction_count(10); multiple_bundles; force_serialization;
    fixed_latency(200);
    BR : RD;
%}

pipe_class call() %{
    instruction_count(10); multiple_bundles; force_serialization;
    fixed_latency(200);
%}

//FIXME:
//No.9 Piple slow : for multi-instructions
// Catch-all class for long expanded sequences; conservatively serializing.
pipe_class pipe_slow( ) %{
    instruction_count(20);
    force_serialization;
    multiple_bundles;
    fixed_latency(50);
%}
6051 %}
6055 //----------INSTRUCTIONS-------------------------------------------------------
6056 //
6057 // match -- States which machine-independent subtree may be replaced
6058 // by this instruction.
6059 // ins_cost -- The estimated cost of this instruction is used by instruction
6060 // selection to identify a minimum cost tree of machine
6061 // instructions that matches a tree of machine-independent
6062 // instructions.
6063 // format -- A string providing the disassembly for this instruction.
6064 // The value of an instruction's operand may be inserted
6065 // by referring to it with a '$' prefix.
6066 // opcode -- Three instruction opcodes may be provided. These are referred
6067 // to within an encode class as $primary, $secondary, and $tertiary
6068 // respectively. The primary opcode is commonly used to
6069 // indicate the type of machine instruction, while secondary
6070 // and tertiary are often used for prefix options or addressing
6071 // modes.
6072 // ins_encode -- A list of encode classes with parameters. The encode class
6073 // name must have been defined in an 'enc_class' specification
6074 // in the encode section of the architecture description.
// Load Integer
instruct loadI(mRegI dst, memory mem) %{
  match(Set dst (LoadI mem));

  ins_cost(125);
  format %{ "lw $dst, $mem #@loadI" %}
  ins_encode (load_I_enc(dst, mem));
  ins_pipe( ialu_loadI );
%}

// Load Integer widened to Long: a single lw covers both the load and the
// ConvI2L here (no separate extension instruction is emitted).
instruct loadI_convI2L(mRegL dst, memory mem) %{
  match(Set dst (ConvI2L (LoadI mem)));

  ins_cost(125);
  format %{ "lw $dst, $mem #@loadI_convI2L" %}
  ins_encode (load_I_enc(dst, mem));
  ins_pipe( ialu_loadI );
%}

// Load Integer (32 bit signed) to Byte (8 bit signed)
// Matches the (x << 24) >> 24 narrowing idiom and emits a byte load instead.
instruct loadI2B(mRegI dst, memory mem, immI_24 twentyfour) %{
  match(Set dst (RShiftI (LShiftI (LoadI mem) twentyfour) twentyfour));

  ins_cost(125);
  format %{ "lb $dst, $mem\t# int -> byte #@loadI2B" %}
  ins_encode(load_B_enc(dst, mem));
  ins_pipe(ialu_loadI);
%}

// Load Integer (32 bit signed) to Unsigned Byte (8 bit UNsigned)
// Matches (x & 0xFF) and emits an unsigned byte load instead.
instruct loadI2UB(mRegI dst, memory mem, immI_255 mask) %{
  match(Set dst (AndI (LoadI mem) mask));

  ins_cost(125);
  format %{ "lbu $dst, $mem\t# int -> ubyte #@loadI2UB" %}
  ins_encode(load_UB_enc(dst, mem));
  ins_pipe(ialu_loadI);
%}

// Load Integer (32 bit signed) to Short (16 bit signed)
// Matches the (x << 16) >> 16 narrowing idiom and emits a halfword load.
instruct loadI2S(mRegI dst, memory mem, immI_16 sixteen) %{
  match(Set dst (RShiftI (LShiftI (LoadI mem) sixteen) sixteen));

  ins_cost(125);
  format %{ "lh $dst, $mem\t# int -> short #@loadI2S" %}
  ins_encode(load_S_enc(dst, mem));
  ins_pipe(ialu_loadI);
%}

// Load Integer (32 bit signed) to Unsigned Short/Char (16 bit UNsigned)
// Matches (x & 0xFFFF) and emits an unsigned halfword load.
instruct loadI2US(mRegI dst, memory mem, immI_65535 mask) %{
  match(Set dst (AndI (LoadI mem) mask));

  ins_cost(125);
  format %{ "lhu $dst, $mem\t# int -> ushort/char #@loadI2US" %}
  ins_encode(load_C_enc(dst, mem));
  ins_pipe(ialu_loadI);
%}
// Load Long.
instruct loadL(mRegL dst, memory mem) %{
// predicate(!((LoadLNode*)n)->require_atomic_access());
  match(Set dst (LoadL mem));

  ins_cost(250);
  format %{ "ld $dst, $mem #@loadL" %}
  ins_encode(load_L_enc(dst, mem));
  ins_pipe( ialu_loadL );
%}

// Load Long - UNaligned
// Uses the same plain 'ld' encoding as loadL (see the FIXME below about a
// more effective ldl/ldr sequence); only the cost differs.
instruct loadL_unaligned(mRegL dst, memory mem) %{
  match(Set dst (LoadL_unaligned mem));

  // FIXME: Jin: Need more effective ldl/ldr
  ins_cost(450);
  format %{ "ld $dst, $mem #@loadL_unaligned\n\t" %}
  ins_encode(load_L_enc(dst, mem));
  ins_pipe( ialu_loadL );
%}

// Store Long
instruct storeL_reg(memory mem, mRegL src) %{
  match(Set mem (StoreL mem src));

  ins_cost(200);
  format %{ "sd $mem, $src #@storeL_reg\n" %}
  ins_encode(store_L_reg_enc(mem, src));
  ins_pipe( ialu_storeL );
%}

// Store long zero: cheaper (cost 180 vs 200) than materializing the
// constant in a register first.
instruct storeL_immL0(memory mem, immL0 zero) %{
  match(Set mem (StoreL mem zero));

  ins_cost(180);
  format %{ "sd $mem, zero #@storeL_immL0" %}
  ins_encode(store_L_immL0_enc(mem, zero));
  ins_pipe( ialu_storeL );
%}
// Load Compressed Pointer: compressed oops are 32-bit, zero-extended (lwu).
instruct loadN(mRegN dst, memory mem)
%{
   match(Set dst (LoadN mem));

   ins_cost(125); // XXX
   format %{ "lwu $dst, $mem\t# compressed ptr @ loadN" %}
   ins_encode (load_N_enc(dst, mem));
   ins_pipe( ialu_loadI ); // XXX
%}

// Load Pointer (full 64-bit)
instruct loadP(mRegP dst, memory mem) %{
  match(Set dst (LoadP mem));

  ins_cost(125);
  format %{ "ld $dst, $mem #@loadP" %}
  ins_encode (load_P_enc(dst, mem));
  ins_pipe( ialu_loadI );
%}

// Load Klass Pointer (same encoding as loadP)
instruct loadKlass(mRegP dst, memory mem) %{
  match(Set dst (LoadKlass mem));

  ins_cost(125);
  format %{ "MOV $dst,$mem @ loadKlass" %}
  ins_encode (load_P_enc(dst, mem));
  ins_pipe( ialu_loadI );
%}

// Load narrow Klass Pointer (32-bit, zero-extended)
instruct loadNKlass(mRegN dst, memory mem)
%{
   match(Set dst (LoadNKlass mem));

   ins_cost(125); // XXX
   format %{ "lwu $dst, $mem\t# compressed klass ptr @ loadNKlass" %}
   ins_encode (load_N_enc(dst, mem));
   ins_pipe( ialu_loadI ); // XXX
%}
// Load Constant (32-bit integer) into a register.
instruct loadConI(mRegI dst, immI src) %{
  match(Set dst src);

  ins_cost(150);
  format %{ "mov $dst, $src #@loadConI" %}
  ins_encode %{
    Register dst = $dst$$Register;
    int value = $src$$constant;
    // MacroAssembler::move picks the shortest sequence for the value.
    __ move(dst, value);
  %}
  ins_pipe( ialu_regI_regI );
%}

// Load an arbitrary 64-bit constant via MacroAssembler::set64.
instruct loadConL_set64(mRegL dst, immL src) %{
  match(Set dst src);
  ins_cost(120);
  format %{ "li $dst, $src @ loadConL_set64" %}
  ins_encode %{
    __ set64($dst$$Register, $src$$constant);
  %}
  ins_pipe(ialu_regL_regL);
%}
6245 /*
6246 // Load long value from constant table (predicated by immL_expensive).
6247 instruct loadConL_load(mRegL dst, immL_expensive src) %{
6248 match(Set dst src);
6249 ins_cost(150);
6250 format %{ "ld $dst, $constantoffset[$constanttablebase] # load long $src from table @ loadConL_ldx" %}
6251 ins_encode %{
6252 int con_offset = $constantoffset($src);
6254 if (Assembler::is_simm16(con_offset)) {
6255 __ ld($dst$$Register, $constanttablebase, con_offset);
6256 } else {
6257 __ set64(AT, con_offset);
6258 if (UseLoongsonISA) {
6259 __ gsldx($dst$$Register, $constanttablebase, AT, 0);
6260 } else {
6261 __ daddu(AT, $constanttablebase, AT);
6262 __ ld($dst$$Register, AT, 0);
6263 }
6264 }
6265 %}
6266 ins_pipe(ialu_loadI);
6267 %}
6268 */
// Load a long constant that fits in a signed 16-bit immediate: a single
// daddiu from R0 suffices, hence the low cost.
instruct loadConL16(mRegL dst, immL16 src) %{
  match(Set dst src);
  ins_cost(105);
  format %{ "mov $dst, $src #@loadConL16" %}
  ins_encode %{
    Register dst_reg = as_Register($dst$$reg);
    int value = $src$$constant;
    __ daddiu(dst_reg, R0, value);
  %}
  ins_pipe( ialu_regL_regL );
%}

// Load long zero: cheapest form, just copy the hardwired zero register.
instruct loadConL0(mRegL dst, immL0 src) %{
  match(Set dst src);
  ins_cost(100);
  format %{ "mov $dst, zero #@loadConL0" %}
  ins_encode %{
    Register dst_reg = as_Register($dst$$reg);
    __ daddu(dst_reg, R0, R0);
  %}
  ins_pipe( ialu_regL_regL );
%}

// Load Range (array length); encoded like an ordinary int load.
instruct loadRange(mRegI dst, memory mem) %{
  match(Set dst (LoadRange mem));

  ins_cost(125);
  format %{ "MOV $dst,$mem @ loadRange" %}
  ins_encode(load_I_enc(dst, mem));
  ins_pipe( ialu_loadI );
%}
// Store Pointer (full 64-bit)
instruct storeP(memory mem, mRegP src ) %{
  match(Set mem (StoreP mem src));

  ins_cost(125);
  format %{ "sd $src, $mem #@storeP" %}
  ins_encode(store_P_reg_enc(mem, src));
  ins_pipe( ialu_storeI );
%}

// Store NULL Pointer, mark word, or other simple pointer constant.
instruct storeImmP0(memory mem, immP0 zero) %{
  match(Set mem (StoreP mem zero));

  ins_cost(125);
  format %{ "mov $mem, $zero #@storeImmP0" %}
  ins_encode(store_P_immP0_enc(mem));
  ins_pipe( ialu_storeI );
%}

// Store Byte Immediate
instruct storeImmB(memory mem, immI8 src) %{
  match(Set mem (StoreB mem src));

  ins_cost(150);
  format %{ "movb $mem, $src #@storeImmB" %}
  ins_encode(store_B_immI_enc(mem, src));
  ins_pipe( ialu_storeI );
%}

// Store Compressed Pointer (32-bit sw)
instruct storeN(memory mem, mRegN src)
%{
  match(Set mem (StoreN mem src));

  ins_cost(125); // XXX
  format %{ "sw $mem, $src\t# compressed ptr @ storeN" %}
  ins_encode(store_N_reg_enc(mem, src));
  ins_pipe( ialu_storeI );
%}

// Store Compressed Klass Pointer (same encoding as storeN)
instruct storeNKlass(memory mem, mRegN src)
%{
  match(Set mem (StoreNKlass mem src));

  ins_cost(125); // XXX
  format %{ "sw $mem, $src\t# compressed klass ptr @ storeNKlass" %}
  ins_encode(store_N_reg_enc(mem, src));
  ins_pipe( ialu_storeI );
%}

// Store compressed NULL: only legal when both heap and klass bases are NULL,
// so the encoded zero really is the compressed form of NULL.
instruct storeImmN0(memory mem, immN0 zero)
%{
  predicate(Universe::narrow_oop_base() == NULL && Universe::narrow_klass_base() == NULL);
  match(Set mem (StoreN mem zero));

  ins_cost(125); // XXX
  format %{ "storeN0 $mem, R12\t# compressed ptr" %}
  ins_encode(storeImmN0_enc(mem, zero));
  ins_pipe( ialu_storeI );
%}

// Store Byte
instruct storeB(memory mem, mRegI src) %{
  match(Set mem (StoreB mem src));

  ins_cost(125);
  format %{ "sb $src, $mem #@storeB" %}
  ins_encode(store_B_reg_enc(mem, src));
  ins_pipe( ialu_storeI );
%}

// Store Byte from a long source: the ConvL2I is free, sb only uses the low 8 bits.
instruct storeB_convL2I(memory mem, mRegL src) %{
  match(Set mem (StoreB mem (ConvL2I src)));

  ins_cost(125);
  format %{ "sb $src, $mem #@storeB_convL2I" %}
  ins_encode(store_B_reg_enc(mem, src));
  ins_pipe( ialu_storeI );
%}
// Load Byte (8bit signed)
instruct loadB(mRegI dst, memory mem) %{
  match(Set dst (LoadB mem));

  ins_cost(125);
  format %{ "lb $dst, $mem #@loadB" %}
  ins_encode(load_B_enc(dst, mem));
  ins_pipe( ialu_loadI );
%}

// Load Byte widened to Long: a single lb covers load and ConvI2L.
instruct loadB_convI2L(mRegL dst, memory mem) %{
  match(Set dst (ConvI2L (LoadB mem)));

  ins_cost(125);
  format %{ "lb $dst, $mem #@loadB_convI2L" %}
  ins_encode(load_B_enc(dst, mem));
  ins_pipe( ialu_loadI );
%}

// Load Byte (8bit UNsigned)
instruct loadUB(mRegI dst, memory mem) %{
  match(Set dst (LoadUB mem));

  ins_cost(125);
  format %{ "lbu $dst, $mem #@loadUB" %}
  ins_encode(load_UB_enc(dst, mem));
  ins_pipe( ialu_loadI );
%}

// Load Unsigned Byte widened to Long: a single lbu covers load and ConvI2L.
instruct loadUB_convI2L(mRegL dst, memory mem) %{
  match(Set dst (ConvI2L (LoadUB mem)));

  ins_cost(125);
  format %{ "lbu $dst, $mem #@loadUB_convI2L" %}
  ins_encode(load_UB_enc(dst, mem));
  ins_pipe( ialu_loadI );
%}

// Load Short (16bit signed)
instruct loadS(mRegI dst, memory mem) %{
  match(Set dst (LoadS mem));

  ins_cost(125);
  format %{ "lh $dst, $mem #@loadS" %}
  ins_encode(load_S_enc(dst, mem));
  ins_pipe( ialu_loadI );
%}

// Load Short (16 bit signed) to Byte (8 bit signed)
// Matches the (x << 24) >> 24 narrowing idiom and emits a byte load instead.
instruct loadS2B(mRegI dst, memory mem, immI_24 twentyfour) %{
  match(Set dst (RShiftI (LShiftI (LoadS mem) twentyfour) twentyfour));

  ins_cost(125);
  format %{ "lb $dst, $mem\t# short -> byte #@loadS2B" %}
  ins_encode(load_B_enc(dst, mem));
  ins_pipe(ialu_loadI);
%}

// Load Short widened to Long: a single lh covers load and ConvI2L.
instruct loadS_convI2L(mRegL dst, memory mem) %{
  match(Set dst (ConvI2L (LoadS mem)));

  ins_cost(125);
  format %{ "lh $dst, $mem #@loadS_convI2L" %}
  ins_encode(load_S_enc(dst, mem));
  ins_pipe( ialu_loadI );
%}
// Store Integer Immediate
instruct storeImmI(memory mem, immI src) %{
  match(Set mem (StoreI mem src));

  ins_cost(150);
  format %{ "mov $mem, $src #@storeImmI" %}
  ins_encode(store_I_immI_enc(mem, src));
  ins_pipe( ialu_storeI );
%}

// Store Integer
instruct storeI(memory mem, mRegI src) %{
  match(Set mem (StoreI mem src));

  ins_cost(125);
  format %{ "sw $mem, $src #@storeI" %}
  ins_encode(store_I_reg_enc(mem, src));
  ins_pipe( ialu_storeI );
%}

// Store Integer from a long source: ConvL2I is free, sw uses the low 32 bits.
instruct storeI_convL2I(memory mem, mRegL src) %{
  match(Set mem (StoreI mem (ConvL2I src)));

  ins_cost(125);
  format %{ "sw $mem, $src #@storeI_convL2I" %}
  ins_encode(store_I_reg_enc(mem, src));
  ins_pipe( ialu_storeI );
%}

// Load Float
instruct loadF(regF dst, memory mem) %{
  match(Set dst (LoadF mem));

  ins_cost(150);
  format %{ "loadF $dst, $mem #@loadF" %}
  ins_encode(load_F_enc(dst, mem));
  ins_pipe( ialu_loadI );
%}
// Load a general pointer constant.
// Relocated constants (metadata / oops) MUST be emitted with the fixed-length
// patchable 48-bit sequence so that the nativeInst/NativeCall machinery can
// later find and patch the full lui/ori/dsll/ori pattern in place; li48 may
// not guarantee that layout on every value. Non-relocated constants are free
// to use the shortest sequence (set64).
instruct loadConP_general(mRegP dst, immP src) %{
  match(Set dst src);

  ins_cost(120);
  format %{ "li $dst, $src #@loadConP_general" %}

  ins_encode %{
    Register dst = $dst$$Register;
    long* value = (long*)$src$$constant;

    if($src->constant_reloc() == relocInfo::metadata_type){
      int klass_index = __ oop_recorder()->find_index((Klass*)value);
      RelocationHolder rspec = metadata_Relocation::spec(klass_index);

      // Record the relocation, then emit the patchable form right after it.
      __ relocate(rspec);
      __ patchable_set48(dst, (long)value);
    }else if($src->constant_reloc() == relocInfo::oop_type){
      int oop_index = __ oop_recorder()->find_index((jobject)value);
      RelocationHolder rspec = oop_Relocation::spec(oop_index);

      __ relocate(rspec);
      __ patchable_set48(dst, (long)value);
    } else if ($src->constant_reloc() == relocInfo::none) {
      // No relocation: the value never gets patched, use the short form.
      __ set64(dst, (long)value);
    }
  %}

  ins_pipe( ialu_regI_regI );
%}
6521 /*
6522 instruct loadConP_load(mRegP dst, immP_load src) %{
6523 match(Set dst src);
6525 ins_cost(100);
6526 format %{ "ld $dst, [$constanttablebase + $constantoffset] load from constant table: ptr=$src @ loadConP_load" %}
6528 ins_encode %{
6530 int con_offset = $constantoffset($src);
6532 if (Assembler::is_simm16(con_offset)) {
6533 __ ld($dst$$Register, $constanttablebase, con_offset);
6534 } else {
6535 __ set64(AT, con_offset);
6536 if (UseLoongsonISA) {
6537 __ gsldx($dst$$Register, $constanttablebase, AT, 0);
6538 } else {
6539 __ daddu(AT, $constanttablebase, AT);
6540 __ ld($dst$$Register, AT, 0);
6541 }
6542 }
6543 %}
6545 ins_pipe(ialu_loadI);
6546 %}
6547 */
// Load a non-oop pointer constant that is cheap to materialize inline.
instruct loadConP_no_oop_cheap(mRegP dst, immP_no_oop_cheap src) %{
  match(Set dst src);

  ins_cost(80);
  format %{ "li $dst, $src @ loadConP_no_oop_cheap" %}

  ins_encode %{
    __ set64($dst$$Register, $src$$constant);
  %}

  ins_pipe(ialu_regI_regI);
%}

// Load the safepoint polling page address.
instruct loadConP_poll(mRegP dst, immP_poll src) %{
  match(Set dst src);

  ins_cost(50);
  format %{ "li $dst, $src #@loadConP_poll" %}

  ins_encode %{
    Register dst = $dst$$Register;
    intptr_t value = (intptr_t)$src$$constant;

    __ set64(dst, (jlong)value);
  %}

  ins_pipe( ialu_regI_regI );
%}

// Load NULL pointer: just copy the hardwired zero register.
instruct loadConP0(mRegP dst, immP0 src)
%{
  match(Set dst src);

  ins_cost(50);
  format %{ "mov $dst, R0\t# ptr" %}
  ins_encode %{
    Register dst_reg = $dst$$Register;
    __ daddu(dst_reg, R0, R0);
  %}
  ins_pipe( ialu_regI_regI );
%}

// Load compressed NULL pointer.
instruct loadConN0(mRegN dst, immN0 src) %{
  match(Set dst src);
  format %{ "move $dst, R0\t# compressed NULL ptr" %}
  ins_encode %{
    __ move($dst$$Register, R0);
  %}
  ins_pipe( ialu_regI_regI );
%}

// Load a compressed oop constant (emits a relocatable narrow-oop sequence).
instruct loadConN(mRegN dst, immN src) %{
  match(Set dst src);

  ins_cost(125);
  format %{ "li $dst, $src\t# compressed ptr @ loadConN" %}
  ins_encode %{
    Register dst = $dst$$Register;
    __ set_narrow_oop(dst, (jobject)$src$$constant);
  %}
  ins_pipe( ialu_regI_regI ); // XXX
%}

// Load a compressed klass constant (relocatable narrow-klass sequence).
instruct loadConNKlass(mRegN dst, immNKlass src) %{
  match(Set dst src);

  ins_cost(125);
  format %{ "li $dst, $src\t# compressed klass ptr @ loadConNKlass" %}
  ins_encode %{
    Register dst = $dst$$Register;
    __ set_narrow_klass(dst, (Klass*)$src$$constant);
  %}
  ins_pipe( ialu_regI_regI ); // XXX
%}
//FIXME
// Tail Call; Jump from runtime stub to Java code.
// Also known as an 'interprocedural jump'.
// Target of jump will eventually return to caller.
// TailJump below removes the return address.
instruct TailCalljmpInd(mRegP jump_target, mRegP method_oop) %{
  match(TailCall jump_target method_oop );
  ins_cost(300);
  format %{ "JMP $jump_target \t# @TailCalljmpInd" %}

  ins_encode %{
    Register target = $jump_target$$Register;
    Register oop = $method_oop$$Register;

    /* 2012/10/12 Jin: RA will be used in generate_forward_exception() */
    __ push(RA);

    // Method oop is handed over in S3 by convention; then jump.
    __ move(S3, oop);
    __ jr(target);
    __ nop();  // branch delay slot
  %}

  ins_pipe( pipe_jump );
%}
// Create exception oop: created by stack-crawling runtime code.
// Created exception is now available to this handler, and is setup
// just prior to jumping to this handler. No code emitted.
instruct CreateException( a0_RegP ex_oop )
%{
  match(Set ex_oop (CreateEx));

  // use the following format syntax
  format %{ "# exception oop is in A0; no code emitted @CreateException" %}
  ins_encode %{
    /* Jin: X86 leaves this function empty */
    __ block_comment("CreateException is empty in X86/MIPS");
  %}
  ins_pipe( empty );
// ins_pipe( pipe_jump );
%}
6668 /* 2012/9/14 Jin: The mechanism of exception handling is clear now.
6670 - Common try/catch:
6671 2012/9/14 Jin: [stubGenerator_mips.cpp] generate_forward_exception()
6672 |- V0, V1 are created
6673 |- T9 <= SharedRuntime::exception_handler_for_return_address
6674 `- jr T9
6675 `- the caller's exception_handler
6676 `- jr OptoRuntime::exception_blob
6677 `- here
6678 - Rethrow(e.g. 'unwind'):
6679 * The callee:
6680 |- an exception is triggered during execution
6681 `- exits the callee method through RethrowException node
6682 |- The callee pushes exception_oop(T0) and exception_pc(RA)
6683 `- The callee jumps to OptoRuntime::rethrow_stub()
6684 * In OptoRuntime::rethrow_stub:
6685 |- The VM calls _rethrow_Java to determine the return address in the caller method
6686 `- exits the stub with tailjmpInd
6687 |- pops exception_oop(V0) and exception_pc(V1)
6688 `- jumps to the return address(usually an exception_handler)
6689 * The caller:
6690 `- continues processing the exception_blob with V0/V1
6691 */
6693 /*
6694 Disassembling OptoRuntime::rethrow_stub()
6696 ; locals
6697 0x2d3bf320: addiu sp, sp, 0xfffffff8
6698 0x2d3bf324: sw ra, 0x4(sp)
6699 0x2d3bf328: sw fp, 0x0(sp)
6700 0x2d3bf32c: addu fp, sp, zero
6701 0x2d3bf330: addiu sp, sp, 0xfffffff0
6702 0x2d3bf334: sw ra, 0x8(sp)
6703 0x2d3bf338: sw t0, 0x4(sp)
6704 0x2d3bf33c: sw sp, 0x0(sp)
6706 ; get_thread(S2)
6707 0x2d3bf340: addu s2, sp, zero
6708 0x2d3bf344: srl s2, s2, 12
6709 0x2d3bf348: sll s2, s2, 2
6710 0x2d3bf34c: lui at, 0x2c85
6711 0x2d3bf350: addu at, at, s2
6712 0x2d3bf354: lw s2, 0xffffcc80(at)
6714 0x2d3bf358: lw s0, 0x0(sp)
0x2d3bf35c: sw s0, 0x118(s2) // last_sp -> thread
6716 0x2d3bf360: sw s2, 0xc(sp)
6718 ; OptoRuntime::rethrow_C(oopDesc* exception, JavaThread* thread, address ret_pc)
6719 0x2d3bf364: lw a0, 0x4(sp)
6720 0x2d3bf368: lw a1, 0xc(sp)
6721 0x2d3bf36c: lw a2, 0x8(sp)
6722 ;; Java_To_Runtime
6723 0x2d3bf370: lui t9, 0x2c34
6724 0x2d3bf374: addiu t9, t9, 0xffff8a48
6725 0x2d3bf378: jalr t9
6726 0x2d3bf37c: nop
6728 0x2d3bf380: addu s3, v0, zero ; S3: SharedRuntime::raw_exception_handler_for_return_address()
6730 0x2d3bf384: lw s0, 0xc(sp)
6731 0x2d3bf388: sw zero, 0x118(s0)
6732 0x2d3bf38c: sw zero, 0x11c(s0)
6733 0x2d3bf390: lw s1, 0x144(s0) ; ex_oop: S1
6734 0x2d3bf394: addu s2, s0, zero
6735 0x2d3bf398: sw zero, 0x144(s2)
6736 0x2d3bf39c: lw s0, 0x4(s2)
6737 0x2d3bf3a0: addiu s4, zero, 0x0
6738 0x2d3bf3a4: bne s0, s4, 0x2d3bf3d4
6739 0x2d3bf3a8: nop
6740 0x2d3bf3ac: addiu sp, sp, 0x10
6741 0x2d3bf3b0: addiu sp, sp, 0x8
6742 0x2d3bf3b4: lw ra, 0xfffffffc(sp)
6743 0x2d3bf3b8: lw fp, 0xfffffff8(sp)
6744 0x2d3bf3bc: lui at, 0x2b48
6745 0x2d3bf3c0: lw at, 0x100(at)
6747 ; tailjmpInd: Restores exception_oop & exception_pc
6748 0x2d3bf3c4: addu v1, ra, zero
6749 0x2d3bf3c8: addu v0, s1, zero
6750 0x2d3bf3cc: jr s3
6751 0x2d3bf3d0: nop
6752 ; Exception:
6753 0x2d3bf3d4: lui s1, 0x2cc8 ; generate_forward_exception()
6754 0x2d3bf3d8: addiu s1, s1, 0x40
6755 0x2d3bf3dc: addiu s2, zero, 0x0
6756 0x2d3bf3e0: addiu sp, sp, 0x10
6757 0x2d3bf3e4: addiu sp, sp, 0x8
6758 0x2d3bf3e8: lw ra, 0xfffffffc(sp)
6759 0x2d3bf3ec: lw fp, 0xfffffff8(sp)
6760 0x2d3bf3f0: lui at, 0x2b48
6761 0x2d3bf3f4: lw at, 0x100(at)
6762 ; TailCalljmpInd
6763 __ push(RA); ; to be used in generate_forward_exception()
6764 0x2d3bf3f8: addu t7, s2, zero
6765 0x2d3bf3fc: jr s1
6766 0x2d3bf400: nop
6767 */
// Rethrow exception:
// The exception oop will come in the first argument position.
// Then JUMP (not call) to the rethrow stub code.
instruct RethrowException()
%{
  match(Rethrow);

  // use the following format syntax
  format %{ "JMP rethrow_stub #@RethrowException" %}
  ins_encode %{
    __ block_comment("@ RethrowException");

    // Mark the jump site so the runtime-call relocation covers the li/jr pair.
    cbuf.set_insts_mark();
    cbuf.relocate(cbuf.insts_mark(), runtime_call_Relocation::spec());

    // call OptoRuntime::rethrow_stub to get the exception handler in parent method
    __ li(T9, OptoRuntime::rethrow_stub());
    __ jr(T9);
    __ nop();  // branch delay slot
  %}
  ins_pipe( pipe_jump );
%}
// Conditional branch on pointer vs NULL (compares against R0 directly).
instruct branchConP_zero(cmpOpU cmp, mRegP op1, immP0 zero, label labl) %{
  match(If cmp (CmpP op1 zero));
  effect(USE labl);

  ins_cost(180);
  format %{ "b$cmp $op1, R0, $labl #@branchConP_zero" %}

  ins_encode %{
    Register op1 = $op1$$Register;
    Register op2 = R0;
    // Hold the label as a pointer and test it BEFORE dereferencing.  The old
    // code bound a reference via 'Label &L = *($labl$$label);' and then
    // tested 'if (&L)': dereferencing a possibly-NULL pointer is undefined
    // behaviour, and the compiler may fold '&L' to true and drop the
    // fall-back branches entirely.
    Label* L = $labl$$label;
    int flag = $cmp$$cmpcode;

    switch(flag) {
      case 0x01: //equal
        if (L != NULL)
          __ beq(op1, op2, *L);
        else
          __ beq(op1, op2, (int)0);
        break;
      case 0x02: //not_equal
        if (L != NULL)
          __ bne(op1, op2, *L);
        else
          __ bne(op1, op2, (int)0);
        break;
      /* Unsigned compares against NULL are not generated; kept for reference.
      case 0x03: //above
      case 0x04: //above_equal
      case 0x05: //below
      case 0x06: //below_equal
      */
      default:
        Unimplemented();
    }
    __ nop();  // branch delay slot
  %}

  ins_pc_relative(1);
  ins_pipe( pipe_alu_branch );
%}
// Conditional branch on two pointers.  Unsigned orderings are synthesized
// with sltu into AT followed by a beq/bne against R0.
instruct branchConP(cmpOpU cmp, mRegP op1, mRegP op2, label labl) %{
  match(If cmp (CmpP op1 op2));
// predicate(can_branch_register(_kids[0]->_leaf, _kids[1]->_leaf));
  effect(USE labl);

  ins_cost(200);
  format %{ "b$cmp $op1, $op2, $labl #@branchConP" %}

  ins_encode %{
    Register op1 = $op1$$Register;
    Register op2 = $op2$$Register;
    // Hold the label as a pointer and test it BEFORE dereferencing: the old
    // 'Label &L = *(...); if (&L)' form dereferenced a possibly-NULL pointer,
    // which is undefined behaviour and lets the compiler assume '&L' is true.
    Label* L = $labl$$label;
    int flag = $cmp$$cmpcode;

    switch(flag) {
      case 0x01: //equal
        if (L != NULL)
          __ beq(op1, op2, *L);
        else
          __ beq(op1, op2, (int)0);
        break;
      case 0x02: //not_equal
        if (L != NULL)
          __ bne(op1, op2, *L);
        else
          __ bne(op1, op2, (int)0);
        break;
      case 0x03: //above
        __ sltu(AT, op2, op1);
        if (L != NULL)
          __ bne(R0, AT, *L);
        else
          __ bne(R0, AT, (int)0);
        break;
      case 0x04: //above_equal
        __ sltu(AT, op1, op2);
        if (L != NULL)
          __ beq(AT, R0, *L);
        else
          __ beq(AT, R0, (int)0);
        break;
      case 0x05: //below
        __ sltu(AT, op1, op2);
        if (L != NULL)
          __ bne(R0, AT, *L);
        else
          __ bne(R0, AT, (int)0);
        break;
      case 0x06: //below_equal
        __ sltu(AT, op2, op1);
        if (L != NULL)
          __ beq(AT, R0, *L);
        else
          __ beq(AT, R0, (int)0);
        break;
      default:
        Unimplemented();
    }
    __ nop();  // branch delay slot
  %}

  ins_pc_relative(1);
  ins_pipe( pipe_alu_branch );
%}
// Conditional branch on compressed pointer vs NULL (compares against R0).
instruct cmpN_null_branch(cmpOp cmp, mRegN op1, immN0 null, label labl) %{
  match(If cmp (CmpN op1 null));
  effect(USE labl);

  ins_cost(180);
  format %{ "CMP $op1,0\t! compressed ptr\n\t"
            "BP$cmp $labl @ cmpN_null_branch" %}
  ins_encode %{
    Register op1 = $op1$$Register;
    Register op2 = R0;
    // Hold the label as a pointer and test it BEFORE dereferencing; the old
    // 'Label &L = *(...); if (&L)' form was undefined behaviour for a NULL
    // label pointer and could lose the fall-back encodings.
    Label* L = $labl$$label;
    int flag = $cmp$$cmpcode;

    switch(flag) {
      case 0x01: //equal
        if (L != NULL)
          __ beq(op1, op2, *L);
        else
          __ beq(op1, op2, (int)0);
        break;
      case 0x02: //not_equal
        if (L != NULL)
          __ bne(op1, op2, *L);
        else
          __ bne(op1, op2, (int)0);
        break;
      default:
        Unimplemented();
    }
    __ nop();  // branch delay slot
  %}
//TODO: pipe_branchP or create pipe_branchN LEE
  ins_pc_relative(1);
  ins_pipe( pipe_alu_branch );
%}
// Conditional branch on two compressed pointers; unsigned orderings are
// synthesized with sltu into AT.
instruct cmpN_reg_branch(cmpOp cmp, mRegN op1, mRegN op2, label labl) %{
  match(If cmp (CmpN op1 op2));
  effect(USE labl);

  ins_cost(180);
  format %{ "CMP $op1,$op2\t! compressed ptr\n\t"
            "BP$cmp $labl" %}
  ins_encode %{
    Register op1_reg = $op1$$Register;
    Register op2_reg = $op2$$Register;
    // Hold the label as a pointer and test it BEFORE dereferencing; the old
    // 'Label &L = *(...); if (&L)' form was undefined behaviour for a NULL
    // label pointer (compilers may fold '&L' to true).
    Label* L = $labl$$label;
    int flag = $cmp$$cmpcode;

    switch(flag) {
      case 0x01: //equal
        if (L != NULL)
          __ beq(op1_reg, op2_reg, *L);
        else
          __ beq(op1_reg, op2_reg, (int)0);
        break;
      case 0x02: //not_equal
        if (L != NULL)
          __ bne(op1_reg, op2_reg, *L);
        else
          __ bne(op1_reg, op2_reg, (int)0);
        break;
      case 0x03: //above
        __ sltu(AT, op2_reg, op1_reg);
        if (L != NULL)
          __ bne(R0, AT, *L);
        else
          __ bne(R0, AT, (int)0);
        break;
      case 0x04: //above_equal
        __ sltu(AT, op1_reg, op2_reg);
        if (L != NULL)
          __ beq(AT, R0, *L);
        else
          __ beq(AT, R0, (int)0);
        break;
      case 0x05: //below
        __ sltu(AT, op1_reg, op2_reg);
        if (L != NULL)
          __ bne(R0, AT, *L);
        else
          __ bne(R0, AT, (int)0);
        break;
      case 0x06: //below_equal
        __ sltu(AT, op2_reg, op1_reg);
        if (L != NULL)
          __ beq(AT, R0, *L);
        else
          __ beq(AT, R0, (int)0);
        break;
      default:
        Unimplemented();
    }
    __ nop();  // branch delay slot
  %}
  ins_pc_relative(1);
  ins_pipe( pipe_alu_branch );
%}
// Unsigned integer conditional branch, register vs register.  Note the
// AT-first operand order for the synthesized compares (bne(AT, R0, ...)),
// preserved from the original encoding.
instruct branchConIU_reg_reg(cmpOpU cmp, mRegI src1, mRegI src2, label labl) %{
  match( If cmp (CmpU src1 src2) );
  effect(USE labl);
  format %{ "BR$cmp $src1, $src2, $labl #@branchConIU_reg_reg" %}

  ins_encode %{
    Register op1 = $src1$$Register;
    Register op2 = $src2$$Register;
    // Hold the label as a pointer and test it BEFORE dereferencing; the old
    // 'Label &L = *(...); if (&L)' form was undefined behaviour for a NULL
    // label pointer and could silently drop the else encodings.
    Label* L = $labl$$label;
    int flag = $cmp$$cmpcode;

    switch(flag) {
      case 0x01: //equal
        if (L != NULL)
          __ beq(op1, op2, *L);
        else
          __ beq(op1, op2, (int)0);
        break;
      case 0x02: //not_equal
        if (L != NULL)
          __ bne(op1, op2, *L);
        else
          __ bne(op1, op2, (int)0);
        break;
      case 0x03: //above
        __ sltu(AT, op2, op1);
        if (L != NULL)
          __ bne(AT, R0, *L);
        else
          __ bne(AT, R0, (int)0);
        break;
      case 0x04: //above_equal
        __ sltu(AT, op1, op2);
        if (L != NULL)
          __ beq(AT, R0, *L);
        else
          __ beq(AT, R0, (int)0);
        break;
      case 0x05: //below
        __ sltu(AT, op1, op2);
        if (L != NULL)
          __ bne(AT, R0, *L);
        else
          __ bne(AT, R0, (int)0);
        break;
      case 0x06: //below_equal
        __ sltu(AT, op2, op1);
        if (L != NULL)
          __ beq(AT, R0, *L);
        else
          __ beq(AT, R0, (int)0);
        break;
      default:
        Unimplemented();
    }
    __ nop();  // branch delay slot
  %}

  ins_pc_relative(1);
  ins_pipe( pipe_alu_branch );
%}
// Branch on an unsigned compare of a register against an arbitrary int
// immediate, which is first materialized into AT.  Clobbers AT.
// Fixed: replaced the always-true 'if (&L)' test with a Label* test.
instruct branchConIU_reg_imm(cmpOpU cmp, mRegI src1, immI src2, label labl) %{
  match( If cmp (CmpU src1 src2) );
  effect(USE labl);
  format %{ "BR$cmp $src1, $src2, $labl #@branchConIU_reg_imm" %}

  ins_encode %{
    Register op1 = $src1$$Register;
    int val = $src2$$constant;
    Label* L = $labl$$label;
    int flag = $cmp$$cmpcode;

    __ move(AT, val);
    switch(flag)
    {
      case 0x01: //equal
        if (L)
          __ beq(op1, AT, *L);
        else
          __ beq(op1, AT, (int)0);
        break;
      case 0x02: //not_equal
        if (L)
          __ bne(op1, AT, *L);
        else
          __ bne(op1, AT, (int)0);
        break;
      case 0x03: //above
        __ sltu(AT, AT, op1);
        if (L)
          __ bne(R0, AT, *L);
        else
          __ bne(R0, AT, (int)0);
        break;
      case 0x04: //above_equal
        __ sltu(AT, op1, AT);
        if (L)
          __ beq(AT, R0, *L);
        else
          __ beq(AT, R0, (int)0);
        break;
      case 0x05: //below
        __ sltu(AT, op1, AT);
        if (L)
          __ bne(R0, AT, *L);
        else
          __ bne(R0, AT, (int)0);
        break;
      case 0x06: //below_equal
        __ sltu(AT, AT, op1);
        if (L)
          __ beq(AT, R0, *L);
        else
          __ beq(AT, R0, (int)0);
        break;
      default:
        Unimplemented();
    }
    __ nop(); // fill the branch delay slot
  %}

  ins_pc_relative(1);
  ins_pipe( pipe_alu_branch );
%}
// Branch on a signed int compare of two registers.  Clobbers AT.
// Fixed: replaced the always-true 'if (&L)' test with a Label* test.
instruct branchConI_reg_reg(cmpOp cmp, mRegI src1, mRegI src2, label labl) %{
  match( If cmp (CmpI src1 src2) );
  effect(USE labl);
  format %{ "BR$cmp $src1, $src2, $labl #@branchConI_reg_reg" %}

  ins_encode %{
    Register op1 = $src1$$Register;
    Register op2 = $src2$$Register;
    Label* L = $labl$$label;
    int flag = $cmp$$cmpcode;

    switch(flag)
    {
      case 0x01: //equal
        if (L)
          __ beq(op1, op2, *L);
        else
          __ beq(op1, op2, (int)0);
        break;
      case 0x02: //not_equal
        if (L)
          __ bne(op1, op2, *L);
        else
          __ bne(op1, op2, (int)0);
        break;
      case 0x03: //above
        __ slt(AT, op2, op1);
        if (L)
          __ bne(R0, AT, *L);
        else
          __ bne(R0, AT, (int)0);
        break;
      case 0x04: //above_equal
        __ slt(AT, op1, op2);
        if (L)
          __ beq(AT, R0, *L);
        else
          __ beq(AT, R0, (int)0);
        break;
      case 0x05: //below
        __ slt(AT, op1, op2);
        if (L)
          __ bne(R0, AT, *L);
        else
          __ bne(R0, AT, (int)0);
        break;
      case 0x06: //below_equal
        __ slt(AT, op2, op1);
        if (L)
          __ beq(AT, R0, *L);
        else
          __ beq(AT, R0, (int)0);
        break;
      default:
        Unimplemented();
    }
    __ nop(); // fill the branch delay slot
  %}

  ins_pc_relative(1);
  ins_pipe( pipe_alu_branch );
%}
// Branch on a signed int compare against zero: uses the MIPS
// compare-with-zero branches (bgtz/bgez/bltz/blez) so no scratch
// register is needed.
// Fixed: replaced the always-true 'if (&L)' test with a Label* test and
// dropped dead commented-out code.
instruct branchConI_reg_imm0(cmpOp cmp, mRegI src1, immI0 src2, label labl) %{
  match( If cmp (CmpI src1 src2) );
  effect(USE labl);
  ins_cost(170);
  format %{ "BR$cmp $src1, $src2, $labl #@branchConI_reg_imm0" %}

  ins_encode %{
    Register op1 = $src1$$Register;
    Label* L = $labl$$label;
    int flag = $cmp$$cmpcode;

    switch(flag)
    {
      case 0x01: //equal
        if (L)
          __ beq(op1, R0, *L);
        else
          __ beq(op1, R0, (int)0);
        break;
      case 0x02: //not_equal
        if (L)
          __ bne(op1, R0, *L);
        else
          __ bne(op1, R0, (int)0);
        break;
      case 0x03: //greater
        if (L)
          __ bgtz(op1, *L);
        else
          __ bgtz(op1, (int)0);
        break;
      case 0x04: //greater_equal
        if (L)
          __ bgez(op1, *L);
        else
          __ bgez(op1, (int)0);
        break;
      case 0x05: //less
        if (L)
          __ bltz(op1, *L);
        else
          __ bltz(op1, (int)0);
        break;
      case 0x06: //less_equal
        if (L)
          __ blez(op1, *L);
        else
          __ blez(op1, (int)0);
        break;
      default:
        Unimplemented();
    }
    __ nop(); // fill the branch delay slot
  %}

  ins_pc_relative(1);
  ins_pipe( pipe_alu_branch );
%}
// Branch on a signed int compare against an arbitrary immediate, which is
// first materialized into AT.  Clobbers AT.
// Fixed: replaced the always-true 'if (&L)' test with a Label* test.
instruct branchConI_reg_imm(cmpOp cmp, mRegI src1, immI src2, label labl) %{
  match( If cmp (CmpI src1 src2) );
  effect(USE labl);
  ins_cost(200);
  format %{ "BR$cmp $src1, $src2, $labl #@branchConI_reg_imm" %}

  ins_encode %{
    Register op1 = $src1$$Register;
    int val = $src2$$constant;
    Label* L = $labl$$label;
    int flag = $cmp$$cmpcode;

    __ move(AT, val);
    switch(flag)
    {
      case 0x01: //equal
        if (L)
          __ beq(op1, AT, *L);
        else
          __ beq(op1, AT, (int)0);
        break;
      case 0x02: //not_equal
        if (L)
          __ bne(op1, AT, *L);
        else
          __ bne(op1, AT, (int)0);
        break;
      case 0x03: //greater
        __ slt(AT, AT, op1);
        if (L)
          __ bne(R0, AT, *L);
        else
          __ bne(R0, AT, (int)0);
        break;
      case 0x04: //greater_equal
        __ slt(AT, op1, AT);
        if (L)
          __ beq(AT, R0, *L);
        else
          __ beq(AT, R0, (int)0);
        break;
      case 0x05: //less
        __ slt(AT, op1, AT);
        if (L)
          __ bne(R0, AT, *L);
        else
          __ bne(R0, AT, (int)0);
        break;
      case 0x06: //less_equal
        __ slt(AT, AT, op1);
        if (L)
          __ beq(AT, R0, *L);
        else
          __ beq(AT, R0, (int)0);
        break;
      default:
        Unimplemented();
    }
    __ nop(); // fill the branch delay slot
  %}

  ins_pc_relative(1);
  ins_pipe( pipe_alu_branch );
%}
// Branch on an unsigned compare against zero.  Several cases degenerate:
// above_equal (x >= 0u) is always taken, below (x < 0u) is never taken,
// above (x > 0u) is just x != 0, below_equal (x <= 0u) is x == 0.
// Fixed: replaced the always-true 'if (&L)' test with a Label* test.
instruct branchConIU_reg_imm0(cmpOpU cmp, mRegI src1, immI0 zero, label labl) %{
  match( If cmp (CmpU src1 zero) );
  effect(USE labl);
  format %{ "BR$cmp $src1, zero, $labl #@branchConIU_reg_imm0" %}

  ins_encode %{
    Register op1 = $src1$$Register;
    Label* L = $labl$$label;
    int flag = $cmp$$cmpcode;

    switch(flag)
    {
      case 0x01: //equal
        if (L)
          __ beq(op1, R0, *L);
        else
          __ beq(op1, R0, (int)0);
        break;
      case 0x02: //not_equal
        if (L)
          __ bne(op1, R0, *L);
        else
          __ bne(op1, R0, (int)0);
        break;
      case 0x03: //above
        if (L)
          __ bne(R0, op1, *L);
        else
          __ bne(R0, op1, (int)0);
        break;
      case 0x04: //above_equal
        // Unsigned x >= 0 is always true: emit an unconditional branch.
        if (L)
          __ beq(R0, R0, *L);
        else
          __ beq(R0, R0, (int)0);
        break;
      case 0x05: //below
        // Unsigned x < 0 is never true: emit nothing (no branch, so no
        // delay slot to fill either).
        return;
        break;
      case 0x06: //below_equal
        if (L)
          __ beq(op1, R0, *L);
        else
          __ beq(op1, R0, (int)0);
        break;
      default:
        Unimplemented();
    }
    __ nop(); // fill the branch delay slot
  %}

  ins_pc_relative(1);
  ins_pipe( pipe_alu_branch );
%}
// Branch on an unsigned compare against a 16-bit immediate.  above_equal
// and below can use sltiu directly; the other cases materialize the
// immediate into AT first.  Clobbers AT.
// Fixed: replaced the always-true 'if (&L)' test with a Label* test.
instruct branchConIU_reg_immI16(cmpOpU cmp, mRegI src1, immI16 src2, label labl) %{
  match( If cmp (CmpU src1 src2) );
  effect(USE labl);
  ins_cost(180);
  format %{ "BR$cmp $src1, $src2, $labl #@branchConIU_reg_immI16" %}

  ins_encode %{
    Register op1 = $src1$$Register;
    int val = $src2$$constant;
    Label* L = $labl$$label;
    int flag = $cmp$$cmpcode;

    switch(flag)
    {
      case 0x01: //equal
        __ move(AT, val);
        if (L)
          __ beq(op1, AT, *L);
        else
          __ beq(op1, AT, (int)0);
        break;
      case 0x02: //not_equal
        __ move(AT, val);
        if (L)
          __ bne(op1, AT, *L);
        else
          __ bne(op1, AT, (int)0);
        break;
      case 0x03: //above
        __ move(AT, val);
        __ sltu(AT, AT, op1);
        if (L)
          __ bne(R0, AT, *L);
        else
          __ bne(R0, AT, (int)0);
        break;
      case 0x04: //above_equal
        __ sltiu(AT, op1, val);
        if (L)
          __ beq(AT, R0, *L);
        else
          __ beq(AT, R0, (int)0);
        break;
      case 0x05: //below
        __ sltiu(AT, op1, val);
        if (L)
          __ bne(R0, AT, *L);
        else
          __ bne(R0, AT, (int)0);
        break;
      case 0x06: //below_equal
        __ move(AT, val);
        __ sltu(AT, AT, op1);
        if (L)
          __ beq(AT, R0, *L);
        else
          __ beq(AT, R0, (int)0);
        break;
      default:
        Unimplemented();
    }
    __ nop(); // fill the branch delay slot
  %}

  ins_pc_relative(1);
  ins_pipe( pipe_alu_branch );
%}
// Branch on a signed long compare of two registers.  Clobbers AT.
// Unlike the int variants this one fills each branch's delay slot with
// an explicit delayed()->nop().
// Fixed: replaced the always-true 'if (&target)' test with a Label* test.
instruct branchConL_regL_regL(cmpOp cmp, mRegL src1, mRegL src2, label labl) %{
  match( If cmp (CmpL src1 src2) );
  effect(USE labl);
  format %{ "BR$cmp $src1, $src2, $labl #@branchConL_regL_regL" %}
  ins_cost(250);

  ins_encode %{
    Register opr1_reg = as_Register($src1$$reg);
    Register opr2_reg = as_Register($src2$$reg);
    Label* target = $labl$$label;
    int flag = $cmp$$cmpcode;

    switch(flag)
    {
      case 0x01: //equal
        if (target)
          __ beq(opr1_reg, opr2_reg, *target);
        else
          __ beq(opr1_reg, opr2_reg, (int)0);
        __ delayed()->nop();
        break;

      case 0x02: //not_equal
        if (target)
          __ bne(opr1_reg, opr2_reg, *target);
        else
          __ bne(opr1_reg, opr2_reg, (int)0);
        __ delayed()->nop();
        break;

      case 0x03: //greater
        __ slt(AT, opr2_reg, opr1_reg);
        if (target)
          __ bne(AT, R0, *target);
        else
          __ bne(AT, R0, (int)0);
        __ delayed()->nop();
        break;

      case 0x04: //greater_equal
        __ slt(AT, opr1_reg, opr2_reg);
        if (target)
          __ beq(AT, R0, *target);
        else
          __ beq(AT, R0, (int)0);
        __ delayed()->nop();
        break;

      case 0x05: //less
        __ slt(AT, opr1_reg, opr2_reg);
        if (target)
          __ bne(AT, R0, *target);
        else
          __ bne(AT, R0, (int)0);
        __ delayed()->nop();
        break;

      case 0x06: //less_equal
        __ slt(AT, opr2_reg, opr1_reg);
        if (target)
          __ beq(AT, R0, *target);
        else
          __ beq(AT, R0, (int)0);
        __ delayed()->nop();
        break;

      default:
        Unimplemented();
    }
  %}

  ins_pc_relative(1);
  ins_pipe( pipe_alu_branch );
%}
// Branch on a signed long compare against an immediate whose negation
// fits in 16 bits (immL16_sub): compute op1 - val into AT with daddiu,
// then use the compare-with-zero branches.  Clobbers AT.
// Fixed: replaced the always-true 'if (&L)' test with a Label* test.
instruct branchConL_reg_immL16_sub(cmpOp cmp, mRegL src1, immL16_sub src2, label labl) %{
  match( If cmp (CmpL src1 src2) );
  effect(USE labl);
  ins_cost(180);
  format %{ "BR$cmp $src1, $src2, $labl #@branchConL_reg_immL16_sub" %}

  ins_encode %{
    Register op1 = $src1$$Register;
    int val = $src2$$constant;
    Label* L = $labl$$label;
    int flag = $cmp$$cmpcode;

    __ daddiu(AT, op1, -1 * val);
    switch(flag)
    {
      case 0x01: //equal
        if (L)
          __ beq(R0, AT, *L);
        else
          __ beq(R0, AT, (int)0);
        break;
      case 0x02: //not_equal
        if (L)
          __ bne(R0, AT, *L);
        else
          __ bne(R0, AT, (int)0);
        break;
      case 0x03: //greater
        if (L)
          __ bgtz(AT, *L);
        else
          __ bgtz(AT, (int)0);
        break;
      case 0x04: //greater_equal
        if (L)
          __ bgez(AT, *L);
        else
          __ bgez(AT, (int)0);
        break;
      case 0x05: //less
        if (L)
          __ bltz(AT, *L);
        else
          __ bltz(AT, (int)0);
        break;
      case 0x06: //less_equal
        if (L)
          __ blez(AT, *L);
        else
          __ blez(AT, (int)0);
        break;
      default:
        Unimplemented();
    }
    __ nop(); // fill the branch delay slot
  %}

  ins_pc_relative(1);
  ins_pipe( pipe_alu_branch );
%}
// Branch on a signed int compare against an immediate whose negation fits
// in 16 bits (immI16_sub): compute op1 - val into AT with addiu32, then
// use the compare-with-zero branches.  Clobbers AT.
// Fixed: replaced the always-true 'if (&L)' test with a Label* test.
instruct branchConI_reg_imm16_sub(cmpOp cmp, mRegI src1, immI16_sub src2, label labl) %{
  match( If cmp (CmpI src1 src2) );
  effect(USE labl);
  ins_cost(180);
  format %{ "BR$cmp $src1, $src2, $labl #@branchConI_reg_imm16_sub" %}

  ins_encode %{
    Register op1 = $src1$$Register;
    int val = $src2$$constant;
    Label* L = $labl$$label;
    int flag = $cmp$$cmpcode;

    __ addiu32(AT, op1, -1 * val);
    switch(flag)
    {
      case 0x01: //equal
        if (L)
          __ beq(R0, AT, *L);
        else
          __ beq(R0, AT, (int)0);
        break;
      case 0x02: //not_equal
        if (L)
          __ bne(R0, AT, *L);
        else
          __ bne(R0, AT, (int)0);
        break;
      case 0x03: //greater
        if (L)
          __ bgtz(AT, *L);
        else
          __ bgtz(AT, (int)0);
        break;
      case 0x04: //greater_equal
        if (L)
          __ bgez(AT, *L);
        else
          __ bgez(AT, (int)0);
        break;
      case 0x05: //less
        if (L)
          __ bltz(AT, *L);
        else
          __ bltz(AT, (int)0);
        break;
      case 0x06: //less_equal
        if (L)
          __ blez(AT, *L);
        else
          __ blez(AT, (int)0);
        break;
      default:
        Unimplemented();
    }
    __ nop(); // fill the branch delay slot
  %}

  ins_pc_relative(1);
  ins_pipe( pipe_alu_branch );
%}
// Branch on a signed long compare against zero using the MIPS
// compare-with-zero branches; only the 'less' case needs AT.
// Fixed: replaced the always-true 'if (&target)' test with a Label* test,
// and normalized the int(0) spellings to (int)0.
instruct branchConL_regL_immL0(cmpOp cmp, mRegL src1, immL0 zero, label labl) %{
  match( If cmp (CmpL src1 zero) );
  effect(USE labl);
  format %{ "BR$cmp $src1, zero, $labl #@branchConL_regL_immL0" %}
  ins_cost(150);

  ins_encode %{
    Register opr1_reg = as_Register($src1$$reg);
    Label* target = $labl$$label;
    int flag = $cmp$$cmpcode;

    switch(flag)
    {
      case 0x01: //equal
        if (target)
          __ beq(opr1_reg, R0, *target);
        else
          __ beq(opr1_reg, R0, (int)0);
        break;

      case 0x02: //not_equal
        if (target)
          __ bne(opr1_reg, R0, *target);
        else
          __ bne(opr1_reg, R0, (int)0);
        break;

      case 0x03: //greater
        if (target)
          __ bgtz(opr1_reg, *target);
        else
          __ bgtz(opr1_reg, (int)0);
        break;

      case 0x04: //greater_equal
        if (target)
          __ bgez(opr1_reg, *target);
        else
          __ bgez(opr1_reg, (int)0);
        break;

      case 0x05: //less
        __ slt(AT, opr1_reg, R0);
        if (target)
          __ bne(AT, R0, *target);
        else
          __ bne(AT, R0, (int)0);
        break;

      case 0x06: //less_equal
        if (target)
          __ blez(opr1_reg, *target);
        else
          __ blez(opr1_reg, (int)0);
        break;

      default:
        Unimplemented();
    }
    __ delayed()->nop(); // fill the branch delay slot
  %}

  ins_pc_relative(1);
  ins_pipe( pipe_alu_branch );
%}
//FIXME
// Branch on a float compare: sets the FPU condition flag with c.cond.s
// and branches with bc1t/bc1f.  'greater'/'greater_equal' use unordered
// compares negated (bc1f) so a NaN operand falls through (not taken),
// matching Java float comparison semantics.
// Fixed: replaced the always-true 'if (&L)' test with a Label* test.
instruct branchConF_reg_reg(cmpOp cmp, regF src1, regF src2, label labl) %{
  match( If cmp (CmpF src1 src2) );
  effect(USE labl);
  format %{ "BR$cmp $src1, $src2, $labl #@branchConF_reg_reg" %}

  ins_encode %{
    FloatRegister reg_op1 = $src1$$FloatRegister;
    FloatRegister reg_op2 = $src2$$FloatRegister;
    Label* L = $labl$$label;
    int flag = $cmp$$cmpcode;

    switch(flag)
    {
      case 0x01: //equal
        __ c_eq_s(reg_op1, reg_op2);
        if (L)
          __ bc1t(*L);
        else
          __ bc1t((int)0);
        break;
      case 0x02: //not_equal
        __ c_eq_s(reg_op1, reg_op2);
        if (L)
          __ bc1f(*L);
        else
          __ bc1f((int)0);
        break;
      case 0x03: //greater
        __ c_ule_s(reg_op1, reg_op2);
        if (L)
          __ bc1f(*L);
        else
          __ bc1f((int)0);
        break;
      case 0x04: //greater_equal
        __ c_ult_s(reg_op1, reg_op2);
        if (L)
          __ bc1f(*L);
        else
          __ bc1f((int)0);
        break;
      case 0x05: //less
        __ c_ult_s(reg_op1, reg_op2);
        if (L)
          __ bc1t(*L);
        else
          __ bc1t((int)0);
        break;
      case 0x06: //less_equal
        __ c_ule_s(reg_op1, reg_op2);
        if (L)
          __ bc1t(*L);
        else
          __ bc1t((int)0);
        break;
      default:
        Unimplemented();
    }
    __ nop(); // fill the branch delay slot
  %}

  ins_pc_relative(1);
  ins_pipe(pipe_slow);
%}
// Branch on a double compare: same scheme as the float variant, using the
// double-precision c.cond.d compares.
// Fixed: replaced the always-true 'if (&L)' test with a Label* test.
instruct branchConD_reg_reg(cmpOp cmp, regD src1, regD src2, label labl) %{
  match( If cmp (CmpD src1 src2) );
  effect(USE labl);
  format %{ "BR$cmp $src1, $src2, $labl #@branchConD_reg_reg" %}

  ins_encode %{
    FloatRegister reg_op1 = $src1$$FloatRegister;
    FloatRegister reg_op2 = $src2$$FloatRegister;
    Label* L = $labl$$label;
    int flag = $cmp$$cmpcode;

    switch(flag)
    {
      case 0x01: //equal
        __ c_eq_d(reg_op1, reg_op2);
        if (L)
          __ bc1t(*L);
        else
          __ bc1t((int)0);
        break;
      case 0x02: //not_equal
        // 2016/4/19 aoqi: c_ueq_d cannot distinguish NaN from equal. Double.isNaN(Double) is implemented by 'f != f', so the use of c_ueq_d causes bugs.
        __ c_eq_d(reg_op1, reg_op2);
        if (L)
          __ bc1f(*L);
        else
          __ bc1f((int)0);
        break;
      case 0x03: //greater
        __ c_ule_d(reg_op1, reg_op2);
        if (L)
          __ bc1f(*L);
        else
          __ bc1f((int)0);
        break;
      case 0x04: //greater_equal
        __ c_ult_d(reg_op1, reg_op2);
        if (L)
          __ bc1f(*L);
        else
          __ bc1f((int)0);
        break;
      case 0x05: //less
        __ c_ult_d(reg_op1, reg_op2);
        if (L)
          __ bc1t(*L);
        else
          __ bc1t((int)0);
        break;
      case 0x06: //less_equal
        __ c_ule_d(reg_op1, reg_op2);
        if (L)
          __ bc1t(*L);
        else
          __ bc1t((int)0);
        break;
      default:
        Unimplemented();
    }
    __ nop(); // fill the branch delay slot
  %}

  ins_pc_relative(1);
  ins_pipe(pipe_slow);
%}
7874 // Call Runtime Instruction
// Direct call into the VM runtime.  The actual code emission comes from
// the Java_To_Runtime encode class, defined elsewhere in this file.
// Aligned to 16 bytes (ins_alignment) -- presumably so the call site is
// patchable as a unit; TODO confirm against NativeCall on this port.
instruct CallRuntimeDirect(method meth) %{
  match(CallRuntime );
  effect(USE meth);

  ins_cost(300);
  format %{ "CALL,runtime #@CallRuntimeDirect" %}
  ins_encode( Java_To_Runtime( meth ) );
  ins_pipe( pipe_slow );
  ins_alignment(16);
%}
7888 //------------------------MemBar Instructions-------------------------------
7889 //Memory barrier flavors
// MemBarAcquire: emits no code (size 0) on this port.
// NOTE(review): assumes acquire ordering is already provided by the
// preceding load/CAS sequence -- confirm against the port's memory model.
instruct membar_acquire() %{
  match(MemBarAcquire);
  ins_cost(0);

  size(0);
  format %{ "MEMBAR-acquire (empty) @ membar_acquire" %}
  ins_encode();
  ins_pipe(empty);
%}
// LoadFence: emits a full MIPS 'sync' barrier.
instruct load_fence() %{
  match(LoadFence);
  ins_cost(400);

  format %{ "MEMBAR @ load_fence" %}
  ins_encode %{
    __ sync();
  %}
  ins_pipe(pipe_slow);
%}
// MemBarAcquireLock: empty -- per the format string, the acquire is part
// of the CAS emitted by the preceding FastLock.
instruct membar_acquire_lock()
%{
  match(MemBarAcquireLock);
  ins_cost(0);

  size(0);
  format %{ "MEMBAR-acquire (acquire as part of CAS in prior FastLock so empty encoding) @ membar_acquire_lock" %}
  ins_encode();
  ins_pipe(empty);
%}
// MemBarRelease: emits no code (size 0) on this port.
// NOTE(review): assumes release ordering is supplied by the following
// store/CAS sequence -- confirm against the port's memory model.
instruct membar_release() %{
  match(MemBarRelease);
  ins_cost(0);

  size(0);
  format %{ "MEMBAR-release (empty) @ membar_release" %}
  ins_encode();
  ins_pipe(empty);
%}
// StoreFence: emits a full MIPS 'sync' barrier.
instruct store_fence() %{
  match(StoreFence);
  ins_cost(400);

  format %{ "MEMBAR @ store_fence" %}

  ins_encode %{
    __ sync();
  %}

  ins_pipe(pipe_slow);
%}
// MemBarReleaseLock: empty -- per the format string, the release is part
// of the preceding FastUnlock sequence.
instruct membar_release_lock()
%{
  match(MemBarReleaseLock);
  ins_cost(0);

  size(0);
  format %{ "MEMBAR-release-lock (release in FastUnlock so empty) @ membar_release_lock" %}
  ins_encode();
  ins_pipe(empty);
%}
// MemBarVolatile: full 'sync' barrier, skipped entirely on uniprocessor
// systems where no ordering is observable.
instruct membar_volatile() %{
  match(MemBarVolatile);
  ins_cost(400);

  format %{ "MEMBAR-volatile" %}
  ins_encode %{
    if( !os::is_MP() ) return; // Not needed on single CPU
    __ sync();

  %}
  ins_pipe(pipe_slow);
%}
// MemBarVolatile that the matcher proved redundant (a store-load barrier
// already follows, per Matcher::post_store_load_barrier): emits nothing.
instruct unnecessary_membar_volatile() %{
  match(MemBarVolatile);
  predicate(Matcher::post_store_load_barrier(n));
  ins_cost(0);

  size(0);
  format %{ "MEMBAR-volatile (unnecessary so empty encoding) @ unnecessary_membar_volatile" %}
  ins_encode( );
  ins_pipe(empty);
%}
// MemBarStoreStore: emits no code (size 0) on this port.
// NOTE(review): store-store ordering is assumed free on this CPU --
// confirm against the Loongson memory model.
instruct membar_storestore() %{
  match(MemBarStoreStore);

  ins_cost(0);
  size(0);
  format %{ "MEMBAR-storestore (empty encoding) @ membar_storestore" %}
  ins_encode( );
  ins_pipe(empty);
%}
7992 //----------Move Instructions--------------------------------------------------
// CastX2P: reinterpret a long value as a pointer.  No bits change; a move
// is emitted only when the allocator assigned different registers.
instruct castX2P(mRegP dst, mRegL src) %{
  match(Set dst (CastX2P src));
  format %{ "castX2P $dst, $src @ castX2P" %}
  ins_encode %{
    Register to   = $dst$$Register;
    Register from = $src$$Register;

    if (to != from) {
      __ move(to, from);
    }
  %}
  ins_cost(10);
  ins_pipe( ialu_regI_mov );
%}
// CastP2X: reinterpret a pointer as a long value.  No bits change; a move
// is emitted only when the allocator assigned different registers.
instruct castP2X(mRegL dst, mRegP src ) %{
  match(Set dst (CastP2X src));

  format %{ "mov $dst, $src\t #@castP2X" %}
  ins_encode %{
    Register to   = $dst$$Register;
    Register from = $src$$Register;

    if (to != from) {
      __ move(to, from);
    }
  %}
  ins_pipe( ialu_regI_mov );
%}
// MoveF2I: copy the raw 32-bit pattern of a float register into a GPR
// (bit move via mfc1 -- no numeric conversion).
instruct MoveF2I_reg_reg(mRegI dst, regF src) %{
  match(Set dst (MoveF2I src));
  effect(DEF dst, USE src);
  ins_cost(85);
  format %{ "MoveF2I $dst, $src @ MoveF2I_reg_reg" %}
  ins_encode %{
    __ mfc1(as_Register($dst$$reg), as_FloatRegister($src$$reg));
  %}
  ins_pipe( pipe_slow );
%}
// MoveI2F: copy the raw 32-bit pattern of a GPR into a float register
// (bit move via mtc1 -- no numeric conversion).
instruct MoveI2F_reg_reg(regF dst, mRegI src) %{
  match(Set dst (MoveI2F src));
  effect(DEF dst, USE src);
  ins_cost(85);
  format %{ "MoveI2F $dst, $src @ MoveI2F_reg_reg" %}
  ins_encode %{
    __ mtc1(as_Register($src$$reg), as_FloatRegister($dst$$reg));
  %}
  ins_pipe( pipe_slow );
%}
// MoveD2L: copy the raw 64-bit pattern of a double register into a GPR
// (bit move via dmfc1 -- no numeric conversion).
instruct MoveD2L_reg_reg(mRegL dst, regD src) %{
  match(Set dst (MoveD2L src));
  effect(DEF dst, USE src);
  ins_cost(85);
  format %{ "MoveD2L $dst, $src @ MoveD2L_reg_reg" %}
  ins_encode %{
    __ dmfc1(as_Register($dst$$reg), as_FloatRegister($src$$reg));
  %}
  ins_pipe( pipe_slow );
%}
// MoveL2D: copy the raw 64-bit pattern of a GPR into a double register
// (bit move via dmtc1 -- no numeric conversion).
instruct MoveL2D_reg_reg(regD dst, mRegL src) %{
  match(Set dst (MoveL2D src));
  effect(DEF dst, USE src);
  ins_cost(85);
  format %{ "MoveL2D $dst, $src @ MoveL2D_reg_reg" %}
  ins_encode %{
    __ dmtc1(as_Register($src$$reg), as_FloatRegister($dst$$reg));
  %}
  ins_pipe( pipe_slow );
%}
8077 //----------Conditional Move---------------------------------------------------
8078 // Conditional move
// Conditional move int <- int under a signed int compare:
//   if (tmp1 <cop> tmp2) dst = src;
// The condition is materialized into AT (subu32 for ==/!=, slt for the
// ordered cases), then MIPS movz (move when AT == 0) / movn (move when
// AT != 0) performs the predicated move.  Clobbers AT.
instruct cmovI_cmpI_reg_reg(mRegI dst, mRegI src, mRegI tmp1, mRegI tmp2, cmpOp cop ) %{
  match(Set dst (CMoveI (Binary cop (CmpI tmp1 tmp2)) (Binary dst src)));
  ins_cost(80);
  format %{
            "CMP$cop $tmp1, $tmp2\t @cmovI_cmpI_reg_reg\n"
            "\tCMOV $dst,$src \t @cmovI_cmpI_reg_reg"
  %}

  ins_encode %{
    Register op1 = $tmp1$$Register;
    Register op2 = $tmp2$$Register;
    Register dst = $dst$$Register;
    Register src = $src$$Register;
    int flag = $cop$$cmpcode;

    switch(flag)
    {
      case 0x01: //equal
        __ subu32(AT, op1, op2);   // AT == 0 iff op1 == op2 (32-bit)
        __ movz(dst, src, AT);
        break;

      case 0x02: //not_equal
        __ subu32(AT, op1, op2);
        __ movn(dst, src, AT);
        break;

      case 0x03: //great
        __ slt(AT, op2, op1);      // AT = (op2 < op1) signed
        __ movn(dst, src, AT);
        break;

      case 0x04: //great_equal
        __ slt(AT, op1, op2);
        __ movz(dst, src, AT);
        break;

      case 0x05: //less
        __ slt(AT, op1, op2);
        __ movn(dst, src, AT);
        break;

      case 0x06: //less_equal
        __ slt(AT, op2, op1);
        __ movz(dst, src, AT);
        break;

      default:
        Unimplemented();
    }
  %}

  ins_pipe( pipe_slow );
%}
// Conditional move int <- int under an unsigned pointer compare:
//   if (tmp1 <cop> tmp2) dst = src;
// Condition goes into AT (subu for ==/!=, sltu for the unsigned ordered
// cases), then movz/movn predicate the move.  Clobbers AT.
// NOTE(review): 'subu' must be the 64-bit subtract in this port's macro
// assembler for pointer equality to be exact -- confirm (the int/narrow
// variants use subu32 instead).
instruct cmovI_cmpP_reg_reg(mRegI dst, mRegI src, mRegP tmp1, mRegP tmp2, cmpOpU cop ) %{
  match(Set dst (CMoveI (Binary cop (CmpP tmp1 tmp2)) (Binary dst src)));
  ins_cost(80);
  format %{
            "CMPU$cop $tmp1,$tmp2\t @cmovI_cmpP_reg_reg\n\t"
            "CMOV $dst,$src\t @cmovI_cmpP_reg_reg"
  %}
  ins_encode %{
    Register op1 = $tmp1$$Register;
    Register op2 = $tmp2$$Register;
    Register dst = $dst$$Register;
    Register src = $src$$Register;
    int flag = $cop$$cmpcode;

    switch(flag)
    {
      case 0x01: //equal
        __ subu(AT, op1, op2);
        __ movz(dst, src, AT);
        break;

      case 0x02: //not_equal
        __ subu(AT, op1, op2);
        __ movn(dst, src, AT);
        break;

      case 0x03: //above
        __ sltu(AT, op2, op1);
        __ movn(dst, src, AT);
        break;

      case 0x04: //above_equal
        __ sltu(AT, op1, op2);
        __ movz(dst, src, AT);
        break;

      case 0x05: //below
        __ sltu(AT, op1, op2);
        __ movn(dst, src, AT);
        break;

      case 0x06: //below_equal
        __ sltu(AT, op2, op1);
        __ movz(dst, src, AT);
        break;

      default:
        Unimplemented();
    }
  %}

  ins_pipe( pipe_slow );
%}
// Conditional move int <- int under an unsigned compressed-oop (narrow)
// compare.  Condition goes into AT (subu32 for ==/!=, sltu for the
// unsigned ordered cases), then movz/movn predicate the move.  Clobbers AT.
instruct cmovI_cmpN_reg_reg(mRegI dst, mRegI src, mRegN tmp1, mRegN tmp2, cmpOpU cop ) %{
  match(Set dst (CMoveI (Binary cop (CmpN tmp1 tmp2)) (Binary dst src)));
  ins_cost(80);
  format %{
            "CMPU$cop $tmp1,$tmp2\t @cmovI_cmpN_reg_reg\n\t"
            "CMOV $dst,$src\t @cmovI_cmpN_reg_reg"
  %}
  ins_encode %{
    Register op1 = $tmp1$$Register;
    Register op2 = $tmp2$$Register;
    Register dst = $dst$$Register;
    Register src = $src$$Register;
    int flag = $cop$$cmpcode;

    switch(flag)
    {
      case 0x01: //equal
        __ subu32(AT, op1, op2);
        __ movz(dst, src, AT);
        break;

      case 0x02: //not_equal
        __ subu32(AT, op1, op2);
        __ movn(dst, src, AT);
        break;

      case 0x03: //above
        __ sltu(AT, op2, op1);
        __ movn(dst, src, AT);
        break;

      case 0x04: //above_equal
        __ sltu(AT, op1, op2);
        __ movz(dst, src, AT);
        break;

      case 0x05: //below
        __ sltu(AT, op1, op2);
        __ movn(dst, src, AT);
        break;

      case 0x06: //below_equal
        __ sltu(AT, op2, op1);
        __ movz(dst, src, AT);
        break;

      default:
        Unimplemented();
    }
  %}

  ins_pipe( pipe_slow );
%}
// Conditional move pointer <- pointer under an unsigned compressed-oop
// (narrow) compare.  Condition goes into AT (subu32 for ==/!=, sltu for
// the unsigned ordered cases), then movz/movn predicate the move.
// Clobbers AT.
instruct cmovP_cmpN_reg_reg(mRegP dst, mRegP src, mRegN tmp1, mRegN tmp2, cmpOpU cop ) %{
  match(Set dst (CMoveP (Binary cop (CmpN tmp1 tmp2)) (Binary dst src)));
  ins_cost(80);
  format %{
            "CMPU$cop $tmp1,$tmp2\t @cmovP_cmpN_reg_reg\n\t"
            "CMOV $dst,$src\t @cmovP_cmpN_reg_reg"
  %}
  ins_encode %{
    Register op1 = $tmp1$$Register;
    Register op2 = $tmp2$$Register;
    Register dst = $dst$$Register;
    Register src = $src$$Register;
    int flag = $cop$$cmpcode;

    switch(flag)
    {
      case 0x01: //equal
        __ subu32(AT, op1, op2);
        __ movz(dst, src, AT);
        break;

      case 0x02: //not_equal
        __ subu32(AT, op1, op2);
        __ movn(dst, src, AT);
        break;

      case 0x03: //above
        __ sltu(AT, op2, op1);
        __ movn(dst, src, AT);
        break;

      case 0x04: //above_equal
        __ sltu(AT, op1, op2);
        __ movz(dst, src, AT);
        break;

      case 0x05: //below
        __ sltu(AT, op1, op2);
        __ movn(dst, src, AT);
        break;

      case 0x06: //below_equal
        __ sltu(AT, op2, op1);
        __ movz(dst, src, AT);
        break;

      default:
        Unimplemented();
    }
  %}

  ins_pipe( pipe_slow );
%}
// Conditional move narrow-oop <- narrow-oop under an unsigned pointer
// compare.  Condition goes into AT (subu for ==/!=, sltu for the unsigned
// ordered cases), then movz/movn predicate the move.  Clobbers AT.
// NOTE(review): as with cmovI_cmpP, 'subu' must be the 64-bit subtract in
// this port's macro assembler for pointer equality to be exact -- confirm.
instruct cmovN_cmpP_reg_reg(mRegN dst, mRegN src, mRegP tmp1, mRegP tmp2, cmpOpU cop ) %{
  match(Set dst (CMoveN (Binary cop (CmpP tmp1 tmp2)) (Binary dst src)));
  ins_cost(80);
  format %{
            "CMPU$cop $tmp1,$tmp2\t @cmovN_cmpP_reg_reg\n\t"
            "CMOV $dst,$src\t @cmovN_cmpP_reg_reg"
  %}
  ins_encode %{
    Register op1 = $tmp1$$Register;
    Register op2 = $tmp2$$Register;
    Register dst = $dst$$Register;
    Register src = $src$$Register;
    int flag = $cop$$cmpcode;

    switch(flag)
    {
      case 0x01: //equal
        __ subu(AT, op1, op2);
        __ movz(dst, src, AT);
        break;

      case 0x02: //not_equal
        __ subu(AT, op1, op2);
        __ movn(dst, src, AT);
        break;

      case 0x03: //above
        __ sltu(AT, op2, op1);
        __ movn(dst, src, AT);
        break;

      case 0x04: //above_equal
        __ sltu(AT, op1, op2);
        __ movz(dst, src, AT);
        break;

      case 0x05: //below
        __ sltu(AT, op1, op2);
        __ movn(dst, src, AT);
        break;

      case 0x06: //below_equal
        __ sltu(AT, op2, op1);
        __ movz(dst, src, AT);
        break;

      default:
        Unimplemented();
    }
  %}

  ins_pipe( pipe_slow );
%}
// Conditional move pointer <- pointer under a double compare.  The FPU
// condition flag is set with c.cond.d, then movt (move if flag set) /
// movf (move if flag clear) predicates the move.
// NOTE(review): NaN handling is asymmetric across the cases -- 'greater'
// and 'greater_equal' use ordered compares (c_ole_d/c_olt_d) negated via
// movf, so a NaN operand makes the move happen, while 'less'/'less_equal'
// use unordered compares (c_ult_d/c_ule_d) via movt, so NaN also moves.
// Confirm this matches the CMoveP semantics the matcher expects.
instruct cmovP_cmpD_reg_reg(mRegP dst, mRegP src, regD tmp1, regD tmp2, cmpOp cop ) %{
  match(Set dst (CMoveP (Binary cop (CmpD tmp1 tmp2)) (Binary dst src)));
  ins_cost(80);
  format %{
            "CMP$cop $tmp1, $tmp2\t @cmovP_cmpD_reg_reg\n"
            "\tCMOV $dst,$src \t @cmovP_cmpD_reg_reg"
  %}
  ins_encode %{
    FloatRegister reg_op1 = as_FloatRegister($tmp1$$reg);
    FloatRegister reg_op2 = as_FloatRegister($tmp2$$reg);
    Register dst = as_Register($dst$$reg);
    Register src = as_Register($src$$reg);

    int flag = $cop$$cmpcode;

    switch(flag)
    {
      case 0x01: //equal
        __ c_eq_d(reg_op1, reg_op2);
        __ movt(dst, src);
        break;
      case 0x02: //not_equal
        __ c_eq_d(reg_op1, reg_op2);
        __ movf(dst, src);
        break;
      case 0x03: //greater
        __ c_ole_d(reg_op1, reg_op2);
        __ movf(dst, src);
        break;
      case 0x04: //greater_equal
        __ c_olt_d(reg_op1, reg_op2);
        __ movf(dst, src);
        break;
      case 0x05: //less
        __ c_ult_d(reg_op1, reg_op2);
        __ movt(dst, src);
        break;
      case 0x06: //less_equal
        __ c_ule_d(reg_op1, reg_op2);
        __ movt(dst, src);
        break;
      default:
        Unimplemented();
    }
  %}

  ins_pipe( pipe_slow );
%}
// Conditional move narrow-oop <- narrow-oop under an unsigned narrow-oop
// compare.  Condition goes into AT (subu32 for ==/!=, sltu for the
// unsigned ordered cases), then movz/movn predicate the move.  Clobbers AT.
instruct cmovN_cmpN_reg_reg(mRegN dst, mRegN src, mRegN tmp1, mRegN tmp2, cmpOpU cop ) %{
  match(Set dst (CMoveN (Binary cop (CmpN tmp1 tmp2)) (Binary dst src)));
  ins_cost(80);
  format %{
            "CMPU$cop $tmp1,$tmp2\t @cmovN_cmpN_reg_reg\n\t"
            "CMOV $dst,$src\t @cmovN_cmpN_reg_reg"
  %}
  ins_encode %{
    Register op1 = $tmp1$$Register;
    Register op2 = $tmp2$$Register;
    Register dst = $dst$$Register;
    Register src = $src$$Register;
    int flag = $cop$$cmpcode;

    switch(flag)
    {
      case 0x01: //equal
        __ subu32(AT, op1, op2);
        __ movz(dst, src, AT);
        break;

      case 0x02: //not_equal
        __ subu32(AT, op1, op2);
        __ movn(dst, src, AT);
        break;

      case 0x03: //above
        __ sltu(AT, op2, op1);
        __ movn(dst, src, AT);
        break;

      case 0x04: //above_equal
        __ sltu(AT, op1, op2);
        __ movz(dst, src, AT);
        break;

      case 0x05: //below
        __ sltu(AT, op1, op2);
        __ movn(dst, src, AT);
        break;

      case 0x06: //below_equal
        __ sltu(AT, op2, op1);
        __ movz(dst, src, AT);
        break;

      default:
        Unimplemented();
    }
  %}

  ins_pipe( pipe_slow );
%}
// Conditional move int <- int under an unsigned int compare.  Condition
// goes into AT (subu for ==/!=, sltu for the unsigned ordered cases),
// then movz/movn predicate the move.  Clobbers AT.
// NOTE(review): the signed-int variant (cmovI_cmpI) uses subu32 for
// equality while this one uses subu -- equivalent only if both inputs are
// consistently sign-extended; confirm against this port's register model.
instruct cmovI_cmpU_reg_reg(mRegI dst, mRegI src, mRegI tmp1, mRegI tmp2, cmpOpU cop ) %{
  match(Set dst (CMoveI (Binary cop (CmpU tmp1 tmp2)) (Binary dst src)));
  ins_cost(80);
  format %{
            "CMPU$cop $tmp1,$tmp2\t @cmovI_cmpU_reg_reg\n\t"
            "CMOV $dst,$src\t @cmovI_cmpU_reg_reg"
  %}
  ins_encode %{
    Register op1 = $tmp1$$Register;
    Register op2 = $tmp2$$Register;
    Register dst = $dst$$Register;
    Register src = $src$$Register;
    int flag = $cop$$cmpcode;

    switch(flag)
    {
      case 0x01: //equal
        __ subu(AT, op1, op2);
        __ movz(dst, src, AT);
        break;

      case 0x02: //not_equal
        __ subu(AT, op1, op2);
        __ movn(dst, src, AT);
        break;

      case 0x03: //above
        __ sltu(AT, op2, op1);
        __ movn(dst, src, AT);
        break;

      case 0x04: //above_equal
        __ sltu(AT, op1, op2);
        __ movz(dst, src, AT);
        break;

      case 0x05: //below
        __ sltu(AT, op1, op2);
        __ movn(dst, src, AT);
        break;

      case 0x06: //below_equal
        __ sltu(AT, op2, op1);
        __ movz(dst, src, AT);
        break;

      default:
        Unimplemented();
    }
  %}

  ins_pipe( pipe_slow );
%}
// Conditional move int <- int under a signed long compare.  Condition
// goes into AT (subu for ==/!=, slt for the signed ordered cases), then
// movz/movn predicate the move.  Clobbers AT.
// NOTE(review): 'subu' and 'slt' must operate on the full 64 bits in this
// port's macro assembler for long compares to be exact -- confirm.
instruct cmovI_cmpL_reg_reg(mRegI dst, mRegI src, mRegL tmp1, mRegL tmp2, cmpOp cop ) %{
  match(Set dst (CMoveI (Binary cop (CmpL tmp1 tmp2)) (Binary dst src)));
  ins_cost(80);
  format %{
            "CMP$cop $tmp1, $tmp2\t @cmovI_cmpL_reg_reg\n"
            "\tCMOV $dst,$src \t @cmovI_cmpL_reg_reg"
  %}
  ins_encode %{
    Register opr1 = as_Register($tmp1$$reg);
    Register opr2 = as_Register($tmp2$$reg);
    Register dst = $dst$$Register;
    Register src = $src$$Register;
    int flag = $cop$$cmpcode;

    switch(flag)
    {
      case 0x01: //equal
        __ subu(AT, opr1, opr2);
        __ movz(dst, src, AT);
        break;

      case 0x02: //not_equal
        __ subu(AT, opr1, opr2);
        __ movn(dst, src, AT);
        break;

      case 0x03: //greater
        __ slt(AT, opr2, opr1);
        __ movn(dst, src, AT);
        break;

      case 0x04: //greater_equal
        __ slt(AT, opr1, opr2);
        __ movz(dst, src, AT);
        break;

      case 0x05: //less
        __ slt(AT, opr1, opr2);
        __ movn(dst, src, AT);
        break;

      case 0x06: //less_equal
        __ slt(AT, opr2, opr1);
        __ movz(dst, src, AT);
        break;

      default:
        Unimplemented();
    }
  %}

  ins_pipe( pipe_slow );
%}
// Conditional move of a pointer under a signed long compare:
//   dst = src iff (tmp1 <cop> tmp2) signed 64-bit; otherwise dst is unchanged.
// Same AT + movz/movn scheme as the other cmov rules.
instruct cmovP_cmpL_reg_reg(mRegP dst, mRegP src, mRegL tmp1, mRegL tmp2, cmpOp cop ) %{
  match(Set dst (CMoveP (Binary cop (CmpL tmp1 tmp2)) (Binary dst src)));
  ins_cost(80);
  format %{
    "CMP$cop $tmp1, $tmp2\t @cmovP_cmpL_reg_reg\n"
    "\tCMOV $dst,$src \t @cmovP_cmpL_reg_reg"
  %}
  ins_encode %{
    Register opr1 = as_Register($tmp1$$reg);
    Register opr2 = as_Register($tmp2$$reg);
    Register dst = $dst$$Register;
    Register src = $src$$Register;
    int flag = $cop$$cmpcode;

    switch(flag)
    {
      case 0x01: //equal
        // Equality only looks at zero/non-zero, so subu overflow is harmless.
        __ subu(AT, opr1, opr2);
        __ movz(dst, src, AT);
        break;
      case 0x02: //not_equal
        __ subu(AT, opr1, opr2);
        __ movn(dst, src, AT);
        break;
      case 0x03: //greater
        __ slt(AT, opr2, opr1);
        __ movn(dst, src, AT);
        break;
      case 0x04: //greater_equal
        __ slt(AT, opr1, opr2);
        __ movz(dst, src, AT);
        break;
      case 0x05: //less
        __ slt(AT, opr1, opr2);
        __ movn(dst, src, AT);
        break;
      case 0x06: //less_equal
        __ slt(AT, opr2, opr1);
        __ movz(dst, src, AT);
        break;
      default:
        Unimplemented();
    }
  %}

  ins_pipe( pipe_slow );
%}
// Conditional move of an int under a double compare.
// The c_*_d compares set the FP condition bit; movt/movf then move src into
// dst when the bit is true/false respectively.  not_equal/greater/greater_equal
// use the inverted compare + movf so that unordered (NaN) operands select the
// move for not_equal (see the 2016/4/19 note below).
instruct cmovI_cmpD_reg_reg(mRegI dst, mRegI src, regD tmp1, regD tmp2, cmpOp cop ) %{
  match(Set dst (CMoveI (Binary cop (CmpD tmp1 tmp2)) (Binary dst src)));
  ins_cost(80);
  format %{
    "CMP$cop $tmp1, $tmp2\t @cmovI_cmpD_reg_reg\n"
    "\tCMOV $dst,$src \t @cmovI_cmpD_reg_reg"
  %}
  ins_encode %{
    FloatRegister reg_op1 = as_FloatRegister($tmp1$$reg);
    FloatRegister reg_op2 = as_FloatRegister($tmp2$$reg);
    Register dst = as_Register($dst$$reg);
    Register src = as_Register($src$$reg);

    int flag = $cop$$cmpcode;

    switch(flag)
    {
      case 0x01: //equal
        __ c_eq_d(reg_op1, reg_op2);
        __ movt(dst, src);
        break;
      case 0x02: //not_equal
        // 2016/4/19 aoqi: See instruct branchConD_reg_reg. The change in
        // branchConD_reg_reg fixed a bug. It seems similar here, so I made
        // the same change.
        __ c_eq_d(reg_op1, reg_op2);
        __ movf(dst, src);
        break;
      case 0x03: //greater
        __ c_ole_d(reg_op1, reg_op2);
        __ movf(dst, src);
        break;
      case 0x04: //greater_equal
        __ c_olt_d(reg_op1, reg_op2);
        __ movf(dst, src);
        break;
      case 0x05: //less
        __ c_ult_d(reg_op1, reg_op2);
        __ movt(dst, src);
        break;
      case 0x06: //less_equal
        __ c_ule_d(reg_op1, reg_op2);
        __ movt(dst, src);
        break;
      default:
        Unimplemented();
    }
  %}

  ins_pipe( pipe_slow );
%}
// Conditional move of a pointer under an unsigned pointer compare:
//   dst = src iff (tmp1 <cop> tmp2) unsigned; otherwise dst is unchanged.
// Predicate in AT (sltu/subu), applied via movz/movn.
instruct cmovP_cmpP_reg_reg(mRegP dst, mRegP src, mRegP tmp1, mRegP tmp2, cmpOpU cop ) %{
  match(Set dst (CMoveP (Binary cop (CmpP tmp1 tmp2)) (Binary dst src)));
  ins_cost(80);
  format %{
    "CMPU$cop $tmp1,$tmp2\t @cmovP_cmpP_reg_reg\n\t"
    "CMOV $dst,$src\t @cmovP_cmpP_reg_reg"
  %}
  ins_encode %{
    Register op1 = $tmp1$$Register;
    Register op2 = $tmp2$$Register;
    Register dst = $dst$$Register;
    Register src = $src$$Register;
    int flag = $cop$$cmpcode;

    switch(flag)
    {
      case 0x01: //equal
        __ subu(AT, op1, op2);
        __ movz(dst, src, AT);
        break;
      case 0x02: //not_equal
        __ subu(AT, op1, op2);
        __ movn(dst, src, AT);
        break;
      case 0x03: //above
        __ sltu(AT, op2, op1);
        __ movn(dst, src, AT);
        break;
      case 0x04: //above_equal
        __ sltu(AT, op1, op2);
        __ movz(dst, src, AT);
        break;
      case 0x05: //below
        __ sltu(AT, op1, op2);
        __ movn(dst, src, AT);
        break;
      case 0x06: //below_equal
        __ sltu(AT, op2, op1);
        __ movz(dst, src, AT);
        break;
      default:
        Unimplemented();
    }
  %}

  ins_pipe( pipe_slow );
%}
// Conditional move of a pointer under a SIGNED int compare (cmpOp/CmpI).
// NOTE: the original case comments said above/below, but slt performs a
// signed comparison, matching the signed cmpOp operand — comments corrected.
instruct cmovP_cmpI_reg_reg(mRegP dst, mRegP src, mRegI tmp1, mRegI tmp2, cmpOp cop ) %{
  match(Set dst (CMoveP (Binary cop (CmpI tmp1 tmp2)) (Binary dst src)));
  ins_cost(80);
  format %{
    "CMP$cop $tmp1,$tmp2\t @cmovP_cmpI_reg_reg\n\t"
    "CMOV $dst,$src\t @cmovP_cmpI_reg_reg"
  %}
  ins_encode %{
    Register op1 = $tmp1$$Register;
    Register op2 = $tmp2$$Register;
    Register dst = $dst$$Register;
    Register src = $src$$Register;
    int flag = $cop$$cmpcode;

    switch(flag)
    {
      case 0x01: //equal
        __ subu32(AT, op1, op2);
        __ movz(dst, src, AT);
        break;
      case 0x02: //not_equal
        __ subu32(AT, op1, op2);
        __ movn(dst, src, AT);
        break;
      case 0x03: //greater (signed slt)
        __ slt(AT, op2, op1);
        __ movn(dst, src, AT);
        break;
      case 0x04: //greater_equal
        __ slt(AT, op1, op2);
        __ movz(dst, src, AT);
        break;
      case 0x05: //less
        __ slt(AT, op1, op2);
        __ movn(dst, src, AT);
        break;
      case 0x06: //less_equal
        __ slt(AT, op2, op1);
        __ movz(dst, src, AT);
        break;
      default:
        Unimplemented();
    }
  %}

  ins_pipe( pipe_slow );
%}
// Conditional move of a narrow oop under a SIGNED int compare (cmpOp/CmpI).
// NOTE: the original case comments said above/below, but slt performs a
// signed comparison, matching the signed cmpOp operand — comments corrected.
instruct cmovN_cmpI_reg_reg(mRegN dst, mRegN src, mRegI tmp1, mRegI tmp2, cmpOp cop ) %{
  match(Set dst (CMoveN (Binary cop (CmpI tmp1 tmp2)) (Binary dst src)));
  ins_cost(80);
  format %{
    "CMP$cop $tmp1,$tmp2\t @cmovN_cmpI_reg_reg\n\t"
    "CMOV $dst,$src\t @cmovN_cmpI_reg_reg"
  %}
  ins_encode %{
    Register op1 = $tmp1$$Register;
    Register op2 = $tmp2$$Register;
    Register dst = $dst$$Register;
    Register src = $src$$Register;
    int flag = $cop$$cmpcode;

    switch(flag)
    {
      case 0x01: //equal
        __ subu32(AT, op1, op2);
        __ movz(dst, src, AT);
        break;
      case 0x02: //not_equal
        __ subu32(AT, op1, op2);
        __ movn(dst, src, AT);
        break;
      case 0x03: //greater (signed slt)
        __ slt(AT, op2, op1);
        __ movn(dst, src, AT);
        break;
      case 0x04: //greater_equal
        __ slt(AT, op1, op2);
        __ movz(dst, src, AT);
        break;
      case 0x05: //less
        __ slt(AT, op1, op2);
        __ movn(dst, src, AT);
        break;
      case 0x06: //less_equal
        __ slt(AT, op2, op1);
        __ movz(dst, src, AT);
        break;
      default:
        Unimplemented();
    }
  %}

  ins_pipe( pipe_slow );
%}
// Conditional move of a long under a signed int compare:
//   dst = src iff (tmp1 <cop> tmp2) signed 32-bit; otherwise dst is unchanged.
// ("great" in the original case comments corrected to "greater".)
instruct cmovL_cmpI_reg_reg(mRegL dst, mRegL src, mRegI tmp1, mRegI tmp2, cmpOp cop ) %{
  match(Set dst (CMoveL (Binary cop (CmpI tmp1 tmp2)) (Binary dst src)));
  ins_cost(80);
  format %{
    "CMP$cop $tmp1, $tmp2\t @cmovL_cmpI_reg_reg\n"
    "\tCMOV $dst,$src \t @cmovL_cmpI_reg_reg"
  %}

  ins_encode %{
    Register op1 = $tmp1$$Register;
    Register op2 = $tmp2$$Register;
    Register dst = as_Register($dst$$reg);
    Register src = as_Register($src$$reg);
    int flag = $cop$$cmpcode;

    switch(flag)
    {
      case 0x01: //equal
        __ subu32(AT, op1, op2);
        __ movz(dst, src, AT);
        break;
      case 0x02: //not_equal
        __ subu32(AT, op1, op2);
        __ movn(dst, src, AT);
        break;
      case 0x03: //greater
        __ slt(AT, op2, op1);
        __ movn(dst, src, AT);
        break;
      case 0x04: //greater_equal
        __ slt(AT, op1, op2);
        __ movz(dst, src, AT);
        break;
      case 0x05: //less
        __ slt(AT, op1, op2);
        __ movn(dst, src, AT);
        break;
      case 0x06: //less_equal
        __ slt(AT, op2, op1);
        __ movz(dst, src, AT);
        break;
      default:
        Unimplemented();
    }
  %}

  ins_pipe( pipe_slow );
%}
// Conditional move of a long under a signed long compare:
//   dst = src iff (tmp1 <cop> tmp2) signed 64-bit; otherwise dst is unchanged.
instruct cmovL_cmpL_reg_reg(mRegL dst, mRegL src, mRegL tmp1, mRegL tmp2, cmpOp cop ) %{
  match(Set dst (CMoveL (Binary cop (CmpL tmp1 tmp2)) (Binary dst src)));
  ins_cost(80);
  format %{
    "CMP$cop $tmp1, $tmp2\t @cmovL_cmpL_reg_reg\n"
    "\tCMOV $dst,$src \t @cmovL_cmpL_reg_reg"
  %}
  ins_encode %{
    Register opr1 = as_Register($tmp1$$reg);
    Register opr2 = as_Register($tmp2$$reg);
    Register dst = as_Register($dst$$reg);
    Register src = as_Register($src$$reg);
    int flag = $cop$$cmpcode;

    switch(flag)
    {
      case 0x01: //equal
        // subu only feeds a zero test; subtraction overflow is harmless here.
        __ subu(AT, opr1, opr2);
        __ movz(dst, src, AT);
        break;
      case 0x02: //not_equal
        __ subu(AT, opr1, opr2);
        __ movn(dst, src, AT);
        break;
      case 0x03: //greater
        __ slt(AT, opr2, opr1);
        __ movn(dst, src, AT);
        break;
      case 0x04: //greater_equal
        __ slt(AT, opr1, opr2);
        __ movz(dst, src, AT);
        break;
      case 0x05: //less
        __ slt(AT, opr1, opr2);
        __ movn(dst, src, AT);
        break;
      case 0x06: //less_equal
        __ slt(AT, opr2, opr1);
        __ movz(dst, src, AT);
        break;
      default:
        Unimplemented();
    }
  %}

  ins_pipe( pipe_slow );
%}
// Conditional move of a long under an unsigned narrow-oop compare:
//   dst = src iff (tmp1 <cop> tmp2) unsigned; otherwise dst is unchanged.
instruct cmovL_cmpN_reg_reg(mRegL dst, mRegL src, mRegN tmp1, mRegN tmp2, cmpOpU cop ) %{
  match(Set dst (CMoveL (Binary cop (CmpN tmp1 tmp2)) (Binary dst src)));
  ins_cost(80);
  format %{
    "CMPU$cop $tmp1,$tmp2\t @cmovL_cmpN_reg_reg\n\t"
    "CMOV $dst,$src\t @cmovL_cmpN_reg_reg"
  %}
  ins_encode %{
    Register op1 = $tmp1$$Register;
    Register op2 = $tmp2$$Register;
    Register dst = $dst$$Register;
    Register src = $src$$Register;
    int flag = $cop$$cmpcode;

    switch(flag)
    {
      case 0x01: //equal
        __ subu32(AT, op1, op2);
        __ movz(dst, src, AT);
        break;
      case 0x02: //not_equal
        __ subu32(AT, op1, op2);
        __ movn(dst, src, AT);
        break;
      case 0x03: //above
        __ sltu(AT, op2, op1);
        __ movn(dst, src, AT);
        break;
      case 0x04: //above_equal
        __ sltu(AT, op1, op2);
        __ movz(dst, src, AT);
        break;
      case 0x05: //below
        __ sltu(AT, op1, op2);
        __ movn(dst, src, AT);
        break;
      case 0x06: //below_equal
        __ sltu(AT, op2, op1);
        __ movz(dst, src, AT);
        break;
      default:
        Unimplemented();
    }
  %}

  ins_pipe( pipe_slow );
%}
// Conditional move of a long under a double compare.  Same FP-condition-bit +
// movt/movf scheme as cmovI_cmpD_reg_reg.
instruct cmovL_cmpD_reg_reg(mRegL dst, mRegL src, regD tmp1, regD tmp2, cmpOp cop ) %{
  match(Set dst (CMoveL (Binary cop (CmpD tmp1 tmp2)) (Binary dst src)));
  ins_cost(80);
  format %{
    "CMP$cop $tmp1, $tmp2\t @cmovL_cmpD_reg_reg\n"
    "\tCMOV $dst,$src \t @cmovL_cmpD_reg_reg"
  %}
  ins_encode %{
    FloatRegister reg_op1 = as_FloatRegister($tmp1$$reg);
    FloatRegister reg_op2 = as_FloatRegister($tmp2$$reg);
    Register dst = as_Register($dst$$reg);
    Register src = as_Register($src$$reg);

    int flag = $cop$$cmpcode;

    switch(flag)
    {
      case 0x01: //equal
        __ c_eq_d(reg_op1, reg_op2);
        __ movt(dst, src);
        break;
      case 0x02: //not_equal
        // Inverted compare + movf, so NaN operands select the move (mirrors
        // cmovI_cmpD_reg_reg / branchConD_reg_reg).
        __ c_eq_d(reg_op1, reg_op2);
        __ movf(dst, src);
        break;
      case 0x03: //greater
        __ c_ole_d(reg_op1, reg_op2);
        __ movf(dst, src);
        break;
      case 0x04: //greater_equal
        __ c_olt_d(reg_op1, reg_op2);
        __ movf(dst, src);
        break;
      case 0x05: //less
        __ c_ult_d(reg_op1, reg_op2);
        __ movt(dst, src);
        break;
      case 0x06: //less_equal
        __ c_ule_d(reg_op1, reg_op2);
        __ movt(dst, src);
        break;
      default:
        Unimplemented();
    }
  %}

  ins_pipe( pipe_slow );
%}
// Conditional move of a double under a double compare.  There is no FP
// conditional-move of an FPR pair here, so each case branches over a mov_d:
// the branch (bc1f/bc1t) skips the move when the condition does NOT hold.
// Only one case runs per emission, so the single Label L is bound once.
instruct cmovD_cmpD_reg_reg(regD dst, regD src, regD tmp1, regD tmp2, cmpOp cop ) %{
  match(Set dst (CMoveD (Binary cop (CmpD tmp1 tmp2)) (Binary dst src)));
  ins_cost(200);
  format %{
    "CMP$cop $tmp1, $tmp2\t @cmovD_cmpD_reg_reg\n"
    "\tCMOV $dst,$src \t @cmovD_cmpD_reg_reg"
  %}
  ins_encode %{
    FloatRegister reg_op1 = as_FloatRegister($tmp1$$reg);
    FloatRegister reg_op2 = as_FloatRegister($tmp2$$reg);
    FloatRegister dst = as_FloatRegister($dst$$reg);
    FloatRegister src = as_FloatRegister($src$$reg);

    int flag = $cop$$cmpcode;

    Label L;

    switch(flag)
    {
      case 0x01: //equal
        __ c_eq_d(reg_op1, reg_op2);
        __ bc1f(L);
        __ nop();
        __ mov_d(dst, src);
        __ bind(L);
        break;
      case 0x02: //not_equal
        // 2016/4/19 aoqi: See instruct branchConD_reg_reg. The change in
        // branchConD_reg_reg fixed a bug. It seems similar here, so I made
        // the same change.
        __ c_eq_d(reg_op1, reg_op2);
        __ bc1t(L);
        __ nop();
        __ mov_d(dst, src);
        __ bind(L);
        break;
      case 0x03: //greater
        __ c_ole_d(reg_op1, reg_op2);
        __ bc1t(L);
        __ nop();
        __ mov_d(dst, src);
        __ bind(L);
        break;
      case 0x04: //greater_equal
        __ c_olt_d(reg_op1, reg_op2);
        __ bc1t(L);
        __ nop();
        __ mov_d(dst, src);
        __ bind(L);
        break;
      case 0x05: //less
        __ c_ult_d(reg_op1, reg_op2);
        __ bc1f(L);
        __ nop();
        __ mov_d(dst, src);
        __ bind(L);
        break;
      case 0x06: //less_equal
        __ c_ule_d(reg_op1, reg_op2);
        __ bc1f(L);
        __ nop();
        __ mov_d(dst, src);
        __ bind(L);
        break;
      default:
        Unimplemented();
    }
  %}

  ins_pipe( pipe_slow );
%}
// Conditional move of a float under a signed int compare.  movz/movn cannot
// target FPRs from a GPR predicate pattern here, so each case branches over a
// mov_s: the branch skips the move when the condition does NOT hold.
// ("great" in the original case comments corrected to "greater".)
instruct cmovF_cmpI_reg_reg(regF dst, regF src, mRegI tmp1, mRegI tmp2, cmpOp cop ) %{
  match(Set dst (CMoveF (Binary cop (CmpI tmp1 tmp2)) (Binary dst src)));
  ins_cost(200);
  format %{
    "CMP$cop $tmp1, $tmp2\t @cmovF_cmpI_reg_reg\n"
    "\tCMOV $dst, $src \t @cmovF_cmpI_reg_reg"
  %}

  ins_encode %{
    Register op1 = $tmp1$$Register;
    Register op2 = $tmp2$$Register;
    FloatRegister dst = as_FloatRegister($dst$$reg);
    FloatRegister src = as_FloatRegister($src$$reg);
    int flag = $cop$$cmpcode;
    Label L;

    switch(flag)
    {
      case 0x01: //equal
        __ bne(op1, op2, L);
        __ nop();
        __ mov_s(dst, src);
        __ bind(L);
        break;
      case 0x02: //not_equal
        __ beq(op1, op2, L);
        __ nop();
        __ mov_s(dst, src);
        __ bind(L);
        break;
      case 0x03: //greater
        __ slt(AT, op2, op1);
        __ beq(AT, R0, L);
        __ nop();
        __ mov_s(dst, src);
        __ bind(L);
        break;
      case 0x04: //greater_equal
        __ slt(AT, op1, op2);
        __ bne(AT, R0, L);
        __ nop();
        __ mov_s(dst, src);
        __ bind(L);
        break;
      case 0x05: //less
        __ slt(AT, op1, op2);
        __ beq(AT, R0, L);
        __ nop();
        __ mov_s(dst, src);
        __ bind(L);
        break;
      case 0x06: //less_equal
        __ slt(AT, op2, op1);
        __ bne(AT, R0, L);
        __ nop();
        __ mov_s(dst, src);
        __ bind(L);
        break;
      default:
        Unimplemented();
    }
  %}

  ins_pipe( pipe_slow );
%}
// Conditional move of a double under a signed int compare.  Branch-over-move
// scheme: the branch skips mov_d when the condition does NOT hold.
// ("great" in the original case comments corrected to "greater".)
instruct cmovD_cmpI_reg_reg(regD dst, regD src, mRegI tmp1, mRegI tmp2, cmpOp cop ) %{
  match(Set dst (CMoveD (Binary cop (CmpI tmp1 tmp2)) (Binary dst src)));
  ins_cost(200);
  format %{
    "CMP$cop $tmp1, $tmp2\t @cmovD_cmpI_reg_reg\n"
    "\tCMOV $dst, $src \t @cmovD_cmpI_reg_reg"
  %}

  ins_encode %{
    Register op1 = $tmp1$$Register;
    Register op2 = $tmp2$$Register;
    FloatRegister dst = as_FloatRegister($dst$$reg);
    FloatRegister src = as_FloatRegister($src$$reg);
    int flag = $cop$$cmpcode;
    Label L;

    switch(flag)
    {
      case 0x01: //equal
        __ bne(op1, op2, L);
        __ nop();
        __ mov_d(dst, src);
        __ bind(L);
        break;
      case 0x02: //not_equal
        __ beq(op1, op2, L);
        __ nop();
        __ mov_d(dst, src);
        __ bind(L);
        break;
      case 0x03: //greater
        __ slt(AT, op2, op1);
        __ beq(AT, R0, L);
        __ nop();
        __ mov_d(dst, src);
        __ bind(L);
        break;
      case 0x04: //greater_equal
        __ slt(AT, op1, op2);
        __ bne(AT, R0, L);
        __ nop();
        __ mov_d(dst, src);
        __ bind(L);
        break;
      case 0x05: //less
        __ slt(AT, op1, op2);
        __ beq(AT, R0, L);
        __ nop();
        __ mov_d(dst, src);
        __ bind(L);
        break;
      case 0x06: //less_equal
        __ slt(AT, op2, op1);
        __ bne(AT, R0, L);
        __ nop();
        __ mov_d(dst, src);
        __ bind(L);
        break;
      default:
        Unimplemented();
    }
  %}

  ins_pipe( pipe_slow );
%}
// Conditional move of a double under a pointer compare.  Branch-over-move
// scheme like cmovD_cmpI_reg_reg.
// NOTE(review): the ordering cases use signed slt although pointers are
// normally compared unsigned; only eq/ne are expected for CmpP — confirm.
// ("great" in the original case comments corrected to "greater".)
instruct cmovD_cmpP_reg_reg(regD dst, regD src, mRegP tmp1, mRegP tmp2, cmpOp cop ) %{
  match(Set dst (CMoveD (Binary cop (CmpP tmp1 tmp2)) (Binary dst src)));
  ins_cost(200);
  format %{
    "CMP$cop $tmp1, $tmp2\t @cmovD_cmpP_reg_reg\n"
    "\tCMOV $dst, $src \t @cmovD_cmpP_reg_reg"
  %}

  ins_encode %{
    Register op1 = $tmp1$$Register;
    Register op2 = $tmp2$$Register;
    FloatRegister dst = as_FloatRegister($dst$$reg);
    FloatRegister src = as_FloatRegister($src$$reg);
    int flag = $cop$$cmpcode;
    Label L;

    switch(flag)
    {
      case 0x01: //equal
        __ bne(op1, op2, L);
        __ nop();
        __ mov_d(dst, src);
        __ bind(L);
        break;
      case 0x02: //not_equal
        __ beq(op1, op2, L);
        __ nop();
        __ mov_d(dst, src);
        __ bind(L);
        break;
      case 0x03: //greater
        __ slt(AT, op2, op1);
        __ beq(AT, R0, L);
        __ nop();
        __ mov_d(dst, src);
        __ bind(L);
        break;
      case 0x04: //greater_equal
        __ slt(AT, op1, op2);
        __ bne(AT, R0, L);
        __ nop();
        __ mov_d(dst, src);
        __ bind(L);
        break;
      case 0x05: //less
        __ slt(AT, op1, op2);
        __ beq(AT, R0, L);
        __ nop();
        __ mov_d(dst, src);
        __ bind(L);
        break;
      case 0x06: //less_equal
        __ slt(AT, op2, op1);
        __ bne(AT, R0, L);
        __ nop();
        __ mov_d(dst, src);
        __ bind(L);
        break;
      default:
        Unimplemented();
    }
  %}

  ins_pipe( pipe_slow );
%}
//FIXME
// Conditional move of an int under a float compare.  Single-precision analog
// of cmovI_cmpD_reg_reg: c_*_s sets the FP condition bit, movt/movf applies it.
instruct cmovI_cmpF_reg_reg(mRegI dst, mRegI src, regF tmp1, regF tmp2, cmpOp cop ) %{
  match(Set dst (CMoveI (Binary cop (CmpF tmp1 tmp2)) (Binary dst src)));
  ins_cost(80);
  format %{
    "CMP$cop $tmp1, $tmp2\t @cmovI_cmpF_reg_reg\n"
    "\tCMOV $dst,$src \t @cmovI_cmpF_reg_reg"
  %}

  ins_encode %{
    FloatRegister reg_op1 = $tmp1$$FloatRegister;
    FloatRegister reg_op2 = $tmp2$$FloatRegister;
    Register dst = $dst$$Register;
    Register src = $src$$Register;
    int flag = $cop$$cmpcode;

    switch(flag)
    {
      case 0x01: //equal
        __ c_eq_s(reg_op1, reg_op2);
        __ movt(dst, src);
        break;
      case 0x02: //not_equal
        // Inverted compare + movf (see cmovI_cmpD_reg_reg).
        __ c_eq_s(reg_op1, reg_op2);
        __ movf(dst, src);
        break;
      case 0x03: //greater
        __ c_ole_s(reg_op1, reg_op2);
        __ movf(dst, src);
        break;
      case 0x04: //greater_equal
        __ c_olt_s(reg_op1, reg_op2);
        __ movf(dst, src);
        break;
      case 0x05: //less
        __ c_ult_s(reg_op1, reg_op2);
        __ movt(dst, src);
        break;
      case 0x06: //less_equal
        __ c_ule_s(reg_op1, reg_op2);
        __ movt(dst, src);
        break;
      default:
        Unimplemented();
    }
  %}
  ins_pipe( pipe_slow );
%}
// Conditional move of a float under a float compare.  Branch-over-move: the
// bc1f/bc1t skips mov_s when the condition does NOT hold (single Label L is
// bound once, since only one case is emitted).
instruct cmovF_cmpF_reg_reg(regF dst, regF src, regF tmp1, regF tmp2, cmpOp cop ) %{
  match(Set dst (CMoveF (Binary cop (CmpF tmp1 tmp2)) (Binary dst src)));
  ins_cost(200);
  format %{
    "CMP$cop $tmp1, $tmp2\t @cmovF_cmpF_reg_reg\n"
    "\tCMOV $dst,$src \t @cmovF_cmpF_reg_reg"
  %}

  ins_encode %{
    FloatRegister reg_op1 = $tmp1$$FloatRegister;
    FloatRegister reg_op2 = $tmp2$$FloatRegister;
    FloatRegister dst = $dst$$FloatRegister;
    FloatRegister src = $src$$FloatRegister;
    Label L;
    int flag = $cop$$cmpcode;

    switch(flag)
    {
      case 0x01: //equal
        __ c_eq_s(reg_op1, reg_op2);
        __ bc1f(L);
        __ nop();
        __ mov_s(dst, src);
        __ bind(L);
        break;
      case 0x02: //not_equal
        __ c_eq_s(reg_op1, reg_op2);
        __ bc1t(L);
        __ nop();
        __ mov_s(dst, src);
        __ bind(L);
        break;
      case 0x03: //greater
        __ c_ole_s(reg_op1, reg_op2);
        __ bc1t(L);
        __ nop();
        __ mov_s(dst, src);
        __ bind(L);
        break;
      case 0x04: //greater_equal
        __ c_olt_s(reg_op1, reg_op2);
        __ bc1t(L);
        __ nop();
        __ mov_s(dst, src);
        __ bind(L);
        break;
      case 0x05: //less
        __ c_ult_s(reg_op1, reg_op2);
        __ bc1f(L);
        __ nop();
        __ mov_s(dst, src);
        __ bind(L);
        break;
      case 0x06: //less_equal
        __ c_ule_s(reg_op1, reg_op2);
        __ bc1f(L);
        __ nop();
        __ mov_s(dst, src);
        __ bind(L);
        break;
      default:
        Unimplemented();
    }
  %}
  ins_pipe( pipe_slow );
%}
9428 // Manifest a CmpL result in an integer register. Very painful.
9429 // This is the test to avoid.
instruct cmpL3_reg_reg(mRegI dst, mRegL src1, mRegL src2) %{
  match(Set dst (CmpL3 src1 src2));
  ins_cost(1000);
  format %{ "cmpL3  $dst, $src1, $src2 @ cmpL3_reg_reg" %}
  ins_encode %{
    // Materialize the three-way long compare:
    //   dst = -1 if src1 < src2, 0 if equal, 1 if src1 > src2.
    //
    // BUGFIX: the previous encoding derived the sign from `subu(src1, src2)`
    // followed by bltz, which gives the WRONG answer when the 64-bit
    // subtraction overflows (e.g. src1 = LONG_MIN, src2 = 1 yields a positive
    // difference).  slt is overflow-safe, so compute dst = (a > b) - (a < b)
    // branch-free instead.  Clobbers AT, like the old encoding.
    Register opr1 = as_Register($src1$$reg);
    Register opr2 = as_Register($src2$$reg);
    Register dst  = as_Register($dst$$reg);

    __ slt(AT, opr1, opr2);    // AT  = (src1 < src2) ? 1 : 0
    __ slt(dst, opr2, opr1);   // dst = (src1 > src2) ? 1 : 0
                               // (reads of opr1/opr2 are complete before dst
                               //  is written, so dst may alias an input)
    __ subu(dst, dst, AT);     // dst = (src1 > src2) - (src1 < src2)
  %}
  ins_pipe( pipe_slow );
%}
9453 //
9454 // less_rsult = -1
9455 // greater_result = 1
9456 // equal_result = 0
9457 // nan_result = -1
9458 //
// Three-way float compare (see result table above: less/NaN -> -1,
// greater -> 1, equal -> 0).
instruct cmpF3_reg_reg(mRegI dst, regF src1, regF src2) %{
  match(Set dst (CmpF3 src1 src2));
  ins_cost(1000);
  format %{ "cmpF3  $dst, $src1, $src2 @ cmpF3_reg_reg" %}
  ins_encode %{
    FloatRegister src1 = as_FloatRegister($src1$$reg);
    FloatRegister src2 = as_FloatRegister($src2$$reg);
    Register dst = as_Register($dst$$reg);

    Label Done;

    // c_ult is true for (src1 < src2) OR unordered, so NaN takes the -1 path,
    // matching the documented nan_result above.
    __ c_ult_s(src1, src2);
    __ bc1t(Done);
    __ delayed()->daddiu(dst, R0, -1);   // delay slot: dst = -1 (kept if taken)

    __ c_eq_s(src1, src2);
    __ move(dst, 1);
    __ movt(dst, R0);                    // equal -> 0, otherwise stays 1

    __ bind(Done);
  %}
  ins_pipe( pipe_slow );
%}
// Three-way double compare: less/NaN -> -1, greater -> 1, equal -> 0
// (double-precision analog of cmpF3_reg_reg).
instruct cmpD3_reg_reg(mRegI dst, regD src1, regD src2) %{
  match(Set dst (CmpD3 src1 src2));
  ins_cost(1000);
  format %{ "cmpD3  $dst, $src1, $src2 @ cmpD3_reg_reg" %}
  ins_encode %{
    FloatRegister src1 = as_FloatRegister($src1$$reg);
    FloatRegister src2 = as_FloatRegister($src2$$reg);
    Register dst = as_Register($dst$$reg);

    Label Done;

    // c_ult is true for (src1 < src2) OR unordered, so NaN yields -1.
    __ c_ult_d(src1, src2);
    __ bc1t(Done);
    __ delayed()->daddiu(dst, R0, -1);   // delay slot: dst = -1 (kept if taken)

    __ c_eq_d(src1, src2);
    __ move(dst, 1);
    __ movt(dst, R0);                    // equal -> 0, otherwise stays 1

    __ bind(Done);
  %}
  ins_pipe( pipe_slow );
%}
instruct clear_array(mRegL cnt, mRegP base, Universe dummy) %{
  match(Set dummy (ClearArray cnt base));
  format %{ "CLEAR_ARRAY base = $base, cnt = $cnt # Clear doublewords" %}
  ins_encode %{
    // $cnt is the number of DOUBLEWORDS (8-byte units) to clear, not bytes —
    // the loop below stores one sd (8 bytes) per count decrement.
    // $base points to the first doubleword of the array body.
    // Clobbers AT and T9; $base/$cnt themselves are not modified.
    Register base = $base$$Register;
    Register num = $cnt$$Register;
    Label Loop, done;

    // 2012/9/21 Jin: as on X86, $cnt is calculated in doublewords (8 bytes)
    __ move(T9, num); /* T9 = words */
    __ beq(T9, R0, done);      // nothing to do for a zero-length body
    __ nop();
    __ move(AT, base);

    __ bind(Loop);
    __ sd(R0, Address(AT, 0)); // zero one doubleword
    __ daddi(AT, AT, wordSize);
    __ daddi(T9, T9, -1);
    __ bne(T9, R0, Loop);
    __ delayed()->nop();
    __ bind(done);
  %}
  ins_pipe( pipe_slow );
%}
// Lexicographic comparison of two jchar strings.
//   result < 0 / == 0 / > 0 like String.compareTo: the difference of the first
//   differing chars, or (cnt1 - cnt2) when one string is a prefix of the other.
// All four inputs are clobbered (USE_KILL); AT is used as a scratch register.
instruct string_compare(a4_RegP str1, mA5RegI cnt1, a6_RegP str2, mA7RegI cnt2, no_Ax_mRegI result) %{
  match(Set result (StrComp (Binary str1 cnt1) (Binary str2 cnt2)));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2);

  format %{ "String Compare $str1[len: $cnt1], $str2[len: $cnt2] -> $result   @ string_compare" %}
  ins_encode %{
    // Get the first character position in both strings
    //         [8] char array, [12] offset, [16] count
    Register str1 = $str1$$Register;
    Register str2 = $str2$$Register;
    Register cnt1 = $cnt1$$Register;
    Register cnt2 = $cnt2$$Register;
    Register result = $result$$Register;

    Label L, Loop, haveResult, done;

    // compute the and difference of lengths (in result)
    __ subu(result, cnt1, cnt2); // result holds the difference of two lengths

    // compute the shorter length (in cnt1)
    __ slt(AT, cnt2, cnt1);
    __ movn(cnt1, cnt2, AT);

    // Now the shorter length is in cnt1 and cnt2 can be used as a tmp register
    __ bind(Loop);                        // Loop begin
    // When the shorter string is exhausted, fall out with result = cnt1-cnt2.
    __ beq(cnt1, R0, done);
    __ delayed()->lhu(AT, str1, 0);;      // delay slot: load current char of str1

    // compare current character
    __ lhu(cnt2, str2, 0);
    __ bne(AT, cnt2, haveResult);
    __ delayed()->addi(str1, str1, 2);    // delay slot: advance str1 (harmless either way)
    __ addi(str2, str2, 2);
    __ b(Loop);
    __ delayed()->addi(cnt1, cnt1, -1);   // Loop end

    __ bind(haveResult);
    __ subu(result, AT, cnt2);            // difference of the first differing chars

    __ bind(done);
  %}

  ins_pipe( pipe_slow );
%}
// intrinsic optimization
// Char-by-char equality of two jchar strings of length $cnt.
//   result = 1 when equal (or same backing array, or cnt == 0), 0 otherwise.
// str1/str2/cnt are clobbered (USE_KILL), temp is a scratch; AT is also used.
instruct string_equals(a4_RegP str1, a5_RegP str2, mA6RegI cnt, mA7RegI temp, no_Ax_mRegI result) %{
  match(Set result (StrEquals (Binary str1 str2) cnt));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt, KILL temp);

  format %{ "String Equal $str1, $str2, len:$cnt  tmp:$temp -> $result   @ string_equals" %}
  ins_encode %{
    // Get the first character position in both strings
    //         [8] char array, [12] offset, [16] count
    Register str1 = $str1$$Register;
    Register str2 = $str2$$Register;
    Register cnt = $cnt$$Register;
    Register tmp = $temp$$Register;
    Register result = $result$$Register;

    Label Loop, done;

    // Identical array pointers compare equal without scanning; the daddiu in
    // the branch delay slot sets result = 1 on every path through here.
    __ beq(str1, str2, done);    // same char[] ?
    __ daddiu(result, R0, 1);    // delay slot

    __ bind(Loop);               // Loop begin
    __ beq(cnt, R0, done);
    __ daddiu(result, R0, 1);    // delay slot: count == 0 -> equal

    // compare current character
    __ lhu(AT, str1, 0);;
    __ lhu(tmp, str2, 0);
    __ bne(AT, tmp, done);
    __ delayed()->daddi(result, R0, 0);  // delay slot: assume mismatch (overwritten on loop)
    __ addi(str1, str1, 2);
    __ addi(str2, str2, 2);
    __ b(Loop);
    __ delayed()->addi(cnt, cnt, -1);    // Loop end

    __ bind(done);
  %}

  ins_pipe( pipe_slow );
%}
9620 //----------Arithmetic Instructions-------------------------------------------
9621 //----------Addition Instructions---------------------------------------------
instruct addI_Reg_Reg(mRegI dst, mRegI src1, mRegI src2) %{
  match(Set dst (AddI src1 src2));

  format %{ "add    $dst, $src1, $src2 #@addI_Reg_Reg" %}
  ins_encode %{
    // 32-bit integer add of two registers.
    __ addu32($dst$$Register, $src1$$Register, $src2$$Register);
  %}
  ins_pipe( ialu_regI_regI );
%}
instruct addI_Reg_imm(mRegI dst, mRegI src1, immI src2) %{
  match(Set dst (AddI src1 src2));

  format %{ "add    $dst, $src1, $src2 #@addI_Reg_imm" %}
  ins_encode %{
    // 32-bit add of register and immediate.  A 16-bit-signed immediate fits
    // directly in addiu32; anything larger is first materialized in AT.
    Register rd  = $dst$$Register;
    Register rs  = $src1$$Register;
    int      val = $src2$$constant;

    if (Assembler::is_simm16(val)) {
      __ addiu32(rd, rs, val);
    } else {
      __ move(AT, val);
      __ addu32(rd, rs, AT);
    }
  %}
  ins_pipe( ialu_regI_regI );
%}
instruct addP_reg_reg(mRegP dst, mRegP src1, mRegL src2) %{
  match(Set dst (AddP src1 src2));

  format %{ "dadd   $dst, $src1, $src2 #@addP_reg_reg" %}

  ins_encode %{
    // Pointer add: full 64-bit addition.
    __ daddu($dst$$Register, $src1$$Register, $src2$$Register);
  %}

  ins_pipe( ialu_regI_regI );
%}
instruct addP_reg_reg_convI2L(mRegP dst, mRegP src1, mRegI src2) %{
  match(Set dst (AddP src1 (ConvI2L src2)));

  format %{ "dadd   $dst, $src1, $src2 #@addP_reg_reg_convI2L" %}

  ins_encode %{
    // Pointer plus sign-extended int offset; the int input is presumably
    // already sign-extended in its 64-bit register, so a plain daddu suffices.
    __ daddu($dst$$Register, $src1$$Register, $src2$$Register);
  %}

  ins_pipe( ialu_regI_regI );
%}
instruct addP_reg_imm(mRegP dst, mRegP src1, immL src2) %{
  match(Set dst (AddP src1 src2));

  format %{ "daddi  $dst, $src1, $src2 #@addP_reg_imm" %}
  ins_encode %{
    // Pointer plus 64-bit constant.  Small constants go straight into
    // daddiu; larger ones are loaded into AT with set64 first.
    Register rd   = $dst$$Register;
    Register rs   = $src1$$Register;
    long     disp = $src2$$constant;

    if (Assembler::is_simm16(disp)) {
      __ daddiu(rd, rs, disp);
    } else {
      __ set64(AT, disp);
      __ daddu(rd, rs, AT);
    }
  %}
  ins_pipe( ialu_regI_imm16 );
%}
// Add Long Register with Register
instruct addL_Reg_Reg(mRegL dst, mRegL src1, mRegL src2) %{
  match(Set dst (AddL src1 src2));
  ins_cost(200);
  format %{ "ADD    $dst, $src1, $src2 #@addL_Reg_Reg\t" %}

  ins_encode %{
    // 64-bit add of two long registers.
    __ daddu(as_Register($dst$$reg), as_Register($src1$$reg), as_Register($src2$$reg));
  %}

  ins_pipe( ialu_regL_regL );
%}
instruct addL_Reg_imm(mRegL dst, mRegL src1, immL16 src2)
%{
  match(Set dst (AddL src1 src2));

  format %{ "ADD    $dst, $src1, $src2 #@addL_Reg_imm " %}
  ins_encode %{
    // 64-bit add of a long and a 16-bit-signed immediate (guaranteed to fit
    // by the immL16 operand), so a single daddiu is enough.
    int disp = $src2$$constant;
    __ daddiu(as_Register($dst$$reg), as_Register($src1$$reg), disp);
  %}

  ins_pipe( ialu_regL_regL );
%}
instruct addL_RegI2L_imm(mRegL dst, mRegI src1, immL16 src2)
%{
  match(Set dst (AddL (ConvI2L src1) src2));

  format %{ "ADD    $dst, $src1, $src2 #@addL_RegI2L_imm " %}
  ins_encode %{
    // (long)int + imm16.  The int source is presumably already sign-extended
    // in its 64-bit register, so the conversion is free and daddiu suffices.
    int disp = $src2$$constant;
    __ daddiu(as_Register($dst$$reg), as_Register($src1$$reg), disp);
  %}

  ins_pipe( ialu_regL_regL );
%}
instruct addL_RegI2L_Reg(mRegL dst, mRegI src1, mRegL src2) %{
  match(Set dst (AddL (ConvI2L src1) src2));
  ins_cost(200);
  format %{ "ADD    $dst, $src1, $src2 #@addL_RegI2L_Reg\t" %}

  ins_encode %{
    // (long)int + long; the int operand is presumably kept sign-extended, so
    // the widening add is a plain daddu.
    __ daddu(as_Register($dst$$reg), as_Register($src1$$reg), as_Register($src2$$reg));
  %}

  ins_pipe( ialu_regL_regL );
%}
instruct addL_RegI2L_RegI2L(mRegL dst, mRegI src1, mRegI src2) %{
  match(Set dst (AddL (ConvI2L src1) (ConvI2L src2)));
  ins_cost(200);
  format %{ "ADD    $dst, $src1, $src2 #@addL_RegI2L_RegI2L\t" %}

  ins_encode %{
    // (long)int + (long)int; both int operands are presumably sign-extended
    // already, so daddu computes the widened sum directly.
    __ daddu(as_Register($dst$$reg), as_Register($src1$$reg), as_Register($src2$$reg));
  %}

  ins_pipe( ialu_regL_regL );
%}
instruct addL_Reg_RegI2L(mRegL dst, mRegL src1, mRegI src2) %{
  match(Set dst (AddL src1 (ConvI2L src2)));
  ins_cost(200);
  format %{ "ADD    $dst, $src1, $src2 #@addL_Reg_RegI2L\t" %}

  ins_encode %{
    // long + (long)int; mirror of addL_RegI2L_Reg.
    __ daddu(as_Register($dst$$reg), as_Register($src1$$reg), as_Register($src2$$reg));
  %}

  ins_pipe( ialu_regL_regL );
%}
9800 //----------Subtraction Instructions-------------------------------------------
9801 // Integer Subtraction Instructions
instruct subI_Reg_Reg(mRegI dst, mRegI src1, mRegI src2) %{
  match(Set dst (SubI src1 src2));
  ins_cost(100);

  format %{ "sub    $dst, $src1, $src2 #@subI_Reg_Reg" %}
  ins_encode %{
    // 32-bit integer subtract.
    __ subu32($dst$$Register, $src1$$Register, $src2$$Register);
  %}
  ins_pipe( ialu_regI_regI );
%}
instruct subI_Reg_immI16_sub(mRegI dst, mRegI src1, immI16_sub src2) %{
  match(Set dst (SubI src1 src2));
  ins_cost(80);

  format %{ "sub    $dst, $src1, $src2 #@subI_Reg_immI16_sub" %}
  ins_encode %{
    // Subtract an immediate by adding its negation; the immI16_sub operand
    // guarantees the negated constant fits the addiu32 immediate field.
    __ addiu32($dst$$Register, $src1$$Register, -1 * $src2$$constant);
  %}
  ins_pipe( ialu_regI_regI );
%}
9829 instruct negI_Reg(mRegI dst, immI0 zero, mRegI src) %{
9830 match(Set dst (SubI zero src));
9831 ins_cost(80);
9833 format %{ "neg $dst, $src #@negI_Reg" %}
9834 ins_encode %{
9835 Register dst = $dst$$Register;
9836 Register src = $src$$Register;
9837 __ subu32(dst, R0, src);
9838 %}
9839 ins_pipe( ialu_regI_regI );
9840 %}
9842 instruct negL_Reg(mRegL dst, immL0 zero, mRegL src) %{
9843 match(Set dst (SubL zero src));
9844 ins_cost(80);
9846 format %{ "neg $dst, $src #@negL_Reg" %}
9847 ins_encode %{
9848 Register dst = $dst$$Register;
9849 Register src = $src$$Register;
9850 __ subu(dst, R0, src);
9851 %}
9852 ins_pipe( ialu_regI_regI );
9853 %}
9855 instruct subL_Reg_immL16_sub(mRegL dst, mRegL src1, immL16_sub src2) %{
9856 match(Set dst (SubL src1 src2));
9857 ins_cost(80);
9859 format %{ "sub $dst, $src1, $src2 #@subL_Reg_immL16_sub" %}
9860 ins_encode %{
9861 Register dst = $dst$$Register;
9862 Register src1 = $src1$$Register;
9863 __ daddiu(dst, src1, -1 * $src2$$constant);
9864 %}
9865 ins_pipe( ialu_regI_regI );
9866 %}
9868 // Subtract Long Register with Register.
// Subtract Long Register with Register.
instruct subL_Reg_Reg(mRegL dst, mRegL src1, mRegL src2) %{
  match(Set dst (SubL src1 src2));
  ins_cost(100);
  format %{ "SubL $dst, $src1, $src2 @ subL_Reg_Reg" %}
  ins_encode %{
    Register dst = as_Register($dst$$reg);
    Register src1 = as_Register($src1$$reg);
    Register src2 = as_Register($src2$$reg);

    __ subu(dst, src1, src2);
  %}
  ins_pipe( ialu_regL_regL );
%}

// SubL with the second operand coming from ConvI2L; the conversion is
// folded away (see NOTE on the addL ConvI2L variants about the
// sign-extension assumption).
instruct subL_Reg_RegI2L(mRegL dst, mRegL src1, mRegI src2) %{
  match(Set dst (SubL src1 (ConvI2L src2)));
  ins_cost(100);
  format %{ "SubL $dst, $src1, $src2 @ subL_Reg_RegI2L" %}
  ins_encode %{
    Register dst = as_Register($dst$$reg);
    Register src1 = as_Register($src1$$reg);
    Register src2 = as_Register($src2$$reg);

    __ subu(dst, src1, src2);
  %}
  ins_pipe( ialu_regL_regL );
%}

// SubL with the first operand coming from ConvI2L.
instruct subL_RegI2L_Reg(mRegL dst, mRegI src1, mRegL src2) %{
  match(Set dst (SubL (ConvI2L src1) src2));
  ins_cost(200);
  format %{ "SubL $dst, $src1, $src2 @ subL_RegI2L_Reg" %}
  ins_encode %{
    Register dst = as_Register($dst$$reg);
    Register src1 = as_Register($src1$$reg);
    Register src2 = as_Register($src2$$reg);

    __ subu(dst, src1, src2);
  %}
  ins_pipe( ialu_regL_regL );
%}

// SubL with both operands coming from ConvI2L.
instruct subL_RegI2L_RegI2L(mRegL dst, mRegI src1, mRegI src2) %{
  match(Set dst (SubL (ConvI2L src1) (ConvI2L src2)));
  ins_cost(200);
  format %{ "SubL $dst, $src1, $src2 @ subL_RegI2L_RegI2L" %}
  ins_encode %{
    Register dst = as_Register($dst$$reg);
    Register src1 = as_Register($src1$$reg);
    Register src2 = as_Register($src2$$reg);

    __ subu(dst, src1, src2);
  %}
  ins_pipe( ialu_regL_regL );
%}
9925 // Integer MOD with Register
9926 instruct modI_Reg_Reg(mRegI dst, mRegI src1, mRegI src2) %{
9927 match(Set dst (ModI src1 src2));
9928 ins_cost(300);
9929 format %{ "modi $dst, $src1, $src2 @ modI_Reg_Reg" %}
9930 ins_encode %{
9931 Register dst = $dst$$Register;
9932 Register src1 = $src1$$Register;
9933 Register src2 = $src2$$Register;
9935 //if (UseLoongsonISA) {
9936 if (0) {
9937 // 2016.08.10
9938 // Experiments show that gsmod is slower that div+mfhi.
9939 // So I just disable it here.
9940 __ gsmod(dst, src1, src2);
9941 } else {
9942 __ div(src1, src2);
9943 __ mfhi(dst);
9944 }
9945 %}
9947 //ins_pipe( ialu_mod );
9948 ins_pipe( ialu_regI_regI );
9949 %}
// Long MOD with Register.  Uses the Loongson gsdmod instruction when
// available, otherwise ddiv + mfhi (remainder is left in HI).
instruct modL_reg_reg(mRegL dst, mRegL src1, mRegL src2) %{
  match(Set dst (ModL src1 src2));
  format %{ "modL $dst, $src1, $src2 @modL_reg_reg" %}

  ins_encode %{
    Register dst = as_Register($dst$$reg);
    Register op1 = as_Register($src1$$reg);
    Register op2 = as_Register($src2$$reg);

    if (UseLoongsonISA) {
      __ gsdmod(dst, op1, op2);
    } else {
      __ ddiv(op1, op2);
      __ mfhi(dst);
    }
  %}
  ins_pipe( pipe_slow );
%}
// Integer multiply, register * register.
instruct mulI_Reg_Reg(mRegI dst, mRegI src1, mRegI src2) %{
  match(Set dst (MulI src1 src2));

  ins_cost(300);
  format %{ "mul $dst, $src1, $src2 @ mulI_Reg_Reg" %}
  ins_encode %{
    Register src1 = $src1$$Register;
    Register src2 = $src2$$Register;
    Register dst = $dst$$Register;

    __ mul(dst, src1, src2);
  %}
  ins_pipe( ialu_mult );
%}

// Integer multiply-add: dst = src1 * src2 + src3.
// Seeds LO with src3, then madd accumulates src1*src2 into HI:LO; the low
// 32 bits (mflo) are the wrapped int result.
instruct maddI_Reg_Reg(mRegI dst, mRegI src1, mRegI src2, mRegI src3) %{
  match(Set dst (AddI (MulI src1 src2) src3));

  ins_cost(999);
  format %{ "madd $dst, $src1 * $src2 + $src3 #@maddI_Reg_Reg" %}
  ins_encode %{
    Register src1 = $src1$$Register;
    Register src2 = $src2$$Register;
    Register src3 = $src3$$Register;
    Register dst = $dst$$Register;

    __ mtlo(src3);
    __ madd(src1, src2);
    __ mflo(dst);
  %}
  ins_pipe( ialu_mult );
%}
// Integer division, register / register.
instruct divI_Reg_Reg(mRegI dst, mRegI src1, mRegI src2) %{
  match(Set dst (DivI src1 src2));

  ins_cost(300);
  format %{ "div $dst, $src1, $src2 @ divI_Reg_Reg" %}
  ins_encode %{
    Register src1 = $src1$$Register;
    Register src2 = $src2$$Register;
    Register dst = $dst$$Register;

    /* 2012/4/21 Jin: In MIPS, div does not cause exception.
       We must trap an exception manually. */
    // teq traps when R0 == src2, i.e. on divide-by-zero (trap code 0x7).
    __ teq(R0, src2, 0x7);

    if (UseLoongsonISA) {
      __ gsdiv(dst, src1, src2);
    } else {
      __ div(src1, src2);

      // NOTE(review): the two nops before mflo look like a HI/LO hazard
      // workaround for older pipelines -- confirm whether they are still
      // required on supported CPUs.
      __ nop();
      __ nop();
      __ mflo(dst);
    }
  %}
  ins_pipe( ialu_mod );
%}
// Single-precision float division.
instruct divF_Reg_Reg(regF dst, regF src1, regF src2) %{
  match(Set dst (DivF src1 src2));

  ins_cost(300);
  format %{ "divF $dst, $src1, $src2 @ divF_Reg_Reg" %}
  ins_encode %{
    FloatRegister src1 = $src1$$FloatRegister;
    FloatRegister src2 = $src2$$FloatRegister;
    FloatRegister dst = $dst$$FloatRegister;

    /* Here do we need to trap an exception manually ? */
    // (FP divide-by-zero yields IEEE infinity/NaN; no trap is emitted.)
    __ div_s(dst, src1, src2);
  %}
  ins_pipe( pipe_slow );
%}

// Double-precision float division.
instruct divD_Reg_Reg(regD dst, regD src1, regD src2) %{
  match(Set dst (DivD src1 src2));

  ins_cost(300);
  format %{ "divD $dst, $src1, $src2 @ divD_Reg_Reg" %}
  ins_encode %{
    FloatRegister src1 = $src1$$FloatRegister;
    FloatRegister src2 = $src2$$FloatRegister;
    FloatRegister dst = $dst$$FloatRegister;

    /* Here do we need to trap an exception manually ? */
    __ div_d(dst, src1, src2);
  %}
  ins_pipe( pipe_slow );
%}
// Long multiply.  Loongson gsdmult produces the low 64 bits directly;
// otherwise dmult + mflo.
instruct mulL_reg_reg(mRegL dst, mRegL src1, mRegL src2) %{
  match(Set dst (MulL src1 src2));
  format %{ "mulL $dst, $src1, $src2 @mulL_reg_reg" %}
  ins_encode %{
    Register dst = as_Register($dst$$reg);
    Register op1 = as_Register($src1$$reg);
    Register op2 = as_Register($src2$$reg);

    if (UseLoongsonISA) {
      __ gsdmult(dst, op1, op2);
    } else {
      __ dmult(op1, op2);
      __ mflo(dst);
    }
  %}
  ins_pipe( pipe_slow );
%}

// Long multiply with the second operand coming from ConvI2L (conversion
// folded away; see the NOTE on the addL ConvI2L variants).
instruct mulL_reg_regI2L(mRegL dst, mRegL src1, mRegI src2) %{
  match(Set dst (MulL src1 (ConvI2L src2)));
  format %{ "mulL $dst, $src1, $src2 @mulL_reg_regI2L" %}
  ins_encode %{
    Register dst = as_Register($dst$$reg);
    Register op1 = as_Register($src1$$reg);
    Register op2 = as_Register($src2$$reg);

    if (UseLoongsonISA) {
      __ gsdmult(dst, op1, op2);
    } else {
      __ dmult(op1, op2);
      __ mflo(dst);
    }
  %}
  ins_pipe( pipe_slow );
%}

// Long division.  NOTE(review): unlike divI_Reg_Reg, no explicit
// divide-by-zero teq trap is emitted here -- confirm the zero check is
// done elsewhere for DivL.
instruct divL_reg_reg(mRegL dst, mRegL src1, mRegL src2) %{
  match(Set dst (DivL src1 src2));
  format %{ "divL $dst, $src1, $src2 @divL_reg_reg" %}

  ins_encode %{
    Register dst = as_Register($dst$$reg);
    Register op1 = as_Register($src1$$reg);
    Register op2 = as_Register($src2$$reg);

    if (UseLoongsonISA) {
      __ gsddiv(dst, op1, op2);
    } else {
      __ ddiv(op1, op2);
      __ mflo(dst);
    }
  %}
  ins_pipe( pipe_slow );
%}
// Single-precision float add.
instruct addF_reg_reg(regF dst, regF src1, regF src2) %{
  match(Set dst (AddF src1 src2));
  format %{ "AddF $dst, $src1, $src2 @addF_reg_reg" %}
  ins_encode %{
    FloatRegister src1 = as_FloatRegister($src1$$reg);
    FloatRegister src2 = as_FloatRegister($src2$$reg);
    FloatRegister dst = as_FloatRegister($dst$$reg);

    __ add_s(dst, src1, src2);
  %}
  ins_pipe( fpu_regF_regF );
%}

// Single-precision float subtract.
instruct subF_reg_reg(regF dst, regF src1, regF src2) %{
  match(Set dst (SubF src1 src2));
  format %{ "SubF $dst, $src1, $src2 @subF_reg_reg" %}
  ins_encode %{
    FloatRegister src1 = as_FloatRegister($src1$$reg);
    FloatRegister src2 = as_FloatRegister($src2$$reg);
    FloatRegister dst = as_FloatRegister($dst$$reg);

    __ sub_s(dst, src1, src2);
  %}
  ins_pipe( fpu_regF_regF );
%}
// Double-precision float add.
instruct addD_reg_reg(regD dst, regD src1, regD src2) %{
  match(Set dst (AddD src1 src2));
  format %{ "AddD $dst, $src1, $src2 @addD_reg_reg" %}
  ins_encode %{
    FloatRegister src1 = as_FloatRegister($src1$$reg);
    FloatRegister src2 = as_FloatRegister($src2$$reg);
    FloatRegister dst = as_FloatRegister($dst$$reg);

    __ add_d(dst, src1, src2);
  %}
  ins_pipe( fpu_regF_regF );
%}

// Double-precision float subtract.
instruct subD_reg_reg(regD dst, regD src1, regD src2) %{
  match(Set dst (SubD src1 src2));
  format %{ "SubD $dst, $src1, $src2 @subD_reg_reg" %}
  ins_encode %{
    FloatRegister src1 = as_FloatRegister($src1$$reg);
    FloatRegister src2 = as_FloatRegister($src2$$reg);
    FloatRegister dst = as_FloatRegister($dst$$reg);

    __ sub_d(dst, src1, src2);
  %}
  ins_pipe( fpu_regF_regF );
%}

// Single-precision float negate (sign-bit flip via neg.s).
instruct negF_reg(regF dst, regF src) %{
  match(Set dst (NegF src));
  format %{ "negF $dst, $src @negF_reg" %}
  ins_encode %{
    FloatRegister src = as_FloatRegister($src$$reg);
    FloatRegister dst = as_FloatRegister($dst$$reg);

    __ neg_s(dst, src);
  %}
  ins_pipe( fpu_regF_regF );
%}

// Double-precision float negate.
instruct negD_reg(regD dst, regD src) %{
  match(Set dst (NegD src));
  format %{ "negD $dst, $src @negD_reg" %}
  ins_encode %{
    FloatRegister src = as_FloatRegister($src$$reg);
    FloatRegister dst = as_FloatRegister($dst$$reg);

    __ neg_d(dst, src);
  %}
  ins_pipe( fpu_regF_regF );
%}
// Single-precision float multiply.
instruct mulF_reg_reg(regF dst, regF src1, regF src2) %{
  match(Set dst (MulF src1 src2));
  format %{ "MULF $dst, $src1, $src2 @mulF_reg_reg" %}
  ins_encode %{
    FloatRegister src1 = $src1$$FloatRegister;
    FloatRegister src2 = $src2$$FloatRegister;
    FloatRegister dst = $dst$$FloatRegister;

    __ mul_s(dst, src1, src2);
  %}
  ins_pipe( fpu_regF_regF );
%}

// Fused single-precision multiply-add: dst = src1*src2 + src3.
// The huge cost effectively disables the rule; see comment below.
instruct maddF_reg_reg(regF dst, regF src1, regF src2, regF src3) %{
  match(Set dst (AddF (MulF src1 src2) src3));
  // For compatibility reason (e.g. on the Loongson platform), disable this guy.
  ins_cost(44444);
  format %{ "maddF $dst, $src1, $src2, $src3 @maddF_reg_reg" %}
  ins_encode %{
    FloatRegister src1 = $src1$$FloatRegister;
    FloatRegister src2 = $src2$$FloatRegister;
    FloatRegister src3 = $src3$$FloatRegister;
    FloatRegister dst = $dst$$FloatRegister;

    __ madd_s(dst, src1, src2, src3);
  %}
  ins_pipe( fpu_regF_regF );
%}

// Mul two double precision floating piont number
instruct mulD_reg_reg(regD dst, regD src1, regD src2) %{
  match(Set dst (MulD src1 src2));
  format %{ "MULD $dst, $src1, $src2 @mulD_reg_reg" %}
  ins_encode %{
    FloatRegister src1 = $src1$$FloatRegister;
    FloatRegister src2 = $src2$$FloatRegister;
    FloatRegister dst = $dst$$FloatRegister;

    __ mul_d(dst, src1, src2);
  %}
  ins_pipe( fpu_regF_regF );
%}

// Fused double-precision multiply-add, likewise disabled via cost.
instruct maddD_reg_reg(regD dst, regD src1, regD src2, regD src3) %{
  match(Set dst (AddD (MulD src1 src2) src3));
  // For compatibility reason (e.g. on the Loongson platform), disable this guy.
  ins_cost(44444);
  format %{ "maddD $dst, $src1, $src2, $src3 @maddD_reg_reg" %}
  ins_encode %{
    FloatRegister src1 = $src1$$FloatRegister;
    FloatRegister src2 = $src2$$FloatRegister;
    FloatRegister src3 = $src3$$FloatRegister;
    FloatRegister dst = $dst$$FloatRegister;

    __ madd_d(dst, src1, src2, src3);
  %}
  ins_pipe( fpu_regF_regF );
%}
// Single-precision float absolute value.
instruct absF_reg(regF dst, regF src) %{
  match(Set dst (AbsF src));
  ins_cost(100);
  format %{ "absF $dst, $src @absF_reg" %}
  ins_encode %{
    FloatRegister src = as_FloatRegister($src$$reg);
    FloatRegister dst = as_FloatRegister($dst$$reg);

    __ abs_s(dst, src);
  %}
  ins_pipe( fpu_regF_regF );
%}


// intrinsics for math_native.
// AbsD SqrtD CosD SinD TanD LogD Log10D

// Double-precision float absolute value.
instruct absD_reg(regD dst, regD src) %{
  match(Set dst (AbsD src));
  ins_cost(100);
  format %{ "absD $dst, $src @absD_reg" %}
  ins_encode %{
    FloatRegister src = as_FloatRegister($src$$reg);
    FloatRegister dst = as_FloatRegister($dst$$reg);

    __ abs_d(dst, src);
  %}
  ins_pipe( fpu_regF_regF );
%}

// Double-precision square root.
instruct sqrtD_reg(regD dst, regD src) %{
  match(Set dst (SqrtD src));
  ins_cost(100);
  format %{ "SqrtD $dst, $src @sqrtD_reg" %}
  ins_encode %{
    FloatRegister src = as_FloatRegister($src$$reg);
    FloatRegister dst = as_FloatRegister($dst$$reg);

    __ sqrt_d(dst, src);
  %}
  ins_pipe( fpu_regF_regF );
%}

// Single-precision sqrt, matched from the F->D->sqrt->F idiom the
// compiler emits for Math.sqrt on floats; a direct sqrt.s is equivalent.
instruct sqrtF_reg(regF dst, regF src) %{
  match(Set dst (ConvD2F (SqrtD (ConvF2D src))));
  ins_cost(100);
  format %{ "SqrtF $dst, $src @sqrtF_reg" %}
  ins_encode %{
    FloatRegister src = as_FloatRegister($src$$reg);
    FloatRegister dst = as_FloatRegister($dst$$reg);

    __ sqrt_s(dst, src);
  %}
  ins_pipe( fpu_regF_regF );
%}
10307 //----------------------------------Logical Instructions----------------------
10308 //__________________________________Integer Logical Instructions-------------
10310 //And Instuctions
10311 // And Register with Immediate
// And Register with arbitrary Immediate: materialize the constant in AT,
// then register-register AND.
instruct andI_Reg_immI(mRegI dst, mRegI src1, immI src2) %{
  match(Set dst (AndI src1 src2));

  format %{ "and $dst, $src1, $src2 #@andI_Reg_immI" %}
  ins_encode %{
    Register dst = $dst$$Register;
    Register src = $src1$$Register;
    int val = $src2$$constant;

    __ move(AT, val);
    __ andr(dst, src, AT);
  %}
  ins_pipe( ialu_regI_regI );
%}

// AND with an unsigned 16-bit immediate: single andi instruction.
instruct andI_Reg_imm_0_65535(mRegI dst, mRegI src1, immI_0_65535 src2) %{
  match(Set dst (AndI src1 src2));
  ins_cost(60);

  format %{ "and $dst, $src1, $src2 #@andI_Reg_imm_0_65535" %}
  ins_encode %{
    Register dst = $dst$$Register;
    Register src = $src1$$Register;
    int val = $src2$$constant;

    __ andi(dst, src, val);
  %}
  ins_pipe( ialu_regI_regI );
%}

// AND with a contiguous low-bit mask (2^n - 1): use the ext bit-field
// extract instead of a mask constant.
instruct andI_Reg_immI_nonneg_mask(mRegI dst, mRegI src1, immI_nonneg_mask mask) %{
  match(Set dst (AndI src1 mask));
  ins_cost(60);

  format %{ "and $dst, $src1, $mask #@andI_Reg_immI_nonneg_mask" %}
  ins_encode %{
    Register dst = $dst$$Register;
    Register src = $src1$$Register;
    // is_int_mask returns the width n of the mask (2^n - 1).
    int size = Assembler::is_int_mask($mask$$constant);

    __ ext(dst, src, 0, size);
  %}
  ins_pipe( ialu_regI_regI );
%}

// Long AND with a contiguous low-bit mask: dext bit-field extract.
instruct andL_Reg_immL_nonneg_mask(mRegL dst, mRegL src1, immL_nonneg_mask mask) %{
  match(Set dst (AndL src1 mask));
  ins_cost(60);

  format %{ "and $dst, $src1, $mask #@andL_Reg_immL_nonneg_mask" %}
  ins_encode %{
    Register dst = $dst$$Register;
    Register src = $src1$$Register;
    int size = Assembler::is_jlong_mask($mask$$constant);

    __ dext(dst, src, 0, size);
  %}
  ins_pipe( ialu_regI_regI );
%}

// XOR with an unsigned 16-bit immediate: single xori.
instruct xorI_Reg_imm_0_65535(mRegI dst, mRegI src1, immI_0_65535 src2) %{
  match(Set dst (XorI src1 src2));
  ins_cost(60);

  format %{ "xori $dst, $src1, $src2 #@xorI_Reg_imm_0_65535" %}
  ins_encode %{
    Register dst = $dst$$Register;
    Register src = $src1$$Register;
    int val = $src2$$constant;

    __ xori(dst, src, val);
  %}
  ins_pipe( ialu_regI_regI );
%}

// XOR with -1 (bitwise NOT): gsorn(dst, R0, src) computes R0 | ~src == ~src.
instruct xorI_Reg_immI_M1(mRegI dst, mRegI src1, immI_M1 M1) %{
  match(Set dst (XorI src1 M1));
  predicate(UseLoongsonISA && Use3A2000);
  ins_cost(60);

  format %{ "xor $dst, $src1, $M1 #@xorI_Reg_immI_M1" %}
  ins_encode %{
    Register dst = $dst$$Register;
    Register src = $src1$$Register;

    __ gsorn(dst, R0, src);
  %}
  ins_pipe( ialu_regI_regI );
%}

// Same NOT idiom where the input is ConvL2I of a long register.
instruct xorL2I_Reg_immI_M1(mRegI dst, mRegL src1, immI_M1 M1) %{
  match(Set dst (XorI (ConvL2I src1) M1));
  predicate(UseLoongsonISA && Use3A2000);
  ins_cost(60);

  format %{ "xor $dst, $src1, $M1 #@xorL2I_Reg_immI_M1" %}
  ins_encode %{
    Register dst = $dst$$Register;
    Register src = $src1$$Register;

    __ gsorn(dst, R0, src);
  %}
  ins_pipe( ialu_regI_regI );
%}

// Long XOR with an unsigned 16-bit immediate: single xori.
instruct xorL_Reg_imm_0_65535(mRegL dst, mRegL src1, immL_0_65535 src2) %{
  match(Set dst (XorL src1 src2));
  ins_cost(60);

  format %{ "xori $dst, $src1, $src2 #@xorL_Reg_imm_0_65535" %}
  ins_encode %{
    Register dst = $dst$$Register;
    Register src = $src1$$Register;
    int val = $src2$$constant;

    __ xori(dst, src, val);
  %}
  ins_pipe( ialu_regI_regI );
%}
10432 /*
10433 instruct xorL_Reg_immL_M1(mRegL dst, mRegL src1, immL_M1 M1) %{
10434 match(Set dst (XorL src1 M1));
10435 predicate(UseLoongsonISA);
10436 ins_cost(60);
10438 format %{ "xor $dst, $src1, $M1 #@xorL_Reg_immL_M1" %}
10439 ins_encode %{
10440 Register dst = $dst$$Register;
10441 Register src = $src1$$Register;
10443 __ gsorn(dst, R0, src);
10444 %}
10445 ins_pipe( ialu_regI_regI );
10446 %}
10447 */
// (LoadB & 0xff) with the mask on the left: fold into a single unsigned
// byte load.  The encoding is load_UB_enc (lbu), so the format text must
// say "lbu" -- the previous "lhu" was a copy-paste error from a halfword
// rule and produced misleading disassembly.
instruct lbu_and_lmask(mRegI dst, memory mem, immI_255 mask) %{
  match(Set dst (AndI mask (LoadB mem)));
  ins_cost(60);

  format %{ "lbu $dst, $mem #@lbu_and_lmask" %}
  ins_encode(load_UB_enc(dst, mem));
  ins_pipe( ialu_loadI );
%}
// (LoadB & 0xff) with the mask on the right: same single lbu fold as
// lbu_and_lmask.  Format text fixed from "lhu" to "lbu" to match the
// load_UB_enc encoding (unsigned byte load, not halfword).
instruct lbu_and_rmask(mRegI dst, memory mem, immI_255 mask) %{
  match(Set dst (AndI (LoadB mem) mask));
  ins_cost(60);

  format %{ "lbu $dst, $mem #@lbu_and_rmask" %}
  ins_encode(load_UB_enc(dst, mem));
  ins_pipe( ialu_loadI );
%}
// Integer AND, register-register.
instruct andI_Reg_Reg(mRegI dst, mRegI src1, mRegI src2) %{
  match(Set dst (AndI src1 src2));

  format %{ "and $dst, $src1, $src2 #@andI_Reg_Reg" %}
  ins_encode %{
    Register dst = $dst$$Register;
    Register src1 = $src1$$Register;
    Register src2 = $src2$$Register;
    __ andr(dst, src1, src2);
  %}
  ins_pipe( ialu_regI_regI );
%}

// src1 & ~src2, matched from AndI(src1, XorI(src2, -1)); Loongson gsandn.
instruct andnI_Reg_nReg(mRegI dst, mRegI src1, mRegI src2, immI_M1 M1) %{
  match(Set dst (AndI src1 (XorI src2 M1)));
  predicate(UseLoongsonISA && Use3A2000);

  format %{ "andn $dst, $src1, $src2 #@andnI_Reg_nReg" %}
  ins_encode %{
    Register dst = $dst$$Register;
    Register src1 = $src1$$Register;
    Register src2 = $src2$$Register;

    __ gsandn(dst, src1, src2);
  %}
  ins_pipe( ialu_regI_regI );
%}

// src1 | ~src2, matched from OrI(src1, XorI(src2, -1)); Loongson gsorn.
instruct ornI_Reg_nReg(mRegI dst, mRegI src1, mRegI src2, immI_M1 M1) %{
  match(Set dst (OrI src1 (XorI src2 M1)));
  predicate(UseLoongsonISA && Use3A2000);

  format %{ "orn $dst, $src1, $src2 #@ornI_Reg_nReg" %}
  ins_encode %{
    Register dst = $dst$$Register;
    Register src1 = $src1$$Register;
    Register src2 = $src2$$Register;

    __ gsorn(dst, src1, src2);
  %}
  ins_pipe( ialu_regI_regI );
%}

// ~src1 & src2: same gsandn with operands swapped (AND commutes).
instruct andnI_nReg_Reg(mRegI dst, mRegI src1, mRegI src2, immI_M1 M1) %{
  match(Set dst (AndI (XorI src1 M1) src2));
  predicate(UseLoongsonISA && Use3A2000);

  format %{ "andn $dst, $src2, $src1 #@andnI_nReg_Reg" %}
  ins_encode %{
    Register dst = $dst$$Register;
    Register src1 = $src1$$Register;
    Register src2 = $src2$$Register;

    __ gsandn(dst, src2, src1);
  %}
  ins_pipe( ialu_regI_regI );
%}

// ~src1 | src2: gsorn with operands swapped (OR commutes).
instruct ornI_nReg_Reg(mRegI dst, mRegI src1, mRegI src2, immI_M1 M1) %{
  match(Set dst (OrI (XorI src1 M1) src2));
  predicate(UseLoongsonISA && Use3A2000);

  format %{ "orn $dst, $src2, $src1 #@ornI_nReg_Reg" %}
  ins_encode %{
    Register dst = $dst$$Register;
    Register src1 = $src1$$Register;
    Register src2 = $src2$$Register;

    __ gsorn(dst, src2, src1);
  %}
  ins_pipe( ialu_regI_regI );
%}
10540 // And Long Register with Register
// And Long Register with Register
instruct andL_Reg_Reg(mRegL dst, mRegL src1, mRegL src2) %{
  match(Set dst (AndL src1 src2));
  format %{ "AND $dst, $src1, $src2 @ andL_Reg_Reg\n\t" %}
  ins_encode %{
    Register dst_reg = as_Register($dst$$reg);
    Register src1_reg = as_Register($src1$$reg);
    Register src2_reg = as_Register($src2$$reg);

    __ andr(dst_reg, src1_reg, src2_reg);
  %}
  ins_pipe( ialu_regL_regL );
%}

// Long AND with a ConvI2L second operand (conversion folded away).
instruct andL_Reg_Reg_convI2L(mRegL dst, mRegL src1, mRegI src2) %{
  match(Set dst (AndL src1 (ConvI2L src2)));
  format %{ "AND $dst, $src1, $src2 @ andL_Reg_Reg_convI2L\n\t" %}
  ins_encode %{
    Register dst_reg = as_Register($dst$$reg);
    Register src1_reg = as_Register($src1$$reg);
    Register src2_reg = as_Register($src2$$reg);

    __ andr(dst_reg, src1_reg, src2_reg);
  %}
  ins_pipe( ialu_regL_regL );
%}

// Long AND with an unsigned 16-bit immediate: single andi (zero-extends,
// which matches the semantics of masking with a 0..65535 constant).
instruct andL_Reg_imm_0_65535(mRegL dst, mRegL src1, immL_0_65535 src2) %{
  match(Set dst (AndL src1 src2));
  ins_cost(60);

  format %{ "and $dst, $src1, $src2 #@andL_Reg_imm_0_65535" %}
  ins_encode %{
    Register dst = $dst$$Register;
    Register src = $src1$$Register;
    long val = $src2$$constant;

    __ andi(dst, src, val);
  %}
  ins_pipe( ialu_regI_regI );
%}

// ConvL2I(AndL src1, imm16): the mask already clears the high bits, so the
// narrowing is free and the same andi works.
instruct andL2I_Reg_imm_0_65535(mRegI dst, mRegL src1, immL_0_65535 src2) %{
  match(Set dst (ConvL2I (AndL src1 src2)));
  ins_cost(60);

  format %{ "and $dst, $src1, $src2 #@andL2I_Reg_imm_0_65535" %}
  ins_encode %{
    Register dst = $dst$$Register;
    Register src = $src1$$Register;
    long val = $src2$$constant;

    __ andi(dst, src, val);
  %}
  ins_pipe( ialu_regI_regI );
%}
10597 /*
10598 instruct andnL_Reg_nReg(mRegL dst, mRegL src1, mRegL src2, immL_M1 M1) %{
10599 match(Set dst (AndL src1 (XorL src2 M1)));
10600 predicate(UseLoongsonISA);
10602 format %{ "andn $dst, $src1, $src2 #@andnL_Reg_nReg" %}
10603 ins_encode %{
10604 Register dst = $dst$$Register;
10605 Register src1 = $src1$$Register;
10606 Register src2 = $src2$$Register;
10608 __ gsandn(dst, src1, src2);
10609 %}
10610 ins_pipe( ialu_regI_regI );
10611 %}
10612 */
10614 /*
10615 instruct ornL_Reg_nReg(mRegL dst, mRegL src1, mRegL src2, immL_M1 M1) %{
10616 match(Set dst (OrL src1 (XorL src2 M1)));
10617 predicate(UseLoongsonISA);
10619 format %{ "orn $dst, $src1, $src2 #@ornL_Reg_nReg" %}
10620 ins_encode %{
10621 Register dst = $dst$$Register;
10622 Register src1 = $src1$$Register;
10623 Register src2 = $src2$$Register;
10625 __ gsorn(dst, src1, src2);
10626 %}
10627 ins_pipe( ialu_regI_regI );
10628 %}
10629 */
10631 /*
10632 instruct andnL_nReg_Reg(mRegL dst, mRegL src1, mRegL src2, immL_M1 M1) %{
10633 match(Set dst (AndL (XorL src1 M1) src2));
10634 predicate(UseLoongsonISA);
10636 format %{ "andn $dst, $src2, $src1 #@andnL_nReg_Reg" %}
10637 ins_encode %{
10638 Register dst = $dst$$Register;
10639 Register src1 = $src1$$Register;
10640 Register src2 = $src2$$Register;
10642 __ gsandn(dst, src2, src1);
10643 %}
10644 ins_pipe( ialu_regI_regI );
10645 %}
10646 */
10648 /*
10649 instruct ornL_nReg_Reg(mRegL dst, mRegL src1, mRegL src2, immL_M1 M1) %{
10650 match(Set dst (OrL (XorL src1 M1) src2));
10651 predicate(UseLoongsonISA);
10653 format %{ "orn $dst, $src2, $src1 #@ornL_nReg_Reg" %}
10654 ins_encode %{
10655 Register dst = $dst$$Register;
10656 Register src1 = $src1$$Register;
10657 Register src2 = $src2$$Register;
10659 __ gsorn(dst, src2, src1);
10660 %}
10661 ins_pipe( ialu_regI_regI );
10662 %}
10663 */
// The following rules fold `dst &= <negative mask>` into a single dins
// (doubleword insert) of zeros over the bits the mask clears:
//   -8   = ~0b111      -> clear bits 0..2  -> dins(dst, R0, 0, 3)
//   -5   = ~0b100      -> clear bit  2     -> dins(dst, R0, 2, 1)
//   -7   = ~0b110      -> clear bits 1..2  -> dins(dst, R0, 1, 2)
//   -4   = ~0b11       -> clear bits 0..1  -> dins(dst, R0, 0, 2)
//   -121 = ~0b1111000  -> clear bits 3..6  -> dins(dst, R0, 3, 4)
instruct andL_Reg_immL_M8(mRegL dst, immL_M8 M8) %{
  match(Set dst (AndL dst M8));
  ins_cost(60);

  format %{ "and $dst, $dst, $M8 #@andL_Reg_immL_M8" %}
  ins_encode %{
    Register dst = $dst$$Register;

    __ dins(dst, R0, 0, 3);
  %}
  ins_pipe( ialu_regI_regI );
%}

instruct andL_Reg_immL_M5(mRegL dst, immL_M5 M5) %{
  match(Set dst (AndL dst M5));
  ins_cost(60);

  format %{ "and $dst, $dst, $M5 #@andL_Reg_immL_M5" %}
  ins_encode %{
    Register dst = $dst$$Register;

    __ dins(dst, R0, 2, 1);
  %}
  ins_pipe( ialu_regI_regI );
%}

instruct andL_Reg_immL_M7(mRegL dst, immL_M7 M7) %{
  match(Set dst (AndL dst M7));
  ins_cost(60);

  format %{ "and $dst, $dst, $M7 #@andL_Reg_immL_M7" %}
  ins_encode %{
    Register dst = $dst$$Register;

    __ dins(dst, R0, 1, 2);
  %}
  ins_pipe( ialu_regI_regI );
%}

instruct andL_Reg_immL_M4(mRegL dst, immL_M4 M4) %{
  match(Set dst (AndL dst M4));
  ins_cost(60);

  format %{ "and $dst, $dst, $M4 #@andL_Reg_immL_M4" %}
  ins_encode %{
    Register dst = $dst$$Register;

    __ dins(dst, R0, 0, 2);
  %}
  ins_pipe( ialu_regI_regI );
%}

instruct andL_Reg_immL_M121(mRegL dst, immL_M121 M121) %{
  match(Set dst (AndL dst M121));
  ins_cost(60);

  format %{ "and $dst, $dst, $M121 #@andL_Reg_immL_M121" %}
  ins_encode %{
    Register dst = $dst$$Register;

    __ dins(dst, R0, 3, 4);
  %}
  ins_pipe( ialu_regI_regI );
%}
10730 // Or Long Register with Register
// Long OR, register-register.
instruct orL_Reg_Reg(mRegL dst, mRegL src1, mRegL src2) %{
  match(Set dst (OrL src1 src2));
  format %{ "OR $dst, $src1, $src2 @ orL_Reg_Reg\t" %}
  ins_encode %{
    Register dst_reg = $dst$$Register;
    Register src1_reg = $src1$$Register;
    Register src2_reg = $src2$$Register;

    __ orr(dst_reg, src1_reg, src2_reg);
  %}
  ins_pipe( ialu_regL_regL );
%}

// Long OR where the first operand is a pointer reinterpreted as a long
// (CastP2X); same orr, no conversion code needed.
instruct orL_Reg_P2XReg(mRegL dst, mRegP src1, mRegL src2) %{
  match(Set dst (OrL (CastP2X src1) src2));
  format %{ "OR $dst, $src1, $src2 @ orL_Reg_P2XReg\t" %}
  ins_encode %{
    Register dst_reg = $dst$$Register;
    Register src1_reg = $src1$$Register;
    Register src2_reg = $src2$$Register;

    __ orr(dst_reg, src1_reg, src2_reg);
  %}
  ins_pipe( ialu_regL_regL );
%}

// Long XOR, register-register.
instruct xorL_Reg_Reg(mRegL dst, mRegL src1, mRegL src2) %{
  match(Set dst (XorL src1 src2));
  format %{ "XOR $dst, $src1, $src2 @ xorL_Reg_Reg\t" %}
  ins_encode %{
    Register dst_reg = as_Register($dst$$reg);
    Register src1_reg = as_Register($src1$$reg);
    Register src2_reg = as_Register($src2$$reg);

    __ xorr(dst_reg, src1_reg, src2_reg);
  %}
  ins_pipe( ialu_regL_regL );
%}
10771 // Shift Left by 8-bit immediate
// Shift Left by 8-bit immediate
// NOTE(review): sll only encodes a 5-bit shift amount; presumably immI8 is
// always reduced mod 32 by the matcher or by sll itself -- confirm.
instruct salI_Reg_imm(mRegI dst, mRegI src, immI8 shift) %{
  match(Set dst (LShiftI src shift));

  format %{ "SHL $dst, $src, $shift #@salI_Reg_imm" %}
  ins_encode %{
    Register src = $src$$Register;
    Register dst = $dst$$Register;
    int shamt = $shift$$constant;

    __ sll(dst, src, shamt);
  %}
  ins_pipe( ialu_regI_regI );
%}

// LShiftI of ConvL2I(src): sll ignores the upper word, so the narrowing
// is free.
instruct salL2I_Reg_imm(mRegI dst, mRegL src, immI8 shift) %{
  match(Set dst (LShiftI (ConvL2I src) shift));

  format %{ "SHL $dst, $src, $shift #@salL2I_Reg_imm" %}
  ins_encode %{
    Register src = $src$$Register;
    Register dst = $dst$$Register;
    int shamt = $shift$$constant;

    __ sll(dst, src, shamt);
  %}
  ins_pipe( ialu_regI_regI );
%}

// (src << 16) & 0xffff0000: the mask is implied by the 16-bit shift, so a
// bare sll by 16 suffices.
instruct salI_Reg_imm_and_M65536(mRegI dst, mRegI src, immI_16 shift, immI_M65536 mask) %{
  match(Set dst (AndI (LShiftI src shift) mask));

  format %{ "SHL $dst, $src, $shift #@salI_Reg_imm_and_M65536" %}
  ins_encode %{
    Register src = $src$$Register;
    Register dst = $dst$$Register;

    __ sll(dst, src, 16);
  %}
  ins_pipe( ialu_regI_regI );
%}
// ((int)(src & 7) << 16) >> 16: the value is at most 7, so the
// sign-extend-through-16-bits round trip is a no-op and a single andi
// suffices.
instruct land7_2_s(mRegI dst, mRegL src, immL7 seven, immI_16 sixteen)
%{
  match(Set dst (RShiftI (LShiftI (ConvL2I (AndL src seven)) sixteen) sixteen));

  format %{ "andi $dst, $src, 7\t# @land7_2_s" %}
  ins_encode %{
    Register src = $src$$Register;
    Register dst = $dst$$Register;

    __ andi(dst, src, 7);
  %}
  ins_pipe(ialu_regI_regI);
%}

// ((src1 | imm) << 16) >> 16 with imm in 0..32767: the OR result's low 16
// bits are what survives, so a plain ori implements the whole idiom.
// NOTE(review): this assumes src1's bits above 15 don't affect the result
// after the <<16 >>16 round trip, which holds since they are discarded.
instruct ori2s(mRegI dst, mRegI src1, immI_0_32767 src2, immI_16 sixteen)
%{
  match(Set dst (RShiftI (LShiftI (OrI src1 src2) sixteen) sixteen));

  format %{ "ori $dst, $src1, $src2\t# @ori2s" %}
  ins_encode %{
    Register src = $src1$$Register;
    int val = $src2$$constant;
    Register dst = $dst$$Register;

    __ ori(dst, src, val);
  %}
  ins_pipe(ialu_regI_regI);
%}

// Logical Shift Right by 16, followed by Arithmetic Shift Left by 16.
// This idiom is used by the compiler the i2s bytecode.
// seh sign-extends the low halfword in one instruction.
instruct i2s(mRegI dst, mRegI src, immI_16 sixteen)
%{
  match(Set dst (RShiftI (LShiftI src sixteen) sixteen));

  format %{ "i2s $dst, $src\t# @i2s" %}
  ins_encode %{
    Register src = $src$$Register;
    Register dst = $dst$$Register;

    __ seh(dst, src);
  %}
  ins_pipe(ialu_regI_regI);
%}

// Logical Shift Right by 24, followed by Arithmetic Shift Left by 24.
// This idiom is used by the compiler for the i2b bytecode.
// seb sign-extends the low byte in one instruction.
instruct i2b(mRegI dst, mRegI src, immI_24 twentyfour)
%{
  match(Set dst (RShiftI (LShiftI src twentyfour) twentyfour));

  format %{ "i2b $dst, $src\t# @i2b" %}
  ins_encode %{
    Register src = $src$$Register;
    Register dst = $dst$$Register;

    __ seb(dst, src);
  %}
  ins_pipe(ialu_regI_regI);
%}
// NOTE(review): this rule matches the exact same pattern as
// salL2I_Reg_imm above (LShiftI (ConvL2I src) shift) -- it looks like a
// duplicate; one of the two is dead. Confirm with ADLC output before
// removing either.
instruct salI_RegL2I_imm(mRegI dst, mRegL src, immI8 shift) %{
  match(Set dst (LShiftI (ConvL2I src) shift));

  format %{ "SHL $dst, $src, $shift #@salI_RegL2I_imm" %}
  ins_encode %{
    Register src = $src$$Register;
    Register dst = $dst$$Register;
    int shamt = $shift$$constant;

    __ sll(dst, src, shamt);
  %}
  ins_pipe( ialu_regI_regI );
%}

// Shift left by a register amount (sllv uses the low 5 bits of shamt).
instruct salI_Reg_Reg(mRegI dst, mRegI src, mRegI shift) %{
  match(Set dst (LShiftI src shift));

  format %{ "SHL $dst, $src, $shift #@salI_Reg_Reg" %}
  ins_encode %{
    Register src = $src$$Register;
    Register dst = $dst$$Register;
    Register shamt = $shift$$Register;
    __ sllv(dst, src, shamt);
  %}
  ins_pipe( ialu_regI_regI );
%}
10904 // Shift Left Long
// Shift Left Long by immediate.  The shift amount is reduced mod 64
// (Assembler::low(shamt, 6)); dsll handles 0..31, dsll32 handles 32..63.
// NOTE(review): the leading is_simm(shamt, 5) fast path is redundant --
// the else branch produces the same encoding for shamt in 0..15 -- but it
// is kept unchanged here.
instruct salL_Reg_imm(mRegL dst, mRegL src, immI8 shift) %{
  //predicate(UseNewLongLShift);
  match(Set dst (LShiftL src shift));
  ins_cost(100);
  format %{ "salL $dst, $src, $shift @ salL_Reg_imm" %}
  ins_encode %{
    Register src_reg = as_Register($src$$reg);
    Register dst_reg = as_Register($dst$$reg);
    int shamt = $shift$$constant;

    if (__ is_simm(shamt, 5))
      __ dsll(dst_reg, src_reg, shamt);
    else
    {
      int sa = Assembler::low(shamt, 6);
      if (sa < 32) {
        __ dsll(dst_reg, src_reg, sa);
      } else {
        __ dsll32(dst_reg, src_reg, sa - 32);
      }
    }
  %}
  ins_pipe( ialu_regL_regL );
%}

// LShiftL of ConvI2L by immediate; same dsll/dsll32 selection.
// NOTE(review): matches the same pattern as salL_convI2L_Reg_imm further
// down -- apparent duplicate rule; confirm with ADLC before removing one.
instruct salL_RegI2L_imm(mRegL dst, mRegI src, immI8 shift) %{
  //predicate(UseNewLongLShift);
  match(Set dst (LShiftL (ConvI2L src) shift));
  ins_cost(100);
  format %{ "salL $dst, $src, $shift @ salL_RegI2L_imm" %}
  ins_encode %{
    Register src_reg = as_Register($src$$reg);
    Register dst_reg = as_Register($dst$$reg);
    int shamt = $shift$$constant;

    if (__ is_simm(shamt, 5))
      __ dsll(dst_reg, src_reg, shamt);
    else
    {
      int sa = Assembler::low(shamt, 6);
      if (sa < 32) {
        __ dsll(dst_reg, src_reg, sa);
      } else {
        __ dsll32(dst_reg, src_reg, sa - 32);
      }
    }
  %}
  ins_pipe( ialu_regL_regL );
%}

// Shift Left Long by a register amount (dsllv uses the low 6 bits).
instruct salL_Reg_Reg(mRegL dst, mRegL src, mRegI shift) %{
  //predicate(UseNewLongLShift);
  match(Set dst (LShiftL src shift));
  ins_cost(100);
  format %{ "salL $dst, $src, $shift @ salL_Reg_Reg" %}
  ins_encode %{
    Register src_reg = as_Register($src$$reg);
    Register dst_reg = as_Register($dst$$reg);

    __ dsllv(dst_reg, src_reg, $shift$$Register);
  %}
  ins_pipe( ialu_regL_regL );
%}
10970 instruct salL_convI2L_Reg_imm(mRegL dst, mRegI src, immI8 shift) %{
10971 match(Set dst (LShiftL (ConvI2L src) shift));
10972 ins_cost(100);
10973 format %{ "salL $dst, $src, $shift @ salL_convI2L_Reg_imm" %}
10974 ins_encode %{
10975 Register src_reg = as_Register($src$$reg);
10976 Register dst_reg = as_Register($dst$$reg);
10977 int shamt = $shift$$constant;
10979 if (__ is_simm(shamt, 5)) {
10980 __ dsll(dst_reg, src_reg, shamt);
10981 } else {
10982 int sa = Assembler::low(shamt, 6);
10983 if (sa < 32) {
10984 __ dsll(dst_reg, src_reg, sa);
10985 } else {
10986 __ dsll32(dst_reg, src_reg, sa - 32);
10987 }
10988 }
10989 %}
10990 ins_pipe( ialu_regL_regL );
10991 %}
// Shift Right Long (arithmetic) by immediate; dsra covers amounts 0-31,
// dsra32 covers 32-63 with (sa - 32).
instruct sarL_Reg_imm(mRegL dst, mRegL src, immI8 shift) %{
  match(Set dst (RShiftL src shift));
  ins_cost(100);
  format %{ "sarL $dst, $src, $shift @ sarL_Reg_imm" %}
  ins_encode %{
    Register src_reg = as_Register($src$$reg);
    Register dst_reg = as_Register($dst$$reg);
    int shamt = ($shift$$constant & 0x3f); // long shift amounts are mod 64
    if (__ is_simm(shamt, 5))
      __ dsra(dst_reg, src_reg, shamt);
    else {
      int sa = Assembler::low(shamt, 6);
      if (sa < 32) {
        __ dsra(dst_reg, src_reg, sa);
      } else {
        __ dsra32(dst_reg, src_reg, sa - 32);
      }
    }
  %}
  ins_pipe( ialu_regL_regL );
%}

// (long >> [32..63]) narrowed to int: a single dsra32 produces the
// already-sign-extended 32-bit result.
instruct sarL2I_Reg_immI_32_63(mRegI dst, mRegL src, immI_32_63 shift) %{
  match(Set dst (ConvL2I (RShiftL src shift)));
  ins_cost(100);
  format %{ "sarL $dst, $src, $shift @ sarL2I_Reg_immI_32_63" %}
  ins_encode %{
    Register src_reg = as_Register($src$$reg);
    Register dst_reg = as_Register($dst$$reg);
    int shamt = $shift$$constant;

    __ dsra32(dst_reg, src_reg, shamt - 32);
  %}
  ins_pipe( ialu_regL_regL );
%}

// Shift Right Long arithmetically by a variable amount held in a register.
instruct sarL_Reg_Reg(mRegL dst, mRegL src, mRegI shift) %{
  //predicate(UseNewLongLShift);
  match(Set dst (RShiftL src shift));
  ins_cost(100);
  format %{ "sarL $dst, $src, $shift @ sarL_Reg_Reg" %}
  ins_encode %{
    Register src_reg = as_Register($src$$reg);
    Register dst_reg = as_Register($dst$$reg);

    __ dsrav(dst_reg, src_reg, $shift$$Register);
  %}
  ins_pipe( ialu_regL_regL );
%}
// Shift Right Long logically
instruct slrL_Reg_Reg(mRegL dst, mRegL src, mRegI shift) %{
  match(Set dst (URShiftL src shift));
  ins_cost(100);
  format %{ "slrL $dst, $src, $shift @ slrL_Reg_Reg" %}
  ins_encode %{
    Register src_reg = as_Register($src$$reg);
    Register dst_reg = as_Register($dst$$reg);

    __ dsrlv(dst_reg, src_reg, $shift$$Register);
  %}
  ins_pipe( ialu_regL_regL );
%}

// Logical right shift by immediate 0-31 (fits dsrl's 5-bit field directly).
instruct slrL_Reg_immI_0_31(mRegL dst, mRegL src, immI_0_31 shift) %{
  match(Set dst (URShiftL src shift));
  ins_cost(80);
  format %{ "slrL $dst, $src, $shift @ slrL_Reg_immI_0_31" %}
  ins_encode %{
    Register src_reg = as_Register($src$$reg);
    Register dst_reg = as_Register($dst$$reg);
    int shamt = $shift$$constant;

    __ dsrl(dst_reg, src_reg, shamt);
  %}
  ins_pipe( ialu_regL_regL );
%}

// ((int)(long >>> shift)) & max_int: the mask keeps 31 bits, so the whole
// pattern is a bit-field extract — one MIPS32r2 'dext' instruction.
instruct slrL_Reg_immI_0_31_and_max_int(mRegI dst, mRegL src, immI_0_31 shift, immI_MaxI max_int) %{
  match(Set dst (AndI (ConvL2I (URShiftL src shift)) max_int));
  ins_cost(80);
  format %{ "dext $dst, $src, $shift, 31 @ slrL_Reg_immI_0_31_and_max_int" %}
  ins_encode %{
    Register src_reg = as_Register($src$$reg);
    Register dst_reg = as_Register($dst$$reg);
    int shamt = $shift$$constant;

    __ dext(dst_reg, src_reg, shamt, 31);
  %}
  ins_pipe( ialu_regL_regL );
%}

// Logical right shift of a pointer reinterpreted as a long (CastP2X).
instruct slrL_P2XReg_immI_0_31(mRegL dst, mRegP src, immI_0_31 shift) %{
  match(Set dst (URShiftL (CastP2X src) shift));
  ins_cost(80);
  format %{ "slrL $dst, $src, $shift @ slrL_P2XReg_immI_0_31" %}
  ins_encode %{
    Register src_reg = as_Register($src$$reg);
    Register dst_reg = as_Register($dst$$reg);
    int shamt = $shift$$constant;

    __ dsrl(dst_reg, src_reg, shamt);
  %}
  ins_pipe( ialu_regL_regL );
%}

// Logical right shift by immediate 32-63: dsrl32 shifts by (shamt - 32).
instruct slrL_Reg_immI_32_63(mRegL dst, mRegL src, immI_32_63 shift) %{
  match(Set dst (URShiftL src shift));
  ins_cost(80);
  format %{ "slrL $dst, $src, $shift @ slrL_Reg_immI_32_63" %}
  ins_encode %{
    Register src_reg = as_Register($src$$reg);
    Register dst_reg = as_Register($dst$$reg);
    int shamt = $shift$$constant;

    __ dsrl32(dst_reg, src_reg, shamt - 32);
  %}
  ins_pipe( ialu_regL_regL );
%}

// (int)(long >>> shift) for shift > 32: the high 32 bits of the dsrl32 result
// are zero, so the narrow is free.  (shift == 32 is deliberately excluded by
// the predicate — presumably handled by another rule; confirm before changing.)
instruct slrL_Reg_immI_convL2I(mRegI dst, mRegL src, immI_32_63 shift) %{
  match(Set dst (ConvL2I (URShiftL src shift)));
  predicate(n->in(1)->in(2)->get_int() > 32);
  ins_cost(80);
  format %{ "slrL $dst, $src, $shift @ slrL_Reg_immI_convL2I" %}
  ins_encode %{
    Register src_reg = as_Register($src$$reg);
    Register dst_reg = as_Register($dst$$reg);
    int shamt = $shift$$constant;

    __ dsrl32(dst_reg, src_reg, shamt - 32);
  %}
  ins_pipe( ialu_regL_regL );
%}

// Logical right shift (32-63) of a pointer reinterpreted as a long.
instruct slrL_P2XReg_immI_32_63(mRegL dst, mRegP src, immI_32_63 shift) %{
  match(Set dst (URShiftL (CastP2X src) shift));
  ins_cost(80);
  format %{ "slrL $dst, $src, $shift @ slrL_P2XReg_immI_32_63" %}
  ins_encode %{
    Register src_reg = as_Register($src$$reg);
    Register dst_reg = as_Register($dst$$reg);
    int shamt = $shift$$constant;

    __ dsrl32(dst_reg, src_reg, shamt - 32);
  %}
  ins_pipe( ialu_regL_regL );
%}
// Xor Instructions
// Xor Register with Register
instruct xorI_Reg_Reg(mRegI dst, mRegI src1, mRegI src2) %{
  match(Set dst (XorI src1 src2));

  format %{ "XOR $dst, $src1, $src2 #@xorI_Reg_Reg" %}

  ins_encode %{
    Register dst = $dst$$Register;
    Register src1 = $src1$$Register;
    Register src2 = $src2$$Register;
    __ xorr(dst, src1, src2);
    __ sll(dst, dst, 0); /* long -> int */
  %}

  ins_pipe( ialu_regI_regI );
%}

// Or Instructions
// Or Register with Register
instruct orI_Reg_Reg(mRegI dst, mRegI src1, mRegI src2) %{
  match(Set dst (OrI src1 src2));

  format %{ "OR $dst, $src1, $src2 #@orI_Reg_Reg" %}
  ins_encode %{
    Register dst = $dst$$Register;
    Register src1 = $src1$$Register;
    Register src2 = $src2$$Register;
    __ orr(dst, src1, src2);
  %}

  ins_pipe( ialu_regI_regI );
%}

// (src >>> rshift) | ((src & 1) << lshift) with rshift + lshift == 32:
// rotate right by 1 brings bit 0 to the top, then shift the rest into place.
instruct rotI_shr_logical_Reg(mRegI dst, mRegI src, immI_0_31 rshift, immI_0_31 lshift, immI_1 one) %{
  match(Set dst (OrI (URShiftI src rshift) (LShiftI (AndI src one) lshift)));
  predicate(32 == ((n->in(1)->in(2)->get_int() + n->in(2)->in(2)->get_int())));

  format %{ "rotr $dst, $src, 1 ...\n\t"
            "srl $dst, $dst, ($rshift-1) @ rotI_shr_logical_Reg" %}
  ins_encode %{
    Register dst = $dst$$Register;
    Register src = $src$$Register;
    int rshift = $rshift$$constant;

    __ rotr(dst, src, 1);
    if (rshift - 1) { // rshift == 1 needs no further shift
      __ srl(dst, dst, rshift - 1);
    }
  %}

  ins_pipe( ialu_regI_regI );
%}

// NOTE(review): operands are long/pointer registers although the matched node
// is the int OrI — presumably intentional for pointer arithmetic idioms;
// confirm against the matcher rules before altering.
instruct orI_Reg_castP2X(mRegL dst, mRegL src1, mRegP src2) %{
  match(Set dst (OrI src1 (CastP2X src2)));

  format %{ "OR $dst, $src1, $src2 #@orI_Reg_castP2X" %}
  ins_encode %{
    Register dst = $dst$$Register;
    Register src1 = $src1$$Register;
    Register src2 = $src2$$Register;
    __ orr(dst, src1, src2);
  %}

  ins_pipe( ialu_regI_regI );
%}
// Logical Shift Right by 8-bit immediate
instruct shr_logical_Reg_imm(mRegI dst, mRegI src, immI8 shift) %{
  match(Set dst (URShiftI src shift));
  // effect(KILL cr);

  format %{ "SRL $dst, $src, $shift #@shr_logical_Reg_imm" %}
  ins_encode %{
    Register src = $src$$Register;
    Register dst = $dst$$Register;
    int shift = $shift$$constant;

    __ srl(dst, src, shift);
  %}
  ins_pipe( ialu_regI_regI );
%}

// (src >>> shift) & mask with a contiguous low-bit mask: a single 'ext'
// bit-field extract; is_int_mask() yields the mask's bit width.
instruct shr_logical_Reg_imm_nonneg_mask(mRegI dst, mRegI src, immI_0_31 shift, immI_nonneg_mask mask) %{
  match(Set dst (AndI (URShiftI src shift) mask));

  format %{ "ext $dst, $src, $shift, one-bits($mask) #@shr_logical_Reg_imm_nonneg_mask" %}
  ins_encode %{
    Register src = $src$$Register;
    Register dst = $dst$$Register;
    int pos = $shift$$constant;
    int size = Assembler::is_int_mask($mask$$constant);

    __ ext(dst, src, pos, size);
  %}
  ins_pipe( ialu_regI_regI );
%}
// Rotate-left by lshift == rotate-right by rshift when the two amounts sum to
// the word width; all rules below therefore emit (d)rotr / (d)rotr32.
instruct rolI_Reg_immI_0_31(mRegI dst, immI_0_31 lshift, immI_0_31 rshift)
%{
  predicate(0 == ((n->in(1)->in(2)->get_int() + n->in(2)->in(2)->get_int()) & 0x1f));
  match(Set dst (OrI (LShiftI dst lshift) (URShiftI dst rshift)));

  ins_cost(100);
  format %{ "rotr $dst, $dst, $rshift #@rolI_Reg_immI_0_31" %}
  ins_encode %{
    Register dst = $dst$$Register;
    int sa = $rshift$$constant;

    __ rotr(dst, dst, sa);
  %}
  ins_pipe( ialu_regI_regI );
%}

// 64-bit rotate-left expressed as rotate-right; rshift in 0-31 fits drotr.
instruct rolL_Reg_immI_0_31(mRegL dst, immI_32_63 lshift, immI_0_31 rshift)
%{
  predicate(0 == ((n->in(1)->in(2)->get_int() + n->in(2)->in(2)->get_int()) & 0x3f));
  match(Set dst (OrL (LShiftL dst lshift) (URShiftL dst rshift)));

  ins_cost(100);
  format %{ "rotr $dst, $dst, $rshift #@rolL_Reg_immI_0_31" %}
  ins_encode %{
    Register dst = $dst$$Register;
    int sa = $rshift$$constant;

    __ drotr(dst, dst, sa);
  %}
  ins_pipe( ialu_regI_regI );
%}

// 64-bit rotate-left with rshift in 32-63: drotr32 takes (sa - 32).
instruct rolL_Reg_immI_32_63(mRegL dst, immI_0_31 lshift, immI_32_63 rshift)
%{
  predicate(0 == ((n->in(1)->in(2)->get_int() + n->in(2)->in(2)->get_int()) & 0x3f));
  match(Set dst (OrL (LShiftL dst lshift) (URShiftL dst rshift)));

  ins_cost(100);
  format %{ "rotr $dst, $dst, $rshift #@rolL_Reg_immI_32_63" %}
  ins_encode %{
    Register dst = $dst$$Register;
    int sa = $rshift$$constant;

    __ drotr32(dst, dst, sa - 32);
  %}
  ins_pipe( ialu_regI_regI );
%}

// 32-bit rotate-right (operand order of the Or is swapped vs rolI above).
instruct rorI_Reg_immI_0_31(mRegI dst, immI_0_31 rshift, immI_0_31 lshift)
%{
  predicate(0 == ((n->in(1)->in(2)->get_int() + n->in(2)->in(2)->get_int()) & 0x1f));
  match(Set dst (OrI (URShiftI dst rshift) (LShiftI dst lshift)));

  ins_cost(100);
  format %{ "rotr $dst, $dst, $rshift #@rorI_Reg_immI_0_31" %}
  ins_encode %{
    Register dst = $dst$$Register;
    int sa = $rshift$$constant;

    __ rotr(dst, dst, sa);
  %}
  ins_pipe( ialu_regI_regI );
%}

// 64-bit rotate-right, amount 0-31.
instruct rorL_Reg_immI_0_31(mRegL dst, immI_0_31 rshift, immI_32_63 lshift)
%{
  predicate(0 == ((n->in(1)->in(2)->get_int() + n->in(2)->in(2)->get_int()) & 0x3f));
  match(Set dst (OrL (URShiftL dst rshift) (LShiftL dst lshift)));

  ins_cost(100);
  format %{ "rotr $dst, $dst, $rshift #@rorL_Reg_immI_0_31" %}
  ins_encode %{
    Register dst = $dst$$Register;
    int sa = $rshift$$constant;

    __ drotr(dst, dst, sa);
  %}
  ins_pipe( ialu_regI_regI );
%}

// 64-bit rotate-right, amount 32-63 via drotr32 (sa - 32).
instruct rorL_Reg_immI_32_63(mRegL dst, immI_32_63 rshift, immI_0_31 lshift)
%{
  predicate(0 == ((n->in(1)->in(2)->get_int() + n->in(2)->in(2)->get_int()) & 0x3f));
  match(Set dst (OrL (URShiftL dst rshift) (LShiftL dst lshift)));

  ins_cost(100);
  format %{ "rotr $dst, $dst, $rshift #@rorL_Reg_immI_32_63" %}
  ins_encode %{
    Register dst = $dst$$Register;
    int sa = $rshift$$constant;

    __ drotr32(dst, dst, sa - 32);
  %}
  ins_pipe( ialu_regI_regI );
%}
// Logical Shift Right by a variable amount held in a register.
instruct shr_logical_Reg_Reg(mRegI dst, mRegI src, mRegI shift) %{
  match(Set dst (URShiftI src shift));

  format %{ "SRL $dst, $src, $shift #@shr_logical_Reg_Reg" %}
  ins_encode %{
    Register src = $src$$Register;
    Register dst = $dst$$Register;
    Register shift = $shift$$Register;
    __ srlv(dst, src, shift);
  %}
  ins_pipe( ialu_regI_regI );
%}

// Arithmetic Shift Right by immediate.
instruct shr_arith_Reg_imm(mRegI dst, mRegI src, immI8 shift) %{
  match(Set dst (RShiftI src shift));
  // effect(KILL cr);

  format %{ "SRA $dst, $src, $shift #@shr_arith_Reg_imm" %}
  ins_encode %{
    Register src = $src$$Register;
    Register dst = $dst$$Register;
    int shift = $shift$$constant;
    __ sra(dst, src, shift);
  %}
  ins_pipe( ialu_regI_regI );
%}

// Arithmetic Shift Right by a variable amount held in a register.
instruct shr_arith_Reg_Reg(mRegI dst, mRegI src, mRegI shift) %{
  match(Set dst (RShiftI src shift));
  // effect(KILL cr);

  format %{ "SRA $dst, $src, $shift #@shr_arith_Reg_Reg" %}
  ins_encode %{
    Register src = $src$$Register;
    Register dst = $dst$$Register;
    Register shift = $shift$$Register;
    __ srav(dst, src, shift);
  %}
  ins_pipe( ialu_regI_regI );
%}
//----------Convert Int to Boolean---------------------------------------------

// dst = (src != 0) ? 1 : 0, using movz (conditional move if zero).
// When dst aliases src the value is first saved in AT so the daddiu that
// preloads 1 does not destroy the condition input.
instruct convI2B(mRegI dst, mRegI src) %{
  match(Set dst (Conv2B src));

  ins_cost(100);
  format %{ "convI2B $dst, $src @ convI2B" %}
  ins_encode %{
    Register dst = as_Register($dst$$reg);
    Register src = as_Register($src$$reg);

    if (dst != src) {
      __ daddiu(dst, R0, 1);
      __ movz(dst, R0, src);
    } else {
      __ move(AT, src);
      __ daddiu(dst, R0, 1);
      __ movz(dst, R0, AT);
    }
  %}

  ins_pipe( ialu_regL_regL );
%}

// int -> long: 'sll reg, reg, 0' sign-extends the low 32 bits; a no-op when
// dst == src because int values are kept sign-extended in registers.
instruct convI2L_reg( mRegL dst, mRegI src) %{
  match(Set dst (ConvI2L src));

  ins_cost(100);
  format %{ "SLL $dst, $src @ convI2L_reg\t" %}
  ins_encode %{
    Register dst = as_Register($dst$$reg);
    Register src = as_Register($src$$reg);

    if(dst != src) __ sll(dst, src, 0);
  %}
  ins_pipe( ialu_regL_regL );
%}

// long -> int: truncate and canonicalize (sign-extend) via sll by 0.
instruct convL2I_reg( mRegI dst, mRegL src ) %{
  match(Set dst (ConvL2I src));

  format %{ "MOV $dst, $src @ convL2I_reg" %}
  ins_encode %{
    Register dst = as_Register($dst$$reg);
    Register src = as_Register($src$$reg);

    __ sll(dst, src, 0);
  %}

  ins_pipe( ialu_regI_regI );
%}

// (long)(int)long round-trip: same single sign-extension.
instruct convL2I2L_reg( mRegL dst, mRegL src ) %{
  match(Set dst (ConvI2L (ConvL2I src)));

  format %{ "sll $dst, $src, 0 @ convL2I2L_reg" %}
  ins_encode %{
    Register dst = as_Register($dst$$reg);
    Register src = as_Register($src$$reg);

    __ sll(dst, src, 0);
  %}

  ins_pipe( ialu_regI_regI );
%}
// long -> double: move the 64-bit integer into the FPU, then cvt.d.l.
instruct convL2D_reg( regD dst, mRegL src ) %{
  match(Set dst (ConvL2D src));
  format %{ "convL2D $dst, $src @ convL2D_reg" %}
  ins_encode %{
    Register src = as_Register($src$$reg);
    FloatRegister dst = as_FloatRegister($dst$$reg);

    __ dmtc1(src, dst);
    __ cvt_d_l(dst, dst);
  %}

  ins_pipe( pipe_slow );
%}

// double -> long, fast path.
// trunc.l.d produces max_long for NaN/overflow inputs, so only a result equal
// to max_long needs the slow fixup: distinguish "really max_long" from
// "too large" (keep max_long), "too small" (produce min_long) and NaN
// (produce 0), per Java ConvD2L semantics.
// NOTE(review): control flow relies on branch delay slots — do not reorder.
instruct convD2L_reg_fast( mRegL dst, regD src ) %{
  match(Set dst (ConvD2L src));
  ins_cost(150);
  format %{ "convD2L $dst, $src @ convD2L_reg_fast" %}
  ins_encode %{
    Register dst = as_Register($dst$$reg);
    FloatRegister src = as_FloatRegister($src$$reg);

    Label Done;

    __ trunc_l_d(F30, src);
    // max_long: 0x7fffffffffffffff
    // __ set64(AT, 0x7fffffffffffffff);
    __ daddiu(AT, R0, -1);
    __ dsrl(AT, AT, 1);   // AT = max_long without needing a 64-bit immediate
    __ dmfc1(dst, F30);

    __ bne(dst, AT, Done);
    __ delayed()->mtc1(R0, F30);

    __ cvt_d_w(F30, F30); // F30 = 0.0
    __ c_ult_d(src, F30); // true when src < 0.0 or unordered (NaN)
    __ bc1f(Done);        // src >= 0.0: overflow high, keep max_long
    __ delayed()->daddiu(T9, R0, -1);

    __ c_un_d(src, src);  //NaN?
    __ subu(dst, T9, AT); // -1 - max_long == min_long
    __ movt(dst, R0);     // NaN converts to 0
    __ bind(Done);
  %}

  ins_pipe( pipe_slow );
%}
// double -> long, slow path: NaN converts to 0; if trunc.l.d raised the FCSR
// invalid-operation flag (bit 16), fall back to SharedRuntime::d2l for the
// exact Java semantics; otherwise the truncated value in F30 is the result.
instruct convD2L_reg_slow( mRegL dst, regD src ) %{
  match(Set dst (ConvD2L src));
  ins_cost(250);
  format %{ "convD2L $dst, $src @ convD2L_reg_slow" %}
  ins_encode %{
    Register dst = as_Register($dst$$reg);
    FloatRegister src = as_FloatRegister($src$$reg);

    Label L;

    __ c_un_d(src, src); //NaN?
    __ bc1t(L);
    __ delayed();
    __ move(dst, R0);    // fills the delay slot: NaN -> 0

    __ trunc_l_d(F30, src);
    __ cfc1(AT, 31);     // read FCSR
    __ li(T9, 0x10000);  // invalid-operation flag bit
    __ andr(AT, AT, T9);
    __ beq(AT, R0, L);
    __ delayed()->dmfc1(dst, F30);

    __ mov_d(F12, src);
    // Preserve V0 across the runtime call, mirroring convF2I_reg_slow: V0 is
    // not declared killed by this instruct, so a live value the register
    // allocator placed there would otherwise be clobbered by call_VM_leaf().
    if (dst != V0) {
      __ push(V0);
    }
    __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::d2l), 1);
    if (dst != V0) {
      __ move(dst, V0);
      __ pop(V0);
    }
    __ bind(L);
  %}

  ins_pipe( pipe_slow );
%}
// float -> int, fast path: truncate, then force the Java-mandated 0 result
// for NaN inputs via the FP condition bit (c.un sets it when unordered).
instruct convF2I_reg_fast( mRegI dst, regF src ) %{
  match(Set dst (ConvF2I src));
  ins_cost(150);
  format %{ "convf2i $dst, $src @ convF2I_reg_fast" %}
  ins_encode %{
    Register dreg = $dst$$Register;
    FloatRegister fval = $src$$FloatRegister;

    __ trunc_w_s(F30, fval);
    __ mfc1(dreg, F30);
    __ c_un_s(fval, fval); //NaN?
    __ movt(dreg, R0);     // NaN converts to 0
  %}

  ins_pipe( pipe_slow );
%}
// float -> int, slow path: NaN -> 0; if trunc.w.s raised the FCSR
// invalid-operation flag, call SharedRuntime::f2i for exact Java semantics.
// V0 is explicitly preserved around the runtime call (see comment below).
instruct convF2I_reg_slow( mRegI dst, regF src ) %{
  match(Set dst (ConvF2I src));
  ins_cost(250);
  format %{ "convf2i $dst, $src @ convF2I_reg_slow" %}
  ins_encode %{
    Register dreg = $dst$$Register;
    FloatRegister fval = $src$$FloatRegister;
    Label L;

    __ c_un_s(fval, fval); //NaN?
    __ bc1t(L);
    __ delayed();
    __ move(dreg, R0);    // fills the delay slot: NaN -> 0

    __ trunc_w_s(F30, fval);

    /* Call SharedRuntime:f2i() to do valid convention */
    __ cfc1(AT, 31);      // read FCSR
    __ li(T9, 0x10000);   // invalid-operation flag bit
    __ andr(AT, AT, T9);
    __ beq(AT, R0, L);
    __ delayed()->mfc1(dreg, F30);

    __ mov_s(F12, fval);

    /* 2014/01/08 Fu : This bug was found when running ezDS's control-panel.
     * J 982 C2 javax.swing.text.BoxView.layoutMajorAxis(II[I[I)V (283 bytes) @ 0x000000555c46aa74
     *
     * An interger array index has been assigned to V0, and then changed from 1 to Integer.MAX_VALUE.
     * V0 is corrupted during call_VM_leaf(), and should be preserved.
     */
    if(dreg != V0) {
      __ push(V0);
    }
    __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::f2i), 1);
    if(dreg != V0) {
      __ move(dreg, V0);
      __ pop(V0);
    }
    __ bind(L);
  %}

  ins_pipe( pipe_slow );
%}
// float -> long, fast path: truncate to a 64-bit integer, then force the
// Java-mandated 0 result for NaN via the FP condition bit.
instruct convF2L_reg_fast( mRegL dst, regF src ) %{
  match(Set dst (ConvF2L src));
  ins_cost(150);
  format %{ "convf2l $dst, $src @ convF2L_reg_fast" %}
  ins_encode %{
    Register dreg = $dst$$Register;
    FloatRegister fval = $src$$FloatRegister;

    __ trunc_l_s(F30, fval);
    __ dmfc1(dreg, F30);
    __ c_un_s(fval, fval); //NaN?
    __ movt(dreg, R0);     // NaN converts to 0
  %}

  ins_pipe( pipe_slow );
%}
// float -> long, slow path: NaN -> 0; if trunc.l.s raised the FCSR
// invalid-operation flag, call SharedRuntime::f2l for exact Java semantics.
instruct convF2L_reg_slow( mRegL dst, regF src ) %{
  match(Set dst (ConvF2L src));
  ins_cost(250);
  format %{ "convf2l $dst, $src @ convF2L_reg_slow" %}
  ins_encode %{
    Register dst = as_Register($dst$$reg);
    FloatRegister fval = $src$$FloatRegister;
    Label L;

    __ c_un_s(fval, fval); //NaN?
    __ bc1t(L);
    __ delayed();
    __ move(dst, R0);     // fills the delay slot: NaN -> 0

    __ trunc_l_s(F30, fval);
    __ cfc1(AT, 31);      // read FCSR
    __ li(T9, 0x10000);   // invalid-operation flag bit
    __ andr(AT, AT, T9);
    __ beq(AT, R0, L);
    __ delayed()->dmfc1(dst, F30);

    __ mov_s(F12, fval);
    // Preserve V0 across the runtime call, mirroring convF2I_reg_slow: V0 is
    // not declared killed by this instruct, so a live value the register
    // allocator placed there would otherwise be clobbered by call_VM_leaf().
    if (dst != V0) {
      __ push(V0);
    }
    __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::f2l), 1);
    if (dst != V0) {
      __ move(dst, V0);
      __ pop(V0);
    }
    __ bind(L);
  %}

  ins_pipe( pipe_slow );
%}
// long -> float: move the 64-bit integer into the FPU, then cvt.s.l.
// (Removed an unused 'Label L' local left over from an earlier version.)
instruct convL2F_reg( regF dst, mRegL src ) %{
  match(Set dst (ConvL2F src));
  format %{ "convl2f $dst, $src @ convL2F_reg" %}
  ins_encode %{
    FloatRegister dst = $dst$$FloatRegister;
    Register src = as_Register($src$$reg);

    __ dmtc1(src, dst);
    __ cvt_s_l(dst, dst);
  %}

  ins_pipe( pipe_slow );
%}
// int -> float: move into the FPU, then cvt.s.w.
instruct convI2F_reg( regF dst, mRegI src ) %{
  match(Set dst (ConvI2F src));
  format %{ "convi2f $dst, $src @ convI2F_reg" %}
  ins_encode %{
    Register src = $src$$Register;
    FloatRegister dst = $dst$$FloatRegister;

    __ mtc1(src, dst);
    __ cvt_s_w(dst, dst);
  %}

  ins_pipe( fpu_regF_regF );
%}

// CmpLTMask against zero: arithmetic shift by 31 replicates the sign bit,
// yielding -1 when p < 0 and 0 otherwise.
instruct cmpLTMask_immI0( mRegI dst, mRegI p, immI0 zero ) %{
  match(Set dst (CmpLTMask p zero));
  ins_cost(100);

  format %{ "sra $dst, $p, 31 @ cmpLTMask_immI0" %}
  ins_encode %{
    Register src = $p$$Register;
    Register dst = $dst$$Register;

    __ sra(dst, src, 31);
  %}
  ins_pipe( pipe_slow );
%}
// CmpLTMask: dst = (p < q) ? -1 : 0, via slt (0/1) then 0 - dst.
instruct cmpLTMask( mRegI dst, mRegI p, mRegI q ) %{
  match(Set dst (CmpLTMask p q));
  ins_cost(400);

  format %{ "cmpLTMask $dst, $p, $q @ cmpLTMask" %}
  ins_encode %{
    Register p = $p$$Register;
    Register q = $q$$Register;
    Register dst = $dst$$Register;

    __ slt(dst, p, q);
    __ subu(dst, R0, dst); // 0/1 -> 0/-1
  %}
  ins_pipe( pipe_slow );
%}

// dst = (src != NULL) ? 1 : 0 — same movz idiom as convI2B, with AT used as
// a scratch copy when dst aliases src.
instruct convP2B(mRegI dst, mRegP src) %{
  match(Set dst (Conv2B src));

  ins_cost(100);
  format %{ "convP2B $dst, $src @ convP2B" %}
  ins_encode %{
    Register dst = as_Register($dst$$reg);
    Register src = as_Register($src$$reg);

    if (dst != src) {
      __ daddiu(dst, R0, 1);
      __ movz(dst, R0, src);
    } else {
      __ move(AT, src);
      __ daddiu(dst, R0, 1);
      __ movz(dst, R0, AT);
    }
  %}

  ins_pipe( ialu_regL_regL );
%}
// int -> double: move into the FPU, then cvt.d.w.
instruct convI2D_reg_reg(regD dst, mRegI src) %{
  match(Set dst (ConvI2D src));
  format %{ "conI2D $dst, $src @convI2D_reg" %}
  ins_encode %{
    Register src = $src$$Register;
    FloatRegister dst = $dst$$FloatRegister;
    __ mtc1(src, dst);
    __ cvt_d_w(dst, dst);
  %}
  ins_pipe( fpu_regF_regF );
%}

// float -> double widening.
instruct convF2D_reg_reg(regD dst, regF src) %{
  match(Set dst (ConvF2D src));
  format %{ "convF2D $dst, $src\t# @convF2D_reg_reg" %}
  ins_encode %{
    FloatRegister dst = $dst$$FloatRegister;
    FloatRegister src = $src$$FloatRegister;

    __ cvt_d_s(dst, src);
  %}
  ins_pipe( fpu_regF_regF );
%}

// double -> float narrowing.
instruct convD2F_reg_reg(regF dst, regD src) %{
  match(Set dst (ConvD2F src));
  format %{ "convD2F $dst, $src\t# @convD2F_reg_reg" %}
  ins_encode %{
    FloatRegister dst = $dst$$FloatRegister;
    FloatRegister src = $src$$FloatRegister;

    __ cvt_s_d(dst, src);
  %}
  ins_pipe( fpu_regF_regF );
%}
// Convert a double to an int. If the double is a NAN, stuff a zero in instead.
// Fast path mirrors convD2L_reg_fast: trunc.w.d yields max_int on
// NaN/overflow, so only a max_int result needs fixing up (keep max_int for
// positive overflow, produce min_int for negative overflow, 0 for NaN).
// NOTE(review): control flow relies on branch delay slots — do not reorder.
instruct convD2I_reg_reg_fast( mRegI dst, regD src ) %{
  match(Set dst (ConvD2I src));

  ins_cost(150);
  format %{ "convD2I $dst, $src\t# @ convD2I_reg_reg_fast" %}

  ins_encode %{
    FloatRegister src = $src$$FloatRegister;
    Register dst = $dst$$Register;

    Label Done;

    __ trunc_w_d(F30, src);
    // max_int: 2147483647
    __ move(AT, 0x7fffffff);
    __ mfc1(dst, F30);

    __ bne(dst, AT, Done);
    __ delayed()->mtc1(R0, F30);

    __ cvt_d_w(F30, F30); // F30 = 0.0
    __ c_ult_d(src, F30); // true when src < 0.0 or unordered (NaN)
    __ bc1f(Done);        // src >= 0.0: overflow high, keep max_int
    __ delayed()->addiu(T9, R0, -1);

    __ c_un_d(src, src);  //NaN?
    __ subu32(dst, T9, AT); // -1 - max_int == min_int (32-bit)
    __ movt(dst, R0);       // NaN converts to 0

    __ bind(Done);
  %}
  ins_pipe( pipe_slow );
%}
// double -> int, slow path: if trunc.w.d raised the FCSR invalid-operation
// flag (NaN/out-of-range input), call SharedRuntime::d2i for exact Java
// semantics; otherwise the truncated value in F30 is the result.
instruct convD2I_reg_reg_slow( mRegI dst, regD src ) %{
  match(Set dst (ConvD2I src));

  ins_cost(250);
  format %{ "convD2I $dst, $src\t# @ convD2I_reg_reg_slow" %}

  ins_encode %{
    FloatRegister src = $src$$FloatRegister;
    Register dst = $dst$$Register;
    Label L;

    __ trunc_w_d(F30, src);
    __ cfc1(AT, 31);     // read FCSR
    __ li(T9, 0x10000);  // invalid-operation flag bit
    __ andr(AT, AT, T9);
    __ beq(AT, R0, L);
    __ delayed()->mfc1(dst, F30);

    __ mov_d(F12, src);
    // Preserve V0 across the runtime call, mirroring convF2I_reg_slow: V0 is
    // not declared killed by this instruct, so a live value the register
    // allocator placed there would otherwise be clobbered by call_VM_leaf().
    if (dst != V0) {
      __ push(V0);
    }
    __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::d2i), 1);
    if (dst != V0) {
      __ move(dst, V0);
      __ pop(V0);
    }
    __ bind(L);

  %}
  ins_pipe( pipe_slow );
%}
// Convert oop pointer into compressed form
instruct encodeHeapOop(mRegN dst, mRegP src) %{
  predicate(n->bottom_type()->make_ptr()->ptr() != TypePtr::NotNull);
  match(Set dst (EncodeP src));
  format %{ "encode_heap_oop $dst,$src" %}
  ins_encode %{
    Register src = $src$$Register;
    Register dst = $dst$$Register;
    // encode_heap_oop operates in place, so copy first when registers differ.
    if (src != dst) {
      __ move(dst, src);
    }
    __ encode_heap_oop(dst);
  %}
  ins_pipe( ialu_regL_regL );
%}

// Compress an oop that is statically known to be non-null (cheaper: no
// null check inside the encode).
instruct encodeHeapOop_not_null(mRegN dst, mRegP src) %{
  predicate(n->bottom_type()->make_ptr()->ptr() == TypePtr::NotNull);
  match(Set dst (EncodeP src));
  format %{ "encode_heap_oop_not_null $dst,$src @ encodeHeapOop_not_null" %}
  ins_encode %{
    __ encode_heap_oop_not_null($dst$$Register, $src$$Register);
  %}
  ins_pipe( ialu_regL_regL );
%}

// Expand a compressed oop (may be null).
instruct decodeHeapOop(mRegP dst, mRegN src) %{
  predicate(n->bottom_type()->is_ptr()->ptr() != TypePtr::NotNull &&
            n->bottom_type()->is_ptr()->ptr() != TypePtr::Constant);
  match(Set dst (DecodeN src));
  format %{ "decode_heap_oop $dst,$src @ decodeHeapOop" %}
  ins_encode %{
    Register s = $src$$Register;
    Register d = $dst$$Register;
    // decode_heap_oop operates in place, so copy first when registers differ.
    if (s != d) {
      __ move(d, s);
    }
    __ decode_heap_oop(d);
  %}
  ins_pipe( ialu_regL_regL );
%}

// Expand a compressed oop known to be non-null.
instruct decodeHeapOop_not_null(mRegP dst, mRegN src) %{
  predicate(n->bottom_type()->is_ptr()->ptr() == TypePtr::NotNull ||
            n->bottom_type()->is_ptr()->ptr() == TypePtr::Constant);
  match(Set dst (DecodeN src));
  format %{ "decode_heap_oop_not_null $dst,$src @ decodeHeapOop_not_null" %}
  ins_encode %{
    Register s = $src$$Register;
    Register d = $dst$$Register;
    if (s != d) {
      __ decode_heap_oop_not_null(d, s);
    } else {
      __ decode_heap_oop_not_null(d);
    }
  %}
  ins_pipe( ialu_regL_regL );
%}
// Compress a klass pointer (always non-null).
// Fixed debug format string: it previously said "encode_heap_oop_not_null",
// which mislabels this instruct in -XX:+PrintOptoAssembly output.
instruct encodeKlass_not_null(mRegN dst, mRegP src) %{
  match(Set dst (EncodePKlass src));
  format %{ "encode_klass_not_null $dst,$src @ encodeKlass_not_null" %}
  ins_encode %{
    __ encode_klass_not_null($dst$$Register, $src$$Register);
  %}
  ins_pipe( ialu_regL_regL );
%}
// Expand a compressed klass pointer (always non-null).
instruct decodeKlass_not_null(mRegP dst, mRegN src) %{
  match(Set dst (DecodeNKlass src));
  format %{ "decode_heap_klass_not_null $dst,$src" %}
  ins_encode %{
    Register s = $src$$Register;
    Register d = $dst$$Register;
    if (s != d) {
      __ decode_klass_not_null(d, s);
    } else {
      __ decode_klass_not_null(d);
    }
  %}
  ins_pipe( ialu_regL_regL );
%}
//FIXME
// Load the current JavaThread pointer; with OPT_THREAD it is kept in a
// dedicated register (TREG), otherwise fetched via get_thread().
instruct tlsLoadP(mRegP dst) %{
  match(Set dst (ThreadLocal));

  ins_cost(0);
  format %{ " get_thread in $dst #@tlsLoadP" %}
  ins_encode %{
    Register dst = $dst$$Register;
#ifdef OPT_THREAD
    __ move(dst, TREG);
#else
    __ get_thread(dst);
#endif
  %}

  ins_pipe( ialu_loadI );
%}
// CheckCastPP emits no code: the node only narrows the compile-time type.
// Fixed "chekCastPP" typo in the debug format string.
instruct checkCastPP( mRegP dst ) %{
  match(Set dst (CheckCastPP dst));

  format %{ "#checkcastPP of $dst (empty encoding) #@checkCastPP" %}
  ins_encode( /*empty encoding*/ );
  ins_pipe( empty );
%}
// CastPP emits no code: compile-time pointer type assertion only.
instruct castPP(mRegP dst)
%{
  match(Set dst (CastPP dst));

  size(0);
  format %{ "# castPP of $dst" %}
  ins_encode(/* empty encoding */);
  ins_pipe(empty);
%}

// CastII emits no code: compile-time int range assertion only.
instruct castII( mRegI dst ) %{
  match(Set dst (CastII dst));
  format %{ "#castII of $dst empty encoding" %}
  ins_encode( /*empty encoding*/ );
  ins_cost(0);
  ins_pipe( empty );
%}
// Return Instruction
// Remove the return address & jump to it.
instruct Ret() %{
  match(Return);
  format %{ "RET #@Ret" %}

  ins_encode %{
    __ jr(RA);
    __ nop(); // branch delay slot
  %}

  ins_pipe( pipe_jump );
%}
11961 /*
11962 // For Loongson CPUs, jr seems too slow, so this rule shouldn't be imported.
11963 instruct jumpXtnd(mRegL switch_val) %{
11964 match(Jump switch_val);
11966 ins_cost(350);
11968 format %{ "load T9 <-- [$constanttablebase, $switch_val, $constantoffset] @ jumpXtnd\n\t"
11969 "jr T9\n\t"
11970 "nop" %}
11971 ins_encode %{
11972 Register table_base = $constanttablebase;
11973 int con_offset = $constantoffset;
11974 Register switch_reg = $switch_val$$Register;
11976 if (UseLoongsonISA) {
11977 if (Assembler::is_simm(con_offset, 8)) {
11978 __ gsldx(T9, table_base, switch_reg, con_offset);
11979 } else if (Assembler::is_simm16(con_offset)) {
11980 __ daddu(T9, table_base, switch_reg);
11981 __ ld(T9, T9, con_offset);
11982 } else {
11983 __ move(T9, con_offset);
11984 __ daddu(AT, table_base, switch_reg);
11985 __ gsldx(T9, AT, T9, 0);
11986 }
11987 } else {
11988 if (Assembler::is_simm16(con_offset)) {
11989 __ daddu(T9, table_base, switch_reg);
11990 __ ld(T9, T9, con_offset);
11991 } else {
11992 __ move(T9, con_offset);
11993 __ daddu(AT, table_base, switch_reg);
11994 __ daddu(AT, T9, AT);
11995 __ ld(T9, AT, 0);
11996 }
11997 }
11999 __ jr(T9);
12000 __ nop();
12002 %}
12003 ins_pipe(pipe_jump);
12004 %}
12005 */
// Jump Direct - Label defines a relative address from JMP
instruct jmpDir(label labl) %{
  match(Goto);
  effect(USE labl);

  ins_cost(300);
  format %{ "JMP $labl #@jmpDir" %}

  ins_encode %{
    Label &L = *($labl$$label);
    // A null label pointer means the target is not yet known; emit a
    // branch with offset 0 to be patched later.
    if(&L)
      __ b(L);
    else
      __ b(int(0));
    __ nop(); // branch delay slot
  %}

  ins_pipe( pipe_jump );
  ins_pc_relative(1);
%}
// Tail Jump; remove the return address; jump to target.
// TailCall above leaves the return address around.
// TailJump is used in only one place, the rethrow_Java stub (fancy_jump=2).
// ex_oop (Exception Oop) is needed in %o0 at the jump. As there would be a
// "restore" before this instruction (in Epilogue), we need to materialize it
// in %i0.
//FIXME
instruct tailjmpInd(mRegP jump_target,mRegP ex_oop) %{
  match( TailJump jump_target ex_oop );
  ins_cost(200);
  format %{ "Jmp $jump_target ; ex_oop = $ex_oop #@tailjmpInd" %}
  ins_encode %{
    Register target = $jump_target$$Register;

    /* 2012/9/14 Jin: V0, V1 are indicated in:
     * [stubGenerator_mips.cpp] generate_forward_exception()
     * [runtime_mips.cpp] OptoRuntime::generate_exception_blob()
     */
    Register oop = $ex_oop$$Register;
    Register exception_oop = V0;
    Register exception_pc = V1;

    // Hand off exception PC (the return address) and exception oop in the
    // registers the exception-handling stubs expect, then jump.
    __ move(exception_pc, RA);
    __ move(exception_oop, oop);

    __ jr(target);
    __ nop(); // branch delay slot
  %}
  ins_pipe( pipe_jump );
%}
12061 // ============================================================================
12062 // Procedure Call/Return Instructions
12063 // Call Java Static Instruction
12064 // Note: If this code changes, the corresponding ret_addr_offset() and
12065 // compute_padding() functions will have to be adjusted.
// Direct static Java call; actual emission lives in the
// Java_Static_Call encoding class defined elsewhere in this file.
12066 instruct CallStaticJavaDirect(method meth) %{
12067 match(CallStaticJava);
12068 effect(USE meth);
12070 ins_cost(300);
12071 format %{ "CALL,static #@CallStaticJavaDirect " %}
12072 ins_encode( Java_Static_Call( meth ) );
12073 ins_pipe( pipe_slow );
12074 ins_pc_relative(1);
// Align the call site so the call can be patched atomically.
12075 ins_alignment(16);
12076 %}
12078 // Call Java Dynamic Instruction
12079 // Note: If this code changes, the corresponding ret_addr_offset() and
12080 // compute_padding() functions will have to be adjusted.
// Inline-cache (virtual) Java call; emission is in the
// Java_Dynamic_Call encoding class defined elsewhere in this file.
12081 instruct CallDynamicJavaDirect(method meth) %{
12082 match(CallDynamicJava);
12083 effect(USE meth);
12085 ins_cost(300);
12086 format %{"MOV IC_Klass, (oop)-1 @ CallDynamicJavaDirect\n\t"
12087 "CallDynamic @ CallDynamicJavaDirect" %}
12088 ins_encode( Java_Dynamic_Call( meth ) );
12089 ins_pipe( pipe_slow );
12090 ins_pc_relative(1);
12091 ins_alignment(16);
12092 %}
// Call into the runtime (leaf, no FP arguments, no safepoint);
// uses the shared Java_To_Runtime encoding class.
12094 instruct CallLeafNoFPDirect(method meth) %{
12095 match(CallLeafNoFP);
12096 effect(USE meth);
12098 ins_cost(300);
12099 format %{ "CALL_LEAF_NOFP,runtime " %}
12100 ins_encode(Java_To_Runtime(meth));
12101 ins_pipe( pipe_slow );
12102 ins_pc_relative(1);
12103 ins_alignment(16);
12104 %}
12106 // Prefetch instructions.
// Prefetch for read: compute the effective address
// base + (index << scale) + disp into AT and issue MIPS "pref" hint 0.
//
// Bug fix: the displacement stage used to restart from the base register
// (throwing away the index contribution just computed into AT) and, for a
// small disp, emitted daddiu twice so disp was added twice. Accumulate
// onto AT instead so index and disp each contribute exactly once.
instruct prefetchrNTA( memory mem ) %{
  match(PrefetchRead mem);
  ins_cost(125);

  format %{ "pref $mem\t# Prefetch into non-temporal cache for read @ prefetchrNTA" %}
  ins_encode %{
    int base = $mem$$base;
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;

    // AT = base + (index << scale)
    if( index != 0 ) {
      if (scale == 0) {
        __ daddu(AT, as_Register(base), as_Register(index));
      } else {
        __ dsll(AT, as_Register(index), scale);
        __ daddu(AT, as_Register(base), AT);
      }
    } else {
      __ move(AT, as_Register(base));
    }
    // AT += disp (large displacements go through T9).
    if( Assembler::is_simm16(disp) ) {
      __ daddiu(AT, AT, disp);
    } else {
      __ move(T9, disp);
      __ daddu(AT, AT, T9);
    }
    __ pref(0, AT, 0); //hint: 0:load
  %}
  ins_pipe(pipe_slow);
%}
// Prefetch for write: compute the effective address
// base + (index << scale) + disp into AT and issue MIPS "pref" hint 1.
//
// Bug fix (same copy-pasted defect as prefetchrNTA): the displacement
// stage used to restart from the base register (discarding the index
// part in AT) and added a small disp twice. Accumulate onto AT so the
// index and displacement each contribute exactly once.
instruct prefetchwNTA( memory mem ) %{
  match(PrefetchWrite mem);
  ins_cost(125);
  format %{ "pref $mem\t# Prefetch to non-temporal cache for write @ prefetchwNTA" %}
  ins_encode %{
    int base = $mem$$base;
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;

    // AT = base + (index << scale)
    if( index != 0 ) {
      if (scale == 0) {
        __ daddu(AT, as_Register(base), as_Register(index));
      } else {
        __ dsll(AT, as_Register(index), scale);
        __ daddu(AT, as_Register(base), AT);
      }
    } else {
      __ move(AT, as_Register(base));
    }
    // AT += disp (large displacements go through T9).
    if( Assembler::is_simm16(disp) ) {
      __ daddiu(AT, AT, disp);
    } else {
      __ move(T9, disp);
      __ daddu(AT, AT, T9);
    }
    __ pref(1, AT, 0); //hint: 1:store
  %}
  ins_pipe(pipe_slow);
%}
12173 // Prefetch instructions for allocation.
// Prefetch for allocation, implemented as a byte load into R0 (the
// hardwired-zero register), i.e. the loaded value is discarded and only
// the cache-fill side effect remains. Loongson gslbx is used where
// available to fold base+index+disp into one instruction.
12175 instruct prefetchAllocNTA( memory mem ) %{
12176 match(PrefetchAllocation mem);
12177 ins_cost(125);
12178 format %{ "pref $mem\t# Prefetch allocation @ prefetchAllocNTA" %}
12179 ins_encode %{
12180 int base = $mem$$base;
12181 int index = $mem$$index;
12182 int scale = $mem$$scale;
12183 int disp = $mem$$disp;
// Loads target R0, so the result is dropped by the hardware.
12185 Register dst = R0;
12187 if( index != 0 ) {
12188 if( Assembler::is_simm16(disp) ) {
12189 if( UseLoongsonISA ) {
12190 if (scale == 0) {
12191 __ gslbx(dst, as_Register(base), as_Register(index), disp);
12192 } else {
12193 __ dsll(AT, as_Register(index), scale);
12194 __ gslbx(dst, as_Register(base), AT, disp);
12195 }
12196 } else {
12197 if (scale == 0) {
12198 __ addu(AT, as_Register(base), as_Register(index));
12199 } else {
12200 __ dsll(AT, as_Register(index), scale);
12201 __ addu(AT, as_Register(base), AT);
12202 }
12203 __ lb(dst, AT, disp);
12204 }
12205 } else {
// Displacement does not fit in 16 bits: materialize it in T9 first.
12206 if (scale == 0) {
12207 __ addu(AT, as_Register(base), as_Register(index));
12208 } else {
12209 __ dsll(AT, as_Register(index), scale);
12210 __ addu(AT, as_Register(base), AT);
12211 }
12212 __ move(T9, disp);
12213 if( UseLoongsonISA ) {
12214 __ gslbx(dst, AT, T9, 0);
12215 } else {
12216 __ addu(AT, AT, T9);
12217 __ lb(dst, AT, 0);
12218 }
12219 }
12220 } else {
// No index register: address is simply base + disp.
12221 if( Assembler::is_simm16(disp) ) {
12222 __ lb(dst, as_Register(base), disp);
12223 } else {
12224 __ move(T9, disp);
12225 if( UseLoongsonISA ) {
12226 __ gslbx(dst, as_Register(base), T9, 0);
12227 } else {
12228 __ addu(AT, as_Register(base), T9);
12229 __ lb(dst, AT, 0);
12230 }
12231 }
12232 }
12233 %}
12234 ins_pipe(pipe_slow);
12235 %}
12238 // Call runtime without safepoint
// Call runtime without safepoint
// Leaf runtime call (FP arguments allowed); uses the shared
// Java_To_Runtime encoding class.
12239 instruct CallLeafDirect(method meth) %{
12240 match(CallLeaf);
12241 effect(USE meth);
12243 ins_cost(300);
12244 format %{ "CALL_LEAF,runtime #@CallLeafDirect " %}
12245 ins_encode(Java_To_Runtime(meth));
12246 ins_pipe( pipe_slow );
12247 ins_pc_relative(1);
12248 ins_alignment(16);
12249 %}
12251 // Load Char (16bit unsigned)
// Load Char (16-bit unsigned) into an int register via the shared
// load_C_enc encoding class.
12252 instruct loadUS(mRegI dst, memory mem) %{
12253 match(Set dst (LoadUS mem));
12255 ins_cost(125);
12256 format %{ "loadUS $dst,$mem @ loadC" %}
12257 ins_encode(load_C_enc(dst, mem));
12258 ins_pipe( ialu_loadI );
12259 %}
// Load 16-bit unsigned char and widen to long in one step; zero-extended
// load makes the explicit ConvI2L a no-op, so the same encoding is reused.
12261 instruct loadUS_convI2L(mRegL dst, memory mem) %{
12262 match(Set dst (ConvI2L (LoadUS mem)));
12264 ins_cost(125);
12265 format %{ "loadUS $dst,$mem @ loadUS_convI2L" %}
12266 ins_encode(load_C_enc(dst, mem));
12267 ins_pipe( ialu_loadI );
12268 %}
12270 // Store Char (16bit unsigned)
12271 // Store Char (16bit unsigned)
// Store a 16-bit char from a register via the shared store_C_reg_enc class.
12272 instruct storeC(memory mem, mRegI src) %{
12272 match(Set mem (StoreC mem src));
12274 ins_cost(125);
12275 format %{ "storeC $src, $mem @ storeC" %}
12276 ins_encode(store_C_reg_enc(mem, src));
12277 ins_pipe( ialu_loadI );
12278 %}
// Store a 16-bit zero; dedicated pattern avoids materializing the constant.
12280 instruct storeC0(memory mem, immI0 zero) %{
12281 match(Set mem (StoreC mem zero));
12283 ins_cost(125);
12284 format %{ "storeC $zero, $mem @ storeC0" %}
12285 ins_encode(store_C0_enc(mem));
12286 ins_pipe( ialu_loadI );
12287 %}
// Load float constant 0.0f: move integer zero (R0) straight into the
// FPU register; no constant-table access needed.
12290 instruct loadConF0(regF dst, immF0 zero) %{
12291 match(Set dst zero);
12292 ins_cost(100);
12294 format %{ "mov $dst, zero @ loadConF0\n"%}
12295 ins_encode %{
12296 FloatRegister dst = $dst$$FloatRegister;
12298 __ mtc1(R0, dst);
12299 %}
12300 ins_pipe( fpu_loadF );
12301 %}
// Load a float constant from the constant table. Small offsets use a
// direct lwc1; large offsets materialize the offset in AT (gslwxc1 on
// Loongson folds the add, otherwise an explicit daddu precedes the load).
12304 instruct loadConF(regF dst, immF src) %{
12305 match(Set dst src);
12306 ins_cost(125);
12308 format %{ "lwc1 $dst, $constantoffset[$constanttablebase] # load FLOAT $src from table @ loadConF" %}
12309 ins_encode %{
12310 int con_offset = $constantoffset($src);
12312 if (Assembler::is_simm16(con_offset)) {
12313 __ lwc1($dst$$FloatRegister, $constanttablebase, con_offset);
12314 } else {
12315 __ set64(AT, con_offset);
12316 if (UseLoongsonISA) {
12317 __ gslwxc1($dst$$FloatRegister, $constanttablebase, AT, 0);
12318 } else {
12319 __ daddu(AT, $constanttablebase, AT);
12320 __ lwc1($dst$$FloatRegister, AT, 0);
12321 }
12322 }
12323 %}
12324 ins_pipe( fpu_loadF );
12325 %}
// Load double constant 0.0: 64-bit move of R0 into the FPU register.
12328 instruct loadConD0(regD dst, immD0 zero) %{
12329 match(Set dst zero);
12330 ins_cost(100);
12332 format %{ "mov $dst, zero @ loadConD0"%}
12333 ins_encode %{
12334 FloatRegister dst = as_FloatRegister($dst$$reg);
12336 __ dmtc1(R0, dst);
12337 %}
12338 ins_pipe( fpu_loadF );
12339 %}
// Load a double constant from the constant table; mirrors loadConF but
// with ldc1/gsldxc1.
12341 instruct loadConD(regD dst, immD src) %{
12342 match(Set dst src);
12343 ins_cost(125);
12345 format %{ "ldc1 $dst, $constantoffset[$constanttablebase] # load DOUBLE $src from table @ loadConD" %}
12346 ins_encode %{
12347 int con_offset = $constantoffset($src);
12349 if (Assembler::is_simm16(con_offset)) {
12350 __ ldc1($dst$$FloatRegister, $constanttablebase, con_offset);
12351 } else {
12352 __ set64(AT, con_offset);
12353 if (UseLoongsonISA) {
12354 __ gsldxc1($dst$$FloatRegister, $constanttablebase, AT, 0);
12355 } else {
12356 __ daddu(AT, $constanttablebase, AT);
12357 __ ldc1($dst$$FloatRegister, AT, 0);
12358 }
12359 }
12360 %}
12361 ins_pipe( fpu_loadF );
12362 %}
12364 // Store register Float value (it is faster than store from FPU register)
12364 // Store register Float value (it is faster than store from FPU register)
// Store a float from an FPU register via the shared store_F_reg_enc class.
12365 instruct storeF_reg( memory mem, regF src) %{
12366 match(Set mem (StoreF mem src));
12368 ins_cost(50);
12369 format %{ "store $mem, $src\t# store float @ storeF_reg" %}
12370 ins_encode(store_F_reg_enc(mem, src));
12371 ins_pipe( fpu_storeF );
12372 %}
// Store float 0.0f: the 32-bit zero pattern equals integer zero, so R0 is
// stored with sw/gsswx and no FPU register is touched. The branches below
// select the cheapest addressing form for base/index/scale/disp.
12374 instruct storeF_imm0( memory mem, immF0 zero) %{
12375 match(Set mem (StoreF mem zero));
12377 ins_cost(40);
12378 format %{ "store $mem, zero\t# store float @ storeF_imm0" %}
12379 ins_encode %{
12380 int base = $mem$$base;
12381 int index = $mem$$index;
12382 int scale = $mem$$scale;
12383 int disp = $mem$$disp;
12385 if( index != 0 ) {
12386 if ( UseLoongsonISA ) {
// gsswx takes only an 8-bit immediate offset, hence the extra tier.
12387 if ( Assembler::is_simm(disp, 8) ) {
12388 if ( scale == 0 ) {
12389 __ gsswx(R0, as_Register(base), as_Register(index), disp);
12390 } else {
12391 __ dsll(T9, as_Register(index), scale);
12392 __ gsswx(R0, as_Register(base), T9, disp);
12393 }
12394 } else if ( Assembler::is_simm16(disp) ) {
12395 if ( scale == 0 ) {
12396 __ daddu(AT, as_Register(base), as_Register(index));
12397 } else {
12398 __ dsll(T9, as_Register(index), scale);
12399 __ daddu(AT, as_Register(base), T9);
12400 }
12401 __ sw(R0, AT, disp);
12402 } else {
12403 if ( scale == 0 ) {
12404 __ move(T9, disp);
12405 __ daddu(AT, as_Register(index), T9);
12406 __ gsswx(R0, as_Register(base), AT, 0);
12407 } else {
12408 __ dsll(T9, as_Register(index), scale);
12409 __ move(AT, disp);
12410 __ daddu(AT, AT, T9);
12411 __ gsswx(R0, as_Register(base), AT, 0);
12412 }
12413 }
12414 } else { //not use loongson isa
12415 if(scale != 0) {
12416 __ dsll(T9, as_Register(index), scale);
12417 __ daddu(AT, as_Register(base), T9);
12418 } else {
12419 __ daddu(AT, as_Register(base), as_Register(index));
12420 }
12421 if( Assembler::is_simm16(disp) ) {
12422 __ sw(R0, AT, disp);
12423 } else {
12424 __ move(T9, disp);
12425 __ daddu(AT, AT, T9);
12426 __ sw(R0, AT, 0);
12427 }
12428 }
12429 } else { //index is 0
12430 if ( UseLoongsonISA ) {
12431 if ( Assembler::is_simm16(disp) ) {
12432 __ sw(R0, as_Register(base), disp);
12433 } else {
12434 __ move(T9, disp);
12435 __ gsswx(R0, as_Register(base), T9, 0);
12436 }
12437 } else {
12438 if( Assembler::is_simm16(disp) ) {
12439 __ sw(R0, as_Register(base), disp);
12440 } else {
12441 __ move(T9, disp);
12442 __ daddu(AT, as_Register(base), T9);
12443 __ sw(R0, AT, 0);
12444 }
12445 }
12446 }
12447 %}
12448 ins_pipe( ialu_storeI );
12449 %}
12451 // Load Double
12451 // Load Double
// Aligned double load via the shared load_D_enc encoding class.
12452 instruct loadD(regD dst, memory mem) %{
12453 match(Set dst (LoadD mem));
12455 ins_cost(150);
12456 format %{ "loadD $dst, $mem #@loadD" %}
12457 ins_encode(load_D_enc(dst, mem));
12458 ins_pipe( ialu_loadI );
12459 %}
12461 // Load Double - UNaligned
12461 // Load Double - UNaligned
// Currently emitted identically to the aligned loadD (same encoding class);
// a dedicated ldl/ldr sequence is still TODO per the FIXME below.
12462 instruct loadD_unaligned(regD dst, memory mem ) %{
12463 match(Set dst (LoadD_unaligned mem));
12464 ins_cost(250);
12465 // FIXME: Jin: Need more effective ldl/ldr
12466 format %{ "loadD_unaligned $dst, $mem #@loadD_unaligned" %}
12467 ins_encode(load_D_enc(dst, mem));
12468 ins_pipe( ialu_loadI );
12469 %}
// Store a double from an FPU register via the shared store_D_reg_enc class.
12471 instruct storeD_reg( memory mem, regD src) %{
12472 match(Set mem (StoreD mem src));
12474 ins_cost(50);
12475 format %{ "store $mem, $src\t# store float @ storeD_reg" %}
12476 ins_encode(store_D_reg_enc(mem, src));
12477 ins_pipe( fpu_storeF );
12478 %}
// Store double 0.0: materialize 0.0 in scratch FPU register F30
// (mtc1 of R0 then int->double convert) and store it with sdc1/gssdxc1.
// NOTE(review): F30 is clobbered here — presumably reserved as a scratch
// FPU register by this port; confirm against the register definitions.
12480 instruct storeD_imm0( memory mem, immD0 zero) %{
12481 match(Set mem (StoreD mem zero));
12483 ins_cost(40);
12484 format %{ "store $mem, zero\t# store float @ storeD_imm0" %}
12485 ins_encode %{
12486 int base = $mem$$base;
12487 int index = $mem$$index;
12488 int scale = $mem$$scale;
12489 int disp = $mem$$disp;
12491 __ mtc1(R0, F30);
12492 __ cvt_d_w(F30, F30);
12494 if( index != 0 ) {
12495 if ( UseLoongsonISA ) {
// gssdxc1 takes only an 8-bit immediate offset, hence the extra tier.
12496 if ( Assembler::is_simm(disp, 8) ) {
12497 if (scale == 0) {
12498 __ gssdxc1(F30, as_Register(base), as_Register(index), disp);
12499 } else {
12500 __ dsll(T9, as_Register(index), scale);
12501 __ gssdxc1(F30, as_Register(base), T9, disp);
12502 }
12503 } else if ( Assembler::is_simm16(disp) ) {
12504 if (scale == 0) {
12505 __ daddu(AT, as_Register(base), as_Register(index));
12506 __ sdc1(F30, AT, disp);
12507 } else {
12508 __ dsll(T9, as_Register(index), scale);
12509 __ daddu(AT, as_Register(base), T9);
12510 __ sdc1(F30, AT, disp);
12511 }
12512 } else {
12513 if (scale == 0) {
12514 __ move(T9, disp);
12515 __ daddu(AT, as_Register(index), T9);
12516 __ gssdxc1(F30, as_Register(base), AT, 0);
12517 } else {
12518 __ move(T9, disp);
12519 __ dsll(AT, as_Register(index), scale);
12520 __ daddu(AT, AT, T9);
12521 __ gssdxc1(F30, as_Register(base), AT, 0);
12522 }
12523 }
12524 } else { // not use loongson isa
12525 if(scale != 0) {
12526 __ dsll(T9, as_Register(index), scale);
12527 __ daddu(AT, as_Register(base), T9);
12528 } else {
12529 __ daddu(AT, as_Register(base), as_Register(index));
12530 }
12531 if( Assembler::is_simm16(disp) ) {
12532 __ sdc1(F30, AT, disp);
12533 } else {
12534 __ move(T9, disp);
12535 __ daddu(AT, AT, T9);
12536 __ sdc1(F30, AT, 0);
12537 }
12538 }
12539 } else {// index is 0
12540 if ( UseLoongsonISA ) {
12541 if ( Assembler::is_simm16(disp) ) {
12542 __ sdc1(F30, as_Register(base), disp);
12543 } else {
12544 __ move(T9, disp);
12545 __ gssdxc1(F30, as_Register(base), T9, 0);
12546 }
12547 } else {
12548 if( Assembler::is_simm16(disp) ) {
12549 __ sdc1(F30, as_Register(base), disp);
12550 } else {
12551 __ move(T9, disp);
12552 __ daddu(AT, as_Register(base), T9);
12553 __ sdc1(F30, AT, 0);
12554 }
12555 }
12556 }
12557 %}
12558 ins_pipe( ialu_storeI );
12559 %}
// Load an int from a stack slot (SP-relative); the guarantee documents
// that stack-slot displacements must fit the 16-bit lw offset.
12561 instruct loadSSI(mRegI dst, stackSlotI src)
12562 %{
12563 match(Set dst src);
12565 ins_cost(125);
12566 format %{ "lw $dst, $src\t# int stk @ loadSSI" %}
12567 ins_encode %{
12568 guarantee( Assembler::is_simm16($src$$disp), "disp too long (loadSSI) !");
12569 __ lw($dst$$Register, SP, $src$$disp);
12570 %}
12571 ins_pipe(ialu_loadI);
12572 %}
// Store an int to a stack slot (SP-relative, 16-bit offset enforced).
12574 instruct storeSSI(stackSlotI dst, mRegI src)
12575 %{
12576 match(Set dst src);
12578 ins_cost(100);
12579 format %{ "sw $dst, $src\t# int stk @ storeSSI" %}
12580 ins_encode %{
12581 guarantee( Assembler::is_simm16($dst$$disp), "disp too long (storeSSI) !");
12582 __ sw($src$$Register, SP, $dst$$disp);
12583 %}
12584 ins_pipe(ialu_storeI);
12585 %}
// Load a long from a stack slot (SP-relative, 16-bit offset enforced).
12587 instruct loadSSL(mRegL dst, stackSlotL src)
12588 %{
12589 match(Set dst src);
12591 ins_cost(125);
12592 format %{ "ld $dst, $src\t# long stk @ loadSSL" %}
12593 ins_encode %{
12594 guarantee( Assembler::is_simm16($src$$disp), "disp too long (loadSSL) !");
12595 __ ld($dst$$Register, SP, $src$$disp);
12596 %}
12597 ins_pipe(ialu_loadI);
12598 %}
// Store a long to a stack slot (SP-relative, 16-bit offset enforced).
12600 instruct storeSSL(stackSlotL dst, mRegL src)
12601 %{
12602 match(Set dst src);
12604 ins_cost(100);
12605 format %{ "sd $dst, $src\t# long stk @ storeSSL" %}
12606 ins_encode %{
12607 guarantee( Assembler::is_simm16($dst$$disp), "disp too long (storeSSL) !");
12608 __ sd($src$$Register, SP, $dst$$disp);
12609 %}
12610 ins_pipe(ialu_storeI);
12611 %}
// Load a pointer from a stack slot (SP-relative, 16-bit offset enforced).
12613 instruct loadSSP(mRegP dst, stackSlotP src)
12614 %{
12615 match(Set dst src);
12617 ins_cost(125);
12618 format %{ "ld $dst, $src\t# ptr stk @ loadSSP" %}
12619 ins_encode %{
12620 guarantee( Assembler::is_simm16($src$$disp), "disp too long (loadSSP) !");
12621 __ ld($dst$$Register, SP, $src$$disp);
12622 %}
12623 ins_pipe(ialu_loadI);
12624 %}
// Store a pointer to a stack slot (SP-relative, 16-bit offset enforced).
12626 instruct storeSSP(stackSlotP dst, mRegP src)
12627 %{
12628 match(Set dst src);
12630 ins_cost(100);
12631 format %{ "sd $dst, $src\t# ptr stk @ storeSSP" %}
12632 ins_encode %{
12633 guarantee( Assembler::is_simm16($dst$$disp), "disp too long (storeSSP) !");
12634 __ sd($src$$Register, SP, $dst$$disp);
12635 %}
12636 ins_pipe(ialu_storeI);
12637 %}
// Load a float from a stack slot (SP-relative, 16-bit offset enforced).
12639 instruct loadSSF(regF dst, stackSlotF src)
12640 %{
12641 match(Set dst src);
12643 ins_cost(125);
12644 format %{ "lwc1 $dst, $src\t# float stk @ loadSSF" %}
12645 ins_encode %{
12646 guarantee( Assembler::is_simm16($src$$disp), "disp too long (loadSSF) !");
12647 __ lwc1($dst$$FloatRegister, SP, $src$$disp);
12648 %}
12649 ins_pipe(ialu_loadI);
12650 %}
// Store a float to a stack slot (SP-relative, 16-bit offset enforced).
12652 instruct storeSSF(stackSlotF dst, regF src)
12653 %{
12654 match(Set dst src);
12656 ins_cost(100);
12657 format %{ "swc1 $dst, $src\t# float stk @ storeSSF" %}
12658 ins_encode %{
12659 guarantee( Assembler::is_simm16($dst$$disp), "disp too long (storeSSF) !");
12660 __ swc1($src$$FloatRegister, SP, $dst$$disp);
12661 %}
12662 ins_pipe(fpu_storeF);
12663 %}
12665 // Use the same format since predicate() can not be used here.
12665 // Use the same format since predicate() can not be used here.
// Load a double from a stack slot (SP-relative, 16-bit offset enforced).
12666 instruct loadSSD(regD dst, stackSlotD src)
12667 %{
12668 match(Set dst src);
12670 ins_cost(125);
12671 format %{ "ldc1 $dst, $src\t# double stk @ loadSSD" %}
12672 ins_encode %{
12673 guarantee( Assembler::is_simm16($src$$disp), "disp too long (loadSSD) !");
12674 __ ldc1($dst$$FloatRegister, SP, $src$$disp);
12675 %}
12676 ins_pipe(ialu_loadI);
12677 %}
// Store a double to a stack slot (SP-relative, 16-bit offset enforced).
12679 instruct storeSSD(stackSlotD dst, regD src)
12680 %{
12681 match(Set dst src);
12683 ins_cost(100);
12684 format %{ "sdc1 $dst, $src\t# double stk @ storeSSD" %}
12685 ins_encode %{
12686 guarantee( Assembler::is_simm16($dst$$disp), "disp too long (storeSSD) !");
12687 __ sdc1($src$$FloatRegister, SP, $dst$$disp);
12688 %}
12689 ins_pipe(fpu_storeF);
12690 %}
// Fast-path monitor enter; defers to MacroAssembler::fast_lock, which
// sets the flag result consumed by the matched FastLock node.
12692 instruct cmpFastLock( FlagsReg cr, mRegP object, s0_RegP box, mRegI tmp, mRegP scr) %{
12693 match( Set cr (FastLock object box) );
12694 effect( TEMP tmp, TEMP scr, USE_KILL box );
12695 ins_cost(300);
12696 format %{ "FASTLOCK $cr $object, $box, $tmp #@ cmpFastLock" %}
12697 ins_encode %{
12698 __ fast_lock($object$$Register, $box$$Register, $tmp$$Register, $scr$$Register);
12699 %}
12701 ins_pipe( pipe_slow );
12702 ins_pc_relative(1);
12703 %}
// Fast-path monitor exit; defers to MacroAssembler::fast_unlock.
12705 instruct cmpFastUnlock( FlagsReg cr, mRegP object, s0_RegP box, mRegP tmp ) %{
12706 match( Set cr (FastUnlock object box) );
12707 effect( TEMP tmp, USE_KILL box );
12708 ins_cost(300);
12709 format %{ "FASTUNLOCK $object, $box, $tmp #@cmpFastUnlock" %}
12710 ins_encode %{
12711 __ fast_unlock($object$$Register, $box$$Register, $tmp$$Register);
12712 %}
12714 ins_pipe( pipe_slow );
12715 ins_pc_relative(1);
12716 %}
12718 // Store CMS card-mark Immediate
// Card-table byte store with ordering, via the store_B_immI_enc_sync class.
12719 instruct storeImmCM(memory mem, immI8 src) %{
12720 match(Set mem (StoreCM mem src));
12722 ins_cost(150);
12723 format %{ "MOV8 $mem,$src\t! CMS card-mark imm0" %}
12724 // opcode(0xC6);
12725 ins_encode(store_B_immI_enc_sync(mem, src));
12726 ins_pipe( ialu_storeI );
12727 %}
12729 // Die now
// Die now: matches the Halt node. Emits a fatal MacroAssembler::stop so an
// unexpected fall-through aborts the VM with an identifiable message.
// Fix: corrected the typo "ShoudNotReachHere" in the diagnostic string so
// crash logs actually name this instruct.
instruct ShouldNotReachHere( )
%{
  match(Halt);
  ins_cost(300);

  // Use the following format syntax
  format %{ "ILLTRAP ;#@ShouldNotReachHere" %}
  ins_encode %{
    // Here we should emit illtrap !
    __ stop("in ShouldNotReachHere");
  %}
  ins_pipe( pipe_jump );
%}
// Pointer LEA for a narrow-oop base plus small (8-bit) displacement; only
// valid when the narrow-oop shift is zero (see predicate), so the decoded
// address is simply base + disp.
12746 instruct leaP8Narrow(mRegP dst, indOffset8Narrow mem)
12747 %{
12748 predicate(Universe::narrow_oop_shift() == 0);
12749 match(Set dst mem);
12751 ins_cost(110);
12752 format %{ "leaq $dst, $mem\t# ptr off8narrow @ leaP8Narrow" %}
12753 ins_encode %{
12754 Register dst = $dst$$Register;
12755 Register base = as_Register($mem$$base);
12756 int disp = $mem$$disp;
12758 __ daddiu(dst, base, disp);
12759 %}
12760 ins_pipe( ialu_regI_imm16 );
12761 %}
// Pointer LEA: dst = base + (index << scale) + disp for the
// basePosIndexScaleOffset8 operand; AT is used as scratch.
12763 instruct leaPPosIdxScaleOff8(mRegP dst, basePosIndexScaleOffset8 mem)
12764 %{
12765 match(Set dst mem);
12767 ins_cost(110);
12768 format %{ "leaq $dst, $mem\t# @ PosIdxScaleOff8" %}
12769 ins_encode %{
12770 Register dst = $dst$$Register;
12771 Register base = as_Register($mem$$base);
12772 Register index = as_Register($mem$$index);
12773 int scale = $mem$$scale;
12774 int disp = $mem$$disp;
12776 if (scale == 0) {
12777 __ daddu(AT, base, index);
12778 __ daddiu(dst, AT, disp);
12779 } else {
12780 __ dsll(AT, index, scale);
12781 __ daddu(AT, base, AT);
12782 __ daddiu(dst, AT, disp);
12783 }
12784 %}
12786 ins_pipe( ialu_regI_imm16 );
12787 %}
// Pointer LEA: dst = base + (index << scale), no displacement.
12789 instruct leaPIdxScale(mRegP dst, indIndexScale mem)
12790 %{
12791 match(Set dst mem);
12793 ins_cost(110);
12794 format %{ "leaq $dst, $mem\t# @ leaPIdxScale" %}
12795 ins_encode %{
12796 Register dst = $dst$$Register;
12797 Register base = as_Register($mem$$base);
12798 Register index = as_Register($mem$$index);
12799 int scale = $mem$$scale;
12801 if (scale == 0) {
12802 __ daddu(dst, base, index);
12803 } else {
12804 __ dsll(AT, index, scale);
12805 __ daddu(dst, base, AT);
12806 }
12807 %}
12809 ins_pipe( ialu_regI_imm16 );
12810 %}
12812 // Jump Direct Conditional - Label defines a relative address from Jcc+1
12812 // Jump Direct Conditional - Label defines a relative address from Jcc+1
// Conditional branch at a counted-loop back edge, comparing two int
// registers. cmpcode values map: 0x01 eq, 0x02 ne, 0x03 gt, 0x04 ge,
// 0x05 lt, 0x06 le (the gt/ge/lt/le cases are synthesized with slt + a
// branch on AT). Unbound labels get offset-0 placeholders to patch later.
12813 instruct jmpLoopEnd(cmpOp cop, mRegI src1, mRegI src2, label labl) %{
12814 match(CountedLoopEnd cop (CmpI src1 src2));
12815 effect(USE labl);
12817 ins_cost(300);
12818 format %{ "J$cop $src1, $src2, $labl\t# Loop end @ jmpLoopEnd" %}
12819 ins_encode %{
12820 Register op1 = $src1$$Register;
12821 Register op2 = $src2$$Register;
12822 Label &L = *($labl$$label);
12823 int flag = $cop$$cmpcode;
12825 switch(flag)
12826 {
12827 case 0x01: //equal
12828 if (&L)
12829 __ beq(op1, op2, L);
12830 else
12831 __ beq(op1, op2, (int)0);
12832 break;
12833 case 0x02: //not_equal
12834 if (&L)
12835 __ bne(op1, op2, L);
12836 else
12837 __ bne(op1, op2, (int)0);
12838 break;
12839 case 0x03: //above
12840 __ slt(AT, op2, op1);
12841 if(&L)
12842 __ bne(AT, R0, L);
12843 else
12844 __ bne(AT, R0, (int)0);
12845 break;
12846 case 0x04: //above_equal
12847 __ slt(AT, op1, op2);
12848 if(&L)
12849 __ beq(AT, R0, L);
12850 else
12851 __ beq(AT, R0, (int)0);
12852 break;
12853 case 0x05: //below
12854 __ slt(AT, op1, op2);
12855 if(&L)
12856 __ bne(AT, R0, L);
12857 else
12858 __ bne(AT, R0, (int)0);
12859 break;
12860 case 0x06: //below_equal
12861 __ slt(AT, op2, op1);
12862 if(&L)
12863 __ beq(AT, R0, L);
12864 else
12865 __ beq(AT, R0, (int)0);
12866 break;
12867 default:
12868 Unimplemented();
12869 }
// Delay slot.
12870 __ nop();
12871 %}
12872 ins_pipe( pipe_jump );
12873 ins_pc_relative(1);
12874 %}
// Loop-end branch against a 16-bit immediate (immI16_sub operand):
// computes AT = src1 - src2 with addiu32 and then branches on the sign /
// zero-ness of AT, avoiding a register for the constant.
12877 instruct jmpLoopEnd_reg_imm16_sub(cmpOp cop, mRegI src1, immI16_sub src2, label labl) %{
12878 match(CountedLoopEnd cop (CmpI src1 src2));
12879 effect(USE labl);
12881 ins_cost(250);
12882 format %{ "J$cop $src1, $src2, $labl\t# Loop end @ jmpLoopEnd_reg_imm16_sub" %}
12883 ins_encode %{
12884 Register op1 = $src1$$Register;
12885 int op2 = $src2$$constant;
12886 Label &L = *($labl$$label);
12887 int flag = $cop$$cmpcode;
// AT = op1 - op2; all cases below test AT against zero.
12889 __ addiu32(AT, op1, -1 * op2);
12891 switch(flag)
12892 {
12893 case 0x01: //equal
12894 if (&L)
12895 __ beq(AT, R0, L);
12896 else
12897 __ beq(AT, R0, (int)0);
12898 break;
12899 case 0x02: //not_equal
12900 if (&L)
12901 __ bne(AT, R0, L);
12902 else
12903 __ bne(AT, R0, (int)0);
12904 break;
12905 case 0x03: //above
12906 if(&L)
12907 __ bgtz(AT, L);
12908 else
12909 __ bgtz(AT, (int)0);
12910 break;
12911 case 0x04: //above_equal
12912 if(&L)
12913 __ bgez(AT, L);
12914 else
12915 __ bgez(AT,(int)0);
12916 break;
12917 case 0x05: //below
12918 if(&L)
12919 __ bltz(AT, L);
12920 else
12921 __ bltz(AT, (int)0);
12922 break;
12923 case 0x06: //below_equal
12924 if(&L)
12925 __ blez(AT, L);
12926 else
12927 __ blez(AT, (int)0);
12928 break;
12929 default:
12930 Unimplemented();
12931 }
// Delay slot.
12932 __ nop();
12933 %}
12934 ins_pipe( pipe_jump );
12935 ins_pc_relative(1);
12936 %}
12939 /*
12940 // Jump Direct Conditional - Label defines a relative address from Jcc+1
12941 instruct jmpLoopEndU(cmpOpU cop, eFlagsRegU cmp, label labl) %{
12942 match(CountedLoopEnd cop cmp);
12943 effect(USE labl);
12945 ins_cost(300);
12946 format %{ "J$cop,u $labl\t# Loop end" %}
12947 size(6);
12948 opcode(0x0F, 0x80);
12949 ins_encode( Jcc( cop, labl) );
12950 ins_pipe( pipe_jump );
12951 ins_pc_relative(1);
12952 %}
12954 instruct jmpLoopEndUCF(cmpOpUCF cop, eFlagsRegUCF cmp, label labl) %{
12955 match(CountedLoopEnd cop cmp);
12956 effect(USE labl);
12958 ins_cost(200);
12959 format %{ "J$cop,u $labl\t# Loop end" %}
12960 opcode(0x0F, 0x80);
12961 ins_encode( Jcc( cop, labl) );
12962 ins_pipe( pipe_jump );
12963 ins_pc_relative(1);
12964 %}
12965 */
12967 // This match pattern is created for StoreIConditional since I cannot match IfNode without a RegFlags! fujie 2012/07/17
// Branch on the pseudo flags register: on this port AT holds the flag
// value (nonzero = condition true), so "equal" branches when AT != 0 and
// "not equal" branches when AT == 0 — the inversion is intentional
// because AT records the comparison outcome, not the operands.
12968 instruct jmpCon_flags(cmpOp cop, FlagsReg cr, label labl) %{
12969 match(If cop cr);
12970 effect(USE labl);
12972 ins_cost(300);
12973 format %{ "J$cop $labl #mips uses AT as eflag @jmpCon_flags" %}
12975 ins_encode %{
12976 Label &L = *($labl$$label);
12977 switch($cop$$cmpcode)
12978 {
12979 case 0x01: //equal
12980 if (&L)
12981 __ bne(AT, R0, L);
12982 else
12983 __ bne(AT, R0, (int)0);
12984 break;
12985 case 0x02: //not equal
12986 if (&L)
12987 __ beq(AT, R0, L);
12988 else
12989 __ beq(AT, R0, (int)0);
12990 break;
12991 default:
12992 Unimplemented();
12993 }
// Delay slot.
12994 __ nop();
12995 %}
12997 ins_pipe( pipe_jump );
12998 ins_pc_relative(1);
12999 %}
13002 // ============================================================================
13003 // The 2nd slow-half of a subtype check. Scan the subklass's 2ndary superklass
13004 // array for an instance of the superklass. Set a hidden internal cache on a
13005 // hit (cache is checked with exposed code in gen_subtype_check()). Return
13006 // NZ for a miss or zero for a hit. The encoding ALSO sets flags.
// Emission is delegated to the enc_PartialSubtypeCheck encoding class;
// T8 is excluded from the pointer operands because it is the kill temp.
13007 instruct partialSubtypeCheck( mRegP result, no_T8_mRegP sub, no_T8_mRegP super, mT8RegI tmp ) %{
13008 match(Set result (PartialSubtypeCheck sub super));
13009 effect(KILL tmp);
13010 ins_cost(1100); // slightly larger than the next version
13011 format %{ "partialSubtypeCheck result=$result, sub=$sub, super=$super, tmp=$tmp " %}
13013 ins_encode( enc_PartialSubtypeCheck(result, sub, super, tmp) );
13014 ins_pipe( pipe_slow );
13015 %}
13018 // Conditional-store of an int value.
13019 // ZF flag is set on success, reset otherwise. Implemented with a CMPXCHG on Intel.
// MIPS implementation: hand-rolled ll/sc retry loop. On success AT is left
// as 0xFF (nonzero = stored), on compare failure AT is 0 — the value in AT
// acts as the flags result. Indexed addressing is unsupported (stop()).
13020 instruct storeIConditional( memory mem, mRegI oldval, mRegI newval, FlagsReg cr ) %{
13021 match(Set cr (StoreIConditional mem (Binary oldval newval)));
13022 // effect(KILL oldval);
13023 format %{ "CMPXCHG $newval, $mem, $oldval \t# @storeIConditional" %}
13025 ins_encode %{
13026 Register oldval = $oldval$$Register;
13027 Register newval = $newval$$Register;
13028 Address addr(as_Register($mem$$base), $mem$$disp);
13029 Label again, failure;
13031 // int base = $mem$$base;
13032 int index = $mem$$index;
13033 int scale = $mem$$scale;
13034 int disp = $mem$$disp;
// ll/sc encode the displacement as a 16-bit immediate.
13036 guarantee(Assembler::is_simm16(disp), "");
13038 if( index != 0 ) {
13039 __ stop("in storeIConditional: index != 0");
13040 } else {
13041 __ bind(again);
13042 if(UseSyncLevel <= 1000) __ sync();
13043 __ ll(AT, addr);
// Mismatch: fall through to failure with AT = 0 (delay slot zeroes AT).
13044 __ bne(AT, oldval, failure);
13045 __ delayed()->addu(AT, R0, R0);
13047 __ addu(AT, newval, R0);
// sc clears AT if the reservation was lost; retry in that case.
13048 __ sc(AT, addr);
13049 __ beq(AT, R0, again);
13050 __ delayed()->addiu(AT, R0, 0xFF);
13051 __ bind(failure);
13052 __ sync();
13053 }
13054 %}
13056 ins_pipe( long_memory_op );
13057 %}
// Conditional-store of a long value.
// ZF flag is set on success, reset otherwise. Implemented with a CMPXCHG.
// The heavy lifting (ll.d/sc.d loop and flag result in AT) is done by
// MacroAssembler::cmpxchg. Indexed addressing is unsupported here.
// Fix: the stop() diagnostic was copy-pasted from storeIConditional and
// named the wrong instruct; it now says storeLConditional.
instruct storeLConditional(memory mem, t2RegL oldval, mRegL newval, FlagsReg cr )
%{
  match(Set cr (StoreLConditional mem (Binary oldval newval)));
  effect(KILL oldval);

  format %{ "cmpxchg $mem, $newval\t# If $oldval == $mem then store $newval into $mem" %}
  ins_encode%{
    Register oldval = $oldval$$Register;
    Register newval = $newval$$Register;
    Address addr((Register)$mem$$base, $mem$$disp);

    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;

    // The cmpxchg sequence encodes disp as a 16-bit immediate.
    guarantee(Assembler::is_simm16(disp), "");

    if( index != 0 ) {
      __ stop("in storeLConditional: index != 0");
    } else {
      __ cmpxchg(newval, addr, oldval);
    }
  %}
  ins_pipe( long_memory_op );
%}
// 32-bit CAS: MacroAssembler::cmpxchg32 leaves the success flag in AT,
// which is copied to the int result register.
13088 instruct compareAndSwapI( mRegI res, mRegP mem_ptr, mS2RegI oldval, mRegI newval) %{
13089 match(Set res (CompareAndSwapI mem_ptr (Binary oldval newval)));
13090 effect(KILL oldval);
13091 // match(CompareAndSwapI mem_ptr (Binary oldval newval));
13092 format %{ "CMPXCHG $newval, [$mem_ptr], $oldval @ compareAndSwapI\n\t"
13093 "MOV $res, 1 @ compareAndSwapI\n\t"
13094 "BNE AT, R0 @ compareAndSwapI\n\t"
13095 "MOV $res, 0 @ compareAndSwapI\n"
13096 "L:" %}
13097 ins_encode %{
13098 Register newval = $newval$$Register;
13099 Register oldval = $oldval$$Register;
13100 Register res = $res$$Register;
13101 Address addr($mem_ptr$$Register, 0);
// NOTE(review): Label L is declared but never used here.
13102 Label L;
13104 __ cmpxchg32(newval, addr, oldval);
13105 __ move(res, AT);
13106 %}
13107 ins_pipe( long_memory_op );
13108 %}
13110 //FIXME:
// 64-bit pointer CAS via MacroAssembler::cmpxchg; success flag comes back
// in AT and is copied to the result register.
13111 instruct compareAndSwapP( mRegI res, mRegP mem_ptr, s2_RegP oldval, mRegP newval) %{
13112 match(Set res (CompareAndSwapP mem_ptr (Binary oldval newval)));
13113 effect(KILL oldval);
13114 format %{ "CMPXCHG $newval, [$mem_ptr], $oldval @ compareAndSwapP\n\t"
13115 "MOV $res, AT @ compareAndSwapP\n\t"
13116 "L:" %}
13117 ins_encode %{
13118 Register newval = $newval$$Register;
13119 Register oldval = $oldval$$Register;
13120 Register res = $res$$Register;
13121 Address addr($mem_ptr$$Register, 0);
// NOTE(review): Label L is declared but never used here.
13122 Label L;
13124 __ cmpxchg(newval, addr, oldval);
13125 __ move(res, AT);
13126 %}
13127 ins_pipe( long_memory_op );
13128 %}
// Narrow-oop (32-bit) CAS. oldval is sign-extended first because the
// ll/sc-based cmpxchg32 sign-extends the loaded word (see Jin's note).
13130 instruct compareAndSwapN( mRegI res, mRegP mem_ptr, t2_RegN oldval, mRegN newval) %{
13131 match(Set res (CompareAndSwapN mem_ptr (Binary oldval newval)));
13132 effect(KILL oldval);
13133 format %{ "CMPXCHG $newval, [$mem_ptr], $oldval @ compareAndSwapN\n\t"
13134 "MOV $res, AT @ compareAndSwapN\n\t"
13135 "L:" %}
13136 ins_encode %{
13137 Register newval = $newval$$Register;
13138 Register oldval = $oldval$$Register;
13139 Register res = $res$$Register;
13140 Address addr($mem_ptr$$Register, 0);
// NOTE(review): Label L is declared but never used here.
13141 Label L;
13143 /* 2013/7/19 Jin: cmpxchg32 is implemented with ll/sc, which will do sign extension.
13144 * Thus, we should extend oldval's sign for correct comparision.
13145 */
13146 __ sll(oldval, oldval, 0);
13148 __ cmpxchg32(newval, addr, oldval);
13149 __ move(res, AT);
13150 %}
13151 ins_pipe( long_memory_op );
13152 %}
13154 //----------Max and Min--------------------------------------------------------
13155 // Min Instructions
13156 ////
13157 // *** Min and Max using the conditional move are slower than the
13158 // *** branch version on a Pentium III.
13159 // // Conditional move for min
13160 //instruct cmovI_reg_lt( eRegI op2, eRegI op1, eFlagsReg cr ) %{
13161 // effect( USE_DEF op2, USE op1, USE cr );
13162 // format %{ "CMOVlt $op2,$op1\t! min" %}
13163 // opcode(0x4C,0x0F);
13164 // ins_encode( OpcS, OpcP, RegReg( op2, op1 ) );
13165 // ins_pipe( pipe_cmov_reg );
13166 //%}
13167 //
13168 //// Min Register with Register (P6 version)
13169 //instruct minI_eReg_p6( eRegI op1, eRegI op2 ) %{
13170 // predicate(VM_Version::supports_cmov() );
13171 // match(Set op2 (MinI op1 op2));
13172 // ins_cost(200);
13173 // expand %{
13174 // eFlagsReg cr;
13175 // compI_eReg(cr,op1,op2);
13176 // cmovI_reg_lt(op2,op1,cr);
13177 // %}
13178 //%}
13180 // Min Register with Register (generic version)
// dst = min(dst, src): slt sets AT when src < dst, movn then conditionally
// replaces dst with src. Branch-free.
13181 instruct minI_Reg_Reg(mRegI dst, mRegI src) %{
13182 match(Set dst (MinI dst src));
13183 //effect(KILL flags);
13184 ins_cost(80);
13186 format %{ "MIN $dst, $src @minI_Reg_Reg" %}
13187 ins_encode %{
13188 Register dst = $dst$$Register;
13189 Register src = $src$$Register;
13191 __ slt(AT, src, dst);
13192 __ movn(dst, src, AT);
13194 %}
13196 ins_pipe( pipe_slow );
13197 %}
13199 // Max Register with Register
13200 // *** Min and Max using the conditional move are slower than the
13201 // *** branch version on a Pentium III.
13202 // // Conditional move for max
13203 //instruct cmovI_reg_gt( eRegI op2, eRegI op1, eFlagsReg cr ) %{
13204 // effect( USE_DEF op2, USE op1, USE cr );
13205 // format %{ "CMOVgt $op2,$op1\t! max" %}
13206 // opcode(0x4F,0x0F);
13207 // ins_encode( OpcS, OpcP, RegReg( op2, op1 ) );
13208 // ins_pipe( pipe_cmov_reg );
13209 //%}
13210 //
13211 // // Max Register with Register (P6 version)
13212 //instruct maxI_eReg_p6( eRegI op1, eRegI op2 ) %{
13213 // predicate(VM_Version::supports_cmov() );
13214 // match(Set op2 (MaxI op1 op2));
13215 // ins_cost(200);
13216 // expand %{
13217 // eFlagsReg cr;
13218 // compI_eReg(cr,op1,op2);
13219 // cmovI_reg_gt(op2,op1,cr);
13220 // %}
13221 //%}
13223 // Max Register with Register (generic version)
// Signed integer maximum: dst = max(dst, src), via slt + conditional move.
// Clobbers AT (the assembler temporary).
instruct maxI_Reg_Reg(mRegI dst, mRegI src) %{
  match(Set dst (MaxI dst src));
  ins_cost(80);

  format %{ "MAX    $dst, $src @maxI_Reg_Reg" %}

  ins_encode %{
    Register dst = $dst$$Register;
    Register src = $src$$Register;

    __ slt(AT, dst, src);    // AT = (dst < src) ? 1 : 0
    __ movn(dst, src, AT);   // if AT != 0, dst = src

  %}

  ins_pipe( pipe_slow );
%}
// Signed maximum against zero: dst = max(dst, 0). Cheaper variant of
// maxI_Reg_Reg for the common MaxI-with-constant-zero shape. Clobbers AT.
instruct maxI_Reg_zero(mRegI dst, immI0 zero) %{
  match(Set dst (MaxI dst zero));
  ins_cost(50);

  format %{ "MAX    $dst, 0 @maxI_Reg_zero" %}

  ins_encode %{
    Register dst = $dst$$Register;

    __ slt(AT, dst, R0);     // AT = (dst < 0) ? 1 : 0
    __ movn(dst, R0, AT);    // if negative, dst = 0

  %}

  ins_pipe( pipe_slow );
%}
// Zero-extend the low 32 bits of a long: dst = src & 0xFFFFFFFF,
// implemented with a single dext (extract bit-field [0,32)).
instruct zerox_long_reg_reg(mRegL dst, mRegL src, immL_32bits mask)
%{
  match(Set dst (AndL src mask));

  format %{ "movl    $dst, $src\t# zero-extend long @ zerox_long_reg_reg" %}
  ins_encode %{
    Register dst = $dst$$Register;
    Register src = $src$$Register;

    __ dext(dst, src, 0, 32);
  %}
  ins_pipe(ialu_regI_regI);
%}
// Combine two ints into one long: dst = (src2 << 32) | zext(src1).
// Matches OrL of a zero-extended low half and a 32-bit-shifted high half.
// The three cases below avoid clobbering an input that aliases dst.
instruct combine_i2l(mRegL dst, mRegI src1, immL_32bits mask, mRegI src2, immI_32 shift32)
%{
  match(Set dst (OrL (AndL (ConvI2L src1) mask) (LShiftL (ConvI2L src2) shift32)));

  format %{ "combine_i2l    $dst, $src2(H), $src1(L) @ combine_i2l" %}
  ins_encode %{
    Register dst  = $dst$$Register;
    Register src1 = $src1$$Register;
    Register src2 = $src2$$Register;

    if (src1 == dst) {
       // Low half already in place; insert src2 into bits [32,64).
       __ dinsu(dst, src2, 32, 32);
    } else if (src2 == dst) {
       // Move the high half up first, then insert the low half.
       __ dsll32(dst, dst, 0);
       __ dins(dst, src1, 0, 32);
    } else {
       __ dext(dst, src1, 0, 32);
       __ dinsu(dst, src2, 32, 32);
    }
  %}
  ins_pipe(ialu_regI_regI);
%}
13296 // Zero-extend convert int to long
// Zero-extend convert int to long: dst = zext32(src), via dext [0,32).
instruct convI2L_reg_reg_zex(mRegL dst, mRegI src, immL_32bits mask)
%{
  match(Set dst (AndL (ConvI2L src) mask));

  format %{ "movl    $dst, $src\t# i2l zero-extend @ convI2L_reg_reg_zex" %}
  ins_encode %{
    Register dst = $dst$$Register;
    Register src = $src$$Register;

    __ dext(dst, src, 0, 32);
  %}
  ins_pipe(ialu_regI_regI);
%}
// Long -> int -> long round trip masked to 32 bits collapses to a
// single zero-extension of the low word: dst = src & 0xFFFFFFFF.
instruct convL2I2L_reg_reg_zex(mRegL dst, mRegL src, immL_32bits mask)
%{
  match(Set dst (AndL (ConvI2L (ConvL2I src)) mask));

  format %{ "movl    $dst, $src\t# i2l zero-extend @ convL2I2L_reg_reg_zex" %}
  ins_encode %{
    Register dst = $dst$$Register;
    Register src = $src$$Register;

    __ dext(dst, src, 0, 32);
  %}
  ins_pipe(ialu_regI_regI);
%}
13325 // Match loading integer and casting it to unsigned int in long register.
13326 // LoadI + ConvI2L + AndL 0xffffffff.
// Fold LoadI + ConvI2L + AndL 0xFFFFFFFF (mask on the right) into a
// single unsigned 32-bit load (load_N_enc emits the zero-extending form).
instruct loadUI2L_rmask(mRegL dst, memory mem, immL_32bits mask) %{
  match(Set dst (AndL (ConvI2L (LoadI mem)) mask));

  format %{ "lwu     $dst, $mem \t// zero-extend to long @ loadUI2L_rmask" %}
  ins_encode (load_N_enc(dst, mem));
  ins_pipe(ialu_loadI);
%}
// Same as loadUI2L_rmask but matches the mask as the left AndL operand;
// AndL is commutative, so both orderings must be covered.
instruct loadUI2L_lmask(mRegL dst, memory mem, immL_32bits mask) %{
  match(Set dst (AndL mask (ConvI2L (LoadI mem))));

  format %{ "lwu     $dst, $mem \t// zero-extend to long @ loadUI2L_lmask" %}
  ins_encode (load_N_enc(dst, mem));
  ins_pipe(ialu_loadI);
%}
13344 // ============================================================================
13345 // Safepoint Instruction
// Safepoint poll: load from the polling page held in $poll. The load is
// tagged with a poll_type relocation so the VM can recognize this site;
// the loaded value (into AT) is discarded.
instruct safePoint_poll(mRegP poll) %{
  match(SafePoint poll);
  effect(USE poll);

  ins_cost(125);
  format %{ "Safepoint @ [$poll] : poll for GC @ safePoint_poll" %}

  ins_encode %{
    Register poll_reg = $poll$$Register;

    __ block_comment("Safepoint:");
    __ relocate(relocInfo::poll_type);   // mark this load as a safepoint poll
    __ lw(AT, poll_reg, 0);
  %}

  ins_pipe( ialu_storeI );
%}
13364 //----------Arithmetic Conversion Instructions---------------------------------
// RoundFloat is a no-op on this target: no code emitted, zero cost.
instruct roundFloat_nop(regF dst)
%{
  match(Set dst (RoundFloat dst));

  ins_cost(0);
  ins_encode();
  ins_pipe(empty);
%}
// RoundDouble is a no-op on this target: no code emitted, zero cost.
instruct roundDouble_nop(regD dst)
%{
  match(Set dst (RoundDouble dst));

  ins_cost(0);
  ins_encode();
  ins_pipe(empty);
%}
13384 //---------- Zeros Count Instructions ------------------------------------------
13385 // CountLeadingZerosINode CountTrailingZerosINode
// Count leading zeros of an int with the hardware clz instruction;
// only selected when UseCountLeadingZerosInstruction is enabled.
instruct countLeadingZerosI(mRegI dst, mRegI src) %{
  predicate(UseCountLeadingZerosInstruction);
  match(Set dst (CountLeadingZerosI src));

  format %{ "clz    $dst, $src\t# count leading zeros (int)" %}
  ins_encode %{
    __ clz($dst$$Register, $src$$Register);
  %}
  ins_pipe( ialu_regL_regL );
%}
// Count leading zeros of a long with the 64-bit dclz instruction;
// only selected when UseCountLeadingZerosInstruction is enabled.
instruct countLeadingZerosL(mRegI dst, mRegL src) %{
  predicate(UseCountLeadingZerosInstruction);
  match(Set dst (CountLeadingZerosL src));

  format %{ "dclz   $dst, $src\t# count leading zeros (long)" %}
  ins_encode %{
    __ dclz($dst$$Register, $src$$Register);
  %}
  ins_pipe( ialu_regL_regL );
%}
// Count trailing zeros of an int; guarded by UseCountTrailingZerosInstruction.
instruct countTrailingZerosI(mRegI dst, mRegI src) %{
  predicate(UseCountTrailingZerosInstruction);
  match(Set dst (CountTrailingZerosI src));

  format %{ "ctz    $dst, $src\t# count trailing zeros (int)" %}
  ins_encode %{
    // ctz and dctz are "gs" (Loongson-specific) instructions, hence the
    // predicate gate above.
    __ ctz($dst$$Register, $src$$Register);
  %}
  ins_pipe( ialu_regL_regL );
%}
// Count trailing zeros of a long; guarded by UseCountTrailingZerosInstruction.
// dctz is a Loongson-specific ("gs") instruction.
instruct countTrailingZerosL(mRegI dst, mRegL src) %{
  predicate(UseCountTrailingZerosInstruction);
  match(Set dst (CountTrailingZerosL src));

  // Fixed format string: the emitted instruction is dctz, not "dcto".
  format %{ "dctz   $dst, $src\t# count trailing zeros (long)" %}
  ins_encode %{
    __ dctz($dst$$Register, $src$$Register);
  %}
  ins_pipe( ialu_regL_regL );
%}
13431 // ====================VECTOR INSTRUCTIONS=====================================
13433 // Load vectors (8 bytes long)
// Load an 8-byte vector from memory into a double (vecD) register,
// reusing the scalar double-load encoding.
instruct loadV8(vecD dst, memory mem) %{
  predicate(n->as_LoadVector()->memory_size() == 8);
  match(Set dst (LoadVector mem));
  ins_cost(125);
  format %{ "load    $dst, $mem\t! load vector (8 bytes)" %}
  ins_encode(load_D_enc(dst, mem));
  ins_pipe( fpu_loadF );
%}
13443 // Store vectors (8 bytes long)
// Store an 8-byte vector from a vecD register to memory,
// reusing the scalar double-store encoding.
instruct storeV8(memory mem, vecD src) %{
  predicate(n->as_StoreVector()->memory_size() == 8);
  match(Set mem (StoreVector mem src));
  ins_cost(145);
  format %{ "store   $mem, $src\t! store vector (8 bytes)" %}
  ins_encode(store_D_reg_enc(mem, src));
  ins_pipe( fpu_storeF );
%}
// Replicate a byte into all 8 lanes using the DSP replv_ob instruction
// (cheaper path, only when Use3A2000 hardware support is available).
// Clobbers AT; result moved to the FP register with dmtc1.
instruct Repl8B_DSP(vecD dst, mRegI src) %{
  predicate(n->as_Vector()->length() == 8 && Use3A2000);
  match(Set dst (ReplicateB src));
  ins_cost(100);
  format %{ "replv_ob    AT, $src\n\t"
            "dmtc1 AT, $dst\t! replicate8B" %}
  ins_encode %{
    __ replv_ob(AT, $src$$Register);
    __ dmtc1(AT, $dst$$FloatRegister);
  %}
  ins_pipe( pipe_mtc1 );
%}
// Replicate a byte into all 8 lanes without DSP support: successively
// double the populated width with dins/dinsu (8->16->32->64 bits),
// then move to the FP register. Clobbers AT. Higher cost than the DSP form.
instruct Repl8B(vecD dst, mRegI src) %{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (ReplicateB src));
  ins_cost(140);
  format %{ "move       AT,  $src\n\t"
            "dins  AT, AT, 8,  8\n\t"
            "dins  AT, AT, 16, 16\n\t"
            "dinsu AT, AT, 32, 32\n\t"
            "dmtc1 AT, $dst\t! replicate8B" %}
  ins_encode %{
    __ move(AT, $src$$Register);
    __ dins(AT, AT, 8, 8);      // byte  -> 2 bytes
    __ dins(AT, AT, 16, 16);    // 2 bytes -> 4 bytes
    __ dinsu(AT, AT, 32, 32);   // 4 bytes -> 8 bytes
    __ dmtc1(AT, $dst$$FloatRegister);
  %}
  ins_pipe( pipe_mtc1 );
%}
// Replicate an immediate byte into all 8 lanes via the DSP repl_ob
// instruction (Use3A2000 only). Clobbers AT.
instruct Repl8B_imm_DSP(vecD dst, immI con) %{
  predicate(n->as_Vector()->length() == 8 && Use3A2000);
  match(Set dst (ReplicateB con));
  ins_cost(110);
  format %{ "repl_ob    AT, [$con]\n\t"
            "dmtc1 AT, $dst,0x00\t! replicate8B($con)" %}
  ins_encode %{
    int      val = $con$$constant;
    __ repl_ob(AT, val);
    __ dmtc1(AT, $dst$$FloatRegister);
  %}
  ins_pipe( pipe_mtc1 );
%}
// Replicate an immediate byte into all 8 lanes without DSP support:
// load the constant, then widen by doubling with dins/dinsu. Clobbers AT.
instruct Repl8B_imm(vecD dst, immI con) %{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (ReplicateB con));
  ins_cost(150);
  format %{ "move       AT,  [$con]\n\t"
            "dins  AT, AT, 8,  8\n\t"
            "dins  AT, AT, 16, 16\n\t"
            "dinsu AT, AT, 32, 32\n\t"
            "dmtc1 AT, $dst,0x00\t! replicate8B($con)" %}
  ins_encode %{
    __ move(AT, $con$$constant);
    __ dins(AT, AT, 8, 8);      // byte  -> 2 bytes
    __ dins(AT, AT, 16, 16);    // 2 bytes -> 4 bytes
    __ dinsu(AT, AT, 32, 32);   // 4 bytes -> 8 bytes
    __ dmtc1(AT, $dst$$FloatRegister);
  %}
  ins_pipe( pipe_mtc1 );
%}
// Replicate byte zero: simply move R0 (hardwired zero) into the FP register.
instruct Repl8B_zero(vecD dst, immI0 zero) %{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (ReplicateB zero));
  ins_cost(90);
  format %{ "dmtc1    R0, $dst\t! replicate8B zero" %}
  ins_encode %{
    __ dmtc1(R0, $dst$$FloatRegister);
  %}
  ins_pipe( pipe_mtc1 );
%}
// Replicate byte -1 (all bits set): nor(AT, R0, R0) produces ~0,
// then move to the FP register. Clobbers AT.
instruct Repl8B_M1(vecD dst, immI_M1 M1) %{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (ReplicateB M1));
  ins_cost(80);
  format %{ "dmtc1    -1, $dst\t! replicate8B -1" %}
  ins_encode %{
    __ nor(AT, R0, R0);
    __ dmtc1(AT, $dst$$FloatRegister);
  %}
  ins_pipe( pipe_mtc1 );
%}
// Replicate a short into all 4 lanes using the DSP replv_qh instruction
// (Use3A2000 only). Clobbers AT.
instruct Repl4S_DSP(vecD dst, mRegI src) %{
  predicate(n->as_Vector()->length() == 4 && Use3A2000);
  match(Set dst (ReplicateS src));
  ins_cost(100);
  format %{ "replv_qh    AT, $src\n\t"
            "dmtc1 AT, $dst\t! replicate4S" %}
  ins_encode %{
    __ replv_qh(AT, $src$$Register);
    __ dmtc1(AT, $dst$$FloatRegister);
  %}
  ins_pipe( pipe_mtc1 );
%}
// Replicate a short into all 4 lanes without DSP support: double the
// populated width with dins/dinsu (16->32->64 bits). Clobbers AT.
instruct Repl4S(vecD dst, mRegI src) %{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (ReplicateS src));
  ins_cost(120);
  format %{ "move       AT,  $src  \n\t"
            "dins  AT, AT, 16, 16\n\t"
            "dinsu AT, AT, 32, 32\n\t"
            "dmtc1 AT, $dst\t! replicate4S" %}
  ins_encode %{
    __ move(AT, $src$$Register);
    __ dins(AT, AT, 16, 16);    // short -> 2 shorts
    __ dinsu(AT, AT, 32, 32);   // 2 shorts -> 4 shorts
    __ dmtc1(AT, $dst$$FloatRegister);
  %}
  ins_pipe( pipe_mtc1 );
%}
// Replicate an immediate short into all 4 lanes via DSP instructions
// (Use3A2000 only). repl_qh takes the immediate directly when it fits in
// 10 signed bits; otherwise materialize it with li32 and use replv_qh.
// Clobbers AT.
instruct Repl4S_imm_DSP(vecD dst, immI con) %{
  predicate(n->as_Vector()->length() == 4 && Use3A2000);
  match(Set dst (ReplicateS con));
  ins_cost(100);
  format %{ "replv_qh    AT, [$con]\n\t"
            "dmtc1 AT, $dst\t! replicate4S($con)" %}
  ins_encode %{
    int      val = $con$$constant;
    if ( Assembler::is_simm(val, 10)) {
      //repl_qh supports 10 bits immediate
      __ repl_qh(AT, val);
    } else {
      __ li32(AT, val);
      __ replv_qh(AT, AT);
    }
    __ dmtc1(AT, $dst$$FloatRegister);
  %}
  ins_pipe( pipe_mtc1 );
%}
// Replicate an immediate short into all 4 lanes without DSP support:
// load the constant, then widen with dins/dinsu. Clobbers AT.
instruct Repl4S_imm(vecD dst, immI con) %{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (ReplicateS con));
  ins_cost(110);
  format %{ "move       AT,  [$con]\n\t"
            "dins  AT, AT, 16, 16\n\t"
            "dinsu AT, AT, 32, 32\n\t"
            "dmtc1 AT, $dst\t! replicate4S($con)" %}
  ins_encode %{
    __ move(AT, $con$$constant);
    __ dins(AT, AT, 16, 16);    // short -> 2 shorts
    __ dinsu(AT, AT, 32, 32);   // 2 shorts -> 4 shorts
    __ dmtc1(AT, $dst$$FloatRegister);
  %}
  ins_pipe( pipe_mtc1 );
%}
// Replicate short zero: move R0 (hardwired zero) into the FP register.
instruct Repl4S_zero(vecD dst, immI0 zero) %{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (ReplicateS zero));
  format %{ "dmtc1    R0, $dst\t! replicate4S zero" %}
  ins_encode %{
    __ dmtc1(R0, $dst$$FloatRegister);
  %}
  ins_pipe( pipe_mtc1 );
%}
// Replicate short -1 (all bits set) via nor(AT, R0, R0). Clobbers AT.
instruct Repl4S_M1(vecD dst, immI_M1 M1) %{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (ReplicateS M1));
  format %{ "dmtc1    -1, $dst\t! replicate4S -1" %}
  ins_encode %{
    __ nor(AT, R0, R0);
    __ dmtc1(AT, $dst$$FloatRegister);
  %}
  ins_pipe( pipe_mtc1 );
%}
13629 // Replicate integer (4 byte) scalar to be vector
// Replicate an int into both 32-bit halves of a 64-bit vector:
// insert src into bits [0,32) and [32,64) of AT, then move to FP. Clobbers AT.
instruct Repl2I(vecD dst, mRegI src) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (ReplicateI src));
  format %{ "dins    AT, $src, 0, 32\n\t"
            "dinsu   AT, $src, 32, 32\n\t"
            "dmtc1   AT, $dst\t! replicate2I" %}
  ins_encode %{
    __ dins(AT, $src$$Register, 0, 32);     // low  half
    __ dinsu(AT, $src$$Register, 32, 32);   // high half
    __ dmtc1(AT, $dst$$FloatRegister);
  %}
  ins_pipe( pipe_mtc1 );
%}
13644 // Replicate integer (4 byte) scalar immediate to be vector by loading from const table.
// Replicate an immediate int into both 32-bit halves of a 64-bit vector:
// materialize the constant with li32, duplicate the low word into the
// high word with dinsu, then move to the FP register. Clobbers AT.
// NOTE(review): the mA7RegI tmp operand is KILLed but never referenced by
// the encoding — looks vestigial; confirm before removing.
instruct Repl2I_imm(vecD dst, immI con, mA7RegI tmp) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (ReplicateI con));
  effect(KILL tmp);
  // Fixed format string to match the emitted code: li32 takes no third
  // operand, and dinsu inserts bit-field [32,32).
  format %{ "li32    AT, [$con]\n\t"
            "dinsu   AT, AT, 32, 32\n\t"
            "dmtc1   AT, $dst\t! replicate2I($con)" %}
  ins_encode %{
    int      val = $con$$constant;
    __ li32(AT, val);
    __ dinsu(AT, AT, 32, 32);   // copy low word into high word
    __ dmtc1(AT, $dst$$FloatRegister);
  %}
  ins_pipe( pipe_mtc1 );
%}
13661 // Replicate integer (4 byte) scalar zero to be vector
// Replicate int zero: move R0 (hardwired zero) into the FP register.
instruct Repl2I_zero(vecD dst, immI0 zero) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (ReplicateI zero));
  format %{ "dmtc1    R0, $dst\t! replicate2I zero" %}
  ins_encode %{
    __ dmtc1(R0, $dst$$FloatRegister);
  %}
  ins_pipe( pipe_mtc1 );
%}
13672 // Replicate integer (4 byte) scalar -1 to be vector
// Replicate int -1 (all bits set) via nor(AT, R0, R0). Clobbers AT.
instruct Repl2I_M1(vecD dst, immI_M1 M1) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (ReplicateI M1));
  format %{ "dmtc1    -1, $dst\t! replicate2I -1, use AT" %}
  ins_encode %{
    __ nor(AT, R0, R0);
    __ dmtc1(AT, $dst$$FloatRegister);
  %}
  ins_pipe( pipe_mtc1 );
%}
13684 // Replicate float (4 byte) scalar to be vector
// Replicate a float into both lanes using the paired-single convert
// cvt.ps.s, which packs two singles into one paired-single register.
instruct Repl2F(vecD dst, regF src) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (ReplicateF src));
  format %{ "cvt.ps $dst, $src, $src\t! replicate2F" %}
  ins_encode %{
    __ cvt_ps_s($dst$$FloatRegister, $src$$FloatRegister, $src$$FloatRegister);
  %}
  ins_pipe( pipe_slow );
%}
13695 // Replicate float (4 byte) scalar zero to be vector
// Replicate float zero: move R0 (hardwired zero) into the FP register;
// the 64-bit zero pattern is +0.0f in both lanes.
instruct Repl2F_zero(vecD dst, immF0 zero) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (ReplicateF zero));
  format %{ "dmtc1    R0, $dst\t! replicate2F zero" %}
  ins_encode %{
    __ dmtc1(R0, $dst$$FloatRegister);
  %}
  ins_pipe( pipe_mtc1 );
%}
13707 // ====================VECTOR ARITHMETIC=======================================
13709 // --------------------------------- ADD --------------------------------------
13711 // Floats vector add
// Packed 2-float add (two-address form: dst += src) using add.ps.
instruct vadd2F(vecD dst, vecD src) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (AddVF dst src));
  format %{ "add.ps $dst,$src\t! add packed2F" %}
  ins_encode %{
    __ add_ps($dst$$FloatRegister, $dst$$FloatRegister, $src$$FloatRegister);
  %}
  ins_pipe( pipe_slow );
%}
// Packed 2-float add (three-address form: dst = src1 + src2) using add.ps.
instruct vadd2F3(vecD dst, vecD src1, vecD src2) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (AddVF src1 src2));
  format %{ "add.ps $dst,$src1,$src2\t! add packed2F" %}
  ins_encode %{
    __ add_ps($dst$$FloatRegister, $src1$$FloatRegister, $src2$$FloatRegister);
  %}
  ins_pipe( fpu_regF_regF );
%}
13732 // --------------------------------- SUB --------------------------------------
13734 // Floats vector sub
// Packed 2-float subtract (two-address form: dst -= src) using sub.ps.
instruct vsub2F(vecD dst, vecD src) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (SubVF dst src));
  format %{ "sub.ps $dst,$src\t! sub packed2F" %}
  ins_encode %{
    __ sub_ps($dst$$FloatRegister, $dst$$FloatRegister, $src$$FloatRegister);
  %}
  ins_pipe( fpu_regF_regF );
%}
13745 // --------------------------------- MUL --------------------------------------
13747 // Floats vector mul
// Packed 2-float multiply (two-address form: dst *= src) using mul.ps.
instruct vmul2F(vecD dst, vecD src) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (MulVF dst src));
  format %{ "mul.ps $dst, $src\t! mul packed2F" %}
  ins_encode %{
    __ mul_ps($dst$$FloatRegister, $dst$$FloatRegister, $src$$FloatRegister);
  %}
  ins_pipe( fpu_regF_regF );
%}
// Packed 2-float multiply (three-address form: dst = src1 * src2) using mul.ps.
instruct vmul2F3(vecD dst, vecD src1, vecD src2) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (MulVF src1 src2));
  format %{ "mul.ps $dst, $src1, $src2\t! mul packed2F" %}
  ins_encode %{
    __ mul_ps($dst$$FloatRegister, $src1$$FloatRegister, $src2$$FloatRegister);
  %}
  ins_pipe( fpu_regF_regF );
%}
13768 // --------------------------------- DIV --------------------------------------
13769 // MIPS do not have div.ps
13772 //----------PEEPHOLE RULES-----------------------------------------------------
13773 // These must follow all instruction definitions as they use the names
13774 // defined in the instructions definitions.
13775 //
// peepmatch ( root_instr_name [preceding_instruction]* );
13777 //
13778 // peepconstraint %{
13779 // (instruction_number.operand_name relational_op instruction_number.operand_name
13780 // [, ...] );
13781 // // instruction numbers are zero-based using left to right order in peepmatch
13782 //
13783 // peepreplace ( instr_name ( [instruction_number.operand_name]* ) );
13784 // // provide an instruction_number.operand_name for each operand that appears
13785 // // in the replacement instruction's match rule
13786 //
13787 // ---------VM FLAGS---------------------------------------------------------
13788 //
13789 // All peephole optimizations can be turned off using -XX:-OptoPeephole
13790 //
13791 // Each peephole rule is given an identifying number starting with zero and
13792 // increasing by one in the order seen by the parser. An individual peephole
13793 // can be enabled, and all others disabled, by using -XX:OptoPeepholeAt=#
13794 // on the command-line.
13795 //
13796 // ---------CURRENT LIMITATIONS----------------------------------------------
13797 //
13798 // Only match adjacent instructions in same basic block
13799 // Only equality constraints
13800 // Only constraints between operands, not (0.dest_reg == EAX_enc)
13801 // Only one replacement instruction
13802 //
13803 // ---------EXAMPLE----------------------------------------------------------
13804 //
13805 // // pertinent parts of existing instructions in architecture description
13806 // instruct movI(eRegI dst, eRegI src) %{
13807 // match(Set dst (CopyI src));
13808 // %}
13809 //
13810 // instruct incI_eReg(eRegI dst, immI1 src, eFlagsReg cr) %{
13811 // match(Set dst (AddI dst src));
13812 // effect(KILL cr);
13813 // %}
13814 //
13815 // // Change (inc mov) to lea
13816 // peephole %{
// // increment preceded by register-register move
13818 // peepmatch ( incI_eReg movI );
13819 // // require that the destination register of the increment
13820 // // match the destination register of the move
13821 // peepconstraint ( 0.dst == 1.dst );
13822 // // construct a replacement instruction that sets
13823 // // the destination to ( move's source register + one )
13824 // peepreplace ( leaI_eReg_immI( 0.dst 1.src 0.src ) );
13825 // %}
13826 //
13827 // Implementation no longer uses movX instructions since
13828 // machine-independent system no longer uses CopyX nodes.
13829 //
13830 // peephole %{
13831 // peepmatch ( incI_eReg movI );
13832 // peepconstraint ( 0.dst == 1.dst );
13833 // peepreplace ( leaI_eReg_immI( 0.dst 1.src 0.src ) );
13834 // %}
13835 //
13836 // peephole %{
13837 // peepmatch ( decI_eReg movI );
13838 // peepconstraint ( 0.dst == 1.dst );
13839 // peepreplace ( leaI_eReg_immI( 0.dst 1.src 0.src ) );
13840 // %}
13841 //
13842 // peephole %{
13843 // peepmatch ( addI_eReg_imm movI );
13844 // peepconstraint ( 0.dst == 1.dst );
13845 // peepreplace ( leaI_eReg_immI( 0.dst 1.src 0.src ) );
13846 // %}
13847 //
13848 // peephole %{
13849 // peepmatch ( addP_eReg_imm movP );
13850 // peepconstraint ( 0.dst == 1.dst );
13851 // peepreplace ( leaP_eReg_immI( 0.dst 1.src 0.src ) );
13852 // %}
13854 // // Change load of spilled value to only a spill
13855 // instruct storeI(memory mem, eRegI src) %{
13856 // match(Set mem (StoreI mem src));
13857 // %}
13858 //
13859 // instruct loadI(eRegI dst, memory mem) %{
13860 // match(Set dst (LoadI mem));
13861 // %}
13862 //
13863 //peephole %{
13864 // peepmatch ( loadI storeI );
13865 // peepconstraint ( 1.src == 0.dst, 1.mem == 0.mem );
13866 // peepreplace ( storeI( 1.mem 1.mem 1.src ) );
13867 //%}
13869 //----------SMARTSPILL RULES---------------------------------------------------
13870 // These must follow all instruction definitions as they use the names
13871 // defined in the instructions definitions.