// Tue, 07 Mar 2017 04:25:27 -0500
// [C2] Polling at 0x10000 for MIPS.
1 //
2 // Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved.
3 // Copyright (c) 2015, 2016, Loongson Technology. All rights reserved.
4 // DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5 //
6 // This code is free software; you can redistribute it and/or modify it
7 // under the terms of the GNU General Public License version 2 only, as
8 // published by the Free Software Foundation.
9 //
10 // This code is distributed in the hope that it will be useful, but WITHOUT
11 // ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 // FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
13 // version 2 for more details (a copy is included in the LICENSE file that
14 // accompanied this code).
15 //
16 // You should have received a copy of the GNU General Public License version
17 // 2 along with this work; if not, write to the Free Software Foundation,
18 // Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
19 //
20 // Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
21 // or visit www.oracle.com if you need additional information or have any
22 // questions.
23 //
24 //
26 // GodSon3 Architecture Description File
28 //----------REGISTER DEFINITION BLOCK------------------------------------------
29 // This information is used by the matcher and the register allocator to
30 // describe individual registers and classes of registers within the target
// architecture.
33 // format:
34 // reg_def name (call convention, c-call convention, ideal type, encoding);
35 // call convention :
36 // NS = No-Save
37 // SOC = Save-On-Call
38 // SOE = Save-On-Entry
39 // AS = Always-Save
40 // ideal type :
41 // see opto/opcodes.hpp for more info
42 // reg_class name (reg, ...);
43 // alloc_class name (reg, ...);
44 register %{
46 // General Registers
47 // Integer Registers
48 reg_def R0 ( NS, NS, Op_RegI, 0, VMRegImpl::Bad());
49 reg_def AT ( NS, NS, Op_RegI, 1, AT->as_VMReg());
50 reg_def AT_H ( NS, NS, Op_RegI, 1, AT->as_VMReg()->next());
51 reg_def V0 (SOC, SOC, Op_RegI, 2, V0->as_VMReg());
52 reg_def V0_H (SOC, SOC, Op_RegI, 2, V0->as_VMReg()->next());
53 reg_def V1 (SOC, SOC, Op_RegI, 3, V1->as_VMReg());
54 reg_def V1_H (SOC, SOC, Op_RegI, 3, V1->as_VMReg()->next());
55 reg_def A0 (SOC, SOC, Op_RegI, 4, A0->as_VMReg());
56 reg_def A0_H (SOC, SOC, Op_RegI, 4, A0->as_VMReg()->next());
57 reg_def A1 (SOC, SOC, Op_RegI, 5, A1->as_VMReg());
58 reg_def A1_H (SOC, SOC, Op_RegI, 5, A1->as_VMReg()->next());
59 reg_def A2 (SOC, SOC, Op_RegI, 6, A2->as_VMReg());
60 reg_def A2_H (SOC, SOC, Op_RegI, 6, A2->as_VMReg()->next());
61 reg_def A3 (SOC, SOC, Op_RegI, 7, A3->as_VMReg());
62 reg_def A3_H (SOC, SOC, Op_RegI, 7, A3->as_VMReg()->next());
63 reg_def A4 (SOC, SOC, Op_RegI, 8, A4->as_VMReg());
64 reg_def A4_H (SOC, SOC, Op_RegI, 8, A4->as_VMReg()->next());
65 reg_def A5 (SOC, SOC, Op_RegI, 9, A5->as_VMReg());
66 reg_def A5_H (SOC, SOC, Op_RegI, 9, A5->as_VMReg()->next());
67 reg_def A6 (SOC, SOC, Op_RegI, 10, A6->as_VMReg());
68 reg_def A6_H (SOC, SOC, Op_RegI, 10, A6->as_VMReg()->next());
69 reg_def A7 (SOC, SOC, Op_RegI, 11, A7->as_VMReg());
70 reg_def A7_H (SOC, SOC, Op_RegI, 11, A7->as_VMReg()->next());
71 reg_def T0 (SOC, SOC, Op_RegI, 12, T0->as_VMReg());
72 reg_def T0_H (SOC, SOC, Op_RegI, 12, T0->as_VMReg()->next());
73 reg_def T1 (SOC, SOC, Op_RegI, 13, T1->as_VMReg());
74 reg_def T1_H (SOC, SOC, Op_RegI, 13, T1->as_VMReg()->next());
75 reg_def T2 (SOC, SOC, Op_RegI, 14, T2->as_VMReg());
76 reg_def T2_H (SOC, SOC, Op_RegI, 14, T2->as_VMReg()->next());
77 reg_def T3 (SOC, SOC, Op_RegI, 15, T3->as_VMReg());
78 reg_def T3_H (SOC, SOC, Op_RegI, 15, T3->as_VMReg()->next());
79 reg_def S0 (SOC, SOE, Op_RegI, 16, S0->as_VMReg());
80 reg_def S0_H (SOC, SOE, Op_RegI, 16, S0->as_VMReg()->next());
81 reg_def S1 (SOC, SOE, Op_RegI, 17, S1->as_VMReg());
82 reg_def S1_H (SOC, SOE, Op_RegI, 17, S1->as_VMReg()->next());
83 reg_def S2 (SOC, SOE, Op_RegI, 18, S2->as_VMReg());
84 reg_def S2_H (SOC, SOE, Op_RegI, 18, S2->as_VMReg()->next());
85 reg_def S3 (SOC, SOE, Op_RegI, 19, S3->as_VMReg());
86 reg_def S3_H (SOC, SOE, Op_RegI, 19, S3->as_VMReg()->next());
87 reg_def S4 (SOC, SOE, Op_RegI, 20, S4->as_VMReg());
88 reg_def S4_H (SOC, SOE, Op_RegI, 20, S4->as_VMReg()->next());
89 reg_def S5 (SOC, SOE, Op_RegI, 21, S5->as_VMReg());
90 reg_def S5_H (SOC, SOE, Op_RegI, 21, S5->as_VMReg()->next());
91 reg_def S6 (SOC, SOE, Op_RegI, 22, S6->as_VMReg());
92 reg_def S6_H (SOC, SOE, Op_RegI, 22, S6->as_VMReg()->next());
93 reg_def S7 (SOC, SOE, Op_RegI, 23, S7->as_VMReg());
94 reg_def S7_H (SOC, SOE, Op_RegI, 23, S7->as_VMReg()->next());
95 reg_def T8 (SOC, SOC, Op_RegI, 24, T8->as_VMReg());
96 reg_def T8_H (SOC, SOC, Op_RegI, 24, T8->as_VMReg()->next());
97 reg_def T9 (SOC, SOC, Op_RegI, 25, T9->as_VMReg());
98 reg_def T9_H (SOC, SOC, Op_RegI, 25, T9->as_VMReg()->next());
100 // Special Registers
101 reg_def K0 ( NS, NS, Op_RegI, 26, K0->as_VMReg());
102 reg_def K1 ( NS, NS, Op_RegI, 27, K1->as_VMReg());
103 reg_def GP ( NS, NS, Op_RegI, 28, GP->as_VMReg());
104 reg_def GP_H ( NS, NS, Op_RegI, 28, GP->as_VMReg()->next());
105 reg_def SP ( NS, NS, Op_RegI, 29, SP->as_VMReg());
106 reg_def SP_H ( NS, NS, Op_RegI, 29, SP->as_VMReg()->next());
107 reg_def FP ( NS, NS, Op_RegI, 30, FP->as_VMReg());
108 reg_def FP_H ( NS, NS, Op_RegI, 30, FP->as_VMReg()->next());
109 reg_def RA ( NS, NS, Op_RegI, 31, RA->as_VMReg());
110 reg_def RA_H ( NS, NS, Op_RegI, 31, RA->as_VMReg()->next());
112 // Floating registers.
113 reg_def F0 ( SOC, SOC, Op_RegF, 0, F0->as_VMReg());
114 reg_def F0_H ( SOC, SOC, Op_RegF, 0, F0->as_VMReg()->next());
115 reg_def F1 ( SOC, SOC, Op_RegF, 1, F1->as_VMReg());
116 reg_def F1_H ( SOC, SOC, Op_RegF, 1, F1->as_VMReg()->next());
117 reg_def F2 ( SOC, SOC, Op_RegF, 2, F2->as_VMReg());
118 reg_def F2_H ( SOC, SOC, Op_RegF, 2, F2->as_VMReg()->next());
119 reg_def F3 ( SOC, SOC, Op_RegF, 3, F3->as_VMReg());
120 reg_def F3_H ( SOC, SOC, Op_RegF, 3, F3->as_VMReg()->next());
121 reg_def F4 ( SOC, SOC, Op_RegF, 4, F4->as_VMReg());
122 reg_def F4_H ( SOC, SOC, Op_RegF, 4, F4->as_VMReg()->next());
123 reg_def F5 ( SOC, SOC, Op_RegF, 5, F5->as_VMReg());
124 reg_def F5_H ( SOC, SOC, Op_RegF, 5, F5->as_VMReg()->next());
125 reg_def F6 ( SOC, SOC, Op_RegF, 6, F6->as_VMReg());
126 reg_def F6_H ( SOC, SOC, Op_RegF, 6, F6->as_VMReg()->next());
127 reg_def F7 ( SOC, SOC, Op_RegF, 7, F7->as_VMReg());
128 reg_def F7_H ( SOC, SOC, Op_RegF, 7, F7->as_VMReg()->next());
129 reg_def F8 ( SOC, SOC, Op_RegF, 8, F8->as_VMReg());
130 reg_def F8_H ( SOC, SOC, Op_RegF, 8, F8->as_VMReg()->next());
131 reg_def F9 ( SOC, SOC, Op_RegF, 9, F9->as_VMReg());
132 reg_def F9_H ( SOC, SOC, Op_RegF, 9, F9->as_VMReg()->next());
133 reg_def F10 ( SOC, SOC, Op_RegF, 10, F10->as_VMReg());
134 reg_def F10_H ( SOC, SOC, Op_RegF, 10, F10->as_VMReg()->next());
135 reg_def F11 ( SOC, SOC, Op_RegF, 11, F11->as_VMReg());
136 reg_def F11_H ( SOC, SOC, Op_RegF, 11, F11->as_VMReg()->next());
137 reg_def F12 ( SOC, SOC, Op_RegF, 12, F12->as_VMReg());
138 reg_def F12_H ( SOC, SOC, Op_RegF, 12, F12->as_VMReg()->next());
139 reg_def F13 ( SOC, SOC, Op_RegF, 13, F13->as_VMReg());
140 reg_def F13_H ( SOC, SOC, Op_RegF, 13, F13->as_VMReg()->next());
141 reg_def F14 ( SOC, SOC, Op_RegF, 14, F14->as_VMReg());
142 reg_def F14_H ( SOC, SOC, Op_RegF, 14, F14->as_VMReg()->next());
143 reg_def F15 ( SOC, SOC, Op_RegF, 15, F15->as_VMReg());
144 reg_def F15_H ( SOC, SOC, Op_RegF, 15, F15->as_VMReg()->next());
145 reg_def F16 ( SOC, SOC, Op_RegF, 16, F16->as_VMReg());
146 reg_def F16_H ( SOC, SOC, Op_RegF, 16, F16->as_VMReg()->next());
147 reg_def F17 ( SOC, SOC, Op_RegF, 17, F17->as_VMReg());
148 reg_def F17_H ( SOC, SOC, Op_RegF, 17, F17->as_VMReg()->next());
149 reg_def F18 ( SOC, SOC, Op_RegF, 18, F18->as_VMReg());
150 reg_def F18_H ( SOC, SOC, Op_RegF, 18, F18->as_VMReg()->next());
151 reg_def F19 ( SOC, SOC, Op_RegF, 19, F19->as_VMReg());
152 reg_def F19_H ( SOC, SOC, Op_RegF, 19, F19->as_VMReg()->next());
153 reg_def F20 ( SOC, SOC, Op_RegF, 20, F20->as_VMReg());
154 reg_def F20_H ( SOC, SOC, Op_RegF, 20, F20->as_VMReg()->next());
155 reg_def F21 ( SOC, SOC, Op_RegF, 21, F21->as_VMReg());
156 reg_def F21_H ( SOC, SOC, Op_RegF, 21, F21->as_VMReg()->next());
157 reg_def F22 ( SOC, SOC, Op_RegF, 22, F22->as_VMReg());
158 reg_def F22_H ( SOC, SOC, Op_RegF, 22, F22->as_VMReg()->next());
159 reg_def F23 ( SOC, SOC, Op_RegF, 23, F23->as_VMReg());
160 reg_def F23_H ( SOC, SOC, Op_RegF, 23, F23->as_VMReg()->next());
161 reg_def F24 ( SOC, SOC, Op_RegF, 24, F24->as_VMReg());
162 reg_def F24_H ( SOC, SOC, Op_RegF, 24, F24->as_VMReg()->next());
163 reg_def F25 ( SOC, SOC, Op_RegF, 25, F25->as_VMReg());
164 reg_def F25_H ( SOC, SOC, Op_RegF, 25, F25->as_VMReg()->next());
165 reg_def F26 ( SOC, SOC, Op_RegF, 26, F26->as_VMReg());
166 reg_def F26_H ( SOC, SOC, Op_RegF, 26, F26->as_VMReg()->next());
167 reg_def F27 ( SOC, SOC, Op_RegF, 27, F27->as_VMReg());
168 reg_def F27_H ( SOC, SOC, Op_RegF, 27, F27->as_VMReg()->next());
169 reg_def F28 ( SOC, SOC, Op_RegF, 28, F28->as_VMReg());
170 reg_def F28_H ( SOC, SOC, Op_RegF, 28, F28->as_VMReg()->next());
171 reg_def F29 ( SOC, SOC, Op_RegF, 29, F29->as_VMReg());
172 reg_def F29_H ( SOC, SOC, Op_RegF, 29, F29->as_VMReg()->next());
173 reg_def F30 ( SOC, SOC, Op_RegF, 30, F30->as_VMReg());
174 reg_def F30_H ( SOC, SOC, Op_RegF, 30, F30->as_VMReg()->next());
175 reg_def F31 ( SOC, SOC, Op_RegF, 31, F31->as_VMReg());
176 reg_def F31_H ( SOC, SOC, Op_RegF, 31, F31->as_VMReg()->next());
179 // ----------------------------
180 // Special Registers
181 // Condition Codes Flag Registers
182 reg_def MIPS_FLAG (SOC, SOC, Op_RegFlags, 1, as_Register(1)->as_VMReg());
183 //S6 is used for get_thread(S6)
// S5 is used for the heap base of compressed oops
// Allocation order for the general (integer) registers.
// Fix: the original was missing the ',' after "GP, GP_H", which ran
// GP_H and RA together into one malformed entry.
alloc_class chunk0(
                   S7, S7_H,
                   S0, S0_H,
                   S1, S1_H,
                   S2, S2_H,
                   S4, S4_H,
                   S5, S5_H,
                   S6, S6_H,
                   S3, S3_H,
                   T2, T2_H,
                   T3, T3_H,
                   T8, T8_H,
                   T9, T9_H,
                   T1, T1_H, // inline_cache_reg
                   V1, V1_H,
                   A7, A7_H,
                   A6, A6_H,
                   A5, A5_H,
                   A4, A4_H,
                   V0, V0_H,
                   A3, A3_H,
                   A2, A2_H,
                   A1, A1_H,
                   A0, A0_H,
                   T0, T0_H,
                   GP, GP_H,
                   RA, RA_H,
                   SP, SP_H, // stack_pointer
                   FP, FP_H  // frame_pointer
                   );
216 alloc_class chunk1( F0, F0_H,
217 F1, F1_H,
218 F2, F2_H,
219 F3, F3_H,
220 F4, F4_H,
221 F5, F5_H,
222 F6, F6_H,
223 F7, F7_H,
224 F8, F8_H,
225 F9, F9_H,
226 F10, F10_H,
227 F11, F11_H,
228 F20, F20_H,
229 F21, F21_H,
230 F22, F22_H,
231 F23, F23_H,
232 F24, F24_H,
233 F25, F25_H,
234 F26, F26_H,
235 F27, F27_H,
236 F28, F28_H,
237 F19, F19_H,
238 F18, F18_H,
239 F17, F17_H,
240 F16, F16_H,
241 F15, F15_H,
242 F14, F14_H,
243 F13, F13_H,
244 F12, F12_H,
245 F29, F29_H,
246 F30, F30_H,
247 F31, F31_H);
249 alloc_class chunk2(MIPS_FLAG);
251 reg_class s_reg( S0, S1, S2, S3, S4, S5, S6, S7 );
252 reg_class s0_reg( S0 );
253 reg_class s1_reg( S1 );
254 reg_class s2_reg( S2 );
255 reg_class s3_reg( S3 );
256 reg_class s4_reg( S4 );
257 reg_class s5_reg( S5 );
258 reg_class s6_reg( S6 );
259 reg_class s7_reg( S7 );
261 reg_class t_reg( T0, T1, T2, T3, T8, T9 );
262 reg_class t0_reg( T0 );
263 reg_class t1_reg( T1 );
264 reg_class t2_reg( T2 );
265 reg_class t3_reg( T3 );
266 reg_class t8_reg( T8 );
267 reg_class t9_reg( T9 );
269 reg_class a_reg( A0, A1, A2, A3, A4, A5, A6, A7 );
270 reg_class a0_reg( A0 );
271 reg_class a1_reg( A1 );
272 reg_class a2_reg( A2 );
273 reg_class a3_reg( A3 );
274 reg_class a4_reg( A4 );
275 reg_class a5_reg( A5 );
276 reg_class a6_reg( A6 );
277 reg_class a7_reg( A7 );
279 reg_class v0_reg( V0 );
280 reg_class v1_reg( V1 );
282 reg_class sp_reg( SP, SP_H );
283 reg_class fp_reg( FP, FP_H );
285 reg_class mips_flags(MIPS_FLAG);
287 reg_class v0_long_reg( V0, V0_H );
288 reg_class v1_long_reg( V1, V1_H );
289 reg_class a0_long_reg( A0, A0_H );
290 reg_class a1_long_reg( A1, A1_H );
291 reg_class a2_long_reg( A2, A2_H );
292 reg_class a3_long_reg( A3, A3_H );
293 reg_class a4_long_reg( A4, A4_H );
294 reg_class a5_long_reg( A5, A5_H );
295 reg_class a6_long_reg( A6, A6_H );
296 reg_class a7_long_reg( A7, A7_H );
297 reg_class t0_long_reg( T0, T0_H );
298 reg_class t1_long_reg( T1, T1_H );
299 reg_class t2_long_reg( T2, T2_H );
300 reg_class t3_long_reg( T3, T3_H );
301 reg_class t8_long_reg( T8, T8_H );
302 reg_class t9_long_reg( T9, T9_H );
303 reg_class s0_long_reg( S0, S0_H );
304 reg_class s1_long_reg( S1, S1_H );
305 reg_class s2_long_reg( S2, S2_H );
306 reg_class s3_long_reg( S3, S3_H );
307 reg_class s4_long_reg( S4, S4_H );
308 reg_class s5_long_reg( S5, S5_H );
309 reg_class s6_long_reg( S6, S6_H );
310 reg_class s7_long_reg( S7, S7_H );
312 reg_class int_reg( S7, S0, S1, S2, S4, S3, T8, T2, T3, T1, V1, A7, A6, A5, A4, V0, A3, A2, A1, A0, T0 );
314 reg_class no_Ax_int_reg( S7, S0, S1, S2, S4, S3, T8, T2, T3, T1, V1, V0, T0 );
316 reg_class p_reg(
317 S7, S7_H,
318 S0, S0_H,
319 S1, S1_H,
320 S2, S2_H,
321 S4, S4_H,
322 S3, S3_H,
323 T8, T8_H,
324 T2, T2_H,
325 T3, T3_H,
326 T1, T1_H,
327 A7, A7_H,
328 A6, A6_H,
329 A5, A5_H,
330 A4, A4_H,
331 A3, A3_H,
332 A2, A2_H,
333 A1, A1_H,
334 A0, A0_H,
335 T0, T0_H
336 );
338 reg_class no_T8_p_reg(
339 S7, S7_H,
340 S0, S0_H,
341 S1, S1_H,
342 S2, S2_H,
343 S4, S4_H,
344 S3, S3_H,
345 T2, T2_H,
346 T3, T3_H,
347 T1, T1_H,
348 A7, A7_H,
349 A6, A6_H,
350 A5, A5_H,
351 A4, A4_H,
352 A3, A3_H,
353 A2, A2_H,
354 A1, A1_H,
355 A0, A0_H,
356 T0, T0_H
357 );
359 reg_class long_reg(
360 S7, S7_H,
361 S0, S0_H,
362 S1, S1_H,
363 S2, S2_H,
364 S4, S4_H,
365 S3, S3_H,
366 T8, T8_H,
367 T2, T2_H,
368 T3, T3_H,
369 T1, T1_H,
370 A7, A7_H,
371 A6, A6_H,
372 A5, A5_H,
373 A4, A4_H,
374 A3, A3_H,
375 A2, A2_H,
376 A1, A1_H,
377 A0, A0_H,
378 T0, T0_H
379 );
382 // Floating point registers.
383 // 2012/8/23 Fu: F30/F31 are used as temporary registers in D2I
384 // 2016/12/1 aoqi: F31 are not used as temporary registers in D2I
// Fix: the original was missing the ',' between F17 and F18.
reg_class flt_reg( F0, F1, F2, F3, F4, F5, F6, F7, F8, F9, F10, F11, F12, F13, F14, F15, F16, F17, F18, F19, F20, F21, F22, F23, F24, F25, F26, F27, F28, F29, F31);
386 reg_class dbl_reg( F0, F0_H,
387 F1, F1_H,
388 F2, F2_H,
389 F3, F3_H,
390 F4, F4_H,
391 F5, F5_H,
392 F6, F6_H,
393 F7, F7_H,
394 F8, F8_H,
395 F9, F9_H,
396 F10, F10_H,
397 F11, F11_H,
398 F12, F12_H,
399 F13, F13_H,
400 F14, F14_H,
401 F15, F15_H,
402 F16, F16_H,
403 F17, F17_H,
404 F18, F18_H,
405 F19, F19_H,
406 F20, F20_H,
407 F21, F21_H,
408 F22, F22_H,
409 F23, F23_H,
410 F24, F24_H,
411 F25, F25_H,
412 F26, F26_H,
413 F27, F27_H,
414 F28, F28_H,
415 F29, F29_H,
416 F31, F31_H);
418 reg_class flt_arg0( F12 );
419 reg_class dbl_arg0( F12, F12_H );
420 reg_class dbl_arg1( F14, F14_H );
422 %}
424 //----------DEFINITION BLOCK---------------------------------------------------
425 // Define name --> value mappings to inform the ADLC of an integer valued name
426 // Current support includes integer values in the range [0, 0x7FFFFFFF]
427 // Format:
428 // int_def <name> ( <int_value>, <expression>);
429 // Generated Code in ad_<arch>.hpp
430 // #define <name> (<expression>)
431 // // value == <int_value>
432 // Generated code in ad_<arch>.cpp adlc_verification()
433 // assert( <name> == <int_value>, "Expect (<expression>) to equal <int_value>");
434 //
435 definitions %{
436 int_def DEFAULT_COST ( 100, 100);
437 int_def HUGE_COST (1000000, 1000000);
439 // Memory refs are twice as expensive as run-of-the-mill.
440 int_def MEMORY_REF_COST ( 200, DEFAULT_COST * 2);
442 // Branches are even more expensive.
443 int_def BRANCH_COST ( 300, DEFAULT_COST * 3);
444 // we use jr instruction to construct call, so more expensive
445 // by yjl 2/28/2006
446 int_def CALL_COST ( 500, DEFAULT_COST * 5);
447 /*
448 int_def EQUAL ( 1, 1 );
449 int_def NOT_EQUAL ( 2, 2 );
450 int_def GREATER ( 3, 3 );
451 int_def GREATER_EQUAL ( 4, 4 );
452 int_def LESS ( 5, 5 );
453 int_def LESS_EQUAL ( 6, 6 );
454 */
455 %}
459 //----------SOURCE BLOCK-------------------------------------------------------
460 // This is a block of C++ code which provides values, functions, and
461 // definitions necessary in the rest of the architecture description
463 source_hpp %{
464 // Header information of the source block.
465 // Method declarations/definitions which are used outside
466 // the ad-scope can conveniently be defined here.
467 //
468 // To keep related declarations/definitions/uses close together,
469 // we switch between source %{ }% and source_hpp %{ }% freely as needed.
// Size/relocation footprint of call trampoline stubs, queried by
// Compile::shorten_branches. MIPS emits no trampolines, so both are 0.
class CallStubImpl {

 //--------------------------------------------------------------
 //---<  Used for optimization in Compile::shorten_branches  >---
 //--------------------------------------------------------------

 public:
  // Size of call trampoline stub.
  static uint size_call_trampoline() {
    return 0; // no call trampolines on this platform
  }

  // number of relocations needed by a call trampoline stub
  static uint reloc_call_trampoline() {
    return 0; // no call trampolines on this platform
  }
};
// Emission and sizing of the exception and deoptimization handler
// stubs (the emit_* bodies live in the source %{ %} block below).
class HandlerImpl {

 public:

  static int emit_exception_handler(CodeBuffer &cbuf);
  static int emit_deopt_handler(CodeBuffer& cbuf);

  static uint size_exception_handler() {
    // NativeCall instruction size is the same as NativeJump.
    // The exception handler starts out as a jump and can be patched to
    // a call by deoptimization. (4932387)
    // Note that this value is also credited (in output.cpp) to
    // the size of the code section.
    // return NativeJump::instruction_size;
    int size = NativeCall::instruction_size;
    return round_to(size, 16);   // stub is padded to 16-byte alignment
  }

#ifdef _LP64
  static uint size_deopt_handler() {
    int size = NativeCall::instruction_size;
    return round_to(size, 16);   // stub is padded to 16-byte alignment
  }
#else
  static uint size_deopt_handler() {
    // NativeCall instruction size is the same as NativeJump.
    // The exception handler starts out as a jump and can be patched to
    // a call by deoptimization. (4932387)
    // Note that this value is also credited (in output.cpp) to
    // the size of the code section.
    return 5 + NativeJump::instruction_size; // pushl(); jmp;
  }
#endif
};
524 %} // end source_hpp
526 source %{
528 #define NO_INDEX 0
529 #define RELOC_IMM64 Assembler::imm_operand
530 #define RELOC_DISP32 Assembler::disp32_operand
533 #define __ _masm.
536 // Emit exception handler code.
537 // Stuff framesize into a register and call a VM stub routine.
// Emit exception handler code.
// Materializes the C2 exception blob's entry point in T9 with a
// patchable li48 sequence, then jumps to it with jr; the stub is
// aligned to 16 bytes.
int HandlerImpl::emit_exception_handler(CodeBuffer& cbuf) {
  /*
  // Note that the code buffer's insts_mark is always relative to insts.
  // That's why we must use the macroassembler to generate a handler.
  MacroAssembler _masm(&cbuf);
  address base = __ start_a_stub(size_exception_handler());
  if (base == NULL)  return 0;  // CodeBuffer::expand failed
  int offset = __ offset();
  __ jump(RuntimeAddress(OptoRuntime::exception_blob()->entry_point()));
  assert(__ offset() - offset <= (int) size_exception_handler(), "overflow");
  __ end_a_stub();
  return offset;
  */
  // Note that the code buffer's insts_mark is always relative to insts.
  // That's why we must use the macroassembler to generate a handler.
  MacroAssembler _masm(&cbuf);
  address base =
  __ start_a_stub(size_exception_handler());
  if (base == NULL)  return 0;  // CodeBuffer::expand failed
  int offset = __ offset();

  __ block_comment("; emit_exception_handler");

  /* 2012/9/25 FIXME Jin: According to X86, we should use a direct jump
   * here. However, that triggered an assert after the 40th method:
   *
   *   39  b  java.lang.Throwable::<init> (25 bytes)
   *   --- ns java.lang.Throwable::fillInStackTrace
   *   40 !b  java.net.URLClassLoader::findClass (29 bytes)
   *   /vm/opto/runtime.cpp, 900, assert(caller.is_compiled_frame(), "must be")
   *   40 made not entrant (2) java.net.URLClassLoader::findClass (29 bytes)
   *
   * If we change from JR to JALR, the assert disappears, but WebClient
   * fails after the 403rd method for an unknown reason.
   */
  cbuf.set_insts_mark();
  __ relocate(relocInfo::runtime_call_type);
  // Patchable 4-instruction (lui/ori/dsll/ori) address materialization.
  __ patchable_set48(T9, (long)OptoRuntime::exception_blob()->entry_point());
  __ jr(T9);
  __ delayed()->nop();   // branch delay slot
  __ align(16);
  assert(__ offset() - offset <= (int) size_exception_handler(), "overflow");
  __ end_a_stub();
  return offset;
}
585 // Emit deopt handler code.
// Emit deopt handler code.
// Materializes the deopt blob's unpack entry in T9 with a patchable
// li48 sequence and calls it via jalr; the stub is aligned to 16 bytes.
int HandlerImpl::emit_deopt_handler(CodeBuffer& cbuf) {
  // Note that the code buffer's insts_mark is always relative to insts.
  // That's why we must use the macroassembler to generate a handler.
  MacroAssembler _masm(&cbuf);
  address base =
  __ start_a_stub(size_deopt_handler());

  // FIXME
  if (base == NULL)  return 0;  // CodeBuffer::expand failed
  int offset = __ offset();

  __ block_comment("; emit_deopt_handler");

  cbuf.set_insts_mark();
  __ relocate(relocInfo::runtime_call_type);
  // Patchable 4-instruction (lui/ori/dsll/ori) address materialization.
  __ patchable_set48(T9, (long)SharedRuntime::deopt_blob()->unpack());
  __ jalr(T9);
  __ delayed()->nop();   // branch delay slot
  __ align(16);
  assert(__ offset() - offset <= (int) size_deopt_handler(), "overflow");
  __ end_a_stub();
  return offset;
}
612 const bool Matcher::match_rule_supported(int opcode) {
613 if (!has_match_rule(opcode))
614 return false;
616 switch (opcode) {
617 //Op_CountLeadingZerosI Op_CountLeadingZerosL can be deleted, all MIPS CPUs support clz & dclz.
618 case Op_CountLeadingZerosI:
619 case Op_CountLeadingZerosL:
620 if (!UseCountLeadingZerosInstruction)
621 return false;
622 break;
623 case Op_CountTrailingZerosI:
624 case Op_CountTrailingZerosL:
625 if (!UseCountTrailingZerosInstruction)
626 return false;
627 break;
628 }
630 return true; // Per default match rules are supported.
631 }
633 //FIXME
634 // emit call stub, compiled java to interpreter
//FIXME
// Emit the call stub for a compiled-Java-to-interpreter call.
// Emits a static-stub relocation, a patchable methodOop slot in S3,
// and a patchable jump through AT whose target starts out as -1
// (unresolved) and is fixed up when the call is converted.
void emit_java_to_interp(CodeBuffer &cbuf ) {
  // Stub is fixed up when the corresponding call is converted from calling
  // compiled code to calling interpreted code.
  // mov rbx,0
  // jmp -1
  address mark = cbuf.insts_mark();  // get mark within main instrs section

  // Note that the code buffer's insts_mark is always relative to insts.
  // That's why we must use the macroassembler to generate a stub.
  MacroAssembler _masm(&cbuf);

  address base =
  __ start_a_stub(Compile::MAX_stubs_size);
  if (base == NULL)  return;  // CodeBuffer::expand failed
  // static stub relocation stores the instruction address of the call
  __ relocate(static_stub_Relocation::spec(mark), 0);

  // 2012/10/29 Jin: Rmethod contains a methodOop; it should be relocated
  // for GC.
  /*
  int oop_index = __ oop_recorder()->allocate_index(NULL);
  RelocationHolder rspec = oop_Relocation::spec(oop_index);
  __ relocate(rspec);
  */
  // static stub relocation also tags the methodOop in the code-stream.
  __ patchable_set48(S3, (long)0);
  // This is recognized as unresolved by relocs/nativeInst/ic code

  __ relocate(relocInfo::runtime_call_type);

  cbuf.set_insts_mark();
  address call_pc = (address)-1;  // placeholder target, patched later
  __ patchable_set48(AT, (long)call_pc);
  __ jr(AT);
  __ nop();
  __ align(16);
  __ end_a_stub();
  // Update current stubs pointer and restore code_end.
}
677 // size of call stub, compiled java to interpretor
// Size of the call stub emitted by emit_java_to_interp above:
// a 4-instruction li48 plus the NativeCall sequence, rounded up to
// the stub's 16-byte alignment.
uint size_java_to_interp() {
  int size = 4 * 4 + NativeCall::instruction_size; // sizeof(li48) + NativeCall::instruction_size
  return round_to(size, 16);
}
683 // relocation entries for call stub, compiled java to interpreter
// Number of relocation entries needed for the compiled-Java-to-
// interpreter call stub.
uint reloc_java_to_interp() {
  return 16;  // in emit_java_to_interp + in Java_Static_Call
}
688 bool Matcher::is_short_branch_offset(int rule, int br_size, int offset) {
689 if( Assembler::is_simm16(offset) ) return true;
690 else
691 {
692 assert(false, "Not implemented yet !" );
693 Unimplemented();
694 }
695 }
698 // No additional cost for CMOVL.
699 const int Matcher::long_cmove_cost() { return 0; }
701 // No CMOVF/CMOVD with SSE2
702 const int Matcher::float_cmove_cost() { return ConditionalMoveLimit; }
704 // Does the CPU require late expand (see block.cpp for description of late expand)?
705 const bool Matcher::require_postalloc_expand = false;
707 // Should the Matcher clone shifts on addressing modes, expecting them
708 // to be subsumed into complex addressing expressions or compute them
709 // into registers? True for Intel but false for most RISCs
710 const bool Matcher::clone_shift_expressions = false;
712 // Do we need to mask the count passed to shift instructions or does
713 // the cpu only look at the lower 5/6 bits anyway?
714 const bool Matcher::need_masked_shift_count = false;
// May the matcher fold narrow-oop decoding into a complex addressing
// expression?  Not on this platform.
bool Matcher::narrow_oop_use_complex_address() {
  NOT_LP64(ShouldNotCallThis());
  assert(UseCompressedOops, "only for compressed oops code");
  return false;
}
// Same as above, for narrow (compressed) klass pointers.
bool Matcher::narrow_klass_use_complex_address() {
  NOT_LP64(ShouldNotCallThis());
  assert(UseCompressedClassPointers, "only for compressed klass code");
  return false;
}
728 // This is UltraSparc specific, true just means we have fast l2f conversion
729 const bool Matcher::convL2FSupported(void) {
730 return true;
731 }
733 // Max vector size in bytes. 0 if not supported.
734 const int Matcher::vector_width_in_bytes(BasicType bt) {
735 assert(MaxVectorSize == 8, "");
736 return 8;
737 }
739 // Vector ideal reg
740 const int Matcher::vector_ideal_reg(int size) {
741 assert(MaxVectorSize == 8, "");
742 switch(size) {
743 case 8: return Op_VecD;
744 }
745 ShouldNotReachHere();
746 return 0;
747 }
749 // Only lowest bits of xmm reg are used for vector shift count.
750 const int Matcher::vector_shift_count_ideal_reg(int size) {
751 fatal("vector shift is not supported");
752 return Node::NotAMachineReg;
753 }
755 // Limits on vector size (number of elements) loaded into vector.
756 const int Matcher::max_vector_size(const BasicType bt) {
757 assert(is_java_primitive(bt), "only primitive type vectors");
758 return vector_width_in_bytes(bt)/type2aelembytes(bt);
759 }
761 const int Matcher::min_vector_size(const BasicType bt) {
762 return max_vector_size(bt); // Same as max.
763 }
765 // MIPS supports misaligned vectors store/load? FIXME
766 const bool Matcher::misaligned_vectors_ok() {
767 return false;
768 //return !AlignVector; // can be changed by flag
769 }
771 // Register for DIVI projection of divmodI
772 RegMask Matcher::divI_proj_mask() {
773 ShouldNotReachHere();
774 return RegMask();
775 }
777 // Register for MODI projection of divmodI
778 RegMask Matcher::modI_proj_mask() {
779 ShouldNotReachHere();
780 return RegMask();
781 }
783 // Register for DIVL projection of divmodL
784 RegMask Matcher::divL_proj_mask() {
785 ShouldNotReachHere();
786 return RegMask();
787 }
789 int Matcher::regnum_to_fpu_offset(int regnum) {
790 return regnum - 32; // The FP registers are in the second chunk
791 }
794 const bool Matcher::isSimpleConstant64(jlong value) {
795 // Will one (StoreL ConL) be cheaper than two (StoreI ConI)?.
796 return true;
797 }
800 // Return whether or not this register is ever used as an argument. This
801 // function is used on startup to build the trampoline stubs in generateOptoStub.
802 // Registers not mentioned will be killed by the VM call in the trampoline, and
803 // arguments in those registers not be available to the callee.
804 bool Matcher::can_be_java_arg( int reg ) {
805 /* Refer to: [sharedRuntime_mips_64.cpp] SharedRuntime::java_calling_convention() */
806 if ( reg == T0_num || reg == T0_H_num
807 || reg == A0_num || reg == A0_H_num
808 || reg == A1_num || reg == A1_H_num
809 || reg == A2_num || reg == A2_H_num
810 || reg == A3_num || reg == A3_H_num
811 || reg == A4_num || reg == A4_H_num
812 || reg == A5_num || reg == A5_H_num
813 || reg == A6_num || reg == A6_H_num
814 || reg == A7_num || reg == A7_H_num )
815 return true;
817 if ( reg == F12_num || reg == F12_H_num
818 || reg == F13_num || reg == F13_H_num
819 || reg == F14_num || reg == F14_H_num
820 || reg == F15_num || reg == F15_H_num
821 || reg == F16_num || reg == F16_H_num
822 || reg == F17_num || reg == F17_H_num
823 || reg == F18_num || reg == F18_H_num
824 || reg == F19_num || reg == F19_H_num )
825 return true;
827 return false;
828 }
830 bool Matcher::is_spillable_arg( int reg ) {
831 return can_be_java_arg(reg);
832 }
834 bool Matcher::use_asm_for_ldiv_by_con( jlong divisor ) {
835 return false;
836 }
838 // Register for MODL projection of divmodL
839 RegMask Matcher::modL_proj_mask() {
840 ShouldNotReachHere();
841 return RegMask();
842 }
844 const RegMask Matcher::method_handle_invoke_SP_save_mask() {
845 return FP_REG_mask();
846 }
848 // MIPS doesn't support AES intrinsics
849 const bool Matcher::pass_original_key_for_aes() {
850 return false;
851 }
853 // The address of the call instruction needs to be 16-byte aligned to
854 // ensure that it does not span a cache line so that it can be patched.
// Padding (in bytes) needed before the call so its patchable
// lui/ori/dsll/ori + jalr + nop sequence starts on the required
// alignment boundary and cannot span a cache line.
int CallStaticJavaDirectNode::compute_padding(int current_offset) const {
  //lui
  //ori
  //dsll
  //ori

  //jalr
  //nop

  return round_to(current_offset, alignment_required()) - current_offset;
}
868 // The address of the call instruction needs to be 16-byte aligned to
869 // ensure that it does not span a cache line so that it can be patched.
// Padding for a dynamic call: the 4-instruction (16-byte) inline-cache
// load precedes the patchable call sequence, so it is skipped before
// computing the alignment of the call itself.
int CallDynamicJavaDirectNode::compute_padding(int current_offset) const {
  //loadIC <--- skip

  //lui
  //ori
  //dsll
  //ori

  //jalr
  //nop

  current_offset += 4 * 4;   // skip the IC load (4 instructions)
  return round_to(current_offset, alignment_required()) - current_offset;
}
885 int CallLeafNoFPDirectNode::compute_padding(int current_offset) const {
886 //lui
887 //ori
888 //dsll
889 //ori
891 //jalr
892 //nop
894 return round_to(current_offset, alignment_required()) - current_offset;
895 }
897 int CallLeafDirectNode::compute_padding(int current_offset) const {
898 //lui
899 //ori
900 //dsll
901 //ori
903 //jalr
904 //nop
906 return round_to(current_offset, alignment_required()) - current_offset;
907 }
909 int CallRuntimeDirectNode::compute_padding(int current_offset) const {
910 //lui
911 //ori
912 //dsll
913 //ori
915 //jalr
916 //nop
918 return round_to(current_offset, alignment_required()) - current_offset;
919 }
921 // If CPU can load and store mis-aligned doubles directly then no fixup is
922 // needed. Else we split the double into 2 integer pieces and move it
923 // piece-by-piece. Only happens when passing doubles into C code as the
924 // Java calling convention forces doubles to be aligned.
925 const bool Matcher::misaligned_doubles_ok = false;
926 // Do floats take an entire double register or just half?
927 //const bool Matcher::float_in_double = true;
928 bool Matcher::float_in_double() { return false; }
929 // Threshold size for cleararray.
930 const int Matcher::init_array_short_size = 8 * BytesPerLong;
931 // Do ints take an entire long register or just half?
932 const bool Matcher::int_in_long = true;
933 // Is it better to copy float constants, or load them directly from memory?
934 // Intel can load a float constant from a direct address, requiring no
935 // extra registers. Most RISCs will have to materialize an address into a
936 // register first, so they would do better to copy the constant from stack.
937 const bool Matcher::rematerialize_float_constants = false;
938 // Advertise here if the CPU requires explicit rounding operations
939 // to implement the UseStrictFP mode.
940 const bool Matcher::strict_fp_requires_explicit_rounding = false;
941 // The ecx parameter to rep stos for the ClearArray node is in dwords.
942 const bool Matcher::init_array_count_is_in_bytes = false;
945 // Indicate if the safepoint node needs the polling page as an input.
946 // Since MIPS doesn't have absolute addressing, it needs.
947 bool SafePointNode::needs_polling_address_input() {
948 return false;
949 }
// !!!!! Special hack to get all type of calls to specify the byte offset
// from the start of the call to the point where the return address
// will point.
int MachCallStaticJavaNode::ret_addr_offset() {
  assert(NativeCall::instruction_size == 24, "in MachCallStaticJavaNode::ret_addr_offset");
  // The return address follows the 6-instruction (24-byte) call sequence:
  //lui
  //ori
  //dsll
  //ori
  //jalr
  //nop
  return NativeCall::instruction_size;
}
// Offset of the return address for a dynamic (inline-cache) call:
// 4 instructions (16 bytes) to load the IC klass, followed by the
// 24-byte call sequence — 40 bytes total.
int MachCallDynamicJavaNode::ret_addr_offset() {
  assert(NativeCall::instruction_size == 24, "in MachCallDynamicJavaNode::ret_addr_offset");
  //lui IC_Klass,
  //ori IC_Klass,
  //dsll IC_Klass
  //ori IC_Klass
  //lui T9
  //ori T9
  //dsll T9
  //ori T9
  //jalr T9
  //nop
  return 4 * 4 + NativeCall::instruction_size;
}
982 //=============================================================================
984 // Figure out which register class each belongs in: rc_int, rc_float, rc_stack
985 enum RC { rc_bad, rc_int, rc_float, rc_stack };
986 static enum RC rc_class( OptoReg::Name reg ) {
987 if( !OptoReg::is_valid(reg) ) return rc_bad;
988 if (OptoReg::is_stack(reg)) return rc_stack;
989 VMReg r = OptoReg::as_VMReg(reg);
990 if (r->is_Register()) return rc_int;
991 assert(r->is_FloatRegister(), "must be");
992 return rc_float;
993 }
// Emit, print, or size a spill copy between the locations chosen by the
// register allocator.  The three clients are distinguished by arguments:
//   cbuf != NULL           : emit real instructions into the code buffer
//   cbuf == NULL, !do_size : pretty-print the instructions to 'st'
//   do_size                : only accumulate the byte size
// Returns the total size in bytes of the sequence.  AT is used as the
// scratch register for memory-to-memory moves.  Unhandled class
// combinations fall through to Unimplemented() at the bottom.
uint MachSpillCopyNode::implementation( CodeBuffer *cbuf, PhaseRegAlloc *ra_, bool do_size, outputStream* st ) const {
  // Get registers to move
  OptoReg::Name src_second = ra_->get_reg_second(in(1));
  OptoReg::Name src_first = ra_->get_reg_first(in(1));
  OptoReg::Name dst_second = ra_->get_reg_second(this );
  OptoReg::Name dst_first = ra_->get_reg_first(this );

  enum RC src_second_rc = rc_class(src_second);
  enum RC src_first_rc = rc_class(src_first);
  enum RC dst_second_rc = rc_class(dst_second);
  enum RC dst_first_rc = rc_class(dst_first);

  assert(OptoReg::is_valid(src_first) && OptoReg::is_valid(dst_first), "must move at least 1 register" );

  // Generate spill code!
  int size = 0;

  if( src_first == dst_first && src_second == dst_second )
    return 0;            // Self copy, no move

  if (src_first_rc == rc_stack) {
    // mem ->
    if (dst_first_rc == rc_stack) {
      // mem -> mem
      assert(src_second != dst_first, "overlap");
      // An even first register paired with the following one means a
      // 64-bit (aligned-adjacent) value; same test is used throughout.
      if ((src_first & 1) == 0 && src_first + 1 == src_second &&
          (dst_first & 1) == 0 && dst_first + 1 == dst_second) {
        // 64-bit
        int src_offset = ra_->reg2offset(src_first);
        int dst_offset = ra_->reg2offset(dst_first);
        if (cbuf) {
          MacroAssembler _masm(cbuf);
          __ ld(AT, Address(SP, src_offset));
          __ sd(AT, Address(SP, dst_offset));
#ifndef PRODUCT
        } else {
          if(!do_size){
            if (size != 0) st->print("\n\t");
            st->print("ld AT, [SP + #%d]\t# 64-bit mem-mem spill 1\n\t"
                      "sd AT, [SP + #%d]",
                      src_offset, dst_offset);
          }
#endif
        }
        size += 8;    // two 4-byte instructions
      } else {
        // 32-bit
        assert(!((src_first & 1) == 0 && src_first + 1 == src_second), "no transform");
        assert(!((dst_first & 1) == 0 && dst_first + 1 == dst_second), "no transform");
        // No pushl/popl, so:
        int src_offset = ra_->reg2offset(src_first);
        int dst_offset = ra_->reg2offset(dst_first);
        if (cbuf) {
          MacroAssembler _masm(cbuf);
          __ lw(AT, Address(SP, src_offset));
          __ sw(AT, Address(SP, dst_offset));
#ifndef PRODUCT
        } else {
          if(!do_size){
            if (size != 0) st->print("\n\t");
            st->print("lw AT, [SP + #%d] spill 2\n\t"
                      "sw AT, [SP + #%d]\n\t",
                      src_offset, dst_offset);
          }
#endif
        }
        size += 8;
      }
      return size;
    } else if (dst_first_rc == rc_int) {
      // mem -> gpr
      if ((src_first & 1) == 0 && src_first + 1 == src_second &&
          (dst_first & 1) == 0 && dst_first + 1 == dst_second) {
        // 64-bit
        int offset = ra_->reg2offset(src_first);
        if (cbuf) {
          MacroAssembler _masm(cbuf);
          __ ld(as_Register(Matcher::_regEncode[dst_first]), Address(SP, offset));
#ifndef PRODUCT
        } else {
          if(!do_size){
            if (size != 0) st->print("\n\t");
            st->print("ld %s, [SP + #%d]\t# spill 3",
                      Matcher::regName[dst_first],
                      offset);
          }
#endif
        }
        size += 4;
      } else {
        // 32-bit
        assert(!((src_first & 1) == 0 && src_first + 1 == src_second), "no transform");
        assert(!((dst_first & 1) == 0 && dst_first + 1 == dst_second), "no transform");
        int offset = ra_->reg2offset(src_first);
        if (cbuf) {
          MacroAssembler _masm(cbuf);
          // lw sign-extends (ints); lwu zero-extends (narrow oops etc.).
          if (this->ideal_reg() == Op_RegI)
            __ lw(as_Register(Matcher::_regEncode[dst_first]), Address(SP, offset));
          else
            __ lwu(as_Register(Matcher::_regEncode[dst_first]), Address(SP, offset));
#ifndef PRODUCT
        } else {
          if(!do_size){
            if (size != 0) st->print("\n\t");
            if (this->ideal_reg() == Op_RegI)
              st->print("lw %s, [SP + #%d]\t# spill 4",
                        Matcher::regName[dst_first],
                        offset);
            else
              st->print("lwu %s, [SP + #%d]\t# spill 5",
                        Matcher::regName[dst_first],
                        offset);
          }
#endif
        }
        size += 4;
      }
      return size;
    } else if (dst_first_rc == rc_float) {
      // mem-> xmm
      if ((src_first & 1) == 0 && src_first + 1 == src_second &&
          (dst_first & 1) == 0 && dst_first + 1 == dst_second) {
        // 64-bit
        int offset = ra_->reg2offset(src_first);
        if (cbuf) {
          MacroAssembler _masm(cbuf);
          __ ldc1( as_FloatRegister(Matcher::_regEncode[dst_first]), Address(SP, offset));
#ifndef PRODUCT
        } else {
          if(!do_size){
            if (size != 0) st->print("\n\t");
            st->print("ldc1 %s, [SP + #%d]\t# spill 6",
                      Matcher::regName[dst_first],
                      offset);
          }
#endif
        }
        size += 4;
      } else {
        // 32-bit
        assert(!((src_first & 1) == 0 && src_first + 1 == src_second), "no transform");
        assert(!((dst_first & 1) == 0 && dst_first + 1 == dst_second), "no transform");
        int offset = ra_->reg2offset(src_first);
        if (cbuf) {
          MacroAssembler _masm(cbuf);
          __ lwc1( as_FloatRegister(Matcher::_regEncode[dst_first]), Address(SP, offset));
#ifndef PRODUCT
        } else {
          if(!do_size){
            if (size != 0) st->print("\n\t");
            st->print("lwc1 %s, [SP + #%d]\t# spill 7",
                      Matcher::regName[dst_first],
                      offset);
          }
#endif
        }
        size += 4;
      }
      return size;
    }
  } else if (src_first_rc == rc_int) {
    // gpr ->
    if (dst_first_rc == rc_stack) {
      // gpr -> mem
      if ((src_first & 1) == 0 && src_first + 1 == src_second &&
          (dst_first & 1) == 0 && dst_first + 1 == dst_second) {
        // 64-bit
        int offset = ra_->reg2offset(dst_first);
        if (cbuf) {
          MacroAssembler _masm(cbuf);
          __ sd(as_Register(Matcher::_regEncode[src_first]), Address(SP, offset));
#ifndef PRODUCT
        } else {
          if(!do_size){
            if (size != 0) st->print("\n\t");
            st->print("sd %s, [SP + #%d] # spill 8",
                      Matcher::regName[src_first],
                      offset);
          }
#endif
        }
        size += 4;
      } else {
        // 32-bit
        assert(!((src_first & 1) == 0 && src_first + 1 == src_second), "no transform");
        assert(!((dst_first & 1) == 0 && dst_first + 1 == dst_second), "no transform");
        int offset = ra_->reg2offset(dst_first);
        if (cbuf) {
          MacroAssembler _masm(cbuf);
          __ sw(as_Register(Matcher::_regEncode[src_first]), Address(SP, offset));
#ifndef PRODUCT
        } else {
          if(!do_size){
            if (size != 0) st->print("\n\t");
            st->print("sw %s, [SP + #%d]\t# spill 9",
                      Matcher::regName[src_first], offset);
          }
#endif
        }
        size += 4;
      }
      return size;
    } else if (dst_first_rc == rc_int) {
      // gpr -> gpr
      if ((src_first & 1) == 0 && src_first + 1 == src_second &&
          (dst_first & 1) == 0 && dst_first + 1 == dst_second) {
        // 64-bit
        if (cbuf) {
          MacroAssembler _masm(cbuf);
          __ move(as_Register(Matcher::_regEncode[dst_first]),
                  as_Register(Matcher::_regEncode[src_first]));
#ifndef PRODUCT
        } else {
          if(!do_size){
            if (size != 0) st->print("\n\t");
            st->print("move(64bit) %s <-- %s\t# spill 10",
                      Matcher::regName[dst_first],
                      Matcher::regName[src_first]);
          }
#endif
        }
        size += 4;
        return size;
      } else {
        // 32-bit
        assert(!((src_first & 1) == 0 && src_first + 1 == src_second), "no transform");
        assert(!((dst_first & 1) == 0 && dst_first + 1 == dst_second), "no transform");
        if (cbuf) {
          MacroAssembler _masm(cbuf);
          // Op_RegI keeps only the low 32 bits; other types are copied
          // whole via daddu with R0 (zero register).
          if (this->ideal_reg() == Op_RegI)
            __ move_u32(as_Register(Matcher::_regEncode[dst_first]), as_Register(Matcher::_regEncode[src_first]));
          else
            __ daddu(as_Register(Matcher::_regEncode[dst_first]), as_Register(Matcher::_regEncode[src_first]), R0);
#ifndef PRODUCT
        } else {
          if(!do_size){
            if (size != 0) st->print("\n\t");
            st->print("move(32-bit) %s <-- %s\t# spill 11",
                      Matcher::regName[dst_first],
                      Matcher::regName[src_first]);
          }
#endif
        }
        size += 4;
        return size;
      }
    } else if (dst_first_rc == rc_float) {
      // gpr -> xmm
      if ((src_first & 1) == 0 && src_first + 1 == src_second &&
          (dst_first & 1) == 0 && dst_first + 1 == dst_second) {
        // 64-bit
        if (cbuf) {
          MacroAssembler _masm(cbuf);
          __ dmtc1(as_Register(Matcher::_regEncode[src_first]), as_FloatRegister(Matcher::_regEncode[dst_first]));
#ifndef PRODUCT
        } else {
          if(!do_size){
            if (size != 0) st->print("\n\t");
            // NOTE(review): the emitted instruction is dmtc1(src, dst) but
            // this debug string prints (dst, src) — confirm the intended
            // operand order of the printed form.
            st->print("dmtc1 %s, %s\t# spill 12",
                      Matcher::regName[dst_first],
                      Matcher::regName[src_first]);
          }
#endif
        }
        size += 4;
      } else {
        // 32-bit
        assert(!((src_first & 1) == 0 && src_first + 1 == src_second), "no transform");
        assert(!((dst_first & 1) == 0 && dst_first + 1 == dst_second), "no transform");
        if (cbuf) {
          MacroAssembler _masm(cbuf);
          __ mtc1( as_Register(Matcher::_regEncode[src_first]), as_FloatRegister(Matcher::_regEncode[dst_first]) );
#ifndef PRODUCT
        } else {
          if(!do_size){
            if (size != 0) st->print("\n\t");
            // NOTE(review): printed operand order is (dst, src) while the
            // emitted mtc1 takes (src, dst) — confirm intended formatting.
            st->print("mtc1 %s, %s\t# spill 13",
                      Matcher::regName[dst_first],
                      Matcher::regName[src_first]);
          }
#endif
        }
        size += 4;
      }
      return size;
    }
  } else if (src_first_rc == rc_float) {
    // xmm ->
    if (dst_first_rc == rc_stack) {
      // xmm -> mem
      if ((src_first & 1) == 0 && src_first + 1 == src_second &&
          (dst_first & 1) == 0 && dst_first + 1 == dst_second) {
        // 64-bit
        int offset = ra_->reg2offset(dst_first);
        if (cbuf) {
          MacroAssembler _masm(cbuf);
          __ sdc1( as_FloatRegister(Matcher::_regEncode[src_first]), Address(SP, offset) );
#ifndef PRODUCT
        } else {
          if(!do_size){
            if (size != 0) st->print("\n\t");
            st->print("sdc1 %s, [SP + #%d]\t# spill 14",
                      Matcher::regName[src_first],
                      offset);
          }
#endif
        }
        size += 4;
      } else {
        // 32-bit
        assert(!((src_first & 1) == 0 && src_first + 1 == src_second), "no transform");
        assert(!((dst_first & 1) == 0 && dst_first + 1 == dst_second), "no transform");
        int offset = ra_->reg2offset(dst_first);
        if (cbuf) {
          MacroAssembler _masm(cbuf);
          __ swc1(as_FloatRegister(Matcher::_regEncode[src_first]), Address(SP, offset));
#ifndef PRODUCT
        } else {
          if(!do_size){
            if (size != 0) st->print("\n\t");
            st->print("swc1 %s, [SP + #%d]\t# spill 15",
                      Matcher::regName[src_first],
                      offset);
          }
#endif
        }
        size += 4;
      }
      return size;
    } else if (dst_first_rc == rc_int) {
      // xmm -> gpr
      if ((src_first & 1) == 0 && src_first + 1 == src_second &&
          (dst_first & 1) == 0 && dst_first + 1 == dst_second) {
        // 64-bit
        if (cbuf) {
          MacroAssembler _masm(cbuf);
          __ dmfc1( as_Register(Matcher::_regEncode[dst_first]), as_FloatRegister(Matcher::_regEncode[src_first]));
#ifndef PRODUCT
        } else {
          if(!do_size){
            if (size != 0) st->print("\n\t");
            st->print("dmfc1 %s, %s\t# spill 16",
                      Matcher::regName[dst_first],
                      Matcher::regName[src_first]);
          }
#endif
        }
        size += 4;
      } else {
        // 32-bit
        assert(!((src_first & 1) == 0 && src_first + 1 == src_second), "no transform");
        assert(!((dst_first & 1) == 0 && dst_first + 1 == dst_second), "no transform");
        if (cbuf) {
          MacroAssembler _masm(cbuf);
          __ mfc1( as_Register(Matcher::_regEncode[dst_first]), as_FloatRegister(Matcher::_regEncode[src_first]));
#ifndef PRODUCT
        } else {
          if(!do_size){
            if (size != 0) st->print("\n\t");
            st->print("mfc1 %s, %s\t# spill 17",
                      Matcher::regName[dst_first],
                      Matcher::regName[src_first]);
          }
#endif
        }
        size += 4;
      }
      return size;
    } else if (dst_first_rc == rc_float) {
      // xmm -> xmm
      if ((src_first & 1) == 0 && src_first + 1 == src_second &&
          (dst_first & 1) == 0 && dst_first + 1 == dst_second) {
        // 64-bit
        if (cbuf) {
          MacroAssembler _masm(cbuf);
          __ mov_d( as_FloatRegister(Matcher::_regEncode[dst_first]), as_FloatRegister(Matcher::_regEncode[src_first]));
#ifndef PRODUCT
        } else {
          if(!do_size){
            if (size != 0) st->print("\n\t");
            st->print("mov_d %s <-- %s\t# spill 18",
                      Matcher::regName[dst_first],
                      Matcher::regName[src_first]);
          }
#endif
        }
        size += 4;
      } else {
        // 32-bit
        assert(!((src_first & 1) == 0 && src_first + 1 == src_second), "no transform");
        assert(!((dst_first & 1) == 0 && dst_first + 1 == dst_second), "no transform");
        if (cbuf) {
          MacroAssembler _masm(cbuf);
          __ mov_s( as_FloatRegister(Matcher::_regEncode[dst_first]), as_FloatRegister(Matcher::_regEncode[src_first]));
#ifndef PRODUCT
        } else {
          if(!do_size){
            if (size != 0) st->print("\n\t");
            st->print("mov_s %s <-- %s\t# spill 19",
                      Matcher::regName[dst_first],
                      Matcher::regName[src_first]);
          }
#endif
        }
        size += 4;
      }
      return size;
    }
  }

  // Any combination not handled above is a bug in the allocator or here.
  assert(0," foo ");
  Unimplemented();
  return size;
}
#ifndef PRODUCT
// Debug printing: render the spill copy without emitting code.
void MachSpillCopyNode::format( PhaseRegAlloc *ra_, outputStream* st ) const {
  implementation( NULL, ra_, false, st );
}
#endif

// Emit the spill-copy instructions into the code buffer.
void MachSpillCopyNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
  implementation( &cbuf, ra_, false, NULL );
}

// Byte size of the spill copy (sizing pass only; nothing is emitted).
uint MachSpillCopyNode::size(PhaseRegAlloc *ra_) const {
  return implementation( NULL, ra_, true, NULL );
}
1426 //=============================================================================
1427 #
#ifndef PRODUCT
// Debug printing for an explicit breakpoint node.
void MachBreakpointNode::format( PhaseRegAlloc *, outputStream* st ) const {
  st->print("INT3");
}
#endif

// Emit a breakpoint trap.
void MachBreakpointNode::emit(CodeBuffer &cbuf, PhaseRegAlloc* ra_) const {
  MacroAssembler _masm(&cbuf);
  __ int3();
}

uint MachBreakpointNode::size(PhaseRegAlloc* ra_) const {
  // Let the generic machinery compute the size from the emitted code.
  return MachNode::size(ra_);
}
1445 //=============================================================================
1446 #ifndef PRODUCT
1447 void MachEpilogNode::format( PhaseRegAlloc *ra_, outputStream* st ) const {
1448 Compile *C = ra_->C;
1449 int framesize = C->frame_size_in_bytes();
1451 assert((framesize & (StackAlignmentInBytes-1)) == 0, "frame size not aligned");
1453 st->print("daddiu SP, SP, %d # Rlease stack @ MachEpilogNode",framesize);
1454 st->cr(); st->print("\t");
1455 if (UseLoongsonISA) {
1456 st->print("gslq RA, FP, SP, %d # Restore FP & RA @ MachEpilogNode", -wordSize*2);
1457 } else {
1458 st->print("ld RA, SP, %d # Restore RA @ MachEpilogNode", -wordSize);
1459 st->cr(); st->print("\t");
1460 st->print("ld FP, SP, %d # Restore FP @ MachEpilogNode", -wordSize*2);
1461 }
1463 if( do_polling() && C->is_method_compilation() ) {
1464 st->print("Poll Safepoint # MachEpilogNode");
1465 }
1466 }
1467 #endif
// Emit the method epilogue: pop the frame, restore RA and FP from just
// below the caller's SP (where the prologue saved them), and, for method
// (non-stub) compilations, read the polling page to provoke a safepoint
// trap at return.
void MachEpilogNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
  Compile *C = ra_->C;
  MacroAssembler _masm(&cbuf);
  int framesize = C->frame_size_in_bytes();

  assert((framesize & (StackAlignmentInBytes-1)) == 0, "frame size not aligned");

  // Release the frame.
  __ daddiu(SP, SP, framesize);

  if (UseLoongsonISA) {
    // gslq restores both RA and FP in one instruction.
    __ gslq(RA, FP, SP, -wordSize*2);
  } else {
    __ ld(RA, SP, -wordSize );
    __ ld(FP, SP, -wordSize*2 );
  }

  if( do_polling() && C->is_method_compilation() ) {
    // Poll-return: load from the polling page.  The relocation lets the VM
    // identify this site when the page is protected for a safepoint.
    __ set64(AT, (long)os::get_polling_page());
    __ relocate(relocInfo::poll_return_type);
    __ lw(AT, AT, 0);
  }
}
// Epilogue size varies (Loongson vs plain MIPS, polling); compute it from
// the emitted code rather than predicting it.
uint MachEpilogNode::size(PhaseRegAlloc *ra_) const {
  return MachNode::size(ra_); // too many variables; just compute it the hard way fujie debug
}

int MachEpilogNode::reloc() const {
  return 0; // a large enough number
}

const Pipeline * MachEpilogNode::pipeline() const {
  return MachNode::pipeline_class();
}

// Byte offset of the safepoint poll within the epilogue sequence.
int MachEpilogNode::safepoint_offset() const { return 0; }
1506 //=============================================================================
#ifndef PRODUCT
// Debug printing: the box-lock address is SP plus the stack-slot offset.
void BoxLockNode::format( PhaseRegAlloc *ra_, outputStream* st ) const {
  int offset = ra_->reg2offset(in_RegMask(0).find_first_elem());
  int reg = ra_->get_reg_first(this);
  st->print("ADDI %s, SP, %d @BoxLockNode",Matcher::regName[reg],offset);
}
#endif

// One 4-byte add-immediate instruction (see emit() below).
uint BoxLockNode::size(PhaseRegAlloc *ra_) const {
  return 4;
}
// Materialize the address of the on-stack lock box: dst = SP + offset.
void BoxLockNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
  MacroAssembler _masm(&cbuf);
  int offset = ra_->reg2offset(in_RegMask(0).find_first_elem());
  int reg = ra_->get_encode(this);

  // NOTE(review): this uses the 32-bit 'addi' to form a 64-bit SP-relative
  // address, while the prologue/epilogue in this file use daddiu for SP
  // arithmetic — confirm addi is intended here.
  __ addi(as_Register(reg), SP, offset);
  /*
  if( offset >= 128 ) {
  emit_opcode(cbuf, 0x8D); // LEA reg,[SP+offset]
  emit_rm(cbuf, 0x2, reg, 0x04);
  emit_rm(cbuf, 0x0, 0x04, SP_enc);
  emit_d32(cbuf, offset);
  }
  else {
  emit_opcode(cbuf, 0x8D); // LEA reg,[SP+offset]
  emit_rm(cbuf, 0x1, reg, 0x04);
  emit_rm(cbuf, 0x0, 0x04, SP_enc);
  emit_d8(cbuf, offset);
  }
  */
}
1544 //static int sizeof_FFree_Float_Stack_All = -1;
1546 int MachCallRuntimeNode::ret_addr_offset() {
1547 //lui
1548 //ori
1549 //dsll
1550 //ori
1551 //jalr
1552 //nop
1553 assert(NativeCall::instruction_size == 24, "in MachCallRuntimeNode::ret_addr_offset()");
1554 return NativeCall::instruction_size;
1555 // return 16;
1556 }
1562 //=============================================================================
1563 #ifndef PRODUCT
1564 void MachNopNode::format( PhaseRegAlloc *, outputStream* st ) const {
1565 st->print("NOP \t# %d bytes pad for loops and calls", 4 * _count);
1566 }
1567 #endif
1569 void MachNopNode::emit(CodeBuffer &cbuf, PhaseRegAlloc * ) const {
1570 MacroAssembler _masm(&cbuf);
1571 int i = 0;
1572 for(i = 0; i < _count; i++)
1573 __ nop();
1574 }
1576 uint MachNopNode::size(PhaseRegAlloc *) const {
1577 return 4 * _count;
1578 }
1579 const Pipeline* MachNopNode::pipeline() const {
1580 return MachNode::pipeline_class();
1581 }
1583 //=============================================================================
1585 //=============================================================================
#ifndef PRODUCT
// Debug printing of the unverified entry point: klass check against the
// inline cache, falling through to the IC miss stub on mismatch.
// Mirrors MachUEPNode::emit below.
void MachUEPNode::format( PhaseRegAlloc *ra_, outputStream* st ) const {
  st->print_cr("load_klass(AT, T0)");
  st->print_cr("\tbeq(AT, iCache, L)");
  st->print_cr("\tnop");
  st->print_cr("\tjmp(SharedRuntime::get_ic_miss_stub(), relocInfo::runtime_call_type)");
  st->print_cr("\tnop");
  st->print_cr("\tnop");
  st->print_cr(" L:");
}
#endif
// Emit the unverified entry point: compare the receiver's klass (receiver
// arrives in T0) against the expected klass in the inline-cache register,
// and tail-call the IC miss stub on mismatch.
void MachUEPNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
  MacroAssembler _masm(&cbuf);
#ifdef ASSERT
  //uint code_size = cbuf.code_size();
#endif
  int ic_reg = Matcher::inline_cache_reg_encode();
  Label L;
  Register receiver = T0;
  Register iCache = as_Register(ic_reg);
  __ load_klass(AT, receiver);
  __ beq(AT, iCache, L);
  __ nop();                 // branch delay slot

  // Klass mismatch: jump to the shared IC miss stub via T9.
  __ relocate(relocInfo::runtime_call_type);
  __ patchable_set48(T9, (long)SharedRuntime::get_ic_miss_stub());
  __ jr(T9);
  __ nop();                 // branch delay slot

  /* WARNING these NOPs are critical so that verified entry point is properly
   * 8 bytes aligned for patching by NativeJump::patch_verified_entry() */
  __ align(CodeEntryAlignment);
  __ bind(L);
}

uint MachUEPNode::size(PhaseRegAlloc *ra_) const {
  // Size depends on alignment padding; compute it from the emitted code.
  return MachNode::size(ra_);
}
1629 //=============================================================================
// The constant-table base is allocated from the pointer register class.
const RegMask& MachConstantBaseNode::_out_RegMask = P_REG_mask();

int Compile::ConstantTable::calculate_table_base_offset() const {
  return 0; // absolute addressing, no offset
}

// The base node is emitted directly; no post-register-allocation expansion.
bool MachConstantBaseNode::requires_postalloc_expand() const { return false; }
void MachConstantBaseNode::postalloc_expand(GrowableArray <Node *> *nodes, PhaseRegAlloc *ra_) {
  ShouldNotReachHere();
}
// Materialize the constant-table base address into the destination
// register.  The table uses absolute addressing (table_base_offset is 0,
// see calculate_table_base_offset above), so the base is the start of the
// consts section itself.
void MachConstantBaseNode::emit(CodeBuffer& cbuf, PhaseRegAlloc* ra_) const {
  Compile* C = ra_->C;
  Compile::ConstantTable& constant_table = C->constant_table();
  MacroAssembler _masm(&cbuf);

  Register Rtoc = as_Register(ra_->get_encode(this));
  CodeSection* consts_section = __ code()->consts();
  int consts_size = consts_section->align_at_start(consts_section->size());
  assert(constant_table.size() == consts_size, "must be equal");

  if (consts_section->size()) {
    // Materialize the constant table base.
    address baseaddr = consts_section->start() + -(constant_table.table_base_offset());
    // RelocationHolder rspec = internal_word_Relocation::spec(baseaddr);
    __ relocate(relocInfo::internal_pc_type);
    // patchable_set48 always emits 4 instructions — must agree with size().
    __ patchable_set48(Rtoc, (long)baseaddr);
  }
}
// Fixed size: the 4-instruction patchable_set48 sequence emitted above.
uint MachConstantBaseNode::size(PhaseRegAlloc* ra_) const {
  // patchable_set48 (4 insts)
  return 4 * 4;
}

#ifndef PRODUCT
void MachConstantBaseNode::format(PhaseRegAlloc* ra_, outputStream* st) const {
  Register r = as_Register(ra_->get_encode(this));
  st->print("patchable_set48 %s, &constanttable (constant table base) @ MachConstantBaseNode", r->name());
}
#endif
1674 //=============================================================================
#ifndef PRODUCT
// Debug printing of the method prologue: optional stack bang, save RA/FP
// below the caller's SP, establish FP, then allocate the frame.
// Mirrors MachPrologNode::emit below.
void MachPrologNode::format( PhaseRegAlloc *ra_, outputStream* st ) const {
  Compile* C = ra_->C;

  int framesize = C->frame_size_in_bytes();
  int bangsize = C->bang_size_in_bytes();
  assert((framesize & (StackAlignmentInBytes-1)) == 0, "frame size not aligned");

  // Calls to C2R adapters often do not accept exceptional returns.
  // We require that their callers must bang for them. But be careful, because
  // some VM calls (such as call site linkage) can use several kilobytes of
  // stack. But the stack safety zone should account for that.
  // See bugs 4446381, 4468289, 4497237.
  if (C->need_stack_bang(bangsize)) {
    st->print_cr("# stack bang"); st->print("\t");
  }
  if (UseLoongsonISA) {
    // gssq stores both registers with a single instruction.
    st->print("gssq RA, FP, %d(SP) @ MachPrologNode\n\t", -wordSize*2);
  } else {
    st->print("sd RA, %d(SP) @ MachPrologNode\n\t", -wordSize);
    st->print("sd FP, %d(SP) @ MachPrologNode\n\t", -wordSize*2);
  }
  st->print("daddiu FP, SP, -%d \n\t", wordSize*2);
  st->print("daddiu SP, SP, -%d \t",framesize);
}
#endif
1703 void MachPrologNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
1704 Compile* C = ra_->C;
1705 MacroAssembler _masm(&cbuf);
1707 int framesize = C->frame_size_in_bytes();
1708 int bangsize = C->bang_size_in_bytes();
1710 // __ verified_entry(framesize, C->need_stack_bang(bangsize)?bangsize:0, false);
1712 assert((framesize & (StackAlignmentInBytes-1)) == 0, "frame size not aligned");
1714 if (C->need_stack_bang(framesize)) {
1715 __ generate_stack_overflow_check(framesize);
1716 }
1718 if (UseLoongsonISA) {
1719 __ gssq(RA, FP, SP, -wordSize*2);
1720 } else {
1721 __ sd(RA, SP, -wordSize);
1722 __ sd(FP, SP, -wordSize*2);
1723 }
1724 __ daddiu(FP, SP, -wordSize*2);
1725 __ daddiu(SP, SP, -framesize);
1726 __ nop(); /* 2013.10.22 Jin: Make enough room for patch_verified_entry() */
1727 __ nop();
1729 C->set_frame_complete(cbuf.insts_size());
1730 if (C->has_mach_constant_base_node()) {
1731 // NOTE: We set the table base offset here because users might be
1732 // emitted before MachConstantBaseNode.
1733 Compile::ConstantTable& constant_table = C->constant_table();
1734 constant_table.set_table_base_offset(constant_table.calculate_table_base_offset());
1735 }
1737 }
// Prologue size varies (bang, Loongson vs plain MIPS); compute it from the
// emitted code rather than predicting it.
uint MachPrologNode::size(PhaseRegAlloc *ra_) const {
  //fprintf(stderr, "\nPrologNode::size(ra_)= %d \n", MachNode::size(ra_));//fujie debug
  return MachNode::size(ra_); // too many variables; just compute it the hard way
}

int MachPrologNode::reloc() const {
  return 0; // a large enough number
}
1749 %}
1751 //----------ENCODING BLOCK-----------------------------------------------------
1752 // This block specifies the encoding classes used by the compiler to output
1753 // byte streams. Encoding classes generate functions which are called by
1754 // Machine Instruction Nodes in order to generate the bit encoding of the
1755 // instruction. Operands specify their base encoding interface with the
1756 // interface keyword. There are currently supported four interfaces,
1757 // REG_INTER, CONST_INTER, MEMORY_INTER, & COND_INTER. REG_INTER causes an
1758 // operand to generate a function which returns its register number when
1759 // queried. CONST_INTER causes an operand to generate a function which
1760 // returns the value of the constant when queried. MEMORY_INTER causes an
1761 // operand to generate four functions which return the Base Register, the
1762 // Index Register, the Scale Value, and the Offset Value of the operand when
1763 // queried. COND_INTER causes an operand to generate six functions which
1764 // return the encoding code (ie - encoding bits for the instruction)
1765 // associated with each basic boolean condition for a conditional instruction.
1766 // Instructions specify two basic values for encoding. They use the
1767 // ins_encode keyword to specify their encoding class (which must be one of
1768 // the class names specified in the encoding block), and they use the
1769 // opcode keyword to specify, in order, their primary, secondary, and
1770 // tertiary opcode. Only the opcode sections which a particular instruction
1771 // needs for encoding need to be specified.
1772 encode %{
1773 /*
1774 Alias:
1775 1044 b java.io.ObjectInputStream::readHandle (130 bytes)
1776 118 B14: # B19 B15 <- B13 Freq: 0.899955
1777 118 add S1, S2, V0 #@addP_reg_reg
1778 11c lb S0, [S1 + #-8257524] #@loadB
1779 120 BReq S0, #3, B19 #@branchConI_reg_imm P=0.100000 C=-1.000000
1780 */
1781 //Load byte signed
// Load byte signed (lb / Loongson gslbx) from [base + (index << scale) + disp].
// AT and T9 are the scratch registers for address formation; displacements
// that do not fit the 16-bit immediate field are materialized into T9.
// NOTE(review): the non-Loongson paths use the 32-bit 'addu' for address
// arithmetic here, while load_UB_enc below uses 'daddu' — confirm addu is
// intended for 64-bit addresses.
enc_class load_B_enc (mRegI dst, memory mem) %{
  MacroAssembler _masm(&cbuf);
  int dst = $dst$$reg;
  int base = $mem$$base;
  int index = $mem$$index;
  int scale = $mem$$scale;
  int disp = $mem$$disp;

  if( index != 0 ) {
    if( Assembler::is_simm16(disp) ) {
      if( UseLoongsonISA ) {
        // gslbx folds base + index + disp into one instruction.
        if (scale == 0) {
          __ gslbx(as_Register(dst), as_Register(base), as_Register(index), disp);
        } else {
          __ dsll(AT, as_Register(index), scale);
          __ gslbx(as_Register(dst), as_Register(base), AT, disp);
        }
      } else {
        if (scale == 0) {
          __ addu(AT, as_Register(base), as_Register(index));
        } else {
          __ dsll(AT, as_Register(index), scale);
          __ addu(AT, as_Register(base), AT);
        }
        __ lb(as_Register(dst), AT, disp);
      }
    } else {
      // Displacement too large for the 16-bit immediate field.
      if (scale == 0) {
        __ addu(AT, as_Register(base), as_Register(index));
      } else {
        __ dsll(AT, as_Register(index), scale);
        __ addu(AT, as_Register(base), AT);
      }
      __ move(T9, disp);
      if( UseLoongsonISA ) {
        __ gslbx(as_Register(dst), AT, T9, 0);
      } else {
        __ addu(AT, AT, T9);
        __ lb(as_Register(dst), AT, 0);
      }
    }
  } else {
    if( Assembler::is_simm16(disp) ) {
      __ lb(as_Register(dst), as_Register(base), disp);
    } else {
      __ move(T9, disp);
      if( UseLoongsonISA ) {
        __ gslbx(as_Register(dst), as_Register(base), T9, 0);
      } else {
        __ addu(AT, as_Register(base), T9);
        __ lb(as_Register(dst), AT, 0);
      }
    }
  }
%}
1838 //Load byte unsigned
// Load byte unsigned (lbu) from [base + (index << scale) + disp].
// AT and T9 are scratch; displacements that do not fit the 16-bit
// immediate field are materialized into T9 first.
enc_class load_UB_enc (mRegI dst, memory mem) %{
  MacroAssembler _masm(&cbuf);
  int dst = $dst$$reg;
  int base = $mem$$base;
  int index = $mem$$index;
  int scale = $mem$$scale;
  int disp = $mem$$disp;

  if( index != 0 ) {
    // Fold base + (index << scale) into AT.
    if (scale == 0) {
      __ daddu(AT, as_Register(base), as_Register(index));
    } else {
      __ dsll(AT, as_Register(index), scale);
      __ daddu(AT, as_Register(base), AT);
    }
    if( Assembler::is_simm16(disp) ) {
      __ lbu(as_Register(dst), AT, disp);
    } else {
      __ move(T9, disp);
      __ daddu(AT, AT, T9);
      __ lbu(as_Register(dst), AT, 0);
    }
  } else {
    if( Assembler::is_simm16(disp) ) {
      __ lbu(as_Register(dst), as_Register(base), disp);
    } else {
      __ move(T9, disp);
      __ daddu(AT, as_Register(base), T9);
      __ lbu(as_Register(dst), AT, 0);
    }
  }
%}
// Store byte (sb / Loongson gssbx) to [base + (index << scale) + disp].
// AT and T9 are scratch.  gssbx is used when the displacement fits its
// 8-bit immediate; otherwise the address is formed piecewise.
// NOTE(review): address arithmetic uses the 32-bit 'addu' here (vs. daddu
// in load_UB_enc) — confirm this is intended for 64-bit addresses.
enc_class store_B_reg_enc (memory mem, mRegI src) %{
  MacroAssembler _masm(&cbuf);
  int src = $src$$reg;
  int base = $mem$$base;
  int index = $mem$$index;
  int scale = $mem$$scale;
  int disp = $mem$$disp;

  if( index != 0 ) {
    if (scale == 0) {
      if( Assembler::is_simm(disp, 8) ) {
        // gssbx's immediate field is only 8 bits.
        if (UseLoongsonISA) {
          __ gssbx(as_Register(src), as_Register(base), as_Register(index), disp);
        } else {
          __ addu(AT, as_Register(base), as_Register(index));
          __ sb(as_Register(src), AT, disp);
        }
      } else if( Assembler::is_simm16(disp) ) {
        __ addu(AT, as_Register(base), as_Register(index));
        __ sb(as_Register(src), AT, disp);
      } else {
        // Displacement too large for an immediate: materialize into T9.
        __ addu(AT, as_Register(base), as_Register(index));
        __ move(T9, disp);
        if (UseLoongsonISA) {
          __ gssbx(as_Register(src), AT, T9, 0);
        } else {
          __ addu(AT, AT, T9);
          __ sb(as_Register(src), AT, 0);
        }
      }
    } else {
      // Scaled index: shift first, then combine with base.
      __ dsll(AT, as_Register(index), scale);
      if( Assembler::is_simm(disp, 8) ) {
        if (UseLoongsonISA) {
          __ gssbx(as_Register(src), AT, as_Register(base), disp);
        } else {
          __ addu(AT, as_Register(base), AT);
          __ sb(as_Register(src), AT, disp);
        }
      } else if( Assembler::is_simm16(disp) ) {
        __ addu(AT, as_Register(base), AT);
        __ sb(as_Register(src), AT, disp);
      } else {
        __ addu(AT, as_Register(base), AT);
        __ move(T9, disp);
        if (UseLoongsonISA) {
          __ gssbx(as_Register(src), AT, T9, 0);
        } else {
          __ addu(AT, AT, T9);
          __ sb(as_Register(src), AT, 0);
        }
      }
    }
  } else {
    // No index register.
    if( Assembler::is_simm16(disp) ) {
      __ sb(as_Register(src), as_Register(base), disp);
    } else {
      __ move(T9, disp);
      if (UseLoongsonISA) {
        __ gssbx(as_Register(src), as_Register(base), T9, 0);
      } else {
        __ addu(AT, as_Register(base), T9);
        __ sb(as_Register(src), AT, 0);
      }
    }
  }
%}
// Store Byte immediate: emits code for *(int8_t*)(base + (index << scale) + disp) = src.
// AT and T9 are assembler scratch registers. A zero immediate is stored directly
// from R0 to avoid materializing the constant. On Loongson, gssbx (reg+reg+simm8
// addressing, GS464 EXT) folds the index add into the store when disp fits 8 bits.
1940 enc_class store_B_immI_enc (memory mem, immI8 src) %{
1941 MacroAssembler _masm(&cbuf);
1942 int base = $mem$$base;
1943 int index = $mem$$index;
1944 int scale = $mem$$scale;
1945 int disp = $mem$$disp;
1946 int value = $src$$constant;
// Indexed addressing: effective address = base + (index << scale) + disp.
1948 if( index != 0 ) {
1949 if (!UseLoongsonISA) {
// Plain MIPS64: compute base + scaled index into AT first.
1950 if (scale == 0) {
1951 __ daddu(AT, as_Register(base), as_Register(index));
1952 } else {
1953 __ dsll(AT, as_Register(index), scale);
1954 __ daddu(AT, as_Register(base), AT);
1955 }
1956 if( Assembler::is_simm16(disp) ) {
1957 if (value == 0) {
1958 __ sb(R0, AT, disp);
1959 } else {
1960 __ move(T9, value);
1961 __ sb(T9, AT, disp);
1962 }
1963 } else {
// disp does not fit the sb immediate; add it via T9 first.
1964 if (value == 0) {
1965 __ move(T9, disp);
1966 __ daddu(AT, AT, T9);
1967 __ sb(R0, AT, 0);
1968 } else {
1969 __ move(T9, disp);
1970 __ daddu(AT, AT, T9);
1971 __ move(T9, value);
1972 __ sb(T9, AT, 0);
1973 }
1974 }
1975 } else {
// Loongson EXT path: prefer gssbx which adds a register index in the store.
1977 if (scale == 0) {
1978 if( Assembler::is_simm(disp, 8) ) {
1979 if (value == 0) {
1980 __ gssbx(R0, as_Register(base), as_Register(index), disp);
1981 } else {
1982 __ move(T9, value);
1983 __ gssbx(T9, as_Register(base), as_Register(index), disp);
1984 }
1985 } else if( Assembler::is_simm16(disp) ) {
1986 __ daddu(AT, as_Register(base), as_Register(index));
1987 if (value == 0) {
1988 __ sb(R0, AT, disp);
1989 } else {
1990 __ move(T9, value);
1991 __ sb(T9, AT, disp);
1992 }
1993 } else {
1994 if (value == 0) {
1995 __ daddu(AT, as_Register(base), as_Register(index));
1996 __ move(T9, disp);
1997 __ gssbx(R0, AT, T9, 0);
1998 } else {
// Here AT = base + disp and the raw index register supplies the final add,
// so the effective address is still base + index + disp.
1999 __ move(AT, disp);
2000 __ move(T9, value);
2001 __ daddu(AT, as_Register(base), AT);
2002 __ gssbx(T9, AT, as_Register(index), 0);
2003 }
2004 }
2006 } else {
// scale != 0: scaled index lives in AT.
2008 if( Assembler::is_simm(disp, 8) ) {
2009 __ dsll(AT, as_Register(index), scale);
2010 if (value == 0) {
2011 __ gssbx(R0, as_Register(base), AT, disp);
2012 } else {
2013 __ move(T9, value);
2014 __ gssbx(T9, as_Register(base), AT, disp);
2015 }
2016 } else if( Assembler::is_simm16(disp) ) {
2017 __ dsll(AT, as_Register(index), scale);
2018 __ daddu(AT, as_Register(base), AT);
2019 if (value == 0) {
2020 __ sb(R0, AT, disp);
2021 } else {
2022 __ move(T9, value);
2023 __ sb(T9, AT, disp);
2024 }
2025 } else {
2026 __ dsll(AT, as_Register(index), scale);
2027 if (value == 0) {
2028 __ daddu(AT, as_Register(base), AT);
2029 __ move(T9, disp);
2030 __ gssbx(R0, AT, T9, 0);
2031 } else {
// AT = (index << scale) + disp; base is the gssbx base operand.
2032 __ move(T9, disp);
2033 __ daddu(AT, AT, T9);
2034 __ move(T9, value);
2035 __ gssbx(T9, as_Register(base), AT, 0);
2036 }
2037 }
2038 }
2039 }
// No index register: effective address = base + disp.
2040 } else {
2041 if( Assembler::is_simm16(disp) ) {
2042 if (value == 0) {
2043 __ sb(R0, as_Register(base), disp);
2044 } else {
2045 __ move(AT, value);
2046 __ sb(AT, as_Register(base), disp);
2047 }
2048 } else {
2049 if (value == 0) {
2050 __ move(T9, disp);
2051 if (UseLoongsonISA) {
2052 __ gssbx(R0, as_Register(base), T9, 0);
2053 } else {
2054 __ daddu(AT, as_Register(base), T9);
2055 __ sb(R0, AT, 0);
2056 }
2057 } else {
2058 __ move(T9, disp);
2059 if (UseLoongsonISA) {
2060 __ move(AT, value);
2061 __ gssbx(AT, as_Register(base), T9, 0);
2062 } else {
2063 __ daddu(AT, as_Register(base), T9);
2064 __ move(T9, value);
2065 __ sb(T9, AT, 0);
2066 }
2067 }
2068 }
2069 }
2070 %}
// Store Byte immediate with memory barrier (used for volatile stores):
// same addressing as store_B_immI_enc, followed by a trailing SYNC so the
// store is globally visible before subsequent memory operations.
2073 enc_class store_B_immI_enc_sync (memory mem, immI8 src) %{
2074 MacroAssembler _masm(&cbuf);
2075 int base = $mem$$base;
2076 int index = $mem$$index;
2077 int scale = $mem$$scale;
2078 int disp = $mem$$disp;
2079 int value = $src$$constant;
// Indexed addressing: effective address = base + (index << scale) + disp.
2081 if( index != 0 ) {
2082 if ( UseLoongsonISA ) {
// gssbx handles reg+reg+simm8 addressing directly.
2083 if ( Assembler::is_simm(disp,8) ) {
2084 if ( scale == 0 ) {
2085 if ( value == 0 ) {
2086 __ gssbx(R0, as_Register(base), as_Register(index), disp);
2087 } else {
2088 __ move(AT, value);
2089 __ gssbx(AT, as_Register(base), as_Register(index), disp);
2090 }
2091 } else {
2092 __ dsll(AT, as_Register(index), scale);
2093 if ( value == 0 ) {
2094 __ gssbx(R0, as_Register(base), AT, disp);
2095 } else {
2096 __ move(T9, value);
2097 __ gssbx(T9, as_Register(base), AT, disp);
2098 }
2099 }
2100 } else if ( Assembler::is_simm16(disp) ) {
2101 if ( scale == 0 ) {
2102 __ daddu(AT, as_Register(base), as_Register(index));
2103 if ( value == 0 ){
2104 __ sb(R0, AT, disp);
2105 } else {
2106 __ move(T9, value);
2107 __ sb(T9, AT, disp);
2108 }
2109 } else {
2110 __ dsll(AT, as_Register(index), scale);
2111 __ daddu(AT, as_Register(base), AT);
2112 if ( value == 0 ) {
2113 __ sb(R0, AT, disp);
2114 } else {
2115 __ move(T9, value);
2116 __ sb(T9, AT, disp);
2117 }
2118 }
2119 } else {
// Large disp: fold (index << scale) + disp into AT, then gssbx off base.
2120 if ( scale == 0 ) {
2121 __ move(AT, disp);
2122 __ daddu(AT, as_Register(index), AT);
2123 if ( value == 0 ) {
2124 __ gssbx(R0, as_Register(base), AT, 0);
2125 } else {
2126 __ move(T9, value);
2127 __ gssbx(T9, as_Register(base), AT, 0);
2128 }
2129 } else {
2130 __ dsll(AT, as_Register(index), scale);
2131 __ move(T9, disp);
2132 __ daddu(AT, AT, T9);
2133 if ( value == 0 ) {
2134 __ gssbx(R0, as_Register(base), AT, 0);
2135 } else {
2136 __ move(T9, value);
2137 __ gssbx(T9, as_Register(base), AT, 0);
2138 }
2139 }
2140 }
2141 } else { //not use loongson isa
2142 if (scale == 0) {
2143 __ daddu(AT, as_Register(base), as_Register(index));
2144 } else {
2145 __ dsll(AT, as_Register(index), scale);
2146 __ daddu(AT, as_Register(base), AT);
2147 }
2148 if( Assembler::is_simm16(disp) ) {
2149 if (value == 0) {
2150 __ sb(R0, AT, disp);
2151 } else {
2152 __ move(T9, value);
2153 __ sb(T9, AT, disp);
2154 }
2155 } else {
2156 if (value == 0) {
2157 __ move(T9, disp);
2158 __ daddu(AT, AT, T9);
2159 __ sb(R0, AT, 0);
2160 } else {
2161 __ move(T9, disp);
2162 __ daddu(AT, AT, T9);
2163 __ move(T9, value);
2164 __ sb(T9, AT, 0);
2165 }
2166 }
2167 }
// No index register: effective address = base + disp.
2168 } else {
2169 if ( UseLoongsonISA ){
2170 if ( Assembler::is_simm16(disp) ){
2171 if ( value == 0 ) {
2172 __ sb(R0, as_Register(base), disp);
2173 } else {
2174 __ move(AT, value);
2175 __ sb(AT, as_Register(base), disp);
2176 }
2177 } else {
2178 __ move(AT, disp);
2179 if ( value == 0 ) {
2180 __ gssbx(R0, as_Register(base), AT, 0);
2181 } else {
2182 __ move(T9, value);
2183 __ gssbx(T9, as_Register(base), AT, 0);
2184 }
2185 }
2186 } else {
2187 if( Assembler::is_simm16(disp) ) {
2188 if (value == 0) {
2189 __ sb(R0, as_Register(base), disp);
2190 } else {
2191 __ move(AT, value);
2192 __ sb(AT, as_Register(base), disp);
2193 }
2194 } else {
2195 if (value == 0) {
2196 __ move(T9, disp);
2197 __ daddu(AT, as_Register(base), T9);
2198 __ sb(R0, AT, 0);
2199 } else {
2200 __ move(T9, disp);
2201 __ daddu(AT, as_Register(base), T9);
2202 __ move(T9, value);
2203 __ sb(T9, AT, 0);
2204 }
2205 }
2206 }
2207 }
// Barrier: order the byte store before later memory accesses (volatile semantics).
2209 __ sync();
2210 %}
2212 // Load Short (16bit signed)
// Load Short (16-bit signed): dst = sign_extend(*(int16_t*)(base + (index << scale) + disp)).
// AT/T9 are scratch. On Loongson, gslhx performs reg+reg+simm8 addressed loads.
2213 enc_class load_S_enc (mRegI dst, memory mem) %{
2214 MacroAssembler _masm(&cbuf);
2215 int dst = $dst$$reg;
2216 int base = $mem$$base;
2217 int index = $mem$$index;
2218 int scale = $mem$$scale;
2219 int disp = $mem$$disp;
// Indexed addressing: base + (index << scale) + disp.
2221 if( index != 0 ) {
2222 if ( UseLoongsonISA ) {
2223 if ( Assembler::is_simm(disp, 8) ) {
2224 if (scale == 0) {
2225 __ gslhx(as_Register(dst), as_Register(base), as_Register(index), disp);
2226 } else {
2227 __ dsll(AT, as_Register(index), scale);
2228 __ gslhx(as_Register(dst), as_Register(base), AT, disp);
2229 }
2230 } else if ( Assembler::is_simm16(disp) ) {
2231 if (scale == 0) {
2232 __ daddu(AT, as_Register(base), as_Register(index));
2233 __ lh(as_Register(dst), AT, disp);
2234 } else {
2235 __ dsll(AT, as_Register(index), scale);
2236 __ daddu(AT, as_Register(base), AT);
2237 __ lh(as_Register(dst), AT, disp);
2238 }
2239 } else {
// Large disp: fold (index << scale) + disp into AT, base supplied to gslhx.
2240 if (scale == 0) {
2241 __ move(AT, disp);
2242 __ daddu(AT, as_Register(index), AT);
2243 __ gslhx(as_Register(dst), as_Register(base), AT, 0);
2244 } else {
2245 __ dsll(AT, as_Register(index), scale);
2246 __ move(T9, disp);
2247 __ daddu(AT, AT, T9);
2248 __ gslhx(as_Register(dst), as_Register(base), AT, 0);
2249 }
2250 }
2251 } else { // not use loongson isa
2252 if (scale == 0) {
2253 __ daddu(AT, as_Register(base), as_Register(index));
2254 } else {
2255 __ dsll(AT, as_Register(index), scale);
2256 __ daddu(AT, as_Register(base), AT);
2257 }
2258 if( Assembler::is_simm16(disp) ) {
2259 __ lh(as_Register(dst), AT, disp);
2260 } else {
2261 __ move(T9, disp);
2262 __ daddu(AT, AT, T9);
2263 __ lh(as_Register(dst), AT, 0);
2264 }
2265 }
2266 } else { // index is 0
2267 if ( UseLoongsonISA ) {
2268 if ( Assembler::is_simm16(disp) ) {
2269 __ lh(as_Register(dst), as_Register(base), disp);
2270 } else {
2271 __ move(T9, disp);
2272 __ gslhx(as_Register(dst), as_Register(base), T9, 0);
2273 }
2274 } else { //not use loongson isa
2275 if( Assembler::is_simm16(disp) ) {
2276 __ lh(as_Register(dst), as_Register(base), disp);
2277 } else {
2278 __ move(T9, disp);
2279 __ daddu(AT, as_Register(base), T9);
2280 __ lh(as_Register(dst), AT, 0);
2281 }
2282 }
2283 }
2284 %}
2286 // Load Char (16bit unsigned)
// Load Char (16-bit unsigned): dst = zero_extend(*(uint16_t*)(base + (index << scale) + disp)).
// AT/T9 are scratch registers.
// Fix: the large-disp indexed path used 32-bit addu(AT, AT, T9) while every
// other address add in this encoding uses daddu. On MIPS64, addu sign-extends
// the low 32 bits of the sum, producing a wrong effective address whenever
// base + (index << scale) + disp does not fit in 32 bits. Use daddu.
2287 enc_class load_C_enc (mRegI dst, memory mem) %{
2288 MacroAssembler _masm(&cbuf);
2289 int dst = $dst$$reg;
2290 int base = $mem$$base;
2291 int index = $mem$$index;
2292 int scale = $mem$$scale;
2293 int disp = $mem$$disp;
// Indexed addressing: base + (index << scale) + disp.
2295 if( index != 0 ) {
2296 if (scale == 0) {
2297 __ daddu(AT, as_Register(base), as_Register(index));
2298 } else {
2299 __ dsll(AT, as_Register(index), scale);
2300 __ daddu(AT, as_Register(base), AT);
2301 }
2302 if( Assembler::is_simm16(disp) ) {
2303 __ lhu(as_Register(dst), AT, disp);
2304 } else {
2305 __ move(T9, disp);
// was: addu — must be a 64-bit add for pointer arithmetic.
2306 __ daddu(AT, AT, T9);
2307 __ lhu(as_Register(dst), AT, 0);
2308 }
// No index register: base + disp.
2309 } else {
2310 if( Assembler::is_simm16(disp) ) {
2311 __ lhu(as_Register(dst), as_Register(base), disp);
2312 } else {
2313 __ move(T9, disp);
2314 __ daddu(AT, as_Register(base), T9);
2315 __ lhu(as_Register(dst), AT, 0);
2316 }
2317 }
2318 %}
2320 // Store Char (16bit unsigned)
// Store Char/Short (16-bit) from register: *(int16_t*)(base + (index << scale) + disp) = src.
// On Loongson, gsshx handles reg+reg+simm8 addressed stores.
// NOTE(review): this encoding uses 32-bit addu for its address adds while the
// neighboring load/store encodings use daddu; on MIPS64 addu sign-extends the
// 32-bit sum, which would misaddress if the sum exceeds 32 bits — confirm intent.
2321 enc_class store_C_reg_enc (memory mem, mRegI src) %{
2322 MacroAssembler _masm(&cbuf);
2323 int src = $src$$reg;
2324 int base = $mem$$base;
2325 int index = $mem$$index;
2326 int scale = $mem$$scale;
2327 int disp = $mem$$disp;
// Indexed addressing: base + (index << scale) + disp.
2329 if( index != 0 ) {
2330 if( Assembler::is_simm16(disp) ) {
2331 if( UseLoongsonISA && Assembler::is_simm(disp, 8) ) {
2332 if (scale == 0) {
2333 __ gsshx(as_Register(src), as_Register(base), as_Register(index), disp);
2334 } else {
2335 __ dsll(AT, as_Register(index), scale);
2336 __ gsshx(as_Register(src), as_Register(base), AT, disp);
2337 }
2338 } else {
2339 if (scale == 0) {
2340 __ addu(AT, as_Register(base), as_Register(index));
2341 } else {
2342 __ dsll(AT, as_Register(index), scale);
2343 __ addu(AT, as_Register(base), AT);
2344 }
2345 __ sh(as_Register(src), AT, disp);
2346 }
2347 } else {
// Large disp: add it from T9 (or fold into gsshx on Loongson).
2348 if (scale == 0) {
2349 __ addu(AT, as_Register(base), as_Register(index));
2350 } else {
2351 __ dsll(AT, as_Register(index), scale);
2352 __ addu(AT, as_Register(base), AT);
2353 }
2354 __ move(T9, disp);
2355 if( UseLoongsonISA ) {
2356 __ gsshx(as_Register(src), AT, T9, 0);
2357 } else {
2358 __ addu(AT, AT, T9);
2359 __ sh(as_Register(src), AT, 0);
2360 }
2361 }
// No index register: base + disp.
2362 } else {
2363 if( Assembler::is_simm16(disp) ) {
2364 __ sh(as_Register(src), as_Register(base), disp);
2365 } else {
2366 __ move(T9, disp);
2367 if( UseLoongsonISA ) {
2368 __ gsshx(as_Register(src), as_Register(base), T9, 0);
2369 } else {
2370 __ addu(AT, as_Register(base), T9);
2371 __ sh(as_Register(src), AT, 0);
2372 }
2373 }
2374 }
2375 %}
// Store zero Char/Short (16-bit): *(int16_t*)(base + (index << scale) + disp) = 0.
// Stores directly from R0, so no constant materialization is needed.
// NOTE(review): uses 32-bit addu for address adds (cf. daddu elsewhere) — see
// store_C_reg_enc; confirm this is safe for full 64-bit addresses.
2377 enc_class store_C0_enc (memory mem) %{
2378 MacroAssembler _masm(&cbuf);
2379 int base = $mem$$base;
2380 int index = $mem$$index;
2381 int scale = $mem$$scale;
2382 int disp = $mem$$disp;
// Indexed addressing: base + (index << scale) + disp.
2384 if( index != 0 ) {
2385 if( Assembler::is_simm16(disp) ) {
2386 if( UseLoongsonISA && Assembler::is_simm(disp, 8) ) {
2387 if (scale == 0) {
2388 __ gsshx(R0, as_Register(base), as_Register(index), disp);
2389 } else {
2390 __ dsll(AT, as_Register(index), scale);
2391 __ gsshx(R0, as_Register(base), AT, disp);
2392 }
2393 } else {
2394 if (scale == 0) {
2395 __ addu(AT, as_Register(base), as_Register(index));
2396 } else {
2397 __ dsll(AT, as_Register(index), scale);
2398 __ addu(AT, as_Register(base), AT);
2399 }
2400 __ sh(R0, AT, disp);
2401 }
2402 } else {
2403 if (scale == 0) {
2404 __ addu(AT, as_Register(base), as_Register(index));
2405 } else {
2406 __ dsll(AT, as_Register(index), scale);
2407 __ addu(AT, as_Register(base), AT);
2408 }
2409 __ move(T9, disp);
2410 if( UseLoongsonISA ) {
2411 __ gsshx(R0, AT, T9, 0);
2412 } else {
2413 __ addu(AT, AT, T9);
2414 __ sh(R0, AT, 0);
2415 }
2416 }
// No index register: base + disp.
2417 } else {
2418 if( Assembler::is_simm16(disp) ) {
2419 __ sh(R0, as_Register(base), disp);
2420 } else {
2421 __ move(T9, disp);
2422 if( UseLoongsonISA ) {
2423 __ gsshx(R0, as_Register(base), T9, 0);
2424 } else {
2425 __ addu(AT, as_Register(base), T9);
2426 __ sh(R0, AT, 0);
2427 }
2428 }
2429 }
2430 %}
// Load Int (32-bit signed): dst = sign_extend(*(int32_t*)(base + (index << scale) + disp)).
// On Loongson, gslwx handles reg+reg+simm8 addressed loads.
// NOTE(review): uses 32-bit addu for address adds (cf. daddu in load_S/load_C) —
// confirm this is safe for full 64-bit addresses.
2432 enc_class load_I_enc (mRegI dst, memory mem) %{
2433 MacroAssembler _masm(&cbuf);
2434 int dst = $dst$$reg;
2435 int base = $mem$$base;
2436 int index = $mem$$index;
2437 int scale = $mem$$scale;
2438 int disp = $mem$$disp;
// Indexed addressing: base + (index << scale) + disp.
2440 if( index != 0 ) {
2441 if( Assembler::is_simm16(disp) ) {
2442 if( UseLoongsonISA && Assembler::is_simm(disp, 8) ) {
2443 if (scale == 0) {
2444 __ gslwx(as_Register(dst), as_Register(base), as_Register(index), disp);
2445 } else {
2446 __ dsll(AT, as_Register(index), scale);
2447 __ gslwx(as_Register(dst), as_Register(base), AT, disp);
2448 }
2449 } else {
2450 if (scale == 0) {
2451 __ addu(AT, as_Register(base), as_Register(index));
2452 } else {
2453 __ dsll(AT, as_Register(index), scale);
2454 __ addu(AT, as_Register(base), AT);
2455 }
2456 __ lw(as_Register(dst), AT, disp);
2457 }
2458 } else {
// Large disp: add from T9 (or fold into gslwx on Loongson).
2459 if (scale == 0) {
2460 __ addu(AT, as_Register(base), as_Register(index));
2461 } else {
2462 __ dsll(AT, as_Register(index), scale);
2463 __ addu(AT, as_Register(base), AT);
2464 }
2465 __ move(T9, disp);
2466 if( UseLoongsonISA ) {
2467 __ gslwx(as_Register(dst), AT, T9, 0);
2468 } else {
2469 __ addu(AT, AT, T9);
2470 __ lw(as_Register(dst), AT, 0);
2471 }
2472 }
// No index register: base + disp.
2473 } else {
2474 if( Assembler::is_simm16(disp) ) {
2475 __ lw(as_Register(dst), as_Register(base), disp);
2476 } else {
2477 __ move(T9, disp);
2478 if( UseLoongsonISA ) {
2479 __ gslwx(as_Register(dst), as_Register(base), T9, 0);
2480 } else {
2481 __ addu(AT, as_Register(base), T9);
2482 __ lw(as_Register(dst), AT, 0);
2483 }
2484 }
2485 }
2486 %}
// Store Int (32-bit) from register: *(int32_t*)(base + (index << scale) + disp) = src.
// On Loongson, gsswx handles reg+reg+simm8 addressed stores.
// NOTE(review): uses 32-bit addu for address adds (cf. daddu elsewhere) —
// confirm this is safe for full 64-bit addresses.
2488 enc_class store_I_reg_enc (memory mem, mRegI src) %{
2489 MacroAssembler _masm(&cbuf);
2490 int src = $src$$reg;
2491 int base = $mem$$base;
2492 int index = $mem$$index;
2493 int scale = $mem$$scale;
2494 int disp = $mem$$disp;
// Indexed addressing: base + (index << scale) + disp.
2496 if( index != 0 ) {
2497 if( Assembler::is_simm16(disp) ) {
2498 if( UseLoongsonISA && Assembler::is_simm(disp, 8) ) {
2499 if (scale == 0) {
2500 __ gsswx(as_Register(src), as_Register(base), as_Register(index), disp);
2501 } else {
2502 __ dsll(AT, as_Register(index), scale);
2503 __ gsswx(as_Register(src), as_Register(base), AT, disp);
2504 }
2505 } else {
2506 if (scale == 0) {
2507 __ addu(AT, as_Register(base), as_Register(index));
2508 } else {
2509 __ dsll(AT, as_Register(index), scale);
2510 __ addu(AT, as_Register(base), AT);
2511 }
2512 __ sw(as_Register(src), AT, disp);
2513 }
2514 } else {
// Large disp: add from T9 (or fold into gsswx on Loongson).
2515 if (scale == 0) {
2516 __ addu(AT, as_Register(base), as_Register(index));
2517 } else {
2518 __ dsll(AT, as_Register(index), scale);
2519 __ addu(AT, as_Register(base), AT);
2520 }
2521 __ move(T9, disp);
2522 if( UseLoongsonISA ) {
2523 __ gsswx(as_Register(src), AT, T9, 0);
2524 } else {
2525 __ addu(AT, AT, T9);
2526 __ sw(as_Register(src), AT, 0);
2527 }
2528 }
// No index register: base + disp.
2529 } else {
2530 if( Assembler::is_simm16(disp) ) {
2531 __ sw(as_Register(src), as_Register(base), disp);
2532 } else {
2533 __ move(T9, disp);
2534 if( UseLoongsonISA ) {
2535 __ gsswx(as_Register(src), as_Register(base), T9, 0);
2536 } else {
2537 __ addu(AT, as_Register(base), T9);
2538 __ sw(as_Register(src), AT, 0);
2539 }
2540 }
2541 }
2542 %}
// Store Int immediate: *(int32_t*)(base + (index << scale) + disp) = src constant.
// A zero immediate is stored directly from R0; otherwise the constant is
// materialized in AT or T9 first. gsswx (Loongson EXT) folds the index add
// into the store when disp fits 8 signed bits.
2544 enc_class store_I_immI_enc (memory mem, immI src) %{
2545 MacroAssembler _masm(&cbuf);
2546 int base = $mem$$base;
2547 int index = $mem$$index;
2548 int scale = $mem$$scale;
2549 int disp = $mem$$disp;
2550 int value = $src$$constant;
// Indexed addressing: base + (index << scale) + disp.
2552 if( index != 0 ) {
2553 if ( UseLoongsonISA ) {
2554 if ( Assembler::is_simm(disp, 8) ) {
2555 if ( scale == 0 ) {
2556 if ( value == 0 ) {
2557 __ gsswx(R0, as_Register(base), as_Register(index), disp);
2558 } else {
2559 __ move(T9, value);
2560 __ gsswx(T9, as_Register(base), as_Register(index), disp);
2561 }
2562 } else {
2563 __ dsll(AT, as_Register(index), scale);
2564 if ( value == 0 ) {
2565 __ gsswx(R0, as_Register(base), AT, disp);
2566 } else {
2567 __ move(T9, value);
2568 __ gsswx(T9, as_Register(base), AT, disp);
2569 }
2570 }
2571 } else if ( Assembler::is_simm16(disp) ) {
2572 if ( scale == 0 ) {
2573 __ daddu(AT, as_Register(base), as_Register(index));
2574 if ( value == 0 ) {
2575 __ sw(R0, AT, disp);
2576 } else {
2577 __ move(T9, value);
2578 __ sw(T9, AT, disp);
2579 }
2580 } else {
2581 __ dsll(AT, as_Register(index), scale);
2582 __ daddu(AT, as_Register(base), AT);
2583 if ( value == 0 ) {
2584 __ sw(R0, AT, disp);
2585 } else {
2586 __ move(T9, value);
2587 __ sw(T9, AT, disp);
2588 }
2589 }
2590 } else {
// Large disp: fold (index << scale) + disp into AT, gsswx off base.
2591 if ( scale == 0 ) {
2592 __ move(T9, disp);
2593 __ daddu(AT, as_Register(index), T9);
2594 if ( value ==0 ) {
2595 __ gsswx(R0, as_Register(base), AT, 0);
2596 } else {
2597 __ move(T9, value);
2598 __ gsswx(T9, as_Register(base), AT, 0);
2599 }
2600 } else {
2601 __ dsll(AT, as_Register(index), scale);
2602 __ move(T9, disp);
2603 __ daddu(AT, AT, T9);
2604 if ( value == 0 ) {
2605 __ gsswx(R0, as_Register(base), AT, 0);
2606 } else {
2607 __ move(T9, value);
2608 __ gsswx(T9, as_Register(base), AT, 0);
2609 }
2610 }
2611 }
2612 } else { //not use loongson isa
2613 if (scale == 0) {
2614 __ daddu(AT, as_Register(base), as_Register(index));
2615 } else {
2616 __ dsll(AT, as_Register(index), scale);
2617 __ daddu(AT, as_Register(base), AT);
2618 }
2619 if( Assembler::is_simm16(disp) ) {
2620 if (value == 0) {
2621 __ sw(R0, AT, disp);
2622 } else {
2623 __ move(T9, value);
2624 __ sw(T9, AT, disp);
2625 }
2626 } else {
2627 if (value == 0) {
2628 __ move(T9, disp);
2629 __ daddu(AT, AT, T9);
2630 __ sw(R0, AT, 0);
2631 } else {
2632 __ move(T9, disp);
2633 __ daddu(AT, AT, T9);
2634 __ move(T9, value);
2635 __ sw(T9, AT, 0);
2636 }
2637 }
2638 }
// No index register: base + disp.
2639 } else {
2640 if ( UseLoongsonISA ) {
2641 if ( Assembler::is_simm16(disp) ) {
2642 if ( value == 0 ) {
2643 __ sw(R0, as_Register(base), disp);
2644 } else {
2645 __ move(AT, value);
2646 __ sw(AT, as_Register(base), disp);
2647 }
2648 } else {
2649 __ move(T9, disp);
2650 if ( value == 0 ) {
2651 __ gsswx(R0, as_Register(base), T9, 0);
2652 } else {
2653 __ move(AT, value);
2654 __ gsswx(AT, as_Register(base), T9, 0);
2655 }
2656 }
2657 } else {
2658 if( Assembler::is_simm16(disp) ) {
2659 if (value == 0) {
2660 __ sw(R0, as_Register(base), disp);
2661 } else {
2662 __ move(AT, value);
2663 __ sw(AT, as_Register(base), disp);
2664 }
2665 } else {
2666 if (value == 0) {
2667 __ move(T9, disp);
2668 __ daddu(AT, as_Register(base), T9);
2669 __ sw(R0, AT, 0);
2670 } else {
2671 __ move(T9, disp);
2672 __ daddu(AT, as_Register(base), T9);
2673 __ move(T9, value);
2674 __ sw(T9, AT, 0);
2675 }
2676 }
2677 }
2678 }
2679 %}
// Load narrow oop (32-bit compressed pointer): dst = zero_extend(*(uint32_t*)addr).
// lwu zero-extends, as required for compressed oops. Uses set64 (full 64-bit
// constant load) for a displacement that does not fit simm16. No relocation is
// allowed on the displacement.
2681 enc_class load_N_enc (mRegN dst, memory mem) %{
2682 MacroAssembler _masm(&cbuf);
2683 int dst = $dst$$reg;
2684 int base = $mem$$base;
2685 int index = $mem$$index;
2686 int scale = $mem$$scale;
2687 int disp = $mem$$disp;
2688 relocInfo::relocType disp_reloc = $mem->disp_reloc();
2689 assert(disp_reloc == relocInfo::none, "cannot have disp");
// Indexed addressing: base + (index << scale) + disp.
2691 if( index != 0 ) {
2692 if (scale == 0) {
2693 __ daddu(AT, as_Register(base), as_Register(index));
2694 } else {
2695 __ dsll(AT, as_Register(index), scale);
2696 __ daddu(AT, as_Register(base), AT);
2697 }
2698 if( Assembler::is_simm16(disp) ) {
2699 __ lwu(as_Register(dst), AT, disp);
2700 } else {
2701 __ set64(T9, disp);
2702 __ daddu(AT, AT, T9);
2703 __ lwu(as_Register(dst), AT, 0);
2704 }
// No index register: base + disp.
2705 } else {
2706 if( Assembler::is_simm16(disp) ) {
2707 __ lwu(as_Register(dst), as_Register(base), disp);
2708 } else {
2709 __ set64(T9, disp);
2710 __ daddu(AT, as_Register(base), T9);
2711 __ lwu(as_Register(dst), AT, 0);
2712 }
2713 }
2715 %}
// Load Pointer (64-bit): dst = *(intptr_t*)(base + (index << scale) + disp).
// Uses ld / gsldx (Loongson reg+reg+simm8 form). No relocation is allowed on
// the displacement; large disp values are materialized with set64.
2718 enc_class load_P_enc (mRegP dst, memory mem) %{
2719 MacroAssembler _masm(&cbuf);
2720 int dst = $dst$$reg;
2721 int base = $mem$$base;
2722 int index = $mem$$index;
2723 int scale = $mem$$scale;
2724 int disp = $mem$$disp;
2725 relocInfo::relocType disp_reloc = $mem->disp_reloc();
2726 assert(disp_reloc == relocInfo::none, "cannot have disp");
// Indexed addressing: base + (index << scale) + disp.
2728 if( index != 0 ) {
2729 if ( UseLoongsonISA ) {
2730 if ( Assembler::is_simm(disp, 8) ) {
2731 if ( scale != 0 ) {
2732 __ dsll(AT, as_Register(index), scale);
2733 __ gsldx(as_Register(dst), as_Register(base), AT, disp);
2734 } else {
2735 __ gsldx(as_Register(dst), as_Register(base), as_Register(index), disp);
2736 }
2737 } else if ( Assembler::is_simm16(disp) ){
2738 if ( scale != 0 ) {
2739 __ dsll(AT, as_Register(index), scale);
2740 __ daddu(AT, AT, as_Register(base));
2741 } else {
2742 __ daddu(AT, as_Register(index), as_Register(base));
2743 }
2744 __ ld(as_Register(dst), AT, disp);
2745 } else {
// Large disp: fold (index << scale) + disp into AT, gsldx off base.
2746 if ( scale != 0 ) {
2747 __ dsll(AT, as_Register(index), scale);
2748 __ move(T9, disp);
2749 __ daddu(AT, AT, T9);
2750 } else {
2751 __ move(T9, disp);
2752 __ daddu(AT, as_Register(index), T9);
2753 }
2754 __ gsldx(as_Register(dst), as_Register(base), AT, 0);
2755 }
2756 } else { //not use loongson isa
2757 if (scale == 0) {
2758 __ daddu(AT, as_Register(base), as_Register(index));
2759 } else {
2760 __ dsll(AT, as_Register(index), scale);
2761 __ daddu(AT, as_Register(base), AT);
2762 }
2763 if( Assembler::is_simm16(disp) ) {
2764 __ ld(as_Register(dst), AT, disp);
2765 } else {
2766 __ set64(T9, disp);
2767 __ daddu(AT, AT, T9);
2768 __ ld(as_Register(dst), AT, 0);
2769 }
2770 }
// No index register: base + disp.
2771 } else {
2772 if ( UseLoongsonISA ) {
2773 if ( Assembler::is_simm16(disp) ){
2774 __ ld(as_Register(dst), as_Register(base), disp);
2775 } else {
2776 __ set64(T9, disp);
2777 __ gsldx(as_Register(dst), as_Register(base), T9, 0);
2778 }
2779 } else { //not use loongson isa
2780 if( Assembler::is_simm16(disp) ) {
2781 __ ld(as_Register(dst), as_Register(base), disp);
2782 } else {
2783 __ set64(T9, disp);
2784 __ daddu(AT, as_Register(base), T9);
2785 __ ld(as_Register(dst), AT, 0);
2786 }
2787 }
2788 }
2789 // if( disp_reloc != relocInfo::none) __ ld(as_Register(dst), as_Register(dst), 0);
2790 %}
// Store Pointer (64-bit) from register: *(intptr_t*)(base + (index << scale) + disp) = src.
// Uses sd / gssdx (Loongson reg+reg+simm8 form). AT/T9 are scratch.
2792 enc_class store_P_reg_enc (memory mem, mRegP src) %{
2793 MacroAssembler _masm(&cbuf);
2794 int src = $src$$reg;
2795 int base = $mem$$base;
2796 int index = $mem$$index;
2797 int scale = $mem$$scale;
2798 int disp = $mem$$disp;
// Indexed addressing: base + (index << scale) + disp.
2800 if( index != 0 ) {
2801 if ( UseLoongsonISA ){
2802 if ( Assembler::is_simm(disp, 8) ) {
2803 if ( scale == 0 ) {
2804 __ gssdx(as_Register(src), as_Register(base), as_Register(index), disp);
2805 } else {
2806 __ dsll(AT, as_Register(index), scale);
2807 __ gssdx(as_Register(src), as_Register(base), AT, disp);
2808 }
2809 } else if ( Assembler::is_simm16(disp) ) {
2810 if ( scale == 0 ) {
2811 __ daddu(AT, as_Register(base), as_Register(index));
2812 } else {
2813 __ dsll(AT, as_Register(index), scale);
2814 __ daddu(AT, as_Register(base), AT);
2815 }
2816 __ sd(as_Register(src), AT, disp);
2817 } else {
// Large disp: fold (index << scale) + disp into AT, gssdx off base.
2818 if ( scale == 0 ) {
2819 __ move(T9, disp);
2820 __ daddu(AT, as_Register(index), T9);
2821 } else {
2822 __ dsll(AT, as_Register(index), scale);
2823 __ move(T9, disp);
2824 __ daddu(AT, AT, T9);
2825 }
2826 __ gssdx(as_Register(src), as_Register(base), AT, 0);
2827 }
2828 } else { //not use loongson isa
2829 if (scale == 0) {
2830 __ daddu(AT, as_Register(base), as_Register(index));
2831 } else {
2832 __ dsll(AT, as_Register(index), scale);
2833 __ daddu(AT, as_Register(base), AT);
2834 }
2835 if( Assembler::is_simm16(disp) ) {
2836 __ sd(as_Register(src), AT, disp);
2837 } else {
2838 __ move(T9, disp);
2839 __ daddu(AT, AT, T9);
2840 __ sd(as_Register(src), AT, 0);
2841 }
2842 }
// No index register: base + disp.
2843 } else {
2844 if ( UseLoongsonISA ) {
2845 if ( Assembler::is_simm16(disp) ) {
2846 __ sd(as_Register(src), as_Register(base), disp);
2847 } else {
2848 __ move(T9, disp);
2849 __ gssdx(as_Register(src), as_Register(base), T9, 0);
2850 }
2851 } else {
2852 if( Assembler::is_simm16(disp) ) {
2853 __ sd(as_Register(src), as_Register(base), disp);
2854 } else {
2855 __ move(T9, disp);
2856 __ daddu(AT, as_Register(base), T9);
2857 __ sd(as_Register(src), AT, 0);
2858 }
2859 }
2860 }
2861 %}
// Store narrow oop (32-bit compressed pointer) from register:
// *(uint32_t*)(base + (index << scale) + disp) = src (low 32 bits).
// Uses sw / gsswx; mirrors store_P_reg_enc with 32-bit stores.
2863 enc_class store_N_reg_enc (memory mem, mRegN src) %{
2864 MacroAssembler _masm(&cbuf);
2865 int src = $src$$reg;
2866 int base = $mem$$base;
2867 int index = $mem$$index;
2868 int scale = $mem$$scale;
2869 int disp = $mem$$disp;
// Indexed addressing: base + (index << scale) + disp.
2871 if( index != 0 ) {
2872 if ( UseLoongsonISA ){
2873 if ( Assembler::is_simm(disp, 8) ) {
2874 if ( scale == 0 ) {
2875 __ gsswx(as_Register(src), as_Register(base), as_Register(index), disp);
2876 } else {
2877 __ dsll(AT, as_Register(index), scale);
2878 __ gsswx(as_Register(src), as_Register(base), AT, disp);
2879 }
2880 } else if ( Assembler::is_simm16(disp) ) {
2881 if ( scale == 0 ) {
2882 __ daddu(AT, as_Register(base), as_Register(index));
2883 } else {
2884 __ dsll(AT, as_Register(index), scale);
2885 __ daddu(AT, as_Register(base), AT);
2886 }
2887 __ sw(as_Register(src), AT, disp);
2888 } else {
// Large disp: fold (index << scale) + disp into AT, gsswx off base.
2889 if ( scale == 0 ) {
2890 __ move(T9, disp);
2891 __ daddu(AT, as_Register(index), T9);
2892 } else {
2893 __ dsll(AT, as_Register(index), scale);
2894 __ move(T9, disp);
2895 __ daddu(AT, AT, T9);
2896 }
2897 __ gsswx(as_Register(src), as_Register(base), AT, 0);
2898 }
2899 } else { //not use loongson isa
2900 if (scale == 0) {
2901 __ daddu(AT, as_Register(base), as_Register(index));
2902 } else {
2903 __ dsll(AT, as_Register(index), scale);
2904 __ daddu(AT, as_Register(base), AT);
2905 }
2906 if( Assembler::is_simm16(disp) ) {
2907 __ sw(as_Register(src), AT, disp);
2908 } else {
2909 __ move(T9, disp);
2910 __ daddu(AT, AT, T9);
2911 __ sw(as_Register(src), AT, 0);
2912 }
2913 }
// No index register: base + disp.
2914 } else {
2915 if ( UseLoongsonISA ) {
2916 if ( Assembler::is_simm16(disp) ) {
2917 __ sw(as_Register(src), as_Register(base), disp);
2918 } else {
2919 __ move(T9, disp);
2920 __ gsswx(as_Register(src), as_Register(base), T9, 0);
2921 }
2922 } else {
2923 if( Assembler::is_simm16(disp) ) {
2924 __ sw(as_Register(src), as_Register(base), disp);
2925 } else {
2926 __ move(T9, disp);
2927 __ daddu(AT, as_Register(base), T9);
2928 __ sw(as_Register(src), AT, 0);
2929 }
2930 }
2931 }
2932 %}
// Store null Pointer (64-bit zero): *(intptr_t*)(base + (index << scale) + disp) = 0.
// Stores directly from R0 (hardwired zero), so no constant load is needed.
2934 enc_class store_P_immP0_enc (memory mem) %{
2935 MacroAssembler _masm(&cbuf);
2936 int base = $mem$$base;
2937 int index = $mem$$index;
2938 int scale = $mem$$scale;
2939 int disp = $mem$$disp;
// Indexed addressing: base + (index << scale) + disp.
2941 if( index != 0 ) {
2942 if (scale == 0) {
2943 if( Assembler::is_simm16(disp) ) {
2944 if (UseLoongsonISA && Assembler::is_simm(disp, 8)) {
2945 __ gssdx(R0, as_Register(base), as_Register(index), disp);
2946 } else {
2947 __ daddu(AT, as_Register(base), as_Register(index));
2948 __ sd(R0, AT, disp);
2949 }
2950 } else {
2951 __ daddu(AT, as_Register(base), as_Register(index));
2952 __ move(T9, disp);
2953 if(UseLoongsonISA) {
2954 __ gssdx(R0, AT, T9, 0);
2955 } else {
2956 __ daddu(AT, AT, T9);
2957 __ sd(R0, AT, 0);
2958 }
2959 }
2960 } else {
// scale != 0: scaled index in AT.
2961 __ dsll(AT, as_Register(index), scale);
2962 if( Assembler::is_simm16(disp) ) {
2963 if (UseLoongsonISA && Assembler::is_simm(disp, 8)) {
2964 __ gssdx(R0, as_Register(base), AT, disp);
2965 } else {
2966 __ daddu(AT, as_Register(base), AT);
2967 __ sd(R0, AT, disp);
2968 }
2969 } else {
2970 __ daddu(AT, as_Register(base), AT);
2971 __ move(T9, disp);
2972 if (UseLoongsonISA) {
2973 __ gssdx(R0, AT, T9, 0);
2974 } else {
2975 __ daddu(AT, AT, T9);
2976 __ sd(R0, AT, 0);
2977 }
2978 }
2979 }
// No index register: base + disp.
2980 } else {
2981 if( Assembler::is_simm16(disp) ) {
2982 __ sd(R0, as_Register(base), disp);
2983 } else {
2984 __ move(T9, disp);
2985 if (UseLoongsonISA) {
2986 __ gssdx(R0, as_Register(base), T9, 0);
2987 } else {
2988 __ daddu(AT, as_Register(base), T9);
2989 __ sd(R0, AT, 0);
2990 }
2991 }
2992 }
2993 %}
// Store null narrow oop (32-bit zero): *(uint32_t*)(base + (index << scale) + disp) = 0.
// Stores directly from R0; 32-bit sw matches the compressed-oop field width.
2996 enc_class storeImmN0_enc(memory mem, ImmN0 src) %{
2997 MacroAssembler _masm(&cbuf);
2998 int base = $mem$$base;
2999 int index = $mem$$index;
3000 int scale = $mem$$scale;
3001 int disp = $mem$$disp;
// Indexed addressing: base + (index << scale) + disp.
3003 if(index!=0){
3004 if (scale == 0) {
3005 __ daddu(AT, as_Register(base), as_Register(index));
3006 } else {
3007 __ dsll(AT, as_Register(index), scale);
3008 __ daddu(AT, as_Register(base), AT);
3009 }
3011 if( Assembler::is_simm16(disp) ) {
3012 __ sw(R0, AT, disp);
3013 } else {
3014 __ move(T9, disp);
3015 __ daddu(AT, AT, T9);
3016 __ sw(R0, AT, 0);
3017 }
3018 }
// No index register: base + disp.
3019 else {
3020 if( Assembler::is_simm16(disp) ) {
3021 __ sw(R0, as_Register(base), disp);
3022 } else {
3023 __ move(T9, disp);
3024 __ daddu(AT, as_Register(base), T9);
3025 __ sw(R0, AT, 0);
3026 }
3027 }
3028 %}
// Load Long (64-bit): dst = *(int64_t*)(base + (index << scale) + disp).
// Starts with a probing lw from [base + 0] so a null base faults on the first
// instruction of the pattern (the JIT records the exception offset there).
3030 enc_class load_L_enc (mRegL dst, memory mem) %{
3031 MacroAssembler _masm(&cbuf);
3032 int base = $mem$$base;
3033 int index = $mem$$index;
3034 int scale = $mem$$scale;
3035 int disp = $mem$$disp;
3036 Register dst_reg = as_Register($dst$$reg);
3038 /*********************2013/03/27**************************
3039 * Jin: $base may contain a null object.
3040 * Server JIT force the exception_offset to be the pos of
3041 * the first instruction.
3042 * I insert such a 'null_check' at the beginning.
3043 *******************************************************/
// Implicit null check: faults here (pattern offset 0) if base is null.
3045 __ lw(AT, as_Register(base), 0);
3047 /*********************2012/10/04**************************
3048 * Error case found in SortTest
3049 * 337 b java.util.Arrays::sort1 (401 bytes)
3050 * B73:
3051 * d34 lw T4.lo, [T4 + #16] #@loadL-lo
3052 * lw T4.hi, [T4 + #16]+4 #@loadL-hi
3053 *
3054 * The original instructions generated here are :
3055 * __ lw(dst_lo, as_Register(base), disp);
3056 * __ lw(dst_hi, as_Register(base), disp + 4);
3057 *******************************************************/
// Indexed addressing: base + (index << scale) + disp.
3059 if( index != 0 ) {
3060 if (scale == 0) {
3061 __ daddu(AT, as_Register(base), as_Register(index));
3062 } else {
3063 __ dsll(AT, as_Register(index), scale);
3064 __ daddu(AT, as_Register(base), AT);
3065 }
3066 if( Assembler::is_simm16(disp) ) {
3067 __ ld(dst_reg, AT, disp);
3068 } else {
3069 __ move(T9, disp);
3070 __ daddu(AT, AT, T9);
3071 __ ld(dst_reg, AT, 0);
3072 }
// No index register: base + disp.
3073 } else {
3074 if( Assembler::is_simm16(disp) ) {
// NOTE(review): the move through AT looks redundant — ld could address off
// base directly, as the other load encodings do. Confirm before changing.
3075 __ move(AT, as_Register(base));
3076 __ ld(dst_reg, AT, disp);
3077 } else {
3078 __ move(T9, disp);
3079 __ daddu(AT, as_Register(base), T9);
3080 __ ld(dst_reg, AT, 0);
3081 }
3082 }
3083 %}
// Store Long (64-bit) from register: *(int64_t*)(base + (index << scale) + disp) = src.
// AT/T9 are scratch; all address arithmetic is 64-bit (daddu).
3085 enc_class store_L_reg_enc (memory mem, mRegL src) %{
3086 MacroAssembler _masm(&cbuf);
3087 int base = $mem$$base;
3088 int index = $mem$$index;
3089 int scale = $mem$$scale;
3090 int disp = $mem$$disp;
3091 Register src_reg = as_Register($src$$reg);
// Indexed addressing: base + (index << scale) + disp.
3093 if( index != 0 ) {
3094 if (scale == 0) {
3095 __ daddu(AT, as_Register(base), as_Register(index));
3096 } else {
3097 __ dsll(AT, as_Register(index), scale);
3098 __ daddu(AT, as_Register(base), AT);
3099 }
3100 if( Assembler::is_simm16(disp) ) {
3101 __ sd(src_reg, AT, disp);
3102 } else {
3103 __ move(T9, disp);
3104 __ daddu(AT, AT, T9);
3105 __ sd(src_reg, AT, 0);
3106 }
// No index register: base + disp.
3107 } else {
3108 if( Assembler::is_simm16(disp) ) {
// NOTE(review): the move through AT looks redundant — sd could address off
// base directly, as the other store encodings do. Confirm before changing.
3109 __ move(AT, as_Register(base));
3110 __ sd(src_reg, AT, disp);
3111 } else {
3112 __ move(T9, disp);
3113 __ daddu(AT, as_Register(base), T9);
3114 __ sd(src_reg, AT, 0);
3115 }
3116 }
3117 %}
// Store Long zero: *(int64_t*)(base + (index << scale) + disp) = 0, stored
// directly from R0 so no constant materialization is needed.
// Fix: the large-disp paths used 32-bit addu for the address adds, while the
// sibling store_L_reg_enc (identical addressing, same roles for AT/T9) uses
// daddu. On MIPS64 addu sign-extends the 32-bit sum, which corrupts the
// effective address when it does not fit in 32 bits. Use daddu, matching
// store_L_reg_enc.
3119 enc_class store_L_immL0_enc (memory mem, immL0 src) %{
3120 MacroAssembler _masm(&cbuf);
3121 int base = $mem$$base;
3122 int index = $mem$$index;
3123 int scale = $mem$$scale;
3124 int disp = $mem$$disp;
// Indexed addressing: base + (index << scale) + disp.
3126 if( index != 0 ) {
3127 if (scale == 0) {
3128 __ daddu(AT, as_Register(base), as_Register(index));
3129 } else {
3130 __ dsll(AT, as_Register(index), scale);
3131 __ daddu(AT, as_Register(base), AT);
3132 }
3133 if( Assembler::is_simm16(disp) ) {
3134 __ sd(R0, AT, disp);
3135 } else {
3136 __ move(T9, disp);
// was: addu — 64-bit add required for pointer arithmetic (cf. store_L_reg_enc).
3137 __ daddu(AT, AT, T9);
3138 __ sd(R0, AT, 0);
3139 }
// No index register: base + disp.
3140 } else {
3141 if( Assembler::is_simm16(disp) ) {
3142 __ move(AT, as_Register(base));
3143 __ sd(R0, AT, disp);
3144 } else {
3145 __ move(T9, disp);
// was: addu — 64-bit add required for pointer arithmetic (cf. store_L_reg_enc).
3146 __ daddu(AT, as_Register(base), T9);
3147 __ sd(R0, AT, 0);
3148 }
3149 }
3150 %}
enc_class load_F_enc (regF dst, memory mem) %{
  // Load a 32-bit float from [base + index<<scale + disp] into dst.
  // When UseLoongsonISA is set, the indexed FP load gslwxc1 is used;
  // its immediate offset field is only 8 bits, hence the is_simm(disp, 8)
  // check. AT and T9 are scratch registers for address formation.
  MacroAssembler _masm(&cbuf);
  int base = $mem$$base;
  int index = $mem$$index;
  int scale = $mem$$scale;
  int disp = $mem$$disp;
  FloatRegister dst = $dst$$FloatRegister;

  if( index != 0 ) {
    if( Assembler::is_simm16(disp) ) {
      if( UseLoongsonISA && Assembler::is_simm(disp, 8) ) {
        // Single-instruction form: base + index(+<<scale) + 8-bit disp.
        if (scale == 0) {
          __ gslwxc1(dst, as_Register(base), as_Register(index), disp);
        } else {
          __ dsll(AT, as_Register(index), scale);
          __ gslwxc1(dst, as_Register(base), AT, disp);
        }
      } else {
        // Compute AT = base + index<<scale, then load with 16-bit disp.
        if (scale == 0) {
          __ daddu(AT, as_Register(base), as_Register(index));
        } else {
          __ dsll(AT, as_Register(index), scale);
          __ daddu(AT, as_Register(base), AT);
        }
        __ lwc1(dst, AT, disp);
      }
    } else {
      // Displacement does not fit in 16 bits: materialize it in T9.
      if (scale == 0) {
        __ daddu(AT, as_Register(base), as_Register(index));
      } else {
        __ dsll(AT, as_Register(index), scale);
        __ daddu(AT, as_Register(base), AT);
      }
      __ move(T9, disp);
      if( UseLoongsonISA ) {
        __ gslwxc1(dst, AT, T9, 0);
      } else {
        __ daddu(AT, AT, T9);
        __ lwc1(dst, AT, 0);
      }
    }
  } else {
    // No index register: address is base + disp.
    if( Assembler::is_simm16(disp) ) {
      __ lwc1(dst, as_Register(base), disp);
    } else {
      __ move(T9, disp);
      if( UseLoongsonISA ) {
        __ gslwxc1(dst, as_Register(base), T9, 0);
      } else {
        __ daddu(AT, as_Register(base), T9);
        __ lwc1(dst, AT, 0);
      }
    }
  }
%}
enc_class store_F_reg_enc (memory mem, regF src) %{
  // Store the 32-bit float src to [base + index<<scale + disp].
  // When UseLoongsonISA is set, the indexed FP store gsswxc1 is used;
  // its immediate offset field is only 8 bits, hence the is_simm(disp, 8)
  // check. AT and T9 are scratch registers for address formation.
  MacroAssembler _masm(&cbuf);
  int base = $mem$$base;
  int index = $mem$$index;
  int scale = $mem$$scale;
  int disp = $mem$$disp;
  FloatRegister src = $src$$FloatRegister;

  if( index != 0 ) {
    if( Assembler::is_simm16(disp) ) {
      if( UseLoongsonISA && Assembler::is_simm(disp, 8) ) {
        if (scale == 0) {
          __ gsswxc1(src, as_Register(base), as_Register(index), disp);
        } else {
          __ dsll(AT, as_Register(index), scale);
          __ gsswxc1(src, as_Register(base), AT, disp);
        }
      } else {
        if (scale == 0) {
          __ daddu(AT, as_Register(base), as_Register(index));
        } else {
          __ dsll(AT, as_Register(index), scale);
          __ daddu(AT, as_Register(base), AT);
        }
        __ swc1(src, AT, disp);
      }
    } else {
      // Displacement does not fit in 16 bits: materialize it in T9.
      if (scale == 0) {
        __ daddu(AT, as_Register(base), as_Register(index));
      } else {
        __ dsll(AT, as_Register(index), scale);
        __ daddu(AT, as_Register(base), AT);
      }
      __ move(T9, disp);
      if( UseLoongsonISA ) {
        __ gsswxc1(src, AT, T9, 0);
      } else {
        __ daddu(AT, AT, T9);
        __ swc1(src, AT, 0);
      }
    }
  } else {
    if( Assembler::is_simm16(disp) ) {
      __ swc1(src, as_Register(base), disp);
    } else {
      __ move(T9, disp);
      if( UseLoongsonISA ) {
        // BUG FIX: was gslwxc1 (indexed FP *load*), which read memory
        // into src instead of storing src. Use the indexed store gsswxc1,
        // matching the other Loongson paths in this enc_class.
        __ gsswxc1(src, as_Register(base), T9, 0);
      } else {
        __ daddu(AT, as_Register(base), T9);
        __ swc1(src, AT, 0);
      }
    }
  }
%}
enc_class load_D_enc (regD dst, memory mem) %{
  // Load a 64-bit double from [base + index<<scale + disp] into dst_reg.
  // When UseLoongsonISA is set, the indexed FP load gsldxc1 is used;
  // its immediate offset field is only 8 bits, hence the is_simm(disp, 8)
  // check. AT and T9 are scratch registers for address formation.
  MacroAssembler _masm(&cbuf);
  int base = $mem$$base;
  int index = $mem$$index;
  int scale = $mem$$scale;
  int disp = $mem$$disp;
  FloatRegister dst_reg = as_FloatRegister($dst$$reg);

  if( index != 0 ) {
    if( Assembler::is_simm16(disp) ) {
      if( UseLoongsonISA && Assembler::is_simm(disp, 8) ) {
        if (scale == 0) {
          __ gsldxc1(dst_reg, as_Register(base), as_Register(index), disp);
        } else {
          __ dsll(AT, as_Register(index), scale);
          __ gsldxc1(dst_reg, as_Register(base), AT, disp);
        }
      } else {
        if (scale == 0) {
          __ daddu(AT, as_Register(base), as_Register(index));
        } else {
          __ dsll(AT, as_Register(index), scale);
          __ daddu(AT, as_Register(base), AT);
        }
        __ ldc1(dst_reg, AT, disp);
      }
    } else {
      // Displacement does not fit in 16 bits: materialize it in T9.
      if (scale == 0) {
        __ daddu(AT, as_Register(base), as_Register(index));
      } else {
        __ dsll(AT, as_Register(index), scale);
        __ daddu(AT, as_Register(base), AT);
      }
      __ move(T9, disp);
      if( UseLoongsonISA ) {
        __ gsldxc1(dst_reg, AT, T9, 0);
      } else {
        // BUG FIX: was addu (32-bit add), which sign-extends the low
        // 32 bits and corrupts 64-bit addresses. Use daddu, as the
        // other memory enc_classes (e.g. load_F_enc) do.
        __ daddu(AT, AT, T9);
        __ ldc1(dst_reg, AT, 0);
      }
    }
  } else {
    if( Assembler::is_simm16(disp) ) {
      __ ldc1(dst_reg, as_Register(base), disp);
    } else {
      __ move(T9, disp);
      if( UseLoongsonISA ) {
        __ gsldxc1(dst_reg, as_Register(base), T9, 0);
      } else {
        // BUG FIX: addu -> daddu (64-bit address arithmetic).
        __ daddu(AT, as_Register(base), T9);
        __ ldc1(dst_reg, AT, 0);
      }
    }
  }
%}
enc_class store_D_reg_enc (memory mem, regD src) %{
  // Store the 64-bit double src_reg to [base + index<<scale + disp].
  // When UseLoongsonISA is set, the indexed FP store gssdxc1 is used;
  // its immediate offset field is only 8 bits, hence the is_simm(disp, 8)
  // check. AT and T9 are scratch registers for address formation.
  MacroAssembler _masm(&cbuf);
  int base = $mem$$base;
  int index = $mem$$index;
  int scale = $mem$$scale;
  int disp = $mem$$disp;
  FloatRegister src_reg = as_FloatRegister($src$$reg);

  if( index != 0 ) {
    if( Assembler::is_simm16(disp) ) {
      if( UseLoongsonISA && Assembler::is_simm(disp, 8) ) {
        if (scale == 0) {
          __ gssdxc1(src_reg, as_Register(base), as_Register(index), disp);
        } else {
          __ dsll(AT, as_Register(index), scale);
          __ gssdxc1(src_reg, as_Register(base), AT, disp);
        }
      } else {
        if (scale == 0) {
          __ daddu(AT, as_Register(base), as_Register(index));
        } else {
          __ dsll(AT, as_Register(index), scale);
          __ daddu(AT, as_Register(base), AT);
        }
        __ sdc1(src_reg, AT, disp);
      }
    } else {
      // Displacement does not fit in 16 bits: materialize it in T9.
      if (scale == 0) {
        __ daddu(AT, as_Register(base), as_Register(index));
      } else {
        __ dsll(AT, as_Register(index), scale);
        __ daddu(AT, as_Register(base), AT);
      }
      __ move(T9, disp);
      if( UseLoongsonISA ) {
        __ gssdxc1(src_reg, AT, T9, 0);
      } else {
        // BUG FIX: was addu (32-bit add), which sign-extends the low
        // 32 bits and corrupts 64-bit addresses. Use daddu, as the
        // other memory enc_classes (e.g. store_F_reg_enc) do.
        __ daddu(AT, AT, T9);
        __ sdc1(src_reg, AT, 0);
      }
    }
  } else {
    if( Assembler::is_simm16(disp) ) {
      __ sdc1(src_reg, as_Register(base), disp);
    } else {
      __ move(T9, disp);
      if( UseLoongsonISA ) {
        __ gssdxc1(src_reg, as_Register(base), T9, 0);
      } else {
        // BUG FIX: addu -> daddu (64-bit address arithmetic).
        __ daddu(AT, as_Register(base), T9);
        __ sdc1(src_reg, AT, 0);
      }
    }
  }
%}
enc_class Java_To_Runtime (method meth) %{ // CALL Java_To_Runtime, Java_To_Runtime_Leaf
  // Call into the VM runtime: materialize the entry point in T9 with a
  // patchable 48-bit constant sequence, then call through T9.
  MacroAssembler _masm(&cbuf);
  // This is the instruction starting address for relocation info.
  __ block_comment("Java_To_Runtime");
  cbuf.set_insts_mark();
  __ relocate(relocInfo::runtime_call_type);

  __ patchable_set48(T9, (long)$meth$$method);
  __ jalr(T9);
  __ nop();   // fills the jalr branch delay slot
%}
enc_class Java_Static_Call (method meth) %{ // JAVA STATIC CALL
  // CALL to fixup routine. Fixup routine uses ScopeDesc info to determine
  // who we intended to call.
  MacroAssembler _masm(&cbuf);
  cbuf.set_insts_mark();

  // Choose the relocation type: a runtime stub when no resolved _method,
  // otherwise an optimized-virtual or plain static call site.
  if ( !_method ) {
    __ relocate(relocInfo::runtime_call_type);
  } else if(_optimized_virtual) {
    __ relocate(relocInfo::opt_virtual_call_type);
  } else {
    __ relocate(relocInfo::static_call_type);
  }

  // Patchable 48-bit target load + indirect call through T9.
  __ patchable_set48(T9, $meth$$method);
  __ jalr(T9);
  __ nop();   // branch delay slot
  if( _method ) { // Emit stub for static call
    emit_java_to_interp(cbuf);
  }
%}
3411 /*
3412 * [Ref: LIR_Assembler::ic_call() ]
3413 */
enc_class Java_Dynamic_Call (method meth) %{ // JAVA DYNAMIC CALL
  MacroAssembler _masm(&cbuf);
  __ block_comment("Java_Dynamic_Call");
  // Emit the inline-cache call sequence for the target method.
  __ ic_call((address)$meth$$method);
%}
enc_class Set_Flags_After_Fast_Lock_Unlock(FlagsReg cr) %{
  // Convert AT into a boolean-style flags value:
  //   flags = 0          if AT == 0
  //   flags = 0xFFFFFFFF otherwise
  // NOTE(review): AT is presumably the success indicator left by the
  // preceding fast_lock/fast_unlock sequence — confirm against callers.
  Register flags = $cr$$Register;
  Label L;

  MacroAssembler _masm(&cbuf);

  __ addu(flags, R0, R0);   // flags = 0
  __ beq(AT, R0, L);
  __ delayed()->nop();      // branch delay slot
  __ move(flags, 0xFFFFFFFF);
  __ bind(L);
%}
enc_class enc_PartialSubtypeCheck(mRegP result, mRegP sub, mRegP super, mRegI tmp) %{
  // Slow-path subtype check: sets result = 0 when sub is a subtype of
  // super, result = 1 on a miss. T9 is used as an extra scratch register.
  Register result = $result$$Register;
  Register sub = $sub$$Register;
  Register super = $super$$Register;
  Register length = $tmp$$Register;
  Register tmp = T9;
  Label miss;

  /* 2012/9/28 Jin: result may be the same as sub
   * 47c B40: # B21 B41 <- B20 Freq: 0.155379
   * 47c partialSubtypeCheck result=S1, sub=S1, super=S3, length=S0
   * 4bc mov S2, NULL #@loadConP
   * 4c0 beq S1, S2, B21 #@branchConP P=0.999999 C=-1.000000
   */
  MacroAssembler _masm(&cbuf);
  Label done;
  __ check_klass_subtype_slow_path(sub, super, length, tmp,
                                   NULL, &miss,
                                   /*set_cond_codes:*/ true);
  /* 2013/7/22 Jin: Refer to X86_64's RDI */
  __ move(result, 0);   // hit: result = 0
  __ b(done);
  __ nop();             // branch delay slot

  __ bind(miss);
  __ move(result, 1);   // miss: result = 1
  __ bind(done);
%}
3463 %}
3466 //---------MIPS FRAME--------------------------------------------------------------
3467 // Definition of frame structure and management information.
3468 //
3469 // S T A C K L A Y O U T Allocators stack-slot number
3470 // | (to get allocators register number
3471 // G Owned by | | v add SharedInfo::stack0)
3472 // r CALLER | |
3473 // o | +--------+ pad to even-align allocators stack-slot
3474 // w V | pad0 | numbers; owned by CALLER
3475 // t -----------+--------+----> Matcher::_in_arg_limit, unaligned
3476 // h ^ | in | 5
3477 // | | args | 4 Holes in incoming args owned by SELF
3478 // | | old | | 3
3479 // | | SP-+--------+----> Matcher::_old_SP, even aligned
3480 // v | | ret | 3 return address
3481 // Owned by +--------+
3482 // Self | pad2 | 2 pad to align old SP
3483 // | +--------+ 1
3484 // | | locks | 0
3485 // | +--------+----> SharedInfo::stack0, even aligned
3486 // | | pad1 | 11 pad to align new SP
3487 // | +--------+
3488 // | | | 10
3489 // | | spills | 9 spills
3490 // V | | 8 (pad0 slot for callee)
3491 // -----------+--------+----> Matcher::_out_arg_limit, unaligned
3492 // ^ | out | 7
3493 // | | args | 6 Holes in outgoing args owned by CALLEE
3494 // Owned by new | |
3495 // Callee SP-+--------+----> Matcher::_new_SP, even aligned
3496 // | |
3497 //
3498 // Note 1: Only region 8-11 is determined by the allocator. Region 0-5 is
3499 // known from SELF's arguments and the Java calling convention.
3500 // Region 6-7 is determined per call site.
3501 // Note 2: If the calling convention leaves holes in the incoming argument
3502 // area, those holes are owned by SELF. Holes in the outgoing area
// are owned by the CALLEE. Holes should not be necessary in the
3504 // incoming area, as the Java calling convention is completely under
3505 // the control of the AD file. Doubles can be sorted and packed to
// avoid holes. Holes in the outgoing arguments may be necessary for
3507 // varargs C calling conventions.
3508 // Note 3: Region 0-3 is even aligned, with pad2 as needed. Region 3-5 is
3509 // even aligned with pad0 as needed.
3510 // Region 6 is even aligned. Region 6-7 is NOT even aligned;
3511 // region 6-11 is even aligned; it may be padded out more so that
3512 // the region from SP to FP meets the minimum stack alignment.
3513 // Note 4: For I2C adapters, the incoming FP may not meet the minimum stack
3514 // alignment. Region 11, pad1, may be dynamically extended so that
3515 // SP meets the minimum alignment.
frame %{
  // The MIPS stack grows toward lower addresses.
  stack_direction(TOWARDS_LOW);

  // These two registers define part of the calling convention
  // between compiled code and the interpreter.
  // SEE StartI2CNode::calling_convention & StartC2INode::calling_convention & StartOSRNode::calling_convention
  // for more information. by yjl 3/16/2006

  inline_cache_reg(T1); // Inline Cache Register
  interpreter_method_oop_reg(S3); // Method Oop Register when calling interpreter
  /*
  inline_cache_reg(T1); // Inline Cache Register or methodOop for I2C
  interpreter_arg_ptr_reg(A0); // Argument pointer for I2C adapters
  */

  // Optional: name the operand used by cisc-spilling to access [stack_pointer + offset]
  cisc_spilling_operand_name(indOffset32);

  // Number of stack slots consumed by locking an object
  // generate Compile::sync_stack_slots
#ifdef _LP64
  sync_stack_slots(2);
#else
  sync_stack_slots(1);
#endif

  frame_pointer(SP);

  // Interpreter stores its frame pointer in a register which is
  // stored to the stack by I2CAdaptors.
  // I2CAdaptors convert from interpreted java to compiled java.

  interpreter_frame_pointer(FP);

  // generate Matcher::stack_alignment
  stack_alignment(StackAlignmentInBytes); //wordSize = sizeof(char*);

  // Number of stack slots between incoming argument block and the start of
  // a new frame. The PROLOG must add this many slots to the stack. The
  // EPILOG must remove this many slots. Intel needs one slot for
  // return address.
  // generate Matcher::in_preserve_stack_slots
  //in_preserve_stack_slots(VerifyStackAtCalls + 2); //Now VerifyStackAtCalls is defined as false ! Leave one stack slot for ra and fp
  in_preserve_stack_slots(4); //Now VerifyStackAtCalls is defined as false ! Leave two stack slots for ra and fp

  // Number of outgoing stack slots killed above the out_preserve_stack_slots
  // for calls to C. Supports the var-args backing area for register parms.
  varargs_C_out_slots_killed(0);

  // The after-PROLOG location of the return address. Location of
  // return address specifies a type (REG or STACK) and a number
  // representing the register number (i.e. - use a register name) or
  // stack slot.
  // Ret Addr is on stack in slot 0 if no locks or verification or alignment.
  // Otherwise, it is above the locks and verification slot and alignment word
  //return_addr(STACK -1+ round_to(1+VerifyStackAtCalls+Compile::current()->sync()*Compile::current()->sync_stack_slots(),WordsPerLong));
  return_addr(REG RA);

  // Body of function which returns an integer array locating
  // arguments either in registers or in stack slots. Passed an array
  // of ideal registers called "sig" and a "length" count. Stack-slot
  // offsets are based on outgoing arguments, i.e. a CALLER setting up
  // arguments for a CALLEE. Incoming stack arguments are
  // automatically biased by the preserve_stack_slots field above.

  // will generated to Matcher::calling_convention(OptoRegPair *sig, uint length, bool is_outgoing)
  // StartNode::calling_convention call this. by yjl 3/16/2006
  calling_convention %{
    SharedRuntime::java_calling_convention(sig_bt, regs, length, false);
  %}

  // Body of function which returns an integer array locating
  // arguments either in registers or in stack slots. Passed an array
  // of ideal registers called "sig" and a "length" count. Stack-slot
  // offsets are based on outgoing arguments, i.e. a CALLER setting up
  // arguments for a CALLEE. Incoming stack arguments are
  // automatically biased by the preserve_stack_slots field above.

  // SEE CallRuntimeNode::calling_convention for more information. by yjl 3/16/2006
  c_calling_convention %{
    (void) SharedRuntime::c_calling_convention(sig_bt, regs, /*regs2=*/NULL, length);
  %}

  // Location of C & interpreter return values
  // register(s) contain(s) return value for Op_StartI2C and Op_StartOSR.
  // SEE Matcher::match. by yjl 3/16/2006
  // Integer/pointer results in V0(:V0_H); float/double results in F0(:F0_H).
  c_return_value %{
    assert( ideal_reg >= Op_RegI && ideal_reg <= Op_RegL, "only return normal values" );
    /* -- , -- , Op_RegN, Op_RegI, Op_RegP, Op_RegF, Op_RegD, Op_RegL */
    static int lo[Op_RegL+1] = { 0, 0, V0_num, V0_num, V0_num, F0_num, F0_num, V0_num };
    static int hi[Op_RegL+1] = { 0, 0, OptoReg::Bad, OptoReg::Bad, V0_H_num, OptoReg::Bad, F0_H_num, V0_H_num };
    return OptoRegPair(hi[ideal_reg],lo[ideal_reg]);
  %}

  // Location of return values
  // register(s) contain(s) return value for Op_StartC2I and Op_Start.
  // SEE Matcher::match. by yjl 3/16/2006
  // Same register assignment as the C convention above.
  return_value %{
    assert( ideal_reg >= Op_RegI && ideal_reg <= Op_RegL, "only return normal values" );
    /* -- , -- , Op_RegN, Op_RegI, Op_RegP, Op_RegF, Op_RegD, Op_RegL */
    static int lo[Op_RegL+1] = { 0, 0, V0_num, V0_num, V0_num, F0_num, F0_num, V0_num };
    static int hi[Op_RegL+1] = { 0, 0, OptoReg::Bad, OptoReg::Bad, V0_H_num, OptoReg::Bad, F0_H_num, V0_H_num};
    return OptoRegPair(hi[ideal_reg],lo[ideal_reg]);
  %}

%}
3633 //----------ATTRIBUTES---------------------------------------------------------
3634 //----------Operand Attributes-------------------------------------------------
// Default attributes; individual operands/instructions may override them.
op_attrib op_cost(0); // Required cost attribute

//----------Instruction Attributes---------------------------------------------
ins_attrib ins_cost(100); // Required cost attribute
ins_attrib ins_size(32); // Required size attribute (in bits)
ins_attrib ins_pc_relative(0); // Required PC Relative flag
ins_attrib ins_short_branch(0); // Required flag: is this instruction a
                                // non-matching short branch variant of some
                                // long branch?
ins_attrib ins_alignment(4); // Required alignment attribute (must be a power of 2)
                             // specifies the alignment that some part of the instruction (not
                             // necessarily the start) requires. If > 1, a compute_padding()
                             // function must be provided for the instruction
3649 //----------OPERANDS-----------------------------------------------------------
3650 // Operand definitions must precede instruction definitions for correct parsing
3651 // in the ADLC because operands constitute user defined types which are used in
3652 // instruction definitions.
3654 // Vectors
// 64-bit vector operand, allocated in a double FP register.
operand vecD() %{
  constraint(ALLOC_IN_RC(dbl_reg));
  match(VecD);

  format %{ %}
  interface(REG_INTER);
%}

// Flags register, used as output of compare instructions
operand FlagsReg() %{
  constraint(ALLOC_IN_RC(mips_flags));
  match(RegFlags);

  format %{ "EFLAGS" %}
  interface(REG_INTER);
%}
3672 //----------Simple Operands----------------------------------------------------
3673 //TODO: Should we need to define some more special immediate number ?
3674 // Immediate Operands
3675 // Integer Immediate
// Generic 32-bit integer immediate.
operand immI() %{
  match(ConI);
  //TODO: should not match immI8 here LEE
  match(immI8);

  op_cost(20);
  format %{ %}
  interface(CONST_INTER);
%}

// Long Immediate 8-bit
operand immL8()
%{
  predicate(-0x80L <= n->get_long() && n->get_long() < 0x80L);
  match(ConL);

  op_cost(5);
  format %{ %}
  interface(CONST_INTER);
%}

// Constant for test vs zero
operand immI0() %{
  predicate(n->get_int() == 0);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Constant for increment
operand immI1() %{
  predicate(n->get_int() == 1);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Constant for decrement
operand immI_M1() %{
  predicate(n->get_int() == -1);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Largest 32-bit signed value (Integer.MAX_VALUE).
operand immI_MaxI() %{
  predicate(n->get_int() == 2147483647);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Valid scale values for addressing modes
operand immI2() %{
  predicate(0 <= n->get_int() && (n->get_int() <= 3));
  match(ConI);

  format %{ %}
  interface(CONST_INTER);
%}

// 8-bit signed immediate.
operand immI8() %{
  predicate((-128 <= n->get_int()) && (n->get_int() <= 127));
  match(ConI);

  op_cost(5);
  format %{ %}
  interface(CONST_INTER);
%}

// 16-bit signed immediate (fits the MIPS I-type imm field).
operand immI16() %{
  predicate((-32768 <= n->get_int()) && (n->get_int() <= 32767));
  match(ConI);

  op_cost(10);
  format %{ %}
  interface(CONST_INTER);
%}

// Constant for long shifts
operand immI_32() %{
  predicate( n->get_int() == 32 );
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

operand immI_63() %{
  predicate( n->get_int() == 63 );
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Shift amount within one 32-bit word.
operand immI_0_31() %{
  predicate( n->get_int() >= 0 && n->get_int() <= 31 );
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Operand for non-negative integer mask
operand immI_nonneg_mask() %{
  predicate( (n->get_int() >= 0) && (Assembler::is_int_mask(n->get_int()) != -1) );
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

operand immI_32_63() %{
  predicate( n->get_int() >= 32 && n->get_int() <= 63 );
  match(ConI);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}

// Range shifted by one relative to immI16: the *negated* value fits in
// simm16, so a subtract can be emitted as an add of -imm.
operand immI16_sub() %{
  predicate((-32767 <= n->get_int()) && (n->get_int() <= 32768));
  match(ConI);

  op_cost(10);
  format %{ %}
  interface(CONST_INTER);
%}

operand immI_0_32767() %{
  predicate( n->get_int() >= 0 && n->get_int() <= 32767 );
  match(ConI);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}

// Unsigned 16-bit range (fits a zero-extended immediate).
operand immI_0_65535() %{
  predicate( n->get_int() >= 0 && n->get_int() <= 65535 );
  match(ConI);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}

operand immI_1() %{
  predicate( n->get_int() == 1 );
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

operand immI_2() %{
  predicate( n->get_int() == 2 );
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

operand immI_3() %{
  predicate( n->get_int() == 3 );
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

operand immI_7() %{
  predicate( n->get_int() == 7 );
  match(ConI);

  format %{ %}
  interface(CONST_INTER);
%}

// Immediates for special shifts (sign extend)

// Constants for increment
operand immI_16() %{
  predicate( n->get_int() == 16 );
  match(ConI);

  format %{ %}
  interface(CONST_INTER);
%}

operand immI_24() %{
  predicate( n->get_int() == 24 );
  match(ConI);

  format %{ %}
  interface(CONST_INTER);
%}

// Constant for byte-wide masking
operand immI_255() %{
  predicate( n->get_int() == 255 );
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Constant for halfword-wide masking.
operand immI_65535() %{
  predicate( n->get_int() == 65535 );
  match(ConI);

  op_cost(5);
  format %{ %}
  interface(CONST_INTER);
%}

operand immI_65536() %{
  predicate( n->get_int() == 65536 );
  match(ConI);

  op_cost(5);
  format %{ %}
  interface(CONST_INTER);
%}

operand immI_M65536() %{
  predicate( n->get_int() == -65536 );
  match(ConI);

  op_cost(5);
  format %{ %}
  interface(CONST_INTER);
%}
// Pointer Immediate
operand immP() %{
  match(ConP);

  op_cost(10);
  format %{ %}
  interface(CONST_INTER);
%}

// NULL Pointer Immediate
operand immP0() %{
  predicate( n->get_ptr() == 0 );
  match(ConP);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}

// Pointer Immediate: 64-bit, materialized with a set sequence.
operand immP_set() %{
  match(ConP);

  op_cost(5);
  // formats are generated automatically for constants and base registers
  format %{ %}
  interface(CONST_INTER);
%}

// Pointer Immediate: 64-bit, loaded from the constant table
// (oops, or constants expensive to materialize inline).
operand immP_load() %{
  predicate(n->bottom_type()->isa_oop_ptr() || (MacroAssembler::insts_for_set64(n->get_ptr()) > 3));
  match(ConP);

  op_cost(5);
  // formats are generated automatically for constants and base registers
  format %{ %}
  interface(CONST_INTER);
%}

// Pointer Immediate: 64-bit non-oop constant cheap to materialize
// (at most 3 instructions).
operand immP_no_oop_cheap() %{
  predicate(!n->bottom_type()->isa_oop_ptr() && (MacroAssembler::insts_for_set64(n->get_ptr()) <= 3));
  match(ConP);

  op_cost(5);
  // formats are generated automatically for constants and base registers
  format %{ %}
  interface(CONST_INTER);
%}

// Pointer for polling page
operand immP_poll() %{
  predicate(n->get_ptr() != 0 && n->get_ptr() == (intptr_t)os::get_polling_page());
  match(ConP);
  op_cost(5);

  format %{ %}
  interface(CONST_INTER);
%}

// Narrow (compressed) pointer immediate.
operand immN() %{
  match(ConN);

  op_cost(10);
  format %{ %}
  interface(CONST_INTER);
%}

// Narrow (compressed) klass pointer immediate.
operand immNKlass() %{
  match(ConNKlass);

  op_cost(10);
  format %{ %}
  interface(CONST_INTER);
%}

// NULL Pointer Immediate
operand immN0() %{
  predicate(n->get_narrowcon() == 0);
  match(ConN);

  op_cost(5);
  format %{ %}
  interface(CONST_INTER);
%}
// Long Immediate
operand immL() %{
  match(ConL);

  op_cost(20);
  format %{ %}
  interface(CONST_INTER);
%}

// Long Immediate zero
operand immL0() %{
  predicate( n->get_long() == 0L );
  match(ConL);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}

operand immL7() %{
  predicate( n->get_long() == 7L );
  match(ConL);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}

operand immL_M1() %{
  predicate( n->get_long() == -1L );
  match(ConL);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}

// bit 0..2 zero
operand immL_M8() %{
  predicate( n->get_long() == -8L );
  match(ConL);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}

// bit 2 zero
operand immL_M5() %{
  predicate( n->get_long() == -5L );
  match(ConL);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}

// bit 1..2 zero
operand immL_M7() %{
  predicate( n->get_long() == -7L );
  match(ConL);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}

// bit 0..1 zero
operand immL_M4() %{
  predicate( n->get_long() == -4L );
  match(ConL);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}

// bit 3..6 zero
operand immL_M121() %{
  predicate( n->get_long() == -121L );
  match(ConL);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}

// Long immediate from 0 to 127.
// Used for a shorter form of long mul by 10.
operand immL_127() %{
  predicate((0 <= n->get_long()) && (n->get_long() <= 127));
  match(ConL);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}

// Operand for non-negative long mask
operand immL_nonneg_mask() %{
  predicate( (n->get_long() >= 0) && (Assembler::is_jlong_mask(n->get_long()) != -1) );
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

operand immL_0_65535() %{
  predicate( n->get_long() >= 0 && n->get_long() <= 65535 );
  match(ConL);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}

// Long Immediate: cheap (materialize in <= 3 instructions)
operand immL_cheap() %{
  predicate(MacroAssembler::insts_for_set64(n->get_long()) <= 3);
  match(ConL);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}

// Long Immediate: expensive (materialize in > 3 instructions)
operand immL_expensive() %{
  predicate(MacroAssembler::insts_for_set64(n->get_long()) > 3);
  match(ConL);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}

// 16-bit signed long immediate (fits the MIPS I-type imm field).
operand immL16() %{
  predicate((-32768 <= n->get_long()) && (n->get_long() <= 32767));
  match(ConL);

  op_cost(10);
  format %{ %}
  interface(CONST_INTER);
%}

// Range shifted by one relative to immL16: the negated value fits in
// simm16, so a subtract can be emitted as an add of -imm.
operand immL16_sub() %{
  predicate((-32767 <= n->get_long()) && (n->get_long() <= 32768));
  match(ConL);

  op_cost(10);
  format %{ %}
  interface(CONST_INTER);
%}

// Long Immediate: low 32-bit mask
operand immL_32bits() %{
  predicate(n->get_long() == 0xFFFFFFFFL);
  match(ConL);
  op_cost(20);

  format %{ %}
  interface(CONST_INTER);
%}

// Long Immediate 32-bit signed
operand immL32()
%{
  predicate(n->get_long() == (int) (n->get_long()));
  match(ConL);

  op_cost(15);
  format %{ %}
  interface(CONST_INTER);
%}
//single-precision floating-point zero (compared by bit pattern)
operand immF0() %{
  predicate(jint_cast(n->getf()) == 0);
  match(ConF);

  op_cost(5);
  format %{ %}
  interface(CONST_INTER);
%}

//single-precision floating-point immediate
operand immF() %{
  match(ConF);

  op_cost(20);
  format %{ %}
  interface(CONST_INTER);
%}

//double-precision floating-point zero (compared by bit pattern)
operand immD0() %{
  predicate(jlong_cast(n->getd()) == 0);
  match(ConD);

  op_cost(5);
  format %{ %}
  interface(CONST_INTER);
%}

//double-precision floating-point immediate
operand immD() %{
  match(ConD);

  op_cost(20);
  format %{ %}
  interface(CONST_INTER);
%}
// Register Operands
// Integer Register
// mRegI is the generic integer register class; the mXxRegI operands
// below pin allocation to one specific register for instructions that
// require a fixed register.
operand mRegI() %{
  constraint(ALLOC_IN_RC(int_reg));
  match(RegI);

  format %{ %}
  interface(REG_INTER);
%}

// Integer register excluding the Ax registers.
operand no_Ax_mRegI() %{
  constraint(ALLOC_IN_RC(no_Ax_int_reg));
  match(RegI);
  match(mRegI);

  format %{ %}
  interface(REG_INTER);
%}

operand mS0RegI() %{
  constraint(ALLOC_IN_RC(s0_reg));
  match(RegI);
  match(mRegI);

  format %{ "S0" %}
  interface(REG_INTER);
%}

operand mS1RegI() %{
  constraint(ALLOC_IN_RC(s1_reg));
  match(RegI);
  match(mRegI);

  format %{ "S1" %}
  interface(REG_INTER);
%}

operand mS2RegI() %{
  constraint(ALLOC_IN_RC(s2_reg));
  match(RegI);
  match(mRegI);

  format %{ "S2" %}
  interface(REG_INTER);
%}

operand mS3RegI() %{
  constraint(ALLOC_IN_RC(s3_reg));
  match(RegI);
  match(mRegI);

  format %{ "S3" %}
  interface(REG_INTER);
%}

operand mS4RegI() %{
  constraint(ALLOC_IN_RC(s4_reg));
  match(RegI);
  match(mRegI);

  format %{ "S4" %}
  interface(REG_INTER);
%}

operand mS5RegI() %{
  constraint(ALLOC_IN_RC(s5_reg));
  match(RegI);
  match(mRegI);

  format %{ "S5" %}
  interface(REG_INTER);
%}

operand mS6RegI() %{
  constraint(ALLOC_IN_RC(s6_reg));
  match(RegI);
  match(mRegI);

  format %{ "S6" %}
  interface(REG_INTER);
%}

operand mS7RegI() %{
  constraint(ALLOC_IN_RC(s7_reg));
  match(RegI);
  match(mRegI);

  format %{ "S7" %}
  interface(REG_INTER);
%}

operand mT0RegI() %{
  constraint(ALLOC_IN_RC(t0_reg));
  match(RegI);
  match(mRegI);

  format %{ "T0" %}
  interface(REG_INTER);
%}

operand mT1RegI() %{
  constraint(ALLOC_IN_RC(t1_reg));
  match(RegI);
  match(mRegI);

  format %{ "T1" %}
  interface(REG_INTER);
%}

operand mT2RegI() %{
  constraint(ALLOC_IN_RC(t2_reg));
  match(RegI);
  match(mRegI);

  format %{ "T2" %}
  interface(REG_INTER);
%}

operand mT3RegI() %{
  constraint(ALLOC_IN_RC(t3_reg));
  match(RegI);
  match(mRegI);

  format %{ "T3" %}
  interface(REG_INTER);
%}

operand mT8RegI() %{
  constraint(ALLOC_IN_RC(t8_reg));
  match(RegI);
  match(mRegI);

  format %{ "T8" %}
  interface(REG_INTER);
%}

operand mT9RegI() %{
  constraint(ALLOC_IN_RC(t9_reg));
  match(RegI);
  match(mRegI);

  format %{ "T9" %}
  interface(REG_INTER);
%}

operand mA0RegI() %{
  constraint(ALLOC_IN_RC(a0_reg));
  match(RegI);
  match(mRegI);

  format %{ "A0" %}
  interface(REG_INTER);
%}

operand mA1RegI() %{
  constraint(ALLOC_IN_RC(a1_reg));
  match(RegI);
  match(mRegI);

  format %{ "A1" %}
  interface(REG_INTER);
%}

operand mA2RegI() %{
  constraint(ALLOC_IN_RC(a2_reg));
  match(RegI);
  match(mRegI);

  format %{ "A2" %}
  interface(REG_INTER);
%}

operand mA3RegI() %{
  constraint(ALLOC_IN_RC(a3_reg));
  match(RegI);
  match(mRegI);

  format %{ "A3" %}
  interface(REG_INTER);
%}

operand mA4RegI() %{
  constraint(ALLOC_IN_RC(a4_reg));
  match(RegI);
  match(mRegI);

  format %{ "A4" %}
  interface(REG_INTER);
%}

operand mA5RegI() %{
  constraint(ALLOC_IN_RC(a5_reg));
  match(RegI);
  match(mRegI);

  format %{ "A5" %}
  interface(REG_INTER);
%}

operand mA6RegI() %{
  constraint(ALLOC_IN_RC(a6_reg));
  match(RegI);
  match(mRegI);

  format %{ "A6" %}
  interface(REG_INTER);
%}

operand mA7RegI() %{
  constraint(ALLOC_IN_RC(a7_reg));
  match(RegI);
  match(mRegI);

  format %{ "A7" %}
  interface(REG_INTER);
%}

operand mV0RegI() %{
  constraint(ALLOC_IN_RC(v0_reg));
  match(RegI);
  match(mRegI);

  format %{ "V0" %}
  interface(REG_INTER);
%}

operand mV1RegI() %{
  constraint(ALLOC_IN_RC(v1_reg));
  match(RegI);
  match(mRegI);

  format %{ "V1" %}
  interface(REG_INTER);
%}
// --- Narrow (compressed) oop register operands -------------------------------
// mRegN is the general operand for ideal RegN (32-bit compressed oop) values;
// it allocates from the full integer register class. The xx_RegN operands
// below pin a RegN value to one specific register (see the integer operands
// above for the same pattern).
4467 operand mRegN() %{
4468 constraint(ALLOC_IN_RC(int_reg));
4469 match(RegN);
4471 format %{ %}
4472 interface(REG_INTER);
4473 %}
4475 operand t0_RegN() %{
4476 constraint(ALLOC_IN_RC(t0_reg));
4477 match(RegN);
4478 match(mRegN);
4480 format %{ %}
4481 interface(REG_INTER);
4482 %}
4484 operand t1_RegN() %{
4485 constraint(ALLOC_IN_RC(t1_reg));
4486 match(RegN);
4487 match(mRegN);
4489 format %{ %}
4490 interface(REG_INTER);
4491 %}
4493 operand t2_RegN() %{
4494 constraint(ALLOC_IN_RC(t2_reg));
4495 match(RegN);
4496 match(mRegN);
4498 format %{ %}
4499 interface(REG_INTER);
4500 %}
4502 operand t3_RegN() %{
4503 constraint(ALLOC_IN_RC(t3_reg));
4504 match(RegN);
4505 match(mRegN);
4507 format %{ %}
4508 interface(REG_INTER);
4509 %}
4511 operand t8_RegN() %{
4512 constraint(ALLOC_IN_RC(t8_reg));
4513 match(RegN);
4514 match(mRegN);
4516 format %{ %}
4517 interface(REG_INTER);
4518 %}
4520 operand t9_RegN() %{
4521 constraint(ALLOC_IN_RC(t9_reg));
4522 match(RegN);
4523 match(mRegN);
4525 format %{ %}
4526 interface(REG_INTER);
4527 %}
4529 operand a0_RegN() %{
4530 constraint(ALLOC_IN_RC(a0_reg));
4531 match(RegN);
4532 match(mRegN);
4534 format %{ %}
4535 interface(REG_INTER);
4536 %}
4538 operand a1_RegN() %{
4539 constraint(ALLOC_IN_RC(a1_reg));
4540 match(RegN);
4541 match(mRegN);
4543 format %{ %}
4544 interface(REG_INTER);
4545 %}
4547 operand a2_RegN() %{
4548 constraint(ALLOC_IN_RC(a2_reg));
4549 match(RegN);
4550 match(mRegN);
4552 format %{ %}
4553 interface(REG_INTER);
4554 %}
4556 operand a3_RegN() %{
4557 constraint(ALLOC_IN_RC(a3_reg));
4558 match(RegN);
4559 match(mRegN);
4561 format %{ %}
4562 interface(REG_INTER);
4563 %}
4565 operand a4_RegN() %{
4566 constraint(ALLOC_IN_RC(a4_reg));
4567 match(RegN);
4568 match(mRegN);
4570 format %{ %}
4571 interface(REG_INTER);
4572 %}
4574 operand a5_RegN() %{
4575 constraint(ALLOC_IN_RC(a5_reg));
4576 match(RegN);
4577 match(mRegN);
4579 format %{ %}
4580 interface(REG_INTER);
4581 %}
4583 operand a6_RegN() %{
4584 constraint(ALLOC_IN_RC(a6_reg));
4585 match(RegN);
4586 match(mRegN);
4588 format %{ %}
4589 interface(REG_INTER);
4590 %}
4592 operand a7_RegN() %{
4593 constraint(ALLOC_IN_RC(a7_reg));
4594 match(RegN);
4595 match(mRegN);
4597 format %{ %}
4598 interface(REG_INTER);
4599 %}
4601 operand s0_RegN() %{
4602 constraint(ALLOC_IN_RC(s0_reg));
4603 match(RegN);
4604 match(mRegN);
4606 format %{ %}
4607 interface(REG_INTER);
4608 %}
4610 operand s1_RegN() %{
4611 constraint(ALLOC_IN_RC(s1_reg));
4612 match(RegN);
4613 match(mRegN);
4615 format %{ %}
4616 interface(REG_INTER);
4617 %}
4619 operand s2_RegN() %{
4620 constraint(ALLOC_IN_RC(s2_reg));
4621 match(RegN);
4622 match(mRegN);
4624 format %{ %}
4625 interface(REG_INTER);
4626 %}
4628 operand s3_RegN() %{
4629 constraint(ALLOC_IN_RC(s3_reg));
4630 match(RegN);
4631 match(mRegN);
4633 format %{ %}
4634 interface(REG_INTER);
4635 %}
4637 operand s4_RegN() %{
4638 constraint(ALLOC_IN_RC(s4_reg));
4639 match(RegN);
4640 match(mRegN);
4642 format %{ %}
4643 interface(REG_INTER);
4644 %}
4646 operand s5_RegN() %{
4647 constraint(ALLOC_IN_RC(s5_reg));
4648 match(RegN);
4649 match(mRegN);
4651 format %{ %}
4652 interface(REG_INTER);
4653 %}
4655 operand s6_RegN() %{
4656 constraint(ALLOC_IN_RC(s6_reg));
4657 match(RegN);
4658 match(mRegN);
4660 format %{ %}
4661 interface(REG_INTER);
4662 %}
4664 operand s7_RegN() %{
4665 constraint(ALLOC_IN_RC(s7_reg));
4666 match(RegN);
4667 match(mRegN);
4669 format %{ %}
4670 interface(REG_INTER);
4671 %}
4673 operand v0_RegN() %{
4674 constraint(ALLOC_IN_RC(v0_reg));
4675 match(RegN);
4676 match(mRegN);
4678 format %{ %}
4679 interface(REG_INTER);
4680 %}
4682 operand v1_RegN() %{
4683 constraint(ALLOC_IN_RC(v1_reg));
4684 match(RegN);
4685 match(mRegN);
4687 format %{ %}
4688 interface(REG_INTER);
4689 %}
4691 // Pointer Register
// mRegP is the general operand for ideal RegP (64-bit pointer) values.
4692 operand mRegP() %{
4693 constraint(ALLOC_IN_RC(p_reg));
4694 match(RegP);
4696 format %{ %}
4697 interface(REG_INTER);
4698 %}
// Pointer operand drawn from a class that excludes T8 — presumably for
// instructions that clobber T8 internally; confirm against the
// no_T8_p_reg register-class definition earlier in this file.
4700 operand no_T8_mRegP() %{
4701 constraint(ALLOC_IN_RC(no_T8_p_reg));
4702 match(RegP);
4703 match(mRegP);
4705 format %{ %}
4706 interface(REG_INTER);
4707 %}
// Pinned pointer operands, one per register. Each also matches no_T8_mRegP
// so it can stand in wherever a T8-free pointer is required — except
// t8_RegP itself, which (consistently) omits that match rule.
4709 operand s0_RegP()
4710 %{
4711 constraint(ALLOC_IN_RC(s0_long_reg));
4712 match(RegP);
4713 match(mRegP);
4714 match(no_T8_mRegP);
4716 format %{ %}
4717 interface(REG_INTER);
4718 %}
4720 operand s1_RegP()
4721 %{
4722 constraint(ALLOC_IN_RC(s1_long_reg));
4723 match(RegP);
4724 match(mRegP);
4725 match(no_T8_mRegP);
4727 format %{ %}
4728 interface(REG_INTER);
4729 %}
4731 operand s2_RegP()
4732 %{
4733 constraint(ALLOC_IN_RC(s2_long_reg));
4734 match(RegP);
4735 match(mRegP);
4736 match(no_T8_mRegP);
4738 format %{ %}
4739 interface(REG_INTER);
4740 %}
4742 operand s3_RegP()
4743 %{
4744 constraint(ALLOC_IN_RC(s3_long_reg));
4745 match(RegP);
4746 match(mRegP);
4747 match(no_T8_mRegP);
4749 format %{ %}
4750 interface(REG_INTER);
4751 %}
4753 operand s4_RegP()
4754 %{
4755 constraint(ALLOC_IN_RC(s4_long_reg));
4756 match(RegP);
4757 match(mRegP);
4758 match(no_T8_mRegP);
4760 format %{ %}
4761 interface(REG_INTER);
4762 %}
4764 operand s5_RegP()
4765 %{
4766 constraint(ALLOC_IN_RC(s5_long_reg));
4767 match(RegP);
4768 match(mRegP);
4769 match(no_T8_mRegP);
4771 format %{ %}
4772 interface(REG_INTER);
4773 %}
4775 operand s6_RegP()
4776 %{
4777 constraint(ALLOC_IN_RC(s6_long_reg));
4778 match(RegP);
4779 match(mRegP);
4780 match(no_T8_mRegP);
4782 format %{ %}
4783 interface(REG_INTER);
4784 %}
4786 operand s7_RegP()
4787 %{
4788 constraint(ALLOC_IN_RC(s7_long_reg));
4789 match(RegP);
4790 match(mRegP);
4791 match(no_T8_mRegP);
4793 format %{ %}
4794 interface(REG_INTER);
4795 %}
4797 operand t0_RegP()
4798 %{
4799 constraint(ALLOC_IN_RC(t0_long_reg));
4800 match(RegP);
4801 match(mRegP);
4802 match(no_T8_mRegP);
4804 format %{ %}
4805 interface(REG_INTER);
4806 %}
4808 operand t1_RegP()
4809 %{
4810 constraint(ALLOC_IN_RC(t1_long_reg));
4811 match(RegP);
4812 match(mRegP);
4813 match(no_T8_mRegP);
4815 format %{ %}
4816 interface(REG_INTER);
4817 %}
4819 operand t2_RegP()
4820 %{
4821 constraint(ALLOC_IN_RC(t2_long_reg));
4822 match(RegP);
4823 match(mRegP);
4824 match(no_T8_mRegP);
4826 format %{ %}
4827 interface(REG_INTER);
4828 %}
4830 operand t3_RegP()
4831 %{
4832 constraint(ALLOC_IN_RC(t3_long_reg));
4833 match(RegP);
4834 match(mRegP);
4835 match(no_T8_mRegP);
4837 format %{ %}
4838 interface(REG_INTER);
4839 %}
// T8 cannot be a member of the no-T8 class, hence no match(no_T8_mRegP) here.
4841 operand t8_RegP()
4842 %{
4843 constraint(ALLOC_IN_RC(t8_long_reg));
4844 match(RegP);
4845 match(mRegP);
4847 format %{ %}
4848 interface(REG_INTER);
4849 %}
4851 operand t9_RegP()
4852 %{
4853 constraint(ALLOC_IN_RC(t9_long_reg));
4854 match(RegP);
4855 match(mRegP);
4856 match(no_T8_mRegP);
4858 format %{ %}
4859 interface(REG_INTER);
4860 %}
4862 operand a0_RegP()
4863 %{
4864 constraint(ALLOC_IN_RC(a0_long_reg));
4865 match(RegP);
4866 match(mRegP);
4867 match(no_T8_mRegP);
4869 format %{ %}
4870 interface(REG_INTER);
4871 %}
4873 operand a1_RegP()
4874 %{
4875 constraint(ALLOC_IN_RC(a1_long_reg));
4876 match(RegP);
4877 match(mRegP);
4878 match(no_T8_mRegP);
4880 format %{ %}
4881 interface(REG_INTER);
4882 %}
4884 operand a2_RegP()
4885 %{
4886 constraint(ALLOC_IN_RC(a2_long_reg));
4887 match(RegP);
4888 match(mRegP);
4889 match(no_T8_mRegP);
4891 format %{ %}
4892 interface(REG_INTER);
4893 %}
4895 operand a3_RegP()
4896 %{
4897 constraint(ALLOC_IN_RC(a3_long_reg));
4898 match(RegP);
4899 match(mRegP);
4900 match(no_T8_mRegP);
4902 format %{ %}
4903 interface(REG_INTER);
4904 %}
4906 operand a4_RegP()
4907 %{
4908 constraint(ALLOC_IN_RC(a4_long_reg));
4909 match(RegP);
4910 match(mRegP);
4911 match(no_T8_mRegP);
4913 format %{ %}
4914 interface(REG_INTER);
4915 %}
4918 operand a5_RegP()
4919 %{
4920 constraint(ALLOC_IN_RC(a5_long_reg));
4921 match(RegP);
4922 match(mRegP);
4923 match(no_T8_mRegP);
4925 format %{ %}
4926 interface(REG_INTER);
4927 %}
4929 operand a6_RegP()
4930 %{
4931 constraint(ALLOC_IN_RC(a6_long_reg));
4932 match(RegP);
4933 match(mRegP);
4934 match(no_T8_mRegP);
4936 format %{ %}
4937 interface(REG_INTER);
4938 %}
4940 operand a7_RegP()
4941 %{
4942 constraint(ALLOC_IN_RC(a7_long_reg));
4943 match(RegP);
4944 match(mRegP);
4945 match(no_T8_mRegP);
4947 format %{ %}
4948 interface(REG_INTER);
4949 %}
4951 operand v0_RegP()
4952 %{
4953 constraint(ALLOC_IN_RC(v0_long_reg));
4954 match(RegP);
4955 match(mRegP);
4956 match(no_T8_mRegP);
4958 format %{ %}
4959 interface(REG_INTER);
4960 %}
4962 operand v1_RegP()
4963 %{
4964 constraint(ALLOC_IN_RC(v1_long_reg));
4965 match(RegP);
4966 match(mRegP);
4967 match(no_T8_mRegP);
4969 format %{ %}
4970 interface(REG_INTER);
4971 %}
4973 /*
4974 operand mSPRegP(mRegP reg) %{
4975 constraint(ALLOC_IN_RC(sp_reg));
4976 match(reg);
4978 format %{ "SP" %}
4979 interface(REG_INTER);
4980 %}
4982 operand mFPRegP(mRegP reg) %{
4983 constraint(ALLOC_IN_RC(fp_reg));
4984 match(reg);
4986 format %{ "FP" %}
4987 interface(REG_INTER);
4988 %}
4989 */
// --- Long register operands --------------------------------------------------
// mRegL is the general operand for ideal RegL (64-bit integer) values; the
// xxRegL operands pin a long to one specific register.
// NOTE(review): no s5RegL / s6RegL are defined in this list (s4 jumps to s7)
// — verify whether any instruct rule needs them.
4991 operand mRegL() %{
4992 constraint(ALLOC_IN_RC(long_reg));
4993 match(RegL);
4995 format %{ %}
4996 interface(REG_INTER);
4997 %}
4999 operand v0RegL() %{
5000 constraint(ALLOC_IN_RC(v0_long_reg));
5001 match(RegL);
5002 match(mRegL);
5004 format %{ %}
5005 interface(REG_INTER);
5006 %}
5008 operand v1RegL() %{
5009 constraint(ALLOC_IN_RC(v1_long_reg));
5010 match(RegL);
5011 match(mRegL);
5013 format %{ %}
5014 interface(REG_INTER);
5015 %}
// NOTE(review): a0RegL is the only long operand with a non-empty format
// string ("A0"); its siblings print nothing. Harmless (format only affects
// debug/PrintOptoAssembly output) but inconsistent.
5017 operand a0RegL() %{
5018 constraint(ALLOC_IN_RC(a0_long_reg));
5019 match(RegL);
5020 match(mRegL);
5022 format %{ "A0" %}
5023 interface(REG_INTER);
5024 %}
5026 operand a1RegL() %{
5027 constraint(ALLOC_IN_RC(a1_long_reg));
5028 match(RegL);
5029 match(mRegL);
5031 format %{ %}
5032 interface(REG_INTER);
5033 %}
5035 operand a2RegL() %{
5036 constraint(ALLOC_IN_RC(a2_long_reg));
5037 match(RegL);
5038 match(mRegL);
5040 format %{ %}
5041 interface(REG_INTER);
5042 %}
5044 operand a3RegL() %{
5045 constraint(ALLOC_IN_RC(a3_long_reg));
5046 match(RegL);
5047 match(mRegL);
5049 format %{ %}
5050 interface(REG_INTER);
5051 %}
5053 operand t0RegL() %{
5054 constraint(ALLOC_IN_RC(t0_long_reg));
5055 match(RegL);
5056 match(mRegL);
5058 format %{ %}
5059 interface(REG_INTER);
5060 %}
5062 operand t1RegL() %{
5063 constraint(ALLOC_IN_RC(t1_long_reg));
5064 match(RegL);
5065 match(mRegL);
5067 format %{ %}
5068 interface(REG_INTER);
5069 %}
5071 operand t2RegL() %{
5072 constraint(ALLOC_IN_RC(t2_long_reg));
5073 match(RegL);
5074 match(mRegL);
5076 format %{ %}
5077 interface(REG_INTER);
5078 %}
5080 operand t3RegL() %{
5081 constraint(ALLOC_IN_RC(t3_long_reg));
5082 match(RegL);
5083 match(mRegL);
5085 format %{ %}
5086 interface(REG_INTER);
5087 %}
5089 operand t8RegL() %{
5090 constraint(ALLOC_IN_RC(t8_long_reg));
5091 match(RegL);
5092 match(mRegL);
5094 format %{ %}
5095 interface(REG_INTER);
5096 %}
5098 operand a4RegL() %{
5099 constraint(ALLOC_IN_RC(a4_long_reg));
5100 match(RegL);
5101 match(mRegL);
5103 format %{ %}
5104 interface(REG_INTER);
5105 %}
5107 operand a5RegL() %{
5108 constraint(ALLOC_IN_RC(a5_long_reg));
5109 match(RegL);
5110 match(mRegL);
5112 format %{ %}
5113 interface(REG_INTER);
5114 %}
5116 operand a6RegL() %{
5117 constraint(ALLOC_IN_RC(a6_long_reg));
5118 match(RegL);
5119 match(mRegL);
5121 format %{ %}
5122 interface(REG_INTER);
5123 %}
5125 operand a7RegL() %{
5126 constraint(ALLOC_IN_RC(a7_long_reg));
5127 match(RegL);
5128 match(mRegL);
5130 format %{ %}
5131 interface(REG_INTER);
5132 %}
5134 operand s0RegL() %{
5135 constraint(ALLOC_IN_RC(s0_long_reg));
5136 match(RegL);
5137 match(mRegL);
5139 format %{ %}
5140 interface(REG_INTER);
5141 %}
5143 operand s1RegL() %{
5144 constraint(ALLOC_IN_RC(s1_long_reg));
5145 match(RegL);
5146 match(mRegL);
5148 format %{ %}
5149 interface(REG_INTER);
5150 %}
5152 operand s2RegL() %{
5153 constraint(ALLOC_IN_RC(s2_long_reg));
5154 match(RegL);
5155 match(mRegL);
5157 format %{ %}
5158 interface(REG_INTER);
5159 %}
5161 operand s3RegL() %{
5162 constraint(ALLOC_IN_RC(s3_long_reg));
5163 match(RegL);
5164 match(mRegL);
5166 format %{ %}
5167 interface(REG_INTER);
5168 %}
5170 operand s4RegL() %{
5171 constraint(ALLOC_IN_RC(s4_long_reg));
5172 match(RegL);
5173 match(mRegL);
5175 format %{ %}
5176 interface(REG_INTER);
5177 %}
5179 operand s7RegL() %{
5180 constraint(ALLOC_IN_RC(s7_long_reg));
5181 match(RegL);
5182 match(mRegL);
5184 format %{ %}
5185 interface(REG_INTER);
5186 %}
5188 // Floating register operands
// Single-precision float (ideal RegF), allocated from the FP register class.
5189 operand regF() %{
5190 constraint(ALLOC_IN_RC(flt_reg));
5191 match(RegF);
5193 format %{ %}
5194 interface(REG_INTER);
5195 %}
5197 //Double Precision Floating register operands
// Double-precision float (ideal RegD), allocated from the double class.
5198 operand regD() %{
5199 constraint(ALLOC_IN_RC(dbl_reg));
5200 match(RegD);
5202 format %{ %}
5203 interface(REG_INTER);
5204 %}
5206 //----------Memory Operands----------------------------------------------------
5207 // Indirect Memory Operand
// Plain [reg] addressing: base register, no index, no displacement.
5208 operand indirect(mRegP reg) %{
5209 constraint(ALLOC_IN_RC(p_reg));
5210 match(reg);
5212 format %{ "[$reg] @ indirect" %}
5213 interface(MEMORY_INTER) %{
5214 base($reg);
5215 index(0x0); /* NO_INDEX */
5216 scale(0x0);
5217 disp(0x0);
5218 %}
5219 %}
5221 // Indirect Memory Plus Short Offset Operand
// [reg + imm8] — 8-bit displacement form.
5222 operand indOffset8(mRegP reg, immL8 off)
5223 %{
5224 constraint(ALLOC_IN_RC(p_reg));
5225 match(AddP reg off);
5227 format %{ "[$reg + $off (8-bit)] @ indOffset8" %}
5228 interface(MEMORY_INTER) %{
5229 base($reg);
5230 index(0x0); /* NO_INDEX */
5231 scale(0x0);
5232 disp($off);
5233 %}
5234 %}
5236 // Indirect Memory Times Scale Plus Index Register
// [reg + (lreg << scale)] — scaled-index form; op_cost discourages it
// relative to simpler modes.
5237 operand indIndexScale(mRegP reg, mRegL lreg, immI2 scale)
5238 %{
5239 constraint(ALLOC_IN_RC(p_reg));
5240 match(AddP reg (LShiftL lreg scale));
5242 op_cost(10);
5243 format %{"[$reg + $lreg << $scale] @ indIndexScale" %}
5244 interface(MEMORY_INTER) %{
5245 base($reg);
5246 index($lreg);
5247 scale($scale);
5248 disp(0x0);
5249 %}
5250 %}
5253 // [base + index + offset]
5254 operand baseIndexOffset8(mRegP base, mRegL index, immL8 off)
5255 %{
5256 constraint(ALLOC_IN_RC(p_reg));
5257 op_cost(5);
5258 match(AddP (AddP base index) off);
5260 format %{ "[$base + $index + $off (8-bit)] @ baseIndexOffset8" %}
5261 interface(MEMORY_INTER) %{
5262 base($base);
5263 index($index);
5264 scale(0x0);
5265 disp($off);
5266 %}
5267 %}
5269 // [base + index + offset]
// Same as baseIndexOffset8 but the index arrives as an int widened with
// ConvI2L (typical of array indexing).
5270 operand baseIndexOffset8_convI2L(mRegP base, mRegI index, immL8 off)
5271 %{
5272 constraint(ALLOC_IN_RC(p_reg));
5273 op_cost(5);
5274 match(AddP (AddP base (ConvI2L index)) off);
5276 format %{ "[$base + $index + $off (8-bit)] @ baseIndexOffset8_convI2L" %}
5277 interface(MEMORY_INTER) %{
5278 base($base);
5279 index($index);
5280 scale(0x0);
5281 disp($off);
5282 %}
5283 %}
5285 // Indirect Memory Times Scale Plus Index Register Plus Offset Operand
5286 operand indIndexScaleOffset8(mRegP reg, immL8 off, mRegL lreg, immI2 scale)
5287 %{
5288 constraint(ALLOC_IN_RC(p_reg));
5289 match(AddP (AddP reg (LShiftL lreg scale)) off);
5291 op_cost(10);
5292 format %{"[$reg + $off + $lreg << $scale] @ indIndexScaleOffset8" %}
5293 interface(MEMORY_INTER) %{
5294 base($reg);
5295 index($lreg);
5296 scale($scale);
5297 disp($off);
5298 %}
5299 %}
5301 operand indIndexScaleOffset8_convI2L(mRegP reg, immL8 off, mRegI ireg, immI2 scale)
5302 %{
5303 constraint(ALLOC_IN_RC(p_reg));
5304 match(AddP (AddP reg (LShiftL (ConvI2L ireg) scale)) off);
5306 op_cost(10);
5307 format %{"[$reg + $off + $ireg << $scale] @ indIndexScaleOffset8_convI2L" %}
5308 interface(MEMORY_INTER) %{
5309 base($reg);
5310 index($ireg);
5311 scale($scale);
5312 disp($off);
5313 %}
5314 %}
5316 // [base + index<<scale + offset]
5317 operand basePosIndexScaleOffset8(mRegP base, mRegI index, immL8 off, immI_0_31 scale)
5318 %{
5319 constraint(ALLOC_IN_RC(p_reg));
5320 //predicate(n->in(2)->in(3)->in(1)->as_Type()->type()->is_long()->_lo >= 0);
5321 op_cost(10);
5322 match(AddP (AddP base (LShiftL (ConvI2L index) scale)) off);
5324 format %{ "[$base + $index << $scale + $off (8-bit)] @ basePosIndexScaleOffset8" %}
5325 interface(MEMORY_INTER) %{
5326 base($base);
5327 index($index);
5328 scale($scale);
5329 disp($off);
5330 %}
5331 %}
5333 // Indirect Memory Times Scale Plus Index Register Plus Offset Operand
// Compressed-oop base: only legal when the narrow-oop decode is a no-op
// shift (narrow_oop_shift() == 0), so the RegN can serve directly as base.
5334 operand indIndexScaleOffsetNarrow(mRegN reg, immL8 off, mRegL lreg, immI2 scale)
5335 %{
5336 predicate(Universe::narrow_oop_shift() == 0);
5337 constraint(ALLOC_IN_RC(p_reg));
5338 match(AddP (AddP (DecodeN reg) (LShiftL lreg scale)) off);
5340 op_cost(10);
5341 format %{"[$reg + $off + $lreg << $scale] @ indIndexScaleOffsetNarrow" %}
5342 interface(MEMORY_INTER) %{
5343 base($reg);
5344 index($lreg);
5345 scale($scale);
5346 disp($off);
5347 %}
5348 %}
5350 // [base + index<<scale + offset] for compressed Oops
5351 operand indPosIndexI2LScaleOffset8Narrow(mRegN base, mRegI index, immL8 off, immI_0_31 scale)
5352 %{
5353 constraint(ALLOC_IN_RC(p_reg));
5354 //predicate(Universe::narrow_oop_shift() == 0 && n->in(2)->in(3)->in(1)->as_Type()->type()->is_long()->_lo >= 0);
5355 predicate(Universe::narrow_oop_shift() == 0);
5356 op_cost(10);
5357 match(AddP (AddP (DecodeN base) (LShiftL (ConvI2L index) scale)) off);
5359 format %{ "[$base + $index << $scale + $off (8-bit)] @ indPosIndexI2LScaleOffset8Narrow" %}
5360 interface(MEMORY_INTER) %{
5361 base($base);
5362 index($index);
5363 scale($scale);
5364 disp($off);
5365 %}
5366 %}
5368 //FIXME: I think it's better to limit the immI to be 16-bit at most!
5369 // Indirect Memory Plus Long Offset Operand
5370 operand indOffset32(mRegP reg, immL32 off) %{
5371 constraint(ALLOC_IN_RC(p_reg));
5372 op_cost(20);
5373 match(AddP reg off);
5375 format %{ "[$reg + $off (32-bit)] @ indOffset32" %}
5376 interface(MEMORY_INTER) %{
5377 base($reg);
5378 index(0x0); /* NO_INDEX */
5379 scale(0x0);
5380 disp($off);
5381 %}
5382 %}
5384 // Indirect Memory Plus Index Register
5385 operand indIndex(mRegP addr, mRegL index) %{
5386 constraint(ALLOC_IN_RC(p_reg));
5387 match(AddP addr index);
5389 op_cost(20);
5390 format %{"[$addr + $index] @ indIndex" %}
5391 interface(MEMORY_INTER) %{
5392 base($addr);
5393 index($index);
5394 scale(0x0);
5395 disp(0x0);
5396 %}
5397 %}
// --- Narrow-klass addressing modes: same shapes as above, but the base is a
// compressed klass pointer (DecodeNKlass), legal only when
// narrow_klass_shift() == 0.
5399 operand indirectNarrowKlass(mRegN reg)
5400 %{
5401 predicate(Universe::narrow_klass_shift() == 0);
5402 constraint(ALLOC_IN_RC(p_reg));
5403 op_cost(10);
5404 match(DecodeNKlass reg);
5406 format %{ "[$reg] @ indirectNarrowKlass" %}
5407 interface(MEMORY_INTER) %{
5408 base($reg);
5409 index(0x0);
5410 scale(0x0);
5411 disp(0x0);
5412 %}
5413 %}
5415 operand indOffset8NarrowKlass(mRegN reg, immL8 off)
5416 %{
5417 predicate(Universe::narrow_klass_shift() == 0);
5418 constraint(ALLOC_IN_RC(p_reg));
5419 op_cost(10);
5420 match(AddP (DecodeNKlass reg) off);
5422 format %{ "[$reg + $off (8-bit)] @ indOffset8NarrowKlass" %}
5423 interface(MEMORY_INTER) %{
5424 base($reg);
5425 index(0x0);
5426 scale(0x0);
5427 disp($off);
5428 %}
5429 %}
5431 operand indOffset32NarrowKlass(mRegN reg, immL32 off)
5432 %{
5433 predicate(Universe::narrow_klass_shift() == 0);
5434 constraint(ALLOC_IN_RC(p_reg));
5435 op_cost(10);
5436 match(AddP (DecodeNKlass reg) off);
5438 format %{ "[$reg + $off (32-bit)] @ indOffset32NarrowKlass" %}
5439 interface(MEMORY_INTER) %{
5440 base($reg);
5441 index(0x0);
5442 scale(0x0);
5443 disp($off);
5444 %}
5445 %}
5447 operand indIndexOffsetNarrowKlass(mRegN reg, mRegL lreg, immL32 off)
5448 %{
5449 predicate(Universe::narrow_klass_shift() == 0);
5450 constraint(ALLOC_IN_RC(p_reg));
5451 match(AddP (AddP (DecodeNKlass reg) lreg) off);
5453 op_cost(10);
5454 format %{"[$reg + $off + $lreg] @ indIndexOffsetNarrowKlass" %}
5455 interface(MEMORY_INTER) %{
5456 base($reg);
5457 index($lreg);
5458 scale(0x0);
5459 disp($off);
5460 %}
5461 %}
5463 operand indIndexNarrowKlass(mRegN reg, mRegL lreg)
5464 %{
5465 predicate(Universe::narrow_klass_shift() == 0);
5466 constraint(ALLOC_IN_RC(p_reg));
5467 match(AddP (DecodeNKlass reg) lreg);
5469 op_cost(10);
5470 format %{"[$reg + $lreg] @ indIndexNarrowKlass" %}
5471 interface(MEMORY_INTER) %{
5472 base($reg);
5473 index($lreg);
5474 scale(0x0);
5475 disp(0x0);
5476 %}
5477 %}
5479 // Indirect Memory Operand
// Narrow-oop (DecodeN) variants of the basic addressing modes.
5480 operand indirectNarrow(mRegN reg)
5481 %{
5482 predicate(Universe::narrow_oop_shift() == 0);
5483 constraint(ALLOC_IN_RC(p_reg));
5484 op_cost(10);
5485 match(DecodeN reg);
5487 format %{ "[$reg] @ indirectNarrow" %}
5488 interface(MEMORY_INTER) %{
5489 base($reg);
5490 index(0x0);
5491 scale(0x0);
5492 disp(0x0);
5493 %}
5494 %}
5496 // Indirect Memory Plus Short Offset Operand
5497 operand indOffset8Narrow(mRegN reg, immL8 off)
5498 %{
5499 predicate(Universe::narrow_oop_shift() == 0);
5500 constraint(ALLOC_IN_RC(p_reg));
5501 op_cost(10);
5502 match(AddP (DecodeN reg) off);
5504 format %{ "[$reg + $off (8-bit)] @ indOffset8Narrow" %}
5505 interface(MEMORY_INTER) %{
5506 base($reg);
5507 index(0x0);
5508 scale(0x0);
5509 disp($off);
5510 %}
5511 %}
5513 // Indirect Memory Plus Index Register Plus Offset Operand
5514 operand indIndexOffset8Narrow(mRegN reg, mRegL lreg, immL8 off)
5515 %{
5516 predicate(Universe::narrow_oop_shift() == 0);
5517 constraint(ALLOC_IN_RC(p_reg));
5518 match(AddP (AddP (DecodeN reg) lreg) off);
5520 op_cost(10);
5521 format %{"[$reg + $off + $lreg] @ indIndexOffset8Narrow" %}
5522 interface(MEMORY_INTER) %{
5523 base($reg);
5524 index($lreg);
5525 scale(0x0);
5526 disp($off);
5527 %}
5528 %}
5530 //----------Load Long Memory Operands------------------------------------------
5531 // The load-long idiom will use its address expression again after loading
5532 // the first word of the long. If the load-long destination overlaps with
5533 // registers used in the addressing expression, the 2nd half will be loaded
5534 // from a clobbered address. Fix this by requiring that load-long use
5535 // address registers that do not overlap with the load-long target.
5537 // load-long support
// High op_cost steers the allocator away from this operand except where the
// load-long rules demand it.
5538 operand load_long_RegP() %{
5539 constraint(ALLOC_IN_RC(p_reg));
5540 match(RegP);
5541 match(mRegP);
5542 op_cost(100);
5543 format %{ %}
5544 interface(REG_INTER);
5545 %}
5547 // Indirect Memory Operand Long
5548 operand load_long_indirect(load_long_RegP reg) %{
5549 constraint(ALLOC_IN_RC(p_reg));
5550 match(reg);
5552 format %{ "[$reg]" %}
5553 interface(MEMORY_INTER) %{
5554 base($reg);
5555 index(0x0);
5556 scale(0x0);
5557 disp(0x0);
5558 %}
5559 %}
5561 // Indirect Memory Plus Long Offset Operand
// NOTE(review): unlike load_long_indirect this has no
// constraint(ALLOC_IN_RC(p_reg)) — confirm whether the omission is
// intentional (the base operand already constrains allocation).
5562 operand load_long_indOffset32(load_long_RegP reg, immL32 off) %{
5563 match(AddP reg off);
5565 format %{ "[$reg + $off]" %}
5566 interface(MEMORY_INTER) %{
5567 base($reg);
5568 index(0x0);
5569 scale(0x0);
5570 disp($off);
5571 %}
5572 %}
5574 //----------Conditional Branch Operands----------------------------------------
5575 // Comparison Op - This is the operation of the comparison, and is limited to
5576 // the following set of codes:
5577 // L (<), LE (<=), G (>), GE (>=), E (==), NE (!=)
5578 //
5579 // Other attributes of the comparison, such as unsignedness, are specified
5580 // by the comparison instruction that sets a condition code flags register.
5581 // That result is represented by a flags operand whose subtype is appropriate
5582 // to the unsignedness (etc.) of the comparison.
5583 //
5584 // Later, the instruction which matches both the Comparison Op (a Bool) and
5585 // the flags (produced by the Cmp) specifies the coding of the comparison op
5586 // by matching a specific subtype of Bool operand below, such as cmpOpU.
5588 // Comparison Code
5589 operand cmpOp() %{
5590 match(Bool);
5592 format %{ "" %}
5593 interface(COND_INTER) %{
5594 equal(0x01);
5595 not_equal(0x02);
5596 greater(0x03);
5597 greater_equal(0x04);
5598 less(0x05);
5599 less_equal(0x06);
5600 overflow(0x7);
5601 no_overflow(0x8);
5602 %}
5603 %}
5606 // Comparison Code
5607 // Comparison Code, unsigned compare. Used by FP also, with
5608 // C2 (unordered) turned into GT or LT already. The other bits
5609 // C0 and C3 are turned into Carry & Zero flags.
// NOTE: cmpOpU uses the same encodings as cmpOp above; signed vs unsigned
// is distinguished by the instruct rules that match it, not by the codes.
5610 operand cmpOpU() %{
5611 match(Bool);
5613 format %{ "" %}
5614 interface(COND_INTER) %{
5615 equal(0x01);
5616 not_equal(0x02);
5617 greater(0x03);
5618 greater_equal(0x04);
5619 less(0x05);
5620 less_equal(0x06);
5621 overflow(0x7);
5622 no_overflow(0x8);
5623 %}
5624 %}
5626 /*
5627 // Comparison Code, unsigned compare. Used by FP also, with
5628 // C2 (unordered) turned into GT or LT already. The other bits
5629 // C0 and C3 are turned into Carry & Zero flags.
5630 operand cmpOpU() %{
5631 match(Bool);
5633 format %{ "" %}
5634 interface(COND_INTER) %{
5635 equal(0x4);
5636 not_equal(0x5);
5637 less(0x2);
5638 greater_equal(0x3);
5639 less_equal(0x6);
5640 greater(0x7);
5641 %}
5642 %}
5643 */
5644 /*
5645 // Comparison Code for FP conditional move
5646 operand cmpOp_fcmov() %{
5647 match(Bool);
5649 format %{ "" %}
5650 interface(COND_INTER) %{
5651 equal (0x01);
5652 not_equal (0x02);
5653 greater (0x03);
5654 greater_equal(0x04);
5655 less (0x05);
5656 less_equal (0x06);
5657 %}
5658 %}
5660 // Comparison Code used in long compares
5661 operand cmpOp_commute() %{
5662 match(Bool);
5664 format %{ "" %}
5665 interface(COND_INTER) %{
5666 equal(0x4);
5667 not_equal(0x5);
5668 less(0xF);
5669 greater_equal(0xE);
5670 less_equal(0xD);
5671 greater(0xC);
5672 %}
5673 %}
5674 */
5676 //----------Special Memory Operands--------------------------------------------
5677 // Stack Slot Operand - This operand is used for loading and storing temporary
5678 // values on the stack where a match requires a value to
5679 // flow through memory.
// All five stack-slot operands address [SP + slot_offset]; base 0x1d is the
// encoding of the stack pointer and disp($reg) is the slot's SP offset.
5680 operand stackSlotP(sRegP reg) %{
5681 constraint(ALLOC_IN_RC(stack_slots));
5682 // No match rule because this operand is only generated in matching
5683 op_cost(50);
5684 format %{ "[$reg]" %}
5685 interface(MEMORY_INTER) %{
5686 base(0x1d); // SP
5687 index(0x0); // No Index
5688 scale(0x0); // No Scale
5689 disp($reg); // Stack Offset
5690 %}
5691 %}
5693 operand stackSlotI(sRegI reg) %{
5694 constraint(ALLOC_IN_RC(stack_slots));
5695 // No match rule because this operand is only generated in matching
5696 op_cost(50);
5697 format %{ "[$reg]" %}
5698 interface(MEMORY_INTER) %{
5699 base(0x1d); // SP
5700 index(0x0); // No Index
5701 scale(0x0); // No Scale
5702 disp($reg); // Stack Offset
5703 %}
5704 %}
5706 operand stackSlotF(sRegF reg) %{
5707 constraint(ALLOC_IN_RC(stack_slots));
5708 // No match rule because this operand is only generated in matching
5709 op_cost(50);
5710 format %{ "[$reg]" %}
5711 interface(MEMORY_INTER) %{
5712 base(0x1d); // SP
5713 index(0x0); // No Index
5714 scale(0x0); // No Scale
5715 disp($reg); // Stack Offset
5716 %}
5717 %}
5719 operand stackSlotD(sRegD reg) %{
5720 constraint(ALLOC_IN_RC(stack_slots));
5721 // No match rule because this operand is only generated in matching
5722 op_cost(50);
5723 format %{ "[$reg]" %}
5724 interface(MEMORY_INTER) %{
5725 base(0x1d); // SP
5726 index(0x0); // No Index
5727 scale(0x0); // No Scale
5728 disp($reg); // Stack Offset
5729 %}
5730 %}
5732 operand stackSlotL(sRegL reg) %{
5733 constraint(ALLOC_IN_RC(stack_slots));
5734 // No match rule because this operand is only generated in matching
5735 op_cost(50);
5736 format %{ "[$reg]" %}
5737 interface(MEMORY_INTER) %{
5738 base(0x1d); // SP
5739 index(0x0); // No Index
5740 scale(0x0); // No Scale
5741 disp($reg); // Stack Offset
5742 %}
5743 %}
5746 //------------------------OPERAND CLASSES--------------------------------------
5747 //opclass memory( direct, indirect, indOffset16, indOffset32, indOffset32X, indIndexOffset );
// "memory" groups every addressing-mode operand defined above so instruct
// rules can accept any of them with a single operand class.
5748 opclass memory( indirect, indirectNarrow, indOffset8, indOffset32, indIndex, indIndexScale, load_long_indirect, load_long_indOffset32, baseIndexOffset8, baseIndexOffset8_convI2L, indIndexScaleOffset8, indIndexScaleOffset8_convI2L, basePosIndexScaleOffset8, indIndexScaleOffsetNarrow, indPosIndexI2LScaleOffset8Narrow, indOffset8Narrow, indIndexOffset8Narrow);
5751 //----------PIPELINE-----------------------------------------------------------
5752 // Rules which define the behavior of the target architectures pipeline.
5754 pipeline %{
5756 //----------ATTRIBUTES---------------------------------------------------------
5757 attributes %{
5758 fixed_size_instructions; // Fixed size instructions
5759 branch_has_delay_slot; // branches have a delay slot on gs2
5760 max_instructions_per_bundle = 1; // 1 instruction per bundle
5761 max_bundles_per_cycle = 4; // Up to 4 bundles per cycle
5762 bundle_unit_size=4;
5763 instruction_unit_size = 4; // An instruction is 4 bytes long
5764 instruction_fetch_unit_size = 16; // The processor fetches one line
5765 instruction_fetch_units = 1; // of 16 bytes
5767 // List of nop instructions
5768 nops( MachNop );
5769 %}
5771 //----------RESOURCES----------------------------------------------------------
5772 // Resources are the functional units available to the machine
// Four decoders, two integer ALUs, two FPUs, one memory port, one branch unit.
5774 resources(D1, D2, D3, D4, DECODE = D1 | D2 | D3| D4, ALU1, ALU2, ALU = ALU1 | ALU2, FPU1, FPU2, FPU = FPU1 | FPU2, MEM, BR);
5776 //----------PIPELINE DESCRIPTION-----------------------------------------------
5777 // Pipeline Description specifies the stages in the machine's pipeline
5779 // IF: fetch
5780 // ID: decode
5781 // RD: read
5782 // CA: calculate
5783 // WB: write back
5784 // CM: commit
5786 pipe_desc(IF, ID, RD, CA, WB, CM);
//----------PIPELINE CLASSES---------------------------------------------------
// Pipeline Classes describe the stages in which input and output are
// referenced by the hardware pipeline.

//No.1 Integer ALU reg-reg operation : dst <-- reg1 op reg2
pipe_class ialu_regI_regI(mRegI dst, mRegI src1, mRegI src2) %{
  single_instruction;
  src1 : RD(read);
  src2 : RD(read);
  dst : WB(write)+1;          // result usable one cycle after WB
  DECODE : ID;
  ALU : CA;
%}

//No.19 Integer mult operation : dst <-- reg1 mult reg2
pipe_class ialu_mult(mRegI dst, mRegI src1, mRegI src2) %{
  src1 : RD(read);
  src2 : RD(read);
  dst : WB(write)+5;          // multiply latency: 5 extra cycles
  DECODE : ID;
  ALU2 : CA;                  // issues on ALU2 only
%}

pipe_class mulL_reg_reg(mRegL dst, mRegL src1, mRegL src2) %{
  src1 : RD(read);
  src2 : RD(read);
  dst : WB(write)+10;         // 64-bit multiply latency: 10 extra cycles
  DECODE : ID;
  ALU2 : CA;
%}

//No.19 Integer div operation : dst <-- reg1 div reg2
pipe_class ialu_div(mRegI dst, mRegI src1, mRegI src2) %{
  src1 : RD(read);
  src2 : RD(read);
  dst : WB(write)+10;
  DECODE : ID;
  ALU2 : CA;
%}

//No.19 Integer mod operation : dst <-- reg1 mod reg2
pipe_class ialu_mod(mRegI dst, mRegI src1, mRegI src2) %{
  instruction_count(2);       // div + mfhi-style sequence: two instructions
  src1 : RD(read);
  src2 : RD(read);
  dst : WB(write)+10;
  DECODE : ID;
  ALU2 : CA;
%}
//No.15 Long ALU reg-reg operation : dst <-- reg1 op reg2
pipe_class ialu_regL_regL(mRegL dst, mRegL src1, mRegL src2) %{
  instruction_count(2);
  src1 : RD(read);
  src2 : RD(read);
  dst : WB(write);
  DECODE : ID;
  ALU : CA;
%}

//No.18 Long ALU reg-imm16 operation : dst <-- reg1 op imm16
pipe_class ialu_regL_imm16(mRegL dst, mRegL src) %{
  instruction_count(2);
  src : RD(read);
  dst : WB(write);
  DECODE : ID;
  ALU : CA;
%}

//no.16 load Long from memory :
pipe_class ialu_loadL(mRegL dst, memory mem) %{
  instruction_count(2);
  mem : RD(read);
  dst : WB(write)+5;          // load-to-use latency: 5 extra cycles
  DECODE : ID;
  MEM : RD;
%}

//No.17 Store Long to Memory :
pipe_class ialu_storeL(mRegL src, memory mem) %{
  instruction_count(2);
  mem : RD(read);
  src : RD(read);
  DECODE : ID;
  MEM : RD;
%}

//No.2 Integer ALU reg-imm16 operation : dst <-- reg1 op imm16
pipe_class ialu_regI_imm16(mRegI dst, mRegI src) %{
  single_instruction;
  src : RD(read);
  dst : WB(write);
  DECODE : ID;
  ALU : CA;
%}

//No.3 Integer move operation : dst <-- reg
pipe_class ialu_regI_mov(mRegI dst, mRegI src) %{
  src : RD(read);
  dst : WB(write);
  DECODE : ID;
  ALU : CA;
%}

//No.4 No instructions : do nothing
pipe_class empty( ) %{
  instruction_count(0);
%}
//No.5 UnConditional branch :
pipe_class pipe_jump( label labl ) %{
  multiple_bundles;
  DECODE : ID;
  BR : RD;
%}

//No.6 ALU Conditional branch :
pipe_class pipe_alu_branch(mRegI src1, mRegI src2, label labl ) %{
  multiple_bundles;
  src1 : RD(read);
  src2 : RD(read);
  DECODE : ID;
  BR : RD;
%}

//no.7 load integer from memory :
pipe_class ialu_loadI(mRegI dst, memory mem) %{
  mem : RD(read);
  dst : WB(write)+3;          // load-to-use latency: 3 extra cycles
  DECODE : ID;
  MEM : RD;
%}

//No.8 Store Integer to Memory :
pipe_class ialu_storeI(mRegI src, memory mem) %{
  mem : RD(read);
  src : RD(read);
  DECODE : ID;
  MEM : RD;
%}
//No.10 Floating FPU reg-reg operation : dst <-- reg1 op reg2
pipe_class fpu_regF_regF(regF dst, regF src1, regF src2) %{
  src1 : RD(read);
  src2 : RD(read);
  dst : WB(write);
  DECODE : ID;
  FPU : CA;
%}

//No.22 Floating div operation : dst <-- reg1 div reg2
pipe_class fpu_div(regF dst, regF src1, regF src2) %{
  src1 : RD(read);
  src2 : RD(read);
  dst : WB(write);
  DECODE : ID;
  FPU2 : CA;                  // divide issues on FPU2 only
%}

// int -> double conversion
pipe_class fcvt_I2D(regD dst, mRegI src) %{
  src : RD(read);
  dst : WB(write);
  DECODE : ID;
  FPU1 : CA;
%}

// double -> int conversion
pipe_class fcvt_D2I(mRegI dst, regD src) %{
  src : RD(read);
  dst : WB(write);
  DECODE : ID;
  FPU1 : CA;
%}

// move FPR -> GPR (mfc1); modeled on the memory port
pipe_class pipe_mfc1(mRegI dst, regD src) %{
  src : RD(read);
  dst : WB(write);
  DECODE : ID;
  MEM : RD;
%}

// move GPR -> FPR (mtc1); occupies the memory port for 5 cycles
pipe_class pipe_mtc1(regD dst, mRegI src) %{
  src : RD(read);
  dst : WB(write);
  DECODE : ID;
  MEM : RD(5);
%}
//No.23 Floating sqrt operation : dst <-- reg1 sqrt reg2
pipe_class fpu_sqrt(regF dst, regF src1, regF src2) %{
  multiple_bundles;
  src1 : RD(read);
  src2 : RD(read);
  dst : WB(write);
  DECODE : ID;
  FPU2 : CA;
%}

//No.11 Load Floating from Memory :
pipe_class fpu_loadF(regF dst, memory mem) %{
  instruction_count(1);
  mem : RD(read);
  dst : WB(write)+3;          // FP load-to-use latency: 3 extra cycles
  DECODE : ID;
  MEM : RD;
%}

//No.12 Store Floating to Memory :
pipe_class fpu_storeF(regF src, memory mem) %{
  instruction_count(1);
  mem : RD(read);
  src : RD(read);
  DECODE : ID;
  MEM : RD;
%}

//No.13 FPU Conditional branch :
pipe_class pipe_fpu_branch(regF src1, regF src2, label labl ) %{
  multiple_bundles;
  src1 : RD(read);
  src2 : RD(read);
  DECODE : ID;
  BR : RD;
%}
//No.14 Floating FPU reg operation : dst <-- op reg
pipe_class fpu1_regF(regF dst, regF src) %{
  src : RD(read);
  dst : WB(write);
  DECODE : ID;
  FPU : CA;
%}

// Multi-instruction memory sequence (e.g. atomics); serialized, fixed cost.
pipe_class long_memory_op() %{
  instruction_count(10); multiple_bundles; force_serialization;
  fixed_latency(30);
%}

// Call through the branch unit; serialized with a large fixed latency.
pipe_class simple_call() %{
  instruction_count(10); multiple_bundles; force_serialization;
  fixed_latency(200);
  BR : RD;
%}

pipe_class call() %{
  instruction_count(10); multiple_bundles; force_serialization;
  fixed_latency(200);
%}

//FIXME:
//No.9 Piple slow : for multi-instructions
pipe_class pipe_slow( ) %{
  instruction_count(20);
  force_serialization;
  multiple_bundles;
  fixed_latency(50);
%}
6047 %}
6051 //----------INSTRUCTIONS-------------------------------------------------------
6052 //
6053 // match -- States which machine-independent subtree may be replaced
6054 // by this instruction.
6055 // ins_cost -- The estimated cost of this instruction is used by instruction
6056 // selection to identify a minimum cost tree of machine
6057 // instructions that matches a tree of machine-independent
6058 // instructions.
6059 // format -- A string providing the disassembly for this instruction.
6060 // The value of an instruction's operand may be inserted
6061 // by referring to it with a '$' prefix.
6062 // opcode -- Three instruction opcodes may be provided. These are referred
6063 // to within an encode class as $primary, $secondary, and $tertiary
6064 // respectively. The primary opcode is commonly used to
6065 // indicate the type of machine instruction, while secondary
6066 // and tertiary are often used for prefix options or addressing
6067 // modes.
6068 // ins_encode -- A list of encode classes with parameters. The encode class
6069 // name must have been defined in an 'enc_class' specification
6070 // in the encode section of the architecture description.
// Load Integer
instruct loadI(mRegI dst, memory mem) %{
  match(Set dst (LoadI mem));

  ins_cost(125);
  format %{ "lw $dst, $mem #@loadI" %}
  ins_encode (load_I_enc(dst, mem));
  ins_pipe( ialu_loadI );
%}

// Load Integer, then ConvI2L folded into the (sign-extending) load itself:
// no separate extension instruction is emitted.
instruct loadI_convI2L(mRegL dst, memory mem) %{
  match(Set dst (ConvI2L (LoadI mem)));

  ins_cost(125);
  format %{ "lw $dst, $mem #@loadI_convI2L" %}
  ins_encode (load_I_enc(dst, mem));
  ins_pipe( ialu_loadI );
%}

// Load Integer (32 bit signed) to Byte (8 bit signed)
// (x << 24) >> 24 is matched as a single sign-extending byte load.
instruct loadI2B(mRegI dst, memory mem, immI_24 twentyfour) %{
  match(Set dst (RShiftI (LShiftI (LoadI mem) twentyfour) twentyfour));

  ins_cost(125);
  format %{ "lb $dst, $mem\t# int -> byte #@loadI2B" %}
  ins_encode(load_B_enc(dst, mem));
  ins_pipe(ialu_loadI);
%}

// Load Integer (32 bit signed) to Unsigned Byte (8 bit UNsigned)
// (x & 0xFF) is matched as a zero-extending byte load.
instruct loadI2UB(mRegI dst, memory mem, immI_255 mask) %{
  match(Set dst (AndI (LoadI mem) mask));

  ins_cost(125);
  format %{ "lbu $dst, $mem\t# int -> ubyte #@loadI2UB" %}
  ins_encode(load_UB_enc(dst, mem));
  ins_pipe(ialu_loadI);
%}

// Load Integer (32 bit signed) to Short (16 bit signed)
instruct loadI2S(mRegI dst, memory mem, immI_16 sixteen) %{
  match(Set dst (RShiftI (LShiftI (LoadI mem) sixteen) sixteen));

  ins_cost(125);
  format %{ "lh $dst, $mem\t# int -> short #@loadI2S" %}
  ins_encode(load_S_enc(dst, mem));
  ins_pipe(ialu_loadI);
%}

// Load Integer (32 bit signed) to Unsigned Short/Char (16 bit UNsigned)
instruct loadI2US(mRegI dst, memory mem, immI_65535 mask) %{
  match(Set dst (AndI (LoadI mem) mask));

  ins_cost(125);
  format %{ "lhu $dst, $mem\t# int -> ushort/char #@loadI2US" %}
  ins_encode(load_C_enc(dst, mem));
  ins_pipe(ialu_loadI);
%}
// Load Long.
instruct loadL(mRegL dst, memory mem) %{
// predicate(!((LoadLNode*)n)->require_atomic_access());
  match(Set dst (LoadL mem));

  ins_cost(250);
  format %{ "ld $dst, $mem #@loadL" %}
  ins_encode(load_L_enc(dst, mem));
  ins_pipe( ialu_loadL );
%}

// Load Long - UNaligned
// Currently encoded as a plain ld, hence the high cost.
instruct loadL_unaligned(mRegL dst, memory mem) %{
  match(Set dst (LoadL_unaligned mem));

// FIXME: Jin: Need more effective ldl/ldr
  ins_cost(450);
  format %{ "ld $dst, $mem #@loadL_unaligned\n\t" %}
  ins_encode(load_L_enc(dst, mem));
  ins_pipe( ialu_loadL );
%}

// Store Long
instruct storeL_reg(memory mem, mRegL src) %{
  match(Set mem (StoreL mem src));

  ins_cost(200);
  format %{ "sd $mem, $src #@storeL_reg\n" %}
  ins_encode(store_L_reg_enc(mem, src));
  ins_pipe( ialu_storeL );
%}

// Store zero long: cheaper than storeL_reg because R0 is stored directly.
instruct storeL_immL0(memory mem, immL0 zero) %{
  match(Set mem (StoreL mem zero));

  ins_cost(180);
  format %{ "sd $mem, zero #@storeL_immL0" %}
  ins_encode(store_L_immL0_enc(mem, zero));
  ins_pipe( ialu_storeL );
%}
// Load Compressed Pointer (32-bit zero-extending load)
instruct loadN(mRegN dst, memory mem)
%{
  match(Set dst (LoadN mem));

  ins_cost(125); // XXX
  format %{ "lwu $dst, $mem\t# compressed ptr @ loadN" %}
  ins_encode (load_N_enc(dst, mem));
  ins_pipe( ialu_loadI ); // XXX
%}

// Load Pointer (full 64-bit)
instruct loadP(mRegP dst, memory mem) %{
  match(Set dst (LoadP mem));

  ins_cost(125);
  format %{ "ld $dst, $mem #@loadP" %}
  ins_encode (load_P_enc(dst, mem));
  ins_pipe( ialu_loadI );
%}

// Load Klass Pointer (same encoding as loadP)
instruct loadKlass(mRegP dst, memory mem) %{
  match(Set dst (LoadKlass mem));

  ins_cost(125);
  format %{ "MOV $dst,$mem @ loadKlass" %}
  ins_encode (load_P_enc(dst, mem));
  ins_pipe( ialu_loadI );
%}

// Load narrow Klass Pointer
instruct loadNKlass(mRegN dst, memory mem)
%{
  match(Set dst (LoadNKlass mem));

  ins_cost(125); // XXX
  format %{ "lwu $dst, $mem\t# compressed klass ptr @ loadNKlass" %}
  ins_encode (load_N_enc(dst, mem));
  ins_pipe( ialu_loadI ); // XXX
%}
// Load Constant (32-bit int into register)
instruct loadConI(mRegI dst, immI src) %{
  match(Set dst src);

  ins_cost(150);
  format %{ "mov $dst, $src #@loadConI" %}
  ins_encode %{
    Register dst = $dst$$Register;
    int value = $src$$constant;
    __ move(dst, value);
  %}
  ins_pipe( ialu_regI_regI );
%}

// Load arbitrary 64-bit constant via the set64 macro sequence.
instruct loadConL_set64(mRegL dst, immL src) %{
  match(Set dst src);
  ins_cost(120);
  format %{ "li $dst, $src @ loadConL_set64" %}
  ins_encode %{
    __ set64($dst$$Register, $src$$constant);
  %}
  ins_pipe(ialu_regL_regL);
%}

/*
// Load long value from constant table (predicated by immL_expensive).
instruct loadConL_load(mRegL dst, immL_expensive src) %{
  match(Set dst src);
  ins_cost(150);
  format %{ "ld $dst, $constantoffset[$constanttablebase] # load long $src from table @ loadConL_ldx" %}
  ins_encode %{
    int con_offset = $constantoffset($src);

    if (Assembler::is_simm16(con_offset)) {
      __ ld($dst$$Register, $constanttablebase, con_offset);
    } else {
      __ set64(AT, con_offset);
      if (UseLoongsonISA) {
        __ gsldx($dst$$Register, $constanttablebase, AT, 0);
      } else {
        __ daddu(AT, $constanttablebase, AT);
        __ ld($dst$$Register, AT, 0);
      }
    }
  %}
  ins_pipe(ialu_loadI);
%}
*/

// Long constant that fits in a 16-bit immediate: a single daddiu.
instruct loadConL16(mRegL dst, immL16 src) %{
  match(Set dst src);
  ins_cost(105);
  format %{ "mov $dst, $src #@loadConL16" %}
  ins_encode %{
    Register dst_reg = as_Register($dst$$reg);
    int value = $src$$constant;
    __ daddiu(dst_reg, R0, value);
  %}
  ins_pipe( ialu_regL_regL );
%}

// Zero long constant: cheapest form, synthesized from R0.
instruct loadConL0(mRegL dst, immL0 src) %{
  match(Set dst src);
  ins_cost(100);
  format %{ "mov $dst, zero #@loadConL0" %}
  ins_encode %{
    Register dst_reg = as_Register($dst$$reg);
    __ daddu(dst_reg, R0, R0);
  %}
  ins_pipe( ialu_regL_regL );
%}
// Load Range (array length); same encoding as loadI.
instruct loadRange(mRegI dst, memory mem) %{
  match(Set dst (LoadRange mem));

  ins_cost(125);
  format %{ "MOV $dst,$mem @ loadRange" %}
  ins_encode(load_I_enc(dst, mem));
  ins_pipe( ialu_loadI );
%}

// Store Pointer
instruct storeP(memory mem, mRegP src ) %{
  match(Set mem (StoreP mem src));

  ins_cost(125);
  format %{ "sd $src, $mem #@storeP" %}
  ins_encode(store_P_reg_enc(mem, src));
  ins_pipe( ialu_storeI );
%}

// Store NULL Pointer, mark word, or other simple pointer constant.
instruct storeImmP0(memory mem, immP0 zero) %{
  match(Set mem (StoreP mem zero));

  ins_cost(125);
  format %{ "mov $mem, $zero #@storeImmP0" %}
  ins_encode(store_P_immP0_enc(mem));
  ins_pipe( ialu_storeI );
%}

// Store Byte Immediate
instruct storeImmB(memory mem, immI8 src) %{
  match(Set mem (StoreB mem src));

  ins_cost(150);
  format %{ "movb $mem, $src #@storeImmB" %}
  ins_encode(store_B_immI_enc(mem, src));
  ins_pipe( ialu_storeI );
%}

// Store Compressed Pointer
instruct storeN(memory mem, mRegN src)
%{
  match(Set mem (StoreN mem src));

  ins_cost(125); // XXX
  format %{ "sw $mem, $src\t# compressed ptr @ storeN" %}
  ins_encode(store_N_reg_enc(mem, src));
  ins_pipe( ialu_storeI );
%}

instruct storeNKlass(memory mem, mRegN src)
%{
  match(Set mem (StoreNKlass mem src));

  ins_cost(125); // XXX
  format %{ "sw $mem, $src\t# compressed klass ptr @ storeNKlass" %}
  ins_encode(store_N_reg_enc(mem, src));
  ins_pipe( ialu_storeI );
%}

// Store zero narrow pointer. Only valid when both heap and klass bases are
// NULL (see predicate): only then does narrow NULL encode as all-zero bits.
instruct storeImmN0(memory mem, immN0 zero)
%{
  predicate(Universe::narrow_oop_base() == NULL && Universe::narrow_klass_base() == NULL);
  match(Set mem (StoreN mem zero));

  ins_cost(125); // XXX
  format %{ "storeN0 $mem, R12\t# compressed ptr" %}
  ins_encode(storeImmN0_enc(mem, zero));
  ins_pipe( ialu_storeI );
%}
// Store Byte
instruct storeB(memory mem, mRegI src) %{
  match(Set mem (StoreB mem src));

  ins_cost(125);
  format %{ "sb $src, $mem #@storeB" %}
  ins_encode(store_B_reg_enc(mem, src));
  ins_pipe( ialu_storeI );
%}

// Byte store of a long: sb only writes the low 8 bits, so the L2I
// conversion costs nothing.
instruct storeB_convL2I(memory mem, mRegL src) %{
  match(Set mem (StoreB mem (ConvL2I src)));

  ins_cost(125);
  format %{ "sb $src, $mem #@storeB_convL2I" %}
  ins_encode(store_B_reg_enc(mem, src));
  ins_pipe( ialu_storeI );
%}

// Load Byte (8bit signed)
instruct loadB(mRegI dst, memory mem) %{
  match(Set dst (LoadB mem));

  ins_cost(125);
  format %{ "lb $dst, $mem #@loadB" %}
  ins_encode(load_B_enc(dst, mem));
  ins_pipe( ialu_loadI );
%}

// Signed byte load with ConvI2L folded into the sign-extending load.
instruct loadB_convI2L(mRegL dst, memory mem) %{
  match(Set dst (ConvI2L (LoadB mem)));

  ins_cost(125);
  format %{ "lb $dst, $mem #@loadB_convI2L" %}
  ins_encode(load_B_enc(dst, mem));
  ins_pipe( ialu_loadI );
%}

// Load Byte (8bit UNsigned)
instruct loadUB(mRegI dst, memory mem) %{
  match(Set dst (LoadUB mem));

  ins_cost(125);
  format %{ "lbu $dst, $mem #@loadUB" %}
  ins_encode(load_UB_enc(dst, mem));
  ins_pipe( ialu_loadI );
%}

instruct loadUB_convI2L(mRegL dst, memory mem) %{
  match(Set dst (ConvI2L (LoadUB mem)));

  ins_cost(125);
  format %{ "lbu $dst, $mem #@loadUB_convI2L" %}
  ins_encode(load_UB_enc(dst, mem));
  ins_pipe( ialu_loadI );
%}

// Load Short (16bit signed)
instruct loadS(mRegI dst, memory mem) %{
  match(Set dst (LoadS mem));

  ins_cost(125);
  format %{ "lh $dst, $mem #@loadS" %}
  ins_encode(load_S_enc(dst, mem));
  ins_pipe( ialu_loadI );
%}

// Load Short (16 bit signed) to Byte (8 bit signed)
instruct loadS2B(mRegI dst, memory mem, immI_24 twentyfour) %{
  match(Set dst (RShiftI (LShiftI (LoadS mem) twentyfour) twentyfour));

  ins_cost(125);
  format %{ "lb $dst, $mem\t# short -> byte #@loadS2B" %}
  ins_encode(load_B_enc(dst, mem));
  ins_pipe(ialu_loadI);
%}

instruct loadS_convI2L(mRegL dst, memory mem) %{
  match(Set dst (ConvI2L (LoadS mem)));

  ins_cost(125);
  format %{ "lh $dst, $mem #@loadS_convI2L" %}
  ins_encode(load_S_enc(dst, mem));
  ins_pipe( ialu_loadI );
%}
// Store Integer Immediate
instruct storeImmI(memory mem, immI src) %{
  match(Set mem (StoreI mem src));

  ins_cost(150);
  format %{ "mov $mem, $src #@storeImmI" %}
  ins_encode(store_I_immI_enc(mem, src));
  ins_pipe( ialu_storeI );
%}

// Store Integer
instruct storeI(memory mem, mRegI src) %{
  match(Set mem (StoreI mem src));

  ins_cost(125);
  format %{ "sw $mem, $src #@storeI" %}
  ins_encode(store_I_reg_enc(mem, src));
  ins_pipe( ialu_storeI );
%}

// Word store of a long: sw writes the low 32 bits, so ConvL2I is free.
instruct storeI_convL2I(memory mem, mRegL src) %{
  match(Set mem (StoreI mem (ConvL2I src)));

  ins_cost(125);
  format %{ "sw $mem, $src #@storeI_convL2I" %}
  ins_encode(store_I_reg_enc(mem, src));
  ins_pipe( ialu_storeI );
%}

// Load Float
instruct loadF(regF dst, memory mem) %{
  match(Set dst (LoadF mem));

  ins_cost(150);
  format %{ "loadF $dst, $mem #@loadF" %}
  ins_encode(load_F_enc(dst, mem));
  ins_pipe( ialu_loadI );
%}
// Load a pointer constant, emitting relocation info for metadata/oop
// constants so the GC / class unloading can patch the embedded value.
instruct loadConP_general(mRegP dst, immP src) %{
  match(Set dst src);

  ins_cost(120);
  format %{ "li $dst, $src #@loadConP_general" %}

  ins_encode %{
    Register dst = $dst$$Register;
    long* value = (long*)$src$$constant;

    if($src->constant_reloc() == relocInfo::metadata_type){
      int klass_index = __ oop_recorder()->find_index((Klass*)value);
      RelocationHolder rspec = metadata_Relocation::spec(klass_index);

      // patchable_set48 keeps a fixed-length sequence so the relocated
      // constant can be patched in place later.
      __ relocate(rspec);
      __ patchable_set48(dst, (long)value);
    }else if($src->constant_reloc() == relocInfo::oop_type){
      int oop_index = __ oop_recorder()->find_index((jobject)value);
      RelocationHolder rspec = oop_Relocation::spec(oop_index);

      __ relocate(rspec);
      __ patchable_set48(dst, (long)value);
    } else if ($src->constant_reloc() == relocInfo::none) {
      __ set64(dst, (long)value);
    }
    // NOTE(review): any other reloc kind emits nothing at all here —
    // presumably unreachable for immP, but worth confirming.
  %}

  ins_pipe( ialu_regI_regI );
%}

/*
instruct loadConP_load(mRegP dst, immP_load src) %{
  match(Set dst src);

  ins_cost(100);
  format %{ "ld $dst, [$constanttablebase + $constantoffset] load from constant table: ptr=$src @ loadConP_load" %}

  ins_encode %{
    int con_offset = $constantoffset($src);

    if (Assembler::is_simm16(con_offset)) {
      __ ld($dst$$Register, $constanttablebase, con_offset);
    } else {
      __ set64(AT, con_offset);
      if (UseLoongsonISA) {
        __ gsldx($dst$$Register, $constanttablebase, AT, 0);
      } else {
        __ daddu(AT, $constanttablebase, AT);
        __ ld($dst$$Register, AT, 0);
      }
    }
  %}

  ins_pipe(ialu_loadI);
%}
*/

// Pointer constant that needs no oop/metadata relocation: plain set64.
instruct loadConP_no_oop_cheap(mRegP dst, immP_no_oop_cheap src) %{
  match(Set dst src);

  ins_cost(80);
  format %{ "li $dst, $src @ loadConP_no_oop_cheap" %}

  ins_encode %{
    __ set64($dst$$Register, $src$$constant);
  %}

  ins_pipe(ialu_regI_regI);
%}
// Load the safepoint polling page address (immP_poll); no relocation needed.
instruct loadConP_poll(mRegP dst, immP_poll src) %{
  match(Set dst src);

  ins_cost(50);
  format %{ "li $dst, $src #@loadConP_poll" %}

  ins_encode %{
    Register dst = $dst$$Register;
    intptr_t value = (intptr_t)$src$$constant;

    __ set64(dst, (jlong)value);
  %}

  ins_pipe( ialu_regI_regI );
%}

// NULL pointer constant: synthesized from R0.
instruct loadConP0(mRegP dst, immP0 src)
%{
  match(Set dst src);

  ins_cost(50);
  format %{ "mov $dst, R0\t# ptr" %}
  ins_encode %{
    Register dst_reg = $dst$$Register;
    __ daddu(dst_reg, R0, R0);
  %}
  ins_pipe( ialu_regI_regI );
%}

// Narrow NULL constant.
instruct loadConN0(mRegN dst, immN0 src) %{
  match(Set dst src);
  format %{ "move $dst, R0\t# compressed NULL ptr" %}
  ins_encode %{
    __ move($dst$$Register, R0);
  %}
  ins_pipe( ialu_regI_regI );
%}

// Narrow oop constant; set_narrow_oop records the relocation.
instruct loadConN(mRegN dst, immN src) %{
  match(Set dst src);

  ins_cost(125);
  format %{ "li $dst, $src\t# compressed ptr @ loadConN" %}
  ins_encode %{
    Register dst = $dst$$Register;
    __ set_narrow_oop(dst, (jobject)$src$$constant);
  %}
  ins_pipe( ialu_regI_regI ); // XXX
%}

// Narrow klass constant; set_narrow_klass records the relocation.
instruct loadConNKlass(mRegN dst, immNKlass src) %{
  match(Set dst src);

  ins_cost(125);
  format %{ "li $dst, $src\t# compressed klass ptr @ loadConNKlass" %}
  ins_encode %{
    Register dst = $dst$$Register;
    __ set_narrow_klass(dst, (Klass*)$src$$constant);
  %}
  ins_pipe( ialu_regI_regI ); // XXX
%}
//FIXME
// Tail Call; Jump from runtime stub to Java code.
// Also known as an 'interprocedural jump'.
// Target of jump will eventually return to caller.
// TailJump below removes the return address.
instruct TailCalljmpInd(mRegP jump_target, mRegP method_oop) %{
  match(TailCall jump_target method_oop );
  ins_cost(300);
  format %{ "JMP $jump_target \t# @TailCalljmpInd" %}

  ins_encode %{
    Register target = $jump_target$$Register;
    Register oop = $method_oop$$Register;

    /* 2012/10/12 Jin: RA will be used in generate_forward_exception() */
    __ push(RA);

    __ move(S3, oop);            // method oop passed in S3
    __ jr(target);
    __ nop();                    // branch delay slot
  %}

  ins_pipe( pipe_jump );
%}

// Create exception oop: created by stack-crawling runtime code.
// Created exception is now available to this handler, and is setup
// just prior to jumping to this handler. No code emitted.
instruct CreateException( a0_RegP ex_oop )
%{
  match(Set ex_oop (CreateEx));

  // use the following format syntax
  format %{ "# exception oop is in A0; no code emitted @CreateException" %}
  ins_encode %{
    /* Jin: X86 leaves this function empty */
    __ block_comment("CreateException is empty in X86/MIPS");
  %}
  ins_pipe( empty );
  // ins_pipe( pipe_jump );
%}
6664 /* 2012/9/14 Jin: The mechanism of exception handling is clear now.
6666 - Common try/catch:
6667 2012/9/14 Jin: [stubGenerator_mips.cpp] generate_forward_exception()
6668 |- V0, V1 are created
6669 |- T9 <= SharedRuntime::exception_handler_for_return_address
6670 `- jr T9
6671 `- the caller's exception_handler
6672 `- jr OptoRuntime::exception_blob
6673 `- here
6674 - Rethrow(e.g. 'unwind'):
6675 * The callee:
6676 |- an exception is triggered during execution
6677 `- exits the callee method through RethrowException node
6678 |- The callee pushes exception_oop(T0) and exception_pc(RA)
6679 `- The callee jumps to OptoRuntime::rethrow_stub()
6680 * In OptoRuntime::rethrow_stub:
6681 |- The VM calls _rethrow_Java to determine the return address in the caller method
6682 `- exits the stub with tailjmpInd
6683 |- pops exception_oop(V0) and exception_pc(V1)
6684 `- jumps to the return address(usually an exception_handler)
6685 * The caller:
6686 `- continues processing the exception_blob with V0/V1
6687 */
6689 /*
6690 Disassembling OptoRuntime::rethrow_stub()
6692 ; locals
6693 0x2d3bf320: addiu sp, sp, 0xfffffff8
6694 0x2d3bf324: sw ra, 0x4(sp)
6695 0x2d3bf328: sw fp, 0x0(sp)
6696 0x2d3bf32c: addu fp, sp, zero
6697 0x2d3bf330: addiu sp, sp, 0xfffffff0
6698 0x2d3bf334: sw ra, 0x8(sp)
6699 0x2d3bf338: sw t0, 0x4(sp)
6700 0x2d3bf33c: sw sp, 0x0(sp)
6702 ; get_thread(S2)
6703 0x2d3bf340: addu s2, sp, zero
6704 0x2d3bf344: srl s2, s2, 12
6705 0x2d3bf348: sll s2, s2, 2
6706 0x2d3bf34c: lui at, 0x2c85
6707 0x2d3bf350: addu at, at, s2
6708 0x2d3bf354: lw s2, 0xffffcc80(at)
6710 0x2d3bf358: lw s0, 0x0(sp)
 6711   0x2d3bf35c: sw s0, 0x118(s2)    // last_sp -> thread
6712 0x2d3bf360: sw s2, 0xc(sp)
6714 ; OptoRuntime::rethrow_C(oopDesc* exception, JavaThread* thread, address ret_pc)
6715 0x2d3bf364: lw a0, 0x4(sp)
6716 0x2d3bf368: lw a1, 0xc(sp)
6717 0x2d3bf36c: lw a2, 0x8(sp)
6718 ;; Java_To_Runtime
6719 0x2d3bf370: lui t9, 0x2c34
6720 0x2d3bf374: addiu t9, t9, 0xffff8a48
6721 0x2d3bf378: jalr t9
6722 0x2d3bf37c: nop
6724 0x2d3bf380: addu s3, v0, zero ; S3: SharedRuntime::raw_exception_handler_for_return_address()
6726 0x2d3bf384: lw s0, 0xc(sp)
6727 0x2d3bf388: sw zero, 0x118(s0)
6728 0x2d3bf38c: sw zero, 0x11c(s0)
6729 0x2d3bf390: lw s1, 0x144(s0) ; ex_oop: S1
6730 0x2d3bf394: addu s2, s0, zero
6731 0x2d3bf398: sw zero, 0x144(s2)
6732 0x2d3bf39c: lw s0, 0x4(s2)
6733 0x2d3bf3a0: addiu s4, zero, 0x0
6734 0x2d3bf3a4: bne s0, s4, 0x2d3bf3d4
6735 0x2d3bf3a8: nop
6736 0x2d3bf3ac: addiu sp, sp, 0x10
6737 0x2d3bf3b0: addiu sp, sp, 0x8
6738 0x2d3bf3b4: lw ra, 0xfffffffc(sp)
6739 0x2d3bf3b8: lw fp, 0xfffffff8(sp)
6740 0x2d3bf3bc: lui at, 0x2b48
6741 0x2d3bf3c0: lw at, 0x100(at)
6743 ; tailjmpInd: Restores exception_oop & exception_pc
6744 0x2d3bf3c4: addu v1, ra, zero
6745 0x2d3bf3c8: addu v0, s1, zero
6746 0x2d3bf3cc: jr s3
6747 0x2d3bf3d0: nop
6748 ; Exception:
6749 0x2d3bf3d4: lui s1, 0x2cc8 ; generate_forward_exception()
6750 0x2d3bf3d8: addiu s1, s1, 0x40
6751 0x2d3bf3dc: addiu s2, zero, 0x0
6752 0x2d3bf3e0: addiu sp, sp, 0x10
6753 0x2d3bf3e4: addiu sp, sp, 0x8
6754 0x2d3bf3e8: lw ra, 0xfffffffc(sp)
6755 0x2d3bf3ec: lw fp, 0xfffffff8(sp)
6756 0x2d3bf3f0: lui at, 0x2b48
6757 0x2d3bf3f4: lw at, 0x100(at)
6758 ; TailCalljmpInd
6759 __ push(RA); ; to be used in generate_forward_exception()
6760 0x2d3bf3f8: addu t7, s2, zero
6761 0x2d3bf3fc: jr s1
6762 0x2d3bf400: nop
6763 */
// Rethrow exception:
// The exception oop will come in the first argument position.
// Then JUMP (not call) to the rethrow stub code.
instruct RethrowException()
%{
  match(Rethrow);

  // use the following format syntax
  format %{ "JMP rethrow_stub #@RethrowException" %}
  ins_encode %{
    __ block_comment("@ RethrowException");

    // Mark and relocate the call site so the stub address can be patched.
    cbuf.set_insts_mark();
    cbuf.relocate(cbuf.insts_mark(), runtime_call_Relocation::spec());

    // call OptoRuntime::rethrow_stub to get the exception handler in parent method
    __ patchable_set48(T9, (jlong)OptoRuntime::rethrow_stub());
    __ jr(T9);
    __ nop();                  // branch delay slot
  %}
  ins_pipe( pipe_jump );
%}
// Branch on pointer compared against NULL (the zero register).
// Only eq/ne are generated for pointer null checks; the unsigned orderings
// (above/below/above_equal/below_equal) are deliberately not implemented here.
instruct branchConP_zero(cmpOpU cmp, mRegP op1, immP0 zero, label labl) %{
  match(If cmp (CmpP op1 zero));
  effect(USE labl);

  ins_cost(180);
  format %{ "b$cmp $op1, R0, $labl #@branchConP_zero" %}

  ins_encode %{
    Register op1 = $op1$$Register;
    Register op2 = R0;
    // NOTE(review): the original code bound `Label &L = *($labl$$label)` and
    // then tested `if (&L)`. The address of a reference can never be NULL
    // (and forming the reference from a NULL pointer is undefined behavior),
    // so the `(int)0` fallback only worked by accident. Test the pointer.
    Label* L = $labl$$label;     // NULL until the label is bound
    int flag = $cmp$$cmpcode;

    switch (flag) {
      case 0x01: // equal
        if (L != NULL)
          __ beq(op1, op2, *L);
        else
          __ beq(op1, op2, (int)0);
        break;
      case 0x02: // not_equal
        if (L != NULL)
          __ bne(op1, op2, *L);
        else
          __ bne(op1, op2, (int)0);
        break;
      default:
        Unimplemented();
    }
    __ nop();                    // fill the branch delay slot
  %}

  ins_pc_relative(1);
  ins_pipe( pipe_alu_branch );
%}
// Branch on pointer-vs-pointer comparison. Unsigned orderings are computed
// with sltu into AT, then branched on.
instruct branchConP(cmpOpU cmp, mRegP op1, mRegP op2, label labl) %{
  match(If cmp (CmpP op1 op2));
// predicate(can_branch_register(_kids[0]->_leaf, _kids[1]->_leaf));
  effect(USE labl);

  ins_cost(200);
  format %{ "b$cmp $op1, $op2, $labl #@branchConP" %}

  ins_encode %{
    Register op1 = $op1$$Register;
    Register op2 = $op2$$Register;
    // NOTE(review): replaced the always-true `if (&L)` test on a reference
    // (UB when the label pointer is NULL) with an explicit pointer check.
    Label* L = $labl$$label;     // NULL until the label is bound
    int flag = $cmp$$cmpcode;

    switch (flag) {
      case 0x01: // equal
        if (L != NULL)
          __ beq(op1, op2, *L);
        else
          __ beq(op1, op2, (int)0);
        break;
      case 0x02: // not_equal
        if (L != NULL)
          __ bne(op1, op2, *L);
        else
          __ bne(op1, op2, (int)0);
        break;
      case 0x03: // above
        __ sltu(AT, op2, op1);
        if (L != NULL)
          __ bne(R0, AT, *L);
        else
          __ bne(R0, AT, (int)0);
        break;
      case 0x04: // above_equal
        __ sltu(AT, op1, op2);
        if (L != NULL)
          __ beq(AT, R0, *L);
        else
          __ beq(AT, R0, (int)0);
        break;
      case 0x05: // below
        __ sltu(AT, op1, op2);
        if (L != NULL)
          __ bne(R0, AT, *L);
        else
          __ bne(R0, AT, (int)0);
        break;
      case 0x06: // below_equal
        __ sltu(AT, op2, op1);
        if (L != NULL)
          __ beq(AT, R0, *L);
        else
          __ beq(AT, R0, (int)0);
        break;
      default:
        Unimplemented();
    }
    __ nop();                    // fill the branch delay slot
  %}

  ins_pc_relative(1);
  ins_pipe( pipe_alu_branch );
%}
// Branch on narrow (compressed) pointer compared against narrow NULL.
// Only eq/ne make sense against NULL.
instruct cmpN_null_branch(cmpOp cmp, mRegN op1, immN0 null, label labl) %{
  match(If cmp (CmpN op1 null));
  effect(USE labl);

  ins_cost(180);
  format %{ "CMP $op1,0\t! compressed ptr\n\t"
            "BP$cmp $labl @ cmpN_null_branch" %}
  ins_encode %{
    Register op1 = $op1$$Register;
    Register op2 = R0;
    // NOTE(review): replaced the always-true `if (&L)` test on a reference
    // (UB when the label pointer is NULL) with an explicit pointer check.
    Label* L = $labl$$label;     // NULL until the label is bound
    int flag = $cmp$$cmpcode;

    switch (flag) {
      case 0x01: // equal
        if (L != NULL)
          __ beq(op1, op2, *L);
        else
          __ beq(op1, op2, (int)0);
        break;
      case 0x02: // not_equal
        if (L != NULL)
          __ bne(op1, op2, *L);
        else
          __ bne(op1, op2, (int)0);
        break;
      default:
        Unimplemented();
    }
    __ nop();                    // fill the branch delay slot
  %}
  //TODO: pipe_branchP or create pipe_branchN LEE
  ins_pc_relative(1);
  ins_pipe( pipe_alu_branch );
%}
// Branch on narrow (compressed) pointer vs narrow pointer comparison.
instruct cmpN_reg_branch(cmpOp cmp, mRegN op1, mRegN op2, label labl) %{
  match(If cmp (CmpN op1 op2));
  effect(USE labl);

  ins_cost(180);
  format %{ "CMP $op1,$op2\t! compressed ptr\n\t"
            "BP$cmp $labl" %}
  ins_encode %{
    Register op1_reg = $op1$$Register;
    Register op2_reg = $op2$$Register;
    // NOTE(review): replaced the always-true `if (&L)` test on a reference
    // (UB when the label pointer is NULL) with an explicit pointer check.
    Label* L = $labl$$label;     // NULL until the label is bound
    int flag = $cmp$$cmpcode;

    switch (flag) {
      case 0x01: // equal
        if (L != NULL)
          __ beq(op1_reg, op2_reg, *L);
        else
          __ beq(op1_reg, op2_reg, (int)0);
        break;
      case 0x02: // not_equal
        if (L != NULL)
          __ bne(op1_reg, op2_reg, *L);
        else
          __ bne(op1_reg, op2_reg, (int)0);
        break;
      case 0x03: // above
        __ sltu(AT, op2_reg, op1_reg);
        if (L != NULL)
          __ bne(R0, AT, *L);
        else
          __ bne(R0, AT, (int)0);
        break;
      case 0x04: // above_equal
        __ sltu(AT, op1_reg, op2_reg);
        if (L != NULL)
          __ beq(AT, R0, *L);
        else
          __ beq(AT, R0, (int)0);
        break;
      case 0x05: // below
        __ sltu(AT, op1_reg, op2_reg);
        if (L != NULL)
          __ bne(R0, AT, *L);
        else
          __ bne(R0, AT, (int)0);
        break;
      case 0x06: // below_equal
        __ sltu(AT, op2_reg, op1_reg);
        if (L != NULL)
          __ beq(AT, R0, *L);
        else
          __ beq(AT, R0, (int)0);
        break;
      default:
        Unimplemented();
    }
    __ nop();                    // fill the branch delay slot
  %}
  ins_pc_relative(1);
  ins_pipe( pipe_alu_branch );
%}
// Branch on unsigned int comparison of two registers.
instruct branchConIU_reg_reg(cmpOpU cmp, mRegI src1, mRegI src2, label labl) %{
  match( If cmp (CmpU src1 src2) );
  effect(USE labl);
  format %{ "BR$cmp $src1, $src2, $labl #@branchConIU_reg_reg" %}

  ins_encode %{
    Register op1 = $src1$$Register;
    Register op2 = $src2$$Register;
    // NOTE(review): replaced the always-true `if (&L)` test on a reference
    // (UB when the label pointer is NULL) with an explicit pointer check.
    Label* L = $labl$$label;     // NULL until the label is bound
    int flag = $cmp$$cmpcode;

    switch (flag) {
      case 0x01: // equal
        if (L != NULL)
          __ beq(op1, op2, *L);
        else
          __ beq(op1, op2, (int)0);
        break;
      case 0x02: // not_equal
        if (L != NULL)
          __ bne(op1, op2, *L);
        else
          __ bne(op1, op2, (int)0);
        break;
      case 0x03: // above
        __ sltu(AT, op2, op1);
        if (L != NULL)
          __ bne(AT, R0, *L);
        else
          __ bne(AT, R0, (int)0);
        break;
      case 0x04: // above_equal
        __ sltu(AT, op1, op2);
        if (L != NULL)
          __ beq(AT, R0, *L);
        else
          __ beq(AT, R0, (int)0);
        break;
      case 0x05: // below
        __ sltu(AT, op1, op2);
        if (L != NULL)
          __ bne(AT, R0, *L);
        else
          __ bne(AT, R0, (int)0);
        break;
      case 0x06: // below_equal
        __ sltu(AT, op2, op1);
        if (L != NULL)
          __ beq(AT, R0, *L);
        else
          __ beq(AT, R0, (int)0);
        break;
      default:
        Unimplemented();
    }
    __ nop();                    // fill the branch delay slot
  %}

  ins_pc_relative(1);
  ins_pipe( pipe_alu_branch );
%}
// Conditional branch on an unsigned compare of a register against an arbitrary
// 32-bit immediate. The immediate is loaded into AT up front; for the sltu-based
// cases AT then serves as both compare operand and result (clobbered in place).
// cmpcode: 0x01 eq, 0x02 ne, 0x03 above, 0x04 above_equal, 0x05 below, 0x06 below_equal.
// Trailing nop = branch delay slot; 'if (&L)' is the port-wide null-label idiom.
7086 instruct branchConIU_reg_imm(cmpOpU cmp, mRegI src1, immI src2, label labl) %{
7087 match( If cmp (CmpU src1 src2) );
7088 effect(USE labl);
7089 format %{ "BR$cmp $src1, $src2, $labl #@branchConIU_reg_imm" %}
7091 ins_encode %{
7092 Register op1 = $src1$$Register;
7093 int val = $src2$$constant;
7094 Label &L = *($labl$$label);
7095 int flag = $cmp$$cmpcode;
// Materialize the immediate once; every case below compares against AT.
7097 __ move(AT, val);
7098 switch(flag)
7099 {
7100 case 0x01: //equal
7101 if (&L)
7102 __ beq(op1, AT, L);
7103 else
7104 __ beq(op1, AT, (int)0);
7105 break;
7106 case 0x02: //not_equal
7107 if (&L)
7108 __ bne(op1, AT, L);
7109 else
7110 __ bne(op1, AT, (int)0);
7111 break;
7112 case 0x03: //above
7113 __ sltu(AT, AT, op1);
7114 if(&L)
7115 __ bne(R0, AT, L);
7116 else
7117 __ bne(R0, AT, (int)0);
7118 break;
7119 case 0x04: //above_equal
7120 __ sltu(AT, op1, AT);
7121 if(&L)
7122 __ beq(AT, R0, L);
7123 else
7124 __ beq(AT, R0, (int)0);
7125 break;
7126 case 0x05: //below
7127 __ sltu(AT, op1, AT);
7128 if(&L)
7129 __ bne(R0, AT, L);
7130 else
7131 __ bne(R0, AT, (int)0);
7132 break;
7133 case 0x06: //below_equal
7134 __ sltu(AT, AT, op1);
7135 if(&L)
7136 __ beq(AT, R0, L);
7137 else
7138 __ beq(AT, R0, (int)0);
7139 break;
7140 default:
7141 Unimplemented();
7142 }
// Branch delay slot.
7143 __ nop();
7144 %}
7146 ins_pc_relative(1);
7147 ins_pipe( pipe_alu_branch );
7148 %}
// Conditional branch on a signed 32-bit compare of two registers (If (CmpI a b)).
// Same shape as branchConIU_reg_reg but uses signed slt instead of sltu.
// cmpcode: 0x01 eq, 0x02 ne, 0x03 gt, 0x04 ge, 0x05 lt, 0x06 le (case labels keep
// the unsigned names above/below but the slt makes them signed comparisons).
// Trailing nop = branch delay slot; 'if (&L)' is the port-wide null-label idiom.
7150 instruct branchConI_reg_reg(cmpOp cmp, mRegI src1, mRegI src2, label labl) %{
7151 match( If cmp (CmpI src1 src2) );
7152 effect(USE labl);
7153 format %{ "BR$cmp $src1, $src2, $labl #@branchConI_reg_reg" %}
7155 ins_encode %{
7156 Register op1 = $src1$$Register;
7157 Register op2 = $src2$$Register;
7158 Label &L = *($labl$$label);
7159 int flag = $cmp$$cmpcode;
7161 switch(flag)
7162 {
7163 case 0x01: //equal
7164 if (&L)
7165 __ beq(op1, op2, L);
7166 else
7167 __ beq(op1, op2, (int)0);
7168 break;
7169 case 0x02: //not_equal
7170 if (&L)
7171 __ bne(op1, op2, L);
7172 else
7173 __ bne(op1, op2, (int)0);
7174 break;
7175 case 0x03: //above
7176 __ slt(AT, op2, op1);
7177 if(&L)
7178 __ bne(R0, AT, L);
7179 else
7180 __ bne(R0, AT, (int)0);
7181 break;
7182 case 0x04: //above_equal
7183 __ slt(AT, op1, op2);
7184 if(&L)
7185 __ beq(AT, R0, L);
7186 else
7187 __ beq(AT, R0, (int)0);
7188 break;
7189 case 0x05: //below
7190 __ slt(AT, op1, op2);
7191 if(&L)
7192 __ bne(R0, AT, L);
7193 else
7194 __ bne(R0, AT, (int)0);
7195 break;
7196 case 0x06: //below_equal
7197 __ slt(AT, op2, op1);
7198 if(&L)
7199 __ beq(AT, R0, L);
7200 else
7201 __ beq(AT, R0, (int)0);
7202 break;
7203 default:
7204 Unimplemented();
7205 }
// Branch delay slot.
7206 __ nop();
7207 %}
7209 ins_pc_relative(1);
7210 ins_pipe( pipe_alu_branch );
7211 %}
// Conditional branch on a signed compare against the constant 0 (immI0).
// Cheaper (ins_cost 170) than the generic immediate form: comparisons against
// zero map directly onto the MIPS compare-with-zero branches bgtz/bgez/bltz/blez,
// so no slt/AT materialization is needed.
// Trailing nop = branch delay slot; 'if (&L)' is the port-wide null-label idiom.
7213 instruct branchConI_reg_imm0(cmpOp cmp, mRegI src1, immI0 src2, label labl) %{
7214 match( If cmp (CmpI src1 src2) );
7215 effect(USE labl);
7216 ins_cost(170);
7217 format %{ "BR$cmp $src1, $src2, $labl #@branchConI_reg_imm0" %}
7219 ins_encode %{
7220 Register op1 = $src1$$Register;
7221 // int val = $src2$$constant;
7222 Label &L = *($labl$$label);
7223 int flag = $cmp$$cmpcode;
7225 //__ move(AT, val);
7226 switch(flag)
7227 {
7228 case 0x01: //equal
7229 if (&L)
7230 __ beq(op1, R0, L);
7231 else
7232 __ beq(op1, R0, (int)0);
7233 break;
7234 case 0x02: //not_equal
7235 if (&L)
7236 __ bne(op1, R0, L);
7237 else
7238 __ bne(op1, R0, (int)0);
7239 break;
7240 case 0x03: //greater
7241 if(&L)
7242 __ bgtz(op1, L);
7243 else
7244 __ bgtz(op1, (int)0);
7245 break;
7246 case 0x04: //greater_equal
7247 if(&L)
7248 __ bgez(op1, L);
7249 else
7250 __ bgez(op1, (int)0);
7251 break;
7252 case 0x05: //less
7253 if(&L)
7254 __ bltz(op1, L);
7255 else
7256 __ bltz(op1, (int)0);
7257 break;
7258 case 0x06: //less_equal
7259 if(&L)
7260 __ blez(op1, L);
7261 else
7262 __ blez(op1, (int)0);
7263 break;
7264 default:
7265 Unimplemented();
7266 }
// Branch delay slot.
7267 __ nop();
7268 %}
7270 ins_pc_relative(1);
7271 ins_pipe( pipe_alu_branch );
7272 %}
// Conditional branch on a signed compare of a register against an arbitrary
// 32-bit immediate. The immediate is loaded into AT up front; signed slt cases
// then reuse AT as both operand and result. Costed at 200 so the cheaper
// imm0 / imm16_sub forms are preferred by the matcher when they apply.
// Trailing nop = branch delay slot; 'if (&L)' is the port-wide null-label idiom.
7275 instruct branchConI_reg_imm(cmpOp cmp, mRegI src1, immI src2, label labl) %{
7276 match( If cmp (CmpI src1 src2) );
7277 effect(USE labl);
7278 ins_cost(200);
7279 format %{ "BR$cmp $src1, $src2, $labl #@branchConI_reg_imm" %}
7281 ins_encode %{
7282 Register op1 = $src1$$Register;
7283 int val = $src2$$constant;
7284 Label &L = *($labl$$label);
7285 int flag = $cmp$$cmpcode;
// Materialize the immediate once; every case below compares against AT.
7287 __ move(AT, val);
7288 switch(flag)
7289 {
7290 case 0x01: //equal
7291 if (&L)
7292 __ beq(op1, AT, L);
7293 else
7294 __ beq(op1, AT, (int)0);
7295 break;
7296 case 0x02: //not_equal
7297 if (&L)
7298 __ bne(op1, AT, L);
7299 else
7300 __ bne(op1, AT, (int)0);
7301 break;
7302 case 0x03: //greater
7303 __ slt(AT, AT, op1);
7304 if(&L)
7305 __ bne(R0, AT, L);
7306 else
7307 __ bne(R0, AT, (int)0);
7308 break;
7309 case 0x04: //greater_equal
7310 __ slt(AT, op1, AT);
7311 if(&L)
7312 __ beq(AT, R0, L);
7313 else
7314 __ beq(AT, R0, (int)0);
7315 break;
7316 case 0x05: //less
7317 __ slt(AT, op1, AT);
7318 if(&L)
7319 __ bne(R0, AT, L);
7320 else
7321 __ bne(R0, AT, (int)0);
7322 break;
7323 case 0x06: //less_equal
7324 __ slt(AT, AT, op1);
7325 if(&L)
7326 __ beq(AT, R0, L);
7327 else
7328 __ beq(AT, R0, (int)0);
7329 break;
7330 default:
7331 Unimplemented();
7332 }
// Branch delay slot.
7333 __ nop();
7334 %}
7336 ins_pc_relative(1);
7337 ins_pipe( pipe_alu_branch );
7338 %}
// Conditional branch on an unsigned compare against zero. Several cases are
// degenerate for unsigned x vs 0 and are exploited:
//   above (x >u 0)        == (x != 0)            -> bne
//   above_equal (x >=u 0) is always true          -> beq(R0, R0, ...) = unconditional
//   below (x <u 0)        is never true           -> emit nothing (early return;
//                                                    no branch, so no delay-slot nop needed)
//   below_equal (x <=u 0) == (x == 0)            -> beq
// Trailing nop = branch delay slot; 'if (&L)' is the port-wide null-label idiom.
7340 instruct branchConIU_reg_imm0(cmpOpU cmp, mRegI src1, immI0 zero, label labl) %{
7341 match( If cmp (CmpU src1 zero) );
7342 effect(USE labl);
7343 format %{ "BR$cmp $src1, zero, $labl #@branchConIU_reg_imm0" %}
7345 ins_encode %{
7346 Register op1 = $src1$$Register;
7347 Label &L = *($labl$$label);
7348 int flag = $cmp$$cmpcode;
7350 switch(flag)
7351 {
7352 case 0x01: //equal
7353 if (&L)
7354 __ beq(op1, R0, L);
7355 else
7356 __ beq(op1, R0, (int)0);
7357 break;
7358 case 0x02: //not_equal
7359 if (&L)
7360 __ bne(op1, R0, L);
7361 else
7362 __ bne(op1, R0, (int)0);
7363 break;
7364 case 0x03: //above
7365 if(&L)
7366 __ bne(R0, op1, L);
7367 else
7368 __ bne(R0, op1, (int)0);
7369 break;
7370 case 0x04: //above_equal
// Always true for unsigned: unconditional branch.
7371 if(&L)
7372 __ beq(R0, R0, L);
7373 else
7374 __ beq(R0, R0, (int)0);
7375 break;
7376 case 0x05: //below
// Never true for unsigned: emit no code at all.
7377 return;
7378 break;
7379 case 0x06: //below_equal
7380 if(&L)
7381 __ beq(op1, R0, L);
7382 else
7383 __ beq(op1, R0, (int)0);
7384 break;
7385 default:
7386 Unimplemented();
7387 }
// Branch delay slot.
7388 __ nop();
7389 %}
7391 ins_pc_relative(1);
7392 ins_pipe( pipe_alu_branch );
7393 %}
// Conditional branch on an unsigned compare against a 16-bit immediate (immI16).
// Cheaper (ins_cost 180) than the generic form: above_equal/below can use sltiu
// with the immediate encoded directly in the instruction; the other cases still
// need the constant materialized into AT first.
// Trailing nop = branch delay slot; 'if (&L)' is the port-wide null-label idiom.
7396 instruct branchConIU_reg_immI16(cmpOpU cmp, mRegI src1, immI16 src2, label labl) %{
7397 match( If cmp (CmpU src1 src2) );
7398 effect(USE labl);
7399 ins_cost(180);
7400 format %{ "BR$cmp $src1, $src2, $labl #@branchConIU_reg_immI16" %}
7402 ins_encode %{
7403 Register op1 = $src1$$Register;
7404 int val = $src2$$constant;
7405 Label &L = *($labl$$label);
7406 int flag = $cmp$$cmpcode;
7408 switch(flag)
7409 {
7410 case 0x01: //equal
7411 __ move(AT, val);
7412 if (&L)
7413 __ beq(op1, AT, L);
7414 else
7415 __ beq(op1, AT, (int)0);
7416 break;
7417 case 0x02: //not_equal
7418 __ move(AT, val);
7419 if (&L)
7420 __ bne(op1, AT, L);
7421 else
7422 __ bne(op1, AT, (int)0);
7423 break;
7424 case 0x03: //above
7425 __ move(AT, val);
7426 __ sltu(AT, AT, op1);
7427 if(&L)
7428 __ bne(R0, AT, L);
7429 else
7430 __ bne(R0, AT, (int)0);
7431 break;
7432 case 0x04: //above_equal
// Immediate fits sltiu directly (immI16 operand guarantees the range).
7433 __ sltiu(AT, op1, val);
7434 if(&L)
7435 __ beq(AT, R0, L);
7436 else
7437 __ beq(AT, R0, (int)0);
7438 break;
7439 case 0x05: //below
7440 __ sltiu(AT, op1, val);
7441 if(&L)
7442 __ bne(R0, AT, L);
7443 else
7444 __ bne(R0, AT, (int)0);
7445 break;
7446 case 0x06: //below_equal
7447 __ move(AT, val);
7448 __ sltu(AT, AT, op1);
7449 if(&L)
7450 __ beq(AT, R0, L);
7451 else
7452 __ beq(AT, R0, (int)0);
7453 break;
7454 default:
7455 Unimplemented();
7456 }
// Branch delay slot.
7457 __ nop();
7458 %}
7460 ins_pc_relative(1);
7461 ins_pipe( pipe_alu_branch );
7462 %}
// Conditional branch on a signed 64-bit (long) compare of two registers.
// Same slt-based scheme as the 32-bit form, but each branch fills its delay
// slot explicitly with __ delayed()->nop() instead of a single trailing nop
// (a stylistic difference from the other branchCon* instructs; the emitted
// code is equivalent since exactly one branch is produced per invocation).
// 'if (&target)' is the port-wide null-label idiom.
7465 instruct branchConL_regL_regL(cmpOp cmp, mRegL src1, mRegL src2, label labl) %{
7466 match( If cmp (CmpL src1 src2) );
7467 effect(USE labl);
7468 format %{ "BR$cmp $src1, $src2, $labl #@branchConL_regL_regL" %}
7469 ins_cost(250);
7471 ins_encode %{
7472 Register opr1_reg = as_Register($src1$$reg);
7473 Register opr2_reg = as_Register($src2$$reg);
7475 Label &target = *($labl$$label);
7476 int flag = $cmp$$cmpcode;
7478 switch(flag)
7479 {
7480 case 0x01: //equal
7481 if (&target)
7482 __ beq(opr1_reg, opr2_reg, target);
7483 else
7484 __ beq(opr1_reg, opr2_reg, (int)0);
7485 __ delayed()->nop();
7486 break;
7488 case 0x02: //not_equal
7489 if(&target)
7490 __ bne(opr1_reg, opr2_reg, target);
7491 else
7492 __ bne(opr1_reg, opr2_reg, (int)0);
7493 __ delayed()->nop();
7494 break;
7496 case 0x03: //greater
7497 __ slt(AT, opr2_reg, opr1_reg);
7498 if(&target)
7499 __ bne(AT, R0, target);
7500 else
7501 __ bne(AT, R0, (int)0);
7502 __ delayed()->nop();
7503 break;
7505 case 0x04: //greater_equal
7506 __ slt(AT, opr1_reg, opr2_reg);
7507 if(&target)
7508 __ beq(AT, R0, target);
7509 else
7510 __ beq(AT, R0, (int)0);
7511 __ delayed()->nop();
7513 break;
7515 case 0x05: //less
7516 __ slt(AT, opr1_reg, opr2_reg);
7517 if(&target)
7518 __ bne(AT, R0, target);
7519 else
7520 __ bne(AT, R0, (int)0);
7521 __ delayed()->nop();
7523 break;
7525 case 0x06: //less_equal
7526 __ slt(AT, opr2_reg, opr1_reg);
7528 if(&target)
7529 __ beq(AT, R0, target);
7530 else
7531 __ beq(AT, R0, (int)0);
7532 __ delayed()->nop();
7534 break;
7536 default:
7537 Unimplemented();
7538 }
7539 %}
7542 ins_pc_relative(1);
7543 ins_pipe( pipe_alu_branch );
7544 %}
// Conditional branch on a signed long compare against an immediate, implemented
// as subtract-then-compare-with-zero: AT = op1 - val (daddiu with the negated
// immediate), then the compare-with-zero branches bgtz/bgez/bltz/blez test AT.
// The immL16_sub operand type presumably guarantees that -val fits the 16-bit
// daddiu immediate and that the subtraction cannot overflow the signed range --
// TODO confirm against the operand definition earlier in this file.
// Trailing nop = branch delay slot; 'if (&L)' is the port-wide null-label idiom.
7546 instruct branchConL_reg_immL16_sub(cmpOp cmp, mRegL src1, immL16_sub src2, label labl) %{
7547 match( If cmp (CmpL src1 src2) );
7548 effect(USE labl);
7549 ins_cost(180);
7550 format %{ "BR$cmp $src1, $src2, $labl #@branchConL_reg_immL16_sub" %}
7552 ins_encode %{
7553 Register op1 = $src1$$Register;
7554 int val = $src2$$constant;
7555 Label &L = *($labl$$label);
7556 int flag = $cmp$$cmpcode;
// AT = op1 - val; all cases below compare AT against zero.
7558 __ daddiu(AT, op1, -1 * val);
7559 switch(flag)
7560 {
7561 case 0x01: //equal
7562 if (&L)
7563 __ beq(R0, AT, L);
7564 else
7565 __ beq(R0, AT, (int)0);
7566 break;
7567 case 0x02: //not_equal
7568 if (&L)
7569 __ bne(R0, AT, L);
7570 else
7571 __ bne(R0, AT, (int)0);
7572 break;
7573 case 0x03: //greater
7574 if(&L)
7575 __ bgtz(AT, L);
7576 else
7577 __ bgtz(AT, (int)0);
7578 break;
7579 case 0x04: //greater_equal
7580 if(&L)
7581 __ bgez(AT, L);
7582 else
7583 __ bgez(AT, (int)0);
7584 break;
7585 case 0x05: //less
7586 if(&L)
7587 __ bltz(AT, L);
7588 else
7589 __ bltz(AT, (int)0);
7590 break;
7591 case 0x06: //less_equal
7592 if(&L)
7593 __ blez(AT, L);
7594 else
7595 __ blez(AT, (int)0);
7596 break;
7597 default:
7598 Unimplemented();
7599 }
// Branch delay slot.
7600 __ nop();
7601 %}
7603 ins_pc_relative(1);
7604 ins_pipe( pipe_alu_branch );
7605 %}
// 32-bit variant of the subtract-then-branch pattern above: AT = op1 - val via
// addiu32, then compare AT with zero using bgtz/bgez/bltz/blez. The
// immI16_sub operand type presumably guarantees -val fits the 16-bit immediate
// and the subtraction cannot wrap -- TODO confirm against the operand definition.
// Trailing nop = branch delay slot; 'if (&L)' is the port-wide null-label idiom.
7608 instruct branchConI_reg_imm16_sub(cmpOp cmp, mRegI src1, immI16_sub src2, label labl) %{
7609 match( If cmp (CmpI src1 src2) );
7610 effect(USE labl);
7611 ins_cost(180);
7612 format %{ "BR$cmp $src1, $src2, $labl #@branchConI_reg_imm16_sub" %}
7614 ins_encode %{
7615 Register op1 = $src1$$Register;
7616 int val = $src2$$constant;
7617 Label &L = *($labl$$label);
7618 int flag = $cmp$$cmpcode;
// AT = op1 - val (32-bit); all cases below compare AT against zero.
7620 __ addiu32(AT, op1, -1 * val);
7621 switch(flag)
7622 {
7623 case 0x01: //equal
7624 if (&L)
7625 __ beq(R0, AT, L);
7626 else
7627 __ beq(R0, AT, (int)0);
7628 break;
7629 case 0x02: //not_equal
7630 if (&L)
7631 __ bne(R0, AT, L);
7632 else
7633 __ bne(R0, AT, (int)0);
7634 break;
7635 case 0x03: //greater
7636 if(&L)
7637 __ bgtz(AT, L);
7638 else
7639 __ bgtz(AT, (int)0);
7640 break;
7641 case 0x04: //greater_equal
7642 if(&L)
7643 __ bgez(AT, L);
7644 else
7645 __ bgez(AT, (int)0);
7646 break;
7647 case 0x05: //less
7648 if(&L)
7649 __ bltz(AT, L);
7650 else
7651 __ bltz(AT, (int)0);
7652 break;
7653 case 0x06: //less_equal
7654 if(&L)
7655 __ blez(AT, L);
7656 else
7657 __ blez(AT, (int)0);
7658 break;
7659 default:
7660 Unimplemented();
7661 }
// Branch delay slot.
7662 __ nop();
7663 %}
7665 ins_pc_relative(1);
7666 ins_pipe( pipe_alu_branch );
7667 %}
// Conditional branch on a signed long compare against zero (immL0). Uses the
// direct compare-with-zero branches where possible; the 'less' case routes
// through slt+bne (equivalent to a bltz). Here the shared delay-slot nop is
// emitted via __ delayed()->nop() after the switch.
// 'if (&target)' is the port-wide null-label idiom.
7669 instruct branchConL_regL_immL0(cmpOp cmp, mRegL src1, immL0 zero, label labl) %{
7670 match( If cmp (CmpL src1 zero) );
7671 effect(USE labl);
7672 format %{ "BR$cmp $src1, zero, $labl #@branchConL_regL_immL0" %}
7673 ins_cost(150);
7675 ins_encode %{
7676 Register opr1_reg = as_Register($src1$$reg);
7677 Label &target = *($labl$$label);
7678 int flag = $cmp$$cmpcode;
7680 switch(flag)
7681 {
7682 case 0x01: //equal
7683 if (&target)
7684 __ beq(opr1_reg, R0, target);
7685 else
7686 __ beq(opr1_reg, R0, int(0));
7687 break;
7689 case 0x02: //not_equal
7690 if(&target)
7691 __ bne(opr1_reg, R0, target);
7692 else
7693 __ bne(opr1_reg, R0, (int)0);
7694 break;
7696 case 0x03: //greater
7697 if(&target)
7698 __ bgtz(opr1_reg, target);
7699 else
7700 __ bgtz(opr1_reg, (int)0);
7701 break;
7703 case 0x04: //greater_equal
7704 if(&target)
7705 __ bgez(opr1_reg, target);
7706 else
7707 __ bgez(opr1_reg, (int)0);
7708 break;
7710 case 0x05: //less
// slt vs R0 then bne -- equivalent to bltz(opr1_reg, ...).
7711 __ slt(AT, opr1_reg, R0);
7712 if(&target)
7713 __ bne(AT, R0, target);
7714 else
7715 __ bne(AT, R0, (int)0);
7716 break;
7718 case 0x06: //less_equal
7719 if (&target)
7720 __ blez(opr1_reg, target);
7721 else
7722 __ blez(opr1_reg, int(0));
7723 break;
7725 default:
7726 Unimplemented();
7727 }
// Branch delay slot.
7728 __ delayed()->nop();
7729 %}
7732 ins_pc_relative(1);
7733 ins_pipe( pipe_alu_branch );
7734 %}
7737 //FIXME
// Conditional branch on a single-precision float compare. The FPU compare
// c.cond.s sets the FP condition flag, then bc1t/bc1f branches on it.
// NaN (unordered) handling: the predicate/branch pairing treats unordered as
// "less" -- gt/ge use the unordered predicates c_ule/c_ult with bc1f so a NaN
// operand falls through, while lt/le use c_ult/c_ule with bc1t so a NaN operand
// takes the branch. This presumably matches C2's canonicalized (fcmpl-style)
// CmpF semantics -- TODO confirm against the matcher's cmpOp encoding.
// Trailing nop = branch delay slot; 'if (&L)' is the port-wide null-label idiom.
7738 instruct branchConF_reg_reg(cmpOp cmp, regF src1, regF src2, label labl) %{
7739 match( If cmp (CmpF src1 src2) );
7740 effect(USE labl);
7741 format %{ "BR$cmp $src1, $src2, $labl #@branchConF_reg_reg" %}
7743 ins_encode %{
7744 FloatRegister reg_op1 = $src1$$FloatRegister;
7745 FloatRegister reg_op2 = $src2$$FloatRegister;
7746 Label &L = *($labl$$label);
7747 int flag = $cmp$$cmpcode;
7749 switch(flag)
7750 {
7751 case 0x01: //equal
7752 __ c_eq_s(reg_op1, reg_op2);
7753 if (&L)
7754 __ bc1t(L);
7755 else
7756 __ bc1t((int)0);
7757 break;
7758 case 0x02: //not_equal
// c_eq is false on NaN, so bc1f correctly makes NaN != NaN branch.
7759 __ c_eq_s(reg_op1, reg_op2);
7760 if (&L)
7761 __ bc1f(L);
7762 else
7763 __ bc1f((int)0);
7764 break;
7765 case 0x03: //greater
7766 __ c_ule_s(reg_op1, reg_op2);
7767 if(&L)
7768 __ bc1f(L);
7769 else
7770 __ bc1f((int)0);
7771 break;
7772 case 0x04: //greater_equal
7773 __ c_ult_s(reg_op1, reg_op2);
7774 if(&L)
7775 __ bc1f(L);
7776 else
7777 __ bc1f((int)0);
7778 break;
7779 case 0x05: //less
7780 __ c_ult_s(reg_op1, reg_op2);
7781 if(&L)
7782 __ bc1t(L);
7783 else
7784 __ bc1t((int)0);
7785 break;
7786 case 0x06: //less_equal
7787 __ c_ule_s(reg_op1, reg_op2);
7788 if(&L)
7789 __ bc1t(L);
7790 else
7791 __ bc1t((int)0);
7792 break;
7793 default:
7794 Unimplemented();
7795 }
// Branch delay slot.
7796 __ nop();
7797 %}
7799 ins_pc_relative(1);
7800 ins_pipe(pipe_slow);
7801 %}
// Conditional branch on a double-precision compare; same predicate/branch
// pairing as branchConF_reg_reg (gt/ge: c_ule/c_ult + bc1f so NaN falls
// through; lt/le: c_ult/c_ule + bc1t so NaN branches; eq/ne via c_eq).
// Trailing nop = branch delay slot; 'if (&L)' is the port-wide null-label idiom.
7803 instruct branchConD_reg_reg(cmpOp cmp, regD src1, regD src2, label labl) %{
7804 match( If cmp (CmpD src1 src2) );
7805 effect(USE labl);
7806 format %{ "BR$cmp $src1, $src2, $labl #@branchConD_reg_reg" %}
7808 ins_encode %{
7809 FloatRegister reg_op1 = $src1$$FloatRegister;
7810 FloatRegister reg_op2 = $src2$$FloatRegister;
7811 Label &L = *($labl$$label);
7812 int flag = $cmp$$cmpcode;
7814 switch(flag)
7815 {
7816 case 0x01: //equal
7817 __ c_eq_d(reg_op1, reg_op2);
7818 if (&L)
7819 __ bc1t(L);
7820 else
7821 __ bc1t((int)0);
7822 break;
7823 case 0x02: //not_equal
7824 //2016/4/19 aoqi: c_ueq_d cannot distinguish NaN from equal. Double.isNaN(Double) is implemented by 'f != f', so the use of c_ueq_d causes bugs.
7825 __ c_eq_d(reg_op1, reg_op2);
7826 if (&L)
7827 __ bc1f(L);
7828 else
7829 __ bc1f((int)0);
7830 break;
7831 case 0x03: //greater
7832 __ c_ule_d(reg_op1, reg_op2);
7833 if(&L)
7834 __ bc1f(L);
7835 else
7836 __ bc1f((int)0);
7837 break;
7838 case 0x04: //greater_equal
7839 __ c_ult_d(reg_op1, reg_op2);
7840 if(&L)
7841 __ bc1f(L);
7842 else
7843 __ bc1f((int)0);
7844 break;
7845 case 0x05: //less
7846 __ c_ult_d(reg_op1, reg_op2);
7847 if(&L)
7848 __ bc1t(L);
7849 else
7850 __ bc1t((int)0);
7851 break;
7852 case 0x06: //less_equal
7853 __ c_ule_d(reg_op1, reg_op2);
7854 if(&L)
7855 __ bc1t(L);
7856 else
7857 __ bc1t((int)0);
7858 break;
7859 default:
7860 Unimplemented();
7861 }
// Branch delay slot.
7862 __ nop();
7863 %}
7865 ins_pc_relative(1);
7866 ins_pipe(pipe_slow);
7867 %}
7870 // Call Runtime Instruction
// Direct call into the VM runtime; the encoding is supplied by the
// Java_To_Runtime encode class defined elsewhere in this file.
// ins_alignment(16) keeps the call site 16-byte aligned.
7871 instruct CallRuntimeDirect(method meth) %{
7872 match(CallRuntime );
7873 effect(USE meth);
7875 ins_cost(300);
7876 format %{ "CALL,runtime #@CallRuntimeDirect" %}
7877 ins_encode( Java_To_Runtime( meth ) );
7878 ins_pipe( pipe_slow );
7879 ins_alignment(16);
7880 %}
7884 //------------------------MemBar Instructions-------------------------------
7885 //Memory barrier flavors
// MemBarAcquire lowers to no code on this port (size 0, empty encoding).
7887 instruct membar_acquire() %{
7888 match(MemBarAcquire);
7889 ins_cost(0);
7891 size(0);
7892 format %{ "MEMBAR-acquire (empty) @ membar_acquire" %}
7893 ins_encode();
7894 ins_pipe(empty);
7895 %}
// LoadFence lowers to a full SYNC instruction.
7897 instruct load_fence() %{
7898 match(LoadFence);
7899 ins_cost(400);
7901 format %{ "MEMBAR @ load_fence" %}
7902 ins_encode %{
7903 __ sync();
7904 %}
7905 ins_pipe(pipe_slow);
7906 %}
// MemBarAcquireLock: empty -- the acquire is already provided by the CAS in the
// preceding FastLock (per the format string).
7908 instruct membar_acquire_lock()
7909 %{
7910 match(MemBarAcquireLock);
7911 ins_cost(0);
7913 size(0);
7914 format %{ "MEMBAR-acquire (acquire as part of CAS in prior FastLock so empty encoding) @ membar_acquire_lock" %}
7915 ins_encode();
7916 ins_pipe(empty);
7917 %}
// MemBarRelease lowers to no code on this port (size 0, empty encoding).
7919 instruct membar_release() %{
7920 match(MemBarRelease);
7921 ins_cost(0);
7923 size(0);
7924 format %{ "MEMBAR-release (empty) @ membar_release" %}
7925 ins_encode();
7926 ins_pipe(empty);
7927 %}
// StoreFence lowers to a full SYNC instruction.
7929 instruct store_fence() %{
7930 match(StoreFence);
7931 ins_cost(400);
7933 format %{ "MEMBAR @ store_fence" %}
7935 ins_encode %{
7936 __ sync();
7937 %}
7939 ins_pipe(pipe_slow);
7940 %}
// MemBarReleaseLock: empty -- the release is already provided by FastUnlock
// (per the format string).
7942 instruct membar_release_lock()
7943 %{
7944 match(MemBarReleaseLock);
7945 ins_cost(0);
7947 size(0);
7948 format %{ "MEMBAR-release-lock (release in FastUnlock so empty) @ membar_release_lock" %}
7949 ins_encode();
7950 ins_pipe(empty);
7951 %}
// MemBarVolatile: full SYNC, skipped entirely on a uniprocessor (os::is_MP()).
7954 instruct membar_volatile() %{
7955 match(MemBarVolatile);
7956 ins_cost(400);
7958 format %{ "MEMBAR-volatile" %}
7959 ins_encode %{
7960 if( !os::is_MP() ) return; // Not needed on single CPU
7961 __ sync();
7963 %}
7964 ins_pipe(pipe_slow);
7965 %}
// MemBarVolatile that the matcher proved redundant (a store-load barrier already
// follows -- predicate Matcher::post_store_load_barrier): emits nothing.
7967 instruct unnecessary_membar_volatile() %{
7968 match(MemBarVolatile);
7969 predicate(Matcher::post_store_load_barrier(n));
7970 ins_cost(0);
7972 size(0);
7973 format %{ "MEMBAR-volatile (unnecessary so empty encoding) @ unnecessary_membar_volatile" %}
7974 ins_encode( );
7975 ins_pipe(empty);
7976 %}
// MemBarStoreStore lowers to no code on this port (size 0, empty encoding).
7978 instruct membar_storestore() %{
7979 match(MemBarStoreStore);
7981 ins_cost(0);
7982 size(0);
7983 format %{ "MEMBAR-storestore (empty encoding) @ membar_storestore" %}
7984 ins_encode( );
7985 ins_pipe(empty);
7986 %}
7988 //----------Move Instructions--------------------------------------------------
// CastX2P: reinterpret a long register as a pointer -- just a register move,
// elided entirely when source and destination registers coincide.
7989 instruct castX2P(mRegP dst, mRegL src) %{
7990 match(Set dst (CastX2P src));
7991 format %{ "castX2P $dst, $src @ castX2P" %}
7992 ins_encode %{
7993 Register src = $src$$Register;
7994 Register dst = $dst$$Register;
7996 if(src != dst)
7997 __ move(dst, src);
7998 %}
7999 ins_cost(10);
8000 ins_pipe( ialu_regI_mov );
8001 %}
// CastP2X: reinterpret a pointer register as a long -- just a register move,
// elided when source and destination registers coincide.
8003 instruct castP2X(mRegL dst, mRegP src ) %{
8004 match(Set dst (CastP2X src));
8006 format %{ "mov $dst, $src\t #@castP2X" %}
8007 ins_encode %{
8008 Register src = $src$$Register;
8009 Register dst = $dst$$Register;
8011 if(src != dst)
8012 __ move(dst, src);
8013 %}
8014 ins_pipe( ialu_regI_mov );
8015 %}
// MoveF2I: raw 32-bit bit-pattern transfer FPR -> GPR via mfc1 (no conversion).
8017 instruct MoveF2I_reg_reg(mRegI dst, regF src) %{
8018 match(Set dst (MoveF2I src));
8019 effect(DEF dst, USE src);
8020 ins_cost(85);
8021 format %{ "MoveF2I $dst, $src @ MoveF2I_reg_reg" %}
8022 ins_encode %{
8023 Register dst = as_Register($dst$$reg);
8024 FloatRegister src = as_FloatRegister($src$$reg);
8026 __ mfc1(dst, src);
8027 %}
8028 ins_pipe( pipe_slow );
8029 %}
// MoveI2F: raw 32-bit bit-pattern transfer GPR -> FPR via mtc1 (no conversion).
8031 instruct MoveI2F_reg_reg(regF dst, mRegI src) %{
8032 match(Set dst (MoveI2F src));
8033 effect(DEF dst, USE src);
8034 ins_cost(85);
8035 format %{ "MoveI2F $dst, $src @ MoveI2F_reg_reg" %}
8036 ins_encode %{
8037 Register src = as_Register($src$$reg);
8038 FloatRegister dst = as_FloatRegister($dst$$reg);
8040 __ mtc1(src, dst);
8041 %}
8042 ins_pipe( pipe_slow );
8043 %}
// MoveD2L: raw 64-bit bit-pattern transfer FPR -> GPR via dmfc1 (no conversion).
8045 instruct MoveD2L_reg_reg(mRegL dst, regD src) %{
8046 match(Set dst (MoveD2L src));
8047 effect(DEF dst, USE src);
8048 ins_cost(85);
8049 format %{ "MoveD2L $dst, $src @ MoveD2L_reg_reg" %}
8050 ins_encode %{
8051 Register dst = as_Register($dst$$reg);
8052 FloatRegister src = as_FloatRegister($src$$reg);
8054 __ dmfc1(dst, src);
8055 %}
8056 ins_pipe( pipe_slow );
8057 %}
// MoveL2D: raw 64-bit bit-pattern transfer GPR -> FPR via dmtc1 (no conversion).
8059 instruct MoveL2D_reg_reg(regD dst, mRegL src) %{
8060 match(Set dst (MoveL2D src));
8061 effect(DEF dst, USE src);
8062 ins_cost(85);
8063 format %{ "MoveL2D $dst, $src @ MoveL2D_reg_reg" %}
8064 ins_encode %{
8065 FloatRegister dst = as_FloatRegister($dst$$reg);
8066 Register src = as_Register($src$$reg);
8068 __ dmtc1(src, dst);
8069 %}
8070 ins_pipe( pipe_slow );
8071 %}
8073 //----------Conditional Move---------------------------------------------------
8074 // Conditional move
// Branch-free conditional move for (CMoveI (CmpI a b) dst src): the condition
// is materialized in AT (subu32 for eq/ne, signed slt otherwise), then
// movz (move if AT == 0) or movn (move if AT != 0) selects src into dst.
8075 instruct cmovI_cmpI_reg_reg(mRegI dst, mRegI src, mRegI tmp1, mRegI tmp2, cmpOp cop ) %{
8076 match(Set dst (CMoveI (Binary cop (CmpI tmp1 tmp2)) (Binary dst src)));
8077 ins_cost(80);
8078 format %{
8079 "CMP$cop $tmp1, $tmp2\t @cmovI_cmpI_reg_reg\n"
8080 "\tCMOV $dst,$src \t @cmovI_cmpI_reg_reg"
8081 %}
8083 ins_encode %{
8084 Register op1 = $tmp1$$Register;
8085 Register op2 = $tmp2$$Register;
8086 Register dst = $dst$$Register;
8087 Register src = $src$$Register;
8088 int flag = $cop$$cmpcode;
8090 switch(flag)
8091 {
8092 case 0x01: //equal
// AT = op1 - op2 (32-bit); zero iff equal, so movz fires on equality.
8093 __ subu32(AT, op1, op2);
8094 __ movz(dst, src, AT);
8095 break;
8097 case 0x02: //not_equal
8098 __ subu32(AT, op1, op2);
8099 __ movn(dst, src, AT);
8100 break;
8102 case 0x03: //great
8103 __ slt(AT, op2, op1);
8104 __ movn(dst, src, AT);
8105 break;
8107 case 0x04: //great_equal
8108 __ slt(AT, op1, op2);
8109 __ movz(dst, src, AT);
8110 break;
8112 case 0x05: //less
8113 __ slt(AT, op1, op2);
8114 __ movn(dst, src, AT);
8115 break;
8117 case 0x06: //less_equal
8118 __ slt(AT, op2, op1);
8119 __ movz(dst, src, AT);
8120 break;
8122 default:
8123 Unimplemented();
8124 }
8125 %}
8127 ins_pipe( pipe_slow );
8128 %}
// Branch-free conditional move selecting an int on an unsigned pointer compare
// (CmpP): 64-bit subu for eq/ne, sltu for the ordered cases, then movz/movn.
8130 instruct cmovI_cmpP_reg_reg(mRegI dst, mRegI src, mRegP tmp1, mRegP tmp2, cmpOpU cop ) %{
8131 match(Set dst (CMoveI (Binary cop (CmpP tmp1 tmp2)) (Binary dst src)));
8132 ins_cost(80);
8133 format %{
8134 "CMPU$cop $tmp1,$tmp2\t @cmovI_cmpP_reg_reg\n\t"
8135 "CMOV $dst,$src\t @cmovI_cmpP_reg_reg"
8136 %}
8137 ins_encode %{
8138 Register op1 = $tmp1$$Register;
8139 Register op2 = $tmp2$$Register;
8140 Register dst = $dst$$Register;
8141 Register src = $src$$Register;
8142 int flag = $cop$$cmpcode;
8144 switch(flag)
8145 {
8146 case 0x01: //equal
// Full-width subtract (pointers are 64-bit): zero iff equal.
8147 __ subu(AT, op1, op2);
8148 __ movz(dst, src, AT);
8149 break;
8151 case 0x02: //not_equal
8152 __ subu(AT, op1, op2);
8153 __ movn(dst, src, AT);
8154 break;
8156 case 0x03: //above
8157 __ sltu(AT, op2, op1);
8158 __ movn(dst, src, AT);
8159 break;
8161 case 0x04: //above_equal
8162 __ sltu(AT, op1, op2);
8163 __ movz(dst, src, AT);
8164 break;
8166 case 0x05: //below
8167 __ sltu(AT, op1, op2);
8168 __ movn(dst, src, AT);
8169 break;
8171 case 0x06: //below_equal
8172 __ sltu(AT, op2, op1);
8173 __ movz(dst, src, AT);
8174 break;
8176 default:
8177 Unimplemented();
8178 }
8179 %}
8181 ins_pipe( pipe_slow );
8182 %}
// Branch-free conditional move selecting an int on an unsigned compressed-oop
// compare (CmpN): 32-bit subu32 for eq/ne (narrow oops are 32-bit), sltu for
// the ordered cases, then movz/movn.
8184 instruct cmovI_cmpN_reg_reg(mRegI dst, mRegI src, mRegN tmp1, mRegN tmp2, cmpOpU cop ) %{
8185 match(Set dst (CMoveI (Binary cop (CmpN tmp1 tmp2)) (Binary dst src)));
8186 ins_cost(80);
8187 format %{
8188 "CMPU$cop $tmp1,$tmp2\t @cmovI_cmpN_reg_reg\n\t"
8189 "CMOV $dst,$src\t @cmovI_cmpN_reg_reg"
8190 %}
8191 ins_encode %{
8192 Register op1 = $tmp1$$Register;
8193 Register op2 = $tmp2$$Register;
8194 Register dst = $dst$$Register;
8195 Register src = $src$$Register;
8196 int flag = $cop$$cmpcode;
8198 switch(flag)
8199 {
8200 case 0x01: //equal
8201 __ subu32(AT, op1, op2);
8202 __ movz(dst, src, AT);
8203 break;
8205 case 0x02: //not_equal
8206 __ subu32(AT, op1, op2);
8207 __ movn(dst, src, AT);
8208 break;
8210 case 0x03: //above
8211 __ sltu(AT, op2, op1);
8212 __ movn(dst, src, AT);
8213 break;
8215 case 0x04: //above_equal
8216 __ sltu(AT, op1, op2);
8217 __ movz(dst, src, AT);
8218 break;
8220 case 0x05: //below
8221 __ sltu(AT, op1, op2);
8222 __ movn(dst, src, AT);
8223 break;
8225 case 0x06: //below_equal
8226 __ sltu(AT, op2, op1);
8227 __ movz(dst, src, AT);
8228 break;
8230 default:
8231 Unimplemented();
8232 }
8233 %}
8235 ins_pipe( pipe_slow );
8236 %}
// Branch-free conditional move selecting a pointer on an unsigned
// compressed-oop compare (CmpN): subu32 for eq/ne, sltu otherwise, movz/movn.
8238 instruct cmovP_cmpN_reg_reg(mRegP dst, mRegP src, mRegN tmp1, mRegN tmp2, cmpOpU cop ) %{
8239 match(Set dst (CMoveP (Binary cop (CmpN tmp1 tmp2)) (Binary dst src)));
8240 ins_cost(80);
8241 format %{
8242 "CMPU$cop $tmp1,$tmp2\t @cmovP_cmpN_reg_reg\n\t"
8243 "CMOV $dst,$src\t @cmovP_cmpN_reg_reg"
8244 %}
8245 ins_encode %{
8246 Register op1 = $tmp1$$Register;
8247 Register op2 = $tmp2$$Register;
8248 Register dst = $dst$$Register;
8249 Register src = $src$$Register;
8250 int flag = $cop$$cmpcode;
8252 switch(flag)
8253 {
8254 case 0x01: //equal
8255 __ subu32(AT, op1, op2);
8256 __ movz(dst, src, AT);
8257 break;
8259 case 0x02: //not_equal
8260 __ subu32(AT, op1, op2);
8261 __ movn(dst, src, AT);
8262 break;
8264 case 0x03: //above
8265 __ sltu(AT, op2, op1);
8266 __ movn(dst, src, AT);
8267 break;
8269 case 0x04: //above_equal
8270 __ sltu(AT, op1, op2);
8271 __ movz(dst, src, AT);
8272 break;
8274 case 0x05: //below
8275 __ sltu(AT, op1, op2);
8276 __ movn(dst, src, AT);
8277 break;
8279 case 0x06: //below_equal
8280 __ sltu(AT, op2, op1);
8281 __ movz(dst, src, AT);
8282 break;
8284 default:
8285 Unimplemented();
8286 }
8287 %}
8289 ins_pipe( pipe_slow );
8290 %}
// Branch-free conditional move selecting a narrow oop on an unsigned pointer
// compare (CmpP): full-width subu for eq/ne, sltu otherwise, movz/movn.
8292 instruct cmovN_cmpP_reg_reg(mRegN dst, mRegN src, mRegP tmp1, mRegP tmp2, cmpOpU cop ) %{
8293 match(Set dst (CMoveN (Binary cop (CmpP tmp1 tmp2)) (Binary dst src)));
8294 ins_cost(80);
8295 format %{
8296 "CMPU$cop $tmp1,$tmp2\t @cmovN_cmpP_reg_reg\n\t"
8297 "CMOV $dst,$src\t @cmovN_cmpP_reg_reg"
8298 %}
8299 ins_encode %{
8300 Register op1 = $tmp1$$Register;
8301 Register op2 = $tmp2$$Register;
8302 Register dst = $dst$$Register;
8303 Register src = $src$$Register;
8304 int flag = $cop$$cmpcode;
8306 switch(flag)
8307 {
8308 case 0x01: //equal
8309 __ subu(AT, op1, op2);
8310 __ movz(dst, src, AT);
8311 break;
8313 case 0x02: //not_equal
8314 __ subu(AT, op1, op2);
8315 __ movn(dst, src, AT);
8316 break;
8318 case 0x03: //above
8319 __ sltu(AT, op2, op1);
8320 __ movn(dst, src, AT);
8321 break;
8323 case 0x04: //above_equal
8324 __ sltu(AT, op1, op2);
8325 __ movz(dst, src, AT);
8326 break;
8328 case 0x05: //below
8329 __ sltu(AT, op1, op2);
8330 __ movn(dst, src, AT);
8331 break;
8333 case 0x06: //below_equal
8334 __ sltu(AT, op2, op1);
8335 __ movz(dst, src, AT);
8336 break;
8338 default:
8339 Unimplemented();
8340 }
8341 %}
8343 ins_pipe( pipe_slow );
8344 %}
// Branch-free conditional move selecting a pointer on a double compare (CmpD):
// the FPU compare c.cond.d sets the FP condition flag, then movt (move when the
// flag is true) / movf (move when false) selects src into dst.
//
// NaN (unordered) handling must match the branch form branchConD_reg_reg, where
// movt corresponds to bc1t and movf to bc1f:
//   eq: c_eq  + movt  (NaN -> flag false -> no move)       -- unchanged
//   ne: c_eq  + movf  (NaN -> flag false -> move; NaN!=NaN) -- unchanged
//   gt: c_ule + movf  (NaN -> flag true  -> no move)
//   ge: c_ult + movf  (NaN -> flag true  -> no move)
//   lt: c_ult + movt  (unordered counts as less, as in branchConD) -- unchanged
//   le: c_ule + movt                                              -- unchanged
// Fix: the gt/ge cases previously used the ORDERED predicates c_ole_d/c_olt_d,
// which are false on NaN, so movf incorrectly performed the move when either
// operand was NaN (i.e. NaN > x selected src). They now use the unordered
// predicates c_ule_d/c_ult_d, consistent with branchConD_reg_reg.
8346 instruct cmovP_cmpD_reg_reg(mRegP dst, mRegP src, regD tmp1, regD tmp2, cmpOp cop ) %{
8347 match(Set dst (CMoveP (Binary cop (CmpD tmp1 tmp2)) (Binary dst src)));
8348 ins_cost(80);
8349 format %{
8350 "CMP$cop $tmp1, $tmp2\t @cmovP_cmpD_reg_reg\n"
8351 "\tCMOV $dst,$src \t @cmovP_cmpD_reg_reg"
8352 %}
8353 ins_encode %{
8354 FloatRegister reg_op1 = as_FloatRegister($tmp1$$reg);
8355 FloatRegister reg_op2 = as_FloatRegister($tmp2$$reg);
8356 Register dst = as_Register($dst$$reg);
8357 Register src = as_Register($src$$reg);
8359 int flag = $cop$$cmpcode;
8361 switch(flag)
8362 {
8363 case 0x01: //equal
8364 __ c_eq_d(reg_op1, reg_op2);
8365 __ movt(dst, src);
8366 break;
8367 case 0x02: //not_equal
8368 __ c_eq_d(reg_op1, reg_op2);
8369 __ movf(dst, src);
8370 break;
8371 case 0x03: //greater
// Unordered predicate so NaN does not satisfy '>': flag true on NaN, movf skips.
8372 __ c_ule_d(reg_op1, reg_op2);
8373 __ movf(dst, src);
8374 break;
8375 case 0x04: //greater_equal
// Unordered predicate so NaN does not satisfy '>=': flag true on NaN, movf skips.
8376 __ c_ult_d(reg_op1, reg_op2);
8377 __ movf(dst, src);
8378 break;
8379 case 0x05: //less
8380 __ c_ult_d(reg_op1, reg_op2);
8381 __ movt(dst, src);
8382 break;
8383 case 0x06: //less_equal
8384 __ c_ule_d(reg_op1, reg_op2);
8385 __ movt(dst, src);
8386 break;
8387 default:
8388 Unimplemented();
8389 }
8390 %}
8392 ins_pipe( pipe_slow );
8393 %}
// Branch-free conditional move selecting a narrow oop on an unsigned
// compressed-oop compare (CmpN): subu32 for eq/ne, sltu otherwise, movz/movn.
8396 instruct cmovN_cmpN_reg_reg(mRegN dst, mRegN src, mRegN tmp1, mRegN tmp2, cmpOpU cop ) %{
8397 match(Set dst (CMoveN (Binary cop (CmpN tmp1 tmp2)) (Binary dst src)));
8398 ins_cost(80);
8399 format %{
8400 "CMPU$cop $tmp1,$tmp2\t @cmovN_cmpN_reg_reg\n\t"
8401 "CMOV $dst,$src\t @cmovN_cmpN_reg_reg"
8402 %}
8403 ins_encode %{
8404 Register op1 = $tmp1$$Register;
8405 Register op2 = $tmp2$$Register;
8406 Register dst = $dst$$Register;
8407 Register src = $src$$Register;
8408 int flag = $cop$$cmpcode;
8410 switch(flag)
8411 {
8412 case 0x01: //equal
8413 __ subu32(AT, op1, op2);
8414 __ movz(dst, src, AT);
8415 break;
8417 case 0x02: //not_equal
8418 __ subu32(AT, op1, op2);
8419 __ movn(dst, src, AT);
8420 break;
8422 case 0x03: //above
8423 __ sltu(AT, op2, op1);
8424 __ movn(dst, src, AT);
8425 break;
8427 case 0x04: //above_equal
8428 __ sltu(AT, op1, op2);
8429 __ movz(dst, src, AT);
8430 break;
8432 case 0x05: //below
8433 __ sltu(AT, op1, op2);
8434 __ movn(dst, src, AT);
8435 break;
8437 case 0x06: //below_equal
8438 __ sltu(AT, op2, op1);
8439 __ movz(dst, src, AT);
8440 break;
8442 default:
8443 Unimplemented();
8444 }
8445 %}
8447 ins_pipe( pipe_slow );
8448 %}
// Branch-free conditional move selecting an int on an unsigned int compare
// (CmpU): full-width subu for eq/ne, sltu for the ordered cases, movz/movn.
// NOTE(review): eq/ne use 64-bit subu where the CmpI form uses subu32;
// presumably safe because mRegI values are kept sign/zero-consistent in
// 64-bit registers -- TODO confirm the port's register-width convention.
8451 instruct cmovI_cmpU_reg_reg(mRegI dst, mRegI src, mRegI tmp1, mRegI tmp2, cmpOpU cop ) %{
8452 match(Set dst (CMoveI (Binary cop (CmpU tmp1 tmp2)) (Binary dst src)));
8453 ins_cost(80);
8454 format %{
8455 "CMPU$cop $tmp1,$tmp2\t @cmovI_cmpU_reg_reg\n\t"
8456 "CMOV $dst,$src\t @cmovI_cmpU_reg_reg"
8457 %}
8458 ins_encode %{
8459 Register op1 = $tmp1$$Register;
8460 Register op2 = $tmp2$$Register;
8461 Register dst = $dst$$Register;
8462 Register src = $src$$Register;
8463 int flag = $cop$$cmpcode;
8465 switch(flag)
8466 {
8467 case 0x01: //equal
8468 __ subu(AT, op1, op2);
8469 __ movz(dst, src, AT);
8470 break;
8472 case 0x02: //not_equal
8473 __ subu(AT, op1, op2);
8474 __ movn(dst, src, AT);
8475 break;
8477 case 0x03: //above
8478 __ sltu(AT, op2, op1);
8479 __ movn(dst, src, AT);
8480 break;
8482 case 0x04: //above_equal
8483 __ sltu(AT, op1, op2);
8484 __ movz(dst, src, AT);
8485 break;
8487 case 0x05: //below
8488 __ sltu(AT, op1, op2);
8489 __ movn(dst, src, AT);
8490 break;
8492 case 0x06: //below_equal
8493 __ sltu(AT, op2, op1);
8494 __ movz(dst, src, AT);
8495 break;
8497 default:
8498 Unimplemented();
8499 }
8500 %}
8502 ins_pipe( pipe_slow );
8503 %}
// Branch-free conditional move selecting an int on a signed long compare
// (CmpL): full-width (64-bit) subu for eq/ne, signed slt otherwise, movz/movn.
8505 instruct cmovI_cmpL_reg_reg(mRegI dst, mRegI src, mRegL tmp1, mRegL tmp2, cmpOp cop ) %{
8506 match(Set dst (CMoveI (Binary cop (CmpL tmp1 tmp2)) (Binary dst src)));
8507 ins_cost(80);
8508 format %{
8509 "CMP$cop $tmp1, $tmp2\t @cmovI_cmpL_reg_reg\n"
8510 "\tCMOV $dst,$src \t @cmovI_cmpL_reg_reg"
8511 %}
8512 ins_encode %{
8513 Register opr1 = as_Register($tmp1$$reg);
8514 Register opr2 = as_Register($tmp2$$reg);
8515 Register dst = $dst$$Register;
8516 Register src = $src$$Register;
8517 int flag = $cop$$cmpcode;
8519 switch(flag)
8520 {
8521 case 0x01: //equal
8522 __ subu(AT, opr1, opr2);
8523 __ movz(dst, src, AT);
8524 break;
8526 case 0x02: //not_equal
8527 __ subu(AT, opr1, opr2);
8528 __ movn(dst, src, AT);
8529 break;
8531 case 0x03: //greater
8532 __ slt(AT, opr2, opr1);
8533 __ movn(dst, src, AT);
8534 break;
8536 case 0x04: //greater_equal
8537 __ slt(AT, opr1, opr2);
8538 __ movz(dst, src, AT);
8539 break;
8541 case 0x05: //less
8542 __ slt(AT, opr1, opr2);
8543 __ movn(dst, src, AT);
8544 break;
8546 case 0x06: //less_equal
8547 __ slt(AT, opr2, opr1);
8548 __ movz(dst, src, AT);
8549 break;
8551 default:
8552 Unimplemented();
8553 }
8554 %}
8556 ins_pipe( pipe_slow );
8557 %}
// Conditional move of a pointer selected by a signed long compare:
//   dst = (tmp1 <cop:signed> tmp2) ? src : dst
// Same MOVZ/MOVN-on-AT pattern as cmovI_cmpL_reg_reg. Clobbers AT.
instruct cmovP_cmpL_reg_reg(mRegP dst, mRegP src, mRegL tmp1, mRegL tmp2, cmpOp cop ) %{
  match(Set dst (CMoveP (Binary cop (CmpL tmp1 tmp2)) (Binary dst src)));
  ins_cost(80);
  format %{
           "CMP$cop $tmp1, $tmp2\t @cmovP_cmpL_reg_reg\n"
           "\tCMOV $dst,$src \t @cmovP_cmpL_reg_reg"
  %}
  ins_encode %{
    Register opr1 = as_Register($tmp1$$reg);
    Register opr2 = as_Register($tmp2$$reg);
    Register dst  = $dst$$Register;
    Register src  = $src$$Register;
    int     flag = $cop$$cmpcode;

    switch(flag)
    {
      case 0x01: //equal
        __ subu(AT, opr1, opr2);
        __ movz(dst, src, AT);
        break;

      case 0x02: //not_equal
        __ subu(AT, opr1, opr2);
        __ movn(dst, src, AT);
        break;

      case 0x03: //greater
        __ slt(AT, opr2, opr1);
        __ movn(dst, src, AT);
        break;

      case 0x04: //greater_equal
        __ slt(AT, opr1, opr2);
        __ movz(dst, src, AT);
        break;

      case 0x05: //less
        __ slt(AT, opr1, opr2);
        __ movn(dst, src, AT);
        break;

      case 0x06: //less_equal
        __ slt(AT, opr2, opr1);
        __ movz(dst, src, AT);
        break;

      default:
        Unimplemented();
    }
  %}

  ins_pipe( pipe_slow );
%}
// Conditional move of an int selected by a double compare.
// FP compares (c.cond.d) set FCC0; MOVT/MOVF move on FCC0 true/false.
// c_ole/c_olt are "ordered" predicates (false on NaN), c_ult/c_ule are
// "unordered-or-..." (true on NaN); the pairing below chooses which way
// a NaN operand resolves each condition.
instruct cmovI_cmpD_reg_reg(mRegI dst, mRegI src, regD tmp1, regD tmp2, cmpOp cop ) %{
  match(Set dst (CMoveI (Binary cop (CmpD tmp1 tmp2)) (Binary dst src)));
  ins_cost(80);
  format %{
           "CMP$cop $tmp1, $tmp2\t @cmovI_cmpD_reg_reg\n"
           "\tCMOV $dst,$src \t @cmovI_cmpD_reg_reg"
  %}
  ins_encode %{
    FloatRegister reg_op1 = as_FloatRegister($tmp1$$reg);
    FloatRegister reg_op2 = as_FloatRegister($tmp2$$reg);
    Register dst = as_Register($dst$$reg);
    Register src = as_Register($src$$reg);

    int     flag = $cop$$cmpcode;

    switch(flag)
    {
      case 0x01: //equal
        __ c_eq_d(reg_op1, reg_op2);
        __ movt(dst, src);
        break;
      case 0x02: //not_equal
        // 2016/4/19 aoqi: See instruct branchConD_reg_reg. The change in branchConD_reg_reg fixed a bug. It seems similar here, so I made the same change.
        // ne == !eq, so move when the eq compare is false (this also treats
        // an unordered (NaN) result as not-equal).
        __ c_eq_d(reg_op1, reg_op2);
        __ movf(dst, src);
        break;
      case 0x03: //greater
        // greater == !(op1 <= op2 ordered)
        __ c_ole_d(reg_op1, reg_op2);
        __ movf(dst, src);
        break;
      case 0x04: //greater_equal
        __ c_olt_d(reg_op1, reg_op2);
        __ movf(dst, src);
        break;
      case 0x05: //less
        __ c_ult_d(reg_op1, reg_op2);
        __ movt(dst, src);
        break;
      case 0x06: //less_equal
        __ c_ule_d(reg_op1, reg_op2);
        __ movt(dst, src);
        break;
      default:
        Unimplemented();
    }
  %}

  ins_pipe( pipe_slow );
%}
// Conditional move of a pointer selected by an unsigned pointer compare:
//   dst = (tmp1 <cop:unsigned> tmp2) ? src : dst
// Pointers compare unsigned, hence sltu. Clobbers AT.
instruct cmovP_cmpP_reg_reg(mRegP dst, mRegP src, mRegP tmp1, mRegP tmp2, cmpOpU cop ) %{
  match(Set dst (CMoveP (Binary cop (CmpP tmp1 tmp2)) (Binary dst src)));
  ins_cost(80);
  format %{
           "CMPU$cop $tmp1,$tmp2\t @cmovP_cmpP_reg_reg\n\t"
           "CMOV $dst,$src\t @cmovP_cmpP_reg_reg"
  %}
  ins_encode %{
    Register op1 = $tmp1$$Register;
    Register op2 = $tmp2$$Register;
    Register dst = $dst$$Register;
    Register src = $src$$Register;
    int     flag = $cop$$cmpcode;

    switch(flag)
    {
      case 0x01: //equal
        __ subu(AT, op1, op2);
        __ movz(dst, src, AT);
        break;

      case 0x02: //not_equal
        __ subu(AT, op1, op2);
        __ movn(dst, src, AT);
        break;

      case 0x03: //above
        __ sltu(AT, op2, op1);
        __ movn(dst, src, AT);
        break;

      case 0x04: //above_equal
        __ sltu(AT, op1, op2);
        __ movz(dst, src, AT);
        break;

      case 0x05: //below
        __ sltu(AT, op1, op2);
        __ movn(dst, src, AT);
        break;

      case 0x06: //below_equal
        __ sltu(AT, op2, op1);
        __ movz(dst, src, AT);
        break;

      default:
        Unimplemented();
    }
  %}

  ins_pipe( pipe_slow );
%}
// Conditional move of a pointer selected by a *signed* int compare
// (cop is cmpOp, and slt — not sltu — is emitted). The original case
// labels said above/below; they are renamed to the signed terms here.
// subu32 computes the 32-bit difference for the equality tests.
// Clobbers AT.
instruct cmovP_cmpI_reg_reg(mRegP dst, mRegP src, mRegI tmp1, mRegI tmp2, cmpOp cop ) %{
  match(Set dst (CMoveP (Binary cop (CmpI tmp1 tmp2)) (Binary dst src)));
  ins_cost(80);
  format %{
           "CMP$cop $tmp1,$tmp2\t @cmovP_cmpI_reg_reg\n\t"
           "CMOV $dst,$src\t @cmovP_cmpI_reg_reg"
  %}
  ins_encode %{
    Register op1 = $tmp1$$Register;
    Register op2 = $tmp2$$Register;
    Register dst = $dst$$Register;
    Register src = $src$$Register;
    int     flag = $cop$$cmpcode;

    switch(flag)
    {
      case 0x01: //equal
        __ subu32(AT, op1, op2);
        __ movz(dst, src, AT);
        break;

      case 0x02: //not_equal
        __ subu32(AT, op1, op2);
        __ movn(dst, src, AT);
        break;

      case 0x03: //greater (signed; was labeled "above")
        __ slt(AT, op2, op1);
        __ movn(dst, src, AT);
        break;

      case 0x04: //greater_equal (signed; was labeled "above_equal")
        __ slt(AT, op1, op2);
        __ movz(dst, src, AT);
        break;

      case 0x05: //less (signed; was labeled "below")
        __ slt(AT, op1, op2);
        __ movn(dst, src, AT);
        break;

      case 0x06: //less_equal (signed; was labeled "below_equal")
        __ slt(AT, op2, op1);
        __ movz(dst, src, AT);
        break;

      default:
        Unimplemented();
    }
  %}

  ins_pipe( pipe_slow );
%}
// Conditional move of a narrow (compressed) oop selected by a *signed*
// int compare (cop is cmpOp; slt is emitted, not sltu — the original
// above/below case labels were misleading and are renamed here).
// Clobbers AT.
instruct cmovN_cmpI_reg_reg(mRegN dst, mRegN src, mRegI tmp1, mRegI tmp2, cmpOp cop ) %{
  match(Set dst (CMoveN (Binary cop (CmpI tmp1 tmp2)) (Binary dst src)));
  ins_cost(80);
  format %{
           "CMP$cop $tmp1,$tmp2\t @cmovN_cmpI_reg_reg\n\t"
           "CMOV $dst,$src\t @cmovN_cmpI_reg_reg"
  %}
  ins_encode %{
    Register op1 = $tmp1$$Register;
    Register op2 = $tmp2$$Register;
    Register dst = $dst$$Register;
    Register src = $src$$Register;
    int     flag = $cop$$cmpcode;

    switch(flag)
    {
      case 0x01: //equal
        __ subu32(AT, op1, op2);
        __ movz(dst, src, AT);
        break;

      case 0x02: //not_equal
        __ subu32(AT, op1, op2);
        __ movn(dst, src, AT);
        break;

      case 0x03: //greater (signed; was labeled "above")
        __ slt(AT, op2, op1);
        __ movn(dst, src, AT);
        break;

      case 0x04: //greater_equal (signed; was labeled "above_equal")
        __ slt(AT, op1, op2);
        __ movz(dst, src, AT);
        break;

      case 0x05: //less (signed; was labeled "below")
        __ slt(AT, op1, op2);
        __ movn(dst, src, AT);
        break;

      case 0x06: //less_equal (signed; was labeled "below_equal")
        __ slt(AT, op2, op1);
        __ movz(dst, src, AT);
        break;

      default:
        Unimplemented();
    }
  %}

  ins_pipe( pipe_slow );
%}
// Conditional move of a long selected by a signed int compare.
// ("great"/"great_equal" case labels spelled out as greater/greater_equal.)
// Clobbers AT.
instruct cmovL_cmpI_reg_reg(mRegL dst, mRegL src, mRegI tmp1, mRegI tmp2, cmpOp cop ) %{
  match(Set dst (CMoveL (Binary cop (CmpI tmp1 tmp2)) (Binary dst src)));
  ins_cost(80);
  format %{
           "CMP$cop $tmp1, $tmp2\t @cmovL_cmpI_reg_reg\n"
           "\tCMOV $dst,$src \t @cmovL_cmpI_reg_reg"
  %}

  ins_encode %{
    Register op1 = $tmp1$$Register;
    Register op2 = $tmp2$$Register;
    Register dst = as_Register($dst$$reg);
    Register src = as_Register($src$$reg);
    int     flag = $cop$$cmpcode;

    switch(flag)
    {
      case 0x01: //equal
        __ subu32(AT, op1, op2);
        __ movz(dst, src, AT);
        break;

      case 0x02: //not_equal
        __ subu32(AT, op1, op2);
        __ movn(dst, src, AT);
        break;

      case 0x03: //greater
        __ slt(AT, op2, op1);
        __ movn(dst, src, AT);
        break;

      case 0x04: //greater_equal
        __ slt(AT, op1, op2);
        __ movz(dst, src, AT);
        break;

      case 0x05: //less
        __ slt(AT, op1, op2);
        __ movn(dst, src, AT);
        break;

      case 0x06: //less_equal
        __ slt(AT, op2, op1);
        __ movz(dst, src, AT);
        break;

      default:
        Unimplemented();
    }
  %}

  ins_pipe( pipe_slow );
%}
// Conditional move of a long selected by a signed long compare.
// Equality via the 64-bit difference (safe), ordering via slt. Clobbers AT.
instruct cmovL_cmpL_reg_reg(mRegL dst, mRegL src, mRegL tmp1, mRegL tmp2, cmpOp cop ) %{
  match(Set dst (CMoveL (Binary cop (CmpL tmp1 tmp2)) (Binary dst src)));
  ins_cost(80);
  format %{
           "CMP$cop $tmp1, $tmp2\t @cmovL_cmpL_reg_reg\n"
           "\tCMOV $dst,$src \t @cmovL_cmpL_reg_reg"
  %}
  ins_encode %{
    Register opr1 = as_Register($tmp1$$reg);
    Register opr2 = as_Register($tmp2$$reg);
    Register dst  = as_Register($dst$$reg);
    Register src  = as_Register($src$$reg);
    int     flag = $cop$$cmpcode;

    switch(flag)
    {
      case 0x01: //equal
        __ subu(AT, opr1, opr2);
        __ movz(dst, src, AT);
        break;

      case 0x02: //not_equal
        __ subu(AT, opr1, opr2);
        __ movn(dst, src, AT);
        break;

      case 0x03: //greater
        __ slt(AT, opr2, opr1);
        __ movn(dst, src, AT);
        break;

      case 0x04: //greater_equal
        __ slt(AT, opr1, opr2);
        __ movz(dst, src, AT);
        break;

      case 0x05: //less
        __ slt(AT, opr1, opr2);
        __ movn(dst, src, AT);
        break;

      case 0x06: //less_equal
        __ slt(AT, opr2, opr1);
        __ movz(dst, src, AT);
        break;

      default:
        Unimplemented();
    }
  %}

  ins_pipe( pipe_slow );
%}
// Conditional move of a long selected by an unsigned narrow-oop compare.
// Narrow oops are 32-bit unsigned values (zero-extended in registers), so
// subu32 for equality and sltu for ordering. Clobbers AT.
instruct cmovL_cmpN_reg_reg(mRegL dst, mRegL src, mRegN tmp1, mRegN tmp2, cmpOpU cop ) %{
  match(Set dst (CMoveL (Binary cop (CmpN tmp1 tmp2)) (Binary dst src)));
  ins_cost(80);
  format %{
           "CMPU$cop $tmp1,$tmp2\t @cmovL_cmpN_reg_reg\n\t"
           "CMOV $dst,$src\t @cmovL_cmpN_reg_reg"
  %}
  ins_encode %{
    Register op1 = $tmp1$$Register;
    Register op2 = $tmp2$$Register;
    Register dst = $dst$$Register;
    Register src = $src$$Register;
    int     flag = $cop$$cmpcode;

    switch(flag)
    {
      case 0x01: //equal
        __ subu32(AT, op1, op2);
        __ movz(dst, src, AT);
        break;

      case 0x02: //not_equal
        __ subu32(AT, op1, op2);
        __ movn(dst, src, AT);
        break;

      case 0x03: //above
        __ sltu(AT, op2, op1);
        __ movn(dst, src, AT);
        break;

      case 0x04: //above_equal
        __ sltu(AT, op1, op2);
        __ movz(dst, src, AT);
        break;

      case 0x05: //below
        __ sltu(AT, op1, op2);
        __ movn(dst, src, AT);
        break;

      case 0x06: //below_equal
        __ sltu(AT, op2, op1);
        __ movz(dst, src, AT);
        break;

      default:
        Unimplemented();
    }
  %}

  ins_pipe( pipe_slow );
%}
8991 instruct cmovL_cmpD_reg_reg(mRegL dst, mRegL src, regD tmp1, regD tmp2, cmpOp cop ) %{
8992 match(Set dst (CMoveL (Binary cop (CmpD tmp1 tmp2)) (Binary dst src)));
8993 ins_cost(80);
8994 format %{
8995 "CMP$cop $tmp1, $tmp2\t @cmovL_cmpD_reg_reg\n"
8996 "\tCMOV $dst,$src \t @cmovL_cmpD_reg_reg"
8997 %}
8998 ins_encode %{
8999 FloatRegister reg_op1 = as_FloatRegister($tmp1$$reg);
9000 FloatRegister reg_op2 = as_FloatRegister($tmp2$$reg);
9001 Register dst = as_Register($dst$$reg);
9002 Register src = as_Register($src$$reg);
9004 int flag = $cop$$cmpcode;
9006 switch(flag)
9007 {
9008 case 0x01: //equal
9009 __ c_eq_d(reg_op1, reg_op2);
9010 __ movt(dst, src);
9011 break;
9012 case 0x02: //not_equal
9013 __ c_eq_d(reg_op1, reg_op2);
9014 __ movf(dst, src);
9015 break;
9016 case 0x03: //greater
9017 __ c_ole_d(reg_op1, reg_op2);
9018 __ movf(dst, src);
9019 break;
9020 case 0x04: //greater_equal
9021 __ c_olt_d(reg_op1, reg_op2);
9022 __ movf(dst, src);
9023 break;
9024 case 0x05: //less
9025 __ c_ult_d(reg_op1, reg_op2);
9026 __ movt(dst, src);
9027 break;
9028 case 0x06: //less_equal
9029 __ c_ule_d(reg_op1, reg_op2);
9030 __ movt(dst, src);
9031 break;
9032 default:
9033 Unimplemented();
9034 }
9035 %}
9037 ins_pipe( pipe_slow );
9038 %}
// Conditional move of a double selected by a double compare.
// There is no FP-to-FP conditional move used here; instead the move is
// skipped with a branch on FCC0 (bc1f/bc1t) around a mov_d. Each branch
// is followed by an explicit nop to fill the delay slot.
instruct cmovD_cmpD_reg_reg(regD dst, regD src, regD tmp1, regD tmp2, cmpOp cop ) %{
  match(Set dst (CMoveD (Binary cop (CmpD tmp1 tmp2)) (Binary dst src)));
  ins_cost(200);
  format %{
           "CMP$cop $tmp1, $tmp2\t @cmovD_cmpD_reg_reg\n"
           "\tCMOV $dst,$src \t @cmovD_cmpD_reg_reg"
  %}
  ins_encode %{
    FloatRegister reg_op1 = as_FloatRegister($tmp1$$reg);
    FloatRegister reg_op2 = as_FloatRegister($tmp2$$reg);
    FloatRegister dst = as_FloatRegister($dst$$reg);
    FloatRegister src = as_FloatRegister($src$$reg);

    int     flag = $cop$$cmpcode;

    Label L;

    switch(flag)
    {
      case 0x01: //equal
        // If not equal, branch around the move.
        __ c_eq_d(reg_op1, reg_op2);
        __ bc1f(L);
        __ nop();
        __ mov_d(dst, src);
        __ bind(L);
        break;
      case 0x02: //not_equal
        // 2016/4/19 aoqi: See instruct branchConD_reg_reg. The change in branchConD_reg_reg fixed a bug. It seems similar here, so I made the same change.
        // If equal, branch around the move (unordered/NaN counts as ne).
        __ c_eq_d(reg_op1, reg_op2);
        __ bc1t(L);
        __ nop();
        __ mov_d(dst, src);
        __ bind(L);
        break;
      case 0x03: //greater
        __ c_ole_d(reg_op1, reg_op2);
        __ bc1t(L);
        __ nop();
        __ mov_d(dst, src);
        __ bind(L);
        break;
      case 0x04: //greater_equal
        __ c_olt_d(reg_op1, reg_op2);
        __ bc1t(L);
        __ nop();
        __ mov_d(dst, src);
        __ bind(L);
        break;
      case 0x05: //less
        __ c_ult_d(reg_op1, reg_op2);
        __ bc1f(L);
        __ nop();
        __ mov_d(dst, src);
        __ bind(L);
        break;
      case 0x06: //less_equal
        __ c_ule_d(reg_op1, reg_op2);
        __ bc1f(L);
        __ nop();
        __ mov_d(dst, src);
        __ bind(L);
        break;
      default:
        Unimplemented();
    }
  %}

  ins_pipe( pipe_slow );
%}
// Conditional move of a float selected by a signed int compare.
// Implemented by branching around a mov_s on the inverted condition;
// each branch has an explicit nop delay slot. Clobbers AT (for the
// slt-based orderings). "great" labels spelled out as greater.
instruct cmovF_cmpI_reg_reg(regF dst, regF src, mRegI tmp1, mRegI tmp2, cmpOp cop ) %{
  match(Set dst (CMoveF (Binary cop (CmpI tmp1 tmp2)) (Binary dst src)));
  ins_cost(200);
  format %{
           "CMP$cop $tmp1, $tmp2\t @cmovF_cmpI_reg_reg\n"
           "\tCMOV $dst, $src \t @cmovF_cmpI_reg_reg"
  %}

  ins_encode %{
    Register op1 = $tmp1$$Register;
    Register op2 = $tmp2$$Register;
    FloatRegister dst = as_FloatRegister($dst$$reg);
    FloatRegister src = as_FloatRegister($src$$reg);
    int     flag = $cop$$cmpcode;
    Label L;

    switch(flag)
    {
      case 0x01: //equal
        __ bne(op1, op2, L);
        __ nop();
        __ mov_s(dst, src);
        __ bind(L);
        break;
      case 0x02: //not_equal
        __ beq(op1, op2, L);
        __ nop();
        __ mov_s(dst, src);
        __ bind(L);
        break;
      case 0x03: //greater
        __ slt(AT, op2, op1);
        __ beq(AT, R0, L);
        __ nop();
        __ mov_s(dst, src);
        __ bind(L);
        break;
      case 0x04: //greater_equal
        __ slt(AT, op1, op2);
        __ bne(AT, R0, L);
        __ nop();
        __ mov_s(dst, src);
        __ bind(L);
        break;
      case 0x05: //less
        __ slt(AT, op1, op2);
        __ beq(AT, R0, L);
        __ nop();
        __ mov_s(dst, src);
        __ bind(L);
        break;
      case 0x06: //less_equal
        __ slt(AT, op2, op1);
        __ bne(AT, R0, L);
        __ nop();
        __ mov_s(dst, src);
        __ bind(L);
        break;
      default:
        Unimplemented();
    }
  %}

  ins_pipe( pipe_slow );
%}
// Conditional move of a double selected by a signed int compare.
// Branch-around-mov_d pattern, explicit nop delay slots. Clobbers AT.
instruct cmovD_cmpI_reg_reg(regD dst, regD src, mRegI tmp1, mRegI tmp2, cmpOp cop ) %{
  match(Set dst (CMoveD (Binary cop (CmpI tmp1 tmp2)) (Binary dst src)));
  ins_cost(200);
  format %{
           "CMP$cop $tmp1, $tmp2\t @cmovD_cmpI_reg_reg\n"
           "\tCMOV $dst, $src \t @cmovD_cmpI_reg_reg"
  %}

  ins_encode %{
    Register op1 = $tmp1$$Register;
    Register op2 = $tmp2$$Register;
    FloatRegister dst = as_FloatRegister($dst$$reg);
    FloatRegister src = as_FloatRegister($src$$reg);
    int     flag = $cop$$cmpcode;
    Label L;

    switch(flag)
    {
      case 0x01: //equal
        __ bne(op1, op2, L);
        __ nop();
        __ mov_d(dst, src);
        __ bind(L);
        break;
      case 0x02: //not_equal
        __ beq(op1, op2, L);
        __ nop();
        __ mov_d(dst, src);
        __ bind(L);
        break;
      case 0x03: //greater
        __ slt(AT, op2, op1);
        __ beq(AT, R0, L);
        __ nop();
        __ mov_d(dst, src);
        __ bind(L);
        break;
      case 0x04: //greater_equal
        __ slt(AT, op1, op2);
        __ bne(AT, R0, L);
        __ nop();
        __ mov_d(dst, src);
        __ bind(L);
        break;
      case 0x05: //less
        __ slt(AT, op1, op2);
        __ beq(AT, R0, L);
        __ nop();
        __ mov_d(dst, src);
        __ bind(L);
        break;
      case 0x06: //less_equal
        __ slt(AT, op2, op1);
        __ bne(AT, R0, L);
        __ nop();
        __ mov_d(dst, src);
        __ bind(L);
        break;
      default:
        Unimplemented();
    }
  %}

  ins_pipe( pipe_slow );
%}
// Conditional move of a double selected by a pointer compare.
// NOTE(review): the ordering cases use signed slt on pointer values,
// whereas cmovP_cmpP_reg_reg compares pointers with sltu (unsigned).
// Presumably only eq/ne reach this rule in practice — confirm, or the
// ordered cases mis-handle pointers with the sign bit set.
// Branch-around-mov_d pattern, explicit nop delay slots. Clobbers AT.
instruct cmovD_cmpP_reg_reg(regD dst, regD src, mRegP tmp1, mRegP tmp2, cmpOp cop ) %{
  match(Set dst (CMoveD (Binary cop (CmpP tmp1 tmp2)) (Binary dst src)));
  ins_cost(200);
  format %{
           "CMP$cop $tmp1, $tmp2\t @cmovD_cmpP_reg_reg\n"
           "\tCMOV $dst, $src \t @cmovD_cmpP_reg_reg"
  %}

  ins_encode %{
    Register op1 = $tmp1$$Register;
    Register op2 = $tmp2$$Register;
    FloatRegister dst = as_FloatRegister($dst$$reg);
    FloatRegister src = as_FloatRegister($src$$reg);
    int     flag = $cop$$cmpcode;
    Label L;

    switch(flag)
    {
      case 0x01: //equal
        __ bne(op1, op2, L);
        __ nop();
        __ mov_d(dst, src);
        __ bind(L);
        break;
      case 0x02: //not_equal
        __ beq(op1, op2, L);
        __ nop();
        __ mov_d(dst, src);
        __ bind(L);
        break;
      case 0x03: //greater
        __ slt(AT, op2, op1);
        __ beq(AT, R0, L);
        __ nop();
        __ mov_d(dst, src);
        __ bind(L);
        break;
      case 0x04: //greater_equal
        __ slt(AT, op1, op2);
        __ bne(AT, R0, L);
        __ nop();
        __ mov_d(dst, src);
        __ bind(L);
        break;
      case 0x05: //less
        __ slt(AT, op1, op2);
        __ beq(AT, R0, L);
        __ nop();
        __ mov_d(dst, src);
        __ bind(L);
        break;
      case 0x06: //less_equal
        __ slt(AT, op2, op1);
        __ bne(AT, R0, L);
        __ nop();
        __ mov_d(dst, src);
        __ bind(L);
        break;
      default:
        Unimplemented();
    }
  %}

  ins_pipe( pipe_slow );
%}
9308 //FIXME
// Conditional move of an int selected by a float compare.
// Single-precision counterpart of cmovI_cmpD_reg_reg: c.cond.s sets FCC0
// and MOVT/MOVF select on it; see that rule for the ordered/unordered
// (NaN) predicate pairing.
instruct cmovI_cmpF_reg_reg(mRegI dst, mRegI src, regF tmp1, regF tmp2, cmpOp cop ) %{
  match(Set dst (CMoveI (Binary cop (CmpF tmp1 tmp2)) (Binary dst src)));
  ins_cost(80);
  format %{
           "CMP$cop $tmp1, $tmp2\t @cmovI_cmpF_reg_reg\n"
           "\tCMOV $dst,$src \t @cmovI_cmpF_reg_reg"
  %}

  ins_encode %{
    FloatRegister reg_op1 = $tmp1$$FloatRegister;
    FloatRegister reg_op2 = $tmp2$$FloatRegister;
    Register dst = $dst$$Register;
    Register src = $src$$Register;
    int flag = $cop$$cmpcode;

    switch(flag)
    {
      case 0x01: //equal
        __ c_eq_s(reg_op1, reg_op2);
        __ movt(dst, src);
        break;
      case 0x02: //not_equal
        // Move when eq is false (also treats unordered/NaN as not-equal).
        __ c_eq_s(reg_op1, reg_op2);
        __ movf(dst, src);
        break;
      case 0x03: //greater
        __ c_ole_s(reg_op1, reg_op2);
        __ movf(dst, src);
        break;
      case 0x04: //greater_equal
        __ c_olt_s(reg_op1, reg_op2);
        __ movf(dst, src);
        break;
      case 0x05: //less
        __ c_ult_s(reg_op1, reg_op2);
        __ movt(dst, src);
        break;
      case 0x06: //less_equal
        __ c_ule_s(reg_op1, reg_op2);
        __ movt(dst, src);
        break;
      default:
        Unimplemented();
    }
  %}
  ins_pipe( pipe_slow );
%}
// Conditional move of a float selected by a float compare.
// Branch on FCC0 (bc1f/bc1t) around a mov_s on the inverted condition;
// explicit nop delay slots. See cmovD_cmpD_reg_reg for the NaN notes.
instruct cmovF_cmpF_reg_reg(regF dst, regF src, regF tmp1, regF tmp2, cmpOp cop ) %{
  match(Set dst (CMoveF (Binary cop (CmpF tmp1 tmp2)) (Binary dst src)));
  ins_cost(200);
  format %{
           "CMP$cop $tmp1, $tmp2\t @cmovF_cmpF_reg_reg\n"
           "\tCMOV $dst,$src \t @cmovF_cmpF_reg_reg"
  %}

  ins_encode %{
    FloatRegister reg_op1 = $tmp1$$FloatRegister;
    FloatRegister reg_op2 = $tmp2$$FloatRegister;
    FloatRegister dst = $dst$$FloatRegister;
    FloatRegister src = $src$$FloatRegister;
    Label L;
    int flag = $cop$$cmpcode;

    switch(flag)
    {
      case 0x01: //equal
        __ c_eq_s(reg_op1, reg_op2);
        __ bc1f(L);
        __ nop();
        __ mov_s(dst, src);
        __ bind(L);
        break;
      case 0x02: //not_equal
        __ c_eq_s(reg_op1, reg_op2);
        __ bc1t(L);
        __ nop();
        __ mov_s(dst, src);
        __ bind(L);
        break;
      case 0x03: //greater
        __ c_ole_s(reg_op1, reg_op2);
        __ bc1t(L);
        __ nop();
        __ mov_s(dst, src);
        __ bind(L);
        break;
      case 0x04: //greater_equal
        __ c_olt_s(reg_op1, reg_op2);
        __ bc1t(L);
        __ nop();
        __ mov_s(dst, src);
        __ bind(L);
        break;
      case 0x05: //less
        __ c_ult_s(reg_op1, reg_op2);
        __ bc1f(L);
        __ nop();
        __ mov_s(dst, src);
        __ bind(L);
        break;
      case 0x06: //less_equal
        __ c_ule_s(reg_op1, reg_op2);
        __ bc1f(L);
        __ nop();
        __ mov_s(dst, src);
        __ bind(L);
        break;
      default:
        Unimplemented();
    }
  %}
  ins_pipe( pipe_slow );
%}
9424 // Manifest a CmpL result in an integer register. Very painful.
9425 // This is the test to avoid.
// Three-way long compare: dst = (src1 < src2) ? -1 : (src1 == src2) ? 0 : 1.
// The less-than test must NOT be derived from the sign of the 64-bit
// difference: subu(src1, src2) can overflow (e.g. min_jlong - 1 yields a
// positive result) and then its sign is wrong. Use slt, which is
// overflow-safe; equality via the difference is safe (zero iff equal).
// Clobbers AT.
instruct cmpL3_reg_reg(mRegI dst, mRegL src1, mRegL src2) %{
  match(Set dst (CmpL3 src1 src2));
  ins_cost(1000);
  format %{ "cmpL3 $dst, $src1, $src2 @ cmpL3_reg_reg" %}
  ins_encode %{
    Register opr1 = as_Register($src1$$reg);
    Register opr2 = as_Register($src2$$reg);
    Register dst  = as_Register($dst$$reg);

    Label Done;

    __ slt(AT, opr1, opr2);            // AT = (src1 < src2), overflow-safe
    __ bne(AT, R0, Done);
    __ delayed()->daddiu(dst, R0, -1); // delay slot: dst = -1 (used if less)

    __ move(dst, 1);                   // assume greater
    __ subu(AT, opr1, opr2);           // zero iff equal
    __ movz(dst, R0, AT);              // equal -> 0

    __ bind(Done);
  %}
  ins_pipe( pipe_slow );
%}
9449 //
9450 // less_rsult = -1
9451 // greater_result = 1
9452 // equal_result = 0
9453 // nan_result = -1
9454 //
// Three-way float compare per the table above:
//   less or NaN -> -1, equal -> 0, greater -> 1.
// c_ult_s is "unordered or less", so a NaN operand takes the -1 path,
// matching Java's fcmpl semantics. Result is materialized via the delay
// slot (-1), then 1, then movt to 0 on equality.
instruct cmpF3_reg_reg(mRegI dst, regF src1, regF src2) %{
  match(Set dst (CmpF3 src1 src2));
  ins_cost(1000);
  format %{ "cmpF3 $dst, $src1, $src2 @ cmpF3_reg_reg" %}
  ins_encode %{
    FloatRegister src1 = as_FloatRegister($src1$$reg);
    FloatRegister src2 = as_FloatRegister($src2$$reg);
    Register dst = as_Register($dst$$reg);

    Label Done;

    __ c_ult_s(src1, src2);
    __ bc1t(Done);
    __ delayed()->daddiu(dst, R0, -1);   // delay slot: dst = -1 (less/NaN)

    __ c_eq_s(src1, src2);
    __ move(dst, 1);                     // assume greater
    __ movt(dst, R0);                    // equal -> 0

    __ bind(Done);
  %}
  ins_pipe( pipe_slow );
%}
// Three-way double compare; double-precision twin of cmpF3_reg_reg:
//   less or NaN -> -1, equal -> 0, greater -> 1 (Java dcmpl semantics).
instruct cmpD3_reg_reg(mRegI dst, regD src1, regD src2) %{
  match(Set dst (CmpD3 src1 src2));
  ins_cost(1000);
  format %{ "cmpD3 $dst, $src1, $src2 @ cmpD3_reg_reg" %}
  ins_encode %{
    FloatRegister src1 = as_FloatRegister($src1$$reg);
    FloatRegister src2 = as_FloatRegister($src2$$reg);
    Register dst = as_Register($dst$$reg);

    Label Done;

    __ c_ult_d(src1, src2);
    __ bc1t(Done);
    __ delayed()->daddiu(dst, R0, -1);   // delay slot: dst = -1 (less/NaN)

    __ c_eq_d(src1, src2);
    __ move(dst, 1);                     // assume greater
    __ movt(dst, R0);                    // equal -> 0

    __ bind(Done);
  %}
  ins_pipe( pipe_slow );
%}
instruct clear_array(mRegL cnt, mRegP base, Universe dummy) %{
  match(Set dummy (ClearArray cnt base));
  format %{ "CLEAR_ARRAY base = $base, cnt = $cnt # Clear doublewords" %}
  ins_encode %{
    // cnt is the number of doublewords (8-byte units) to clear — not bytes
    // (the loop stores one sd, i.e. wordSize == 8 bytes, per iteration);
    // base points to the start of the region. Clobbers AT and T9.
    Register base = $base$$Register;
    Register num  = $cnt$$Register;
    Label Loop, done;

    /* 2012/9/21 Jin: according to X86, $cnt is caculated by doublewords(8 bytes) */
    __ move(T9, num); /* T9 = words */
    __ beq(T9, R0, done);     // nothing to clear
    __ nop();
    __ move(AT, base);        // AT = running store pointer

    __ bind(Loop);
    __ sd(R0, Address(AT, 0));
    __ daddi(AT, AT, wordSize);
    __ daddi(T9, T9, -1);
    __ bne(T9, R0, Loop);
    __ delayed()->nop();
    __ bind(done);
  %}
  ins_pipe( pipe_slow );
%}
// Intrinsic for String.compareTo: compare UTF-16 char arrays str1[0..cnt1)
// and str2[0..cnt2). Result is the difference of the first mismatching
// chars, or cnt1 - cnt2 when one string is a prefix of the other.
// All four inputs are killed; clobbers AT. Branch delay slots are filled
// explicitly via delayed()->.
instruct string_compare(a4_RegP str1, mA5RegI cnt1, a6_RegP str2, mA7RegI cnt2, no_Ax_mRegI result) %{
  match(Set result (StrComp (Binary str1 cnt1) (Binary str2 cnt2)));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2);

  format %{ "String Compare $str1[len: $cnt1], $str2[len: $cnt2] -> $result @ string_compare" %}
  ins_encode %{
    // Get the first character position in both strings
    // [8] char array, [12] offset, [16] count
    Register str1 = $str1$$Register;
    Register str2 = $str2$$Register;
    Register cnt1 = $cnt1$$Register;
    Register cnt2 = $cnt2$$Register;
    Register result = $result$$Register;

    Label L, Loop, haveResult, done;

    // compute the and difference of lengths (in result)
    __ subu(result, cnt1, cnt2); // result holds the difference of two lengths

    // compute the shorter length (in cnt1)
    __ slt(AT, cnt2, cnt1);
    __ movn(cnt1, cnt2, AT);

    // Now the shorter length is in cnt1 and cnt2 can be used as a tmp register
    __ bind(Loop);                        // Loop begin
    __ beq(cnt1, R0, done);               // ran out of chars: prefix case
    __ delayed()->lhu(AT, str1, 0);;      // delay slot: load current char of str1

    // compare current character
    __ lhu(cnt2, str2, 0);
    __ bne(AT, cnt2, haveResult);
    __ delayed()->addi(str1, str1, 2);    // delay slot: advance str1 (2 bytes/char)
    __ addi(str2, str2, 2);
    __ b(Loop);
    __ delayed()->addi(cnt1, cnt1, -1);   // delay slot: consume one char; Loop end

    __ bind(haveResult);
    __ subu(result, AT, cnt2);            // difference of mismatching chars

    __ bind(done);
  %}

  ins_pipe( pipe_slow );
%}
9575 // intrinsic optimization
// Intrinsic for String.equals: result = 1 if the two UTF-16 char arrays
// are identical over cnt chars (or are the same array), else 0.
// str1, str2, cnt are killed; temp is a scratch register.
// NOTE(review): unlike string_compare, the beq/bne branches here are not
// wrapped in delayed()->…; presumably the instruction following each
// branch is intended as its delay slot (it is executed either way and is
// harmless on both paths) — confirm against the assembler's branch
// emission before touching this sequence.
instruct string_equals(a4_RegP str1, a5_RegP str2, mA6RegI cnt, mA7RegI temp, no_Ax_mRegI result) %{
  match(Set result (StrEquals (Binary str1 str2) cnt));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt, KILL temp);

  format %{ "String Equal $str1, $str2, len:$cnt tmp:$temp -> $result @ string_equals" %}
  ins_encode %{
    // Get the first character position in both strings
    // [8] char array, [12] offset, [16] count
    Register str1 = $str1$$Register;
    Register str2 = $str2$$Register;
    Register cnt  = $cnt$$Register;
    Register tmp  = $temp$$Register;
    Register result = $result$$Register;

    Label Loop, done;

    __ beq(str1, str2, done);       // same char[] ?
    __ daddiu(result, R0, 1);       // result = 1 (also reached via fallthrough)

    __ bind(Loop);                  // Loop begin
    __ beq(cnt, R0, done);
    __ daddiu(result, R0, 1);       // count == 0 -> equal

    // compare current character
    __ lhu(AT, str1, 0);;
    __ lhu(tmp, str2, 0);
    __ bne(AT, tmp, done);
    __ delayed()->daddi(result, R0, 0);  // delay slot: assume mismatch
    __ addi(str1, str1, 2);
    __ addi(str2, str2, 2);
    __ b(Loop);
    __ delayed()->addi(cnt, cnt, -1);    // delay slot: consume one char; Loop end

    __ bind(done);
  %}

  ins_pipe( pipe_slow );
%}
9616 //----------Arithmetic Instructions-------------------------------------------
9617 //----------Addition Instructions---------------------------------------------
// Integer add: dst = src1 + src2 (32-bit; result sign-extended by addu32).
instruct addI_Reg_Reg(mRegI dst, mRegI src1, mRegI src2) %{
  match(Set dst (AddI src1 src2));

  format %{ "add $dst, $src1, $src2 #@addI_Reg_Reg" %}
  ins_encode %{
    __ addu32($dst$$Register, $src1$$Register, $src2$$Register);
  %}
  ins_pipe( ialu_regI_regI );
%}
// Integer add with immediate: dst = src1 + src2.
// A constant outside the signed 16-bit range of addiu32 is first
// materialized in AT. Clobbers AT in that case.
instruct addI_Reg_imm(mRegI dst, mRegI src1, immI src2) %{
  match(Set dst (AddI src1 src2));

  format %{ "add $dst, $src1, $src2 #@addI_Reg_imm" %}
  ins_encode %{
    int val = $src2$$constant;

    if (!Assembler::is_simm16(val)) {
      // Does not fit the 16-bit immediate field: go through AT.
      __ move(AT, val);
      __ addu32($dst$$Register, $src1$$Register, AT);
    } else {
      __ addiu32($dst$$Register, $src1$$Register, val);
    }
  %}
  ins_pipe( ialu_regI_regI );
%}
// Pointer add: dst = src1 + src2 (full 64-bit add).
instruct addP_reg_reg(mRegP dst, mRegP src1, mRegL src2) %{
  match(Set dst (AddP src1 src2));

  format %{ "dadd $dst, $src1, $src2 #@addP_reg_reg" %}

  ins_encode %{
    __ daddu($dst$$Register, $src1$$Register, $src2$$Register);
  %}

  ins_pipe( ialu_regI_regI );
%}
// Pointer add with an int offset (ConvI2L folded away): the int operand
// is already sign-extended in its 64-bit register, so a plain daddu works.
instruct addP_reg_reg_convI2L(mRegP dst, mRegP src1, mRegI src2) %{
  match(Set dst (AddP src1 (ConvI2L src2)));

  format %{ "dadd $dst, $src1, $src2 #@addP_reg_reg_convI2L" %}

  ins_encode %{
    __ daddu($dst$$Register, $src1$$Register, $src2$$Register);
  %}

  ins_pipe( ialu_regI_regI );
%}
// Pointer add with a long immediate: dst = src1 + src2.
// Constants outside the 16-bit daddiu range are loaded via set64 into AT.
// Clobbers AT in that case.
instruct addP_reg_imm(mRegP dst, mRegP src1, immL src2) %{
  match(Set dst (AddP src1 src2));

  format %{ "daddi $dst, $src1, $src2 #@addP_reg_imm" %}
  ins_encode %{
    long off = $src2$$constant;

    if (!Assembler::is_simm16(off)) {
      // Too wide for the immediate field: materialize in AT first.
      __ set64(AT, off);
      __ daddu($dst$$Register, $src1$$Register, AT);
    } else {
      __ daddiu($dst$$Register, $src1$$Register, off);
    }
  %}
  ins_pipe( ialu_regI_imm16 );
%}
9699 // Add Long Register with Register
// Long add: dst = src1 + src2.
instruct addL_Reg_Reg(mRegL dst, mRegL src1, mRegL src2) %{
  match(Set dst (AddL src1 src2));
  ins_cost(200);
  format %{ "ADD $dst, $src1, $src2 #@addL_Reg_Reg\t" %}

  ins_encode %{
    __ daddu($dst$$Register, $src1$$Register, $src2$$Register);
  %}

  ins_pipe( ialu_regL_regL );
%}
// Long add with a 16-bit immediate (guaranteed to fit daddiu by immL16).
instruct addL_Reg_imm(mRegL dst, mRegL src1, immL16 src2)
%{
  match(Set dst (AddL src1 src2));

  format %{ "ADD $dst, $src1, $src2 #@addL_Reg_imm " %}
  ins_encode %{
    __ daddiu($dst$$Register, $src1$$Register, $src2$$constant);
  %}

  ins_pipe( ialu_regL_regL );
%}
// Long add of a sign-extended int and a 16-bit immediate.
// The int operand is already sign-extended in its register, so the
// ConvI2L needs no extra instruction.
instruct addL_RegI2L_imm(mRegL dst, mRegI src1, immL16 src2)
%{
  match(Set dst (AddL (ConvI2L src1) src2));

  format %{ "ADD $dst, $src1, $src2 #@addL_RegI2L_imm " %}
  ins_encode %{
    __ daddiu($dst$$Register, $src1$$Register, $src2$$constant);
  %}

  ins_pipe( ialu_regL_regL );
%}
// Long add of a sign-extended int and a long (ConvI2L folded away).
instruct addL_RegI2L_Reg(mRegL dst, mRegI src1, mRegL src2) %{
  match(Set dst (AddL (ConvI2L src1) src2));
  ins_cost(200);
  format %{ "ADD $dst, $src1, $src2 #@addL_RegI2L_Reg\t" %}

  ins_encode %{
    __ daddu($dst$$Register, $src1$$Register, $src2$$Register);
  %}

  ins_pipe( ialu_regL_regL );
%}
// Long add of two sign-extended ints (both ConvI2L folded away).
instruct addL_RegI2L_RegI2L(mRegL dst, mRegI src1, mRegI src2) %{
  match(Set dst (AddL (ConvI2L src1) (ConvI2L src2)));
  ins_cost(200);
  format %{ "ADD $dst, $src1, $src2 #@addL_RegI2L_RegI2L\t" %}

  ins_encode %{
    __ daddu($dst$$Register, $src1$$Register, $src2$$Register);
  %}

  ins_pipe( ialu_regL_regL );
%}
// Long add of a long and a sign-extended int (ConvI2L folded away).
instruct addL_Reg_RegI2L(mRegL dst, mRegL src1, mRegI src2) %{
  match(Set dst (AddL src1 (ConvI2L src2)));
  ins_cost(200);
  format %{ "ADD $dst, $src1, $src2 #@addL_Reg_RegI2L\t" %}

  ins_encode %{
    __ daddu($dst$$Register, $src1$$Register, $src2$$Register);
  %}

  ins_pipe( ialu_regL_regL );
%}
9796 //----------Subtraction Instructions-------------------------------------------
9797 // Integer Subtraction Instructions
// Integer subtract: dst = src1 - src2 (32-bit; result sign-extended).
instruct subI_Reg_Reg(mRegI dst, mRegI src1, mRegI src2) %{
  match(Set dst (SubI src1 src2));
  ins_cost(100);

  format %{ "sub $dst, $src1, $src2 #@subI_Reg_Reg" %}
  ins_encode %{
    __ subu32($dst$$Register, $src1$$Register, $src2$$Register);
  %}
  ins_pipe( ialu_regI_regI );
%}
// Integer subtract of an immediate, emitted as an add of its negation
// (immI16_sub guarantees the negated constant fits addiu32).
instruct subI_Reg_immI16_sub(mRegI dst, mRegI src1, immI16_sub src2) %{
  match(Set dst (SubI src1 src2));
  ins_cost(80);

  format %{ "sub $dst, $src1, $src2 #@subI_Reg_immI16_sub" %}
  ins_encode %{
    int neg = -($src2$$constant);
    __ addiu32($dst$$Register, $src1$$Register, neg);
  %}
  ins_pipe( ialu_regI_regI );
%}
9825 instruct negI_Reg(mRegI dst, immI0 zero, mRegI src) %{
9826 match(Set dst (SubI zero src));
9827 ins_cost(80);
9829 format %{ "neg $dst, $src #@negI_Reg" %}
9830 ins_encode %{
9831 Register dst = $dst$$Register;
9832 Register src = $src$$Register;
9833 __ subu32(dst, R0, src);
9834 %}
9835 ins_pipe( ialu_regI_regI );
9836 %}
9838 instruct negL_Reg(mRegL dst, immL0 zero, mRegL src) %{
9839 match(Set dst (SubL zero src));
9840 ins_cost(80);
9842 format %{ "neg $dst, $src #@negL_Reg" %}
9843 ins_encode %{
9844 Register dst = $dst$$Register;
9845 Register src = $src$$Register;
9846 __ subu(dst, R0, src);
9847 %}
9848 ins_pipe( ialu_regI_regI );
9849 %}
9851 instruct subL_Reg_immL16_sub(mRegL dst, mRegL src1, immL16_sub src2) %{
9852 match(Set dst (SubL src1 src2));
9853 ins_cost(80);
9855 format %{ "sub $dst, $src1, $src2 #@subL_Reg_immL16_sub" %}
9856 ins_encode %{
9857 Register dst = $dst$$Register;
9858 Register src1 = $src1$$Register;
9859 __ daddiu(dst, src1, -1 * $src2$$constant);
9860 %}
9861 ins_pipe( ialu_regI_regI );
9862 %}
9864 // Subtract Long Register with Register.
9865 instruct subL_Reg_Reg(mRegL dst, mRegL src1, mRegL src2) %{
9866 match(Set dst (SubL src1 src2));
9867 ins_cost(100);
9868 format %{ "SubL $dst, $src1, $src2 @ subL_Reg_Reg" %}
9869 ins_encode %{
9870 Register dst = as_Register($dst$$reg);
9871 Register src1 = as_Register($src1$$reg);
9872 Register src2 = as_Register($src2$$reg);
9874 __ subu(dst, src1, src2);
9875 %}
9876 ins_pipe( ialu_regL_regL );
9877 %}
// SubL with the right operand converted from int — conversion is free
// (int values assumed sign-extended in 64-bit regs), so plain subu works.
9879 instruct subL_Reg_RegI2L(mRegL dst, mRegL src1, mRegI src2) %{
9880 match(Set dst (SubL src1 (ConvI2L src2)));
9881 ins_cost(100);
9882 format %{ "SubL $dst, $src1, $src2 @ subL_Reg_RegI2L" %}
9883 ins_encode %{
9884 Register dst = as_Register($dst$$reg);
9885 Register src1 = as_Register($src1$$reg);
9886 Register src2 = as_Register($src2$$reg);
9888 __ subu(dst, src1, src2);
9889 %}
9890 ins_pipe( ialu_regL_regL );
9891 %}
// SubL with the left operand converted from int.
9893 instruct subL_RegI2L_Reg(mRegL dst, mRegI src1, mRegL src2) %{
9894 match(Set dst (SubL (ConvI2L src1) src2));
9895 ins_cost(200);
9896 format %{ "SubL $dst, $src1, $src2 @ subL_RegI2L_Reg" %}
9897 ins_encode %{
9898 Register dst = as_Register($dst$$reg);
9899 Register src1 = as_Register($src1$$reg);
9900 Register src2 = as_Register($src2$$reg);
9902 __ subu(dst, src1, src2);
9903 %}
9904 ins_pipe( ialu_regL_regL );
9905 %}
// SubL with both operands converted from int.
9907 instruct subL_RegI2L_RegI2L(mRegL dst, mRegI src1, mRegI src2) %{
9908 match(Set dst (SubL (ConvI2L src1) (ConvI2L src2)));
9909 ins_cost(200);
9910 format %{ "SubL $dst, $src1, $src2 @ subL_RegI2L_RegI2L" %}
9911 ins_encode %{
9912 Register dst = as_Register($dst$$reg);
9913 Register src1 = as_Register($src1$$reg);
9914 Register src2 = as_Register($src2$$reg);
9916 __ subu(dst, src1, src2);
9917 %}
9918 ins_pipe( ialu_regL_regL );
9919 %}
9921 // Integer MOD with Register
// div places quotient in LO and remainder in HI; mfhi fetches the remainder.
// NOTE(review): no explicit zero-divisor trap here (divI below uses teq) —
// presumably ModI zero checks are handled elsewhere; confirm.
9922 instruct modI_Reg_Reg(mRegI dst, mRegI src1, mRegI src2) %{
9923 match(Set dst (ModI src1 src2));
9924 ins_cost(300);
9925 format %{ "modi $dst, $src1, $src2 @ modI_Reg_Reg" %}
9926 ins_encode %{
9927 Register dst = $dst$$Register;
9928 Register src1 = $src1$$Register;
9929 Register src2 = $src2$$Register;
9931 //if (UseLoongsonISA) {
9932 if (0) {
9933 // 2016.08.10
9934 // Experiments show that gsmod is slower than div+mfhi.
9935 // So I just disable it here.
9936 __ gsmod(dst, src1, src2);
9937 } else {
9938 __ div(src1, src2);
9939 __ mfhi(dst);
9940 }
9941 %}
9943 //ins_pipe( ialu_mod );
9944 ins_pipe( ialu_regI_regI );
9945 %}
// Long MOD: Loongson's fused gsdmod when available, else ddiv + mfhi
// (remainder lives in HI).
9947 instruct modL_reg_reg(mRegL dst, mRegL src1, mRegL src2) %{
9948 match(Set dst (ModL src1 src2));
9949 format %{ "modL $dst, $src1, $src2 @modL_reg_reg" %}
9951 ins_encode %{
9952 Register dst = as_Register($dst$$reg);
9953 Register op1 = as_Register($src1$$reg);
9954 Register op2 = as_Register($src2$$reg);
9956 if (UseLoongsonISA) {
9957 __ gsdmod(dst, op1, op2);
9958 } else {
9959 __ ddiv(op1, op2);
9960 __ mfhi(dst);
9961 }
9962 %}
9963 ins_pipe( pipe_slow );
9964 %}
// 32-bit multiply; mul writes the low 32 bits directly to a GPR.
9966 instruct mulI_Reg_Reg(mRegI dst, mRegI src1, mRegI src2) %{
9967 match(Set dst (MulI src1 src2));
9969 ins_cost(300);
9970 format %{ "mul $dst, $src1, $src2 @ mulI_Reg_Reg" %}
9971 ins_encode %{
9972 Register src1 = $src1$$Register;
9973 Register src2 = $src2$$Register;
9974 Register dst = $dst$$Register;
9976 __ mul(dst, src1, src2);
9977 %}
9978 ins_pipe( ialu_mult );
9979 %}
// Fused multiply-add on int: seed LO with src3, madd accumulates
// src1*src2 into HI:LO, then mflo takes the low 32 bits — which is exactly
// the wrapping int result of src1*src2 + src3.
9981 instruct maddI_Reg_Reg(mRegI dst, mRegI src1, mRegI src2, mRegI src3) %{
9982 match(Set dst (AddI (MulI src1 src2) src3));
9984 ins_cost(999);
9985 format %{ "madd $dst, $src1 * $src2 + $src3 #@maddI_Reg_Reg" %}
9986 ins_encode %{
9987 Register src1 = $src1$$Register;
9988 Register src2 = $src2$$Register;
9989 Register src3 = $src3$$Register;
9990 Register dst = $dst$$Register;
9992 __ mtlo(src3);
9993 __ madd(src1, src2);
9994 __ mflo(dst);
9995 %}
9996 ins_pipe( ialu_mult );
9997 %}
// Integer divide. teq raises a trap (code 0x7) when src2 == R0, i.e. a
// zero divisor — needed because MIPS div itself never faults (see comment).
// The two nops after div are presumably a HI/LO access hazard workaround
// on older cores — TODO confirm.
9999 instruct divI_Reg_Reg(mRegI dst, mRegI src1, mRegI src2) %{
10000 match(Set dst (DivI src1 src2));
10002 ins_cost(300);
10003 format %{ "div $dst, $src1, $src2 @ divI_Reg_Reg" %}
10004 ins_encode %{
10005 Register src1 = $src1$$Register;
10006 Register src2 = $src2$$Register;
10007 Register dst = $dst$$Register;
10009 /* 2012/4/21 Jin: In MIPS, div does not cause exception.
10010 We must trap an exception manually. */
10011 __ teq(R0, src2, 0x7);
10013 if (UseLoongsonISA) {
10014 __ gsdiv(dst, src1, src2);
10015 } else {
10016 __ div(src1, src2);
10018 __ nop();
10019 __ nop();
10020 __ mflo(dst);
10021 }
10022 %}
10023 ins_pipe( ialu_mod );
10024 %}
// Float divide. No manual trap is required: Java float division by zero
// follows IEEE-754 and yields Infinity/NaN rather than an exception.
10026 instruct divF_Reg_Reg(regF dst, regF src1, regF src2) %{
10027 match(Set dst (DivF src1 src2));
10029 ins_cost(300);
10030 format %{ "divF $dst, $src1, $src2 @ divF_Reg_Reg" %}
10031 ins_encode %{
10032 FloatRegister src1 = $src1$$FloatRegister;
10033 FloatRegister src2 = $src2$$FloatRegister;
10034 FloatRegister dst = $dst$$FloatRegister;
10036 /* Here do we need to trap an exception manually ? */
10037 __ div_s(dst, src1, src2);
10038 %}
10039 ins_pipe( pipe_slow );
10040 %}
// Double divide; same IEEE-754 reasoning as divF above.
10042 instruct divD_Reg_Reg(regD dst, regD src1, regD src2) %{
10043 match(Set dst (DivD src1 src2));
10045 ins_cost(300);
10046 format %{ "divD $dst, $src1, $src2 @ divD_Reg_Reg" %}
10047 ins_encode %{
10048 FloatRegister src1 = $src1$$FloatRegister;
10049 FloatRegister src2 = $src2$$FloatRegister;
10050 FloatRegister dst = $dst$$FloatRegister;
10052 /* Here do we need to trap an exception manually ? */
10053 __ div_d(dst, src1, src2);
10054 %}
10055 ins_pipe( pipe_slow );
10056 %}
// Long multiply: Loongson fused gsdmult when available, else dmult + mflo
// (the low 64 bits of the 128-bit product, matching Java long semantics).
10058 instruct mulL_reg_reg(mRegL dst, mRegL src1, mRegL src2) %{
10059 match(Set dst (MulL src1 src2));
10060 format %{ "mulL $dst, $src1, $src2 @mulL_reg_reg" %}
10061 ins_encode %{
10062 Register dst = as_Register($dst$$reg);
10063 Register op1 = as_Register($src1$$reg);
10064 Register op2 = as_Register($src2$$reg);
10066 if (UseLoongsonISA) {
10067 __ gsdmult(dst, op1, op2);
10068 } else {
10069 __ dmult(op1, op2);
10070 __ mflo(dst);
10071 }
10072 %}
10073 ins_pipe( pipe_slow );
10074 %}
// Long multiply with the right operand converted from int — conversion is
// free (sign-extended register convention), identical encoding to above.
10076 instruct mulL_reg_regI2L(mRegL dst, mRegL src1, mRegI src2) %{
10077 match(Set dst (MulL src1 (ConvI2L src2)));
10078 format %{ "mulL $dst, $src1, $src2 @mulL_reg_regI2L" %}
10079 ins_encode %{
10080 Register dst = as_Register($dst$$reg);
10081 Register op1 = as_Register($src1$$reg);
10082 Register op2 = as_Register($src2$$reg);
10084 if (UseLoongsonISA) {
10085 __ gsdmult(dst, op1, op2);
10086 } else {
10087 __ dmult(op1, op2);
10088 __ mflo(dst);
10089 }
10090 %}
10091 ins_pipe( pipe_slow );
10092 %}
// Long divide: gsddiv or ddiv + mflo (quotient is in LO).
// NOTE(review): no teq zero-divisor trap here unlike divI — presumably the
// DivL zero check is emitted elsewhere; confirm.
10094 instruct divL_reg_reg(mRegL dst, mRegL src1, mRegL src2) %{
10095 match(Set dst (DivL src1 src2));
10096 format %{ "divL $dst, $src1, $src2 @divL_reg_reg" %}
10098 ins_encode %{
10099 Register dst = as_Register($dst$$reg);
10100 Register op1 = as_Register($src1$$reg);
10101 Register op2 = as_Register($src2$$reg);
10103 if (UseLoongsonISA) {
10104 __ gsddiv(dst, op1, op2);
10105 } else {
10106 __ ddiv(op1, op2);
10107 __ mflo(dst);
10108 }
10109 %}
10110 ins_pipe( pipe_slow );
10111 %}
// Single-precision FP add.
10113 instruct addF_reg_reg(regF dst, regF src1, regF src2) %{
10114 match(Set dst (AddF src1 src2));
10115 format %{ "AddF $dst, $src1, $src2 @addF_reg_reg" %}
10116 ins_encode %{
10117 FloatRegister src1 = as_FloatRegister($src1$$reg);
10118 FloatRegister src2 = as_FloatRegister($src2$$reg);
10119 FloatRegister dst = as_FloatRegister($dst$$reg);
10121 __ add_s(dst, src1, src2);
10122 %}
10123 ins_pipe( fpu_regF_regF );
10124 %}
// Single-precision FP subtract.
10126 instruct subF_reg_reg(regF dst, regF src1, regF src2) %{
10127 match(Set dst (SubF src1 src2));
10128 format %{ "SubF $dst, $src1, $src2 @subF_reg_reg" %}
10129 ins_encode %{
10130 FloatRegister src1 = as_FloatRegister($src1$$reg);
10131 FloatRegister src2 = as_FloatRegister($src2$$reg);
10132 FloatRegister dst = as_FloatRegister($dst$$reg);
10134 __ sub_s(dst, src1, src2);
10135 %}
10136 ins_pipe( fpu_regF_regF );
10137 %}
// Double-precision FP add.
10138 instruct addD_reg_reg(regD dst, regD src1, regD src2) %{
10139 match(Set dst (AddD src1 src2));
10140 format %{ "AddD $dst, $src1, $src2 @addD_reg_reg" %}
10141 ins_encode %{
10142 FloatRegister src1 = as_FloatRegister($src1$$reg);
10143 FloatRegister src2 = as_FloatRegister($src2$$reg);
10144 FloatRegister dst = as_FloatRegister($dst$$reg);
10146 __ add_d(dst, src1, src2);
10147 %}
10148 ins_pipe( fpu_regF_regF );
10149 %}
// Double-precision FP subtract.
10151 instruct subD_reg_reg(regD dst, regD src1, regD src2) %{
10152 match(Set dst (SubD src1 src2));
10153 format %{ "SubD $dst, $src1, $src2 @subD_reg_reg" %}
10154 ins_encode %{
10155 FloatRegister src1 = as_FloatRegister($src1$$reg);
10156 FloatRegister src2 = as_FloatRegister($src2$$reg);
10157 FloatRegister dst = as_FloatRegister($dst$$reg);
10159 __ sub_d(dst, src1, src2);
10160 %}
10161 ins_pipe( fpu_regF_regF );
10162 %}
// Float negate (sign-bit flip via neg.s).
10164 instruct negF_reg(regF dst, regF src) %{
10165 match(Set dst (NegF src));
10166 format %{ "negF $dst, $src @negF_reg" %}
10167 ins_encode %{
10168 FloatRegister src = as_FloatRegister($src$$reg);
10169 FloatRegister dst = as_FloatRegister($dst$$reg);
10171 __ neg_s(dst, src);
10172 %}
10173 ins_pipe( fpu_regF_regF );
10174 %}
// Double negate.
10176 instruct negD_reg(regD dst, regD src) %{
10177 match(Set dst (NegD src));
10178 format %{ "negD $dst, $src @negD_reg" %}
10179 ins_encode %{
10180 FloatRegister src = as_FloatRegister($src$$reg);
10181 FloatRegister dst = as_FloatRegister($dst$$reg);
10183 __ neg_d(dst, src);
10184 %}
10185 ins_pipe( fpu_regF_regF );
10186 %}
// Single-precision FP multiply.
10189 instruct mulF_reg_reg(regF dst, regF src1, regF src2) %{
10190 match(Set dst (MulF src1 src2));
10191 format %{ "MULF $dst, $src1, $src2 @mulF_reg_reg" %}
10192 ins_encode %{
10193 FloatRegister src1 = $src1$$FloatRegister;
10194 FloatRegister src2 = $src2$$FloatRegister;
10195 FloatRegister dst = $dst$$FloatRegister;
10197 __ mul_s(dst, src1, src2);
10198 %}
10199 ins_pipe( fpu_regF_regF );
10200 %}
// Fused float multiply-add (madd.s). Effectively disabled by the huge
// ins_cost — fused rounding differs from two-step Java FP semantics on
// some Loongson parts, per the comment below.
10202 instruct maddF_reg_reg(regF dst, regF src1, regF src2, regF src3) %{
10203 match(Set dst (AddF (MulF src1 src2) src3));
10204 // For compatibility reason (e.g. on the Loongson platform), disable this guy.
10205 ins_cost(44444);
10206 format %{ "maddF $dst, $src1, $src2, $src3 @maddF_reg_reg" %}
10207 ins_encode %{
10208 FloatRegister src1 = $src1$$FloatRegister;
10209 FloatRegister src2 = $src2$$FloatRegister;
10210 FloatRegister src3 = $src3$$FloatRegister;
10211 FloatRegister dst = $dst$$FloatRegister;
10213 __ madd_s(dst, src1, src2, src3);
10214 %}
10215 ins_pipe( fpu_regF_regF );
10216 %}
10218 // Mul two double precision floating point number
10219 instruct mulD_reg_reg(regD dst, regD src1, regD src2) %{
10220 match(Set dst (MulD src1 src2));
10221 format %{ "MULD $dst, $src1, $src2 @mulD_reg_reg" %}
10222 ins_encode %{
10223 FloatRegister src1 = $src1$$FloatRegister;
10224 FloatRegister src2 = $src2$$FloatRegister;
10225 FloatRegister dst = $dst$$FloatRegister;
10227 __ mul_d(dst, src1, src2);
10228 %}
10229 ins_pipe( fpu_regF_regF );
10230 %}
// Fused double multiply-add; disabled via ins_cost like maddF above.
10232 instruct maddD_reg_reg(regD dst, regD src1, regD src2, regD src3) %{
10233 match(Set dst (AddD (MulD src1 src2) src3));
10234 // For compatibility reason (e.g. on the Loongson platform), disable this guy.
10235 ins_cost(44444);
10236 format %{ "maddD $dst, $src1, $src2, $src3 @maddD_reg_reg" %}
10237 ins_encode %{
10238 FloatRegister src1 = $src1$$FloatRegister;
10239 FloatRegister src2 = $src2$$FloatRegister;
10240 FloatRegister src3 = $src3$$FloatRegister;
10241 FloatRegister dst = $dst$$FloatRegister;
10243 __ madd_d(dst, src1, src2, src3);
10244 %}
10245 ins_pipe( fpu_regF_regF );
10246 %}
// Float absolute value (abs.s clears the sign bit).
10248 instruct absF_reg(regF dst, regF src) %{
10249 match(Set dst (AbsF src));
10250 ins_cost(100);
10251 format %{ "absF $dst, $src @absF_reg" %}
10252 ins_encode %{
10253 FloatRegister src = as_FloatRegister($src$$reg);
10254 FloatRegister dst = as_FloatRegister($dst$$reg);
10256 __ abs_s(dst, src);
10257 %}
10258 ins_pipe( fpu_regF_regF );
10259 %}
10262 // intrinsics for math_native.
10263 // AbsD SqrtD CosD SinD TanD LogD Log10D
10265 instruct absD_reg(regD dst, regD src) %{
10266 match(Set dst (AbsD src));
10267 ins_cost(100);
10268 format %{ "absD $dst, $src @absD_reg" %}
10269 ins_encode %{
10270 FloatRegister src = as_FloatRegister($src$$reg);
10271 FloatRegister dst = as_FloatRegister($dst$$reg);
10273 __ abs_d(dst, src);
10274 %}
10275 ins_pipe( fpu_regF_regF );
10276 %}
// Double square root.
10278 instruct sqrtD_reg(regD dst, regD src) %{
10279 match(Set dst (SqrtD src));
10280 ins_cost(100);
10281 format %{ "SqrtD $dst, $src @sqrtD_reg" %}
10282 ins_encode %{
10283 FloatRegister src = as_FloatRegister($src$$reg);
10284 FloatRegister dst = as_FloatRegister($dst$$reg);
10286 __ sqrt_d(dst, src);
10287 %}
10288 ins_pipe( fpu_regF_regF );
10289 %}
// Float sqrt expressed by the IR as (float)sqrt((double)src). Computing
// it directly with sqrt.s is exact: for any float, the correctly-rounded
// double sqrt rounded back to float equals the correctly-rounded float sqrt.
10291 instruct sqrtF_reg(regF dst, regF src) %{
10292 match(Set dst (ConvD2F (SqrtD (ConvF2D src))));
10293 ins_cost(100);
10294 format %{ "SqrtF $dst, $src @sqrtF_reg" %}
10295 ins_encode %{
10296 FloatRegister src = as_FloatRegister($src$$reg);
10297 FloatRegister dst = as_FloatRegister($dst$$reg);
10299 __ sqrt_s(dst, src);
10300 %}
10301 ins_pipe( fpu_regF_regF );
10302 %}
10303 //----------------------------------Logical Instructions----------------------
10304 //__________________________________Integer Logical Instructions-------------
10306 //And Instructions
10307 // And Register with Immediate
// General immediate AND: materialize the constant into scratch register AT,
// then a register-register and. Cheaper forms below handle imm16/mask cases.
10308 instruct andI_Reg_immI(mRegI dst, mRegI src1, immI src2) %{
10309 match(Set dst (AndI src1 src2));
10311 format %{ "and $dst, $src1, $src2 #@andI_Reg_immI" %}
10312 ins_encode %{
10313 Register dst = $dst$$Register;
10314 Register src = $src1$$Register;
10315 int val = $src2$$constant;
10317 __ move(AT, val);
10318 __ andr(dst, src, AT);
10319 %}
10320 ins_pipe( ialu_regI_regI );
10321 %}
// AND with a 0..65535 immediate fits andi's zero-extended 16-bit field.
10323 instruct andI_Reg_imm_0_65535(mRegI dst, mRegI src1, immI_0_65535 src2) %{
10324 match(Set dst (AndI src1 src2));
10325 ins_cost(60);
10327 format %{ "and $dst, $src1, $src2 #@andI_Reg_imm_0_65535" %}
10328 ins_encode %{
10329 Register dst = $dst$$Register;
10330 Register src = $src1$$Register;
10331 int val = $src2$$constant;
10333 __ andi(dst, src, val);
10334 %}
10335 ins_pipe( ialu_regI_regI );
10336 %}
// AND with a low-bit mask (2^k - 1): is_int_mask returns the mask width k,
// and ext extracts bits [k-1:0] in one instruction.
10338 instruct andI_Reg_immI_nonneg_mask(mRegI dst, mRegI src1, immI_nonneg_mask mask) %{
10339 match(Set dst (AndI src1 mask));
10340 ins_cost(60);
10342 format %{ "and $dst, $src1, $mask #@andI_Reg_immI_nonneg_mask" %}
10343 ins_encode %{
10344 Register dst = $dst$$Register;
10345 Register src = $src1$$Register;
10346 int size = Assembler::is_int_mask($mask$$constant);
10348 __ ext(dst, src, 0, size);
10349 %}
10350 ins_pipe( ialu_regI_regI );
10351 %}
// 64-bit variant of the mask AND, using dext.
10353 instruct andL_Reg_immL_nonneg_mask(mRegL dst, mRegL src1, immL_nonneg_mask mask) %{
10354 match(Set dst (AndL src1 mask));
10355 ins_cost(60);
10357 format %{ "and $dst, $src1, $mask #@andL_Reg_immL_nonneg_mask" %}
10358 ins_encode %{
10359 Register dst = $dst$$Register;
10360 Register src = $src1$$Register;
10361 int size = Assembler::is_jlong_mask($mask$$constant);
10363 __ dext(dst, src, 0, size);
10364 %}
10365 ins_pipe( ialu_regI_regI );
10366 %}
// XOR with a 0..65535 immediate fits xori's zero-extended 16-bit field.
10368 instruct xorI_Reg_imm_0_65535(mRegI dst, mRegI src1, immI_0_65535 src2) %{
10369 match(Set dst (XorI src1 src2));
10370 ins_cost(60);
10372 format %{ "xori $dst, $src1, $src2 #@xorI_Reg_imm_0_65535" %}
10373 ins_encode %{
10374 Register dst = $dst$$Register;
10375 Register src = $src1$$Register;
10376 int val = $src2$$constant;
10378 __ xori(dst, src, val);
10379 %}
10380 ins_pipe( ialu_regI_regI );
10381 %}
// x ^ -1 == ~x. gsorn(d, a, b) computes a | ~b (Loongson or-not), so
// gsorn(dst, R0, src) yields ~src in one instruction. Gated on 3A2000+.
10383 instruct xorI_Reg_immI_M1(mRegI dst, mRegI src1, immI_M1 M1) %{
10384 match(Set dst (XorI src1 M1));
10385 predicate(UseLoongsonISA && Use3A2000);
10386 ins_cost(60);
10388 format %{ "xor $dst, $src1, $M1 #@xorI_Reg_immI_M1" %}
10389 ins_encode %{
10390 Register dst = $dst$$Register;
10391 Register src = $src1$$Register;
10393 __ gsorn(dst, R0, src);
10394 %}
10395 ins_pipe( ialu_regI_regI );
10396 %}
// Same NOT idiom where the int input comes from a long (ConvL2I is free
// on the low 32 bits in the sign-extended register convention).
10398 instruct xorL2I_Reg_immI_M1(mRegI dst, mRegL src1, immI_M1 M1) %{
10399 match(Set dst (XorI (ConvL2I src1) M1));
10400 predicate(UseLoongsonISA && Use3A2000);
10401 ins_cost(60);
10403 format %{ "xor $dst, $src1, $M1 #@xorL2I_Reg_immI_M1" %}
10404 ins_encode %{
10405 Register dst = $dst$$Register;
10406 Register src = $src1$$Register;
10408 __ gsorn(dst, R0, src);
10409 %}
10410 ins_pipe( ialu_regI_regI );
10411 %}
// Long XOR with a 0..65535 immediate via xori.
10413 instruct xorL_Reg_imm_0_65535(mRegL dst, mRegL src1, immL_0_65535 src2) %{
10414 match(Set dst (XorL src1 src2));
10415 ins_cost(60);
10417 format %{ "xori $dst, $src1, $src2 #@xorL_Reg_imm_0_65535" %}
10418 ins_encode %{
10419 Register dst = $dst$$Register;
10420 Register src = $src1$$Register;
10421 int val = $src2$$constant;
10423 __ xori(dst, src, val);
10424 %}
10425 ins_pipe( ialu_regI_regI );
10426 %}
10428 /*
10429 instruct xorL_Reg_immL_M1(mRegL dst, mRegL src1, immL_M1 M1) %{
10430 match(Set dst (XorL src1 M1));
10431 predicate(UseLoongsonISA);
10432 ins_cost(60);
10434 format %{ "xor $dst, $src1, $M1 #@xorL_Reg_immL_M1" %}
10435 ins_encode %{
10436 Register dst = $dst$$Register;
10437 Register src = $src1$$Register;
10439 __ gsorn(dst, R0, src);
10440 %}
10441 ins_pipe( ialu_regI_regI );
10442 %}
10443 */
// Fold (0xFF & LoadB mem) into a single unsigned byte load: the mask
// discards the sign extension LoadB would perform, which is exactly lbu.
10445 instruct lbu_and_lmask(mRegI dst, memory mem, immI_255 mask) %{
10446 match(Set dst (AndI mask (LoadB mem)));
10447 ins_cost(60);
// Fixed disassembly text: the encoding is an unsigned BYTE load
// (load_UB_enc), so the mnemonic is lbu, not lhu (halfword).
10449 format %{ "lbu $dst, $mem #@lbu_and_lmask" %}
10450 ins_encode(load_UB_enc(dst, mem));
10451 ins_pipe( ialu_loadI );
10452 %}
// Mirror of lbu_and_lmask for (LoadB mem) & 0xFF operand order.
10454 instruct lbu_and_rmask(mRegI dst, memory mem, immI_255 mask) %{
10455 match(Set dst (AndI (LoadB mem) mask));
10456 ins_cost(60);
// Fixed disassembly text: load_UB_enc emits an unsigned byte load (lbu),
// not a halfword load (lhu).
10458 format %{ "lbu $dst, $mem #@lbu_and_rmask" %}
10459 ins_encode(load_UB_enc(dst, mem));
10460 ins_pipe( ialu_loadI );
10461 %}
// Plain register-register int AND.
10463 instruct andI_Reg_Reg(mRegI dst, mRegI src1, mRegI src2) %{
10464 match(Set dst (AndI src1 src2));
10466 format %{ "and $dst, $src1, $src2 #@andI_Reg_Reg" %}
10467 ins_encode %{
10468 Register dst = $dst$$Register;
10469 Register src1 = $src1$$Register;
10470 Register src2 = $src2$$Register;
10471 __ andr(dst, src1, src2);
10472 %}
10473 ins_pipe( ialu_regI_regI );
10474 %}
// src1 & ~src2 (the XorI with -1 is a NOT) folded into Loongson's
// gsandn(d, a, b) = a & ~b. Gated on 3A2000+.
10476 instruct andnI_Reg_nReg(mRegI dst, mRegI src1, mRegI src2, immI_M1 M1) %{
10477 match(Set dst (AndI src1 (XorI src2 M1)));
10478 predicate(UseLoongsonISA && Use3A2000);
10480 format %{ "andn $dst, $src1, $src2 #@andnI_Reg_nReg" %}
10481 ins_encode %{
10482 Register dst = $dst$$Register;
10483 Register src1 = $src1$$Register;
10484 Register src2 = $src2$$Register;
10486 __ gsandn(dst, src1, src2);
10487 %}
10488 ins_pipe( ialu_regI_regI );
10489 %}
// src1 | ~src2 folded into gsorn(d, a, b) = a | ~b.
10491 instruct ornI_Reg_nReg(mRegI dst, mRegI src1, mRegI src2, immI_M1 M1) %{
10492 match(Set dst (OrI src1 (XorI src2 M1)));
10493 predicate(UseLoongsonISA && Use3A2000);
10495 format %{ "orn $dst, $src1, $src2 #@ornI_Reg_nReg" %}
10496 ins_encode %{
10497 Register dst = $dst$$Register;
10498 Register src1 = $src1$$Register;
10499 Register src2 = $src2$$Register;
10501 __ gsorn(dst, src1, src2);
10502 %}
10503 ins_pipe( ialu_regI_regI );
10504 %}
// Commuted form: ~src1 & src2 == gsandn(src2, src1) — note the swapped
// operand order in the emitted instruction.
10506 instruct andnI_nReg_Reg(mRegI dst, mRegI src1, mRegI src2, immI_M1 M1) %{
10507 match(Set dst (AndI (XorI src1 M1) src2));
10508 predicate(UseLoongsonISA && Use3A2000);
10510 format %{ "andn $dst, $src2, $src1 #@andnI_nReg_Reg" %}
10511 ins_encode %{
10512 Register dst = $dst$$Register;
10513 Register src1 = $src1$$Register;
10514 Register src2 = $src2$$Register;
10516 __ gsandn(dst, src2, src1);
10517 %}
10518 ins_pipe( ialu_regI_regI );
10519 %}
// Commuted form: ~src1 | src2 == gsorn(src2, src1).
10521 instruct ornI_nReg_Reg(mRegI dst, mRegI src1, mRegI src2, immI_M1 M1) %{
10522 match(Set dst (OrI (XorI src1 M1) src2));
10523 predicate(UseLoongsonISA && Use3A2000);
10525 format %{ "orn $dst, $src2, $src1 #@ornI_nReg_Reg" %}
10526 ins_encode %{
10527 Register dst = $dst$$Register;
10528 Register src1 = $src1$$Register;
10529 Register src2 = $src2$$Register;
10531 __ gsorn(dst, src2, src1);
10532 %}
10533 ins_pipe( ialu_regI_regI );
10534 %}
10536 // And Long Register with Register
10537 instruct andL_Reg_Reg(mRegL dst, mRegL src1, mRegL src2) %{
10538 match(Set dst (AndL src1 src2));
10539 format %{ "AND $dst, $src1, $src2 @ andL_Reg_Reg\n\t" %}
10540 ins_encode %{
10541 Register dst_reg = as_Register($dst$$reg);
10542 Register src1_reg = as_Register($src1$$reg);
10543 Register src2_reg = as_Register($src2$$reg);
10545 __ andr(dst_reg, src1_reg, src2_reg);
10546 %}
10547 ins_pipe( ialu_regL_regL );
10548 %}
// AndL with right operand converted from int — conversion is free under the
// sign-extended register convention, so a plain andr is used.
10550 instruct andL_Reg_Reg_convI2L(mRegL dst, mRegL src1, mRegI src2) %{
10551 match(Set dst (AndL src1 (ConvI2L src2)));
10552 format %{ "AND $dst, $src1, $src2 @ andL_Reg_Reg_convI2L\n\t" %}
10553 ins_encode %{
10554 Register dst_reg = as_Register($dst$$reg);
10555 Register src1_reg = as_Register($src1$$reg);
10556 Register src2_reg = as_Register($src2$$reg);
10558 __ andr(dst_reg, src1_reg, src2_reg);
10559 %}
10560 ins_pipe( ialu_regL_regL );
10561 %}
// Long AND with a 0..65535 immediate: andi zero-extends its 16-bit field,
// so the long→int narrowing of val is safe in this range.
10563 instruct andL_Reg_imm_0_65535(mRegL dst, mRegL src1, immL_0_65535 src2) %{
10564 match(Set dst (AndL src1 src2));
10565 ins_cost(60);
10567 format %{ "and $dst, $src1, $src2 #@andL_Reg_imm_0_65535" %}
10568 ins_encode %{
10569 Register dst = $dst$$Register;
10570 Register src = $src1$$Register;
10571 long val = $src2$$constant;
10573 __ andi(dst, src, val);
10574 %}
10575 ins_pipe( ialu_regI_regI );
10576 %}
// (int)(src1 & imm16): the AND already clears all upper bits, so the
// ConvL2I needs no extra instruction.
10578 instruct andL2I_Reg_imm_0_65535(mRegI dst, mRegL src1, immL_0_65535 src2) %{
10579 match(Set dst (ConvL2I (AndL src1 src2)));
10580 ins_cost(60);
10582 format %{ "and $dst, $src1, $src2 #@andL2I_Reg_imm_0_65535" %}
10583 ins_encode %{
10584 Register dst = $dst$$Register;
10585 Register src = $src1$$Register;
10586 long val = $src2$$constant;
10588 __ andi(dst, src, val);
10589 %}
10590 ins_pipe( ialu_regI_regI );
10591 %}
10593 /*
10594 instruct andnL_Reg_nReg(mRegL dst, mRegL src1, mRegL src2, immL_M1 M1) %{
10595 match(Set dst (AndL src1 (XorL src2 M1)));
10596 predicate(UseLoongsonISA);
10598 format %{ "andn $dst, $src1, $src2 #@andnL_Reg_nReg" %}
10599 ins_encode %{
10600 Register dst = $dst$$Register;
10601 Register src1 = $src1$$Register;
10602 Register src2 = $src2$$Register;
10604 __ gsandn(dst, src1, src2);
10605 %}
10606 ins_pipe( ialu_regI_regI );
10607 %}
10608 */
10610 /*
10611 instruct ornL_Reg_nReg(mRegL dst, mRegL src1, mRegL src2, immL_M1 M1) %{
10612 match(Set dst (OrL src1 (XorL src2 M1)));
10613 predicate(UseLoongsonISA);
10615 format %{ "orn $dst, $src1, $src2 #@ornL_Reg_nReg" %}
10616 ins_encode %{
10617 Register dst = $dst$$Register;
10618 Register src1 = $src1$$Register;
10619 Register src2 = $src2$$Register;
10621 __ gsorn(dst, src1, src2);
10622 %}
10623 ins_pipe( ialu_regI_regI );
10624 %}
10625 */
10627 /*
10628 instruct andnL_nReg_Reg(mRegL dst, mRegL src1, mRegL src2, immL_M1 M1) %{
10629 match(Set dst (AndL (XorL src1 M1) src2));
10630 predicate(UseLoongsonISA);
10632 format %{ "andn $dst, $src2, $src1 #@andnL_nReg_Reg" %}
10633 ins_encode %{
10634 Register dst = $dst$$Register;
10635 Register src1 = $src1$$Register;
10636 Register src2 = $src2$$Register;
10638 __ gsandn(dst, src2, src1);
10639 %}
10640 ins_pipe( ialu_regI_regI );
10641 %}
10642 */
10644 /*
10645 instruct ornL_nReg_Reg(mRegL dst, mRegL src1, mRegL src2, immL_M1 M1) %{
10646 match(Set dst (OrL (XorL src1 M1) src2));
10647 predicate(UseLoongsonISA);
10649 format %{ "orn $dst, $src2, $src1 #@ornL_nReg_Reg" %}
10650 ins_encode %{
10651 Register dst = $dst$$Register;
10652 Register src1 = $src1$$Register;
10653 Register src2 = $src2$$Register;
10655 __ gsorn(dst, src2, src1);
10656 %}
10657 ins_pipe( ialu_regI_regI );
10658 %}
10659 */
// AND with specific negative constants whose complement is a contiguous
// bit range: implemented by dins-ing zeros (R0) over that range.
// -8 = ~0b111: clear bits [2:0].
10661 instruct andL_Reg_immL_M8(mRegL dst, immL_M8 M8) %{
10662 match(Set dst (AndL dst M8));
10663 ins_cost(60);
10665 format %{ "and $dst, $dst, $M8 #@andL_Reg_immL_M8" %}
10666 ins_encode %{
10667 Register dst = $dst$$Register;
10669 __ dins(dst, R0, 0, 3);
10670 %}
10671 ins_pipe( ialu_regI_regI );
10672 %}
// -5 = ~0b100: clear bit [2].
10674 instruct andL_Reg_immL_M5(mRegL dst, immL_M5 M5) %{
10675 match(Set dst (AndL dst M5));
10676 ins_cost(60);
10678 format %{ "and $dst, $dst, $M5 #@andL_Reg_immL_M5" %}
10679 ins_encode %{
10680 Register dst = $dst$$Register;
10682 __ dins(dst, R0, 2, 1);
10683 %}
10684 ins_pipe( ialu_regI_regI );
10685 %}
// -7 = ~0b110: clear bits [2:1].
10687 instruct andL_Reg_immL_M7(mRegL dst, immL_M7 M7) %{
10688 match(Set dst (AndL dst M7));
10689 ins_cost(60);
10691 format %{ "and $dst, $dst, $M7 #@andL_Reg_immL_M7" %}
10692 ins_encode %{
10693 Register dst = $dst$$Register;
10695 __ dins(dst, R0, 1, 2);
10696 %}
10697 ins_pipe( ialu_regI_regI );
10698 %}
// -4 = ~0b11: clear bits [1:0].
10700 instruct andL_Reg_immL_M4(mRegL dst, immL_M4 M4) %{
10701 match(Set dst (AndL dst M4));
10702 ins_cost(60);
10704 format %{ "and $dst, $dst, $M4 #@andL_Reg_immL_M4" %}
10705 ins_encode %{
10706 Register dst = $dst$$Register;
10708 __ dins(dst, R0, 0, 2);
10709 %}
10710 ins_pipe( ialu_regI_regI );
10711 %}
// -121 = ~0b1111000: clear bits [6:3].
10713 instruct andL_Reg_immL_M121(mRegL dst, immL_M121 M121) %{
10714 match(Set dst (AndL dst M121));
10715 ins_cost(60);
10717 format %{ "and $dst, $dst, $M121 #@andL_Reg_immL_M121" %}
10718 ins_encode %{
10719 Register dst = $dst$$Register;
10721 __ dins(dst, R0, 3, 4);
10722 %}
10723 ins_pipe( ialu_regI_regI );
10724 %}
10726 // Or Long Register with Register
10727 instruct orL_Reg_Reg(mRegL dst, mRegL src1, mRegL src2) %{
10728 match(Set dst (OrL src1 src2));
10729 format %{ "OR $dst, $src1, $src2 @ orL_Reg_Reg\t" %}
10730 ins_encode %{
10731 Register dst_reg = $dst$$Register;
10732 Register src1_reg = $src1$$Register;
10733 Register src2_reg = $src2$$Register;
10735 __ orr(dst_reg, src1_reg, src2_reg);
10736 %}
10737 ins_pipe( ialu_regL_regL );
10738 %}
// OrL where the left operand is a pointer cast to a long word (CastP2X is
// a register-level no-op), so the same orr applies.
10740 instruct orL_Reg_P2XReg(mRegL dst, mRegP src1, mRegL src2) %{
10741 match(Set dst (OrL (CastP2X src1) src2));
10742 format %{ "OR $dst, $src1, $src2 @ orL_Reg_P2XReg\t" %}
10743 ins_encode %{
10744 Register dst_reg = $dst$$Register;
10745 Register src1_reg = $src1$$Register;
10746 Register src2_reg = $src2$$Register;
10748 __ orr(dst_reg, src1_reg, src2_reg);
10749 %}
10750 ins_pipe( ialu_regL_regL );
10751 %}
10753 // Xor Long Register with Register
10754 instruct xorL_Reg_Reg(mRegL dst, mRegL src1, mRegL src2) %{
10755 match(Set dst (XorL src1 src2));
10756 format %{ "XOR $dst, $src1, $src2 @ xorL_Reg_Reg\t" %}
10757 ins_encode %{
10758 Register dst_reg = as_Register($dst$$reg);
10759 Register src1_reg = as_Register($src1$$reg);
10760 Register src2_reg = as_Register($src2$$reg);
10762 __ xorr(dst_reg, src1_reg, src2_reg);
10763 %}
10764 ins_pipe( ialu_regL_regL );
10765 %}
10767 // Shift Left by 8-bit immediate
// NOTE(review): sll encodes a 5-bit shamt; presumably the matcher only
// produces 0..31 here even though immI8 is wider — confirm operand def.
10768 instruct salI_Reg_imm(mRegI dst, mRegI src, immI8 shift) %{
10769 match(Set dst (LShiftI src shift));
10771 format %{ "SHL $dst, $src, $shift #@salI_Reg_imm" %}
10772 ins_encode %{
10773 Register src = $src$$Register;
10774 Register dst = $dst$$Register;
10775 int shamt = $shift$$constant;
10777 __ sll(dst, src, shamt);
10778 %}
10779 ins_pipe( ialu_regI_regI );
10780 %}
// Int shift of a narrowed long: sll works on the low 32 bits and
// sign-extends the result, so the ConvL2I costs nothing.
10782 instruct salL2I_Reg_imm(mRegI dst, mRegL src, immI8 shift) %{
10783 match(Set dst (LShiftI (ConvL2I src) shift));
10785 format %{ "SHL $dst, $src, $shift #@salL2I_Reg_imm" %}
10786 ins_encode %{
10787 Register src = $src$$Register;
10788 Register dst = $dst$$Register;
10789 int shamt = $shift$$constant;
10791 __ sll(dst, src, shamt);
10792 %}
10793 ins_pipe( ialu_regI_regI );
10794 %}
// (src << 16) & 0xFFFF0000 folds to a single sll by 16, since the shift
// already zero-fills the low 16 bits (immI_M65536 == -65536).
10796 instruct salI_Reg_imm_and_M65536(mRegI dst, mRegI src, immI_16 shift, immI_M65536 mask) %{
10797 match(Set dst (AndI (LShiftI src shift) mask));
10799 format %{ "SHL $dst, $src, $shift #@salI_Reg_imm_and_M65536" %}
10800 ins_encode %{
10801 Register src = $src$$Register;
10802 Register dst = $dst$$Register;
10804 __ sll(dst, src, 16);
10805 %}
10806 ins_pipe( ialu_regI_regI );
10807 %}
// ((int)(src & 7) << 16) >> 16 folds to andi src, 7: the masked value is
// at most 7, so the short sign-extension round trip cannot change it.
10809 instruct land7_2_s(mRegI dst, mRegL src, immL7 seven, immI_16 sixteen)
10810 %{
10811 match(Set dst (RShiftI (LShiftI (ConvL2I (AndL src seven)) sixteen) sixteen));
10813 format %{ "andi $dst, $src, 7\t# @land7_2_s" %}
10814 ins_encode %{
10815 Register src = $src$$Register;
10816 Register dst = $dst$$Register;
10818 __ andi(dst, src, 7);
10819 %}
10820 ins_pipe(ialu_regI_regI);
10821 %}
// ((src1 | imm) << 16) >> 16 lowered to a plain ori.
// NOTE(review): this drops the <<16>>16 sign-extension of the low
// halfword; presumably it is only matched when type analysis bounds src1
// to a short-range value — confirm before relying on it.
10823 instruct ori2s(mRegI dst, mRegI src1, immI_0_32767 src2, immI_16 sixteen)
10824 %{
10825 match(Set dst (RShiftI (LShiftI (OrI src1 src2) sixteen) sixteen));
10827 format %{ "ori $dst, $src1, $src2\t# @ori2s" %}
10828 ins_encode %{
10829 Register src = $src1$$Register;
10830 int val = $src2$$constant;
10831 Register dst = $dst$$Register;
10833 __ ori(dst, src, val);
10834 %}
10835 ins_pipe(ialu_regI_regI);
10836 %}
10838 // Shift Left by 16, followed by Arithmetic Shift Right by 16.
10839 // This idiom is used by the compiler for the i2s bytecode.
10840 instruct i2s(mRegI dst, mRegI src, immI_16 sixteen)
10841 %{
10842 match(Set dst (RShiftI (LShiftI src sixteen) sixteen))
10844 format %{ "i2s $dst, $src\t# @i2s" %}
10845 ins_encode %{
10846 Register src = $src$$Register;
10847 Register dst = $dst$$Register;
// seh sign-extends the low halfword in one instruction.
10849 __ seh(dst, src);
10850 %}
10851 ins_pipe(ialu_regI_regI);
10852 %}
10854 // Shift Left by 24, followed by Arithmetic Shift Right by 24.
10855 // This idiom is used by the compiler for the i2b bytecode.
10856 instruct i2b(mRegI dst, mRegI src, immI_24 twentyfour)
10857 %{
10858 match(Set dst (RShiftI (LShiftI src twentyfour) twentyfour));
10860 format %{ "i2b $dst, $src\t# @i2b" %}
10861 ins_encode %{
10862 Register src = $src$$Register;
10863 Register dst = $dst$$Register;
// seb sign-extends the low byte in one instruction.
10865 __ seb(dst, src);
10866 %}
10867 ins_pipe(ialu_regI_regI);
10868 %}
// NOTE(review): this matches the same pattern as salL2I_Reg_imm above —
// apparently a duplicate rule; consider removing one of the two.
10871 instruct salI_RegL2I_imm(mRegI dst, mRegL src, immI8 shift) %{
10872 match(Set dst (LShiftI (ConvL2I src) shift));
10874 format %{ "SHL $dst, $src, $shift #@salI_RegL2I_imm" %}
10875 ins_encode %{
10876 Register src = $src$$Register;
10877 Register dst = $dst$$Register;
10878 int shamt = $shift$$constant;
10880 __ sll(dst, src, shamt);
10881 %}
10882 ins_pipe( ialu_regI_regI );
10883 %}
10885 // Shift Left by 8-bit immediate
// Variable int shift; sllv uses only the low 5 bits of the shift register,
// which matches Java's int shift-count masking.
10886 instruct salI_Reg_Reg(mRegI dst, mRegI src, mRegI shift) %{
10887 match(Set dst (LShiftI src shift));
10889 format %{ "SHL $dst, $src, $shift #@salI_Reg_Reg" %}
10890 ins_encode %{
10891 Register src = $src$$Register;
10892 Register dst = $dst$$Register;
10893 Register shamt = $shift$$Register;
10894 __ sllv(dst, src, shamt);
10895 %}
10896 ins_pipe( ialu_regI_regI );
10897 %}
10900 // Shift Left Long
// Long shift by constant. dsll encodes shamt 0..31; larger amounts take
// the dsll32 form (shamt - 32). The shift count is first reduced mod 64
// (Assembler::low(shamt, 6)), matching Java's long shift-count masking.
// Note: is_simm(shamt, 5) is true only for 0..15 here; 16..31 fall
// through to the else branch, which still emits the correct dsll.
10901 instruct salL_Reg_imm(mRegL dst, mRegL src, immI8 shift) %{
10902 //predicate(UseNewLongLShift);
10903 match(Set dst (LShiftL src shift));
10904 ins_cost(100);
10905 format %{ "salL $dst, $src, $shift @ salL_Reg_imm" %}
10906 ins_encode %{
10907 Register src_reg = as_Register($src$$reg);
10908 Register dst_reg = as_Register($dst$$reg);
10909 int shamt = $shift$$constant;
10911 if (__ is_simm(shamt, 5))
10912 __ dsll(dst_reg, src_reg, shamt);
10913 else
10914 {
10915 int sa = Assembler::low(shamt, 6);
10916 if (sa < 32) {
10917 __ dsll(dst_reg, src_reg, sa);
10918 } else {
10919 __ dsll32(dst_reg, src_reg, sa - 32);
10920 }
10921 }
10922 %}
10923 ins_pipe( ialu_regL_regL );
10924 %}
// Long shift of an int-to-long conversion (conversion is free in the
// sign-extended register convention); same shamt handling as above.
10926 instruct salL_RegI2L_imm(mRegL dst, mRegI src, immI8 shift) %{
10927 //predicate(UseNewLongLShift);
10928 match(Set dst (LShiftL (ConvI2L src) shift));
10929 ins_cost(100);
10930 format %{ "salL $dst, $src, $shift @ salL_RegI2L_imm" %}
10931 ins_encode %{
10932 Register src_reg = as_Register($src$$reg);
10933 Register dst_reg = as_Register($dst$$reg);
10934 int shamt = $shift$$constant;
10936 if (__ is_simm(shamt, 5))
10937 __ dsll(dst_reg, src_reg, shamt);
10938 else
10939 {
10940 int sa = Assembler::low(shamt, 6);
10941 if (sa < 32) {
10942 __ dsll(dst_reg, src_reg, sa);
10943 } else {
10944 __ dsll32(dst_reg, src_reg, sa - 32);
10945 }
10946 }
10947 %}
10948 ins_pipe( ialu_regL_regL );
10949 %}
10951 // Shift Left Long
// Variable long shift; dsllv uses the low 6 bits of the count register,
// matching Java's long shift-count masking.
10952 instruct salL_Reg_Reg(mRegL dst, mRegL src, mRegI shift) %{
10953 //predicate(UseNewLongLShift);
10954 match(Set dst (LShiftL src shift));
10955 ins_cost(100);
10956 format %{ "salL $dst, $src, $shift @ salL_Reg_Reg" %}
10957 ins_encode %{
10958 Register src_reg = as_Register($src$$reg);
10959 Register dst_reg = as_Register($dst$$reg);
10961 __ dsllv(dst_reg, src_reg, $shift$$Register);
10962 %}
10963 ins_pipe( ialu_regL_regL );
10964 %}
// NOTE(review): this matches the same pattern as salL_RegI2L_imm above —
// apparently a duplicate rule; consider removing one of the two.
10966 instruct salL_convI2L_Reg_imm(mRegL dst, mRegI src, immI8 shift) %{
10967 match(Set dst (LShiftL (ConvI2L src) shift));
10968 ins_cost(100);
10969 format %{ "salL $dst, $src, $shift @ salL_convI2L_Reg_imm" %}
10970 ins_encode %{
10971 Register src_reg = as_Register($src$$reg);
10972 Register dst_reg = as_Register($dst$$reg);
10973 int shamt = $shift$$constant;
10975 if (__ is_simm(shamt, 5)) {
10976 __ dsll(dst_reg, src_reg, shamt);
10977 } else {
10978 int sa = Assembler::low(shamt, 6);
10979 if (sa < 32) {
10980 __ dsll(dst_reg, src_reg, sa);
10981 } else {
10982 __ dsll32(dst_reg, src_reg, sa - 32);
10983 }
10984 }
10985 %}
10986 ins_pipe( ialu_regL_regL );
10987 %}
10989 // Shift Right Long
// Long arithmetic shift-right by immediate.  The amount is masked to
// 6 bits (Java's shift & 0x3f) and split across dsra (0..31) and
// dsra32 (32..63).
10990 instruct sarL_Reg_imm(mRegL dst, mRegL src, immI8 shift) %{
10991 match(Set dst (RShiftL src shift));
10992 ins_cost(100);
10993 format %{ "sarL $dst, $src, $shift @ sarL_Reg_imm" %}
10994 ins_encode %{
10995 Register src_reg = as_Register($src$$reg);
10996 Register dst_reg = as_Register($dst$$reg);
10997 int shamt = ($shift$$constant & 0x3f);
// is_simm(shamt, 5) is a signed 5-bit test; amounts >= 16 fall into
// the else arm, which still emits the correct dsra/dsra32 encoding.
10998 if (__ is_simm(shamt, 5))
10999 __ dsra(dst_reg, src_reg, shamt);
11000 else {
11001 int sa = Assembler::low(shamt, 6);
11002 if (sa < 32) {
11003 __ dsra(dst_reg, src_reg, sa);
11004 } else {
11005 __ dsra32(dst_reg, src_reg, sa - 32);
11006 }
11007 }
11008 %}
11009 ins_pipe( ialu_regL_regL );
11010 %}
// (long >> [32..63]) narrowed to int: dsra32 already leaves a
// sign-extended 32-bit value in the register, so the ConvL2I is free.
11012 instruct sarL2I_Reg_immI_32_63(mRegI dst, mRegL src, immI_32_63 shift) %{
11013 match(Set dst (ConvL2I (RShiftL src shift)));
11014 ins_cost(100);
11015 format %{ "sarL $dst, $src, $shift @ sarL2I_Reg_immI_32_63" %}
11016 ins_encode %{
11017 Register src_reg = as_Register($src$$reg);
11018 Register dst_reg = as_Register($dst$$reg);
11019 int shamt = $shift$$constant;
11021 __ dsra32(dst_reg, src_reg, shamt - 32);
11022 %}
11023 ins_pipe( ialu_regL_regL );
11024 %}
11026 // Shift Right Long arithmetically
// Variable long arithmetic shift-right; dsrav masks the shift register
// to its low 6 bits, matching Java semantics.
11027 instruct sarL_Reg_Reg(mRegL dst, mRegL src, mRegI shift) %{
11028 //predicate(UseNewLongLShift);
11029 match(Set dst (RShiftL src shift));
11030 ins_cost(100);
11031 format %{ "sarL $dst, $src, $shift @ sarL_Reg_Reg" %}
11032 ins_encode %{
11033 Register src_reg = as_Register($src$$reg);
11034 Register dst_reg = as_Register($dst$$reg);
11036 __ dsrav(dst_reg, src_reg, $shift$$Register);
11037 %}
11038 ins_pipe( ialu_regL_regL );
11039 %}
11041 // Shift Right Long logically
// Variable long logical shift-right (dsrlv uses the low 6 bits of the
// shift register).
11042 instruct slrL_Reg_Reg(mRegL dst, mRegL src, mRegI shift) %{
11043 match(Set dst (URShiftL src shift));
11044 ins_cost(100);
11045 format %{ "slrL $dst, $src, $shift @ slrL_Reg_Reg" %}
11046 ins_encode %{
11047 Register src_reg = as_Register($src$$reg);
11048 Register dst_reg = as_Register($dst$$reg);
11050 __ dsrlv(dst_reg, src_reg, $shift$$Register);
11051 %}
11052 ins_pipe( ialu_regL_regL );
11053 %}
// Long logical shift-right by 0..31: single dsrl.
11055 instruct slrL_Reg_immI_0_31(mRegL dst, mRegL src, immI_0_31 shift) %{
11056 match(Set dst (URShiftL src shift));
11057 ins_cost(80);
11058 format %{ "slrL $dst, $src, $shift @ slrL_Reg_immI_0_31" %}
11059 ins_encode %{
11060 Register src_reg = as_Register($src$$reg);
11061 Register dst_reg = as_Register($dst$$reg);
11062 int shamt = $shift$$constant;
11064 __ dsrl(dst_reg, src_reg, shamt);
11065 %}
11066 ins_pipe( ialu_regL_regL );
11067 %}
// ((int)(long >>> shift)) & 0x7fffffff folded into one bit-field
// extract: dext pulls 31 bits starting at bit `shamt`, zero-extended,
// which equals the shift-then-mask sequence.
11069 instruct slrL_Reg_immI_0_31_and_max_int(mRegI dst, mRegL src, immI_0_31 shift, immI_MaxI max_int) %{
11070 match(Set dst (AndI (ConvL2I (URShiftL src shift)) max_int));
11071 ins_cost(80);
11072 format %{ "dext $dst, $src, $shift, 31 @ slrL_Reg_immI_0_31_and_max_int" %}
11073 ins_encode %{
11074 Register src_reg = as_Register($src$$reg);
11075 Register dst_reg = as_Register($dst$$reg);
11076 int shamt = $shift$$constant;
11078 __ dext(dst_reg, src_reg, shamt, 31);
11079 %}
11080 ins_pipe( ialu_regL_regL );
11081 %}
// Pointer bits (CastP2X) logically shifted right by 0..31.
11083 instruct slrL_P2XReg_immI_0_31(mRegL dst, mRegP src, immI_0_31 shift) %{
11084 match(Set dst (URShiftL (CastP2X src) shift));
11085 ins_cost(80);
11086 format %{ "slrL $dst, $src, $shift @ slrL_P2XReg_immI_0_31" %}
11087 ins_encode %{
11088 Register src_reg = as_Register($src$$reg);
11089 Register dst_reg = as_Register($dst$$reg);
11090 int shamt = $shift$$constant;
11092 __ dsrl(dst_reg, src_reg, shamt);
11093 %}
11094 ins_pipe( ialu_regL_regL );
11095 %}
// Long logical shift-right by 32..63: dsrl32 encodes amount - 32.
11097 instruct slrL_Reg_immI_32_63(mRegL dst, mRegL src, immI_32_63 shift) %{
11098 match(Set dst (URShiftL src shift));
11099 ins_cost(80);
11100 format %{ "slrL $dst, $src, $shift @ slrL_Reg_immI_32_63" %}
11101 ins_encode %{
11102 Register src_reg = as_Register($src$$reg);
11103 Register dst_reg = as_Register($dst$$reg);
11104 int shamt = $shift$$constant;
11106 __ dsrl32(dst_reg, src_reg, shamt - 32);
11107 %}
11108 ins_pipe( ialu_regL_regL );
11109 %}
// (int)(long >>> shift) for shift > 32: the result then fits in 31
// bits, so dsrl32's zero-extension coincides with ConvL2I's required
// sign-extension.  shift == 32 is excluded by the predicate —
// presumably because bit 31 of the result could then be set.
11111 instruct slrL_Reg_immI_convL2I(mRegI dst, mRegL src, immI_32_63 shift) %{
11112 match(Set dst (ConvL2I (URShiftL src shift)))
11113 predicate(n->in(1)->in(2)->get_int() > 32);
11114 ins_cost(80);
11115 format %{ "slrL $dst, $src, $shift @ slrL_Reg_immI_convL2I" %}
11116 ins_encode %{
11117 Register src_reg = as_Register($src$$reg);
11118 Register dst_reg = as_Register($dst$$reg);
11119 int shamt = $shift$$constant;
11121 __ dsrl32(dst_reg, src_reg, shamt - 32);
11122 %}
11123 ins_pipe( ialu_regL_regL );
11124 %}
// Pointer bits (CastP2X) logically shifted right by 32..63.
11126 instruct slrL_P2XReg_immI_32_63(mRegL dst, mRegP src, immI_32_63 shift) %{
11127 match(Set dst (URShiftL (CastP2X src) shift));
11128 ins_cost(80);
11129 format %{ "slrL $dst, $src, $shift @ slrL_P2XReg_immI_32_63" %}
11130 ins_encode %{
11131 Register src_reg = as_Register($src$$reg);
11132 Register dst_reg = as_Register($dst$$reg);
11133 int shamt = $shift$$constant;
11135 __ dsrl32(dst_reg, src_reg, shamt - 32);
11136 %}
11137 ins_pipe( ialu_regL_regL );
11138 %}
11140 // Xor Instructions
11141 // Xor Register with Register
// Int xor; the trailing sll-by-0 re-canonicalizes the 64-bit register
// to a sign-extended 32-bit value, the port's int convention.
11142 instruct xorI_Reg_Reg(mRegI dst, mRegI src1, mRegI src2) %{
11143 match(Set dst (XorI src1 src2));
11145 format %{ "XOR $dst, $src1, $src2 #@xorI_Reg_Reg" %}
11147 ins_encode %{
11148 Register dst = $dst$$Register;
11149 Register src1 = $src1$$Register;
11150 Register src2 = $src2$$Register;
11151 __ xorr(dst, src1, src2);
11152 __ sll(dst, dst, 0); /* long -> int */
11153 %}
11155 ins_pipe( ialu_regI_regI );
11156 %}
11158 // Or Instructions
11159 // Or Register with Register
// Int bitwise-or.  No re-canonicalization is emitted here — or of two
// canonical (sign-extended) int values stays canonical.
11160 instruct orI_Reg_Reg(mRegI dst, mRegI src1, mRegI src2) %{
11161 match(Set dst (OrI src1 src2));
11163 format %{ "OR $dst, $src1, $src2 #@orI_Reg_Reg" %}
11164 ins_encode %{
11165 Register dst = $dst$$Register;
11166 Register src1 = $src1$$Register;
11167 Register src2 = $src2$$Register;
11168 __ orr(dst, src1, src2);
11169 %}
11171 ins_pipe( ialu_regI_regI );
11172 %}
// (src >>> rshift) | ((src & 1) << lshift) with rshift + lshift == 32
// (enforced by the predicate) is a rotate-right: rotr by 1 moves the
// low bit to the top, then srl by (rshift - 1) finishes the shift.
11174 instruct rotI_shr_logical_Reg(mRegI dst, mRegI src, immI_0_31 rshift, immI_0_31 lshift, immI_1 one) %{
11175 match(Set dst (OrI (URShiftI src rshift) (LShiftI (AndI src one) lshift)));
11176 predicate(32 == ((n->in(1)->in(2)->get_int() + n->in(2)->in(2)->get_int())));
11178 format %{ "rotr $dst, $src, 1 ...\n\t"
11179 "srl $dst, $dst, ($rshift-1) @ rotI_shr_logical_Reg" %}
11180 ins_encode %{
11181 Register dst = $dst$$Register;
11182 Register src = $src$$Register;
11183 int rshift = $rshift$$constant;
11185 __ rotr(dst, src, 1);
// Skip the srl entirely when rshift == 1 (shift by zero is a no-op).
11186 if (rshift - 1) {
11187 __ srl(dst, dst, rshift - 1);
11188 }
11189 %}
11191 ins_pipe( ialu_regI_regI );
11192 %}
// OrI of a long with pointer bits.  NOTE(review): operands are declared
// as long/pointer register classes even though the matched node is OrI;
// kept as-is — confirm against the matcher rules before changing.
11194 instruct orI_Reg_castP2X(mRegL dst, mRegL src1, mRegP src2) %{
11195 match(Set dst (OrI src1 (CastP2X src2)));
11197 format %{ "OR $dst, $src1, $src2 #@orI_Reg_castP2X" %}
11198 ins_encode %{
11199 Register dst = $dst$$Register;
11200 Register src1 = $src1$$Register;
11201 Register src2 = $src2$$Register;
11202 __ orr(dst, src1, src2);
11203 %}
11205 ins_pipe( ialu_regI_regI );
11206 %}
11208 // Logical Shift Right by 8-bit immediate
// Int logical shift-right by constant; srl masks the amount to 5 bits.
11209 instruct shr_logical_Reg_imm(mRegI dst, mRegI src, immI8 shift) %{
11210 match(Set dst (URShiftI src shift));
11211 // effect(KILL cr);
11213 format %{ "SRL $dst, $src, $shift #@shr_logical_Reg_imm" %}
11214 ins_encode %{
11215 Register src = $src$$Register;
11216 Register dst = $dst$$Register;
11217 int shift = $shift$$constant;
11219 __ srl(dst, src, shift);
11220 %}
11221 ins_pipe( ialu_regI_regI );
11222 %}
// (src >>> shift) & mask folded into a single bit-field extract when
// mask is a contiguous low-bit mask.
11224 instruct shr_logical_Reg_imm_nonneg_mask(mRegI dst, mRegI src, immI_0_31 shift, immI_nonneg_mask mask) %{
11225 match(Set dst (AndI (URShiftI src shift) mask));
11227 format %{ "ext $dst, $src, $shift, one-bits($mask) #@shr_logical_Reg_imm_nonneg_mask" %}
11228 ins_encode %{
11229 Register src = $src$$Register;
11230 Register dst = $dst$$Register;
11231 int pos = $shift$$constant;
// is_int_mask presumably returns the width (popcount) of the
// contiguous mask — TODO confirm against Assembler::is_int_mask.
11232 int size = Assembler::is_int_mask($mask$$constant);
11234 __ ext(dst, src, pos, size);
11235 %}
11236 ins_pipe( ialu_regI_regI );
11237 %}
// Int rotate-left by constant.  The predicate requires
// (lshift + rshift) % 32 == 0, so (x << l) | (x >>> r) is a rotate;
// it is emitted as a rotate-right by the right-shift amount.
11239 instruct rolI_Reg_immI_0_31(mRegI dst, immI_0_31 lshift, immI_0_31 rshift)
11240 %{
11241 predicate(0 == ((n->in(1)->in(2)->get_int() + n->in(2)->in(2)->get_int()) & 0x1f));
11242 match(Set dst (OrI (LShiftI dst lshift) (URShiftI dst rshift)));
11244 ins_cost(100);
11245 format %{ "rotr $dst, $dst, $rshift #@rolI_Reg_immI_0_31" %}
11246 ins_encode %{
11247 Register dst = $dst$$Register;
11248 int sa = $rshift$$constant;
11250 __ rotr(dst, dst, sa);
11251 %}
11252 ins_pipe( ialu_regI_regI );
11253 %}
// Long rotate-left, right-shift amount in 0..31: single drotr.
11255 instruct rolL_Reg_immI_0_31(mRegL dst, immI_32_63 lshift, immI_0_31 rshift)
11256 %{
11257 predicate(0 == ((n->in(1)->in(2)->get_int() + n->in(2)->in(2)->get_int()) & 0x3f));
11258 match(Set dst (OrL (LShiftL dst lshift) (URShiftL dst rshift)));
11260 ins_cost(100);
11261 format %{ "rotr $dst, $dst, $rshift #@rolL_Reg_immI_0_31" %}
11262 ins_encode %{
11263 Register dst = $dst$$Register;
11264 int sa = $rshift$$constant;
11266 __ drotr(dst, dst, sa);
11267 %}
11268 ins_pipe( ialu_regI_regI );
11269 %}
// Long rotate-left, right-shift amount in 32..63: drotr32 encodes
// amount - 32.
11271 instruct rolL_Reg_immI_32_63(mRegL dst, immI_0_31 lshift, immI_32_63 rshift)
11272 %{
11273 predicate(0 == ((n->in(1)->in(2)->get_int() + n->in(2)->in(2)->get_int()) & 0x3f));
11274 match(Set dst (OrL (LShiftL dst lshift) (URShiftL dst rshift)));
11276 ins_cost(100);
11277 format %{ "rotr $dst, $dst, $rshift #@rolL_Reg_immI_32_63" %}
11278 ins_encode %{
11279 Register dst = $dst$$Register;
11280 int sa = $rshift$$constant;
11282 __ drotr32(dst, dst, sa - 32);
11283 %}
11284 ins_pipe( ialu_regI_regI );
11285 %}
// Int rotate-right (same Or-of-shifts idiom with operands swapped).
11287 instruct rorI_Reg_immI_0_31(mRegI dst, immI_0_31 rshift, immI_0_31 lshift)
11288 %{
11289 predicate(0 == ((n->in(1)->in(2)->get_int() + n->in(2)->in(2)->get_int()) & 0x1f));
11290 match(Set dst (OrI (URShiftI dst rshift) (LShiftI dst lshift)));
11292 ins_cost(100);
11293 format %{ "rotr $dst, $dst, $rshift #@rorI_Reg_immI_0_31" %}
11294 ins_encode %{
11295 Register dst = $dst$$Register;
11296 int sa = $rshift$$constant;
11298 __ rotr(dst, dst, sa);
11299 %}
11300 ins_pipe( ialu_regI_regI );
11301 %}
// Long rotate-right, amount 0..31.
11303 instruct rorL_Reg_immI_0_31(mRegL dst, immI_0_31 rshift, immI_32_63 lshift)
11304 %{
11305 predicate(0 == ((n->in(1)->in(2)->get_int() + n->in(2)->in(2)->get_int()) & 0x3f));
11306 match(Set dst (OrL (URShiftL dst rshift) (LShiftL dst lshift)));
11308 ins_cost(100);
11309 format %{ "rotr $dst, $dst, $rshift #@rorL_Reg_immI_0_31" %}
11310 ins_encode %{
11311 Register dst = $dst$$Register;
11312 int sa = $rshift$$constant;
11314 __ drotr(dst, dst, sa);
11315 %}
11316 ins_pipe( ialu_regI_regI );
11317 %}
// Long rotate-right, amount 32..63.
11319 instruct rorL_Reg_immI_32_63(mRegL dst, immI_32_63 rshift, immI_0_31 lshift)
11320 %{
11321 predicate(0 == ((n->in(1)->in(2)->get_int() + n->in(2)->in(2)->get_int()) & 0x3f));
11322 match(Set dst (OrL (URShiftL dst rshift) (LShiftL dst lshift)));
11324 ins_cost(100);
11325 format %{ "rotr $dst, $dst, $rshift #@rorL_Reg_immI_32_63" %}
11326 ins_encode %{
11327 Register dst = $dst$$Register;
11328 int sa = $rshift$$constant;
11330 __ drotr32(dst, dst, sa - 32);
11331 %}
11332 ins_pipe( ialu_regI_regI );
11333 %}
11335 // Logical Shift Right
// Variable int logical shift-right (srlv uses the low 5 bits of shift).
11336 instruct shr_logical_Reg_Reg(mRegI dst, mRegI src, mRegI shift) %{
11337 match(Set dst (URShiftI src shift));
11339 format %{ "SRL $dst, $src, $shift #@shr_logical_Reg_Reg" %}
11340 ins_encode %{
11341 Register src = $src$$Register;
11342 Register dst = $dst$$Register;
11343 Register shift = $shift$$Register;
11344 __ srlv(dst, src, shift);
11345 %}
11346 ins_pipe( ialu_regI_regI );
11347 %}
// Int arithmetic shift-right by constant.
11350 instruct shr_arith_Reg_imm(mRegI dst, mRegI src, immI8 shift) %{
11351 match(Set dst (RShiftI src shift));
11352 // effect(KILL cr);
11354 format %{ "SRA $dst, $src, $shift #@shr_arith_Reg_imm" %}
11355 ins_encode %{
11356 Register src = $src$$Register;
11357 Register dst = $dst$$Register;
11358 int shift = $shift$$constant;
11359 __ sra(dst, src, shift);
11360 %}
11361 ins_pipe( ialu_regI_regI );
11362 %}
// Variable int arithmetic shift-right.
11364 instruct shr_arith_Reg_Reg(mRegI dst, mRegI src, mRegI shift) %{
11365 match(Set dst (RShiftI src shift));
11366 // effect(KILL cr);
11368 format %{ "SRA $dst, $src, $shift #@shr_arith_Reg_Reg" %}
11369 ins_encode %{
11370 Register src = $src$$Register;
11371 Register dst = $dst$$Register;
11372 Register shift = $shift$$Register;
11373 __ srav(dst, src, shift);
11374 %}
11375 ins_pipe( ialu_regI_regI );
11376 %}
11378 //----------Convert Int to Boolean---------------------------------------------
// dst = (src != 0) ? 1 : 0.  Load 1, then movz clears dst when src is
// zero.  When dst and src are the same register, src must first be
// saved in AT because the daddiu overwrites it.
11380 instruct convI2B(mRegI dst, mRegI src) %{
11381 match(Set dst (Conv2B src));
11383 ins_cost(100);
11384 format %{ "convI2B $dst, $src @ convI2B" %}
11385 ins_encode %{
11386 Register dst = as_Register($dst$$reg);
11387 Register src = as_Register($src$$reg);
11389 if (dst != src) {
11390 __ daddiu(dst, R0, 1);
11391 __ movz(dst, R0, src);
11392 } else {
11393 __ move(AT, src);
11394 __ daddiu(dst, R0, 1);
11395 __ movz(dst, R0, AT);
11396 }
11397 %}
11399 ins_pipe( ialu_regL_regL );
11400 %}
// int -> long: sll by 0 sign-extends the low 32 bits.  If dst == src
// nothing is emitted — an int register is assumed to already hold the
// canonical sign-extended form.
11402 instruct convI2L_reg( mRegL dst, mRegI src) %{
11403 match(Set dst (ConvI2L src));
11405 ins_cost(100);
11406 format %{ "SLL $dst, $src @ convI2L_reg\t" %}
11407 ins_encode %{
11408 Register dst = as_Register($dst$$reg);
11409 Register src = as_Register($src$$reg);
11411 if(dst != src) __ sll(dst, src, 0);
11412 %}
11413 ins_pipe( ialu_regL_regL );
11414 %}
// long -> int: truncate to the low 32 bits and sign-extend in one
// sll-by-0.
11417 instruct convL2I_reg( mRegI dst, mRegL src ) %{
11418 match(Set dst (ConvL2I src));
11420 format %{ "MOV $dst, $src @ convL2I_reg" %}
11421 ins_encode %{
11422 Register dst = as_Register($dst$$reg);
11423 Register src = as_Register($src$$reg);
11425 __ sll(dst, src, 0);
11426 %}
11428 ins_pipe( ialu_regI_regI );
11429 %}
// long -> int -> long round trip: same single sll-by-0 (truncate +
// sign-extend back to 64 bits).
11431 instruct convL2I2L_reg( mRegL dst, mRegL src ) %{
11432 match(Set dst (ConvI2L (ConvL2I src)));
11434 format %{ "sll $dst, $src, 0 @ convL2I2L_reg" %}
11435 ins_encode %{
11436 Register dst = as_Register($dst$$reg);
11437 Register src = as_Register($src$$reg);
11439 __ sll(dst, src, 0);
11440 %}
11442 ins_pipe( ialu_regI_regI );
11443 %}
// long -> double: move the bits to an FPU register and convert.
11445 instruct convL2D_reg( regD dst, mRegL src ) %{
11446 match(Set dst (ConvL2D src));
11447 format %{ "convL2D $dst, $src @ convL2D_reg" %}
11448 ins_encode %{
11449 Register src = as_Register($src$$reg);
11450 FloatRegister dst = as_FloatRegister($dst$$reg);
11452 __ dmtc1(src, dst);
11453 __ cvt_d_l(dst, dst);
11454 %}
11456 ins_pipe( pipe_slow );
11457 %}
// double -> long, inline fast path.  trunc.l.d yields max_long on
// overflow/NaN; if the result is not max_long it is already correct.
// Otherwise the tail fixes up Java semantics: negative overflow ->
// min_long, NaN -> 0, positive overflow keeps max_long.
11459 instruct convD2L_reg_fast( mRegL dst, regD src ) %{
11460 match(Set dst (ConvD2L src));
11461 ins_cost(150);
11462 format %{ "convD2L $dst, $src @ convD2L_reg_fast" %}
11463 ins_encode %{
11464 Register dst = as_Register($dst$$reg);
11465 FloatRegister src = as_FloatRegister($src$$reg);
11467 Label Done;
11469 __ trunc_l_d(F30, src);
11470 // max_long: 0x7fffffffffffffff
11471 // __ set64(AT, 0x7fffffffffffffff);
// Build max_long in AT as (-1 >>> 1).
11472 __ daddiu(AT, R0, -1);
11473 __ dsrl(AT, AT, 1);
11474 __ dmfc1(dst, F30);
// Result != max_long -> no saturation; delay slot zeroes F30 either way.
11476 __ bne(dst, AT, Done);
11477 __ delayed()->mtc1(R0, F30);
// F30 now holds integer 0; convert to 0.0 and test src < 0.0
// (c.ult is also true for NaN).  If false (positive overflow),
// branch out keeping dst == max_long; delay slot sets T9 = -1.
11479 __ cvt_d_w(F30, F30);
11480 __ c_ult_d(src, F30);
11481 __ bc1f(Done);
11482 __ delayed()->daddiu(T9, R0, -1);
// dst = -1 - max_long == min_long, then zero it when src is NaN.
// NOTE(review): subu is used on 64-bit values here while the d2i
// variant below uses the explicit 32-bit subu32 — presumably this
// port aliases subu to a 64-bit subtract; confirm in the assembler.
11484 __ c_un_d(src, src); //NaN?
11485 __ subu(dst, T9, AT);
11486 __ movt(dst, R0);
11488 __ bind(Done);
11489 %}
11491 ins_pipe( pipe_slow );
11492 %}
// double -> long, slow path: NaN -> 0 (set in the branch delay slot);
// otherwise truncate and poll FCSR's invalid-operation flag (0x10000);
// if the conversion raised it, fall back to SharedRuntime::d2l.
11494 instruct convD2L_reg_slow( mRegL dst, regD src ) %{
11495 match(Set dst (ConvD2L src));
11496 ins_cost(250);
11497 format %{ "convD2L $dst, $src @ convD2L_reg_slow" %}
11498 ins_encode %{
11499 Register dst = as_Register($dst$$reg);
11500 FloatRegister src = as_FloatRegister($src$$reg);
11502 Label L;
11504 __ c_un_d(src, src); //NaN?
11505 __ bc1t(L);
// The following move executes in the branch delay slot.
11506 __ delayed();
11507 __ move(dst, R0);
11509 __ trunc_l_d(F30, src);
// cfc1 31 reads FCSR; 0x10000 is the invalid-operation flag.
11510 __ cfc1(AT, 31);
11511 __ li(T9, 0x10000);
11512 __ andr(AT, AT, T9);
11513 __ beq(AT, R0, L);
11514 __ delayed()->dmfc1(dst, F30);
11516 __ mov_d(F12, src);
11517 __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::d2l), 1);
11518 __ move(dst, V0);
11519 __ bind(L);
11520 %}
11522 ins_pipe( pipe_slow );
11523 %}
// float -> int, fast path: truncate, then clear the result to 0 when
// the source is NaN (movt on the FP condition set by c.un.s).
11525 instruct convF2I_reg_fast( mRegI dst, regF src ) %{
11526 match(Set dst (ConvF2I src));
11527 ins_cost(150);
11528 format %{ "convf2i $dst, $src @ convF2I_reg_fast" %}
11529 ins_encode %{
11530 Register dreg = $dst$$Register;
11531 FloatRegister fval = $src$$FloatRegister;
11533 __ trunc_w_s(F30, fval);
11534 __ mfc1(dreg, F30);
11535 __ c_un_s(fval, fval); //NaN?
11536 __ movt(dreg, R0);
11537 %}
11539 ins_pipe( pipe_slow );
11540 %}
// float -> int, slow path: NaN -> 0 (delay slot); otherwise truncate
// and poll FCSR's invalid-operation flag (0x10000), calling
// SharedRuntime::f2i when the inline conversion was invalid.  V0 is
// preserved around the call when it is not the destination.
11542 instruct convF2I_reg_slow( mRegI dst, regF src ) %{
11543 match(Set dst (ConvF2I src));
11544 ins_cost(250);
11545 format %{ "convf2i $dst, $src @ convF2I_reg_slow" %}
11546 ins_encode %{
11547 Register dreg = $dst$$Register;
11548 FloatRegister fval = $src$$FloatRegister;
11549 Label L;
11551 __ c_un_s(fval, fval); //NaN?
11552 __ bc1t(L);
// The following move executes in the branch delay slot.
11553 __ delayed();
11554 __ move(dreg, R0);
11556 __ trunc_w_s(F30, fval);
11558 /* Call SharedRuntime:f2i() to do valid convention */
11559 __ cfc1(AT, 31);
11560 __ li(T9, 0x10000);
11561 __ andr(AT, AT, T9);
11562 __ beq(AT, R0, L);
11563 __ delayed()->mfc1(dreg, F30);
11565 __ mov_s(F12, fval);
11567 /* 2014/01/08 Fu : This bug was found when running ezDS's control-panel.
11568 * J 982 C2 javax.swing.text.BoxView.layoutMajorAxis(II[I[I)V (283 bytes) @ 0x000000555c46aa74
11569 *
11570 * An interger array index has been assigned to V0, and then changed from 1 to Integer.MAX_VALUE.
11571 * V0 is corrupted during call_VM_leaf(), and should be preserved.
11572 */
11573 if(dreg != V0) {
11574 __ push(V0);
11575 }
11576 __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::f2i), 1);
11577 if(dreg != V0) {
11578 __ move(dreg, V0);
11579 __ pop(V0);
11580 }
11581 __ bind(L);
11582 %}
11584 ins_pipe( pipe_slow );
11585 %}
// float -> long, fast path: truncate to a 64-bit integer, NaN -> 0.
11587 instruct convF2L_reg_fast( mRegL dst, regF src ) %{
11588 match(Set dst (ConvF2L src));
11589 ins_cost(150);
11590 format %{ "convf2l $dst, $src @ convF2L_reg_fast" %}
11591 ins_encode %{
11592 Register dreg = $dst$$Register;
11593 FloatRegister fval = $src$$FloatRegister;
11595 __ trunc_l_s(F30, fval);
11596 __ dmfc1(dreg, F30);
11597 __ c_un_s(fval, fval); //NaN?
11598 __ movt(dreg, R0);
11599 %}
11601 ins_pipe( pipe_slow );
11602 %}
// float -> long, slow path: same FCSR invalid-flag polling scheme as
// convF2I_reg_slow, falling back to SharedRuntime::f2l.
11604 instruct convF2L_reg_slow( mRegL dst, regF src ) %{
11605 match(Set dst (ConvF2L src));
11606 ins_cost(250);
11607 format %{ "convf2l $dst, $src @ convF2L_reg_slow" %}
11608 ins_encode %{
11609 Register dst = as_Register($dst$$reg);
11610 FloatRegister fval = $src$$FloatRegister;
11611 Label L;
11613 __ c_un_s(fval, fval); //NaN?
11614 __ bc1t(L);
// The following move executes in the branch delay slot.
11615 __ delayed();
11616 __ move(dst, R0);
11618 __ trunc_l_s(F30, fval);
11619 __ cfc1(AT, 31);
11620 __ li(T9, 0x10000);
11621 __ andr(AT, AT, T9);
11622 __ beq(AT, R0, L);
11623 __ delayed()->dmfc1(dst, F30);
11625 __ mov_s(F12, fval);
11626 __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::f2l), 1);
11627 __ move(dst, V0);
11628 __ bind(L);
11629 %}
11631 ins_pipe( pipe_slow );
11632 %}
// long -> float: move the 64-bit bits to an FPU register, then
// convert with cvt.s.l.  (Removed an unused `Label L;` local that was
// never bound or branched to.)
11634 instruct convL2F_reg( regF dst, mRegL src ) %{
11635 match(Set dst (ConvL2F src));
11636 format %{ "convl2f $dst, $src @ convL2F_reg" %}
11637 ins_encode %{
11638 FloatRegister dst = $dst$$FloatRegister;
11639 Register src = as_Register($src$$reg);
11642 __ dmtc1(src, dst);
11643 __ cvt_s_l(dst, dst);
11644 %}
11646 ins_pipe( pipe_slow );
11647 %}
// int -> float: move the word to an FPU register and convert.
11649 instruct convI2F_reg( regF dst, mRegI src ) %{
11650 match(Set dst (ConvI2F src));
11651 format %{ "convi2f $dst, $src @ convI2F_reg" %}
11652 ins_encode %{
11653 Register src = $src$$Register;
11654 FloatRegister dst = $dst$$FloatRegister;
11656 __ mtc1(src, dst);
11657 __ cvt_s_w(dst, dst);
11658 %}
11660 ins_pipe( fpu_regF_regF );
11661 %}
// CmpLTMask against zero: dst = (p < 0) ? -1 : 0, i.e. broadcast the
// sign bit with an arithmetic shift by 31.
11663 instruct cmpLTMask_immI0( mRegI dst, mRegI p, immI0 zero ) %{
11664 match(Set dst (CmpLTMask p zero));
11665 ins_cost(100);
11667 format %{ "sra $dst, $p, 31 @ cmpLTMask_immI0" %}
11668 ins_encode %{
11669 Register src = $p$$Register;
11670 Register dst = $dst$$Register;
11672 __ sra(dst, src, 31);
11673 %}
11674 ins_pipe( pipe_slow );
11675 %}
// General CmpLTMask: slt produces 0/1, negating gives 0/-1.
11678 instruct cmpLTMask( mRegI dst, mRegI p, mRegI q ) %{
11679 match(Set dst (CmpLTMask p q));
11680 ins_cost(400);
11682 format %{ "cmpLTMask $dst, $p, $q @ cmpLTMask" %}
11683 ins_encode %{
11684 Register p = $p$$Register;
11685 Register q = $q$$Register;
11686 Register dst = $dst$$Register;
11688 __ slt(dst, p, q);
11689 __ subu(dst, R0, dst);
11690 %}
11691 ins_pipe( pipe_slow );
11692 %}
// Pointer -> boolean: dst = (src != NULL) ? 1 : 0, same daddiu/movz
// scheme as convI2B including the AT save when dst aliases src.
11694 instruct convP2B(mRegI dst, mRegP src) %{
11695 match(Set dst (Conv2B src));
11697 ins_cost(100);
11698 format %{ "convP2B $dst, $src @ convP2B" %}
11699 ins_encode %{
11700 Register dst = as_Register($dst$$reg);
11701 Register src = as_Register($src$$reg);
11703 if (dst != src) {
11704 __ daddiu(dst, R0, 1);
11705 __ movz(dst, R0, src);
11706 } else {
11707 __ move(AT, src);
11708 __ daddiu(dst, R0, 1);
11709 __ movz(dst, R0, AT);
11710 }
11711 %}
11713 ins_pipe( ialu_regL_regL );
11714 %}
// int -> double: move the word to an FPU register and convert.
11717 instruct convI2D_reg_reg(regD dst, mRegI src) %{
11718 match(Set dst (ConvI2D src));
11719 format %{ "conI2D $dst, $src @convI2D_reg" %}
11720 ins_encode %{
11721 Register src = $src$$Register;
11722 FloatRegister dst = $dst$$FloatRegister;
11723 __ mtc1(src, dst);
11724 __ cvt_d_w(dst, dst);
11725 %}
11726 ins_pipe( fpu_regF_regF );
11727 %}
// float -> double widening (always exact).
11729 instruct convF2D_reg_reg(regD dst, regF src) %{
11730 match(Set dst (ConvF2D src));
11731 format %{ "convF2D $dst, $src\t# @convF2D_reg_reg" %}
11732 ins_encode %{
11733 FloatRegister dst = $dst$$FloatRegister;
11734 FloatRegister src = $src$$FloatRegister;
11736 __ cvt_d_s(dst, src);
11737 %}
11738 ins_pipe( fpu_regF_regF );
11739 %}
// double -> float narrowing (rounds per the current FPU mode).
11741 instruct convD2F_reg_reg(regF dst, regD src) %{
11742 match(Set dst (ConvD2F src));
11743 format %{ "convD2F $dst, $src\t# @convD2F_reg_reg" %}
11744 ins_encode %{
11745 FloatRegister dst = $dst$$FloatRegister;
11746 FloatRegister src = $src$$FloatRegister;
11748 __ cvt_s_d(dst, src);
11749 %}
11750 ins_pipe( fpu_regF_regF );
11751 %}
11753 // Convert a double to an int. If the double is a NAN, stuff a zero in instead.
// double -> int, fast path: 32-bit analogue of convD2L_reg_fast.
// trunc.w.d yields max_int on overflow/NaN; the tail then fixes up
// negative overflow -> min_int and NaN -> 0.
11754 instruct convD2I_reg_reg_fast( mRegI dst, regD src ) %{
11755 match(Set dst (ConvD2I src));
11757 ins_cost(150);
11758 format %{ "convD2I $dst, $src\t# @ convD2I_reg_reg_fast" %}
11760 ins_encode %{
11761 FloatRegister src = $src$$FloatRegister;
11762 Register dst = $dst$$Register;
11764 Label Done;
11766 __ trunc_w_d(F30, src);
11767 // max_int: 2147483647
11768 __ move(AT, 0x7fffffff);
11769 __ mfc1(dst, F30);
// Result != max_int -> no saturation; delay slot zeroes F30.
11771 __ bne(dst, AT, Done);
11772 __ delayed()->mtc1(R0, F30);
// Test src < 0.0 (also true for NaN); if false the overflow was
// positive and dst stays max_int.  Delay slot sets T9 = -1.
11774 __ cvt_d_w(F30, F30);
11775 __ c_ult_d(src, F30);
11776 __ bc1f(Done);
11777 __ delayed()->addiu(T9, R0, -1);
// dst = -1 - max_int == min_int (32-bit subtract), then zero on NaN.
11779 __ c_un_d(src, src); //NaN?
11780 __ subu32(dst, T9, AT);
11781 __ movt(dst, R0);
11783 __ bind(Done);
11784 %}
11785 ins_pipe( pipe_slow );
11786 %}
// double -> int, slow path: truncate, poll FCSR's invalid-operation
// flag (0x10000) and fall back to SharedRuntime::d2i when set.
11788 instruct convD2I_reg_reg_slow( mRegI dst, regD src ) %{
11789 match(Set dst (ConvD2I src));
11791 ins_cost(250);
11792 format %{ "convD2I $dst, $src\t# @ convD2I_reg_reg_slow" %}
11794 ins_encode %{
11795 FloatRegister src = $src$$FloatRegister;
11796 Register dst = $dst$$Register;
11797 Label L;
11799 __ trunc_w_d(F30, src);
11800 __ cfc1(AT, 31);
11801 __ li(T9, 0x10000);
11802 __ andr(AT, AT, T9);
11803 __ beq(AT, R0, L);
11804 __ delayed()->mfc1(dst, F30);
11806 __ mov_d(F12, src);
11807 __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::d2i), 1);
11808 __ move(dst, V0);
11809 __ bind(L);
11811 %}
11812 ins_pipe( pipe_slow );
11813 %}
11815 // Convert oop pointer into compressed form
// Compress an oop that may be null; the predicate routes provably
// non-null oops to the cheaper rule below.
11816 instruct encodeHeapOop(mRegN dst, mRegP src) %{
11817 predicate(n->bottom_type()->make_ptr()->ptr() != TypePtr::NotNull);
11818 match(Set dst (EncodeP src));
11819 format %{ "encode_heap_oop $dst,$src" %}
11820 ins_encode %{
11821 Register src = $src$$Register;
11822 Register dst = $dst$$Register;
// encode_heap_oop works in place, so copy first when needed.
11823 if (src != dst) {
11824 __ move(dst, src);
11825 }
11826 __ encode_heap_oop(dst);
11827 %}
11828 ins_pipe( ialu_regL_regL );
11829 %}
// Compress an oop statically known to be non-null (no null check).
11831 instruct encodeHeapOop_not_null(mRegN dst, mRegP src) %{
11832 predicate(n->bottom_type()->make_ptr()->ptr() == TypePtr::NotNull);
11833 match(Set dst (EncodeP src));
11834 format %{ "encode_heap_oop_not_null $dst,$src @ encodeHeapOop_not_null" %}
11835 ins_encode %{
11836 __ encode_heap_oop_not_null($dst$$Register, $src$$Register);
11837 %}
11838 ins_pipe( ialu_regL_regL );
11839 %}
// Decompress a narrow oop that may be null.
11841 instruct decodeHeapOop(mRegP dst, mRegN src) %{
11842 predicate(n->bottom_type()->is_ptr()->ptr() != TypePtr::NotNull &&
11843 n->bottom_type()->is_ptr()->ptr() != TypePtr::Constant);
11844 match(Set dst (DecodeN src));
11845 format %{ "decode_heap_oop $dst,$src @ decodeHeapOop" %}
11846 ins_encode %{
11847 Register s = $src$$Register;
11848 Register d = $dst$$Register;
// decode_heap_oop works in place, so copy first when needed.
11849 if (s != d) {
11850 __ move(d, s);
11851 }
11852 __ decode_heap_oop(d);
11853 %}
11854 ins_pipe( ialu_regL_regL );
11855 %}
// Decompress a narrow oop statically known non-null (or constant);
// uses the two-register form when src and dst differ.
11857 instruct decodeHeapOop_not_null(mRegP dst, mRegN src) %{
11858 predicate(n->bottom_type()->is_ptr()->ptr() == TypePtr::NotNull ||
11859 n->bottom_type()->is_ptr()->ptr() == TypePtr::Constant);
11860 match(Set dst (DecodeN src));
11861 format %{ "decode_heap_oop_not_null $dst,$src @ decodeHeapOop_not_null" %}
11862 ins_encode %{
11863 Register s = $src$$Register;
11864 Register d = $dst$$Register;
11865 if (s != d) {
11866 __ decode_heap_oop_not_null(d, s);
11867 } else {
11868 __ decode_heap_oop_not_null(d);
11869 }
11870 %}
11871 ins_pipe( ialu_regL_regL );
11872 %}
// Compress a klass pointer (always non-null).  Fixed the debug format
// string, which previously said "encode_heap_oop_not_null" and made
// PrintOptoAssembly output misleading.
11874 instruct encodeKlass_not_null(mRegN dst, mRegP src) %{
11875 match(Set dst (EncodePKlass src));
11876 format %{ "encode_klass_not_null $dst,$src @ encodeKlass_not_null" %}
11877 ins_encode %{
11878 __ encode_klass_not_null($dst$$Register, $src$$Register);
11879 %}
11880 ins_pipe( ialu_regL_regL );
11881 %}
// Decompress a narrow klass pointer (always non-null); uses the
// two-register form when src and dst differ.
11883 instruct decodeKlass_not_null(mRegP dst, mRegN src) %{
11884 match(Set dst (DecodeNKlass src));
11885 format %{ "decode_heap_klass_not_null $dst,$src" %}
11886 ins_encode %{
11887 Register s = $src$$Register;
11888 Register d = $dst$$Register;
11889 if (s != d) {
11890 __ decode_klass_not_null(d, s);
11891 } else {
11892 __ decode_klass_not_null(d);
11893 }
11894 %}
11895 ins_pipe( ialu_regL_regL );
11896 %}
11898 //FIXME
// Load the current JavaThread pointer.  With OPT_THREAD the thread is
// pinned in TREG and a register move suffices; otherwise fall back to
// MacroAssembler::get_thread.
11899 instruct tlsLoadP(mRegP dst) %{
11900 match(Set dst (ThreadLocal));
11902 ins_cost(0);
11903 format %{ " get_thread in $dst #@tlsLoadP" %}
11904 ins_encode %{
11905 Register dst = $dst$$Register;
11906 #ifdef OPT_THREAD
11907 __ move(dst, TREG);
11908 #else
11909 __ get_thread(dst);
11910 #endif
11911 %}
11913 ins_pipe( ialu_loadI );
11914 %}
// CheckCastPP is a type-system-only node: no code is emitted.  Fixed
// the "@chekCastPP" typo in the debug format string.
11917 instruct checkCastPP( mRegP dst ) %{
11918 match(Set dst (CheckCastPP dst));
11920 format %{ "#checkcastPP of $dst (empty encoding) #@checkCastPP" %}
11921 ins_encode( /*empty encoding*/ );
11922 ins_pipe( empty );
11923 %}
// CastPP: compiler-only type assertion, emits nothing (size 0).
11925 instruct castPP(mRegP dst)
11926 %{
11927 match(Set dst (CastPP dst));
11929 size(0);
11930 format %{ "# castPP of $dst" %}
11931 ins_encode(/* empty encoding */);
11932 ins_pipe(empty);
11933 %}
// CastII: compiler-only type assertion, emits nothing.
11935 instruct castII( mRegI dst ) %{
11936 match(Set dst (CastII dst));
11937 format %{ "#castII of $dst empty encoding" %}
11938 ins_encode( /*empty encoding*/ );
11939 ins_cost(0);
11940 ins_pipe( empty );
11941 %}
11943 // Return Instruction
11944 // Remove the return address & jump to it.
// jr RA plus a nop to fill the branch delay slot.
11945 instruct Ret() %{
11946 match(Return);
11947 format %{ "RET #@Ret" %}
11949 ins_encode %{
11950 __ jr(RA);
11951 __ nop();
11952 %}
11954 ins_pipe( pipe_jump );
11955 %}
11957 /*
11958 // For Loongson CPUs, jr seems too slow, so this rule shouldn't be imported.
11959 instruct jumpXtnd(mRegL switch_val) %{
11960 match(Jump switch_val);
11962 ins_cost(350);
11964 format %{ "load T9 <-- [$constanttablebase, $switch_val, $constantoffset] @ jumpXtnd\n\t"
11965 "jr T9\n\t"
11966 "nop" %}
11967 ins_encode %{
11968 Register table_base = $constanttablebase;
11969 int con_offset = $constantoffset;
11970 Register switch_reg = $switch_val$$Register;
11972 if (UseLoongsonISA) {
11973 if (Assembler::is_simm(con_offset, 8)) {
11974 __ gsldx(T9, table_base, switch_reg, con_offset);
11975 } else if (Assembler::is_simm16(con_offset)) {
11976 __ daddu(T9, table_base, switch_reg);
11977 __ ld(T9, T9, con_offset);
11978 } else {
11979 __ move(T9, con_offset);
11980 __ daddu(AT, table_base, switch_reg);
11981 __ gsldx(T9, AT, T9, 0);
11982 }
11983 } else {
11984 if (Assembler::is_simm16(con_offset)) {
11985 __ daddu(T9, table_base, switch_reg);
11986 __ ld(T9, T9, con_offset);
11987 } else {
11988 __ move(T9, con_offset);
11989 __ daddu(AT, table_base, switch_reg);
11990 __ daddu(AT, T9, AT);
11991 __ ld(T9, AT, 0);
11992 }
11993 }
11995 __ jr(T9);
11996 __ nop();
11998 %}
11999 ins_pipe(pipe_jump);
12000 %}
12001 */
12003 // Jump Direct - Label defines a relative address from JMP
// Unconditional branch to a label, nop in the delay slot.
12004 instruct jmpDir(label labl) %{
12005 match(Goto);
12006 effect(USE labl);
12008 ins_cost(300);
12009 format %{ "JMP $labl #@jmpDir" %}
12011 ins_encode %{
12012 Label &L = *($labl$$label);
// NOTE(review): `if (&L)` distinguishes a bound label from a null
// label pointer — dereferencing a null pointer to form the
// reference is technically UB; idiom kept as found in this port.
12013 if(&L)
12014 __ b(L);
12015 else
12016 __ b(int(0));
12017 __ nop();
12018 %}
12020 ins_pipe( pipe_jump );
12021 ins_pc_relative(1);
12022 %}
12026 // Tail Jump; remove the return address; jump to target.
12027 // TailCall above leaves the return address around.
12028 // TailJump is used in only one place, the rethrow_Java stub (fancy_jump=2).
12029 // ex_oop (Exception Oop) is needed in %o0 at the jump. As there would be a
12030 // "restore" before this instruction (in Epilogue), we need to materialize it
12031 // in %i0.
12032 //FIXME
// Places the exception oop in V0 and the exception pc (RA) in V1 —
// the registers the exception stubs read — then jumps to target.
12033 instruct tailjmpInd(mRegP jump_target,mRegP ex_oop) %{
12034 match( TailJump jump_target ex_oop );
12035 ins_cost(200);
12036 format %{ "Jmp $jump_target ; ex_oop = $ex_oop #@tailjmpInd" %}
12037 ins_encode %{
12038 Register target = $jump_target$$Register;
12040 /* 2012/9/14 Jin: V0, V1 are indicated in:
12041 * [stubGenerator_mips.cpp] generate_forward_exception()
12042 * [runtime_mips.cpp] OptoRuntime::generate_exception_blob()
12043 */
12044 Register oop = $ex_oop$$Register;
12045 Register exception_oop = V0;
12046 Register exception_pc = V1;
12048 __ move(exception_pc, RA);
12049 __ move(exception_oop, oop);
12051 __ jr(target);
12052 __ nop();
12053 %}
12054 ins_pipe( pipe_jump );
12055 %}
12057 // ============================================================================
12058 // Procedure Call/Return Instructions
12059 // Call Java Static Instruction
12060 // Note: If this code changes, the corresponding ret_addr_offset() and
12061 // compute_padding() functions will have to be adjusted.
// Call Java Static Instruction
// Note: If this code changes, the corresponding ret_addr_offset() and
//       compute_padding() functions will have to be adjusted.
instruct CallStaticJavaDirect(method meth) %{
  match(CallStaticJava);
  effect(USE meth);

  ins_cost(300);
  format %{ "CALL,static #@CallStaticJavaDirect " %}
  ins_encode( Java_Static_Call( meth ) );
  ins_pipe( pipe_slow );
  ins_pc_relative(1);
  // 16-byte alignment keeps the call site patchable as a unit
  ins_alignment(16);
%}
12074 // Call Java Dynamic Instruction
12075 // Note: If this code changes, the corresponding ret_addr_offset() and
12076 // compute_padding() functions will have to be adjusted.
// Call Java Dynamic Instruction
// Loads the inline-cache klass sentinel ((oop)-1) before the call; the IC
// machinery patches it on first dispatch.
// Note: If this code changes, the corresponding ret_addr_offset() and
//       compute_padding() functions will have to be adjusted.
instruct CallDynamicJavaDirect(method meth) %{
  match(CallDynamicJava);
  effect(USE meth);

  ins_cost(300);
  format %{"MOV IC_Klass, (oop)-1 @ CallDynamicJavaDirect\n\t"
           "CallDynamic @ CallDynamicJavaDirect" %}
  ins_encode( Java_Dynamic_Call( meth ) );
  ins_pipe( pipe_slow );
  ins_pc_relative(1);
  ins_alignment(16);
%}
// Call runtime leaf function that uses no floating point (no safepoint,
// no oop map needed).
instruct CallLeafNoFPDirect(method meth) %{
  match(CallLeafNoFP);
  effect(USE meth);

  ins_cost(300);
  format %{ "CALL_LEAF_NOFP,runtime " %}
  ins_encode(Java_To_Runtime(meth));
  ins_pipe( pipe_slow );
  ins_pc_relative(1);
  ins_alignment(16);
%}
12102 // Prefetch instructions.
12104 instruct prefetchrNTA( memory mem ) %{
12105 match(PrefetchRead mem);
12106 ins_cost(125);
12108 format %{ "pref $mem\t# Prefetch into non-temporal cache for read @ prefetchrNTA" %}
12109 ins_encode %{
12110 int base = $mem$$base;
12111 int index = $mem$$index;
12112 int scale = $mem$$scale;
12113 int disp = $mem$$disp;
12115 if( index != 0 ) {
12116 if (scale == 0) {
12117 __ daddu(AT, as_Register(base), as_Register(index));
12118 } else {
12119 __ dsll(AT, as_Register(index), scale);
12120 __ daddu(AT, as_Register(base), AT);
12121 }
12122 } else {
12123 __ move(AT, as_Register(base));
12124 }
12125 if( Assembler::is_simm16(disp) ) {
12126 __ daddiu(AT, as_Register(base), disp);
12127 __ daddiu(AT, AT, disp);
12128 } else {
12129 __ move(T9, disp);
12130 __ daddu(AT, as_Register(base), T9);
12131 }
12132 __ pref(0, AT, 0); //hint: 0:load
12133 %}
12134 ins_pipe(pipe_slow);
12135 %}
12137 instruct prefetchwNTA( memory mem ) %{
12138 match(PrefetchWrite mem);
12139 ins_cost(125);
12140 format %{ "pref $mem\t# Prefetch to non-temporal cache for write @ prefetchwNTA" %}
12141 ins_encode %{
12142 int base = $mem$$base;
12143 int index = $mem$$index;
12144 int scale = $mem$$scale;
12145 int disp = $mem$$disp;
12147 if( index != 0 ) {
12148 if (scale == 0) {
12149 __ daddu(AT, as_Register(base), as_Register(index));
12150 } else {
12151 __ dsll(AT, as_Register(index), scale);
12152 __ daddu(AT, as_Register(base), AT);
12153 }
12154 } else {
12155 __ move(AT, as_Register(base));
12156 }
12157 if( Assembler::is_simm16(disp) ) {
12158 __ daddiu(AT, as_Register(base), disp);
12159 __ daddiu(AT, AT, disp);
12160 } else {
12161 __ move(T9, disp);
12162 __ daddu(AT, as_Register(base), T9);
12163 }
12164 __ pref(1, AT, 0); //hint: 1:store
12165 %}
12166 ins_pipe(pipe_slow);
12167 %}
12169 // Prefetch instructions for allocation.
// Prefetch for allocation.
// Implemented as a dummy byte load into R0 (the hardwired-zero register),
// which touches the cache line without changing architectural state.
// The address is base + (index << scale) + disp, folded into the load's
// addressing mode where the displacement fits.
instruct prefetchAllocNTA( memory mem ) %{
  match(PrefetchAllocation mem);
  ins_cost(125);
  format %{ "pref $mem\t# Prefetch allocation @ prefetchAllocNTA" %}
  ins_encode %{
     int  base = $mem$$base;
     int  index = $mem$$index;
     int  scale = $mem$$scale;
     int  disp = $mem$$disp;

     Register dst = R0;    // loads into R0 are discarded; only the cache fill matters

     if( index != 0 ) {
        if( Assembler::is_simm16(disp) ) {
           if( UseLoongsonISA ) {
              // Loongson gslbx folds base + index + disp into one instruction
              if (scale == 0) {
                 __ gslbx(dst, as_Register(base), as_Register(index), disp);
              } else {
                 __ dsll(AT, as_Register(index), scale);
                 __ gslbx(dst, as_Register(base), AT, disp);
              }
           } else {
              if (scale == 0) {
                 __ addu(AT, as_Register(base), as_Register(index));
              } else {
                 __ dsll(AT, as_Register(index), scale);
                 __ addu(AT, as_Register(base), AT);
              }
              __ lb(dst, AT, disp);
           }
        } else {
           // displacement does not fit in 16 bits: materialize it in T9
           if (scale == 0) {
              __ addu(AT, as_Register(base), as_Register(index));
           } else {
              __ dsll(AT, as_Register(index), scale);
              __ addu(AT, as_Register(base), AT);
           }
           __ move(T9, disp);
           if( UseLoongsonISA ) {
              __ gslbx(dst, AT, T9, 0);
           } else {
              __ addu(AT, AT, T9);
              __ lb(dst, AT, 0);
           }
        }
     } else {
        // no index register
        if( Assembler::is_simm16(disp) ) {
           __ lb(dst, as_Register(base), disp);
        } else {
           __ move(T9, disp);
           if( UseLoongsonISA ) {
              __ gslbx(dst, as_Register(base), T9, 0);
           } else {
              __ addu(AT, as_Register(base), T9);
              __ lb(dst, AT, 0);
           }
        }
     }
  %}
  ins_pipe(pipe_slow);
%}
12234 // Call runtime without safepoint
// Call runtime without safepoint (leaf call; may use floating point).
instruct CallLeafDirect(method meth) %{
  match(CallLeaf);
  effect(USE meth);

  ins_cost(300);
  format %{ "CALL_LEAF,runtime #@CallLeafDirect " %}
  ins_encode(Java_To_Runtime(meth));
  ins_pipe( pipe_slow );
  ins_pc_relative(1);
  ins_alignment(16);
%}
12247 // Load Char (16bit unsigned)
// Load Char (16bit unsigned)
instruct loadUS(mRegI dst, memory mem) %{
  match(Set dst (LoadUS mem));

  ins_cost(125);
  format %{ "loadUS  $dst,$mem @ loadC" %}
  ins_encode(load_C_enc(dst, mem));
  ins_pipe( ialu_loadI );
%}
// Load Char (16bit unsigned) and widen to long.
// Zero-extending load: the unsigned 16-bit value already fits the long.
instruct loadUS_convI2L(mRegL dst, memory mem) %{
  match(Set dst (ConvI2L (LoadUS mem)));

  ins_cost(125);
  format %{ "loadUS  $dst,$mem @ loadUS_convI2L" %}
  ins_encode(load_C_enc(dst, mem));
  ins_pipe( ialu_loadI );
%}
12266 // Store Char (16bit unsigned)
// Store Char (16bit unsigned)
instruct storeC(memory mem, mRegI src) %{
  match(Set mem (StoreC mem src));

  ins_cost(125);
  format %{ "storeC  $src, $mem @ storeC" %}
  ins_encode(store_C_reg_enc(mem, src));
  ins_pipe( ialu_loadI );
%}
// Store Char zero: special-cased so R0 can be stored directly,
// avoiding materializing the constant.
instruct storeC0(memory mem, immI0 zero) %{
  match(Set mem (StoreC mem zero));

  ins_cost(125);
  format %{ "storeC  $zero, $mem @ storeC0" %}
  ins_encode(store_C0_enc(mem));
  ins_pipe( ialu_loadI );
%}
// Load float constant 0.0f by moving R0 into the FPU register
// (the all-zero bit pattern is +0.0f), avoiding a constant-table load.
instruct loadConF0(regF dst, immF0 zero) %{
  match(Set dst zero);
  ins_cost(100);

  format %{ "mov $dst, zero @ loadConF0\n"%}
  ins_encode %{
    FloatRegister dst = $dst$$FloatRegister;

    __ mtc1(R0, dst);
  %}
  ins_pipe( fpu_loadF );
%}
// Load float constant from the constant table.
// Uses a direct lwc1 when the table offset fits in 16 bits; otherwise
// materializes the offset and uses indexed addressing (gslwxc1 on
// Loongson, add + lwc1 elsewhere).
instruct loadConF(regF dst, immF src) %{
  match(Set dst src);
  ins_cost(125);

  format %{ "lwc1 $dst, $constantoffset[$constanttablebase] # load FLOAT $src from table @ loadConF" %}
  ins_encode %{
    int con_offset = $constantoffset($src);

    if (Assembler::is_simm16(con_offset)) {
       __ lwc1($dst$$FloatRegister, $constanttablebase, con_offset);
    } else {
       __ set64(AT, con_offset);
       if (UseLoongsonISA) {
          __ gslwxc1($dst$$FloatRegister, $constanttablebase, AT, 0);
       } else {
          __ daddu(AT, $constanttablebase, AT);
          __ lwc1($dst$$FloatRegister, AT, 0);
       }
    }
  %}
  ins_pipe( fpu_loadF );
%}
// Load double constant 0.0 by moving R0 into the FPU register with a
// 64-bit move (all-zero bits are +0.0), avoiding a constant-table load.
instruct loadConD0(regD dst, immD0 zero) %{
  match(Set dst zero);
  ins_cost(100);

  format %{ "mov $dst, zero @ loadConD0"%}
  ins_encode %{
    FloatRegister dst = as_FloatRegister($dst$$reg);

    __ dmtc1(R0, dst);
  %}
  ins_pipe( fpu_loadF );
%}
// Load double constant from the constant table.
// Mirrors loadConF: direct ldc1 for simm16 offsets, otherwise indexed
// access (gsldxc1 on Loongson, add + ldc1 elsewhere).
instruct loadConD(regD dst, immD src) %{
  match(Set dst src);
  ins_cost(125);

  format %{ "ldc1 $dst, $constantoffset[$constanttablebase] # load DOUBLE $src from table @ loadConD" %}
  ins_encode %{
    int con_offset = $constantoffset($src);

    if (Assembler::is_simm16(con_offset)) {
       __ ldc1($dst$$FloatRegister, $constanttablebase, con_offset);
    } else {
       __ set64(AT, con_offset);
       if (UseLoongsonISA) {
          __ gsldxc1($dst$$FloatRegister, $constanttablebase, AT, 0);
       } else {
          __ daddu(AT, $constanttablebase, AT);
          __ ldc1($dst$$FloatRegister, AT, 0);
       }
    }
  %}
  ins_pipe( fpu_loadF );
%}
12360 // Store register Float value (it is faster than store from FPU register)
// Store register Float value (it is faster than store from FPU register)
instruct storeF_reg( memory mem, regF src) %{
  match(Set mem (StoreF mem src));

  ins_cost(50);
  format %{ "store   $mem, $src\t# store float @ storeF_reg" %}
  ins_encode(store_F_reg_enc(mem, src));
  ins_pipe( fpu_storeF );
%}
// Store float 0.0f to memory by storing R0 with an integer sw
// (the zero bit pattern equals +0.0f), skipping the FPU entirely.
// The ladder below selects the cheapest addressing sequence:
// Loongson gsswx (base+index+simm8), sw with simm16 displacement,
// or explicit address arithmetic via AT/T9 for large displacements.
instruct storeF_imm0( memory mem, immF0 zero) %{
  match(Set mem (StoreF mem zero));

  ins_cost(40);
  format %{ "store   $mem, zero\t# store float @ storeF_imm0" %}
  ins_encode %{
    int      base = $mem$$base;
    int     index = $mem$$index;
    int     scale = $mem$$scale;
    int      disp = $mem$$disp;

    if( index != 0 ) {
      if ( UseLoongsonISA ) {
        if ( Assembler::is_simm(disp, 8) ) {
          // gsswx takes an 8-bit immediate offset
          if ( scale == 0 ) {
            __ gsswx(R0, as_Register(base), as_Register(index), disp);
          } else {
            __ dsll(T9, as_Register(index), scale);
            __ gsswx(R0, as_Register(base), T9, disp);
          }
        } else if ( Assembler::is_simm16(disp) ) {
          if ( scale == 0 ) {
            __ daddu(AT, as_Register(base), as_Register(index));
          } else {
            __ dsll(T9, as_Register(index), scale);
            __ daddu(AT, as_Register(base), T9);
          }
          __ sw(R0, AT, disp);
        } else {
          // large displacement: fold disp into the index operand of gsswx
          if ( scale == 0 ) {
            __ move(T9, disp);
            __ daddu(AT, as_Register(index), T9);
            __ gsswx(R0, as_Register(base), AT, 0);
          } else {
            __ dsll(T9, as_Register(index), scale);
            __ move(AT, disp);
            __ daddu(AT, AT, T9);
            __ gsswx(R0, as_Register(base), AT, 0);
          }
        }
      } else { //not use loongson isa
        if(scale != 0) {
          __ dsll(T9, as_Register(index), scale);
          __ daddu(AT, as_Register(base), T9);
        } else {
          __ daddu(AT, as_Register(base), as_Register(index));
        }
        if( Assembler::is_simm16(disp) ) {
          __ sw(R0, AT, disp);
        } else {
          __ move(T9, disp);
          __ daddu(AT, AT, T9);
          __ sw(R0, AT, 0);
        }
      }
    } else { //index is 0
      if ( UseLoongsonISA ) {
        if ( Assembler::is_simm16(disp) ) {
          __ sw(R0, as_Register(base), disp);
        } else {
          __ move(T9, disp);
          __ gsswx(R0, as_Register(base), T9, 0);
        }
      } else {
        if( Assembler::is_simm16(disp) ) {
          __ sw(R0, as_Register(base), disp);
        } else {
          __ move(T9, disp);
          __ daddu(AT, as_Register(base), T9);
          __ sw(R0, AT, 0);
        }
      }
    }
  %}
  ins_pipe( ialu_storeI );
%}
12447 // Load Double
// Load Double
instruct loadD(regD dst, memory mem) %{
  match(Set dst (LoadD mem));

  ins_cost(150);
  format %{ "loadD   $dst, $mem #@loadD" %}
  ins_encode(load_D_enc(dst, mem));
  ins_pipe( ialu_loadI );
%}
12457 // Load Double - UNaligned
// Load Double - UNaligned
// Currently reuses the aligned encoding; see FIXME below.
instruct loadD_unaligned(regD dst, memory mem ) %{
  match(Set dst (LoadD_unaligned mem));
  ins_cost(250);
  // FIXME: Jin: Need more effective ldl/ldr
  format %{ "loadD_unaligned   $dst, $mem #@loadD_unaligned" %}
  ins_encode(load_D_enc(dst, mem));
  ins_pipe( ialu_loadI );
%}
// Store Double from an FPU register.
instruct storeD_reg( memory mem, regD src) %{
  match(Set mem (StoreD mem src));

  ins_cost(50);
  format %{ "store   $mem, $src\t# store float @ storeD_reg" %}
  ins_encode(store_D_reg_enc(mem, src));
  ins_pipe( fpu_storeF );
%}
// Store double 0.0 to memory.
// Materializes 0.0 in scratch FPU register F30 (mtc1 of R0 then
// cvt.d.w turns integer 0 into +0.0) and stores it with sdc1/gssdxc1.
// The addressing ladder mirrors storeF_imm0.
instruct storeD_imm0( memory mem, immD0 zero) %{
  match(Set mem (StoreD mem zero));

  ins_cost(40);
  format %{ "store   $mem, zero\t# store float @ storeD_imm0" %}
  ins_encode %{
    int      base = $mem$$base;
    int     index = $mem$$index;
    int     scale = $mem$$scale;
    int      disp = $mem$$disp;

    __ mtc1(R0, F30);
    __ cvt_d_w(F30, F30);   // F30 = (double)0 == +0.0

    if( index != 0 ) {
      if ( UseLoongsonISA ) {
        if ( Assembler::is_simm(disp, 8) ) {
          // gssdxc1 takes an 8-bit immediate offset
          if (scale == 0) {
            __ gssdxc1(F30, as_Register(base), as_Register(index), disp);
          } else {
            __ dsll(T9, as_Register(index), scale);
            __ gssdxc1(F30, as_Register(base), T9, disp);
          }
        } else if ( Assembler::is_simm16(disp) ) {
          if (scale == 0) {
            __ daddu(AT, as_Register(base), as_Register(index));
            __ sdc1(F30, AT, disp);
          } else {
            __ dsll(T9, as_Register(index), scale);
            __ daddu(AT, as_Register(base), T9);
            __ sdc1(F30, AT, disp);
          }
        } else {
          // large displacement: fold disp into the index operand
          if (scale == 0) {
            __ move(T9, disp);
            __ daddu(AT, as_Register(index), T9);
            __ gssdxc1(F30, as_Register(base), AT, 0);
          } else {
            __ move(T9, disp);
            __ dsll(AT, as_Register(index), scale);
            __ daddu(AT, AT, T9);
            __ gssdxc1(F30, as_Register(base), AT, 0);
          }
        }
      } else { // not use loongson isa
        if(scale != 0) {
          __ dsll(T9, as_Register(index), scale);
          __ daddu(AT, as_Register(base), T9);
        } else {
          __ daddu(AT, as_Register(base), as_Register(index));
        }
        if( Assembler::is_simm16(disp) ) {
          __ sdc1(F30, AT, disp);
        } else {
          __ move(T9, disp);
          __ daddu(AT, AT, T9);
          __ sdc1(F30, AT, 0);
        }
      }
    } else {// index is 0
      if ( UseLoongsonISA ) {
        if ( Assembler::is_simm16(disp) ) {
          __ sdc1(F30, as_Register(base), disp);
        } else {
          __ move(T9, disp);
          __ gssdxc1(F30, as_Register(base), T9, 0);
        }
      } else {
        if( Assembler::is_simm16(disp) ) {
          __ sdc1(F30, as_Register(base), disp);
        } else {
          __ move(T9, disp);
          __ daddu(AT, as_Register(base), T9);
          __ sdc1(F30, AT, 0);
        }
      }
    }
  %}
  ins_pipe( ialu_storeI );
%}
// Load int from a stack slot (SP-relative).
instruct loadSSI(mRegI dst, stackSlotI src)
%{
  match(Set dst src);

  ins_cost(125);
  format %{ "lw    $dst, $src\t# int stk @ loadSSI" %}
  ins_encode %{
    // stack-slot displacements are expected to fit in the lw immediate
    guarantee( Assembler::is_simm16($src$$disp), "disp too long (loadSSI) !");
    __ lw($dst$$Register, SP, $src$$disp);
  %}
  ins_pipe(ialu_loadI);
%}
// Store int to a stack slot (SP-relative).
instruct storeSSI(stackSlotI dst, mRegI src)
%{
  match(Set dst src);

  ins_cost(100);
  format %{ "sw    $dst, $src\t# int stk @ storeSSI" %}
  ins_encode %{
    guarantee( Assembler::is_simm16($dst$$disp), "disp too long (storeSSI) !");
    __ sw($src$$Register, SP, $dst$$disp);
  %}
  ins_pipe(ialu_storeI);
%}
// Load long from a stack slot (SP-relative).
instruct loadSSL(mRegL dst, stackSlotL src)
%{
  match(Set dst src);

  ins_cost(125);
  format %{ "ld    $dst, $src\t# long stk @ loadSSL" %}
  ins_encode %{
    guarantee( Assembler::is_simm16($src$$disp), "disp too long (loadSSL) !");
    __ ld($dst$$Register, SP, $src$$disp);
  %}
  ins_pipe(ialu_loadI);
%}
// Store long to a stack slot (SP-relative).
instruct storeSSL(stackSlotL dst, mRegL src)
%{
  match(Set dst src);

  ins_cost(100);
  format %{ "sd    $dst, $src\t# long stk @ storeSSL" %}
  ins_encode %{
    guarantee( Assembler::is_simm16($dst$$disp), "disp too long (storeSSL) !");
    __ sd($src$$Register, SP, $dst$$disp);
  %}
  ins_pipe(ialu_storeI);
%}
// Load pointer from a stack slot (SP-relative).
instruct loadSSP(mRegP dst, stackSlotP src)
%{
  match(Set dst src);

  ins_cost(125);
  format %{ "ld    $dst, $src\t# ptr stk @ loadSSP" %}
  ins_encode %{
    guarantee( Assembler::is_simm16($src$$disp), "disp too long (loadSSP) !");
    __ ld($dst$$Register, SP, $src$$disp);
  %}
  ins_pipe(ialu_loadI);
%}
// Store pointer to a stack slot (SP-relative).
instruct storeSSP(stackSlotP dst, mRegP src)
%{
  match(Set dst src);

  ins_cost(100);
  format %{ "sd    $dst, $src\t# ptr stk @ storeSSP" %}
  ins_encode %{
    guarantee( Assembler::is_simm16($dst$$disp), "disp too long (storeSSP) !");
    __ sd($src$$Register, SP, $dst$$disp);
  %}
  ins_pipe(ialu_storeI);
%}
// Load float from a stack slot (SP-relative).
instruct loadSSF(regF dst, stackSlotF src)
%{
  match(Set dst src);

  ins_cost(125);
  format %{ "lwc1   $dst, $src\t# float stk @ loadSSF" %}
  ins_encode %{
    guarantee( Assembler::is_simm16($src$$disp), "disp too long (loadSSF) !");
    __ lwc1($dst$$FloatRegister, SP, $src$$disp);
  %}
  ins_pipe(ialu_loadI);
%}
// Store float to a stack slot (SP-relative).
instruct storeSSF(stackSlotF dst, regF src)
%{
  match(Set dst src);

  ins_cost(100);
  format %{ "swc1   $dst, $src\t# float stk @ storeSSF" %}
  ins_encode %{
    guarantee( Assembler::is_simm16($dst$$disp), "disp too long (storeSSF) !");
    __ swc1($src$$FloatRegister, SP, $dst$$disp);
  %}
  ins_pipe(fpu_storeF);
%}
12661 // Use the same format since predicate() can not be used here.
// Load double from a stack slot (SP-relative).
// Use the same format since predicate() can not be used here.
instruct loadSSD(regD dst, stackSlotD src)
%{
  match(Set dst src);

  ins_cost(125);
  format %{ "ldc1   $dst, $src\t# double stk @ loadSSD" %}
  ins_encode %{
    guarantee( Assembler::is_simm16($src$$disp), "disp too long (loadSSD) !");
    __ ldc1($dst$$FloatRegister, SP, $src$$disp);
  %}
  ins_pipe(ialu_loadI);
%}
// Store double to a stack slot (SP-relative).
instruct storeSSD(stackSlotD dst, regD src)
%{
  match(Set dst src);

  ins_cost(100);
  format %{ "sdc1   $dst, $src\t# double stk @ storeSSD" %}
  ins_encode %{
    guarantee( Assembler::is_simm16($dst$$disp), "disp too long (storeSSD) !");
    __ sdc1($src$$FloatRegister, SP, $dst$$disp);
  %}
  ins_pipe(fpu_storeF);
%}
// Fast-path monitor enter. Delegates to MacroAssembler::fast_lock;
// the box register is clobbered (USE_KILL), tmp/scr are scratch.
instruct cmpFastLock( FlagsReg cr, mRegP object, s0_RegP box, mRegI tmp, mRegP scr) %{
  match( Set cr (FastLock object box) );
  effect( TEMP tmp, TEMP scr, USE_KILL box );
  ins_cost(300);
  format %{ "FASTLOCK $cr $object, $box, $tmp #@ cmpFastLock" %}
  ins_encode %{
    __ fast_lock($object$$Register, $box$$Register, $tmp$$Register, $scr$$Register);
  %}

  ins_pipe( pipe_slow );
  ins_pc_relative(1);
%}
// Fast-path monitor exit. Delegates to MacroAssembler::fast_unlock;
// the box register is clobbered (USE_KILL), tmp is scratch.
instruct cmpFastUnlock( FlagsReg cr, mRegP object, s0_RegP box, mRegP tmp ) %{
  match( Set cr (FastUnlock object box) );
  effect( TEMP tmp, USE_KILL box );
  ins_cost(300);
  format %{ "FASTUNLOCK $object, $box, $tmp #@cmpFastUnlock" %}
  ins_encode %{
    __ fast_unlock($object$$Register, $box$$Register, $tmp$$Register);
  %}

  ins_pipe( pipe_slow );
  ins_pc_relative(1);
%}
12714 // Store CMS card-mark Immediate
// Store CMS card-mark Immediate
// Byte store with ordering (the _sync encoding) for the card table.
instruct storeImmCM(memory mem, immI8 src) %{
  match(Set mem (StoreCM mem src));

  ins_cost(150);
  format %{ "MOV8   $mem,$src\t! CMS card-mark imm0" %}
// opcode(0xC6);
  ins_encode(store_B_immI_enc_sync(mem, src));
  ins_pipe( ialu_storeI );
%}
12725 // Die now
12726 instruct ShouldNotReachHere( )
12727 %{
12728 match(Halt);
12729 ins_cost(300);
12731 // Use the following format syntax
12732 format %{ "ILLTRAP ;#@ShouldNotReachHere" %}
12733 ins_encode %{
12734 // Here we should emit illtrap !
12736 __ stop("in ShoudNotReachHere");
12738 %}
12739 ins_pipe( pipe_jump );
12740 %}
// Load effective address for a narrow-oop base with small offset.
// Only valid when the narrow-oop shift is zero (see predicate), so the
// address is simply base + disp.
instruct leaP8Narrow(mRegP dst, indOffset8Narrow mem)
%{
  predicate(Universe::narrow_oop_shift() == 0);
  match(Set dst mem);

  ins_cost(110);
  format %{ "leaq    $dst, $mem\t# ptr off8narrow @ leaP8Narrow" %}
  ins_encode %{
    Register  dst  = $dst$$Register;
    Register  base = as_Register($mem$$base);
    int       disp = $mem$$disp;

    __ daddiu(dst, base, disp);
  %}
  ins_pipe( ialu_regI_imm16 );
%}
// Load effective address: dst = base + (index << scale) + disp.
instruct leaPPosIdxScaleOff8(mRegP dst, basePosIndexScaleOffset8 mem)
%{
  match(Set dst mem);

  ins_cost(110);
  format %{ "leaq    $dst, $mem\t# @ PosIdxScaleOff8" %}
  ins_encode %{
    Register  dst   = $dst$$Register;
    Register  base  = as_Register($mem$$base);
    Register  index = as_Register($mem$$index);
    int       scale = $mem$$scale;
    int       disp  = $mem$$disp;

    if (scale == 0) {
       __ daddu(AT, base, index);
       __ daddiu(dst, AT, disp);
    } else {
       __ dsll(AT, index, scale);
       __ daddu(AT, base, AT);
       __ daddiu(dst, AT, disp);
    }
  %}

  ins_pipe( ialu_regI_imm16 );
%}
// Load effective address: dst = base + (index << scale), no displacement.
instruct leaPIdxScale(mRegP dst, indIndexScale mem)
%{
  match(Set dst mem);

  ins_cost(110);
  format %{ "leaq    $dst, $mem\t# @ leaPIdxScale" %}
  ins_encode %{
    Register  dst   = $dst$$Register;
    Register  base  = as_Register($mem$$base);
    Register  index = as_Register($mem$$index);
    int       scale = $mem$$scale;

    if (scale == 0) {
       __ daddu(dst, base, index);
    } else {
       __ dsll(AT, index, scale);
       __ daddu(dst, base, AT);
    }
  %}

  ins_pipe( ialu_regI_imm16 );
%}
12808 // Jump Direct Conditional - Label defines a relative address from Jcc+1
// Jump Direct Conditional - Label defines a relative address from Jcc+1
// Counted-loop back-branch on a register/register compare. "above"/"below"
// use slt (signed compare into AT) followed by a branch on AT; when the
// label is unbound a 0-offset branch is emitted for later patching.
instruct  jmpLoopEnd(cmpOp cop, mRegI src1, mRegI src2, label labl) %{
  match(CountedLoopEnd cop (CmpI src1 src2));
  effect(USE labl);

  ins_cost(300);
  format %{ "J$cop  $src1, $src2,  $labl\t# Loop end @ jmpLoopEnd" %}
  ins_encode %{
    Register op1 = $src1$$Register;
    Register op2 = $src2$$Register;
    Label     &L = *($labl$$label);
    int     flag = $cop$$cmpcode;

    switch(flag)
    {
      case 0x01: //equal
        if (&L)
          __ beq(op1, op2, L);
        else
          __ beq(op1, op2, (int)0);
        break;
      case 0x02: //not_equal
        if (&L)
          __ bne(op1, op2, L);
        else
          __ bne(op1, op2, (int)0);
        break;
      case 0x03: //above
        __ slt(AT, op2, op1);
        if(&L)
          __ bne(AT, R0, L);
        else
          __ bne(AT, R0, (int)0);
        break;
      case 0x04: //above_equal
        __ slt(AT, op1, op2);
        if(&L)
          __ beq(AT, R0, L);
        else
          __ beq(AT, R0, (int)0);
        break;
      case 0x05: //below
        __ slt(AT, op1, op2);
        if(&L)
          __ bne(AT, R0, L);
        else
          __ bne(AT, R0, (int)0);
        break;
      case 0x06: //below_equal
        __ slt(AT, op2, op1);
        if(&L)
          __ beq(AT, R0, L);
        else
          __ beq(AT, R0, (int)0);
        break;
      default:
        Unimplemented();
    }
    __ nop();   // branch delay slot
  %}
  ins_pipe( pipe_jump );
  ins_pc_relative(1);
%}
// Counted-loop back-branch against a 16-bit immediate.
// Computes AT = src1 - src2 once (addiu with the negated immediate;
// the immI16_sub operand guarantees -op2 fits), then branches on the
// sign/zero of AT with the single-register branch forms.
instruct  jmpLoopEnd_reg_imm16_sub(cmpOp cop, mRegI src1, immI16_sub src2, label labl) %{
  match(CountedLoopEnd cop (CmpI src1 src2));
  effect(USE labl);

  ins_cost(250);
  format %{ "J$cop  $src1, $src2,  $labl\t# Loop end @ jmpLoopEnd_reg_imm16_sub" %}
  ins_encode %{
    Register op1 = $src1$$Register;
    int      op2 = $src2$$constant;
    Label     &L = *($labl$$label);
    int     flag = $cop$$cmpcode;

    __ addiu32(AT, op1, -1 * op2);   // AT = op1 - op2

    switch(flag)
    {
      case 0x01: //equal
        if (&L)
          __ beq(AT, R0, L);
        else
          __ beq(AT, R0, (int)0);
        break;
      case 0x02: //not_equal
        if (&L)
          __ bne(AT, R0, L);
        else
          __ bne(AT, R0, (int)0);
        break;
      case 0x03: //above
        if(&L)
          __ bgtz(AT, L);
        else
          __ bgtz(AT, (int)0);
        break;
      case 0x04: //above_equal
        if(&L)
          __ bgez(AT, L);
        else
          __ bgez(AT,(int)0);
        break;
      case 0x05: //below
        if(&L)
          __ bltz(AT, L);
        else
          __ bltz(AT, (int)0);
        break;
      case 0x06: //below_equal
        if(&L)
          __ blez(AT, L);
        else
          __ blez(AT, (int)0);
        break;
      default:
        Unimplemented();
    }
    __ nop();   // branch delay slot
  %}
  ins_pipe( pipe_jump );
  ins_pc_relative(1);
%}
12935 /*
12936 // Jump Direct Conditional - Label defines a relative address from Jcc+1
12937 instruct jmpLoopEndU(cmpOpU cop, eFlagsRegU cmp, label labl) %{
12938 match(CountedLoopEnd cop cmp);
12939 effect(USE labl);
12941 ins_cost(300);
12942 format %{ "J$cop,u $labl\t# Loop end" %}
12943 size(6);
12944 opcode(0x0F, 0x80);
12945 ins_encode( Jcc( cop, labl) );
12946 ins_pipe( pipe_jump );
12947 ins_pc_relative(1);
12948 %}
12950 instruct jmpLoopEndUCF(cmpOpUCF cop, eFlagsRegUCF cmp, label labl) %{
12951 match(CountedLoopEnd cop cmp);
12952 effect(USE labl);
12954 ins_cost(200);
12955 format %{ "J$cop,u $labl\t# Loop end" %}
12956 opcode(0x0F, 0x80);
12957 ins_encode( Jcc( cop, labl) );
12958 ins_pipe( pipe_jump );
12959 ins_pc_relative(1);
12960 %}
12961 */
12963 // This match pattern is created for StoreIConditional since I cannot match IfNode without a RegFlags! fujie 2012/07/17
// This match pattern is created for StoreIConditional since I cannot match IfNode without a RegFlags! fujie 2012/07/17
// Branch on the AT "flag" register set by a preceding flag-producing
// instruction. NOTE: the branches look inverted on purpose — AT holds
// a success/condition value, so cmpcode "equal" branches when AT != 0.
instruct  jmpCon_flags(cmpOp cop, FlagsReg cr, label labl) %{
  match(If cop cr);
  effect(USE labl);

  ins_cost(300);
  format %{ "J$cop $labl  #mips uses AT as eflag @jmpCon_flags" %}

  ins_encode %{
    Label    &L =  *($labl$$label);
    switch($cop$$cmpcode)
    {
      case 0x01: //equal
        if (&L)
          __ bne(AT, R0, L);
        else
          __ bne(AT, R0, (int)0);
        break;
      case 0x02: //not equal
        if (&L)
          __ beq(AT, R0, L);
        else
          __ beq(AT, R0, (int)0);
        break;
      default:
        Unimplemented();
    }
    __ nop();   // branch delay slot
  %}

  ins_pipe( pipe_jump );
  ins_pc_relative(1);
%}
12998 // ============================================================================
12999 // The 2nd slow-half of a subtype check. Scan the subklass's 2ndary superklass
13000 // array for an instance of the superklass. Set a hidden internal cache on a
13001 // hit (cache is checked with exposed code in gen_subtype_check()). Return
13002 // NZ for a miss or zero for a hit. The encoding ALSO sets flags.
// The 2nd slow-half of a subtype check. Scan the subklass's 2ndary superklass
// array for an instance of the superklass. Set a hidden internal cache on a
// hit (cache is checked with exposed code in gen_subtype_check()). Return
// NZ for a miss or zero for a hit.
instruct partialSubtypeCheck( mRegP result, no_T8_mRegP sub, no_T8_mRegP super, mT8RegI tmp ) %{
  match(Set result (PartialSubtypeCheck sub super));
  effect(KILL tmp);
  ins_cost(1100);  // slightly larger than the next version
  format %{ "partialSubtypeCheck result=$result, sub=$sub, super=$super, tmp=$tmp " %}

  ins_encode( enc_PartialSubtypeCheck(result, sub, super, tmp) );
  ins_pipe( pipe_slow );
%}
13014 // Conditional-store of an int value.
13015 // ZF flag is set on success, reset otherwise. Implemented with a CMPXCHG on Intel.
// Conditional-store of an int value.
// LL/SC compare-and-swap loop: leaves 0 in AT on failure, non-zero on
// success. Statement order (delay slots, the retry label) is load-bearing;
// only indexless simm16 addresses are supported.
instruct storeIConditional( memory mem, mRegI oldval, mRegI newval, FlagsReg cr ) %{
  match(Set cr (StoreIConditional mem (Binary oldval newval)));
//  effect(KILL oldval);
  format %{ "CMPXCHG $newval, $mem, $oldval \t# @storeIConditional" %}

  ins_encode %{
    Register oldval = $oldval$$Register;
    Register newval = $newval$$Register;
    Address  addr(as_Register($mem$$base), $mem$$disp);
    Label again, failure;

//    int      base = $mem$$base;
    int     index = $mem$$index;
    int     scale = $mem$$scale;
    int      disp = $mem$$disp;

    guarantee(Assembler::is_simm16(disp), "");

    if( index != 0 ) {
      __ stop("in storeIConditional: index != 0");
    } else {
      __ bind(again);
      if(UseSyncLevel <= 1000) __ sync();
      __ ll(AT, addr);                        // load-linked
      __ bne(AT, oldval, failure);
      __ delayed()->addu(AT, R0, R0);         // AT = 0 (failure flag) in delay slot

      __ addu(AT, newval, R0);
      __ sc(AT, addr);                        // store-conditional; AT = 1 on success
      __ beq(AT, R0, again);                  // SC failed: retry
      __ delayed()->addiu(AT, R0, 0xFF);      // success flag in delay slot
      __ bind(failure);
      __ sync();
    }
  %}

  ins_pipe( long_memory_op );
%}
13055 // Conditional-store of a long value.
13056 // ZF flag is set on success, reset otherwise. Implemented with a CMPXCHG.
13057 instruct storeLConditional(memory mem, t2RegL oldval, mRegL newval, FlagsReg cr )
13058 %{
13059 match(Set cr (StoreLConditional mem (Binary oldval newval)));
13060 effect(KILL oldval);
13062 format %{ "cmpxchg $mem, $newval\t# If $oldval == $mem then store $newval into $mem" %}
13063 ins_encode%{
13064 Register oldval = $oldval$$Register;
13065 Register newval = $newval$$Register;
13066 Address addr((Register)$mem$$base, $mem$$disp);
13068 int index = $mem$$index;
13069 int scale = $mem$$scale;
13070 int disp = $mem$$disp;
13072 guarantee(Assembler::is_simm16(disp), "");
13074 if( index != 0 ) {
13075 __ stop("in storeIConditional: index != 0");
13076 } else {
13077 __ cmpxchg(newval, addr, oldval);
13078 }
13079 %}
13080 ins_pipe( long_memory_op );
13081 %}
// CompareAndSwapI: 32-bit CAS via MacroAssembler::cmpxchg32.
// The success flag produced in AT is copied into the result register.
instruct compareAndSwapI( mRegI res, mRegP mem_ptr, mS2RegI oldval, mRegI newval) %{
  match(Set res (CompareAndSwapI mem_ptr (Binary oldval newval)));
  effect(KILL oldval);
//  match(CompareAndSwapI mem_ptr (Binary oldval newval));
  format %{ "CMPXCHG $newval, [$mem_ptr], $oldval @ compareAndSwapI\n\t"
            "MOV $res, 1 @ compareAndSwapI\n\t"
            "BNE AT, R0 @ compareAndSwapI\n\t"
            "MOV $res, 0 @ compareAndSwapI\n"
            "L:" %}
  ins_encode %{
    Register newval = $newval$$Register;
    Register oldval = $oldval$$Register;
    Register res    = $res$$Register;
    Address  addr($mem_ptr$$Register, 0);
    Label L;

    __ cmpxchg32(newval, addr, oldval);
    __ move(res, AT);     // AT: 1 on success, 0 on failure
  %}
  ins_pipe( long_memory_op );
%}
13106 //FIXME:
//FIXME:
// CompareAndSwapP: 64-bit pointer CAS via MacroAssembler::cmpxchg.
instruct compareAndSwapP( mRegI res,  mRegP mem_ptr, s2_RegP oldval, mRegP newval) %{
  match(Set res (CompareAndSwapP mem_ptr (Binary oldval newval)));
  effect(KILL oldval);
  format %{ "CMPXCHG $newval, [$mem_ptr], $oldval @ compareAndSwapP\n\t"
            "MOV $res, AT @ compareAndSwapP\n\t"
            "L:" %}
  ins_encode %{
    Register newval = $newval$$Register;
    Register oldval = $oldval$$Register;
    Register res    = $res$$Register;
    Address  addr($mem_ptr$$Register, 0);
    Label L;

    __ cmpxchg(newval, addr, oldval);
    __ move(res, AT);     // AT: 1 on success, 0 on failure
  %}
  ins_pipe( long_memory_op );
%}
// CompareAndSwapN: CAS of a 32-bit narrow oop via cmpxchg32.
// oldval is sign-extended first to match ll/sc semantics (see comment).
instruct compareAndSwapN( mRegI res, mRegP mem_ptr, t2_RegN oldval, mRegN newval) %{
  match(Set res (CompareAndSwapN mem_ptr (Binary oldval newval)));
  effect(KILL oldval);
  format %{ "CMPXCHG $newval, [$mem_ptr], $oldval @ compareAndSwapN\n\t"
            "MOV $res, AT @ compareAndSwapN\n\t"
            "L:" %}
  ins_encode %{
    Register newval = $newval$$Register;
    Register oldval = $oldval$$Register;
    Register res    = $res$$Register;
    Address  addr($mem_ptr$$Register, 0);
    Label L;

    /* 2013/7/19 Jin: cmpxchg32 is implemented with ll/sc, which will do sign extension.
     *      Thus, we should extend oldval's sign for correct comparision.
     */
    __ sll(oldval, oldval, 0);   // sign-extend low 32 bits

    __ cmpxchg32(newval, addr, oldval);
    __ move(res, AT);            // AT: 1 on success, 0 on failure
  %}
  ins_pipe( long_memory_op );
%}
13150 //----------Max and Min--------------------------------------------------------
13151 // Min Instructions
13152 ////
13153 // *** Min and Max using the conditional move are slower than the
13154 // *** branch version on a Pentium III.
13155 // // Conditional move for min
13156 //instruct cmovI_reg_lt( eRegI op2, eRegI op1, eFlagsReg cr ) %{
13157 // effect( USE_DEF op2, USE op1, USE cr );
13158 // format %{ "CMOVlt $op2,$op1\t! min" %}
13159 // opcode(0x4C,0x0F);
13160 // ins_encode( OpcS, OpcP, RegReg( op2, op1 ) );
13161 // ins_pipe( pipe_cmov_reg );
13162 //%}
13163 //
13164 //// Min Register with Register (P6 version)
13165 //instruct minI_eReg_p6( eRegI op1, eRegI op2 ) %{
13166 // predicate(VM_Version::supports_cmov() );
13167 // match(Set op2 (MinI op1 op2));
13168 // ins_cost(200);
13169 // expand %{
13170 // eFlagsReg cr;
13171 // compI_eReg(cr,op1,op2);
13172 // cmovI_reg_lt(op2,op1,cr);
13173 // %}
13174 //%}
13176 // Min Register with Register (generic version)
// Min Register with Register (generic version)
// Branchless: AT = (src < dst); if AT, dst = src (conditional move).
instruct minI_Reg_Reg(mRegI dst, mRegI src) %{
  match(Set dst (MinI dst src));
  //effect(KILL flags);
  ins_cost(80);

  format %{ "MIN    $dst, $src @minI_Reg_Reg" %}
  ins_encode %{
    Register dst   = $dst$$Register;
    Register src   = $src$$Register;

    __ slt(AT, src, dst);
    __ movn(dst, src, AT);

  %}

  ins_pipe( pipe_slow );
%}
13195 // Max Register with Register
13196 // *** Min and Max using the conditional move are slower than the
13197 // *** branch version on a Pentium III.
13198 // // Conditional move for max
13199 //instruct cmovI_reg_gt( eRegI op2, eRegI op1, eFlagsReg cr ) %{
13200 // effect( USE_DEF op2, USE op1, USE cr );
13201 // format %{ "CMOVgt $op2,$op1\t! max" %}
13202 // opcode(0x4F,0x0F);
13203 // ins_encode( OpcS, OpcP, RegReg( op2, op1 ) );
13204 // ins_pipe( pipe_cmov_reg );
13205 //%}
13206 //
13207 // // Max Register with Register (P6 version)
13208 //instruct maxI_eReg_p6( eRegI op1, eRegI op2 ) %{
13209 // predicate(VM_Version::supports_cmov() );
13210 // match(Set op2 (MaxI op1 op2));
13211 // ins_cost(200);
13212 // expand %{
13213 // eFlagsReg cr;
13214 // compI_eReg(cr,op1,op2);
13215 // cmovI_reg_gt(op2,op1,cr);
13216 // %}
13217 //%}
13219 // Max Register with Register (generic version)
// Integer maximum, branch-free: dst = max(dst, src).
instruct maxI_Reg_Reg(mRegI dst, mRegI src) %{
  match(Set dst (MaxI dst src));
  ins_cost(80);

  format %{ "MAX $dst, $src @maxI_Reg_Reg" %}

  ins_encode %{
    Register dst = $dst$$Register;
    Register src = $src$$Register;

    // AT = (dst < src); conditionally replace dst with src when AT != 0.
    // Clobbers AT (assembler temporary).
    __ slt(AT, dst, src);
    __ movn(dst, src, AT);

  %}

  ins_pipe( pipe_slow );
%}
// Specialized maximum against zero: dst = max(dst, 0).
// Cheaper (ins_cost 50) than the generic reg-reg form, so the matcher
// prefers it when the second operand is the constant 0.
instruct maxI_Reg_zero(mRegI dst, immI0 zero) %{
  match(Set dst (MaxI dst zero));
  ins_cost(50);

  format %{ "MAX $dst, 0 @maxI_Reg_zero" %}

  ins_encode %{
    Register dst = $dst$$Register;

    // AT = (dst < 0); if so, clamp dst to zero. Clobbers AT.
    __ slt(AT, dst, R0);
    __ movn(dst, R0, AT);

  %}

  ins_pipe( pipe_slow );
%}
// Zero-extend the low 32 bits of a long: dst = src & 0xFFFFFFFF.
// dext extracts bits [0,32) of src into dst, clearing the upper half.
instruct zerox_long_reg_reg(mRegL dst, mRegL src, immL_32bits mask)
%{
  match(Set dst (AndL src mask));

  format %{ "movl $dst, $src\t# zero-extend long @ zerox_long_reg_reg" %}
  ins_encode %{
    Register dst = $dst$$Register;
    Register src = $src$$Register;

    __ dext(dst, src, 0, 32);
  %}
  ins_pipe(ialu_regI_regI);
%}
// Combine two ints into one long: dst = (src2 << 32) | zext(src1).
// src1 supplies the low 32 bits, src2 the high 32 bits.
instruct combine_i2l(mRegL dst, mRegI src1, immL_32bits mask, mRegI src2, immI_32 shift32)
%{
  match(Set dst (OrL (AndL (ConvI2L src1) mask) (LShiftL (ConvI2L src2) shift32)));

  format %{ "combine_i2l $dst, $src2(H), $src1(L) @ combine_i2l" %}
  ins_encode %{
    Register dst = $dst$$Register;
    Register src1 = $src1$$Register;
    Register src2 = $src2$$Register;

    // Three cases so neither source is clobbered before it is consumed.
    if (src1 == dst) {
      // Low half already in place; insert src2 into bits [32,64).
      __ dinsu(dst, src2, 32, 32);
    } else if (src2 == dst) {
      // Shift the high half into position first, then insert the low half.
      __ dsll32(dst, dst, 0);
      __ dins(dst, src1, 0, 32);
    } else {
      // dst aliases neither source: build low half, then insert high half.
      __ dext(dst, src1, 0, 32);
      __ dinsu(dst, src2, 32, 32);
    }
  %}
  ins_pipe(ialu_regI_regI);
%}
13292 // Zero-extend convert int to long
// Zero-extend convert int to long: dst = zext32(src).
instruct convI2L_reg_reg_zex(mRegL dst, mRegI src, immL_32bits mask)
%{
  match(Set dst (AndL (ConvI2L src) mask));

  format %{ "movl $dst, $src\t# i2l zero-extend @ convI2L_reg_reg_zex" %}
  ins_encode %{
    Register dst = $dst$$Register;
    Register src = $src$$Register;

    // dext copies bits [0,32) and clears the upper 32 bits of dst.
    __ dext(dst, src, 0, 32);
  %}
  ins_pipe(ialu_regI_regI);
%}
// Long -> int -> long round trip with a 0xFFFFFFFF mask collapses to a
// single zero-extension of the low 32 bits.
instruct convL2I2L_reg_reg_zex(mRegL dst, mRegL src, immL_32bits mask)
%{
  match(Set dst (AndL (ConvI2L (ConvL2I src)) mask));

  format %{ "movl $dst, $src\t# i2l zero-extend @ convL2I2L_reg_reg_zex" %}
  ins_encode %{
    Register dst = $dst$$Register;
    Register src = $src$$Register;

    __ dext(dst, src, 0, 32);
  %}
  ins_pipe(ialu_regI_regI);
%}
13321 // Match loading integer and casting it to unsigned int in long register.
13322 // LoadI + ConvI2L + AndL 0xffffffff.
// Fold LoadI + ConvI2L + AndL 0xFFFFFFFF (mask on the right) into a single
// unsigned 32-bit load (lwu via the load_N_enc encoding).
instruct loadUI2L_rmask(mRegL dst, memory mem, immL_32bits mask) %{
  match(Set dst (AndL (ConvI2L (LoadI mem)) mask));

  format %{ "lwu $dst, $mem \t// zero-extend to long @ loadUI2L_rmask" %}
  ins_encode (load_N_enc(dst, mem));
  ins_pipe(ialu_loadI);
%}
// Same folding as loadUI2L_rmask, but matches the commuted AndL form
// (mask on the left).
instruct loadUI2L_lmask(mRegL dst, memory mem, immL_32bits mask) %{
  match(Set dst (AndL mask (ConvI2L (LoadI mem))));

  format %{ "lwu $dst, $mem \t// zero-extend to long @ loadUI2L_lmask" %}
  ins_encode (load_N_enc(dst, mem));
  ins_pipe(ialu_loadI);
%}
13340 // ============================================================================
13341 // Safepoint Instruction
// Safepoint poll through a register that already holds the polling-page
// address. NOTE(review): predicate(false) disables this rule — the
// register-free safePoint_poll below is used instead; presumably kept for
// reference or future use — confirm.
instruct safePoint_poll_reg(mRegP poll) %{
  match(SafePoint poll);
  predicate(false);
  effect(USE poll);

  ins_cost(125);
  format %{ "Safepoint @ [$poll] : poll for GC @ safePoint_poll_reg" %}

  ins_encode %{
    Register poll_reg = $poll$$Register;

    __ block_comment("Safepoint:");
    // Mark the load site so the VM can identify it as a safepoint poll.
    __ relocate(relocInfo::poll_type);
    // The load faults when the polling page is protected, trapping into
    // the VM's safepoint handler. Clobbers AT.
    __ lw(AT, poll_reg, 0);
  %}

  ins_pipe( ialu_storeI );
%}
// Safepoint poll: materialize the polling-page address and load from it.
// The load faults when the page is protected, entering the VM's
// safepoint handler.
instruct safePoint_poll() %{
  match(SafePoint);

  ins_cost(105);
  format %{ "poll for GC @ safePoint_poll" %}

  ins_encode %{
    __ block_comment("Safepoint:");
    // Mark the poll site for the relocation/safepoint machinery.
    __ relocate(relocInfo::poll_type);
    __ set64(T9, (long)os::get_polling_page());
    // NOTE(review): clobbers T9 and AT without an effect() declaration —
    // presumably both are reserved scratch registers in this port; confirm.
    __ lw(AT, T9, 0);
  %}

  ins_pipe( ialu_storeI );
%}
13377 //----------Arithmetic Conversion Instructions---------------------------------
// RoundFloat is a no-op on this port: the empty encoding emits no code and
// ins_cost(0) lets the matcher fold the node away. (Presumably float values
// are already kept at the correct precision in FP registers — confirm.)
instruct roundFloat_nop(regF dst)
%{
  match(Set dst (RoundFloat dst));

  ins_cost(0);
  ins_encode();
  ins_pipe(empty);
%}
// RoundDouble is likewise a no-op: empty encoding, zero cost.
instruct roundDouble_nop(regD dst)
%{
  match(Set dst (RoundDouble dst));

  ins_cost(0);
  ins_encode();
  ins_pipe(empty);
%}
13397 //---------- Zeros Count Instructions ------------------------------------------
13398 // CountLeadingZerosINode CountTrailingZerosINode
// Count leading zeros of an int with the hardware clz instruction.
// Guarded by -XX:+UseCountLeadingZerosInstruction.
instruct countLeadingZerosI(mRegI dst, mRegI src) %{
  predicate(UseCountLeadingZerosInstruction);
  match(Set dst (CountLeadingZerosI src));

  format %{ "clz $dst, $src\t# count leading zeros (int)" %}
  ins_encode %{
    __ clz($dst$$Register, $src$$Register);
  %}
  ins_pipe( ialu_regL_regL );
%}
// Count leading zeros of a long with the 64-bit dclz instruction.
instruct countLeadingZerosL(mRegI dst, mRegL src) %{
  predicate(UseCountLeadingZerosInstruction);
  match(Set dst (CountLeadingZerosL src));

  format %{ "dclz $dst, $src\t# count leading zeros (long)" %}
  ins_encode %{
    __ dclz($dst$$Register, $src$$Register);
  %}
  ins_pipe( ialu_regL_regL );
%}
// Count trailing zeros of an int.
// Guarded by -XX:+UseCountTrailingZerosInstruction.
instruct countTrailingZerosI(mRegI dst, mRegI src) %{
  predicate(UseCountTrailingZerosInstruction);
  match(Set dst (CountTrailingZerosI src));

  format %{ "ctz $dst, $src\t# count trailing zeros (int)" %}
  ins_encode %{
    // ctz and dctz are Loongson GS-specific instructions.
    __ ctz($dst$$Register, $src$$Register);
  %}
  ins_pipe( ialu_regL_regL );
%}
// Count trailing zeros of a long with the 64-bit dctz instruction
// (Loongson GS extension). Guarded by -XX:+UseCountTrailingZerosInstruction.
instruct countTrailingZerosL(mRegI dst, mRegL src) %{
  predicate(UseCountTrailingZerosInstruction);
  match(Set dst (CountTrailingZerosL src));

  // Fix: the format previously printed "dcto", but the emitted
  // instruction is dctz — keep the disassembly comment truthful.
  format %{ "dctz $dst, $src\t# count trailing zeros (long)" %}
  ins_encode %{
    __ dctz($dst$$Register, $src$$Register);
  %}
  ins_pipe( ialu_regL_regL );
%}
13444 // ====================VECTOR INSTRUCTIONS=====================================
13446 // Load vectors (8 bytes long)
// Load an 8-byte vector into a double FP register using the standard
// double-load encoding.
instruct loadV8(vecD dst, memory mem) %{
  predicate(n->as_LoadVector()->memory_size() == 8);
  match(Set dst (LoadVector mem));
  ins_cost(125);
  format %{ "load $dst, $mem\t! load vector (8 bytes)" %}
  ins_encode(load_D_enc(dst, mem));
  ins_pipe( fpu_loadF );
%}
13456 // Store vectors (8 bytes long)
// Store an 8-byte vector from a double FP register using the standard
// double-store encoding.
instruct storeV8(memory mem, vecD src) %{
  predicate(n->as_StoreVector()->memory_size() == 8);
  match(Set mem (StoreVector mem src));
  ins_cost(145);
  format %{ "store $mem, $src\t! store vector (8 bytes)" %}
  ins_encode(store_D_reg_enc(mem, src));
  ins_pipe( fpu_storeF );
%}
// Replicate a byte into all 8 lanes using the Loongson DSP replv_ob
// instruction. Only selected on 3A2000+ hardware (Use3A2000); cheaper
// (cost 100) than the generic dins-chain version below.
instruct Repl8B_DSP(vecD dst, mRegI src) %{
  predicate(n->as_Vector()->length() == 8 && Use3A2000);
  match(Set dst (ReplicateB src));
  ins_cost(100);
  format %{ "replv_ob AT, $src\n\t"
            "dmtc1 AT, $dst\t! replicate8B" %}
  ins_encode %{
    // Build the replicated pattern in AT, then move the 64-bit value
    // into the FP register. Clobbers AT.
    __ replv_ob(AT, $src$$Register);
    __ dmtc1(AT, $dst$$FloatRegister);
  %}
  ins_pipe( pipe_mtc1 );
%}
// Generic byte replication (no DSP): double the byte pattern three times
// with insert instructions (1 -> 2 -> 4 -> 8 copies), then move to FP.
instruct Repl8B(vecD dst, mRegI src) %{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (ReplicateB src));
  ins_cost(140);
  format %{ "move AT, $src\n\t"
            "dins AT, AT, 8, 8\n\t"
            "dins AT, AT, 16, 16\n\t"
            "dinsu AT, AT, 32, 32\n\t"
            "dmtc1 AT, $dst\t! replicate8B" %}
  ins_encode %{
    __ move(AT, $src$$Register);
    __ dins(AT, AT, 8, 8);    // byte  -> halfword
    __ dins(AT, AT, 16, 16);  // halfword -> word
    __ dinsu(AT, AT, 32, 32); // word -> doubleword
    __ dmtc1(AT, $dst$$FloatRegister);
  %}
  ins_pipe( pipe_mtc1 );
%}
// Replicate an immediate byte via the DSP repl_ob instruction (3A2000+).
instruct Repl8B_imm_DSP(vecD dst, immI con) %{
  predicate(n->as_Vector()->length() == 8 && Use3A2000);
  match(Set dst (ReplicateB con));
  ins_cost(110);
  format %{ "repl_ob AT, [$con]\n\t"
            "dmtc1 AT, $dst,0x00\t! replicate8B($con)" %}
  ins_encode %{
    int val = $con$$constant;
    __ repl_ob(AT, val);
    __ dmtc1(AT, $dst$$FloatRegister);
  %}
  ins_pipe( pipe_mtc1 );
%}
// Generic immediate-byte replication: same doubling dins chain as Repl8B,
// starting from the constant.
instruct Repl8B_imm(vecD dst, immI con) %{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (ReplicateB con));
  ins_cost(150);
  format %{ "move AT, [$con]\n\t"
            "dins AT, AT, 8, 8\n\t"
            "dins AT, AT, 16, 16\n\t"
            "dinsu AT, AT, 32, 32\n\t"
            "dmtc1 AT, $dst,0x00\t! replicate8B($con)" %}
  ins_encode %{
    __ move(AT, $con$$constant);
    __ dins(AT, AT, 8, 8);
    __ dins(AT, AT, 16, 16);
    __ dinsu(AT, AT, 32, 32);
    __ dmtc1(AT, $dst$$FloatRegister);
  %}
  ins_pipe( pipe_mtc1 );
%}
// Replicate byte zero: a single move of R0 (hardwired zero) suffices.
instruct Repl8B_zero(vecD dst, immI0 zero) %{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (ReplicateB zero));
  ins_cost(90);
  format %{ "dmtc1 R0, $dst\t! replicate8B zero" %}
  ins_encode %{
    __ dmtc1(R0, $dst$$FloatRegister);
  %}
  ins_pipe( pipe_mtc1 );
%}
// Replicate byte -1: nor(AT, R0, R0) produces all-ones in AT.
instruct Repl8B_M1(vecD dst, immI_M1 M1) %{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (ReplicateB M1));
  ins_cost(80);
  format %{ "dmtc1 -1, $dst\t! replicate8B -1" %}
  ins_encode %{
    __ nor(AT, R0, R0);  // AT = ~(0 | 0) = 0xFFFFFFFFFFFFFFFF
    __ dmtc1(AT, $dst$$FloatRegister);
  %}
  ins_pipe( pipe_mtc1 );
%}
// Replicate a halfword into all 4 lanes using the DSP replv_qh
// instruction (3A2000+ only).
instruct Repl4S_DSP(vecD dst, mRegI src) %{
  predicate(n->as_Vector()->length() == 4 && Use3A2000);
  match(Set dst (ReplicateS src));
  ins_cost(100);
  format %{ "replv_qh AT, $src\n\t"
            "dmtc1 AT, $dst\t! replicate4S" %}
  ins_encode %{
    __ replv_qh(AT, $src$$Register);
    __ dmtc1(AT, $dst$$FloatRegister);
  %}
  ins_pipe( pipe_mtc1 );
%}
// Generic halfword replication: double the pattern twice (1 -> 2 -> 4
// copies) with insert instructions, then move to FP.
instruct Repl4S(vecD dst, mRegI src) %{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (ReplicateS src));
  ins_cost(120);
  format %{ "move AT, $src \n\t"
            "dins AT, AT, 16, 16\n\t"
            "dinsu AT, AT, 32, 32\n\t"
            "dmtc1 AT, $dst\t! replicate4S" %}
  ins_encode %{
    __ move(AT, $src$$Register);
    __ dins(AT, AT, 16, 16);  // halfword -> word
    __ dinsu(AT, AT, 32, 32); // word -> doubleword
    __ dmtc1(AT, $dst$$FloatRegister);
  %}
  ins_pipe( pipe_mtc1 );
%}
// Replicate an immediate halfword via DSP instructions (3A2000+).
// repl_qh takes the constant directly when it fits in its 10-bit
// immediate; otherwise materialize it with li32 and use replv_qh.
instruct Repl4S_imm_DSP(vecD dst, immI con) %{
  predicate(n->as_Vector()->length() == 4 && Use3A2000);
  match(Set dst (ReplicateS con));
  ins_cost(100);
  format %{ "replv_qh AT, [$con]\n\t"
            "dmtc1 AT, $dst\t! replicate4S($con)" %}
  ins_encode %{
    int val = $con$$constant;
    if ( Assembler::is_simm(val, 10)) {
      //repl_qh supports 10 bits immediate
      __ repl_qh(AT, val);
    } else {
      __ li32(AT, val);
      __ replv_qh(AT, AT);
    }
    __ dmtc1(AT, $dst$$FloatRegister);
  %}
  ins_pipe( pipe_mtc1 );
%}
// Generic immediate-halfword replication via the dins doubling chain.
instruct Repl4S_imm(vecD dst, immI con) %{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (ReplicateS con));
  ins_cost(110);
  format %{ "move AT, [$con]\n\t"
            "dins AT, AT, 16, 16\n\t"
            "dinsu AT, AT, 32, 32\n\t"
            "dmtc1 AT, $dst\t! replicate4S($con)" %}
  ins_encode %{
    __ move(AT, $con$$constant);
    __ dins(AT, AT, 16, 16);
    __ dinsu(AT, AT, 32, 32);
    __ dmtc1(AT, $dst$$FloatRegister);
  %}
  ins_pipe( pipe_mtc1 );
%}
// Replicate halfword zero: single move of R0 into the FP register.
instruct Repl4S_zero(vecD dst, immI0 zero) %{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (ReplicateS zero));
  format %{ "dmtc1 R0, $dst\t! replicate4S zero" %}
  ins_encode %{
    __ dmtc1(R0, $dst$$FloatRegister);
  %}
  ins_pipe( pipe_mtc1 );
%}
// Replicate halfword -1: all-ones via nor, then move to FP.
instruct Repl4S_M1(vecD dst, immI_M1 M1) %{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (ReplicateS M1));
  format %{ "dmtc1 -1, $dst\t! replicate4S -1" %}
  ins_encode %{
    __ nor(AT, R0, R0);  // AT = all ones
    __ dmtc1(AT, $dst$$FloatRegister);
  %}
  ins_pipe( pipe_mtc1 );
%}
13642 // Replicate integer (4 byte) scalar to be vector
// Replicate an int into both 32-bit lanes: insert src into the low and
// high halves of AT, then move the doubleword to FP.
instruct Repl2I(vecD dst, mRegI src) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (ReplicateI src));
  format %{ "dins AT, $src, 0, 32\n\t"
            "dinsu AT, $src, 32, 32\n\t"
            "dmtc1 AT, $dst\t! replicate2I" %}
  ins_encode %{
    __ dins(AT, $src$$Register, 0, 32);   // low word
    __ dinsu(AT, $src$$Register, 32, 32); // high word
    __ dmtc1(AT, $dst$$FloatRegister);
  %}
  ins_pipe( pipe_mtc1 );
%}
13657 // Replicate integer (4 byte) scalar immediate to be vector by loading from const table.
// Replicate an immediate int into both 32-bit lanes: materialize the
// constant with li32 (which sign-extends into a full register), then
// duplicate the low word into the high word and move to FP.
// NOTE(review): tmp (A7) is declared KILLed but never referenced in the
// encoding — presumably reserving the register; confirm.
instruct Repl2I_imm(vecD dst, immI con, mA7RegI tmp) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (ReplicateI con));
  effect(KILL tmp);
  // Fix: format now matches the emitted code — li32 takes only the
  // constant, and dinsu inserts 32 bits at position 32.
  format %{ "li32 AT, [$con]\n\t"
            "dinsu AT, AT, 32, 32\n\t"
            "dmtc1 AT, $dst\t! replicate2I($con)" %}
  ins_encode %{
    int val = $con$$constant;
    __ li32(AT, val);
    __ dinsu(AT, AT, 32, 32);
    __ dmtc1(AT, $dst$$FloatRegister);
  %}
  ins_pipe( pipe_mtc1 );
%}
13674 // Replicate integer (4 byte) scalar zero to be vector
// Replicate int zero: single move of R0 into the FP register.
instruct Repl2I_zero(vecD dst, immI0 zero) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (ReplicateI zero));
  format %{ "dmtc1 R0, $dst\t! replicate2I zero" %}
  ins_encode %{
    __ dmtc1(R0, $dst$$FloatRegister);
  %}
  ins_pipe( pipe_mtc1 );
%}
13685 // Replicate integer (4 byte) scalar -1 to be vector
// Replicate int -1: all-ones via nor, then move to FP. Clobbers AT.
instruct Repl2I_M1(vecD dst, immI_M1 M1) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (ReplicateI M1));
  format %{ "dmtc1 -1, $dst\t! replicate2I -1, use AT" %}
  ins_encode %{
    __ nor(AT, R0, R0);  // AT = all ones
    __ dmtc1(AT, $dst$$FloatRegister);
  %}
  ins_pipe( pipe_mtc1 );
%}
13697 // Replicate float (4 byte) scalar to be vector
// Replicate a float into both lanes: cvt.ps.s packs two singles into a
// paired-single value; using src for both halves duplicates it.
instruct Repl2F(vecD dst, regF src) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (ReplicateF src));
  format %{ "cvt.ps $dst, $src, $src\t! replicate2F" %}
  ins_encode %{
    __ cvt_ps_s($dst$$FloatRegister, $src$$FloatRegister, $src$$FloatRegister);
  %}
  ins_pipe( pipe_slow );
%}
13708 // Replicate float (4 byte) scalar zero to be vector
// Replicate float +0.0: the all-zero bit pattern is two packed +0.0f,
// so a single move of R0 suffices.
instruct Repl2F_zero(vecD dst, immF0 zero) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (ReplicateF zero));
  format %{ "dmtc1 R0, $dst\t! replicate2F zero" %}
  ins_encode %{
    __ dmtc1(R0, $dst$$FloatRegister);
  %}
  ins_pipe( pipe_mtc1 );
%}
13720 // ====================VECTOR ARITHMETIC=======================================
13722 // --------------------------------- ADD --------------------------------------
13724 // Floats vector add
// Packed 2-float add, two-operand form: dst += src (paired-single add.ps).
instruct vadd2F(vecD dst, vecD src) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (AddVF dst src));
  format %{ "add.ps $dst,$src\t! add packed2F" %}
  ins_encode %{
    __ add_ps($dst$$FloatRegister, $dst$$FloatRegister, $src$$FloatRegister);
  %}
  ins_pipe( pipe_slow );
%}
// Packed 2-float add, three-operand form: dst = src1 + src2.
instruct vadd2F3(vecD dst, vecD src1, vecD src2) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (AddVF src1 src2));
  format %{ "add.ps $dst,$src1,$src2\t! add packed2F" %}
  ins_encode %{
    __ add_ps($dst$$FloatRegister, $src1$$FloatRegister, $src2$$FloatRegister);
  %}
  ins_pipe( fpu_regF_regF );
%}
13745 // --------------------------------- SUB --------------------------------------
13747 // Floats vector sub
// Packed 2-float subtract: dst -= src (paired-single sub.ps).
instruct vsub2F(vecD dst, vecD src) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (SubVF dst src));
  format %{ "sub.ps $dst,$src\t! sub packed2F" %}
  ins_encode %{
    __ sub_ps($dst$$FloatRegister, $dst$$FloatRegister, $src$$FloatRegister);
  %}
  ins_pipe( fpu_regF_regF );
%}
13758 // --------------------------------- MUL --------------------------------------
13760 // Floats vector mul
// Packed 2-float multiply, two-operand form: dst *= src.
instruct vmul2F(vecD dst, vecD src) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (MulVF dst src));
  format %{ "mul.ps $dst, $src\t! mul packed2F" %}
  ins_encode %{
    __ mul_ps($dst$$FloatRegister, $dst$$FloatRegister, $src$$FloatRegister);
  %}
  ins_pipe( fpu_regF_regF );
%}
// Packed 2-float multiply, three-operand form: dst = src1 * src2.
// (No packed divide rule exists: MIPS has no div.ps — see note below.)
instruct vmul2F3(vecD dst, vecD src1, vecD src2) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (MulVF src1 src2));
  format %{ "mul.ps $dst, $src1, $src2\t! mul packed2F" %}
  ins_encode %{
    __ mul_ps($dst$$FloatRegister, $src1$$FloatRegister, $src2$$FloatRegister);
  %}
  ins_pipe( fpu_regF_regF );
%}
13781 // --------------------------------- DIV --------------------------------------
13782 // MIPS do not have div.ps
13785 //----------PEEPHOLE RULES-----------------------------------------------------
13786 // These must follow all instruction definitions as they use the names
13787 // defined in the instructions definitions.
13788 //
// peepmatch ( root_instr_name [preceding_instruction]* );
13790 //
13791 // peepconstraint %{
13792 // (instruction_number.operand_name relational_op instruction_number.operand_name
13793 // [, ...] );
13794 // // instruction numbers are zero-based using left to right order in peepmatch
13795 //
13796 // peepreplace ( instr_name ( [instruction_number.operand_name]* ) );
13797 // // provide an instruction_number.operand_name for each operand that appears
13798 // // in the replacement instruction's match rule
13799 //
13800 // ---------VM FLAGS---------------------------------------------------------
13801 //
13802 // All peephole optimizations can be turned off using -XX:-OptoPeephole
13803 //
13804 // Each peephole rule is given an identifying number starting with zero and
13805 // increasing by one in the order seen by the parser. An individual peephole
13806 // can be enabled, and all others disabled, by using -XX:OptoPeepholeAt=#
13807 // on the command-line.
13808 //
13809 // ---------CURRENT LIMITATIONS----------------------------------------------
13810 //
13811 // Only match adjacent instructions in same basic block
13812 // Only equality constraints
13813 // Only constraints between operands, not (0.dest_reg == EAX_enc)
13814 // Only one replacement instruction
13815 //
13816 // ---------EXAMPLE----------------------------------------------------------
13817 //
13818 // // pertinent parts of existing instructions in architecture description
13819 // instruct movI(eRegI dst, eRegI src) %{
13820 // match(Set dst (CopyI src));
13821 // %}
13822 //
13823 // instruct incI_eReg(eRegI dst, immI1 src, eFlagsReg cr) %{
13824 // match(Set dst (AddI dst src));
13825 // effect(KILL cr);
13826 // %}
13827 //
13828 // // Change (inc mov) to lea
13829 // peephole %{
// // increment preceded by register-register move
13831 // peepmatch ( incI_eReg movI );
13832 // // require that the destination register of the increment
13833 // // match the destination register of the move
13834 // peepconstraint ( 0.dst == 1.dst );
13835 // // construct a replacement instruction that sets
13836 // // the destination to ( move's source register + one )
13837 // peepreplace ( leaI_eReg_immI( 0.dst 1.src 0.src ) );
13838 // %}
13839 //
13840 // Implementation no longer uses movX instructions since
13841 // machine-independent system no longer uses CopyX nodes.
13842 //
13843 // peephole %{
13844 // peepmatch ( incI_eReg movI );
13845 // peepconstraint ( 0.dst == 1.dst );
13846 // peepreplace ( leaI_eReg_immI( 0.dst 1.src 0.src ) );
13847 // %}
13848 //
13849 // peephole %{
13850 // peepmatch ( decI_eReg movI );
13851 // peepconstraint ( 0.dst == 1.dst );
13852 // peepreplace ( leaI_eReg_immI( 0.dst 1.src 0.src ) );
13853 // %}
13854 //
13855 // peephole %{
13856 // peepmatch ( addI_eReg_imm movI );
13857 // peepconstraint ( 0.dst == 1.dst );
13858 // peepreplace ( leaI_eReg_immI( 0.dst 1.src 0.src ) );
13859 // %}
13860 //
13861 // peephole %{
13862 // peepmatch ( addP_eReg_imm movP );
13863 // peepconstraint ( 0.dst == 1.dst );
13864 // peepreplace ( leaP_eReg_immI( 0.dst 1.src 0.src ) );
13865 // %}
13867 // // Change load of spilled value to only a spill
13868 // instruct storeI(memory mem, eRegI src) %{
13869 // match(Set mem (StoreI mem src));
13870 // %}
13871 //
13872 // instruct loadI(eRegI dst, memory mem) %{
13873 // match(Set dst (LoadI mem));
13874 // %}
13875 //
13876 //peephole %{
13877 // peepmatch ( loadI storeI );
13878 // peepconstraint ( 1.src == 0.dst, 1.mem == 0.mem );
13879 // peepreplace ( storeI( 1.mem 1.mem 1.src ) );
13880 //%}
13882 //----------SMARTSPILL RULES---------------------------------------------------
13883 // These must follow all instruction definitions as they use the names
13884 // defined in the instructions definitions.