Sun, 05 Mar 2017 16:29:58 -0500
[C2] Use patchable_set48 in calls and jumps for MIPS CPUs.
1 //
2 // Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved.
3 // Copyright (c) 2015, 2016, Loongson Technology. All rights reserved.
4 // DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5 //
6 // This code is free software; you can redistribute it and/or modify it
7 // under the terms of the GNU General Public License version 2 only, as
8 // published by the Free Software Foundation.
9 //
10 // This code is distributed in the hope that it will be useful, but WITHOUT
11 // ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 // FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
13 // version 2 for more details (a copy is included in the LICENSE file that
14 // accompanied this code).
15 //
16 // You should have received a copy of the GNU General Public License version
17 // 2 along with this work; if not, write to the Free Software Foundation,
18 // Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
19 //
20 // Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
21 // or visit www.oracle.com if you need additional information or have any
22 // questions.
23 //
24 //
26 // GodSon3 Architecture Description File
28 //----------REGISTER DEFINITION BLOCK------------------------------------------
29 // This information is used by the matcher and the register allocator to
30 // describe individual registers and classes of registers within the target
// architecture.
33 // format:
34 // reg_def name (call convention, c-call convention, ideal type, encoding);
35 // call convention :
36 // NS = No-Save
37 // SOC = Save-On-Call
38 // SOE = Save-On-Entry
39 // AS = Always-Save
40 // ideal type :
41 // see opto/opcodes.hpp for more info
42 // reg_class name (reg, ...);
43 // alloc_class name (reg, ...);
44 register %{
46 // General Registers
47 // Integer Registers
48 reg_def R0 ( NS, NS, Op_RegI, 0, VMRegImpl::Bad());
49 reg_def AT ( NS, NS, Op_RegI, 1, AT->as_VMReg());
50 reg_def AT_H ( NS, NS, Op_RegI, 1, AT->as_VMReg()->next());
51 reg_def V0 (SOC, SOC, Op_RegI, 2, V0->as_VMReg());
52 reg_def V0_H (SOC, SOC, Op_RegI, 2, V0->as_VMReg()->next());
53 reg_def V1 (SOC, SOC, Op_RegI, 3, V1->as_VMReg());
54 reg_def V1_H (SOC, SOC, Op_RegI, 3, V1->as_VMReg()->next());
55 reg_def A0 (SOC, SOC, Op_RegI, 4, A0->as_VMReg());
56 reg_def A0_H (SOC, SOC, Op_RegI, 4, A0->as_VMReg()->next());
57 reg_def A1 (SOC, SOC, Op_RegI, 5, A1->as_VMReg());
58 reg_def A1_H (SOC, SOC, Op_RegI, 5, A1->as_VMReg()->next());
59 reg_def A2 (SOC, SOC, Op_RegI, 6, A2->as_VMReg());
60 reg_def A2_H (SOC, SOC, Op_RegI, 6, A2->as_VMReg()->next());
61 reg_def A3 (SOC, SOC, Op_RegI, 7, A3->as_VMReg());
62 reg_def A3_H (SOC, SOC, Op_RegI, 7, A3->as_VMReg()->next());
63 reg_def A4 (SOC, SOC, Op_RegI, 8, A4->as_VMReg());
64 reg_def A4_H (SOC, SOC, Op_RegI, 8, A4->as_VMReg()->next());
65 reg_def A5 (SOC, SOC, Op_RegI, 9, A5->as_VMReg());
66 reg_def A5_H (SOC, SOC, Op_RegI, 9, A5->as_VMReg()->next());
67 reg_def A6 (SOC, SOC, Op_RegI, 10, A6->as_VMReg());
68 reg_def A6_H (SOC, SOC, Op_RegI, 10, A6->as_VMReg()->next());
69 reg_def A7 (SOC, SOC, Op_RegI, 11, A7->as_VMReg());
70 reg_def A7_H (SOC, SOC, Op_RegI, 11, A7->as_VMReg()->next());
71 reg_def T0 (SOC, SOC, Op_RegI, 12, T0->as_VMReg());
72 reg_def T0_H (SOC, SOC, Op_RegI, 12, T0->as_VMReg()->next());
73 reg_def T1 (SOC, SOC, Op_RegI, 13, T1->as_VMReg());
74 reg_def T1_H (SOC, SOC, Op_RegI, 13, T1->as_VMReg()->next());
75 reg_def T2 (SOC, SOC, Op_RegI, 14, T2->as_VMReg());
76 reg_def T2_H (SOC, SOC, Op_RegI, 14, T2->as_VMReg()->next());
77 reg_def T3 (SOC, SOC, Op_RegI, 15, T3->as_VMReg());
78 reg_def T3_H (SOC, SOC, Op_RegI, 15, T3->as_VMReg()->next());
79 reg_def S0 (SOC, SOE, Op_RegI, 16, S0->as_VMReg());
80 reg_def S0_H (SOC, SOE, Op_RegI, 16, S0->as_VMReg()->next());
81 reg_def S1 (SOC, SOE, Op_RegI, 17, S1->as_VMReg());
82 reg_def S1_H (SOC, SOE, Op_RegI, 17, S1->as_VMReg()->next());
83 reg_def S2 (SOC, SOE, Op_RegI, 18, S2->as_VMReg());
84 reg_def S2_H (SOC, SOE, Op_RegI, 18, S2->as_VMReg()->next());
85 reg_def S3 (SOC, SOE, Op_RegI, 19, S3->as_VMReg());
86 reg_def S3_H (SOC, SOE, Op_RegI, 19, S3->as_VMReg()->next());
87 reg_def S4 (SOC, SOE, Op_RegI, 20, S4->as_VMReg());
88 reg_def S4_H (SOC, SOE, Op_RegI, 20, S4->as_VMReg()->next());
89 reg_def S5 (SOC, SOE, Op_RegI, 21, S5->as_VMReg());
90 reg_def S5_H (SOC, SOE, Op_RegI, 21, S5->as_VMReg()->next());
91 reg_def S6 (SOC, SOE, Op_RegI, 22, S6->as_VMReg());
92 reg_def S6_H (SOC, SOE, Op_RegI, 22, S6->as_VMReg()->next());
93 reg_def S7 (SOC, SOE, Op_RegI, 23, S7->as_VMReg());
94 reg_def S7_H (SOC, SOE, Op_RegI, 23, S7->as_VMReg()->next());
95 reg_def T8 (SOC, SOC, Op_RegI, 24, T8->as_VMReg());
96 reg_def T8_H (SOC, SOC, Op_RegI, 24, T8->as_VMReg()->next());
97 reg_def T9 (SOC, SOC, Op_RegI, 25, T9->as_VMReg());
98 reg_def T9_H (SOC, SOC, Op_RegI, 25, T9->as_VMReg()->next());
100 // Special Registers
101 reg_def K0 ( NS, NS, Op_RegI, 26, K0->as_VMReg());
102 reg_def K1 ( NS, NS, Op_RegI, 27, K1->as_VMReg());
103 reg_def GP ( NS, NS, Op_RegI, 28, GP->as_VMReg());
104 reg_def GP_H ( NS, NS, Op_RegI, 28, GP->as_VMReg()->next());
105 reg_def SP ( NS, NS, Op_RegI, 29, SP->as_VMReg());
106 reg_def SP_H ( NS, NS, Op_RegI, 29, SP->as_VMReg()->next());
107 reg_def FP ( NS, NS, Op_RegI, 30, FP->as_VMReg());
108 reg_def FP_H ( NS, NS, Op_RegI, 30, FP->as_VMReg()->next());
109 reg_def RA ( NS, NS, Op_RegI, 31, RA->as_VMReg());
110 reg_def RA_H ( NS, NS, Op_RegI, 31, RA->as_VMReg()->next());
112 // Floating registers.
113 reg_def F0 ( SOC, SOC, Op_RegF, 0, F0->as_VMReg());
114 reg_def F0_H ( SOC, SOC, Op_RegF, 0, F0->as_VMReg()->next());
115 reg_def F1 ( SOC, SOC, Op_RegF, 1, F1->as_VMReg());
116 reg_def F1_H ( SOC, SOC, Op_RegF, 1, F1->as_VMReg()->next());
117 reg_def F2 ( SOC, SOC, Op_RegF, 2, F2->as_VMReg());
118 reg_def F2_H ( SOC, SOC, Op_RegF, 2, F2->as_VMReg()->next());
119 reg_def F3 ( SOC, SOC, Op_RegF, 3, F3->as_VMReg());
120 reg_def F3_H ( SOC, SOC, Op_RegF, 3, F3->as_VMReg()->next());
121 reg_def F4 ( SOC, SOC, Op_RegF, 4, F4->as_VMReg());
122 reg_def F4_H ( SOC, SOC, Op_RegF, 4, F4->as_VMReg()->next());
123 reg_def F5 ( SOC, SOC, Op_RegF, 5, F5->as_VMReg());
124 reg_def F5_H ( SOC, SOC, Op_RegF, 5, F5->as_VMReg()->next());
125 reg_def F6 ( SOC, SOC, Op_RegF, 6, F6->as_VMReg());
126 reg_def F6_H ( SOC, SOC, Op_RegF, 6, F6->as_VMReg()->next());
127 reg_def F7 ( SOC, SOC, Op_RegF, 7, F7->as_VMReg());
128 reg_def F7_H ( SOC, SOC, Op_RegF, 7, F7->as_VMReg()->next());
129 reg_def F8 ( SOC, SOC, Op_RegF, 8, F8->as_VMReg());
130 reg_def F8_H ( SOC, SOC, Op_RegF, 8, F8->as_VMReg()->next());
131 reg_def F9 ( SOC, SOC, Op_RegF, 9, F9->as_VMReg());
132 reg_def F9_H ( SOC, SOC, Op_RegF, 9, F9->as_VMReg()->next());
133 reg_def F10 ( SOC, SOC, Op_RegF, 10, F10->as_VMReg());
134 reg_def F10_H ( SOC, SOC, Op_RegF, 10, F10->as_VMReg()->next());
135 reg_def F11 ( SOC, SOC, Op_RegF, 11, F11->as_VMReg());
136 reg_def F11_H ( SOC, SOC, Op_RegF, 11, F11->as_VMReg()->next());
137 reg_def F12 ( SOC, SOC, Op_RegF, 12, F12->as_VMReg());
138 reg_def F12_H ( SOC, SOC, Op_RegF, 12, F12->as_VMReg()->next());
139 reg_def F13 ( SOC, SOC, Op_RegF, 13, F13->as_VMReg());
140 reg_def F13_H ( SOC, SOC, Op_RegF, 13, F13->as_VMReg()->next());
141 reg_def F14 ( SOC, SOC, Op_RegF, 14, F14->as_VMReg());
142 reg_def F14_H ( SOC, SOC, Op_RegF, 14, F14->as_VMReg()->next());
143 reg_def F15 ( SOC, SOC, Op_RegF, 15, F15->as_VMReg());
144 reg_def F15_H ( SOC, SOC, Op_RegF, 15, F15->as_VMReg()->next());
145 reg_def F16 ( SOC, SOC, Op_RegF, 16, F16->as_VMReg());
146 reg_def F16_H ( SOC, SOC, Op_RegF, 16, F16->as_VMReg()->next());
147 reg_def F17 ( SOC, SOC, Op_RegF, 17, F17->as_VMReg());
148 reg_def F17_H ( SOC, SOC, Op_RegF, 17, F17->as_VMReg()->next());
149 reg_def F18 ( SOC, SOC, Op_RegF, 18, F18->as_VMReg());
150 reg_def F18_H ( SOC, SOC, Op_RegF, 18, F18->as_VMReg()->next());
151 reg_def F19 ( SOC, SOC, Op_RegF, 19, F19->as_VMReg());
152 reg_def F19_H ( SOC, SOC, Op_RegF, 19, F19->as_VMReg()->next());
153 reg_def F20 ( SOC, SOC, Op_RegF, 20, F20->as_VMReg());
154 reg_def F20_H ( SOC, SOC, Op_RegF, 20, F20->as_VMReg()->next());
155 reg_def F21 ( SOC, SOC, Op_RegF, 21, F21->as_VMReg());
156 reg_def F21_H ( SOC, SOC, Op_RegF, 21, F21->as_VMReg()->next());
157 reg_def F22 ( SOC, SOC, Op_RegF, 22, F22->as_VMReg());
158 reg_def F22_H ( SOC, SOC, Op_RegF, 22, F22->as_VMReg()->next());
159 reg_def F23 ( SOC, SOC, Op_RegF, 23, F23->as_VMReg());
160 reg_def F23_H ( SOC, SOC, Op_RegF, 23, F23->as_VMReg()->next());
161 reg_def F24 ( SOC, SOC, Op_RegF, 24, F24->as_VMReg());
162 reg_def F24_H ( SOC, SOC, Op_RegF, 24, F24->as_VMReg()->next());
163 reg_def F25 ( SOC, SOC, Op_RegF, 25, F25->as_VMReg());
164 reg_def F25_H ( SOC, SOC, Op_RegF, 25, F25->as_VMReg()->next());
165 reg_def F26 ( SOC, SOC, Op_RegF, 26, F26->as_VMReg());
166 reg_def F26_H ( SOC, SOC, Op_RegF, 26, F26->as_VMReg()->next());
167 reg_def F27 ( SOC, SOC, Op_RegF, 27, F27->as_VMReg());
168 reg_def F27_H ( SOC, SOC, Op_RegF, 27, F27->as_VMReg()->next());
169 reg_def F28 ( SOC, SOC, Op_RegF, 28, F28->as_VMReg());
170 reg_def F28_H ( SOC, SOC, Op_RegF, 28, F28->as_VMReg()->next());
171 reg_def F29 ( SOC, SOC, Op_RegF, 29, F29->as_VMReg());
172 reg_def F29_H ( SOC, SOC, Op_RegF, 29, F29->as_VMReg()->next());
173 reg_def F30 ( SOC, SOC, Op_RegF, 30, F30->as_VMReg());
174 reg_def F30_H ( SOC, SOC, Op_RegF, 30, F30->as_VMReg()->next());
175 reg_def F31 ( SOC, SOC, Op_RegF, 31, F31->as_VMReg());
176 reg_def F31_H ( SOC, SOC, Op_RegF, 31, F31->as_VMReg()->next());
179 // ----------------------------
180 // Special Registers
181 // Condition Codes Flag Registers
182 reg_def MIPS_FLAG (SOC, SOC, Op_RegFlags, 1, as_Register(1)->as_VMReg());
183 //S6 is used for get_thread(S6)
184 //S5 is uesd for heapbase of compressed oop
185 alloc_class chunk0(
186 S7, S7_H,
187 S0, S0_H,
188 S1, S1_H,
189 S2, S2_H,
190 S4, S4_H,
191 S5, S5_H,
192 S6, S6_H,
193 S3, S3_H,
194 T2, T2_H,
195 T3, T3_H,
196 T8, T8_H,
197 T9, T9_H,
198 T1, T1_H, // inline_cache_reg
199 V1, V1_H,
200 A7, A7_H,
201 A6, A6_H,
202 A5, A5_H,
203 A4, A4_H,
204 V0, V0_H,
205 A3, A3_H,
206 A2, A2_H,
207 A1, A1_H,
208 A0, A0_H,
209 T0, T0_H,
210 GP, GP_H
211 RA, RA_H,
212 SP, SP_H, // stack_pointer
213 FP, FP_H // frame_pointer
214 );
216 alloc_class chunk1( F0, F0_H,
217 F1, F1_H,
218 F2, F2_H,
219 F3, F3_H,
220 F4, F4_H,
221 F5, F5_H,
222 F6, F6_H,
223 F7, F7_H,
224 F8, F8_H,
225 F9, F9_H,
226 F10, F10_H,
227 F11, F11_H,
228 F20, F20_H,
229 F21, F21_H,
230 F22, F22_H,
231 F23, F23_H,
232 F24, F24_H,
233 F25, F25_H,
234 F26, F26_H,
235 F27, F27_H,
236 F28, F28_H,
237 F19, F19_H,
238 F18, F18_H,
239 F17, F17_H,
240 F16, F16_H,
241 F15, F15_H,
242 F14, F14_H,
243 F13, F13_H,
244 F12, F12_H,
245 F29, F29_H,
246 F30, F30_H,
247 F31, F31_H);
249 alloc_class chunk2(MIPS_FLAG);
251 reg_class s_reg( S0, S1, S2, S3, S4, S5, S6, S7 );
252 reg_class s0_reg( S0 );
253 reg_class s1_reg( S1 );
254 reg_class s2_reg( S2 );
255 reg_class s3_reg( S3 );
256 reg_class s4_reg( S4 );
257 reg_class s5_reg( S5 );
258 reg_class s6_reg( S6 );
259 reg_class s7_reg( S7 );
261 reg_class t_reg( T0, T1, T2, T3, T8, T9 );
262 reg_class t0_reg( T0 );
263 reg_class t1_reg( T1 );
264 reg_class t2_reg( T2 );
265 reg_class t3_reg( T3 );
266 reg_class t8_reg( T8 );
267 reg_class t9_reg( T9 );
269 reg_class a_reg( A0, A1, A2, A3, A4, A5, A6, A7 );
270 reg_class a0_reg( A0 );
271 reg_class a1_reg( A1 );
272 reg_class a2_reg( A2 );
273 reg_class a3_reg( A3 );
274 reg_class a4_reg( A4 );
275 reg_class a5_reg( A5 );
276 reg_class a6_reg( A6 );
277 reg_class a7_reg( A7 );
279 reg_class v0_reg( V0 );
280 reg_class v1_reg( V1 );
282 reg_class sp_reg( SP, SP_H );
283 reg_class fp_reg( FP, FP_H );
285 reg_class mips_flags(MIPS_FLAG);
287 reg_class v0_long_reg( V0, V0_H );
288 reg_class v1_long_reg( V1, V1_H );
289 reg_class a0_long_reg( A0, A0_H );
290 reg_class a1_long_reg( A1, A1_H );
291 reg_class a2_long_reg( A2, A2_H );
292 reg_class a3_long_reg( A3, A3_H );
293 reg_class a4_long_reg( A4, A4_H );
294 reg_class a5_long_reg( A5, A5_H );
295 reg_class a6_long_reg( A6, A6_H );
296 reg_class a7_long_reg( A7, A7_H );
297 reg_class t0_long_reg( T0, T0_H );
298 reg_class t1_long_reg( T1, T1_H );
299 reg_class t2_long_reg( T2, T2_H );
300 reg_class t3_long_reg( T3, T3_H );
301 reg_class t8_long_reg( T8, T8_H );
302 reg_class t9_long_reg( T9, T9_H );
303 reg_class s0_long_reg( S0, S0_H );
304 reg_class s1_long_reg( S1, S1_H );
305 reg_class s2_long_reg( S2, S2_H );
306 reg_class s3_long_reg( S3, S3_H );
307 reg_class s4_long_reg( S4, S4_H );
308 reg_class s5_long_reg( S5, S5_H );
309 reg_class s6_long_reg( S6, S6_H );
310 reg_class s7_long_reg( S7, S7_H );
312 reg_class int_reg( S7, S0, S1, S2, S4, S3, T8, T2, T3, T1, V1, A7, A6, A5, A4, V0, A3, A2, A1, A0, T0 );
314 reg_class no_Ax_int_reg( S7, S0, S1, S2, S4, S3, T8, T2, T3, T1, V1, V0, T0 );
316 reg_class p_reg(
317 S7, S7_H,
318 S0, S0_H,
319 S1, S1_H,
320 S2, S2_H,
321 S4, S4_H,
322 S3, S3_H,
323 T8, T8_H,
324 T2, T2_H,
325 T3, T3_H,
326 T1, T1_H,
327 A7, A7_H,
328 A6, A6_H,
329 A5, A5_H,
330 A4, A4_H,
331 A3, A3_H,
332 A2, A2_H,
333 A1, A1_H,
334 A0, A0_H,
335 T0, T0_H
336 );
338 reg_class no_T8_p_reg(
339 S7, S7_H,
340 S0, S0_H,
341 S1, S1_H,
342 S2, S2_H,
343 S4, S4_H,
344 S3, S3_H,
345 T2, T2_H,
346 T3, T3_H,
347 T1, T1_H,
348 A7, A7_H,
349 A6, A6_H,
350 A5, A5_H,
351 A4, A4_H,
352 A3, A3_H,
353 A2, A2_H,
354 A1, A1_H,
355 A0, A0_H,
356 T0, T0_H
357 );
359 reg_class long_reg(
360 S7, S7_H,
361 S0, S0_H,
362 S1, S1_H,
363 S2, S2_H,
364 S4, S4_H,
365 S3, S3_H,
366 T8, T8_H,
367 T2, T2_H,
368 T3, T3_H,
369 T1, T1_H,
370 A7, A7_H,
371 A6, A6_H,
372 A5, A5_H,
373 A4, A4_H,
374 A3, A3_H,
375 A2, A2_H,
376 A1, A1_H,
377 A0, A0_H,
378 T0, T0_H
379 );
382 // Floating point registers.
383 // 2012/8/23 Fu: F30/F31 are used as temporary registers in D2I
384 // 2016/12/1 aoqi: F31 are not used as temporary registers in D2I
385 reg_class flt_reg( F0, F1, F2, F3, F4, F5, F6, F7, F8, F9, F10, F11, F12, F13, F14, F15, F16, F17 F18, F19, F20, F21, F22, F23, F24, F25, F26, F27, F28, F29, F31);
386 reg_class dbl_reg( F0, F0_H,
387 F1, F1_H,
388 F2, F2_H,
389 F3, F3_H,
390 F4, F4_H,
391 F5, F5_H,
392 F6, F6_H,
393 F7, F7_H,
394 F8, F8_H,
395 F9, F9_H,
396 F10, F10_H,
397 F11, F11_H,
398 F12, F12_H,
399 F13, F13_H,
400 F14, F14_H,
401 F15, F15_H,
402 F16, F16_H,
403 F17, F17_H,
404 F18, F18_H,
405 F19, F19_H,
406 F20, F20_H,
407 F21, F21_H,
408 F22, F22_H,
409 F23, F23_H,
410 F24, F24_H,
411 F25, F25_H,
412 F26, F26_H,
413 F27, F27_H,
414 F28, F28_H,
415 F29, F29_H,
416 F31, F31_H);
418 reg_class flt_arg0( F12 );
419 reg_class dbl_arg0( F12, F12_H );
420 reg_class dbl_arg1( F14, F14_H );
422 %}
424 //----------DEFINITION BLOCK---------------------------------------------------
425 // Define name --> value mappings to inform the ADLC of an integer valued name
426 // Current support includes integer values in the range [0, 0x7FFFFFFF]
427 // Format:
428 // int_def <name> ( <int_value>, <expression>);
429 // Generated Code in ad_<arch>.hpp
430 // #define <name> (<expression>)
431 // // value == <int_value>
432 // Generated code in ad_<arch>.cpp adlc_verification()
433 // assert( <name> == <int_value>, "Expect (<expression>) to equal <int_value>");
434 //
435 definitions %{
436 int_def DEFAULT_COST ( 100, 100);
437 int_def HUGE_COST (1000000, 1000000);
439 // Memory refs are twice as expensive as run-of-the-mill.
440 int_def MEMORY_REF_COST ( 200, DEFAULT_COST * 2);
442 // Branches are even more expensive.
443 int_def BRANCH_COST ( 300, DEFAULT_COST * 3);
444 // we use jr instruction to construct call, so more expensive
445 // by yjl 2/28/2006
446 int_def CALL_COST ( 500, DEFAULT_COST * 5);
447 /*
448 int_def EQUAL ( 1, 1 );
449 int_def NOT_EQUAL ( 2, 2 );
450 int_def GREATER ( 3, 3 );
451 int_def GREATER_EQUAL ( 4, 4 );
452 int_def LESS ( 5, 5 );
453 int_def LESS_EQUAL ( 6, 6 );
454 */
455 %}
459 //----------SOURCE BLOCK-------------------------------------------------------
460 // This is a block of C++ code which provides values, functions, and
461 // definitions necessary in the rest of the architecture description
463 source_hpp %{
464 // Header information of the source block.
465 // Method declarations/definitions which are used outside
466 // the ad-scope can conveniently be defined here.
467 //
468 // To keep related declarations/definitions/uses close together,
469 // we switch between source %{ }% and source_hpp %{ }% freely as needed.
471 class CallStubImpl {
473 //--------------------------------------------------------------
474 //---< Used for optimization in Compile::shorten_branches >---
475 //--------------------------------------------------------------
477 public:
478 // Size of call trampoline stub.
479 static uint size_call_trampoline() {
480 return 0; // no call trampolines on this platform
481 }
483 // number of relocations needed by a call trampoline stub
484 static uint reloc_call_trampoline() {
485 return 0; // no call trampolines on this platform
486 }
487 };
489 class HandlerImpl {
491 public:
493 static int emit_exception_handler(CodeBuffer &cbuf);
494 static int emit_deopt_handler(CodeBuffer& cbuf);
496 static uint size_exception_handler() {
497 // NativeCall instruction size is the same as NativeJump.
498 // exception handler starts out as jump and can be patched to
499 // a call be deoptimization. (4932387)
500 // Note that this value is also credited (in output.cpp) to
501 // the size of the code section.
502 // return NativeJump::instruction_size;
503 int size = NativeCall::instruction_size;
504 return round_to(size, 16);
505 }
507 #ifdef _LP64
508 static uint size_deopt_handler() {
509 int size = NativeCall::instruction_size;
510 return round_to(size, 16);
511 }
512 #else
513 static uint size_deopt_handler() {
514 // NativeCall instruction size is the same as NativeJump.
515 // exception handler starts out as jump and can be patched to
516 // a call be deoptimization. (4932387)
517 // Note that this value is also credited (in output.cpp) to
518 // the size of the code section.
519 return 5 + NativeJump::instruction_size; // pushl(); jmp;
520 }
521 #endif
522 };
524 %} // end source_hpp
526 source %{
528 #define NO_INDEX 0
529 #define RELOC_IMM64 Assembler::imm_operand
530 #define RELOC_DISP32 Assembler::disp32_operand
533 #define __ _masm.
536 // Emit exception handler code.
537 // Stuff framesize into a register and call a VM stub routine.
538 int HandlerImpl::emit_exception_handler(CodeBuffer& cbuf) {
539 /*
540 // Note that the code buffer's insts_mark is always relative to insts.
541 // That's why we must use the macroassembler to generate a handler.
542 MacroAssembler _masm(&cbuf);
543 address base = __ start_a_stub(size_exception_handler());
544 if (base == NULL) return 0; // CodeBuffer::expand failed
545 int offset = __ offset();
546 __ jump(RuntimeAddress(OptoRuntime::exception_blob()->entry_point()));
547 assert(__ offset() - offset <= (int) size_exception_handler(), "overflow");
548 __ end_a_stub();
549 return offset;
550 */
551 // Note that the code buffer's insts_mark is always relative to insts.
552 // That's why we must use the macroassembler to generate a handler.
553 MacroAssembler _masm(&cbuf);
554 address base =
555 __ start_a_stub(size_exception_handler());
556 if (base == NULL) return 0; // CodeBuffer::expand failed
557 int offset = __ offset();
559 __ block_comment("; emit_exception_handler");
561 /* 2012/9/25 FIXME Jin: According to X86, we should use direct jumpt.
562 * * However, this will trigger an assert after the 40th method:
563 * *
564 * * 39 b java.lang.Throwable::<init> (25 bytes)
565 * * --- ns java.lang.Throwable::fillInStackTrace
566 * * 40 !b java.net.URLClassLoader::findClass (29 bytes)
567 * * /vm/opto/runtime.cpp, 900 , assert(caller.is_compiled_frame(),"must be")
568 * * 40 made not entrant (2) java.net.URLClassLoader::findClass (29 bytes)
569 * *
570 * * If we change from JR to JALR, the assert will disappear, but WebClient will
571 * * fail after the 403th method with unknown reason.
572 * */
573 cbuf.set_insts_mark();
574 __ relocate(relocInfo::runtime_call_type);
576 __ patchable_set48(T9, (long)OptoRuntime::exception_blob()->entry_point());
577 __ jr(T9);
578 __ delayed()->nop();
579 __ align(16);
580 assert(__ offset() - offset <= (int) size_exception_handler(), "overflow");
581 __ end_a_stub();
582 return offset;
583 }
585 // Emit deopt handler code.
586 int HandlerImpl::emit_deopt_handler(CodeBuffer& cbuf) {
587 // Note that the code buffer's insts_mark is always relative to insts.
588 // That's why we must use the macroassembler to generate a handler.
589 MacroAssembler _masm(&cbuf);
590 address base =
591 __ start_a_stub(size_deopt_handler());
593 // FIXME
594 if (base == NULL) return 0; // CodeBuffer::expand failed
595 int offset = __ offset();
597 __ block_comment("; emit_deopt_handler");
599 cbuf.set_insts_mark();
600 __ relocate(relocInfo::runtime_call_type);
602 __ patchable_set48(T9, (long)SharedRuntime::deopt_blob()->unpack());
603 __ jalr(T9);
604 __ delayed()->nop();
605 __ align(16);
606 assert(__ offset() - offset <= (int) size_deopt_handler(), "overflow");
607 __ end_a_stub();
608 return offset;
609 }
612 const bool Matcher::match_rule_supported(int opcode) {
613 if (!has_match_rule(opcode))
614 return false;
616 switch (opcode) {
617 //Op_CountLeadingZerosI Op_CountLeadingZerosL can be deleted, all MIPS CPUs support clz & dclz.
618 case Op_CountLeadingZerosI:
619 case Op_CountLeadingZerosL:
620 if (!UseCountLeadingZerosInstruction)
621 return false;
622 break;
623 case Op_CountTrailingZerosI:
624 case Op_CountTrailingZerosL:
625 if (!UseCountTrailingZerosInstruction)
626 return false;
627 break;
628 }
630 return true; // Per default match rules are supported.
631 }
633 //FIXME
634 // emit call stub, compiled java to interpreter
635 void emit_java_to_interp(CodeBuffer &cbuf ) {
636 // Stub is fixed up when the corresponding call is converted from calling
637 // compiled code to calling interpreted code.
638 // mov rbx,0
639 // jmp -1
641 address mark = cbuf.insts_mark(); // get mark within main instrs section
643 // Note that the code buffer's insts_mark is always relative to insts.
644 // That's why we must use the macroassembler to generate a stub.
645 MacroAssembler _masm(&cbuf);
647 address base =
648 __ start_a_stub(Compile::MAX_stubs_size);
649 if (base == NULL) return; // CodeBuffer::expand failed
650 // static stub relocation stores the instruction address of the call
652 __ relocate(static_stub_Relocation::spec(mark), 0);
654 /* 2012/10/29 Jin: Rmethod contains methodOop, it should be relocated for GC */
655 /*
656 int oop_index = __ oop_recorder()->allocate_index(NULL);
657 RelocationHolder rspec = oop_Relocation::spec(oop_index);
658 __ relocate(rspec);
659 */
661 // static stub relocation also tags the methodOop in the code-stream.
662 __ patchable_set48(S3, (long)0);
663 // This is recognized as unresolved by relocs/nativeInst/ic code
665 __ relocate(relocInfo::runtime_call_type);
667 cbuf.set_insts_mark();
668 address call_pc = (address)-1;
669 __ patchable_set48(AT, (long)call_pc);
670 __ jr(AT);
671 __ nop();
672 __ align(16);
673 __ end_a_stub();
674 // Update current stubs pointer and restore code_end.
675 }
677 // size of call stub, compiled java to interpretor
678 uint size_java_to_interp() {
679 int size = 4 * 4 + NativeCall::instruction_size; // sizeof(li48) + NativeCall::instruction_size
680 return round_to(size, 16);
681 }
683 // relocation entries for call stub, compiled java to interpreter
684 uint reloc_java_to_interp() {
685 return 16; // in emit_java_to_interp + in Java_Static_Call
686 }
688 bool Matcher::is_short_branch_offset(int rule, int br_size, int offset) {
689 if( Assembler::is_simm16(offset) ) return true;
690 else
691 {
692 assert(false, "Not implemented yet !" );
693 Unimplemented();
694 }
695 }
698 // No additional cost for CMOVL.
699 const int Matcher::long_cmove_cost() { return 0; }
701 // No CMOVF/CMOVD with SSE2
702 const int Matcher::float_cmove_cost() { return ConditionalMoveLimit; }
704 // Does the CPU require late expand (see block.cpp for description of late expand)?
705 const bool Matcher::require_postalloc_expand = false;
707 // Should the Matcher clone shifts on addressing modes, expecting them
708 // to be subsumed into complex addressing expressions or compute them
709 // into registers? True for Intel but false for most RISCs
710 const bool Matcher::clone_shift_expressions = false;
712 // Do we need to mask the count passed to shift instructions or does
713 // the cpu only look at the lower 5/6 bits anyway?
714 const bool Matcher::need_masked_shift_count = false;
716 bool Matcher::narrow_oop_use_complex_address() {
717 NOT_LP64(ShouldNotCallThis());
718 assert(UseCompressedOops, "only for compressed oops code");
719 return false;
720 }
722 bool Matcher::narrow_klass_use_complex_address() {
723 NOT_LP64(ShouldNotCallThis());
724 assert(UseCompressedClassPointers, "only for compressed klass code");
725 return false;
726 }
728 // This is UltraSparc specific, true just means we have fast l2f conversion
729 const bool Matcher::convL2FSupported(void) {
730 return true;
731 }
733 // Max vector size in bytes. 0 if not supported.
734 const int Matcher::vector_width_in_bytes(BasicType bt) {
735 assert(MaxVectorSize == 8, "");
736 return 8;
737 }
739 // Vector ideal reg
740 const int Matcher::vector_ideal_reg(int size) {
741 assert(MaxVectorSize == 8, "");
742 switch(size) {
743 case 8: return Op_VecD;
744 }
745 ShouldNotReachHere();
746 return 0;
747 }
749 // Only lowest bits of xmm reg are used for vector shift count.
750 const int Matcher::vector_shift_count_ideal_reg(int size) {
751 fatal("vector shift is not supported");
752 return Node::NotAMachineReg;
753 }
755 // Limits on vector size (number of elements) loaded into vector.
756 const int Matcher::max_vector_size(const BasicType bt) {
757 assert(is_java_primitive(bt), "only primitive type vectors");
758 return vector_width_in_bytes(bt)/type2aelembytes(bt);
759 }
761 const int Matcher::min_vector_size(const BasicType bt) {
762 return max_vector_size(bt); // Same as max.
763 }
765 // MIPS supports misaligned vectors store/load? FIXME
766 const bool Matcher::misaligned_vectors_ok() {
767 return false;
768 //return !AlignVector; // can be changed by flag
769 }
771 // Register for DIVI projection of divmodI
772 RegMask Matcher::divI_proj_mask() {
773 ShouldNotReachHere();
774 return RegMask();
775 }
777 // Register for MODI projection of divmodI
778 RegMask Matcher::modI_proj_mask() {
779 ShouldNotReachHere();
780 return RegMask();
781 }
783 // Register for DIVL projection of divmodL
784 RegMask Matcher::divL_proj_mask() {
785 ShouldNotReachHere();
786 return RegMask();
787 }
789 int Matcher::regnum_to_fpu_offset(int regnum) {
790 return regnum - 32; // The FP registers are in the second chunk
791 }
794 const bool Matcher::isSimpleConstant64(jlong value) {
795 // Will one (StoreL ConL) be cheaper than two (StoreI ConI)?.
796 return true;
797 }
800 // Return whether or not this register is ever used as an argument. This
801 // function is used on startup to build the trampoline stubs in generateOptoStub.
802 // Registers not mentioned will be killed by the VM call in the trampoline, and
803 // arguments in those registers not be available to the callee.
804 bool Matcher::can_be_java_arg( int reg ) {
805 /* Refer to: [sharedRuntime_mips_64.cpp] SharedRuntime::java_calling_convention() */
806 if ( reg == T0_num || reg == T0_H_num
807 || reg == A0_num || reg == A0_H_num
808 || reg == A1_num || reg == A1_H_num
809 || reg == A2_num || reg == A2_H_num
810 || reg == A3_num || reg == A3_H_num
811 || reg == A4_num || reg == A4_H_num
812 || reg == A5_num || reg == A5_H_num
813 || reg == A6_num || reg == A6_H_num
814 || reg == A7_num || reg == A7_H_num )
815 return true;
817 if ( reg == F12_num || reg == F12_H_num
818 || reg == F13_num || reg == F13_H_num
819 || reg == F14_num || reg == F14_H_num
820 || reg == F15_num || reg == F15_H_num
821 || reg == F16_num || reg == F16_H_num
822 || reg == F17_num || reg == F17_H_num
823 || reg == F18_num || reg == F18_H_num
824 || reg == F19_num || reg == F19_H_num )
825 return true;
827 return false;
828 }
830 bool Matcher::is_spillable_arg( int reg ) {
831 return can_be_java_arg(reg);
832 }
834 bool Matcher::use_asm_for_ldiv_by_con( jlong divisor ) {
835 return false;
836 }
838 // Register for MODL projection of divmodL
839 RegMask Matcher::modL_proj_mask() {
840 ShouldNotReachHere();
841 return RegMask();
842 }
844 const RegMask Matcher::method_handle_invoke_SP_save_mask() {
845 return FP_REG_mask();
846 }
848 // MIPS doesn't support AES intrinsics
849 const bool Matcher::pass_original_key_for_aes() {
850 return false;
851 }
853 // The address of the call instruction needs to be 16-byte aligned to
854 // ensure that it does not span a cache line so that it can be patched.
856 int CallStaticJavaDirectNode::compute_padding(int current_offset) const {
857 //lui
858 //ori
859 //dsll
860 //ori
862 //jalr
863 //nop
865 return round_to(current_offset, alignment_required()) - current_offset;
866 }
868 // The address of the call instruction needs to be 16-byte aligned to
869 // ensure that it does not span a cache line so that it can be patched.
870 int CallDynamicJavaDirectNode::compute_padding(int current_offset) const {
871 //loadIC <--- skip
873 //lui
874 //ori
875 //dsll
876 //ori
878 //jalr
879 //nop
881 current_offset += 4 * 4;
882 return round_to(current_offset, alignment_required()) - current_offset;
883 }
885 int CallLeafNoFPDirectNode::compute_padding(int current_offset) const {
886 //lui
887 //ori
888 //dsll
889 //ori
891 //jalr
892 //nop
894 return round_to(current_offset, alignment_required()) - current_offset;
895 }
897 int CallLeafDirectNode::compute_padding(int current_offset) const {
898 //lui
899 //ori
900 //dsll
901 //ori
903 //jalr
904 //nop
906 return round_to(current_offset, alignment_required()) - current_offset;
907 }
909 int CallRuntimeDirectNode::compute_padding(int current_offset) const {
910 //lui
911 //ori
912 //dsll
913 //ori
915 //jalr
916 //nop
918 return round_to(current_offset, alignment_required()) - current_offset;
919 }
921 // If CPU can load and store mis-aligned doubles directly then no fixup is
922 // needed. Else we split the double into 2 integer pieces and move it
923 // piece-by-piece. Only happens when passing doubles into C code as the
924 // Java calling convention forces doubles to be aligned.
925 const bool Matcher::misaligned_doubles_ok = false;
926 // Do floats take an entire double register or just half?
927 //const bool Matcher::float_in_double = true;
928 bool Matcher::float_in_double() { return false; }
929 // Threshold size for cleararray.
930 const int Matcher::init_array_short_size = 8 * BytesPerLong;
931 // Do ints take an entire long register or just half?
932 const bool Matcher::int_in_long = true;
933 // Is it better to copy float constants, or load them directly from memory?
934 // Intel can load a float constant from a direct address, requiring no
935 // extra registers. Most RISCs will have to materialize an address into a
936 // register first, so they would do better to copy the constant from stack.
937 const bool Matcher::rematerialize_float_constants = false;
938 // Advertise here if the CPU requires explicit rounding operations
939 // to implement the UseStrictFP mode.
940 const bool Matcher::strict_fp_requires_explicit_rounding = false;
941 // The ecx parameter to rep stos for the ClearArray node is in dwords.
942 const bool Matcher::init_array_count_is_in_bytes = false;
// Indicate if the safepoint node needs the polling page as an input.
// Since MIPS doesn't have absolute addressing, it needs.
bool SafePointNode::needs_polling_address_input() {
  return true;
}
951 // !!!!! Special hack to get all type of calls to specify the byte offset
952 // from the start of the call to the point where the return address
953 // will point.
954 int MachCallStaticJavaNode::ret_addr_offset() {
955 assert(NativeCall::instruction_size == 24, "in MachCallStaticJavaNode::ret_addr_offset");
956 //The value ought to be 16 bytes.
957 //lui
958 //ori
959 //dsll
960 //ori
961 //jalr
962 //nop
963 return NativeCall::instruction_size;
964 }
966 int MachCallDynamicJavaNode::ret_addr_offset() {
967 assert(NativeCall::instruction_size == 24, "in MachCallDynamicJavaNode::ret_addr_offset");
968 //The value ought to be 4 + 16 bytes.
969 //lui IC_Klass,
970 //ori IC_Klass,
971 //dsll IC_Klass
972 //ori IC_Klass
973 //lui T9
974 //ori T9
975 //dsll T9
976 //ori T9
977 //jalr T9
978 //nop
979 return 4 * 4 + NativeCall::instruction_size;
980 }
982 //=============================================================================
984 // Figure out which register class each belongs in: rc_int, rc_float, rc_stack
985 enum RC { rc_bad, rc_int, rc_float, rc_stack };
986 static enum RC rc_class( OptoReg::Name reg ) {
987 if( !OptoReg::is_valid(reg) ) return rc_bad;
988 if (OptoReg::is_stack(reg)) return rc_stack;
989 VMReg r = OptoReg::as_VMReg(reg);
990 if (r->is_Register()) return rc_int;
991 assert(r->is_FloatRegister(), "must be");
992 return rc_float;
993 }
// Shared backend for MachSpillCopyNode's emit()/size()/format(): moves a
// value between the locations assigned by the register allocator
// (stack slot <-> GPR <-> FPR), in 64-bit or 32-bit width.
// Operating modes:
//   cbuf != NULL            : emit the actual instructions
//   cbuf == NULL, !do_size  : print a textual form to st (non-PRODUCT)
//   cbuf == NULL, do_size   : only accumulate the code size
// Returns the byte size of the (would-be) emitted code.
uint MachSpillCopyNode::implementation( CodeBuffer *cbuf, PhaseRegAlloc *ra_, bool do_size, outputStream* st ) const {
  // Get registers to move
  OptoReg::Name src_second = ra_->get_reg_second(in(1));
  OptoReg::Name src_first = ra_->get_reg_first(in(1));
  OptoReg::Name dst_second = ra_->get_reg_second(this );
  OptoReg::Name dst_first = ra_->get_reg_first(this );

  enum RC src_second_rc = rc_class(src_second);
  enum RC src_first_rc = rc_class(src_first);
  enum RC dst_second_rc = rc_class(dst_second);
  enum RC dst_first_rc = rc_class(dst_first);

  assert(OptoReg::is_valid(src_first) && OptoReg::is_valid(dst_first), "must move at least 1 register" );

  // Generate spill code!
  int size = 0;

  if( src_first == dst_first && src_second == dst_second )
    return 0; // Self copy, no move

  // Throughout, "(x & 1) == 0 && x + 1 == x_second" tests for an aligned
  // adjacent pair, i.e. a 64-bit value; otherwise the move is 32-bit.
  if (src_first_rc == rc_stack) {
    // mem ->
    if (dst_first_rc == rc_stack) {
      // mem -> mem (bounced through scratch register AT)
      assert(src_second != dst_first, "overlap");
      if ((src_first & 1) == 0 && src_first + 1 == src_second &&
          (dst_first & 1) == 0 && dst_first + 1 == dst_second) {
        // 64-bit
        int src_offset = ra_->reg2offset(src_first);
        int dst_offset = ra_->reg2offset(dst_first);
        if (cbuf) {
          MacroAssembler _masm(cbuf);
          __ ld(AT, Address(SP, src_offset));
          __ sd(AT, Address(SP, dst_offset));
#ifndef PRODUCT
        } else {
          if(!do_size){
            if (size != 0) st->print("\n\t");
            st->print("ld AT, [SP + #%d]\t# 64-bit mem-mem spill 1\n\t"
                      "sd AT, [SP + #%d]",
                      src_offset, dst_offset);
          }
#endif
        }
        size += 8;
      } else {
        // 32-bit
        assert(!((src_first & 1) == 0 && src_first + 1 == src_second), "no transform");
        assert(!((dst_first & 1) == 0 && dst_first + 1 == dst_second), "no transform");
        // No pushl/popl, so:
        int src_offset = ra_->reg2offset(src_first);
        int dst_offset = ra_->reg2offset(dst_first);
        if (cbuf) {
          MacroAssembler _masm(cbuf);
          __ lw(AT, Address(SP, src_offset));
          __ sw(AT, Address(SP, dst_offset));
#ifndef PRODUCT
        } else {
          if(!do_size){
            if (size != 0) st->print("\n\t");
            st->print("lw AT, [SP + #%d] spill 2\n\t"
                      "sw AT, [SP + #%d]\n\t",
                      src_offset, dst_offset);
          }
#endif
        }
        size += 8;
      }
      return size;
    } else if (dst_first_rc == rc_int) {
      // mem -> gpr
      if ((src_first & 1) == 0 && src_first + 1 == src_second &&
          (dst_first & 1) == 0 && dst_first + 1 == dst_second) {
        // 64-bit
        int offset = ra_->reg2offset(src_first);
        if (cbuf) {
          MacroAssembler _masm(cbuf);
          __ ld(as_Register(Matcher::_regEncode[dst_first]), Address(SP, offset));
#ifndef PRODUCT
        } else {
          if(!do_size){
            if (size != 0) st->print("\n\t");
            st->print("ld %s, [SP + #%d]\t# spill 3",
                      Matcher::regName[dst_first],
                      offset);
          }
#endif
        }
        size += 4;
      } else {
        // 32-bit
        assert(!((src_first & 1) == 0 && src_first + 1 == src_second), "no transform");
        assert(!((dst_first & 1) == 0 && dst_first + 1 == dst_second), "no transform");
        int offset = ra_->reg2offset(src_first);
        if (cbuf) {
          MacroAssembler _masm(cbuf);
          // Op_RegI is sign-extended with lw; other 32-bit values
          // (presumably narrow oops/klass pointers — TODO confirm) are
          // zero-extended with lwu.
          if (this->ideal_reg() == Op_RegI)
            __ lw(as_Register(Matcher::_regEncode[dst_first]), Address(SP, offset));
          else
            __ lwu(as_Register(Matcher::_regEncode[dst_first]), Address(SP, offset));
#ifndef PRODUCT
        } else {
          if(!do_size){
            if (size != 0) st->print("\n\t");
            if (this->ideal_reg() == Op_RegI)
              st->print("lw %s, [SP + #%d]\t# spill 4",
                        Matcher::regName[dst_first],
                        offset);
            else
              st->print("lwu %s, [SP + #%d]\t# spill 5",
                        Matcher::regName[dst_first],
                        offset);
          }
#endif
        }
        size += 4;
      }
      return size;
    } else if (dst_first_rc == rc_float) {
      // mem-> xmm
      if ((src_first & 1) == 0 && src_first + 1 == src_second &&
          (dst_first & 1) == 0 && dst_first + 1 == dst_second) {
        // 64-bit
        int offset = ra_->reg2offset(src_first);
        if (cbuf) {
          MacroAssembler _masm(cbuf);
          __ ldc1( as_FloatRegister(Matcher::_regEncode[dst_first]), Address(SP, offset));
#ifndef PRODUCT
        } else {
          if(!do_size){
            if (size != 0) st->print("\n\t");
            st->print("ldc1 %s, [SP + #%d]\t# spill 6",
                      Matcher::regName[dst_first],
                      offset);
          }
#endif
        }
        size += 4;
      } else {
        // 32-bit
        assert(!((src_first & 1) == 0 && src_first + 1 == src_second), "no transform");
        assert(!((dst_first & 1) == 0 && dst_first + 1 == dst_second), "no transform");
        int offset = ra_->reg2offset(src_first);
        if (cbuf) {
          MacroAssembler _masm(cbuf);
          __ lwc1( as_FloatRegister(Matcher::_regEncode[dst_first]), Address(SP, offset));
#ifndef PRODUCT
        } else {
          if(!do_size){
            if (size != 0) st->print("\n\t");
            st->print("lwc1 %s, [SP + #%d]\t# spill 7",
                      Matcher::regName[dst_first],
                      offset);
          }
#endif
        }
        size += 4;
      }
      return size;
    }
  } else if (src_first_rc == rc_int) {
    // gpr ->
    if (dst_first_rc == rc_stack) {
      // gpr -> mem
      if ((src_first & 1) == 0 && src_first + 1 == src_second &&
          (dst_first & 1) == 0 && dst_first + 1 == dst_second) {
        // 64-bit
        int offset = ra_->reg2offset(dst_first);
        if (cbuf) {
          MacroAssembler _masm(cbuf);
          __ sd(as_Register(Matcher::_regEncode[src_first]), Address(SP, offset));
#ifndef PRODUCT
        } else {
          if(!do_size){
            if (size != 0) st->print("\n\t");
            st->print("sd %s, [SP + #%d] # spill 8",
                      Matcher::regName[src_first],
                      offset);
          }
#endif
        }
        size += 4;
      } else {
        // 32-bit
        assert(!((src_first & 1) == 0 && src_first + 1 == src_second), "no transform");
        assert(!((dst_first & 1) == 0 && dst_first + 1 == dst_second), "no transform");
        int offset = ra_->reg2offset(dst_first);
        if (cbuf) {
          MacroAssembler _masm(cbuf);
          __ sw(as_Register(Matcher::_regEncode[src_first]), Address(SP, offset));
#ifndef PRODUCT
        } else {
          if(!do_size){
            if (size != 0) st->print("\n\t");
            st->print("sw %s, [SP + #%d]\t# spill 9",
                      Matcher::regName[src_first], offset);
          }
#endif
        }
        size += 4;
      }
      return size;
    } else if (dst_first_rc == rc_int) {
      // gpr -> gpr
      if ((src_first & 1) == 0 && src_first + 1 == src_second &&
          (dst_first & 1) == 0 && dst_first + 1 == dst_second) {
        // 64-bit
        if (cbuf) {
          MacroAssembler _masm(cbuf);
          __ move(as_Register(Matcher::_regEncode[dst_first]),
                  as_Register(Matcher::_regEncode[src_first]));
#ifndef PRODUCT
        } else {
          if(!do_size){
            if (size != 0) st->print("\n\t");
            st->print("move(64bit) %s <-- %s\t# spill 10",
                      Matcher::regName[dst_first],
                      Matcher::regName[src_first]);
          }
#endif
        }
        size += 4;
        return size;
      } else {
        // 32-bit
        assert(!((src_first & 1) == 0 && src_first + 1 == src_second), "no transform");
        assert(!((dst_first & 1) == 0 && dst_first + 1 == dst_second), "no transform");
        if (cbuf) {
          MacroAssembler _masm(cbuf);
          // Op_RegI uses a 32-bit move; other 32-bit classes copy the full
          // 64 bits via daddu dst, src, R0 (presumably to keep narrow-oop
          // bits intact — TODO confirm).
          if (this->ideal_reg() == Op_RegI)
            __ move_u32(as_Register(Matcher::_regEncode[dst_first]), as_Register(Matcher::_regEncode[src_first]));
          else
            __ daddu(as_Register(Matcher::_regEncode[dst_first]), as_Register(Matcher::_regEncode[src_first]), R0);

#ifndef PRODUCT
        } else {
          if(!do_size){
            if (size != 0) st->print("\n\t");
            st->print("move(32-bit) %s <-- %s\t# spill 11",
                      Matcher::regName[dst_first],
                      Matcher::regName[src_first]);
          }
#endif
        }
        size += 4;
        return size;
      }
    } else if (dst_first_rc == rc_float) {
      // gpr -> xmm
      if ((src_first & 1) == 0 && src_first + 1 == src_second &&
          (dst_first & 1) == 0 && dst_first + 1 == dst_second) {
        // 64-bit
        if (cbuf) {
          MacroAssembler _masm(cbuf);
          __ dmtc1(as_Register(Matcher::_regEncode[src_first]), as_FloatRegister(Matcher::_regEncode[dst_first]));
#ifndef PRODUCT
        } else {
          if(!do_size){
            if (size != 0) st->print("\n\t");
            st->print("dmtc1 %s, %s\t# spill 12",
                      Matcher::regName[dst_first],
                      Matcher::regName[src_first]);
          }
#endif
        }
        size += 4;
      } else {
        // 32-bit
        assert(!((src_first & 1) == 0 && src_first + 1 == src_second), "no transform");
        assert(!((dst_first & 1) == 0 && dst_first + 1 == dst_second), "no transform");
        if (cbuf) {
          MacroAssembler _masm(cbuf);
          __ mtc1( as_Register(Matcher::_regEncode[src_first]), as_FloatRegister(Matcher::_regEncode[dst_first]) );
#ifndef PRODUCT
        } else {
          if(!do_size){
            if (size != 0) st->print("\n\t");
            st->print("mtc1 %s, %s\t# spill 13",
                      Matcher::regName[dst_first],
                      Matcher::regName[src_first]);
          }
#endif
        }
        size += 4;
      }
      return size;
    }
  } else if (src_first_rc == rc_float) {
    // xmm ->
    if (dst_first_rc == rc_stack) {
      // xmm -> mem
      if ((src_first & 1) == 0 && src_first + 1 == src_second &&
          (dst_first & 1) == 0 && dst_first + 1 == dst_second) {
        // 64-bit
        int offset = ra_->reg2offset(dst_first);
        if (cbuf) {
          MacroAssembler _masm(cbuf);
          __ sdc1( as_FloatRegister(Matcher::_regEncode[src_first]), Address(SP, offset) );
#ifndef PRODUCT
        } else {
          if(!do_size){
            if (size != 0) st->print("\n\t");
            st->print("sdc1 %s, [SP + #%d]\t# spill 14",
                      Matcher::regName[src_first],
                      offset);
          }
#endif
        }
        size += 4;
      } else {
        // 32-bit
        assert(!((src_first & 1) == 0 && src_first + 1 == src_second), "no transform");
        assert(!((dst_first & 1) == 0 && dst_first + 1 == dst_second), "no transform");
        int offset = ra_->reg2offset(dst_first);
        if (cbuf) {
          MacroAssembler _masm(cbuf);
          __ swc1(as_FloatRegister(Matcher::_regEncode[src_first]), Address(SP, offset));
#ifndef PRODUCT
        } else {
          if(!do_size){
            if (size != 0) st->print("\n\t");
            st->print("swc1 %s, [SP + #%d]\t# spill 15",
                      Matcher::regName[src_first],
                      offset);
          }
#endif
        }
        size += 4;
      }
      return size;
    } else if (dst_first_rc == rc_int) {
      // xmm -> gpr
      if ((src_first & 1) == 0 && src_first + 1 == src_second &&
          (dst_first & 1) == 0 && dst_first + 1 == dst_second) {
        // 64-bit
        if (cbuf) {
          MacroAssembler _masm(cbuf);
          __ dmfc1( as_Register(Matcher::_regEncode[dst_first]), as_FloatRegister(Matcher::_regEncode[src_first]));
#ifndef PRODUCT
        } else {
          if(!do_size){
            if (size != 0) st->print("\n\t");
            st->print("dmfc1 %s, %s\t# spill 16",
                      Matcher::regName[dst_first],
                      Matcher::regName[src_first]);
          }
#endif
        }
        size += 4;
      } else {
        // 32-bit
        assert(!((src_first & 1) == 0 && src_first + 1 == src_second), "no transform");
        assert(!((dst_first & 1) == 0 && dst_first + 1 == dst_second), "no transform");
        if (cbuf) {
          MacroAssembler _masm(cbuf);
          __ mfc1( as_Register(Matcher::_regEncode[dst_first]), as_FloatRegister(Matcher::_regEncode[src_first]));
#ifndef PRODUCT
        } else {
          if(!do_size){
            if (size != 0) st->print("\n\t");
            st->print("mfc1 %s, %s\t# spill 17",
                      Matcher::regName[dst_first],
                      Matcher::regName[src_first]);
          }
#endif
        }
        size += 4;
      }
      return size;
    } else if (dst_first_rc == rc_float) {
      // xmm -> xmm
      if ((src_first & 1) == 0 && src_first + 1 == src_second &&
          (dst_first & 1) == 0 && dst_first + 1 == dst_second) {
        // 64-bit
        if (cbuf) {
          MacroAssembler _masm(cbuf);
          __ mov_d( as_FloatRegister(Matcher::_regEncode[dst_first]), as_FloatRegister(Matcher::_regEncode[src_first]));
#ifndef PRODUCT
        } else {
          if(!do_size){
            if (size != 0) st->print("\n\t");
            st->print("mov_d %s <-- %s\t# spill 18",
                      Matcher::regName[dst_first],
                      Matcher::regName[src_first]);
          }
#endif
        }
        size += 4;
      } else {
        // 32-bit
        assert(!((src_first & 1) == 0 && src_first + 1 == src_second), "no transform");
        assert(!((dst_first & 1) == 0 && dst_first + 1 == dst_second), "no transform");
        if (cbuf) {
          MacroAssembler _masm(cbuf);
          __ mov_s( as_FloatRegister(Matcher::_regEncode[dst_first]), as_FloatRegister(Matcher::_regEncode[src_first]));
#ifndef PRODUCT
        } else {
          if(!do_size){
            if (size != 0) st->print("\n\t");
            st->print("mov_s %s <-- %s\t# spill 19",
                      Matcher::regName[dst_first],
                      Matcher::regName[src_first]);
          }
#endif
        }
        size += 4;
      }
      return size;
    }
  }

  // Unreachable: every (src class, dst class) combination returns above.
  assert(0," foo ");
  Unimplemented();
  return size;
}
#ifndef PRODUCT
// Debug printer: runs implementation() in format-only mode.
void MachSpillCopyNode::format( PhaseRegAlloc *ra_, outputStream* st ) const {
  implementation( NULL, ra_, false, st );
}
#endif

// Emit the spill copy into the code buffer.
void MachSpillCopyNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
  implementation( &cbuf, ra_, false, NULL );
}

// Code size in bytes; runs implementation() in size-only mode.
uint MachSpillCopyNode::size(PhaseRegAlloc *ra_) const {
  return implementation( NULL, ra_, true, NULL );
}
1426 //=============================================================================
1427 #
#ifndef PRODUCT
// Debug printer; "INT3" is the traditional (x86-flavored) breakpoint
// mnemonic — the actual emitted instruction is the MacroAssembler's int3().
void MachBreakpointNode::format( PhaseRegAlloc *, outputStream* st ) const {
  st->print("INT3");
}
#endif

// Emit a breakpoint/trap instruction.
void MachBreakpointNode::emit(CodeBuffer &cbuf, PhaseRegAlloc* ra_) const {
  MacroAssembler _masm(&cbuf);
  __ int3();
}

uint MachBreakpointNode::size(PhaseRegAlloc* ra_) const {
  return MachNode::size(ra_);
}
1445 //=============================================================================
1446 #ifndef PRODUCT
1447 void MachEpilogNode::format( PhaseRegAlloc *ra_, outputStream* st ) const {
1448 Compile *C = ra_->C;
1449 int framesize = C->frame_size_in_bytes();
1451 assert((framesize & (StackAlignmentInBytes-1)) == 0, "frame size not aligned");
1453 st->print("daddiu SP, SP, %d # Rlease stack @ MachEpilogNode",framesize);
1454 st->cr(); st->print("\t");
1455 if (UseLoongsonISA) {
1456 st->print("gslq RA, FP, SP, %d # Restore FP & RA @ MachEpilogNode", -wordSize*2);
1457 } else {
1458 st->print("ld RA, SP, %d # Restore RA @ MachEpilogNode", -wordSize);
1459 st->cr(); st->print("\t");
1460 st->print("ld FP, SP, %d # Restore FP @ MachEpilogNode", -wordSize*2);
1461 }
1463 if( do_polling() && C->is_method_compilation() ) {
1464 st->print("Poll Safepoint # MachEpilogNode");
1465 }
1466 }
1467 #endif
// Emit the method epilog: release the frame, restore RA/FP from just
// above the new SP, and (for method compilations) touch the polling page
// as a return safepoint.
void MachEpilogNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
  Compile *C = ra_->C;
  MacroAssembler _masm(&cbuf);
  int framesize = C->frame_size_in_bytes();

  assert((framesize & (StackAlignmentInBytes-1)) == 0, "frame size not aligned");

  // Pop the frame.
  __ daddiu(SP, SP, framesize);

  if (UseLoongsonISA) {
    // Quad-word load: restores RA and FP in a single instruction.
    __ gslq(RA, FP, SP, -wordSize*2);
  } else {
    __ ld(RA, SP, -wordSize );
    __ ld(FP, SP, -wordSize*2 );
  }

  /* 2012/11/19 Jin: The epilog in a RuntimeStub should not contain a safepoint */
  if( do_polling() && C->is_method_compilation() ) {
#ifndef OPT_SAFEPOINT
    // Materialize the polling page address (full 64-bit set) and load
    // from it; the relocation marks this as the poll-return site.
    __ set64(AT, (long)os::get_polling_page());
    __ relocate(relocInfo::poll_return_type);
    __ lw(AT, AT, 0);
#else
    // Two-instruction variant: lui high half, poll via the low half as
    // the load displacement.
    __ lui(AT, Assembler::split_high((intptr_t)os::get_polling_page()));
    __ relocate(relocInfo::poll_return_type);
    __ lw(AT, AT, Assembler::split_low((intptr_t)os::get_polling_page()));
#endif
  }
}
// Size varies with flags (Loongson vs plain MIPS, polling); fall back to
// the generic measured size.
uint MachEpilogNode::size(PhaseRegAlloc *ra_) const {
  return MachNode::size(ra_); // too many variables; just compute it the hard way fujie debug
}

int MachEpilogNode::reloc() const {
  return 0; // a large enough number
}

const Pipeline * MachEpilogNode::pipeline() const {
  return MachNode::pipeline_class();
}

// Offset of the safepoint poll within the epilog (poll is emitted last).
int MachEpilogNode::safepoint_offset() const { return 0; }
1513 //=============================================================================
#ifndef PRODUCT
// Debug printer: a BoxLock materializes the address of its stack slot.
void BoxLockNode::format( PhaseRegAlloc *ra_, outputStream* st ) const {
  int offset = ra_->reg2offset(in_RegMask(0).find_first_elem());
  int reg = ra_->get_reg_first(this);
  st->print("ADDI %s, SP, %d @BoxLockNode",Matcher::regName[reg],offset);
}
#endif

// One 4-byte instruction (the addi in emit() below).
uint BoxLockNode::size(PhaseRegAlloc *ra_) const {
  return 4;
}

// Compute the address of the lock's stack slot into the assigned register.
void BoxLockNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
  MacroAssembler _masm(&cbuf);
  int offset = ra_->reg2offset(in_RegMask(0).find_first_elem());
  int reg = ra_->get_encode(this);

  // NOTE(review): addi is a 32-bit, overflow-trapping add with a simm16
  // displacement; other SP-relative address math in this file uses
  // daddiu — confirm offset always fits and whether daddiu is intended.
  __ addi(as_Register(reg), SP, offset);
  /*
  // Leftover x86 encoding (LEA reg,[SP+offset]) kept for reference.
  if( offset >= 128 ) {
    emit_opcode(cbuf, 0x8D);  // LEA  reg,[SP+offset]
    emit_rm(cbuf, 0x2, reg, 0x04);
    emit_rm(cbuf, 0x0, 0x04, SP_enc);
    emit_d32(cbuf, offset);
  }
  else {
    emit_opcode(cbuf, 0x8D);  // LEA  reg,[SP+offset]
    emit_rm(cbuf, 0x1, reg, 0x04);
    emit_rm(cbuf, 0x0, 0x04, SP_enc);
    emit_d8(cbuf, offset);
  }
  */
}
1551 //static int sizeof_FFree_Float_Stack_All = -1;
1553 int MachCallRuntimeNode::ret_addr_offset() {
1554 //lui
1555 //ori
1556 //dsll
1557 //ori
1558 //jalr
1559 //nop
1560 assert(NativeCall::instruction_size == 24, "in MachCallRuntimeNode::ret_addr_offset()");
1561 return NativeCall::instruction_size;
1562 // return 16;
1563 }
1569 //=============================================================================
#ifndef PRODUCT
// Debug printer: one line summarizing _count nops (4 bytes each).
void MachNopNode::format( PhaseRegAlloc *, outputStream* st ) const {
  st->print("NOP \t# %d bytes pad for loops and calls", 4 * _count);
}
#endif

// Emit _count nop instructions.
void MachNopNode::emit(CodeBuffer &cbuf, PhaseRegAlloc * ) const {
  MacroAssembler _masm(&cbuf);
  int i = 0;
  for(i = 0; i < _count; i++)
    __ nop();
}

// 4 bytes per nop.
uint MachNopNode::size(PhaseRegAlloc *) const {
  return 4 * _count;
}
const Pipeline* MachNopNode::pipeline() const {
  return MachNode::pipeline_class();
}
1590 //=============================================================================
1592 //=============================================================================
1593 #ifndef PRODUCT
1594 void MachUEPNode::format( PhaseRegAlloc *ra_, outputStream* st ) const {
1595 st->print_cr("load_klass(AT, T0)");
1596 st->print_cr("\tbeq(AT, iCache, L)");
1597 st->print_cr("\tnop");
1598 st->print_cr("\tjmp(SharedRuntime::get_ic_miss_stub(), relocInfo::runtime_call_type)");
1599 st->print_cr("\tnop");
1600 st->print_cr("\tnop");
1601 st->print_cr(" L:");
1602 }
1603 #endif
// Unverified entry point: compare the receiver's klass (loaded from T0)
// against the inline-cache klass; on mismatch, tail-jump to the IC-miss
// stub through T9.
void MachUEPNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
  MacroAssembler _masm(&cbuf);
#ifdef ASSERT
  //uint code_size = cbuf.code_size();
#endif
  int ic_reg = Matcher::inline_cache_reg_encode();
  Label L;
  Register receiver = T0;
  Register iCache = as_Register(ic_reg);
  __ load_klass(AT, receiver);
  __ beq(AT, iCache, L);
  __ nop();

  // IC miss: load the stub address with a patchable 48-bit set and jump.
  __ relocate(relocInfo::runtime_call_type);
  __ patchable_set48(T9, (long)SharedRuntime::get_ic_miss_stub());
  __ jr(T9);
  __ nop();

  /* WARNING these NOPs are critical so that verified entry point is properly
   * 8 bytes aligned for patching by NativeJump::patch_verified_entry() */
  __ align(CodeEntryAlignment);
  __ bind(L);
}

uint MachUEPNode::size(PhaseRegAlloc *ra_) const {
  return MachNode::size(ra_);
}
1636 //=============================================================================
// Register class for the constant-table base pointer.
const RegMask& MachConstantBaseNode::_out_RegMask = P_REG_mask();

int Compile::ConstantTable::calculate_table_base_offset() const {
  return 0;  // absolute addressing, no offset
}

// This port materializes the base in emit(); no post-allocation expansion.
bool MachConstantBaseNode::requires_postalloc_expand() const { return false; }
void MachConstantBaseNode::postalloc_expand(GrowableArray <Node *> *nodes, PhaseRegAlloc *ra_) {
  ShouldNotReachHere();
}
// Materialize the constant-table base address into the allocated register
// using a patchable 48-bit load, marked with an internal_pc relocation.
void MachConstantBaseNode::emit(CodeBuffer& cbuf, PhaseRegAlloc* ra_) const {
  Compile* C = ra_->C;
  Compile::ConstantTable& constant_table = C->constant_table();
  MacroAssembler _masm(&cbuf);

  Register Rtoc = as_Register(ra_->get_encode(this));
  CodeSection* consts_section = __ code()->consts();
  int consts_size = consts_section->align_at_start(consts_section->size());
  assert(constant_table.size() == consts_size, "must be equal");

  if (consts_section->size()) {
    // Materialize the constant table base.
    // table_base_offset() is 0 on this port (absolute addressing).
    address baseaddr = consts_section->start() + -(constant_table.table_base_offset());
    // RelocationHolder rspec = internal_word_Relocation::spec(baseaddr);
    __ relocate(relocInfo::internal_pc_type);
    __ patchable_set48(Rtoc, (long)baseaddr);
  }
}
// NOTE(review): always reports 16 bytes, while emit() emits nothing when
// the constant section is empty — presumably the node only exists when
// there are constants; confirm.
uint MachConstantBaseNode::size(PhaseRegAlloc* ra_) const {
  // patchable_set48 (4 insts)
  return 4 * 4;
}

#ifndef PRODUCT
void MachConstantBaseNode::format(PhaseRegAlloc* ra_, outputStream* st) const {
  Register r = as_Register(ra_->get_encode(this));
  st->print("patchable_set48 %s, &constanttable (constant table base) @ MachConstantBaseNode", r->name());
}
#endif
1681 //=============================================================================
#ifndef PRODUCT
// Debug-only pretty printer for the method prolog; mirrors the sequence
// produced by MachPrologNode::emit().
void MachPrologNode::format( PhaseRegAlloc *ra_, outputStream* st ) const {
  Compile* C = ra_->C;

  int framesize = C->frame_size_in_bytes();
  int bangsize = C->bang_size_in_bytes();
  assert((framesize & (StackAlignmentInBytes-1)) == 0, "frame size not aligned");

  // Calls to C2R adapters often do not accept exceptional returns.
  // We require that their callers must bang for them. But be careful, because
  // some VM calls (such as call site linkage) can use several kilobytes of
  // stack. But the stack safety zone should account for that.
  // See bugs 4446381, 4468289, 4497237.
  if (C->need_stack_bang(bangsize)) {
    st->print_cr("# stack bang"); st->print("\t");
  }
  if (UseLoongsonISA) {
    // Quad-word store saves RA and FP in one instruction.
    st->print("gssq RA, FP, %d(SP) @ MachPrologNode\n\t", -wordSize*2);
  } else {
    st->print("sd RA, %d(SP) @ MachPrologNode\n\t", -wordSize);
    st->print("sd FP, %d(SP) @ MachPrologNode\n\t", -wordSize*2);
  }
  st->print("daddiu FP, SP, -%d \n\t", wordSize*2);
  st->print("daddiu SP, SP, -%d \t",framesize);
}
#endif
1710 void MachPrologNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
1711 Compile* C = ra_->C;
1712 MacroAssembler _masm(&cbuf);
1714 int framesize = C->frame_size_in_bytes();
1715 int bangsize = C->bang_size_in_bytes();
1717 // __ verified_entry(framesize, C->need_stack_bang(bangsize)?bangsize:0, false);
1719 assert((framesize & (StackAlignmentInBytes-1)) == 0, "frame size not aligned");
1721 if (C->need_stack_bang(framesize)) {
1722 __ generate_stack_overflow_check(framesize);
1723 }
1725 if (UseLoongsonISA) {
1726 __ gssq(RA, FP, SP, -wordSize*2);
1727 } else {
1728 __ sd(RA, SP, -wordSize);
1729 __ sd(FP, SP, -wordSize*2);
1730 }
1731 __ daddiu(FP, SP, -wordSize*2);
1732 __ daddiu(SP, SP, -framesize);
1733 __ nop(); /* 2013.10.22 Jin: Make enough room for patch_verified_entry() */
1734 __ nop();
1736 C->set_frame_complete(cbuf.insts_size());
1737 if (C->has_mach_constant_base_node()) {
1738 // NOTE: We set the table base offset here because users might be
1739 // emitted before MachConstantBaseNode.
1740 Compile::ConstantTable& constant_table = C->constant_table();
1741 constant_table.set_table_base_offset(constant_table.calculate_table_base_offset());
1742 }
1744 }
// Size depends on flags (bang, Loongson ISA); use the generic measured size.
uint MachPrologNode::size(PhaseRegAlloc *ra_) const {
  //fprintf(stderr, "\nPrologNode::size(ra_)= %d \n", MachNode::size(ra_));//fujie debug
  return MachNode::size(ra_); // too many variables; just compute it the hard way
}

int MachPrologNode::reloc() const {
  return 0; // a large enough number
}
1756 %}
1758 //----------ENCODING BLOCK-----------------------------------------------------
1759 // This block specifies the encoding classes used by the compiler to output
1760 // byte streams. Encoding classes generate functions which are called by
1761 // Machine Instruction Nodes in order to generate the bit encoding of the
1762 // instruction. Operands specify their base encoding interface with the
// interface keyword. Four interfaces are currently supported:
1764 // REG_INTER, CONST_INTER, MEMORY_INTER, & COND_INTER. REG_INTER causes an
1765 // operand to generate a function which returns its register number when
1766 // queried. CONST_INTER causes an operand to generate a function which
1767 // returns the value of the constant when queried. MEMORY_INTER causes an
1768 // operand to generate four functions which return the Base Register, the
1769 // Index Register, the Scale Value, and the Offset Value of the operand when
1770 // queried. COND_INTER causes an operand to generate six functions which
1771 // return the encoding code (ie - encoding bits for the instruction)
1772 // associated with each basic boolean condition for a conditional instruction.
1773 // Instructions specify two basic values for encoding. They use the
1774 // ins_encode keyword to specify their encoding class (which must be one of
1775 // the class names specified in the encoding block), and they use the
1776 // opcode keyword to specify, in order, their primary, secondary, and
1777 // tertiary opcode. Only the opcode sections which a particular instruction
1778 // needs for encoding need to be specified.
1779 encode %{
1780 /*
1781 Alias:
1782 1044 b java.io.ObjectInputStream::readHandle (130 bytes)
1783 118 B14: # B19 B15 <- B13 Freq: 0.899955
1784 118 add S1, S2, V0 #@addP_reg_reg
1785 11c lb S0, [S1 + #-8257524] #@loadB
1786 120 BReq S0, #3, B19 #@branchConI_reg_imm P=0.100000 C=-1.000000
1787 */
  // Load byte signed (lb / Loongson gslbx) from [base + (index << scale) + disp].
  // The scratch registers AT and T9 are clobbered for address computation.
  // NOTE(review): address arithmetic here uses the 32-bit addu while the
  // unsigned variant (load_UB_enc) uses daddu — confirm whether 64-bit
  // daddu is intended for pointer math on MIPS64.
  enc_class load_B_enc (mRegI dst, memory mem) %{
    MacroAssembler _masm(&cbuf);
    int dst = $dst$$reg;
    int base = $mem$$base;
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;

    if( index != 0 ) {
      if( Assembler::is_simm16(disp) ) {
        if( UseLoongsonISA ) {
          // gslbx folds base + index + disp into one instruction.
          if (scale == 0) {
            __ gslbx(as_Register(dst), as_Register(base), as_Register(index), disp);
          } else {
            __ dsll(AT, as_Register(index), scale);
            __ gslbx(as_Register(dst), as_Register(base), AT, disp);
          }
        } else {
          if (scale == 0) {
            __ addu(AT, as_Register(base), as_Register(index));
          } else {
            __ dsll(AT, as_Register(index), scale);
            __ addu(AT, as_Register(base), AT);
          }
          __ lb(as_Register(dst), AT, disp);
        }
      } else {
        // Displacement does not fit in simm16: materialize it in T9.
        if (scale == 0) {
          __ addu(AT, as_Register(base), as_Register(index));
        } else {
          __ dsll(AT, as_Register(index), scale);
          __ addu(AT, as_Register(base), AT);
        }
        __ move(T9, disp);
        if( UseLoongsonISA ) {
          __ gslbx(as_Register(dst), AT, T9, 0);
        } else {
          __ addu(AT, AT, T9);
          __ lb(as_Register(dst), AT, 0);
        }
      }
    } else {
      // No index register.
      if( Assembler::is_simm16(disp) ) {
        __ lb(as_Register(dst), as_Register(base), disp);
      } else {
        __ move(T9, disp);
        if( UseLoongsonISA ) {
          __ gslbx(as_Register(dst), as_Register(base), T9, 0);
        } else {
          __ addu(AT, as_Register(base), T9);
          __ lb(as_Register(dst), AT, 0);
        }
      }
    }
  %}
  // Load byte unsigned (lbu) from [base + (index << scale) + disp].
  // AT and T9 are clobbered for address computation; unlike load_B_enc,
  // this variant always uses 64-bit daddu for the address math and never
  // uses Loongson gslbx.
  enc_class load_UB_enc (mRegI dst, memory mem) %{
    MacroAssembler _masm(&cbuf);
    int dst = $dst$$reg;
    int base = $mem$$base;
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;

    if( index != 0 ) {
      // Fold base + (index << scale) into AT.
      if (scale == 0) {
        __ daddu(AT, as_Register(base), as_Register(index));
      } else {
        __ dsll(AT, as_Register(index), scale);
        __ daddu(AT, as_Register(base), AT);
      }
      if( Assembler::is_simm16(disp) ) {
        __ lbu(as_Register(dst), AT, disp);
      } else {
        // Displacement too large for the load's simm16 field.
        __ move(T9, disp);
        __ daddu(AT, AT, T9);
        __ lbu(as_Register(dst), AT, 0);
      }
    } else {
      if( Assembler::is_simm16(disp) ) {
        __ lbu(as_Register(dst), as_Register(base), disp);
      } else {
        __ move(T9, disp);
        __ daddu(AT, as_Register(base), T9);
        __ lbu(as_Register(dst), AT, 0);
      }
    }
  %}
  // Store byte from a register (sb / Loongson gssbx) to
  // [base + (index << scale) + disp].  AT and T9 are clobbered.
  // NOTE(review): the gssbx displacement is limited to simm8, hence the
  // extra is_simm(disp, 8) tier; address math uses 32-bit addu — confirm
  // daddu is not required (cf. load_UB_enc).
  enc_class store_B_reg_enc (memory mem, mRegI src) %{
    MacroAssembler _masm(&cbuf);
    int src = $src$$reg;
    int base = $mem$$base;
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;

    if( index != 0 ) {
      if (scale == 0) {
        // index used directly (no shift).
        if( Assembler::is_simm(disp, 8) ) {
          if (UseLoongsonISA) {
            __ gssbx(as_Register(src), as_Register(base), as_Register(index), disp);
          } else {
            __ addu(AT, as_Register(base), as_Register(index));
            __ sb(as_Register(src), AT, disp);
          }
        } else if( Assembler::is_simm16(disp) ) {
          __ addu(AT, as_Register(base), as_Register(index));
          __ sb(as_Register(src), AT, disp);
        } else {
          // Displacement needs its own register.
          __ addu(AT, as_Register(base), as_Register(index));
          __ move(T9, disp);
          if (UseLoongsonISA) {
            __ gssbx(as_Register(src), AT, T9, 0);
          } else {
            __ addu(AT, AT, T9);
            __ sb(as_Register(src), AT, 0);
          }
        }
      } else {
        // Scaled index: shift into AT first.
        __ dsll(AT, as_Register(index), scale);
        if( Assembler::is_simm(disp, 8) ) {
          if (UseLoongsonISA) {
            __ gssbx(as_Register(src), AT, as_Register(base), disp);
          } else {
            __ addu(AT, as_Register(base), AT);
            __ sb(as_Register(src), AT, disp);
          }
        } else if( Assembler::is_simm16(disp) ) {
          __ addu(AT, as_Register(base), AT);
          __ sb(as_Register(src), AT, disp);
        } else {
          __ addu(AT, as_Register(base), AT);
          __ move(T9, disp);
          if (UseLoongsonISA) {
            __ gssbx(as_Register(src), AT, T9, 0);
          } else {
            __ addu(AT, AT, T9);
            __ sb(as_Register(src), AT, 0);
          }
        }
      }
    } else {
      // No index register.
      if( Assembler::is_simm16(disp) ) {
        __ sb(as_Register(src), as_Register(base), disp);
      } else {
        __ move(T9, disp);
        if (UseLoongsonISA) {
          __ gssbx(as_Register(src), as_Register(base), T9, 0);
        } else {
          __ addu(AT, as_Register(base), T9);
          __ sb(as_Register(src), AT, 0);
        }
      }
    }
  %}
  // Store-byte-of-immediate encoding: emits code for  *(byte*)(base + (index << scale) + disp) = value.
  // AT and T9 are the scratch registers reserved for the MIPS port; a zero immediate
  // is stored directly from R0 (hard-wired zero) to save the materializing move.
  // gssbx is the Loongson-extension indexed byte store (reg + reg + 8-bit immediate
  // addressing), used only when UseLoongsonISA is set and disp fits in 8 signed bits.
  enc_class store_B_immI_enc (memory mem, immI8 src) %{
    MacroAssembler _masm(&cbuf);
    int base = $mem$$base;
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;
    int value = $src$$constant;

    if( index != 0 ) {
      if (!UseLoongsonISA) {
        // Plain MIPS path: fold base+index<<scale into AT first, then store.
        if (scale == 0) {
          __ daddu(AT, as_Register(base), as_Register(index));
        } else {
          __ dsll(AT, as_Register(index), scale);
          __ daddu(AT, as_Register(base), AT);
        }
        if( Assembler::is_simm16(disp) ) {
          if (value == 0) {
            __ sb(R0, AT, disp);
          } else {
            __ move(T9, value);
            __ sb(T9, AT, disp);
          }
        } else {
          // disp does not fit the 16-bit sb offset: add it into AT via T9.
          if (value == 0) {
            __ move(T9, disp);
            __ daddu(AT, AT, T9);
            __ sb(R0, AT, 0);
          } else {
            __ move(T9, disp);
            __ daddu(AT, AT, T9);
            __ move(T9, value);
            __ sb(T9, AT, 0);
          }
        }
      } else {

        // Loongson path: prefer the single gssbx instruction when disp fits 8 bits.
        if (scale == 0) {
          if( Assembler::is_simm(disp, 8) ) {
            if (value == 0) {
              __ gssbx(R0, as_Register(base), as_Register(index), disp);
            } else {
              __ move(T9, value);
              __ gssbx(T9, as_Register(base), as_Register(index), disp);
            }
          } else if( Assembler::is_simm16(disp) ) {
            __ daddu(AT, as_Register(base), as_Register(index));
            if (value == 0) {
              __ sb(R0, AT, disp);
            } else {
              __ move(T9, value);
              __ sb(T9, AT, disp);
            }
          } else {
            if (value == 0) {
              __ daddu(AT, as_Register(base), as_Register(index));
              __ move(T9, disp);
              __ gssbx(R0, AT, T9, 0);
            } else {
              // AT = base + disp, then index supplies the second operand of gssbx.
              __ move(AT, disp);
              __ move(T9, value);
              __ daddu(AT, as_Register(base), AT);
              __ gssbx(T9, AT, as_Register(index), 0);
            }
          }

        } else {

          if( Assembler::is_simm(disp, 8) ) {
            __ dsll(AT, as_Register(index), scale);
            if (value == 0) {
              __ gssbx(R0, as_Register(base), AT, disp);
            } else {
              __ move(T9, value);
              __ gssbx(T9, as_Register(base), AT, disp);
            }
          } else if( Assembler::is_simm16(disp) ) {
            __ dsll(AT, as_Register(index), scale);
            __ daddu(AT, as_Register(base), AT);
            if (value == 0) {
              __ sb(R0, AT, disp);
            } else {
              __ move(T9, value);
              __ sb(T9, AT, disp);
            }
          } else {
            __ dsll(AT, as_Register(index), scale);
            if (value == 0) {
              __ daddu(AT, as_Register(base), AT);
              __ move(T9, disp);
              __ gssbx(R0, AT, T9, 0);
            } else {
              // AT = index<<scale + disp; base supplies gssbx's first address operand.
              __ move(T9, disp);
              __ daddu(AT, AT, T9);
              __ move(T9, value);
              __ gssbx(T9, as_Register(base), AT, 0);
            }
          }
        }
      }
    } else {
      // No index register: address is base + disp only.
      if( Assembler::is_simm16(disp) ) {
        if (value == 0) {
          __ sb(R0, as_Register(base), disp);
        } else {
          __ move(AT, value);
          __ sb(AT, as_Register(base), disp);
        }
      } else {
        if (value == 0) {
          __ move(T9, disp);
          if (UseLoongsonISA) {
            __ gssbx(R0, as_Register(base), T9, 0);
          } else {
            __ daddu(AT, as_Register(base), T9);
            __ sb(R0, AT, 0);
          }
        } else {
          __ move(T9, disp);
          if (UseLoongsonISA) {
            __ move(AT, value);
            __ gssbx(AT, as_Register(base), T9, 0);
          } else {
            __ daddu(AT, as_Register(base), T9);
            __ move(T9, value);
            __ sb(T9, AT, 0);
          }
        }
      }
    }
  %}
  // Same byte-immediate store as store_B_immI_enc, but followed by a memory
  // barrier (__ sync()) at the end — used where the matcher requires the store
  // to be globally visible before subsequent accesses (volatile-style store).
  enc_class store_B_immI_enc_sync (memory mem, immI8 src) %{
    MacroAssembler _masm(&cbuf);
    int base = $mem$$base;
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;
    int value = $src$$constant;

    if( index != 0 ) {
      if ( UseLoongsonISA ) {
        // Loongson path: use the single gssbx (reg+reg+imm8) form when possible.
        if ( Assembler::is_simm(disp,8) ) {
          if ( scale == 0 ) {
            if ( value == 0 ) {
              __ gssbx(R0, as_Register(base), as_Register(index), disp);
            } else {
              __ move(AT, value);
              __ gssbx(AT, as_Register(base), as_Register(index), disp);
            }
          } else {
            __ dsll(AT, as_Register(index), scale);
            if ( value == 0 ) {
              __ gssbx(R0, as_Register(base), AT, disp);
            } else {
              __ move(T9, value);
              __ gssbx(T9, as_Register(base), AT, disp);
            }
          }
        } else if ( Assembler::is_simm16(disp) ) {
          // disp fits the ordinary 16-bit sb offset: compute base+index in AT.
          if ( scale == 0 ) {
            __ daddu(AT, as_Register(base), as_Register(index));
            if ( value == 0 ){
              __ sb(R0, AT, disp);
            } else {
              __ move(T9, value);
              __ sb(T9, AT, disp);
            }
          } else {
            __ dsll(AT, as_Register(index), scale);
            __ daddu(AT, as_Register(base), AT);
            if ( value == 0 ) {
              __ sb(R0, AT, disp);
            } else {
              __ move(T9, value);
              __ sb(T9, AT, disp);
            }
          }
        } else {
          // Large disp: fold index<<scale + disp into AT, keep base for gssbx.
          if ( scale == 0 ) {
            __ move(AT, disp);
            __ daddu(AT, as_Register(index), AT);
            if ( value == 0 ) {
              __ gssbx(R0, as_Register(base), AT, 0);
            } else {
              __ move(T9, value);
              __ gssbx(T9, as_Register(base), AT, 0);
            }
          } else {
            __ dsll(AT, as_Register(index), scale);
            __ move(T9, disp);
            __ daddu(AT, AT, T9);
            if ( value == 0 ) {
              __ gssbx(R0, as_Register(base), AT, 0);
            } else {
              __ move(T9, value);
              __ gssbx(T9, as_Register(base), AT, 0);
            }
          }
        }
      } else { //not use loongson isa
        // Plain MIPS path: AT = base + index<<scale, then sb with offset.
        if (scale == 0) {
          __ daddu(AT, as_Register(base), as_Register(index));
        } else {
          __ dsll(AT, as_Register(index), scale);
          __ daddu(AT, as_Register(base), AT);
        }
        if( Assembler::is_simm16(disp) ) {
          if (value == 0) {
            __ sb(R0, AT, disp);
          } else {
            __ move(T9, value);
            __ sb(T9, AT, disp);
          }
        } else {
          if (value == 0) {
            __ move(T9, disp);
            __ daddu(AT, AT, T9);
            __ sb(R0, AT, 0);
          } else {
            __ move(T9, disp);
            __ daddu(AT, AT, T9);
            __ move(T9, value);
            __ sb(T9, AT, 0);
          }
        }
      }
    } else {
      // No index register: address is base + disp.
      if ( UseLoongsonISA ){
        if ( Assembler::is_simm16(disp) ){
          if ( value == 0 ) {
            __ sb(R0, as_Register(base), disp);
          } else {
            __ move(AT, value);
            __ sb(AT, as_Register(base), disp);
          }
        } else {
          __ move(AT, disp);
          if ( value == 0 ) {
            __ gssbx(R0, as_Register(base), AT, 0);
          } else {
            __ move(T9, value);
            __ gssbx(T9, as_Register(base), AT, 0);
          }
        }
      } else {
        if( Assembler::is_simm16(disp) ) {
          if (value == 0) {
            __ sb(R0, as_Register(base), disp);
          } else {
            __ move(AT, value);
            __ sb(AT, as_Register(base), disp);
          }
        } else {
          if (value == 0) {
            __ move(T9, disp);
            __ daddu(AT, as_Register(base), T9);
            __ sb(R0, AT, 0);
          } else {
            __ move(T9, disp);
            __ daddu(AT, as_Register(base), T9);
            __ move(T9, value);
            __ sb(T9, AT, 0);
          }
        }
      }
    }

    // Barrier: order this store before all subsequent memory accesses.
    __ sync();
  %}
  // Load Short (16bit signed): dst = *(short*)(base + (index << scale) + disp),
  // sign-extended via lh. gslhx is the Loongson indexed load-halfword
  // (reg + reg + 8-bit immediate addressing); AT/T9 are scratch registers.
  enc_class load_S_enc (mRegI dst, memory mem) %{
    MacroAssembler _masm(&cbuf);
    int dst = $dst$$reg;
    int base = $mem$$base;
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;

    if( index != 0 ) {
      if ( UseLoongsonISA ) {
        if ( Assembler::is_simm(disp, 8) ) {
          // disp fits gslhx's 8-bit immediate: one indexed load suffices.
          if (scale == 0) {
            __ gslhx(as_Register(dst), as_Register(base), as_Register(index), disp);
          } else {
            __ dsll(AT, as_Register(index), scale);
            __ gslhx(as_Register(dst), as_Register(base), AT, disp);
          }
        } else if ( Assembler::is_simm16(disp) ) {
          // disp fits lh's 16-bit offset: compute base+index in AT first.
          if (scale == 0) {
            __ daddu(AT, as_Register(base), as_Register(index));
            __ lh(as_Register(dst), AT, disp);
          } else {
            __ dsll(AT, as_Register(index), scale);
            __ daddu(AT, as_Register(base), AT);
            __ lh(as_Register(dst), AT, disp);
          }
        } else {
          // Large disp: fold index<<scale + disp into AT, keep base for gslhx.
          if (scale == 0) {
            __ move(AT, disp);
            __ daddu(AT, as_Register(index), AT);
            __ gslhx(as_Register(dst), as_Register(base), AT, 0);
          } else {
            __ dsll(AT, as_Register(index), scale);
            __ move(T9, disp);
            __ daddu(AT, AT, T9);
            __ gslhx(as_Register(dst), as_Register(base), AT, 0);
          }
        }
      } else { // not use loongson isa
        if (scale == 0) {
          __ daddu(AT, as_Register(base), as_Register(index));
        } else {
          __ dsll(AT, as_Register(index), scale);
          __ daddu(AT, as_Register(base), AT);
        }
        if( Assembler::is_simm16(disp) ) {
          __ lh(as_Register(dst), AT, disp);
        } else {
          __ move(T9, disp);
          __ daddu(AT, AT, T9);
          __ lh(as_Register(dst), AT, 0);
        }
      }
    } else { // index is 0
      if ( UseLoongsonISA ) {
        if ( Assembler::is_simm16(disp) ) {
          __ lh(as_Register(dst), as_Register(base), disp);
        } else {
          __ move(T9, disp);
          __ gslhx(as_Register(dst), as_Register(base), T9, 0);
        }
      } else { //not use loongson isa
        if( Assembler::is_simm16(disp) ) {
          __ lh(as_Register(dst), as_Register(base), disp);
        } else {
          __ move(T9, disp);
          __ daddu(AT, as_Register(base), T9);
          __ lh(as_Register(dst), AT, 0);
        }
      }
    }
  %}
2293 // Load Char (16bit unsigned)
2294 enc_class load_C_enc (mRegI dst, memory mem) %{
2295 MacroAssembler _masm(&cbuf);
2296 int dst = $dst$$reg;
2297 int base = $mem$$base;
2298 int index = $mem$$index;
2299 int scale = $mem$$scale;
2300 int disp = $mem$$disp;
2302 if( index != 0 ) {
2303 if (scale == 0) {
2304 __ daddu(AT, as_Register(base), as_Register(index));
2305 } else {
2306 __ dsll(AT, as_Register(index), scale);
2307 __ daddu(AT, as_Register(base), AT);
2308 }
2309 if( Assembler::is_simm16(disp) ) {
2310 __ lhu(as_Register(dst), AT, disp);
2311 } else {
2312 __ move(T9, disp);
2313 __ addu(AT, AT, T9);
2314 __ lhu(as_Register(dst), AT, 0);
2315 }
2316 } else {
2317 if( Assembler::is_simm16(disp) ) {
2318 __ lhu(as_Register(dst), as_Register(base), disp);
2319 } else {
2320 __ move(T9, disp);
2321 __ daddu(AT, as_Register(base), T9);
2322 __ lhu(as_Register(dst), AT, 0);
2323 }
2324 }
2325 %}
  // Store Char (16bit unsigned): *(unsigned short*)(base + (index << scale) + disp) = src.
  // gsshx is the Loongson indexed halfword store (reg + reg + 8-bit immediate).
  // NOTE(review): this encoding uses the 32-bit `addu` for address arithmetic,
  // while most sibling encodings use `daddu`; that is only safe if the computed
  // address always sign-extends from 32 bits — confirm against the port's
  // addressing assumptions.
  enc_class store_C_reg_enc (memory mem, mRegI src) %{
    MacroAssembler _masm(&cbuf);
    int src = $src$$reg;
    int base = $mem$$base;
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;

    if( index != 0 ) {
      if( Assembler::is_simm16(disp) ) {
        if( UseLoongsonISA && Assembler::is_simm(disp, 8) ) {
          // One-instruction form when disp fits gsshx's 8-bit immediate.
          if (scale == 0) {
            __ gsshx(as_Register(src), as_Register(base), as_Register(index), disp);
          } else {
            __ dsll(AT, as_Register(index), scale);
            __ gsshx(as_Register(src), as_Register(base), AT, disp);
          }
        } else {
          if (scale == 0) {
            __ addu(AT, as_Register(base), as_Register(index));
          } else {
            __ dsll(AT, as_Register(index), scale);
            __ addu(AT, as_Register(base), AT);
          }
          __ sh(as_Register(src), AT, disp);
        }
      } else {
        // Large disp: materialize it in T9 and combine with AT.
        if (scale == 0) {
          __ addu(AT, as_Register(base), as_Register(index));
        } else {
          __ dsll(AT, as_Register(index), scale);
          __ addu(AT, as_Register(base), AT);
        }
        __ move(T9, disp);
        if( UseLoongsonISA ) {
          __ gsshx(as_Register(src), AT, T9, 0);
        } else {
          __ addu(AT, AT, T9);
          __ sh(as_Register(src), AT, 0);
        }
      }
    } else {
      if( Assembler::is_simm16(disp) ) {
        __ sh(as_Register(src), as_Register(base), disp);
      } else {
        __ move(T9, disp);
        if( UseLoongsonISA ) {
          __ gsshx(as_Register(src), as_Register(base), T9, 0);
        } else {
          __ addu(AT, as_Register(base), T9);
          __ sh(as_Register(src), AT, 0);
        }
      }
    }
  %}
  // Store char-zero: *(unsigned short*)(base + (index << scale) + disp) = 0,
  // stored directly from R0 (hard-wired zero) so no immediate materialization
  // is needed. Structure mirrors store_C_reg_enc with src fixed to R0.
  // NOTE(review): uses 32-bit `addu` for address arithmetic like
  // store_C_reg_enc — confirm addresses always fit 32-bit sign-extension.
  enc_class store_C0_enc (memory mem) %{
    MacroAssembler _masm(&cbuf);
    int base = $mem$$base;
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;

    if( index != 0 ) {
      if( Assembler::is_simm16(disp) ) {
        if( UseLoongsonISA && Assembler::is_simm(disp, 8) ) {
          // Single Loongson indexed store when disp fits 8 bits.
          if (scale == 0) {
            __ gsshx(R0, as_Register(base), as_Register(index), disp);
          } else {
            __ dsll(AT, as_Register(index), scale);
            __ gsshx(R0, as_Register(base), AT, disp);
          }
        } else {
          if (scale == 0) {
            __ addu(AT, as_Register(base), as_Register(index));
          } else {
            __ dsll(AT, as_Register(index), scale);
            __ addu(AT, as_Register(base), AT);
          }
          __ sh(R0, AT, disp);
        }
      } else {
        if (scale == 0) {
          __ addu(AT, as_Register(base), as_Register(index));
        } else {
          __ dsll(AT, as_Register(index), scale);
          __ addu(AT, as_Register(base), AT);
        }
        __ move(T9, disp);
        if( UseLoongsonISA ) {
          __ gsshx(R0, AT, T9, 0);
        } else {
          __ addu(AT, AT, T9);
          __ sh(R0, AT, 0);
        }
      }
    } else {
      if( Assembler::is_simm16(disp) ) {
        __ sh(R0, as_Register(base), disp);
      } else {
        __ move(T9, disp);
        if( UseLoongsonISA ) {
          __ gsshx(R0, as_Register(base), T9, 0);
        } else {
          __ addu(AT, as_Register(base), T9);
          __ sh(R0, AT, 0);
        }
      }
    }
  %}
  // Load Int (32bit signed): dst = *(int*)(base + (index << scale) + disp),
  // sign-extended via lw. gslwx is the Loongson indexed word load
  // (reg + reg + 8-bit immediate addressing).
  // NOTE(review): address arithmetic here uses 32-bit `addu` while the
  // long/pointer encodings use `daddu` — confirm the addressing assumptions.
  enc_class load_I_enc (mRegI dst, memory mem) %{
    MacroAssembler _masm(&cbuf);
    int dst = $dst$$reg;
    int base = $mem$$base;
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;

    if( index != 0 ) {
      if( Assembler::is_simm16(disp) ) {
        if( UseLoongsonISA && Assembler::is_simm(disp, 8) ) {
          // Single-instruction indexed load when disp fits gslwx's 8-bit field.
          if (scale == 0) {
            __ gslwx(as_Register(dst), as_Register(base), as_Register(index), disp);
          } else {
            __ dsll(AT, as_Register(index), scale);
            __ gslwx(as_Register(dst), as_Register(base), AT, disp);
          }
        } else {
          if (scale == 0) {
            __ addu(AT, as_Register(base), as_Register(index));
          } else {
            __ dsll(AT, as_Register(index), scale);
            __ addu(AT, as_Register(base), AT);
          }
          __ lw(as_Register(dst), AT, disp);
        }
      } else {
        // disp exceeds 16 bits: materialize in T9 and combine.
        if (scale == 0) {
          __ addu(AT, as_Register(base), as_Register(index));
        } else {
          __ dsll(AT, as_Register(index), scale);
          __ addu(AT, as_Register(base), AT);
        }
        __ move(T9, disp);
        if( UseLoongsonISA ) {
          __ gslwx(as_Register(dst), AT, T9, 0);
        } else {
          __ addu(AT, AT, T9);
          __ lw(as_Register(dst), AT, 0);
        }
      }
    } else {
      if( Assembler::is_simm16(disp) ) {
        __ lw(as_Register(dst), as_Register(base), disp);
      } else {
        __ move(T9, disp);
        if( UseLoongsonISA ) {
          __ gslwx(as_Register(dst), as_Register(base), T9, 0);
        } else {
          __ addu(AT, as_Register(base), T9);
          __ lw(as_Register(dst), AT, 0);
        }
      }
    }
  %}
  // Store Int (32bit): *(int*)(base + (index << scale) + disp) = src.
  // gsswx is the Loongson indexed word store (reg + reg + 8-bit immediate).
  // Structure mirrors load_I_enc with the data direction reversed.
  enc_class store_I_reg_enc (memory mem, mRegI src) %{
    MacroAssembler _masm(&cbuf);
    int src = $src$$reg;
    int base = $mem$$base;
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;

    if( index != 0 ) {
      if( Assembler::is_simm16(disp) ) {
        if( UseLoongsonISA && Assembler::is_simm(disp, 8) ) {
          // Single-instruction indexed store when disp fits 8 bits.
          if (scale == 0) {
            __ gsswx(as_Register(src), as_Register(base), as_Register(index), disp);
          } else {
            __ dsll(AT, as_Register(index), scale);
            __ gsswx(as_Register(src), as_Register(base), AT, disp);
          }
        } else {
          if (scale == 0) {
            __ addu(AT, as_Register(base), as_Register(index));
          } else {
            __ dsll(AT, as_Register(index), scale);
            __ addu(AT, as_Register(base), AT);
          }
          __ sw(as_Register(src), AT, disp);
        }
      } else {
        // Large disp: materialize in T9 and combine with AT.
        if (scale == 0) {
          __ addu(AT, as_Register(base), as_Register(index));
        } else {
          __ dsll(AT, as_Register(index), scale);
          __ addu(AT, as_Register(base), AT);
        }
        __ move(T9, disp);
        if( UseLoongsonISA ) {
          __ gsswx(as_Register(src), AT, T9, 0);
        } else {
          __ addu(AT, AT, T9);
          __ sw(as_Register(src), AT, 0);
        }
      }
    } else {
      if( Assembler::is_simm16(disp) ) {
        __ sw(as_Register(src), as_Register(base), disp);
      } else {
        __ move(T9, disp);
        if( UseLoongsonISA ) {
          __ gsswx(as_Register(src), as_Register(base), T9, 0);
        } else {
          __ addu(AT, as_Register(base), T9);
          __ sw(as_Register(src), AT, 0);
        }
      }
    }
  %}
  // Store-int-of-immediate: *(int*)(base + (index << scale) + disp) = value.
  // A zero immediate is stored straight from R0; otherwise the constant is
  // materialized into a scratch register (T9 or AT) first. gsswx is the
  // Loongson indexed word store (reg + reg + 8-bit immediate).
  enc_class store_I_immI_enc (memory mem, immI src) %{
    MacroAssembler _masm(&cbuf);
    int base = $mem$$base;
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;
    int value = $src$$constant;

    if( index != 0 ) {
      if ( UseLoongsonISA ) {
        if ( Assembler::is_simm(disp, 8) ) {
          // disp fits gsswx's 8-bit immediate: single indexed store.
          if ( scale == 0 ) {
            if ( value == 0 ) {
              __ gsswx(R0, as_Register(base), as_Register(index), disp);
            } else {
              __ move(T9, value);
              __ gsswx(T9, as_Register(base), as_Register(index), disp);
            }
          } else {
            __ dsll(AT, as_Register(index), scale);
            if ( value == 0 ) {
              __ gsswx(R0, as_Register(base), AT, disp);
            } else {
              __ move(T9, value);
              __ gsswx(T9, as_Register(base), AT, disp);
            }
          }
        } else if ( Assembler::is_simm16(disp) ) {
          // disp fits sw's 16-bit offset: compute base+index<<scale in AT.
          if ( scale == 0 ) {
            __ daddu(AT, as_Register(base), as_Register(index));
            if ( value == 0 ) {
              __ sw(R0, AT, disp);
            } else {
              __ move(T9, value);
              __ sw(T9, AT, disp);
            }
          } else {
            __ dsll(AT, as_Register(index), scale);
            __ daddu(AT, as_Register(base), AT);
            if ( value == 0 ) {
              __ sw(R0, AT, disp);
            } else {
              __ move(T9, value);
              __ sw(T9, AT, disp);
            }
          }
        } else {
          // Large disp: AT = index<<scale + disp; base feeds gsswx directly.
          if ( scale == 0 ) {
            __ move(T9, disp);
            __ daddu(AT, as_Register(index), T9);
            if ( value ==0 ) {
              __ gsswx(R0, as_Register(base), AT, 0);
            } else {
              __ move(T9, value);
              __ gsswx(T9, as_Register(base), AT, 0);
            }
          } else {
            __ dsll(AT, as_Register(index), scale);
            __ move(T9, disp);
            __ daddu(AT, AT, T9);
            if ( value == 0 ) {
              __ gsswx(R0, as_Register(base), AT, 0);
            } else {
              __ move(T9, value);
              __ gsswx(T9, as_Register(base), AT, 0);
            }
          }
        }
      } else { //not use loongson isa
        // Plain MIPS path: AT = base + index<<scale, then sw with offset.
        if (scale == 0) {
          __ daddu(AT, as_Register(base), as_Register(index));
        } else {
          __ dsll(AT, as_Register(index), scale);
          __ daddu(AT, as_Register(base), AT);
        }
        if( Assembler::is_simm16(disp) ) {
          if (value == 0) {
            __ sw(R0, AT, disp);
          } else {
            __ move(T9, value);
            __ sw(T9, AT, disp);
          }
        } else {
          if (value == 0) {
            __ move(T9, disp);
            __ daddu(AT, AT, T9);
            __ sw(R0, AT, 0);
          } else {
            __ move(T9, disp);
            __ daddu(AT, AT, T9);
            __ move(T9, value);
            __ sw(T9, AT, 0);
          }
        }
      }
    } else {
      // No index register: address is base + disp.
      if ( UseLoongsonISA ) {
        if ( Assembler::is_simm16(disp) ) {
          if ( value == 0 ) {
            __ sw(R0, as_Register(base), disp);
          } else {
            __ move(AT, value);
            __ sw(AT, as_Register(base), disp);
          }
        } else {
          __ move(T9, disp);
          if ( value == 0 ) {
            __ gsswx(R0, as_Register(base), T9, 0);
          } else {
            __ move(AT, value);
            __ gsswx(AT, as_Register(base), T9, 0);
          }
        }
      } else {
        if( Assembler::is_simm16(disp) ) {
          if (value == 0) {
            __ sw(R0, as_Register(base), disp);
          } else {
            __ move(AT, value);
            __ sw(AT, as_Register(base), disp);
          }
        } else {
          if (value == 0) {
            __ move(T9, disp);
            __ daddu(AT, as_Register(base), T9);
            __ sw(R0, AT, 0);
          } else {
            __ move(T9, disp);
            __ daddu(AT, as_Register(base), T9);
            __ move(T9, value);
            __ sw(T9, AT, 0);
          }
        }
      }
    }
  %}
  // Load narrow oop (compressed pointer): dst = *(uint32*)(base + (index << scale) + disp),
  // zero-extended via lwu. The memory operand must carry no displacement
  // relocation (asserted below); decoding of the narrow oop happens elsewhere.
  enc_class load_N_enc (mRegN dst, memory mem) %{
    MacroAssembler _masm(&cbuf);
    int dst = $dst$$reg;
    int base = $mem$$base;
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;
    relocInfo::relocType disp_reloc = $mem->disp_reloc();
    assert(disp_reloc == relocInfo::none, "cannot have disp");

    if( index != 0 ) {
      // AT = base + (index << scale)
      if (scale == 0) {
        __ daddu(AT, as_Register(base), as_Register(index));
      } else {
        __ dsll(AT, as_Register(index), scale);
        __ daddu(AT, as_Register(base), AT);
      }
      if( Assembler::is_simm16(disp) ) {
        __ lwu(as_Register(dst), AT, disp);
      } else {
        // disp exceeds lwu's 16-bit offset: load it into T9 and add.
        __ li(T9, disp);
        __ daddu(AT, AT, T9);
        __ lwu(as_Register(dst), AT, 0);
      }
    } else {
      if( Assembler::is_simm16(disp) ) {
        __ lwu(as_Register(dst), as_Register(base), disp);
      } else {
        __ li(T9, disp);
        __ daddu(AT, as_Register(base), T9);
        __ lwu(as_Register(dst), AT, 0);
      }
    }

  %}
  // Load pointer (64bit): dst = *(intptr_t*)(base + (index << scale) + disp).
  // gsldx is the Loongson indexed doubleword load (reg + reg + 8-bit immediate).
  // The memory operand must carry no displacement relocation (asserted below).
  enc_class load_P_enc (mRegP dst, memory mem) %{
    MacroAssembler _masm(&cbuf);
    int dst = $dst$$reg;
    int base = $mem$$base;
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;
    relocInfo::relocType disp_reloc = $mem->disp_reloc();
    assert(disp_reloc == relocInfo::none, "cannot have disp");

    if( index != 0 ) {
      if ( UseLoongsonISA ) {
        if ( Assembler::is_simm(disp, 8) ) {
          // disp fits gsldx's 8-bit immediate: single indexed load.
          if ( scale != 0 ) {
            __ dsll(AT, as_Register(index), scale);
            __ gsldx(as_Register(dst), as_Register(base), AT, disp);
          } else {
            __ gsldx(as_Register(dst), as_Register(base), as_Register(index), disp);
          }
        } else if ( Assembler::is_simm16(disp) ){
          // disp fits ld's 16-bit offset: compute base+index<<scale in AT.
          if ( scale != 0 ) {
            __ dsll(AT, as_Register(index), scale);
            __ daddu(AT, AT, as_Register(base));
          } else {
            __ daddu(AT, as_Register(index), as_Register(base));
          }
          __ ld(as_Register(dst), AT, disp);
        } else {
          // Large disp: AT = index<<scale + disp; base feeds gsldx directly.
          if ( scale != 0 ) {
            __ dsll(AT, as_Register(index), scale);
            __ move(T9, disp);
            __ daddu(AT, AT, T9);
          } else {
            __ move(T9, disp);
            __ daddu(AT, as_Register(index), T9);
          }
          __ gsldx(as_Register(dst), as_Register(base), AT, 0);
        }
      } else { //not use loongson isa
        if (scale == 0) {
          __ daddu(AT, as_Register(base), as_Register(index));
        } else {
          __ dsll(AT, as_Register(index), scale);
          __ daddu(AT, as_Register(base), AT);
        }
        if( Assembler::is_simm16(disp) ) {
          __ ld(as_Register(dst), AT, disp);
        } else {
          __ li(T9, disp);
          __ daddu(AT, AT, T9);
          __ ld(as_Register(dst), AT, 0);
        }
      }
    } else {
      if ( UseLoongsonISA ) {
        if ( Assembler::is_simm16(disp) ){
          __ ld(as_Register(dst), as_Register(base), disp);
        } else {
          __ li(T9, disp);
          __ gsldx(as_Register(dst), as_Register(base), T9, 0);
        }
      } else { //not use loongson isa
        if( Assembler::is_simm16(disp) ) {
          __ ld(as_Register(dst), as_Register(base), disp);
        } else {
          __ li(T9, disp);
          __ daddu(AT, as_Register(base), T9);
          __ ld(as_Register(dst), AT, 0);
        }
      }
    }
    // if( disp_reloc != relocInfo::none) __ ld(as_Register(dst), as_Register(dst), 0);
  %}
  // Store pointer (64bit): *(intptr_t*)(base + (index << scale) + disp) = src.
  // gssdx is the Loongson indexed doubleword store (reg + reg + 8-bit immediate).
  enc_class store_P_reg_enc (memory mem, mRegP src) %{
    MacroAssembler _masm(&cbuf);
    int src = $src$$reg;
    int base = $mem$$base;
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;

    if( index != 0 ) {
      if ( UseLoongsonISA ){
        if ( Assembler::is_simm(disp, 8) ) {
          // Single indexed store when disp fits gssdx's 8-bit immediate.
          if ( scale == 0 ) {
            __ gssdx(as_Register(src), as_Register(base), as_Register(index), disp);
          } else {
            __ dsll(AT, as_Register(index), scale);
            __ gssdx(as_Register(src), as_Register(base), AT, disp);
          }
        } else if ( Assembler::is_simm16(disp) ) {
          // disp fits sd's 16-bit offset: compute base+index<<scale in AT.
          if ( scale == 0 ) {
            __ daddu(AT, as_Register(base), as_Register(index));
          } else {
            __ dsll(AT, as_Register(index), scale);
            __ daddu(AT, as_Register(base), AT);
          }
          __ sd(as_Register(src), AT, disp);
        } else {
          // Large disp: AT = index<<scale + disp; base feeds gssdx directly.
          if ( scale == 0 ) {
            __ move(T9, disp);
            __ daddu(AT, as_Register(index), T9);
          } else {
            __ dsll(AT, as_Register(index), scale);
            __ move(T9, disp);
            __ daddu(AT, AT, T9);
          }
          __ gssdx(as_Register(src), as_Register(base), AT, 0);
        }
      } else { //not use loongson isa
        if (scale == 0) {
          __ daddu(AT, as_Register(base), as_Register(index));
        } else {
          __ dsll(AT, as_Register(index), scale);
          __ daddu(AT, as_Register(base), AT);
        }
        if( Assembler::is_simm16(disp) ) {
          __ sd(as_Register(src), AT, disp);
        } else {
          __ move(T9, disp);
          __ daddu(AT, AT, T9);
          __ sd(as_Register(src), AT, 0);
        }
      }
    } else {
      if ( UseLoongsonISA ) {
        if ( Assembler::is_simm16(disp) ) {
          __ sd(as_Register(src), as_Register(base), disp);
        } else {
          __ move(T9, disp);
          __ gssdx(as_Register(src), as_Register(base), T9, 0);
        }
      } else {
        if( Assembler::is_simm16(disp) ) {
          __ sd(as_Register(src), as_Register(base), disp);
        } else {
          __ move(T9, disp);
          __ daddu(AT, as_Register(base), T9);
          __ sd(as_Register(src), AT, 0);
        }
      }
    }
  %}
  // Store narrow oop (compressed pointer, 32bit): *(uint32*)(base + (index << scale) + disp) = src.
  // Uses word stores (sw / gsswx); encoding of the narrow oop happens elsewhere.
  // Structure mirrors store_P_reg_enc with doubleword ops replaced by word ops.
  enc_class store_N_reg_enc (memory mem, mRegN src) %{
    MacroAssembler _masm(&cbuf);
    int src = $src$$reg;
    int base = $mem$$base;
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;

    if( index != 0 ) {
      if ( UseLoongsonISA ){
        if ( Assembler::is_simm(disp, 8) ) {
          // Single indexed store when disp fits gsswx's 8-bit immediate.
          if ( scale == 0 ) {
            __ gsswx(as_Register(src), as_Register(base), as_Register(index), disp);
          } else {
            __ dsll(AT, as_Register(index), scale);
            __ gsswx(as_Register(src), as_Register(base), AT, disp);
          }
        } else if ( Assembler::is_simm16(disp) ) {
          if ( scale == 0 ) {
            __ daddu(AT, as_Register(base), as_Register(index));
          } else {
            __ dsll(AT, as_Register(index), scale);
            __ daddu(AT, as_Register(base), AT);
          }
          __ sw(as_Register(src), AT, disp);
        } else {
          // Large disp: AT = index<<scale + disp; base feeds gsswx directly.
          if ( scale == 0 ) {
            __ move(T9, disp);
            __ daddu(AT, as_Register(index), T9);
          } else {
            __ dsll(AT, as_Register(index), scale);
            __ move(T9, disp);
            __ daddu(AT, AT, T9);
          }
          __ gsswx(as_Register(src), as_Register(base), AT, 0);
        }
      } else { //not use loongson isa
        if (scale == 0) {
          __ daddu(AT, as_Register(base), as_Register(index));
        } else {
          __ dsll(AT, as_Register(index), scale);
          __ daddu(AT, as_Register(base), AT);
        }
        if( Assembler::is_simm16(disp) ) {
          __ sw(as_Register(src), AT, disp);
        } else {
          __ move(T9, disp);
          __ daddu(AT, AT, T9);
          __ sw(as_Register(src), AT, 0);
        }
      }
    } else {
      if ( UseLoongsonISA ) {
        if ( Assembler::is_simm16(disp) ) {
          __ sw(as_Register(src), as_Register(base), disp);
        } else {
          __ move(T9, disp);
          __ gsswx(as_Register(src), as_Register(base), T9, 0);
        }
      } else {
        if( Assembler::is_simm16(disp) ) {
          __ sw(as_Register(src), as_Register(base), disp);
        } else {
          __ move(T9, disp);
          __ daddu(AT, as_Register(base), T9);
          __ sw(as_Register(src), AT, 0);
        }
      }
    }
  %}
  // Store null pointer: *(intptr_t*)(base + (index << scale) + disp) = 0,
  // stored directly from R0 (hard-wired zero) — no constant materialization.
  enc_class store_P_immP0_enc (memory mem) %{
    MacroAssembler _masm(&cbuf);
    int base = $mem$$base;
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;

    if( index != 0 ) {
      if (scale == 0) {
        if( Assembler::is_simm16(disp) ) {
          // Prefer one gssdx when both the ISA and the 8-bit disp allow it.
          if (UseLoongsonISA && Assembler::is_simm(disp, 8)) {
            __ gssdx(R0, as_Register(base), as_Register(index), disp);
          } else {
            __ daddu(AT, as_Register(base), as_Register(index));
            __ sd(R0, AT, disp);
          }
        } else {
          __ daddu(AT, as_Register(base), as_Register(index));
          __ move(T9, disp);
          if(UseLoongsonISA) {
            __ gssdx(R0, AT, T9, 0);
          } else {
            __ daddu(AT, AT, T9);
            __ sd(R0, AT, 0);
          }
        }
      } else {
        __ dsll(AT, as_Register(index), scale);
        if( Assembler::is_simm16(disp) ) {
          if (UseLoongsonISA && Assembler::is_simm(disp, 8)) {
            __ gssdx(R0, as_Register(base), AT, disp);
          } else {
            __ daddu(AT, as_Register(base), AT);
            __ sd(R0, AT, disp);
          }
        } else {
          __ daddu(AT, as_Register(base), AT);
          __ move(T9, disp);
          if (UseLoongsonISA) {
            __ gssdx(R0, AT, T9, 0);
          } else {
            __ daddu(AT, AT, T9);
            __ sd(R0, AT, 0);
          }
        }
      }
    } else {
      // No index register: address is base + disp.
      if( Assembler::is_simm16(disp) ) {
        __ sd(R0, as_Register(base), disp);
      } else {
        __ move(T9, disp);
        if (UseLoongsonISA) {
          __ gssdx(R0, as_Register(base), T9, 0);
        } else {
          __ daddu(AT, as_Register(base), T9);
          __ sd(R0, AT, 0);
        }
      }
    }
  %}
  // Store zero narrow oop: *(uint32*)(base + (index << scale) + disp) = 0,
  // stored as a word from R0. No Loongson fast path here — always the
  // generic daddu/sw sequence.
  enc_class storeImmN0_enc(memory mem, ImmN0 src) %{
    MacroAssembler _masm(&cbuf);
    int base = $mem$$base;
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;

    if(index!=0){
      // AT = base + (index << scale)
      if (scale == 0) {
        __ daddu(AT, as_Register(base), as_Register(index));
      } else {
        __ dsll(AT, as_Register(index), scale);
        __ daddu(AT, as_Register(base), AT);
      }

      if( Assembler::is_simm16(disp) ) {
        __ sw(R0, AT, disp);
      } else {
        __ move(T9, disp);
        __ daddu(AT, AT, T9);
        __ sw(R0, AT, 0);
      }
    }
    else {
      if( Assembler::is_simm16(disp) ) {
        __ sw(R0, as_Register(base), disp);
      } else {
        __ move(T9, disp);
        __ daddu(AT, as_Register(base), T9);
        __ sw(R0, AT, 0);
      }
    }
  %}
  // Load Long (64bit): dst = *(jlong*)(base + (index << scale) + disp).
  enc_class load_L_enc (mRegL dst, memory mem) %{
    MacroAssembler _masm(&cbuf);
    int base = $mem$$base;
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;
    Register dst_reg = as_Register($dst$$reg);

    /*********************2013/03/27**************************
     * Jin: $base may contain a null object.
     * Server JIT force the exception_offset to be the pos of
     * the first instruction.
     * I insert such a 'null_check' at the beginning.
     *******************************************************/
    // Touch [base + 0] first so that a null base faults on the FIRST emitted
    // instruction of this node (where the signal handler expects it).
    __ lw(AT, as_Register(base), 0);

    /*********************2012/10/04**************************
     * Error case found in SortTest
     * 337 b java.util.Arrays::sort1 (401 bytes)
     * B73:
     * d34 lw T4.lo, [T4 + #16] #@loadL-lo
     * lw T4.hi, [T4 + #16]+4 #@loadL-hi
     *
     * The original instructions generated here are :
     * __ lw(dst_lo, as_Register(base), disp);
     * __ lw(dst_hi, as_Register(base), disp + 4);
     *******************************************************/

    if( index != 0 ) {
      if (scale == 0) {
        __ daddu(AT, as_Register(base), as_Register(index));
      } else {
        __ dsll(AT, as_Register(index), scale);
        __ daddu(AT, as_Register(base), AT);
      }
      if( Assembler::is_simm16(disp) ) {
        __ ld(dst_reg, AT, disp);
      } else {
        __ move(T9, disp);
        __ daddu(AT, AT, T9);
        __ ld(dst_reg, AT, 0);
      }
    } else {
      if( Assembler::is_simm16(disp) ) {
        // NOTE(review): the move through AT looks redundant (ld could use base
        // directly) — possibly kept from the historical lo/hi split above; confirm.
        __ move(AT, as_Register(base));
        __ ld(dst_reg, AT, disp);
      } else {
        __ move(T9, disp);
        __ daddu(AT, as_Register(base), T9);
        __ ld(dst_reg, AT, 0);
      }
    }
  %}
  // Store Long (64bit): *(jlong*)(base + (index << scale) + disp) = src.
  enc_class store_L_reg_enc (memory mem, mRegL src) %{
    MacroAssembler _masm(&cbuf);
    int base = $mem$$base;
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;
    Register src_reg = as_Register($src$$reg);

    if( index != 0 ) {
      // AT = base + (index << scale)
      if (scale == 0) {
        __ daddu(AT, as_Register(base), as_Register(index));
      } else {
        __ dsll(AT, as_Register(index), scale);
        __ daddu(AT, as_Register(base), AT);
      }
      if( Assembler::is_simm16(disp) ) {
        __ sd(src_reg, AT, disp);
      } else {
        __ move(T9, disp);
        __ daddu(AT, AT, T9);
        __ sd(src_reg, AT, 0);
      }
    } else {
      if( Assembler::is_simm16(disp) ) {
        // NOTE(review): the move through AT looks redundant (sd could use base
        // directly) — mirrors load_L_enc; confirm before simplifying.
        __ move(AT, as_Register(base));
        __ sd(src_reg, AT, disp);
      } else {
        __ move(T9, disp);
        __ daddu(AT, as_Register(base), T9);
        __ sd(src_reg, AT, 0);
      }
    }
  %}
3126 enc_class store_L_immL0_enc (memory mem, immL0 src) %{
3127 MacroAssembler _masm(&cbuf);
3128 int base = $mem$$base;
3129 int index = $mem$$index;
3130 int scale = $mem$$scale;
3131 int disp = $mem$$disp;
3133 if( index != 0 ) {
3134 if (scale == 0) {
3135 __ daddu(AT, as_Register(base), as_Register(index));
3136 } else {
3137 __ dsll(AT, as_Register(index), scale);
3138 __ daddu(AT, as_Register(base), AT);
3139 }
3140 if( Assembler::is_simm16(disp) ) {
3141 __ sd(R0, AT, disp);
3142 } else {
3143 __ move(T9, disp);
3144 __ addu(AT, AT, T9);
3145 __ sd(R0, AT, 0);
3146 }
3147 } else {
3148 if( Assembler::is_simm16(disp) ) {
3149 __ move(AT, as_Register(base));
3150 __ sd(R0, AT, disp);
3151 } else {
3152 __ move(T9, disp);
3153 __ addu(AT, as_Register(base), T9);
3154 __ sd(R0, AT, 0);
3155 }
3156 }
3157 %}
  // Load a 32-bit float from memory into an FP register (lwc1), using the
  // Loongson indexed-load gslwxc1 when available to fold base+index+disp
  // into one instruction.  gslwxc1's immediate is only 8 bits, hence the
  // extra is_simm(disp, 8) guard.
  enc_class load_F_enc (regF dst, memory mem) %{
    MacroAssembler _masm(&cbuf);
    int  base = $mem$$base;
    int  index = $mem$$index;
    int  scale = $mem$$scale;
    int  disp = $mem$$disp;
    FloatRegister dst = $dst$$FloatRegister;

    if( index != 0 ) {
      if( Assembler::is_simm16(disp) ) {
        if( UseLoongsonISA && Assembler::is_simm(disp, 8) ) {
          // Single indexed load: dst = [base + index(<<scale) + disp8].
          if (scale == 0) {
            __ gslwxc1(dst, as_Register(base), as_Register(index), disp);
          } else {
            __ dsll(AT, as_Register(index), scale);
            __ gslwxc1(dst, as_Register(base), AT, disp);
          }
        } else {
          // Compute base+index in AT, then load with 16-bit displacement.
          if (scale == 0) {
            __ daddu(AT, as_Register(base), as_Register(index));
          } else {
            __ dsll(AT, as_Register(index), scale);
            __ daddu(AT, as_Register(base), AT);
          }
          __ lwc1(dst, AT, disp);
        }
      } else {
        // Large displacement: materialize it in T9.
        if (scale == 0) {
          __ daddu(AT, as_Register(base), as_Register(index));
        } else {
          __ dsll(AT, as_Register(index), scale);
          __ daddu(AT, as_Register(base), AT);
        }
        __ move(T9, disp);
        if( UseLoongsonISA ) {
          __ gslwxc1(dst, AT, T9, 0);
        } else {
          __ daddu(AT, AT, T9);
          __ lwc1(dst, AT, 0);
        }
      }
    } else {
      if( Assembler::is_simm16(disp) ) {
        __ lwc1(dst, as_Register(base), disp);
      } else {
        __ move(T9, disp);
        if( UseLoongsonISA ) {
          __ gslwxc1(dst, as_Register(base), T9, 0);
        } else {
          __ daddu(AT, as_Register(base), T9);
          __ lwc1(dst, AT, 0);
        }
      }
    }
  %}
3215 enc_class store_F_reg_enc (memory mem, regF src) %{
3216 MacroAssembler _masm(&cbuf);
3217 int base = $mem$$base;
3218 int index = $mem$$index;
3219 int scale = $mem$$scale;
3220 int disp = $mem$$disp;
3221 FloatRegister src = $src$$FloatRegister;
3223 if( index != 0 ) {
3224 if( Assembler::is_simm16(disp) ) {
3225 if( UseLoongsonISA && Assembler::is_simm(disp, 8) ) {
3226 if (scale == 0) {
3227 __ gsswxc1(src, as_Register(base), as_Register(index), disp);
3228 } else {
3229 __ dsll(AT, as_Register(index), scale);
3230 __ gsswxc1(src, as_Register(base), AT, disp);
3231 }
3232 } else {
3233 if (scale == 0) {
3234 __ daddu(AT, as_Register(base), as_Register(index));
3235 } else {
3236 __ dsll(AT, as_Register(index), scale);
3237 __ daddu(AT, as_Register(base), AT);
3238 }
3239 __ swc1(src, AT, disp);
3240 }
3241 } else {
3242 if (scale == 0) {
3243 __ daddu(AT, as_Register(base), as_Register(index));
3244 } else {
3245 __ dsll(AT, as_Register(index), scale);
3246 __ daddu(AT, as_Register(base), AT);
3247 }
3248 __ move(T9, disp);
3249 if( UseLoongsonISA ) {
3250 __ gsswxc1(src, AT, T9, 0);
3251 } else {
3252 __ daddu(AT, AT, T9);
3253 __ swc1(src, AT, 0);
3254 }
3255 }
3256 } else {
3257 if( Assembler::is_simm16(disp) ) {
3258 __ swc1(src, as_Register(base), disp);
3259 } else {
3260 __ move(T9, disp);
3261 if( UseLoongsonISA ) {
3262 __ gslwxc1(src, as_Register(base), T9, 0);
3263 } else {
3264 __ daddu(AT, as_Register(base), T9);
3265 __ swc1(src, AT, 0);
3266 }
3267 }
3268 }
3269 %}
3271 enc_class load_D_enc (regD dst, memory mem) %{
3272 MacroAssembler _masm(&cbuf);
3273 int base = $mem$$base;
3274 int index = $mem$$index;
3275 int scale = $mem$$scale;
3276 int disp = $mem$$disp;
3277 FloatRegister dst_reg = as_FloatRegister($dst$$reg);
3279 if( index != 0 ) {
3280 if( Assembler::is_simm16(disp) ) {
3281 if( UseLoongsonISA && Assembler::is_simm(disp, 8) ) {
3282 if (scale == 0) {
3283 __ gsldxc1(dst_reg, as_Register(base), as_Register(index), disp);
3284 } else {
3285 __ dsll(AT, as_Register(index), scale);
3286 __ gsldxc1(dst_reg, as_Register(base), AT, disp);
3287 }
3288 } else {
3289 if (scale == 0) {
3290 __ daddu(AT, as_Register(base), as_Register(index));
3291 } else {
3292 __ dsll(AT, as_Register(index), scale);
3293 __ daddu(AT, as_Register(base), AT);
3294 }
3295 __ ldc1(dst_reg, AT, disp);
3296 }
3297 } else {
3298 if (scale == 0) {
3299 __ daddu(AT, as_Register(base), as_Register(index));
3300 } else {
3301 __ dsll(AT, as_Register(index), scale);
3302 __ daddu(AT, as_Register(base), AT);
3303 }
3304 __ move(T9, disp);
3305 if( UseLoongsonISA ) {
3306 __ gsldxc1(dst_reg, AT, T9, 0);
3307 } else {
3308 __ addu(AT, AT, T9);
3309 __ ldc1(dst_reg, AT, 0);
3310 }
3311 }
3312 } else {
3313 if( Assembler::is_simm16(disp) ) {
3314 __ ldc1(dst_reg, as_Register(base), disp);
3315 } else {
3316 __ move(T9, disp);
3317 if( UseLoongsonISA ) {
3318 __ gsldxc1(dst_reg, as_Register(base), T9, 0);
3319 } else {
3320 __ addu(AT, as_Register(base), T9);
3321 __ ldc1(dst_reg, AT, 0);
3322 }
3323 }
3324 }
3325 %}
3327 enc_class store_D_reg_enc (memory mem, regD src) %{
3328 MacroAssembler _masm(&cbuf);
3329 int base = $mem$$base;
3330 int index = $mem$$index;
3331 int scale = $mem$$scale;
3332 int disp = $mem$$disp;
3333 FloatRegister src_reg = as_FloatRegister($src$$reg);
3335 if( index != 0 ) {
3336 if( Assembler::is_simm16(disp) ) {
3337 if( UseLoongsonISA && Assembler::is_simm(disp, 8) ) {
3338 if (scale == 0) {
3339 __ gssdxc1(src_reg, as_Register(base), as_Register(index), disp);
3340 } else {
3341 __ dsll(AT, as_Register(index), scale);
3342 __ gssdxc1(src_reg, as_Register(base), AT, disp);
3343 }
3344 } else {
3345 if (scale == 0) {
3346 __ daddu(AT, as_Register(base), as_Register(index));
3347 } else {
3348 __ dsll(AT, as_Register(index), scale);
3349 __ daddu(AT, as_Register(base), AT);
3350 }
3351 __ sdc1(src_reg, AT, disp);
3352 }
3353 } else {
3354 if (scale == 0) {
3355 __ daddu(AT, as_Register(base), as_Register(index));
3356 } else {
3357 __ dsll(AT, as_Register(index), scale);
3358 __ daddu(AT, as_Register(base), AT);
3359 }
3360 __ move(T9, disp);
3361 if( UseLoongsonISA ) {
3362 __ gssdxc1(src_reg, AT, T9, 0);
3363 } else {
3364 __ addu(AT, AT, T9);
3365 __ sdc1(src_reg, AT, 0);
3366 }
3367 }
3368 } else {
3369 if( Assembler::is_simm16(disp) ) {
3370 __ sdc1(src_reg, as_Register(base), disp);
3371 } else {
3372 __ move(T9, disp);
3373 if( UseLoongsonISA ) {
3374 __ gssdxc1(src_reg, as_Register(base), T9, 0);
3375 } else {
3376 __ addu(AT, as_Register(base), T9);
3377 __ sdc1(src_reg, AT, 0);
3378 }
3379 }
3380 }
3381 %}
  enc_class Java_To_Runtime (method meth) %{ // CALL Java_To_Runtime, Java_To_Runtime_Leaf
    // Emit a call into the VM runtime through T9.
    MacroAssembler _masm(&cbuf);
    // This is the instruction starting address for relocation info.
    __ block_comment("Java_To_Runtime");
    cbuf.set_insts_mark();
    __ relocate(relocInfo::runtime_call_type);

    // patchable_set48 emits a fixed-length instruction sequence, so the
    // 48-bit call target can later be patched in place.
    __ patchable_set48(T9, (long)$meth$$method);
    __ jalr(T9);
    __ nop();   // branch delay slot
  %}
3395 enc_class Java_Static_Call (method meth) %{ // JAVA STATIC CALL
3396 // CALL to fixup routine. Fixup routine uses ScopeDesc info to determine
3397 // who we intended to call.
3398 MacroAssembler _masm(&cbuf);
3399 cbuf.set_insts_mark();
3401 if ( !_method ) {
3402 __ relocate(relocInfo::runtime_call_type);
3403 } else if(_optimized_virtual) {
3404 __ relocate(relocInfo::opt_virtual_call_type);
3405 } else {
3406 __ relocate(relocInfo::static_call_type);
3407 }
3409 __ li(T9, $meth$$method);
3410 __ jalr(T9);
3411 __ nop();
3412 if( _method ) { // Emit stub for static call
3413 emit_java_to_interp(cbuf);
3414 }
3415 %}
  /*
   * Emit a virtual (inline-cache) call.
   * [Ref: LIR_Assembler::ic_call() ]
   */
  enc_class Java_Dynamic_Call (method meth) %{ // JAVA DYNAMIC CALL
    MacroAssembler _masm(&cbuf);
    __ block_comment("Java_Dynamic_Call");
    // ic_call emits the inline-cache call sequence with its relocation.
    __ ic_call((address)$meth$$method);
  %}
  // Materialize a flags value from AT: flags = (AT == 0) ? 0 : 0xFFFFFFFF.
  // NOTE(review): AT is presumably set by a preceding fast_lock/fast_unlock
  // sequence — confirm against the instructions that use this enc_class.
  enc_class Set_Flags_After_Fast_Lock_Unlock(FlagsReg cr) %{
    Register flags = $cr$$Register;
    Label L;

    MacroAssembler _masm(&cbuf);

    __ addu(flags, R0, R0);       // flags = 0
    __ beq(AT, R0, L);            // AT == 0 -> keep flags == 0
    __ delayed()->nop();          // branch delay slot
    __ move(flags, 0xFFFFFFFF);   // AT != 0 -> flags = all ones
    __ bind(L);
  %}
  // Slow-path subtype check: result = 0 when 'sub' is a subtype of 'super',
  // 1 on a miss (check_klass_subtype_slow_path branches to 'miss' on failure).
  enc_class enc_PartialSubtypeCheck(mRegP result, mRegP sub, mRegP super, mRegI tmp) %{
    Register result = $result$$Register;
    Register sub = $sub$$Register;
    Register super = $super$$Register;
    Register length = $tmp$$Register;
    Register tmp = T9;
    Label miss;

    /* 2012/9/28 Jin: result may be the same as sub
     *    47c   B40: #  B21 B41 <- B20  Freq: 0.155379
     *    47c     partialSubtypeCheck result=S1, sub=S1, super=S3, length=S0
     *    4bc     mov   S2, NULL #@loadConP
     *    4c0     beq   S1, S2, B21 #@branchConP  P=0.999999 C=-1.000000
     */
    MacroAssembler _masm(&cbuf);
    Label done;
    __ check_klass_subtype_slow_path(sub, super, length, tmp,
                                     NULL, &miss,
                                     /*set_cond_codes:*/ true);
    /* 2013/7/22 Jin: Refer to X86_64's RDI */
    __ move(result, 0);     // hit: result = 0
    __ b(done);
    __ nop();               // branch delay slot

    __ bind(miss);
    __ move(result, 1);     // miss: result = 1
    __ bind(done);
  %}
3470 %}
3473 //---------MIPS FRAME--------------------------------------------------------------
3474 // Definition of frame structure and management information.
3475 //
3476 // S T A C K L A Y O U T Allocators stack-slot number
3477 // | (to get allocators register number
3478 // G Owned by | | v add SharedInfo::stack0)
3479 // r CALLER | |
3480 // o | +--------+ pad to even-align allocators stack-slot
3481 // w V | pad0 | numbers; owned by CALLER
3482 // t -----------+--------+----> Matcher::_in_arg_limit, unaligned
3483 // h ^ | in | 5
3484 // | | args | 4 Holes in incoming args owned by SELF
3485 // | | old | | 3
3486 // | | SP-+--------+----> Matcher::_old_SP, even aligned
3487 // v | | ret | 3 return address
3488 // Owned by +--------+
3489 // Self | pad2 | 2 pad to align old SP
3490 // | +--------+ 1
3491 // | | locks | 0
3492 // | +--------+----> SharedInfo::stack0, even aligned
3493 // | | pad1 | 11 pad to align new SP
3494 // | +--------+
3495 // | | | 10
3496 // | | spills | 9 spills
3497 // V | | 8 (pad0 slot for callee)
3498 // -----------+--------+----> Matcher::_out_arg_limit, unaligned
3499 // ^ | out | 7
3500 // | | args | 6 Holes in outgoing args owned by CALLEE
3501 // Owned by new | |
3502 // Callee SP-+--------+----> Matcher::_new_SP, even aligned
3503 // | |
3504 //
3505 // Note 1: Only region 8-11 is determined by the allocator. Region 0-5 is
3506 // known from SELF's arguments and the Java calling convention.
3507 // Region 6-7 is determined per call site.
3508 // Note 2: If the calling convention leaves holes in the incoming argument
3509 // area, those holes are owned by SELF. Holes in the outgoing area
// are owned by the CALLEE. Holes should not be necessary in the
// incoming area, as the Java calling convention is completely under
// the control of the AD file. Doubles can be sorted and packed to
// avoid holes. Holes in the outgoing arguments may be necessary for
3514 // varargs C calling conventions.
3515 // Note 3: Region 0-3 is even aligned, with pad2 as needed. Region 3-5 is
3516 // even aligned with pad0 as needed.
3517 // Region 6 is even aligned. Region 6-7 is NOT even aligned;
3518 // region 6-11 is even aligned; it may be padded out more so that
3519 // the region from SP to FP meets the minimum stack alignment.
3520 // Note 4: For I2C adapters, the incoming FP may not meet the minimum stack
3521 // alignment. Region 11, pad1, may be dynamically extended so that
3522 // SP meets the minimum alignment.
frame %{
  // Stack grows toward lower addresses on MIPS.
  stack_direction(TOWARDS_LOW);

  // These two registers define part of the calling convention
  // between compiled code and the interpreter.
  // SEE StartI2CNode::calling_convention & StartC2INode::calling_convention & StartOSRNode::calling_convention
  // for more information. by yjl 3/16/2006

  inline_cache_reg(T1);                // Inline Cache Register
  interpreter_method_oop_reg(S3);      // Method Oop Register when calling interpreter
  /*
  inline_cache_reg(T1);                // Inline Cache Register or methodOop for I2C
  interpreter_arg_ptr_reg(A0);         // Argument pointer for I2C adapters
  */

  // Optional: name the operand used by cisc-spilling to access [stack_pointer + offset]
  cisc_spilling_operand_name(indOffset32);

  // Number of stack slots consumed by locking an object
  // generate Compile::sync_stack_slots
#ifdef _LP64
  sync_stack_slots(2);
#else
  sync_stack_slots(1);
#endif

  frame_pointer(SP);

  // Interpreter stores its frame pointer in a register which is
  // stored to the stack by I2CAdaptors.
  // I2CAdaptors convert from interpreted java to compiled java.

  interpreter_frame_pointer(FP);

  // generate Matcher::stack_alignment
  stack_alignment(StackAlignmentInBytes); //wordSize = sizeof(char*);

  // Number of stack slots between incoming argument block and the start of
  // a new frame.  The PROLOG must add this many slots to the stack.  The
  // EPILOG must remove this many slots.  Intel needs one slot for
  // return address.
  // generate Matcher::in_preserve_stack_slots
  //in_preserve_stack_slots(VerifyStackAtCalls + 2); //Now VerifyStackAtCalls is defined as false ! Leave one stack slot for ra and fp
  in_preserve_stack_slots(4); //Now VerifyStackAtCalls is defined as false ! Leave two stack slots for ra and fp

  // Number of outgoing stack slots killed above the out_preserve_stack_slots
  // for calls to C.  Supports the var-args backing area for register parms.
  varargs_C_out_slots_killed(0);

  // The after-PROLOG location of the return address.  Location of
  // return address specifies a type (REG or STACK) and a number
  // representing the register number (i.e. - use a register name) or
  // stack slot.
  // Ret Addr is on stack in slot 0 if no locks or verification or alignment.
  // Otherwise, it is above the locks and verification slot and alignment word
  //return_addr(STACK -1+ round_to(1+VerifyStackAtCalls+Compile::current()->sync()*Compile::current()->sync_stack_slots(),WordsPerLong));
  return_addr(REG RA);

  // Body of function which returns an integer array locating
  // arguments either in registers or in stack slots.  Passed an array
  // of ideal registers called "sig" and a "length" count.  Stack-slot
  // offsets are based on outgoing arguments, i.e. a CALLER setting up
  // arguments for a CALLEE.  Incoming stack arguments are
  // automatically biased by the preserve_stack_slots field above.

  // will generated to Matcher::calling_convention(OptoRegPair *sig, uint length, bool is_outgoing)
  // StartNode::calling_convention call this. by yjl 3/16/2006
  calling_convention %{
    SharedRuntime::java_calling_convention(sig_bt, regs, length, false);
  %}

  // Body of function which returns an integer array locating
  // arguments either in registers or in stack slots.  Passed an array
  // of ideal registers called "sig" and a "length" count.  Stack-slot
  // offsets are based on outgoing arguments, i.e. a CALLER setting up
  // arguments for a CALLEE.  Incoming stack arguments are
  // automatically biased by the preserve_stack_slots field above.

  // SEE CallRuntimeNode::calling_convention for more information. by yjl 3/16/2006
  c_calling_convention %{
    (void) SharedRuntime::c_calling_convention(sig_bt, regs, /*regs2=*/NULL, length);
  %}

  // Location of C & interpreter return values
  // register(s) contain(s) return value for Op_StartI2C and Op_StartOSR.
  // SEE Matcher::match. by yjl 3/16/2006
  c_return_value %{
    assert( ideal_reg >= Op_RegI && ideal_reg <= Op_RegL, "only return normal values" );
    /* -- , -- , Op_RegN, Op_RegI, Op_RegP, Op_RegF, Op_RegD, Op_RegL */
    static int lo[Op_RegL+1] = { 0, 0, V0_num, V0_num, V0_num, F0_num, F0_num, V0_num };
    static int hi[Op_RegL+1] = { 0, 0, OptoReg::Bad, OptoReg::Bad, V0_H_num, OptoReg::Bad, F0_H_num, V0_H_num };
    return OptoRegPair(hi[ideal_reg],lo[ideal_reg]);
  %}

  // Location of return values
  // register(s) contain(s) return value for Op_StartC2I and Op_Start.
  // SEE Matcher::match. by yjl 3/16/2006

  return_value %{
    assert( ideal_reg >= Op_RegI && ideal_reg <= Op_RegL, "only return normal values" );
    /* -- , -- , Op_RegN, Op_RegI, Op_RegP, Op_RegF, Op_RegD, Op_RegL */
    static int lo[Op_RegL+1] = { 0, 0, V0_num, V0_num, V0_num, F0_num, F0_num, V0_num };
    static int hi[Op_RegL+1] = { 0, 0, OptoReg::Bad, OptoReg::Bad, V0_H_num, OptoReg::Bad, F0_H_num, V0_H_num};
    return OptoRegPair(hi[ideal_reg],lo[ideal_reg]);
  %}

%}
//----------ATTRIBUTES---------------------------------------------------------
//----------Operand Attributes-------------------------------------------------
op_attrib op_cost(0);          // Required cost attribute

//----------Instruction Attributes---------------------------------------------
ins_attrib ins_cost(100);       // Required cost attribute
ins_attrib ins_size(32);        // Required size attribute (in bits)
ins_attrib ins_pc_relative(0);  // Required PC Relative flag
ins_attrib ins_short_branch(0); // Required flag: is this instruction a
                                // non-matching short branch variant of some
                                // long branch?
ins_attrib ins_alignment(4);    // Required alignment attribute (must be a power of 2)
                                // specifies the alignment that some part of the instruction (not
                                // necessarily the start) requires.  If > 1, a compute_padding()
                                // function must be provided for the instruction
3656 //----------OPERANDS-----------------------------------------------------------
3657 // Operand definitions must precede instruction definitions for correct parsing
3658 // in the ADLC because operands constitute user defined types which are used in
3659 // instruction definitions.
3661 // Vectors
// 64-bit vector operand, allocated in the double-FP register class.
operand vecD() %{
  constraint(ALLOC_IN_RC(dbl_reg));
  match(VecD);

  format %{ %}
  interface(REG_INTER);
%}

// Flags register, used as output of compare instructions
operand FlagsReg() %{
  constraint(ALLOC_IN_RC(mips_flags));
  match(RegFlags);

  format %{ "EFLAGS" %}
  interface(REG_INTER);
%}
3679 //----------Simple Operands----------------------------------------------------
3680 //TODO: Should we need to define some more special immediate number ?
3681 // Immediate Operands
3682 // Integer Immediate
// Integer Immediate: any 32-bit constant.
operand immI() %{
  match(ConI);
  //TODO: should not match immI8 here LEE
  match(immI8);

  op_cost(20);
  format %{ %}
  interface(CONST_INTER);
%}

// Long Immediate 8-bit
operand immL8()
%{
  predicate(-0x80L <= n->get_long() && n->get_long() < 0x80L);
  match(ConL);

  op_cost(5);
  format %{ %}
  interface(CONST_INTER);
%}

// Constant for test vs zero
operand immI0() %{
  predicate(n->get_int() == 0);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Constant for increment
operand immI1() %{
  predicate(n->get_int() == 1);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Constant for decrement
operand immI_M1() %{
  predicate(n->get_int() == -1);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Integer maximum (2^31 - 1).
operand immI_MaxI() %{
  predicate(n->get_int() == 2147483647);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Valid scale values for addressing modes
operand immI2() %{
  predicate(0 <= n->get_int() && (n->get_int() <= 3));
  match(ConI);

  format %{ %}
  interface(CONST_INTER);
%}

// Signed 8-bit integer immediate.
operand immI8() %{
  predicate((-128 <= n->get_int()) && (n->get_int() <= 127));
  match(ConI);

  op_cost(5);
  format %{ %}
  interface(CONST_INTER);
%}

// Signed 16-bit integer immediate (fits MIPS I-type imm field).
operand immI16() %{
  predicate((-32768 <= n->get_int()) && (n->get_int() <= 32767));
  match(ConI);

  op_cost(10);
  format %{ %}
  interface(CONST_INTER);
%}

// Constant for long shifts
operand immI_32() %{
  predicate( n->get_int() == 32 );
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

operand immI_63() %{
  predicate( n->get_int() == 63 );
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Valid 32-bit shift amounts.
operand immI_0_31() %{
  predicate( n->get_int() >= 0 && n->get_int() <= 31 );
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Operand for non-negative integer mask
operand immI_nonneg_mask() %{
  predicate( (n->get_int() >= 0) && (Assembler::is_int_mask(n->get_int()) != -1) );
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Upper half of valid 64-bit shift amounts.
operand immI_32_63() %{
  predicate( n->get_int() >= 32 && n->get_int() <= 63 );
  match(ConI);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}

// Range usable as the negated immediate of a 16-bit add (x - imm == x + (-imm)).
operand immI16_sub() %{
  predicate((-32767 <= n->get_int()) && (n->get_int() <= 32768));
  match(ConI);

  op_cost(10);
  format %{ %}
  interface(CONST_INTER);
%}

operand immI_0_32767() %{
  predicate( n->get_int() >= 0 && n->get_int() <= 32767 );
  match(ConI);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}

operand immI_0_65535() %{
  predicate( n->get_int() >= 0 && n->get_int() <= 65535 );
  match(ConI);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}

operand immI_1() %{
  predicate( n->get_int() == 1 );
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

operand immI_2() %{
  predicate( n->get_int() == 2 );
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

operand immI_3() %{
  predicate( n->get_int() == 3 );
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

operand immI_7() %{
  predicate( n->get_int() == 7 );
  match(ConI);

  format %{ %}
  interface(CONST_INTER);
%}

// Immediates for special shifts (sign extend)

// Constants for increment
operand immI_16() %{
  predicate( n->get_int() == 16 );
  match(ConI);

  format %{ %}
  interface(CONST_INTER);
%}

operand immI_24() %{
  predicate( n->get_int() == 24 );
  match(ConI);

  format %{ %}
  interface(CONST_INTER);
%}

// Constant for byte-wide masking
operand immI_255() %{
  predicate( n->get_int() == 255 );
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Constant for half-word masking.
operand immI_65535() %{
  predicate( n->get_int() == 65535 );
  match(ConI);

  op_cost(5);
  format %{ %}
  interface(CONST_INTER);
%}

operand immI_65536() %{
  predicate( n->get_int() == 65536 );
  match(ConI);

  op_cost(5);
  format %{ %}
  interface(CONST_INTER);
%}

operand immI_M65536() %{
  predicate( n->get_int() == -65536 );
  match(ConI);

  op_cost(5);
  format %{ %}
  interface(CONST_INTER);
%}
3935 // Pointer Immediate
// Pointer Immediate: any constant pointer.
operand immP() %{
  match(ConP);

  op_cost(10);
  format %{ %}
  interface(CONST_INTER);
%}

// NULL Pointer Immediate
operand immP0() %{
  predicate( n->get_ptr() == 0 );
  match(ConP);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}

// Pointer Immediate: 64-bit, materialized with a set sequence.
operand immP_set() %{
  match(ConP);

  op_cost(5);
  // formats are generated automatically for constants and base registers
  format %{ %}
  interface(CONST_INTER);
%}

// Pointer Immediate: 64-bit; oops, or constants expensive to materialize
// (more than 3 instructions), which are loaded from the constant table.
operand immP_load() %{
  predicate(n->bottom_type()->isa_oop_ptr() || (MacroAssembler::insts_for_set64(n->get_ptr()) > 3));
  match(ConP);

  op_cost(5);
  // formats are generated automatically for constants and base registers
  format %{ %}
  interface(CONST_INTER);
%}

// Pointer Immediate: 64-bit non-oop constants cheap to materialize inline
// (at most 3 instructions).
operand immP_no_oop_cheap() %{
  predicate(!n->bottom_type()->isa_oop_ptr() && (MacroAssembler::insts_for_set64(n->get_ptr()) <= 3));
  match(ConP);

  op_cost(5);
  // formats are generated automatically for constants and base registers
  format %{ %}
  interface(CONST_INTER);
%}

// Pointer for polling page
operand immP_poll() %{
  predicate(n->get_ptr() != 0 && n->get_ptr() == (intptr_t)os::get_polling_page());
  match(ConP);
  op_cost(5);

  format %{ %}
  interface(CONST_INTER);
%}

// Narrow-oop (compressed pointer) immediate.
operand immN() %{
  match(ConN);

  op_cost(10);
  format %{ %}
  interface(CONST_INTER);
%}

// Narrow-klass immediate.
operand immNKlass() %{
  match(ConNKlass);

  op_cost(10);
  format %{ %}
  interface(CONST_INTER);
%}

// NULL Pointer Immediate (narrow form)
operand immN0() %{
  predicate(n->get_narrowcon() == 0);
  match(ConN);

  op_cost(5);
  format %{ %}
  interface(CONST_INTER);
%}
4023 // Long Immediate
// Long Immediate: any 64-bit constant.
operand immL() %{
  match(ConL);

  op_cost(20);
  format %{ %}
  interface(CONST_INTER);
%}

// Long Immediate zero
operand immL0() %{
  predicate( n->get_long() == 0L );
  match(ConL);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}

operand immL7() %{
  predicate( n->get_long() == 7L );
  match(ConL);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}

operand immL_M1() %{
  predicate( n->get_long() == -1L );
  match(ConL);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}

// bit 0..2 zero
operand immL_M8() %{
  predicate( n->get_long() == -8L );
  match(ConL);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}

// bit 2 zero
operand immL_M5() %{
  predicate( n->get_long() == -5L );
  match(ConL);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}

// bit 1..2 zero
operand immL_M7() %{
  predicate( n->get_long() == -7L );
  match(ConL);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}

// bit 0..1 zero
operand immL_M4() %{
  predicate( n->get_long() == -4L );
  match(ConL);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}

// bit 3..6 zero
operand immL_M121() %{
  predicate( n->get_long() == -121L );
  match(ConL);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}

// Long immediate from 0 to 127.
// Used for a shorter form of long mul by 10.
operand immL_127() %{
  predicate((0 <= n->get_long()) && (n->get_long() <= 127));
  match(ConL);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}

// Operand for non-negative long mask
operand immL_nonneg_mask() %{
  predicate( (n->get_long() >= 0) && (Assembler::is_jlong_mask(n->get_long()) != -1) );
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

operand immL_0_65535() %{
  predicate( n->get_long() >= 0 && n->get_long() <= 65535 );
  match(ConL);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}

// Long Immediate: cheap (materialize in <= 3 instructions)
operand immL_cheap() %{
  predicate(MacroAssembler::insts_for_set64(n->get_long()) <= 3);
  match(ConL);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}

// Long Immediate: expensive (materialize in > 3 instructions)
operand immL_expensive() %{
  predicate(MacroAssembler::insts_for_set64(n->get_long()) > 3);
  match(ConL);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}

// Signed 16-bit long immediate (fits MIPS I-type imm field).
operand immL16() %{
  predicate((-32768 <= n->get_long()) && (n->get_long() <= 32767));
  match(ConL);

  op_cost(10);
  format %{ %}
  interface(CONST_INTER);
%}

// Range usable as the negated immediate of a 16-bit add (x - imm == x + (-imm)).
operand immL16_sub() %{
  predicate((-32767 <= n->get_long()) && (n->get_long() <= 32768));
  match(ConL);

  op_cost(10);
  format %{ %}
  interface(CONST_INTER);
%}

// Long Immediate: low 32-bit mask
operand immL_32bits() %{
  predicate(n->get_long() == 0xFFFFFFFFL);
  match(ConL);
  op_cost(20);

  format %{ %}
  interface(CONST_INTER);
%}

// Long Immediate 32-bit signed
operand immL32()
%{
  predicate(n->get_long() == (int) (n->get_long()));
  match(ConL);

  op_cost(15);
  format %{ %}
  interface(CONST_INTER);
%}
4200 //single-precision floating-point zero
//single-precision floating-point zero (compared bitwise, so -0.0f does not match)
operand immF0() %{
  predicate(jint_cast(n->getf()) == 0);
  match(ConF);

  op_cost(5);
  format %{ %}
  interface(CONST_INTER);
%}

//single-precision floating-point immediate
operand immF() %{
  match(ConF);

  op_cost(20);
  format %{ %}
  interface(CONST_INTER);
%}

//double-precision floating-point zero (compared bitwise, so -0.0 does not match)
operand immD0() %{
  predicate(jlong_cast(n->getd()) == 0);
  match(ConD);

  op_cost(5);
  format %{ %}
  interface(CONST_INTER);
%}

//double-precision floating-point immediate
operand immD() %{
  match(ConD);

  op_cost(20);
  format %{ %}
  interface(CONST_INTER);
%}
4238 // Register Operands
4239 // Integer Register
4240 operand mRegI() %{
4241 constraint(ALLOC_IN_RC(int_reg));
4242 match(RegI);
4244 format %{ %}
4245 interface(REG_INTER);
4246 %}
4248 operand no_Ax_mRegI() %{
4249 constraint(ALLOC_IN_RC(no_Ax_int_reg));
4250 match(RegI);
4251 match(mRegI);
4253 format %{ %}
4254 interface(REG_INTER);
4255 %}
4257 operand mS0RegI() %{
4258 constraint(ALLOC_IN_RC(s0_reg));
4259 match(RegI);
4260 match(mRegI);
4262 format %{ "S0" %}
4263 interface(REG_INTER);
4264 %}
4266 operand mS1RegI() %{
4267 constraint(ALLOC_IN_RC(s1_reg));
4268 match(RegI);
4269 match(mRegI);
4271 format %{ "S1" %}
4272 interface(REG_INTER);
4273 %}
4275 operand mS2RegI() %{
4276 constraint(ALLOC_IN_RC(s2_reg));
4277 match(RegI);
4278 match(mRegI);
4280 format %{ "S2" %}
4281 interface(REG_INTER);
4282 %}
4284 operand mS3RegI() %{
4285 constraint(ALLOC_IN_RC(s3_reg));
4286 match(RegI);
4287 match(mRegI);
4289 format %{ "S3" %}
4290 interface(REG_INTER);
4291 %}
4293 operand mS4RegI() %{
4294 constraint(ALLOC_IN_RC(s4_reg));
4295 match(RegI);
4296 match(mRegI);
4298 format %{ "S4" %}
4299 interface(REG_INTER);
4300 %}
4302 operand mS5RegI() %{
4303 constraint(ALLOC_IN_RC(s5_reg));
4304 match(RegI);
4305 match(mRegI);
4307 format %{ "S5" %}
4308 interface(REG_INTER);
4309 %}
4311 operand mS6RegI() %{
4312 constraint(ALLOC_IN_RC(s6_reg));
4313 match(RegI);
4314 match(mRegI);
4316 format %{ "S6" %}
4317 interface(REG_INTER);
4318 %}
4320 operand mS7RegI() %{
4321 constraint(ALLOC_IN_RC(s7_reg));
4322 match(RegI);
4323 match(mRegI);
4325 format %{ "S7" %}
4326 interface(REG_INTER);
4327 %}
4330 operand mT0RegI() %{
4331 constraint(ALLOC_IN_RC(t0_reg));
4332 match(RegI);
4333 match(mRegI);
4335 format %{ "T0" %}
4336 interface(REG_INTER);
4337 %}
4339 operand mT1RegI() %{
4340 constraint(ALLOC_IN_RC(t1_reg));
4341 match(RegI);
4342 match(mRegI);
4344 format %{ "T1" %}
4345 interface(REG_INTER);
4346 %}
4348 operand mT2RegI() %{
4349 constraint(ALLOC_IN_RC(t2_reg));
4350 match(RegI);
4351 match(mRegI);
4353 format %{ "T2" %}
4354 interface(REG_INTER);
4355 %}
4357 operand mT3RegI() %{
4358 constraint(ALLOC_IN_RC(t3_reg));
4359 match(RegI);
4360 match(mRegI);
4362 format %{ "T3" %}
4363 interface(REG_INTER);
4364 %}
4366 operand mT8RegI() %{
4367 constraint(ALLOC_IN_RC(t8_reg));
4368 match(RegI);
4369 match(mRegI);
4371 format %{ "T8" %}
4372 interface(REG_INTER);
4373 %}
4375 operand mT9RegI() %{
4376 constraint(ALLOC_IN_RC(t9_reg));
4377 match(RegI);
4378 match(mRegI);
4380 format %{ "T9" %}
4381 interface(REG_INTER);
4382 %}
4384 operand mA0RegI() %{
4385 constraint(ALLOC_IN_RC(a0_reg));
4386 match(RegI);
4387 match(mRegI);
4389 format %{ "A0" %}
4390 interface(REG_INTER);
4391 %}
4393 operand mA1RegI() %{
4394 constraint(ALLOC_IN_RC(a1_reg));
4395 match(RegI);
4396 match(mRegI);
4398 format %{ "A1" %}
4399 interface(REG_INTER);
4400 %}
4402 operand mA2RegI() %{
4403 constraint(ALLOC_IN_RC(a2_reg));
4404 match(RegI);
4405 match(mRegI);
4407 format %{ "A2" %}
4408 interface(REG_INTER);
4409 %}
4411 operand mA3RegI() %{
4412 constraint(ALLOC_IN_RC(a3_reg));
4413 match(RegI);
4414 match(mRegI);
4416 format %{ "A3" %}
4417 interface(REG_INTER);
4418 %}
4420 operand mA4RegI() %{
4421 constraint(ALLOC_IN_RC(a4_reg));
4422 match(RegI);
4423 match(mRegI);
4425 format %{ "A4" %}
4426 interface(REG_INTER);
4427 %}
4429 operand mA5RegI() %{
4430 constraint(ALLOC_IN_RC(a5_reg));
4431 match(RegI);
4432 match(mRegI);
4434 format %{ "A5" %}
4435 interface(REG_INTER);
4436 %}
4438 operand mA6RegI() %{
4439 constraint(ALLOC_IN_RC(a6_reg));
4440 match(RegI);
4441 match(mRegI);
4443 format %{ "A6" %}
4444 interface(REG_INTER);
4445 %}
4447 operand mA7RegI() %{
4448 constraint(ALLOC_IN_RC(a7_reg));
4449 match(RegI);
4450 match(mRegI);
4452 format %{ "A7" %}
4453 interface(REG_INTER);
4454 %}
4456 operand mV0RegI() %{
4457 constraint(ALLOC_IN_RC(v0_reg));
4458 match(RegI);
4459 match(mRegI);
4461 format %{ "V0" %}
4462 interface(REG_INTER);
4463 %}
4465 operand mV1RegI() %{
4466 constraint(ALLOC_IN_RC(v1_reg));
4467 match(RegI);
4468 match(mRegI);
4470 format %{ "V1" %}
4471 interface(REG_INTER);
4472 %}
// Narrow (compressed) oop register operand: any register in the general
// integer register class. Specific-register variants below (t0_RegN, ...)
// pin the same RegN type to a single machine register.
4474 operand mRegN() %{
4475 constraint(ALLOC_IN_RC(int_reg));
4476 match(RegN);
4478 format %{ %}
4479 interface(REG_INTER);
4480 %}
4482 operand t0_RegN() %{
4483 constraint(ALLOC_IN_RC(t0_reg));
4484 match(RegN);
4485 match(mRegN);
4487 format %{ %}
4488 interface(REG_INTER);
4489 %}
4491 operand t1_RegN() %{
4492 constraint(ALLOC_IN_RC(t1_reg));
4493 match(RegN);
4494 match(mRegN);
4496 format %{ %}
4497 interface(REG_INTER);
4498 %}
4500 operand t2_RegN() %{
4501 constraint(ALLOC_IN_RC(t2_reg));
4502 match(RegN);
4503 match(mRegN);
4505 format %{ %}
4506 interface(REG_INTER);
4507 %}
4509 operand t3_RegN() %{
4510 constraint(ALLOC_IN_RC(t3_reg));
4511 match(RegN);
4512 match(mRegN);
4514 format %{ %}
4515 interface(REG_INTER);
4516 %}
4518 operand t8_RegN() %{
4519 constraint(ALLOC_IN_RC(t8_reg));
4520 match(RegN);
4521 match(mRegN);
4523 format %{ %}
4524 interface(REG_INTER);
4525 %}
4527 operand t9_RegN() %{
4528 constraint(ALLOC_IN_RC(t9_reg));
4529 match(RegN);
4530 match(mRegN);
4532 format %{ %}
4533 interface(REG_INTER);
4534 %}
4536 operand a0_RegN() %{
4537 constraint(ALLOC_IN_RC(a0_reg));
4538 match(RegN);
4539 match(mRegN);
4541 format %{ %}
4542 interface(REG_INTER);
4543 %}
4545 operand a1_RegN() %{
4546 constraint(ALLOC_IN_RC(a1_reg));
4547 match(RegN);
4548 match(mRegN);
4550 format %{ %}
4551 interface(REG_INTER);
4552 %}
4554 operand a2_RegN() %{
4555 constraint(ALLOC_IN_RC(a2_reg));
4556 match(RegN);
4557 match(mRegN);
4559 format %{ %}
4560 interface(REG_INTER);
4561 %}
4563 operand a3_RegN() %{
4564 constraint(ALLOC_IN_RC(a3_reg));
4565 match(RegN);
4566 match(mRegN);
4568 format %{ %}
4569 interface(REG_INTER);
4570 %}
4572 operand a4_RegN() %{
4573 constraint(ALLOC_IN_RC(a4_reg));
4574 match(RegN);
4575 match(mRegN);
4577 format %{ %}
4578 interface(REG_INTER);
4579 %}
4581 operand a5_RegN() %{
4582 constraint(ALLOC_IN_RC(a5_reg));
4583 match(RegN);
4584 match(mRegN);
4586 format %{ %}
4587 interface(REG_INTER);
4588 %}
4590 operand a6_RegN() %{
4591 constraint(ALLOC_IN_RC(a6_reg));
4592 match(RegN);
4593 match(mRegN);
4595 format %{ %}
4596 interface(REG_INTER);
4597 %}
4599 operand a7_RegN() %{
4600 constraint(ALLOC_IN_RC(a7_reg));
4601 match(RegN);
4602 match(mRegN);
4604 format %{ %}
4605 interface(REG_INTER);
4606 %}
4608 operand s0_RegN() %{
4609 constraint(ALLOC_IN_RC(s0_reg));
4610 match(RegN);
4611 match(mRegN);
4613 format %{ %}
4614 interface(REG_INTER);
4615 %}
4617 operand s1_RegN() %{
4618 constraint(ALLOC_IN_RC(s1_reg));
4619 match(RegN);
4620 match(mRegN);
4622 format %{ %}
4623 interface(REG_INTER);
4624 %}
4626 operand s2_RegN() %{
4627 constraint(ALLOC_IN_RC(s2_reg));
4628 match(RegN);
4629 match(mRegN);
4631 format %{ %}
4632 interface(REG_INTER);
4633 %}
4635 operand s3_RegN() %{
4636 constraint(ALLOC_IN_RC(s3_reg));
4637 match(RegN);
4638 match(mRegN);
4640 format %{ %}
4641 interface(REG_INTER);
4642 %}
4644 operand s4_RegN() %{
4645 constraint(ALLOC_IN_RC(s4_reg));
4646 match(RegN);
4647 match(mRegN);
4649 format %{ %}
4650 interface(REG_INTER);
4651 %}
4653 operand s5_RegN() %{
4654 constraint(ALLOC_IN_RC(s5_reg));
4655 match(RegN);
4656 match(mRegN);
4658 format %{ %}
4659 interface(REG_INTER);
4660 %}
4662 operand s6_RegN() %{
4663 constraint(ALLOC_IN_RC(s6_reg));
4664 match(RegN);
4665 match(mRegN);
4667 format %{ %}
4668 interface(REG_INTER);
4669 %}
4671 operand s7_RegN() %{
4672 constraint(ALLOC_IN_RC(s7_reg));
4673 match(RegN);
4674 match(mRegN);
4676 format %{ %}
4677 interface(REG_INTER);
4678 %}
4680 operand v0_RegN() %{
4681 constraint(ALLOC_IN_RC(v0_reg));
4682 match(RegN);
4683 match(mRegN);
4685 format %{ %}
4686 interface(REG_INTER);
4687 %}
4689 operand v1_RegN() %{
4690 constraint(ALLOC_IN_RC(v1_reg));
4691 match(RegN);
4692 match(mRegN);
4694 format %{ %}
4695 interface(REG_INTER);
4696 %}
4698 // Pointer Register
// General pointer register operand: any register in the pointer register
// class p_reg. The *_RegP variants below restrict RegP to one register.
4699 operand mRegP() %{
4700 constraint(ALLOC_IN_RC(p_reg));
4701 match(RegP);
4703 format %{ %}
4704 interface(REG_INTER);
4705 %}
4707 operand no_T8_mRegP() %{
4708 constraint(ALLOC_IN_RC(no_T8_p_reg));
4709 match(RegP);
4710 match(mRegP);
4712 format %{ %}
4713 interface(REG_INTER);
4714 %}
4716 operand s0_RegP()
4717 %{
4718 constraint(ALLOC_IN_RC(s0_long_reg));
4719 match(RegP);
4720 match(mRegP);
4721 match(no_T8_mRegP);
4723 format %{ %}
4724 interface(REG_INTER);
4725 %}
4727 operand s1_RegP()
4728 %{
4729 constraint(ALLOC_IN_RC(s1_long_reg));
4730 match(RegP);
4731 match(mRegP);
4732 match(no_T8_mRegP);
4734 format %{ %}
4735 interface(REG_INTER);
4736 %}
4738 operand s2_RegP()
4739 %{
4740 constraint(ALLOC_IN_RC(s2_long_reg));
4741 match(RegP);
4742 match(mRegP);
4743 match(no_T8_mRegP);
4745 format %{ %}
4746 interface(REG_INTER);
4747 %}
4749 operand s3_RegP()
4750 %{
4751 constraint(ALLOC_IN_RC(s3_long_reg));
4752 match(RegP);
4753 match(mRegP);
4754 match(no_T8_mRegP);
4756 format %{ %}
4757 interface(REG_INTER);
4758 %}
4760 operand s4_RegP()
4761 %{
4762 constraint(ALLOC_IN_RC(s4_long_reg));
4763 match(RegP);
4764 match(mRegP);
4765 match(no_T8_mRegP);
4767 format %{ %}
4768 interface(REG_INTER);
4769 %}
4771 operand s5_RegP()
4772 %{
4773 constraint(ALLOC_IN_RC(s5_long_reg));
4774 match(RegP);
4775 match(mRegP);
4776 match(no_T8_mRegP);
4778 format %{ %}
4779 interface(REG_INTER);
4780 %}
4782 operand s6_RegP()
4783 %{
4784 constraint(ALLOC_IN_RC(s6_long_reg));
4785 match(RegP);
4786 match(mRegP);
4787 match(no_T8_mRegP);
4789 format %{ %}
4790 interface(REG_INTER);
4791 %}
4793 operand s7_RegP()
4794 %{
4795 constraint(ALLOC_IN_RC(s7_long_reg));
4796 match(RegP);
4797 match(mRegP);
4798 match(no_T8_mRegP);
4800 format %{ %}
4801 interface(REG_INTER);
4802 %}
4804 operand t0_RegP()
4805 %{
4806 constraint(ALLOC_IN_RC(t0_long_reg));
4807 match(RegP);
4808 match(mRegP);
4809 match(no_T8_mRegP);
4811 format %{ %}
4812 interface(REG_INTER);
4813 %}
4815 operand t1_RegP()
4816 %{
4817 constraint(ALLOC_IN_RC(t1_long_reg));
4818 match(RegP);
4819 match(mRegP);
4820 match(no_T8_mRegP);
4822 format %{ %}
4823 interface(REG_INTER);
4824 %}
4826 operand t2_RegP()
4827 %{
4828 constraint(ALLOC_IN_RC(t2_long_reg));
4829 match(RegP);
4830 match(mRegP);
4831 match(no_T8_mRegP);
4833 format %{ %}
4834 interface(REG_INTER);
4835 %}
4837 operand t3_RegP()
4838 %{
4839 constraint(ALLOC_IN_RC(t3_long_reg));
4840 match(RegP);
4841 match(mRegP);
4842 match(no_T8_mRegP);
4844 format %{ %}
4845 interface(REG_INTER);
4846 %}
4848 operand t8_RegP()
4849 %{
4850 constraint(ALLOC_IN_RC(t8_long_reg));
4851 match(RegP);
4852 match(mRegP);
4854 format %{ %}
4855 interface(REG_INTER);
4856 %}
4858 operand t9_RegP()
4859 %{
4860 constraint(ALLOC_IN_RC(t9_long_reg));
4861 match(RegP);
4862 match(mRegP);
4863 match(no_T8_mRegP);
4865 format %{ %}
4866 interface(REG_INTER);
4867 %}
4869 operand a0_RegP()
4870 %{
4871 constraint(ALLOC_IN_RC(a0_long_reg));
4872 match(RegP);
4873 match(mRegP);
4874 match(no_T8_mRegP);
4876 format %{ %}
4877 interface(REG_INTER);
4878 %}
4880 operand a1_RegP()
4881 %{
4882 constraint(ALLOC_IN_RC(a1_long_reg));
4883 match(RegP);
4884 match(mRegP);
4885 match(no_T8_mRegP);
4887 format %{ %}
4888 interface(REG_INTER);
4889 %}
4891 operand a2_RegP()
4892 %{
4893 constraint(ALLOC_IN_RC(a2_long_reg));
4894 match(RegP);
4895 match(mRegP);
4896 match(no_T8_mRegP);
4898 format %{ %}
4899 interface(REG_INTER);
4900 %}
4902 operand a3_RegP()
4903 %{
4904 constraint(ALLOC_IN_RC(a3_long_reg));
4905 match(RegP);
4906 match(mRegP);
4907 match(no_T8_mRegP);
4909 format %{ %}
4910 interface(REG_INTER);
4911 %}
4913 operand a4_RegP()
4914 %{
4915 constraint(ALLOC_IN_RC(a4_long_reg));
4916 match(RegP);
4917 match(mRegP);
4918 match(no_T8_mRegP);
4920 format %{ %}
4921 interface(REG_INTER);
4922 %}
4925 operand a5_RegP()
4926 %{
4927 constraint(ALLOC_IN_RC(a5_long_reg));
4928 match(RegP);
4929 match(mRegP);
4930 match(no_T8_mRegP);
4932 format %{ %}
4933 interface(REG_INTER);
4934 %}
4936 operand a6_RegP()
4937 %{
4938 constraint(ALLOC_IN_RC(a6_long_reg));
4939 match(RegP);
4940 match(mRegP);
4941 match(no_T8_mRegP);
4943 format %{ %}
4944 interface(REG_INTER);
4945 %}
4947 operand a7_RegP()
4948 %{
4949 constraint(ALLOC_IN_RC(a7_long_reg));
4950 match(RegP);
4951 match(mRegP);
4952 match(no_T8_mRegP);
4954 format %{ %}
4955 interface(REG_INTER);
4956 %}
4958 operand v0_RegP()
4959 %{
4960 constraint(ALLOC_IN_RC(v0_long_reg));
4961 match(RegP);
4962 match(mRegP);
4963 match(no_T8_mRegP);
4965 format %{ %}
4966 interface(REG_INTER);
4967 %}
4969 operand v1_RegP()
4970 %{
4971 constraint(ALLOC_IN_RC(v1_long_reg));
4972 match(RegP);
4973 match(mRegP);
4974 match(no_T8_mRegP);
4976 format %{ %}
4977 interface(REG_INTER);
4978 %}
4980 /*
4981 operand mSPRegP(mRegP reg) %{
4982 constraint(ALLOC_IN_RC(sp_reg));
4983 match(reg);
4985 format %{ "SP" %}
4986 interface(REG_INTER);
4987 %}
4989 operand mFPRegP(mRegP reg) %{
4990 constraint(ALLOC_IN_RC(fp_reg));
4991 match(reg);
4993 format %{ "FP" %}
4994 interface(REG_INTER);
4995 %}
4996 */
// General long (64-bit integer) register operand: any register in the
// long_reg class. The *RegL variants below pin RegL to one register.
4998 operand mRegL() %{
4999 constraint(ALLOC_IN_RC(long_reg));
5000 match(RegL);
5002 format %{ %}
5003 interface(REG_INTER);
5004 %}
5006 operand v0RegL() %{
5007 constraint(ALLOC_IN_RC(v0_long_reg));
5008 match(RegL);
5009 match(mRegL);
5011 format %{ %}
5012 interface(REG_INTER);
5013 %}
5015 operand v1RegL() %{
5016 constraint(ALLOC_IN_RC(v1_long_reg));
5017 match(RegL);
5018 match(mRegL);
5020 format %{ %}
5021 interface(REG_INTER);
5022 %}
5024 operand a0RegL() %{
5025 constraint(ALLOC_IN_RC(a0_long_reg));
5026 match(RegL);
5027 match(mRegL);
5029 format %{ "A0" %}
5030 interface(REG_INTER);
5031 %}
5033 operand a1RegL() %{
5034 constraint(ALLOC_IN_RC(a1_long_reg));
5035 match(RegL);
5036 match(mRegL);
5038 format %{ %}
5039 interface(REG_INTER);
5040 %}
5042 operand a2RegL() %{
5043 constraint(ALLOC_IN_RC(a2_long_reg));
5044 match(RegL);
5045 match(mRegL);
5047 format %{ %}
5048 interface(REG_INTER);
5049 %}
5051 operand a3RegL() %{
5052 constraint(ALLOC_IN_RC(a3_long_reg));
5053 match(RegL);
5054 match(mRegL);
5056 format %{ %}
5057 interface(REG_INTER);
5058 %}
5060 operand t0RegL() %{
5061 constraint(ALLOC_IN_RC(t0_long_reg));
5062 match(RegL);
5063 match(mRegL);
5065 format %{ %}
5066 interface(REG_INTER);
5067 %}
5069 operand t1RegL() %{
5070 constraint(ALLOC_IN_RC(t1_long_reg));
5071 match(RegL);
5072 match(mRegL);
5074 format %{ %}
5075 interface(REG_INTER);
5076 %}
5078 operand t2RegL() %{
5079 constraint(ALLOC_IN_RC(t2_long_reg));
5080 match(RegL);
5081 match(mRegL);
5083 format %{ %}
5084 interface(REG_INTER);
5085 %}
5087 operand t3RegL() %{
5088 constraint(ALLOC_IN_RC(t3_long_reg));
5089 match(RegL);
5090 match(mRegL);
5092 format %{ %}
5093 interface(REG_INTER);
5094 %}
5096 operand t8RegL() %{
5097 constraint(ALLOC_IN_RC(t8_long_reg));
5098 match(RegL);
5099 match(mRegL);
5101 format %{ %}
5102 interface(REG_INTER);
5103 %}
5105 operand a4RegL() %{
5106 constraint(ALLOC_IN_RC(a4_long_reg));
5107 match(RegL);
5108 match(mRegL);
5110 format %{ %}
5111 interface(REG_INTER);
5112 %}
5114 operand a5RegL() %{
5115 constraint(ALLOC_IN_RC(a5_long_reg));
5116 match(RegL);
5117 match(mRegL);
5119 format %{ %}
5120 interface(REG_INTER);
5121 %}
5123 operand a6RegL() %{
5124 constraint(ALLOC_IN_RC(a6_long_reg));
5125 match(RegL);
5126 match(mRegL);
5128 format %{ %}
5129 interface(REG_INTER);
5130 %}
5132 operand a7RegL() %{
5133 constraint(ALLOC_IN_RC(a7_long_reg));
5134 match(RegL);
5135 match(mRegL);
5137 format %{ %}
5138 interface(REG_INTER);
5139 %}
5141 operand s0RegL() %{
5142 constraint(ALLOC_IN_RC(s0_long_reg));
5143 match(RegL);
5144 match(mRegL);
5146 format %{ %}
5147 interface(REG_INTER);
5148 %}
5150 operand s1RegL() %{
5151 constraint(ALLOC_IN_RC(s1_long_reg));
5152 match(RegL);
5153 match(mRegL);
5155 format %{ %}
5156 interface(REG_INTER);
5157 %}
5159 operand s2RegL() %{
5160 constraint(ALLOC_IN_RC(s2_long_reg));
5161 match(RegL);
5162 match(mRegL);
5164 format %{ %}
5165 interface(REG_INTER);
5166 %}
5168 operand s3RegL() %{
5169 constraint(ALLOC_IN_RC(s3_long_reg));
5170 match(RegL);
5171 match(mRegL);
5173 format %{ %}
5174 interface(REG_INTER);
5175 %}
5177 operand s4RegL() %{
5178 constraint(ALLOC_IN_RC(s4_long_reg));
5179 match(RegL);
5180 match(mRegL);
5182 format %{ %}
5183 interface(REG_INTER);
5184 %}
5186 operand s7RegL() %{
5187 constraint(ALLOC_IN_RC(s7_long_reg));
5188 match(RegL);
5189 match(mRegL);
5191 format %{ %}
5192 interface(REG_INTER);
5193 %}
5195 // Floating register operands
// Single-precision floating-point register operand (flt_reg class).
5196 operand regF() %{
5197 constraint(ALLOC_IN_RC(flt_reg));
5198 match(RegF);
5200 format %{ %}
5201 interface(REG_INTER);
5202 %}
5204 //Double Precision Floating register operands
// Double-precision floating-point register operand (dbl_reg class).
5205 operand regD() %{
5206 constraint(ALLOC_IN_RC(dbl_reg));
5207 match(RegD);
5209 format %{ %}
5210 interface(REG_INTER);
5211 %}
5213 //----------Memory Operands----------------------------------------------------
5214 // Indirect Memory Operand
// Register-indirect memory operand: the effective address is simply [reg],
// with no index register, no scale, and zero displacement.
5215 operand indirect(mRegP reg) %{
5216 constraint(ALLOC_IN_RC(p_reg));
5217 match(reg);
5219 format %{ "[$reg] @ indirect" %}
5220 interface(MEMORY_INTER) %{
5221 base($reg);
5222 index(0x0); /* NO_INDEX */
5223 scale(0x0);
5224 disp(0x0);
5225 %}
5226 %}
5228 // Indirect Memory Plus Short Offset Operand
5229 operand indOffset8(mRegP reg, immL8 off)
5230 %{
5231 constraint(ALLOC_IN_RC(p_reg));
5232 match(AddP reg off);
5234 format %{ "[$reg + $off (8-bit)] @ indOffset8" %}
5235 interface(MEMORY_INTER) %{
5236 base($reg);
5237 index(0x0); /* NO_INDEX */
5238 scale(0x0);
5239 disp($off);
5240 %}
5241 %}
5243 // Indirect Memory Times Scale Plus Index Register
5244 operand indIndexScale(mRegP reg, mRegL lreg, immI2 scale)
5245 %{
5246 constraint(ALLOC_IN_RC(p_reg));
5247 match(AddP reg (LShiftL lreg scale));
5249 op_cost(10);
5250 format %{"[$reg + $lreg << $scale] @ indIndexScale" %}
5251 interface(MEMORY_INTER) %{
5252 base($reg);
5253 index($lreg);
5254 scale($scale);
5255 disp(0x0);
5256 %}
5257 %}
5260 // [base + index + offset]
5261 operand baseIndexOffset8(mRegP base, mRegL index, immL8 off)
5262 %{
5263 constraint(ALLOC_IN_RC(p_reg));
5264 op_cost(5);
5265 match(AddP (AddP base index) off);
5267 format %{ "[$base + $index + $off (8-bit)] @ baseIndexOffset8" %}
5268 interface(MEMORY_INTER) %{
5269 base($base);
5270 index($index);
5271 scale(0x0);
5272 disp($off);
5273 %}
5274 %}
5276 // [base + index + offset]
5277 operand baseIndexOffset8_convI2L(mRegP base, mRegI index, immL8 off)
5278 %{
5279 constraint(ALLOC_IN_RC(p_reg));
5280 op_cost(5);
5281 match(AddP (AddP base (ConvI2L index)) off);
5283 format %{ "[$base + $index + $off (8-bit)] @ baseIndexOffset8_convI2L" %}
5284 interface(MEMORY_INTER) %{
5285 base($base);
5286 index($index);
5287 scale(0x0);
5288 disp($off);
5289 %}
5290 %}
5292 // Indirect Memory Times Scale Plus Index Register Plus Offset Operand
5293 operand indIndexScaleOffset8(mRegP reg, immL8 off, mRegL lreg, immI2 scale)
5294 %{
5295 constraint(ALLOC_IN_RC(p_reg));
5296 match(AddP (AddP reg (LShiftL lreg scale)) off);
5298 op_cost(10);
5299 format %{"[$reg + $off + $lreg << $scale] @ indIndexScaleOffset8" %}
5300 interface(MEMORY_INTER) %{
5301 base($reg);
5302 index($lreg);
5303 scale($scale);
5304 disp($off);
5305 %}
5306 %}
5308 operand indIndexScaleOffset8_convI2L(mRegP reg, immL8 off, mRegI ireg, immI2 scale)
5309 %{
5310 constraint(ALLOC_IN_RC(p_reg));
5311 match(AddP (AddP reg (LShiftL (ConvI2L ireg) scale)) off);
5313 op_cost(10);
5314 format %{"[$reg + $off + $ireg << $scale] @ indIndexScaleOffset8_convI2L" %}
5315 interface(MEMORY_INTER) %{
5316 base($reg);
5317 index($ireg);
5318 scale($scale);
5319 disp($off);
5320 %}
5321 %}
5323 // [base + index<<scale + offset]
// Memory operand for [base + (long)index << scale + off], where the int
// index is sign-extended (ConvI2L) before scaling and off fits in 8 bits.
// NOTE(review): the disabled predicate below would have required the index
// to be provably non-negative ("Pos" in the name suggests that intent) —
// confirm whether matching negative indices here is actually safe.
5324 operand basePosIndexScaleOffset8(mRegP base, mRegI index, immL8 off, immI_0_31 scale)
5325 %{
5326 constraint(ALLOC_IN_RC(p_reg));
5327 //predicate(n->in(2)->in(3)->in(1)->as_Type()->type()->is_long()->_lo >= 0);
5328 op_cost(10);
5329 match(AddP (AddP base (LShiftL (ConvI2L index) scale)) off);
5331 format %{ "[$base + $index << $scale + $off (8-bit)] @ basePosIndexScaleOffset8" %}
5332 interface(MEMORY_INTER) %{
5333 base($base);
5334 index($index);
5335 scale($scale);
5336 disp($off);
5337 %}
5338 %}
5340 // Indirect Memory Times Scale Plus Index Register Plus Offset Operand
5341 operand indIndexScaleOffsetNarrow(mRegN reg, immL8 off, mRegL lreg, immI2 scale)
5342 %{
5343 predicate(Universe::narrow_oop_shift() == 0);
5344 constraint(ALLOC_IN_RC(p_reg));
5345 match(AddP (AddP (DecodeN reg) (LShiftL lreg scale)) off);
5347 op_cost(10);
5348 format %{"[$reg + $off + $lreg << $scale] @ indIndexScaleOffsetNarrow" %}
5349 interface(MEMORY_INTER) %{
5350 base($reg);
5351 index($lreg);
5352 scale($scale);
5353 disp($off);
5354 %}
5355 %}
5357 // [base + index<<scale + offset] for compressed oops
// Same shape as basePosIndexScaleOffset8, but the base is a compressed oop
// (DecodeN base). Only matched when narrow_oop_shift() == 0, i.e. the
// narrow oop value is the pointer bits without further shifting.
// NOTE(review): the disabled predicate variant also required a provably
// non-negative index — confirm that dropping it is intended.
5358 operand indPosIndexI2LScaleOffset8Narrow(mRegN base, mRegI index, immL8 off, immI_0_31 scale)
5359 %{
5360 constraint(ALLOC_IN_RC(p_reg));
5361 //predicate(Universe::narrow_oop_shift() == 0 && n->in(2)->in(3)->in(1)->as_Type()->type()->is_long()->_lo >= 0);
5362 predicate(Universe::narrow_oop_shift() == 0);
5363 op_cost(10);
5364 match(AddP (AddP (DecodeN base) (LShiftL (ConvI2L index) scale)) off);
5366 format %{ "[$base + $index << $scale + $off (8-bit)] @ indPosIndexI2LScaleOffset8Narrow" %}
5367 interface(MEMORY_INTER) %{
5368 base($base);
5369 index($index);
5370 scale($scale);
5371 disp($off);
5372 %}
5373 %}
5375 //FIXME: I think it's better to limit the immI to be 16-bit at most!
5376 // Indirect Memory Plus Long Offset Operand
5377 operand indOffset32(mRegP reg, immL32 off) %{
5378 constraint(ALLOC_IN_RC(p_reg));
5379 op_cost(20);
5380 match(AddP reg off);
5382 format %{ "[$reg + $off (32-bit)] @ indOffset32" %}
5383 interface(MEMORY_INTER) %{
5384 base($reg);
5385 index(0x0); /* NO_INDEX */
5386 scale(0x0);
5387 disp($off);
5388 %}
5389 %}
5391 // Indirect Memory Plus Index Register
5392 operand indIndex(mRegP addr, mRegL index) %{
5393 constraint(ALLOC_IN_RC(p_reg));
5394 match(AddP addr index);
5396 op_cost(20);
5397 format %{"[$addr + $index] @ indIndex" %}
5398 interface(MEMORY_INTER) %{
5399 base($addr);
5400 index($index);
5401 scale(0x0);
5402 disp(0x0);
5403 %}
5404 %}
5406 operand indirectNarrowKlass(mRegN reg)
5407 %{
5408 predicate(Universe::narrow_klass_shift() == 0);
5409 constraint(ALLOC_IN_RC(p_reg));
5410 op_cost(10);
5411 match(DecodeNKlass reg);
5413 format %{ "[$reg] @ indirectNarrowKlass" %}
5414 interface(MEMORY_INTER) %{
5415 base($reg);
5416 index(0x0);
5417 scale(0x0);
5418 disp(0x0);
5419 %}
5420 %}
5422 operand indOffset8NarrowKlass(mRegN reg, immL8 off)
5423 %{
5424 predicate(Universe::narrow_klass_shift() == 0);
5425 constraint(ALLOC_IN_RC(p_reg));
5426 op_cost(10);
5427 match(AddP (DecodeNKlass reg) off);
5429 format %{ "[$reg + $off (8-bit)] @ indOffset8NarrowKlass" %}
5430 interface(MEMORY_INTER) %{
5431 base($reg);
5432 index(0x0);
5433 scale(0x0);
5434 disp($off);
5435 %}
5436 %}
5438 operand indOffset32NarrowKlass(mRegN reg, immL32 off)
5439 %{
5440 predicate(Universe::narrow_klass_shift() == 0);
5441 constraint(ALLOC_IN_RC(p_reg));
5442 op_cost(10);
5443 match(AddP (DecodeNKlass reg) off);
5445 format %{ "[$reg + $off (32-bit)] @ indOffset32NarrowKlass" %}
5446 interface(MEMORY_INTER) %{
5447 base($reg);
5448 index(0x0);
5449 scale(0x0);
5450 disp($off);
5451 %}
5452 %}
5454 operand indIndexOffsetNarrowKlass(mRegN reg, mRegL lreg, immL32 off)
5455 %{
5456 predicate(Universe::narrow_klass_shift() == 0);
5457 constraint(ALLOC_IN_RC(p_reg));
5458 match(AddP (AddP (DecodeNKlass reg) lreg) off);
5460 op_cost(10);
5461 format %{"[$reg + $off + $lreg] @ indIndexOffsetNarrowKlass" %}
5462 interface(MEMORY_INTER) %{
5463 base($reg);
5464 index($lreg);
5465 scale(0x0);
5466 disp($off);
5467 %}
5468 %}
5470 operand indIndexNarrowKlass(mRegN reg, mRegL lreg)
5471 %{
5472 predicate(Universe::narrow_klass_shift() == 0);
5473 constraint(ALLOC_IN_RC(p_reg));
5474 match(AddP (DecodeNKlass reg) lreg);
5476 op_cost(10);
5477 format %{"[$reg + $lreg] @ indIndexNarrowKlass" %}
5478 interface(MEMORY_INTER) %{
5479 base($reg);
5480 index($lreg);
5481 scale(0x0);
5482 disp(0x0);
5483 %}
5484 %}
5486 // Indirect Memory Operand
5487 operand indirectNarrow(mRegN reg)
5488 %{
5489 predicate(Universe::narrow_oop_shift() == 0);
5490 constraint(ALLOC_IN_RC(p_reg));
5491 op_cost(10);
5492 match(DecodeN reg);
5494 format %{ "[$reg] @ indirectNarrow" %}
5495 interface(MEMORY_INTER) %{
5496 base($reg);
5497 index(0x0);
5498 scale(0x0);
5499 disp(0x0);
5500 %}
5501 %}
5503 // Indirect Memory Plus Short Offset Operand
5504 operand indOffset8Narrow(mRegN reg, immL8 off)
5505 %{
5506 predicate(Universe::narrow_oop_shift() == 0);
5507 constraint(ALLOC_IN_RC(p_reg));
5508 op_cost(10);
5509 match(AddP (DecodeN reg) off);
5511 format %{ "[$reg + $off (8-bit)] @ indOffset8Narrow" %}
5512 interface(MEMORY_INTER) %{
5513 base($reg);
5514 index(0x0);
5515 scale(0x0);
5516 disp($off);
5517 %}
5518 %}
5520 // Indirect Memory Plus Index Register Plus Offset Operand
5521 operand indIndexOffset8Narrow(mRegN reg, mRegL lreg, immL8 off)
5522 %{
5523 predicate(Universe::narrow_oop_shift() == 0);
5524 constraint(ALLOC_IN_RC(p_reg));
5525 match(AddP (AddP (DecodeN reg) lreg) off);
5527 op_cost(10);
5528 format %{"[$reg + $off + $lreg] @ indIndexOffset8Narrow" %}
5529 interface(MEMORY_INTER) %{
5530 base($reg);
5531 index($lreg);
5532 scale(0x0);
5533 disp($off);
5534 %}
5535 %}
5537 //----------Load Long Memory Operands------------------------------------------
5538 // The load-long idiom will use its address expression again after loading
5539 // the first word of the long. If the load-long destination overlaps with
5540 // registers used in the addressing expression, the 2nd half will be loaded
5541 // from a clobbered address. Fix this by requiring that load-long use
5542 // address registers that do not overlap with the load-long target.
5544 // load-long support
5545 operand load_long_RegP() %{
5546 constraint(ALLOC_IN_RC(p_reg));
5547 match(RegP);
5548 match(mRegP);
5549 op_cost(100);
5550 format %{ %}
5551 interface(REG_INTER);
5552 %}
5554 // Indirect Memory Operand Long
5555 operand load_long_indirect(load_long_RegP reg) %{
5556 constraint(ALLOC_IN_RC(p_reg));
5557 match(reg);
5559 format %{ "[$reg]" %}
5560 interface(MEMORY_INTER) %{
5561 base($reg);
5562 index(0x0);
5563 scale(0x0);
5564 disp(0x0);
5565 %}
5566 %}
5568 // Indirect Memory Plus Long Offset Operand
// [reg + 32-bit offset] addressing for the load-long idiom; the base uses
// load_long_RegP so the address register cannot overlap the load target.
// NOTE(review): unlike the sibling operands, this one declares no
// constraint(ALLOC_IN_RC(p_reg)) — confirm whether that omission is intended.
5569 operand load_long_indOffset32(load_long_RegP reg, immL32 off) %{
5570 match(AddP reg off);
5572 format %{ "[$reg + $off]" %}
5573 interface(MEMORY_INTER) %{
5574 base($reg);
5575 index(0x0);
5576 scale(0x0);
5577 disp($off);
5578 %}
5579 %}
5581 //----------Conditional Branch Operands----------------------------------------
5582 // Comparison Op - This is the operation of the comparison, and is limited to
5583 // the following set of codes:
5584 // L (<), LE (<=), G (>), GE (>=), E (==), NE (!=)
5585 //
5586 // Other attributes of the comparison, such as unsignedness, are specified
5587 // by the comparison instruction that sets a condition code flags register.
5588 // That result is represented by a flags operand whose subtype is appropriate
5589 // to the unsignedness (etc.) of the comparison.
5590 //
5591 // Later, the instruction which matches both the Comparison Op (a Bool) and
5592 // the flags (produced by the Cmp) specifies the coding of the comparison op
5593 // by matching a specific subtype of Bool operand below, such as cmpOpU.
5595 // Comparison Code
// Signed comparison-code operand: maps each Bool condition to the port's
// internal condition encoding (1..6, plus 7/8 for overflow/no_overflow)
// consumed by the branch/cmove instruction patterns.
5596 operand cmpOp() %{
5597 match(Bool);
5599 format %{ "" %}
5600 interface(COND_INTER) %{
5601 equal(0x01);
5602 not_equal(0x02);
5603 greater(0x03);
5604 greater_equal(0x04);
5605 less(0x05);
5606 less_equal(0x06);
5607 overflow(0x7);
5608 no_overflow(0x8);
5609 %}
5610 %}
5613 // Comparison Code
5614 // Comparison Code, unsigned compare. Used by FP also, with
5615 // C2 (unordered) turned into GT or LT already. The other bits
5616 // C0 and C3 are turned into Carry & Zero flags.
// Unsigned comparison-code operand. The encodings are identical to cmpOp
// above; presumably the signed/unsigned distinction is carried by the
// instruction pattern that matches this operand, not by the encoding —
// confirm against the instruct definitions that use cmpOpU.
5617 operand cmpOpU() %{
5618 match(Bool);
5620 format %{ "" %}
5621 interface(COND_INTER) %{
5622 equal(0x01);
5623 not_equal(0x02);
5624 greater(0x03);
5625 greater_equal(0x04);
5626 less(0x05);
5627 less_equal(0x06);
5628 overflow(0x7);
5629 no_overflow(0x8);
5630 %}
5631 %}
5633 /*
5634 // Comparison Code, unsigned compare. Used by FP also, with
5635 // C2 (unordered) turned into GT or LT already. The other bits
5636 // C0 and C3 are turned into Carry & Zero flags.
5637 operand cmpOpU() %{
5638 match(Bool);
5640 format %{ "" %}
5641 interface(COND_INTER) %{
5642 equal(0x4);
5643 not_equal(0x5);
5644 less(0x2);
5645 greater_equal(0x3);
5646 less_equal(0x6);
5647 greater(0x7);
5648 %}
5649 %}
5650 */
5651 /*
5652 // Comparison Code for FP conditional move
5653 operand cmpOp_fcmov() %{
5654 match(Bool);
5656 format %{ "" %}
5657 interface(COND_INTER) %{
5658 equal (0x01);
5659 not_equal (0x02);
5660 greater (0x03);
5661 greater_equal(0x04);
5662 less (0x05);
5663 less_equal (0x06);
5664 %}
5665 %}
5667 // Comparision Code used in long compares
5668 operand cmpOp_commute() %{
5669 match(Bool);
5671 format %{ "" %}
5672 interface(COND_INTER) %{
5673 equal(0x4);
5674 not_equal(0x5);
5675 less(0xF);
5676 greater_equal(0xE);
5677 less_equal(0xD);
5678 greater(0xC);
5679 %}
5680 %}
5681 */
5683 //----------Special Memory Operands--------------------------------------------
5684 // Stack Slot Operand - This operand is used for loading and storing temporary
5685 // values on the stack where a match requires a value to
5686 // flow through memory.
// Pointer stack-slot operand: a temporary value forced through memory at
// [SP + disp] when a match requires flowing through the stack. Generated
// only by the matcher — hence no match rule.
5687 operand stackSlotP(sRegP reg) %{
5688 constraint(ALLOC_IN_RC(stack_slots));
5689 // No match rule because this operand is only generated in matching
5690 op_cost(50);
5691 format %{ "[$reg]" %}
5692 interface(MEMORY_INTER) %{
5693 base(0x1d); // SP (0x1d == register 29)
5694 index(0x0); // No Index
5695 scale(0x0); // No Scale
5696 disp($reg); // Stack Offset
5697 %}
5698 %}
5700 operand stackSlotI(sRegI reg) %{
5701 constraint(ALLOC_IN_RC(stack_slots));
5702 // No match rule because this operand is only generated in matching
5703 op_cost(50);
5704 format %{ "[$reg]" %}
5705 interface(MEMORY_INTER) %{
5706 base(0x1d); // SP
5707 index(0x0); // No Index
5708 scale(0x0); // No Scale
5709 disp($reg); // Stack Offset
5710 %}
5711 %}
5713 operand stackSlotF(sRegF reg) %{
5714 constraint(ALLOC_IN_RC(stack_slots));
5715 // No match rule because this operand is only generated in matching
5716 op_cost(50);
5717 format %{ "[$reg]" %}
5718 interface(MEMORY_INTER) %{
5719 base(0x1d); // SP
5720 index(0x0); // No Index
5721 scale(0x0); // No Scale
5722 disp($reg); // Stack Offset
5723 %}
5724 %}
5726 operand stackSlotD(sRegD reg) %{
5727 constraint(ALLOC_IN_RC(stack_slots));
5728 // No match rule because this operand is only generated in matching
5729 op_cost(50);
5730 format %{ "[$reg]" %}
5731 interface(MEMORY_INTER) %{
5732 base(0x1d); // SP
5733 index(0x0); // No Index
5734 scale(0x0); // No Scale
5735 disp($reg); // Stack Offset
5736 %}
5737 %}
5739 operand stackSlotL(sRegL reg) %{
5740 constraint(ALLOC_IN_RC(stack_slots));
5741 // No match rule because this operand is only generated in matching
5742 op_cost(50);
5743 format %{ "[$reg]" %}
5744 interface(MEMORY_INTER) %{
5745 base(0x1d); // SP
5746 index(0x0); // No Index
5747 scale(0x0); // No Scale
5748 disp($reg); // Stack Offset
5749 %}
5750 %}
5753 //------------------------OPERAND CLASSES--------------------------------------
5754 //opclass memory( direct, indirect, indOffset16, indOffset32, indOffset32X, indIndexOffset );
// Operand class "memory": the union of every addressing-mode operand that
// memory-accessing instruction patterns may match against.
5755 opclass memory( indirect, indirectNarrow, indOffset8, indOffset32, indIndex, indIndexScale, load_long_indirect, load_long_indOffset32, baseIndexOffset8, baseIndexOffset8_convI2L, indIndexScaleOffset8, indIndexScaleOffset8_convI2L, basePosIndexScaleOffset8, indIndexScaleOffsetNarrow, indPosIndexI2LScaleOffset8Narrow, indOffset8Narrow, indIndexOffset8Narrow);
5758 //----------PIPELINE-----------------------------------------------------------
5759 // Rules which define the behavior of the target architectures pipeline.
5761 pipeline %{
5763 //----------ATTRIBUTES---------------------------------------------------------
// Global pipeline attributes: fixed 4-byte instructions, delay-slotted
// branches, one instruction per bundle, 16-byte fetch line.
5764 attributes %{
5765 fixed_size_instructions; // Fixed size instructions
5766 branch_has_delay_slot; // branches have a delay slot in gs2
5767 max_instructions_per_bundle = 1; // 1 instruction per bundle
5768 max_bundles_per_cycle = 4; // Up to 4 bundles per cycle
5769 bundle_unit_size=4;
5770 instruction_unit_size = 4; // An instruction is 4 bytes long
5771 instruction_fetch_unit_size = 16; // The processor fetches one line
5772 instruction_fetch_units = 1; // of 16 bytes
5774 // List of nop instructions
5775 nops( MachNop );
5776 %}
5778 //----------RESOURCES----------------------------------------------------------
5779 // Resources are the functional units available to the machine
5781 resources(D1, D2, D3, D4, DECODE = D1 | D2 | D3| D4, ALU1, ALU2, ALU = ALU1 | ALU2, FPU1, FPU2, FPU = FPU1 | FPU2, MEM, BR);
5783 //----------PIPELINE DESCRIPTION-----------------------------------------------
5784 // Pipeline Description specifies the stages in the machine's pipeline
5786 // IF: fetch
5787 // ID: decode
5788 // RD: read
5789 // CA: calculate
5790 // WB: write back
5791 // CM: commit
5793 pipe_desc(IF, ID, RD, CA, WB, CM);
5796 //----------PIPELINE CLASSES---------------------------------------------------
5797 // Pipeline Classes describe the stages in which input and output are
5798 // referenced by the hardware pipeline.
5800 //No.1 Integer ALU reg-reg operation : dst <-- reg1 op reg2
// Pipeline class for a single reg-reg integer ALU instruction: both
// sources read in RD, result written one cycle after WB, one decode slot
// in ID and one ALU in CA.
5801 pipe_class ialu_regI_regI(mRegI dst, mRegI src1, mRegI src2) %{
5802 single_instruction;
5803 src1 : RD(read);
5804 src2 : RD(read);
5805 dst : WB(write)+1; // result available one cycle after WB
5806 DECODE : ID;
5807 ALU : CA;
5808 %}
5810 //No.19 Integer mult operation : dst <-- reg1 mult reg2
5811 pipe_class ialu_mult(mRegI dst, mRegI src1, mRegI src2) %{
5812 src1 : RD(read);
5813 src2 : RD(read);
5814 dst : WB(write)+5;
5815 DECODE : ID;
5816 ALU2 : CA;
5817 %}
5819 pipe_class mulL_reg_reg(mRegL dst, mRegL src1, mRegL src2) %{
5820 src1 : RD(read);
5821 src2 : RD(read);
5822 dst : WB(write)+10;
5823 DECODE : ID;
5824 ALU2 : CA;
5825 %}
5827 //No.19 Integer div operation : dst <-- reg1 div reg2
5828 pipe_class ialu_div(mRegI dst, mRegI src1, mRegI src2) %{
5829 src1 : RD(read);
5830 src2 : RD(read);
5831 dst : WB(write)+10;
5832 DECODE : ID;
5833 ALU2 : CA;
5834 %}
5836 //No.19 Integer mod operation : dst <-- reg1 mod reg2
5837 pipe_class ialu_mod(mRegI dst, mRegI src1, mRegI src2) %{
5838 instruction_count(2);
5839 src1 : RD(read);
5840 src2 : RD(read);
5841 dst : WB(write)+10;
5842 DECODE : ID;
5843 ALU2 : CA;
5844 %}
5846 //No.15 Long ALU reg-reg operation : dst <-- reg1 op reg2
5847 pipe_class ialu_regL_regL(mRegL dst, mRegL src1, mRegL src2) %{
5848 instruction_count(2);
5849 src1 : RD(read);
5850 src2 : RD(read);
5851 dst : WB(write);
5852 DECODE : ID;
5853 ALU : CA;
5854 %}
5856 //No.18 Long ALU reg-imm16 operation : dst <-- reg1 op imm16
5857 pipe_class ialu_regL_imm16(mRegL dst, mRegL src) %{
5858 instruction_count(2);
5859 src : RD(read);
5860 dst : WB(write);
5861 DECODE : ID;
5862 ALU : CA;
5863 %}
5865 //no.16 load Long from memory :
5866 pipe_class ialu_loadL(mRegL dst, memory mem) %{
5867 instruction_count(2);
5868 mem : RD(read);
5869 dst : WB(write)+5;
5870 DECODE : ID;
5871 MEM : RD;
5872 %}
5874 //No.17 Store Long to Memory :
5875 pipe_class ialu_storeL(mRegL src, memory mem) %{
5876 instruction_count(2);
5877 mem : RD(read);
5878 src : RD(read);
5879 DECODE : ID;
5880 MEM : RD;
5881 %}
5883 //No.2 Integer ALU reg-imm16 operation : dst <-- reg1 op imm16
5884 pipe_class ialu_regI_imm16(mRegI dst, mRegI src) %{
5885 single_instruction;
5886 src : RD(read);
5887 dst : WB(write);
5888 DECODE : ID;
5889 ALU : CA;
5890 %}
5892 //No.3 Integer move operation : dst <-- reg
5893 pipe_class ialu_regI_mov(mRegI dst, mRegI src) %{
5894 src : RD(read);
5895 dst : WB(write);
5896 DECODE : ID;
5897 ALU : CA;
5898 %}
5900 //No.4 No instructions : do nothing
5901 pipe_class empty( ) %{
5902 instruction_count(0);
5903 %}
5905 //No.5 UnConditional branch :
5906 pipe_class pipe_jump( label labl ) %{
5907 multiple_bundles;
5908 DECODE : ID;
5909 BR : RD;
5910 %}
5912 //No.6 ALU Conditional branch :
5913 pipe_class pipe_alu_branch(mRegI src1, mRegI src2, label labl ) %{
5914 multiple_bundles;
5915 src1 : RD(read);
5916 src2 : RD(read);
5917 DECODE : ID;
5918 BR : RD;
5919 %}
5921 //no.7 load integer from memory :
5922 pipe_class ialu_loadI(mRegI dst, memory mem) %{
5923 mem : RD(read);
5924 dst : WB(write)+3;
5925 DECODE : ID;
5926 MEM : RD;
5927 %}
5929 //No.8 Store Integer to Memory :
5930 pipe_class ialu_storeI(mRegI src, memory mem) %{
5931 mem : RD(read);
5932 src : RD(read);
5933 DECODE : ID;
5934 MEM : RD;
5935 %}
//No.10 Floating FPU reg-reg operation : dst <-- reg1 op reg2
pipe_class fpu_regF_regF(regF dst, regF src1, regF src2) %{
  src1   : RD(read);
  src2   : RD(read);
  dst    : WB(write);
  DECODE : ID;
  FPU    : CA;
%}

//No.22 Floating div operation : dst <-- reg1 div reg2
// Issues only to the second FPU.
pipe_class fpu_div(regF dst, regF src1, regF src2) %{
  src1   : RD(read);
  src2   : RD(read);
  dst    : WB(write);
  DECODE : ID;
  FPU2   : CA;
%}

// Int -> double conversion; first FPU only.
pipe_class fcvt_I2D(regD dst, mRegI src) %{
  src    : RD(read);
  dst    : WB(write);
  DECODE : ID;
  FPU1   : CA;
%}

// Double -> int conversion; first FPU only.
pipe_class fcvt_D2I(mRegI dst, regD src) %{
  src    : RD(read);
  dst    : WB(write);
  DECODE : ID;
  FPU1   : CA;
%}

// Move FPR to GPR (mfc1); goes through the memory port.
pipe_class pipe_mfc1(mRegI dst, regD src) %{
  src    : RD(read);
  dst    : WB(write);
  DECODE : ID;
  MEM    : RD;
%}

// Move GPR to FPR (mtc1); occupies the memory port for 5 cycles.
pipe_class pipe_mtc1(regD dst, mRegI src) %{
  src    : RD(read);
  dst    : WB(write);
  DECODE : ID;
  MEM    : RD(5);
%}

//No.23 Floating sqrt operation : dst <-- reg1 sqrt reg2
pipe_class fpu_sqrt(regF dst, regF src1, regF src2) %{
  multiple_bundles;
  src1   : RD(read);
  src2   : RD(read);
  dst    : WB(write);
  DECODE : ID;
  FPU2   : CA;
%}

//No.11 Load Floating from Memory :
pipe_class fpu_loadF(regF dst, memory mem) %{
  instruction_count(1);
  mem    : RD(read);
  dst    : WB(write)+3;     // load-use latency
  DECODE : ID;
  MEM    : RD;
%}

//No.12 Store Floating to Memory :
pipe_class fpu_storeF(regF src, memory mem) %{
  instruction_count(1);
  mem    : RD(read);
  src    : RD(read);
  DECODE : ID;
  MEM    : RD;
%}

//No.13 FPU Conditional branch :
pipe_class pipe_fpu_branch(regF src1, regF src2, label labl ) %{
  multiple_bundles;
  src1   : RD(read);
  src2   : RD(read);
  DECODE : ID;
  BR     : RD;
%}

//No.14 Floating FPU reg operation : dst <-- op reg
pipe_class fpu1_regF(regF dst, regF src) %{
  src    : RD(read);
  dst    : WB(write);
  DECODE : ID;
  FPU    : CA;
%}
// Multi-instruction memory sequence (e.g. atomics); fully serialized.
pipe_class long_memory_op() %{
  instruction_count(10); multiple_bundles; force_serialization;
  fixed_latency(30);
%}

// Call through the branch unit; fixed conservative latency.
pipe_class simple_call() %{
  instruction_count(10); multiple_bundles; force_serialization;
  fixed_latency(200);
  BR : RD;
%}

// Generic call; no resource claimed, just serialized with fixed latency.
pipe_class call() %{
  instruction_count(10); multiple_bundles; force_serialization;
  fixed_latency(200);
%}

//FIXME:
//No.9 Pipe slow : for multi-instructions
// Catch-all class for long expansions the scheduler should not reorder.
pipe_class pipe_slow( ) %{
  instruction_count(20);
  force_serialization;
  multiple_bundles;
  fixed_latency(50);
%}
6054 %}
6058 //----------INSTRUCTIONS-------------------------------------------------------
6059 //
6060 // match -- States which machine-independent subtree may be replaced
6061 // by this instruction.
6062 // ins_cost -- The estimated cost of this instruction is used by instruction
6063 // selection to identify a minimum cost tree of machine
6064 // instructions that matches a tree of machine-independent
6065 // instructions.
6066 // format -- A string providing the disassembly for this instruction.
6067 // The value of an instruction's operand may be inserted
6068 // by referring to it with a '$' prefix.
6069 // opcode -- Three instruction opcodes may be provided. These are referred
6070 // to within an encode class as $primary, $secondary, and $tertiary
6071 // respectively. The primary opcode is commonly used to
6072 // indicate the type of machine instruction, while secondary
6073 // and tertiary are often used for prefix options or addressing
6074 // modes.
6075 // ins_encode -- A list of encode classes with parameters. The encode class
6076 // name must have been defined in an 'enc_class' specification
6077 // in the encode section of the architecture description.
// Load Integer
instruct loadI(mRegI dst, memory mem) %{
  match(Set dst (LoadI mem));

  ins_cost(125);
  format %{ "lw $dst, $mem #@loadI" %}
  ins_encode (load_I_enc(dst, mem));
  ins_pipe( ialu_loadI );
%}

// Load Integer with implicit sign-extension to Long (MIPS lw sign-extends).
instruct loadI_convI2L(mRegL dst, memory mem) %{
  match(Set dst (ConvI2L (LoadI mem)));

  ins_cost(125);
  format %{ "lw $dst, $mem #@loadI_convI2L" %}
  ins_encode (load_I_enc(dst, mem));
  ins_pipe( ialu_loadI );
%}

// Load Integer (32 bit signed) to Byte (8 bit signed)
// Matches (x << 24) >> 24 applied to a LoadI and narrows it to a single lb.
instruct loadI2B(mRegI dst, memory mem, immI_24 twentyfour) %{
  match(Set dst (RShiftI (LShiftI (LoadI mem) twentyfour) twentyfour));

  ins_cost(125);
  format %{ "lb $dst, $mem\t# int -> byte #@loadI2B" %}
  ins_encode(load_B_enc(dst, mem));
  ins_pipe(ialu_loadI);
%}

// Load Integer (32 bit signed) to Unsigned Byte (8 bit UNsigned)
instruct loadI2UB(mRegI dst, memory mem, immI_255 mask) %{
  match(Set dst (AndI (LoadI mem) mask));

  ins_cost(125);
  format %{ "lbu $dst, $mem\t# int -> ubyte #@loadI2UB" %}
  ins_encode(load_UB_enc(dst, mem));
  ins_pipe(ialu_loadI);
%}

// Load Integer (32 bit signed) to Short (16 bit signed)
instruct loadI2S(mRegI dst, memory mem, immI_16 sixteen) %{
  match(Set dst (RShiftI (LShiftI (LoadI mem) sixteen) sixteen));

  ins_cost(125);
  format %{ "lh $dst, $mem\t# int -> short #@loadI2S" %}
  ins_encode(load_S_enc(dst, mem));
  ins_pipe(ialu_loadI);
%}

// Load Integer (32 bit signed) to Unsigned Short/Char (16 bit UNsigned)
instruct loadI2US(mRegI dst, memory mem, immI_65535 mask) %{
  match(Set dst (AndI (LoadI mem) mask));

  ins_cost(125);
  format %{ "lhu $dst, $mem\t# int -> ushort/char #@loadI2US" %}
  ins_encode(load_C_enc(dst, mem));
  ins_pipe(ialu_loadI);
%}
// Load Long.
instruct loadL(mRegL dst, memory mem) %{
  // predicate(!((LoadLNode*)n)->require_atomic_access());
  match(Set dst (LoadL mem));

  ins_cost(250);
  format %{ "ld $dst, $mem #@loadL" %}
  ins_encode(load_L_enc(dst, mem));
  ins_pipe( ialu_loadL );
%}

// Load Long - UNaligned
// NOTE(review): currently emitted as a plain ld, identical to loadL;
// relies on the hardware/trap handler tolerating unaligned ld — confirm.
instruct loadL_unaligned(mRegL dst, memory mem) %{
  match(Set dst (LoadL_unaligned mem));

  // FIXME: Jin: Need more effective ldl/ldr
  ins_cost(450);
  format %{ "ld $dst, $mem #@loadL_unaligned\n\t" %}
  ins_encode(load_L_enc(dst, mem));
  ins_pipe( ialu_loadL );
%}

// Store Long
instruct storeL_reg(memory mem, mRegL src) %{
  match(Set mem (StoreL mem src));

  ins_cost(200);
  format %{ "sd $mem, $src #@storeL_reg\n" %}
  ins_encode(store_L_reg_enc(mem, src));
  ins_pipe( ialu_storeL );
%}

// Store Long zero; cheaper than materializing 0 in a register first.
instruct storeL_immL0(memory mem, immL0 zero) %{
  match(Set mem (StoreL mem zero));

  ins_cost(180);
  format %{ "sd $mem, zero #@storeL_immL0" %}
  ins_encode(store_L_immL0_enc(mem, zero));
  ins_pipe( ialu_storeL );
%}
// Load Compressed Pointer (32-bit zero-extending load).
instruct loadN(mRegN dst, memory mem)
%{
  match(Set dst (LoadN mem));

  ins_cost(125); // XXX
  format %{ "lwu $dst, $mem\t# compressed ptr @ loadN" %}
  ins_encode (load_N_enc(dst, mem));
  ins_pipe( ialu_loadI ); // XXX
%}

// Load Pointer
instruct loadP(mRegP dst, memory mem) %{
  match(Set dst (LoadP mem));

  ins_cost(125);
  format %{ "ld $dst, $mem #@loadP" %}
  ins_encode (load_P_enc(dst, mem));
  ins_pipe( ialu_loadI );
%}

// Load Klass Pointer (same encoding as loadP; full-width klass pointer).
instruct loadKlass(mRegP dst, memory mem) %{
  match(Set dst (LoadKlass mem));

  ins_cost(125);
  format %{ "MOV $dst,$mem @ loadKlass" %}
  ins_encode (load_P_enc(dst, mem));
  ins_pipe( ialu_loadI );
%}

// Load narrow Klass Pointer
instruct loadNKlass(mRegN dst, memory mem)
%{
  match(Set dst (LoadNKlass mem));

  ins_cost(125); // XXX
  format %{ "lwu $dst, $mem\t# compressed klass ptr @ loadNKlass" %}
  ins_encode (load_N_enc(dst, mem));
  ins_pipe( ialu_loadI ); // XXX
%}
// Load Constant
instruct loadConI(mRegI dst, immI src) %{
  match(Set dst src);

  ins_cost(150);
  format %{ "mov $dst, $src #@loadConI" %}
  ins_encode %{
    Register dst = $dst$$Register;
    int value = $src$$constant;
    // MacroAssembler::move picks the shortest sequence for the constant.
    __ move(dst, value);
  %}
  ins_pipe( ialu_regI_regI );
%}

// Load a 64-bit long constant via the full set64 sequence.
instruct loadConL_set64(mRegL dst, immL src) %{
  match(Set dst src);
  ins_cost(120);
  format %{ "li $dst, $src @ loadConL_set64" %}
  ins_encode %{
    __ set64($dst$$Register, $src$$constant);
  %}
  ins_pipe(ialu_regL_regL);
%}
6248 /*
6249 // Load long value from constant table (predicated by immL_expensive).
6250 instruct loadConL_load(mRegL dst, immL_expensive src) %{
6251 match(Set dst src);
6252 ins_cost(150);
6253 format %{ "ld $dst, $constantoffset[$constanttablebase] # load long $src from table @ loadConL_ldx" %}
6254 ins_encode %{
6255 int con_offset = $constantoffset($src);
6257 if (Assembler::is_simm16(con_offset)) {
6258 __ ld($dst$$Register, $constanttablebase, con_offset);
6259 } else {
6260 __ set64(AT, con_offset);
6261 if (UseLoongsonISA) {
6262 __ gsldx($dst$$Register, $constanttablebase, AT, 0);
6263 } else {
6264 __ daddu(AT, $constanttablebase, AT);
6265 __ ld($dst$$Register, AT, 0);
6266 }
6267 }
6268 %}
6269 ins_pipe(ialu_loadI);
6270 %}
6271 */
// Long constant that fits in a signed 16-bit immediate: a single daddiu.
instruct loadConL16(mRegL dst, immL16 src) %{
  match(Set dst src);
  ins_cost(105);
  format %{ "mov $dst, $src #@loadConL16" %}
  ins_encode %{
    Register dst_reg = as_Register($dst$$reg);
    int value = $src$$constant;
    __ daddiu(dst_reg, R0, value);
  %}
  ins_pipe( ialu_regL_regL );
%}

// Long constant zero: cheapest form, a register-zero add.
instruct loadConL0(mRegL dst, immL0 src) %{
  match(Set dst src);
  ins_cost(100);
  format %{ "mov $dst, zero #@loadConL0" %}
  ins_encode %{
    Register dst_reg = as_Register($dst$$reg);
    __ daddu(dst_reg, R0, R0);
  %}
  ins_pipe( ialu_regL_regL );
%}

// Load Range (array length); same encoding as a plain int load.
instruct loadRange(mRegI dst, memory mem) %{
  match(Set dst (LoadRange mem));

  ins_cost(125);
  format %{ "MOV $dst,$mem @ loadRange" %}
  ins_encode(load_I_enc(dst, mem));
  ins_pipe( ialu_loadI );
%}
// Store Pointer
instruct storeP(memory mem, mRegP src ) %{
  match(Set mem (StoreP mem src));

  ins_cost(125);
  format %{ "sd $src, $mem #@storeP" %}
  ins_encode(store_P_reg_enc(mem, src));
  ins_pipe( ialu_storeI );
%}

// Store NULL Pointer, mark word, or other simple pointer constant.
instruct storeImmP0(memory mem, immP0 zero) %{
  match(Set mem (StoreP mem zero));

  ins_cost(125);
  format %{ "mov $mem, $zero #@storeImmP0" %}
  ins_encode(store_P_immP0_enc(mem));
  ins_pipe( ialu_storeI );
%}

// Store Byte Immediate
instruct storeImmB(memory mem, immI8 src) %{
  match(Set mem (StoreB mem src));

  ins_cost(150);
  format %{ "movb $mem, $src #@storeImmB" %}
  ins_encode(store_B_immI_enc(mem, src));
  ins_pipe( ialu_storeI );
%}

// Store Compressed Pointer
instruct storeN(memory mem, mRegN src)
%{
  match(Set mem (StoreN mem src));

  ins_cost(125); // XXX
  format %{ "sw $mem, $src\t# compressed ptr @ storeN" %}
  ins_encode(store_N_reg_enc(mem, src));
  ins_pipe( ialu_storeI );
%}

// Store compressed klass pointer.
instruct storeNKlass(memory mem, mRegN src)
%{
  match(Set mem (StoreNKlass mem src));

  ins_cost(125); // XXX
  format %{ "sw $mem, $src\t# compressed klass ptr @ storeNKlass" %}
  ins_encode(store_N_reg_enc(mem, src));
  ins_pipe( ialu_storeI );
%}

// Store compressed NULL; only valid when both heap and klass bases are NULL,
// so the encoded form of NULL is the literal 0.
instruct storeImmN0(memory mem, immN0 zero)
%{
  predicate(Universe::narrow_oop_base() == NULL && Universe::narrow_klass_base() == NULL);
  match(Set mem (StoreN mem zero));

  ins_cost(125); // XXX
  format %{ "storeN0 $mem, R12\t# compressed ptr" %}
  ins_encode(storeImmN0_enc(mem, zero));
  ins_pipe( ialu_storeI );
%}
// Store Byte
instruct storeB(memory mem, mRegI src) %{
  match(Set mem (StoreB mem src));

  ins_cost(125);
  format %{ "sb $src, $mem #@storeB" %}
  ins_encode(store_B_reg_enc(mem, src));
  ins_pipe( ialu_storeI );
%}

// Store Byte from a long source; the narrowing ConvL2I is free since sb only
// writes the low 8 bits.
instruct storeB_convL2I(memory mem, mRegL src) %{
  match(Set mem (StoreB mem (ConvL2I src)));

  ins_cost(125);
  format %{ "sb $src, $mem #@storeB_convL2I" %}
  ins_encode(store_B_reg_enc(mem, src));
  ins_pipe( ialu_storeI );
%}

// Load Byte (8bit signed)
instruct loadB(mRegI dst, memory mem) %{
  match(Set dst (LoadB mem));

  ins_cost(125);
  format %{ "lb $dst, $mem #@loadB" %}
  ins_encode(load_B_enc(dst, mem));
  ins_pipe( ialu_loadI );
%}

// Load Byte with implicit sign-extension to Long.
instruct loadB_convI2L(mRegL dst, memory mem) %{
  match(Set dst (ConvI2L (LoadB mem)));

  ins_cost(125);
  format %{ "lb $dst, $mem #@loadB_convI2L" %}
  ins_encode(load_B_enc(dst, mem));
  ins_pipe( ialu_loadI );
%}

// Load Byte (8bit UNsigned)
instruct loadUB(mRegI dst, memory mem) %{
  match(Set dst (LoadUB mem));

  ins_cost(125);
  format %{ "lbu $dst, $mem #@loadUB" %}
  ins_encode(load_UB_enc(dst, mem));
  ins_pipe( ialu_loadI );
%}

// Load unsigned Byte to Long; lbu zero-extends, so no extra conversion.
instruct loadUB_convI2L(mRegL dst, memory mem) %{
  match(Set dst (ConvI2L (LoadUB mem)));

  ins_cost(125);
  format %{ "lbu $dst, $mem #@loadUB_convI2L" %}
  ins_encode(load_UB_enc(dst, mem));
  ins_pipe( ialu_loadI );
%}

// Load Short (16bit signed)
instruct loadS(mRegI dst, memory mem) %{
  match(Set dst (LoadS mem));

  ins_cost(125);
  format %{ "lh $dst, $mem #@loadS" %}
  ins_encode(load_S_enc(dst, mem));
  ins_pipe( ialu_loadI );
%}

// Load Short (16 bit signed) to Byte (8 bit signed)
instruct loadS2B(mRegI dst, memory mem, immI_24 twentyfour) %{
  match(Set dst (RShiftI (LShiftI (LoadS mem) twentyfour) twentyfour));

  ins_cost(125);
  format %{ "lb $dst, $mem\t# short -> byte #@loadS2B" %}
  ins_encode(load_B_enc(dst, mem));
  ins_pipe(ialu_loadI);
%}

// Load Short with implicit sign-extension to Long.
instruct loadS_convI2L(mRegL dst, memory mem) %{
  match(Set dst (ConvI2L (LoadS mem)));

  ins_cost(125);
  format %{ "lh $dst, $mem #@loadS_convI2L" %}
  ins_encode(load_S_enc(dst, mem));
  ins_pipe( ialu_loadI );
%}
// Store Integer Immediate
instruct storeImmI(memory mem, immI src) %{
  match(Set mem (StoreI mem src));

  ins_cost(150);
  format %{ "mov $mem, $src #@storeImmI" %}
  ins_encode(store_I_immI_enc(mem, src));
  ins_pipe( ialu_storeI );
%}

// Store Integer
instruct storeI(memory mem, mRegI src) %{
  match(Set mem (StoreI mem src));

  ins_cost(125);
  format %{ "sw $mem, $src #@storeI" %}
  ins_encode(store_I_reg_enc(mem, src));
  ins_pipe( ialu_storeI );
%}

// Store Integer from a long source; sw only writes the low 32 bits so the
// ConvL2I is folded away.
instruct storeI_convL2I(memory mem, mRegL src) %{
  match(Set mem (StoreI mem (ConvL2I src)));

  ins_cost(125);
  format %{ "sw $mem, $src #@storeI_convL2I" %}
  ins_encode(store_I_reg_enc(mem, src));
  ins_pipe( ialu_storeI );
%}

// Load Float
instruct loadF(regF dst, memory mem) %{
  match(Set dst (LoadF mem));

  ins_cost(150);
  format %{ "loadF $dst, $mem #@loadF" %}
  ins_encode(load_F_enc(dst, mem));
  ins_pipe( ialu_loadI );
%}
// Load a general pointer constant. Metadata and oop constants must be emitted
// with a relocation record and a fixed-length patchable sequence so the GC /
// class redefinition machinery can later rewrite the embedded value.
instruct loadConP_general(mRegP dst, immP src) %{
  match(Set dst src);

  ins_cost(120);
  format %{ "li $dst, $src #@loadConP_general" %}

  ins_encode %{
    Register dst = $dst$$Register;
    long* value = (long*)$src$$constant;

    if ($src->constant_reloc() == relocInfo::metadata_type) {
      // Klass constant: record it and emit a patchable 48-bit load.
      int klass_index = __ oop_recorder()->find_index((Klass*)value);
      RelocationHolder rspec = metadata_Relocation::spec(klass_index);

      __ relocate(rspec);
      __ patchable_set48(dst, (long)value);
    } else if ($src->constant_reloc() == relocInfo::oop_type) {
      // Oop constant: record it and emit a patchable 48-bit load.
      int oop_index = __ oop_recorder()->find_index((jobject)value);
      RelocationHolder rspec = oop_Relocation::spec(oop_index);

      __ relocate(rspec);
      __ patchable_set48(dst, (long)value);
    } else if ($src->constant_reloc() == relocInfo::none) {
      // Plain pointer constant: no relocation needed, shortest sequence wins.
      __ set64(dst, (long)value);
    } else {
      // The original code silently fell through for any other reloc type,
      // leaving dst unset; make that case fail loudly instead.
      ShouldNotReachHere();
    }
  %}

  ins_pipe( ialu_regI_regI );
%}
6524 /*
6525 instruct loadConP_load(mRegP dst, immP_load src) %{
6526 match(Set dst src);
6528 ins_cost(100);
6529 format %{ "ld $dst, [$constanttablebase + $constantoffset] load from constant table: ptr=$src @ loadConP_load" %}
6531 ins_encode %{
6533 int con_offset = $constantoffset($src);
6535 if (Assembler::is_simm16(con_offset)) {
6536 __ ld($dst$$Register, $constanttablebase, con_offset);
6537 } else {
6538 __ set64(AT, con_offset);
6539 if (UseLoongsonISA) {
6540 __ gsldx($dst$$Register, $constanttablebase, AT, 0);
6541 } else {
6542 __ daddu(AT, $constanttablebase, AT);
6543 __ ld($dst$$Register, AT, 0);
6544 }
6545 }
6546 %}
6548 ins_pipe(ialu_loadI);
6549 %}
6550 */
// Load a non-oop pointer constant that is cheap to materialize inline.
instruct loadConP_no_oop_cheap(mRegP dst, immP_no_oop_cheap src) %{
  match(Set dst src);

  ins_cost(80);
  format %{ "li $dst, $src @ loadConP_no_oop_cheap" %}

  ins_encode %{
    __ set64($dst$$Register, $src$$constant);
  %}

  ins_pipe(ialu_regI_regI);
%}

// Load the safepoint polling page address.
instruct loadConP_poll(mRegP dst, immP_poll src) %{
  match(Set dst src);

  ins_cost(50);
  format %{ "li $dst, $src #@loadConP_poll" %}

  ins_encode %{
    Register dst = $dst$$Register;
    intptr_t value = (intptr_t)$src$$constant;

    __ set64(dst, (jlong)value);
  %}

  ins_pipe( ialu_regI_regI );
%}

// Load NULL pointer constant.
instruct loadConP0(mRegP dst, immP0 src)
%{
  match(Set dst src);

  ins_cost(50);
  format %{ "mov $dst, R0\t# ptr" %}
  ins_encode %{
    Register dst_reg = $dst$$Register;
    __ daddu(dst_reg, R0, R0);
  %}
  ins_pipe( ialu_regI_regI );
%}

// Load compressed NULL pointer constant.
instruct loadConN0(mRegN dst, immN0 src) %{
  match(Set dst src);
  format %{ "move $dst, R0\t# compressed NULL ptr" %}
  ins_encode %{
    __ move($dst$$Register, R0);
  %}
  ins_pipe( ialu_regI_regI );
%}

// Load a compressed (narrow) oop constant; emits its own oop relocation.
instruct loadConN(mRegN dst, immN src) %{
  match(Set dst src);

  ins_cost(125);
  format %{ "li $dst, $src\t# compressed ptr @ loadConN" %}
  ins_encode %{
    Register dst = $dst$$Register;
    __ set_narrow_oop(dst, (jobject)$src$$constant);
  %}
  ins_pipe( ialu_regI_regI ); // XXX
%}

// Load a compressed (narrow) klass constant; emits its own metadata relocation.
instruct loadConNKlass(mRegN dst, immNKlass src) %{
  match(Set dst src);

  ins_cost(125);
  format %{ "li $dst, $src\t# compressed klass ptr @ loadConNKlass" %}
  ins_encode %{
    Register dst = $dst$$Register;
    __ set_narrow_klass(dst, (Klass*)$src$$constant);
  %}
  ins_pipe( ialu_regI_regI ); // XXX
%}
//FIXME
// Tail Call; Jump from runtime stub to Java code.
// Also known as an 'interprocedural jump'.
// Target of jump will eventually return to caller.
// TailJump below removes the return address.
instruct TailCalljmpInd(mRegP jump_target, mRegP method_oop) %{
  match(TailCall jump_target method_oop );
  ins_cost(300);
  format %{ "JMP $jump_target \t# @TailCalljmpInd" %}

  ins_encode %{
    Register target = $jump_target$$Register;
    Register oop = $method_oop$$Register;

    // 2012/10/12 Jin: RA will be used in generate_forward_exception()
    __ push(RA);

    // S3 carries the method oop across the jump (callee convention).
    __ move(S3, oop);
    __ jr(target);
    __ nop();   // branch delay slot
  %}

  ins_pipe( pipe_jump );
%}
// Create exception oop: created by stack-crawling runtime code.
// Created exception is now available to this handler, and is setup
// just prior to jumping to this handler. No code emitted.
instruct CreateException( a0_RegP ex_oop )
%{
  match(Set ex_oop (CreateEx));

  // use the following format syntax
  format %{ "# exception oop is in A0; no code emitted @CreateException" %}
  ins_encode %{
    // Jin: X86 leaves this function empty
    __ block_comment("CreateException is empty in X86/MIPS");
  %}
  ins_pipe( empty );
  // ins_pipe( pipe_jump );
%}
6671 /* 2012/9/14 Jin: The mechanism of exception handling is clear now.
6673 - Common try/catch:
6674 2012/9/14 Jin: [stubGenerator_mips.cpp] generate_forward_exception()
6675 |- V0, V1 are created
6676 |- T9 <= SharedRuntime::exception_handler_for_return_address
6677 `- jr T9
6678 `- the caller's exception_handler
6679 `- jr OptoRuntime::exception_blob
6680 `- here
6681 - Rethrow(e.g. 'unwind'):
6682 * The callee:
6683 |- an exception is triggered during execution
6684 `- exits the callee method through RethrowException node
6685 |- The callee pushes exception_oop(T0) and exception_pc(RA)
6686 `- The callee jumps to OptoRuntime::rethrow_stub()
6687 * In OptoRuntime::rethrow_stub:
6688 |- The VM calls _rethrow_Java to determine the return address in the caller method
6689 `- exits the stub with tailjmpInd
6690 |- pops exception_oop(V0) and exception_pc(V1)
6691 `- jumps to the return address(usually an exception_handler)
6692 * The caller:
6693 `- continues processing the exception_blob with V0/V1
6694 */
6696 /*
6697 Disassembling OptoRuntime::rethrow_stub()
6699 ; locals
6700 0x2d3bf320: addiu sp, sp, 0xfffffff8
6701 0x2d3bf324: sw ra, 0x4(sp)
6702 0x2d3bf328: sw fp, 0x0(sp)
6703 0x2d3bf32c: addu fp, sp, zero
6704 0x2d3bf330: addiu sp, sp, 0xfffffff0
6705 0x2d3bf334: sw ra, 0x8(sp)
6706 0x2d3bf338: sw t0, 0x4(sp)
6707 0x2d3bf33c: sw sp, 0x0(sp)
6709 ; get_thread(S2)
6710 0x2d3bf340: addu s2, sp, zero
6711 0x2d3bf344: srl s2, s2, 12
6712 0x2d3bf348: sll s2, s2, 2
6713 0x2d3bf34c: lui at, 0x2c85
6714 0x2d3bf350: addu at, at, s2
6715 0x2d3bf354: lw s2, 0xffffcc80(at)
6717 0x2d3bf358: lw s0, 0x0(sp)
6718 0x2d3bf35c: sw s0, 0x118(s2) // last_sp -> threa
6719 0x2d3bf360: sw s2, 0xc(sp)
6721 ; OptoRuntime::rethrow_C(oopDesc* exception, JavaThread* thread, address ret_pc)
6722 0x2d3bf364: lw a0, 0x4(sp)
6723 0x2d3bf368: lw a1, 0xc(sp)
6724 0x2d3bf36c: lw a2, 0x8(sp)
6725 ;; Java_To_Runtime
6726 0x2d3bf370: lui t9, 0x2c34
6727 0x2d3bf374: addiu t9, t9, 0xffff8a48
6728 0x2d3bf378: jalr t9
6729 0x2d3bf37c: nop
6731 0x2d3bf380: addu s3, v0, zero ; S3: SharedRuntime::raw_exception_handler_for_return_address()
6733 0x2d3bf384: lw s0, 0xc(sp)
6734 0x2d3bf388: sw zero, 0x118(s0)
6735 0x2d3bf38c: sw zero, 0x11c(s0)
6736 0x2d3bf390: lw s1, 0x144(s0) ; ex_oop: S1
6737 0x2d3bf394: addu s2, s0, zero
6738 0x2d3bf398: sw zero, 0x144(s2)
6739 0x2d3bf39c: lw s0, 0x4(s2)
6740 0x2d3bf3a0: addiu s4, zero, 0x0
6741 0x2d3bf3a4: bne s0, s4, 0x2d3bf3d4
6742 0x2d3bf3a8: nop
6743 0x2d3bf3ac: addiu sp, sp, 0x10
6744 0x2d3bf3b0: addiu sp, sp, 0x8
6745 0x2d3bf3b4: lw ra, 0xfffffffc(sp)
6746 0x2d3bf3b8: lw fp, 0xfffffff8(sp)
6747 0x2d3bf3bc: lui at, 0x2b48
6748 0x2d3bf3c0: lw at, 0x100(at)
6750 ; tailjmpInd: Restores exception_oop & exception_pc
6751 0x2d3bf3c4: addu v1, ra, zero
6752 0x2d3bf3c8: addu v0, s1, zero
6753 0x2d3bf3cc: jr s3
6754 0x2d3bf3d0: nop
6755 ; Exception:
6756 0x2d3bf3d4: lui s1, 0x2cc8 ; generate_forward_exception()
6757 0x2d3bf3d8: addiu s1, s1, 0x40
6758 0x2d3bf3dc: addiu s2, zero, 0x0
6759 0x2d3bf3e0: addiu sp, sp, 0x10
6760 0x2d3bf3e4: addiu sp, sp, 0x8
6761 0x2d3bf3e8: lw ra, 0xfffffffc(sp)
6762 0x2d3bf3ec: lw fp, 0xfffffff8(sp)
6763 0x2d3bf3f0: lui at, 0x2b48
6764 0x2d3bf3f4: lw at, 0x100(at)
6765 ; TailCalljmpInd
6766 __ push(RA); ; to be used in generate_forward_exception()
6767 0x2d3bf3f8: addu t7, s2, zero
6768 0x2d3bf3fc: jr s1
6769 0x2d3bf400: nop
6770 */
// Rethrow exception:
// The exception oop will come in the first argument position.
// Then JUMP (not call) to the rethrow stub code.
instruct RethrowException()
%{
  match(Rethrow);

  // use the following format syntax
  format %{ "JMP rethrow_stub #@RethrowException" %}
  ins_encode %{
    __ block_comment("@ RethrowException");

    // Mark the jump site so the runtime-call relocation covers the li/jr pair.
    cbuf.set_insts_mark();
    cbuf.relocate(cbuf.insts_mark(), runtime_call_Relocation::spec());

    // call OptoRuntime::rethrow_stub to get the exception handler in parent method
    __ li(T9, OptoRuntime::rethrow_stub());
    __ jr(T9);
    __ nop();   // branch delay slot
  %}
  ins_pipe( pipe_jump );
%}
// Branch on pointer compared against NULL. Only eq/ne are meaningful for a
// NULL comparison, so the unsigned ordering cases are not implemented.
instruct branchConP_zero(cmpOpU cmp, mRegP op1, immP0 zero, label labl) %{
  match(If cmp (CmpP op1 zero));
  effect(USE labl);

  ins_cost(180);
  format %{ "b$cmp $op1, R0, $labl #@branchConP_zero" %}

  ins_encode %{
    Register op1 = $op1$$Register;
    Register op2 = R0;
    // Test the label pointer before use. The previous code bound a reference
    // via '*($labl$$label)' and then tested 'if (&L)': dereferencing a NULL
    // pointer is undefined behaviour, and the address of a reference is
    // always non-null, so the guard never worked as intended.
    Label* L = $labl$$label;
    int flag = $cmp$$cmpcode;

    switch(flag)
    {
      case 0x01: //equal
        if (L != NULL)
          __ beq(op1, op2, *L);
        else
          __ beq(op1, op2, (int)0);
        break;
      case 0x02: //not_equal
        if (L != NULL)
          __ bne(op1, op2, *L);
        else
          __ bne(op1, op2, (int)0);
        break;
      default:
        Unimplemented();
    }
    __ nop();   // branch delay slot
  %}

  ins_pc_relative(1);
  ins_pipe( pipe_alu_branch );
%}
// Branch on unsigned pointer comparison of two registers.
instruct branchConP(cmpOpU cmp, mRegP op1, mRegP op2, label labl) %{
  match(If cmp (CmpP op1 op2));
  // predicate(can_branch_register(_kids[0]->_leaf, _kids[1]->_leaf));
  effect(USE labl);

  ins_cost(200);
  format %{ "b$cmp $op1, $op2, $labl #@branchConP" %}

  ins_encode %{
    Register op1 = $op1$$Register;
    Register op2 = $op2$$Register;
    // Test the label pointer itself: the old '*($labl$$label)' / 'if (&L)'
    // pattern dereferences a possibly-NULL pointer (UB) and an
    // address-of-reference check is always true.
    Label* L = $labl$$label;
    int flag = $cmp$$cmpcode;

    switch(flag)
    {
      case 0x01: //equal
        if (L != NULL)
          __ beq(op1, op2, *L);
        else
          __ beq(op1, op2, (int)0);
        break;
      case 0x02: //not_equal
        if (L != NULL)
          __ bne(op1, op2, *L);
        else
          __ bne(op1, op2, (int)0);
        break;
      case 0x03: //above
        __ sltu(AT, op2, op1);       // AT = (op1 > op2)
        if (L != NULL)
          __ bne(R0, AT, *L);
        else
          __ bne(R0, AT, (int)0);
        break;
      case 0x04: //above_equal
        __ sltu(AT, op1, op2);       // AT = (op1 < op2)
        if (L != NULL)
          __ beq(AT, R0, *L);
        else
          __ beq(AT, R0, (int)0);
        break;
      case 0x05: //below
        __ sltu(AT, op1, op2);       // AT = (op1 < op2)
        if (L != NULL)
          __ bne(R0, AT, *L);
        else
          __ bne(R0, AT, (int)0);
        break;
      case 0x06: //below_equal
        __ sltu(AT, op2, op1);       // AT = (op1 > op2)
        if (L != NULL)
          __ beq(AT, R0, *L);
        else
          __ beq(AT, R0, (int)0);
        break;
      default:
        Unimplemented();
    }
    __ nop();   // branch delay slot
  %}

  ins_pc_relative(1);
  ins_pipe( pipe_alu_branch );
%}
// Branch on compressed pointer compared against NULL (eq/ne only).
instruct cmpN_null_branch(cmpOp cmp, mRegN op1, immN0 null, label labl) %{
  match(If cmp (CmpN op1 null));
  effect(USE labl);

  ins_cost(180);
  format %{ "CMP $op1,0\t! compressed ptr\n\t"
            "BP$cmp $labl @ cmpN_null_branch" %}
  ins_encode %{
    Register op1 = $op1$$Register;
    Register op2 = R0;
    // Test the label pointer before use instead of the old UB-prone
    // '*($labl$$label)' + 'if (&L)' pattern (address of a reference is
    // always non-null).
    Label* L = $labl$$label;
    int flag = $cmp$$cmpcode;

    switch(flag)
    {
      case 0x01: //equal
        if (L != NULL)
          __ beq(op1, op2, *L);
        else
          __ beq(op1, op2, (int)0);
        break;
      case 0x02: //not_equal
        if (L != NULL)
          __ bne(op1, op2, *L);
        else
          __ bne(op1, op2, (int)0);
        break;
      default:
        Unimplemented();
    }
    __ nop();   // branch delay slot
  %}
  //TODO: pipe_branchP or create pipe_branchN LEE
  ins_pc_relative(1);
  ins_pipe( pipe_alu_branch );
%}
// Branch on unsigned comparison of two compressed pointers.
instruct cmpN_reg_branch(cmpOp cmp, mRegN op1, mRegN op2, label labl) %{
  match(If cmp (CmpN op1 op2));
  effect(USE labl);

  ins_cost(180);
  format %{ "CMP $op1,$op2\t! compressed ptr\n\t"
            "BP$cmp $labl" %}
  ins_encode %{
    Register op1_reg = $op1$$Register;
    Register op2_reg = $op2$$Register;
    // Test the label pointer itself; see branchConP for why the old
    // reference-based 'if (&L)' guard was broken.
    Label* L = $labl$$label;
    int flag = $cmp$$cmpcode;

    switch(flag)
    {
      case 0x01: //equal
        if (L != NULL)
          __ beq(op1_reg, op2_reg, *L);
        else
          __ beq(op1_reg, op2_reg, (int)0);
        break;
      case 0x02: //not_equal
        if (L != NULL)
          __ bne(op1_reg, op2_reg, *L);
        else
          __ bne(op1_reg, op2_reg, (int)0);
        break;
      case 0x03: //above
        __ sltu(AT, op2_reg, op1_reg);    // AT = (op1 > op2)
        if (L != NULL)
          __ bne(R0, AT, *L);
        else
          __ bne(R0, AT, (int)0);
        break;
      case 0x04: //above_equal
        __ sltu(AT, op1_reg, op2_reg);    // AT = (op1 < op2)
        if (L != NULL)
          __ beq(AT, R0, *L);
        else
          __ beq(AT, R0, (int)0);
        break;
      case 0x05: //below
        __ sltu(AT, op1_reg, op2_reg);    // AT = (op1 < op2)
        if (L != NULL)
          __ bne(R0, AT, *L);
        else
          __ bne(R0, AT, (int)0);
        break;
      case 0x06: //below_equal
        __ sltu(AT, op2_reg, op1_reg);    // AT = (op1 > op2)
        if (L != NULL)
          __ beq(AT, R0, *L);
        else
          __ beq(AT, R0, (int)0);
        break;
      default:
        Unimplemented();
    }
    __ nop();   // branch delay slot
  %}
  ins_pc_relative(1);
  ins_pipe( pipe_alu_branch );
%}
// Branch on unsigned integer comparison of two registers.
instruct branchConIU_reg_reg(cmpOpU cmp, mRegI src1, mRegI src2, label labl) %{
  match( If cmp (CmpU src1 src2) );
  effect(USE labl);
  format %{ "BR$cmp $src1, $src2, $labl #@branchConIU_reg_reg" %}

  ins_encode %{
    Register op1 = $src1$$Register;
    Register op2 = $src2$$Register;
    // Test the label pointer itself; see branchConP for why the old
    // reference-based 'if (&L)' guard was broken.
    Label* L = $labl$$label;
    int flag = $cmp$$cmpcode;

    switch(flag)
    {
      case 0x01: //equal
        if (L != NULL)
          __ beq(op1, op2, *L);
        else
          __ beq(op1, op2, (int)0);
        break;
      case 0x02: //not_equal
        if (L != NULL)
          __ bne(op1, op2, *L);
        else
          __ bne(op1, op2, (int)0);
        break;
      case 0x03: //above
        __ sltu(AT, op2, op1);        // AT = (src1 > src2)
        if (L != NULL)
          __ bne(AT, R0, *L);
        else
          __ bne(AT, R0, (int)0);
        break;
      case 0x04: //above_equal
        __ sltu(AT, op1, op2);        // AT = (src1 < src2)
        if (L != NULL)
          __ beq(AT, R0, *L);
        else
          __ beq(AT, R0, (int)0);
        break;
      case 0x05: //below
        __ sltu(AT, op1, op2);        // AT = (src1 < src2)
        if (L != NULL)
          __ bne(AT, R0, *L);
        else
          __ bne(AT, R0, (int)0);
        break;
      case 0x06: //below_equal
        __ sltu(AT, op2, op1);        // AT = (src1 > src2)
        if (L != NULL)
          __ beq(AT, R0, *L);
        else
          __ beq(AT, R0, (int)0);
        break;
      default:
        Unimplemented();
    }
    __ nop();   // branch delay slot
  %}

  ins_pc_relative(1);
  ins_pipe( pipe_alu_branch );
%}
7093 instruct branchConIU_reg_imm(cmpOpU cmp, mRegI src1, immI src2, label labl) %{
7094 match( If cmp (CmpU src1 src2) );
7095 effect(USE labl);
7096 format %{ "BR$cmp $src1, $src2, $labl #@branchConIU_reg_imm" %}
7098 ins_encode %{
7099 Register op1 = $src1$$Register;
7100 int val = $src2$$constant;
7101 Label &L = *($labl$$label);
7102 int flag = $cmp$$cmpcode;
7104 __ move(AT, val);
7105 switch(flag)
7106 {
7107 case 0x01: //equal
7108 if (&L)
7109 __ beq(op1, AT, L);
7110 else
7111 __ beq(op1, AT, (int)0);
7112 break;
7113 case 0x02: //not_equal
7114 if (&L)
7115 __ bne(op1, AT, L);
7116 else
7117 __ bne(op1, AT, (int)0);
7118 break;
7119 case 0x03: //above
7120 __ sltu(AT, AT, op1);
7121 if(&L)
7122 __ bne(R0, AT, L);
7123 else
7124 __ bne(R0, AT, (int)0);
7125 break;
7126 case 0x04: //above_equal
7127 __ sltu(AT, op1, AT);
7128 if(&L)
7129 __ beq(AT, R0, L);
7130 else
7131 __ beq(AT, R0, (int)0);
7132 break;
7133 case 0x05: //below
7134 __ sltu(AT, op1, AT);
7135 if(&L)
7136 __ bne(R0, AT, L);
7137 else
7138 __ bne(R0, AT, (int)0);
7139 break;
7140 case 0x06: //below_equal
7141 __ sltu(AT, AT, op1);
7142 if(&L)
7143 __ beq(AT, R0, L);
7144 else
7145 __ beq(AT, R0, (int)0);
7146 break;
7147 default:
7148 Unimplemented();
7149 }
7150 __ nop();
7151 %}
7153 ins_pc_relative(1);
7154 ins_pipe( pipe_alu_branch );
7155 %}
7157 instruct branchConI_reg_reg(cmpOp cmp, mRegI src1, mRegI src2, label labl) %{
7158 match( If cmp (CmpI src1 src2) );
7159 effect(USE labl);
7160 format %{ "BR$cmp $src1, $src2, $labl #@branchConI_reg_reg" %}
7162 ins_encode %{
7163 Register op1 = $src1$$Register;
7164 Register op2 = $src2$$Register;
7165 Label &L = *($labl$$label);
7166 int flag = $cmp$$cmpcode;
7168 switch(flag)
7169 {
7170 case 0x01: //equal
7171 if (&L)
7172 __ beq(op1, op2, L);
7173 else
7174 __ beq(op1, op2, (int)0);
7175 break;
7176 case 0x02: //not_equal
7177 if (&L)
7178 __ bne(op1, op2, L);
7179 else
7180 __ bne(op1, op2, (int)0);
7181 break;
7182 case 0x03: //above
7183 __ slt(AT, op2, op1);
7184 if(&L)
7185 __ bne(R0, AT, L);
7186 else
7187 __ bne(R0, AT, (int)0);
7188 break;
7189 case 0x04: //above_equal
7190 __ slt(AT, op1, op2);
7191 if(&L)
7192 __ beq(AT, R0, L);
7193 else
7194 __ beq(AT, R0, (int)0);
7195 break;
7196 case 0x05: //below
7197 __ slt(AT, op1, op2);
7198 if(&L)
7199 __ bne(R0, AT, L);
7200 else
7201 __ bne(R0, AT, (int)0);
7202 break;
7203 case 0x06: //below_equal
7204 __ slt(AT, op2, op1);
7205 if(&L)
7206 __ beq(AT, R0, L);
7207 else
7208 __ beq(AT, R0, (int)0);
7209 break;
7210 default:
7211 Unimplemented();
7212 }
7213 __ nop();
7214 %}
7216 ins_pc_relative(1);
7217 ins_pipe( pipe_alu_branch );
7218 %}
7220 instruct branchConI_reg_imm0(cmpOp cmp, mRegI src1, immI0 src2, label labl) %{
7221 match( If cmp (CmpI src1 src2) );
7222 effect(USE labl);
7223 ins_cost(170);
7224 format %{ "BR$cmp $src1, $src2, $labl #@branchConI_reg_imm0" %}
7226 ins_encode %{
7227 Register op1 = $src1$$Register;
7228 // int val = $src2$$constant;
7229 Label &L = *($labl$$label);
7230 int flag = $cmp$$cmpcode;
7232 //__ move(AT, val);
7233 switch(flag)
7234 {
7235 case 0x01: //equal
7236 if (&L)
7237 __ beq(op1, R0, L);
7238 else
7239 __ beq(op1, R0, (int)0);
7240 break;
7241 case 0x02: //not_equal
7242 if (&L)
7243 __ bne(op1, R0, L);
7244 else
7245 __ bne(op1, R0, (int)0);
7246 break;
7247 case 0x03: //greater
7248 if(&L)
7249 __ bgtz(op1, L);
7250 else
7251 __ bgtz(op1, (int)0);
7252 break;
7253 case 0x04: //greater_equal
7254 if(&L)
7255 __ bgez(op1, L);
7256 else
7257 __ bgez(op1, (int)0);
7258 break;
7259 case 0x05: //less
7260 if(&L)
7261 __ bltz(op1, L);
7262 else
7263 __ bltz(op1, (int)0);
7264 break;
7265 case 0x06: //less_equal
7266 if(&L)
7267 __ blez(op1, L);
7268 else
7269 __ blez(op1, (int)0);
7270 break;
7271 default:
7272 Unimplemented();
7273 }
7274 __ nop();
7275 %}
7277 ins_pc_relative(1);
7278 ins_pipe( pipe_alu_branch );
7279 %}
7282 instruct branchConI_reg_imm(cmpOp cmp, mRegI src1, immI src2, label labl) %{
7283 match( If cmp (CmpI src1 src2) );
7284 effect(USE labl);
7285 ins_cost(200);
7286 format %{ "BR$cmp $src1, $src2, $labl #@branchConI_reg_imm" %}
7288 ins_encode %{
7289 Register op1 = $src1$$Register;
7290 int val = $src2$$constant;
7291 Label &L = *($labl$$label);
7292 int flag = $cmp$$cmpcode;
7294 __ move(AT, val);
7295 switch(flag)
7296 {
7297 case 0x01: //equal
7298 if (&L)
7299 __ beq(op1, AT, L);
7300 else
7301 __ beq(op1, AT, (int)0);
7302 break;
7303 case 0x02: //not_equal
7304 if (&L)
7305 __ bne(op1, AT, L);
7306 else
7307 __ bne(op1, AT, (int)0);
7308 break;
7309 case 0x03: //greater
7310 __ slt(AT, AT, op1);
7311 if(&L)
7312 __ bne(R0, AT, L);
7313 else
7314 __ bne(R0, AT, (int)0);
7315 break;
7316 case 0x04: //greater_equal
7317 __ slt(AT, op1, AT);
7318 if(&L)
7319 __ beq(AT, R0, L);
7320 else
7321 __ beq(AT, R0, (int)0);
7322 break;
7323 case 0x05: //less
7324 __ slt(AT, op1, AT);
7325 if(&L)
7326 __ bne(R0, AT, L);
7327 else
7328 __ bne(R0, AT, (int)0);
7329 break;
7330 case 0x06: //less_equal
7331 __ slt(AT, AT, op1);
7332 if(&L)
7333 __ beq(AT, R0, L);
7334 else
7335 __ beq(AT, R0, (int)0);
7336 break;
7337 default:
7338 Unimplemented();
7339 }
7340 __ nop();
7341 %}
7343 ins_pc_relative(1);
7344 ins_pipe( pipe_alu_branch );
7345 %}
7347 instruct branchConIU_reg_imm0(cmpOpU cmp, mRegI src1, immI0 zero, label labl) %{
7348 match( If cmp (CmpU src1 zero) );
7349 effect(USE labl);
7350 format %{ "BR$cmp $src1, zero, $labl #@branchConIU_reg_imm0" %}
7352 ins_encode %{
7353 Register op1 = $src1$$Register;
7354 Label &L = *($labl$$label);
7355 int flag = $cmp$$cmpcode;
7357 switch(flag)
7358 {
7359 case 0x01: //equal
7360 if (&L)
7361 __ beq(op1, R0, L);
7362 else
7363 __ beq(op1, R0, (int)0);
7364 break;
7365 case 0x02: //not_equal
7366 if (&L)
7367 __ bne(op1, R0, L);
7368 else
7369 __ bne(op1, R0, (int)0);
7370 break;
7371 case 0x03: //above
7372 if(&L)
7373 __ bne(R0, op1, L);
7374 else
7375 __ bne(R0, op1, (int)0);
7376 break;
7377 case 0x04: //above_equal
7378 if(&L)
7379 __ beq(R0, R0, L);
7380 else
7381 __ beq(R0, R0, (int)0);
7382 break;
7383 case 0x05: //below
7384 return;
7385 break;
7386 case 0x06: //below_equal
7387 if(&L)
7388 __ beq(op1, R0, L);
7389 else
7390 __ beq(op1, R0, (int)0);
7391 break;
7392 default:
7393 Unimplemented();
7394 }
7395 __ nop();
7396 %}
7398 ins_pc_relative(1);
7399 ins_pipe( pipe_alu_branch );
7400 %}
7403 instruct branchConIU_reg_immI16(cmpOpU cmp, mRegI src1, immI16 src2, label labl) %{
7404 match( If cmp (CmpU src1 src2) );
7405 effect(USE labl);
7406 ins_cost(180);
7407 format %{ "BR$cmp $src1, $src2, $labl #@branchConIU_reg_immI16" %}
7409 ins_encode %{
7410 Register op1 = $src1$$Register;
7411 int val = $src2$$constant;
7412 Label &L = *($labl$$label);
7413 int flag = $cmp$$cmpcode;
7415 switch(flag)
7416 {
7417 case 0x01: //equal
7418 __ move(AT, val);
7419 if (&L)
7420 __ beq(op1, AT, L);
7421 else
7422 __ beq(op1, AT, (int)0);
7423 break;
7424 case 0x02: //not_equal
7425 __ move(AT, val);
7426 if (&L)
7427 __ bne(op1, AT, L);
7428 else
7429 __ bne(op1, AT, (int)0);
7430 break;
7431 case 0x03: //above
7432 __ move(AT, val);
7433 __ sltu(AT, AT, op1);
7434 if(&L)
7435 __ bne(R0, AT, L);
7436 else
7437 __ bne(R0, AT, (int)0);
7438 break;
7439 case 0x04: //above_equal
7440 __ sltiu(AT, op1, val);
7441 if(&L)
7442 __ beq(AT, R0, L);
7443 else
7444 __ beq(AT, R0, (int)0);
7445 break;
7446 case 0x05: //below
7447 __ sltiu(AT, op1, val);
7448 if(&L)
7449 __ bne(R0, AT, L);
7450 else
7451 __ bne(R0, AT, (int)0);
7452 break;
7453 case 0x06: //below_equal
7454 __ move(AT, val);
7455 __ sltu(AT, AT, op1);
7456 if(&L)
7457 __ beq(AT, R0, L);
7458 else
7459 __ beq(AT, R0, (int)0);
7460 break;
7461 default:
7462 Unimplemented();
7463 }
7464 __ nop();
7465 %}
7467 ins_pc_relative(1);
7468 ins_pipe( pipe_alu_branch );
7469 %}
7472 instruct branchConL_regL_regL(cmpOp cmp, mRegL src1, mRegL src2, label labl) %{
7473 match( If cmp (CmpL src1 src2) );
7474 effect(USE labl);
7475 format %{ "BR$cmp $src1, $src2, $labl #@branchConL_regL_regL" %}
7476 ins_cost(250);
7478 ins_encode %{
7479 Register opr1_reg = as_Register($src1$$reg);
7480 Register opr2_reg = as_Register($src2$$reg);
7482 Label &target = *($labl$$label);
7483 int flag = $cmp$$cmpcode;
7485 switch(flag)
7486 {
7487 case 0x01: //equal
7488 if (&target)
7489 __ beq(opr1_reg, opr2_reg, target);
7490 else
7491 __ beq(opr1_reg, opr2_reg, (int)0);
7492 __ delayed()->nop();
7493 break;
7495 case 0x02: //not_equal
7496 if(&target)
7497 __ bne(opr1_reg, opr2_reg, target);
7498 else
7499 __ bne(opr1_reg, opr2_reg, (int)0);
7500 __ delayed()->nop();
7501 break;
7503 case 0x03: //greater
7504 __ slt(AT, opr2_reg, opr1_reg);
7505 if(&target)
7506 __ bne(AT, R0, target);
7507 else
7508 __ bne(AT, R0, (int)0);
7509 __ delayed()->nop();
7510 break;
7512 case 0x04: //greater_equal
7513 __ slt(AT, opr1_reg, opr2_reg);
7514 if(&target)
7515 __ beq(AT, R0, target);
7516 else
7517 __ beq(AT, R0, (int)0);
7518 __ delayed()->nop();
7520 break;
7522 case 0x05: //less
7523 __ slt(AT, opr1_reg, opr2_reg);
7524 if(&target)
7525 __ bne(AT, R0, target);
7526 else
7527 __ bne(AT, R0, (int)0);
7528 __ delayed()->nop();
7530 break;
7532 case 0x06: //less_equal
7533 __ slt(AT, opr2_reg, opr1_reg);
7535 if(&target)
7536 __ beq(AT, R0, target);
7537 else
7538 __ beq(AT, R0, (int)0);
7539 __ delayed()->nop();
7541 break;
7543 default:
7544 Unimplemented();
7545 }
7546 %}
7549 ins_pc_relative(1);
7550 ins_pipe( pipe_alu_branch );
7551 %}
7553 instruct branchConL_reg_immL16_sub(cmpOp cmp, mRegL src1, immL16_sub src2, label labl) %{
7554 match( If cmp (CmpL src1 src2) );
7555 effect(USE labl);
7556 ins_cost(180);
7557 format %{ "BR$cmp $src1, $src2, $labl #@branchConL_reg_immL16_sub" %}
7559 ins_encode %{
7560 Register op1 = $src1$$Register;
7561 int val = $src2$$constant;
7562 Label &L = *($labl$$label);
7563 int flag = $cmp$$cmpcode;
7565 __ daddiu(AT, op1, -1 * val);
7566 switch(flag)
7567 {
7568 case 0x01: //equal
7569 if (&L)
7570 __ beq(R0, AT, L);
7571 else
7572 __ beq(R0, AT, (int)0);
7573 break;
7574 case 0x02: //not_equal
7575 if (&L)
7576 __ bne(R0, AT, L);
7577 else
7578 __ bne(R0, AT, (int)0);
7579 break;
7580 case 0x03: //greater
7581 if(&L)
7582 __ bgtz(AT, L);
7583 else
7584 __ bgtz(AT, (int)0);
7585 break;
7586 case 0x04: //greater_equal
7587 if(&L)
7588 __ bgez(AT, L);
7589 else
7590 __ bgez(AT, (int)0);
7591 break;
7592 case 0x05: //less
7593 if(&L)
7594 __ bltz(AT, L);
7595 else
7596 __ bltz(AT, (int)0);
7597 break;
7598 case 0x06: //less_equal
7599 if(&L)
7600 __ blez(AT, L);
7601 else
7602 __ blez(AT, (int)0);
7603 break;
7604 default:
7605 Unimplemented();
7606 }
7607 __ nop();
7608 %}
7610 ins_pc_relative(1);
7611 ins_pipe( pipe_alu_branch );
7612 %}
7615 instruct branchConI_reg_imm16_sub(cmpOp cmp, mRegI src1, immI16_sub src2, label labl) %{
7616 match( If cmp (CmpI src1 src2) );
7617 effect(USE labl);
7618 ins_cost(180);
7619 format %{ "BR$cmp $src1, $src2, $labl #@branchConI_reg_imm16_sub" %}
7621 ins_encode %{
7622 Register op1 = $src1$$Register;
7623 int val = $src2$$constant;
7624 Label &L = *($labl$$label);
7625 int flag = $cmp$$cmpcode;
7627 __ addiu32(AT, op1, -1 * val);
7628 switch(flag)
7629 {
7630 case 0x01: //equal
7631 if (&L)
7632 __ beq(R0, AT, L);
7633 else
7634 __ beq(R0, AT, (int)0);
7635 break;
7636 case 0x02: //not_equal
7637 if (&L)
7638 __ bne(R0, AT, L);
7639 else
7640 __ bne(R0, AT, (int)0);
7641 break;
7642 case 0x03: //greater
7643 if(&L)
7644 __ bgtz(AT, L);
7645 else
7646 __ bgtz(AT, (int)0);
7647 break;
7648 case 0x04: //greater_equal
7649 if(&L)
7650 __ bgez(AT, L);
7651 else
7652 __ bgez(AT, (int)0);
7653 break;
7654 case 0x05: //less
7655 if(&L)
7656 __ bltz(AT, L);
7657 else
7658 __ bltz(AT, (int)0);
7659 break;
7660 case 0x06: //less_equal
7661 if(&L)
7662 __ blez(AT, L);
7663 else
7664 __ blez(AT, (int)0);
7665 break;
7666 default:
7667 Unimplemented();
7668 }
7669 __ nop();
7670 %}
7672 ins_pc_relative(1);
7673 ins_pipe( pipe_alu_branch );
7674 %}
7676 instruct branchConL_regL_immL0(cmpOp cmp, mRegL src1, immL0 zero, label labl) %{
7677 match( If cmp (CmpL src1 zero) );
7678 effect(USE labl);
7679 format %{ "BR$cmp $src1, zero, $labl #@branchConL_regL_immL0" %}
7680 ins_cost(150);
7682 ins_encode %{
7683 Register opr1_reg = as_Register($src1$$reg);
7684 Label &target = *($labl$$label);
7685 int flag = $cmp$$cmpcode;
7687 switch(flag)
7688 {
7689 case 0x01: //equal
7690 if (&target)
7691 __ beq(opr1_reg, R0, target);
7692 else
7693 __ beq(opr1_reg, R0, int(0));
7694 break;
7696 case 0x02: //not_equal
7697 if(&target)
7698 __ bne(opr1_reg, R0, target);
7699 else
7700 __ bne(opr1_reg, R0, (int)0);
7701 break;
7703 case 0x03: //greater
7704 if(&target)
7705 __ bgtz(opr1_reg, target);
7706 else
7707 __ bgtz(opr1_reg, (int)0);
7708 break;
7710 case 0x04: //greater_equal
7711 if(&target)
7712 __ bgez(opr1_reg, target);
7713 else
7714 __ bgez(opr1_reg, (int)0);
7715 break;
7717 case 0x05: //less
7718 __ slt(AT, opr1_reg, R0);
7719 if(&target)
7720 __ bne(AT, R0, target);
7721 else
7722 __ bne(AT, R0, (int)0);
7723 break;
7725 case 0x06: //less_equal
7726 if (&target)
7727 __ blez(opr1_reg, target);
7728 else
7729 __ blez(opr1_reg, int(0));
7730 break;
7732 default:
7733 Unimplemented();
7734 }
7735 __ delayed()->nop();
7736 %}
7739 ins_pc_relative(1);
7740 ins_pipe( pipe_alu_branch );
7741 %}
7744 //FIXME
7745 instruct branchConF_reg_reg(cmpOp cmp, regF src1, regF src2, label labl) %{
7746 match( If cmp (CmpF src1 src2) );
7747 effect(USE labl);
7748 format %{ "BR$cmp $src1, $src2, $labl #@branchConF_reg_reg" %}
7750 ins_encode %{
7751 FloatRegister reg_op1 = $src1$$FloatRegister;
7752 FloatRegister reg_op2 = $src2$$FloatRegister;
7753 Label &L = *($labl$$label);
7754 int flag = $cmp$$cmpcode;
7756 switch(flag)
7757 {
7758 case 0x01: //equal
7759 __ c_eq_s(reg_op1, reg_op2);
7760 if (&L)
7761 __ bc1t(L);
7762 else
7763 __ bc1t((int)0);
7764 break;
7765 case 0x02: //not_equal
7766 __ c_eq_s(reg_op1, reg_op2);
7767 if (&L)
7768 __ bc1f(L);
7769 else
7770 __ bc1f((int)0);
7771 break;
7772 case 0x03: //greater
7773 __ c_ule_s(reg_op1, reg_op2);
7774 if(&L)
7775 __ bc1f(L);
7776 else
7777 __ bc1f((int)0);
7778 break;
7779 case 0x04: //greater_equal
7780 __ c_ult_s(reg_op1, reg_op2);
7781 if(&L)
7782 __ bc1f(L);
7783 else
7784 __ bc1f((int)0);
7785 break;
7786 case 0x05: //less
7787 __ c_ult_s(reg_op1, reg_op2);
7788 if(&L)
7789 __ bc1t(L);
7790 else
7791 __ bc1t((int)0);
7792 break;
7793 case 0x06: //less_equal
7794 __ c_ule_s(reg_op1, reg_op2);
7795 if(&L)
7796 __ bc1t(L);
7797 else
7798 __ bc1t((int)0);
7799 break;
7800 default:
7801 Unimplemented();
7802 }
7803 __ nop();
7804 %}
7806 ins_pc_relative(1);
7807 ins_pipe(pipe_slow);
7808 %}
7810 instruct branchConD_reg_reg(cmpOp cmp, regD src1, regD src2, label labl) %{
7811 match( If cmp (CmpD src1 src2) );
7812 effect(USE labl);
7813 format %{ "BR$cmp $src1, $src2, $labl #@branchConD_reg_reg" %}
7815 ins_encode %{
7816 FloatRegister reg_op1 = $src1$$FloatRegister;
7817 FloatRegister reg_op2 = $src2$$FloatRegister;
7818 Label &L = *($labl$$label);
7819 int flag = $cmp$$cmpcode;
7821 switch(flag)
7822 {
7823 case 0x01: //equal
7824 __ c_eq_d(reg_op1, reg_op2);
7825 if (&L)
7826 __ bc1t(L);
7827 else
7828 __ bc1t((int)0);
7829 break;
7830 case 0x02: //not_equal
7831 //2016/4/19 aoqi: c_ueq_d cannot distinguish NaN from equal. Double.isNaN(Double) is implemented by 'f != f', so the use of c_ueq_d causes bugs.
7832 __ c_eq_d(reg_op1, reg_op2);
7833 if (&L)
7834 __ bc1f(L);
7835 else
7836 __ bc1f((int)0);
7837 break;
7838 case 0x03: //greater
7839 __ c_ule_d(reg_op1, reg_op2);
7840 if(&L)
7841 __ bc1f(L);
7842 else
7843 __ bc1f((int)0);
7844 break;
7845 case 0x04: //greater_equal
7846 __ c_ult_d(reg_op1, reg_op2);
7847 if(&L)
7848 __ bc1f(L);
7849 else
7850 __ bc1f((int)0);
7851 break;
7852 case 0x05: //less
7853 __ c_ult_d(reg_op1, reg_op2);
7854 if(&L)
7855 __ bc1t(L);
7856 else
7857 __ bc1t((int)0);
7858 break;
7859 case 0x06: //less_equal
7860 __ c_ule_d(reg_op1, reg_op2);
7861 if(&L)
7862 __ bc1t(L);
7863 else
7864 __ bc1t((int)0);
7865 break;
7866 default:
7867 Unimplemented();
7868 }
7869 __ nop();
7870 %}
7872 ins_pc_relative(1);
7873 ins_pipe(pipe_slow);
7874 %}
7877 // Call Runtime Instruction
// Direct call into the VM runtime.  The actual code emission lives in the
// Java_To_Runtime encoding class, defined elsewhere in this file.
instruct CallRuntimeDirect(method meth) %{
  match(CallRuntime );
  effect(USE meth);

  ins_cost(300);
  format %{ "CALL,runtime #@CallRuntimeDirect" %}
  ins_encode( Java_To_Runtime( meth ) );
  ins_pipe( pipe_slow );
  // Align the call site to 16 bytes — presumably so the return address /
  // patch site lands at a fixed offset; confirm against Java_To_Runtime.
  ins_alignment(16);
%}
7891 //------------------------MemBar Instructions-------------------------------
7892 //Memory barrier flavors
// Acquire memory barrier: emits no code (size(0), empty encoding).
// Presumably the preceding atomic/volatile load sequence already provides
// acquire semantics on this CPU — confirm against the load implementations.
instruct membar_acquire() %{
  match(MemBarAcquire);
  ins_cost(0);

  size(0);
  format %{ "MEMBAR-acquire (empty) @ membar_acquire" %}
  ins_encode();
  ins_pipe(empty);
%}
// LoadFence: emits a full MIPS 'sync' instruction.
instruct load_fence() %{
  match(LoadFence);
  ins_cost(400);

  format %{ "MEMBAR @ load_fence" %}
  ins_encode %{
    __ sync();
  %}
  ins_pipe(pipe_slow);
%}
// Acquire barrier paired with lock entry: empty, because the CAS emitted by
// the preceding FastLock already provides the acquire ordering.
instruct membar_acquire_lock()
%{
  match(MemBarAcquireLock);
  ins_cost(0);

  size(0);
  format %{ "MEMBAR-acquire (acquire as part of CAS in prior FastLock so empty encoding) @ membar_acquire_lock" %}
  ins_encode();
  ins_pipe(empty);
%}
// Release memory barrier: emits no code (size(0), empty encoding).
// Presumably the following store sequence provides the release ordering —
// confirm against the volatile-store implementations.
instruct membar_release() %{
  match(MemBarRelease);
  ins_cost(0);

  size(0);
  format %{ "MEMBAR-release (empty) @ membar_release" %}
  ins_encode();
  ins_pipe(empty);
%}
// StoreFence: emits a full MIPS 'sync' instruction.
instruct store_fence() %{
  match(StoreFence);
  ins_cost(400);

  format %{ "MEMBAR @ store_fence" %}
  ins_encode %{
    __ sync();
  %}

  ins_pipe(pipe_slow);
%}
// Release barrier paired with lock exit: empty, because the release ordering
// is provided inside FastUnlock.
instruct membar_release_lock()
%{
  match(MemBarReleaseLock);
  ins_cost(0);

  size(0);
  format %{ "MEMBAR-release-lock (release in FastUnlock so empty) @ membar_release_lock" %}
  ins_encode();
  ins_pipe(empty);
%}
// Full (volatile) memory barrier: 'sync', skipped entirely on uniprocessors
// where no cross-CPU ordering is needed.
instruct membar_volatile() %{
  match(MemBarVolatile);
  ins_cost(400);

  format %{ "MEMBAR-volatile" %}
  ins_encode %{
    if( !os::is_MP() ) return;     // Not needed on single CPU
    __ sync();

  %}
  ins_pipe(pipe_slow);
%}
// Volatile membar elided when the matcher proves a store-load barrier
// already follows (Matcher::post_store_load_barrier); emits nothing.
instruct unnecessary_membar_volatile() %{
  match(MemBarVolatile);
  predicate(Matcher::post_store_load_barrier(n));
  ins_cost(0);

  size(0);
  format %{ "MEMBAR-volatile (unnecessary so empty encoding) @ unnecessary_membar_volatile" %}
  ins_encode( );
  ins_pipe(empty);
%}
// Store-store barrier: empty encoding.  Presumably the target CPU does not
// reorder stores with respect to each other — confirm for the Loongson
// models this port targets.
instruct membar_storestore() %{
  match(MemBarStoreStore);

  ins_cost(0);
  size(0);
  format %{ "MEMBAR-storestore (empty encoding) @ membar_storestore" %}
  ins_encode( );
  ins_pipe(empty);
%}
7995 //----------Move Instructions--------------------------------------------------
7996 instruct castX2P(mRegP dst, mRegL src) %{
7997 match(Set dst (CastX2P src));
7998 format %{ "castX2P $dst, $src @ castX2P" %}
7999 ins_encode %{
8000 Register src = $src$$Register;
8001 Register dst = $dst$$Register;
8003 if(src != dst)
8004 __ move(dst, src);
8005 %}
8006 ins_cost(10);
8007 ins_pipe( ialu_regI_mov );
8008 %}
8010 instruct castP2X(mRegL dst, mRegP src ) %{
8011 match(Set dst (CastP2X src));
8013 format %{ "mov $dst, $src\t #@castP2X" %}
8014 ins_encode %{
8015 Register src = $src$$Register;
8016 Register dst = $dst$$Register;
8018 if(src != dst)
8019 __ move(dst, src);
8020 %}
8021 ins_pipe( ialu_regI_mov );
8022 %}
8024 instruct MoveF2I_reg_reg(mRegI dst, regF src) %{
8025 match(Set dst (MoveF2I src));
8026 effect(DEF dst, USE src);
8027 ins_cost(85);
8028 format %{ "MoveF2I $dst, $src @ MoveF2I_reg_reg" %}
8029 ins_encode %{
8030 Register dst = as_Register($dst$$reg);
8031 FloatRegister src = as_FloatRegister($src$$reg);
8033 __ mfc1(dst, src);
8034 %}
8035 ins_pipe( pipe_slow );
8036 %}
8038 instruct MoveI2F_reg_reg(regF dst, mRegI src) %{
8039 match(Set dst (MoveI2F src));
8040 effect(DEF dst, USE src);
8041 ins_cost(85);
8042 format %{ "MoveI2F $dst, $src @ MoveI2F_reg_reg" %}
8043 ins_encode %{
8044 Register src = as_Register($src$$reg);
8045 FloatRegister dst = as_FloatRegister($dst$$reg);
8047 __ mtc1(src, dst);
8048 %}
8049 ins_pipe( pipe_slow );
8050 %}
8052 instruct MoveD2L_reg_reg(mRegL dst, regD src) %{
8053 match(Set dst (MoveD2L src));
8054 effect(DEF dst, USE src);
8055 ins_cost(85);
8056 format %{ "MoveD2L $dst, $src @ MoveD2L_reg_reg" %}
8057 ins_encode %{
8058 Register dst = as_Register($dst$$reg);
8059 FloatRegister src = as_FloatRegister($src$$reg);
8061 __ dmfc1(dst, src);
8062 %}
8063 ins_pipe( pipe_slow );
8064 %}
8066 instruct MoveL2D_reg_reg(regD dst, mRegL src) %{
8067 match(Set dst (MoveL2D src));
8068 effect(DEF dst, USE src);
8069 ins_cost(85);
8070 format %{ "MoveL2D $dst, $src @ MoveL2D_reg_reg" %}
8071 ins_encode %{
8072 FloatRegister dst = as_FloatRegister($dst$$reg);
8073 Register src = as_Register($src$$reg);
8075 __ dmtc1(src, dst);
8076 %}
8077 ins_pipe( pipe_slow );
8078 %}
8080 //----------Conditional Move---------------------------------------------------
8081 // Conditional move
8082 instruct cmovI_cmpI_reg_reg(mRegI dst, mRegI src, mRegI tmp1, mRegI tmp2, cmpOp cop ) %{
8083 match(Set dst (CMoveI (Binary cop (CmpI tmp1 tmp2)) (Binary dst src)));
8084 ins_cost(80);
8085 format %{
8086 "CMP$cop $tmp1, $tmp2\t @cmovI_cmpI_reg_reg\n"
8087 "\tCMOV $dst,$src \t @cmovI_cmpI_reg_reg"
8088 %}
8090 ins_encode %{
8091 Register op1 = $tmp1$$Register;
8092 Register op2 = $tmp2$$Register;
8093 Register dst = $dst$$Register;
8094 Register src = $src$$Register;
8095 int flag = $cop$$cmpcode;
8097 switch(flag)
8098 {
8099 case 0x01: //equal
8100 __ subu32(AT, op1, op2);
8101 __ movz(dst, src, AT);
8102 break;
8104 case 0x02: //not_equal
8105 __ subu32(AT, op1, op2);
8106 __ movn(dst, src, AT);
8107 break;
8109 case 0x03: //great
8110 __ slt(AT, op2, op1);
8111 __ movn(dst, src, AT);
8112 break;
8114 case 0x04: //great_equal
8115 __ slt(AT, op1, op2);
8116 __ movz(dst, src, AT);
8117 break;
8119 case 0x05: //less
8120 __ slt(AT, op1, op2);
8121 __ movn(dst, src, AT);
8122 break;
8124 case 0x06: //less_equal
8125 __ slt(AT, op2, op1);
8126 __ movz(dst, src, AT);
8127 break;
8129 default:
8130 Unimplemented();
8131 }
8132 %}
8134 ins_pipe( pipe_slow );
8135 %}
8137 instruct cmovI_cmpP_reg_reg(mRegI dst, mRegI src, mRegP tmp1, mRegP tmp2, cmpOpU cop ) %{
8138 match(Set dst (CMoveI (Binary cop (CmpP tmp1 tmp2)) (Binary dst src)));
8139 ins_cost(80);
8140 format %{
8141 "CMPU$cop $tmp1,$tmp2\t @cmovI_cmpP_reg_reg\n\t"
8142 "CMOV $dst,$src\t @cmovI_cmpP_reg_reg"
8143 %}
8144 ins_encode %{
8145 Register op1 = $tmp1$$Register;
8146 Register op2 = $tmp2$$Register;
8147 Register dst = $dst$$Register;
8148 Register src = $src$$Register;
8149 int flag = $cop$$cmpcode;
8151 switch(flag)
8152 {
8153 case 0x01: //equal
8154 __ subu(AT, op1, op2);
8155 __ movz(dst, src, AT);
8156 break;
8158 case 0x02: //not_equal
8159 __ subu(AT, op1, op2);
8160 __ movn(dst, src, AT);
8161 break;
8163 case 0x03: //above
8164 __ sltu(AT, op2, op1);
8165 __ movn(dst, src, AT);
8166 break;
8168 case 0x04: //above_equal
8169 __ sltu(AT, op1, op2);
8170 __ movz(dst, src, AT);
8171 break;
8173 case 0x05: //below
8174 __ sltu(AT, op1, op2);
8175 __ movn(dst, src, AT);
8176 break;
8178 case 0x06: //below_equal
8179 __ sltu(AT, op2, op1);
8180 __ movz(dst, src, AT);
8181 break;
8183 default:
8184 Unimplemented();
8185 }
8186 %}
8188 ins_pipe( pipe_slow );
8189 %}
8191 instruct cmovI_cmpN_reg_reg(mRegI dst, mRegI src, mRegN tmp1, mRegN tmp2, cmpOpU cop ) %{
8192 match(Set dst (CMoveI (Binary cop (CmpN tmp1 tmp2)) (Binary dst src)));
8193 ins_cost(80);
8194 format %{
8195 "CMPU$cop $tmp1,$tmp2\t @cmovI_cmpN_reg_reg\n\t"
8196 "CMOV $dst,$src\t @cmovI_cmpN_reg_reg"
8197 %}
8198 ins_encode %{
8199 Register op1 = $tmp1$$Register;
8200 Register op2 = $tmp2$$Register;
8201 Register dst = $dst$$Register;
8202 Register src = $src$$Register;
8203 int flag = $cop$$cmpcode;
8205 switch(flag)
8206 {
8207 case 0x01: //equal
8208 __ subu32(AT, op1, op2);
8209 __ movz(dst, src, AT);
8210 break;
8212 case 0x02: //not_equal
8213 __ subu32(AT, op1, op2);
8214 __ movn(dst, src, AT);
8215 break;
8217 case 0x03: //above
8218 __ sltu(AT, op2, op1);
8219 __ movn(dst, src, AT);
8220 break;
8222 case 0x04: //above_equal
8223 __ sltu(AT, op1, op2);
8224 __ movz(dst, src, AT);
8225 break;
8227 case 0x05: //below
8228 __ sltu(AT, op1, op2);
8229 __ movn(dst, src, AT);
8230 break;
8232 case 0x06: //below_equal
8233 __ sltu(AT, op2, op1);
8234 __ movz(dst, src, AT);
8235 break;
8237 default:
8238 Unimplemented();
8239 }
8240 %}
8242 ins_pipe( pipe_slow );
8243 %}
8245 instruct cmovP_cmpN_reg_reg(mRegP dst, mRegP src, mRegN tmp1, mRegN tmp2, cmpOpU cop ) %{
8246 match(Set dst (CMoveP (Binary cop (CmpN tmp1 tmp2)) (Binary dst src)));
8247 ins_cost(80);
8248 format %{
8249 "CMPU$cop $tmp1,$tmp2\t @cmovP_cmpN_reg_reg\n\t"
8250 "CMOV $dst,$src\t @cmovP_cmpN_reg_reg"
8251 %}
8252 ins_encode %{
8253 Register op1 = $tmp1$$Register;
8254 Register op2 = $tmp2$$Register;
8255 Register dst = $dst$$Register;
8256 Register src = $src$$Register;
8257 int flag = $cop$$cmpcode;
8259 switch(flag)
8260 {
8261 case 0x01: //equal
8262 __ subu32(AT, op1, op2);
8263 __ movz(dst, src, AT);
8264 break;
8266 case 0x02: //not_equal
8267 __ subu32(AT, op1, op2);
8268 __ movn(dst, src, AT);
8269 break;
8271 case 0x03: //above
8272 __ sltu(AT, op2, op1);
8273 __ movn(dst, src, AT);
8274 break;
8276 case 0x04: //above_equal
8277 __ sltu(AT, op1, op2);
8278 __ movz(dst, src, AT);
8279 break;
8281 case 0x05: //below
8282 __ sltu(AT, op1, op2);
8283 __ movn(dst, src, AT);
8284 break;
8286 case 0x06: //below_equal
8287 __ sltu(AT, op2, op1);
8288 __ movz(dst, src, AT);
8289 break;
8291 default:
8292 Unimplemented();
8293 }
8294 %}
8296 ins_pipe( pipe_slow );
8297 %}
8299 instruct cmovN_cmpP_reg_reg(mRegN dst, mRegN src, mRegP tmp1, mRegP tmp2, cmpOpU cop ) %{
8300 match(Set dst (CMoveN (Binary cop (CmpP tmp1 tmp2)) (Binary dst src)));
8301 ins_cost(80);
8302 format %{
8303 "CMPU$cop $tmp1,$tmp2\t @cmovN_cmpP_reg_reg\n\t"
8304 "CMOV $dst,$src\t @cmovN_cmpP_reg_reg"
8305 %}
8306 ins_encode %{
8307 Register op1 = $tmp1$$Register;
8308 Register op2 = $tmp2$$Register;
8309 Register dst = $dst$$Register;
8310 Register src = $src$$Register;
8311 int flag = $cop$$cmpcode;
8313 switch(flag)
8314 {
8315 case 0x01: //equal
8316 __ subu(AT, op1, op2);
8317 __ movz(dst, src, AT);
8318 break;
8320 case 0x02: //not_equal
8321 __ subu(AT, op1, op2);
8322 __ movn(dst, src, AT);
8323 break;
8325 case 0x03: //above
8326 __ sltu(AT, op2, op1);
8327 __ movn(dst, src, AT);
8328 break;
8330 case 0x04: //above_equal
8331 __ sltu(AT, op1, op2);
8332 __ movz(dst, src, AT);
8333 break;
8335 case 0x05: //below
8336 __ sltu(AT, op1, op2);
8337 __ movn(dst, src, AT);
8338 break;
8340 case 0x06: //below_equal
8341 __ sltu(AT, op2, op1);
8342 __ movz(dst, src, AT);
8343 break;
8345 default:
8346 Unimplemented();
8347 }
8348 %}
8350 ins_pipe( pipe_slow );
8351 %}
8353 instruct cmovP_cmpD_reg_reg(mRegP dst, mRegP src, regD tmp1, regD tmp2, cmpOp cop ) %{
8354 match(Set dst (CMoveP (Binary cop (CmpD tmp1 tmp2)) (Binary dst src)));
8355 ins_cost(80);
8356 format %{
8357 "CMP$cop $tmp1, $tmp2\t @cmovP_cmpD_reg_reg\n"
8358 "\tCMOV $dst,$src \t @cmovP_cmpD_reg_reg"
8359 %}
8360 ins_encode %{
8361 FloatRegister reg_op1 = as_FloatRegister($tmp1$$reg);
8362 FloatRegister reg_op2 = as_FloatRegister($tmp2$$reg);
8363 Register dst = as_Register($dst$$reg);
8364 Register src = as_Register($src$$reg);
8366 int flag = $cop$$cmpcode;
8368 switch(flag)
8369 {
8370 case 0x01: //equal
8371 __ c_eq_d(reg_op1, reg_op2);
8372 __ movt(dst, src);
8373 break;
8374 case 0x02: //not_equal
8375 __ c_eq_d(reg_op1, reg_op2);
8376 __ movf(dst, src);
8377 break;
8378 case 0x03: //greater
8379 __ c_ole_d(reg_op1, reg_op2);
8380 __ movf(dst, src);
8381 break;
8382 case 0x04: //greater_equal
8383 __ c_olt_d(reg_op1, reg_op2);
8384 __ movf(dst, src);
8385 break;
8386 case 0x05: //less
8387 __ c_ult_d(reg_op1, reg_op2);
8388 __ movt(dst, src);
8389 break;
8390 case 0x06: //less_equal
8391 __ c_ule_d(reg_op1, reg_op2);
8392 __ movt(dst, src);
8393 break;
8394 default:
8395 Unimplemented();
8396 }
8397 %}
8399 ins_pipe( pipe_slow );
8400 %}
8403 instruct cmovN_cmpN_reg_reg(mRegN dst, mRegN src, mRegN tmp1, mRegN tmp2, cmpOpU cop ) %{
8404 match(Set dst (CMoveN (Binary cop (CmpN tmp1 tmp2)) (Binary dst src)));
8405 ins_cost(80);
8406 format %{
8407 "CMPU$cop $tmp1,$tmp2\t @cmovN_cmpN_reg_reg\n\t"
8408 "CMOV $dst,$src\t @cmovN_cmpN_reg_reg"
8409 %}
8410 ins_encode %{
8411 Register op1 = $tmp1$$Register;
8412 Register op2 = $tmp2$$Register;
8413 Register dst = $dst$$Register;
8414 Register src = $src$$Register;
8415 int flag = $cop$$cmpcode;
8417 switch(flag)
8418 {
8419 case 0x01: //equal
8420 __ subu32(AT, op1, op2);
8421 __ movz(dst, src, AT);
8422 break;
8424 case 0x02: //not_equal
8425 __ subu32(AT, op1, op2);
8426 __ movn(dst, src, AT);
8427 break;
8429 case 0x03: //above
8430 __ sltu(AT, op2, op1);
8431 __ movn(dst, src, AT);
8432 break;
8434 case 0x04: //above_equal
8435 __ sltu(AT, op1, op2);
8436 __ movz(dst, src, AT);
8437 break;
8439 case 0x05: //below
8440 __ sltu(AT, op1, op2);
8441 __ movn(dst, src, AT);
8442 break;
8444 case 0x06: //below_equal
8445 __ sltu(AT, op2, op1);
8446 __ movz(dst, src, AT);
8447 break;
8449 default:
8450 Unimplemented();
8451 }
8452 %}
8454 ins_pipe( pipe_slow );
8455 %}
8458 instruct cmovI_cmpU_reg_reg(mRegI dst, mRegI src, mRegI tmp1, mRegI tmp2, cmpOpU cop ) %{
8459 match(Set dst (CMoveI (Binary cop (CmpU tmp1 tmp2)) (Binary dst src)));
8460 ins_cost(80);
8461 format %{
8462 "CMPU$cop $tmp1,$tmp2\t @cmovI_cmpU_reg_reg\n\t"
8463 "CMOV $dst,$src\t @cmovI_cmpU_reg_reg"
8464 %}
8465 ins_encode %{
8466 Register op1 = $tmp1$$Register;
8467 Register op2 = $tmp2$$Register;
8468 Register dst = $dst$$Register;
8469 Register src = $src$$Register;
8470 int flag = $cop$$cmpcode;
8472 switch(flag)
8473 {
8474 case 0x01: //equal
8475 __ subu(AT, op1, op2);
8476 __ movz(dst, src, AT);
8477 break;
8479 case 0x02: //not_equal
8480 __ subu(AT, op1, op2);
8481 __ movn(dst, src, AT);
8482 break;
8484 case 0x03: //above
8485 __ sltu(AT, op2, op1);
8486 __ movn(dst, src, AT);
8487 break;
8489 case 0x04: //above_equal
8490 __ sltu(AT, op1, op2);
8491 __ movz(dst, src, AT);
8492 break;
8494 case 0x05: //below
8495 __ sltu(AT, op1, op2);
8496 __ movn(dst, src, AT);
8497 break;
8499 case 0x06: //below_equal
8500 __ sltu(AT, op2, op1);
8501 __ movz(dst, src, AT);
8502 break;
8504 default:
8505 Unimplemented();
8506 }
8507 %}
8509 ins_pipe( pipe_slow );
8510 %}
// cmovI_cmpL_reg_reg: conditional move of an int selected by a signed
// long compare.  slt/subu put the compare result in AT; movz/movn then
// conditionally copy $src into $dst (movz: AT == 0, movn: AT != 0).
8512 instruct cmovI_cmpL_reg_reg(mRegI dst, mRegI src, mRegL tmp1, mRegL tmp2, cmpOp cop ) %{
8513 match(Set dst (CMoveI (Binary cop (CmpL tmp1 tmp2)) (Binary dst src)));
8514 ins_cost(80);
8515 format %{
8516 "CMP$cop $tmp1, $tmp2\t @cmovI_cmpL_reg_reg\n"
8517 "\tCMOV $dst,$src \t @cmovI_cmpL_reg_reg"
8518 %}
8519 ins_encode %{
8520 Register opr1 = as_Register($tmp1$$reg);
8521 Register opr2 = as_Register($tmp2$$reg);
8522 Register dst = $dst$$Register;
8523 Register src = $src$$Register;
8524 int flag = $cop$$cmpcode;
8526 switch(flag)
8527 {
8528 case 0x01: //equal
8529 __ subu(AT, opr1, opr2);
8530 __ movz(dst, src, AT);
8531 break;
8533 case 0x02: //not_equal
8534 __ subu(AT, opr1, opr2);
8535 __ movn(dst, src, AT);
8536 break;
8538 case 0x03: //greater
8539 __ slt(AT, opr2, opr1);
8540 __ movn(dst, src, AT);
8541 break;
8543 case 0x04: //greater_equal
8544 __ slt(AT, opr1, opr2);
8545 __ movz(dst, src, AT);
8546 break;
8548 case 0x05: //less
8549 __ slt(AT, opr1, opr2);
8550 __ movn(dst, src, AT);
8551 break;
8553 case 0x06: //less_equal
8554 __ slt(AT, opr2, opr1);
8555 __ movz(dst, src, AT);
8556 break;
8558 default:
8559 Unimplemented();
8560 }
8561 %}
8563 ins_pipe( pipe_slow );
8564 %}
// cmovP_cmpL_reg_reg: conditional move of a pointer selected by a signed
// long compare.  Same AT + movz/movn scheme as cmovI_cmpL_reg_reg.
8566 instruct cmovP_cmpL_reg_reg(mRegP dst, mRegP src, mRegL tmp1, mRegL tmp2, cmpOp cop ) %{
8567 match(Set dst (CMoveP (Binary cop (CmpL tmp1 tmp2)) (Binary dst src)));
8568 ins_cost(80);
8569 format %{
8570 "CMP$cop $tmp1, $tmp2\t @cmovP_cmpL_reg_reg\n"
8571 "\tCMOV $dst,$src \t @cmovP_cmpL_reg_reg"
8572 %}
8573 ins_encode %{
8574 Register opr1 = as_Register($tmp1$$reg);
8575 Register opr2 = as_Register($tmp2$$reg);
8576 Register dst = $dst$$Register;
8577 Register src = $src$$Register;
8578 int flag = $cop$$cmpcode;
8580 switch(flag)
8581 {
8582 case 0x01: //equal
8583 __ subu(AT, opr1, opr2);
8584 __ movz(dst, src, AT);
8585 break;
8587 case 0x02: //not_equal
8588 __ subu(AT, opr1, opr2);
8589 __ movn(dst, src, AT);
8590 break;
8592 case 0x03: //greater
8593 __ slt(AT, opr2, opr1);
8594 __ movn(dst, src, AT);
8595 break;
8597 case 0x04: //greater_equal
8598 __ slt(AT, opr1, opr2);
8599 __ movz(dst, src, AT);
8600 break;
8602 case 0x05: //less
8603 __ slt(AT, opr1, opr2);
8604 __ movn(dst, src, AT);
8605 break;
8607 case 0x06: //less_equal
8608 __ slt(AT, opr2, opr1);
8609 __ movz(dst, src, AT);
8610 break;
8612 default:
8613 Unimplemented();
8614 }
8615 %}
8617 ins_pipe( pipe_slow );
8618 %}
// cmovI_cmpD_reg_reg: conditional move of an int selected by a double
// compare.  The FPU compare (c.cond.d) sets the FCC flag; movt copies $src
// when FCC is set, movf when it is clear.  Ordered compares (c_ole/c_olt)
// combined with movf, and unordered compares (c_ult/c_ule) with movt, give
// the required behavior when an operand is NaN.
8620 instruct cmovI_cmpD_reg_reg(mRegI dst, mRegI src, regD tmp1, regD tmp2, cmpOp cop ) %{
8621 match(Set dst (CMoveI (Binary cop (CmpD tmp1 tmp2)) (Binary dst src)));
8622 ins_cost(80);
8623 format %{
8624 "CMP$cop $tmp1, $tmp2\t @cmovI_cmpD_reg_reg\n"
8625 "\tCMOV $dst,$src \t @cmovI_cmpD_reg_reg"
8626 %}
8627 ins_encode %{
8628 FloatRegister reg_op1 = as_FloatRegister($tmp1$$reg);
8629 FloatRegister reg_op2 = as_FloatRegister($tmp2$$reg);
8630 Register dst = as_Register($dst$$reg);
8631 Register src = as_Register($src$$reg);
8633 int flag = $cop$$cmpcode;
8635 switch(flag)
8636 {
8637 case 0x01: //equal
8638 __ c_eq_d(reg_op1, reg_op2);
8639 __ movt(dst, src);
8640 break;
8641 case 0x02: //not_equal
8642 //2016/4/19 aoqi: See instruct branchConD_reg_reg. The change in branchConD_reg_reg fixed a bug. It seems similar here, so I made the same change.
8643 __ c_eq_d(reg_op1, reg_op2);
8644 __ movf(dst, src);
8645 break;
8646 case 0x03: //greater
8647 __ c_ole_d(reg_op1, reg_op2);
8648 __ movf(dst, src);
8649 break;
8650 case 0x04: //greater_equal
8651 __ c_olt_d(reg_op1, reg_op2);
8652 __ movf(dst, src);
8653 break;
8654 case 0x05: //less
8655 __ c_ult_d(reg_op1, reg_op2);
8656 __ movt(dst, src);
8657 break;
8658 case 0x06: //less_equal
8659 __ c_ule_d(reg_op1, reg_op2);
8660 __ movt(dst, src);
8661 break;
8662 default:
8663 Unimplemented();
8664 }
8665 %}
8667 ins_pipe( pipe_slow );
8668 %}
// cmovP_cmpP_reg_reg: conditional move of a pointer selected by an
// unsigned pointer compare (sltu — correct for addresses).  movz/movn
// consume the compare result in AT.
8671 instruct cmovP_cmpP_reg_reg(mRegP dst, mRegP src, mRegP tmp1, mRegP tmp2, cmpOpU cop ) %{
8672 match(Set dst (CMoveP (Binary cop (CmpP tmp1 tmp2)) (Binary dst src)));
8673 ins_cost(80);
8674 format %{
8675 "CMPU$cop $tmp1,$tmp2\t @cmovP_cmpP_reg_reg\n\t"
8676 "CMOV $dst,$src\t @cmovP_cmpP_reg_reg"
8677 %}
8678 ins_encode %{
8679 Register op1 = $tmp1$$Register;
8680 Register op2 = $tmp2$$Register;
8681 Register dst = $dst$$Register;
8682 Register src = $src$$Register;
8683 int flag = $cop$$cmpcode;
8685 switch(flag)
8686 {
8687 case 0x01: //equal
8688 __ subu(AT, op1, op2);
8689 __ movz(dst, src, AT);
8690 break;
8692 case 0x02: //not_equal
8693 __ subu(AT, op1, op2);
8694 __ movn(dst, src, AT);
8695 break;
8697 case 0x03: //above
8698 __ sltu(AT, op2, op1);
8699 __ movn(dst, src, AT);
8700 break;
8702 case 0x04: //above_equal
8703 __ sltu(AT, op1, op2);
8704 __ movz(dst, src, AT);
8705 break;
8707 case 0x05: //below
8708 __ sltu(AT, op1, op2);
8709 __ movn(dst, src, AT);
8710 break;
8712 case 0x06: //below_equal
8713 __ sltu(AT, op2, op1);
8714 __ movz(dst, src, AT);
8715 break;
8717 default:
8718 Unimplemented();
8719 }
8720 %}
8722 ins_pipe( pipe_slow );
8723 %}
// cmovP_cmpI_reg_reg: conditional move of a pointer selected by a SIGNED
// int compare (cmpOp + slt).  The original case labels said above/below,
// which are unsigned names — relabeled greater/less to match the signed
// slt actually emitted.
8725 instruct cmovP_cmpI_reg_reg(mRegP dst, mRegP src, mRegI tmp1, mRegI tmp2, cmpOp cop ) %{
8726 match(Set dst (CMoveP (Binary cop (CmpI tmp1 tmp2)) (Binary dst src)));
8727 ins_cost(80);
8728 format %{
8729 "CMP$cop $tmp1,$tmp2\t @cmovP_cmpI_reg_reg\n\t"
8730 "CMOV $dst,$src\t @cmovP_cmpI_reg_reg"
8731 %}
8732 ins_encode %{
8733 Register op1 = $tmp1$$Register;
8734 Register op2 = $tmp2$$Register;
8735 Register dst = $dst$$Register;
8736 Register src = $src$$Register;
8737 int flag = $cop$$cmpcode;
8739 switch(flag)
8740 {
8741 case 0x01: //equal
8742 __ subu32(AT, op1, op2);
8743 __ movz(dst, src, AT);
8744 break;
8746 case 0x02: //not_equal
8747 __ subu32(AT, op1, op2);
8748 __ movn(dst, src, AT);
8749 break;
8751 case 0x03: //greater
8752 __ slt(AT, op2, op1);
8753 __ movn(dst, src, AT);
8754 break;
8756 case 0x04: //greater_equal
8757 __ slt(AT, op1, op2);
8758 __ movz(dst, src, AT);
8759 break;
8761 case 0x05: //less
8762 __ slt(AT, op1, op2);
8763 __ movn(dst, src, AT);
8764 break;
8766 case 0x06: //less_equal
8767 __ slt(AT, op2, op1);
8768 __ movz(dst, src, AT);
8769 break;
8771 default:
8772 Unimplemented();
8773 }
8774 %}
8776 ins_pipe( pipe_slow );
8777 %}
// cmovN_cmpI_reg_reg: conditional move of a narrow oop selected by a
// SIGNED int compare (cmpOp + slt).  Case labels relabeled from the
// unsigned names (above/below) to the signed ones the code implements.
8779 instruct cmovN_cmpI_reg_reg(mRegN dst, mRegN src, mRegI tmp1, mRegI tmp2, cmpOp cop ) %{
8780 match(Set dst (CMoveN (Binary cop (CmpI tmp1 tmp2)) (Binary dst src)));
8781 ins_cost(80);
8782 format %{
8783 "CMP$cop $tmp1,$tmp2\t @cmovN_cmpI_reg_reg\n\t"
8784 "CMOV $dst,$src\t @cmovN_cmpI_reg_reg"
8785 %}
8786 ins_encode %{
8787 Register op1 = $tmp1$$Register;
8788 Register op2 = $tmp2$$Register;
8789 Register dst = $dst$$Register;
8790 Register src = $src$$Register;
8791 int flag = $cop$$cmpcode;
8793 switch(flag)
8794 {
8795 case 0x01: //equal
8796 __ subu32(AT, op1, op2);
8797 __ movz(dst, src, AT);
8798 break;
8800 case 0x02: //not_equal
8801 __ subu32(AT, op1, op2);
8802 __ movn(dst, src, AT);
8803 break;
8805 case 0x03: //greater
8806 __ slt(AT, op2, op1);
8807 __ movn(dst, src, AT);
8808 break;
8810 case 0x04: //greater_equal
8811 __ slt(AT, op1, op2);
8812 __ movz(dst, src, AT);
8813 break;
8815 case 0x05: //less
8816 __ slt(AT, op1, op2);
8817 __ movn(dst, src, AT);
8818 break;
8820 case 0x06: //less_equal
8821 __ slt(AT, op2, op1);
8822 __ movz(dst, src, AT);
8823 break;
8825 default:
8826 Unimplemented();
8827 }
8828 %}
8830 ins_pipe( pipe_slow );
8831 %}
// cmovL_cmpI_reg_reg: conditional move of a long selected by a signed int
// compare.  AT receives the compare result; movz/movn select $src.
// Case labels "great" normalized to "greater".
8834 instruct cmovL_cmpI_reg_reg(mRegL dst, mRegL src, mRegI tmp1, mRegI tmp2, cmpOp cop ) %{
8835 match(Set dst (CMoveL (Binary cop (CmpI tmp1 tmp2)) (Binary dst src)));
8836 ins_cost(80);
8837 format %{
8838 "CMP$cop $tmp1, $tmp2\t @cmovL_cmpI_reg_reg\n"
8839 "\tCMOV $dst,$src \t @cmovL_cmpI_reg_reg"
8840 %}
8842 ins_encode %{
8843 Register op1 = $tmp1$$Register;
8844 Register op2 = $tmp2$$Register;
8845 Register dst = as_Register($dst$$reg);
8846 Register src = as_Register($src$$reg);
8847 int flag = $cop$$cmpcode;
8849 switch(flag)
8850 {
8851 case 0x01: //equal
8852 __ subu32(AT, op1, op2);
8853 __ movz(dst, src, AT);
8854 break;
8856 case 0x02: //not_equal
8857 __ subu32(AT, op1, op2);
8858 __ movn(dst, src, AT);
8859 break;
8861 case 0x03: //greater
8862 __ slt(AT, op2, op1);
8863 __ movn(dst, src, AT);
8864 break;
8866 case 0x04: //greater_equal
8867 __ slt(AT, op1, op2);
8868 __ movz(dst, src, AT);
8869 break;
8871 case 0x05: //less
8872 __ slt(AT, op1, op2);
8873 __ movn(dst, src, AT);
8874 break;
8876 case 0x06: //less_equal
8877 __ slt(AT, op2, op1);
8878 __ movz(dst, src, AT);
8879 break;
8881 default:
8882 Unimplemented();
8883 }
8884 %}
8886 ins_pipe( pipe_slow );
8887 %}
// cmovL_cmpL_reg_reg: conditional move of a long selected by a signed
// long compare.  Same AT + movz/movn scheme as the other cmov rules.
8889 instruct cmovL_cmpL_reg_reg(mRegL dst, mRegL src, mRegL tmp1, mRegL tmp2, cmpOp cop ) %{
8890 match(Set dst (CMoveL (Binary cop (CmpL tmp1 tmp2)) (Binary dst src)));
8891 ins_cost(80);
8892 format %{
8893 "CMP$cop $tmp1, $tmp2\t @cmovL_cmpL_reg_reg\n"
8894 "\tCMOV $dst,$src \t @cmovL_cmpL_reg_reg"
8895 %}
8896 ins_encode %{
8897 Register opr1 = as_Register($tmp1$$reg);
8898 Register opr2 = as_Register($tmp2$$reg);
8899 Register dst = as_Register($dst$$reg);
8900 Register src = as_Register($src$$reg);
8901 int flag = $cop$$cmpcode;
8903 switch(flag)
8904 {
8905 case 0x01: //equal
8906 __ subu(AT, opr1, opr2);
8907 __ movz(dst, src, AT);
8908 break;
8910 case 0x02: //not_equal
8911 __ subu(AT, opr1, opr2);
8912 __ movn(dst, src, AT);
8913 break;
8915 case 0x03: //greater
8916 __ slt(AT, opr2, opr1);
8917 __ movn(dst, src, AT);
8918 break;
8920 case 0x04: //greater_equal
8921 __ slt(AT, opr1, opr2);
8922 __ movz(dst, src, AT);
8923 break;
8925 case 0x05: //less
8926 __ slt(AT, opr1, opr2);
8927 __ movn(dst, src, AT);
8928 break;
8930 case 0x06: //less_equal
8931 __ slt(AT, opr2, opr1);
8932 __ movz(dst, src, AT);
8933 break;
8935 default:
8936 Unimplemented();
8937 }
8938 %}
8940 ins_pipe( pipe_slow );
8941 %}
// cmovL_cmpN_reg_reg: conditional move of a long selected by an unsigned
// narrow-oop compare (cmpOpU + sltu).  AT holds the compare result;
// movz/movn select $src.
8943 instruct cmovL_cmpN_reg_reg(mRegL dst, mRegL src, mRegN tmp1, mRegN tmp2, cmpOpU cop ) %{
8944 match(Set dst (CMoveL (Binary cop (CmpN tmp1 tmp2)) (Binary dst src)));
8945 ins_cost(80);
8946 format %{
8947 "CMPU$cop $tmp1,$tmp2\t @cmovL_cmpN_reg_reg\n\t"
8948 "CMOV $dst,$src\t @cmovL_cmpN_reg_reg"
8949 %}
8950 ins_encode %{
8951 Register op1 = $tmp1$$Register;
8952 Register op2 = $tmp2$$Register;
8953 Register dst = $dst$$Register;
8954 Register src = $src$$Register;
8955 int flag = $cop$$cmpcode;
8957 switch(flag)
8958 {
8959 case 0x01: //equal
8960 __ subu32(AT, op1, op2);
8961 __ movz(dst, src, AT);
8962 break;
8964 case 0x02: //not_equal
8965 __ subu32(AT, op1, op2);
8966 __ movn(dst, src, AT);
8967 break;
8969 case 0x03: //above
8970 __ sltu(AT, op2, op1);
8971 __ movn(dst, src, AT);
8972 break;
8974 case 0x04: //above_equal
8975 __ sltu(AT, op1, op2);
8976 __ movz(dst, src, AT);
8977 break;
8979 case 0x05: //below
8980 __ sltu(AT, op1, op2);
8981 __ movn(dst, src, AT);
8982 break;
8984 case 0x06: //below_equal
8985 __ sltu(AT, op2, op1);
8986 __ movz(dst, src, AT);
8987 break;
8989 default:
8990 Unimplemented();
8991 }
8992 %}
8994 ins_pipe( pipe_slow );
8995 %}
// cmovL_cmpD_reg_reg: conditional move of a long selected by a double
// compare.  c.cond.d sets FCC; movt copies $src when FCC is set, movf
// when it is clear (see cmovI_cmpD_reg_reg for the NaN reasoning).
8998 instruct cmovL_cmpD_reg_reg(mRegL dst, mRegL src, regD tmp1, regD tmp2, cmpOp cop ) %{
8999 match(Set dst (CMoveL (Binary cop (CmpD tmp1 tmp2)) (Binary dst src)));
9000 ins_cost(80);
9001 format %{
9002 "CMP$cop $tmp1, $tmp2\t @cmovL_cmpD_reg_reg\n"
9003 "\tCMOV $dst,$src \t @cmovL_cmpD_reg_reg"
9004 %}
9005 ins_encode %{
9006 FloatRegister reg_op1 = as_FloatRegister($tmp1$$reg);
9007 FloatRegister reg_op2 = as_FloatRegister($tmp2$$reg);
9008 Register dst = as_Register($dst$$reg);
9009 Register src = as_Register($src$$reg);
9011 int flag = $cop$$cmpcode;
9013 switch(flag)
9014 {
9015 case 0x01: //equal
9016 __ c_eq_d(reg_op1, reg_op2);
9017 __ movt(dst, src);
9018 break;
9019 case 0x02: //not_equal
9020 __ c_eq_d(reg_op1, reg_op2);
9021 __ movf(dst, src);
9022 break;
9023 case 0x03: //greater
9024 __ c_ole_d(reg_op1, reg_op2);
9025 __ movf(dst, src);
9026 break;
9027 case 0x04: //greater_equal
9028 __ c_olt_d(reg_op1, reg_op2);
9029 __ movf(dst, src);
9030 break;
9031 case 0x05: //less
9032 __ c_ult_d(reg_op1, reg_op2);
9033 __ movt(dst, src);
9034 break;
9035 case 0x06: //less_equal
9036 __ c_ule_d(reg_op1, reg_op2);
9037 __ movt(dst, src);
9038 break;
9039 default:
9040 Unimplemented();
9041 }
9042 %}
9044 ins_pipe( pipe_slow );
9045 %}
// cmovD_cmpD_reg_reg: conditional move of a double selected by a double
// compare.  There is no FP conditional-move-on-FCC pairing used here;
// instead the rule branches around a mov_d.  Each bc1f/bc1t is followed
// by a nop filling the branch delay slot.
9047 instruct cmovD_cmpD_reg_reg(regD dst, regD src, regD tmp1, regD tmp2, cmpOp cop ) %{
9048 match(Set dst (CMoveD (Binary cop (CmpD tmp1 tmp2)) (Binary dst src)));
9049 ins_cost(200);
9050 format %{
9051 "CMP$cop $tmp1, $tmp2\t @cmovD_cmpD_reg_reg\n"
9052 "\tCMOV $dst,$src \t @cmovD_cmpD_reg_reg"
9053 %}
9054 ins_encode %{
9055 FloatRegister reg_op1 = as_FloatRegister($tmp1$$reg);
9056 FloatRegister reg_op2 = as_FloatRegister($tmp2$$reg);
9057 FloatRegister dst = as_FloatRegister($dst$$reg);
9058 FloatRegister src = as_FloatRegister($src$$reg);
9060 int flag = $cop$$cmpcode;
9062 Label L;
9064 switch(flag)
9065 {
9066 case 0x01: //equal
9067 __ c_eq_d(reg_op1, reg_op2);
9068 __ bc1f(L);
9069 __ nop();
9070 __ mov_d(dst, src);
9071 __ bind(L);
9072 break;
9073 case 0x02: //not_equal
9074 //2016/4/19 aoqi: See instruct branchConD_reg_reg. The change in branchConD_reg_reg fixed a bug. It seems similar here, so I made the same change.
9075 __ c_eq_d(reg_op1, reg_op2);
9076 __ bc1t(L);
9077 __ nop();
9078 __ mov_d(dst, src);
9079 __ bind(L);
9080 break;
9081 case 0x03: //greater
9082 __ c_ole_d(reg_op1, reg_op2);
9083 __ bc1t(L);
9084 __ nop();
9085 __ mov_d(dst, src);
9086 __ bind(L);
9087 break;
9088 case 0x04: //greater_equal
9089 __ c_olt_d(reg_op1, reg_op2);
9090 __ bc1t(L);
9091 __ nop();
9092 __ mov_d(dst, src);
9093 __ bind(L);
9094 break;
9095 case 0x05: //less
9096 __ c_ult_d(reg_op1, reg_op2);
9097 __ bc1f(L);
9098 __ nop();
9099 __ mov_d(dst, src);
9100 __ bind(L);
9101 break;
9102 case 0x06: //less_equal
9103 __ c_ule_d(reg_op1, reg_op2);
9104 __ bc1f(L);
9105 __ nop();
9106 __ mov_d(dst, src);
9107 __ bind(L);
9108 break;
9109 default:
9110 Unimplemented();
9111 }
9112 %}
9114 ins_pipe( pipe_slow );
9115 %}
// cmovF_cmpI_reg_reg: conditional move of a float selected by a signed
// int compare, implemented by branching around a mov_s (FP registers have
// no movz/movn on GPR conditions here).  Each branch's delay slot is
// filled with a nop.  Case labels "great" normalized to "greater".
9117 instruct cmovF_cmpI_reg_reg(regF dst, regF src, mRegI tmp1, mRegI tmp2, cmpOp cop ) %{
9118 match(Set dst (CMoveF (Binary cop (CmpI tmp1 tmp2)) (Binary dst src)));
9119 ins_cost(200);
9120 format %{
9121 "CMP$cop $tmp1, $tmp2\t @cmovF_cmpI_reg_reg\n"
9122 "\tCMOV $dst, $src \t @cmovF_cmpI_reg_reg"
9123 %}
9125 ins_encode %{
9126 Register op1 = $tmp1$$Register;
9127 Register op2 = $tmp2$$Register;
9128 FloatRegister dst = as_FloatRegister($dst$$reg);
9129 FloatRegister src = as_FloatRegister($src$$reg);
9130 int flag = $cop$$cmpcode;
9131 Label L;
9133 switch(flag)
9134 {
9135 case 0x01: //equal
9136 __ bne(op1, op2, L);
9137 __ nop();
9138 __ mov_s(dst, src);
9139 __ bind(L);
9140 break;
9141 case 0x02: //not_equal
9142 __ beq(op1, op2, L);
9143 __ nop();
9144 __ mov_s(dst, src);
9145 __ bind(L);
9146 break;
9147 case 0x03: //greater
9148 __ slt(AT, op2, op1);
9149 __ beq(AT, R0, L);
9150 __ nop();
9151 __ mov_s(dst, src);
9152 __ bind(L);
9153 break;
9154 case 0x04: //greater_equal
9155 __ slt(AT, op1, op2);
9156 __ bne(AT, R0, L);
9157 __ nop();
9158 __ mov_s(dst, src);
9159 __ bind(L);
9160 break;
9161 case 0x05: //less
9162 __ slt(AT, op1, op2);
9163 __ beq(AT, R0, L);
9164 __ nop();
9165 __ mov_s(dst, src);
9166 __ bind(L);
9167 break;
9168 case 0x06: //less_equal
9169 __ slt(AT, op2, op1);
9170 __ bne(AT, R0, L);
9171 __ nop();
9172 __ mov_s(dst, src);
9173 __ bind(L);
9174 break;
9175 default:
9176 Unimplemented();
9177 }
9178 %}
9180 ins_pipe( pipe_slow );
9181 %}
// cmovD_cmpI_reg_reg: conditional move of a double selected by a signed
// int compare; branch-around-mov_d scheme identical to cmovF_cmpI_reg_reg.
// Case labels "great" normalized to "greater".
9183 instruct cmovD_cmpI_reg_reg(regD dst, regD src, mRegI tmp1, mRegI tmp2, cmpOp cop ) %{
9184 match(Set dst (CMoveD (Binary cop (CmpI tmp1 tmp2)) (Binary dst src)));
9185 ins_cost(200);
9186 format %{
9187 "CMP$cop $tmp1, $tmp2\t @cmovD_cmpI_reg_reg\n"
9188 "\tCMOV $dst, $src \t @cmovD_cmpI_reg_reg"
9189 %}
9191 ins_encode %{
9192 Register op1 = $tmp1$$Register;
9193 Register op2 = $tmp2$$Register;
9194 FloatRegister dst = as_FloatRegister($dst$$reg);
9195 FloatRegister src = as_FloatRegister($src$$reg);
9196 int flag = $cop$$cmpcode;
9197 Label L;
9199 switch(flag)
9200 {
9201 case 0x01: //equal
9202 __ bne(op1, op2, L);
9203 __ nop();
9204 __ mov_d(dst, src);
9205 __ bind(L);
9206 break;
9207 case 0x02: //not_equal
9208 __ beq(op1, op2, L);
9209 __ nop();
9210 __ mov_d(dst, src);
9211 __ bind(L);
9212 break;
9213 case 0x03: //greater
9214 __ slt(AT, op2, op1);
9215 __ beq(AT, R0, L);
9216 __ nop();
9217 __ mov_d(dst, src);
9218 __ bind(L);
9219 break;
9220 case 0x04: //greater_equal
9221 __ slt(AT, op1, op2);
9222 __ bne(AT, R0, L);
9223 __ nop();
9224 __ mov_d(dst, src);
9225 __ bind(L);
9226 break;
9227 case 0x05: //less
9228 __ slt(AT, op1, op2);
9229 __ beq(AT, R0, L);
9230 __ nop();
9231 __ mov_d(dst, src);
9232 __ bind(L);
9233 break;
9234 case 0x06: //less_equal
9235 __ slt(AT, op2, op1);
9236 __ bne(AT, R0, L);
9237 __ nop();
9238 __ mov_d(dst, src);
9239 __ bind(L);
9240 break;
9241 default:
9242 Unimplemented();
9243 }
9244 %}
9246 ins_pipe( pipe_slow );
9247 %}
// cmovD_cmpP_reg_reg: conditional move of a double selected by a pointer
// compare, branch-around-mov_d scheme.
// NOTE(review): the ordered cases use signed slt on pointer operands,
// while cmovP_cmpP_reg_reg uses unsigned sltu for CmpP.  eq/ne (the common
// pointer tests) are unaffected, but confirm whether C2 can ever emit an
// ordered CmpP here — if so this should use sltu.
9249 instruct cmovD_cmpP_reg_reg(regD dst, regD src, mRegP tmp1, mRegP tmp2, cmpOp cop ) %{
9250 match(Set dst (CMoveD (Binary cop (CmpP tmp1 tmp2)) (Binary dst src)));
9251 ins_cost(200);
9252 format %{
9253 "CMP$cop $tmp1, $tmp2\t @cmovD_cmpP_reg_reg\n"
9254 "\tCMOV $dst, $src \t @cmovD_cmpP_reg_reg"
9255 %}
9257 ins_encode %{
9258 Register op1 = $tmp1$$Register;
9259 Register op2 = $tmp2$$Register;
9260 FloatRegister dst = as_FloatRegister($dst$$reg);
9261 FloatRegister src = as_FloatRegister($src$$reg);
9262 int flag = $cop$$cmpcode;
9263 Label L;
9265 switch(flag)
9266 {
9267 case 0x01: //equal
9268 __ bne(op1, op2, L);
9269 __ nop();
9270 __ mov_d(dst, src);
9271 __ bind(L);
9272 break;
9273 case 0x02: //not_equal
9274 __ beq(op1, op2, L);
9275 __ nop();
9276 __ mov_d(dst, src);
9277 __ bind(L);
9278 break;
9279 case 0x03: //greater
9280 __ slt(AT, op2, op1);
9281 __ beq(AT, R0, L);
9282 __ nop();
9283 __ mov_d(dst, src);
9284 __ bind(L);
9285 break;
9286 case 0x04: //greater_equal
9287 __ slt(AT, op1, op2);
9288 __ bne(AT, R0, L);
9289 __ nop();
9290 __ mov_d(dst, src);
9291 __ bind(L);
9292 break;
9293 case 0x05: //less
9294 __ slt(AT, op1, op2);
9295 __ beq(AT, R0, L);
9296 __ nop();
9297 __ mov_d(dst, src);
9298 __ bind(L);
9299 break;
9300 case 0x06: //less_equal
9301 __ slt(AT, op2, op1);
9302 __ bne(AT, R0, L);
9303 __ nop();
9304 __ mov_d(dst, src);
9305 __ bind(L);
9306 break;
9307 default:
9308 Unimplemented();
9309 }
9310 %}
9312 ins_pipe( pipe_slow );
9313 %}
9315 //FIXME
// cmovI_cmpF_reg_reg: conditional move of an int selected by a float
// compare.  c.cond.s sets FCC; movt copies $src when FCC is set, movf
// when clear.  Same ordered/unordered compare selection as the double
// variant (see cmovI_cmpD_reg_reg).
9316 instruct cmovI_cmpF_reg_reg(mRegI dst, mRegI src, regF tmp1, regF tmp2, cmpOp cop ) %{
9317 match(Set dst (CMoveI (Binary cop (CmpF tmp1 tmp2)) (Binary dst src)));
9318 ins_cost(80);
9319 format %{
9320 "CMP$cop $tmp1, $tmp2\t @cmovI_cmpF_reg_reg\n"
9321 "\tCMOV $dst,$src \t @cmovI_cmpF_reg_reg"
9322 %}
9324 ins_encode %{
9325 FloatRegister reg_op1 = $tmp1$$FloatRegister;
9326 FloatRegister reg_op2 = $tmp2$$FloatRegister;
9327 Register dst = $dst$$Register;
9328 Register src = $src$$Register;
9329 int flag = $cop$$cmpcode;
9331 switch(flag)
9332 {
9333 case 0x01: //equal
9334 __ c_eq_s(reg_op1, reg_op2);
9335 __ movt(dst, src);
9336 break;
9337 case 0x02: //not_equal
9338 __ c_eq_s(reg_op1, reg_op2);
9339 __ movf(dst, src);
9340 break;
9341 case 0x03: //greater
9342 __ c_ole_s(reg_op1, reg_op2);
9343 __ movf(dst, src);
9344 break;
9345 case 0x04: //greater_equal
9346 __ c_olt_s(reg_op1, reg_op2);
9347 __ movf(dst, src);
9348 break;
9349 case 0x05: //less
9350 __ c_ult_s(reg_op1, reg_op2);
9351 __ movt(dst, src);
9352 break;
9353 case 0x06: //less_equal
9354 __ c_ule_s(reg_op1, reg_op2);
9355 __ movt(dst, src);
9356 break;
9357 default:
9358 Unimplemented();
9359 }
9360 %}
9361 ins_pipe( pipe_slow );
9362 %}
// cmovF_cmpF_reg_reg: conditional move of a float selected by a float
// compare.  Branches on FCC around a mov_s; each bc1f/bc1t delay slot is
// filled with a nop.
9364 instruct cmovF_cmpF_reg_reg(regF dst, regF src, regF tmp1, regF tmp2, cmpOp cop ) %{
9365 match(Set dst (CMoveF (Binary cop (CmpF tmp1 tmp2)) (Binary dst src)));
9366 ins_cost(200);
9367 format %{
9368 "CMP$cop $tmp1, $tmp2\t @cmovF_cmpF_reg_reg\n"
9369 "\tCMOV $dst,$src \t @cmovF_cmpF_reg_reg"
9370 %}
9372 ins_encode %{
9373 FloatRegister reg_op1 = $tmp1$$FloatRegister;
9374 FloatRegister reg_op2 = $tmp2$$FloatRegister;
9375 FloatRegister dst = $dst$$FloatRegister;
9376 FloatRegister src = $src$$FloatRegister;
9377 Label L;
9378 int flag = $cop$$cmpcode;
9380 switch(flag)
9381 {
9382 case 0x01: //equal
9383 __ c_eq_s(reg_op1, reg_op2);
9384 __ bc1f(L);
9385 __ nop();
9386 __ mov_s(dst, src);
9387 __ bind(L);
9388 break;
9389 case 0x02: //not_equal
9390 __ c_eq_s(reg_op1, reg_op2);
9391 __ bc1t(L);
9392 __ nop();
9393 __ mov_s(dst, src);
9394 __ bind(L);
9395 break;
9396 case 0x03: //greater
9397 __ c_ole_s(reg_op1, reg_op2);
9398 __ bc1t(L);
9399 __ nop();
9400 __ mov_s(dst, src);
9401 __ bind(L);
9402 break;
9403 case 0x04: //greater_equal
9404 __ c_olt_s(reg_op1, reg_op2);
9405 __ bc1t(L);
9406 __ nop();
9407 __ mov_s(dst, src);
9408 __ bind(L);
9409 break;
9410 case 0x05: //less
9411 __ c_ult_s(reg_op1, reg_op2);
9412 __ bc1f(L);
9413 __ nop();
9414 __ mov_s(dst, src);
9415 __ bind(L);
9416 break;
9417 case 0x06: //less_equal
9418 __ c_ule_s(reg_op1, reg_op2);
9419 __ bc1f(L);
9420 __ nop();
9421 __ mov_s(dst, src);
9422 __ bind(L);
9423 break;
9424 default:
9425 Unimplemented();
9426 }
9427 %}
9428 ins_pipe( pipe_slow );
9429 %}
// Manifest a CmpL result in an integer register: dst = -1 / 0 / +1 for
// src1 < src2 / src1 == src2 / src1 > src2.
// This is the test to avoid.
instruct cmpL3_reg_reg(mRegI dst, mRegL src1, mRegL src2) %{
  match(Set dst (CmpL3 src1 src2));
  ins_cost(1000);
  format %{ "cmpL3 $dst, $src1, $src2 @ cmpL3_reg_reg" %}
  ins_encode %{
    Register opr1 = as_Register($src1$$reg);
    Register opr2 = as_Register($src2$$reg);
    Register dst  = as_Register($dst$$reg);

    // Branch-free three-way compare:
    //   AT  = (src1 < src2) ? 1 : 0
    //   dst = (src2 < src1) ? 1 : 0
    //   dst = dst - AT              ->  -1 / 0 / +1
    // The previous sequence derived the result from the sign of
    // (src1 - src2), which is wrong when the 64-bit subtraction
    // overflows (e.g. src1 = LONG_MAX, src2 = -1 yielded "less").
    // slt-based comparison cannot overflow.  dst may alias opr1/opr2:
    // both slt reads happen before dst is written in the second slt.
    __ slt(AT, opr1, opr2);
    __ slt(dst, opr2, opr1);
    __ subu(dst, dst, AT);
  %}
  ins_pipe( pipe_slow );
%}
9456 //
9457 // less_rsult = -1
9458 // greater_result = 1
9459 // equal_result = 0
9460 // nan_result = -1
9461 //
// cmpF3_reg_reg: manifest a CmpF3 (float three-way compare) in a GPR.
// c_ult_s is true for "less than or unordered", so a NaN operand takes the
// -1 path — matching nan_result above.  The bc1t delay slot is used to
// preload dst with -1; otherwise dst = (src1 == src2) ? 0 : 1 via movt.
9462 instruct cmpF3_reg_reg(mRegI dst, regF src1, regF src2) %{
9463 match(Set dst (CmpF3 src1 src2));
9464 ins_cost(1000);
9465 format %{ "cmpF3 $dst, $src1, $src2 @ cmpF3_reg_reg" %}
9466 ins_encode %{
9467 FloatRegister src1 = as_FloatRegister($src1$$reg);
9468 FloatRegister src2 = as_FloatRegister($src2$$reg);
9469 Register dst = as_Register($dst$$reg);
9471 Label Done;
9473 __ c_ult_s(src1, src2);
9474 __ bc1t(Done);
9475 __ delayed()->daddiu(dst, R0, -1);
9477 __ c_eq_s(src1, src2);
9478 __ move(dst, 1);
9479 __ movt(dst, R0);
9481 __ bind(Done);
9482 %}
9483 ins_pipe( pipe_slow );
9484 %}
// cmpD3_reg_reg: manifest a CmpD3 (double three-way compare) in a GPR.
// Same scheme as cmpF3_reg_reg: c_ult_d routes "less or NaN" to -1 via the
// delay-slot daddiu; otherwise dst = (src1 == src2) ? 0 : 1.
9486 instruct cmpD3_reg_reg(mRegI dst, regD src1, regD src2) %{
9487 match(Set dst (CmpD3 src1 src2));
9488 ins_cost(1000);
9489 format %{ "cmpD3 $dst, $src1, $src2 @ cmpD3_reg_reg" %}
9490 ins_encode %{
9491 FloatRegister src1 = as_FloatRegister($src1$$reg);
9492 FloatRegister src2 = as_FloatRegister($src2$$reg);
9493 Register dst = as_Register($dst$$reg);
9495 Label Done;
9497 __ c_ult_d(src1, src2);
9498 __ bc1t(Done);
9499 __ delayed()->daddiu(dst, R0, -1);
9501 __ c_eq_d(src1, src2);
9502 __ move(dst, 1);
9503 __ movt(dst, R0);
9505 __ bind(Done);
9506 %}
9507 ins_pipe( pipe_slow );
9508 %}
// clear_array: zero $cnt doublewords starting at $base.  Simple store
// loop: T9 counts down, AT walks the address up by wordSize each
// iteration.  Clobbers AT and T9.
9510 instruct clear_array(mRegL cnt, mRegP base, Universe dummy) %{
9511 match(Set dummy (ClearArray cnt base));
9512 format %{ "CLEAR_ARRAY base = $base, cnt = $cnt # Clear doublewords" %}
9513 ins_encode %{
9514 //Assume cnt is the number of bytes in an array to be cleared,
9515 //and base points to the starting address of the array.
9516 Register base = $base$$Register;
9517 Register num = $cnt$$Register;
9518 Label Loop, done;
9520 /* 2012/9/21 Jin: according to X86, $cnt is calculated by doublewords(8 bytes) */
9521 __ move(T9, num); /* T9 = words */
9522 __ beq(T9, R0, done); // nothing to do for an empty array
9523 __ nop();
9524 __ move(AT, base);
9526 __ bind(Loop);
9527 __ sd(R0, Address(AT, 0));
9528 __ daddi(AT, AT, wordSize);
9529 __ daddi(T9, T9, -1);
9530 __ bne(T9, R0, Loop);
9531 __ delayed()->nop();
9532 __ bind(done);
9533 %}
9534 ins_pipe( pipe_slow );
9535 %}
// string_compare: lexicographic UTF-16 compare of $str1[0..$cnt1) and
// $str2[0..$cnt2).  Result is the difference of the first differing chars,
// or the length difference if one string is a prefix of the other.
// Delay-slot heavy: note that the instruction after each branch executes
// whether or not the branch is taken.  Kills all four inputs and AT.
9537 instruct string_compare(a4_RegP str1, mA5RegI cnt1, a6_RegP str2, mA7RegI cnt2, no_Ax_mRegI result) %{
9538 match(Set result (StrComp (Binary str1 cnt1) (Binary str2 cnt2)));
9539 effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2);
9541 format %{ "String Compare $str1[len: $cnt1], $str2[len: $cnt2] -> $result @ string_compare" %}
9542 ins_encode %{
9543 // Get the first character position in both strings
9544 // [8] char array, [12] offset, [16] count
9545 Register str1 = $str1$$Register;
9546 Register str2 = $str2$$Register;
9547 Register cnt1 = $cnt1$$Register;
9548 Register cnt2 = $cnt2$$Register;
9549 Register result = $result$$Register;
9551 Label L, Loop, haveResult, done;
9553 // compute the and difference of lengths (in result)
9554 __ subu(result, cnt1, cnt2); // result holds the difference of two lengths
9556 // compute the shorter length (in cnt1)
9557 __ slt(AT, cnt2, cnt1);
9558 __ movn(cnt1, cnt2, AT);
9560 // Now the shorter length is in cnt1 and cnt2 can be used as a tmp register
9561 __ bind(Loop); // Loop begin
9562 __ beq(cnt1, R0, done);
9563 __ delayed()->lhu(AT, str1, 0);; // delay slot: load current char of str1
9565 // compare current character
9566 __ lhu(cnt2, str2, 0);
9567 __ bne(AT, cnt2, haveResult);
9568 __ delayed()->addi(str1, str1, 2); // delay slot: advance str1 regardless
9569 __ addi(str2, str2, 2);
9570 __ b(Loop);
9571 __ delayed()->addi(cnt1, cnt1, -1); // Loop end; delay slot decrements count
9573 __ bind(haveResult);
9574 __ subu(result, AT, cnt2); // difference of the first mismatching chars
9576 __ bind(done);
9577 %}
9579 ins_pipe( pipe_slow );
9580 %}
9582 // intrinsic optimization
// string_equals: compare $cnt UTF-16 chars of $str1 and $str2 for
// equality; result = 1 if equal, 0 on the first mismatch.
// NOTE(review): the daddiu after each un-annotated beq sits in the branch
// delay slot and therefore executes unconditionally (the delayed() form
// used elsewhere is only a readability marker) — confirm against the
// port's MacroAssembler conventions.  Kills str1, str2, cnt, temp and AT.
9583 instruct string_equals(a4_RegP str1, a5_RegP str2, mA6RegI cnt, mA7RegI temp, no_Ax_mRegI result) %{
9584 match(Set result (StrEquals (Binary str1 str2) cnt));
9585 effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt, KILL temp);
9587 format %{ "String Equal $str1, $str2, len:$cnt tmp:$temp -> $result @ string_equals" %}
9588 ins_encode %{
9589 // Get the first character position in both strings
9590 // [8] char array, [12] offset, [16] count
9591 Register str1 = $str1$$Register;
9592 Register str2 = $str2$$Register;
9593 Register cnt = $cnt$$Register;
9594 Register tmp = $temp$$Register;
9595 Register result = $result$$Register;
9597 Label Loop, done;
9600 __ beq(str1, str2, done); // same char[] ?
9601 __ daddiu(result, R0, 1); // delay slot: result = 1 (identical arrays)
9603 __ bind(Loop); // Loop begin
9604 __ beq(cnt, R0, done);
9605 __ daddiu(result, R0, 1); // count == 0 -> all chars matched
9607 // compare current character
9608 __ lhu(AT, str1, 0);;
9609 __ lhu(tmp, str2, 0);
9610 __ bne(AT, tmp, done);
9611 __ delayed()->daddi(result, R0, 0); // delay slot: tentatively "not equal"
9612 __ addi(str1, str1, 2);
9613 __ addi(str2, str2, 2);
9614 __ b(Loop);
9615 __ delayed()->addi(cnt, cnt, -1); // Loop end; delay slot decrements count
9617 __ bind(done);
9618 %}
9620 ins_pipe( pipe_slow );
9621 %}
9623 //----------Arithmetic Instructions-------------------------------------------
9624 //----------Addition Instructions---------------------------------------------
// addI_Reg_Reg: 32-bit int add (addu32 keeps the result sign-extended).
9625 instruct addI_Reg_Reg(mRegI dst, mRegI src1, mRegI src2) %{
9626 match(Set dst (AddI src1 src2));
9628 format %{ "add $dst, $src1, $src2 #@addI_Reg_Reg" %}
9629 ins_encode %{
9630 Register dst = $dst$$Register;
9631 Register src1 = $src1$$Register;
9632 Register src2 = $src2$$Register;
9633 __ addu32(dst, src1, src2);
9634 %}
9635 ins_pipe( ialu_regI_regI );
9636 %}
// addI_Reg_imm: int add with immediate; single addiu32 when the constant
// fits in a signed 16-bit field, otherwise materialize it in AT first.
9638 instruct addI_Reg_imm(mRegI dst, mRegI src1, immI src2) %{
9639 match(Set dst (AddI src1 src2));
9641 format %{ "add $dst, $src1, $src2 #@addI_Reg_imm" %}
9642 ins_encode %{
9643 Register dst = $dst$$Register;
9644 Register src1 = $src1$$Register;
9645 int imm = $src2$$constant;
9647 if(Assembler::is_simm16(imm)) {
9648 __ addiu32(dst, src1, imm);
9649 } else {
9650 __ move(AT, imm);
9651 __ addu32(dst, src1, AT);
9652 }
9653 %}
9654 ins_pipe( ialu_regI_regI );
9655 %}
// addP_reg_reg: pointer + long offset, full 64-bit add.
9657 instruct addP_reg_reg(mRegP dst, mRegP src1, mRegL src2) %{
9658 match(Set dst (AddP src1 src2));
9660 format %{ "dadd $dst, $src1, $src2 #@addP_reg_reg" %}
9662 ins_encode %{
9663 Register dst = $dst$$Register;
9664 Register src1 = $src1$$Register;
9665 Register src2 = $src2$$Register;
9666 __ daddu(dst, src1, src2);
9667 %}
9669 ins_pipe( ialu_regI_regI );
9670 %}
// addP_reg_reg_convI2L: pointer + sign-extended int offset; the ConvI2L
// is free because int values are already kept sign-extended.
9672 instruct addP_reg_reg_convI2L(mRegP dst, mRegP src1, mRegI src2) %{
9673 match(Set dst (AddP src1 (ConvI2L src2)));
9675 format %{ "dadd $dst, $src1, $src2 #@addP_reg_reg_convI2L" %}
9677 ins_encode %{
9678 Register dst = $dst$$Register;
9679 Register src1 = $src1$$Register;
9680 Register src2 = $src2$$Register;
9681 __ daddu(dst, src1, src2);
9682 %}
9684 ins_pipe( ialu_regI_regI );
9685 %}
// addP_reg_imm: pointer + long constant; daddiu for simm16, otherwise
// build the constant in AT with set64.
9687 instruct addP_reg_imm(mRegP dst, mRegP src1, immL src2) %{
9688 match(Set dst (AddP src1 src2));
9690 format %{ "daddi $dst, $src1, $src2 #@addP_reg_imm" %}
9691 ins_encode %{
9692 Register src1 = $src1$$Register;
9693 long src2 = $src2$$constant;
9694 Register dst = $dst$$Register;
9696 if(Assembler::is_simm16(src2)) {
9697 __ daddiu(dst, src1, src2);
9698 } else {
9699 __ set64(AT, src2);
9700 __ daddu(dst, src1, AT);
9701 }
9702 %}
9703 ins_pipe( ialu_regI_imm16 );
9704 %}
9706 // Add Long Register with Register
9707 instruct addL_Reg_Reg(mRegL dst, mRegL src1, mRegL src2) %{
9708 match(Set dst (AddL src1 src2));
9709 ins_cost(200);
9710 format %{ "ADD $dst, $src1, $src2 #@addL_Reg_Reg\t" %}
9712 ins_encode %{
9713 Register dst_reg = as_Register($dst$$reg);
9714 Register src1_reg = as_Register($src1$$reg);
9715 Register src2_reg = as_Register($src2$$reg);
9717 __ daddu(dst_reg, src1_reg, src2_reg);
9718 %}
9720 ins_pipe( ialu_regL_regL );
9721 %}
// addL_Reg_imm: long + 16-bit immediate (operand immL16 guarantees the
// constant fits daddiu's field).
9723 instruct addL_Reg_imm(mRegL dst, mRegL src1, immL16 src2)
9724 %{
9725 match(Set dst (AddL src1 src2));
9727 format %{ "ADD $dst, $src1, $src2 #@addL_Reg_imm " %}
9728 ins_encode %{
9729 Register dst_reg = as_Register($dst$$reg);
9730 Register src1_reg = as_Register($src1$$reg);
9731 int src2_imm = $src2$$constant;
9733 __ daddiu(dst_reg, src1_reg, src2_imm);
9734 %}
9736 ins_pipe( ialu_regL_regL );
9737 %}
// addL_RegI2L_imm: (long)int + 16-bit immediate; ConvI2L is free.
9739 instruct addL_RegI2L_imm(mRegL dst, mRegI src1, immL16 src2)
9740 %{
9741 match(Set dst (AddL (ConvI2L src1) src2));
9743 format %{ "ADD $dst, $src1, $src2 #@addL_RegI2L_imm " %}
9744 ins_encode %{
9745 Register dst_reg = as_Register($dst$$reg);
9746 Register src1_reg = as_Register($src1$$reg);
9747 int src2_imm = $src2$$constant;
9749 __ daddiu(dst_reg, src1_reg, src2_imm);
9750 %}
9752 ins_pipe( ialu_regL_regL );
9753 %}
// addL_RegI2L_Reg: (long)int + long register.
9755 instruct addL_RegI2L_Reg(mRegL dst, mRegI src1, mRegL src2) %{
9756 match(Set dst (AddL (ConvI2L src1) src2));
9757 ins_cost(200);
9758 format %{ "ADD $dst, $src1, $src2 #@addL_RegI2L_Reg\t" %}
9760 ins_encode %{
9761 Register dst_reg = as_Register($dst$$reg);
9762 Register src1_reg = as_Register($src1$$reg);
9763 Register src2_reg = as_Register($src2$$reg);
9765 __ daddu(dst_reg, src1_reg, src2_reg);
9766 %}
9768 ins_pipe( ialu_regL_regL );
9769 %}
// addL_RegI2L_RegI2L: (long)int + (long)int.
9771 instruct addL_RegI2L_RegI2L(mRegL dst, mRegI src1, mRegI src2) %{
9772 match(Set dst (AddL (ConvI2L src1) (ConvI2L src2)));
9773 ins_cost(200);
9774 format %{ "ADD $dst, $src1, $src2 #@addL_RegI2L_RegI2L\t" %}
9776 ins_encode %{
9777 Register dst_reg = as_Register($dst$$reg);
9778 Register src1_reg = as_Register($src1$$reg);
9779 Register src2_reg = as_Register($src2$$reg);
9781 __ daddu(dst_reg, src1_reg, src2_reg);
9782 %}
9784 ins_pipe( ialu_regL_regL );
9785 %}
// addL_Reg_RegI2L: long register + (long)int.
9787 instruct addL_Reg_RegI2L(mRegL dst, mRegL src1, mRegI src2) %{
9788 match(Set dst (AddL src1 (ConvI2L src2)));
9789 ins_cost(200);
9790 format %{ "ADD $dst, $src1, $src2 #@addL_Reg_RegI2L\t" %}
9792 ins_encode %{
9793 Register dst_reg = as_Register($dst$$reg);
9794 Register src1_reg = as_Register($src1$$reg);
9795 Register src2_reg = as_Register($src2$$reg);
9797 __ daddu(dst_reg, src1_reg, src2_reg);
9798 %}
9800 ins_pipe( ialu_regL_regL );
9801 %}
9803 //----------Subtraction Instructions-------------------------------------------
9804 // Integer Subtraction Instructions
// subI_Reg_Reg: 32-bit int subtract.
9805 instruct subI_Reg_Reg(mRegI dst, mRegI src1, mRegI src2) %{
9806 match(Set dst (SubI src1 src2));
9807 ins_cost(100);
9809 format %{ "sub $dst, $src1, $src2 #@subI_Reg_Reg" %}
9810 ins_encode %{
9811 Register dst = $dst$$Register;
9812 Register src1 = $src1$$Register;
9813 Register src2 = $src2$$Register;
9814 __ subu32(dst, src1, src2);
9815 %}
9816 ins_pipe( ialu_regI_regI );
9817 %}
// subI_Reg_immI16_sub: int minus immediate folded into addiu32 of the
// negated constant (operand immI16_sub guarantees -constant fits simm16).
9819 instruct subI_Reg_immI16_sub(mRegI dst, mRegI src1, immI16_sub src2) %{
9820 match(Set dst (SubI src1 src2));
9821 ins_cost(80);
9823 format %{ "sub $dst, $src1, $src2 #@subI_Reg_immI16_sub" %}
9824 ins_encode %{
9825 Register dst = $dst$$Register;
9826 Register src1 = $src1$$Register;
9827 __ addiu32(dst, src1, -1 * $src2$$constant);
9828 %}
9829 ins_pipe( ialu_regI_regI );
9830 %}
// negI_Reg: int negate (0 - src).
9832 instruct negI_Reg(mRegI dst, immI0 zero, mRegI src) %{
9833 match(Set dst (SubI zero src));
9834 ins_cost(80);
9836 format %{ "neg $dst, $src #@negI_Reg" %}
9837 ins_encode %{
9838 Register dst = $dst$$Register;
9839 Register src = $src$$Register;
9840 __ subu32(dst, R0, src);
9841 %}
9842 ins_pipe( ialu_regI_regI );
9843 %}
// negL_Reg: long negate (0 - src).
9845 instruct negL_Reg(mRegL dst, immL0 zero, mRegL src) %{
9846 match(Set dst (SubL zero src));
9847 ins_cost(80);
9849 format %{ "neg $dst, $src #@negL_Reg" %}
9850 ins_encode %{
9851 Register dst = $dst$$Register;
9852 Register src = $src$$Register;
9853 __ subu(dst, R0, src);
9854 %}
9855 ins_pipe( ialu_regI_regI );
9856 %}
// subL_Reg_immL16_sub: long minus immediate folded into daddiu of the
// negated constant (operand immL16_sub guarantees -constant fits simm16).
9858 instruct subL_Reg_immL16_sub(mRegL dst, mRegL src1, immL16_sub src2) %{
9859 match(Set dst (SubL src1 src2));
9860 ins_cost(80);
9862 format %{ "sub $dst, $src1, $src2 #@subL_Reg_immL16_sub" %}
9863 ins_encode %{
9864 Register dst = $dst$$Register;
9865 Register src1 = $src1$$Register;
9866 __ daddiu(dst, src1, -1 * $src2$$constant);
9867 %}
9868 ins_pipe( ialu_regI_regI );
9869 %}
9871 // Subtract Long Register with Register.
// Long subtract, register - register (64-bit subu, no overflow trap).
instruct subL_Reg_Reg(mRegL dst, mRegL src1, mRegL src2) %{
  match(Set dst (SubL src1 src2));
  ins_cost(100);
  format %{ "SubL    $dst, $src1, $src2 @ subL_Reg_Reg" %}
  ins_encode %{
    Register dst  = as_Register($dst$$reg);
    Register src1 = as_Register($src1$$reg);
    Register src2 = as_Register($src2$$reg);

    __ subu(dst, src1, src2);
  %}
  ins_pipe( ialu_regL_regL );
%}

// Long subtract with the right operand widened from int. The ConvI2L is a
// no-op on MIPS64 (ints live sign-extended in 64-bit registers).
instruct subL_Reg_RegI2L(mRegL dst, mRegL src1, mRegI src2) %{
  match(Set dst (SubL src1 (ConvI2L src2)));
  ins_cost(100);
  format %{ "SubL    $dst, $src1, $src2 @ subL_Reg_RegI2L" %}
  ins_encode %{
    Register dst  = as_Register($dst$$reg);
    Register src1 = as_Register($src1$$reg);
    Register src2 = as_Register($src2$$reg);

    __ subu(dst, src1, src2);
  %}
  ins_pipe( ialu_regL_regL );
%}

// Long subtract with the left operand widened from int.
instruct subL_RegI2L_Reg(mRegL dst, mRegI src1, mRegL src2) %{
  match(Set dst (SubL (ConvI2L src1) src2));
  ins_cost(200);
  format %{ "SubL    $dst, $src1, $src2 @ subL_RegI2L_Reg" %}
  ins_encode %{
    Register dst  = as_Register($dst$$reg);
    Register src1 = as_Register($src1$$reg);
    Register src2 = as_Register($src2$$reg);

    __ subu(dst, src1, src2);
  %}
  ins_pipe( ialu_regL_regL );
%}

// Long subtract with both operands widened from int.
instruct subL_RegI2L_RegI2L(mRegL dst, mRegI src1, mRegI src2) %{
  match(Set dst (SubL (ConvI2L src1) (ConvI2L src2)));
  ins_cost(200);
  format %{ "SubL    $dst, $src1, $src2 @ subL_RegI2L_RegI2L" %}
  ins_encode %{
    Register dst  = as_Register($dst$$reg);
    Register src1 = as_Register($src1$$reg);
    Register src2 = as_Register($src2$$reg);

    __ subu(dst, src1, src2);
  %}
  ins_pipe( ialu_regL_regL );
%}
9928 // Integer MOD with Register
// Integer remainder. On MIPS, div writes quotient to LO and remainder to HI;
// the remainder is read back with mfhi.
instruct modI_Reg_Reg(mRegI dst, mRegI src1, mRegI src2) %{
  match(Set dst (ModI src1 src2));
  ins_cost(300);
  format %{ "modi   $dst, $src1, $src2 @ modI_Reg_Reg" %}
  ins_encode %{
    Register dst = $dst$$Register;
    Register src1 = $src1$$Register;
    Register src2 = $src2$$Register;

    // The Loongson gsmod path is deliberately disabled (kept for reference):
    //if (UseLoongsonISA) {
    if (0) {
      // 2016.08.10
      // Experiments show that gsmod is slower that div+mfhi.
      // So I just disable it here.
      __ gsmod(dst, src1, src2);
    } else {
      __ div(src1, src2);
      __ mfhi(dst);
    }
  %}

  //ins_pipe( ialu_mod );
  ins_pipe( ialu_regI_regI );
%}

// Long remainder: single-instruction gsdmod on Loongson, otherwise
// ddiv + mfhi (remainder lands in HI).
instruct modL_reg_reg(mRegL dst, mRegL src1, mRegL src2) %{
  match(Set dst (ModL src1 src2));
  format %{ "modL  $dst, $src1, $src2 @modL_reg_reg" %}

  ins_encode %{
    Register dst = as_Register($dst$$reg);
    Register op1 = as_Register($src1$$reg);
    Register op2 = as_Register($src2$$reg);

    if (UseLoongsonISA) {
      __ gsdmod(dst, op1, op2);
    } else {
      __ ddiv(op1, op2);
      __ mfhi(dst);
    }
  %}
  ins_pipe( pipe_slow );
%}
// Integer multiply; mul writes the low 32 bits directly to a GPR.
instruct mulI_Reg_Reg(mRegI dst, mRegI src1, mRegI src2) %{
  match(Set dst (MulI src1 src2));

  ins_cost(300);
  format %{ "mul   $dst, $src1, $src2 @ mulI_Reg_Reg" %}
  ins_encode %{
    Register src1 = $src1$$Register;
    Register src2 = $src2$$Register;
    Register dst  = $dst$$Register;

    __ mul(dst, src1, src2);
  %}
  ins_pipe( ialu_mult );
%}

// Fused multiply-add: dst = src1 * src2 + src3, via the HI/LO accumulator.
// src3 is seeded into LO, madd accumulates src1*src2 into HI/LO, and the
// low 32 bits are read back with mflo.
instruct maddI_Reg_Reg(mRegI dst, mRegI src1, mRegI src2, mRegI src3) %{
  match(Set dst (AddI (MulI src1 src2) src3));

  ins_cost(999);
  format %{ "madd   $dst, $src1 * $src2 + $src3 #@maddI_Reg_Reg" %}
  ins_encode %{
    Register src1 = $src1$$Register;
    Register src2 = $src2$$Register;
    Register src3 = $src3$$Register;
    Register dst  = $dst$$Register;

    __ mtlo(src3);
    __ madd(src1, src2);
    __ mflo(dst);
  %}
  ins_pipe( ialu_mult );
%}
// Integer divide. MIPS div does not trap on divide-by-zero, so a teq
// (trap-if-equal) against R0 raises the exception manually when src2 == 0.
instruct divI_Reg_Reg(mRegI dst, mRegI src1, mRegI src2) %{
  match(Set dst (DivI src1 src2));

  ins_cost(300);
  format %{ "div   $dst, $src1, $src2 @ divI_Reg_Reg" %}
  ins_encode %{
    Register src1 = $src1$$Register;
    Register src2 = $src2$$Register;
    Register dst  = $dst$$Register;

    /* 2012/4/21 Jin: In MIPS, div does not cause exception.
         We must trap an exception manually. */
    __ teq(R0, src2, 0x7);

    if (UseLoongsonISA) {
      // Loongson 3-operand divide writes the quotient straight to dst.
      __ gsdiv(dst, src1, src2);
    } else {
      __ div(src1, src2);

      // NOTE(review): the two nops presumably pad the HI/LO read-after-write
      // hazard before mflo — confirm against the target pipeline manual.
      __ nop();
      __ nop();
      __ mflo(dst);
    }
  %}
  ins_pipe( ialu_mod );
%}
// Single-precision FP divide. IEEE-754 handles zero divisors (inf/NaN),
// so no manual trap is emitted here.
instruct divF_Reg_Reg(regF dst, regF src1, regF src2) %{
  match(Set dst (DivF src1 src2));

  ins_cost(300);
  format %{ "divF   $dst, $src1, $src2 @ divF_Reg_Reg" %}
  ins_encode %{
    FloatRegister src1 = $src1$$FloatRegister;
    FloatRegister src2 = $src2$$FloatRegister;
    FloatRegister dst  = $dst$$FloatRegister;

    /* Here do we need to trap an exception manually ? */
    __ div_s(dst, src1, src2);
  %}
  ins_pipe( pipe_slow );
%}

// Double-precision FP divide.
instruct divD_Reg_Reg(regD dst, regD src1, regD src2) %{
  match(Set dst (DivD src1 src2));

  ins_cost(300);
  format %{ "divD   $dst, $src1, $src2 @ divD_Reg_Reg" %}
  ins_encode %{
    FloatRegister src1 = $src1$$FloatRegister;
    FloatRegister src2 = $src2$$FloatRegister;
    FloatRegister dst  = $dst$$FloatRegister;

    /* Here do we need to trap an exception manually ? */
    __ div_d(dst, src1, src2);
  %}
  ins_pipe( pipe_slow );
%}
// Long multiply: 3-operand gsdmult on Loongson, else dmult + mflo
// (low 64 bits of the 128-bit product).
instruct mulL_reg_reg(mRegL dst, mRegL src1, mRegL src2) %{
  match(Set dst (MulL src1 src2));
  format %{ "mulL  $dst, $src1, $src2 @mulL_reg_reg" %}
  ins_encode %{
    Register dst = as_Register($dst$$reg);
    Register op1 = as_Register($src1$$reg);
    Register op2 = as_Register($src2$$reg);

    if (UseLoongsonISA) {
      __ gsdmult(dst, op1, op2);
    } else {
      __ dmult(op1, op2);
      __ mflo(dst);
    }
  %}
  ins_pipe( pipe_slow );
%}

// Long multiply with the right operand widened from int (ConvI2L is a
// no-op on MIPS64 since ints are kept sign-extended).
instruct mulL_reg_regI2L(mRegL dst, mRegL src1, mRegI src2) %{
  match(Set dst (MulL src1 (ConvI2L src2)));
  format %{ "mulL  $dst, $src1, $src2 @mulL_reg_regI2L" %}
  ins_encode %{
    Register dst = as_Register($dst$$reg);
    Register op1 = as_Register($src1$$reg);
    Register op2 = as_Register($src2$$reg);

    if (UseLoongsonISA) {
      __ gsdmult(dst, op1, op2);
    } else {
      __ dmult(op1, op2);
      __ mflo(dst);
    }
  %}
  ins_pipe( pipe_slow );
%}

// Long divide: 3-operand gsddiv on Loongson, else ddiv + mflo (quotient).
instruct divL_reg_reg(mRegL dst, mRegL src1, mRegL src2) %{
  match(Set dst (DivL src1 src2));
  format %{ "divL  $dst, $src1, $src2 @divL_reg_reg" %}

  ins_encode %{
    Register dst = as_Register($dst$$reg);
    Register op1 = as_Register($src1$$reg);
    Register op2 = as_Register($src2$$reg);

    if (UseLoongsonISA) {
      __ gsddiv(dst, op1, op2);
    } else {
      __ ddiv(op1, op2);
      __ mflo(dst);
    }
  %}
  ins_pipe( pipe_slow );
%}
// Single-precision FP add.
instruct addF_reg_reg(regF dst, regF src1, regF src2) %{
  match(Set dst (AddF src1 src2));
  format %{ "AddF  $dst, $src1, $src2 @addF_reg_reg" %}
  ins_encode %{
    FloatRegister src1 = as_FloatRegister($src1$$reg);
    FloatRegister src2 = as_FloatRegister($src2$$reg);
    FloatRegister dst  = as_FloatRegister($dst$$reg);

    __ add_s(dst, src1, src2);
  %}
  ins_pipe( fpu_regF_regF );
%}

// Single-precision FP subtract.
instruct subF_reg_reg(regF dst, regF src1, regF src2) %{
  match(Set dst (SubF src1 src2));
  format %{ "SubF  $dst, $src1, $src2 @subF_reg_reg" %}
  ins_encode %{
    FloatRegister src1 = as_FloatRegister($src1$$reg);
    FloatRegister src2 = as_FloatRegister($src2$$reg);
    FloatRegister dst  = as_FloatRegister($dst$$reg);

    __ sub_s(dst, src1, src2);
  %}
  ins_pipe( fpu_regF_regF );
%}

// Double-precision FP add.
instruct addD_reg_reg(regD dst, regD src1, regD src2) %{
  match(Set dst (AddD src1 src2));
  format %{ "AddD  $dst, $src1, $src2 @addD_reg_reg" %}
  ins_encode %{
    FloatRegister src1 = as_FloatRegister($src1$$reg);
    FloatRegister src2 = as_FloatRegister($src2$$reg);
    FloatRegister dst  = as_FloatRegister($dst$$reg);

    __ add_d(dst, src1, src2);
  %}
  ins_pipe( fpu_regF_regF );
%}

// Double-precision FP subtract.
instruct subD_reg_reg(regD dst, regD src1, regD src2) %{
  match(Set dst (SubD src1 src2));
  format %{ "SubD  $dst, $src1, $src2 @subD_reg_reg" %}
  ins_encode %{
    FloatRegister src1 = as_FloatRegister($src1$$reg);
    FloatRegister src2 = as_FloatRegister($src2$$reg);
    FloatRegister dst  = as_FloatRegister($dst$$reg);

    __ sub_d(dst, src1, src2);
  %}
  ins_pipe( fpu_regF_regF );
%}

// Single-precision FP negate (flips the sign bit via neg.s).
instruct negF_reg(regF dst, regF src) %{
  match(Set dst (NegF src));
  format %{ "negF  $dst, $src @negF_reg" %}
  ins_encode %{
    FloatRegister src = as_FloatRegister($src$$reg);
    FloatRegister dst = as_FloatRegister($dst$$reg);

    __ neg_s(dst, src);
  %}
  ins_pipe( fpu_regF_regF );
%}

// Double-precision FP negate.
instruct negD_reg(regD dst, regD src) %{
  match(Set dst (NegD src));
  format %{ "negD  $dst, $src @negD_reg" %}
  ins_encode %{
    FloatRegister src = as_FloatRegister($src$$reg);
    FloatRegister dst = as_FloatRegister($dst$$reg);

    __ neg_d(dst, src);
  %}
  ins_pipe( fpu_regF_regF );
%}
// Single-precision FP multiply.
instruct mulF_reg_reg(regF dst, regF src1, regF src2) %{
  match(Set dst (MulF src1 src2));
  format %{ "MULF $dst, $src1, $src2 @mulF_reg_reg" %}
  ins_encode %{
    FloatRegister src1 = $src1$$FloatRegister;
    FloatRegister src2 = $src2$$FloatRegister;
    FloatRegister dst  = $dst$$FloatRegister;

    __ mul_s(dst, src1, src2);
  %}
  ins_pipe( fpu_regF_regF );
%}

// Fused single-precision multiply-add: dst = src1 * src2 + src3.
// The huge ins_cost effectively disables this pattern (see original note);
// the matcher will prefer separate mul + add.
instruct maddF_reg_reg(regF dst, regF src1, regF src2, regF src3) %{
  match(Set dst (AddF (MulF src1 src2) src3));
  // For compatibility reason (e.g. on the Loongson platform), disable this guy.
  ins_cost(44444);
  format %{ "maddF  $dst, $src1, $src2, $src3 @maddF_reg_reg" %}
  ins_encode %{
    FloatRegister src1 = $src1$$FloatRegister;
    FloatRegister src2 = $src2$$FloatRegister;
    FloatRegister src3 = $src3$$FloatRegister;
    FloatRegister dst  = $dst$$FloatRegister;

    __ madd_s(dst, src1, src2, src3);
  %}
  ins_pipe( fpu_regF_regF );
%}

// Mul two double precision floating piont number
instruct mulD_reg_reg(regD dst, regD src1, regD src2) %{
  match(Set dst (MulD src1 src2));
  format %{ "MULD $dst, $src1, $src2 @mulD_reg_reg" %}
  ins_encode %{
    FloatRegister src1 = $src1$$FloatRegister;
    FloatRegister src2 = $src2$$FloatRegister;
    FloatRegister dst  = $dst$$FloatRegister;

    __ mul_d(dst, src1, src2);
  %}
  ins_pipe( fpu_regF_regF );
%}

// Fused double-precision multiply-add, disabled the same way as maddF.
instruct maddD_reg_reg(regD dst, regD src1, regD src2, regD src3) %{
  match(Set dst (AddD (MulD src1 src2) src3));
  // For compatibility reason (e.g. on the Loongson platform), disable this guy.
  ins_cost(44444);
  format %{ "maddD  $dst, $src1, $src2, $src3 @maddD_reg_reg" %}
  ins_encode %{
    FloatRegister src1 = $src1$$FloatRegister;
    FloatRegister src2 = $src2$$FloatRegister;
    FloatRegister src3 = $src3$$FloatRegister;
    FloatRegister dst  = $dst$$FloatRegister;

    __ madd_d(dst, src1, src2, src3);
  %}
  ins_pipe( fpu_regF_regF );
%}
// Single-precision absolute value.
instruct absF_reg(regF dst, regF src) %{
  match(Set dst (AbsF src));
  ins_cost(100);
  format %{ "absF  $dst, $src @absF_reg" %}
  ins_encode %{
    FloatRegister src = as_FloatRegister($src$$reg);
    FloatRegister dst = as_FloatRegister($dst$$reg);

    __ abs_s(dst, src);
  %}
  ins_pipe( fpu_regF_regF );
%}

// Double-precision absolute value (math intrinsic AbsD).
instruct absD_reg(regD dst, regD src) %{
  match(Set dst (AbsD src));
  ins_cost(100);
  format %{ "absD  $dst, $src @absD_reg" %}
  ins_encode %{
    FloatRegister src = as_FloatRegister($src$$reg);
    FloatRegister dst = as_FloatRegister($dst$$reg);

    __ abs_d(dst, src);
  %}
  ins_pipe( fpu_regF_regF );
%}

// Double-precision square root (math intrinsic SqrtD).
instruct sqrtD_reg(regD dst, regD src) %{
  match(Set dst (SqrtD src));
  ins_cost(100);
  format %{ "SqrtD  $dst, $src @sqrtD_reg" %}
  ins_encode %{
    FloatRegister src = as_FloatRegister($src$$reg);
    FloatRegister dst = as_FloatRegister($dst$$reg);

    __ sqrt_d(dst, src);
  %}
  ins_pipe( fpu_regF_regF );
%}

// Single-precision square root, matched from the widen/sqrt/narrow idiom
// (float)sqrt((double)src) and collapsed into a single sqrt.s.
instruct sqrtF_reg(regF dst, regF src) %{
  match(Set dst (ConvD2F (SqrtD (ConvF2D src))));
  ins_cost(100);
  format %{ "SqrtF  $dst, $src @sqrtF_reg" %}
  ins_encode %{
    FloatRegister src = as_FloatRegister($src$$reg);
    FloatRegister dst = as_FloatRegister($dst$$reg);

    __ sqrt_s(dst, src);
  %}
  ins_pipe( fpu_regF_regF );
%}
10310 //----------------------------------Logical Instructions----------------------
10311 //__________________________________Integer Logical Instructions-------------
10313 //And Instuctions
10314 // And Register with Immediate
// And with an arbitrary 32-bit immediate: materialize the constant in the
// scratch register AT, then and. Catch-all; cheaper forms below win when
// the constant fits their shapes.
instruct andI_Reg_immI(mRegI dst, mRegI src1, immI src2) %{
  match(Set dst (AndI src1 src2));

  format %{ "and  $dst, $src1, $src2 #@andI_Reg_immI" %}
  ins_encode %{
    Register dst = $dst$$Register;
    Register src = $src1$$Register;
    int      val = $src2$$constant;

    __ move(AT, val);
    __ andr(dst, src, AT);
  %}
  ins_pipe( ialu_regI_regI );
%}

// And with a constant in [0, 65535]: fits andi's 16-bit zero-extended
// immediate, one instruction.
instruct andI_Reg_imm_0_65535(mRegI dst, mRegI src1, immI_0_65535 src2) %{
  match(Set dst (AndI src1 src2));
  ins_cost(60);

  format %{ "and  $dst, $src1, $src2 #@andI_Reg_imm_0_65535" %}
  ins_encode %{
    Register dst = $dst$$Register;
    Register src = $src1$$Register;
    int      val = $src2$$constant;

    __ andi(dst, src, val);
  %}
  ins_pipe( ialu_regI_regI );
%}

// And with a low-bit mask (2^size - 1): a single ext (extract bit field)
// of the low `size` bits replaces the and.
instruct andI_Reg_immI_nonneg_mask(mRegI dst, mRegI src1, immI_nonneg_mask mask) %{
  match(Set dst (AndI src1 mask));
  ins_cost(60);

  format %{ "and  $dst, $src1, $mask #@andI_Reg_immI_nonneg_mask" %}
  ins_encode %{
    Register dst  = $dst$$Register;
    Register src  = $src1$$Register;
    int      size = Assembler::is_int_mask($mask$$constant);

    __ ext(dst, src, 0, size);
  %}
  ins_pipe( ialu_regI_regI );
%}

// 64-bit variant of the low-bit-mask and, using dext.
instruct andL_Reg_immL_nonneg_mask(mRegL dst, mRegL src1, immL_nonneg_mask mask) %{
  match(Set dst (AndL src1 mask));
  ins_cost(60);

  format %{ "and  $dst, $src1, $mask #@andL_Reg_immL_nonneg_mask" %}
  ins_encode %{
    Register dst  = $dst$$Register;
    Register src  = $src1$$Register;
    int      size = Assembler::is_jlong_mask($mask$$constant);

    __ dext(dst, src, 0, size);
  %}
  ins_pipe( ialu_regI_regI );
%}
// Xor with a constant in [0, 65535]: single xori (16-bit zero-extended
// immediate).
instruct xorI_Reg_imm_0_65535(mRegI dst, mRegI src1, immI_0_65535 src2) %{
  match(Set dst (XorI src1 src2));
  ins_cost(60);

  format %{ "xori  $dst, $src1, $src2 #@xorI_Reg_imm_0_65535" %}
  ins_encode %{
    Register dst = $dst$$Register;
    Register src = $src1$$Register;
    int      val = $src2$$constant;

    __ xori(dst, src, val);
  %}
  ins_pipe( ialu_regI_regI );
%}

// x ^ -1 == ~x, emitted as gsorn(dst, R0, src) = R0 | ~src = ~src.
// Loongson-only (gsorn is a Loongson extension).
instruct xorI_Reg_immI_M1(mRegI dst, mRegI src1, immI_M1 M1) %{
  match(Set dst (XorI src1 M1));
  predicate(UseLoongsonISA && Use3A2000);
  ins_cost(60);

  format %{ "xor  $dst, $src1, $M1 #@xorI_Reg_immI_M1" %}
  ins_encode %{
    Register dst = $dst$$Register;
    Register src = $src1$$Register;

    __ gsorn(dst, R0, src);
  %}
  ins_pipe( ialu_regI_regI );
%}

// Same complement trick with the source narrowed from long; the ConvL2I
// costs nothing since only the (already valid) low bits matter to int users.
instruct xorL2I_Reg_immI_M1(mRegI dst, mRegL src1, immI_M1 M1) %{
  match(Set dst (XorI (ConvL2I src1) M1));
  predicate(UseLoongsonISA && Use3A2000);
  ins_cost(60);

  format %{ "xor  $dst, $src1, $M1 #@xorL2I_Reg_immI_M1" %}
  ins_encode %{
    Register dst = $dst$$Register;
    Register src = $src1$$Register;

    __ gsorn(dst, R0, src);
  %}
  ins_pipe( ialu_regI_regI );
%}

// Long xor with a constant in [0, 65535]: single xori.
instruct xorL_Reg_imm_0_65535(mRegL dst, mRegL src1, immL_0_65535 src2) %{
  match(Set dst (XorL src1 src2));
  ins_cost(60);

  format %{ "xori  $dst, $src1, $src2 #@xorL_Reg_imm_0_65535" %}
  ins_encode %{
    Register dst = $dst$$Register;
    Register src = $src1$$Register;
    long     val = $src2$$constant;

    __ xori(dst, src, val);
  %}
  ins_pipe( ialu_regI_regI );
%}
10435 /*
10436 instruct xorL_Reg_immL_M1(mRegL dst, mRegL src1, immL_M1 M1) %{
10437 match(Set dst (XorL src1 M1));
10438 predicate(UseLoongsonISA);
10439 ins_cost(60);
10441 format %{ "xor $dst, $src1, $M1 #@xorL_Reg_immL_M1" %}
10442 ins_encode %{
10443 Register dst = $dst$$Register;
10444 Register src = $src1$$Register;
10446 __ gsorn(dst, R0, src);
10447 %}
10448 ins_pipe( ialu_regI_regI );
10449 %}
10450 */
// (LoadB mem) & 0xFF folded into a single unsigned-byte load.
// load_UB_enc emits lbu; the format string must show "lbu" — the previous
// "lhu" (load halfword unsigned) mnemonic was wrong and misleading in
// disassembly output.
instruct lbu_and_lmask(mRegI dst, memory mem, immI_255 mask) %{
  match(Set dst (AndI mask (LoadB mem)));
  ins_cost(60);

  format %{ "lbu  $dst, $mem #@lbu_and_lmask" %}
  ins_encode(load_UB_enc(dst, mem));
  ins_pipe( ialu_loadI );
%}
// Commuted form: (LoadB mem) & 0xFF with the mask on the right.
// Same fix as lbu_and_lmask: load_UB_enc emits lbu, so the format string
// must read "lbu", not "lhu".
instruct lbu_and_rmask(mRegI dst, memory mem, immI_255 mask) %{
  match(Set dst (AndI (LoadB mem) mask));
  ins_cost(60);

  format %{ "lbu  $dst, $mem #@lbu_and_rmask" %}
  ins_encode(load_UB_enc(dst, mem));
  ins_pipe( ialu_loadI );
%}
// Integer and, register-register.
instruct andI_Reg_Reg(mRegI dst, mRegI src1, mRegI src2) %{
  match(Set dst (AndI src1 src2));

  format %{ "and  $dst, $src1, $src2 #@andI_Reg_Reg" %}
  ins_encode %{
    Register dst = $dst$$Register;
    Register src1 = $src1$$Register;
    Register src2 = $src2$$Register;
    __ andr(dst, src1, src2);
  %}
  ins_pipe( ialu_regI_regI );
%}

// src1 & (src2 ^ -1) == src1 & ~src2, fused into one Loongson gsandn.
instruct andnI_Reg_nReg(mRegI dst, mRegI src1, mRegI src2, immI_M1 M1) %{
  match(Set dst (AndI src1 (XorI src2 M1)));
  predicate(UseLoongsonISA && Use3A2000);

  format %{ "andn $dst, $src1, $src2 #@andnI_Reg_nReg" %}
  ins_encode %{
    Register dst = $dst$$Register;
    Register src1 = $src1$$Register;
    Register src2 = $src2$$Register;

    __ gsandn(dst, src1, src2);
  %}
  ins_pipe( ialu_regI_regI );
%}

// src1 | (src2 ^ -1) == src1 | ~src2, fused into one Loongson gsorn.
instruct ornI_Reg_nReg(mRegI dst, mRegI src1, mRegI src2, immI_M1 M1) %{
  match(Set dst (OrI src1 (XorI src2 M1)));
  predicate(UseLoongsonISA && Use3A2000);

  format %{ "orn $dst, $src1, $src2 #@ornI_Reg_nReg" %}
  ins_encode %{
    Register dst = $dst$$Register;
    Register src1 = $src1$$Register;
    Register src2 = $src2$$Register;

    __ gsorn(dst, src1, src2);
  %}
  ins_pipe( ialu_regI_regI );
%}

// (src1 ^ -1) & src2 == src2 & ~src1: commuted form, operands swapped
// into gsandn.
instruct andnI_nReg_Reg(mRegI dst, mRegI src1, mRegI src2, immI_M1 M1) %{
  match(Set dst (AndI (XorI src1 M1) src2));
  predicate(UseLoongsonISA && Use3A2000);

  format %{ "andn $dst, $src2, $src1 #@andnI_nReg_Reg" %}
  ins_encode %{
    Register dst = $dst$$Register;
    Register src1 = $src1$$Register;
    Register src2 = $src2$$Register;

    __ gsandn(dst, src2, src1);
  %}
  ins_pipe( ialu_regI_regI );
%}

// (src1 ^ -1) | src2 == src2 | ~src1: commuted form, operands swapped
// into gsorn.
instruct ornI_nReg_Reg(mRegI dst, mRegI src1, mRegI src2, immI_M1 M1) %{
  match(Set dst (OrI (XorI src1 M1) src2));
  predicate(UseLoongsonISA && Use3A2000);

  format %{ "orn $dst, $src2, $src1 #@ornI_nReg_Reg" %}
  ins_encode %{
    Register dst = $dst$$Register;
    Register src1 = $src1$$Register;
    Register src2 = $src2$$Register;

    __ gsorn(dst, src2, src1);
  %}
  ins_pipe( ialu_regI_regI );
%}
10543 // And Long Register with Register
// Long and, register-register.
instruct andL_Reg_Reg(mRegL dst, mRegL src1, mRegL src2) %{
  match(Set dst (AndL src1 src2));
  format %{ "AND    $dst, $src1, $src2 @ andL_Reg_Reg\n\t" %}
  ins_encode %{
    Register dst_reg = as_Register($dst$$reg);
    Register src1_reg = as_Register($src1$$reg);
    Register src2_reg = as_Register($src2$$reg);

    __ andr(dst_reg, src1_reg, src2_reg);
  %}
  ins_pipe( ialu_regL_regL );
%}

// Long and with the right operand widened from int; the ConvI2L is a
// no-op on MIPS64 (ints kept sign-extended).
instruct andL_Reg_Reg_convI2L(mRegL dst, mRegL src1, mRegI src2) %{
  match(Set dst (AndL src1 (ConvI2L src2)));
  format %{ "AND    $dst, $src1, $src2 @ andL_Reg_Reg_convI2L\n\t" %}
  ins_encode %{
    Register dst_reg = as_Register($dst$$reg);
    Register src1_reg = as_Register($src1$$reg);
    Register src2_reg = as_Register($src2$$reg);

    __ andr(dst_reg, src1_reg, src2_reg);
  %}
  ins_pipe( ialu_regL_regL );
%}

// Long and with a constant in [0, 65535]: single andi (zero-extended
// 16-bit immediate).
instruct andL_Reg_imm_0_65535(mRegL dst, mRegL src1, immL_0_65535 src2) %{
  match(Set dst (AndL src1 src2));
  ins_cost(60);

  format %{ "and  $dst, $src1, $src2 #@andL_Reg_imm_0_65535" %}
  ins_encode %{
    Register dst = $dst$$Register;
    Register src = $src1$$Register;
    long     val = $src2$$constant;

    __ andi(dst, src, val);
  %}
  ins_pipe( ialu_regI_regI );
%}

// (int)(src1 & imm16): the narrowing is free because andi already leaves
// a zero-extended 16-bit result.
instruct andL2I_Reg_imm_0_65535(mRegI dst, mRegL src1, immL_0_65535 src2) %{
  match(Set dst (ConvL2I (AndL src1 src2)));
  ins_cost(60);

  format %{ "and  $dst, $src1, $src2 #@andL2I_Reg_imm_0_65535" %}
  ins_encode %{
    Register dst = $dst$$Register;
    Register src = $src1$$Register;
    long     val = $src2$$constant;

    __ andi(dst, src, val);
  %}
  ins_pipe( ialu_regI_regI );
%}
10600 /*
10601 instruct andnL_Reg_nReg(mRegL dst, mRegL src1, mRegL src2, immL_M1 M1) %{
10602 match(Set dst (AndL src1 (XorL src2 M1)));
10603 predicate(UseLoongsonISA);
10605 format %{ "andn $dst, $src1, $src2 #@andnL_Reg_nReg" %}
10606 ins_encode %{
10607 Register dst = $dst$$Register;
10608 Register src1 = $src1$$Register;
10609 Register src2 = $src2$$Register;
10611 __ gsandn(dst, src1, src2);
10612 %}
10613 ins_pipe( ialu_regI_regI );
10614 %}
10615 */
10617 /*
10618 instruct ornL_Reg_nReg(mRegL dst, mRegL src1, mRegL src2, immL_M1 M1) %{
10619 match(Set dst (OrL src1 (XorL src2 M1)));
10620 predicate(UseLoongsonISA);
10622 format %{ "orn $dst, $src1, $src2 #@ornL_Reg_nReg" %}
10623 ins_encode %{
10624 Register dst = $dst$$Register;
10625 Register src1 = $src1$$Register;
10626 Register src2 = $src2$$Register;
10628 __ gsorn(dst, src1, src2);
10629 %}
10630 ins_pipe( ialu_regI_regI );
10631 %}
10632 */
10634 /*
10635 instruct andnL_nReg_Reg(mRegL dst, mRegL src1, mRegL src2, immL_M1 M1) %{
10636 match(Set dst (AndL (XorL src1 M1) src2));
10637 predicate(UseLoongsonISA);
10639 format %{ "andn $dst, $src2, $src1 #@andnL_nReg_Reg" %}
10640 ins_encode %{
10641 Register dst = $dst$$Register;
10642 Register src1 = $src1$$Register;
10643 Register src2 = $src2$$Register;
10645 __ gsandn(dst, src2, src1);
10646 %}
10647 ins_pipe( ialu_regI_regI );
10648 %}
10649 */
10651 /*
10652 instruct ornL_nReg_Reg(mRegL dst, mRegL src1, mRegL src2, immL_M1 M1) %{
10653 match(Set dst (OrL (XorL src1 M1) src2));
10654 predicate(UseLoongsonISA);
10656 format %{ "orn $dst, $src2, $src1 #@ornL_nReg_Reg" %}
10657 ins_encode %{
10658 Register dst = $dst$$Register;
10659 Register src1 = $src1$$Register;
10660 Register src2 = $src2$$Register;
10662 __ gsorn(dst, src2, src1);
10663 %}
10664 ins_pipe( ialu_regI_regI );
10665 %}
10666 */
// The following five rules implement `dst &= negative-mask` by inserting
// zeros into the bit positions the mask clears, using dins (insert the
// zero register into a bit field). Note these are read-modify-write on dst.

// dst & -8 (…11111000): clear bits [0..2] -> dins(dst, R0, pos=0, size=3).
instruct andL_Reg_immL_M8(mRegL dst, immL_M8 M8) %{
  match(Set dst (AndL dst M8));
  ins_cost(60);

  format %{ "and  $dst, $dst, $M8 #@andL_Reg_immL_M8" %}
  ins_encode %{
    Register dst = $dst$$Register;

    __ dins(dst, R0, 0, 3);
  %}
  ins_pipe( ialu_regI_regI );
%}

// dst & -5 (…11111011): clear bit 2 -> dins(dst, R0, pos=2, size=1).
instruct andL_Reg_immL_M5(mRegL dst, immL_M5 M5) %{
  match(Set dst (AndL dst M5));
  ins_cost(60);

  format %{ "and  $dst, $dst, $M5 #@andL_Reg_immL_M5" %}
  ins_encode %{
    Register dst = $dst$$Register;

    __ dins(dst, R0, 2, 1);
  %}
  ins_pipe( ialu_regI_regI );
%}

// dst & -7 (…11111001): clear bits [1..2] -> dins(dst, R0, pos=1, size=2).
instruct andL_Reg_immL_M7(mRegL dst, immL_M7 M7) %{
  match(Set dst (AndL dst M7));
  ins_cost(60);

  format %{ "and  $dst, $dst, $M7 #@andL_Reg_immL_M7" %}
  ins_encode %{
    Register dst = $dst$$Register;

    __ dins(dst, R0, 1, 2);
  %}
  ins_pipe( ialu_regI_regI );
%}

// dst & -4 (…11111100): clear bits [0..1] -> dins(dst, R0, pos=0, size=2).
instruct andL_Reg_immL_M4(mRegL dst, immL_M4 M4) %{
  match(Set dst (AndL dst M4));
  ins_cost(60);

  format %{ "and  $dst, $dst, $M4 #@andL_Reg_immL_M4" %}
  ins_encode %{
    Register dst = $dst$$Register;

    __ dins(dst, R0, 0, 2);
  %}
  ins_pipe( ialu_regI_regI );
%}

// dst & -121 (…10000111): clear bits [3..6] -> dins(dst, R0, pos=3, size=4).
instruct andL_Reg_immL_M121(mRegL dst, immL_M121 M121) %{
  match(Set dst (AndL dst M121));
  ins_cost(60);

  format %{ "and  $dst, $dst, $M121 #@andL_Reg_immL_M121" %}
  ins_encode %{
    Register dst = $dst$$Register;

    __ dins(dst, R0, 3, 4);
  %}
  ins_pipe( ialu_regI_regI );
%}
10733 // Or Long Register with Register
// Long or, register-register.
instruct orL_Reg_Reg(mRegL dst, mRegL src1, mRegL src2) %{
  match(Set dst (OrL src1 src2));
  format %{ "OR    $dst, $src1, $src2 @ orL_Reg_Reg\t" %}
  ins_encode %{
    Register dst_reg = $dst$$Register;
    Register src1_reg = $src1$$Register;
    Register src2_reg = $src2$$Register;

    __ orr(dst_reg, src1_reg, src2_reg);
  %}
  ins_pipe( ialu_regL_regL );
%}

// Long or with a pointer reinterpreted as a long (CastP2X); no conversion
// code is needed since pointers and longs share the same registers.
instruct orL_Reg_P2XReg(mRegL dst, mRegP src1, mRegL src2) %{
  match(Set dst (OrL (CastP2X src1) src2));
  format %{ "OR    $dst, $src1, $src2 @ orL_Reg_P2XReg\t" %}
  ins_encode %{
    Register dst_reg = $dst$$Register;
    Register src1_reg = $src1$$Register;
    Register src2_reg = $src2$$Register;

    __ orr(dst_reg, src1_reg, src2_reg);
  %}
  ins_pipe( ialu_regL_regL );
%}

// Long xor, register-register.
instruct xorL_Reg_Reg(mRegL dst, mRegL src1, mRegL src2) %{
  match(Set dst (XorL src1 src2));
  format %{ "XOR    $dst, $src1, $src2 @ xorL_Reg_Reg\t" %}
  ins_encode %{
    Register dst_reg = as_Register($dst$$reg);
    Register src1_reg = as_Register($src1$$reg);
    Register src2_reg = as_Register($src2$$reg);

    __ xorr(dst_reg, src1_reg, src2_reg);
  %}
  ins_pipe( ialu_regL_regL );
%}
10774 // Shift Left by 8-bit immediate
// Integer shift-left by immediate. sll takes the shift amount modulo 32
// (its 5-bit sa field), matching Java int shift semantics.
instruct salI_Reg_imm(mRegI dst, mRegI src, immI8 shift) %{
  match(Set dst (LShiftI src shift));

  format %{ "SHL $dst, $src, $shift #@salI_Reg_imm" %}
  ins_encode %{
    Register src = $src$$Register;
    Register dst = $dst$$Register;
    int    shamt = $shift$$constant;

    __ sll(dst, src, shamt);
  %}
  ins_pipe( ialu_regI_regI );
%}

// Integer shift-left of a long narrowed to int; the ConvL2I costs nothing
// because sll only reads the low 32 bits (and re-sign-extends the result).
instruct salL2I_Reg_imm(mRegI dst, mRegL src, immI8 shift) %{
  match(Set dst (LShiftI (ConvL2I src) shift));

  format %{ "SHL $dst, $src, $shift #@salL2I_Reg_imm" %}
  ins_encode %{
    Register src = $src$$Register;
    Register dst = $dst$$Register;
    int    shamt = $shift$$constant;

    __ sll(dst, src, shamt);
  %}
  ins_pipe( ialu_regI_regI );
%}

// (src << 16) & 0xFFFF0000: the mask is redundant after a 16-bit left
// shift (low 16 bits are already zero), so a bare sll 16 suffices.
instruct salI_Reg_imm_and_M65536(mRegI dst, mRegI src, immI_16 shift, immI_M65536 mask) %{
  match(Set dst (AndI (LShiftI src shift) mask));

  format %{ "SHL $dst, $src, $shift #@salI_Reg_imm_and_M65536" %}
  ins_encode %{
    Register src = $src$$Register;
    Register dst = $dst$$Register;

    __ sll(dst, src, 16);
  %}
  ins_pipe( ialu_regI_regI );
%}
// (short)((int)(src & 7)): the <<16 >>16 sign-extension is a no-op because
// src & 7 already fits in 3 bits, so a single andi implements the whole tree.
instruct land7_2_s(mRegI dst, mRegL src, immL7 seven, immI_16 sixteen)
%{
  match(Set dst (RShiftI (LShiftI (ConvL2I (AndL src seven)) sixteen) sixteen));

  format %{ "andi    $dst, $src, 7\t# @land7_2_s" %}
  ins_encode %{
    Register src = $src$$Register;
    Register dst = $dst$$Register;

    __ andi(dst, src, 7);
  %}
  ins_pipe(ialu_regI_regI);
%}

// (short)(src1 | imm) with imm in [0, 32767]: collapsed to a single ori.
instruct ori2s(mRegI dst, mRegI src1, immI_0_32767 src2, immI_16 sixteen)
%{
  match(Set dst (RShiftI (LShiftI (OrI src1 src2) sixteen) sixteen));

  format %{ "ori     $dst, $src1, $src2\t# @ori2s" %}
  ins_encode %{
    Register src = $src1$$Register;
    int      val = $src2$$constant;
    Register dst = $dst$$Register;

    __ ori(dst, src, val);
  %}
  ins_pipe(ialu_regI_regI);
%}

// Logical Shift Right by 16, followed by Arithmetic Shift Left by 16.
// This idiom is used by the compiler the i2s bytecode.
// seh = sign-extend halfword, one instruction.
instruct i2s(mRegI dst, mRegI src, immI_16 sixteen)
%{
  match(Set dst (RShiftI (LShiftI src sixteen) sixteen));

  format %{ "i2s  $dst, $src\t# @i2s" %}
  ins_encode %{
    Register src = $src$$Register;
    Register dst = $dst$$Register;

    __ seh(dst, src);
  %}
  ins_pipe(ialu_regI_regI);
%}

// Logical Shift Right by 24, followed by Arithmetic Shift Left by 24.
// This idiom is used by the compiler for the i2b bytecode.
// seb = sign-extend byte, one instruction.
instruct i2b(mRegI dst, mRegI src, immI_24 twentyfour)
%{
  match(Set dst (RShiftI (LShiftI src twentyfour) twentyfour));

  format %{ "i2b  $dst, $src\t# @i2b" %}
  ins_encode %{
    Register src = $src$$Register;
    Register dst = $dst$$Register;

    __ seb(dst, src);
  %}
  ins_pipe(ialu_regI_regI);
%}
// Integer shift-left of a narrowed long by immediate (same encoding as
// salL2I_Reg_imm; both rules cover the (LShiftI (ConvL2I src) shift) tree).
instruct salI_RegL2I_imm(mRegI dst, mRegL src, immI8 shift) %{
  match(Set dst (LShiftI (ConvL2I src) shift));

  format %{ "SHL $dst, $src, $shift #@salI_RegL2I_imm" %}
  ins_encode %{
    Register src = $src$$Register;
    Register dst = $dst$$Register;
    int    shamt = $shift$$constant;

    __ sll(dst, src, shamt);
  %}
  ins_pipe( ialu_regI_regI );
%}

// Integer shift-left by a register amount. sllv uses the low 5 bits of
// the shift register, matching Java's (shift & 31) semantics.
instruct salI_Reg_Reg(mRegI dst, mRegI src, mRegI shift) %{
  match(Set dst (LShiftI src shift));

  format %{ "SHL $dst, $src, $shift #@salI_Reg_Reg" %}
  ins_encode %{
    Register src = $src$$Register;
    Register dst = $dst$$Register;
    Register shamt = $shift$$Register;
    __ sllv(dst, src, shamt);
  %}
  ins_pipe( ialu_regI_regI );
%}
10907 // Shift Left Long
// Long shift-left by immediate. dsll encodes shift amounts 0..31; amounts
// 32..63 need dsll32 (which shifts by sa+32). The constant is reduced
// modulo 64 (low(shamt, 6)) to match Java long shift semantics.
instruct salL_Reg_imm(mRegL dst, mRegL src, immI8 shift) %{
  //predicate(UseNewLongLShift);
  match(Set dst (LShiftL src shift));
  ins_cost(100);
  format %{ "salL    $dst, $src, $shift @ salL_Reg_imm" %}
  ins_encode %{
    Register src_reg = as_Register($src$$reg);
    Register dst_reg = as_Register($dst$$reg);
    int      shamt = $shift$$constant;

    // Fast path for small non-negative amounts; otherwise reduce mod 64
    // and pick dsll vs dsll32.
    if (__ is_simm(shamt, 5))
      __ dsll(dst_reg, src_reg, shamt);
    else
    {
      int sa = Assembler::low(shamt, 6);
      if (sa < 32) {
        __ dsll(dst_reg, src_reg, sa);
      } else {
        __ dsll32(dst_reg, src_reg, sa - 32);
      }
    }
  %}
  ins_pipe( ialu_regL_regL );
%}

// Long shift-left of an int widened to long, by immediate. Same emission
// as salL_Reg_imm (ConvI2L is a no-op on MIPS64).
instruct salL_RegI2L_imm(mRegL dst, mRegI src, immI8 shift) %{
  //predicate(UseNewLongLShift);
  match(Set dst (LShiftL (ConvI2L src) shift));
  ins_cost(100);
  format %{ "salL    $dst, $src, $shift @ salL_RegI2L_imm" %}
  ins_encode %{
    Register src_reg = as_Register($src$$reg);
    Register dst_reg = as_Register($dst$$reg);
    int      shamt = $shift$$constant;

    if (__ is_simm(shamt, 5))
      __ dsll(dst_reg, src_reg, shamt);
    else
    {
      int sa = Assembler::low(shamt, 6);
      if (sa < 32) {
        __ dsll(dst_reg, src_reg, sa);
      } else {
        __ dsll32(dst_reg, src_reg, sa - 32);
      }
    }
  %}
  ins_pipe( ialu_regL_regL );
%}

// Long shift-left by a register amount. dsllv uses the low 6 bits of the
// shift register, matching Java's (shift & 63) semantics.
instruct salL_Reg_Reg(mRegL dst, mRegL src, mRegI shift) %{
  //predicate(UseNewLongLShift);
  match(Set dst (LShiftL src shift));
  ins_cost(100);
  format %{ "salL    $dst, $src, $shift @ salL_Reg_Reg" %}
  ins_encode %{
    Register src_reg = as_Register($src$$reg);
    Register dst_reg = as_Register($dst$$reg);

    __ dsllv(dst_reg, src_reg, $shift$$Register);
  %}
  ins_pipe( ialu_regL_regL );
%}
10973 instruct salL_convI2L_Reg_imm(mRegL dst, mRegI src, immI8 shift) %{
10974 match(Set dst (LShiftL (ConvI2L src) shift));
10975 ins_cost(100);
10976 format %{ "salL $dst, $src, $shift @ salL_convI2L_Reg_imm" %}
10977 ins_encode %{
10978 Register src_reg = as_Register($src$$reg);
10979 Register dst_reg = as_Register($dst$$reg);
10980 int shamt = $shift$$constant;
10982 if (__ is_simm(shamt, 5)) {
10983 __ dsll(dst_reg, src_reg, shamt);
10984 } else {
10985 int sa = Assembler::low(shamt, 6);
10986 if (sa < 32) {
10987 __ dsll(dst_reg, src_reg, sa);
10988 } else {
10989 __ dsll32(dst_reg, src_reg, sa - 32);
10990 }
10991 }
10992 %}
10993 ins_pipe( ialu_regL_regL );
10994 %}
10996 // Shift Right Long
10997 instruct sarL_Reg_imm(mRegL dst, mRegL src, immI8 shift) %{
10998 match(Set dst (RShiftL src shift));
10999 ins_cost(100);
11000 format %{ "sarL $dst, $src, $shift @ sarL_Reg_imm" %}
11001 ins_encode %{
11002 Register src_reg = as_Register($src$$reg);
11003 Register dst_reg = as_Register($dst$$reg);
11004 int shamt = ($shift$$constant & 0x3f);
11005 if (__ is_simm(shamt, 5))
11006 __ dsra(dst_reg, src_reg, shamt);
11007 else {
11008 int sa = Assembler::low(shamt, 6);
11009 if (sa < 32) {
11010 __ dsra(dst_reg, src_reg, sa);
11011 } else {
11012 __ dsra32(dst_reg, src_reg, sa - 32);
11013 }
11014 }
11015 %}
11016 ins_pipe( ialu_regL_regL );
11017 %}
11019 instruct sarL2I_Reg_immI_32_63(mRegI dst, mRegL src, immI_32_63 shift) %{
11020 match(Set dst (ConvL2I (RShiftL src shift)));
11021 ins_cost(100);
11022 format %{ "sarL $dst, $src, $shift @ sarL2I_Reg_immI_32_63" %}
11023 ins_encode %{
11024 Register src_reg = as_Register($src$$reg);
11025 Register dst_reg = as_Register($dst$$reg);
11026 int shamt = $shift$$constant;
11028 __ dsra32(dst_reg, src_reg, shamt - 32);
11029 %}
11030 ins_pipe( ialu_regL_regL );
11031 %}
11033 // Shift Right Long arithmetically
11034 instruct sarL_Reg_Reg(mRegL dst, mRegL src, mRegI shift) %{
11035 //predicate(UseNewLongLShift);
11036 match(Set dst (RShiftL src shift));
11037 ins_cost(100);
11038 format %{ "sarL $dst, $src, $shift @ sarL_Reg_Reg" %}
11039 ins_encode %{
11040 Register src_reg = as_Register($src$$reg);
11041 Register dst_reg = as_Register($dst$$reg);
11043 __ dsrav(dst_reg, src_reg, $shift$$Register);
11044 %}
11045 ins_pipe( ialu_regL_regL );
11046 %}
11048 // Shift Right Long logically
11049 instruct slrL_Reg_Reg(mRegL dst, mRegL src, mRegI shift) %{
11050 match(Set dst (URShiftL src shift));
11051 ins_cost(100);
11052 format %{ "slrL $dst, $src, $shift @ slrL_Reg_Reg" %}
11053 ins_encode %{
11054 Register src_reg = as_Register($src$$reg);
11055 Register dst_reg = as_Register($dst$$reg);
11057 __ dsrlv(dst_reg, src_reg, $shift$$Register);
11058 %}
11059 ins_pipe( ialu_regL_regL );
11060 %}
11062 instruct slrL_Reg_immI_0_31(mRegL dst, mRegL src, immI_0_31 shift) %{
11063 match(Set dst (URShiftL src shift));
11064 ins_cost(80);
11065 format %{ "slrL $dst, $src, $shift @ slrL_Reg_immI_0_31" %}
11066 ins_encode %{
11067 Register src_reg = as_Register($src$$reg);
11068 Register dst_reg = as_Register($dst$$reg);
11069 int shamt = $shift$$constant;
11071 __ dsrl(dst_reg, src_reg, shamt);
11072 %}
11073 ins_pipe( ialu_regL_regL );
11074 %}
11076 instruct slrL_Reg_immI_0_31_and_max_int(mRegI dst, mRegL src, immI_0_31 shift, immI_MaxI max_int) %{
11077 match(Set dst (AndI (ConvL2I (URShiftL src shift)) max_int));
11078 ins_cost(80);
11079 format %{ "dext $dst, $src, $shift, 31 @ slrL_Reg_immI_0_31_and_max_int" %}
11080 ins_encode %{
11081 Register src_reg = as_Register($src$$reg);
11082 Register dst_reg = as_Register($dst$$reg);
11083 int shamt = $shift$$constant;
11085 __ dext(dst_reg, src_reg, shamt, 31);
11086 %}
11087 ins_pipe( ialu_regL_regL );
11088 %}
11090 instruct slrL_P2XReg_immI_0_31(mRegL dst, mRegP src, immI_0_31 shift) %{
11091 match(Set dst (URShiftL (CastP2X src) shift));
11092 ins_cost(80);
11093 format %{ "slrL $dst, $src, $shift @ slrL_P2XReg_immI_0_31" %}
11094 ins_encode %{
11095 Register src_reg = as_Register($src$$reg);
11096 Register dst_reg = as_Register($dst$$reg);
11097 int shamt = $shift$$constant;
11099 __ dsrl(dst_reg, src_reg, shamt);
11100 %}
11101 ins_pipe( ialu_regL_regL );
11102 %}
11104 instruct slrL_Reg_immI_32_63(mRegL dst, mRegL src, immI_32_63 shift) %{
11105 match(Set dst (URShiftL src shift));
11106 ins_cost(80);
11107 format %{ "slrL $dst, $src, $shift @ slrL_Reg_immI_32_63" %}
11108 ins_encode %{
11109 Register src_reg = as_Register($src$$reg);
11110 Register dst_reg = as_Register($dst$$reg);
11111 int shamt = $shift$$constant;
11113 __ dsrl32(dst_reg, src_reg, shamt - 32);
11114 %}
11115 ins_pipe( ialu_regL_regL );
11116 %}
11118 instruct slrL_Reg_immI_convL2I(mRegI dst, mRegL src, immI_32_63 shift) %{
11119 match(Set dst (ConvL2I (URShiftL src shift)));
11120 predicate(n->in(1)->in(2)->get_int() > 32);
11121 ins_cost(80);
11122 format %{ "slrL $dst, $src, $shift @ slrL_Reg_immI_convL2I" %}
11123 ins_encode %{
11124 Register src_reg = as_Register($src$$reg);
11125 Register dst_reg = as_Register($dst$$reg);
11126 int shamt = $shift$$constant;
11128 __ dsrl32(dst_reg, src_reg, shamt - 32);
11129 %}
11130 ins_pipe( ialu_regL_regL );
11131 %}
11133 instruct slrL_P2XReg_immI_32_63(mRegL dst, mRegP src, immI_32_63 shift) %{
11134 match(Set dst (URShiftL (CastP2X src) shift));
11135 ins_cost(80);
11136 format %{ "slrL $dst, $src, $shift @ slrL_P2XReg_immI_32_63" %}
11137 ins_encode %{
11138 Register src_reg = as_Register($src$$reg);
11139 Register dst_reg = as_Register($dst$$reg);
11140 int shamt = $shift$$constant;
11142 __ dsrl32(dst_reg, src_reg, shamt - 32);
11143 %}
11144 ins_pipe( ialu_regL_regL );
11145 %}
11147 // Xor Instructions
11148 // Xor Register with Register
11149 instruct xorI_Reg_Reg(mRegI dst, mRegI src1, mRegI src2) %{
11150 match(Set dst (XorI src1 src2));
11152 format %{ "XOR $dst, $src1, $src2 #@xorI_Reg_Reg" %}
11154 ins_encode %{
11155 Register dst = $dst$$Register;
11156 Register src1 = $src1$$Register;
11157 Register src2 = $src2$$Register;
11158 __ xorr(dst, src1, src2);
11159 __ sll(dst, dst, 0); /* long -> int */
11160 %}
11162 ins_pipe( ialu_regI_regI );
11163 %}
11165 // Or Instructions
11166 // Or Register with Register
11167 instruct orI_Reg_Reg(mRegI dst, mRegI src1, mRegI src2) %{
11168 match(Set dst (OrI src1 src2));
11170 format %{ "OR $dst, $src1, $src2 #@orI_Reg_Reg" %}
11171 ins_encode %{
11172 Register dst = $dst$$Register;
11173 Register src1 = $src1$$Register;
11174 Register src2 = $src2$$Register;
11175 __ orr(dst, src1, src2);
11176 %}
11178 ins_pipe( ialu_regI_regI );
11179 %}
11181 instruct rotI_shr_logical_Reg(mRegI dst, mRegI src, immI_0_31 rshift, immI_0_31 lshift, immI_1 one) %{
11182 match(Set dst (OrI (URShiftI src rshift) (LShiftI (AndI src one) lshift)));
11183 predicate(32 == ((n->in(1)->in(2)->get_int() + n->in(2)->in(2)->get_int())));
11185 format %{ "rotr $dst, $src, 1 ...\n\t"
11186 "srl $dst, $dst, ($rshift-1) @ rotI_shr_logical_Reg" %}
11187 ins_encode %{
11188 Register dst = $dst$$Register;
11189 Register src = $src$$Register;
11190 int rshift = $rshift$$constant;
11192 __ rotr(dst, src, 1);
11193 if (rshift - 1) {
11194 __ srl(dst, dst, rshift - 1);
11195 }
11196 %}
11198 ins_pipe( ialu_regI_regI );
11199 %}
11201 instruct orI_Reg_castP2X(mRegL dst, mRegL src1, mRegP src2) %{
11202 match(Set dst (OrI src1 (CastP2X src2)));
11204 format %{ "OR $dst, $src1, $src2 #@orI_Reg_castP2X" %}
11205 ins_encode %{
11206 Register dst = $dst$$Register;
11207 Register src1 = $src1$$Register;
11208 Register src2 = $src2$$Register;
11209 __ orr(dst, src1, src2);
11210 %}
11212 ins_pipe( ialu_regI_regI );
11213 %}
11215 // Logical Shift Right by 8-bit immediate
11216 instruct shr_logical_Reg_imm(mRegI dst, mRegI src, immI8 shift) %{
11217 match(Set dst (URShiftI src shift));
11218 // effect(KILL cr);
11220 format %{ "SRL $dst, $src, $shift #@shr_logical_Reg_imm" %}
11221 ins_encode %{
11222 Register src = $src$$Register;
11223 Register dst = $dst$$Register;
11224 int shift = $shift$$constant;
11226 __ srl(dst, src, shift);
11227 %}
11228 ins_pipe( ialu_regI_regI );
11229 %}
11231 instruct shr_logical_Reg_imm_nonneg_mask(mRegI dst, mRegI src, immI_0_31 shift, immI_nonneg_mask mask) %{
11232 match(Set dst (AndI (URShiftI src shift) mask));
11234 format %{ "ext $dst, $src, $shift, one-bits($mask) #@shr_logical_Reg_imm_nonneg_mask" %}
11235 ins_encode %{
11236 Register src = $src$$Register;
11237 Register dst = $dst$$Register;
11238 int pos = $shift$$constant;
11239 int size = Assembler::is_int_mask($mask$$constant);
11241 __ ext(dst, src, pos, size);
11242 %}
11243 ins_pipe( ialu_regI_regI );
11244 %}
11246 instruct rolI_Reg_immI_0_31(mRegI dst, immI_0_31 lshift, immI_0_31 rshift)
11247 %{
11248 predicate(0 == ((n->in(1)->in(2)->get_int() + n->in(2)->in(2)->get_int()) & 0x1f));
11249 match(Set dst (OrI (LShiftI dst lshift) (URShiftI dst rshift)));
11251 ins_cost(100);
11252 format %{ "rotr $dst, $dst, $rshift #@rolI_Reg_immI_0_31" %}
11253 ins_encode %{
11254 Register dst = $dst$$Register;
11255 int sa = $rshift$$constant;
11257 __ rotr(dst, dst, sa);
11258 %}
11259 ins_pipe( ialu_regI_regI );
11260 %}
11262 instruct rolL_Reg_immI_0_31(mRegL dst, immI_32_63 lshift, immI_0_31 rshift)
11263 %{
11264 predicate(0 == ((n->in(1)->in(2)->get_int() + n->in(2)->in(2)->get_int()) & 0x3f));
11265 match(Set dst (OrL (LShiftL dst lshift) (URShiftL dst rshift)));
11267 ins_cost(100);
11268 format %{ "rotr $dst, $dst, $rshift #@rolL_Reg_immI_0_31" %}
11269 ins_encode %{
11270 Register dst = $dst$$Register;
11271 int sa = $rshift$$constant;
11273 __ drotr(dst, dst, sa);
11274 %}
11275 ins_pipe( ialu_regI_regI );
11276 %}
11278 instruct rolL_Reg_immI_32_63(mRegL dst, immI_0_31 lshift, immI_32_63 rshift)
11279 %{
11280 predicate(0 == ((n->in(1)->in(2)->get_int() + n->in(2)->in(2)->get_int()) & 0x3f));
11281 match(Set dst (OrL (LShiftL dst lshift) (URShiftL dst rshift)));
11283 ins_cost(100);
11284 format %{ "rotr $dst, $dst, $rshift #@rolL_Reg_immI_32_63" %}
11285 ins_encode %{
11286 Register dst = $dst$$Register;
11287 int sa = $rshift$$constant;
11289 __ drotr32(dst, dst, sa - 32);
11290 %}
11291 ins_pipe( ialu_regI_regI );
11292 %}
11294 instruct rorI_Reg_immI_0_31(mRegI dst, immI_0_31 rshift, immI_0_31 lshift)
11295 %{
11296 predicate(0 == ((n->in(1)->in(2)->get_int() + n->in(2)->in(2)->get_int()) & 0x1f));
11297 match(Set dst (OrI (URShiftI dst rshift) (LShiftI dst lshift)));
11299 ins_cost(100);
11300 format %{ "rotr $dst, $dst, $rshift #@rorI_Reg_immI_0_31" %}
11301 ins_encode %{
11302 Register dst = $dst$$Register;
11303 int sa = $rshift$$constant;
11305 __ rotr(dst, dst, sa);
11306 %}
11307 ins_pipe( ialu_regI_regI );
11308 %}
11310 instruct rorL_Reg_immI_0_31(mRegL dst, immI_0_31 rshift, immI_32_63 lshift)
11311 %{
11312 predicate(0 == ((n->in(1)->in(2)->get_int() + n->in(2)->in(2)->get_int()) & 0x3f));
11313 match(Set dst (OrL (URShiftL dst rshift) (LShiftL dst lshift)));
11315 ins_cost(100);
11316 format %{ "rotr $dst, $dst, $rshift #@rorL_Reg_immI_0_31" %}
11317 ins_encode %{
11318 Register dst = $dst$$Register;
11319 int sa = $rshift$$constant;
11321 __ drotr(dst, dst, sa);
11322 %}
11323 ins_pipe( ialu_regI_regI );
11324 %}
11326 instruct rorL_Reg_immI_32_63(mRegL dst, immI_32_63 rshift, immI_0_31 lshift)
11327 %{
11328 predicate(0 == ((n->in(1)->in(2)->get_int() + n->in(2)->in(2)->get_int()) & 0x3f));
11329 match(Set dst (OrL (URShiftL dst rshift) (LShiftL dst lshift)));
11331 ins_cost(100);
11332 format %{ "rotr $dst, $dst, $rshift #@rorL_Reg_immI_32_63" %}
11333 ins_encode %{
11334 Register dst = $dst$$Register;
11335 int sa = $rshift$$constant;
11337 __ drotr32(dst, dst, sa - 32);
11338 %}
11339 ins_pipe( ialu_regI_regI );
11340 %}
11342 // Logical Shift Right
11343 instruct shr_logical_Reg_Reg(mRegI dst, mRegI src, mRegI shift) %{
11344 match(Set dst (URShiftI src shift));
11346 format %{ "SRL $dst, $src, $shift #@shr_logical_Reg_Reg" %}
11347 ins_encode %{
11348 Register src = $src$$Register;
11349 Register dst = $dst$$Register;
11350 Register shift = $shift$$Register;
11351 __ srlv(dst, src, shift);
11352 %}
11353 ins_pipe( ialu_regI_regI );
11354 %}
11357 instruct shr_arith_Reg_imm(mRegI dst, mRegI src, immI8 shift) %{
11358 match(Set dst (RShiftI src shift));
11359 // effect(KILL cr);
11361 format %{ "SRA $dst, $src, $shift #@shr_arith_Reg_imm" %}
11362 ins_encode %{
11363 Register src = $src$$Register;
11364 Register dst = $dst$$Register;
11365 int shift = $shift$$constant;
11366 __ sra(dst, src, shift);
11367 %}
11368 ins_pipe( ialu_regI_regI );
11369 %}
11371 instruct shr_arith_Reg_Reg(mRegI dst, mRegI src, mRegI shift) %{
11372 match(Set dst (RShiftI src shift));
11373 // effect(KILL cr);
11375 format %{ "SRA $dst, $src, $shift #@shr_arith_Reg_Reg" %}
11376 ins_encode %{
11377 Register src = $src$$Register;
11378 Register dst = $dst$$Register;
11379 Register shift = $shift$$Register;
11380 __ srav(dst, src, shift);
11381 %}
11382 ins_pipe( ialu_regI_regI );
11383 %}
11385 //----------Convert Int to Boolean---------------------------------------------
11387 instruct convI2B(mRegI dst, mRegI src) %{
11388 match(Set dst (Conv2B src));
11390 ins_cost(100);
11391 format %{ "convI2B $dst, $src @ convI2B" %}
11392 ins_encode %{
11393 Register dst = as_Register($dst$$reg);
11394 Register src = as_Register($src$$reg);
11396 if (dst != src) {
11397 __ daddiu(dst, R0, 1);
11398 __ movz(dst, R0, src);
11399 } else {
11400 __ move(AT, src);
11401 __ daddiu(dst, R0, 1);
11402 __ movz(dst, R0, AT);
11403 }
11404 %}
11406 ins_pipe( ialu_regL_regL );
11407 %}
11409 instruct convI2L_reg( mRegL dst, mRegI src) %{
11410 match(Set dst (ConvI2L src));
11412 ins_cost(100);
11413 format %{ "SLL $dst, $src @ convI2L_reg\t" %}
11414 ins_encode %{
11415 Register dst = as_Register($dst$$reg);
11416 Register src = as_Register($src$$reg);
11418 if(dst != src) __ sll(dst, src, 0);
11419 %}
11420 ins_pipe( ialu_regL_regL );
11421 %}
11424 instruct convL2I_reg( mRegI dst, mRegL src ) %{
11425 match(Set dst (ConvL2I src));
11427 format %{ "MOV $dst, $src @ convL2I_reg" %}
11428 ins_encode %{
11429 Register dst = as_Register($dst$$reg);
11430 Register src = as_Register($src$$reg);
11432 __ sll(dst, src, 0);
11433 %}
11435 ins_pipe( ialu_regI_regI );
11436 %}
11438 instruct convL2I2L_reg( mRegL dst, mRegL src ) %{
11439 match(Set dst (ConvI2L (ConvL2I src)));
11441 format %{ "sll $dst, $src, 0 @ convL2I2L_reg" %}
11442 ins_encode %{
11443 Register dst = as_Register($dst$$reg);
11444 Register src = as_Register($src$$reg);
11446 __ sll(dst, src, 0);
11447 %}
11449 ins_pipe( ialu_regI_regI );
11450 %}
11452 instruct convL2D_reg( regD dst, mRegL src ) %{
11453 match(Set dst (ConvL2D src));
11454 format %{ "convL2D $dst, $src @ convL2D_reg" %}
11455 ins_encode %{
11456 Register src = as_Register($src$$reg);
11457 FloatRegister dst = as_FloatRegister($dst$$reg);
11459 __ dmtc1(src, dst);
11460 __ cvt_d_l(dst, dst);
11461 %}
11463 ins_pipe( pipe_slow );
11464 %}
11466 instruct convD2L_reg_fast( mRegL dst, regD src ) %{
11467 match(Set dst (ConvD2L src));
11468 ins_cost(150);
11469 format %{ "convD2L $dst, $src @ convD2L_reg_fast" %}
11470 ins_encode %{
11471 Register dst = as_Register($dst$$reg);
11472 FloatRegister src = as_FloatRegister($src$$reg);
11474 Label Done;
11476 __ trunc_l_d(F30, src);
11477 // max_long: 0x7fffffffffffffff
11478 // __ set64(AT, 0x7fffffffffffffff);
11479 __ daddiu(AT, R0, -1);
11480 __ dsrl(AT, AT, 1);
11481 __ dmfc1(dst, F30);
11483 __ bne(dst, AT, Done);
11484 __ delayed()->mtc1(R0, F30);
11486 __ cvt_d_w(F30, F30);
11487 __ c_ult_d(src, F30);
11488 __ bc1f(Done);
11489 __ delayed()->daddiu(T9, R0, -1);
11491 __ c_un_d(src, src); //NaN?
11492 __ subu(dst, T9, AT);
11493 __ movt(dst, R0);
11495 __ bind(Done);
11496 %}
11498 ins_pipe( pipe_slow );
11499 %}
11501 instruct convD2L_reg_slow( mRegL dst, regD src ) %{
11502 match(Set dst (ConvD2L src));
11503 ins_cost(250);
11504 format %{ "convD2L $dst, $src @ convD2L_reg_slow" %}
11505 ins_encode %{
11506 Register dst = as_Register($dst$$reg);
11507 FloatRegister src = as_FloatRegister($src$$reg);
11509 Label L;
11511 __ c_un_d(src, src); //NaN?
11512 __ bc1t(L);
11513 __ delayed();
11514 __ move(dst, R0);
11516 __ trunc_l_d(F30, src);
11517 __ cfc1(AT, 31);
11518 __ li(T9, 0x10000);
11519 __ andr(AT, AT, T9);
11520 __ beq(AT, R0, L);
11521 __ delayed()->dmfc1(dst, F30);
11523 __ mov_d(F12, src);
11524 __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::d2l), 1);
11525 __ move(dst, V0);
11526 __ bind(L);
11527 %}
11529 ins_pipe( pipe_slow );
11530 %}
11532 instruct convF2I_reg_fast( mRegI dst, regF src ) %{
11533 match(Set dst (ConvF2I src));
11534 ins_cost(150);
11535 format %{ "convf2i $dst, $src @ convF2I_reg_fast" %}
11536 ins_encode %{
11537 Register dreg = $dst$$Register;
11538 FloatRegister fval = $src$$FloatRegister;
11540 __ trunc_w_s(F30, fval);
11541 __ mfc1(dreg, F30);
11542 __ c_un_s(fval, fval); //NaN?
11543 __ movt(dreg, R0);
11544 %}
11546 ins_pipe( pipe_slow );
11547 %}
11549 instruct convF2I_reg_slow( mRegI dst, regF src ) %{
11550 match(Set dst (ConvF2I src));
11551 ins_cost(250);
11552 format %{ "convf2i $dst, $src @ convF2I_reg_slow" %}
11553 ins_encode %{
11554 Register dreg = $dst$$Register;
11555 FloatRegister fval = $src$$FloatRegister;
11556 Label L;
11558 __ c_un_s(fval, fval); //NaN?
11559 __ bc1t(L);
11560 __ delayed();
11561 __ move(dreg, R0);
11563 __ trunc_w_s(F30, fval);
11565 /* Call SharedRuntime:f2i() to do valid convention */
11566 __ cfc1(AT, 31);
11567 __ li(T9, 0x10000);
11568 __ andr(AT, AT, T9);
11569 __ beq(AT, R0, L);
11570 __ delayed()->mfc1(dreg, F30);
11572 __ mov_s(F12, fval);
11574 /* 2014/01/08 Fu : This bug was found when running ezDS's control-panel.
11575 * J 982 C2 javax.swing.text.BoxView.layoutMajorAxis(II[I[I)V (283 bytes) @ 0x000000555c46aa74
11576 *
11577 * An interger array index has been assigned to V0, and then changed from 1 to Integer.MAX_VALUE.
11578 * V0 is corrupted during call_VM_leaf(), and should be preserved.
11579 */
11580 if(dreg != V0) {
11581 __ push(V0);
11582 }
11583 __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::f2i), 1);
11584 if(dreg != V0) {
11585 __ move(dreg, V0);
11586 __ pop(V0);
11587 }
11588 __ bind(L);
11589 %}
11591 ins_pipe( pipe_slow );
11592 %}
11594 instruct convF2L_reg_fast( mRegL dst, regF src ) %{
11595 match(Set dst (ConvF2L src));
11596 ins_cost(150);
11597 format %{ "convf2l $dst, $src @ convF2L_reg_fast" %}
11598 ins_encode %{
11599 Register dreg = $dst$$Register;
11600 FloatRegister fval = $src$$FloatRegister;
11602 __ trunc_l_s(F30, fval);
11603 __ dmfc1(dreg, F30);
11604 __ c_un_s(fval, fval); //NaN?
11605 __ movt(dreg, R0);
11606 %}
11608 ins_pipe( pipe_slow );
11609 %}
11611 instruct convF2L_reg_slow( mRegL dst, regF src ) %{
11612 match(Set dst (ConvF2L src));
11613 ins_cost(250);
11614 format %{ "convf2l $dst, $src @ convF2L_reg_slow" %}
11615 ins_encode %{
11616 Register dst = as_Register($dst$$reg);
11617 FloatRegister fval = $src$$FloatRegister;
11618 Label L;
11620 __ c_un_s(fval, fval); //NaN?
11621 __ bc1t(L);
11622 __ delayed();
11623 __ move(dst, R0);
11625 __ trunc_l_s(F30, fval);
11626 __ cfc1(AT, 31);
11627 __ li(T9, 0x10000);
11628 __ andr(AT, AT, T9);
11629 __ beq(AT, R0, L);
11630 __ delayed()->dmfc1(dst, F30);
11632 __ mov_s(F12, fval);
11633 __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::f2l), 1);
11634 __ move(dst, V0);
11635 __ bind(L);
11636 %}
11638 ins_pipe( pipe_slow );
11639 %}
11641 instruct convL2F_reg( regF dst, mRegL src ) %{
11642 match(Set dst (ConvL2F src));
11643 format %{ "convl2f $dst, $src @ convL2F_reg" %}
11644 ins_encode %{
11645 FloatRegister dst = $dst$$FloatRegister;
11646 Register src = as_Register($src$$reg);
11647 Label L;
11649 __ dmtc1(src, dst);
11650 __ cvt_s_l(dst, dst);
11651 %}
11653 ins_pipe( pipe_slow );
11654 %}
11656 instruct convI2F_reg( regF dst, mRegI src ) %{
11657 match(Set dst (ConvI2F src));
11658 format %{ "convi2f $dst, $src @ convI2F_reg" %}
11659 ins_encode %{
11660 Register src = $src$$Register;
11661 FloatRegister dst = $dst$$FloatRegister;
11663 __ mtc1(src, dst);
11664 __ cvt_s_w(dst, dst);
11665 %}
11667 ins_pipe( fpu_regF_regF );
11668 %}
11670 instruct cmpLTMask_immI0( mRegI dst, mRegI p, immI0 zero ) %{
11671 match(Set dst (CmpLTMask p zero));
11672 ins_cost(100);
11674 format %{ "sra $dst, $p, 31 @ cmpLTMask_immI0" %}
11675 ins_encode %{
11676 Register src = $p$$Register;
11677 Register dst = $dst$$Register;
11679 __ sra(dst, src, 31);
11680 %}
11681 ins_pipe( pipe_slow );
11682 %}
11685 instruct cmpLTMask( mRegI dst, mRegI p, mRegI q ) %{
11686 match(Set dst (CmpLTMask p q));
11687 ins_cost(400);
11689 format %{ "cmpLTMask $dst, $p, $q @ cmpLTMask" %}
11690 ins_encode %{
11691 Register p = $p$$Register;
11692 Register q = $q$$Register;
11693 Register dst = $dst$$Register;
11695 __ slt(dst, p, q);
11696 __ subu(dst, R0, dst);
11697 %}
11698 ins_pipe( pipe_slow );
11699 %}
11701 instruct convP2B(mRegI dst, mRegP src) %{
11702 match(Set dst (Conv2B src));
11704 ins_cost(100);
11705 format %{ "convP2B $dst, $src @ convP2B" %}
11706 ins_encode %{
11707 Register dst = as_Register($dst$$reg);
11708 Register src = as_Register($src$$reg);
11710 if (dst != src) {
11711 __ daddiu(dst, R0, 1);
11712 __ movz(dst, R0, src);
11713 } else {
11714 __ move(AT, src);
11715 __ daddiu(dst, R0, 1);
11716 __ movz(dst, R0, AT);
11717 }
11718 %}
11720 ins_pipe( ialu_regL_regL );
11721 %}
11724 instruct convI2D_reg_reg(regD dst, mRegI src) %{
11725 match(Set dst (ConvI2D src));
11726 format %{ "conI2D $dst, $src @convI2D_reg" %}
11727 ins_encode %{
11728 Register src = $src$$Register;
11729 FloatRegister dst = $dst$$FloatRegister;
11730 __ mtc1(src, dst);
11731 __ cvt_d_w(dst, dst);
11732 %}
11733 ins_pipe( fpu_regF_regF );
11734 %}
11736 instruct convF2D_reg_reg(regD dst, regF src) %{
11737 match(Set dst (ConvF2D src));
11738 format %{ "convF2D $dst, $src\t# @convF2D_reg_reg" %}
11739 ins_encode %{
11740 FloatRegister dst = $dst$$FloatRegister;
11741 FloatRegister src = $src$$FloatRegister;
11743 __ cvt_d_s(dst, src);
11744 %}
11745 ins_pipe( fpu_regF_regF );
11746 %}
11748 instruct convD2F_reg_reg(regF dst, regD src) %{
11749 match(Set dst (ConvD2F src));
11750 format %{ "convD2F $dst, $src\t# @convD2F_reg_reg" %}
11751 ins_encode %{
11752 FloatRegister dst = $dst$$FloatRegister;
11753 FloatRegister src = $src$$FloatRegister;
11755 __ cvt_s_d(dst, src);
11756 %}
11757 ins_pipe( fpu_regF_regF );
11758 %}
11760 // Convert a double to an int. If the double is a NAN, stuff a zero in instead.
11761 instruct convD2I_reg_reg_fast( mRegI dst, regD src ) %{
11762 match(Set dst (ConvD2I src));
11764 ins_cost(150);
11765 format %{ "convD2I $dst, $src\t# @ convD2I_reg_reg_fast" %}
11767 ins_encode %{
11768 FloatRegister src = $src$$FloatRegister;
11769 Register dst = $dst$$Register;
11771 Label Done;
11773 __ trunc_w_d(F30, src);
11774 // max_int: 2147483647
11775 __ move(AT, 0x7fffffff);
11776 __ mfc1(dst, F30);
11778 __ bne(dst, AT, Done);
11779 __ delayed()->mtc1(R0, F30);
11781 __ cvt_d_w(F30, F30);
11782 __ c_ult_d(src, F30);
11783 __ bc1f(Done);
11784 __ delayed()->addiu(T9, R0, -1);
11786 __ c_un_d(src, src); //NaN?
11787 __ subu32(dst, T9, AT);
11788 __ movt(dst, R0);
11790 __ bind(Done);
11791 %}
11792 ins_pipe( pipe_slow );
11793 %}
11795 instruct convD2I_reg_reg_slow( mRegI dst, regD src ) %{
11796 match(Set dst (ConvD2I src));
11798 ins_cost(250);
11799 format %{ "convD2I $dst, $src\t# @ convD2I_reg_reg_slow" %}
11801 ins_encode %{
11802 FloatRegister src = $src$$FloatRegister;
11803 Register dst = $dst$$Register;
11804 Label L;
11806 __ trunc_w_d(F30, src);
11807 __ cfc1(AT, 31);
11808 __ li(T9, 0x10000);
11809 __ andr(AT, AT, T9);
11810 __ beq(AT, R0, L);
11811 __ delayed()->mfc1(dst, F30);
11813 __ mov_d(F12, src);
11814 __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::d2i), 1);
11815 __ move(dst, V0);
11816 __ bind(L);
11818 %}
11819 ins_pipe( pipe_slow );
11820 %}
11822 // Convert oop pointer into compressed form
11823 instruct encodeHeapOop(mRegN dst, mRegP src) %{
11824 predicate(n->bottom_type()->make_ptr()->ptr() != TypePtr::NotNull);
11825 match(Set dst (EncodeP src));
11826 format %{ "encode_heap_oop $dst,$src" %}
11827 ins_encode %{
11828 Register src = $src$$Register;
11829 Register dst = $dst$$Register;
11830 if (src != dst) {
11831 __ move(dst, src);
11832 }
11833 __ encode_heap_oop(dst);
11834 %}
11835 ins_pipe( ialu_regL_regL );
11836 %}
11838 instruct encodeHeapOop_not_null(mRegN dst, mRegP src) %{
11839 predicate(n->bottom_type()->make_ptr()->ptr() == TypePtr::NotNull);
11840 match(Set dst (EncodeP src));
11841 format %{ "encode_heap_oop_not_null $dst,$src @ encodeHeapOop_not_null" %}
11842 ins_encode %{
11843 __ encode_heap_oop_not_null($dst$$Register, $src$$Register);
11844 %}
11845 ins_pipe( ialu_regL_regL );
11846 %}
11848 instruct decodeHeapOop(mRegP dst, mRegN src) %{
11849 predicate(n->bottom_type()->is_ptr()->ptr() != TypePtr::NotNull &&
11850 n->bottom_type()->is_ptr()->ptr() != TypePtr::Constant);
11851 match(Set dst (DecodeN src));
11852 format %{ "decode_heap_oop $dst,$src @ decodeHeapOop" %}
11853 ins_encode %{
11854 Register s = $src$$Register;
11855 Register d = $dst$$Register;
11856 if (s != d) {
11857 __ move(d, s);
11858 }
11859 __ decode_heap_oop(d);
11860 %}
11861 ins_pipe( ialu_regL_regL );
11862 %}
11864 instruct decodeHeapOop_not_null(mRegP dst, mRegN src) %{
11865 predicate(n->bottom_type()->is_ptr()->ptr() == TypePtr::NotNull ||
11866 n->bottom_type()->is_ptr()->ptr() == TypePtr::Constant);
11867 match(Set dst (DecodeN src));
11868 format %{ "decode_heap_oop_not_null $dst,$src @ decodeHeapOop_not_null" %}
11869 ins_encode %{
11870 Register s = $src$$Register;
11871 Register d = $dst$$Register;
11872 if (s != d) {
11873 __ decode_heap_oop_not_null(d, s);
11874 } else {
11875 __ decode_heap_oop_not_null(d);
11876 }
11877 %}
11878 ins_pipe( ialu_regL_regL );
11879 %}
11881 instruct encodeKlass_not_null(mRegN dst, mRegP src) %{
11882 match(Set dst (EncodePKlass src));
11883 format %{ "encode_heap_oop_not_null $dst,$src @ encodeKlass_not_null" %}
11884 ins_encode %{
11885 __ encode_klass_not_null($dst$$Register, $src$$Register);
11886 %}
11887 ins_pipe( ialu_regL_regL );
11888 %}
11890 instruct decodeKlass_not_null(mRegP dst, mRegN src) %{
11891 match(Set dst (DecodeNKlass src));
11892 format %{ "decode_heap_klass_not_null $dst,$src" %}
11893 ins_encode %{
11894 Register s = $src$$Register;
11895 Register d = $dst$$Register;
11896 if (s != d) {
11897 __ decode_klass_not_null(d, s);
11898 } else {
11899 __ decode_klass_not_null(d);
11900 }
11901 %}
11902 ins_pipe( ialu_regL_regL );
11903 %}
11905 //FIXME
11906 instruct tlsLoadP(mRegP dst) %{
11907 match(Set dst (ThreadLocal));
11909 ins_cost(0);
11910 format %{ " get_thread in $dst #@tlsLoadP" %}
11911 ins_encode %{
11912 Register dst = $dst$$Register;
11913 #ifdef OPT_THREAD
11914 __ move(dst, TREG);
11915 #else
11916 __ get_thread(dst);
11917 #endif
11918 %}
11920 ins_pipe( ialu_loadI );
11921 %}
11924 instruct checkCastPP( mRegP dst ) %{
11925 match(Set dst (CheckCastPP dst));
11927 format %{ "#checkcastPP of $dst (empty encoding) #@chekCastPP" %}
11928 ins_encode( /*empty encoding*/ );
11929 ins_pipe( empty );
11930 %}
11932 instruct castPP(mRegP dst)
11933 %{
11934 match(Set dst (CastPP dst));
11936 size(0);
11937 format %{ "# castPP of $dst" %}
11938 ins_encode(/* empty encoding */);
11939 ins_pipe(empty);
11940 %}
11942 instruct castII( mRegI dst ) %{
11943 match(Set dst (CastII dst));
11944 format %{ "#castII of $dst empty encoding" %}
11945 ins_encode( /*empty encoding*/ );
11946 ins_cost(0);
11947 ins_pipe( empty );
11948 %}
11950 // Return Instruction
11951 // Remove the return address & jump to it.
11952 instruct Ret() %{
11953 match(Return);
11954 format %{ "RET #@Ret" %}
11956 ins_encode %{
11957 __ jr(RA);
11958 __ nop();
11959 %}
11961 ins_pipe( pipe_jump );
11962 %}
11964 /*
11965 // For Loongson CPUs, jr seems too slow, so this rule shouldn't be imported.
11966 instruct jumpXtnd(mRegL switch_val) %{
11967 match(Jump switch_val);
11969 ins_cost(350);
11971 format %{ "load T9 <-- [$constanttablebase, $switch_val, $constantoffset] @ jumpXtnd\n\t"
11972 "jr T9\n\t"
11973 "nop" %}
11974 ins_encode %{
11975 Register table_base = $constanttablebase;
11976 int con_offset = $constantoffset;
11977 Register switch_reg = $switch_val$$Register;
11979 if (UseLoongsonISA) {
11980 if (Assembler::is_simm(con_offset, 8)) {
11981 __ gsldx(T9, table_base, switch_reg, con_offset);
11982 } else if (Assembler::is_simm16(con_offset)) {
11983 __ daddu(T9, table_base, switch_reg);
11984 __ ld(T9, T9, con_offset);
11985 } else {
11986 __ move(T9, con_offset);
11987 __ daddu(AT, table_base, switch_reg);
11988 __ gsldx(T9, AT, T9, 0);
11989 }
11990 } else {
11991 if (Assembler::is_simm16(con_offset)) {
11992 __ daddu(T9, table_base, switch_reg);
11993 __ ld(T9, T9, con_offset);
11994 } else {
11995 __ move(T9, con_offset);
11996 __ daddu(AT, table_base, switch_reg);
11997 __ daddu(AT, T9, AT);
11998 __ ld(T9, AT, 0);
11999 }
12000 }
12002 __ jr(T9);
12003 __ nop();
12005 %}
12006 ins_pipe(pipe_jump);
12007 %}
12008 */
12010 // Jump Direct - Label defines a relative address from JMP
12011 instruct jmpDir(label labl) %{
12012 match(Goto);
12013 effect(USE labl);
12015 ins_cost(300);
12016 format %{ "JMP $labl #@jmpDir" %}
12018 ins_encode %{
12019 Label &L = *($labl$$label);
12020 if(&L)
12021 __ b(L);
12022 else
12023 __ b(int(0));
12024 __ nop();
12025 %}
12027 ins_pipe( pipe_jump );
12028 ins_pc_relative(1);
12029 %}
12033 // Tail Jump; remove the return address; jump to target.
12034 // TailCall above leaves the return address around.
12035 // TailJump is used in only one place, the rethrow_Java stub (fancy_jump=2).
12036 // ex_oop (Exception Oop) is needed in %o0 at the jump. As there would be a
12037 // "restore" before this instruction (in Epilogue), we need to materialize it
12038 // in %i0.
12039 //FIXME
// Tail jump used by the rethrow stub: drop the current frame's return
// address and jump to 'jump_target', handing the exception oop/pc to the
// callee in V0/V1.
instruct tailjmpInd(mRegP jump_target,mRegP ex_oop) %{
  match( TailJump jump_target ex_oop );
  ins_cost(200);
  format %{ "Jmp $jump_target ; ex_oop = $ex_oop #@tailjmpInd" %}
  ins_encode %{
    Register target = $jump_target$$Register;

    /* 2012/9/14 Jin: V0, V1 are indicated in:
     *   [stubGenerator_mips.cpp] generate_forward_exception()
     *   [runtime_mips.cpp] OptoRuntime::generate_exception_blob()
     */
    Register oop = $ex_oop$$Register;
    Register exception_oop = V0;
    Register exception_pc = V1;

    // The return address (RA) becomes the exception pc; the exception oop
    // is moved into the register the stubs expect.
    __ move(exception_pc, RA);
    __ move(exception_oop, oop);

    __ jr(target);
    __ nop();   // branch delay slot
  %}
  ins_pipe( pipe_jump );
%}
12064 // ============================================================================
12065 // Procedure Call/Return Instructions
12066 // Call Java Static Instruction
12067 // Note: If this code changes, the corresponding ret_addr_offset() and
12068 // compute_padding() functions will have to be adjusted.
// Direct call to a Java static method.  The actual call sequence is in the
// Java_Static_Call encoding; ret_addr_offset()/compute_padding() must be
// kept in sync with it (see note above).
instruct CallStaticJavaDirect(method meth) %{
  match(CallStaticJava);
  effect(USE meth);

  ins_cost(300);
  format %{ "CALL,static #@CallStaticJavaDirect " %}
  ins_encode( Java_Static_Call( meth ) );
  ins_pipe( pipe_slow );
  ins_pc_relative(1);
  ins_alignment(16);   // call site alignment, required for call patching
%}
12081 // Call Java Dynamic Instruction
12082 // Note: If this code changes, the corresponding ret_addr_offset() and
12083 // compute_padding() functions will have to be adjusted.
// Direct call to a Java method through an inline cache.  The IC klass
// register is preloaded with (oop)-1 by the Java_Dynamic_Call encoding;
// ret_addr_offset()/compute_padding() must be kept in sync (see note above).
instruct CallDynamicJavaDirect(method meth) %{
  match(CallDynamicJava);
  effect(USE meth);

  ins_cost(300);
  format %{"MOV IC_Klass, (oop)-1 @ CallDynamicJavaDirect\n\t"
           "CallDynamic @ CallDynamicJavaDirect" %}
  ins_encode( Java_Dynamic_Call( meth ) );
  ins_pipe( pipe_slow );
  ins_pc_relative(1);
  ins_alignment(16);   // call site alignment, required for call patching
%}
// Call into the runtime (leaf, no FP arguments, no safepoint) via the
// shared Java_To_Runtime encoding.
instruct CallLeafNoFPDirect(method meth) %{
  match(CallLeafNoFP);
  effect(USE meth);

  ins_cost(300);
  format %{ "CALL_LEAF_NOFP,runtime " %}
  ins_encode(Java_To_Runtime(meth));
  ins_pipe( pipe_slow );
  ins_pc_relative(1);
  ins_alignment(16);
%}
12109 // Prefetch instructions.
12111 instruct prefetchrNTA( memory mem ) %{
12112 match(PrefetchRead mem);
12113 ins_cost(125);
12115 format %{ "pref $mem\t# Prefetch into non-temporal cache for read @ prefetchrNTA" %}
12116 ins_encode %{
12117 int base = $mem$$base;
12118 int index = $mem$$index;
12119 int scale = $mem$$scale;
12120 int disp = $mem$$disp;
12122 if( index != 0 ) {
12123 if (scale == 0) {
12124 __ daddu(AT, as_Register(base), as_Register(index));
12125 } else {
12126 __ dsll(AT, as_Register(index), scale);
12127 __ daddu(AT, as_Register(base), AT);
12128 }
12129 } else {
12130 __ move(AT, as_Register(base));
12131 }
12132 if( Assembler::is_simm16(disp) ) {
12133 __ daddiu(AT, as_Register(base), disp);
12134 __ daddiu(AT, AT, disp);
12135 } else {
12136 __ move(T9, disp);
12137 __ daddu(AT, as_Register(base), T9);
12138 }
12139 __ pref(0, AT, 0); //hint: 0:load
12140 %}
12141 ins_pipe(pipe_slow);
12142 %}
12144 instruct prefetchwNTA( memory mem ) %{
12145 match(PrefetchWrite mem);
12146 ins_cost(125);
12147 format %{ "pref $mem\t# Prefetch to non-temporal cache for write @ prefetchwNTA" %}
12148 ins_encode %{
12149 int base = $mem$$base;
12150 int index = $mem$$index;
12151 int scale = $mem$$scale;
12152 int disp = $mem$$disp;
12154 if( index != 0 ) {
12155 if (scale == 0) {
12156 __ daddu(AT, as_Register(base), as_Register(index));
12157 } else {
12158 __ dsll(AT, as_Register(index), scale);
12159 __ daddu(AT, as_Register(base), AT);
12160 }
12161 } else {
12162 __ move(AT, as_Register(base));
12163 }
12164 if( Assembler::is_simm16(disp) ) {
12165 __ daddiu(AT, as_Register(base), disp);
12166 __ daddiu(AT, AT, disp);
12167 } else {
12168 __ move(T9, disp);
12169 __ daddu(AT, as_Register(base), T9);
12170 }
12171 __ pref(1, AT, 0); //hint: 1:store
12172 %}
12173 ins_pipe(pipe_slow);
12174 %}
12176 // Prefetch instructions for allocation.
// Prefetch for allocation: implemented as a dummy byte load into R0 (the
// hard-wired zero register, so the loaded value is discarded).  Uses the
// Loongson gslbx scaled-load where available to save instructions.
instruct prefetchAllocNTA( memory mem ) %{
  match(PrefetchAllocation mem);
  ins_cost(125);
  format %{ "pref $mem\t# Prefetch allocation @ prefetchAllocNTA" %}
  ins_encode %{
    int base = $mem$$base;
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;

    // Loading into R0 discards the data; only the cache fill matters.
    Register dst = R0;

    if( index != 0 ) {
      if( Assembler::is_simm16(disp) ) {
        if( UseLoongsonISA ) {
          // gslbx folds base+index+disp into one instruction.
          if (scale == 0) {
            __ gslbx(dst, as_Register(base), as_Register(index), disp);
          } else {
            __ dsll(AT, as_Register(index), scale);
            __ gslbx(dst, as_Register(base), AT, disp);
          }
        } else {
          if (scale == 0) {
            __ addu(AT, as_Register(base), as_Register(index));
          } else {
            __ dsll(AT, as_Register(index), scale);
            __ addu(AT, as_Register(base), AT);
          }
          __ lb(dst, AT, disp);
        }
      } else {
        // Displacement does not fit in 16 bits: materialize it in T9.
        if (scale == 0) {
          __ addu(AT, as_Register(base), as_Register(index));
        } else {
          __ dsll(AT, as_Register(index), scale);
          __ addu(AT, as_Register(base), AT);
        }
        __ move(T9, disp);
        if( UseLoongsonISA ) {
          __ gslbx(dst, AT, T9, 0);
        } else {
          __ addu(AT, AT, T9);
          __ lb(dst, AT, 0);
        }
      }
    } else {
      // No index register.
      if( Assembler::is_simm16(disp) ) {
        __ lb(dst, as_Register(base), disp);
      } else {
        __ move(T9, disp);
        if( UseLoongsonISA ) {
          __ gslbx(dst, as_Register(base), T9, 0);
        } else {
          __ addu(AT, as_Register(base), T9);
          __ lb(dst, AT, 0);
        }
      }
    }
  %}
  ins_pipe(pipe_slow);
%}
12241 // Call runtime without safepoint
// Call runtime without safepoint (leaf call, FP arguments allowed).
instruct CallLeafDirect(method meth) %{
  match(CallLeaf);
  effect(USE meth);

  ins_cost(300);
  format %{ "CALL_LEAF,runtime #@CallLeafDirect " %}
  ins_encode(Java_To_Runtime(meth));
  ins_pipe( pipe_slow );
  ins_pc_relative(1);
  ins_alignment(16);
%}
12254 // Load Char (16bit unsigned)
// Load Char (16-bit unsigned) into an int register.
instruct loadUS(mRegI dst, memory mem) %{
  match(Set dst (LoadUS mem));

  ins_cost(125);
  format %{ "loadUS $dst,$mem @ loadC" %}
  ins_encode(load_C_enc(dst, mem));
  ins_pipe( ialu_loadI );
%}
// Load Char (16-bit unsigned) combined with int->long conversion; the
// zero-extending load already yields the correct long value.
instruct loadUS_convI2L(mRegL dst, memory mem) %{
  match(Set dst (ConvI2L (LoadUS mem)));

  ins_cost(125);
  format %{ "loadUS $dst,$mem @ loadUS_convI2L" %}
  ins_encode(load_C_enc(dst, mem));
  ins_pipe( ialu_loadI );
%}
12273 // Store Char (16bit unsigned)
// Store Char (16-bit) from a register.
instruct storeC(memory mem, mRegI src) %{
  match(Set mem (StoreC mem src));

  ins_cost(125);
  format %{ "storeC $src, $mem @ storeC" %}
  ins_encode(store_C_reg_enc(mem, src));
  ins_pipe( ialu_loadI );
%}
// Store Char (16-bit) zero; avoids materializing the constant.
instruct storeC0(memory mem, immI0 zero) %{
  match(Set mem (StoreC mem zero));

  ins_cost(125);
  format %{ "storeC $zero, $mem @ storeC0" %}
  ins_encode(store_C0_enc(mem));
  ins_pipe( ialu_loadI );
%}
// Load float constant 0.0f: move the zero register into the FPU register
// (bit pattern of +0.0f is all zeros), avoiding a constant-table load.
instruct loadConF0(regF dst, immF0 zero) %{
  match(Set dst zero);
  ins_cost(100);

  format %{ "mov $dst, zero @ loadConF0\n"%}
  ins_encode %{
    FloatRegister dst = $dst$$FloatRegister;

    __ mtc1(R0, dst);
  %}
  ins_pipe( fpu_loadF );
%}
// Load a float constant from the per-method constant table.
instruct loadConF(regF dst, immF src) %{
  match(Set dst src);
  ins_cost(125);

  format %{ "lwc1 $dst, $constantoffset[$constanttablebase] # load FLOAT $src from table @ loadConF" %}
  ins_encode %{
    int con_offset = $constantoffset($src);

    if (Assembler::is_simm16(con_offset)) {
      __ lwc1($dst$$FloatRegister, $constanttablebase, con_offset);
    } else {
      // Offset too large for the 16-bit immediate: materialize it in AT.
      __ set64(AT, con_offset);
      if (UseLoongsonISA) {
        __ gslwxc1($dst$$FloatRegister, $constanttablebase, AT, 0);
      } else {
        __ daddu(AT, $constanttablebase, AT);
        __ lwc1($dst$$FloatRegister, AT, 0);
      }
    }
  %}
  ins_pipe( fpu_loadF );
%}
// Load double constant 0.0: 64-bit move of the zero register into the FPU
// register (bit pattern of +0.0 is all zeros).
instruct loadConD0(regD dst, immD0 zero) %{
  match(Set dst zero);
  ins_cost(100);

  format %{ "mov $dst, zero @ loadConD0"%}
  ins_encode %{
    FloatRegister dst = as_FloatRegister($dst$$reg);

    __ dmtc1(R0, dst);
  %}
  ins_pipe( fpu_loadF );
%}
// Load a double constant from the per-method constant table.
instruct loadConD(regD dst, immD src) %{
  match(Set dst src);
  ins_cost(125);

  format %{ "ldc1 $dst, $constantoffset[$constanttablebase] # load DOUBLE $src from table @ loadConD" %}
  ins_encode %{
    int con_offset = $constantoffset($src);

    if (Assembler::is_simm16(con_offset)) {
      __ ldc1($dst$$FloatRegister, $constanttablebase, con_offset);
    } else {
      // Offset too large for the 16-bit immediate: materialize it in AT.
      __ set64(AT, con_offset);
      if (UseLoongsonISA) {
        __ gsldxc1($dst$$FloatRegister, $constanttablebase, AT, 0);
      } else {
        __ daddu(AT, $constanttablebase, AT);
        __ ldc1($dst$$FloatRegister, AT, 0);
      }
    }
  %}
  ins_pipe( fpu_loadF );
%}
12367 // Store register Float value (it is faster than store from FPU register)
// Store register Float value (it is faster than store from FPU register).
instruct storeF_reg( memory mem, regF src) %{
  match(Set mem (StoreF mem src));

  ins_cost(50);
  format %{ "store $mem, $src\t# store float @ storeF_reg" %}
  ins_encode(store_F_reg_enc(mem, src));
  ins_pipe( fpu_storeF );
%}
// Store float 0.0f by storing the integer zero register with sw: the bit
// pattern of +0.0f is zero, so no FPU register is needed at all.
instruct storeF_imm0( memory mem, immF0 zero) %{
  match(Set mem (StoreF mem zero));

  ins_cost(40);
  format %{ "store $mem, zero\t# store float @ storeF_imm0" %}
  ins_encode %{
    int base = $mem$$base;
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;

    if( index != 0 ) {
      if ( UseLoongsonISA ) {
        // gsswx folds base+index+small-disp into one instruction; pick the
        // cheapest encoding the displacement allows.
        if ( Assembler::is_simm(disp, 8) ) {
          if ( scale == 0 ) {
            __ gsswx(R0, as_Register(base), as_Register(index), disp);
          } else {
            __ dsll(T9, as_Register(index), scale);
            __ gsswx(R0, as_Register(base), T9, disp);
          }
        } else if ( Assembler::is_simm16(disp) ) {
          if ( scale == 0 ) {
            __ daddu(AT, as_Register(base), as_Register(index));
          } else {
            __ dsll(T9, as_Register(index), scale);
            __ daddu(AT, as_Register(base), T9);
          }
          __ sw(R0, AT, disp);
        } else {
          // Large displacement: fold disp into the index side, then store
          // relative to base.
          if ( scale == 0 ) {
            __ move(T9, disp);
            __ daddu(AT, as_Register(index), T9);
            __ gsswx(R0, as_Register(base), AT, 0);
          } else {
            __ dsll(T9, as_Register(index), scale);
            __ move(AT, disp);
            __ daddu(AT, AT, T9);
            __ gsswx(R0, as_Register(base), AT, 0);
          }
        }
      } else { //not use loongson isa
        if(scale != 0) {
          __ dsll(T9, as_Register(index), scale);
          __ daddu(AT, as_Register(base), T9);
        } else {
          __ daddu(AT, as_Register(base), as_Register(index));
        }
        if( Assembler::is_simm16(disp) ) {
          __ sw(R0, AT, disp);
        } else {
          __ move(T9, disp);
          __ daddu(AT, AT, T9);
          __ sw(R0, AT, 0);
        }
      }
    } else { //index is 0
      if ( UseLoongsonISA ) {
        if ( Assembler::is_simm16(disp) ) {
          __ sw(R0, as_Register(base), disp);
        } else {
          __ move(T9, disp);
          __ gsswx(R0, as_Register(base), T9, 0);
        }
      } else {
        if( Assembler::is_simm16(disp) ) {
          __ sw(R0, as_Register(base), disp);
        } else {
          __ move(T9, disp);
          __ daddu(AT, as_Register(base), T9);
          __ sw(R0, AT, 0);
        }
      }
    }
  %}
  ins_pipe( ialu_storeI );
%}
12454 // Load Double
// Load Double.
instruct loadD(regD dst, memory mem) %{
  match(Set dst (LoadD mem));

  ins_cost(150);
  format %{ "loadD $dst, $mem #@loadD" %}
  ins_encode(load_D_enc(dst, mem));
  ins_pipe( ialu_loadI );
%}
12464 // Load Double - UNaligned
// Load Double - UNaligned.  Currently emitted as a normal aligned load;
// see FIXME below about using ldl/ldr for a truly unaligned sequence.
instruct loadD_unaligned(regD dst, memory mem ) %{
  match(Set dst (LoadD_unaligned mem));
  ins_cost(250);
  // FIXME: Jin: Need more effective ldl/ldr
  format %{ "loadD_unaligned $dst, $mem #@loadD_unaligned" %}
  ins_encode(load_D_enc(dst, mem));
  ins_pipe( ialu_loadI );
%}
// Store Double from an FPU register.
instruct storeD_reg( memory mem, regD src) %{
  match(Set mem (StoreD mem src));

  ins_cost(50);
  format %{ "store $mem, $src\t# store float @ storeD_reg" %}
  ins_encode(store_D_reg_enc(mem, src));
  ins_pipe( fpu_storeF );
%}
// Store double 0.0: materialize 0.0 in scratch FPU register F30 (mtc1 of
// R0 then int->double convert) and store it.  NOTE: F30 is clobbered.
instruct storeD_imm0( memory mem, immD0 zero) %{
  match(Set mem (StoreD mem zero));

  ins_cost(40);
  format %{ "store $mem, zero\t# store float @ storeD_imm0" %}
  ins_encode %{
    int base = $mem$$base;
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;

    // F30 := (double) 0
    __ mtc1(R0, F30);
    __ cvt_d_w(F30, F30);

    if( index != 0 ) {
      if ( UseLoongsonISA ) {
        // gssdxc1 folds base+index+small-disp into one instruction.
        if ( Assembler::is_simm(disp, 8) ) {
          if (scale == 0) {
            __ gssdxc1(F30, as_Register(base), as_Register(index), disp);
          } else {
            __ dsll(T9, as_Register(index), scale);
            __ gssdxc1(F30, as_Register(base), T9, disp);
          }
        } else if ( Assembler::is_simm16(disp) ) {
          if (scale == 0) {
            __ daddu(AT, as_Register(base), as_Register(index));
            __ sdc1(F30, AT, disp);
          } else {
            __ dsll(T9, as_Register(index), scale);
            __ daddu(AT, as_Register(base), T9);
            __ sdc1(F30, AT, disp);
          }
        } else {
          // Large displacement: fold it into the index side.
          if (scale == 0) {
            __ move(T9, disp);
            __ daddu(AT, as_Register(index), T9);
            __ gssdxc1(F30, as_Register(base), AT, 0);
          } else {
            __ move(T9, disp);
            __ dsll(AT, as_Register(index), scale);
            __ daddu(AT, AT, T9);
            __ gssdxc1(F30, as_Register(base), AT, 0);
          }
        }
      } else { // not use loongson isa
        if(scale != 0) {
          __ dsll(T9, as_Register(index), scale);
          __ daddu(AT, as_Register(base), T9);
        } else {
          __ daddu(AT, as_Register(base), as_Register(index));
        }
        if( Assembler::is_simm16(disp) ) {
          __ sdc1(F30, AT, disp);
        } else {
          __ move(T9, disp);
          __ daddu(AT, AT, T9);
          __ sdc1(F30, AT, 0);
        }
      }
    } else {// index is 0
      if ( UseLoongsonISA ) {
        if ( Assembler::is_simm16(disp) ) {
          __ sdc1(F30, as_Register(base), disp);
        } else {
          __ move(T9, disp);
          __ gssdxc1(F30, as_Register(base), T9, 0);
        }
      } else {
        if( Assembler::is_simm16(disp) ) {
          __ sdc1(F30, as_Register(base), disp);
        } else {
          __ move(T9, disp);
          __ daddu(AT, as_Register(base), T9);
          __ sdc1(F30, AT, 0);
        }
      }
    }
  %}
  ins_pipe( ialu_storeI );
%}
// Load int from a stack slot (SP-relative; displacement must fit simm16).
instruct loadSSI(mRegI dst, stackSlotI src)
%{
  match(Set dst src);

  ins_cost(125);
  format %{ "lw $dst, $src\t# int stk @ loadSSI" %}
  ins_encode %{
    guarantee( Assembler::is_simm16($src$$disp), "disp too long (loadSSI) !");
    __ lw($dst$$Register, SP, $src$$disp);
  %}
  ins_pipe(ialu_loadI);
%}
// Store int to a stack slot (SP-relative; displacement must fit simm16).
instruct storeSSI(stackSlotI dst, mRegI src)
%{
  match(Set dst src);

  ins_cost(100);
  format %{ "sw $dst, $src\t# int stk @ storeSSI" %}
  ins_encode %{
    guarantee( Assembler::is_simm16($dst$$disp), "disp too long (storeSSI) !");
    __ sw($src$$Register, SP, $dst$$disp);
  %}
  ins_pipe(ialu_storeI);
%}
// Load long from a stack slot (SP-relative; displacement must fit simm16).
instruct loadSSL(mRegL dst, stackSlotL src)
%{
  match(Set dst src);

  ins_cost(125);
  format %{ "ld $dst, $src\t# long stk @ loadSSL" %}
  ins_encode %{
    guarantee( Assembler::is_simm16($src$$disp), "disp too long (loadSSL) !");
    __ ld($dst$$Register, SP, $src$$disp);
  %}
  ins_pipe(ialu_loadI);
%}
// Store long to a stack slot (SP-relative; displacement must fit simm16).
instruct storeSSL(stackSlotL dst, mRegL src)
%{
  match(Set dst src);

  ins_cost(100);
  format %{ "sd $dst, $src\t# long stk @ storeSSL" %}
  ins_encode %{
    guarantee( Assembler::is_simm16($dst$$disp), "disp too long (storeSSL) !");
    __ sd($src$$Register, SP, $dst$$disp);
  %}
  ins_pipe(ialu_storeI);
%}
// Load pointer from a stack slot (SP-relative; displacement must fit simm16).
instruct loadSSP(mRegP dst, stackSlotP src)
%{
  match(Set dst src);

  ins_cost(125);
  format %{ "ld $dst, $src\t# ptr stk @ loadSSP" %}
  ins_encode %{
    guarantee( Assembler::is_simm16($src$$disp), "disp too long (loadSSP) !");
    __ ld($dst$$Register, SP, $src$$disp);
  %}
  ins_pipe(ialu_loadI);
%}
// Store pointer to a stack slot (SP-relative; displacement must fit simm16).
instruct storeSSP(stackSlotP dst, mRegP src)
%{
  match(Set dst src);

  ins_cost(100);
  format %{ "sd $dst, $src\t# ptr stk @ storeSSP" %}
  ins_encode %{
    guarantee( Assembler::is_simm16($dst$$disp), "disp too long (storeSSP) !");
    __ sd($src$$Register, SP, $dst$$disp);
  %}
  ins_pipe(ialu_storeI);
%}
// Load float from a stack slot (SP-relative; displacement must fit simm16).
instruct loadSSF(regF dst, stackSlotF src)
%{
  match(Set dst src);

  ins_cost(125);
  format %{ "lwc1 $dst, $src\t# float stk @ loadSSF" %}
  ins_encode %{
    guarantee( Assembler::is_simm16($src$$disp), "disp too long (loadSSF) !");
    __ lwc1($dst$$FloatRegister, SP, $src$$disp);
  %}
  ins_pipe(ialu_loadI);
%}
// Store float to a stack slot (SP-relative; displacement must fit simm16).
instruct storeSSF(stackSlotF dst, regF src)
%{
  match(Set dst src);

  ins_cost(100);
  format %{ "swc1 $dst, $src\t# float stk @ storeSSF" %}
  ins_encode %{
    guarantee( Assembler::is_simm16($dst$$disp), "disp too long (storeSSF) !");
    __ swc1($src$$FloatRegister, SP, $dst$$disp);
  %}
  ins_pipe(fpu_storeF);
%}
12668 // Use the same format since predicate() can not be used here.
// Load double from a stack slot.  Use the same format since predicate()
// can not be used here.
instruct loadSSD(regD dst, stackSlotD src)
%{
  match(Set dst src);

  ins_cost(125);
  format %{ "ldc1 $dst, $src\t# double stk @ loadSSD" %}
  ins_encode %{
    guarantee( Assembler::is_simm16($src$$disp), "disp too long (loadSSD) !");
    __ ldc1($dst$$FloatRegister, SP, $src$$disp);
  %}
  ins_pipe(ialu_loadI);
%}
// Store double to a stack slot (SP-relative; displacement must fit simm16).
instruct storeSSD(stackSlotD dst, regD src)
%{
  match(Set dst src);

  ins_cost(100);
  format %{ "sdc1 $dst, $src\t# double stk @ storeSSD" %}
  ins_encode %{
    guarantee( Assembler::is_simm16($dst$$disp), "disp too long (storeSSD) !");
    __ sdc1($src$$FloatRegister, SP, $dst$$disp);
  %}
  ins_pipe(fpu_storeF);
%}
// Fast-path monitor enter; the heavy lifting is in MacroAssembler::fast_lock.
// Result is reported through the flags register (cr).
instruct cmpFastLock( FlagsReg cr, mRegP object, s0_RegP box, mRegI tmp, mRegP scr) %{
  match( Set cr (FastLock object box) );
  effect( TEMP tmp, TEMP scr, USE_KILL box );
  ins_cost(300);
  format %{ "FASTLOCK $cr $object, $box, $tmp #@ cmpFastLock" %}
  ins_encode %{
    __ fast_lock($object$$Register, $box$$Register, $tmp$$Register, $scr$$Register);
  %}

  ins_pipe( pipe_slow );
  ins_pc_relative(1);
%}
// Fast-path monitor exit; counterpart of cmpFastLock, implemented by
// MacroAssembler::fast_unlock.
instruct cmpFastUnlock( FlagsReg cr, mRegP object, s0_RegP box, mRegP tmp ) %{
  match( Set cr (FastUnlock object box) );
  effect( TEMP tmp, USE_KILL box );
  ins_cost(300);
  format %{ "FASTUNLOCK $object, $box, $tmp #@cmpFastUnlock" %}
  ins_encode %{
    __ fast_unlock($object$$Register, $box$$Register, $tmp$$Register);
  %}

  ins_pipe( pipe_slow );
  ins_pc_relative(1);
%}
12721 // Store CMS card-mark Immediate
// Store CMS card-mark Immediate (byte store with ordering, see the
// store_B_immI_enc_sync encoding).
instruct storeImmCM(memory mem, immI8 src) %{
  match(Set mem (StoreCM mem src));

  ins_cost(150);
  format %{ "MOV8 $mem,$src\t! CMS card-mark imm0" %}
  // opcode(0xC6);
  ins_encode(store_B_immI_enc_sync(mem, src));
  ins_pipe( ialu_storeI );
%}
12732 // Die now
12733 instruct ShouldNotReachHere( )
12734 %{
12735 match(Halt);
12736 ins_cost(300);
12738 // Use the following format syntax
12739 format %{ "ILLTRAP ;#@ShouldNotReachHere" %}
12740 ins_encode %{
12741 // Here we should emit illtrap !
12743 __ stop("in ShoudNotReachHere");
12745 %}
12746 ins_pipe( pipe_jump );
12747 %}
// Load effective address: base + 8-bit offset of a narrow-oop address.
// Only valid when the narrow-oop shift is 0 (address arithmetic is direct).
instruct leaP8Narrow(mRegP dst, indOffset8Narrow mem)
%{
  predicate(Universe::narrow_oop_shift() == 0);
  match(Set dst mem);

  ins_cost(110);
  format %{ "leaq $dst, $mem\t# ptr off8narrow @ leaP8Narrow" %}
  ins_encode %{
    Register dst = $dst$$Register;
    Register base = as_Register($mem$$base);
    int disp = $mem$$disp;

    __ daddiu(dst, base, disp);
  %}
  ins_pipe( ialu_regI_imm16 );
%}
// Load effective address: dst = base + (index << scale) + disp.
instruct leaPPosIdxScaleOff8(mRegP dst, basePosIndexScaleOffset8 mem)
%{
  match(Set dst mem);

  ins_cost(110);
  format %{ "leaq $dst, $mem\t# @ PosIdxScaleOff8" %}
  ins_encode %{
    Register dst = $dst$$Register;
    Register base = as_Register($mem$$base);
    Register index = as_Register($mem$$index);
    int scale = $mem$$scale;
    int disp = $mem$$disp;

    if (scale == 0) {
      __ daddu(AT, base, index);
      __ daddiu(dst, AT, disp);
    } else {
      __ dsll(AT, index, scale);
      __ daddu(AT, base, AT);
      __ daddiu(dst, AT, disp);
    }
  %}

  ins_pipe( ialu_regI_imm16 );
%}
// Load effective address: dst = base + (index << scale), no displacement.
instruct leaPIdxScale(mRegP dst, indIndexScale mem)
%{
  match(Set dst mem);

  ins_cost(110);
  format %{ "leaq $dst, $mem\t# @ leaPIdxScale" %}
  ins_encode %{
    Register dst = $dst$$Register;
    Register base = as_Register($mem$$base);
    Register index = as_Register($mem$$index);
    int scale = $mem$$scale;

    if (scale == 0) {
      __ daddu(dst, base, index);
    } else {
      __ dsll(AT, index, scale);
      __ daddu(dst, base, AT);
    }
  %}

  ins_pipe( ialu_regI_imm16 );
%}
12815 // Jump Direct Conditional - Label defines a relative address from Jcc+1
// Counted-loop back-branch on an int register/register compare.  The
// cmpcode selects the condition; unsigned-style names (above/below) are
// implemented with slt here — presumably the matcher only produces the
// conditions this encoding handles correctly (TODO confirm signedness).
// Unbound labels get a zero displacement that is patched on bind.
instruct jmpLoopEnd(cmpOp cop, mRegI src1, mRegI src2, label labl) %{
  match(CountedLoopEnd cop (CmpI src1 src2));
  effect(USE labl);

  ins_cost(300);
  format %{ "J$cop $src1, $src2, $labl\t# Loop end @ jmpLoopEnd" %}
  ins_encode %{
    Register op1 = $src1$$Register;
    Register op2 = $src2$$Register;
    Label &L = *($labl$$label);
    int flag = $cop$$cmpcode;

    switch(flag)
    {
      case 0x01: //equal
        if (&L)
          __ beq(op1, op2, L);
        else
          __ beq(op1, op2, (int)0);
        break;
      case 0x02: //not_equal
        if (&L)
          __ bne(op1, op2, L);
        else
          __ bne(op1, op2, (int)0);
        break;
      case 0x03: //above
        __ slt(AT, op2, op1);
        if(&L)
          __ bne(AT, R0, L);
        else
          __ bne(AT, R0, (int)0);
        break;
      case 0x04: //above_equal
        __ slt(AT, op1, op2);
        if(&L)
          __ beq(AT, R0, L);
        else
          __ beq(AT, R0, (int)0);
        break;
      case 0x05: //below
        __ slt(AT, op1, op2);
        if(&L)
          __ bne(AT, R0, L);
        else
          __ bne(AT, R0, (int)0);
        break;
      case 0x06: //below_equal
        __ slt(AT, op2, op1);
        if(&L)
          __ beq(AT, R0, L);
        else
          __ beq(AT, R0, (int)0);
        break;
      default:
        Unimplemented();
    }
    __ nop();   // branch delay slot
  %}
  ins_pipe( pipe_jump );
  ins_pc_relative(1);
%}
// Counted-loop back-branch against a small immediate: compute
// AT = src1 - src2 with one addiu32, then branch on AT's sign/zero.
instruct jmpLoopEnd_reg_imm16_sub(cmpOp cop, mRegI src1, immI16_sub src2, label labl) %{
  match(CountedLoopEnd cop (CmpI src1 src2));
  effect(USE labl);

  ins_cost(250);
  format %{ "J$cop $src1, $src2, $labl\t# Loop end @ jmpLoopEnd_reg_imm16_sub" %}
  ins_encode %{
    Register op1 = $src1$$Register;
    int op2 = $src2$$constant;
    Label &L = *($labl$$label);
    int flag = $cop$$cmpcode;

    // AT = op1 - op2 (immI16_sub guarantees -op2 fits the immediate field).
    __ addiu32(AT, op1, -1 * op2);

    switch(flag)
    {
      case 0x01: //equal
        if (&L)
          __ beq(AT, R0, L);
        else
          __ beq(AT, R0, (int)0);
        break;
      case 0x02: //not_equal
        if (&L)
          __ bne(AT, R0, L);
        else
          __ bne(AT, R0, (int)0);
        break;
      case 0x03: //above
        if(&L)
          __ bgtz(AT, L);
        else
          __ bgtz(AT, (int)0);
        break;
      case 0x04: //above_equal
        if(&L)
          __ bgez(AT, L);
        else
          __ bgez(AT,(int)0);
        break;
      case 0x05: //below
        if(&L)
          __ bltz(AT, L);
        else
          __ bltz(AT, (int)0);
        break;
      case 0x06: //below_equal
        if(&L)
          __ blez(AT, L);
        else
          __ blez(AT, (int)0);
        break;
      default:
        Unimplemented();
    }
    __ nop();   // branch delay slot
  %}
  ins_pipe( pipe_jump );
  ins_pc_relative(1);
%}
12942 /*
12943 // Jump Direct Conditional - Label defines a relative address from Jcc+1
12944 instruct jmpLoopEndU(cmpOpU cop, eFlagsRegU cmp, label labl) %{
12945 match(CountedLoopEnd cop cmp);
12946 effect(USE labl);
12948 ins_cost(300);
12949 format %{ "J$cop,u $labl\t# Loop end" %}
12950 size(6);
12951 opcode(0x0F, 0x80);
12952 ins_encode( Jcc( cop, labl) );
12953 ins_pipe( pipe_jump );
12954 ins_pc_relative(1);
12955 %}
12957 instruct jmpLoopEndUCF(cmpOpUCF cop, eFlagsRegUCF cmp, label labl) %{
12958 match(CountedLoopEnd cop cmp);
12959 effect(USE labl);
12961 ins_cost(200);
12962 format %{ "J$cop,u $labl\t# Loop end" %}
12963 opcode(0x0F, 0x80);
12964 ins_encode( Jcc( cop, labl) );
12965 ins_pipe( pipe_jump );
12966 ins_pc_relative(1);
12967 %}
12968 */
12970 // This match pattern is created for StoreIConditional since I cannot match IfNode without a RegFlags! fujie 2012/07/17
// Conditional jump on the AT pseudo-flags register (produced e.g. by
// storeIConditional).  Note the branches are intentionally inverted
// relative to the cmpcode: "equal" tests AT != 0 because AT holds the
// success flag of the preceding compare/CAS.
instruct jmpCon_flags(cmpOp cop, FlagsReg cr, label labl) %{
  match(If cop cr);
  effect(USE labl);

  ins_cost(300);
  format %{ "J$cop $labl #mips uses AT as eflag @jmpCon_flags" %}

  ins_encode %{
    Label &L = *($labl$$label);
    switch($cop$$cmpcode)
    {
      case 0x01: //equal
        if (&L)
          __ bne(AT, R0, L);
        else
          __ bne(AT, R0, (int)0);
        break;
      case 0x02: //not equal
        if (&L)
          __ beq(AT, R0, L);
        else
          __ beq(AT, R0, (int)0);
        break;
      default:
        Unimplemented();
    }
    __ nop();   // branch delay slot
  %}

  ins_pipe( pipe_jump );
  ins_pc_relative(1);
%}
13005 // ============================================================================
13006 // The 2nd slow-half of a subtype check. Scan the subklass's 2ndary superklass
13007 // array for an instance of the superklass. Set a hidden internal cache on a
13008 // hit (cache is checked with exposed code in gen_subtype_check()). Return
13009 // NZ for a miss or zero for a hit. The encoding ALSO sets flags.
// Slow-half subtype check: scan the subklass's secondary-supers array for
// 'super' (see enc_PartialSubtypeCheck / gen_subtype_check).
instruct partialSubtypeCheck( mRegP result, no_T8_mRegP sub, no_T8_mRegP super, mT8RegI tmp ) %{
  match(Set result (PartialSubtypeCheck sub super));
  effect(KILL tmp);
  ins_cost(1100);  // slightly larger than the next version
  format %{ "partialSubtypeCheck result=$result, sub=$sub, super=$super, tmp=$tmp " %}

  ins_encode( enc_PartialSubtypeCheck(result, sub, super, tmp) );
  ins_pipe( pipe_slow );
%}
13021 // Conditional-store of an int value.
13022 // ZF flag is set on success, reset otherwise. Implemented with a CMPXCHG on Intel.
// Conditional store of an int: ll/sc loop that stores 'newval' only if the
// memory word still equals 'oldval'.  The success flag is left in AT
// (0 = failure, non-zero = success); consumed by jmpCon_flags.
// Only a base+disp address is supported; an index register is a fatal error.
instruct storeIConditional( memory mem, mRegI oldval, mRegI newval, FlagsReg cr ) %{
  match(Set cr (StoreIConditional mem (Binary oldval newval)));
  // effect(KILL oldval);
  format %{ "CMPXCHG $newval, $mem, $oldval \t# @storeIConditional" %}

  ins_encode %{
    Register oldval = $oldval$$Register;
    Register newval = $newval$$Register;
    Address addr(as_Register($mem$$base), $mem$$disp);
    Label again, failure;

    // int base = $mem$$base;
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;

    guarantee(Assembler::is_simm16(disp), "");

    if( index != 0 ) {
      __ stop("in storeIConditional: index != 0");
    } else {
      __ bind(again);
      // Memory barrier before the atomic sequence, controlled by UseSyncLevel.
      if(UseSyncLevel <= 1000) __ sync();
      __ ll(AT, addr);
      __ bne(AT, oldval, failure);
      __ delayed()->addu(AT, R0, R0);   // delay slot: AT = 0 (failure flag)

      __ addu(AT, newval, R0);
      __ sc(AT, addr);
      __ beq(AT, R0, again);            // sc failed: retry the ll/sc loop
      __ delayed()->addiu(AT, R0, 0xFF);// delay slot: AT = non-zero (success flag)
      __ bind(failure);
      __ sync();
    }
  %}

  ins_pipe( long_memory_op );
%}
13062 // Conditional-store of a long value.
13063 // ZF flag is set on success, reset otherwise. Implemented with a CMPXCHG.
13064 instruct storeLConditional(memory mem, t2RegL oldval, mRegL newval, FlagsReg cr )
13065 %{
13066 match(Set cr (StoreLConditional mem (Binary oldval newval)));
13067 effect(KILL oldval);
13069 format %{ "cmpxchg $mem, $newval\t# If $oldval == $mem then store $newval into $mem" %}
13070 ins_encode%{
13071 Register oldval = $oldval$$Register;
13072 Register newval = $newval$$Register;
13073 Address addr((Register)$mem$$base, $mem$$disp);
13075 int index = $mem$$index;
13076 int scale = $mem$$scale;
13077 int disp = $mem$$disp;
13079 guarantee(Assembler::is_simm16(disp), "");
13081 if( index != 0 ) {
13082 __ stop("in storeIConditional: index != 0");
13083 } else {
13084 __ cmpxchg(newval, addr, oldval);
13085 }
13086 %}
13087 ins_pipe( long_memory_op );
13088 %}
// CompareAndSwapI: 32-bit CAS at [mem_ptr]; cmpxchg32 leaves its result
// flag in AT, which is copied into $res.
instruct compareAndSwapI( mRegI res, mRegP mem_ptr, mS2RegI oldval, mRegI newval) %{
  match(Set res (CompareAndSwapI mem_ptr (Binary oldval newval)));
  effect(KILL oldval);
  // match(CompareAndSwapI mem_ptr (Binary oldval newval));
  format %{ "CMPXCHG $newval, [$mem_ptr], $oldval @ compareAndSwapI\n\t"
            "MOV $res, 1 @ compareAndSwapI\n\t"
            "BNE AT, R0 @ compareAndSwapI\n\t"
            "MOV $res, 0 @ compareAndSwapI\n"
            "L:" %}
  ins_encode %{
    Register newval = $newval$$Register;
    Register oldval = $oldval$$Register;
    Register res = $res$$Register;
    Address addr($mem_ptr$$Register, 0);
    Label L;   // NOTE: unused; kept for symmetry with the format string

    __ cmpxchg32(newval, addr, oldval);
    __ move(res, AT);
  %}
  ins_pipe( long_memory_op );
%}
13113 //FIXME:
// CompareAndSwapP: 64-bit pointer CAS at [mem_ptr]; cmpxchg leaves its
// result flag in AT, which is copied into $res.
instruct compareAndSwapP( mRegI res, mRegP mem_ptr, s2_RegP oldval, mRegP newval) %{
  match(Set res (CompareAndSwapP mem_ptr (Binary oldval newval)));
  effect(KILL oldval);
  format %{ "CMPXCHG $newval, [$mem_ptr], $oldval @ compareAndSwapP\n\t"
            "MOV $res, AT @ compareAndSwapP\n\t"
            "L:" %}
  ins_encode %{
    Register newval = $newval$$Register;
    Register oldval = $oldval$$Register;
    Register res = $res$$Register;
    Address addr($mem_ptr$$Register, 0);
    Label L;   // NOTE: unused; kept for symmetry with the format string

    __ cmpxchg(newval, addr, oldval);
    __ move(res, AT);
  %}
  ins_pipe( long_memory_op );
%}
// CompareAndSwapN: 32-bit CAS of a narrow oop.  oldval is sign-extended
// first (see comment below) because ll/sc sign-extend; oldval is killed.
instruct compareAndSwapN( mRegI res, mRegP mem_ptr, t2_RegN oldval, mRegN newval) %{
  match(Set res (CompareAndSwapN mem_ptr (Binary oldval newval)));
  effect(KILL oldval);
  format %{ "CMPXCHG $newval, [$mem_ptr], $oldval @ compareAndSwapN\n\t"
            "MOV $res, AT @ compareAndSwapN\n\t"
            "L:" %}
  ins_encode %{
    Register newval = $newval$$Register;
    Register oldval = $oldval$$Register;
    Register res = $res$$Register;
    Address addr($mem_ptr$$Register, 0);
    Label L;   // NOTE: unused; kept for symmetry with the format string

    /* 2013/7/19 Jin: cmpxchg32 is implemented with ll/sc, which will do sign extension.
     *      Thus, we should extend oldval's sign for correct comparision.
     */
    __ sll(oldval, oldval, 0);   // sll with shamt 0 sign-extends the low 32 bits

    __ cmpxchg32(newval, addr, oldval);
    __ move(res, AT);
  %}
  ins_pipe( long_memory_op );
%}
13157 //----------Max and Min--------------------------------------------------------
13158 // Min Instructions
13159 ////
13160 // *** Min and Max using the conditional move are slower than the
13161 // *** branch version on a Pentium III.
13162 // // Conditional move for min
13163 //instruct cmovI_reg_lt( eRegI op2, eRegI op1, eFlagsReg cr ) %{
13164 // effect( USE_DEF op2, USE op1, USE cr );
13165 // format %{ "CMOVlt $op2,$op1\t! min" %}
13166 // opcode(0x4C,0x0F);
13167 // ins_encode( OpcS, OpcP, RegReg( op2, op1 ) );
13168 // ins_pipe( pipe_cmov_reg );
13169 //%}
13170 //
13171 //// Min Register with Register (P6 version)
13172 //instruct minI_eReg_p6( eRegI op1, eRegI op2 ) %{
13173 // predicate(VM_Version::supports_cmov() );
13174 // match(Set op2 (MinI op1 op2));
13175 // ins_cost(200);
13176 // expand %{
13177 // eFlagsReg cr;
13178 // compI_eReg(cr,op1,op2);
13179 // cmovI_reg_lt(op2,op1,cr);
13180 // %}
13181 //%}
13183 // Min Register with Register (generic version)
// Min Register with Register (generic version): dst = min(dst, src) using
// slt + conditional move (movn replaces dst with src when src < dst).
instruct minI_Reg_Reg(mRegI dst, mRegI src) %{
  match(Set dst (MinI dst src));
  //effect(KILL flags);
  ins_cost(80);

  format %{ "MIN $dst, $src @minI_Reg_Reg" %}
  ins_encode %{
    Register dst = $dst$$Register;
    Register src = $src$$Register;

    __ slt(AT, src, dst);    // AT = (src < dst)
    __ movn(dst, src, AT);   // if AT != 0: dst = src

  %}

  ins_pipe( pipe_slow );
%}
13202 // Max Register with Register
13203 // *** Min and Max using the conditional move are slower than the
13204 // *** branch version on a Pentium III.
13205 // // Conditional move for max
13206 //instruct cmovI_reg_gt( eRegI op2, eRegI op1, eFlagsReg cr ) %{
13207 // effect( USE_DEF op2, USE op1, USE cr );
13208 // format %{ "CMOVgt $op2,$op1\t! max" %}
13209 // opcode(0x4F,0x0F);
13210 // ins_encode( OpcS, OpcP, RegReg( op2, op1 ) );
13211 // ins_pipe( pipe_cmov_reg );
13212 //%}
13213 //
13214 // // Max Register with Register (P6 version)
13215 //instruct maxI_eReg_p6( eRegI op1, eRegI op2 ) %{
13216 // predicate(VM_Version::supports_cmov() );
13217 // match(Set op2 (MaxI op1 op2));
13218 // ins_cost(200);
13219 // expand %{
13220 // eFlagsReg cr;
13221 // compI_eReg(cr,op1,op2);
13222 // cmovI_reg_gt(op2,op1,cr);
13223 // %}
13224 //%}
13226 // Max Register with Register (generic version)
// Integer maximum: dst = max(dst, src), branch-free via slt + conditional move.
13227 instruct maxI_Reg_Reg(mRegI dst, mRegI src) %{
13228 match(Set dst (MaxI dst src));
13229 ins_cost(80);
13231 format %{ "MAX $dst, $src @maxI_Reg_Reg" %}
13233 ins_encode %{
13234 Register dst = $dst$$Register;
13235 Register src = $src$$Register;
// AT = (dst < src); movn replaces dst with src only in that case.
13237 __ slt(AT, dst, src);
13238 __ movn(dst, src, AT);
13240 %}
13242 ins_pipe( pipe_slow );
13243 %}
// Special case of MaxI against constant zero: dst = max(dst, 0).
13245 instruct maxI_Reg_zero(mRegI dst, immI0 zero) %{
13246 match(Set dst (MaxI dst zero));
13247 ins_cost(50);
13249 format %{ "MAX $dst, 0 @maxI_Reg_zero" %}
13251 ins_encode %{
13252 Register dst = $dst$$Register;
// If dst < 0 (AT set), clamp dst to zero.
13254 __ slt(AT, dst, R0);
13255 __ movn(dst, R0, AT);
13257 %}
13259 ins_pipe( pipe_slow );
13260 %}
// Zero-extend the low 32 bits of a long: dst = src & 0xFFFFFFFF, done with
// a single dext (extract bits [0,32) into dst, upper bits cleared).
13262 instruct zerox_long_reg_reg(mRegL dst, mRegL src, immL_32bits mask)
13263 %{
13264 match(Set dst (AndL src mask));
13266 format %{ "movl $dst, $src\t# zero-extend long @ zerox_long_reg_reg" %}
13267 ins_encode %{
13268 Register dst = $dst$$Register;
13269 Register src = $src$$Register;
13271 __ dext(dst, src, 0, 32);
13272 %}
13273 ins_pipe(ialu_regI_regI);
13274 %}
// Pack two ints into one long: dst = (src2 << 32) | zext(src1).
// Matches (zext(src1) | (i2l(src2) << 32)). The three cases below handle
// register aliasing so neither source is clobbered before it is consumed.
13276 instruct combine_i2l(mRegL dst, mRegI src1, immL_32bits mask, mRegI src2, immI_32 shift32)
13277 %{
13278 match(Set dst (OrL (AndL (ConvI2L src1) mask) (LShiftL (ConvI2L src2) shift32)));
13280 format %{ "combine_i2l $dst, $src2(H), $src1(L) @ combine_i2l" %}
13281 ins_encode %{
13282 Register dst = $dst$$Register;
13283 Register src1 = $src1$$Register;
13284 Register src2 = $src2$$Register;
// dst aliases src1: low half already in place, insert src2 into the high half.
13286 if (src1 == dst) {
13287 __ dinsu(dst, src2, 32, 32);
// dst aliases src2: shift src2 up first, then insert src1 into the low half.
13288 } else if (src2 == dst) {
13289 __ dsll32(dst, dst, 0);
13290 __ dins(dst, src1, 0, 32);
// No aliasing: build dst from scratch, low half then high half.
13291 } else {
13292 __ dext(dst, src1, 0, 32);
13293 __ dinsu(dst, src2, 32, 32);
13294 }
13295 %}
13296 ins_pipe(ialu_regI_regI);
13297 %}
13299 // Zero-extend convert int to long
// Zero-extend convert int to long: dst = zext32(src) via dext.
13300 instruct convI2L_reg_reg_zex(mRegL dst, mRegI src, immL_32bits mask)
13301 %{
13302 match(Set dst (AndL (ConvI2L src) mask));
13304 format %{ "movl $dst, $src\t# i2l zero-extend @ convI2L_reg_reg_zex" %}
13305 ins_encode %{
13306 Register dst = $dst$$Register;
13307 Register src = $src$$Register;
13309 __ dext(dst, src, 0, 32);
13310 %}
13311 ins_pipe(ialu_regI_regI);
13312 %}
// Fold the L2I->I2L->AndL 0xFFFFFFFF chain into one zero-extension:
// dst = src & 0xFFFFFFFF via dext.
13314 instruct convL2I2L_reg_reg_zex(mRegL dst, mRegL src, immL_32bits mask)
13315 %{
13316 match(Set dst (AndL (ConvI2L (ConvL2I src)) mask));
13318 format %{ "movl $dst, $src\t# i2l zero-extend @ convL2I2L_reg_reg_zex" %}
13319 ins_encode %{
13320 Register dst = $dst$$Register;
13321 Register src = $src$$Register;
13323 __ dext(dst, src, 0, 32);
13324 %}
13325 ins_pipe(ialu_regI_regI);
13326 %}
13328 // Match loading integer and casting it to unsigned int in long register.
13329 // LoadI + ConvI2L + AndL 0xffffffff.
// LoadI + ConvI2L + AndL 0xFFFFFFFF (mask on the right) folded into one
// zero-extending 32-bit load (lwu, via the narrow-load encoding).
13330 instruct loadUI2L_rmask(mRegL dst, memory mem, immL_32bits mask) %{
13331 match(Set dst (AndL (ConvI2L (LoadI mem)) mask));
13333 format %{ "lwu $dst, $mem \t// zero-extend to long @ loadUI2L_rmask" %}
13334 ins_encode (load_N_enc(dst, mem));
13335 ins_pipe(ialu_loadI);
13336 %}
// Same as loadUI2L_rmask but with the mask as the left AndL operand.
13338 instruct loadUI2L_lmask(mRegL dst, memory mem, immL_32bits mask) %{
13339 match(Set dst (AndL mask (ConvI2L (LoadI mem))));
13341 format %{ "lwu $dst, $mem \t// zero-extend to long @ loadUI2L_lmask" %}
13342 ins_encode (load_N_enc(dst, mem));
13343 ins_pipe(ialu_loadI);
13344 %}
13347 // ============================================================================
13348 // Safepoint Instruction
// Safepoint poll: load from the polling page. When the VM arms a safepoint
// the page is protected and this load traps, stopping the thread for GC.
// The relocation marks the load so the signal handler can recognize it.
13349 instruct safePoint_poll(mRegP poll) %{
13350 match(SafePoint poll);
13351 effect(USE poll);
13353 ins_cost(125);
13354 format %{ "Safepoint @ [$poll] : poll for GC @ safePoint_poll" %}
13356 ins_encode %{
13357 Register poll_reg = $poll$$Register;
13359 __ block_comment("Safepoint:");
13360 __ relocate(relocInfo::poll_type);
13361 __ lw(AT, poll_reg, 0);
13362 %}
13364 ins_pipe( ialu_storeI );
13365 %}
13367 //----------Arithmetic Conversion Instructions---------------------------------
// RoundFloat is a no-op on this target (no x87-style extended precision
// to truncate), so it emits no code and costs nothing.
13369 instruct roundFloat_nop(regF dst)
13370 %{
13371 match(Set dst (RoundFloat dst));
13373 ins_cost(0);
13374 ins_encode();
13375 ins_pipe(empty);
13376 %}
// RoundDouble is likewise a no-op on this target: zero cost, no encoding.
13378 instruct roundDouble_nop(regD dst)
13379 %{
13380 match(Set dst (RoundDouble dst));
13382 ins_cost(0);
13383 ins_encode();
13384 ins_pipe(empty);
13385 %}
13387 //---------- Zeros Count Instructions ------------------------------------------
13388 // CountLeadingZerosINode CountTrailingZerosINode
// Count leading zeros of an int using the hardware clz instruction;
// guarded by UseCountLeadingZerosInstruction.
13389 instruct countLeadingZerosI(mRegI dst, mRegI src) %{
13390 predicate(UseCountLeadingZerosInstruction);
13391 match(Set dst (CountLeadingZerosI src));
13393 format %{ "clz $dst, $src\t# count leading zeros (int)" %}
13394 ins_encode %{
13395 __ clz($dst$$Register, $src$$Register);
13396 %}
13397 ins_pipe( ialu_regL_regL );
13398 %}
// Count leading zeros of a long using the 64-bit dclz instruction.
13400 instruct countLeadingZerosL(mRegI dst, mRegL src) %{
13401 predicate(UseCountLeadingZerosInstruction);
13402 match(Set dst (CountLeadingZerosL src));
13404 format %{ "dclz $dst, $src\t# count leading zeros (long)" %}
13405 ins_encode %{
13406 __ dclz($dst$$Register, $src$$Register);
13407 %}
13408 ins_pipe( ialu_regL_regL );
13409 %}
// Count trailing zeros of an int; guarded by UseCountTrailingZerosInstruction.
13411 instruct countTrailingZerosI(mRegI dst, mRegI src) %{
13412 predicate(UseCountTrailingZerosInstruction);
13413 match(Set dst (CountTrailingZerosI src));
13415 format %{ "ctz $dst, $src\t# count trailing zeros (int)" %}
13416 ins_encode %{
13417 // ctz and dctz are Loongson (gs) extension instructions, not base MIPS.
13418 __ ctz($dst$$Register, $src$$Register);
13419 %}
13420 ins_pipe( ialu_regL_regL );
13421 %}
// Count trailing zeros of a long using the Loongson dctz instruction;
// guarded by UseCountTrailingZerosInstruction.
13423 instruct countTrailingZerosL(mRegI dst, mRegL src) %{
13424 predicate(UseCountTrailingZerosInstruction);
13425 match(Set dst (CountTrailingZerosL src));
// Fixed format mnemonic: the encoding emits dctz, not "dcto".
13427 format %{ "dctz $dst, $src\t# count trailing zeros (long)" %}
13428 ins_encode %{
13429 __ dctz($dst$$Register, $src$$Register);
13430 %}
13431 ins_pipe( ialu_regL_regL );
13432 %}
13434 // ====================VECTOR INSTRUCTIONS=====================================
13436 // Load vectors (8 bytes long)
// Load an 8-byte vector into a vecD (double FP) register, reusing the
// double-load encoding.
13437 instruct loadV8(vecD dst, memory mem) %{
13438 predicate(n->as_LoadVector()->memory_size() == 8);
13439 match(Set dst (LoadVector mem));
13440 ins_cost(125);
13441 format %{ "load $dst, $mem\t! load vector (8 bytes)" %}
13442 ins_encode(load_D_enc(dst, mem));
13443 ins_pipe( fpu_loadF );
13444 %}
13446 // Store vectors (8 bytes long)
// Store an 8-byte vector from a vecD register, reusing the double-store
// encoding.
13447 instruct storeV8(memory mem, vecD src) %{
13448 predicate(n->as_StoreVector()->memory_size() == 8);
13449 match(Set mem (StoreVector mem src));
13450 ins_cost(145);
13451 format %{ "store $mem, $src\t! store vector (8 bytes)" %}
13452 ins_encode(store_D_reg_enc(mem, src));
13453 ins_pipe( fpu_storeF );
13454 %}
// Replicate a byte register into all 8 lanes using the DSP replv_ob
// instruction; only selected when Use3A2000 (Loongson 3A2000+) is set.
13456 instruct Repl8B_DSP(vecD dst, mRegI src) %{
13457 predicate(n->as_Vector()->length() == 8 && Use3A2000);
13458 match(Set dst (ReplicateB src));
13459 ins_cost(100);
13460 format %{ "replv_ob AT, $src\n\t"
13461 "dmtc1 AT, $dst\t! replicate8B" %}
13462 ins_encode %{
13463 __ replv_ob(AT, $src$$Register);
13464 __ dmtc1(AT, $dst$$FloatRegister);
13465 %}
13466 ins_pipe( pipe_mtc1 );
13467 %}
// Generic (non-DSP) 8-byte replicate: successive dins doublings spread the
// byte across all 64 bits of AT (8 -> 16 -> 32 -> 64), then move to FP reg.
// Higher cost than Repl8B_DSP so the DSP form wins when available.
13469 instruct Repl8B(vecD dst, mRegI src) %{
13470 predicate(n->as_Vector()->length() == 8);
13471 match(Set dst (ReplicateB src));
13472 ins_cost(140);
13473 format %{ "move AT, $src\n\t"
13474 "dins AT, AT, 8, 8\n\t"
13475 "dins AT, AT, 16, 16\n\t"
13476 "dinsu AT, AT, 32, 32\n\t"
13477 "dmtc1 AT, $dst\t! replicate8B" %}
13478 ins_encode %{
13479 __ move(AT, $src$$Register);
13480 __ dins(AT, AT, 8, 8);
13481 __ dins(AT, AT, 16, 16);
13482 __ dinsu(AT, AT, 32, 32);
13483 __ dmtc1(AT, $dst$$FloatRegister);
13484 %}
13485 ins_pipe( pipe_mtc1 );
13486 %}
// Replicate an immediate byte via DSP repl_ob (immediate form); Use3A2000 only.
13488 instruct Repl8B_imm_DSP(vecD dst, immI con) %{
13489 predicate(n->as_Vector()->length() == 8 && Use3A2000);
13490 match(Set dst (ReplicateB con));
13491 ins_cost(110);
13492 format %{ "repl_ob AT, [$con]\n\t"
13493 "dmtc1 AT, $dst,0x00\t! replicate8B($con)" %}
13494 ins_encode %{
13495 int val = $con$$constant;
13496 __ repl_ob(AT, val);
13497 __ dmtc1(AT, $dst$$FloatRegister);
13498 %}
13499 ins_pipe( pipe_mtc1 );
13500 %}
// Generic immediate 8-byte replicate: load the constant, then the same
// dins doubling sequence as Repl8B.
13502 instruct Repl8B_imm(vecD dst, immI con) %{
13503 predicate(n->as_Vector()->length() == 8);
13504 match(Set dst (ReplicateB con));
13505 ins_cost(150);
13506 format %{ "move AT, [$con]\n\t"
13507 "dins AT, AT, 8, 8\n\t"
13508 "dins AT, AT, 16, 16\n\t"
13509 "dinsu AT, AT, 32, 32\n\t"
13510 "dmtc1 AT, $dst,0x00\t! replicate8B($con)" %}
13511 ins_encode %{
13512 __ move(AT, $con$$constant);
13513 __ dins(AT, AT, 8, 8);
13514 __ dins(AT, AT, 16, 16);
13515 __ dinsu(AT, AT, 32, 32);
13516 __ dmtc1(AT, $dst$$FloatRegister);
13517 %}
13518 ins_pipe( pipe_mtc1 );
13519 %}
// Replicate zero: a single dmtc1 from R0 clears all 8 lanes.
13521 instruct Repl8B_zero(vecD dst, immI0 zero) %{
13522 predicate(n->as_Vector()->length() == 8);
13523 match(Set dst (ReplicateB zero));
13524 ins_cost(90);
13525 format %{ "dmtc1 R0, $dst\t! replicate8B zero" %}
13526 ins_encode %{
13527 __ dmtc1(R0, $dst$$FloatRegister);
13528 %}
13529 ins_pipe( pipe_mtc1 );
13530 %}
// Replicate -1: nor(AT, R0, R0) yields all-ones, which fills every lane.
13532 instruct Repl8B_M1(vecD dst, immI_M1 M1) %{
13533 predicate(n->as_Vector()->length() == 8);
13534 match(Set dst (ReplicateB M1));
13535 ins_cost(80);
13536 format %{ "dmtc1 -1, $dst\t! replicate8B -1" %}
13537 ins_encode %{
13538 __ nor(AT, R0, R0);
13539 __ dmtc1(AT, $dst$$FloatRegister);
13540 %}
13541 ins_pipe( pipe_mtc1 );
13542 %}
// Replicate a short register into 4 lanes using DSP replv_qh; Use3A2000 only.
13544 instruct Repl4S_DSP(vecD dst, mRegI src) %{
13545 predicate(n->as_Vector()->length() == 4 && Use3A2000);
13546 match(Set dst (ReplicateS src));
13547 ins_cost(100);
13548 format %{ "replv_qh AT, $src\n\t"
13549 "dmtc1 AT, $dst\t! replicate4S" %}
13550 ins_encode %{
13551 __ replv_qh(AT, $src$$Register);
13552 __ dmtc1(AT, $dst$$FloatRegister);
13553 %}
13554 ins_pipe( pipe_mtc1 );
13555 %}
// Generic 4-short replicate: dins doubling 16 -> 32 -> 64 bits, then dmtc1.
13557 instruct Repl4S(vecD dst, mRegI src) %{
13558 predicate(n->as_Vector()->length() == 4);
13559 match(Set dst (ReplicateS src));
13560 ins_cost(120);
13561 format %{ "move AT, $src \n\t"
13562 "dins AT, AT, 16, 16\n\t"
13563 "dinsu AT, AT, 32, 32\n\t"
13564 "dmtc1 AT, $dst\t! replicate4S" %}
13565 ins_encode %{
13566 __ move(AT, $src$$Register);
13567 __ dins(AT, AT, 16, 16);
13568 __ dinsu(AT, AT, 32, 32);
13569 __ dmtc1(AT, $dst$$FloatRegister);
13570 %}
13571 ins_pipe( pipe_mtc1 );
13572 %}
// Replicate an immediate short via DSP: use repl_qh's immediate form when
// the constant fits in 10 signed bits, otherwise materialize it and use
// the register form replv_qh. Use3A2000 only.
13574 instruct Repl4S_imm_DSP(vecD dst, immI con) %{
13575 predicate(n->as_Vector()->length() == 4 && Use3A2000);
13576 match(Set dst (ReplicateS con));
13577 ins_cost(100);
13578 format %{ "replv_qh AT, [$con]\n\t"
13579 "dmtc1 AT, $dst\t! replicate4S($con)" %}
13580 ins_encode %{
13581 int val = $con$$constant;
13582 if ( Assembler::is_simm(val, 10)) {
13583 //repl_qh supports 10 bits immediate
13584 __ repl_qh(AT, val);
13585 } else {
13586 __ li32(AT, val);
13587 __ replv_qh(AT, AT);
13588 }
13589 __ dmtc1(AT, $dst$$FloatRegister);
13590 %}
13591 ins_pipe( pipe_mtc1 );
13592 %}
// Generic immediate 4-short replicate: load constant, then dins doubling.
13594 instruct Repl4S_imm(vecD dst, immI con) %{
13595 predicate(n->as_Vector()->length() == 4);
13596 match(Set dst (ReplicateS con));
13597 ins_cost(110);
13598 format %{ "move AT, [$con]\n\t"
13599 "dins AT, AT, 16, 16\n\t"
13600 "dinsu AT, AT, 32, 32\n\t"
13601 "dmtc1 AT, $dst\t! replicate4S($con)" %}
13602 ins_encode %{
13603 __ move(AT, $con$$constant);
13604 __ dins(AT, AT, 16, 16);
13605 __ dinsu(AT, AT, 32, 32);
13606 __ dmtc1(AT, $dst$$FloatRegister);
13607 %}
13608 ins_pipe( pipe_mtc1 );
13609 %}
// Replicate zero across 4 short lanes with a single dmtc1 from R0.
13611 instruct Repl4S_zero(vecD dst, immI0 zero) %{
13612 predicate(n->as_Vector()->length() == 4);
13613 match(Set dst (ReplicateS zero));
13614 format %{ "dmtc1 R0, $dst\t! replicate4S zero" %}
13615 ins_encode %{
13616 __ dmtc1(R0, $dst$$FloatRegister);
13617 %}
13618 ins_pipe( pipe_mtc1 );
13619 %}
// Replicate -1 across 4 short lanes: all-ones via nor, then dmtc1.
13621 instruct Repl4S_M1(vecD dst, immI_M1 M1) %{
13622 predicate(n->as_Vector()->length() == 4);
13623 match(Set dst (ReplicateS M1));
13624 format %{ "dmtc1 -1, $dst\t! replicate4S -1" %}
13625 ins_encode %{
13626 __ nor(AT, R0, R0);
13627 __ dmtc1(AT, $dst$$FloatRegister);
13628 %}
13629 ins_pipe( pipe_mtc1 );
13630 %}
13632 // Replicate integer (4 byte) scalar to be vector
// Replicate an int register into both 32-bit halves of AT, then move to FP.
13633 instruct Repl2I(vecD dst, mRegI src) %{
13634 predicate(n->as_Vector()->length() == 2);
13635 match(Set dst (ReplicateI src));
13636 format %{ "dins AT, $src, 0, 32\n\t"
13637 "dinsu AT, $src, 32, 32\n\t"
13638 "dmtc1 AT, $dst\t! replicate2I" %}
13639 ins_encode %{
13640 __ dins(AT, $src$$Register, 0, 32);
13641 __ dinsu(AT, $src$$Register, 32, 32);
13642 __ dmtc1(AT, $dst$$FloatRegister);
13643 %}
13644 ins_pipe( pipe_mtc1 );
13645 %}
13647 // Replicate integer (4 byte) scalar immediate to be vector by loading from const table.
// Replicate an immediate int: materialize with li32, duplicate into the
// high half with dinsu, then dmtc1.
// NOTE(review): the effect declares KILL of $tmp (A7) but the visible
// encoding only uses AT — confirm whether li32 clobbers $tmp or the
// effect is a leftover.
13648 instruct Repl2I_imm(vecD dst, immI con, mA7RegI tmp) %{
13649 predicate(n->as_Vector()->length() == 2);
13650 match(Set dst (ReplicateI con));
13651 effect(KILL tmp);
13652 format %{ "li32 AT, [$con], 32\n\t"
13653 "dinsu AT, AT\n\t"
13654 "dmtc1 AT, $dst\t! replicate2I($con)" %}
13655 ins_encode %{
13656 int val = $con$$constant;
13657 __ li32(AT, val);
13658 __ dinsu(AT, AT, 32, 32);
13659 __ dmtc1(AT, $dst$$FloatRegister);
13660 %}
13661 ins_pipe( pipe_mtc1 );
13662 %}
13664 // Replicate integer (4 byte) scalar zero to be vector
// Replicate int zero: single dmtc1 from R0.
13665 instruct Repl2I_zero(vecD dst, immI0 zero) %{
13666 predicate(n->as_Vector()->length() == 2);
13667 match(Set dst (ReplicateI zero));
13668 format %{ "dmtc1 R0, $dst\t! replicate2I zero" %}
13669 ins_encode %{
13670 __ dmtc1(R0, $dst$$FloatRegister);
13671 %}
13672 ins_pipe( pipe_mtc1 );
13673 %}
13675 // Replicate integer (4 byte) scalar -1 to be vector
// Replicate int -1: all-ones in AT via nor, then dmtc1 (clobbers AT).
13676 instruct Repl2I_M1(vecD dst, immI_M1 M1) %{
13677 predicate(n->as_Vector()->length() == 2);
13678 match(Set dst (ReplicateI M1));
13679 format %{ "dmtc1 -1, $dst\t! replicate2I -1, use AT" %}
13680 ins_encode %{
13681 __ nor(AT, R0, R0);
13682 __ dmtc1(AT, $dst$$FloatRegister);
13683 %}
13684 ins_pipe( pipe_mtc1 );
13685 %}
13687 // Replicate float (4 byte) scalar to be vector
// Replicate a float into both halves of a paired-single (PS) register
// using cvt.ps.s with the same source for upper and lower.
13688 instruct Repl2F(vecD dst, regF src) %{
13689 predicate(n->as_Vector()->length() == 2);
13690 match(Set dst (ReplicateF src));
13691 format %{ "cvt.ps $dst, $src, $src\t! replicate2F" %}
13692 ins_encode %{
13693 __ cvt_ps_s($dst$$FloatRegister, $src$$FloatRegister, $src$$FloatRegister);
13694 %}
13695 ins_pipe( pipe_slow );
13696 %}
13698 // Replicate float (4 byte) scalar zero to be vector
// Replicate float zero: dmtc1 from R0 clears both packed-single lanes.
13699 instruct Repl2F_zero(vecD dst, immF0 zero) %{
13700 predicate(n->as_Vector()->length() == 2);
13701 match(Set dst (ReplicateF zero));
13702 format %{ "dmtc1 R0, $dst\t! replicate2F zero" %}
13703 ins_encode %{
13704 __ dmtc1(R0, $dst$$FloatRegister);
13705 %}
13706 ins_pipe( pipe_mtc1 );
13707 %}
13710 // ====================VECTOR ARITHMETIC=======================================
13712 // --------------------------------- ADD --------------------------------------
13714 // Floats vector add
// Packed-single add, two-operand form: dst += src via add.ps.
// NOTE(review): this uses pipe_slow while the three-operand vadd2F3 uses
// fpu_regF_regF — confirm whether the asymmetry is intentional.
13715 instruct vadd2F(vecD dst, vecD src) %{
13716 predicate(n->as_Vector()->length() == 2);
13717 match(Set dst (AddVF dst src));
13718 format %{ "add.ps $dst,$src\t! add packed2F" %}
13719 ins_encode %{
13720 __ add_ps($dst$$FloatRegister, $dst$$FloatRegister, $src$$FloatRegister);
13721 %}
13722 ins_pipe( pipe_slow );
13723 %}
// Packed-single add, three-operand form: dst = src1 + src2.
13725 instruct vadd2F3(vecD dst, vecD src1, vecD src2) %{
13726 predicate(n->as_Vector()->length() == 2);
13727 match(Set dst (AddVF src1 src2));
13728 format %{ "add.ps $dst,$src1,$src2\t! add packed2F" %}
13729 ins_encode %{
13730 __ add_ps($dst$$FloatRegister, $src1$$FloatRegister, $src2$$FloatRegister);
13731 %}
13732 ins_pipe( fpu_regF_regF );
13733 %}
13735 // --------------------------------- SUB --------------------------------------
13737 // Floats vector sub
// Packed-single subtract: dst -= src via sub.ps.
13738 instruct vsub2F(vecD dst, vecD src) %{
13739 predicate(n->as_Vector()->length() == 2);
13740 match(Set dst (SubVF dst src));
13741 format %{ "sub.ps $dst,$src\t! sub packed2F" %}
13742 ins_encode %{
13743 __ sub_ps($dst$$FloatRegister, $dst$$FloatRegister, $src$$FloatRegister);
13744 %}
13745 ins_pipe( fpu_regF_regF );
13746 %}
13748 // --------------------------------- MUL --------------------------------------
13750 // Floats vector mul
// Packed-single multiply, two-operand form: dst *= src via mul.ps.
13751 instruct vmul2F(vecD dst, vecD src) %{
13752 predicate(n->as_Vector()->length() == 2);
13753 match(Set dst (MulVF dst src));
13754 format %{ "mul.ps $dst, $src\t! mul packed2F" %}
13755 ins_encode %{
13756 __ mul_ps($dst$$FloatRegister, $dst$$FloatRegister, $src$$FloatRegister);
13757 %}
13758 ins_pipe( fpu_regF_regF );
13759 %}
// Packed-single multiply, three-operand form: dst = src1 * src2.
13761 instruct vmul2F3(vecD dst, vecD src1, vecD src2) %{
13762 predicate(n->as_Vector()->length() == 2);
13763 match(Set dst (MulVF src1 src2));
13764 format %{ "mul.ps $dst, $src1, $src2\t! mul packed2F" %}
13765 ins_encode %{
13766 __ mul_ps($dst$$FloatRegister, $src1$$FloatRegister, $src2$$FloatRegister);
13767 %}
13768 ins_pipe( fpu_regF_regF );
13769 %}
13771 // --------------------------------- DIV --------------------------------------
13772 // MIPS do not have div.ps
13775 //----------PEEPHOLE RULES-----------------------------------------------------
13776 // These must follow all instruction definitions as they use the names
13777 // defined in the instructions definitions.
13778 //
13779 // peepmatch ( root_instr_name [preceding_instruction]* );
13780 //
13781 // peepconstraint %{
13782 // (instruction_number.operand_name relational_op instruction_number.operand_name
13783 // [, ...] );
13784 // // instruction numbers are zero-based using left to right order in peepmatch
13785 //
13786 // peepreplace ( instr_name ( [instruction_number.operand_name]* ) );
13787 // // provide an instruction_number.operand_name for each operand that appears
13788 // // in the replacement instruction's match rule
13789 //
13790 // ---------VM FLAGS---------------------------------------------------------
13791 //
13792 // All peephole optimizations can be turned off using -XX:-OptoPeephole
13793 //
13794 // Each peephole rule is given an identifying number starting with zero and
13795 // increasing by one in the order seen by the parser. An individual peephole
13796 // can be enabled, and all others disabled, by using -XX:OptoPeepholeAt=#
13797 // on the command-line.
13798 //
13799 // ---------CURRENT LIMITATIONS----------------------------------------------
13800 //
13801 // Only match adjacent instructions in same basic block
13802 // Only equality constraints
13803 // Only constraints between operands, not (0.dest_reg == EAX_enc)
13804 // Only one replacement instruction
13805 //
13806 // ---------EXAMPLE----------------------------------------------------------
13807 //
13808 // // pertinent parts of existing instructions in architecture description
13809 // instruct movI(eRegI dst, eRegI src) %{
13810 // match(Set dst (CopyI src));
13811 // %}
13812 //
13813 // instruct incI_eReg(eRegI dst, immI1 src, eFlagsReg cr) %{
13814 // match(Set dst (AddI dst src));
13815 // effect(KILL cr);
13816 // %}
13817 //
13818 // // Change (inc mov) to lea
13819 // peephole %{
13820 // // increment preceded by register-register move
13821 // peepmatch ( incI_eReg movI );
13822 // // require that the destination register of the increment
13823 // // match the destination register of the move
13824 // peepconstraint ( 0.dst == 1.dst );
13825 // // construct a replacement instruction that sets
13826 // // the destination to ( move's source register + one )
13827 // peepreplace ( leaI_eReg_immI( 0.dst 1.src 0.src ) );
13828 // %}
13829 //
13830 // Implementation no longer uses movX instructions since
13831 // machine-independent system no longer uses CopyX nodes.
13832 //
13833 // peephole %{
13834 // peepmatch ( incI_eReg movI );
13835 // peepconstraint ( 0.dst == 1.dst );
13836 // peepreplace ( leaI_eReg_immI( 0.dst 1.src 0.src ) );
13837 // %}
13838 //
13839 // peephole %{
13840 // peepmatch ( decI_eReg movI );
13841 // peepconstraint ( 0.dst == 1.dst );
13842 // peepreplace ( leaI_eReg_immI( 0.dst 1.src 0.src ) );
13843 // %}
13844 //
13845 // peephole %{
13846 // peepmatch ( addI_eReg_imm movI );
13847 // peepconstraint ( 0.dst == 1.dst );
13848 // peepreplace ( leaI_eReg_immI( 0.dst 1.src 0.src ) );
13849 // %}
13850 //
13851 // peephole %{
13852 // peepmatch ( addP_eReg_imm movP );
13853 // peepconstraint ( 0.dst == 1.dst );
13854 // peepreplace ( leaP_eReg_immI( 0.dst 1.src 0.src ) );
13855 // %}
13857 // // Change load of spilled value to only a spill
13858 // instruct storeI(memory mem, eRegI src) %{
13859 // match(Set mem (StoreI mem src));
13860 // %}
13861 //
13862 // instruct loadI(eRegI dst, memory mem) %{
13863 // match(Set dst (LoadI mem));
13864 // %}
13865 //
13866 //peephole %{
13867 // peepmatch ( loadI storeI );
13868 // peepconstraint ( 1.src == 0.dst, 1.mem == 0.mem );
13869 // peepreplace ( storeI( 1.mem 1.mem 1.src ) );
13870 //%}
13872 //----------SMARTSPILL RULES---------------------------------------------------
13873 // These must follow all instruction definitions as they use the names
13874 // defined in the instructions definitions.