Wed, 01 Mar 2017 07:47:24 -0500
[C2] Rewrite loadConP_general and clean some dirty code.
1 //
2 // Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved.
3 // Copyright (c) 2015, 2016, Loongson Technology. All rights reserved.
4 // DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5 //
6 // This code is free software; you can redistribute it and/or modify it
7 // under the terms of the GNU General Public License version 2 only, as
8 // published by the Free Software Foundation.
9 //
10 // This code is distributed in the hope that it will be useful, but WITHOUT
11 // ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 // FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
13 // version 2 for more details (a copy is included in the LICENSE file that
14 // accompanied this code).
15 //
16 // You should have received a copy of the GNU General Public License version
17 // 2 along with this work; if not, write to the Free Software Foundation,
18 // Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
19 //
20 // Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
21 // or visit www.oracle.com if you need additional information or have any
22 // questions.
23 //
24 //
26 // GodSon3 Architecture Description File
28 //----------REGISTER DEFINITION BLOCK------------------------------------------
29 // This information is used by the matcher and the register allocator to
30 // describe individual registers and classes of registers within the target
// architecture.
33 // format:
34 // reg_def name (call convention, c-call convention, ideal type, encoding);
35 // call convention :
36 // NS = No-Save
37 // SOC = Save-On-Call
38 // SOE = Save-On-Entry
39 // AS = Always-Save
40 // ideal type :
41 // see opto/opcodes.hpp for more info
42 // reg_class name (reg, ...);
43 // alloc_class name (reg, ...);
44 register %{
46 // General Registers
47 // Integer Registers
48 reg_def R0 ( NS, NS, Op_RegI, 0, VMRegImpl::Bad());
49 reg_def AT ( NS, NS, Op_RegI, 1, AT->as_VMReg());
50 reg_def AT_H ( NS, NS, Op_RegI, 1, AT->as_VMReg()->next());
51 reg_def V0 (SOC, SOC, Op_RegI, 2, V0->as_VMReg());
52 reg_def V0_H (SOC, SOC, Op_RegI, 2, V0->as_VMReg()->next());
53 reg_def V1 (SOC, SOC, Op_RegI, 3, V1->as_VMReg());
54 reg_def V1_H (SOC, SOC, Op_RegI, 3, V1->as_VMReg()->next());
55 reg_def A0 (SOC, SOC, Op_RegI, 4, A0->as_VMReg());
56 reg_def A0_H (SOC, SOC, Op_RegI, 4, A0->as_VMReg()->next());
57 reg_def A1 (SOC, SOC, Op_RegI, 5, A1->as_VMReg());
58 reg_def A1_H (SOC, SOC, Op_RegI, 5, A1->as_VMReg()->next());
59 reg_def A2 (SOC, SOC, Op_RegI, 6, A2->as_VMReg());
60 reg_def A2_H (SOC, SOC, Op_RegI, 6, A2->as_VMReg()->next());
61 reg_def A3 (SOC, SOC, Op_RegI, 7, A3->as_VMReg());
62 reg_def A3_H (SOC, SOC, Op_RegI, 7, A3->as_VMReg()->next());
63 reg_def A4 (SOC, SOC, Op_RegI, 8, A4->as_VMReg());
64 reg_def A4_H (SOC, SOC, Op_RegI, 8, A4->as_VMReg()->next());
65 reg_def A5 (SOC, SOC, Op_RegI, 9, A5->as_VMReg());
66 reg_def A5_H (SOC, SOC, Op_RegI, 9, A5->as_VMReg()->next());
67 reg_def A6 (SOC, SOC, Op_RegI, 10, A6->as_VMReg());
68 reg_def A6_H (SOC, SOC, Op_RegI, 10, A6->as_VMReg()->next());
69 reg_def A7 (SOC, SOC, Op_RegI, 11, A7->as_VMReg());
70 reg_def A7_H (SOC, SOC, Op_RegI, 11, A7->as_VMReg()->next());
71 reg_def T0 (SOC, SOC, Op_RegI, 12, T0->as_VMReg());
72 reg_def T0_H (SOC, SOC, Op_RegI, 12, T0->as_VMReg()->next());
73 reg_def T1 (SOC, SOC, Op_RegI, 13, T1->as_VMReg());
74 reg_def T1_H (SOC, SOC, Op_RegI, 13, T1->as_VMReg()->next());
75 reg_def T2 (SOC, SOC, Op_RegI, 14, T2->as_VMReg());
76 reg_def T2_H (SOC, SOC, Op_RegI, 14, T2->as_VMReg()->next());
77 reg_def T3 (SOC, SOC, Op_RegI, 15, T3->as_VMReg());
78 reg_def T3_H (SOC, SOC, Op_RegI, 15, T3->as_VMReg()->next());
79 reg_def S0 (SOC, SOE, Op_RegI, 16, S0->as_VMReg());
80 reg_def S0_H (SOC, SOE, Op_RegI, 16, S0->as_VMReg()->next());
81 reg_def S1 (SOC, SOE, Op_RegI, 17, S1->as_VMReg());
82 reg_def S1_H (SOC, SOE, Op_RegI, 17, S1->as_VMReg()->next());
83 reg_def S2 (SOC, SOE, Op_RegI, 18, S2->as_VMReg());
84 reg_def S2_H (SOC, SOE, Op_RegI, 18, S2->as_VMReg()->next());
85 reg_def S3 (SOC, SOE, Op_RegI, 19, S3->as_VMReg());
86 reg_def S3_H (SOC, SOE, Op_RegI, 19, S3->as_VMReg()->next());
87 reg_def S4 (SOC, SOE, Op_RegI, 20, S4->as_VMReg());
88 reg_def S4_H (SOC, SOE, Op_RegI, 20, S4->as_VMReg()->next());
89 reg_def S5 (SOC, SOE, Op_RegI, 21, S5->as_VMReg());
90 reg_def S5_H (SOC, SOE, Op_RegI, 21, S5->as_VMReg()->next());
91 reg_def S6 (SOC, SOE, Op_RegI, 22, S6->as_VMReg());
92 reg_def S6_H (SOC, SOE, Op_RegI, 22, S6->as_VMReg()->next());
93 reg_def S7 (SOC, SOE, Op_RegI, 23, S7->as_VMReg());
94 reg_def S7_H (SOC, SOE, Op_RegI, 23, S7->as_VMReg()->next());
95 reg_def T8 (SOC, SOC, Op_RegI, 24, T8->as_VMReg());
96 reg_def T8_H (SOC, SOC, Op_RegI, 24, T8->as_VMReg()->next());
97 reg_def T9 (SOC, SOC, Op_RegI, 25, T9->as_VMReg());
98 reg_def T9_H (SOC, SOC, Op_RegI, 25, T9->as_VMReg()->next());
100 // Special Registers
101 reg_def K0 ( NS, NS, Op_RegI, 26, K0->as_VMReg());
102 reg_def K1 ( NS, NS, Op_RegI, 27, K1->as_VMReg());
103 reg_def GP ( NS, NS, Op_RegI, 28, GP->as_VMReg());
104 reg_def GP_H ( NS, NS, Op_RegI, 28, GP->as_VMReg()->next());
105 reg_def SP ( NS, NS, Op_RegI, 29, SP->as_VMReg());
106 reg_def SP_H ( NS, NS, Op_RegI, 29, SP->as_VMReg()->next());
107 reg_def FP ( NS, NS, Op_RegI, 30, FP->as_VMReg());
108 reg_def FP_H ( NS, NS, Op_RegI, 30, FP->as_VMReg()->next());
109 reg_def RA ( NS, NS, Op_RegI, 31, RA->as_VMReg());
110 reg_def RA_H ( NS, NS, Op_RegI, 31, RA->as_VMReg()->next());
112 // Floating registers.
113 reg_def F0 ( SOC, SOC, Op_RegF, 0, F0->as_VMReg());
114 reg_def F0_H ( SOC, SOC, Op_RegF, 0, F0->as_VMReg()->next());
115 reg_def F1 ( SOC, SOC, Op_RegF, 1, F1->as_VMReg());
116 reg_def F1_H ( SOC, SOC, Op_RegF, 1, F1->as_VMReg()->next());
117 reg_def F2 ( SOC, SOC, Op_RegF, 2, F2->as_VMReg());
118 reg_def F2_H ( SOC, SOC, Op_RegF, 2, F2->as_VMReg()->next());
119 reg_def F3 ( SOC, SOC, Op_RegF, 3, F3->as_VMReg());
120 reg_def F3_H ( SOC, SOC, Op_RegF, 3, F3->as_VMReg()->next());
121 reg_def F4 ( SOC, SOC, Op_RegF, 4, F4->as_VMReg());
122 reg_def F4_H ( SOC, SOC, Op_RegF, 4, F4->as_VMReg()->next());
123 reg_def F5 ( SOC, SOC, Op_RegF, 5, F5->as_VMReg());
124 reg_def F5_H ( SOC, SOC, Op_RegF, 5, F5->as_VMReg()->next());
125 reg_def F6 ( SOC, SOC, Op_RegF, 6, F6->as_VMReg());
126 reg_def F6_H ( SOC, SOC, Op_RegF, 6, F6->as_VMReg()->next());
127 reg_def F7 ( SOC, SOC, Op_RegF, 7, F7->as_VMReg());
128 reg_def F7_H ( SOC, SOC, Op_RegF, 7, F7->as_VMReg()->next());
129 reg_def F8 ( SOC, SOC, Op_RegF, 8, F8->as_VMReg());
130 reg_def F8_H ( SOC, SOC, Op_RegF, 8, F8->as_VMReg()->next());
131 reg_def F9 ( SOC, SOC, Op_RegF, 9, F9->as_VMReg());
132 reg_def F9_H ( SOC, SOC, Op_RegF, 9, F9->as_VMReg()->next());
133 reg_def F10 ( SOC, SOC, Op_RegF, 10, F10->as_VMReg());
134 reg_def F10_H ( SOC, SOC, Op_RegF, 10, F10->as_VMReg()->next());
135 reg_def F11 ( SOC, SOC, Op_RegF, 11, F11->as_VMReg());
136 reg_def F11_H ( SOC, SOC, Op_RegF, 11, F11->as_VMReg()->next());
137 reg_def F12 ( SOC, SOC, Op_RegF, 12, F12->as_VMReg());
138 reg_def F12_H ( SOC, SOC, Op_RegF, 12, F12->as_VMReg()->next());
139 reg_def F13 ( SOC, SOC, Op_RegF, 13, F13->as_VMReg());
140 reg_def F13_H ( SOC, SOC, Op_RegF, 13, F13->as_VMReg()->next());
141 reg_def F14 ( SOC, SOC, Op_RegF, 14, F14->as_VMReg());
142 reg_def F14_H ( SOC, SOC, Op_RegF, 14, F14->as_VMReg()->next());
143 reg_def F15 ( SOC, SOC, Op_RegF, 15, F15->as_VMReg());
144 reg_def F15_H ( SOC, SOC, Op_RegF, 15, F15->as_VMReg()->next());
145 reg_def F16 ( SOC, SOC, Op_RegF, 16, F16->as_VMReg());
146 reg_def F16_H ( SOC, SOC, Op_RegF, 16, F16->as_VMReg()->next());
147 reg_def F17 ( SOC, SOC, Op_RegF, 17, F17->as_VMReg());
148 reg_def F17_H ( SOC, SOC, Op_RegF, 17, F17->as_VMReg()->next());
149 reg_def F18 ( SOC, SOC, Op_RegF, 18, F18->as_VMReg());
150 reg_def F18_H ( SOC, SOC, Op_RegF, 18, F18->as_VMReg()->next());
151 reg_def F19 ( SOC, SOC, Op_RegF, 19, F19->as_VMReg());
152 reg_def F19_H ( SOC, SOC, Op_RegF, 19, F19->as_VMReg()->next());
153 reg_def F20 ( SOC, SOC, Op_RegF, 20, F20->as_VMReg());
154 reg_def F20_H ( SOC, SOC, Op_RegF, 20, F20->as_VMReg()->next());
155 reg_def F21 ( SOC, SOC, Op_RegF, 21, F21->as_VMReg());
156 reg_def F21_H ( SOC, SOC, Op_RegF, 21, F21->as_VMReg()->next());
157 reg_def F22 ( SOC, SOC, Op_RegF, 22, F22->as_VMReg());
158 reg_def F22_H ( SOC, SOC, Op_RegF, 22, F22->as_VMReg()->next());
159 reg_def F23 ( SOC, SOC, Op_RegF, 23, F23->as_VMReg());
160 reg_def F23_H ( SOC, SOC, Op_RegF, 23, F23->as_VMReg()->next());
161 reg_def F24 ( SOC, SOC, Op_RegF, 24, F24->as_VMReg());
162 reg_def F24_H ( SOC, SOC, Op_RegF, 24, F24->as_VMReg()->next());
163 reg_def F25 ( SOC, SOC, Op_RegF, 25, F25->as_VMReg());
164 reg_def F25_H ( SOC, SOC, Op_RegF, 25, F25->as_VMReg()->next());
165 reg_def F26 ( SOC, SOC, Op_RegF, 26, F26->as_VMReg());
166 reg_def F26_H ( SOC, SOC, Op_RegF, 26, F26->as_VMReg()->next());
167 reg_def F27 ( SOC, SOC, Op_RegF, 27, F27->as_VMReg());
168 reg_def F27_H ( SOC, SOC, Op_RegF, 27, F27->as_VMReg()->next());
169 reg_def F28 ( SOC, SOC, Op_RegF, 28, F28->as_VMReg());
170 reg_def F28_H ( SOC, SOC, Op_RegF, 28, F28->as_VMReg()->next());
171 reg_def F29 ( SOC, SOC, Op_RegF, 29, F29->as_VMReg());
172 reg_def F29_H ( SOC, SOC, Op_RegF, 29, F29->as_VMReg()->next());
173 reg_def F30 ( SOC, SOC, Op_RegF, 30, F30->as_VMReg());
174 reg_def F30_H ( SOC, SOC, Op_RegF, 30, F30->as_VMReg()->next());
175 reg_def F31 ( SOC, SOC, Op_RegF, 31, F31->as_VMReg());
176 reg_def F31_H ( SOC, SOC, Op_RegF, 31, F31->as_VMReg()->next());
179 // ----------------------------
180 // Special Registers
181 // Condition Codes Flag Registers
182 reg_def MIPS_FLAG (SOC, SOC, Op_RegFlags, 1, as_Register(1)->as_VMReg());
// S6 is used for get_thread(S6)
// S5 is used for the heap base of compressed oops
// Allocation order for the general (integer) registers.  Callee-saved
// S-registers come first, then temporaries and argument registers;
// SP/FP are last since they are reserved.
alloc_class chunk0(
    S7, S7_H,
    S0, S0_H,
    S1, S1_H,
    S2, S2_H,
    S4, S4_H,
    S5, S5_H,
    S6, S6_H,
    S3, S3_H,
    T2, T2_H,
    T3, T3_H,
    T8, T8_H,
    T9, T9_H,
    T1, T1_H, // inline_cache_reg
    V1, V1_H,
    A7, A7_H,
    A6, A6_H,
    A5, A5_H,
    A4, A4_H,
    V0, V0_H,
    A3, A3_H,
    A2, A2_H,
    A1, A1_H,
    A0, A0_H,
    T0, T0_H, // fix: the list separator was missing between GP_H and RA
    GP, GP_H,
    RA, RA_H,
    SP, SP_H, // stack_pointer
    FP, FP_H  // frame_pointer
    );
216 alloc_class chunk1( F0, F0_H,
217 F1, F1_H,
218 F2, F2_H,
219 F3, F3_H,
220 F4, F4_H,
221 F5, F5_H,
222 F6, F6_H,
223 F7, F7_H,
224 F8, F8_H,
225 F9, F9_H,
226 F10, F10_H,
227 F11, F11_H,
228 F20, F20_H,
229 F21, F21_H,
230 F22, F22_H,
231 F23, F23_H,
232 F24, F24_H,
233 F25, F25_H,
234 F26, F26_H,
235 F27, F27_H,
236 F28, F28_H,
237 F19, F19_H,
238 F18, F18_H,
239 F17, F17_H,
240 F16, F16_H,
241 F15, F15_H,
242 F14, F14_H,
243 F13, F13_H,
244 F12, F12_H,
245 F29, F29_H,
246 F30, F30_H,
247 F31, F31_H);
249 alloc_class chunk2(MIPS_FLAG);
251 reg_class s_reg( S0, S1, S2, S3, S4, S5, S6, S7 );
252 reg_class s0_reg( S0 );
253 reg_class s1_reg( S1 );
254 reg_class s2_reg( S2 );
255 reg_class s3_reg( S3 );
256 reg_class s4_reg( S4 );
257 reg_class s5_reg( S5 );
258 reg_class s6_reg( S6 );
259 reg_class s7_reg( S7 );
261 reg_class t_reg( T0, T1, T2, T3, T8, T9 );
262 reg_class t0_reg( T0 );
263 reg_class t1_reg( T1 );
264 reg_class t2_reg( T2 );
265 reg_class t3_reg( T3 );
266 reg_class t8_reg( T8 );
267 reg_class t9_reg( T9 );
269 reg_class a_reg( A0, A1, A2, A3, A4, A5, A6, A7 );
270 reg_class a0_reg( A0 );
271 reg_class a1_reg( A1 );
272 reg_class a2_reg( A2 );
273 reg_class a3_reg( A3 );
274 reg_class a4_reg( A4 );
275 reg_class a5_reg( A5 );
276 reg_class a6_reg( A6 );
277 reg_class a7_reg( A7 );
279 reg_class v0_reg( V0 );
280 reg_class v1_reg( V1 );
282 reg_class sp_reg( SP, SP_H );
283 reg_class fp_reg( FP, FP_H );
285 reg_class mips_flags(MIPS_FLAG);
287 reg_class v0_long_reg( V0, V0_H );
288 reg_class v1_long_reg( V1, V1_H );
289 reg_class a0_long_reg( A0, A0_H );
290 reg_class a1_long_reg( A1, A1_H );
291 reg_class a2_long_reg( A2, A2_H );
292 reg_class a3_long_reg( A3, A3_H );
293 reg_class a4_long_reg( A4, A4_H );
294 reg_class a5_long_reg( A5, A5_H );
295 reg_class a6_long_reg( A6, A6_H );
296 reg_class a7_long_reg( A7, A7_H );
297 reg_class t0_long_reg( T0, T0_H );
298 reg_class t1_long_reg( T1, T1_H );
299 reg_class t2_long_reg( T2, T2_H );
300 reg_class t3_long_reg( T3, T3_H );
301 reg_class t8_long_reg( T8, T8_H );
302 reg_class t9_long_reg( T9, T9_H );
303 reg_class s0_long_reg( S0, S0_H );
304 reg_class s1_long_reg( S1, S1_H );
305 reg_class s2_long_reg( S2, S2_H );
306 reg_class s3_long_reg( S3, S3_H );
307 reg_class s4_long_reg( S4, S4_H );
308 reg_class s5_long_reg( S5, S5_H );
309 reg_class s6_long_reg( S6, S6_H );
310 reg_class s7_long_reg( S7, S7_H );
312 reg_class int_reg( S7, S0, S1, S2, S4, S3, T8, T2, T3, T1, V1, A7, A6, A5, A4, V0, A3, A2, A1, A0, T0 );
314 reg_class no_Ax_int_reg( S7, S0, S1, S2, S4, S3, T8, T2, T3, T1, V1, V0, T0 );
316 reg_class p_reg(
317 S7, S7_H,
318 S0, S0_H,
319 S1, S1_H,
320 S2, S2_H,
321 S4, S4_H,
322 S3, S3_H,
323 T8, T8_H,
324 T2, T2_H,
325 T3, T3_H,
326 T1, T1_H,
327 A7, A7_H,
328 A6, A6_H,
329 A5, A5_H,
330 A4, A4_H,
331 A3, A3_H,
332 A2, A2_H,
333 A1, A1_H,
334 A0, A0_H,
335 T0, T0_H
336 );
338 reg_class no_T8_p_reg(
339 S7, S7_H,
340 S0, S0_H,
341 S1, S1_H,
342 S2, S2_H,
343 S4, S4_H,
344 S3, S3_H,
345 T2, T2_H,
346 T3, T3_H,
347 T1, T1_H,
348 A7, A7_H,
349 A6, A6_H,
350 A5, A5_H,
351 A4, A4_H,
352 A3, A3_H,
353 A2, A2_H,
354 A1, A1_H,
355 A0, A0_H,
356 T0, T0_H
357 );
359 reg_class long_reg(
360 S7, S7_H,
361 S0, S0_H,
362 S1, S1_H,
363 S2, S2_H,
364 S4, S4_H,
365 S3, S3_H,
366 T8, T8_H,
367 T2, T2_H,
368 T3, T3_H,
369 T1, T1_H,
370 A7, A7_H,
371 A6, A6_H,
372 A5, A5_H,
373 A4, A4_H,
374 A3, A3_H,
375 A2, A2_H,
376 A1, A1_H,
377 A0, A0_H,
378 T0, T0_H
379 );
382 // Floating point registers.
383 // 2012/8/23 Fu: F30/F31 are used as temporary registers in D2I
384 // 2016/12/1 aoqi: F31 are not used as temporary registers in D2I
// Allocatable single-precision FP registers.  F30 is excluded: it is used
// as a temporary in D2I.  Fix: a comma was missing between F17 and F18.
reg_class flt_reg( F0, F1, F2, F3, F4, F5, F6, F7, F8, F9, F10, F11, F12, F13, F14, F15, F16, F17, F18, F19, F20, F21, F22, F23, F24, F25, F26, F27, F28, F29, F31);
386 reg_class dbl_reg( F0, F0_H,
387 F1, F1_H,
388 F2, F2_H,
389 F3, F3_H,
390 F4, F4_H,
391 F5, F5_H,
392 F6, F6_H,
393 F7, F7_H,
394 F8, F8_H,
395 F9, F9_H,
396 F10, F10_H,
397 F11, F11_H,
398 F12, F12_H,
399 F13, F13_H,
400 F14, F14_H,
401 F15, F15_H,
402 F16, F16_H,
403 F17, F17_H,
404 F18, F18_H,
405 F19, F19_H,
406 F20, F20_H,
407 F21, F21_H,
408 F22, F22_H,
409 F23, F23_H,
410 F24, F24_H,
411 F25, F25_H,
412 F26, F26_H,
413 F27, F27_H,
414 F28, F28_H,
415 F29, F29_H,
416 F31, F31_H);
418 reg_class flt_arg0( F12 );
419 reg_class dbl_arg0( F12, F12_H );
420 reg_class dbl_arg1( F14, F14_H );
422 %}
424 //----------DEFINITION BLOCK---------------------------------------------------
425 // Define name --> value mappings to inform the ADLC of an integer valued name
426 // Current support includes integer values in the range [0, 0x7FFFFFFF]
427 // Format:
428 // int_def <name> ( <int_value>, <expression>);
429 // Generated Code in ad_<arch>.hpp
430 // #define <name> (<expression>)
431 // // value == <int_value>
432 // Generated code in ad_<arch>.cpp adlc_verification()
433 // assert( <name> == <int_value>, "Expect (<expression>) to equal <int_value>");
434 //
definitions %{
  // Baseline unit for the instruction cost model used by the scheduler.
  int_def DEFAULT_COST      (    100,     100);
  int_def HUGE_COST         (1000000, 1000000);

  // Memory refs are twice as expensive as run-of-the-mill.
  int_def MEMORY_REF_COST   (    200, DEFAULT_COST * 2);

  // Branches are even more expensive.
  int_def BRANCH_COST       (    300, DEFAULT_COST * 3);
  // We use the jr instruction to construct calls, so they are more
  // expensive.  (yjl 2/28/2006)
  int_def CALL_COST         (    500, DEFAULT_COST * 5);
/*
  int_def EQUAL             (      1,       1 );
  int_def NOT_EQUAL         (      2,       2 );
  int_def GREATER           (      3,       3 );
  int_def GREATER_EQUAL     (      4,       4 );
  int_def LESS              (      5,       5 );
  int_def LESS_EQUAL        (      6,       6 );
*/
%}
459 //----------SOURCE BLOCK-------------------------------------------------------
460 // This is a block of C++ code which provides values, functions, and
461 // definitions necessary in the rest of the architecture description
463 source_hpp %{
464 // Header information of the source block.
465 // Method declarations/definitions which are used outside
466 // the ad-scope can conveniently be defined here.
467 //
468 // To keep related declarations/definitions/uses close together,
469 // we switch between source %{ }% and source_hpp %{ }% freely as needed.
// Queried by Compile::shorten_branches for the size and relocation
// footprint of call trampoline stubs.  This platform does not use call
// trampolines, so both queries return 0.
class CallStubImpl {

  //--------------------------------------------------------------
  //---<  Used for optimization in Compile::shorten_branches  >---
  //--------------------------------------------------------------

 public:
  // Size of call trampoline stub.
  static uint size_call_trampoline() {
    return 0; // no call trampolines on this platform
  }

  // Number of relocations needed by a call trampoline stub.
  static uint reloc_call_trampoline() {
    return 0; // no call trampolines on this platform
  }
};
// Emits and sizes the exception and deopt handler stubs placed at the
// end of each nmethod's code section.
class HandlerImpl {

 public:

  static int emit_exception_handler(CodeBuffer &cbuf);
  static int emit_deopt_handler(CodeBuffer& cbuf);

  static uint size_exception_handler() {
    // NativeCall instruction size is the same as NativeJump.
    // The exception handler starts out as a jump and can be patched to
    // a call by deoptimization. (4932387)
    // Note that this value is also credited (in output.cpp) to
    // the size of the code section.
    // return NativeJump::instruction_size;
    int size = NativeCall::instruction_size;
    return round_to(size, 16);  // stub is kept 16-byte aligned for patching
  }

#ifdef _LP64
  static uint size_deopt_handler() {
    int size = NativeCall::instruction_size;
    return round_to(size, 16);
  }
#else
  static uint size_deopt_handler() {
    // NativeCall instruction size is the same as NativeJump.
    // The exception handler starts out as a jump and can be patched to
    // a call by deoptimization. (4932387)
    // Note that this value is also credited (in output.cpp) to
    // the size of the code section.
    return 5 + NativeJump::instruction_size; // pushl(); jmp;
  }
#endif
};
524 %} // end source_hpp
526 source %{
528 #define NO_INDEX 0
529 #define RELOC_IMM64 Assembler::imm_operand
530 #define RELOC_DISP32 Assembler::disp32_operand
533 #define __ _masm.
536 // Emit exception handler code.
537 // Stuff framesize into a register and call a VM stub routine.
// Emit exception handler code.
// Materializes the entry point of the opto exception blob into T9 and
// jumps to it.  Returns the offset of the handler within the stub
// section, or 0 if the code buffer could not be expanded.
int HandlerImpl::emit_exception_handler(CodeBuffer& cbuf) {
  /*
  // Note that the code buffer's insts_mark is always relative to insts.
  // That's why we must use the macroassembler to generate a handler.
  MacroAssembler _masm(&cbuf);
  address base = __ start_a_stub(size_exception_handler());
  if (base == NULL)  return 0;  // CodeBuffer::expand failed
  int offset = __ offset();
  __ jump(RuntimeAddress(OptoRuntime::exception_blob()->entry_point()));
  assert(__ offset() - offset <= (int) size_exception_handler(), "overflow");
  __ end_a_stub();
  return offset;
  */
  // Note that the code buffer's insts_mark is always relative to insts.
  // That's why we must use the macroassembler to generate a handler.
  MacroAssembler _masm(&cbuf);
  address base =
  __ start_a_stub(size_exception_handler());
  if (base == NULL)  return 0;  // CodeBuffer::expand failed
  int offset = __ offset();

  __ block_comment("; emit_exception_handler");

  // 2012/9/25 FIXME Jin: According to X86, we should use a direct jump.
  // However, that triggers an assert after the 40th method:
  //   39  b  java.lang.Throwable::<init> (25 bytes)
  //   --- ns java.lang.Throwable::fillInStackTrace
  //   40 !b  java.net.URLClassLoader::findClass (29 bytes)
  //   /vm/opto/runtime.cpp, 900, assert(caller.is_compiled_frame(), "must be")
  //   40  made not entrant (2) java.net.URLClassLoader::findClass (29 bytes)
  // If we change from JR to JALR the assert disappears, but WebClient
  // fails after the 403rd method for an unknown reason.
  __ li48(T9, (long)OptoRuntime::exception_blob()->entry_point());
  __ jr(T9);
  __ delayed()->nop();          // branch delay slot
  __ align(16);
  assert(__ offset() - offset <= (int) size_exception_handler(), "overflow");
  __ end_a_stub();
  return offset;
}
582 // Emit deopt handler code.
// Emit deopt handler code.
// Materializes the deopt blob's unpack entry into T9 and calls it via
// jalr (a call, not a jump, so the return address identifies the deopt
// site).  Returns the stub offset, or 0 on CodeBuffer::expand failure.
int HandlerImpl::emit_deopt_handler(CodeBuffer& cbuf) {
  // Note that the code buffer's insts_mark is always relative to insts.
  // That's why we must use the macroassembler to generate a handler.
  MacroAssembler _masm(&cbuf);
  address base =
  __ start_a_stub(size_deopt_handler());

  // FIXME
  if (base == NULL) return 0;  // CodeBuffer::expand failed
  int offset = __ offset();

  __ block_comment("; emit_deopt_handler");

  cbuf.set_insts_mark();
  __ relocate(relocInfo::runtime_call_type);

  __ li48(T9, (long)SharedRuntime::deopt_blob()->unpack());
  __ jalr(T9);
  __ delayed()->nop();          // branch delay slot
  __ align(16);
  assert(__ offset() - offset <= (int) size_deopt_handler(), "overflow");
  __ end_a_stub();
  return offset;
}
// Tell the matcher whether an ideal opcode has a usable match rule on
// this platform.  A rule may exist in the AD file but still be disabled
// by a VM flag.
const bool Matcher::match_rule_supported(int opcode) {
  if (!has_match_rule(opcode))
    return false;

  switch (opcode) {
    // NOTE: the Op_CountLeadingZeros cases could be removed; all MIPS
    // CPUs support clz & dclz.
    case Op_CountLeadingZerosI:
    case Op_CountLeadingZerosL:
      if (!UseCountLeadingZerosInstruction)
        return false;
      break;
    case Op_CountTrailingZerosI:
    case Op_CountTrailingZerosL:
      if (!UseCountTrailingZerosInstruction)
        return false;
      break;
  }

  return true;  // Per default match rules are supported.
}
// FIXME
// Emit the call stub for compiled-Java-to-interpreter calls.
// The stub is fixed up when the corresponding call is converted from
// calling compiled code to calling interpreted code.  Conceptually:
//   mov rbx, 0
//   jmp -1
void emit_java_to_interp(CodeBuffer &cbuf ) {
  address mark = cbuf.insts_mark();  // get mark within main instrs section

  // Note that the code buffer's insts_mark is always relative to insts.
  // That's why we must use the macroassembler to generate a stub.
  MacroAssembler _masm(&cbuf);

  address base =
  __ start_a_stub(Compile::MAX_stubs_size);
  if (base == NULL) return;  // CodeBuffer::expand failed

  // Static stub relocation stores the instruction address of the call.
  __ relocate(static_stub_Relocation::spec(mark), 0);

  // 2012/10/29 Jin: S3 contains the methodOop; it should be relocated
  // for GC.
  /*
  int oop_index = __ oop_recorder()->allocate_index(NULL);
  RelocationHolder rspec = oop_Relocation::spec(oop_index);
  __ relocate(rspec);
  */

  // Static stub relocation also tags the methodOop in the code-stream.
  __ li48(S3, (long)0);
  // This is recognized as unresolved by relocs/nativeInst/ic code.

  __ relocate(relocInfo::runtime_call_type);

  cbuf.set_insts_mark();
  address call_pc = (address)-1;   // -1 marks the target as unresolved
  __ li48(AT, (long)call_pc);
  __ jr(AT);
  __ nop();                        // delay slot
  __ align(16);
  __ end_a_stub();
  // Update current stubs pointer and restore code_end.
}
// Size of the call stub, compiled java to interpreter: a li48 sequence
// (4 instructions) plus the NativeCall sequence, rounded to 16 bytes.
uint size_java_to_interp() {
  int size = 4 * 4 + NativeCall::instruction_size; // sizeof(li48) + NativeCall::instruction_size
  return round_to(size, 16);
}

// Relocation entries for the call stub, compiled java to interpreter.
uint reloc_java_to_interp() {
  return 16;  // in emit_java_to_interp + in Java_Static_Call
}
685 bool Matcher::is_short_branch_offset(int rule, int br_size, int offset) {
686 if( Assembler::is_simm16(offset) ) return true;
687 else
688 {
689 assert(false, "Not implemented yet !" );
690 Unimplemented();
691 }
692 }
// No additional cost for CMOVL.
const int Matcher::long_cmove_cost() { return 0; }

// Float/double conditional moves are not profitable here; returning
// ConditionalMoveLimit tells the matcher to use branches instead.
const int Matcher::float_cmove_cost() { return ConditionalMoveLimit; }

// Does the CPU require late expand (see block.cpp for description of late expand)?
const bool Matcher::require_postalloc_expand = false;

// Should the Matcher clone shifts on addressing modes, expecting them
// to be subsumed into complex addressing expressions or compute them
// into registers?  True for Intel but false for most RISCs.
const bool Matcher::clone_shift_expressions = false;

// Do we need to mask the count passed to shift instructions or does
// the cpu only look at the lower 5/6 bits anyway?
const bool Matcher::need_masked_shift_count = false;
// Whether (DecodeN mem) addressing can be matched directly.  False:
// narrow oops are decoded explicitly on this platform.
bool Matcher::narrow_oop_use_complex_address() {
  NOT_LP64(ShouldNotCallThis());
  assert(UseCompressedOops, "only for compressed oops code");
  return false;
}

// Same as above for compressed class pointers.
bool Matcher::narrow_klass_use_complex_address() {
  NOT_LP64(ShouldNotCallThis());
  assert(UseCompressedClassPointers, "only for compressed klass code");
  return false;
}
// This is UltraSparc specific; true just means we have fast l2f conversion.
const bool Matcher::convL2FSupported(void) {
  return true;
}

// Max vector size in bytes. 0 if not supported.
// Only 8-byte vectors are supported on this platform.
const int Matcher::vector_width_in_bytes(BasicType bt) {
  assert(MaxVectorSize == 8, "");
  return 8;
}
// Vector ideal reg: only 8-byte (VecD) vectors are supported.
const int Matcher::vector_ideal_reg(int size) {
  assert(MaxVectorSize == 8, "");
  switch(size) {
    case 8: return Op_VecD;
  }
  ShouldNotReachHere();
  return 0;
}

// Only lowest bits of xmm reg are used for vector shift count.
// Vector shifts are not supported on this platform.
const int Matcher::vector_shift_count_ideal_reg(int size) {
  fatal("vector shift is not supported");
  return Node::NotAMachineReg;
}

// Limits on vector size (number of elements) loaded into vector.
const int Matcher::max_vector_size(const BasicType bt) {
  assert(is_java_primitive(bt), "only primitive type vectors");
  return vector_width_in_bytes(bt)/type2aelembytes(bt);
}

// Minimum equals maximum: no partial vector sizes.
const int Matcher::min_vector_size(const BasicType bt) {
  return max_vector_size(bt);  // Same as max.
}
// Does MIPS support misaligned vector store/load? FIXME — currently we
// conservatively say no.
const bool Matcher::misaligned_vectors_ok() {
  return false;
  //return !AlignVector; // can be changed by flag
}
// Register for DIVI projection of divmodI.
// divmodI is not matched on this platform, so this must never be queried.
RegMask Matcher::divI_proj_mask() {
  ShouldNotReachHere();
  return RegMask();
}

// Register for MODI projection of divmodI (not matched; never queried).
RegMask Matcher::modI_proj_mask() {
  ShouldNotReachHere();
  return RegMask();
}

// Register for DIVL projection of divmodL (not matched; never queried).
RegMask Matcher::divL_proj_mask() {
  ShouldNotReachHere();
  return RegMask();
}

// Map a register number into the FPU save-area: the FP registers are
// numbered after the 32 integer registers.
int Matcher::regnum_to_fpu_offset(int regnum) {
  return regnum - 32;  // The FP registers are in the second chunk
}
// Will one (StoreL ConL) be cheaper than two (StoreI ConI)?
// Always true on this platform.
const bool Matcher::isSimpleConstant64(jlong value) {
  return true;
}
797 // Return whether or not this register is ever used as an argument. This
798 // function is used on startup to build the trampoline stubs in generateOptoStub.
799 // Registers not mentioned will be killed by the VM call in the trampoline, and
800 // arguments in those registers not be available to the callee.
// Is this register ever used as a Java argument?  Used at startup to
// build trampoline stubs in generateOptoStub; registers not mentioned
// here are killed by the VM call in the trampoline.
bool Matcher::can_be_java_arg( int reg ) {
  // Refer to: [sharedRuntime_mips_64.cpp] SharedRuntime::java_calling_convention()
  // Integer argument registers: T0 and A0-A7 (with their high halves).
  if ( reg == T0_num || reg == T0_H_num
    || reg == A0_num || reg == A0_H_num
    || reg == A1_num || reg == A1_H_num
    || reg == A2_num || reg == A2_H_num
    || reg == A3_num || reg == A3_H_num
    || reg == A4_num || reg == A4_H_num
    || reg == A5_num || reg == A5_H_num
    || reg == A6_num || reg == A6_H_num
    || reg == A7_num || reg == A7_H_num )
    return true;

  // Floating-point argument registers: F12-F19 (with high halves).
  if ( reg == F12_num || reg == F12_H_num
    || reg == F13_num || reg == F13_H_num
    || reg == F14_num || reg == F14_H_num
    || reg == F15_num || reg == F15_H_num
    || reg == F16_num || reg == F16_H_num
    || reg == F17_num || reg == F17_H_num
    || reg == F18_num || reg == F18_H_num
    || reg == F19_num || reg == F19_H_num )
    return true;

  return false;
}
// Argument registers double as spillable locations.
bool Matcher::is_spillable_arg( int reg ) {
  return can_be_java_arg(reg);
}

// Never use an inline-assembly fast path for long division by constant.
bool Matcher::use_asm_for_ldiv_by_con( jlong divisor ) {
  return false;
}

// Register for MODL projection of divmodL (not matched; never queried).
RegMask Matcher::modL_proj_mask() {
  ShouldNotReachHere();
  return RegMask();
}

// Mask of the register that preserves SP across method-handle invokes
// (FP on this platform).
const RegMask Matcher::method_handle_invoke_SP_save_mask() {
  return FP_REG_mask();
}

// MIPS doesn't support AES intrinsics.
const bool Matcher::pass_original_key_for_aes() {
  return false;
}
// The address of the call instruction needs to be 16-byte aligned to
// ensure that it does not span a cache line so that it can be patched.
int CallStaticJavaDirectNode::compute_padding(int current_offset) const {
  // Call sequence:
  //   lui, ori, dsll, ori   (materialize the target)
  //   jalr
  //   nop                   (delay slot)
  return round_to(current_offset, alignment_required()) - current_offset;
}

// The address of the call instruction needs to be 16-byte aligned to
// ensure that it does not span a cache line so that it can be patched.
int CallDynamicJavaDirectNode::compute_padding(int current_offset) const {
  // li64 <--- skipped: the inline-cache load precedes the call and
  //           does not itself need alignment
  //   lui, ori, dsll, ori
  //   jalr
  //   nop
  current_offset += 4 * 6;  // skip li64
  return round_to(current_offset, alignment_required()) - current_offset;
}

// Same alignment requirement for leaf calls without FP state.
int CallLeafNoFPDirectNode::compute_padding(int current_offset) const {
  //   lui, ori, dsll, ori
  //   jalr
  //   nop
  return round_to(current_offset, alignment_required()) - current_offset;
}

// Same alignment requirement for leaf calls.
int CallLeafDirectNode::compute_padding(int current_offset) const {
  //   lui, ori, dsll, ori
  //   jalr
  //   nop
  return round_to(current_offset, alignment_required()) - current_offset;
}

// Same alignment requirement for runtime calls.
int CallRuntimeDirectNode::compute_padding(int current_offset) const {
  //   lui, ori, dsll, ori
  //   jalr
  //   nop
  return round_to(current_offset, alignment_required()) - current_offset;
}
// If CPU can load and store mis-aligned doubles directly then no fixup is
// needed.  Else we split the double into 2 integer pieces and move it
// piece-by-piece.  Only happens when passing doubles into C code as the
// Java calling convention forces doubles to be aligned.
const bool Matcher::misaligned_doubles_ok = false;
// Do floats take an entire double register or just half?
//const bool Matcher::float_in_double = true;
bool Matcher::float_in_double() { return false; }
// Threshold size for cleararray.
const int Matcher::init_array_short_size = 8 * BytesPerLong;
// Do ints take an entire long register or just half?
const bool Matcher::int_in_long = true;
// Is it better to copy float constants, or load them directly from memory?
// Intel can load a float constant from a direct address, requiring no
// extra registers.  Most RISCs will have to materialize an address into a
// register first, so they would do better to copy the constant from stack.
const bool Matcher::rematerialize_float_constants = false;
// Advertise here if the CPU requires explicit rounding operations
// to implement the UseStrictFP mode.
const bool Matcher::strict_fp_requires_explicit_rounding = false;
// The ecx parameter to rep stos for the ClearArray node is in dwords.
const bool Matcher::init_array_count_is_in_bytes = false;

// Indicate if the safepoint node needs the polling page as an input.
// Since MIPS doesn't have absolute addressing, it does.
bool SafePointNode::needs_polling_address_input() {
  return true;
}
// !!!!! Special hack to get all type of calls to specify the byte offset
// from the start of the call to the point where the return address
// will point.
int MachCallStaticJavaNode::ret_addr_offset() {
  assert(NativeCall::instruction_size == 24, "in MachCallStaticJavaNode::ret_addr_offset");
  // The return address is 24 bytes (6 instructions) past the call start:
  //   lui
  //   ori
  //   dsll
  //   ori
  //   jalr
  //   nop     (branch delay slot)
  return NativeCall::instruction_size;
}
int MachCallDynamicJavaNode::ret_addr_offset() {
  // Must be kept in sync with Java_Dynamic_Call (2012/9/10, Jin).
  assert(NativeCall::instruction_size == 24, "in MachCallDynamicJavaNode::ret_addr_offset");
  // Inline-cache klass load followed by the native call sequence:
  //   lui  IC_Klass,
  //   ori  IC_Klass,
  //   dsll IC_Klass
  //   ori  IC_Klass
  //   lui  T9
  //   ori  T9
  //   dsll T9
  //   ori  T9
  //   jalr T9
  //   nop
  // NOTE(review): the sequence listed above is 10 instructions (40 bytes)
  // but the computed offset is 6*4 + 24 = 48 bytes — presumably the
  // Java_Dynamic_Call encoder emits two additional instructions; verify
  // against that encoding before relying on the listing.
  return 6 * 4 + NativeCall::instruction_size;
}
983 //=============================================================================
985 // Figure out which register class each belongs in: rc_int, rc_float, rc_stack
986 enum RC { rc_bad, rc_int, rc_float, rc_stack };
987 static enum RC rc_class( OptoReg::Name reg ) {
988 if( !OptoReg::is_valid(reg) ) return rc_bad;
989 if (OptoReg::is_stack(reg)) return rc_stack;
990 VMReg r = OptoReg::as_VMReg(reg);
991 if (r->is_Register()) return rc_int;
992 assert(r->is_FloatRegister(), "must be");
993 return rc_float;
994 }
// Emit, size, or pretty-print a spill copy between the locations chosen by
// the register allocator.  Handles all source/destination combinations of
// {stack slot, GPR, FPR} in both 32-bit and 64-bit widths (a 64-bit move is
// recognized by an even-aligned first half whose second half is adjacent).
//   cbuf    - if non-NULL, emit code into it; if NULL, format/size only.
//   do_size - when true, suppress textual output and just accumulate size.
//   st      - stream for the debug (non-PRODUCT) disassembly text.
// Returns the emitted size in bytes.
uint MachSpillCopyNode::implementation( CodeBuffer *cbuf, PhaseRegAlloc *ra_, bool do_size, outputStream* st ) const {
  // Get registers to move
  OptoReg::Name src_second = ra_->get_reg_second(in(1));
  OptoReg::Name src_first = ra_->get_reg_first(in(1));
  OptoReg::Name dst_second = ra_->get_reg_second(this );
  OptoReg::Name dst_first = ra_->get_reg_first(this );

  enum RC src_second_rc = rc_class(src_second);
  enum RC src_first_rc = rc_class(src_first);
  enum RC dst_second_rc = rc_class(dst_second);
  enum RC dst_first_rc = rc_class(dst_first);

  assert(OptoReg::is_valid(src_first) && OptoReg::is_valid(dst_first), "must move at least 1 register" );

  // Generate spill code!
  int size = 0;

  if( src_first == dst_first && src_second == dst_second )
    return 0;            // Self copy, no move

  if (src_first_rc == rc_stack) {
    // mem ->
    if (dst_first_rc == rc_stack) {
      // mem -> mem  (no mem-mem move instruction; bounce through AT)
      assert(src_second != dst_first, "overlap");
      if ((src_first & 1) == 0 && src_first + 1 == src_second &&
          (dst_first & 1) == 0 && dst_first + 1 == dst_second) {
        // 64-bit
        int src_offset = ra_->reg2offset(src_first);
        int dst_offset = ra_->reg2offset(dst_first);
        if (cbuf) {
          MacroAssembler _masm(cbuf);
          __ ld(AT, Address(SP, src_offset));
          __ sd(AT, Address(SP, dst_offset));
#ifndef PRODUCT
        } else {
          if(!do_size){
            if (size != 0) st->print("\n\t");
            st->print("ld AT, [SP + #%d]\t# 64-bit mem-mem spill 1\n\t"
                      "sd AT, [SP + #%d]",
                      src_offset, dst_offset);
          }
#endif
        }
        size += 8;
      } else {
        // 32-bit
        assert(!((src_first & 1) == 0 && src_first + 1 == src_second), "no transform");
        assert(!((dst_first & 1) == 0 && dst_first + 1 == dst_second), "no transform");
        // No pushl/popl, so:
        int src_offset = ra_->reg2offset(src_first);
        int dst_offset = ra_->reg2offset(dst_first);
        if (cbuf) {
          MacroAssembler _masm(cbuf);
          __ lw(AT, Address(SP, src_offset));
          __ sw(AT, Address(SP, dst_offset));
#ifndef PRODUCT
        } else {
          if(!do_size){
            if (size != 0) st->print("\n\t");
            st->print("lw AT, [SP + #%d] spill 2\n\t"
                      "sw AT, [SP + #%d]\n\t",
                      src_offset, dst_offset);
          }
#endif
        }
        size += 8;
      }
      return size;
    } else if (dst_first_rc == rc_int) {
      // mem -> gpr
      if ((src_first & 1) == 0 && src_first + 1 == src_second &&
          (dst_first & 1) == 0 && dst_first + 1 == dst_second) {
        // 64-bit
        int offset = ra_->reg2offset(src_first);
        if (cbuf) {
          MacroAssembler _masm(cbuf);
          __ ld(as_Register(Matcher::_regEncode[dst_first]), Address(SP, offset));
#ifndef PRODUCT
        } else {
          if(!do_size){
            if (size != 0) st->print("\n\t");
            st->print("ld %s, [SP + #%d]\t# spill 3",
                      Matcher::regName[dst_first],
                      offset);
          }
#endif
        }
        size += 4;
      } else {
        // 32-bit: lw sign-extends for ints, lwu zero-extends otherwise
        assert(!((src_first & 1) == 0 && src_first + 1 == src_second), "no transform");
        assert(!((dst_first & 1) == 0 && dst_first + 1 == dst_second), "no transform");
        int offset = ra_->reg2offset(src_first);
        if (cbuf) {
          MacroAssembler _masm(cbuf);
          if (this->ideal_reg() == Op_RegI)
            __ lw(as_Register(Matcher::_regEncode[dst_first]), Address(SP, offset));
          else
            __ lwu(as_Register(Matcher::_regEncode[dst_first]), Address(SP, offset));
#ifndef PRODUCT
        } else {
          if(!do_size){
            if (size != 0) st->print("\n\t");
            if (this->ideal_reg() == Op_RegI)
              st->print("lw %s, [SP + #%d]\t# spill 4",
                        Matcher::regName[dst_first],
                        offset);
            else
              st->print("lwu %s, [SP + #%d]\t# spill 5",
                        Matcher::regName[dst_first],
                        offset);
          }
#endif
        }
        size += 4;
      }
      return size;
    } else if (dst_first_rc == rc_float) {
      // mem -> fpr  (comment said "xmm": x86 leftover)
      if ((src_first & 1) == 0 && src_first + 1 == src_second &&
          (dst_first & 1) == 0 && dst_first + 1 == dst_second) {
        // 64-bit
        int offset = ra_->reg2offset(src_first);
        if (cbuf) {
          MacroAssembler _masm(cbuf);
          __ ldc1( as_FloatRegister(Matcher::_regEncode[dst_first]), Address(SP, offset));
#ifndef PRODUCT
        } else {
          if(!do_size){
            if (size != 0) st->print("\n\t");
            st->print("ldc1 %s, [SP + #%d]\t# spill 6",
                      Matcher::regName[dst_first],
                      offset);
          }
#endif
        }
        size += 4;
      } else {
        // 32-bit
        assert(!((src_first & 1) == 0 && src_first + 1 == src_second), "no transform");
        assert(!((dst_first & 1) == 0 && dst_first + 1 == dst_second), "no transform");
        int offset = ra_->reg2offset(src_first);
        if (cbuf) {
          MacroAssembler _masm(cbuf);
          __ lwc1( as_FloatRegister(Matcher::_regEncode[dst_first]), Address(SP, offset));
#ifndef PRODUCT
        } else {
          if(!do_size){
            if (size != 0) st->print("\n\t");
            st->print("lwc1 %s, [SP + #%d]\t# spill 7",
                      Matcher::regName[dst_first],
                      offset);
          }
#endif
        }
        size += 4;
      }
      return size;
    }
  } else if (src_first_rc == rc_int) {
    // gpr ->
    if (dst_first_rc == rc_stack) {
      // gpr -> mem
      if ((src_first & 1) == 0 && src_first + 1 == src_second &&
          (dst_first & 1) == 0 && dst_first + 1 == dst_second) {
        // 64-bit
        int offset = ra_->reg2offset(dst_first);
        if (cbuf) {
          MacroAssembler _masm(cbuf);
          __ sd(as_Register(Matcher::_regEncode[src_first]), Address(SP, offset));
#ifndef PRODUCT
        } else {
          if(!do_size){
            if (size != 0) st->print("\n\t");
            st->print("sd %s, [SP + #%d] # spill 8",
                      Matcher::regName[src_first],
                      offset);
          }
#endif
        }
        size += 4;
      } else {
        // 32-bit
        assert(!((src_first & 1) == 0 && src_first + 1 == src_second), "no transform");
        assert(!((dst_first & 1) == 0 && dst_first + 1 == dst_second), "no transform");
        int offset = ra_->reg2offset(dst_first);
        if (cbuf) {
          MacroAssembler _masm(cbuf);
          __ sw(as_Register(Matcher::_regEncode[src_first]), Address(SP, offset));
#ifndef PRODUCT
        } else {
          if(!do_size){
            if (size != 0) st->print("\n\t");
            st->print("sw %s, [SP + #%d]\t# spill 9",
                      Matcher::regName[src_first], offset);
          }
#endif
        }
        size += 4;
      }
      return size;
    } else if (dst_first_rc == rc_int) {
      // gpr -> gpr
      if ((src_first & 1) == 0 && src_first + 1 == src_second &&
          (dst_first & 1) == 0 && dst_first + 1 == dst_second) {
        // 64-bit
        if (cbuf) {
          MacroAssembler _masm(cbuf);
          __ move(as_Register(Matcher::_regEncode[dst_first]),
                  as_Register(Matcher::_regEncode[src_first]));
#ifndef PRODUCT
        } else {
          if(!do_size){
            if (size != 0) st->print("\n\t");
            st->print("move(64bit) %s <-- %s\t# spill 10",
                      Matcher::regName[dst_first],
                      Matcher::regName[src_first]);
          }
#endif
        }
        size += 4;
        return size;
      } else {
        // 32-bit: move_u32 for ints; otherwise a full-width move via daddu rd, rs, R0
        assert(!((src_first & 1) == 0 && src_first + 1 == src_second), "no transform");
        assert(!((dst_first & 1) == 0 && dst_first + 1 == dst_second), "no transform");
        if (cbuf) {
          MacroAssembler _masm(cbuf);
          if (this->ideal_reg() == Op_RegI)
            __ move_u32(as_Register(Matcher::_regEncode[dst_first]), as_Register(Matcher::_regEncode[src_first]));
          else
            __ daddu(as_Register(Matcher::_regEncode[dst_first]), as_Register(Matcher::_regEncode[src_first]), R0);

#ifndef PRODUCT
        } else {
          if(!do_size){
            if (size != 0) st->print("\n\t");
            st->print("move(32-bit) %s <-- %s\t# spill 11",
                      Matcher::regName[dst_first],
                      Matcher::regName[src_first]);
          }
#endif
        }
        size += 4;
        return size;
      }
    } else if (dst_first_rc == rc_float) {
      // gpr -> fpr
      if ((src_first & 1) == 0 && src_first + 1 == src_second &&
          (dst_first & 1) == 0 && dst_first + 1 == dst_second) {
        // 64-bit
        // NOTE(review): the debug text below prints dst before src while the
        // MIPS "dmtc1 rt, fs" syntax is src-gpr first — confirm intended order.
        if (cbuf) {
          MacroAssembler _masm(cbuf);
          __ dmtc1(as_Register(Matcher::_regEncode[src_first]), as_FloatRegister(Matcher::_regEncode[dst_first]));
#ifndef PRODUCT
        } else {
          if(!do_size){
            if (size != 0) st->print("\n\t");
            st->print("dmtc1 %s, %s\t# spill 12",
                      Matcher::regName[dst_first],
                      Matcher::regName[src_first]);
          }
#endif
        }
        size += 4;
      } else {
        // 32-bit
        assert(!((src_first & 1) == 0 && src_first + 1 == src_second), "no transform");
        assert(!((dst_first & 1) == 0 && dst_first + 1 == dst_second), "no transform");
        if (cbuf) {
          MacroAssembler _masm(cbuf);
          __ mtc1( as_Register(Matcher::_regEncode[src_first]), as_FloatRegister(Matcher::_regEncode[dst_first]) );
#ifndef PRODUCT
        } else {
          if(!do_size){
            if (size != 0) st->print("\n\t");
            st->print("mtc1 %s, %s\t# spill 13",
                      Matcher::regName[dst_first],
                      Matcher::regName[src_first]);
          }
#endif
        }
        size += 4;
      }
      return size;
    }
  } else if (src_first_rc == rc_float) {
    // fpr ->
    if (dst_first_rc == rc_stack) {
      // fpr -> mem
      if ((src_first & 1) == 0 && src_first + 1 == src_second &&
          (dst_first & 1) == 0 && dst_first + 1 == dst_second) {
        // 64-bit
        int offset = ra_->reg2offset(dst_first);
        if (cbuf) {
          MacroAssembler _masm(cbuf);
          __ sdc1( as_FloatRegister(Matcher::_regEncode[src_first]), Address(SP, offset) );
#ifndef PRODUCT
        } else {
          if(!do_size){
            if (size != 0) st->print("\n\t");
            st->print("sdc1 %s, [SP + #%d]\t# spill 14",
                      Matcher::regName[src_first],
                      offset);
          }
#endif
        }
        size += 4;
      } else {
        // 32-bit
        assert(!((src_first & 1) == 0 && src_first + 1 == src_second), "no transform");
        assert(!((dst_first & 1) == 0 && dst_first + 1 == dst_second), "no transform");
        int offset = ra_->reg2offset(dst_first);
        if (cbuf) {
          MacroAssembler _masm(cbuf);
          __ swc1(as_FloatRegister(Matcher::_regEncode[src_first]), Address(SP, offset));
#ifndef PRODUCT
        } else {
          if(!do_size){
            if (size != 0) st->print("\n\t");
            st->print("swc1 %s, [SP + #%d]\t# spill 15",
                      Matcher::regName[src_first],
                      offset);
          }
#endif
        }
        size += 4;
      }
      return size;
    } else if (dst_first_rc == rc_int) {
      // fpr -> gpr
      if ((src_first & 1) == 0 && src_first + 1 == src_second &&
          (dst_first & 1) == 0 && dst_first + 1 == dst_second) {
        // 64-bit
        if (cbuf) {
          MacroAssembler _masm(cbuf);
          __ dmfc1( as_Register(Matcher::_regEncode[dst_first]), as_FloatRegister(Matcher::_regEncode[src_first]));
#ifndef PRODUCT
        } else {
          if(!do_size){
            if (size != 0) st->print("\n\t");
            st->print("dmfc1 %s, %s\t# spill 16",
                      Matcher::regName[dst_first],
                      Matcher::regName[src_first]);
          }
#endif
        }
        size += 4;
      } else {
        // 32-bit
        assert(!((src_first & 1) == 0 && src_first + 1 == src_second), "no transform");
        assert(!((dst_first & 1) == 0 && dst_first + 1 == dst_second), "no transform");
        if (cbuf) {
          MacroAssembler _masm(cbuf);
          __ mfc1( as_Register(Matcher::_regEncode[dst_first]), as_FloatRegister(Matcher::_regEncode[src_first]));
#ifndef PRODUCT
        } else {
          if(!do_size){
            if (size != 0) st->print("\n\t");
            st->print("mfc1 %s, %s\t# spill 17",
                      Matcher::regName[dst_first],
                      Matcher::regName[src_first]);
          }
#endif
        }
        size += 4;
      }
      return size;
    } else if (dst_first_rc == rc_float) {
      // fpr -> fpr
      if ((src_first & 1) == 0 && src_first + 1 == src_second &&
          (dst_first & 1) == 0 && dst_first + 1 == dst_second) {
        // 64-bit
        if (cbuf) {
          MacroAssembler _masm(cbuf);
          __ mov_d( as_FloatRegister(Matcher::_regEncode[dst_first]), as_FloatRegister(Matcher::_regEncode[src_first]));
#ifndef PRODUCT
        } else {
          if(!do_size){
            if (size != 0) st->print("\n\t");
            st->print("mov_d %s <-- %s\t# spill 18",
                      Matcher::regName[dst_first],
                      Matcher::regName[src_first]);
          }
#endif
        }
        size += 4;
      } else {
        // 32-bit
        assert(!((src_first & 1) == 0 && src_first + 1 == src_second), "no transform");
        assert(!((dst_first & 1) == 0 && dst_first + 1 == dst_second), "no transform");
        if (cbuf) {
          MacroAssembler _masm(cbuf);
          __ mov_s( as_FloatRegister(Matcher::_regEncode[dst_first]), as_FloatRegister(Matcher::_regEncode[src_first]));
#ifndef PRODUCT
        } else {
          if(!do_size){
            if (size != 0) st->print("\n\t");
            st->print("mov_s %s <-- %s\t# spill 19",
                      Matcher::regName[dst_first],
                      Matcher::regName[src_first]);
          }
#endif
        }
        size += 4;
      }
      return size;
    }
  }

  // No combination matched: should be unreachable.
  assert(0," foo ");
  Unimplemented();
  return size;

}
#ifndef PRODUCT
// Debug printing only: run implementation() in format mode (no CodeBuffer).
void MachSpillCopyNode::format( PhaseRegAlloc *ra_, outputStream* st ) const {
  implementation( NULL, ra_, false, st );
}
#endif
// Emit the spill copy into the code buffer.
void MachSpillCopyNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
  implementation( &cbuf, ra_, false, NULL );
}
// Size in bytes of the spill copy (implementation() in size-only mode).
uint MachSpillCopyNode::size(PhaseRegAlloc *ra_) const {
  return implementation( NULL, ra_, true, NULL );
}
//=============================================================================
#ifndef PRODUCT
// Debug text for a breakpoint node ("INT3" is an x86-inherited label; the
// actual MIPS emission is the int3 pseudo in MachBreakpointNode::emit).
void MachBreakpointNode::format( PhaseRegAlloc *, outputStream* st ) const {
  st->print("INT3");
}
#endif
// Emit a breakpoint/trap instruction.
void MachBreakpointNode::emit(CodeBuffer &cbuf, PhaseRegAlloc* ra_) const {
  MacroAssembler _masm(&cbuf);
  __ int3();
}
// Size of the breakpoint: delegate to the generic (emit-and-measure) path.
uint MachBreakpointNode::size(PhaseRegAlloc* ra_) const {
  return MachNode::size(ra_);
}
1446 //=============================================================================
1447 #ifndef PRODUCT
1448 void MachEpilogNode::format( PhaseRegAlloc *ra_, outputStream* st ) const {
1449 Compile *C = ra_->C;
1450 int framesize = C->frame_size_in_bytes();
1452 assert((framesize & (StackAlignmentInBytes-1)) == 0, "frame size not aligned");
1454 st->print("daddiu SP, SP, %d # Rlease stack @ MachEpilogNode",framesize);
1455 st->cr(); st->print("\t");
1456 if (UseLoongsonISA) {
1457 st->print("gslq RA, FP, SP, %d # Restore FP & RA @ MachEpilogNode", -wordSize*2);
1458 } else {
1459 st->print("ld RA, SP, %d # Restore RA @ MachEpilogNode", -wordSize);
1460 st->cr(); st->print("\t");
1461 st->print("ld FP, SP, %d # Restore FP @ MachEpilogNode", -wordSize*2);
1462 }
1464 if( do_polling() && C->is_method_compilation() ) {
1465 st->print("Poll Safepoint # MachEpilogNode");
1466 }
1467 }
1468 #endif
// Emit the method epilog: pop the frame, restore RA and FP, and (for normal
// method compilations) perform the poll-return safepoint load.
void MachEpilogNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
  Compile *C = ra_->C;
  MacroAssembler _masm(&cbuf);
  int framesize = C->frame_size_in_bytes();

  assert((framesize & (StackAlignmentInBytes-1)) == 0, "frame size not aligned");

  // Release the frame first; RA/FP were saved just above the new SP by the
  // prolog, so they are restored from negative offsets.
  __ daddiu(SP, SP, framesize);

  if (UseLoongsonISA) {
    // Loongson paired-load restores RA and FP in one instruction.
    __ gslq(RA, FP, SP, -wordSize*2);
  } else {
    __ ld(RA, SP, -wordSize );
    __ ld(FP, SP, -wordSize*2 );
  }

  // The epilog in a RuntimeStub should not contain a safepoint (2012/11/19, Jin).
  if( do_polling() && C->is_method_compilation() ) {
#ifndef OPT_SAFEPOINT
    // Materialize the polling-page address and do the relocated poll load.
    __ set64(AT, (long)os::get_polling_page());
    __ relocate(relocInfo::poll_return_type);
    __ lw(AT, AT, 0);
#else
    // Two-instruction variant: lui high half, poll via the low half offset.
    __ lui(AT, Assembler::split_high((intptr_t)os::get_polling_page()));
    __ relocate(relocInfo::poll_return_type);
    __ lw(AT, AT, Assembler::split_low((intptr_t)os::get_polling_page()));
#endif
  }
}
// Too many variables to size statically; compute it the hard way.
uint MachEpilogNode::size(PhaseRegAlloc *ra_) const {
  return MachNode::size(ra_);
}
// Upper bound on relocation entries emitted by the epilog.
int MachEpilogNode::reloc() const {
  return 0; // a large enough number
}
// Use the generic pipeline description for the epilog.
const Pipeline * MachEpilogNode::pipeline() const {
  return MachNode::pipeline_class();
}
1512 int MachEpilogNode::safepoint_offset() const { return 0; }
//=============================================================================

#ifndef PRODUCT
// Debug text: the box lock is the stack address SP + offset in 'reg'.
void BoxLockNode::format( PhaseRegAlloc *ra_, outputStream* st ) const {
  int offset = ra_->reg2offset(in_RegMask(0).find_first_elem());
  int reg = ra_->get_reg_first(this);
  st->print("ADDI %s, SP, %d @BoxLockNode",Matcher::regName[reg],offset);
}
#endif
// The box-lock address computation is a single 4-byte instruction
// (see BoxLockNode::emit).
uint BoxLockNode::size(PhaseRegAlloc *ra_) const {
  return 4;
}
1529 void BoxLockNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
1530 MacroAssembler _masm(&cbuf);
1531 int offset = ra_->reg2offset(in_RegMask(0).find_first_elem());
1532 int reg = ra_->get_encode(this);
1534 __ addi(as_Register(reg), SP, offset);
1535 /*
1536 if( offset >= 128 ) {
1537 emit_opcode(cbuf, 0x8D); // LEA reg,[SP+offset]
1538 emit_rm(cbuf, 0x2, reg, 0x04);
1539 emit_rm(cbuf, 0x0, 0x04, SP_enc);
1540 emit_d32(cbuf, offset);
1541 }
1542 else {
1543 emit_opcode(cbuf, 0x8D); // LEA reg,[SP+offset]
1544 emit_rm(cbuf, 0x1, reg, 0x04);
1545 emit_rm(cbuf, 0x0, 0x04, SP_enc);
1546 emit_d8(cbuf, offset);
1547 }
1548 */
1549 }
// Return-address offset of a runtime call: the full 24-byte (6-instruction)
// native call sequence:
//   lui
//   ori
//   dsll
//   ori
//   jalr
//   nop     (branch delay slot)
int MachCallRuntimeNode::ret_addr_offset() {
  assert(NativeCall::instruction_size == 24, "in MachCallRuntimeNode::ret_addr_offset()");
  return NativeCall::instruction_size;
}
//=============================================================================
#ifndef PRODUCT
// Debug text: _count nops, 4 bytes each.
void MachNopNode::format( PhaseRegAlloc *, outputStream* st ) const {
  st->print("NOP \t# %d bytes pad for loops and calls", 4 * _count);
}
#endif
1577 void MachNopNode::emit(CodeBuffer &cbuf, PhaseRegAlloc * ) const {
1578 MacroAssembler _masm(&cbuf);
1579 int i = 0;
1580 for(i = 0; i < _count; i++)
1581 __ nop();
1582 }
// Each nop is 4 bytes.
uint MachNopNode::size(PhaseRegAlloc *) const {
  return 4 * _count;
}
// Use the generic pipeline description for nops.
const Pipeline* MachNopNode::pipeline() const {
  return MachNode::pipeline_class();
}
//=============================================================================

//=============================================================================
#ifndef PRODUCT
// Debug text for the unverified entry point (inline-cache check); mirrors
// MachUEPNode::emit below.
void MachUEPNode::format( PhaseRegAlloc *ra_, outputStream* st ) const {
  st->print_cr("load_klass(AT, T0)");
  st->print_cr("\tbeq(AT, iCache, L)");
  st->print_cr("\tnop");
  st->print_cr("\tjmp(SharedRuntime::get_ic_miss_stub(), relocInfo::runtime_call_type)");
  st->print_cr("\tnop");
  st->print_cr("\tnop");
  st->print_cr(" L:");
}
#endif
// Emit the unverified entry point: compare the receiver's klass (T0) against
// the inline-cache klass; on mismatch, tail-jump to the IC-miss stub.
void MachUEPNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
  MacroAssembler _masm(&cbuf);
#ifdef ASSERT
  //uint code_size = cbuf.code_size();
#endif
  int ic_reg = Matcher::inline_cache_reg_encode();
  Label L;
  Register receiver = T0;
  Register iCache = as_Register(ic_reg);
  __ load_klass(AT, receiver);
  __ beq(AT, iCache, L);
  __ nop();  // branch delay slot

  // Klass mismatch: jump to SharedRuntime::get_ic_miss_stub() through T9.
  __ relocate(relocInfo::runtime_call_type);
  __ li48(T9, (long)SharedRuntime::get_ic_miss_stub());
  __ jr(T9);
  __ nop();  // branch delay slot

  /* WARNING these NOPs are critical so that verified entry point is properly
   * 8 bytes aligned for patching by NativeJump::patch_verified_entry() */
  __ align(CodeEntryAlignment);
  __ bind(L);
}
// Size of the UEP stub: variable (due to align()), so measure it.
uint MachUEPNode::size(PhaseRegAlloc *ra_) const {
  return MachNode::size(ra_);
}
//=============================================================================

// The constant-table base pointer lives in a pointer-class register.
const RegMask& MachConstantBaseNode::_out_RegMask = P_REG_mask();
// The constant-table base is materialized with an absolute address (li48),
// so no offset adjustment is needed.
int Compile::ConstantTable::calculate_table_base_offset() const {
  return 0;  // absolute addressing, no offset
}
1645 bool MachConstantBaseNode::requires_postalloc_expand() const { return false; }
// Unused: requires_postalloc_expand() returns false, so this must not be called.
void MachConstantBaseNode::postalloc_expand(GrowableArray <Node *> *nodes, PhaseRegAlloc *ra_) {
  ShouldNotReachHere();
}
// Materialize the constant-table base address into the allocated register
// (li48, with an internal-pc relocation so the address survives code moves).
void MachConstantBaseNode::emit(CodeBuffer& cbuf, PhaseRegAlloc* ra_) const {
  Compile* C = ra_->C;
  Compile::ConstantTable& constant_table = C->constant_table();
  MacroAssembler _masm(&cbuf);

  Register Rtoc = as_Register(ra_->get_encode(this));
  CodeSection* consts_section = __ code()->consts();
  int consts_size = consts_section->align_at_start(consts_section->size());
  assert(constant_table.size() == consts_size, "must be equal");

  if (consts_section->size()) {
    // Materialize the constant table base.
    address baseaddr = consts_section->start() + -(constant_table.table_base_offset());
    // RelocationHolder rspec = internal_word_Relocation::spec(baseaddr);
    __ relocate(relocInfo::internal_pc_type);
    __ li48(Rtoc, (long)baseaddr);
  }
}
// li48 expands to 4 instructions (16 bytes).
uint MachConstantBaseNode::size(PhaseRegAlloc* ra_) const {
  return 4 * 4;
}
#ifndef PRODUCT
// Debug text for the constant-table base materialization.
void MachConstantBaseNode::format(PhaseRegAlloc* ra_, outputStream* st) const {
  Register r = as_Register(ra_->get_encode(this));
  st->print("li48 %s, &constanttable (constant table base) @ MachConstantBaseNode", r->name());
}
#endif
//=============================================================================
#ifndef PRODUCT
// Debug text for the method prolog; mirrors MachPrologNode::emit below:
// optional stack bang, save RA/FP (paired gssq on Loongson), set FP,
// allocate the frame.
void MachPrologNode::format( PhaseRegAlloc *ra_, outputStream* st ) const {
  Compile* C = ra_->C;

  int framesize = C->frame_size_in_bytes();
  int bangsize = C->bang_size_in_bytes();
  assert((framesize & (StackAlignmentInBytes-1)) == 0, "frame size not aligned");

  // Calls to C2R adapters often do not accept exceptional returns.
  // We require that their callers must bang for them. But be careful, because
  // some VM calls (such as call site linkage) can use several kilobytes of
  // stack. But the stack safety zone should account for that.
  // See bugs 4446381, 4468289, 4497237.
  if (C->need_stack_bang(bangsize)) {
    st->print_cr("# stack bang"); st->print("\t");
  }
  if (UseLoongsonISA) {
    st->print("gssq RA, FP, %d(SP) @ MachPrologNode\n\t", -wordSize*2);
  } else {
    st->print("sd RA, %d(SP) @ MachPrologNode\n\t", -wordSize);
    st->print("sd FP, %d(SP) @ MachPrologNode\n\t", -wordSize*2);
  }
  st->print("daddiu FP, SP, -%d \n\t", wordSize*2);
  st->print("daddiu SP, SP, -%d \t",framesize);
}
#endif
1711 void MachPrologNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
1712 Compile* C = ra_->C;
1713 MacroAssembler _masm(&cbuf);
1715 int framesize = C->frame_size_in_bytes();
1716 int bangsize = C->bang_size_in_bytes();
1718 // __ verified_entry(framesize, C->need_stack_bang(bangsize)?bangsize:0, false);
1720 assert((framesize & (StackAlignmentInBytes-1)) == 0, "frame size not aligned");
1722 if (C->need_stack_bang(framesize)) {
1723 __ generate_stack_overflow_check(framesize);
1724 }
1726 if (UseLoongsonISA) {
1727 __ gssq(RA, FP, SP, -wordSize*2);
1728 } else {
1729 __ sd(RA, SP, -wordSize);
1730 __ sd(FP, SP, -wordSize*2);
1731 }
1732 __ daddiu(FP, SP, -wordSize*2);
1733 __ daddiu(SP, SP, -framesize);
1734 __ nop(); /* 2013.10.22 Jin: Make enough room for patch_verified_entry() */
1735 __ nop();
1737 C->set_frame_complete(cbuf.insts_size());
1738 if (C->has_mach_constant_base_node()) {
1739 // NOTE: We set the table base offset here because users might be
1740 // emitted before MachConstantBaseNode.
1741 Compile::ConstantTable& constant_table = C->constant_table();
1742 constant_table.set_table_base_offset(constant_table.calculate_table_base_offset());
1743 }
1745 }
// Too many variables to size statically; compute it the hard way.
uint MachPrologNode::size(PhaseRegAlloc *ra_) const {
  return MachNode::size(ra_);
}
// Upper bound on relocation entries emitted by the prolog.
int MachPrologNode::reloc() const {
  return 0; // a large enough number
}
1757 %}
1759 //----------ENCODING BLOCK-----------------------------------------------------
1760 // This block specifies the encoding classes used by the compiler to output
1761 // byte streams. Encoding classes generate functions which are called by
1762 // Machine Instruction Nodes in order to generate the bit encoding of the
1763 // instruction. Operands specify their base encoding interface with the
// interface keyword.  There are currently four supported interfaces:
1765 // REG_INTER, CONST_INTER, MEMORY_INTER, & COND_INTER. REG_INTER causes an
1766 // operand to generate a function which returns its register number when
1767 // queried. CONST_INTER causes an operand to generate a function which
1768 // returns the value of the constant when queried. MEMORY_INTER causes an
1769 // operand to generate four functions which return the Base Register, the
1770 // Index Register, the Scale Value, and the Offset Value of the operand when
1771 // queried. COND_INTER causes an operand to generate six functions which
1772 // return the encoding code (ie - encoding bits for the instruction)
1773 // associated with each basic boolean condition for a conditional instruction.
1774 // Instructions specify two basic values for encoding. They use the
1775 // ins_encode keyword to specify their encoding class (which must be one of
1776 // the class names specified in the encoding block), and they use the
1777 // opcode keyword to specify, in order, their primary, secondary, and
1778 // tertiary opcode. Only the opcode sections which a particular instruction
1779 // needs for encoding need to be specified.
1780 encode %{
1781 /*
1782 Alias:
1783 1044 b java.io.ObjectInputStream::readHandle (130 bytes)
1784 118 B14: # B19 B15 <- B13 Freq: 0.899955
1785 118 add S1, S2, V0 #@addP_reg_reg
1786 11c lb S0, [S1 + #-8257524] #@loadB
1787 120 BReq S0, #3, B19 #@branchConI_reg_imm P=0.100000 C=-1.000000
1788 */
  // Load byte, signed (lb / Loongson gslbx).
  // Address = base + (index << scale) + disp; AT and T9 are used as
  // scratch when the components cannot be folded into one instruction.
  enc_class load_B_enc (mRegI dst, memory mem) %{
    MacroAssembler _masm(&cbuf);
    int  dst   = $dst$$reg;
    int  base  = $mem$$base;
    int  index = $mem$$index;
    int  scale = $mem$$scale;
    int  disp  = $mem$$disp;

    if( index != 0 ) {
      if( Assembler::is_simm16(disp) ) {
        if( UseLoongsonISA ) {
          // gslbx folds base + index + simm into one load.
          if (scale == 0) {
            __ gslbx(as_Register(dst), as_Register(base), as_Register(index), disp);
          } else {
            __ dsll(AT, as_Register(index), scale);
            __ gslbx(as_Register(dst), as_Register(base), AT, disp);
          }
        } else {
          // Classic MIPS: compute the address into AT, then lb.
          if (scale == 0) {
            __ addu(AT, as_Register(base), as_Register(index));
          } else {
            __ dsll(AT, as_Register(index), scale);
            __ addu(AT, as_Register(base), AT);
          }
          __ lb(as_Register(dst), AT, disp);
        }
      } else {
        // Displacement does not fit simm16: materialize it in T9.
        if (scale == 0) {
          __ addu(AT, as_Register(base), as_Register(index));
        } else {
          __ dsll(AT, as_Register(index), scale);
          __ addu(AT, as_Register(base), AT);
        }
        __ move(T9, disp);
        if( UseLoongsonISA ) {
          __ gslbx(as_Register(dst), AT, T9, 0);
        } else {
          __ addu(AT, AT, T9);
          __ lb(as_Register(dst), AT, 0);
        }
      }
    } else {
      // No index register.
      if( Assembler::is_simm16(disp) ) {
        __ lb(as_Register(dst), as_Register(base), disp);
      } else {
        __ move(T9, disp);
        if( UseLoongsonISA ) {
          __ gslbx(as_Register(dst), as_Register(base), T9, 0);
        } else {
          __ addu(AT, as_Register(base), T9);
          __ lb(as_Register(dst), AT, 0);
        }
      }
    }
  %}
  // Load byte, unsigned (lbu).  Address = base + (index << scale) + disp;
  // AT/T9 serve as scratch.  Unlike load_B_enc there is no Loongson
  // gs-extension path here.
  enc_class load_UB_enc (mRegI dst, memory mem) %{
    MacroAssembler _masm(&cbuf);
    int  dst   = $dst$$reg;
    int  base  = $mem$$base;
    int  index = $mem$$index;
    int  scale = $mem$$scale;
    int  disp  = $mem$$disp;

    if( index != 0 ) {
      // Fold base + scaled index into AT.
      if (scale == 0) {
        __ daddu(AT, as_Register(base), as_Register(index));
      } else {
        __ dsll(AT, as_Register(index), scale);
        __ daddu(AT, as_Register(base), AT);
      }
      if( Assembler::is_simm16(disp) ) {
        __ lbu(as_Register(dst), AT, disp);
      } else {
        // Large displacement: add it via T9 first.
        __ move(T9, disp);
        __ daddu(AT, AT, T9);
        __ lbu(as_Register(dst), AT, 0);
      }
    } else {
      if( Assembler::is_simm16(disp) ) {
        __ lbu(as_Register(dst), as_Register(base), disp);
      } else {
        __ move(T9, disp);
        __ daddu(AT, as_Register(base), T9);
        __ lbu(as_Register(dst), AT, 0);
      }
    }
  %}
  // Store byte from a register (sb / Loongson gssbx).
  // Address = base + (index << scale) + disp; AT/T9 are scratch.
  // NOTE(review): the gssbx fast paths require is_simm(disp, 8) — the
  // Loongson indexed-store immediate is narrower than the plain sb simm16.
  enc_class store_B_reg_enc (memory mem, mRegI src) %{
    MacroAssembler _masm(&cbuf);
    int  src   = $src$$reg;
    int  base  = $mem$$base;
    int  index = $mem$$index;
    int  scale = $mem$$scale;
    int  disp  = $mem$$disp;

    if( index != 0 ) {
      if (scale == 0) {
        // Unscaled index.
        if( Assembler::is_simm(disp, 8) ) {
          if (UseLoongsonISA) {
            __ gssbx(as_Register(src), as_Register(base), as_Register(index), disp);
          } else {
            __ addu(AT, as_Register(base), as_Register(index));
            __ sb(as_Register(src), AT, disp);
          }
        } else if( Assembler::is_simm16(disp) ) {
          __ addu(AT, as_Register(base), as_Register(index));
          __ sb(as_Register(src), AT, disp);
        } else {
          // Large displacement: materialize in T9.
          __ addu(AT, as_Register(base), as_Register(index));
          __ move(T9, disp);
          if (UseLoongsonISA) {
            __ gssbx(as_Register(src), AT, T9, 0);
          } else {
            __ addu(AT, AT, T9);
            __ sb(as_Register(src), AT, 0);
          }
        }
      } else {
        // Scaled index: shift it into AT first.
        __ dsll(AT, as_Register(index), scale);
        if( Assembler::is_simm(disp, 8) ) {
          if (UseLoongsonISA) {
            __ gssbx(as_Register(src), AT, as_Register(base), disp);
          } else {
            __ addu(AT, as_Register(base), AT);
            __ sb(as_Register(src), AT, disp);
          }
        } else if( Assembler::is_simm16(disp) ) {
          __ addu(AT, as_Register(base), AT);
          __ sb(as_Register(src), AT, disp);
        } else {
          __ addu(AT, as_Register(base), AT);
          __ move(T9, disp);
          if (UseLoongsonISA) {
            __ gssbx(as_Register(src), AT, T9, 0);
          } else {
            __ addu(AT, AT, T9);
            __ sb(as_Register(src), AT, 0);
          }
        }
      }
    } else {
      // No index register.
      if( Assembler::is_simm16(disp) ) {
        __ sb(as_Register(src), as_Register(base), disp);
      } else {
        __ move(T9, disp);
        if (UseLoongsonISA) {
          __ gssbx(as_Register(src), as_Register(base), T9, 0);
        } else {
          __ addu(AT, as_Register(base), T9);
          __ sb(as_Register(src), AT, 0);
        }
      }
    }
  %}
1948 enc_class store_B_immI_enc (memory mem, immI8 src) %{
1949 MacroAssembler _masm(&cbuf);
1950 int base = $mem$$base;
1951 int index = $mem$$index;
1952 int scale = $mem$$scale;
1953 int disp = $mem$$disp;
1954 int value = $src$$constant;
1956 if( index != 0 ) {
1957 if (!UseLoongsonISA) {
1958 if (scale == 0) {
1959 __ daddu(AT, as_Register(base), as_Register(index));
1960 } else {
1961 __ dsll(AT, as_Register(index), scale);
1962 __ daddu(AT, as_Register(base), AT);
1963 }
1964 if( Assembler::is_simm16(disp) ) {
1965 if (value == 0) {
1966 __ sb(R0, AT, disp);
1967 } else {
1968 __ move(T9, value);
1969 __ sb(T9, AT, disp);
1970 }
1971 } else {
1972 if (value == 0) {
1973 __ move(T9, disp);
1974 __ daddu(AT, AT, T9);
1975 __ sb(R0, AT, 0);
1976 } else {
1977 __ move(T9, disp);
1978 __ daddu(AT, AT, T9);
1979 __ move(T9, value);
1980 __ sb(T9, AT, 0);
1981 }
1982 }
1983 } else {
1985 if (scale == 0) {
1986 if( Assembler::is_simm(disp, 8) ) {
1987 if (value == 0) {
1988 __ gssbx(R0, as_Register(base), as_Register(index), disp);
1989 } else {
1990 __ move(T9, value);
1991 __ gssbx(T9, as_Register(base), as_Register(index), disp);
1992 }
1993 } else if( Assembler::is_simm16(disp) ) {
1994 __ daddu(AT, as_Register(base), as_Register(index));
1995 if (value == 0) {
1996 __ sb(R0, AT, disp);
1997 } else {
1998 __ move(T9, value);
1999 __ sb(T9, AT, disp);
2000 }
2001 } else {
2002 if (value == 0) {
2003 __ daddu(AT, as_Register(base), as_Register(index));
2004 __ move(T9, disp);
2005 __ gssbx(R0, AT, T9, 0);
2006 } else {
2007 __ move(AT, disp);
2008 __ move(T9, value);
2009 __ daddu(AT, as_Register(base), AT);
2010 __ gssbx(T9, AT, as_Register(index), 0);
2011 }
2012 }
2014 } else {
2016 if( Assembler::is_simm(disp, 8) ) {
2017 __ dsll(AT, as_Register(index), scale);
2018 if (value == 0) {
2019 __ gssbx(R0, as_Register(base), AT, disp);
2020 } else {
2021 __ move(T9, value);
2022 __ gssbx(T9, as_Register(base), AT, disp);
2023 }
2024 } else if( Assembler::is_simm16(disp) ) {
2025 __ dsll(AT, as_Register(index), scale);
2026 __ daddu(AT, as_Register(base), AT);
2027 if (value == 0) {
2028 __ sb(R0, AT, disp);
2029 } else {
2030 __ move(T9, value);
2031 __ sb(T9, AT, disp);
2032 }
2033 } else {
2034 __ dsll(AT, as_Register(index), scale);
2035 if (value == 0) {
2036 __ daddu(AT, as_Register(base), AT);
2037 __ move(T9, disp);
2038 __ gssbx(R0, AT, T9, 0);
2039 } else {
2040 __ move(T9, disp);
2041 __ daddu(AT, AT, T9);
2042 __ move(T9, value);
2043 __ gssbx(T9, as_Register(base), AT, 0);
2044 }
2045 }
2046 }
2047 }
2048 } else {
2049 if( Assembler::is_simm16(disp) ) {
2050 if (value == 0) {
2051 __ sb(R0, as_Register(base), disp);
2052 } else {
2053 __ move(AT, value);
2054 __ sb(AT, as_Register(base), disp);
2055 }
2056 } else {
2057 if (value == 0) {
2058 __ move(T9, disp);
2059 if (UseLoongsonISA) {
2060 __ gssbx(R0, as_Register(base), T9, 0);
2061 } else {
2062 __ daddu(AT, as_Register(base), T9);
2063 __ sb(R0, AT, 0);
2064 }
2065 } else {
2066 __ move(T9, disp);
2067 if (UseLoongsonISA) {
2068 __ move(AT, value);
2069 __ gssbx(AT, as_Register(base), T9, 0);
2070 } else {
2071 __ daddu(AT, as_Register(base), T9);
2072 __ move(T9, value);
2073 __ sb(T9, AT, 0);
2074 }
2075 }
2076 }
2077 }
2078 %}
// Store Byte (8-bit immediate) followed by a full memory barrier (sync).
// Same addressing as store_B_immI_enc, but without the Loongson gssbx fast
// paths; the trailing sync orders the store — presumably used for volatile
// byte stores (NOTE(review): confirm against the instruct rules that use it).
enc_class store_B_immI_enc_sync (memory mem, immI8 src) %{
  MacroAssembler _masm(&cbuf);
  int base = $mem$$base;
  int index = $mem$$index;
  int scale = $mem$$scale;
  int disp = $mem$$disp;
  int value = $src$$constant;

  if( index != 0 ) {
    // Form base + (index << scale) in AT.
    if (scale == 0) {
      __ daddu(AT, as_Register(base), as_Register(index));
    } else {
      __ dsll(AT, as_Register(index), scale);
      __ daddu(AT, as_Register(base), AT);
    }
    if( Assembler::is_simm16(disp) ) {
      if (value == 0) {
        __ sb(R0, AT, disp);
      } else {
        __ move(T9, value);
        __ sb(T9, AT, disp);
      }
    } else {
      if (value == 0) {
        __ move(T9, disp);
        __ daddu(AT, AT, T9);
        __ sb(R0, AT, 0);
      } else {
        __ move(T9, disp);
        __ daddu(AT, AT, T9);
        __ move(T9, value);
        __ sb(T9, AT, 0);
      }
    }
  } else {
    if( Assembler::is_simm16(disp) ) {
      if (value == 0) {
        __ sb(R0, as_Register(base), disp);
      } else {
        __ move(AT, value);
        __ sb(AT, as_Register(base), disp);
      }
    } else {
      if (value == 0) {
        __ move(T9, disp);
        __ daddu(AT, as_Register(base), T9);
        __ sb(R0, AT, 0);
      } else {
        __ move(T9, disp);
        __ daddu(AT, as_Register(base), T9);
        __ move(T9, value);
        __ sb(T9, AT, 0);
      }
    }
  }

  // Barrier after the store.
  __ sync();
%}
// Load Short (16bit signed): lh from [base + (index << scale) + disp] into dst.
enc_class load_S_enc (mRegI dst, memory mem) %{
  MacroAssembler _masm(&cbuf);
  int dst = $dst$$reg;
  int base = $mem$$base;
  int index = $mem$$index;
  int scale = $mem$$scale;
  int disp = $mem$$disp;

  if( index != 0 ) {
    if (scale == 0) {
      __ daddu(AT, as_Register(base), as_Register(index));
    } else {
      __ dsll(AT, as_Register(index), scale);
      __ daddu(AT, as_Register(base), AT);
    }
    if( Assembler::is_simm16(disp) ) {
      __ lh(as_Register(dst), AT, disp);
    } else {
      __ move(T9, disp);
      // NOTE(review): 32-bit addu for a 64-bit address sum — other encoders in
      // this file use daddu here; confirm addresses cannot exceed 2^31.
      __ addu(AT, AT, T9);
      __ lh(as_Register(dst), AT, 0);
    }
  } else {
    if( Assembler::is_simm16(disp) ) {
      __ lh(as_Register(dst), as_Register(base), disp);
    } else {
      __ move(T9, disp);
      // NOTE(review): addu vs daddu — see above.
      __ addu(AT, as_Register(base), T9);
      __ lh(as_Register(dst), AT, 0);
    }
  }
%}
// Load Char (16bit unsigned): lhu from [base + (index << scale) + disp] into dst.
enc_class load_C_enc (mRegI dst, memory mem) %{
  MacroAssembler _masm(&cbuf);
  int dst = $dst$$reg;
  int base = $mem$$base;
  int index = $mem$$index;
  int scale = $mem$$scale;
  int disp = $mem$$disp;

  if( index != 0 ) {
    if (scale == 0) {
      __ daddu(AT, as_Register(base), as_Register(index));
    } else {
      __ dsll(AT, as_Register(index), scale);
      __ daddu(AT, as_Register(base), AT);
    }
    if( Assembler::is_simm16(disp) ) {
      __ lhu(as_Register(dst), AT, disp);
    } else {
      __ move(T9, disp);
      // NOTE(review): addu here but daddu in the no-index branch below —
      // inconsistent 32- vs 64-bit address arithmetic; confirm intent.
      __ addu(AT, AT, T9);
      __ lhu(as_Register(dst), AT, 0);
    }
  } else {
    if( Assembler::is_simm16(disp) ) {
      __ lhu(as_Register(dst), as_Register(base), disp);
    } else {
      __ move(T9, disp);
      __ daddu(AT, as_Register(base), T9);
      __ lhu(as_Register(dst), AT, 0);
    }
  }
%}
// Store Char (16bit unsigned): sh src to [base + (index << scale) + disp].
// Loongson gsshx (reg+reg+simm8) is used when available and disp fits.
enc_class store_C_reg_enc (memory mem, mRegI src) %{
  MacroAssembler _masm(&cbuf);
  int src = $src$$reg;
  int base = $mem$$base;
  int index = $mem$$index;
  int scale = $mem$$scale;
  int disp = $mem$$disp;

  if( index != 0 ) {
    if( Assembler::is_simm16(disp) ) {
      if( UseLoongsonISA && Assembler::is_simm(disp, 8) ) {
        if (scale == 0) {
          __ gsshx(as_Register(src), as_Register(base), as_Register(index), disp);
        } else {
          __ dsll(AT, as_Register(index), scale);
          __ gsshx(as_Register(src), as_Register(base), AT, disp);
        }
      } else {
        // NOTE(review): addu for address formation (sign-extended 32-bit sum);
        // sibling 64-bit encoders use daddu — confirm.
        if (scale == 0) {
          __ addu(AT, as_Register(base), as_Register(index));
        } else {
          __ dsll(AT, as_Register(index), scale);
          __ addu(AT, as_Register(base), AT);
        }
        __ sh(as_Register(src), AT, disp);
      }
    } else {
      if (scale == 0) {
        __ addu(AT, as_Register(base), as_Register(index));
      } else {
        __ dsll(AT, as_Register(index), scale);
        __ addu(AT, as_Register(base), AT);
      }
      __ move(T9, disp);
      if( UseLoongsonISA ) {
        __ gsshx(as_Register(src), AT, T9, 0);
      } else {
        __ addu(AT, AT, T9);
        __ sh(as_Register(src), AT, 0);
      }
    }
  } else {
    if( Assembler::is_simm16(disp) ) {
      __ sh(as_Register(src), as_Register(base), disp);
    } else {
      __ move(T9, disp);
      if( UseLoongsonISA ) {
        __ gsshx(as_Register(src), as_Register(base), T9, 0);
      } else {
        __ addu(AT, as_Register(base), T9);
        __ sh(as_Register(src), AT, 0);
      }
    }
  }
%}
// Store Char zero: sh R0 to [base + (index << scale) + disp].
// Same structure as store_C_reg_enc with the source hard-wired to R0.
enc_class store_C0_enc (memory mem) %{
  MacroAssembler _masm(&cbuf);
  int base = $mem$$base;
  int index = $mem$$index;
  int scale = $mem$$scale;
  int disp = $mem$$disp;

  if( index != 0 ) {
    if( Assembler::is_simm16(disp) ) {
      if( UseLoongsonISA && Assembler::is_simm(disp, 8) ) {
        if (scale == 0) {
          __ gsshx(R0, as_Register(base), as_Register(index), disp);
        } else {
          __ dsll(AT, as_Register(index), scale);
          __ gsshx(R0, as_Register(base), AT, disp);
        }
      } else {
        if (scale == 0) {
          __ addu(AT, as_Register(base), as_Register(index));
        } else {
          __ dsll(AT, as_Register(index), scale);
          __ addu(AT, as_Register(base), AT);
        }
        __ sh(R0, AT, disp);
      }
    } else {
      if (scale == 0) {
        __ addu(AT, as_Register(base), as_Register(index));
      } else {
        __ dsll(AT, as_Register(index), scale);
        __ addu(AT, as_Register(base), AT);
      }
      __ move(T9, disp);
      if( UseLoongsonISA ) {
        __ gsshx(R0, AT, T9, 0);
      } else {
        __ addu(AT, AT, T9);
        __ sh(R0, AT, 0);
      }
    }
  } else {
    if( Assembler::is_simm16(disp) ) {
      __ sh(R0, as_Register(base), disp);
    } else {
      __ move(T9, disp);
      if( UseLoongsonISA ) {
        __ gsshx(R0, as_Register(base), T9, 0);
      } else {
        __ addu(AT, as_Register(base), T9);
        __ sh(R0, AT, 0);
      }
    }
  }
%}
// Load Int (32bit signed): lw from [base + (index << scale) + disp] into dst.
// Loongson gslwx (reg+reg+simm8) used when available and disp fits.
enc_class load_I_enc (mRegI dst, memory mem) %{
  MacroAssembler _masm(&cbuf);
  int dst = $dst$$reg;
  int base = $mem$$base;
  int index = $mem$$index;
  int scale = $mem$$scale;
  int disp = $mem$$disp;

  if( index != 0 ) {
    if( Assembler::is_simm16(disp) ) {
      if( UseLoongsonISA && Assembler::is_simm(disp, 8) ) {
        if (scale == 0) {
          __ gslwx(as_Register(dst), as_Register(base), as_Register(index), disp);
        } else {
          __ dsll(AT, as_Register(index), scale);
          __ gslwx(as_Register(dst), as_Register(base), AT, disp);
        }
      } else {
        // NOTE(review): addu (32-bit) address formation; confirm vs daddu.
        if (scale == 0) {
          __ addu(AT, as_Register(base), as_Register(index));
        } else {
          __ dsll(AT, as_Register(index), scale);
          __ addu(AT, as_Register(base), AT);
        }
        __ lw(as_Register(dst), AT, disp);
      }
    } else {
      if (scale == 0) {
        __ addu(AT, as_Register(base), as_Register(index));
      } else {
        __ dsll(AT, as_Register(index), scale);
        __ addu(AT, as_Register(base), AT);
      }
      __ move(T9, disp);
      if( UseLoongsonISA ) {
        __ gslwx(as_Register(dst), AT, T9, 0);
      } else {
        __ addu(AT, AT, T9);
        __ lw(as_Register(dst), AT, 0);
      }
    }
  } else {
    if( Assembler::is_simm16(disp) ) {
      __ lw(as_Register(dst), as_Register(base), disp);
    } else {
      __ move(T9, disp);
      if( UseLoongsonISA ) {
        __ gslwx(as_Register(dst), as_Register(base), T9, 0);
      } else {
        __ addu(AT, as_Register(base), T9);
        __ lw(as_Register(dst), AT, 0);
      }
    }
  }
%}
// Store Int (32bit): sw src to [base + (index << scale) + disp].
// Loongson gsswx (reg+reg+simm8) used when available and disp fits.
enc_class store_I_reg_enc (memory mem, mRegI src) %{
  MacroAssembler _masm(&cbuf);
  int src = $src$$reg;
  int base = $mem$$base;
  int index = $mem$$index;
  int scale = $mem$$scale;
  int disp = $mem$$disp;

  if( index != 0 ) {
    if( Assembler::is_simm16(disp) ) {
      if( UseLoongsonISA && Assembler::is_simm(disp, 8) ) {
        if (scale == 0) {
          __ gsswx(as_Register(src), as_Register(base), as_Register(index), disp);
        } else {
          __ dsll(AT, as_Register(index), scale);
          __ gsswx(as_Register(src), as_Register(base), AT, disp);
        }
      } else {
        // NOTE(review): addu (32-bit) address formation; confirm vs daddu.
        if (scale == 0) {
          __ addu(AT, as_Register(base), as_Register(index));
        } else {
          __ dsll(AT, as_Register(index), scale);
          __ addu(AT, as_Register(base), AT);
        }
        __ sw(as_Register(src), AT, disp);
      }
    } else {
      if (scale == 0) {
        __ addu(AT, as_Register(base), as_Register(index));
      } else {
        __ dsll(AT, as_Register(index), scale);
        __ addu(AT, as_Register(base), AT);
      }
      __ move(T9, disp);
      if( UseLoongsonISA ) {
        __ gsswx(as_Register(src), AT, T9, 0);
      } else {
        __ addu(AT, AT, T9);
        __ sw(as_Register(src), AT, 0);
      }
    }
  } else {
    if( Assembler::is_simm16(disp) ) {
      __ sw(as_Register(src), as_Register(base), disp);
    } else {
      __ move(T9, disp);
      if( UseLoongsonISA ) {
        __ gsswx(as_Register(src), as_Register(base), T9, 0);
      } else {
        __ addu(AT, as_Register(base), T9);
        __ sw(as_Register(src), AT, 0);
      }
    }
  }
%}
// Store Int immediate: sw value to [base + (index << scale) + disp].
// A zero immediate is stored directly from R0; otherwise the constant is
// materialized into a scratch register first.
enc_class store_I_immI_enc (memory mem, immI src) %{
  MacroAssembler _masm(&cbuf);
  int base = $mem$$base;
  int index = $mem$$index;
  int scale = $mem$$scale;
  int disp = $mem$$disp;
  int value = $src$$constant;

  if( index != 0 ) {
    if (scale == 0) {
      __ daddu(AT, as_Register(base), as_Register(index));
    } else {
      __ dsll(AT, as_Register(index), scale);
      __ daddu(AT, as_Register(base), AT);
    }
    if( Assembler::is_simm16(disp) ) {
      if (value == 0) {
        __ sw(R0, AT, disp);
      } else {
        __ move(T9, value);
        __ sw(T9, AT, disp);
      }
    } else {
      if (value == 0) {
        __ move(T9, disp);
        // NOTE(review): addu (32-bit) for the disp add while the index add
        // above uses daddu — confirm intent.
        __ addu(AT, AT, T9);
        __ sw(R0, AT, 0);
      } else {
        __ move(T9, disp);
        __ addu(AT, AT, T9);
        __ move(T9, value);
        __ sw(T9, AT, 0);
      }
    }
  } else {
    if( Assembler::is_simm16(disp) ) {
      if (value == 0) {
        __ sw(R0, as_Register(base), disp);
      } else {
        __ move(AT, value);
        __ sw(AT, as_Register(base), disp);
      }
    } else {
      if (value == 0) {
        __ move(T9, disp);
        __ addu(AT, as_Register(base), T9);
        __ sw(R0, AT, 0);
      } else {
        __ move(T9, disp);
        __ addu(AT, as_Register(base), T9);
        __ move(T9, value);
        __ sw(T9, AT, 0);
      }
    }
  }
%}
// Load compressed oop (narrow, 32-bit zero-extended): lwu into dst.
// The memory operand must carry no displacement relocation.
enc_class load_N_enc (mRegN dst, memory mem) %{
  MacroAssembler _masm(&cbuf);
  int dst = $dst$$reg;
  int base = $mem$$base;
  int index = $mem$$index;
  int scale = $mem$$scale;
  int disp = $mem$$disp;
  relocInfo::relocType disp_reloc = $mem->disp_reloc();
  assert(disp_reloc == relocInfo::none, "cannot have disp");

  if( index != 0 ) {
    if (scale == 0) {
      __ daddu(AT, as_Register(base), as_Register(index));
    } else {
      __ dsll(AT, as_Register(index), scale);
      __ daddu(AT, as_Register(base), AT);
    }
    if( Assembler::is_simm16(disp) ) {
      __ lwu(as_Register(dst), AT, disp);
    } else {
      __ li(T9, disp);
      __ daddu(AT, AT, T9);
      __ lwu(as_Register(dst), AT, 0);
    }
  } else {
    if( Assembler::is_simm16(disp) ) {
      __ lwu(as_Register(dst), as_Register(base), disp);
    } else {
      __ li(T9, disp);
      __ daddu(AT, as_Register(base), T9);
      __ lwu(as_Register(dst), AT, 0);
    }
  }

%}
// Load Pointer (64-bit): ld from [base + (index << scale) + disp] into dst.
// The memory operand must carry no displacement relocation.
enc_class load_P_enc (mRegP dst, memory mem) %{
  MacroAssembler _masm(&cbuf);
  int dst = $dst$$reg;
  int base = $mem$$base;
  int index = $mem$$index;
  int scale = $mem$$scale;
  int disp = $mem$$disp;
  relocInfo::relocType disp_reloc = $mem->disp_reloc();
  assert(disp_reloc == relocInfo::none, "cannot have disp");

  if( index != 0 ) {
    if (scale == 0) {
      __ daddu(AT, as_Register(base), as_Register(index));
    } else {
      __ dsll(AT, as_Register(index), scale);
      __ daddu(AT, as_Register(base), AT);
    }
    if( Assembler::is_simm16(disp) ) {
      __ ld(as_Register(dst), AT, disp);
    } else {
      __ li(T9, disp);
      __ daddu(AT, AT, T9);
      __ ld(as_Register(dst), AT, 0);
    }
  } else {
    if( Assembler::is_simm16(disp) ) {
      __ ld(as_Register(dst), as_Register(base), disp);
    } else {
      __ li(T9, disp);
      __ daddu(AT, as_Register(base), T9);
      __ ld(as_Register(dst), AT, 0);
    }
  }
  // if( disp_reloc != relocInfo::none) __ ld(as_Register(dst), as_Register(dst), 0);
%}
// Store Pointer (64-bit): sd src to [base + (index << scale) + disp].
enc_class store_P_reg_enc (memory mem, mRegP src) %{
  MacroAssembler _masm(&cbuf);
  int src = $src$$reg;
  int base = $mem$$base;
  int index = $mem$$index;
  int scale = $mem$$scale;
  int disp = $mem$$disp;

  if( index != 0 ) {
    if (scale == 0) {
      __ daddu(AT, as_Register(base), as_Register(index));
    } else {
      __ dsll(AT, as_Register(index), scale);
      __ daddu(AT, as_Register(base), AT);
    }
    if( Assembler::is_simm16(disp) ) {
      __ sd(as_Register(src), AT, disp);
    } else {
      __ move(T9, disp);
      __ daddu(AT, AT, T9);
      __ sd(as_Register(src), AT, 0);
    }
  } else {
    if( Assembler::is_simm16(disp) ) {
      __ sd(as_Register(src), as_Register(base), disp);
    } else {
      __ move(T9, disp);
      __ daddu(AT, as_Register(base), T9);
      __ sd(as_Register(src), AT, 0);
    }
  }
%}
// Store compressed oop (narrow, 32-bit): sw src to [base + (index << scale) + disp].
enc_class store_N_reg_enc (memory mem, mRegN src) %{
  MacroAssembler _masm(&cbuf);
  int src = $src$$reg;
  int base = $mem$$base;
  int index = $mem$$index;
  int scale = $mem$$scale;
  int disp = $mem$$disp;

  if( index != 0 ) {
    if (scale == 0) {
      __ daddu(AT, as_Register(base), as_Register(index));
    } else {
      __ dsll(AT, as_Register(index), scale);
      __ daddu(AT, as_Register(base), AT);
    }
    if( Assembler::is_simm16(disp) ) {
      __ sw(as_Register(src), AT, disp);
    } else {
      __ move(T9, disp);
      // NOTE(review): addu (32-bit) while the index add above uses daddu —
      // confirm address range assumption.
      __ addu(AT, AT, T9);
      __ sw(as_Register(src), AT, 0);
    }
  } else {
    if( Assembler::is_simm16(disp) ) {
      __ sw(as_Register(src), as_Register(base), disp);
    } else {
      __ move(T9, disp);
      __ addu(AT, as_Register(base), T9);
      __ sw(as_Register(src), AT, 0);
    }
  }
%}
// Store NULL pointer: sd R0 to [base + (index << scale) + disp].
// Loongson gssdx (reg+reg+simm8) used when available and disp fits.
enc_class store_P_immP0_enc (memory mem) %{
  MacroAssembler _masm(&cbuf);
  int base = $mem$$base;
  int index = $mem$$index;
  int scale = $mem$$scale;
  int disp = $mem$$disp;

  if( index != 0 ) {
    if (scale == 0) {
      if( Assembler::is_simm16(disp) ) {
        if (UseLoongsonISA && Assembler::is_simm(disp, 8)) {
          __ gssdx(R0, as_Register(base), as_Register(index), disp);
        } else {
          __ daddu(AT, as_Register(base), as_Register(index));
          __ sd(R0, AT, disp);
        }
      } else {
        __ daddu(AT, as_Register(base), as_Register(index));
        __ move(T9, disp);
        if(UseLoongsonISA) {
          __ gssdx(R0, AT, T9, 0);
        } else {
          __ daddu(AT, AT, T9);
          __ sd(R0, AT, 0);
        }
      }
    } else {
      __ dsll(AT, as_Register(index), scale);
      if( Assembler::is_simm16(disp) ) {
        if (UseLoongsonISA && Assembler::is_simm(disp, 8)) {
          __ gssdx(R0, as_Register(base), AT, disp);
        } else {
          __ daddu(AT, as_Register(base), AT);
          __ sd(R0, AT, disp);
        }
      } else {
        __ daddu(AT, as_Register(base), AT);
        __ move(T9, disp);
        if (UseLoongsonISA) {
          __ gssdx(R0, AT, T9, 0);
        } else {
          __ daddu(AT, AT, T9);
          __ sd(R0, AT, 0);
        }
      }
    }
  } else {
    if( Assembler::is_simm16(disp) ) {
      __ sd(R0, as_Register(base), disp);
    } else {
      __ move(T9, disp);
      if (UseLoongsonISA) {
        __ gssdx(R0, as_Register(base), T9, 0);
      } else {
        __ daddu(AT, as_Register(base), T9);
        __ sd(R0, AT, 0);
      }
    }
  }
%}
// Store narrow-oop NULL: sw R0 to [base + (index << scale) + disp].
enc_class storeImmN0_enc(memory mem, ImmN0 src) %{
  MacroAssembler _masm(&cbuf);
  int base = $mem$$base;
  int index = $mem$$index;
  int scale = $mem$$scale;
  int disp = $mem$$disp;

  if(index!=0){
    if (scale == 0) {
      __ daddu(AT, as_Register(base), as_Register(index));
    } else {
      __ dsll(AT, as_Register(index), scale);
      __ daddu(AT, as_Register(base), AT);
    }

    if( Assembler::is_simm16(disp) ) {
      __ sw(R0, AT, disp);
    } else {
      __ move(T9, disp);
      __ daddu(AT, AT, T9);
      __ sw(R0, AT, 0);
    }
  }
  else {
    if( Assembler::is_simm16(disp) ) {
      __ sw(R0, as_Register(base), disp);
    } else {
      __ move(T9, disp);
      __ daddu(AT, as_Register(base), T9);
      __ sw(R0, AT, 0);
    }
  }
%}
// Load Long (64-bit): ld from [base + (index << scale) + disp] into dst.
enc_class load_L_enc (mRegL dst, memory mem) %{
  MacroAssembler _masm(&cbuf);
  int base = $mem$$base;
  int index = $mem$$index;
  int scale = $mem$$scale;
  int disp = $mem$$disp;
  Register dst_reg = as_Register($dst$$reg);

  // 2013/03/27: $base may contain a null object.  The server compiler forces
  // the exception offset to be the position of the first instruction, so an
  // explicit null-check load is inserted at the beginning of the sequence.
  __ lw(AT, as_Register(base), 0);

  // 2012/10/04: historical note — on the 32-bit port the naive
  //   lw(dst_lo, base, disp); lw(dst_hi, base, disp + 4);
  // pair was wrong when dst aliased base (seen in java.util.Arrays::sort1),
  // which motivated computing the address into AT first.
  if( index != 0 ) {
    if (scale == 0) {
      __ daddu(AT, as_Register(base), as_Register(index));
    } else {
      __ dsll(AT, as_Register(index), scale);
      __ daddu(AT, as_Register(base), AT);
    }
    if( Assembler::is_simm16(disp) ) {
      __ ld(dst_reg, AT, disp);
    } else {
      __ move(T9, disp);
      __ daddu(AT, AT, T9);
      __ ld(dst_reg, AT, 0);
    }
  } else {
    if( Assembler::is_simm16(disp) ) {
      // NOTE(review): the move through AT looks redundant on the 64-bit port
      // (ld could use base directly) — kept to preserve emitted code size.
      __ move(AT, as_Register(base));
      __ ld(dst_reg, AT, disp);
    } else {
      __ move(T9, disp);
      __ daddu(AT, as_Register(base), T9);
      __ ld(dst_reg, AT, 0);
    }
  }
%}
// Store Long (64-bit): sd src to [base + (index << scale) + disp].
enc_class store_L_reg_enc (memory mem, mRegL src) %{
  MacroAssembler _masm(&cbuf);
  int base = $mem$$base;
  int index = $mem$$index;
  int scale = $mem$$scale;
  int disp = $mem$$disp;
  Register src_reg = as_Register($src$$reg);

  if( index != 0 ) {
    if (scale == 0) {
      __ daddu(AT, as_Register(base), as_Register(index));
    } else {
      __ dsll(AT, as_Register(index), scale);
      __ daddu(AT, as_Register(base), AT);
    }
    if( Assembler::is_simm16(disp) ) {
      __ sd(src_reg, AT, disp);
    } else {
      __ move(T9, disp);
      __ daddu(AT, AT, T9);
      __ sd(src_reg, AT, 0);
    }
  } else {
    if( Assembler::is_simm16(disp) ) {
      // NOTE(review): move through AT looks redundant (sd could use base
      // directly) — kept to preserve emitted code size.
      __ move(AT, as_Register(base));
      __ sd(src_reg, AT, disp);
    } else {
      __ move(T9, disp);
      __ daddu(AT, as_Register(base), T9);
      __ sd(src_reg, AT, 0);
    }
  }
%}
// Store Long zero (immL0): sd R0 to [base + (index << scale) + disp].
// Fix: the two displacement adds used 32-bit addu, which sign-extends the
// 32-bit sum and corrupts 64-bit addresses >= 2^31; every other 64-bit store
// encoder (store_P_reg_enc, store_L_reg_enc) and the index add in this very
// block use daddu.  Changed addu -> daddu for consistent 64-bit arithmetic.
enc_class store_L_immL0_enc (memory mem, immL0 src) %{
  MacroAssembler _masm(&cbuf);
  int base = $mem$$base;
  int index = $mem$$index;
  int scale = $mem$$scale;
  int disp = $mem$$disp;

  if( index != 0 ) {
    if (scale == 0) {
      __ daddu(AT, as_Register(base), as_Register(index));
    } else {
      __ dsll(AT, as_Register(index), scale);
      __ daddu(AT, as_Register(base), AT);
    }
    if( Assembler::is_simm16(disp) ) {
      __ sd(R0, AT, disp);
    } else {
      __ move(T9, disp);
      __ daddu(AT, AT, T9);   // was addu: must be a full 64-bit add
      __ sd(R0, AT, 0);
    }
  } else {
    if( Assembler::is_simm16(disp) ) {
      // NOTE(review): move through AT looks redundant — kept to preserve
      // emitted code size.
      __ move(AT, as_Register(base));
      __ sd(R0, AT, disp);
    } else {
      __ move(T9, disp);
      __ daddu(AT, as_Register(base), T9);   // was addu: 64-bit add
      __ sd(R0, AT, 0);
    }
  }
%}
// Load Float (32-bit FP): lwc1 from [base + (index << scale) + disp] into dst.
// Loongson gslwxc1 (reg+reg+simm8) used when available and disp fits.
enc_class load_F_enc (regF dst, memory mem) %{
  MacroAssembler _masm(&cbuf);
  int base = $mem$$base;
  int index = $mem$$index;
  int scale = $mem$$scale;
  int disp = $mem$$disp;
  FloatRegister dst = $dst$$FloatRegister;

  if( index != 0 ) {
    if( Assembler::is_simm16(disp) ) {
      if( UseLoongsonISA && Assembler::is_simm(disp, 8) ) {
        if (scale == 0) {
          __ gslwxc1(dst, as_Register(base), as_Register(index), disp);
        } else {
          __ dsll(AT, as_Register(index), scale);
          __ gslwxc1(dst, as_Register(base), AT, disp);
        }
      } else {
        if (scale == 0) {
          __ daddu(AT, as_Register(base), as_Register(index));
        } else {
          __ dsll(AT, as_Register(index), scale);
          __ daddu(AT, as_Register(base), AT);
        }
        __ lwc1(dst, AT, disp);
      }
    } else {
      if (scale == 0) {
        __ daddu(AT, as_Register(base), as_Register(index));
      } else {
        __ dsll(AT, as_Register(index), scale);
        __ daddu(AT, as_Register(base), AT);
      }
      __ move(T9, disp);
      if( UseLoongsonISA ) {
        __ gslwxc1(dst, AT, T9, 0);
      } else {
        __ daddu(AT, AT, T9);
        __ lwc1(dst, AT, 0);
      }
    }
  } else {
    if( Assembler::is_simm16(disp) ) {
      __ lwc1(dst, as_Register(base), disp);
    } else {
      __ move(T9, disp);
      if( UseLoongsonISA ) {
        __ gslwxc1(dst, as_Register(base), T9, 0);
      } else {
        __ daddu(AT, as_Register(base), T9);
        __ lwc1(dst, AT, 0);
      }
    }
  }
%}
// Store Float (32-bit FP): swc1 src to [base + (index << scale) + disp].
// Loongson gsswxc1 (reg+reg+simm8 store) used when available and disp fits.
// Fix: the no-index / large-disp Loongson path wrongly emitted gslwxc1 — the
// float LOAD instruction — inside this STORE encoder, clobbering src instead
// of writing memory.  Changed to gsswxc1, matching the indexed path above.
enc_class store_F_reg_enc (memory mem, regF src) %{
  MacroAssembler _masm(&cbuf);
  int base = $mem$$base;
  int index = $mem$$index;
  int scale = $mem$$scale;
  int disp = $mem$$disp;
  FloatRegister src = $src$$FloatRegister;

  if( index != 0 ) {
    if( Assembler::is_simm16(disp) ) {
      if( UseLoongsonISA && Assembler::is_simm(disp, 8) ) {
        if (scale == 0) {
          __ gsswxc1(src, as_Register(base), as_Register(index), disp);
        } else {
          __ dsll(AT, as_Register(index), scale);
          __ gsswxc1(src, as_Register(base), AT, disp);
        }
      } else {
        if (scale == 0) {
          __ daddu(AT, as_Register(base), as_Register(index));
        } else {
          __ dsll(AT, as_Register(index), scale);
          __ daddu(AT, as_Register(base), AT);
        }
        __ swc1(src, AT, disp);
      }
    } else {
      if (scale == 0) {
        __ daddu(AT, as_Register(base), as_Register(index));
      } else {
        __ dsll(AT, as_Register(index), scale);
        __ daddu(AT, as_Register(base), AT);
      }
      __ move(T9, disp);
      if( UseLoongsonISA ) {
        __ gsswxc1(src, AT, T9, 0);
      } else {
        __ daddu(AT, AT, T9);
        __ swc1(src, AT, 0);
      }
    }
  } else {
    if( Assembler::is_simm16(disp) ) {
      __ swc1(src, as_Register(base), disp);
    } else {
      __ move(T9, disp);
      if( UseLoongsonISA ) {
        __ gsswxc1(src, as_Register(base), T9, 0);   // was gslwxc1 (a load) — bug
      } else {
        __ daddu(AT, as_Register(base), T9);
        __ swc1(src, AT, 0);
      }
    }
  }
%}
// Load Double (64-bit FP): ldc1 from [base + (index << scale) + disp] into dst.
// Loongson gsldxc1 (reg+reg+simm8) used when available and disp fits.
// Fix: two address adds used 32-bit addu (sign-extends the sum, corrupting
// addresses >= 2^31) while the same block uses daddu everywhere else; changed
// addu -> daddu for consistent 64-bit address arithmetic.
enc_class load_D_enc (regD dst, memory mem) %{
  MacroAssembler _masm(&cbuf);
  int base = $mem$$base;
  int index = $mem$$index;
  int scale = $mem$$scale;
  int disp = $mem$$disp;
  FloatRegister dst_reg = as_FloatRegister($dst$$reg);

  if( index != 0 ) {
    if( Assembler::is_simm16(disp) ) {
      if( UseLoongsonISA && Assembler::is_simm(disp, 8) ) {
        if (scale == 0) {
          __ gsldxc1(dst_reg, as_Register(base), as_Register(index), disp);
        } else {
          __ dsll(AT, as_Register(index), scale);
          __ gsldxc1(dst_reg, as_Register(base), AT, disp);
        }
      } else {
        if (scale == 0) {
          __ daddu(AT, as_Register(base), as_Register(index));
        } else {
          __ dsll(AT, as_Register(index), scale);
          __ daddu(AT, as_Register(base), AT);
        }
        __ ldc1(dst_reg, AT, disp);
      }
    } else {
      if (scale == 0) {
        __ daddu(AT, as_Register(base), as_Register(index));
      } else {
        __ dsll(AT, as_Register(index), scale);
        __ daddu(AT, as_Register(base), AT);
      }
      __ move(T9, disp);
      if( UseLoongsonISA ) {
        __ gsldxc1(dst_reg, AT, T9, 0);
      } else {
        __ daddu(AT, AT, T9);   // was addu: must be a full 64-bit add
        __ ldc1(dst_reg, AT, 0);
      }
    }
  } else {
    if( Assembler::is_simm16(disp) ) {
      __ ldc1(dst_reg, as_Register(base), disp);
    } else {
      __ move(T9, disp);
      if( UseLoongsonISA ) {
        __ gsldxc1(dst_reg, as_Register(base), T9, 0);
      } else {
        __ daddu(AT, as_Register(base), T9);   // was addu: 64-bit add
        __ ldc1(dst_reg, AT, 0);
      }
    }
  }
%}
// Store Double (64-bit FP): sdc1 src to [base + (index << scale) + disp].
// Loongson gssdxc1 (reg+reg+simm8) used when available and disp fits.
// Fix: two address adds used 32-bit addu (sign-extends the sum, corrupting
// addresses >= 2^31) while the same block uses daddu everywhere else; changed
// addu -> daddu for consistent 64-bit address arithmetic.
enc_class store_D_reg_enc (memory mem, regD src) %{
  MacroAssembler _masm(&cbuf);
  int base = $mem$$base;
  int index = $mem$$index;
  int scale = $mem$$scale;
  int disp = $mem$$disp;
  FloatRegister src_reg = as_FloatRegister($src$$reg);

  if( index != 0 ) {
    if( Assembler::is_simm16(disp) ) {
      if( UseLoongsonISA && Assembler::is_simm(disp, 8) ) {
        if (scale == 0) {
          __ gssdxc1(src_reg, as_Register(base), as_Register(index), disp);
        } else {
          __ dsll(AT, as_Register(index), scale);
          __ gssdxc1(src_reg, as_Register(base), AT, disp);
        }
      } else {
        if (scale == 0) {
          __ daddu(AT, as_Register(base), as_Register(index));
        } else {
          __ dsll(AT, as_Register(index), scale);
          __ daddu(AT, as_Register(base), AT);
        }
        __ sdc1(src_reg, AT, disp);
      }
    } else {
      if (scale == 0) {
        __ daddu(AT, as_Register(base), as_Register(index));
      } else {
        __ dsll(AT, as_Register(index), scale);
        __ daddu(AT, as_Register(base), AT);
      }
      __ move(T9, disp);
      if( UseLoongsonISA ) {
        __ gssdxc1(src_reg, AT, T9, 0);
      } else {
        __ daddu(AT, AT, T9);   // was addu: must be a full 64-bit add
        __ sdc1(src_reg, AT, 0);
      }
    }
  } else {
    if( Assembler::is_simm16(disp) ) {
      __ sdc1(src_reg, as_Register(base), disp);
    } else {
      __ move(T9, disp);
      if( UseLoongsonISA ) {
        __ gssdxc1(src_reg, as_Register(base), T9, 0);
      } else {
        __ daddu(AT, as_Register(base), T9);   // was addu: 64-bit add
        __ sdc1(src_reg, AT, 0);
      }
    }
  }
%}
// Runtime call: materialize the target with the fixed-length li48 (so the
// call site stays patchable at a constant size), then jalr through T9 with
// a delay-slot nop.
enc_class Java_To_Runtime (method meth) %{    // CALL Java_To_Runtime, Java_To_Runtime_Leaf
  MacroAssembler _masm(&cbuf);
  // This is the instruction starting address for relocation info.
  __ block_comment("Java_To_Runtime");
  cbuf.set_insts_mark();
  __ relocate(relocInfo::runtime_call_type);

  __ li48(T9, (long)$meth$$method);
  __ jalr(T9);
  __ nop();
%}
// Java static call: relocation kind depends on whether the target method is
// known (static/opt-virtual) or must go through the fixup stub (runtime).
enc_class Java_Static_Call (method meth) %{    // JAVA STATIC CALL
  // CALL to fixup routine.  Fixup routine uses ScopeDesc info to determine
  // who we intended to call.
  MacroAssembler _masm(&cbuf);
  cbuf.set_insts_mark();

  if ( !_method ) {
    __ relocate(relocInfo::runtime_call_type);
  } else if(_optimized_virtual) {
    __ relocate(relocInfo::opt_virtual_call_type);
  } else {
    __ relocate(relocInfo::static_call_type);
  }

  // NOTE(review): li may emit a variable-length sequence, unlike the li48
  // used by Java_To_Runtime — confirm call-site patching tolerates this.
  __ li(T9, $meth$$method);
  __ jalr(T9);
  __ nop();
  if( _method ) {  // Emit stub for static call
    emit_java_to_interp(cbuf);
  }
%}
/*
 * Java dynamic (virtual/inline-cache) call.
 * [Ref: LIR_Assembler::ic_call() ]
 */
enc_class Java_Dynamic_Call (method meth) %{    // JAVA DYNAMIC CALL
  MacroAssembler _masm(&cbuf);
  __ block_comment("Java_Dynamic_Call");
  __ ic_call((address)$meth$$method);
%}
// Translate the AT result of a preceding fast_lock/fast_unlock into the
// flags register: flags = 0 when AT == 0 (success), all-ones otherwise.
enc_class Set_Flags_After_Fast_Lock_Unlock(FlagsReg cr) %{
  Register flags = $cr$$Register;
  Label L;

  MacroAssembler _masm(&cbuf);

  __ addu(flags, R0, R0);        // flags = 0
  __ beq(AT, R0, L);             // AT == 0: success path, keep flags == 0
  __ delayed()->nop();
  __ move(flags, 0xFFFFFFFF);    // failure: set all bits
  __ bind(L);
%}
// Partial subtype check: slow-path klass subtype test.
// On exit, result == 0 when sub is a subtype of super, 1 on a miss.
enc_class enc_PartialSubtypeCheck(mRegP result, mRegP sub, mRegP super, mRegI tmp) %{
  Register result = $result$$Register;
  Register sub = $sub$$Register;
  Register super = $super$$Register;
  Register length = $tmp$$Register;
  Register tmp = T9;
  Label miss;

  /* 2012/9/28 Jin: result may be the same as sub
   *  47c   B40: #  B21 B41 <- B20  Freq: 0.155379
   *  47c     partialSubtypeCheck result=S1, sub=S1, super=S3, length=S0
   *  4bc     mov   S2, NULL #@loadConP
   *  4c0     beq   S1, S2, B21 #@branchConP  P=0.999999 C=-1.000000
   */
  MacroAssembler _masm(&cbuf);
  Label done;
  __ check_klass_subtype_slow_path(sub, super, length, tmp,
                                   NULL, &miss,
                                   /*set_cond_codes:*/ true);
  /* 2013/7/22 Jin: Refer to X86_64's RDI — 0 means "hit". */
  __ move(result, 0);
  __ b(done);
  __ nop();

  __ bind(miss);
  __ move(result, 1);
  __ bind(done);
%}
3157 %}
3160 //---------MIPS FRAME--------------------------------------------------------------
3161 // Definition of frame structure and management information.
3162 //
3163 // S T A C K L A Y O U T Allocators stack-slot number
3164 // | (to get allocators register number
3165 // G Owned by | | v add SharedInfo::stack0)
3166 // r CALLER | |
3167 // o | +--------+ pad to even-align allocators stack-slot
3168 // w V | pad0 | numbers; owned by CALLER
3169 // t -----------+--------+----> Matcher::_in_arg_limit, unaligned
3170 // h ^ | in | 5
3171 // | | args | 4 Holes in incoming args owned by SELF
3172 // | | old | | 3
3173 // | | SP-+--------+----> Matcher::_old_SP, even aligned
3174 // v | | ret | 3 return address
3175 // Owned by +--------+
3176 // Self | pad2 | 2 pad to align old SP
3177 // | +--------+ 1
3178 // | | locks | 0
3179 // | +--------+----> SharedInfo::stack0, even aligned
3180 // | | pad1 | 11 pad to align new SP
3181 // | +--------+
3182 // | | | 10
3183 // | | spills | 9 spills
3184 // V | | 8 (pad0 slot for callee)
3185 // -----------+--------+----> Matcher::_out_arg_limit, unaligned
3186 // ^ | out | 7
3187 // | | args | 6 Holes in outgoing args owned by CALLEE
3188 // Owned by new | |
3189 // Callee SP-+--------+----> Matcher::_new_SP, even aligned
3190 // | |
3191 //
3192 // Note 1: Only region 8-11 is determined by the allocator. Region 0-5 is
3193 // known from SELF's arguments and the Java calling convention.
3194 // Region 6-7 is determined per call site.
3195 // Note 2: If the calling convention leaves holes in the incoming argument
3196 // area, those holes are owned by SELF. Holes in the outgoing area
//              are owned by the CALLEE.  Holes should not be necessary in the
3198 // incoming area, as the Java calling convention is completely under
3199 // the control of the AD file. Doubles can be sorted and packed to
//              avoid holes.  Holes in the outgoing arguments may be necessary for
3201 // varargs C calling conventions.
3202 // Note 3: Region 0-3 is even aligned, with pad2 as needed. Region 3-5 is
3203 // even aligned with pad0 as needed.
3204 // Region 6 is even aligned. Region 6-7 is NOT even aligned;
3205 // region 6-11 is even aligned; it may be padded out more so that
3206 // the region from SP to FP meets the minimum stack alignment.
3207 // Note 4: For I2C adapters, the incoming FP may not meet the minimum stack
3208 // alignment. Region 11, pad1, may be dynamically extended so that
3209 // SP meets the minimum alignment.
frame %{
  // Direction of stack growth (assumed the same for C & Java code).
  stack_direction(TOWARDS_LOW);

  // These two registers define part of the calling convention
  // between compiled code and the interpreter.
  // SEE StartI2CNode::calling_convention & StartC2INode::calling_convention & StartOSRNode::calling_convention
  // for more information. by yjl 3/16/2006

  inline_cache_reg(T1);                // Inline Cache Register
  interpreter_method_oop_reg(S3);      // Method Oop Register when calling interpreter
  /*
  inline_cache_reg(T1);                // Inline Cache Register or methodOop for I2C
  interpreter_arg_ptr_reg(A0);         // Argument pointer for I2C adapters
  */

  // Optional: name the operand used by cisc-spilling to access [stack_pointer + offset]
  cisc_spilling_operand_name(indOffset32);

  // Number of stack slots consumed by locking an object
  // generate Compile::sync_stack_slots
#ifdef _LP64
  sync_stack_slots(2);
#else
  sync_stack_slots(1);
#endif

  frame_pointer(SP);

  // Interpreter stores its frame pointer in a register which is
  // stored to the stack by I2CAdaptors.
  // I2CAdaptors convert from interpreted java to compiled java.

  interpreter_frame_pointer(FP);

  // generate Matcher::stack_alignment
  stack_alignment(StackAlignmentInBytes); //wordSize = sizeof(char*);

  // Number of stack slots between incoming argument block and the start of
  // a new frame. The PROLOG must add this many slots to the stack. The
  // EPILOG must remove this many slots. Intel needs one slot for
  // return address.
  // generate Matcher::in_preserve_stack_slots
  //in_preserve_stack_slots(VerifyStackAtCalls + 2); //Now VerifyStackAtCalls is defined as false ! Leave one stack slot for ra and fp
  in_preserve_stack_slots(4); //Now VerifyStackAtCalls is defined as false ! Leave two stack slots for ra and fp

  // Number of outgoing stack slots killed above the out_preserve_stack_slots
  // for calls to C. Supports the var-args backing area for register parms.
  varargs_C_out_slots_killed(0);

  // The after-PROLOG location of the return address. Location of
  // return address specifies a type (REG or STACK) and a number
  // representing the register number (i.e. - use a register name) or
  // stack slot.
  // Ret Addr is on stack in slot 0 if no locks or verification or alignment.
  // Otherwise, it is above the locks and verification slot and alignment word
  //return_addr(STACK -1+ round_to(1+VerifyStackAtCalls+Compile::current()->sync()*Compile::current()->sync_stack_slots(),WordsPerLong));
  return_addr(REG RA);

  // Body of function which returns an integer array locating
  // arguments either in registers or in stack slots. Passed an array
  // of ideal registers called "sig" and a "length" count. Stack-slot
  // offsets are based on outgoing arguments, i.e. a CALLER setting up
  // arguments for a CALLEE. Incoming stack arguments are
  // automatically biased by the preserve_stack_slots field above.

  // will generated to Matcher::calling_convention(OptoRegPair *sig, uint length, bool is_outgoing)
  // StartNode::calling_convention call this. by yjl 3/16/2006
  calling_convention %{
    SharedRuntime::java_calling_convention(sig_bt, regs, length, false);
  %}

  // Body of function which returns an integer array locating
  // arguments either in registers or in stack slots. Passed an array
  // of ideal registers called "sig" and a "length" count. Stack-slot
  // offsets are based on outgoing arguments, i.e. a CALLER setting up
  // arguments for a CALLEE. Incoming stack arguments are
  // automatically biased by the preserve_stack_slots field above.

  // SEE CallRuntimeNode::calling_convention for more information. by yjl 3/16/2006
  c_calling_convention %{
    (void) SharedRuntime::c_calling_convention(sig_bt, regs, /*regs2=*/NULL, length);
  %}

  // Location of C & interpreter return values
  // register(s) contain(s) return value for Op_StartI2C and Op_StartOSR.
  // SEE Matcher::match. by yjl 3/16/2006
  c_return_value %{
    assert( ideal_reg >= Op_RegI && ideal_reg <= Op_RegL, "only return normal values" );
    /* -- , -- , Op_RegN, Op_RegI, Op_RegP, Op_RegF, Op_RegD, Op_RegL */
    static int lo[Op_RegL+1] = { 0, 0, V0_num, V0_num, V0_num, F0_num, F0_num, V0_num };
    static int hi[Op_RegL+1] = { 0, 0, OptoReg::Bad, OptoReg::Bad, V0_H_num, OptoReg::Bad, F0_H_num, V0_H_num };
    return OptoRegPair(hi[ideal_reg],lo[ideal_reg]);
  %}

  // Location of return values
  // register(s) contain(s) return value for Op_StartC2I and Op_Start.
  // SEE Matcher::match. by yjl 3/16/2006

  return_value %{
    assert( ideal_reg >= Op_RegI && ideal_reg <= Op_RegL, "only return normal values" );
    /* -- , -- , Op_RegN, Op_RegI, Op_RegP, Op_RegF, Op_RegD, Op_RegL */
    static int lo[Op_RegL+1] = { 0, 0, V0_num, V0_num, V0_num, F0_num, F0_num, V0_num };
    static int hi[Op_RegL+1] = { 0, 0, OptoReg::Bad, OptoReg::Bad, V0_H_num, OptoReg::Bad, F0_H_num, V0_H_num};
    return OptoRegPair(hi[ideal_reg],lo[ideal_reg]);
  %}

%}
//----------ATTRIBUTES---------------------------------------------------------
// Default attribute values for operands and instructions; individual
// definitions below may override these.
//----------Operand Attributes-------------------------------------------------
op_attrib op_cost(0);           // Required cost attribute

//----------Instruction Attributes---------------------------------------------
ins_attrib ins_cost(100);       // Required cost attribute
ins_attrib ins_size(32);        // Required size attribute (in bits)
ins_attrib ins_pc_relative(0);  // Required PC Relative flag
ins_attrib ins_short_branch(0); // Required flag: is this instruction a
                                // non-matching short branch variant of some
                                // long branch?
ins_attrib ins_alignment(4);    // Required alignment attribute (must be a power of 2)
                                // specifies the alignment that some part of the instruction (not
                                // necessarily the start) requires. If > 1, a compute_padding()
                                // function must be provided for the instruction
//----------OPERANDS-----------------------------------------------------------
// Operand definitions must precede instruction definitions for correct parsing
// in the ADLC because operands constitute user defined types which are used in
// instruction definitions.

// Vectors
// 64-bit vector, allocated in the double-FP register class.
operand vecD() %{
  constraint(ALLOC_IN_RC(dbl_reg));
  match(VecD);

  format %{ %}
  interface(REG_INTER);
%}

// Flags register, used as output of compare instructions
operand FlagsReg() %{
  constraint(ALLOC_IN_RC(mips_flags));
  match(RegFlags);

  format %{ "EFLAGS" %}
  interface(REG_INTER);
%}
//----------Simple Operands----------------------------------------------------
//TODO: Should we need to define some more special immediate number ?
// Immediate Operands
// Integer Immediate (any 32-bit constant)
operand immI() %{
  match(ConI);
  //TODO: should not match immI8 here LEE
  match(immI8);

  op_cost(20);
  format %{ %}
  interface(CONST_INTER);
%}

// Long Immediate 8-bit (signed: [-0x80, 0x80) )
operand immL8()
%{
  predicate(-0x80L <= n->get_long() && n->get_long() < 0x80L);
  match(ConL);

  op_cost(5);
  format %{ %}
  interface(CONST_INTER);
%}
// Constant for test vs zero
operand immI0() %{
  predicate(n->get_int() == 0);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Constant for increment
operand immI1() %{
  predicate(n->get_int() == 1);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Constant for decrement
operand immI_M1() %{
  predicate(n->get_int() == -1);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Integer.MAX_VALUE (0x7FFFFFFF)
operand immI_MaxI() %{
  predicate(n->get_int() == 2147483647);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Valid scale values for addressing modes
operand immI2() %{
  predicate(0 <= n->get_int() && (n->get_int() <= 3));
  match(ConI);

  format %{ %}
  interface(CONST_INTER);
%}

// 8-bit signed integer immediate
operand immI8() %{
  predicate((-128 <= n->get_int()) && (n->get_int() <= 127));
  match(ConI);

  op_cost(5);
  format %{ %}
  interface(CONST_INTER);
%}

// 16-bit signed integer immediate
operand immI16() %{
  predicate((-32768 <= n->get_int()) && (n->get_int() <= 32767));
  match(ConI);

  op_cost(10);
  format %{ %}
  interface(CONST_INTER);
%}

// Constant for long shifts
operand immI_32() %{
  predicate( n->get_int() == 32 );
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

operand immI_63() %{
  predicate( n->get_int() == 63 );
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Shift amounts valid for 32-bit shifts
operand immI_0_31() %{
  predicate( n->get_int() >= 0 && n->get_int() <= 31 );
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
// Operand for non-negative integer mask (contiguous low-bit mask,
// as recognized by Assembler::is_int_mask)
operand immI_nonneg_mask() %{
  predicate( (n->get_int() >= 0) && (Assembler::is_int_mask(n->get_int()) != -1) );
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Shift amounts valid for the upper half of 64-bit shifts
operand immI_32_63() %{
  predicate( n->get_int() >= 32 && n->get_int() <= 63 );
  match(ConI);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}

// Integer immediate whose negation fits in 16 signed bits — note the range
// is shifted by one relative to immI16 (name suggests use in subtract forms).
operand immI16_sub() %{
  predicate((-32767 <= n->get_int()) && (n->get_int() <= 32768));
  match(ConI);

  op_cost(10);
  format %{ %}
  interface(CONST_INTER);
%}

// Non-negative 15-bit immediate
operand immI_0_32767() %{
  predicate( n->get_int() >= 0 && n->get_int() <= 32767 );
  match(ConI);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}

// Non-negative 16-bit immediate (unsigned range)
operand immI_0_65535() %{
  predicate( n->get_int() >= 0 && n->get_int() <= 65535 );
  match(ConI);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}

operand immI_1() %{
  predicate( n->get_int() == 1 );
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

operand immI_2() %{
  predicate( n->get_int() == 2 );
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

operand immI_3() %{
  predicate( n->get_int() == 3 );
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// NOTE: unlike its siblings above, immI_7 declares no op_cost
// (the op_attrib default applies).
operand immI_7() %{
  predicate( n->get_int() == 7 );
  match(ConI);

  format %{ %}
  interface(CONST_INTER);
%}

// Immediates for special shifts (sign extend)

// Constants for increment
operand immI_16() %{
  predicate( n->get_int() == 16 );
  match(ConI);

  format %{ %}
  interface(CONST_INTER);
%}

operand immI_24() %{
  predicate( n->get_int() == 24 );
  match(ConI);

  format %{ %}
  interface(CONST_INTER);
%}

// Constant for byte-wide masking
operand immI_255() %{
  predicate( n->get_int() == 255 );
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Constant for short-wide masking
operand immI_65535() %{
  predicate( n->get_int() == 65535 );
  match(ConI);

  op_cost(5);
  format %{ %}
  interface(CONST_INTER);
%}

operand immI_65536() %{
  predicate( n->get_int() == 65536 );
  match(ConI);

  op_cost(5);
  format %{ %}
  interface(CONST_INTER);
%}

operand immI_M65536() %{
  predicate( n->get_int() == -65536 );
  match(ConI);

  op_cost(5);
  format %{ %}
  interface(CONST_INTER);
%}
// Pointer Immediate
operand immP() %{
  match(ConP);

  op_cost(10);
  format %{ %}
  interface(CONST_INTER);
%}

// NULL Pointer Immediate
operand immP0() %{
  predicate( n->get_ptr() == 0 );
  match(ConP);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}

// Pointer Immediate: 64-bit
operand immP_set() %{
  match(ConP);

  op_cost(5);
  // formats are generated automatically for constants and base registers
  format %{ %}
  interface(CONST_INTER);
%}

// Pointer Immediate: 64-bit
// Oops, or pointers expensive to synthesize (more than 3 instructions
// per MacroAssembler::insts_for_set64).
operand immP_load() %{
  predicate(n->bottom_type()->isa_oop_ptr() || (MacroAssembler::insts_for_set64(n->get_ptr()) > 3));
  match(ConP);

  op_cost(5);
  // formats are generated automatically for constants and base registers
  format %{ %}
  interface(CONST_INTER);
%}

// Pointer Immediate: 64-bit
// Complement of immP_load: non-oop pointers cheap to synthesize (<= 3 insts).
operand immP_no_oop_cheap() %{
  predicate(!n->bottom_type()->isa_oop_ptr() && (MacroAssembler::insts_for_set64(n->get_ptr()) <= 3));
  match(ConP);

  op_cost(5);
  // formats are generated automatically for constants and base registers
  format %{ %}
  interface(CONST_INTER);
%}

// Pointer for polling page
operand immP_poll() %{
  predicate(n->get_ptr() != 0 && n->get_ptr() == (intptr_t)os::get_polling_page());
  match(ConP);
  op_cost(5);

  format %{ %}
  interface(CONST_INTER);
%}

// Narrow (compressed) Pointer Immediate
operand immN() %{
  match(ConN);

  op_cost(10);
  format %{ %}
  interface(CONST_INTER);
%}

// Narrow Klass Immediate
operand immNKlass() %{
  match(ConNKlass);

  op_cost(10);
  format %{ %}
  interface(CONST_INTER);
%}

// NULL narrow Pointer Immediate
operand immN0() %{
  predicate(n->get_narrowcon() == 0);
  match(ConN);

  op_cost(5);
  format %{ %}
  interface(CONST_INTER);
%}
// Long Immediate (any 64-bit constant)
operand immL() %{
  match(ConL);

  op_cost(20);
  format %{ %}
  interface(CONST_INTER);
%}

// Long Immediate zero
operand immL0() %{
  predicate( n->get_long() == 0L );
  match(ConL);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}

operand immL7() %{
  predicate( n->get_long() == 7L );
  match(ConL);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}

operand immL_M1() %{
  predicate( n->get_long() == -1L );
  match(ConL);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}

// bit 0..2 zero
operand immL_M8() %{
  predicate( n->get_long() == -8L );
  match(ConL);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}

// bit 2 zero
operand immL_M5() %{
  predicate( n->get_long() == -5L );
  match(ConL);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}

// bit 1..2 zero
operand immL_M7() %{
  predicate( n->get_long() == -7L );
  match(ConL);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}

// bit 0..1 zero
operand immL_M4() %{
  predicate( n->get_long() == -4L );
  match(ConL);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}

// bit 3..6 zero
operand immL_M121() %{
  predicate( n->get_long() == -121L );
  match(ConL);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}

// Long immediate from 0 to 127.
// Used for a shorter form of long mul by 10.
operand immL_127() %{
  predicate((0 <= n->get_long()) && (n->get_long() <= 127));
  match(ConL);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}

// Operand for non-negative long mask (contiguous low-bit mask,
// as recognized by Assembler::is_jlong_mask)
operand immL_nonneg_mask() %{
  predicate( (n->get_long() >= 0) && (Assembler::is_jlong_mask(n->get_long()) != -1) );
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Non-negative 16-bit long immediate (unsigned range)
operand immL_0_65535() %{
  predicate( n->get_long() >= 0 && n->get_long() <= 65535 );
  match(ConL);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}

// Long Immediate: cheap (materialize in <= 3 instructions)
operand immL_cheap() %{
  predicate(MacroAssembler::insts_for_set64(n->get_long()) <= 3);
  match(ConL);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}

// Long Immediate: expensive (materialize in > 3 instructions)
operand immL_expensive() %{
  predicate(MacroAssembler::insts_for_set64(n->get_long()) > 3);
  match(ConL);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}

// 16-bit signed long immediate
operand immL16() %{
  predicate((-32768 <= n->get_long()) && (n->get_long() <= 32767));
  match(ConL);

  op_cost(10);
  format %{ %}
  interface(CONST_INTER);
%}

// Long immediate whose negation fits in 16 signed bits — range shifted by
// one relative to immL16 (name suggests use in subtract forms).
operand immL16_sub() %{
  predicate((-32767 <= n->get_long()) && (n->get_long() <= 32768));
  match(ConL);

  op_cost(10);
  format %{ %}
  interface(CONST_INTER);
%}

// Long Immediate: low 32-bit mask
operand immL_32bits() %{
  predicate(n->get_long() == 0xFFFFFFFFL);
  match(ConL);
  op_cost(20);

  format %{ %}
  interface(CONST_INTER);
%}

// Long Immediate 32-bit signed (sign-extends to the same 64-bit value)
operand immL32()
%{
  predicate(n->get_long() == (int) (n->get_long()));
  match(ConL);

  op_cost(15);
  format %{ %}
  interface(CONST_INTER);
%}
// single-precision floating-point zero
// NOTE: bit-pattern compare — matches +0.0f only, not -0.0f
// (jint_cast(-0.0f) is 0x80000000).
operand immF0() %{
  predicate(jint_cast(n->getf()) == 0);
  match(ConF);

  op_cost(5);
  format %{ %}
  interface(CONST_INTER);
%}

// single-precision floating-point immediate
operand immF() %{
  match(ConF);

  op_cost(20);
  format %{ %}
  interface(CONST_INTER);
%}

// double-precision floating-point zero
// NOTE: bit-pattern compare — matches +0.0 only, not -0.0.
operand immD0() %{
  predicate(jlong_cast(n->getd()) == 0);
  match(ConD);

  op_cost(5);
  format %{ %}
  interface(CONST_INTER);
%}

// double-precision floating-point immediate
operand immD() %{
  match(ConD);

  op_cost(20);
  format %{ %}
  interface(CONST_INTER);
%}
// Register Operands
// Integer Register (any register in the int_reg class)
operand mRegI() %{
  constraint(ALLOC_IN_RC(int_reg));
  match(RegI);

  format %{ %}
  interface(REG_INTER);
%}

// Integer register, restricted to the no_Ax_int_reg class
operand no_Ax_mRegI() %{
  constraint(ALLOC_IN_RC(no_Ax_int_reg));
  match(RegI);
  match(mRegI);

  format %{ %}
  interface(REG_INTER);
%}

// Fixed-register integer operands: each constrains allocation to one
// specific physical register (register class holds a single register).

operand mS0RegI() %{
  constraint(ALLOC_IN_RC(s0_reg));
  match(RegI);
  match(mRegI);

  format %{ "S0" %}
  interface(REG_INTER);
%}

operand mS1RegI() %{
  constraint(ALLOC_IN_RC(s1_reg));
  match(RegI);
  match(mRegI);

  format %{ "S1" %}
  interface(REG_INTER);
%}

operand mS2RegI() %{
  constraint(ALLOC_IN_RC(s2_reg));
  match(RegI);
  match(mRegI);

  format %{ "S2" %}
  interface(REG_INTER);
%}

operand mS3RegI() %{
  constraint(ALLOC_IN_RC(s3_reg));
  match(RegI);
  match(mRegI);

  format %{ "S3" %}
  interface(REG_INTER);
%}

operand mS4RegI() %{
  constraint(ALLOC_IN_RC(s4_reg));
  match(RegI);
  match(mRegI);

  format %{ "S4" %}
  interface(REG_INTER);
%}

operand mS5RegI() %{
  constraint(ALLOC_IN_RC(s5_reg));
  match(RegI);
  match(mRegI);

  format %{ "S5" %}
  interface(REG_INTER);
%}

operand mS6RegI() %{
  constraint(ALLOC_IN_RC(s6_reg));
  match(RegI);
  match(mRegI);

  format %{ "S6" %}
  interface(REG_INTER);
%}

operand mS7RegI() %{
  constraint(ALLOC_IN_RC(s7_reg));
  match(RegI);
  match(mRegI);

  format %{ "S7" %}
  interface(REG_INTER);
%}

operand mT0RegI() %{
  constraint(ALLOC_IN_RC(t0_reg));
  match(RegI);
  match(mRegI);

  format %{ "T0" %}
  interface(REG_INTER);
%}

operand mT1RegI() %{
  constraint(ALLOC_IN_RC(t1_reg));
  match(RegI);
  match(mRegI);

  format %{ "T1" %}
  interface(REG_INTER);
%}

operand mT2RegI() %{
  constraint(ALLOC_IN_RC(t2_reg));
  match(RegI);
  match(mRegI);

  format %{ "T2" %}
  interface(REG_INTER);
%}

operand mT3RegI() %{
  constraint(ALLOC_IN_RC(t3_reg));
  match(RegI);
  match(mRegI);

  format %{ "T3" %}
  interface(REG_INTER);
%}

operand mT8RegI() %{
  constraint(ALLOC_IN_RC(t8_reg));
  match(RegI);
  match(mRegI);

  format %{ "T8" %}
  interface(REG_INTER);
%}

operand mT9RegI() %{
  constraint(ALLOC_IN_RC(t9_reg));
  match(RegI);
  match(mRegI);

  format %{ "T9" %}
  interface(REG_INTER);
%}

operand mA0RegI() %{
  constraint(ALLOC_IN_RC(a0_reg));
  match(RegI);
  match(mRegI);

  format %{ "A0" %}
  interface(REG_INTER);
%}

operand mA1RegI() %{
  constraint(ALLOC_IN_RC(a1_reg));
  match(RegI);
  match(mRegI);

  format %{ "A1" %}
  interface(REG_INTER);
%}

operand mA2RegI() %{
  constraint(ALLOC_IN_RC(a2_reg));
  match(RegI);
  match(mRegI);

  format %{ "A2" %}
  interface(REG_INTER);
%}

operand mA3RegI() %{
  constraint(ALLOC_IN_RC(a3_reg));
  match(RegI);
  match(mRegI);

  format %{ "A3" %}
  interface(REG_INTER);
%}

operand mA4RegI() %{
  constraint(ALLOC_IN_RC(a4_reg));
  match(RegI);
  match(mRegI);

  format %{ "A4" %}
  interface(REG_INTER);
%}

operand mA5RegI() %{
  constraint(ALLOC_IN_RC(a5_reg));
  match(RegI);
  match(mRegI);

  format %{ "A5" %}
  interface(REG_INTER);
%}

operand mA6RegI() %{
  constraint(ALLOC_IN_RC(a6_reg));
  match(RegI);
  match(mRegI);

  format %{ "A6" %}
  interface(REG_INTER);
%}

operand mA7RegI() %{
  constraint(ALLOC_IN_RC(a7_reg));
  match(RegI);
  match(mRegI);

  format %{ "A7" %}
  interface(REG_INTER);
%}

operand mV0RegI() %{
  constraint(ALLOC_IN_RC(v0_reg));
  match(RegI);
  match(mRegI);

  format %{ "V0" %}
  interface(REG_INTER);
%}

operand mV1RegI() %{
  constraint(ALLOC_IN_RC(v1_reg));
  match(RegI);
  match(mRegI);

  format %{ "V1" %}
  interface(REG_INTER);
%}
// Narrow (compressed) oop register — any register in the int_reg class.
operand mRegN() %{
  constraint(ALLOC_IN_RC(int_reg));
  match(RegN);

  format %{ %}
  interface(REG_INTER);
%}

// Fixed-register narrow-oop operands: each constrains allocation to one
// specific physical register.

operand t0_RegN() %{
  constraint(ALLOC_IN_RC(t0_reg));
  match(RegN);
  match(mRegN);

  format %{ %}
  interface(REG_INTER);
%}

operand t1_RegN() %{
  constraint(ALLOC_IN_RC(t1_reg));
  match(RegN);
  match(mRegN);

  format %{ %}
  interface(REG_INTER);
%}

operand t2_RegN() %{
  constraint(ALLOC_IN_RC(t2_reg));
  match(RegN);
  match(mRegN);

  format %{ %}
  interface(REG_INTER);
%}

operand t3_RegN() %{
  constraint(ALLOC_IN_RC(t3_reg));
  match(RegN);
  match(mRegN);

  format %{ %}
  interface(REG_INTER);
%}

operand t8_RegN() %{
  constraint(ALLOC_IN_RC(t8_reg));
  match(RegN);
  match(mRegN);

  format %{ %}
  interface(REG_INTER);
%}

operand t9_RegN() %{
  constraint(ALLOC_IN_RC(t9_reg));
  match(RegN);
  match(mRegN);

  format %{ %}
  interface(REG_INTER);
%}

operand a0_RegN() %{
  constraint(ALLOC_IN_RC(a0_reg));
  match(RegN);
  match(mRegN);

  format %{ %}
  interface(REG_INTER);
%}

operand a1_RegN() %{
  constraint(ALLOC_IN_RC(a1_reg));
  match(RegN);
  match(mRegN);

  format %{ %}
  interface(REG_INTER);
%}

operand a2_RegN() %{
  constraint(ALLOC_IN_RC(a2_reg));
  match(RegN);
  match(mRegN);

  format %{ %}
  interface(REG_INTER);
%}

operand a3_RegN() %{
  constraint(ALLOC_IN_RC(a3_reg));
  match(RegN);
  match(mRegN);

  format %{ %}
  interface(REG_INTER);
%}

operand a4_RegN() %{
  constraint(ALLOC_IN_RC(a4_reg));
  match(RegN);
  match(mRegN);

  format %{ %}
  interface(REG_INTER);
%}

operand a5_RegN() %{
  constraint(ALLOC_IN_RC(a5_reg));
  match(RegN);
  match(mRegN);

  format %{ %}
  interface(REG_INTER);
%}

operand a6_RegN() %{
  constraint(ALLOC_IN_RC(a6_reg));
  match(RegN);
  match(mRegN);

  format %{ %}
  interface(REG_INTER);
%}

operand a7_RegN() %{
  constraint(ALLOC_IN_RC(a7_reg));
  match(RegN);
  match(mRegN);

  format %{ %}
  interface(REG_INTER);
%}

operand s0_RegN() %{
  constraint(ALLOC_IN_RC(s0_reg));
  match(RegN);
  match(mRegN);

  format %{ %}
  interface(REG_INTER);
%}

operand s1_RegN() %{
  constraint(ALLOC_IN_RC(s1_reg));
  match(RegN);
  match(mRegN);

  format %{ %}
  interface(REG_INTER);
%}

operand s2_RegN() %{
  constraint(ALLOC_IN_RC(s2_reg));
  match(RegN);
  match(mRegN);

  format %{ %}
  interface(REG_INTER);
%}

operand s3_RegN() %{
  constraint(ALLOC_IN_RC(s3_reg));
  match(RegN);
  match(mRegN);

  format %{ %}
  interface(REG_INTER);
%}

operand s4_RegN() %{
  constraint(ALLOC_IN_RC(s4_reg));
  match(RegN);
  match(mRegN);

  format %{ %}
  interface(REG_INTER);
%}

operand s5_RegN() %{
  constraint(ALLOC_IN_RC(s5_reg));
  match(RegN);
  match(mRegN);

  format %{ %}
  interface(REG_INTER);
%}

operand s6_RegN() %{
  constraint(ALLOC_IN_RC(s6_reg));
  match(RegN);
  match(mRegN);

  format %{ %}
  interface(REG_INTER);
%}

operand s7_RegN() %{
  constraint(ALLOC_IN_RC(s7_reg));
  match(RegN);
  match(mRegN);

  format %{ %}
  interface(REG_INTER);
%}

operand v0_RegN() %{
  constraint(ALLOC_IN_RC(v0_reg));
  match(RegN);
  match(mRegN);

  format %{ %}
  interface(REG_INTER);
%}

operand v1_RegN() %{
  constraint(ALLOC_IN_RC(v1_reg));
  match(RegN);
  match(mRegN);

  format %{ %}
  interface(REG_INTER);
%}
// Pointer Register (any register in the p_reg class)
operand mRegP() %{
  constraint(ALLOC_IN_RC(p_reg));
  match(RegP);

  format %{ %}
  interface(REG_INTER);
%}

// Pointer register excluding T8 (no_T8_p_reg class)
operand no_T8_mRegP() %{
  constraint(ALLOC_IN_RC(no_T8_p_reg));
  match(RegP);
  match(mRegP);

  format %{ %}
  interface(REG_INTER);
%}

// Fixed-register pointer operands: each constrains allocation to one
// specific physical register. All except t8_RegP also chain to
// no_T8_mRegP (T8 itself obviously cannot).

operand s0_RegP()
%{
  constraint(ALLOC_IN_RC(s0_long_reg));
  match(RegP);
  match(mRegP);
  match(no_T8_mRegP);

  format %{ %}
  interface(REG_INTER);
%}

operand s1_RegP()
%{
  constraint(ALLOC_IN_RC(s1_long_reg));
  match(RegP);
  match(mRegP);
  match(no_T8_mRegP);

  format %{ %}
  interface(REG_INTER);
%}

operand s2_RegP()
%{
  constraint(ALLOC_IN_RC(s2_long_reg));
  match(RegP);
  match(mRegP);
  match(no_T8_mRegP);

  format %{ %}
  interface(REG_INTER);
%}

operand s3_RegP()
%{
  constraint(ALLOC_IN_RC(s3_long_reg));
  match(RegP);
  match(mRegP);
  match(no_T8_mRegP);

  format %{ %}
  interface(REG_INTER);
%}

operand s4_RegP()
%{
  constraint(ALLOC_IN_RC(s4_long_reg));
  match(RegP);
  match(mRegP);
  match(no_T8_mRegP);

  format %{ %}
  interface(REG_INTER);
%}

operand s5_RegP()
%{
  constraint(ALLOC_IN_RC(s5_long_reg));
  match(RegP);
  match(mRegP);
  match(no_T8_mRegP);

  format %{ %}
  interface(REG_INTER);
%}

operand s6_RegP()
%{
  constraint(ALLOC_IN_RC(s6_long_reg));
  match(RegP);
  match(mRegP);
  match(no_T8_mRegP);

  format %{ %}
  interface(REG_INTER);
%}

operand s7_RegP()
%{
  constraint(ALLOC_IN_RC(s7_long_reg));
  match(RegP);
  match(mRegP);
  match(no_T8_mRegP);

  format %{ %}
  interface(REG_INTER);
%}

operand t0_RegP()
%{
  constraint(ALLOC_IN_RC(t0_long_reg));
  match(RegP);
  match(mRegP);
  match(no_T8_mRegP);

  format %{ %}
  interface(REG_INTER);
%}

operand t1_RegP()
%{
  constraint(ALLOC_IN_RC(t1_long_reg));
  match(RegP);
  match(mRegP);
  match(no_T8_mRegP);

  format %{ %}
  interface(REG_INTER);
%}

operand t2_RegP()
%{
  constraint(ALLOC_IN_RC(t2_long_reg));
  match(RegP);
  match(mRegP);
  match(no_T8_mRegP);

  format %{ %}
  interface(REG_INTER);
%}

operand t3_RegP()
%{
  constraint(ALLOC_IN_RC(t3_long_reg));
  match(RegP);
  match(mRegP);
  match(no_T8_mRegP);

  format %{ %}
  interface(REG_INTER);
%}

// NOTE: intentionally no match(no_T8_mRegP) here — this operand IS T8.
operand t8_RegP()
%{
  constraint(ALLOC_IN_RC(t8_long_reg));
  match(RegP);
  match(mRegP);

  format %{ %}
  interface(REG_INTER);
%}

operand t9_RegP()
%{
  constraint(ALLOC_IN_RC(t9_long_reg));
  match(RegP);
  match(mRegP);
  match(no_T8_mRegP);

  format %{ %}
  interface(REG_INTER);
%}

operand a0_RegP()
%{
  constraint(ALLOC_IN_RC(a0_long_reg));
  match(RegP);
  match(mRegP);
  match(no_T8_mRegP);

  format %{ %}
  interface(REG_INTER);
%}

operand a1_RegP()
%{
  constraint(ALLOC_IN_RC(a1_long_reg));
  match(RegP);
  match(mRegP);
  match(no_T8_mRegP);

  format %{ %}
  interface(REG_INTER);
%}

operand a2_RegP()
%{
  constraint(ALLOC_IN_RC(a2_long_reg));
  match(RegP);
  match(mRegP);
  match(no_T8_mRegP);

  format %{ %}
  interface(REG_INTER);
%}

operand a3_RegP()
%{
  constraint(ALLOC_IN_RC(a3_long_reg));
  match(RegP);
  match(mRegP);
  match(no_T8_mRegP);

  format %{ %}
  interface(REG_INTER);
%}

operand a4_RegP()
%{
  constraint(ALLOC_IN_RC(a4_long_reg));
  match(RegP);
  match(mRegP);
  match(no_T8_mRegP);

  format %{ %}
  interface(REG_INTER);
%}

operand a5_RegP()
%{
  constraint(ALLOC_IN_RC(a5_long_reg));
  match(RegP);
  match(mRegP);
  match(no_T8_mRegP);

  format %{ %}
  interface(REG_INTER);
%}

operand a6_RegP()
%{
  constraint(ALLOC_IN_RC(a6_long_reg));
  match(RegP);
  match(mRegP);
  match(no_T8_mRegP);

  format %{ %}
  interface(REG_INTER);
%}

operand a7_RegP()
%{
  constraint(ALLOC_IN_RC(a7_long_reg));
  match(RegP);
  match(mRegP);
  match(no_T8_mRegP);

  format %{ %}
  interface(REG_INTER);
%}

operand v0_RegP()
%{
  constraint(ALLOC_IN_RC(v0_long_reg));
  match(RegP);
  match(mRegP);
  match(no_T8_mRegP);

  format %{ %}
  interface(REG_INTER);
%}

operand v1_RegP()
%{
  constraint(ALLOC_IN_RC(v1_long_reg));
  match(RegP);
  match(mRegP);
  match(no_T8_mRegP);

  format %{ %}
  interface(REG_INTER);
%}

/*
operand mSPRegP(mRegP reg) %{
  constraint(ALLOC_IN_RC(sp_reg));
  match(reg);

  format %{ "SP" %}
  interface(REG_INTER);
%}

operand mFPRegP(mRegP reg) %{
  constraint(ALLOC_IN_RC(fp_reg));
  match(reg);

  format %{ "FP" %}
  interface(REG_INTER);
%}
*/
// Long (64-bit integer, RegL) register operands. mRegL is the general
// allocatable class; the named variants below pin allocation to one
// specific register (each also matches mRegL so it can appear wherever
// a general long register is accepted).
// NOTE(review): there are no s5RegL/s6RegL variants — presumably S5/S6
// are reserved for other uses in this port (e.g. thread/heapbase
// registers); confirm against the register definitions at the top of
// the file.
4685 operand mRegL() %{
4686 constraint(ALLOC_IN_RC(long_reg));
4687 match(RegL);
4689 format %{ %}
4690 interface(REG_INTER);
4691 %}
4693 operand v0RegL() %{
4694 constraint(ALLOC_IN_RC(v0_long_reg));
4695 match(RegL);
4696 match(mRegL);
4698 format %{ %}
4699 interface(REG_INTER);
4700 %}
4702 operand v1RegL() %{
4703 constraint(ALLOC_IN_RC(v1_long_reg));
4704 match(RegL);
4705 match(mRegL);
4707 format %{ %}
4708 interface(REG_INTER);
4709 %}
// NOTE(review): a0RegL is the only variant with a non-empty format
// string ("A0"); cosmetic inconsistency with its siblings, harmless.
4711 operand a0RegL() %{
4712 constraint(ALLOC_IN_RC(a0_long_reg));
4713 match(RegL);
4714 match(mRegL);
4716 format %{ "A0" %}
4717 interface(REG_INTER);
4718 %}
4720 operand a1RegL() %{
4721 constraint(ALLOC_IN_RC(a1_long_reg));
4722 match(RegL);
4723 match(mRegL);
4725 format %{ %}
4726 interface(REG_INTER);
4727 %}
4729 operand a2RegL() %{
4730 constraint(ALLOC_IN_RC(a2_long_reg));
4731 match(RegL);
4732 match(mRegL);
4734 format %{ %}
4735 interface(REG_INTER);
4736 %}
4738 operand a3RegL() %{
4739 constraint(ALLOC_IN_RC(a3_long_reg));
4740 match(RegL);
4741 match(mRegL);
4743 format %{ %}
4744 interface(REG_INTER);
4745 %}
4747 operand t0RegL() %{
4748 constraint(ALLOC_IN_RC(t0_long_reg));
4749 match(RegL);
4750 match(mRegL);
4752 format %{ %}
4753 interface(REG_INTER);
4754 %}
4756 operand t1RegL() %{
4757 constraint(ALLOC_IN_RC(t1_long_reg));
4758 match(RegL);
4759 match(mRegL);
4761 format %{ %}
4762 interface(REG_INTER);
4763 %}
4765 operand t2RegL() %{
4766 constraint(ALLOC_IN_RC(t2_long_reg));
4767 match(RegL);
4768 match(mRegL);
4770 format %{ %}
4771 interface(REG_INTER);
4772 %}
4774 operand t3RegL() %{
4775 constraint(ALLOC_IN_RC(t3_long_reg));
4776 match(RegL);
4777 match(mRegL);
4779 format %{ %}
4780 interface(REG_INTER);
4781 %}
4783 operand t8RegL() %{
4784 constraint(ALLOC_IN_RC(t8_long_reg));
4785 match(RegL);
4786 match(mRegL);
4788 format %{ %}
4789 interface(REG_INTER);
4790 %}
4792 operand a4RegL() %{
4793 constraint(ALLOC_IN_RC(a4_long_reg));
4794 match(RegL);
4795 match(mRegL);
4797 format %{ %}
4798 interface(REG_INTER);
4799 %}
4801 operand a5RegL() %{
4802 constraint(ALLOC_IN_RC(a5_long_reg));
4803 match(RegL);
4804 match(mRegL);
4806 format %{ %}
4807 interface(REG_INTER);
4808 %}
4810 operand a6RegL() %{
4811 constraint(ALLOC_IN_RC(a6_long_reg));
4812 match(RegL);
4813 match(mRegL);
4815 format %{ %}
4816 interface(REG_INTER);
4817 %}
4819 operand a7RegL() %{
4820 constraint(ALLOC_IN_RC(a7_long_reg));
4821 match(RegL);
4822 match(mRegL);
4824 format %{ %}
4825 interface(REG_INTER);
4826 %}
4828 operand s0RegL() %{
4829 constraint(ALLOC_IN_RC(s0_long_reg));
4830 match(RegL);
4831 match(mRegL);
4833 format %{ %}
4834 interface(REG_INTER);
4835 %}
4837 operand s1RegL() %{
4838 constraint(ALLOC_IN_RC(s1_long_reg));
4839 match(RegL);
4840 match(mRegL);
4842 format %{ %}
4843 interface(REG_INTER);
4844 %}
4846 operand s2RegL() %{
4847 constraint(ALLOC_IN_RC(s2_long_reg));
4848 match(RegL);
4849 match(mRegL);
4851 format %{ %}
4852 interface(REG_INTER);
4853 %}
4855 operand s3RegL() %{
4856 constraint(ALLOC_IN_RC(s3_long_reg));
4857 match(RegL);
4858 match(mRegL);
4860 format %{ %}
4861 interface(REG_INTER);
4862 %}
4864 operand s4RegL() %{
4865 constraint(ALLOC_IN_RC(s4_long_reg));
4866 match(RegL);
4867 match(mRegL);
4869 format %{ %}
4870 interface(REG_INTER);
4871 %}
4873 operand s7RegL() %{
4874 constraint(ALLOC_IN_RC(s7_long_reg));
4875 match(RegL);
4876 match(mRegL);
4878 format %{ %}
4879 interface(REG_INTER);
4880 %}
4882 // Floating register operands
// Single-precision float register operand (any register in flt_reg).
4883 operand regF() %{
4884 constraint(ALLOC_IN_RC(flt_reg));
4885 match(RegF);
4887 format %{ %}
4888 interface(REG_INTER);
4889 %}
4891 //Double Precision Floating register operands
// Double-precision float register operand (any register in dbl_reg).
4892 operand regD() %{
4893 constraint(ALLOC_IN_RC(dbl_reg));
4894 match(RegD);
4896 format %{ %}
4897 interface(REG_INTER);
4898 %}
4900 //----------Memory Operands----------------------------------------------------
// Each memory operand describes one addressing-mode tree the matcher can
// fold into a load/store. MEMORY_INTER always decomposes the address into
// the 4-tuple (base, index, scale, disp); 0x0 in index means NO_INDEX.
// The *Narrow / *NarrowKlass variants fold a DecodeN / DecodeNKlass into
// the address and are guarded by a predicate requiring a zero
// narrow-oop/klass shift (so the compressed value IS the address base).
4901 // Indirect Memory Operand
4902 operand indirect(mRegP reg) %{
4903 constraint(ALLOC_IN_RC(p_reg));
4904 match(reg);
4906 format %{ "[$reg] @ indirect" %}
4907 interface(MEMORY_INTER) %{
4908 base($reg);
4909 index(0x0); /* NO_INDEX */
4910 scale(0x0);
4911 disp(0x0);
4912 %}
4913 %}
4915 // Indirect Memory Plus Short Offset Operand
4916 operand indOffset8(mRegP reg, immL8 off)
4917 %{
4918 constraint(ALLOC_IN_RC(p_reg));
4919 match(AddP reg off);
4921 format %{ "[$reg + $off (8-bit)] @ indOffset8" %}
4922 interface(MEMORY_INTER) %{
4923 base($reg);
4924 index(0x0); /* NO_INDEX */
4925 scale(0x0);
4926 disp($off);
4927 %}
4928 %}
4930 // Indirect Memory Times Scale Plus Index Register
4931 operand indIndexScale(mRegP reg, mRegL lreg, immI2 scale)
4932 %{
4933 constraint(ALLOC_IN_RC(p_reg));
4934 match(AddP reg (LShiftL lreg scale));
4936 op_cost(10);
4937 format %{"[$reg + $lreg << $scale] @ indIndexScale" %}
4938 interface(MEMORY_INTER) %{
4939 base($reg);
4940 index($lreg);
4941 scale($scale);
4942 disp(0x0);
4943 %}
4944 %}
4947 // [base + index + offset]
4948 operand baseIndexOffset8(mRegP base, mRegL index, immL8 off)
4949 %{
4950 constraint(ALLOC_IN_RC(p_reg));
4951 op_cost(5);
4952 match(AddP (AddP base index) off);
4954 format %{ "[$base + $index + $off (8-bit)] @ baseIndexOffset8" %}
4955 interface(MEMORY_INTER) %{
4956 base($base);
4957 index($index);
4958 scale(0x0);
4959 disp($off);
4960 %}
4961 %}
4963 // [base + index + offset]
// Same shape as baseIndexOffset8, but the index comes from an int
// widened with ConvI2L.
4964 operand baseIndexOffset8_convI2L(mRegP base, mRegI index, immL8 off)
4965 %{
4966 constraint(ALLOC_IN_RC(p_reg));
4967 op_cost(5);
4968 match(AddP (AddP base (ConvI2L index)) off);
4970 format %{ "[$base + $index + $off (8-bit)] @ baseIndexOffset8_convI2L" %}
4971 interface(MEMORY_INTER) %{
4972 base($base);
4973 index($index);
4974 scale(0x0);
4975 disp($off);
4976 %}
4977 %}
4979 // Indirect Memory Times Scale Plus Index Register Plus Offset Operand
4980 operand indIndexScaleOffset8(mRegP reg, immL8 off, mRegL lreg, immI2 scale)
4981 %{
4982 constraint(ALLOC_IN_RC(p_reg));
4983 match(AddP (AddP reg (LShiftL lreg scale)) off);
4985 op_cost(10);
4986 format %{"[$reg + $off + $lreg << $scale] @ indIndexScaleOffset8" %}
4987 interface(MEMORY_INTER) %{
4988 base($reg);
4989 index($lreg);
4990 scale($scale);
4991 disp($off);
4992 %}
4993 %}
4995 operand indIndexScaleOffset8_convI2L(mRegP reg, immL8 off, mRegI ireg, immI2 scale)
4996 %{
4997 constraint(ALLOC_IN_RC(p_reg));
4998 match(AddP (AddP reg (LShiftL (ConvI2L ireg) scale)) off);
5000 op_cost(10);
5001 format %{"[$reg + $off + $ireg << $scale] @ indIndexScaleOffset8_convI2L" %}
5002 interface(MEMORY_INTER) %{
5003 base($reg);
5004 index($ireg);
5005 scale($scale);
5006 disp($off);
5007 %}
5008 %}
5010 // [base + index<<scale + offset]
5011 operand basePosIndexScaleOffset8(mRegP base, mRegI index, immL8 off, immI_0_31 scale)
5012 %{
5013 constraint(ALLOC_IN_RC(p_reg));
5014 //predicate(n->in(2)->in(3)->in(1)->as_Type()->type()->is_long()->_lo >= 0);
5015 op_cost(10);
5016 match(AddP (AddP base (LShiftL (ConvI2L index) scale)) off);
5018 format %{ "[$base + $index << $scale + $off (8-bit)] @ basePosIndexScaleOffset8" %}
5019 interface(MEMORY_INTER) %{
5020 base($base);
5021 index($index);
5022 scale($scale);
5023 disp($off);
5024 %}
5025 %}
5027 // Indirect Memory Times Scale Plus Index Register Plus Offset Operand
5028 operand indIndexScaleOffsetNarrow(mRegN reg, immL8 off, mRegL lreg, immI2 scale)
5029 %{
5030 predicate(Universe::narrow_oop_shift() == 0);
5031 constraint(ALLOC_IN_RC(p_reg));
5032 match(AddP (AddP (DecodeN reg) (LShiftL lreg scale)) off);
5034 op_cost(10);
5035 format %{"[$reg + $off + $lreg << $scale] @ indIndexScaleOffsetNarrow" %}
5036 interface(MEMORY_INTER) %{
5037 base($reg);
5038 index($lreg);
5039 scale($scale);
5040 disp($off);
5041 %}
5042 %}
5044 // [base + index<<scale + offset] for compressed Oops
5045 operand indPosIndexI2LScaleOffset8Narrow(mRegN base, mRegI index, immL8 off, immI_0_31 scale)
5046 %{
5047 constraint(ALLOC_IN_RC(p_reg));
5048 //predicate(Universe::narrow_oop_shift() == 0 && n->in(2)->in(3)->in(1)->as_Type()->type()->is_long()->_lo >= 0);
5049 predicate(Universe::narrow_oop_shift() == 0);
5050 op_cost(10);
5051 match(AddP (AddP (DecodeN base) (LShiftL (ConvI2L index) scale)) off);
5053 format %{ "[$base + $index << $scale + $off (8-bit)] @ indPosIndexI2LScaleOffset8Narrow" %}
5054 interface(MEMORY_INTER) %{
5055 base($base);
5056 index($index);
5057 scale($scale);
5058 disp($off);
5059 %}
5060 %}
5062 //FIXME: I think it's better to limit the immI to be 16-bit at most!
5063 // Indirect Memory Plus Long Offset Operand
5064 operand indOffset32(mRegP reg, immL32 off) %{
5065 constraint(ALLOC_IN_RC(p_reg));
5066 op_cost(20);
5067 match(AddP reg off);
5069 format %{ "[$reg + $off (32-bit)] @ indOffset32" %}
5070 interface(MEMORY_INTER) %{
5071 base($reg);
5072 index(0x0); /* NO_INDEX */
5073 scale(0x0);
5074 disp($off);
5075 %}
5076 %}
5078 // Indirect Memory Plus Index Register
5079 operand indIndex(mRegP addr, mRegL index) %{
5080 constraint(ALLOC_IN_RC(p_reg));
5081 match(AddP addr index);
5083 op_cost(20);
5084 format %{"[$addr + $index] @ indIndex" %}
5085 interface(MEMORY_INTER) %{
5086 base($addr);
5087 index($index);
5088 scale(0x0);
5089 disp(0x0);
5090 %}
5091 %}
// Compressed-klass addressing modes (DecodeNKlass folded into the
// address); valid only when the klass shift is zero.
5093 operand indirectNarrowKlass(mRegN reg)
5094 %{
5095 predicate(Universe::narrow_klass_shift() == 0);
5096 constraint(ALLOC_IN_RC(p_reg));
5097 op_cost(10);
5098 match(DecodeNKlass reg);
5100 format %{ "[$reg] @ indirectNarrowKlass" %}
5101 interface(MEMORY_INTER) %{
5102 base($reg);
5103 index(0x0);
5104 scale(0x0);
5105 disp(0x0);
5106 %}
5107 %}
5109 operand indOffset8NarrowKlass(mRegN reg, immL8 off)
5110 %{
5111 predicate(Universe::narrow_klass_shift() == 0);
5112 constraint(ALLOC_IN_RC(p_reg));
5113 op_cost(10);
5114 match(AddP (DecodeNKlass reg) off);
5116 format %{ "[$reg + $off (8-bit)] @ indOffset8NarrowKlass" %}
5117 interface(MEMORY_INTER) %{
5118 base($reg);
5119 index(0x0);
5120 scale(0x0);
5121 disp($off);
5122 %}
5123 %}
5125 operand indOffset32NarrowKlass(mRegN reg, immL32 off)
5126 %{
5127 predicate(Universe::narrow_klass_shift() == 0);
5128 constraint(ALLOC_IN_RC(p_reg));
5129 op_cost(10);
5130 match(AddP (DecodeNKlass reg) off);
5132 format %{ "[$reg + $off (32-bit)] @ indOffset32NarrowKlass" %}
5133 interface(MEMORY_INTER) %{
5134 base($reg);
5135 index(0x0);
5136 scale(0x0);
5137 disp($off);
5138 %}
5139 %}
5141 operand indIndexOffsetNarrowKlass(mRegN reg, mRegL lreg, immL32 off)
5142 %{
5143 predicate(Universe::narrow_klass_shift() == 0);
5144 constraint(ALLOC_IN_RC(p_reg));
5145 match(AddP (AddP (DecodeNKlass reg) lreg) off);
5147 op_cost(10);
5148 format %{"[$reg + $off + $lreg] @ indIndexOffsetNarrowKlass" %}
5149 interface(MEMORY_INTER) %{
5150 base($reg);
5151 index($lreg);
5152 scale(0x0);
5153 disp($off);
5154 %}
5155 %}
5157 operand indIndexNarrowKlass(mRegN reg, mRegL lreg)
5158 %{
5159 predicate(Universe::narrow_klass_shift() == 0);
5160 constraint(ALLOC_IN_RC(p_reg));
5161 match(AddP (DecodeNKlass reg) lreg);
5163 op_cost(10);
5164 format %{"[$reg + $lreg] @ indIndexNarrowKlass" %}
5165 interface(MEMORY_INTER) %{
5166 base($reg);
5167 index($lreg);
5168 scale(0x0);
5169 disp(0x0);
5170 %}
5171 %}
5173 // Indirect Memory Operand
5174 operand indirectNarrow(mRegN reg)
5175 %{
5176 predicate(Universe::narrow_oop_shift() == 0);
5177 constraint(ALLOC_IN_RC(p_reg));
5178 op_cost(10);
5179 match(DecodeN reg);
5181 format %{ "[$reg] @ indirectNarrow" %}
5182 interface(MEMORY_INTER) %{
5183 base($reg);
5184 index(0x0);
5185 scale(0x0);
5186 disp(0x0);
5187 %}
5188 %}
5190 // Indirect Memory Plus Short Offset Operand
5191 operand indOffset8Narrow(mRegN reg, immL8 off)
5192 %{
5193 predicate(Universe::narrow_oop_shift() == 0);
5194 constraint(ALLOC_IN_RC(p_reg));
5195 op_cost(10);
5196 match(AddP (DecodeN reg) off);
5198 format %{ "[$reg + $off (8-bit)] @ indOffset8Narrow" %}
5199 interface(MEMORY_INTER) %{
5200 base($reg);
5201 index(0x0);
5202 scale(0x0);
5203 disp($off);
5204 %}
5205 %}
5207 // Indirect Memory Plus Index Register Plus Offset Operand
5208 operand indIndexOffset8Narrow(mRegN reg, mRegL lreg, immL8 off)
5209 %{
5210 predicate(Universe::narrow_oop_shift() == 0);
5211 constraint(ALLOC_IN_RC(p_reg));
5212 match(AddP (AddP (DecodeN reg) lreg) off);
5214 op_cost(10);
5215 format %{"[$reg + $off + $lreg] @ indIndexOffset8Narrow" %}
5216 interface(MEMORY_INTER) %{
5217 base($reg);
5218 index($lreg);
5219 scale(0x0);
5220 disp($off);
5221 %}
5222 %}
5224 //----------Load Long Memory Operands------------------------------------------
5225 // The load-long idiom will use its address expression again after loading
5226 // the first word of the long. If the load-long destination overlaps with
5227 // registers used in the addressing expression, the 2nd half will be loaded
5228 // from a clobbered address. Fix this by requiring that load-long use
5229 // address registers that do not overlap with the load-long target.
5231 // load-long support
// High op_cost discourages the allocator from picking these operands
// unless a load-long actually needs them.
5232 operand load_long_RegP() %{
5233 constraint(ALLOC_IN_RC(p_reg));
5234 match(RegP);
5235 match(mRegP);
5236 op_cost(100);
5237 format %{ %}
5238 interface(REG_INTER);
5239 %}
5241 // Indirect Memory Operand Long
5242 operand load_long_indirect(load_long_RegP reg) %{
5243 constraint(ALLOC_IN_RC(p_reg));
5244 match(reg);
5246 format %{ "[$reg]" %}
5247 interface(MEMORY_INTER) %{
5248 base($reg);
5249 index(0x0);
5250 scale(0x0);
5251 disp(0x0);
5252 %}
5253 %}
5255 // Indirect Memory Plus Long Offset Operand
// NOTE(review): unlike load_long_indirect this has no
// constraint(ALLOC_IN_RC(...)); the x86 port's equivalent operand also
// omits it, so this appears intentional — confirm.
5256 operand load_long_indOffset32(load_long_RegP reg, immL32 off) %{
5257 match(AddP reg off);
5259 format %{ "[$reg + $off]" %}
5260 interface(MEMORY_INTER) %{
5261 base($reg);
5262 index(0x0);
5263 scale(0x0);
5264 disp($off);
5265 %}
5266 %}
5268 //----------Conditional Branch Operands----------------------------------------
5269 // Comparison Op - This is the operation of the comparison, and is limited to
5270 // the following set of codes:
5271 // L (<), LE (<=), G (>), GE (>=), E (==), NE (!=)
5272 //
5273 // Other attributes of the comparison, such as unsignedness, are specified
5274 // by the comparison instruction that sets a condition code flags register.
5275 // That result is represented by a flags operand whose subtype is appropriate
5276 // to the unsignedness (etc.) of the comparison.
5277 //
5278 // Later, the instruction which matches both the Comparison Op (a Bool) and
5279 // the flags (produced by the Cmp) specifies the coding of the comparison op
5280 // by matching a specific subtype of Bool operand below, such as cmpOpU.
5282 // Comparison Code
5283 operand cmpOp() %{
5284 match(Bool);
5286 format %{ "" %}
5287 interface(COND_INTER) %{
5288 equal(0x01);
5289 not_equal(0x02);
5290 greater(0x03);
5291 greater_equal(0x04);
5292 less(0x05);
5293 less_equal(0x06);
5294 overflow(0x7);
5295 no_overflow(0x8);
5296 %}
5297 %}
5300 // Comparison Code
5301 // Comparison Code, unsigned compare. Used by FP also, with
5302 // C2 (unordered) turned into GT or LT already. The other bits
5303 // C0 and C3 are turned into Carry & Zero flags.
// NOTE(review): cmpOpU uses exactly the same encodings as cmpOp;
// presumably signed-vs-unsigned is handled by the instruction that
// matches the operand, not by the encoding — confirm against the
// branch/cmove instructs.
5304 operand cmpOpU() %{
5305 match(Bool);
5307 format %{ "" %}
5308 interface(COND_INTER) %{
5309 equal(0x01);
5310 not_equal(0x02);
5311 greater(0x03);
5312 greater_equal(0x04);
5313 less(0x05);
5314 less_equal(0x06);
5315 overflow(0x7);
5316 no_overflow(0x8);
5317 %}
5318 %}
// Disabled x86-style encodings kept for reference.
5320 /*
5321 // Comparison Code, unsigned compare. Used by FP also, with
5322 // C2 (unordered) turned into GT or LT already. The other bits
5323 // C0 and C3 are turned into Carry & Zero flags.
5324 operand cmpOpU() %{
5325 match(Bool);
5327 format %{ "" %}
5328 interface(COND_INTER) %{
5329 equal(0x4);
5330 not_equal(0x5);
5331 less(0x2);
5332 greater_equal(0x3);
5333 less_equal(0x6);
5334 greater(0x7);
5335 %}
5336 %}
5337 */
5338 /*
5339 // Comparison Code for FP conditional move
5340 operand cmpOp_fcmov() %{
5341 match(Bool);
5343 format %{ "" %}
5344 interface(COND_INTER) %{
5345 equal (0x01);
5346 not_equal (0x02);
5347 greater (0x03);
5348 greater_equal(0x04);
5349 less (0x05);
5350 less_equal (0x06);
5351 %}
5352 %}
5354 // Comparison Code used in long compares
5355 operand cmpOp_commute() %{
5356 match(Bool);
5358 format %{ "" %}
5359 interface(COND_INTER) %{
5360 equal(0x4);
5361 not_equal(0x5);
5362 less(0xF);
5363 greater_equal(0xE);
5364 less_equal(0xD);
5365 greater(0xC);
5366 %}
5367 %}
5368 */
5370 //----------Special Memory Operands--------------------------------------------
5371 // Stack Slot Operand - This operand is used for loading and storing temporary
5372 // values on the stack where a match requires a value to
5373 // flow through memory.
// All five variants address [SP + offset]: base 0x1d is register
// encoding 29 (the MIPS stack pointer) and disp is the spill offset
// supplied by the allocator.
5374 operand stackSlotP(sRegP reg) %{
5375 constraint(ALLOC_IN_RC(stack_slots));
5376 // No match rule because this operand is only generated in matching
5377 op_cost(50);
5378 format %{ "[$reg]" %}
5379 interface(MEMORY_INTER) %{
5380 base(0x1d); // SP
5381 index(0x0); // No Index
5382 scale(0x0); // No Scale
5383 disp($reg); // Stack Offset
5384 %}
5385 %}
5387 operand stackSlotI(sRegI reg) %{
5388 constraint(ALLOC_IN_RC(stack_slots));
5389 // No match rule because this operand is only generated in matching
5390 op_cost(50);
5391 format %{ "[$reg]" %}
5392 interface(MEMORY_INTER) %{
5393 base(0x1d); // SP
5394 index(0x0); // No Index
5395 scale(0x0); // No Scale
5396 disp($reg); // Stack Offset
5397 %}
5398 %}
5400 operand stackSlotF(sRegF reg) %{
5401 constraint(ALLOC_IN_RC(stack_slots));
5402 // No match rule because this operand is only generated in matching
5403 op_cost(50);
5404 format %{ "[$reg]" %}
5405 interface(MEMORY_INTER) %{
5406 base(0x1d); // SP
5407 index(0x0); // No Index
5408 scale(0x0); // No Scale
5409 disp($reg); // Stack Offset
5410 %}
5411 %}
5413 operand stackSlotD(sRegD reg) %{
5414 constraint(ALLOC_IN_RC(stack_slots));
5415 // No match rule because this operand is only generated in matching
5416 op_cost(50);
5417 format %{ "[$reg]" %}
5418 interface(MEMORY_INTER) %{
5419 base(0x1d); // SP
5420 index(0x0); // No Index
5421 scale(0x0); // No Scale
5422 disp($reg); // Stack Offset
5423 %}
5424 %}
5426 operand stackSlotL(sRegL reg) %{
5427 constraint(ALLOC_IN_RC(stack_slots));
5428 // No match rule because this operand is only generated in matching
5429 op_cost(50);
5430 format %{ "[$reg]" %}
5431 interface(MEMORY_INTER) %{
5432 base(0x1d); // SP
5433 index(0x0); // No Index
5434 scale(0x0); // No Scale
5435 disp($reg); // Stack Offset
5436 %}
5437 %}
5440 //------------------------OPERAND CLASSES--------------------------------------
// 'memory' groups every addressing-mode operand defined above so that a
// single instruct pattern can match any of them.
5441 //opclass memory( direct, indirect, indOffset16, indOffset32, indOffset32X, indIndexOffset );
5442 opclass memory( indirect, indirectNarrow, indOffset8, indOffset32, indIndex, indIndexScale, load_long_indirect, load_long_indOffset32, baseIndexOffset8, baseIndexOffset8_convI2L, indIndexScaleOffset8, indIndexScaleOffset8_convI2L, basePosIndexScaleOffset8, indIndexScaleOffsetNarrow, indPosIndexI2LScaleOffset8Narrow, indOffset8Narrow, indIndexOffset8Narrow);
5445 //----------PIPELINE-----------------------------------------------------------
5446 // Rules which define the behavior of the target architectures pipeline.
5448 pipeline %{
5450 //----------ATTRIBUTES---------------------------------------------------------
5451 attributes %{
5452 fixed_size_instructions; // Fixed size instructions
5453 branch_has_delay_slot; // branches have a delay slot on gs2
5454 max_instructions_per_bundle = 1; // 1 instruction per bundle
5455 max_bundles_per_cycle = 4; // Up to 4 bundles per cycle
5456 bundle_unit_size=4;
5457 instruction_unit_size = 4; // An instruction is 4 bytes long
5458 instruction_fetch_unit_size = 16; // The processor fetches one line
5459 instruction_fetch_units = 1; // of 16 bytes
5461 // List of nop instructions
5462 nops( MachNop );
5463 %}
5465 //----------RESOURCES----------------------------------------------------------
5466 // Resources are the functional units available to the machine
// 4 decoders, 2 integer ALUs, 2 FPUs, one memory unit, one branch unit.
5468 resources(D1, D2, D3, D4, DECODE = D1 | D2 | D3| D4, ALU1, ALU2, ALU = ALU1 | ALU2, FPU1, FPU2, FPU = FPU1 | FPU2, MEM, BR);
5470 //----------PIPELINE DESCRIPTION-----------------------------------------------
5471 // Pipeline Description specifies the stages in the machine's pipeline
5473 // IF: fetch
5474 // ID: decode
5475 // RD: read
5476 // CA: calculate
5477 // WB: write back
5478 // CM: commit
5480 pipe_desc(IF, ID, RD, CA, WB, CM);
5483 //----------PIPELINE CLASSES---------------------------------------------------
5484 // Pipeline Classes describe the stages in which input and output are
5485 // referenced by the hardware pipeline.
5487 //No.1 Integer ALU reg-reg operation : dst <-- reg1 op reg2
5488 pipe_class ialu_regI_regI(mRegI dst, mRegI src1, mRegI src2) %{
5489 single_instruction;
5490 src1 : RD(read);
5491 src2 : RD(read);
5492 dst : WB(write)+1;
5493 DECODE : ID;
5494 ALU : CA;
5495 %}
5497 //No.19 Integer mult operation : dst <-- reg1 mult reg2
5498 pipe_class ialu_mult(mRegI dst, mRegI src1, mRegI src2) %{
5499 src1 : RD(read);
5500 src2 : RD(read);
5501 dst : WB(write)+5;
5502 DECODE : ID;
5503 ALU2 : CA;
5504 %}
5506 pipe_class mulL_reg_reg(mRegL dst, mRegL src1, mRegL src2) %{
5507 src1 : RD(read);
5508 src2 : RD(read);
5509 dst : WB(write)+10;
5510 DECODE : ID;
5511 ALU2 : CA;
5512 %}
5514 //No.19 Integer div operation : dst <-- reg1 div reg2
5515 pipe_class ialu_div(mRegI dst, mRegI src1, mRegI src2) %{
5516 src1 : RD(read);
5517 src2 : RD(read);
5518 dst : WB(write)+10;
5519 DECODE : ID;
5520 ALU2 : CA;
5521 %}
5523 //No.19 Integer mod operation : dst <-- reg1 mod reg2
5524 pipe_class ialu_mod(mRegI dst, mRegI src1, mRegI src2) %{
5525 instruction_count(2);
5526 src1 : RD(read);
5527 src2 : RD(read);
5528 dst : WB(write)+10;
5529 DECODE : ID;
5530 ALU2 : CA;
5531 %}
5533 //No.15 Long ALU reg-reg operation : dst <-- reg1 op reg2
5534 pipe_class ialu_regL_regL(mRegL dst, mRegL src1, mRegL src2) %{
5535 instruction_count(2);
5536 src1 : RD(read);
5537 src2 : RD(read);
5538 dst : WB(write);
5539 DECODE : ID;
5540 ALU : CA;
5541 %}
5543 //No.18 Long ALU reg-imm16 operation : dst <-- reg1 op imm16
5544 pipe_class ialu_regL_imm16(mRegL dst, mRegL src) %{
5545 instruction_count(2);
5546 src : RD(read);
5547 dst : WB(write);
5548 DECODE : ID;
5549 ALU : CA;
5550 %}
5552 //no.16 load Long from memory :
5553 pipe_class ialu_loadL(mRegL dst, memory mem) %{
5554 instruction_count(2);
5555 mem : RD(read);
5556 dst : WB(write)+5;
5557 DECODE : ID;
5558 MEM : RD;
5559 %}
5561 //No.17 Store Long to Memory :
5562 pipe_class ialu_storeL(mRegL src, memory mem) %{
5563 instruction_count(2);
5564 mem : RD(read);
5565 src : RD(read);
5566 DECODE : ID;
5567 MEM : RD;
5568 %}
5570 //No.2 Integer ALU reg-imm16 operation : dst <-- reg1 op imm16
5571 pipe_class ialu_regI_imm16(mRegI dst, mRegI src) %{
5572 single_instruction;
5573 src : RD(read);
5574 dst : WB(write);
5575 DECODE : ID;
5576 ALU : CA;
5577 %}
5579 //No.3 Integer move operation : dst <-- reg
5580 pipe_class ialu_regI_mov(mRegI dst, mRegI src) %{
5581 src : RD(read);
5582 dst : WB(write);
5583 DECODE : ID;
5584 ALU : CA;
5585 %}
5587 //No.4 No instructions : do nothing
5588 pipe_class empty( ) %{
5589 instruction_count(0);
5590 %}
5592 //No.5 UnConditional branch :
5593 pipe_class pipe_jump( label labl ) %{
5594 multiple_bundles;
5595 DECODE : ID;
5596 BR : RD;
5597 %}
5599 //No.6 ALU Conditional branch :
5600 pipe_class pipe_alu_branch(mRegI src1, mRegI src2, label labl ) %{
5601 multiple_bundles;
5602 src1 : RD(read);
5603 src2 : RD(read);
5604 DECODE : ID;
5605 BR : RD;
5606 %}
5608 //no.7 load integer from memory :
5609 pipe_class ialu_loadI(mRegI dst, memory mem) %{
5610 mem : RD(read);
5611 dst : WB(write)+3;
5612 DECODE : ID;
5613 MEM : RD;
5614 %}
5616 //No.8 Store Integer to Memory :
5617 pipe_class ialu_storeI(mRegI src, memory mem) %{
5618 mem : RD(read);
5619 src : RD(read);
5620 DECODE : ID;
5621 MEM : RD;
5622 %}
5625 //No.10 Floating FPU reg-reg operation : dst <-- reg1 op reg2
5626 pipe_class fpu_regF_regF(regF dst, regF src1, regF src2) %{
5627 src1 : RD(read);
5628 src2 : RD(read);
5629 dst : WB(write);
5630 DECODE : ID;
5631 FPU : CA;
5632 %}
5634 //No.22 Floating div operation : dst <-- reg1 div reg2
5635 pipe_class fpu_div(regF dst, regF src1, regF src2) %{
5636 src1 : RD(read);
5637 src2 : RD(read);
5638 dst : WB(write);
5639 DECODE : ID;
5640 FPU2 : CA;
5641 %}
5643 pipe_class fcvt_I2D(regD dst, mRegI src) %{
5644 src : RD(read);
5645 dst : WB(write);
5646 DECODE : ID;
5647 FPU1 : CA;
5648 %}
5650 pipe_class fcvt_D2I(mRegI dst, regD src) %{
5651 src : RD(read);
5652 dst : WB(write);
5653 DECODE : ID;
5654 FPU1 : CA;
5655 %}
5657 pipe_class pipe_mfc1(mRegI dst, regD src) %{
5658 src : RD(read);
5659 dst : WB(write);
5660 DECODE : ID;
5661 MEM : RD;
5662 %}
5664 pipe_class pipe_mtc1(regD dst, mRegI src) %{
5665 src : RD(read);
5666 dst : WB(write);
5667 DECODE : ID;
5668 MEM : RD(5);
5669 %}
5671 //No.23 Floating sqrt operation : dst <-- reg1 sqrt reg2
5672 pipe_class fpu_sqrt(regF dst, regF src1, regF src2) %{
5673 multiple_bundles;
5674 src1 : RD(read);
5675 src2 : RD(read);
5676 dst : WB(write);
5677 DECODE : ID;
5678 FPU2 : CA;
5679 %}
5681 //No.11 Load Floating from Memory :
5682 pipe_class fpu_loadF(regF dst, memory mem) %{
5683 instruction_count(1);
5684 mem : RD(read);
5685 dst : WB(write)+3;
5686 DECODE : ID;
5687 MEM : RD;
5688 %}
5690 //No.12 Store Floating to Memory :
5691 pipe_class fpu_storeF(regF src, memory mem) %{
5692 instruction_count(1);
5693 mem : RD(read);
5694 src : RD(read);
5695 DECODE : ID;
5696 MEM : RD;
5697 %}
5699 //No.13 FPU Conditional branch :
5700 pipe_class pipe_fpu_branch(regF src1, regF src2, label labl ) %{
5701 multiple_bundles;
5702 src1 : RD(read);
5703 src2 : RD(read);
5704 DECODE : ID;
5705 BR : RD;
5706 %}
5708 //No.14 Floating FPU reg operation : dst <-- op reg
5709 pipe_class fpu1_regF(regF dst, regF src) %{
5710 src : RD(read);
5711 dst : WB(write);
5712 DECODE : ID;
5713 FPU : CA;
5714 %}
5716 pipe_class long_memory_op() %{
5717 instruction_count(10); multiple_bundles; force_serialization;
5718 fixed_latency(30);
5719 %}
5721 pipe_class simple_call() %{
5722 instruction_count(10); multiple_bundles; force_serialization;
5723 fixed_latency(200);
5724 BR : RD;
5725 %}
5727 pipe_class call() %{
5728 instruction_count(10); multiple_bundles; force_serialization;
5729 fixed_latency(200);
5730 %}
5732 //FIXME:
5733 //No.9 Pipe slow : for multi-instructions
5734 pipe_class pipe_slow( ) %{
5735 instruction_count(20);
5736 force_serialization;
5737 multiple_bundles;
5738 fixed_latency(50);
5739 %}
5741 %}
5745 //----------INSTRUCTIONS-------------------------------------------------------
5746 //
5747 // match -- States which machine-independent subtree may be replaced
5748 // by this instruction.
5749 // ins_cost -- The estimated cost of this instruction is used by instruction
5750 // selection to identify a minimum cost tree of machine
5751 // instructions that matches a tree of machine-independent
5752 // instructions.
5753 // format -- A string providing the disassembly for this instruction.
5754 // The value of an instruction's operand may be inserted
5755 // by referring to it with a '$' prefix.
5756 // opcode -- Three instruction opcodes may be provided. These are referred
5757 // to within an encode class as $primary, $secondary, and $tertiary
5758 // respectively. The primary opcode is commonly used to
5759 // indicate the type of machine instruction, while secondary
5760 // and tertiary are often used for prefix options or addressing
5761 // modes.
5762 // ins_encode -- A list of encode classes with parameters. The encode class
5763 // name must have been defined in an 'enc_class' specification
5764 // in the encode section of the architecture description.
5767 // Load Integer
5768 instruct loadI(mRegI dst, memory mem) %{
5769 match(Set dst (LoadI mem));
5771 ins_cost(125);
5772 format %{ "lw $dst, $mem #@loadI" %}
5773 ins_encode (load_I_enc(dst, mem));
5774 ins_pipe( ialu_loadI );
5775 %}
5777 instruct loadI_convI2L(mRegL dst, memory mem) %{
5778 match(Set dst (ConvI2L (LoadI mem)));
5780 ins_cost(125);
5781 format %{ "lw $dst, $mem #@loadI_convI2L" %}
5782 ins_encode (load_I_enc(dst, mem));
5783 ins_pipe( ialu_loadI );
5784 %}
5786 // Load Integer (32 bit signed) to Byte (8 bit signed)
5787 instruct loadI2B(mRegI dst, memory mem, immI_24 twentyfour) %{
5788 match(Set dst (RShiftI (LShiftI (LoadI mem) twentyfour) twentyfour));
5790 ins_cost(125);
5791 format %{ "lb $dst, $mem\t# int -> byte #@loadI2B" %}
5792 ins_encode(load_B_enc(dst, mem));
5793 ins_pipe(ialu_loadI);
5794 %}
5796 // Load Integer (32 bit signed) to Unsigned Byte (8 bit UNsigned)
5797 instruct loadI2UB(mRegI dst, memory mem, immI_255 mask) %{
5798 match(Set dst (AndI (LoadI mem) mask));
5800 ins_cost(125);
5801 format %{ "lbu $dst, $mem\t# int -> ubyte #@loadI2UB" %}
5802 ins_encode(load_UB_enc(dst, mem));
5803 ins_pipe(ialu_loadI);
5804 %}
5806 // Load Integer (32 bit signed) to Short (16 bit signed)
5807 instruct loadI2S(mRegI dst, memory mem, immI_16 sixteen) %{
5808 match(Set dst (RShiftI (LShiftI (LoadI mem) sixteen) sixteen));
5810 ins_cost(125);
5811 format %{ "lh $dst, $mem\t# int -> short #@loadI2S" %}
5812 ins_encode(load_S_enc(dst, mem));
5813 ins_pipe(ialu_loadI);
5814 %}
5816 // Load Integer (32 bit signed) to Unsigned Short/Char (16 bit UNsigned)
5817 instruct loadI2US(mRegI dst, memory mem, immI_65535 mask) %{
5818 match(Set dst (AndI (LoadI mem) mask));
5820 ins_cost(125);
5821 format %{ "lhu $dst, $mem\t# int -> ushort/char #@loadI2US" %}
5822 ins_encode(load_C_enc(dst, mem));
5823 ins_pipe(ialu_loadI);
5824 %}
5826 // Load Long.
5827 instruct loadL(mRegL dst, memory mem) %{
5828 // predicate(!((LoadLNode*)n)->require_atomic_access());
5829 match(Set dst (LoadL mem));
5831 ins_cost(250);
5832 format %{ "ld $dst, $mem #@loadL" %}
5833 ins_encode(load_L_enc(dst, mem));
5834 ins_pipe( ialu_loadL );
5835 %}
5837 // Load Long - UNaligned
5838 instruct loadL_unaligned(mRegL dst, memory mem) %{
5839 match(Set dst (LoadL_unaligned mem));
5841 // FIXME: Jin: Need more effective ldl/ldr
5842 ins_cost(450);
5843 format %{ "ld $dst, $mem #@loadL_unaligned\n\t" %}
5844 ins_encode(load_L_enc(dst, mem));
5845 ins_pipe( ialu_loadL );
5846 %}
5848 // Store Long
// Store a 64-bit register value to memory.
instruct storeL_reg(memory mem, mRegL src) %{
  match(Set mem (StoreL mem src));

  ins_cost(200);
  format %{ "sd $mem, $src #@storeL_reg\n" %}
  ins_encode(store_L_reg_enc(mem, src));
  ins_pipe( ialu_storeL );
%}
// Store long zero: cheaper than storeL_reg because the zero register can be
// stored directly without materializing a constant.
instruct storeL_immL0(memory mem, immL0 zero) %{
  match(Set mem (StoreL mem zero));

  ins_cost(180);
  format %{ "sd $mem, zero #@storeL_immL0" %}
  ins_encode(store_L_immL0_enc(mem, zero));
  ins_pipe( ialu_storeL );
%}
5868 // Load Compressed Pointer
// Compressed (32-bit narrow) oop load; zero-extends into the 64-bit register.
instruct loadN(mRegN dst, memory mem)
%{
  match(Set dst (LoadN mem));

  ins_cost(125); // XXX
  format %{ "lwu $dst, $mem\t# compressed ptr @ loadN" %}
  ins_encode (load_N_enc(dst, mem));
  ins_pipe( ialu_loadI ); // XXX
%}
5879 // Load Pointer
// Full-width (64-bit) pointer load.
instruct loadP(mRegP dst, memory mem) %{
  match(Set dst (LoadP mem));

  ins_cost(125);
  format %{ "ld $dst, $mem #@loadP" %}
  ins_encode (load_P_enc(dst, mem));
  ins_pipe( ialu_loadI );
%}
5889 // Load Klass Pointer
// Uncompressed klass pointer load; shares the pointer-load encoding.
instruct loadKlass(mRegP dst, memory mem) %{
  match(Set dst (LoadKlass mem));

  ins_cost(125);
  format %{ "MOV $dst,$mem @ loadKlass" %}
  ins_encode (load_P_enc(dst, mem));
  ins_pipe( ialu_loadI );
%}
5899 // Load narrow Klass Pointer
// Compressed klass pointer load; same zero-extending encoding as loadN.
instruct loadNKlass(mRegN dst, memory mem)
%{
  match(Set dst (LoadNKlass mem));

  ins_cost(125); // XXX
  format %{ "lwu $dst, $mem\t# compressed klass ptr @ loadNKlass" %}
  ins_encode (load_N_enc(dst, mem));
  ins_pipe( ialu_loadI ); // XXX
%}
5910 // Load Constant
// Materialize an arbitrary 32-bit integer constant into a register; the
// macro-assembler move() picks the shortest instruction sequence for the value.
instruct loadConI(mRegI dst, immI src) %{
  match(Set dst src);

  ins_cost(150);
  format %{ "mov $dst, $src #@loadConI" %}
  ins_encode %{
    Register dst = $dst$$Register;
    int value = $src$$constant;
    __ move(dst, value);
  %}
  ins_pipe( ialu_regI_regI );
%}
// Materialize an arbitrary 64-bit constant; set64() emits a value-dependent
// (variable-length) instruction sequence.
instruct loadConL_set64(mRegL dst, immL src) %{
  match(Set dst src);
  ins_cost(120);
  format %{ "li $dst, $src @ loadConL_set64" %}
  ins_encode %{
    __ set64($dst$$Register, $src$$constant);
  %}
  ins_pipe(ialu_regL_regL);
%}
5935 /*
5936 // Load long value from constant table (predicated by immL_expensive).
5937 instruct loadConL_load(mRegL dst, immL_expensive src) %{
5938 match(Set dst src);
5939 ins_cost(150);
5940 format %{ "ld $dst, $constantoffset[$constanttablebase] # load long $src from table @ loadConL_ldx" %}
5941 ins_encode %{
5942 int con_offset = $constantoffset($src);
5944 if (Assembler::is_simm16(con_offset)) {
5945 __ ld($dst$$Register, $constanttablebase, con_offset);
5946 } else {
5947 __ set64(AT, con_offset);
5948 if (UseLoongsonISA) {
5949 __ gsldx($dst$$Register, $constanttablebase, AT, 0);
5950 } else {
5951 __ daddu(AT, $constanttablebase, AT);
5952 __ ld($dst$$Register, AT, 0);
5953 }
5954 }
5955 %}
5956 ins_pipe(ialu_loadI);
5957 %}
5958 */
// 16-bit-immediate long constant: a single daddiu from the zero register.
// Lower cost than loadConL_set64 so the matcher prefers it when it applies.
instruct loadConL16(mRegL dst, immL16 src) %{
  match(Set dst src);
  ins_cost(105);
  format %{ "mov $dst, $src #@loadConL16" %}
  ins_encode %{
    Register dst_reg = as_Register($dst$$reg);
    int value = $src$$constant;
    __ daddiu(dst_reg, R0, value);
  %}
  ins_pipe( ialu_regL_regL );
%}
// Long zero constant: cheapest form, just copies the zero register.
instruct loadConL0(mRegL dst, immL0 src) %{
  match(Set dst src);
  ins_cost(100);
  format %{ "mov $dst, zero #@loadConL0" %}
  ins_encode %{
    Register dst_reg = as_Register($dst$$reg);
    __ daddu(dst_reg, R0, R0);
  %}
  ins_pipe( ialu_regL_regL );
%}
5984 // Load Range
instruct loadRange(mRegI dst, memory mem) %{
  // Array length load; uses the plain 32-bit integer load encoding.
  match(Set dst (LoadRange mem));

  ins_cost(125);
  format %{ "MOV $dst,$mem @ loadRange" %}
  ins_encode(load_I_enc(dst, mem));
  ins_pipe( ialu_loadI );
%}
// Store a 64-bit pointer register to memory.
instruct storeP(memory mem, mRegP src ) %{
  match(Set mem (StoreP mem src));

  ins_cost(125);
  format %{ "sd $src, $mem #@storeP" %}
  ins_encode(store_P_reg_enc(mem, src));
  ins_pipe( ialu_storeI );
%}
6004 // Store NULL Pointer, mark word, or other simple pointer constant.
// Store NULL pointer: stores the zero register directly, no constant
// materialization needed.
instruct storeImmP0(memory mem, immP0 zero) %{
  match(Set mem (StoreP mem zero));

  ins_cost(125);
  format %{ "mov $mem, $zero #@storeImmP0" %}
  ins_encode(store_P_immP0_enc(mem));
  ins_pipe( ialu_storeI );
%}
6014 // Store Byte Immediate
instruct storeImmB(memory mem, immI8 src) %{
  // Byte store of an 8-bit immediate; the encoding materializes the constant
  // into a scratch register first.
  match(Set mem (StoreB mem src));

  ins_cost(150);
  format %{ "movb $mem, $src #@storeImmB" %}
  ins_encode(store_B_immI_enc(mem, src));
  ins_pipe( ialu_storeI );
%}
6024 // Store Compressed Pointer
// Store a compressed (32-bit narrow) oop.
instruct storeN(memory mem, mRegN src)
%{
  match(Set mem (StoreN mem src));

  ins_cost(125); // XXX
  format %{ "sw $mem, $src\t# compressed ptr @ storeN" %}
  ins_encode(store_N_reg_enc(mem, src));
  ins_pipe( ialu_storeI );
%}
// Store a compressed klass pointer; same encoding as storeN.
instruct storeNKlass(memory mem, mRegN src)
%{
  match(Set mem (StoreNKlass mem src));

  ins_cost(125); // XXX
  format %{ "sw $mem, $src\t# compressed klass ptr @ storeNKlass" %}
  ins_encode(store_N_reg_enc(mem, src));
  ins_pipe( ialu_storeI );
%}
// Store compressed NULL.  Only valid when both heap and klass compression
// bases are NULL, so that the encoded form of NULL is the raw zero bit
// pattern (guarded by the predicate).
instruct storeImmN0(memory mem, immN0 zero)
%{
  predicate(Universe::narrow_oop_base() == NULL && Universe::narrow_klass_base() == NULL);
  match(Set mem (StoreN mem zero));

  ins_cost(125); // XXX
  format %{ "storeN0 $mem, R12\t# compressed ptr" %}
  ins_encode(storeImmN0_enc(mem, zero));
  ins_pipe( ialu_storeI );
%}
6056 // Store Byte
instruct storeB(memory mem, mRegI src) %{
  match(Set mem (StoreB mem src));

  ins_cost(125);
  format %{ "sb $src, $mem #@storeB" %}
  ins_encode(store_B_reg_enc(mem, src));
  ins_pipe( ialu_storeI );
%}
// Byte store of a truncated long: the ConvL2I is free because sb only writes
// the low 8 bits anyway.
instruct storeB_convL2I(memory mem, mRegL src) %{
  match(Set mem (StoreB mem (ConvL2I src)));

  ins_cost(125);
  format %{ "sb $src, $mem #@storeB_convL2I" %}
  ins_encode(store_B_reg_enc(mem, src));
  ins_pipe( ialu_storeI );
%}
6075 // Load Byte (8bit signed)
instruct loadB(mRegI dst, memory mem) %{
  match(Set dst (LoadB mem));

  ins_cost(125);
  format %{ "lb $dst, $mem #@loadB" %}
  ins_encode(load_B_enc(dst, mem));
  ins_pipe( ialu_loadI );
%}
// Byte load widened to long: lb already sign-extends to 64 bits, so the
// ConvI2L costs nothing extra.
instruct loadB_convI2L(mRegL dst, memory mem) %{
  match(Set dst (ConvI2L (LoadB mem)));

  ins_cost(125);
  format %{ "lb $dst, $mem #@loadB_convI2L" %}
  ins_encode(load_B_enc(dst, mem));
  ins_pipe( ialu_loadI );
%}
6094 // Load Byte (8bit UNsigned)
instruct loadUB(mRegI dst, memory mem) %{
  match(Set dst (LoadUB mem));

  ins_cost(125);
  format %{ "lbu $dst, $mem #@loadUB" %}
  ins_encode(load_UB_enc(dst, mem));
  ins_pipe( ialu_loadI );
%}
// Unsigned byte load widened to long: lbu zero-extends, so ConvI2L is free.
instruct loadUB_convI2L(mRegL dst, memory mem) %{
  match(Set dst (ConvI2L (LoadUB mem)));

  ins_cost(125);
  format %{ "lbu $dst, $mem #@loadUB_convI2L" %}
  ins_encode(load_UB_enc(dst, mem));
  ins_pipe( ialu_loadI );
%}
6113 // Load Short (16bit signed)
instruct loadS(mRegI dst, memory mem) %{
  match(Set dst (LoadS mem));

  ins_cost(125);
  format %{ "lh $dst, $mem #@loadS" %}
  ins_encode(load_S_enc(dst, mem));
  ins_pipe( ialu_loadI );
%}
6123 // Load Short (16 bit signed) to Byte (8 bit signed)
instruct loadS2B(mRegI dst, memory mem, immI_24 twentyfour) %{
  // Short load followed by the (x << 24) >> 24 idiom collapses to a direct
  // sign-extending byte load of the short's low byte.
  match(Set dst (RShiftI (LShiftI (LoadS mem) twentyfour) twentyfour));

  ins_cost(125);
  format %{ "lb $dst, $mem\t# short -> byte #@loadS2B" %}
  ins_encode(load_B_enc(dst, mem));
  ins_pipe(ialu_loadI);
%}
// Short load widened to long: lh sign-extends to 64 bits, so ConvI2L is free.
instruct loadS_convI2L(mRegL dst, memory mem) %{
  match(Set dst (ConvI2L (LoadS mem)));

  ins_cost(125);
  format %{ "lh $dst, $mem #@loadS_convI2L" %}
  ins_encode(load_S_enc(dst, mem));
  ins_pipe( ialu_loadI );
%}
6142 // Store Integer Immediate
instruct storeImmI(memory mem, immI src) %{
  // Integer store of an immediate; the encoding materializes the constant
  // into a scratch register first, hence the higher cost than storeI.
  match(Set mem (StoreI mem src));

  ins_cost(150);
  format %{ "mov $mem, $src #@storeImmI" %}
  ins_encode(store_I_immI_enc(mem, src));
  ins_pipe( ialu_storeI );
%}
6152 // Store Integer
instruct storeI(memory mem, mRegI src) %{
  match(Set mem (StoreI mem src));

  ins_cost(125);
  format %{ "sw $mem, $src #@storeI" %}
  ins_encode(store_I_reg_enc(mem, src));
  ins_pipe( ialu_storeI );
%}
// Integer store of a truncated long: sw only writes the low 32 bits, so the
// ConvL2I needs no extra instruction.
instruct storeI_convL2I(memory mem, mRegL src) %{
  match(Set mem (StoreI mem (ConvL2I src)));

  ins_cost(125);
  format %{ "sw $mem, $src #@storeI_convL2I" %}
  ins_encode(store_I_reg_enc(mem, src));
  ins_pipe( ialu_storeI );
%}
6171 // Load Float
instruct loadF(regF dst, memory mem) %{
  match(Set dst (LoadF mem));

  ins_cost(150);
  format %{ "loadF $dst, $mem #@loadF" %}
  ins_encode(load_F_enc(dst, mem));
  ins_pipe( ialu_loadI );
%}
// Load a pointer constant (oop, metadata/klass, or plain address) into dst.
//
// Oop and metadata constants must remain patchable (GC may move oops; class
// redefinition may replace metadata), so they are registered with the oop
// recorder, a relocation entry is emitted, and the value is materialized with
// li48 -- a fixed-length sequence that the relocation machinery knows how to
// patch.  Constants that need no relocation use set64, which emits a shorter,
// value-dependent sequence.
instruct loadConP_general(mRegP dst, immP src) %{
  match(Set dst src);

  ins_cost(120);
  format %{ "li $dst, $src #@loadConP_general" %}

  ins_encode %{
    Register dst = $dst$$Register;
    long* value = (long*)$src$$constant;

    if ($src->constant_reloc() == relocInfo::metadata_type) {
      int klass_index = __ oop_recorder()->find_index((Klass*)value);
      RelocationHolder rspec = metadata_Relocation::spec(klass_index);

      __ relocate(rspec);
      __ li48(dst, (long)value);
    } else if ($src->constant_reloc() == relocInfo::oop_type) {
      int oop_index = __ oop_recorder()->find_index((jobject)value);
      RelocationHolder rspec = oop_Relocation::spec(oop_index);

      __ relocate(rspec);
      __ li48(dst, (long)value);
    } else if ($src->constant_reloc() == relocInfo::none) {
      __ set64(dst, (long)value);
    } else {
      // Previously an unexpected relocation type fell through all three
      // branches and emitted no code at all, silently leaving dst undefined.
      // Fail fast instead.
      ShouldNotReachHere();
    }
  %}

  ins_pipe( ialu_regI_regI );
%}
6211 /*
6212 instruct loadConP_load(mRegP dst, immP_load src) %{
6213 match(Set dst src);
6215 ins_cost(100);
6216 format %{ "ld $dst, [$constanttablebase + $constantoffset] load from constant table: ptr=$src @ loadConP_load" %}
6218 ins_encode %{
6220 int con_offset = $constantoffset($src);
6222 if (Assembler::is_simm16(con_offset)) {
6223 __ ld($dst$$Register, $constanttablebase, con_offset);
6224 } else {
6225 __ set64(AT, con_offset);
6226 if (UseLoongsonISA) {
6227 __ gsldx($dst$$Register, $constanttablebase, AT, 0);
6228 } else {
6229 __ daddu(AT, $constanttablebase, AT);
6230 __ ld($dst$$Register, AT, 0);
6231 }
6232 }
6233 %}
6235 ins_pipe(ialu_loadI);
6236 %}
6237 */
// Non-oop pointer constant that is cheap to materialize inline (no relocation
// entry needed); lower cost steers the matcher here instead of
// loadConP_general when the operand predicate applies.
instruct loadConP_no_oop_cheap(mRegP dst, immP_no_oop_cheap src) %{
  match(Set dst src);

  ins_cost(80);
  format %{ "li $dst, $src @ loadConP_no_oop_cheap" %}

  ins_encode %{
    __ set64($dst$$Register, $src$$constant);
  %}

  ins_pipe(ialu_regI_regI);
%}
// Materialize the safepoint polling page address.
instruct loadConP_poll(mRegP dst, immP_poll src) %{
  match(Set dst src);

  ins_cost(50);
  format %{ "li $dst, $src #@loadConP_poll" %}

  ins_encode %{
    Register dst = $dst$$Register;
    intptr_t value = (intptr_t)$src$$constant;

    __ set64(dst, (jlong)value);
  %}

  ins_pipe( ialu_regI_regI );
%}
// NULL pointer constant: just copy the zero register.
instruct loadConP0(mRegP dst, immP0 src)
%{
  match(Set dst src);

  ins_cost(50);
  format %{ "mov $dst, R0\t# ptr" %}
  ins_encode %{
    Register dst_reg = $dst$$Register;
    __ daddu(dst_reg, R0, R0);
  %}
  ins_pipe( ialu_regI_regI );
%}
// Compressed NULL: encoded form of NULL is always zero, so copy R0.
instruct loadConN0(mRegN dst, immN0 src) %{
  match(Set dst src);
  format %{ "move $dst, R0\t# compressed NULL ptr" %}
  ins_encode %{
    __ move($dst$$Register, R0);
  %}
  ins_pipe( ialu_regI_regI );
%}
// Non-NULL compressed oop constant; set_narrow_oop also records the oop
// relocation so the GC can patch the embedded value.
instruct loadConN(mRegN dst, immN src) %{
  match(Set dst src);

  ins_cost(125);
  format %{ "li $dst, $src\t# compressed ptr @ loadConN" %}
  ins_encode %{
    Register dst = $dst$$Register;
    __ set_narrow_oop(dst, (jobject)$src$$constant);
  %}
  ins_pipe( ialu_regI_regI ); // XXX
%}
// Compressed klass pointer constant; set_narrow_klass records the metadata
// relocation for later patching.
instruct loadConNKlass(mRegN dst, immNKlass src) %{
  match(Set dst src);

  ins_cost(125);
  format %{ "li $dst, $src\t# compressed klass ptr @ loadConNKlass" %}
  ins_encode %{
    Register dst = $dst$$Register;
    __ set_narrow_klass(dst, (Klass*)$src$$constant);
  %}
  ins_pipe( ialu_regI_regI ); // XXX
%}
6315 //FIXME
6316 // Tail Call; Jump from runtime stub to Java code.
6317 // Also known as an 'interprocedural jump'.
6318 // Target of jump will eventually return to caller.
6319 // TailJump below removes the return address.
instruct TailCalljmpInd(mRegP jump_target, mRegP method_oop) %{
  match(TailCall jump_target method_oop );
  ins_cost(300);
  format %{ "JMP $jump_target \t# @TailCalljmpInd" %}

  ins_encode %{
    Register target = $jump_target$$Register;
    Register oop = $method_oop$$Register;

    /* 2012/10/12 Jin: RA will be used in generate_forward_exception() */
    __ push(RA);

    // Method oop travels to the target in S3; then jump indirectly, with a
    // nop to fill the jr delay slot.
    __ move(S3, oop);
    __ jr(target);
    __ nop();
  %}

  ins_pipe( pipe_jump );
%}
6340 // Create exception oop: created by stack-crawling runtime code.
6341 // Created exception is now available to this handler, and is setup
6342 // just prior to jumping to this handler. No code emitted.
instruct CreateException( a0_RegP ex_oop )
%{
  match(Set ex_oop (CreateEx));

  // use the following format syntax
  format %{ "# exception oop is in A0; no code emitted @CreateException" %}
  ins_encode %{
    /* Jin: X86 leaves this function empty */
    // No instructions: the exception oop is already in A0 by convention;
    // only a disassembly marker is emitted.
    __ block_comment("CreateException is empty in X86/MIPS");
  %}
  ins_pipe( empty );
// ins_pipe( pipe_jump );
%}
6358 /* 2012/9/14 Jin: The mechanism of exception handling is clear now.
6360 - Common try/catch:
6361 2012/9/14 Jin: [stubGenerator_mips.cpp] generate_forward_exception()
6362 |- V0, V1 are created
6363 |- T9 <= SharedRuntime::exception_handler_for_return_address
6364 `- jr T9
6365 `- the caller's exception_handler
6366 `- jr OptoRuntime::exception_blob
6367 `- here
6368 - Rethrow(e.g. 'unwind'):
6369 * The callee:
6370 |- an exception is triggered during execution
6371 `- exits the callee method through RethrowException node
6372 |- The callee pushes exception_oop(T0) and exception_pc(RA)
6373 `- The callee jumps to OptoRuntime::rethrow_stub()
6374 * In OptoRuntime::rethrow_stub:
6375 |- The VM calls _rethrow_Java to determine the return address in the caller method
6376 `- exits the stub with tailjmpInd
6377 |- pops exception_oop(V0) and exception_pc(V1)
6378 `- jumps to the return address(usually an exception_handler)
6379 * The caller:
6380 `- continues processing the exception_blob with V0/V1
6381 */
6383 /*
6384 Disassembling OptoRuntime::rethrow_stub()
6386 ; locals
6387 0x2d3bf320: addiu sp, sp, 0xfffffff8
6388 0x2d3bf324: sw ra, 0x4(sp)
6389 0x2d3bf328: sw fp, 0x0(sp)
6390 0x2d3bf32c: addu fp, sp, zero
6391 0x2d3bf330: addiu sp, sp, 0xfffffff0
6392 0x2d3bf334: sw ra, 0x8(sp)
6393 0x2d3bf338: sw t0, 0x4(sp)
6394 0x2d3bf33c: sw sp, 0x0(sp)
6396 ; get_thread(S2)
6397 0x2d3bf340: addu s2, sp, zero
6398 0x2d3bf344: srl s2, s2, 12
6399 0x2d3bf348: sll s2, s2, 2
6400 0x2d3bf34c: lui at, 0x2c85
6401 0x2d3bf350: addu at, at, s2
6402 0x2d3bf354: lw s2, 0xffffcc80(at)
6404 0x2d3bf358: lw s0, 0x0(sp)
0x2d3bf35c: sw s0, 0x118(s2)  // last_sp -> thread
6406 0x2d3bf360: sw s2, 0xc(sp)
6408 ; OptoRuntime::rethrow_C(oopDesc* exception, JavaThread* thread, address ret_pc)
6409 0x2d3bf364: lw a0, 0x4(sp)
6410 0x2d3bf368: lw a1, 0xc(sp)
6411 0x2d3bf36c: lw a2, 0x8(sp)
6412 ;; Java_To_Runtime
6413 0x2d3bf370: lui t9, 0x2c34
6414 0x2d3bf374: addiu t9, t9, 0xffff8a48
6415 0x2d3bf378: jalr t9
6416 0x2d3bf37c: nop
6418 0x2d3bf380: addu s3, v0, zero ; S3: SharedRuntime::raw_exception_handler_for_return_address()
6420 0x2d3bf384: lw s0, 0xc(sp)
6421 0x2d3bf388: sw zero, 0x118(s0)
6422 0x2d3bf38c: sw zero, 0x11c(s0)
6423 0x2d3bf390: lw s1, 0x144(s0) ; ex_oop: S1
6424 0x2d3bf394: addu s2, s0, zero
6425 0x2d3bf398: sw zero, 0x144(s2)
6426 0x2d3bf39c: lw s0, 0x4(s2)
6427 0x2d3bf3a0: addiu s4, zero, 0x0
6428 0x2d3bf3a4: bne s0, s4, 0x2d3bf3d4
6429 0x2d3bf3a8: nop
6430 0x2d3bf3ac: addiu sp, sp, 0x10
6431 0x2d3bf3b0: addiu sp, sp, 0x8
6432 0x2d3bf3b4: lw ra, 0xfffffffc(sp)
6433 0x2d3bf3b8: lw fp, 0xfffffff8(sp)
6434 0x2d3bf3bc: lui at, 0x2b48
6435 0x2d3bf3c0: lw at, 0x100(at)
6437 ; tailjmpInd: Restores exception_oop & exception_pc
6438 0x2d3bf3c4: addu v1, ra, zero
6439 0x2d3bf3c8: addu v0, s1, zero
6440 0x2d3bf3cc: jr s3
6441 0x2d3bf3d0: nop
6442 ; Exception:
6443 0x2d3bf3d4: lui s1, 0x2cc8 ; generate_forward_exception()
6444 0x2d3bf3d8: addiu s1, s1, 0x40
6445 0x2d3bf3dc: addiu s2, zero, 0x0
6446 0x2d3bf3e0: addiu sp, sp, 0x10
6447 0x2d3bf3e4: addiu sp, sp, 0x8
6448 0x2d3bf3e8: lw ra, 0xfffffffc(sp)
6449 0x2d3bf3ec: lw fp, 0xfffffff8(sp)
6450 0x2d3bf3f0: lui at, 0x2b48
6451 0x2d3bf3f4: lw at, 0x100(at)
6452 ; TailCalljmpInd
6453 __ push(RA); ; to be used in generate_forward_exception()
6454 0x2d3bf3f8: addu t7, s2, zero
6455 0x2d3bf3fc: jr s1
6456 0x2d3bf400: nop
6457 */
6458 // Rethrow exception:
6459 // The exception oop will come in the first argument position.
6460 // Then JUMP (not call) to the rethrow stub code.
instruct RethrowException()
%{
  match(Rethrow);

  // use the following format syntax
  format %{ "JMP rethrow_stub #@RethrowException" %}
  ins_encode %{
    __ block_comment("@ RethrowException");

    // Mark the jump site and attach a runtime-call relocation so the stub
    // address embedded by li can be relocated with the code blob.
    cbuf.set_insts_mark();
    cbuf.relocate(cbuf.insts_mark(), runtime_call_Relocation::spec());

    // call OptoRuntime::rethrow_stub to get the exception handler in parent method
    __ li(T9, OptoRuntime::rethrow_stub());
    __ jr(T9);
    __ nop();
  %}
  ins_pipe( pipe_jump );
%}
// Branch on pointer compared against NULL.  Only eq/ne reach code generation
// for a pointer-vs-zero test; the unsigned orderings are commented out below.
instruct branchConP_zero(cmpOpU cmp, mRegP op1, immP0 zero, label labl) %{
  match(If cmp (CmpP op1 zero));
  effect(USE labl);

  ins_cost(180);
  format %{ "b$cmp $op1, R0, $labl #@branchConP_zero" %}

  ins_encode %{
    Register op1 = $op1$$Register;
    Register op2 = R0;
    Label &L = *($labl$$label);
    int flag = $cmp$$cmpcode;

    // NOTE(review): &L is the address of a C++ reference and can never be
    // NULL in well-defined code, so the else arms (branch to offset 0) look
    // dead -- TODO confirm and simplify across the port.
    switch(flag)
    {
      case 0x01: //equal
        if (&L)
          __ beq(op1, op2, L);
        else
          __ beq(op1, op2, (int)0);
        break;
      case 0x02: //not_equal
        if (&L)
          __ bne(op1, op2, L);
        else
          __ bne(op1, op2, (int)0);
        break;
      /*
      case 0x03: //above
        __ sltu(AT, op2, op1);
        if(&L)
          __ bne(R0, AT, L);
        else
          __ bne(R0, AT, (int)0);
        break;
      case 0x04: //above_equal
        __ sltu(AT, op1, op2);
        if(&L)
          __ beq(AT, R0, L);
        else
          __ beq(AT, R0, (int)0);
        break;
      case 0x05: //below
        __ sltu(AT, op1, op2);
        if(&L)
          __ bne(R0, AT, L);
        else
          __ bne(R0, AT, (int)0);
        break;
      case 0x06: //below_equal
        __ sltu(AT, op2, op1);
        if(&L)
          __ beq(AT, R0, L);
        else
          __ beq(AT, R0, (int)0);
        break;
      */
      default:
        Unimplemented();
    }
    // Fill the branch delay slot.
    __ nop();
  %}

  ins_pc_relative(1);
  ins_pipe( pipe_alu_branch );
%}
// Branch on pointer vs pointer comparison.  Unsigned orderings are computed
// with sltu into the scratch register AT, then branched on against R0.
instruct branchConP(cmpOpU cmp, mRegP op1, mRegP op2, label labl) %{
  match(If cmp (CmpP op1 op2));
//  predicate(can_branch_register(_kids[0]->_leaf, _kids[1]->_leaf));
  effect(USE labl);

  ins_cost(200);
  format %{ "b$cmp $op1, $op2, $labl #@branchConP" %}

  ins_encode %{
    Register op1 = $op1$$Register;
    Register op2 = $op2$$Register;
    Label &L = *($labl$$label);
    int flag = $cmp$$cmpcode;

    // NOTE(review): &L can never be NULL (address of a reference); the else
    // arms appear dead -- TODO confirm.
    switch(flag)
    {
      case 0x01: //equal
        if (&L)
          __ beq(op1, op2, L);
        else
          __ beq(op1, op2, (int)0);
        break;
      case 0x02: //not_equal
        if (&L)
          __ bne(op1, op2, L);
        else
          __ bne(op1, op2, (int)0);
        break;
      case 0x03: //above
        __ sltu(AT, op2, op1);
        if(&L)
          __ bne(R0, AT, L);
        else
          __ bne(R0, AT, (int)0);
        break;
      case 0x04: //above_equal
        __ sltu(AT, op1, op2);
        if(&L)
          __ beq(AT, R0, L);
        else
          __ beq(AT, R0, (int)0);
        break;
      case 0x05: //below
        __ sltu(AT, op1, op2);
        if(&L)
          __ bne(R0, AT, L);
        else
          __ bne(R0, AT, (int)0);
        break;
      case 0x06: //below_equal
        __ sltu(AT, op2, op1);
        if(&L)
          __ beq(AT, R0, L);
        else
          __ beq(AT, R0, (int)0);
        break;
      default:
        Unimplemented();
    }
    // Fill the branch delay slot.
    __ nop();
  %}

  ins_pc_relative(1);
  ins_pipe( pipe_alu_branch );
%}
// Branch on compressed oop compared against compressed NULL (encoded zero).
// Only eq/ne are meaningful here.
instruct cmpN_null_branch(cmpOp cmp, mRegN op1, immN0 null, label labl) %{
  match(If cmp (CmpN op1 null));
  effect(USE labl);

  ins_cost(180);
  format %{ "CMP $op1,0\t! compressed ptr\n\t"
            "BP$cmp $labl @ cmpN_null_branch" %}
  ins_encode %{
    Register op1 = $op1$$Register;
    Register op2 = R0;
    Label &L = *($labl$$label);
    int flag = $cmp$$cmpcode;

    // NOTE(review): &L can never be NULL (address of a reference); the else
    // arms appear dead -- TODO confirm.
    switch(flag)
    {
      case 0x01: //equal
        if (&L)
          __ beq(op1, op2, L);
        else
          __ beq(op1, op2, (int)0);
        break;
      case 0x02: //not_equal
        if (&L)
          __ bne(op1, op2, L);
        else
          __ bne(op1, op2, (int)0);
        break;
      default:
        Unimplemented();
    }
    // Fill the branch delay slot.
    __ nop();
  %}
//TODO: pipe_branchP or create pipe_branchN LEE
  ins_pc_relative(1);
  ins_pipe( pipe_alu_branch );
%}
// Branch on compressed oop vs compressed oop; unsigned orderings via sltu/AT.
instruct cmpN_reg_branch(cmpOp cmp, mRegN op1, mRegN op2, label labl) %{
  match(If cmp (CmpN op1 op2));
  effect(USE labl);

  ins_cost(180);
  format %{ "CMP $op1,$op2\t! compressed ptr\n\t"
            "BP$cmp $labl" %}
  ins_encode %{
    Register op1_reg = $op1$$Register;
    Register op2_reg = $op2$$Register;
    Label &L = *($labl$$label);
    int flag = $cmp$$cmpcode;

    // NOTE(review): &L can never be NULL (address of a reference); the else
    // arms appear dead -- TODO confirm.
    switch(flag)
    {
      case 0x01: //equal
        if (&L)
          __ beq(op1_reg, op2_reg, L);
        else
          __ beq(op1_reg, op2_reg, (int)0);
        break;
      case 0x02: //not_equal
        if (&L)
          __ bne(op1_reg, op2_reg, L);
        else
          __ bne(op1_reg, op2_reg, (int)0);
        break;
      case 0x03: //above
        __ sltu(AT, op2_reg, op1_reg);
        if(&L)
          __ bne(R0, AT, L);
        else
          __ bne(R0, AT, (int)0);
        break;
      case 0x04: //above_equal
        __ sltu(AT, op1_reg, op2_reg);
        if(&L)
          __ beq(AT, R0, L);
        else
          __ beq(AT, R0, (int)0);
        break;
      case 0x05: //below
        __ sltu(AT, op1_reg, op2_reg);
        if(&L)
          __ bne(R0, AT, L);
        else
          __ bne(R0, AT, (int)0);
        break;
      case 0x06: //below_equal
        __ sltu(AT, op2_reg, op1_reg);
        if(&L)
          __ beq(AT, R0, L);
        else
          __ beq(AT, R0, (int)0);
        break;
      default:
        Unimplemented();
    }
    // Fill the branch delay slot.
    __ nop();
  %}
  ins_pc_relative(1);
  ins_pipe( pipe_alu_branch );
%}
// Unsigned integer compare-and-branch, register vs register.
instruct branchConIU_reg_reg(cmpOpU cmp, mRegI src1, mRegI src2, label labl) %{
  match( If cmp (CmpU src1 src2) );
  effect(USE labl);
  format %{ "BR$cmp $src1, $src2, $labl #@branchConIU_reg_reg" %}

  ins_encode %{
    Register op1 = $src1$$Register;
    Register op2 = $src2$$Register;
    Label &L = *($labl$$label);
    int flag = $cmp$$cmpcode;

    // NOTE(review): &L can never be NULL (address of a reference); the else
    // arms appear dead -- TODO confirm.
    switch(flag)
    {
      case 0x01: //equal
        if (&L)
          __ beq(op1, op2, L);
        else
          __ beq(op1, op2, (int)0);
        break;
      case 0x02: //not_equal
        if (&L)
          __ bne(op1, op2, L);
        else
          __ bne(op1, op2, (int)0);
        break;
      case 0x03: //above
        __ sltu(AT, op2, op1);
        if(&L)
          __ bne(AT, R0, L);
        else
          __ bne(AT, R0, (int)0);
        break;
      case 0x04: //above_equal
        __ sltu(AT, op1, op2);
        if(&L)
          __ beq(AT, R0, L);
        else
          __ beq(AT, R0, (int)0);
        break;
      case 0x05: //below
        __ sltu(AT, op1, op2);
        if(&L)
          __ bne(AT, R0, L);
        else
          __ bne(AT, R0, (int)0);
        break;
      case 0x06: //below_equal
        __ sltu(AT, op2, op1);
        if(&L)
          __ beq(AT, R0, L);
        else
          __ beq(AT, R0, (int)0);
        break;
      default:
        Unimplemented();
    }
    // Fill the branch delay slot.
    __ nop();
  %}

  ins_pc_relative(1);
  ins_pipe( pipe_alu_branch );
%}
// Unsigned integer compare-and-branch against an arbitrary immediate; the
// immediate is first materialized into scratch register AT.
instruct branchConIU_reg_imm(cmpOpU cmp, mRegI src1, immI src2, label labl) %{
  match( If cmp (CmpU src1 src2) );
  effect(USE labl);
  format %{ "BR$cmp $src1, $src2, $labl #@branchConIU_reg_imm" %}

  ins_encode %{
    Register op1 = $src1$$Register;
    int val = $src2$$constant;
    Label &L = *($labl$$label);
    int flag = $cmp$$cmpcode;

    // Materialize the immediate once; the ordered cases then overwrite AT
    // with the sltu result.
    __ move(AT, val);
    // NOTE(review): &L can never be NULL (address of a reference); the else
    // arms appear dead -- TODO confirm.
    switch(flag)
    {
      case 0x01: //equal
        if (&L)
          __ beq(op1, AT, L);
        else
          __ beq(op1, AT, (int)0);
        break;
      case 0x02: //not_equal
        if (&L)
          __ bne(op1, AT, L);
        else
          __ bne(op1, AT, (int)0);
        break;
      case 0x03: //above
        __ sltu(AT, AT, op1);
        if(&L)
          __ bne(R0, AT, L);
        else
          __ bne(R0, AT, (int)0);
        break;
      case 0x04: //above_equal
        __ sltu(AT, op1, AT);
        if(&L)
          __ beq(AT, R0, L);
        else
          __ beq(AT, R0, (int)0);
        break;
      case 0x05: //below
        __ sltu(AT, op1, AT);
        if(&L)
          __ bne(R0, AT, L);
        else
          __ bne(R0, AT, (int)0);
        break;
      case 0x06: //below_equal
        __ sltu(AT, AT, op1);
        if(&L)
          __ beq(AT, R0, L);
        else
          __ beq(AT, R0, (int)0);
        break;
      default:
        Unimplemented();
    }
    // Fill the branch delay slot.
    __ nop();
  %}

  ins_pc_relative(1);
  ins_pipe( pipe_alu_branch );
%}
// Signed integer compare-and-branch, register vs register; orderings use
// signed slt into AT.
instruct branchConI_reg_reg(cmpOp cmp, mRegI src1, mRegI src2, label labl) %{
  match( If cmp (CmpI src1 src2) );
  effect(USE labl);
  format %{ "BR$cmp $src1, $src2, $labl #@branchConI_reg_reg" %}

  ins_encode %{
    Register op1 = $src1$$Register;
    Register op2 = $src2$$Register;
    Label &L = *($labl$$label);
    int flag = $cmp$$cmpcode;

    // NOTE(review): &L can never be NULL (address of a reference); the else
    // arms appear dead -- TODO confirm.
    switch(flag)
    {
      case 0x01: //equal
        if (&L)
          __ beq(op1, op2, L);
        else
          __ beq(op1, op2, (int)0);
        break;
      case 0x02: //not_equal
        if (&L)
          __ bne(op1, op2, L);
        else
          __ bne(op1, op2, (int)0);
        break;
      case 0x03: //above
        __ slt(AT, op2, op1);
        if(&L)
          __ bne(R0, AT, L);
        else
          __ bne(R0, AT, (int)0);
        break;
      case 0x04: //above_equal
        __ slt(AT, op1, op2);
        if(&L)
          __ beq(AT, R0, L);
        else
          __ beq(AT, R0, (int)0);
        break;
      case 0x05: //below
        __ slt(AT, op1, op2);
        if(&L)
          __ bne(R0, AT, L);
        else
          __ bne(R0, AT, (int)0);
        break;
      case 0x06: //below_equal
        __ slt(AT, op2, op1);
        if(&L)
          __ beq(AT, R0, L);
        else
          __ beq(AT, R0, (int)0);
        break;
      default:
        Unimplemented();
    }
    // Fill the branch delay slot.
    __ nop();
  %}

  ins_pc_relative(1);
  ins_pipe( pipe_alu_branch );
%}
// Signed integer compare-and-branch against zero; uses the dedicated MIPS
// compare-with-zero branches (bgtz/bgez/bltz/blez), so no scratch register
// is needed.
instruct branchConI_reg_imm0(cmpOp cmp, mRegI src1, immI0 src2, label labl) %{
  match( If cmp (CmpI src1 src2) );
  effect(USE labl);
  ins_cost(170);
  format %{ "BR$cmp $src1, $src2, $labl #@branchConI_reg_imm0" %}

  ins_encode %{
    Register op1 = $src1$$Register;
//    int val = $src2$$constant;
    Label &L = *($labl$$label);
    int flag = $cmp$$cmpcode;

    //__ move(AT, val);
    // NOTE(review): &L can never be NULL (address of a reference); the else
    // arms appear dead -- TODO confirm.
    switch(flag)
    {
      case 0x01: //equal
        if (&L)
          __ beq(op1, R0, L);
        else
          __ beq(op1, R0, (int)0);
        break;
      case 0x02: //not_equal
        if (&L)
          __ bne(op1, R0, L);
        else
          __ bne(op1, R0, (int)0);
        break;
      case 0x03: //greater
        if(&L)
          __ bgtz(op1, L);
        else
          __ bgtz(op1, (int)0);
        break;
      case 0x04: //greater_equal
        if(&L)
          __ bgez(op1, L);
        else
          __ bgez(op1, (int)0);
        break;
      case 0x05: //less
        if(&L)
          __ bltz(op1, L);
        else
          __ bltz(op1, (int)0);
        break;
      case 0x06: //less_equal
        if(&L)
          __ blez(op1, L);
        else
          __ blez(op1, (int)0);
        break;
      default:
        Unimplemented();
    }
    // Fill the branch delay slot.
    __ nop();
  %}

  ins_pc_relative(1);
  ins_pipe( pipe_alu_branch );
%}
// Signed integer compare-and-branch against an arbitrary immediate; the
// immediate is materialized into AT first, then signed slt computes the
// ordering for the inequality cases.
instruct branchConI_reg_imm(cmpOp cmp, mRegI src1, immI src2, label labl) %{
  match( If cmp (CmpI src1 src2) );
  effect(USE labl);
  ins_cost(200);
  format %{ "BR$cmp $src1, $src2, $labl #@branchConI_reg_imm" %}

  ins_encode %{
    Register op1 = $src1$$Register;
    int val = $src2$$constant;
    Label &L = *($labl$$label);
    int flag = $cmp$$cmpcode;

    __ move(AT, val);
    // NOTE(review): &L can never be NULL (address of a reference); the else
    // arms appear dead -- TODO confirm.
    switch(flag)
    {
      case 0x01: //equal
        if (&L)
          __ beq(op1, AT, L);
        else
          __ beq(op1, AT, (int)0);
        break;
      case 0x02: //not_equal
        if (&L)
          __ bne(op1, AT, L);
        else
          __ bne(op1, AT, (int)0);
        break;
      case 0x03: //greater
        __ slt(AT, AT, op1);
        if(&L)
          __ bne(R0, AT, L);
        else
          __ bne(R0, AT, (int)0);
        break;
      case 0x04: //greater_equal
        __ slt(AT, op1, AT);
        if(&L)
          __ beq(AT, R0, L);
        else
          __ beq(AT, R0, (int)0);
        break;
      case 0x05: //less
        __ slt(AT, op1, AT);
        if(&L)
          __ bne(R0, AT, L);
        else
          __ bne(R0, AT, (int)0);
        break;
      case 0x06: //less_equal
        __ slt(AT, AT, op1);
        if(&L)
          __ beq(AT, R0, L);
        else
          __ beq(AT, R0, (int)0);
        break;
      default:
        Unimplemented();
    }
    // Fill the branch delay slot.
    __ nop();
  %}

  ins_pc_relative(1);
  ins_pipe( pipe_alu_branch );
%}
// Unsigned integer compare-and-branch against zero.  Unsigned-vs-zero lets
// the ordered cases collapse: above == not-equal, above_equal is always
// taken, below is never taken, below_equal == equal.
instruct branchConIU_reg_imm0(cmpOpU cmp, mRegI src1, immI0 zero, label labl) %{
  match( If cmp (CmpU src1 zero) );
  effect(USE labl);
  format %{ "BR$cmp $src1, zero, $labl #@branchConIU_reg_imm0" %}

  ins_encode %{
    Register op1 = $src1$$Register;
    Label &L = *($labl$$label);
    int flag = $cmp$$cmpcode;

    // NOTE(review): &L can never be NULL (address of a reference); the else
    // arms appear dead -- TODO confirm.
    switch(flag)
    {
      case 0x01: //equal
        if (&L)
          __ beq(op1, R0, L);
        else
          __ beq(op1, R0, (int)0);
        break;
      case 0x02: //not_equal
        if (&L)
          __ bne(op1, R0, L);
        else
          __ bne(op1, R0, (int)0);
        break;
      case 0x03: //above
        // Unsigned x > 0 is simply x != 0.
        if(&L)
          __ bne(R0, op1, L);
        else
          __ bne(R0, op1, (int)0);
        break;
      case 0x04: //above_equal
        // Unsigned x >= 0 is always true: unconditional branch.
        if(&L)
          __ beq(R0, R0, L);
        else
          __ beq(R0, R0, (int)0);
        break;
      case 0x05: //below
        // Unsigned x < 0 is never true: emit nothing (fall through).
        // NOTE(review): this bare return also skips the trailing nop --
        // harmless since no branch was emitted, but worth confirming.
        return;
        break;
      case 0x06: //below_equal
        // Unsigned x <= 0 is x == 0.
        if(&L)
          __ beq(op1, R0, L);
        else
          __ beq(op1, R0, (int)0);
        break;
      default:
        Unimplemented();
    }
    // Fill the branch delay slot.
    __ nop();
  %}

  ins_pc_relative(1);
  ins_pipe( pipe_alu_branch );
%}
// Unsigned integer compare-and-branch against a 16-bit immediate.  Cheaper
// than branchConIU_reg_imm because above_equal/below can use sltiu directly;
// the other cases still materialize the constant into AT first.
instruct branchConIU_reg_immI16(cmpOpU cmp, mRegI src1, immI16 src2, label labl) %{
  match( If cmp (CmpU src1 src2) );
  effect(USE labl);
  ins_cost(180);
  format %{ "BR$cmp $src1, $src2, $labl #@branchConIU_reg_immI16" %}

  ins_encode %{
    Register op1 = $src1$$Register;
    int val = $src2$$constant;
    Label &L = *($labl$$label);
    int flag = $cmp$$cmpcode;

    // NOTE(review): &L can never be NULL (address of a reference); the else
    // arms appear dead -- TODO confirm.
    switch(flag)
    {
      case 0x01: //equal
        __ move(AT, val);
        if (&L)
          __ beq(op1, AT, L);
        else
          __ beq(op1, AT, (int)0);
        break;
      case 0x02: //not_equal
        __ move(AT, val);
        if (&L)
          __ bne(op1, AT, L);
        else
          __ bne(op1, AT, (int)0);
        break;
      case 0x03: //above
        __ move(AT, val);
        __ sltu(AT, AT, op1);
        if(&L)
          __ bne(R0, AT, L);
        else
          __ bne(R0, AT, (int)0);
        break;
      case 0x04: //above_equal
        __ sltiu(AT, op1, val);
        if(&L)
          __ beq(AT, R0, L);
        else
          __ beq(AT, R0, (int)0);
        break;
      case 0x05: //below
        __ sltiu(AT, op1, val);
        if(&L)
          __ bne(R0, AT, L);
        else
          __ bne(R0, AT, (int)0);
        break;
      case 0x06: //below_equal
        __ move(AT, val);
        __ sltu(AT, AT, op1);
        if(&L)
          __ beq(AT, R0, L);
        else
          __ beq(AT, R0, (int)0);
        break;
      default:
        Unimplemented();
    }
    // Fill the branch delay slot.
    __ nop();
  %}

  ins_pc_relative(1);
  ins_pipe( pipe_alu_branch );
%}
// Signed long compare-and-branch, register vs register.  Unlike the int
// variants above, each case fills its own delay slot via delayed()->nop()
// instead of a shared trailing nop.
instruct branchConL_regL_regL(cmpOp cmp, mRegL src1, mRegL src2, label labl) %{
  match( If cmp (CmpL src1 src2) );
  effect(USE labl);
  format %{ "BR$cmp $src1, $src2, $labl #@branchConL_regL_regL" %}
  ins_cost(250);

  ins_encode %{
    Register opr1_reg = as_Register($src1$$reg);
    Register opr2_reg = as_Register($src2$$reg);

    Label &target = *($labl$$label);
    int flag = $cmp$$cmpcode;

    // NOTE(review): &target can never be NULL (address of a reference); the
    // else arms appear dead -- TODO confirm.
    switch(flag)
    {
      case 0x01: //equal
        if (&target)
          __ beq(opr1_reg, opr2_reg, target);
        else
          __ beq(opr1_reg, opr2_reg, (int)0);
        __ delayed()->nop();
        break;

      case 0x02: //not_equal
        if(&target)
          __ bne(opr1_reg, opr2_reg, target);
        else
          __ bne(opr1_reg, opr2_reg, (int)0);
        __ delayed()->nop();
        break;

      case 0x03: //greater
        __ slt(AT, opr2_reg, opr1_reg);
        if(&target)
          __ bne(AT, R0, target);
        else
          __ bne(AT, R0, (int)0);
        __ delayed()->nop();
        break;

      case 0x04: //greater_equal
        __ slt(AT, opr1_reg, opr2_reg);
        if(&target)
          __ beq(AT, R0, target);
        else
          __ beq(AT, R0, (int)0);
        __ delayed()->nop();

        break;

      case 0x05: //less
        __ slt(AT, opr1_reg, opr2_reg);
        if(&target)
          __ bne(AT, R0, target);
        else
          __ bne(AT, R0, (int)0);
        __ delayed()->nop();

        break;

      case 0x06: //less_equal
        __ slt(AT, opr2_reg, opr1_reg);

        if(&target)
          __ beq(AT, R0, target);
        else
          __ beq(AT, R0, (int)0);
        __ delayed()->nop();

        break;

      default:
        Unimplemented();
    }
  %}

  ins_pc_relative(1);
  ins_pipe( pipe_alu_branch );
%}
7240 instruct branchConL_reg_immL16_sub(cmpOp cmp, mRegL src1, immL16_sub src2, label labl) %{
7241 match( If cmp (CmpL src1 src2) );
7242 effect(USE labl);
7243 ins_cost(180);
7244 format %{ "BR$cmp $src1, $src2, $labl #@branchConL_reg_immL16_sub" %}
7246 ins_encode %{
7247 Register op1 = $src1$$Register;
7248 int val = $src2$$constant;
7249 Label &L = *($labl$$label);
7250 int flag = $cmp$$cmpcode;
7252 __ daddiu(AT, op1, -1 * val);
7253 switch(flag)
7254 {
7255 case 0x01: //equal
7256 if (&L)
7257 __ beq(R0, AT, L);
7258 else
7259 __ beq(R0, AT, (int)0);
7260 break;
7261 case 0x02: //not_equal
7262 if (&L)
7263 __ bne(R0, AT, L);
7264 else
7265 __ bne(R0, AT, (int)0);
7266 break;
7267 case 0x03: //greater
7268 if(&L)
7269 __ bgtz(AT, L);
7270 else
7271 __ bgtz(AT, (int)0);
7272 break;
7273 case 0x04: //greater_equal
7274 if(&L)
7275 __ bgez(AT, L);
7276 else
7277 __ bgez(AT, (int)0);
7278 break;
7279 case 0x05: //less
7280 if(&L)
7281 __ bltz(AT, L);
7282 else
7283 __ bltz(AT, (int)0);
7284 break;
7285 case 0x06: //less_equal
7286 if(&L)
7287 __ blez(AT, L);
7288 else
7289 __ blez(AT, (int)0);
7290 break;
7291 default:
7292 Unimplemented();
7293 }
7294 __ nop();
7295 %}
7297 ins_pc_relative(1);
7298 ins_pipe( pipe_alu_branch );
7299 %}
7302 instruct branchConI_reg_imm16_sub(cmpOp cmp, mRegI src1, immI16_sub src2, label labl) %{
7303 match( If cmp (CmpI src1 src2) );
7304 effect(USE labl);
7305 ins_cost(180);
7306 format %{ "BR$cmp $src1, $src2, $labl #@branchConI_reg_imm16_sub" %}
7308 ins_encode %{
7309 Register op1 = $src1$$Register;
7310 int val = $src2$$constant;
7311 Label &L = *($labl$$label);
7312 int flag = $cmp$$cmpcode;
7314 __ addiu32(AT, op1, -1 * val);
7315 switch(flag)
7316 {
7317 case 0x01: //equal
7318 if (&L)
7319 __ beq(R0, AT, L);
7320 else
7321 __ beq(R0, AT, (int)0);
7322 break;
7323 case 0x02: //not_equal
7324 if (&L)
7325 __ bne(R0, AT, L);
7326 else
7327 __ bne(R0, AT, (int)0);
7328 break;
7329 case 0x03: //greater
7330 if(&L)
7331 __ bgtz(AT, L);
7332 else
7333 __ bgtz(AT, (int)0);
7334 break;
7335 case 0x04: //greater_equal
7336 if(&L)
7337 __ bgez(AT, L);
7338 else
7339 __ bgez(AT, (int)0);
7340 break;
7341 case 0x05: //less
7342 if(&L)
7343 __ bltz(AT, L);
7344 else
7345 __ bltz(AT, (int)0);
7346 break;
7347 case 0x06: //less_equal
7348 if(&L)
7349 __ blez(AT, L);
7350 else
7351 __ blez(AT, (int)0);
7352 break;
7353 default:
7354 Unimplemented();
7355 }
7356 __ nop();
7357 %}
7359 ins_pc_relative(1);
7360 ins_pipe( pipe_alu_branch );
7361 %}
7363 instruct branchConL_regL_immL0(cmpOp cmp, mRegL src1, immL0 zero, label labl) %{
7364 match( If cmp (CmpL src1 zero) );
7365 effect(USE labl);
7366 format %{ "BR$cmp $src1, zero, $labl #@branchConL_regL_immL0" %}
7367 ins_cost(150);
7369 ins_encode %{
7370 Register opr1_reg = as_Register($src1$$reg);
7371 Label &target = *($labl$$label);
7372 int flag = $cmp$$cmpcode;
7374 switch(flag)
7375 {
7376 case 0x01: //equal
7377 if (&target)
7378 __ beq(opr1_reg, R0, target);
7379 else
7380 __ beq(opr1_reg, R0, int(0));
7381 break;
7383 case 0x02: //not_equal
7384 if(&target)
7385 __ bne(opr1_reg, R0, target);
7386 else
7387 __ bne(opr1_reg, R0, (int)0);
7388 break;
7390 case 0x03: //greater
7391 if(&target)
7392 __ bgtz(opr1_reg, target);
7393 else
7394 __ bgtz(opr1_reg, (int)0);
7395 break;
7397 case 0x04: //greater_equal
7398 if(&target)
7399 __ bgez(opr1_reg, target);
7400 else
7401 __ bgez(opr1_reg, (int)0);
7402 break;
7404 case 0x05: //less
7405 __ slt(AT, opr1_reg, R0);
7406 if(&target)
7407 __ bne(AT, R0, target);
7408 else
7409 __ bne(AT, R0, (int)0);
7410 break;
7412 case 0x06: //less_equal
7413 if (&target)
7414 __ blez(opr1_reg, target);
7415 else
7416 __ blez(opr1_reg, int(0));
7417 break;
7419 default:
7420 Unimplemented();
7421 }
7422 __ delayed()->nop();
7423 %}
7426 ins_pc_relative(1);
7427 ins_pipe( pipe_alu_branch );
7428 %}
7431 //FIXME
7432 instruct branchConF_reg_reg(cmpOp cmp, regF src1, regF src2, label labl) %{
7433 match( If cmp (CmpF src1 src2) );
7434 effect(USE labl);
7435 format %{ "BR$cmp $src1, $src2, $labl #@branchConF_reg_reg" %}
7437 ins_encode %{
7438 FloatRegister reg_op1 = $src1$$FloatRegister;
7439 FloatRegister reg_op2 = $src2$$FloatRegister;
7440 Label &L = *($labl$$label);
7441 int flag = $cmp$$cmpcode;
7443 switch(flag)
7444 {
7445 case 0x01: //equal
7446 __ c_eq_s(reg_op1, reg_op2);
7447 if (&L)
7448 __ bc1t(L);
7449 else
7450 __ bc1t((int)0);
7451 break;
7452 case 0x02: //not_equal
7453 __ c_eq_s(reg_op1, reg_op2);
7454 if (&L)
7455 __ bc1f(L);
7456 else
7457 __ bc1f((int)0);
7458 break;
7459 case 0x03: //greater
7460 __ c_ule_s(reg_op1, reg_op2);
7461 if(&L)
7462 __ bc1f(L);
7463 else
7464 __ bc1f((int)0);
7465 break;
7466 case 0x04: //greater_equal
7467 __ c_ult_s(reg_op1, reg_op2);
7468 if(&L)
7469 __ bc1f(L);
7470 else
7471 __ bc1f((int)0);
7472 break;
7473 case 0x05: //less
7474 __ c_ult_s(reg_op1, reg_op2);
7475 if(&L)
7476 __ bc1t(L);
7477 else
7478 __ bc1t((int)0);
7479 break;
7480 case 0x06: //less_equal
7481 __ c_ule_s(reg_op1, reg_op2);
7482 if(&L)
7483 __ bc1t(L);
7484 else
7485 __ bc1t((int)0);
7486 break;
7487 default:
7488 Unimplemented();
7489 }
7490 __ nop();
7491 %}
7493 ins_pc_relative(1);
7494 ins_pipe(pipe_slow);
7495 %}
7497 instruct branchConD_reg_reg(cmpOp cmp, regD src1, regD src2, label labl) %{
7498 match( If cmp (CmpD src1 src2) );
7499 effect(USE labl);
7500 format %{ "BR$cmp $src1, $src2, $labl #@branchConD_reg_reg" %}
7502 ins_encode %{
7503 FloatRegister reg_op1 = $src1$$FloatRegister;
7504 FloatRegister reg_op2 = $src2$$FloatRegister;
7505 Label &L = *($labl$$label);
7506 int flag = $cmp$$cmpcode;
7508 switch(flag)
7509 {
7510 case 0x01: //equal
7511 __ c_eq_d(reg_op1, reg_op2);
7512 if (&L)
7513 __ bc1t(L);
7514 else
7515 __ bc1t((int)0);
7516 break;
7517 case 0x02: //not_equal
7518 //2016/4/19 aoqi: c_ueq_d cannot distinguish NaN from equal. Double.isNaN(Double) is implemented by 'f != f', so the use of c_ueq_d causes bugs.
7519 __ c_eq_d(reg_op1, reg_op2);
7520 if (&L)
7521 __ bc1f(L);
7522 else
7523 __ bc1f((int)0);
7524 break;
7525 case 0x03: //greater
7526 __ c_ule_d(reg_op1, reg_op2);
7527 if(&L)
7528 __ bc1f(L);
7529 else
7530 __ bc1f((int)0);
7531 break;
7532 case 0x04: //greater_equal
7533 __ c_ult_d(reg_op1, reg_op2);
7534 if(&L)
7535 __ bc1f(L);
7536 else
7537 __ bc1f((int)0);
7538 break;
7539 case 0x05: //less
7540 __ c_ult_d(reg_op1, reg_op2);
7541 if(&L)
7542 __ bc1t(L);
7543 else
7544 __ bc1t((int)0);
7545 break;
7546 case 0x06: //less_equal
7547 __ c_ule_d(reg_op1, reg_op2);
7548 if(&L)
7549 __ bc1t(L);
7550 else
7551 __ bc1t((int)0);
7552 break;
7553 default:
7554 Unimplemented();
7555 }
7556 __ nop();
7557 %}
7559 ins_pc_relative(1);
7560 ins_pipe(pipe_slow);
7561 %}
// Call Runtime Instruction
// Direct call into the VM runtime. The actual emission is delegated to the
// Java_To_Runtime enc_class (defined elsewhere in this file).
instruct CallRuntimeDirect(method meth) %{
  match(CallRuntime );
  effect(USE meth);

  ins_cost(300);
  format %{ "CALL,runtime #@CallRuntimeDirect" %}
  ins_encode( Java_To_Runtime( meth ) );
  ins_pipe( pipe_slow );
  ins_alignment(16);
%}
//------------------------MemBar Instructions-------------------------------
//Memory barrier flavors

// Acquire barrier: emits nothing (size 0). The node only exists so the
// matcher/scheduler respects the ordering edge.
instruct membar_acquire() %{
  match(MemBarAcquire);
  ins_cost(0);

  size(0);
  format %{ "MEMBAR-acquire (empty) @ membar_acquire" %}
  ins_encode();
  ins_pipe(empty);
%}
// LoadFence: lowered to a full 'sync' instruction.
instruct load_fence() %{
  match(LoadFence);
  ins_cost(400);

  format %{ "MEMBAR @ load_fence" %}
  ins_encode %{
    __ sync();
  %}
  ins_pipe(pipe_slow);
%}
// Acquire-lock barrier: empty encoding — the acquire semantics were already
// provided by the CAS in the preceding FastLock (see format string).
instruct membar_acquire_lock()
%{
  match(MemBarAcquireLock);
  ins_cost(0);

  size(0);
  format %{ "MEMBAR-acquire (acquire as part of CAS in prior FastLock so empty encoding) @ membar_acquire_lock" %}
  ins_encode();
  ins_pipe(empty);
%}
// Release barrier: emits nothing (size 0); ordering handled by the node edge.
instruct membar_release() %{
  match(MemBarRelease);
  ins_cost(0);

  size(0);
  format %{ "MEMBAR-release (empty) @ membar_release" %}
  ins_encode();
  ins_pipe(empty);
%}
// StoreFence: lowered to a full 'sync' instruction.
instruct store_fence() %{
  match(StoreFence);
  ins_cost(400);

  format %{ "MEMBAR @ store_fence" %}

  ins_encode %{
    __ sync();
  %}

  ins_pipe(pipe_slow);
%}
// Release-lock barrier: empty — the release is performed inside FastUnlock.
instruct membar_release_lock()
%{
  match(MemBarReleaseLock);
  ins_cost(0);

  size(0);
  format %{ "MEMBAR-release-lock (release in FastUnlock so empty) @ membar_release_lock" %}
  ins_encode();
  ins_pipe(empty);
%}
// Volatile barrier: full 'sync', skipped entirely on uniprocessor systems.
instruct membar_volatile() %{
  match(MemBarVolatile);
  ins_cost(400);

  format %{ "MEMBAR-volatile" %}
  ins_encode %{
    if( !os::is_MP() ) return;     // Not needed on single CPU
    __ sync();

  %}
  ins_pipe(pipe_slow);
%}
// Volatile barrier elided when a following CAS/store-load barrier already
// provides the ordering (Matcher::post_store_load_barrier predicate).
instruct unnecessary_membar_volatile() %{
  match(MemBarVolatile);
  predicate(Matcher::post_store_load_barrier(n));
  ins_cost(0);

  size(0);
  format %{ "MEMBAR-volatile (unnecessary so empty encoding) @ unnecessary_membar_volatile" %}
  ins_encode( );
  ins_pipe(empty);
%}
// StoreStore barrier: empty encoding on this target.
instruct membar_storestore() %{
  match(MemBarStoreStore);

  ins_cost(0);
  size(0);
  format %{ "MEMBAR-storestore (empty encoding) @ membar_storestore" %}
  ins_encode( );
  ins_pipe(empty);
%}
//----------Move Instructions--------------------------------------------------
// CastX2P: reinterpret a long as a pointer — a plain register move, elided
// when source and destination already coincide.
instruct castX2P(mRegP dst, mRegL src) %{
  match(Set dst (CastX2P src));
  format %{ "castX2P  $dst, $src @ castX2P" %}
  ins_encode %{
    Register src = $src$$Register;
    Register dst = $dst$$Register;

    if(src != dst)
      __ move(dst, src);
  %}
  ins_cost(10);
  ins_pipe( ialu_regI_mov );
%}
// CastP2X: reinterpret a pointer as a long — a plain register move, elided
// when source and destination already coincide.
instruct castP2X(mRegL dst, mRegP src ) %{
  match(Set dst (CastP2X src));

  format %{ "mov $dst, $src\t #@castP2X" %}
  ins_encode %{
    Register src = $src$$Register;
    Register dst = $dst$$Register;

    if(src != dst)
      __ move(dst, src);
  %}
  ins_pipe( ialu_regI_mov );
%}
// MoveF2I: bit-copy a float register into an int register (mfc1, 32-bit).
instruct MoveF2I_reg_reg(mRegI dst, regF src) %{
  match(Set dst (MoveF2I src));
  effect(DEF dst, USE src);
  ins_cost(85);
  format %{ "MoveF2I   $dst, $src @ MoveF2I_reg_reg" %}
  ins_encode %{
    Register dst = as_Register($dst$$reg);
    FloatRegister src = as_FloatRegister($src$$reg);

    __ mfc1(dst, src);
  %}
  ins_pipe( pipe_slow );
%}
// MoveI2F: bit-copy an int register into a float register (mtc1, 32-bit).
instruct MoveI2F_reg_reg(regF dst, mRegI src) %{
  match(Set dst (MoveI2F src));
  effect(DEF dst, USE src);
  ins_cost(85);
  format %{ "MoveI2F   $dst, $src @ MoveI2F_reg_reg" %}
  ins_encode %{
    Register src = as_Register($src$$reg);
    FloatRegister dst = as_FloatRegister($dst$$reg);

    __ mtc1(src, dst);
  %}
  ins_pipe( pipe_slow );
%}
// MoveD2L: bit-copy a double register into a long register (dmfc1, 64-bit).
instruct MoveD2L_reg_reg(mRegL dst, regD src) %{
  match(Set dst (MoveD2L src));
  effect(DEF dst, USE src);
  ins_cost(85);
  format %{ "MoveD2L   $dst, $src @ MoveD2L_reg_reg" %}
  ins_encode %{
    Register dst = as_Register($dst$$reg);
    FloatRegister src = as_FloatRegister($src$$reg);

    __ dmfc1(dst, src);
  %}
  ins_pipe( pipe_slow );
%}
// MoveL2D: bit-copy a long register into a double register (dmtc1, 64-bit).
instruct MoveL2D_reg_reg(regD dst, mRegL src) %{
  match(Set dst (MoveL2D src));
  effect(DEF dst, USE src);
  ins_cost(85);
  format %{ "MoveL2D   $dst, $src @ MoveL2D_reg_reg" %}
  ins_encode %{
    FloatRegister dst = as_FloatRegister($dst$$reg);
    Register src = as_Register($src$$reg);

    __ dmtc1(src, dst);
  %}
  ins_pipe( pipe_slow );
%}
//----------Conditional Move---------------------------------------------------
// Conditional move
// CMoveI guarded by a signed int compare: compute the compare result into
// AT (sub for ==/!=, slt for orderings), then movz/movn conditionally moves
// src into dst based on AT being zero/non-zero.
// NOTE(review): subu32 appears to be this port's 32-bit subtract (subu the
// 64-bit one) — confirm against the port's MacroAssembler.
instruct cmovI_cmpI_reg_reg(mRegI dst, mRegI src, mRegI tmp1, mRegI tmp2, cmpOp cop ) %{
  match(Set dst (CMoveI (Binary cop (CmpI tmp1 tmp2)) (Binary dst src)));
  ins_cost(80);
  format %{
            "CMP$cop $tmp1, $tmp2\t @cmovI_cmpI_reg_reg\n"
            "\tCMOV $dst,$src \t @cmovI_cmpI_reg_reg"
         %}

  ins_encode %{
    Register op1 = $tmp1$$Register;
    Register op2 = $tmp2$$Register;
    Register dst = $dst$$Register;
    Register src = $src$$Register;
    int     flag = $cop$$cmpcode;

    switch(flag) {
      case 0x01: //equal
        __ subu32(AT, op1, op2);   // AT == 0 iff op1 == op2
        __ movz(dst, src, AT);     // dst = src when AT == 0
        break;

      case 0x02: //not_equal
        __ subu32(AT, op1, op2);
        __ movn(dst, src, AT);     // dst = src when AT != 0
        break;

      case 0x03: //great
        __ slt(AT, op2, op1);      // AT = (op2 < op1)
        __ movn(dst, src, AT);
        break;

      case 0x04: //great_equal
        __ slt(AT, op1, op2);      // AT = (op1 < op2); move when false
        __ movz(dst, src, AT);
        break;

      case 0x05: //less
        __ slt(AT, op1, op2);
        __ movn(dst, src, AT);
        break;

      case 0x06: //less_equal
        __ slt(AT, op2, op1);
        __ movz(dst, src, AT);
        break;

      default:
        Unimplemented();
    }
  %}

  ins_pipe( pipe_slow );
%}
// CMoveI guarded by an unsigned pointer compare: 64-bit subu/sltu into AT,
// then movz/movn conditionally moves src into dst.
instruct cmovI_cmpP_reg_reg(mRegI dst, mRegI src, mRegP tmp1, mRegP tmp2, cmpOpU cop ) %{
  match(Set dst (CMoveI (Binary cop (CmpP tmp1 tmp2)) (Binary dst src)));
  ins_cost(80);
  format %{
            "CMPU$cop $tmp1,$tmp2\t @cmovI_cmpP_reg_reg\n\t"
            "CMOV $dst,$src\t @cmovI_cmpP_reg_reg"
         %}
  ins_encode %{
    Register op1 = $tmp1$$Register;
    Register op2 = $tmp2$$Register;
    Register dst = $dst$$Register;
    Register src = $src$$Register;
    int     flag = $cop$$cmpcode;

    switch(flag) {
      case 0x01: //equal
        __ subu(AT, op1, op2);     // AT == 0 iff op1 == op2
        __ movz(dst, src, AT);     // dst = src when AT == 0
        break;

      case 0x02: //not_equal
        __ subu(AT, op1, op2);
        __ movn(dst, src, AT);     // dst = src when AT != 0
        break;

      case 0x03: //above
        __ sltu(AT, op2, op1);
        __ movn(dst, src, AT);
        break;

      case 0x04: //above_equal
        __ sltu(AT, op1, op2);
        __ movz(dst, src, AT);
        break;

      case 0x05: //below
        __ sltu(AT, op1, op2);
        __ movn(dst, src, AT);
        break;

      case 0x06: //below_equal
        __ sltu(AT, op2, op1);
        __ movz(dst, src, AT);
        break;

      default:
        Unimplemented();
    }
  %}

  ins_pipe( pipe_slow );
%}
// CMoveI guarded by an unsigned narrow-oop compare: 32-bit subu32 for
// equality, sltu for orderings, then movz/movn.
instruct cmovI_cmpN_reg_reg(mRegI dst, mRegI src, mRegN tmp1, mRegN tmp2, cmpOpU cop ) %{
  match(Set dst (CMoveI (Binary cop (CmpN tmp1 tmp2)) (Binary dst src)));
  ins_cost(80);
  format %{
            "CMPU$cop $tmp1,$tmp2\t @cmovI_cmpN_reg_reg\n\t"
            "CMOV $dst,$src\t @cmovI_cmpN_reg_reg"
         %}
  ins_encode %{
    Register op1 = $tmp1$$Register;
    Register op2 = $tmp2$$Register;
    Register dst = $dst$$Register;
    Register src = $src$$Register;
    int     flag = $cop$$cmpcode;

    switch(flag) {
      case 0x01: //equal
        __ subu32(AT, op1, op2);   // AT == 0 iff op1 == op2
        __ movz(dst, src, AT);     // dst = src when AT == 0
        break;

      case 0x02: //not_equal
        __ subu32(AT, op1, op2);
        __ movn(dst, src, AT);     // dst = src when AT != 0
        break;

      case 0x03: //above
        __ sltu(AT, op2, op1);
        __ movn(dst, src, AT);
        break;

      case 0x04: //above_equal
        __ sltu(AT, op1, op2);
        __ movz(dst, src, AT);
        break;

      case 0x05: //below
        __ sltu(AT, op1, op2);
        __ movn(dst, src, AT);
        break;

      case 0x06: //below_equal
        __ sltu(AT, op2, op1);
        __ movz(dst, src, AT);
        break;

      default:
        Unimplemented();
    }
  %}

  ins_pipe( pipe_slow );
%}
// CMoveP guarded by an unsigned narrow-oop compare; same AT + movz/movn
// scheme as cmovI_cmpN_reg_reg.
instruct cmovP_cmpN_reg_reg(mRegP dst, mRegP src, mRegN tmp1, mRegN tmp2, cmpOpU cop ) %{
  match(Set dst (CMoveP (Binary cop (CmpN tmp1 tmp2)) (Binary dst src)));
  ins_cost(80);
  format %{
            "CMPU$cop $tmp1,$tmp2\t @cmovP_cmpN_reg_reg\n\t"
            "CMOV $dst,$src\t @cmovP_cmpN_reg_reg"
         %}
  ins_encode %{
    Register op1 = $tmp1$$Register;
    Register op2 = $tmp2$$Register;
    Register dst = $dst$$Register;
    Register src = $src$$Register;
    int     flag = $cop$$cmpcode;

    switch(flag) {
      case 0x01: //equal
        __ subu32(AT, op1, op2);   // AT == 0 iff op1 == op2
        __ movz(dst, src, AT);     // dst = src when AT == 0
        break;

      case 0x02: //not_equal
        __ subu32(AT, op1, op2);
        __ movn(dst, src, AT);     // dst = src when AT != 0
        break;

      case 0x03: //above
        __ sltu(AT, op2, op1);
        __ movn(dst, src, AT);
        break;

      case 0x04: //above_equal
        __ sltu(AT, op1, op2);
        __ movz(dst, src, AT);
        break;

      case 0x05: //below
        __ sltu(AT, op1, op2);
        __ movn(dst, src, AT);
        break;

      case 0x06: //below_equal
        __ sltu(AT, op2, op1);
        __ movz(dst, src, AT);
        break;

      default:
        Unimplemented();
    }
  %}

  ins_pipe( pipe_slow );
%}
// CMoveN guarded by an unsigned pointer compare: 64-bit subu/sltu into AT,
// then movz/movn conditionally moves src into dst.
instruct cmovN_cmpP_reg_reg(mRegN dst, mRegN src, mRegP tmp1, mRegP tmp2, cmpOpU cop ) %{
  match(Set dst (CMoveN (Binary cop (CmpP tmp1 tmp2)) (Binary dst src)));
  ins_cost(80);
  format %{
            "CMPU$cop $tmp1,$tmp2\t @cmovN_cmpP_reg_reg\n\t"
            "CMOV $dst,$src\t @cmovN_cmpP_reg_reg"
         %}
  ins_encode %{
    Register op1 = $tmp1$$Register;
    Register op2 = $tmp2$$Register;
    Register dst = $dst$$Register;
    Register src = $src$$Register;
    int     flag = $cop$$cmpcode;

    switch(flag) {
      case 0x01: //equal
        __ subu(AT, op1, op2);     // AT == 0 iff op1 == op2
        __ movz(dst, src, AT);     // dst = src when AT == 0
        break;

      case 0x02: //not_equal
        __ subu(AT, op1, op2);
        __ movn(dst, src, AT);     // dst = src when AT != 0
        break;

      case 0x03: //above
        __ sltu(AT, op2, op1);
        __ movn(dst, src, AT);
        break;

      case 0x04: //above_equal
        __ sltu(AT, op1, op2);
        __ movz(dst, src, AT);
        break;

      case 0x05: //below
        __ sltu(AT, op1, op2);
        __ movn(dst, src, AT);
        break;

      case 0x06: //below_equal
        __ sltu(AT, op2, op1);
        __ movz(dst, src, AT);
        break;

      default:
        Unimplemented();
    }
  %}

  ins_pipe( pipe_slow );
%}
// CMoveP guarded by a double compare: an FPU compare sets the FP condition
// bit, then movt/movf conditionally moves src into dst on that bit.
// NOTE(review): greater/greater_equal use the ordered predicates
// (c_ole_d/c_olt_d) while branchConD_reg_reg uses the unordered ones
// (c_ule_d/c_ult_d) — NaN behavior differs between the two; confirm this
// asymmetry is intended.
instruct cmovP_cmpD_reg_reg(mRegP dst, mRegP src, regD tmp1, regD tmp2, cmpOp cop ) %{
  match(Set dst (CMoveP (Binary cop (CmpD tmp1 tmp2)) (Binary dst src)));
  ins_cost(80);
  format %{
            "CMP$cop $tmp1, $tmp2\t @cmovP_cmpD_reg_reg\n"
            "\tCMOV $dst,$src \t @cmovP_cmpD_reg_reg"
         %}
  ins_encode %{
    FloatRegister reg_op1 = as_FloatRegister($tmp1$$reg);
    FloatRegister reg_op2 = as_FloatRegister($tmp2$$reg);
    Register dst = as_Register($dst$$reg);
    Register src = as_Register($src$$reg);

    int     flag = $cop$$cmpcode;

    switch(flag) {
      case 0x01: //equal
        __ c_eq_d(reg_op1, reg_op2);
        __ movt(dst, src);         // dst = src when condition bit set
        break;
      case 0x02: //not_equal
        __ c_eq_d(reg_op1, reg_op2);
        __ movf(dst, src);         // dst = src when condition bit clear
        break;
      case 0x03: //greater
        __ c_ole_d(reg_op1, reg_op2);
        __ movf(dst, src);
        break;
      case 0x04: //greater_equal
        __ c_olt_d(reg_op1, reg_op2);
        __ movf(dst, src);
        break;
      case 0x05: //less
        __ c_ult_d(reg_op1, reg_op2);
        __ movt(dst, src);
        break;
      case 0x06: //less_equal
        __ c_ule_d(reg_op1, reg_op2);
        __ movt(dst, src);
        break;
      default:
        Unimplemented();
    }
  %}

  ins_pipe( pipe_slow );
%}
// CMoveN guarded by an unsigned narrow-oop compare: 32-bit subu32 for
// equality, sltu for orderings, then movz/movn.
instruct cmovN_cmpN_reg_reg(mRegN dst, mRegN src, mRegN tmp1, mRegN tmp2, cmpOpU cop ) %{
  match(Set dst (CMoveN (Binary cop (CmpN tmp1 tmp2)) (Binary dst src)));
  ins_cost(80);
  format %{
            "CMPU$cop $tmp1,$tmp2\t @cmovN_cmpN_reg_reg\n\t"
            "CMOV $dst,$src\t @cmovN_cmpN_reg_reg"
         %}
  ins_encode %{
    Register op1 = $tmp1$$Register;
    Register op2 = $tmp2$$Register;
    Register dst = $dst$$Register;
    Register src = $src$$Register;
    int     flag = $cop$$cmpcode;

    switch(flag) {
      case 0x01: //equal
        __ subu32(AT, op1, op2);   // AT == 0 iff op1 == op2
        __ movz(dst, src, AT);     // dst = src when AT == 0
        break;

      case 0x02: //not_equal
        __ subu32(AT, op1, op2);
        __ movn(dst, src, AT);     // dst = src when AT != 0
        break;

      case 0x03: //above
        __ sltu(AT, op2, op1);
        __ movn(dst, src, AT);
        break;

      case 0x04: //above_equal
        __ sltu(AT, op1, op2);
        __ movz(dst, src, AT);
        break;

      case 0x05: //below
        __ sltu(AT, op1, op2);
        __ movn(dst, src, AT);
        break;

      case 0x06: //below_equal
        __ sltu(AT, op2, op1);
        __ movz(dst, src, AT);
        break;

      default:
        Unimplemented();
    }
  %}

  ins_pipe( pipe_slow );
%}
// CMoveI guarded by an unsigned int compare: subu/sltu into AT, then
// movz/movn conditionally moves src into dst.
// NOTE(review): uses the 64-bit subu (not subu32) on int operands —
// presumably safe because ints are kept sign-extended in registers on this
// port; confirm.
instruct cmovI_cmpU_reg_reg(mRegI dst, mRegI src, mRegI tmp1, mRegI tmp2, cmpOpU cop ) %{
  match(Set dst (CMoveI (Binary cop (CmpU tmp1 tmp2)) (Binary dst src)));
  ins_cost(80);
  format %{
            "CMPU$cop $tmp1,$tmp2\t @cmovI_cmpU_reg_reg\n\t"
            "CMOV $dst,$src\t @cmovI_cmpU_reg_reg"
         %}
  ins_encode %{
    Register op1 = $tmp1$$Register;
    Register op2 = $tmp2$$Register;
    Register dst = $dst$$Register;
    Register src = $src$$Register;
    int     flag = $cop$$cmpcode;

    switch(flag) {
      case 0x01: //equal
        __ subu(AT, op1, op2);     // AT == 0 iff op1 == op2
        __ movz(dst, src, AT);     // dst = src when AT == 0
        break;

      case 0x02: //not_equal
        __ subu(AT, op1, op2);
        __ movn(dst, src, AT);     // dst = src when AT != 0
        break;

      case 0x03: //above
        __ sltu(AT, op2, op1);
        __ movn(dst, src, AT);
        break;

      case 0x04: //above_equal
        __ sltu(AT, op1, op2);
        __ movz(dst, src, AT);
        break;

      case 0x05: //below
        __ sltu(AT, op1, op2);
        __ movn(dst, src, AT);
        break;

      case 0x06: //below_equal
        __ sltu(AT, op2, op1);
        __ movz(dst, src, AT);
        break;

      default:
        Unimplemented();
    }
  %}

  ins_pipe( pipe_slow );
%}
// CMoveI guarded by a signed long compare: 64-bit subu/slt into AT, then
// movz/movn conditionally moves src into dst.
instruct cmovI_cmpL_reg_reg(mRegI dst, mRegI src, mRegL tmp1, mRegL tmp2, cmpOp cop ) %{
  match(Set dst (CMoveI (Binary cop (CmpL tmp1 tmp2)) (Binary dst src)));
  ins_cost(80);
  format %{
            "CMP$cop $tmp1, $tmp2\t @cmovI_cmpL_reg_reg\n"
            "\tCMOV $dst,$src \t @cmovI_cmpL_reg_reg"
         %}
  ins_encode %{
    Register opr1 = as_Register($tmp1$$reg);
    Register opr2 = as_Register($tmp2$$reg);
    Register dst  = $dst$$Register;
    Register src  = $src$$Register;
    int      flag = $cop$$cmpcode;

    switch(flag) {
      case 0x01: //equal
        __ subu(AT, opr1, opr2);   // AT == 0 iff opr1 == opr2
        __ movz(dst, src, AT);     // dst = src when AT == 0
        break;

      case 0x02: //not_equal
        __ subu(AT, opr1, opr2);
        __ movn(dst, src, AT);     // dst = src when AT != 0
        break;

      case 0x03: //greater
        __ slt(AT, opr2, opr1);
        __ movn(dst, src, AT);
        break;

      case 0x04: //greater_equal
        __ slt(AT, opr1, opr2);
        __ movz(dst, src, AT);
        break;

      case 0x05: //less
        __ slt(AT, opr1, opr2);
        __ movn(dst, src, AT);
        break;

      case 0x06: //less_equal
        __ slt(AT, opr2, opr1);
        __ movz(dst, src, AT);
        break;

      default:
        Unimplemented();
    }
  %}

  ins_pipe( pipe_slow );
%}
// CMoveP guarded by a signed long compare; same AT + movz/movn scheme as
// cmovI_cmpL_reg_reg.
instruct cmovP_cmpL_reg_reg(mRegP dst, mRegP src, mRegL tmp1, mRegL tmp2, cmpOp cop ) %{
  match(Set dst (CMoveP (Binary cop (CmpL tmp1 tmp2)) (Binary dst src)));
  ins_cost(80);
  format %{
            "CMP$cop $tmp1, $tmp2\t @cmovP_cmpL_reg_reg\n"
            "\tCMOV $dst,$src \t @cmovP_cmpL_reg_reg"
         %}
  ins_encode %{
    Register opr1 = as_Register($tmp1$$reg);
    Register opr2 = as_Register($tmp2$$reg);
    Register dst  = $dst$$Register;
    Register src  = $src$$Register;
    int      flag = $cop$$cmpcode;

    switch(flag) {
      case 0x01: //equal
        __ subu(AT, opr1, opr2);   // AT == 0 iff opr1 == opr2
        __ movz(dst, src, AT);     // dst = src when AT == 0
        break;

      case 0x02: //not_equal
        __ subu(AT, opr1, opr2);
        __ movn(dst, src, AT);     // dst = src when AT != 0
        break;

      case 0x03: //greater
        __ slt(AT, opr2, opr1);
        __ movn(dst, src, AT);
        break;

      case 0x04: //greater_equal
        __ slt(AT, opr1, opr2);
        __ movz(dst, src, AT);
        break;

      case 0x05: //less
        __ slt(AT, opr1, opr2);
        __ movn(dst, src, AT);
        break;

      case 0x06: //less_equal
        __ slt(AT, opr2, opr1);
        __ movz(dst, src, AT);
        break;

      default:
        Unimplemented();
    }
  %}

  ins_pipe( pipe_slow );
%}
// CMoveI guarded by a double compare: FPU compare sets the FP condition bit,
// then movt/movf moves src into dst on that bit. See the retained aoqi note
// at not_equal about why c_eq_d (not c_ueq_d) is used for NaN correctness.
instruct cmovI_cmpD_reg_reg(mRegI dst, mRegI src, regD tmp1, regD tmp2, cmpOp cop ) %{
  match(Set dst (CMoveI (Binary cop (CmpD tmp1 tmp2)) (Binary dst src)));
  ins_cost(80);
  format %{
            "CMP$cop $tmp1, $tmp2\t @cmovI_cmpD_reg_reg\n"
            "\tCMOV $dst,$src \t @cmovI_cmpD_reg_reg"
         %}
  ins_encode %{
    FloatRegister reg_op1 = as_FloatRegister($tmp1$$reg);
    FloatRegister reg_op2 = as_FloatRegister($tmp2$$reg);
    Register dst = as_Register($dst$$reg);
    Register src = as_Register($src$$reg);

    int     flag = $cop$$cmpcode;

    switch(flag) {
      case 0x01: //equal
        __ c_eq_d(reg_op1, reg_op2);
        __ movt(dst, src);         // dst = src when condition bit set
        break;
      case 0x02: //not_equal
        //2016/4/19 aoqi: See instruct branchConD_reg_reg. The change in branchConD_reg_reg fixed a bug. It seems similar here, so I made thesame change.
        __ c_eq_d(reg_op1, reg_op2);
        __ movf(dst, src);         // dst = src when condition bit clear
        break;
      case 0x03: //greater
        __ c_ole_d(reg_op1, reg_op2);
        __ movf(dst, src);
        break;
      case 0x04: //greater_equal
        __ c_olt_d(reg_op1, reg_op2);
        __ movf(dst, src);
        break;
      case 0x05: //less
        __ c_ult_d(reg_op1, reg_op2);
        __ movt(dst, src);
        break;
      case 0x06: //less_equal
        __ c_ule_d(reg_op1, reg_op2);
        __ movt(dst, src);
        break;
      default:
        Unimplemented();
    }
  %}

  ins_pipe( pipe_slow );
%}
// CMoveP guarded by an unsigned pointer compare: 64-bit subu/sltu into AT,
// then movz/movn conditionally moves src into dst.
instruct cmovP_cmpP_reg_reg(mRegP dst, mRegP src, mRegP tmp1, mRegP tmp2, cmpOpU cop ) %{
  match(Set dst (CMoveP (Binary cop (CmpP tmp1 tmp2)) (Binary dst src)));
  ins_cost(80);
  format %{
            "CMPU$cop $tmp1,$tmp2\t @cmovP_cmpP_reg_reg\n\t"
            "CMOV $dst,$src\t @cmovP_cmpP_reg_reg"
         %}
  ins_encode %{
    Register op1 = $tmp1$$Register;
    Register op2 = $tmp2$$Register;
    Register dst = $dst$$Register;
    Register src = $src$$Register;
    int     flag = $cop$$cmpcode;

    switch(flag) {
      case 0x01: //equal
        __ subu(AT, op1, op2);     // AT == 0 iff op1 == op2
        __ movz(dst, src, AT);     // dst = src when AT == 0
        break;

      case 0x02: //not_equal
        __ subu(AT, op1, op2);
        __ movn(dst, src, AT);     // dst = src when AT != 0
        break;

      case 0x03: //above
        __ sltu(AT, op2, op1);
        __ movn(dst, src, AT);
        break;

      case 0x04: //above_equal
        __ sltu(AT, op1, op2);
        __ movz(dst, src, AT);
        break;

      case 0x05: //below
        __ sltu(AT, op1, op2);
        __ movn(dst, src, AT);
        break;

      case 0x06: //below_equal
        __ sltu(AT, op2, op1);
        __ movz(dst, src, AT);
        break;

      default:
        Unimplemented();
    }
  %}

  ins_pipe( pipe_slow );
%}
// CMoveP guarded by a signed int compare (cmpOp + slt).
// NOTE(review): the case comments below say above/above_equal/below/
// below_equal, but slt with cmpOp is a signed compare — the conditions are
// really greater/greater_equal/less/less_equal. Comments kept as-is to
// match the original; worth renaming in a follow-up.
instruct cmovP_cmpI_reg_reg(mRegP dst, mRegP src, mRegI tmp1, mRegI tmp2, cmpOp cop ) %{
  match(Set dst (CMoveP (Binary cop (CmpI tmp1 tmp2)) (Binary dst src)));
  ins_cost(80);
  format %{
            "CMP$cop $tmp1,$tmp2\t @cmovP_cmpI_reg_reg\n\t"
            "CMOV $dst,$src\t @cmovP_cmpI_reg_reg"
         %}
  ins_encode %{
    Register op1 = $tmp1$$Register;
    Register op2 = $tmp2$$Register;
    Register dst = $dst$$Register;
    Register src = $src$$Register;
    int     flag = $cop$$cmpcode;

    switch(flag) {
      case 0x01: //equal
        __ subu32(AT, op1, op2);   // AT == 0 iff op1 == op2
        __ movz(dst, src, AT);     // dst = src when AT == 0
        break;

      case 0x02: //not_equal
        __ subu32(AT, op1, op2);
        __ movn(dst, src, AT);     // dst = src when AT != 0
        break;

      case 0x03: //above
        __ slt(AT, op2, op1);
        __ movn(dst, src, AT);
        break;

      case 0x04: //above_equal
        __ slt(AT, op1, op2);
        __ movz(dst, src, AT);
        break;

      case 0x05: //below
        __ slt(AT, op1, op2);
        __ movn(dst, src, AT);
        break;

      case 0x06: //below_equal
        __ slt(AT, op2, op1);
        __ movz(dst, src, AT);
        break;

      default:
        Unimplemented();
    }
  %}

  ins_pipe( pipe_slow );
%}
// CMoveN guarded by a signed int compare (cmpOp + slt).
// NOTE(review): as in cmovP_cmpI_reg_reg, the above/below case comments
// mislabel what are signed greater/less conditions; kept as-is.
instruct cmovN_cmpI_reg_reg(mRegN dst, mRegN src, mRegI tmp1, mRegI tmp2, cmpOp cop ) %{
  match(Set dst (CMoveN (Binary cop (CmpI tmp1 tmp2)) (Binary dst src)));
  ins_cost(80);
  format %{
            "CMP$cop $tmp1,$tmp2\t @cmovN_cmpI_reg_reg\n\t"
            "CMOV $dst,$src\t @cmovN_cmpI_reg_reg"
         %}
  ins_encode %{
    Register op1 = $tmp1$$Register;
    Register op2 = $tmp2$$Register;
    Register dst = $dst$$Register;
    Register src = $src$$Register;
    int     flag = $cop$$cmpcode;

    switch(flag) {
      case 0x01: //equal
        __ subu32(AT, op1, op2);   // AT == 0 iff op1 == op2
        __ movz(dst, src, AT);     // dst = src when AT == 0
        break;

      case 0x02: //not_equal
        __ subu32(AT, op1, op2);
        __ movn(dst, src, AT);     // dst = src when AT != 0
        break;

      case 0x03: //above
        __ slt(AT, op2, op1);
        __ movn(dst, src, AT);
        break;

      case 0x04: //above_equal
        __ slt(AT, op1, op2);
        __ movz(dst, src, AT);
        break;

      case 0x05: //below
        __ slt(AT, op1, op2);
        __ movn(dst, src, AT);
        break;

      case 0x06: //below_equal
        __ slt(AT, op2, op1);
        __ movz(dst, src, AT);
        break;

      default:
        Unimplemented();
    }
  %}

  ins_pipe( pipe_slow );
%}
// CMoveL guarded by a signed int compare: subu32/slt into AT, then
// movz/movn conditionally moves src into dst.
instruct cmovL_cmpI_reg_reg(mRegL dst, mRegL src, mRegI tmp1, mRegI tmp2, cmpOp cop ) %{
  match(Set dst (CMoveL (Binary cop (CmpI tmp1 tmp2)) (Binary dst src)));
  ins_cost(80);
  format %{
            "CMP$cop $tmp1, $tmp2\t @cmovL_cmpI_reg_reg\n"
            "\tCMOV $dst,$src \t @cmovL_cmpI_reg_reg"
         %}

  ins_encode %{
    Register op1 = $tmp1$$Register;
    Register op2 = $tmp2$$Register;
    Register dst = as_Register($dst$$reg);
    Register src = as_Register($src$$reg);
    int     flag = $cop$$cmpcode;

    switch(flag) {
      case 0x01: //equal
        __ subu32(AT, op1, op2);   // AT == 0 iff op1 == op2
        __ movz(dst, src, AT);     // dst = src when AT == 0
        break;

      case 0x02: //not_equal
        __ subu32(AT, op1, op2);
        __ movn(dst, src, AT);     // dst = src when AT != 0
        break;

      case 0x03: //great
        __ slt(AT, op2, op1);
        __ movn(dst, src, AT);
        break;

      case 0x04: //great_equal
        __ slt(AT, op1, op2);
        __ movz(dst, src, AT);
        break;

      case 0x05: //less
        __ slt(AT, op1, op2);
        __ movn(dst, src, AT);
        break;

      case 0x06: //less_equal
        __ slt(AT, op2, op1);
        __ movz(dst, src, AT);
        break;

      default:
        Unimplemented();
    }
  %}

  ins_pipe( pipe_slow );
%}
// CMoveL guarded by a signed long compare: 64-bit subu/slt into AT, then
// movz/movn conditionally moves src into dst.
instruct cmovL_cmpL_reg_reg(mRegL dst, mRegL src, mRegL tmp1, mRegL tmp2, cmpOp cop ) %{
  match(Set dst (CMoveL (Binary cop (CmpL tmp1 tmp2)) (Binary dst src)));
  ins_cost(80);
  format %{
            "CMP$cop $tmp1, $tmp2\t @cmovL_cmpL_reg_reg\n"
            "\tCMOV $dst,$src \t @cmovL_cmpL_reg_reg"
         %}
  ins_encode %{
    Register opr1 = as_Register($tmp1$$reg);
    Register opr2 = as_Register($tmp2$$reg);
    Register dst  = as_Register($dst$$reg);
    Register src  = as_Register($src$$reg);
    int      flag = $cop$$cmpcode;

    switch(flag) {
      case 0x01: //equal
        __ subu(AT, opr1, opr2);   // AT == 0 iff opr1 == opr2
        __ movz(dst, src, AT);     // dst = src when AT == 0
        break;

      case 0x02: //not_equal
        __ subu(AT, opr1, opr2);
        __ movn(dst, src, AT);     // dst = src when AT != 0
        break;

      case 0x03: //greater
        __ slt(AT, opr2, opr1);
        __ movn(dst, src, AT);
        break;

      case 0x04: //greater_equal
        __ slt(AT, opr1, opr2);
        __ movz(dst, src, AT);
        break;

      case 0x05: //less
        __ slt(AT, opr1, opr2);
        __ movn(dst, src, AT);
        break;

      case 0x06: //less_equal
        __ slt(AT, opr2, opr1);
        __ movz(dst, src, AT);
        break;

      default:
        Unimplemented();
    }
  %}

  ins_pipe( pipe_slow );
%}
// cmovL_cmpN_reg_reg: conditional move of a long, guarded by a narrow-oop (CmpN)
// compare. Narrow oops compare as unsigned 32-bit values, hence cmpOpU and
// sltu for the ordered cases. NOTE: clobbers AT.
8630 instruct cmovL_cmpN_reg_reg(mRegL dst, mRegL src, mRegN tmp1, mRegN tmp2, cmpOpU cop ) %{
8631   match(Set dst (CMoveL (Binary cop (CmpN tmp1 tmp2)) (Binary dst src)));
8632   ins_cost(80);
8633   format %{
8634              "CMPU$cop  $tmp1,$tmp2\t @cmovL_cmpN_reg_reg\n\t"
8635              "CMOV  $dst,$src\t @cmovL_cmpN_reg_reg"
8636          %}
8637   ins_encode %{
8638     Register op1 = $tmp1$$Register;
8639     Register op2 = $tmp2$$Register;
8640     Register dst = $dst$$Register;
8641     Register src = $src$$Register;
8642     int     flag = $cop$$cmpcode;
8644     switch(flag)
8645     {
8646       case 0x01: //equal
8647         __ subu32(AT, op1, op2);
8648         __ movz(dst, src, AT);
8649         break;
8651       case 0x02: //not_equal
8652         __ subu32(AT, op1, op2);
8653         __ movn(dst, src, AT);
8654         break;
8656       case 0x03: //above
8657         __ sltu(AT, op2, op1);
8658         __ movn(dst, src, AT);
8659         break;
8661       case 0x04: //above_equal
8662         __ sltu(AT, op1, op2);
8663         __ movz(dst, src, AT);
8664         break;
8666       case 0x05: //below
8667         __ sltu(AT, op1, op2);
8668         __ movn(dst, src, AT);
8669         break;
8671       case 0x06: //below_equal
8672         __ sltu(AT, op2, op1);
8673         __ movz(dst, src, AT);
8674         break;
8676       default:
8677           Unimplemented();
8678     }
8679   %}
8681   ins_pipe( pipe_slow );
8682 %}
// cmovL_cmpD_reg_reg: conditional move of a long, guarded by a double compare.
// The FP compare (c_*_d) sets the FPU condition flag; movt/movf then move the
// GPR depending on that flag. The predicate for each case is chosen so that its
// negation covers the complementary/unordered outcomes (e.g. "greater" tests
// !(op1 <= op2) via c_ole_d + movf).
// NOTE(review): when an operand is NaN, the ordered c_* compares are false, so
// the movf cases (not_equal/greater/greater_equal) do move — confirm this
// matches the intended CMove semantics for unordered inputs.
8685 instruct cmovL_cmpD_reg_reg(mRegL dst, mRegL src, regD tmp1, regD tmp2, cmpOp cop ) %{
8686   match(Set dst (CMoveL (Binary cop (CmpD tmp1 tmp2)) (Binary dst src)));
8687   ins_cost(80);
8688   format %{
8689              "CMP$cop  $tmp1, $tmp2\t  @cmovL_cmpD_reg_reg\n"
8690              "\tCMOV  $dst,$src \t @cmovL_cmpD_reg_reg"
8691          %}
8692   ins_encode %{
8693     FloatRegister reg_op1 = as_FloatRegister($tmp1$$reg);
8694     FloatRegister reg_op2 = as_FloatRegister($tmp2$$reg);
8695     Register dst = as_Register($dst$$reg);
8696     Register src = as_Register($src$$reg);
8698     int     flag = $cop$$cmpcode;
8700     switch(flag)
8701     {
8702       case 0x01: //equal
8703         __ c_eq_d(reg_op1, reg_op2);
8704         __ movt(dst, src);
8705         break;
8706       case 0x02: //not_equal
8707         __ c_eq_d(reg_op1, reg_op2);
8708         __ movf(dst, src);
8709         break;
8710       case 0x03: //greater
8711         __ c_ole_d(reg_op1, reg_op2);
8712         __ movf(dst, src);
8713         break;
8714       case 0x04: //greater_equal
8715         __ c_olt_d(reg_op1, reg_op2);
8716         __ movf(dst, src);
8717         break;
8718       case 0x05: //less
8719         __ c_ult_d(reg_op1, reg_op2);
8720         __ movt(dst, src);
8721         break;
8722       case 0x06: //less_equal
8723         __ c_ule_d(reg_op1, reg_op2);
8724         __ movt(dst, src);
8725         break;
8726       default:
8727           Unimplemented();
8728     }
8729   %}
8731   ins_pipe( pipe_slow );
8732 %}
// cmovD_cmpD_reg_reg: conditional move of a double, guarded by a double compare.
// There is no FP-to-FP conditional move used here; instead the FP compare sets
// the FPU condition flag and a bc1t/bc1f branch (with a delay-slot nop) skips
// the mov_d when the condition does not hold.
8734 instruct cmovD_cmpD_reg_reg(regD dst, regD src, regD tmp1, regD tmp2, cmpOp cop ) %{
8735   match(Set dst (CMoveD (Binary cop (CmpD tmp1 tmp2)) (Binary dst src)));
8736   ins_cost(200);
8737   format %{
8738              "CMP$cop  $tmp1, $tmp2\t  @cmovD_cmpD_reg_reg\n"
8739              "\tCMOV  $dst,$src \t @cmovD_cmpD_reg_reg"
8740          %}
8741   ins_encode %{
8742     FloatRegister reg_op1 = as_FloatRegister($tmp1$$reg);
8743     FloatRegister reg_op2 = as_FloatRegister($tmp2$$reg);
8744     FloatRegister dst = as_FloatRegister($dst$$reg);
8745     FloatRegister src = as_FloatRegister($src$$reg);
8747     int     flag = $cop$$cmpcode;
8749     Label L;
8751     switch(flag)
8752     {
8753       case 0x01: //equal
8754         __ c_eq_d(reg_op1, reg_op2);
8755         __ bc1f(L);
8756         __ nop();
8757         __ mov_d(dst, src);
8758         __ bind(L);
8759         break;
8760       case 0x02: //not_equal
8761         // 2016/4/19 aoqi: See instruct branchConD_reg_reg. The change in
8762         // branchConD_reg_reg fixed a bug. It seems similar here, so I made
8763         // the same change: test c_eq and branch on true (skip the move when equal).
8762         __ c_eq_d(reg_op1, reg_op2);
8763         __ bc1t(L);
8764         __ nop();
8765         __ mov_d(dst, src);
8766         __ bind(L);
8767         break;
8768       case 0x03: //greater
8769         __ c_ole_d(reg_op1, reg_op2);
8770         __ bc1t(L);
8771         __ nop();
8772         __ mov_d(dst, src);
8773         __ bind(L);
8774         break;
8775       case 0x04: //greater_equal
8776         __ c_olt_d(reg_op1, reg_op2);
8777         __ bc1t(L);
8778         __ nop();
8779         __ mov_d(dst, src);
8780         __ bind(L);
8781         break;
8782       case 0x05: //less
8783         __ c_ult_d(reg_op1, reg_op2);
8784         __ bc1f(L);
8785         __ nop();
8786         __ mov_d(dst, src);
8787         __ bind(L);
8788         break;
8789       case 0x06: //less_equal
8790         __ c_ule_d(reg_op1, reg_op2);
8791         __ bc1f(L);
8792         __ nop();
8793         __ mov_d(dst, src);
8794         __ bind(L);
8795         break;
8796       default:
8797           Unimplemented();
8798     }
8799   %}
8801   ins_pipe( pipe_slow );
8802 %}
// cmovF_cmpI_reg_reg: conditional move of a float, guarded by an int compare.
// Branches around the mov_s on the inverted condition (delay slot filled with
// nop); slt into AT materializes the ordered comparisons. NOTE: clobbers AT.
8804 instruct cmovF_cmpI_reg_reg(regF dst, regF src, mRegI tmp1, mRegI tmp2, cmpOp cop ) %{
8805   match(Set dst (CMoveF (Binary cop (CmpI tmp1 tmp2)) (Binary dst src)));
8806   ins_cost(200);
8807   format %{
8808              "CMP$cop  $tmp1, $tmp2\t  @cmovF_cmpI_reg_reg\n"
8809              "\tCMOV  $dst, $src \t @cmovF_cmpI_reg_reg"
8810          %}
8812   ins_encode %{
8813     Register op1 = $tmp1$$Register;
8814     Register op2 = $tmp2$$Register;
8815     FloatRegister dst = as_FloatRegister($dst$$reg);
8816     FloatRegister src = as_FloatRegister($src$$reg);
8817     int     flag = $cop$$cmpcode;
8818     Label L;
8820     switch(flag)
8821     {
8822       case 0x01: //equal
8823         __ bne(op1, op2, L);
8824         __ nop();
8825         __ mov_s(dst, src);
8826         __ bind(L);
8827         break;
8828       case 0x02: //not_equal
8829         __ beq(op1, op2, L);
8830         __ nop();
8831         __ mov_s(dst, src);
8832         __ bind(L);
8833         break;
8834       case 0x03: //greater
8835         __ slt(AT, op2, op1);
8836         __ beq(AT, R0, L);
8837         __ nop();
8838         __ mov_s(dst, src);
8839         __ bind(L);
8840         break;
8841       case 0x04: //greater_equal
8842         __ slt(AT, op1, op2);
8843         __ bne(AT, R0, L);
8844         __ nop();
8845         __ mov_s(dst, src);
8846         __ bind(L);
8847         break;
8848       case 0x05: //less
8849         __ slt(AT, op1, op2);
8850         __ beq(AT, R0, L);
8851         __ nop();
8852         __ mov_s(dst, src);
8853         __ bind(L);
8854         break;
8855       case 0x06: //less_equal
8856         __ slt(AT, op2, op1);
8857         __ bne(AT, R0, L);
8858         __ nop();
8859         __ mov_s(dst, src);
8860         __ bind(L);
8861         break;
8862       default:
8863           Unimplemented();
8864     }
8865   %}
8867   ins_pipe( pipe_slow );
8868 %}
// cmovD_cmpI_reg_reg: conditional move of a double, guarded by an int compare.
// Identical branch-around scheme to cmovF_cmpI_reg_reg, but with mov_d.
// NOTE: clobbers AT.
8870 instruct cmovD_cmpI_reg_reg(regD dst, regD src, mRegI tmp1, mRegI tmp2, cmpOp cop ) %{
8871   match(Set dst (CMoveD (Binary cop (CmpI tmp1 tmp2)) (Binary dst src)));
8872   ins_cost(200);
8873   format %{
8874              "CMP$cop  $tmp1, $tmp2\t  @cmovD_cmpI_reg_reg\n"
8875              "\tCMOV  $dst, $src \t @cmovD_cmpI_reg_reg"
8876          %}
8878   ins_encode %{
8879     Register op1 = $tmp1$$Register;
8880     Register op2 = $tmp2$$Register;
8881     FloatRegister dst = as_FloatRegister($dst$$reg);
8882     FloatRegister src = as_FloatRegister($src$$reg);
8883     int     flag = $cop$$cmpcode;
8884     Label L;
8886     switch(flag)
8887     {
8888       case 0x01: //equal
8889         __ bne(op1, op2, L);
8890         __ nop();
8891         __ mov_d(dst, src);
8892         __ bind(L);
8893         break;
8894       case 0x02: //not_equal
8895         __ beq(op1, op2, L);
8896         __ nop();
8897         __ mov_d(dst, src);
8898         __ bind(L);
8899         break;
8900       case 0x03: //greater
8901         __ slt(AT, op2, op1);
8902         __ beq(AT, R0, L);
8903         __ nop();
8904         __ mov_d(dst, src);
8905         __ bind(L);
8906         break;
8907       case 0x04: //greater_equal
8908         __ slt(AT, op1, op2);
8909         __ bne(AT, R0, L);
8910         __ nop();
8911         __ mov_d(dst, src);
8912         __ bind(L);
8913         break;
8914       case 0x05: //less
8915         __ slt(AT, op1, op2);
8916         __ beq(AT, R0, L);
8917         __ nop();
8918         __ mov_d(dst, src);
8919         __ bind(L);
8920         break;
8921       case 0x06: //less_equal
8922         __ slt(AT, op2, op1);
8923         __ bne(AT, R0, L);
8924         __ nop();
8925         __ mov_d(dst, src);
8926         __ bind(L);
8927         break;
8928       default:
8929           Unimplemented();
8930     }
8931   %}
8933   ins_pipe( pipe_slow );
8934 %}
// cmovD_cmpP_reg_reg: conditional move of a double, guarded by a pointer compare.
// Same branch-around scheme as cmovD_cmpI_reg_reg. NOTE: clobbers AT.
// NOTE(review): the ordered cases use signed slt on pointer values — presumably
// only eq/ne are generated for CmpP in practice; confirm against the matcher.
8936 instruct cmovD_cmpP_reg_reg(regD dst, regD src, mRegP tmp1, mRegP tmp2, cmpOp cop ) %{
8937   match(Set dst (CMoveD (Binary cop (CmpP tmp1 tmp2)) (Binary dst src)));
8938   ins_cost(200);
8939   format %{
8940              "CMP$cop  $tmp1, $tmp2\t  @cmovD_cmpP_reg_reg\n"
8941              "\tCMOV  $dst, $src \t @cmovD_cmpP_reg_reg"
8942          %}
8944   ins_encode %{
8945     Register op1 = $tmp1$$Register;
8946     Register op2 = $tmp2$$Register;
8947     FloatRegister dst = as_FloatRegister($dst$$reg);
8948     FloatRegister src = as_FloatRegister($src$$reg);
8949     int     flag = $cop$$cmpcode;
8950     Label L;
8952     switch(flag)
8953     {
8954       case 0x01: //equal
8955         __ bne(op1, op2, L);
8956         __ nop();
8957         __ mov_d(dst, src);
8958         __ bind(L);
8959         break;
8960       case 0x02: //not_equal
8961         __ beq(op1, op2, L);
8962         __ nop();
8963         __ mov_d(dst, src);
8964         __ bind(L);
8965         break;
8966       case 0x03: //greater
8967         __ slt(AT, op2, op1);
8968         __ beq(AT, R0, L);
8969         __ nop();
8970         __ mov_d(dst, src);
8971         __ bind(L);
8972         break;
8973       case 0x04: //greater_equal
8974         __ slt(AT, op1, op2);
8975         __ bne(AT, R0, L);
8976         __ nop();
8977         __ mov_d(dst, src);
8978         __ bind(L);
8979         break;
8980       case 0x05: //less
8981         __ slt(AT, op1, op2);
8982         __ beq(AT, R0, L);
8983         __ nop();
8984         __ mov_d(dst, src);
8985         __ bind(L);
8986         break;
8987       case 0x06: //less_equal
8988         __ slt(AT, op2, op1);
8989         __ bne(AT, R0, L);
8990         __ nop();
8991         __ mov_d(dst, src);
8992         __ bind(L);
8993         break;
8994       default:
8995           Unimplemented();
8996     }
8997   %}
8999   ins_pipe( pipe_slow );
9000 %}
9002 //FIXME
// cmovI_cmpF_reg_reg: conditional move of an int, guarded by a float compare.
// FP compare sets the FPU condition flag; movt/movf conditionally move the GPR.
// Same predicate-inversion pattern as cmovL_cmpD_reg_reg.
// NOTE(review): as with the double variant, unordered (NaN) inputs take the
// false path of the ordered c_* compares — verify against CMove semantics.
9003 instruct cmovI_cmpF_reg_reg(mRegI dst, mRegI src, regF tmp1, regF tmp2, cmpOp cop ) %{
9004   match(Set dst (CMoveI (Binary cop (CmpF tmp1 tmp2)) (Binary dst src)));
9005   ins_cost(80);
9006   format %{
9007              "CMP$cop  $tmp1, $tmp2\t  @cmovI_cmpF_reg_reg\n"
9008              "\tCMOV  $dst,$src \t @cmovI_cmpF_reg_reg"
9009          %}
9011   ins_encode %{
9012     FloatRegister reg_op1 = $tmp1$$FloatRegister;
9013     FloatRegister reg_op2 = $tmp2$$FloatRegister;
9014     Register dst = $dst$$Register;
9015     Register src = $src$$Register;
9016     int    flag = $cop$$cmpcode;
9018     switch(flag)
9019     {
9020       case 0x01: //equal
9021         __ c_eq_s(reg_op1, reg_op2);
9022         __ movt(dst, src);
9023         break;
9024       case 0x02: //not_equal
9025         __ c_eq_s(reg_op1, reg_op2);
9026         __ movf(dst, src);
9027         break;
9028       case 0x03: //greater
9029         __ c_ole_s(reg_op1, reg_op2);
9030         __ movf(dst, src);
9031         break;
9032       case 0x04: //greater_equal
9033         __ c_olt_s(reg_op1, reg_op2);
9034         __ movf(dst, src);
9035         break;
9036       case 0x05: //less
9037         __ c_ult_s(reg_op1, reg_op2);
9038         __ movt(dst, src);
9039         break;
9040       case 0x06: //less_equal
9041         __ c_ule_s(reg_op1, reg_op2);
9042         __ movt(dst, src);
9043         break;
9044       default:
9045           Unimplemented();
9046     }
9047   %}
9048   ins_pipe( pipe_slow );
9049 %}
// cmovF_cmpF_reg_reg: conditional move of a float, guarded by a float compare.
// FP compare sets the FPU condition flag; a bc1t/bc1f branch (delay slot = nop)
// skips the mov_s when the condition does not hold. Mirrors cmovD_cmpD_reg_reg.
9051 instruct cmovF_cmpF_reg_reg(regF dst, regF src, regF tmp1, regF tmp2, cmpOp cop ) %{
9052   match(Set dst (CMoveF (Binary cop (CmpF tmp1 tmp2)) (Binary dst src)));
9053   ins_cost(200);
9054   format %{
9055              "CMP$cop  $tmp1, $tmp2\t  @cmovF_cmpF_reg_reg\n"
9056              "\tCMOV  $dst,$src \t @cmovF_cmpF_reg_reg"
9057          %}
9059   ins_encode %{
9060     FloatRegister reg_op1 = $tmp1$$FloatRegister;
9061     FloatRegister reg_op2 = $tmp2$$FloatRegister;
9062     FloatRegister dst = $dst$$FloatRegister;
9063     FloatRegister src = $src$$FloatRegister;
9064     Label L;
9065     int    flag = $cop$$cmpcode;
9067     switch(flag)
9068     {
9069       case 0x01: //equal
9070         __ c_eq_s(reg_op1, reg_op2);
9071         __ bc1f(L);
9072         __ nop();
9073         __ mov_s(dst, src);
9074         __ bind(L);
9075         break;
9076       case 0x02: //not_equal
9077         __ c_eq_s(reg_op1, reg_op2);
9078         __ bc1t(L);
9079         __ nop();
9080         __ mov_s(dst, src);
9081         __ bind(L);
9082         break;
9083       case 0x03: //greater
9084         __ c_ole_s(reg_op1, reg_op2);
9085         __ bc1t(L);
9086         __ nop();
9087         __ mov_s(dst, src);
9088         __ bind(L);
9089         break;
9090       case 0x04: //greater_equal
9091         __ c_olt_s(reg_op1, reg_op2);
9092         __ bc1t(L);
9093         __ nop();
9094         __ mov_s(dst, src);
9095         __ bind(L);
9096         break;
9097       case 0x05: //less
9098         __ c_ult_s(reg_op1, reg_op2);
9099         __ bc1f(L);
9100         __ nop();
9101         __ mov_s(dst, src);
9102         __ bind(L);
9103         break;
9104       case 0x06: //less_equal
9105         __ c_ule_s(reg_op1, reg_op2);
9106         __ bc1f(L);
9107         __ nop();
9108         __ mov_s(dst, src);
9109         __ bind(L);
9110         break;
9111       default:
9112           Unimplemented();
9113     }
9114   %}
9115   ins_pipe( pipe_slow );
9116 %}
9118 // Manifest a CmpL result in an integer register.  Very painful.
9119 // This is the test to avoid.
// cmpL3_reg_reg: dst = -1 / 0 / +1 for src1 <, ==, > src2 (64-bit signed).
// The bltz's delay slot unconditionally sets dst = -1, so the "less" path
// branches straight to Done with dst already correct; otherwise dst is set
// to 1 and movz zeroes it when AT (the difference) is zero.
// NOTE: clobbers AT.
9120 instruct cmpL3_reg_reg(mRegI dst, mRegL src1, mRegL src2) %{
9121   match(Set dst (CmpL3 src1 src2));
9122   ins_cost(1000);
9123   format %{ "cmpL3  $dst, $src1, $src2 @ cmpL3_reg_reg" %}
9124   ins_encode %{
9125     Register opr1 = as_Register($src1$$reg);
9126     Register opr2 = as_Register($src2$$reg);
9127     Register dst  = as_Register($dst$$reg);
9129     Label Done;
9131     __ subu(AT, opr1, opr2);
9132     __ bltz(AT, Done);
9133     __ delayed()->daddiu(dst, R0, -1);
9135     __ move(dst, 1);
9136     __ movz(dst, R0, AT);
9138     __ bind(Done);
9139   %}
9140   ins_pipe( pipe_slow );
9141 %}
9143 //
9144 // less_result     = -1
9145 // greater_result  =  1
9146 // equal_result    =  0
9147 // nan_result      = -1
9148 //
// cmpF3_reg_reg: three-way float compare (CmpF3) into an int register.
// c_ult_s is true for "less OR unordered", so NaN takes the -1 path via the
// bc1t delay slot; otherwise dst = 1, and movt zeroes it when c_eq_s is true.
9149 instruct cmpF3_reg_reg(mRegI dst, regF src1, regF src2) %{
9150   match(Set dst (CmpF3 src1 src2));
9151   ins_cost(1000);
9152   format %{ "cmpF3  $dst, $src1, $src2 @ cmpF3_reg_reg" %}
9153   ins_encode %{
9154     FloatRegister src1 = as_FloatRegister($src1$$reg);
9155     FloatRegister src2 = as_FloatRegister($src2$$reg);
9156     Register dst = as_Register($dst$$reg);
9158     Label Done;
9160     __ c_ult_s(src1, src2);
9161     __ bc1t(Done);
9162     __ delayed()->daddiu(dst, R0, -1);
9164     __ c_eq_s(src1, src2);
9165     __ move(dst, 1);
9166     __ movt(dst, R0);
9168     __ bind(Done);
9169   %}
9170   ins_pipe( pipe_slow );
9171 %}
// cmpD3_reg_reg: three-way double compare (CmpD3) into an int register.
// Same scheme as cmpF3_reg_reg: NaN and "less" both yield -1 (via c_ult_d),
// equality zeroes the provisional 1 with movt.
9173 instruct cmpD3_reg_reg(mRegI dst, regD src1, regD src2) %{
9174   match(Set dst (CmpD3 src1 src2));
9175   ins_cost(1000);
9176   format %{ "cmpD3  $dst, $src1, $src2 @ cmpD3_reg_reg" %}
9177   ins_encode %{
9178     FloatRegister src1 = as_FloatRegister($src1$$reg);
9179     FloatRegister src2 = as_FloatRegister($src2$$reg);
9180     Register dst = as_Register($dst$$reg);
9182     Label Done;
9184     __ c_ult_d(src1, src2);
9185     __ bc1t(Done);
9186     __ delayed()->daddiu(dst, R0, -1);
9188     __ c_eq_d(src1, src2);
9189     __ move(dst, 1);
9190     __ movt(dst, R0);
9192     __ bind(Done);
9193   %}
9194   ins_pipe( pipe_slow );
9195 %}
// clear_array: zero-fill $cnt doublewords starting at $base.
// Simple store loop: T9 counts down from $cnt, AT walks the addresses.
// NOTE: clobbers AT and T9.
9197 instruct clear_array(mRegL cnt, mRegP base, Universe dummy) %{
9198   match(Set dummy (ClearArray cnt base));
9199   format %{ "CLEAR_ARRAY base = $base, cnt = $cnt # Clear doublewords" %}
9200   ins_encode %{
9201     //Assume cnt is the number of bytes in an array to be cleared,
9202     //and base points to the starting address of the array.
9203     Register base = $base$$Register;
9204     Register num  = $cnt$$Register;
9205     Label Loop, done;
9207     /* 2012/9/21 Jin: according to X86, $cnt is calculated by doublewords(8 bytes) */
9208     __ move(T9, num); /* T9 = words */
9209     __ beq(T9, R0, done);
9210     __ nop();
9211     __ move(AT, base);
9213     __ bind(Loop);
9214     __ sd(R0, Address(AT, 0));
9215     __ daddi(AT, AT, wordSize);
9216     __ daddi(T9, T9, -1);
9217     __ bne(T9, R0, Loop);
9218     __ delayed()->nop();
9219     __ bind(done);
9220   %}
9221   ins_pipe( pipe_slow );
9222 %}
// string_compare: StrComp intrinsic — lexicographic comparison of two char[]
// regions. Returns the difference of the first mismatching chars, or the
// length difference if one string is a prefix of the other.
// cnt1 is reduced to min(cnt1, cnt2); cnt2 is then reused as a scratch holding
// the current char of str2, while AT holds the current char of str1.
// All four inputs are USE_KILL (destroyed). NOTE: clobbers AT.
9224 instruct string_compare(a4_RegP str1, mA5RegI cnt1, a6_RegP str2, mA7RegI cnt2, no_Ax_mRegI result) %{
9225   match(Set result (StrComp (Binary str1 cnt1) (Binary str2 cnt2)));
9226   effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2);
9228   format %{ "String Compare $str1[len: $cnt1], $str2[len: $cnt2] -> $result @ string_compare" %}
9229   ins_encode %{
9230     // Get the first character position in both strings
9231     //         [8] char array, [12] offset, [16] count
9232     Register str1   = $str1$$Register;
9233     Register str2   = $str2$$Register;
9234     Register cnt1   = $cnt1$$Register;
9235     Register cnt2   = $cnt2$$Register;
9236     Register result = $result$$Register;
9238     Label L, Loop, haveResult, done;
9240     // compute the and difference of lengths (in result)
9241     __ subu(result, cnt1, cnt2); // result holds the difference of two lengths
9243     // compute the shorter length (in cnt1)
9244     __ slt(AT, cnt2, cnt1);
9245     __ movn(cnt1, cnt2, AT);
9247     // Now the shorter length is in cnt1 and cnt2 can be used as a tmp register
9248     __ bind(Loop);                        // Loop begin
      // loop exit: all min-length chars equal -> result already holds the length diff.
      // Delay slot preloads the current char of str1 into AT.
9249     __ beq(cnt1, R0, done);
9250     __ delayed()->lhu(AT, str1, 0);;
9252     // compare current character
9253     __ lhu(cnt2, str2, 0);
9254     __ bne(AT, cnt2, haveResult);
9255     __ delayed()->addi(str1, str1, 2);
9256     __ addi(str2, str2, 2);
9257     __ b(Loop);
9258     __ delayed()->addi(cnt1, cnt1, -1);   // Loop end
9260     __ bind(haveResult);
      // mismatch: result = char-of-str1 - char-of-str2
9261     __ subu(result, AT, cnt2);
9263     __ bind(done);
9264   %}
9266   ins_pipe( pipe_slow );
9267 %}
9269 // intrinsic optimization
// string_equals: StrEquals intrinsic — result = 1 if the two char[] regions of
// length $cnt are identical, else 0. Short-circuits when both pointers are the
// same array. The daddiu after each beq sits in the branch delay slot, so
// result is (re)set to 1 on the taken paths; a mismatch overwrites it with 0
// in the bne's delay slot. All inputs are USE_KILL/KILL. NOTE: clobbers AT.
9270 instruct string_equals(a4_RegP str1, a5_RegP str2, mA6RegI cnt, mA7RegI temp, no_Ax_mRegI result) %{
9271   match(Set result (StrEquals (Binary str1 str2) cnt));
9272   effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt, KILL temp);
9274   format %{ "String Equal $str1, $str2, len:$cnt  tmp:$temp -> $result @ string_equals" %}
9275   ins_encode %{
9276     // Get the first character position in both strings
9277     //         [8] char array, [12] offset, [16] count
9278     Register str1   = $str1$$Register;
9279     Register str2   = $str2$$Register;
9280     Register cnt    = $cnt$$Register;
9281     Register tmp    = $temp$$Register;
9282     Register result = $result$$Register;
9284     Label Loop, done;
9287     __ beq(str1, str2, done);  // same char[] ?
9288     __ daddiu(result, R0, 1);
9290     __ bind(Loop);             // Loop begin
9291     __ beq(cnt, R0, done);
9292     __ daddiu(result, R0, 1);  // count == 0
9294     // compare current character
9295     __ lhu(AT, str1, 0);;
9296     __ lhu(tmp, str2, 0);
9297     __ bne(AT, tmp, done);
9298     __ delayed()->daddi(result, R0, 0);
9299     __ addi(str1, str1, 2);
9300     __ addi(str2, str2, 2);
9301     __ b(Loop);
9302     __ delayed()->addi(cnt, cnt, -1);  // Loop end
9304     __ bind(done);
9305   %}
9307   ins_pipe( pipe_slow );
9308 %}
9310 //----------Arithmetic Instructions-------------------------------------------
9311 //----------Addition Instructions---------------------------------------------
// addI_Reg_Reg: 32-bit integer add (addu32 truncates/sign-extends per MIPS64
// 32-bit arithmetic conventions).
9312 instruct addI_Reg_Reg(mRegI dst, mRegI src1, mRegI src2) %{
9313   match(Set dst (AddI src1 src2));
9315   format %{ "add  $dst, $src1, $src2 #@addI_Reg_Reg" %}
9316   ins_encode %{
9317     Register  dst = $dst$$Register;
9318     Register src1 = $src1$$Register;
9319     Register src2 = $src2$$Register;
9320     __ addu32(dst, src1, src2);
9321   %}
9322   ins_pipe( ialu_regI_regI );
9323 %}
// addI_Reg_imm: 32-bit add with an immediate. Uses a single addiu32 when the
// constant fits in a signed 16-bit field; otherwise materializes it in AT
// first. NOTE: may clobber AT.
9325 instruct addI_Reg_imm(mRegI dst, mRegI src1,  immI src2) %{
9326   match(Set dst (AddI src1 src2));
9328   format %{ "add  $dst, $src1, $src2 #@addI_Reg_imm" %}
9329   ins_encode %{
9330     Register  dst = $dst$$Register;
9331     Register src1 = $src1$$Register;
9332     int       imm = $src2$$constant;
9334     if(Assembler::is_simm16(imm)) {
9335        __ addiu32(dst, src1, imm);
9336     } else {
9337        __ move(AT, imm);
9338        __ addu32(dst, src1, AT);
9339     }
9340   %}
9341   ins_pipe( ialu_regI_regI );
9342 %}
// addP_reg_reg: pointer + 64-bit offset, full-width daddu.
9344 instruct addP_reg_reg(mRegP dst, mRegP src1, mRegL src2) %{
9345   match(Set dst (AddP src1 src2));
9347   format %{ "dadd  $dst, $src1, $src2 #@addP_reg_reg" %}
9349   ins_encode %{
9350     Register  dst = $dst$$Register;
9351     Register src1 = $src1$$Register;
9352     Register src2 = $src2$$Register;
9353     __ daddu(dst, src1, src2);
9354   %}
9356   ins_pipe( ialu_regI_regI );
9357 %}
// addP_reg_reg_convI2L: pointer + sign-extended int offset. The int register
// is added directly with daddu (the ConvI2L is folded into this match rule).
9359 instruct addP_reg_reg_convI2L(mRegP dst, mRegP src1, mRegI src2) %{
9360   match(Set dst (AddP src1 (ConvI2L src2)));
9362   format %{ "dadd  $dst, $src1, $src2 #@addP_reg_reg_convI2L" %}
9364   ins_encode %{
9365     Register  dst = $dst$$Register;
9366     Register src1 = $src1$$Register;
9367     Register src2 = $src2$$Register;
9368     __ daddu(dst, src1, src2);
9369   %}
9371   ins_pipe( ialu_regI_regI );
9372 %}
// addP_reg_imm: pointer + 64-bit immediate. daddiu for simm16 constants,
// otherwise set64 into AT then daddu. NOTE: may clobber AT.
9374 instruct addP_reg_imm(mRegP dst, mRegP src1, immL src2) %{
9375   match(Set dst (AddP src1 src2));
9377   format %{ "daddi  $dst, $src1, $src2 #@addP_reg_imm" %}
9378   ins_encode %{
9379     Register src1 = $src1$$Register;
9380     long      src2 = $src2$$constant;
9381     Register  dst = $dst$$Register;
9383     if(Assembler::is_simm16(src2)) {
9384        __ daddiu(dst, src1, src2);
9385     } else {
9386        __ set64(AT, src2);
9387        __ daddu(dst, src1, AT);
9388     }
9389   %}
9390   ins_pipe( ialu_regI_imm16 );
9391 %}
9393 // Add Long Register with Register
// addL_Reg_Reg: 64-bit long add, full-width daddu.
9394 instruct addL_Reg_Reg(mRegL dst, mRegL src1, mRegL src2) %{
9395   match(Set dst (AddL src1 src2));
9396   ins_cost(200);
9397   format %{ "ADD  $dst, $src1, $src2 #@addL_Reg_Reg\t" %}
9399   ins_encode %{
9400     Register dst_reg = as_Register($dst$$reg);
9401     Register src1_reg = as_Register($src1$$reg);
9402     Register src2_reg = as_Register($src2$$reg);
9404     __ daddu(dst_reg, src1_reg, src2_reg);
9405   %}
9407   ins_pipe( ialu_regL_regL );
9408 %}
// addL_Reg_imm: long + immediate. The immL16 operand guarantees the constant
// fits in daddiu's signed 16-bit immediate field, so no scratch is needed.
9410 instruct addL_Reg_imm(mRegL dst, mRegL src1, immL16 src2)
9411 %{
9412   match(Set dst (AddL src1 src2));
9414   format %{ "ADD  $dst, $src1, $src2 #@addL_Reg_imm " %}
9415   ins_encode %{
9416     Register dst_reg  = as_Register($dst$$reg);
9417     Register src1_reg = as_Register($src1$$reg);
9418     int      src2_imm = $src2$$constant;
9420     __ daddiu(dst_reg, src1_reg, src2_imm);
9421   %}
9423   ins_pipe( ialu_regL_regL );
9424 %}
// addL_RegI2L_imm: (long)int + immL16 immediate. The int source register is
// used directly; its value is treated as already sign-extended (ConvI2L is
// folded into the match rule).
9426 instruct addL_RegI2L_imm(mRegL dst, mRegI src1, immL16 src2)
9427 %{
9428   match(Set dst (AddL (ConvI2L src1) src2));
9430   format %{ "ADD  $dst, $src1, $src2 #@addL_RegI2L_imm " %}
9431   ins_encode %{
9432     Register dst_reg  = as_Register($dst$$reg);
9433     Register src1_reg = as_Register($src1$$reg);
9434     int      src2_imm = $src2$$constant;
9436     __ daddiu(dst_reg, src1_reg, src2_imm);
9437   %}
9439   ins_pipe( ialu_regL_regL );
9440 %}
// addL_RegI2L_Reg: (long)int + long, folded ConvI2L on the first operand.
9442 instruct addL_RegI2L_Reg(mRegL dst, mRegI src1, mRegL src2) %{
9443   match(Set dst (AddL (ConvI2L src1) src2));
9444   ins_cost(200);
9445   format %{ "ADD  $dst, $src1, $src2 #@addL_RegI2L_Reg\t" %}
9447   ins_encode %{
9448     Register dst_reg  = as_Register($dst$$reg);
9449     Register src1_reg = as_Register($src1$$reg);
9450     Register src2_reg = as_Register($src2$$reg);
9452     __ daddu(dst_reg, src1_reg, src2_reg);
9453   %}
9455   ins_pipe( ialu_regL_regL );
9456 %}
// addL_RegI2L_RegI2L: (long)int + (long)int, both ConvI2L conversions folded.
9458 instruct addL_RegI2L_RegI2L(mRegL dst, mRegI src1, mRegI src2) %{
9459   match(Set dst (AddL (ConvI2L src1) (ConvI2L src2)));
9460   ins_cost(200);
9461   format %{ "ADD  $dst, $src1, $src2 #@addL_RegI2L_RegI2L\t" %}
9463   ins_encode %{
9464     Register dst_reg  = as_Register($dst$$reg);
9465     Register src1_reg = as_Register($src1$$reg);
9466     Register src2_reg = as_Register($src2$$reg);
9468     __ daddu(dst_reg, src1_reg, src2_reg);
9469   %}
9471   ins_pipe( ialu_regL_regL );
9472 %}
// addL_Reg_RegI2L: long + (long)int, folded ConvI2L on the second operand.
9474 instruct addL_Reg_RegI2L(mRegL dst, mRegL src1, mRegI src2) %{
9475   match(Set dst (AddL src1 (ConvI2L src2)));
9476   ins_cost(200);
9477   format %{ "ADD  $dst, $src1, $src2 #@addL_Reg_RegI2L\t" %}
9479   ins_encode %{
9480     Register dst_reg  = as_Register($dst$$reg);
9481     Register src1_reg = as_Register($src1$$reg);
9482     Register src2_reg = as_Register($src2$$reg);
9484     __ daddu(dst_reg, src1_reg, src2_reg);
9485   %}
9487   ins_pipe( ialu_regL_regL );
9488 %}
9490 //----------Subtraction Instructions-------------------------------------------
9491 // Integer Subtraction Instructions
// subI_Reg_Reg: 32-bit integer subtract.
9492 instruct subI_Reg_Reg(mRegI dst, mRegI src1, mRegI src2) %{
9493   match(Set dst (SubI src1 src2));
9494   ins_cost(100);
9496   format %{ "sub  $dst, $src1, $src2 #@subI_Reg_Reg" %}
9497   ins_encode %{
9498     Register  dst = $dst$$Register;
9499     Register src1 = $src1$$Register;
9500     Register src2 = $src2$$Register;
9501     __ subu32(dst, src1, src2);
9502   %}
9503   ins_pipe( ialu_regI_regI );
9504 %}
// subI_Reg_immI16_sub: subtract a constant by adding its negation
// (addiu32 with -imm); the immI16_sub operand type guarantees -imm
// still fits the signed 16-bit immediate field.
9506 instruct subI_Reg_immI16_sub(mRegI dst, mRegI src1, immI16_sub src2) %{
9507   match(Set dst (SubI src1 src2));
9508   ins_cost(80);
9510   format %{ "sub  $dst, $src1, $src2 #@subI_Reg_immI16_sub" %}
9511   ins_encode %{
9512     Register  dst = $dst$$Register;
9513     Register src1 = $src1$$Register;
9514     __ addiu32(dst, src1, -1 * $src2$$constant);
9515   %}
9516   ins_pipe( ialu_regI_regI );
9517 %}
// negI_Reg: 32-bit negation, matched as (SubI 0 src) -> subu32 from R0.
9519 instruct negI_Reg(mRegI dst, immI0 zero, mRegI src) %{
9520   match(Set dst (SubI zero src));
9521   ins_cost(80);
9523   format %{ "neg  $dst, $src #@negI_Reg" %}
9524   ins_encode %{
9525     Register  dst = $dst$$Register;
9526     Register  src = $src$$Register;
9527     __ subu32(dst, R0, src);
9528   %}
9529   ins_pipe( ialu_regI_regI );
9530 %}
// negL_Reg: 64-bit negation, matched as (SubL 0 src) -> subu from R0.
9532 instruct negL_Reg(mRegL dst, immL0 zero, mRegL src) %{
9533   match(Set dst (SubL zero src));
9534   ins_cost(80);
9536   format %{ "neg  $dst, $src #@negL_Reg" %}
9537   ins_encode %{
9538     Register  dst = $dst$$Register;
9539     Register  src = $src$$Register;
9540     __ subu(dst, R0, src);
9541   %}
9542   ins_pipe( ialu_regI_regI );
9543 %}
// subL_Reg_immL16_sub: long subtract of a constant via daddiu with the
// negated immediate (immL16_sub guarantees -imm is a valid simm16).
9545 instruct subL_Reg_immL16_sub(mRegL dst, mRegL src1, immL16_sub src2) %{
9546   match(Set dst (SubL src1 src2));
9547   ins_cost(80);
9549   format %{ "sub  $dst, $src1, $src2 #@subL_Reg_immL16_sub" %}
9550   ins_encode %{
9551     Register  dst = $dst$$Register;
9552     Register src1 = $src1$$Register;
9553     __ daddiu(dst, src1, -1 * $src2$$constant);
9554   %}
9555   ins_pipe( ialu_regI_regI );
9556 %}
9558 // Subtract Long Register with Register.
// subL_Reg_Reg: 64-bit long subtract, full-width subu.
9559 instruct subL_Reg_Reg(mRegL dst, mRegL src1, mRegL src2) %{
9560   match(Set dst (SubL src1 src2));
9561   ins_cost(100);
9562   format %{ "SubL    $dst, $src1, $src2 @ subL_Reg_Reg" %}
9563   ins_encode %{
9564     Register  dst = as_Register($dst$$reg);
9565     Register src1 = as_Register($src1$$reg);
9566     Register src2 = as_Register($src2$$reg);
9568     __ subu(dst, src1, src2);
9569   %}
9570   ins_pipe( ialu_regL_regL );
9571 %}
// subL_Reg_RegI2L: long - (long)int, ConvI2L folded into the match rule.
9573 instruct subL_Reg_RegI2L(mRegL dst, mRegL src1, mRegI src2) %{
9574   match(Set dst (SubL src1 (ConvI2L src2)));
9575   ins_cost(100);
9576   format %{ "SubL    $dst, $src1, $src2 @ subL_Reg_RegI2L" %}
9577   ins_encode %{
9578     Register  dst = as_Register($dst$$reg);
9579     Register src1 = as_Register($src1$$reg);
9580     Register src2 = as_Register($src2$$reg);
9582     __ subu(dst, src1, src2);
9583   %}
9584   ins_pipe( ialu_regL_regL );
9585 %}
// subL_RegI2L_Reg: (long)int - long, ConvI2L folded on the first operand.
9587 instruct subL_RegI2L_Reg(mRegL dst, mRegI src1, mRegL src2) %{
9588   match(Set dst (SubL (ConvI2L src1) src2));
9589   ins_cost(200);
9590   format %{ "SubL    $dst, $src1, $src2 @ subL_RegI2L_Reg" %}
9591   ins_encode %{
9592     Register  dst = as_Register($dst$$reg);
9593     Register src1 = as_Register($src1$$reg);
9594     Register src2 = as_Register($src2$$reg);
9596     __ subu(dst, src1, src2);
9597   %}
9598   ins_pipe( ialu_regL_regL );
9599 %}
// subL_RegI2L_RegI2L: (long)int - (long)int, both conversions folded.
9601 instruct subL_RegI2L_RegI2L(mRegL dst, mRegI src1, mRegI src2) %{
9602   match(Set dst (SubL (ConvI2L src1) (ConvI2L src2)));
9603   ins_cost(200);
9604   format %{ "SubL    $dst, $src1, $src2 @ subL_RegI2L_RegI2L" %}
9605   ins_encode %{
9606     Register  dst = as_Register($dst$$reg);
9607     Register src1 = as_Register($src1$$reg);
9608     Register src2 = as_Register($src2$$reg);
9610     __ subu(dst, src1, src2);
9611   %}
9612   ins_pipe( ialu_regL_regL );
9613 %}
9615 // Integer MOD with Register
// modI_Reg_Reg: 32-bit integer remainder via div + mfhi (HI holds the
// remainder). The Loongson gsmod path is intentionally disabled — see the
// measurement note below.
9616 instruct modI_Reg_Reg(mRegI dst, mRegI src1, mRegI src2) %{
9617   match(Set dst (ModI src1 src2));
9618   ins_cost(300);
9619   format %{ "modi  $dst, $src1, $src2 @ modI_Reg_Reg" %}
9620   ins_encode %{
9621     Register  dst = $dst$$Register;
9622     Register src1 = $src1$$Register;
9623     Register src2 = $src2$$Register;
9625     //if (UseLoongsonISA) {
9626     if (0) {
9627       // 2016.08.10
9628       // Experiments show that gsmod is slower than div+mfhi.
9629       // So I just disable it here.
9630       __ gsmod(dst, src1, src2);
9631     } else {
9632       __ div(src1, src2);
9633       __ mfhi(dst);
9634     }
9635   %}
9637   //ins_pipe( ialu_mod );
9638   ins_pipe( ialu_regI_regI );
9639 %}
// modL_reg_reg: 64-bit long remainder. Uses the single Loongson gsdmod when
// available, otherwise ddiv + mfhi (HI holds the remainder).
9641 instruct modL_reg_reg(mRegL dst, mRegL src1, mRegL src2) %{
9642   match(Set dst (ModL src1 src2));
9643   format %{ "modL  $dst, $src1, $src2 @modL_reg_reg" %}
9645   ins_encode %{
9646     Register dst = as_Register($dst$$reg);
9647     Register op1 = as_Register($src1$$reg);
9648     Register op2 = as_Register($src2$$reg);
9650     if (UseLoongsonISA) {
9651       __ gsdmod(dst, op1, op2);
9652     } else {
9653       __ ddiv(op1, op2);
9654       __ mfhi(dst);
9655     }
9656   %}
9657   ins_pipe( pipe_slow );
9658 %}
// mulI_Reg_Reg: 32-bit integer multiply using the three-operand mul form
// (result written directly, no HI/LO access needed).
9660 instruct mulI_Reg_Reg(mRegI dst, mRegI src1, mRegI src2) %{
9661   match(Set dst (MulI src1 src2));
9663   ins_cost(300);
9664   format %{ "mul   $dst, $src1, $src2 @ mulI_Reg_Reg" %}
9665   ins_encode %{
9666      Register src1 = $src1$$Register;
9667      Register src2 = $src2$$Register;
9668      Register dst  = $dst$$Register;
9670      __ mul(dst, src1, src2);
9671   %}
9672   ins_pipe( ialu_mult );
9673 %}
// maddI_Reg_Reg: fused multiply-add, dst = src1 * src2 + src3.
// src3 is preloaded into LO (mtlo), madd accumulates src1*src2 into HI/LO,
// and mflo reads back the low word — sufficient for the 32-bit int result.
9675 instruct maddI_Reg_Reg(mRegI dst, mRegI src1, mRegI src2, mRegI src3) %{
9676   match(Set dst (AddI (MulI src1 src2) src3));
9678   ins_cost(999);
9679   format %{ "madd   $dst, $src1 * $src2 + $src3 #@maddI_Reg_Reg" %}
9680   ins_encode %{
9681      Register src1 = $src1$$Register;
9682      Register src2 = $src2$$Register;
9683      Register src3 = $src3$$Register;
9684      Register dst  = $dst$$Register;
9686      __ mtlo(src3);
9687      __ madd(src1, src2);
9688      __ mflo(dst);
9689   %}
9690   ins_pipe( ialu_mult );
9691 %}
// divI_Reg_Reg: 32-bit integer division. MIPS div does not fault on a zero
// divisor, so teq explicitly raises a trap (code 0x7) when src2 == 0 so the
// JVM can throw ArithmeticException. gsdiv writes dst directly; the generic
// path uses div + mflo, with two nops covering the HI/LO read hazard.
9693 instruct divI_Reg_Reg(mRegI dst, mRegI src1, mRegI src2) %{
9694   match(Set dst (DivI src1 src2));
9696   ins_cost(300);
9697   format %{ "div   $dst, $src1, $src2 @ divI_Reg_Reg" %}
9698   ins_encode %{
9699      Register src1 = $src1$$Register;
9700      Register src2 = $src2$$Register;
9701      Register dst  = $dst$$Register;
9703      /* 2012/4/21 Jin: In MIPS, div does not cause exception.
9704         We must trap an exception manually. */
9705      __ teq(R0, src2, 0x7);
9707      if (UseLoongsonISA) {
9708        __ gsdiv(dst, src1, src2);
9709      } else {
9710        __ div(src1, src2);
9712        __ nop();
9713        __ nop();
9714        __ mflo(dst);
9715      }
9716   %}
9717   ins_pipe( ialu_mod );
9718 %}
// divF_Reg_Reg: single-precision FP divide. No explicit zero-divisor trap:
// IEEE FP division by zero yields Inf/NaN rather than a Java exception.
9720 instruct divF_Reg_Reg(regF dst, regF src1, regF src2) %{
9721   match(Set dst (DivF src1 src2));
9723   ins_cost(300);
9724   format %{ "divF   $dst, $src1, $src2 @ divF_Reg_Reg" %}
9725   ins_encode %{
9726      FloatRegister src1 = $src1$$FloatRegister;
9727      FloatRegister src2 = $src2$$FloatRegister;
9728      FloatRegister dst  = $dst$$FloatRegister;
9730      /* Here do we need to trap an exception manually ? */
9731      __ div_s(dst, src1, src2);
9732   %}
9733   ins_pipe( pipe_slow );
9734 %}
// divD_Reg_Reg: double-precision FP divide; same rationale as divF_Reg_Reg.
9736 instruct divD_Reg_Reg(regD dst, regD src1, regD src2) %{
9737   match(Set dst (DivD src1 src2));
9739   ins_cost(300);
9740   format %{ "divD   $dst, $src1, $src2 @ divD_Reg_Reg" %}
9741   ins_encode %{
9742      FloatRegister src1 = $src1$$FloatRegister;
9743      FloatRegister src2 = $src2$$FloatRegister;
9744      FloatRegister dst  = $dst$$FloatRegister;
9746      /* Here do we need to trap an exception manually ? */
9747      __ div_d(dst, src1, src2);
9748   %}
9749   ins_pipe( pipe_slow );
9750 %}
// mulL_reg_reg: 64-bit long multiply. Loongson gsdmult writes dst directly;
// generic MIPS uses dmult (result in HI/LO) then mflo for the low 64 bits.
9752 instruct mulL_reg_reg(mRegL dst, mRegL src1, mRegL src2) %{
9753   match(Set dst (MulL src1 src2));
9754   format %{ "mulL  $dst, $src1, $src2 @mulL_reg_reg" %}
9755   ins_encode %{
9756     Register dst = as_Register($dst$$reg);
9757     Register op1 = as_Register($src1$$reg);
9758     Register op2 = as_Register($src2$$reg);
9760     if (UseLoongsonISA) {
9761       __ gsdmult(dst, op1, op2);
9762     } else {
9763       __ dmult(op1, op2);
9764       __ mflo(dst);
9765     }
9766   %}
9767   ins_pipe( pipe_slow );
9768 %}
// mulL_reg_regI2L: long * (long)int, ConvI2L folded; otherwise identical to
// mulL_reg_reg.
9770 instruct mulL_reg_regI2L(mRegL dst, mRegL src1, mRegI src2) %{
9771   match(Set dst (MulL src1 (ConvI2L src2)));
9772   format %{ "mulL  $dst, $src1, $src2 @mulL_reg_regI2L" %}
9773   ins_encode %{
9774     Register dst = as_Register($dst$$reg);
9775     Register op1 = as_Register($src1$$reg);
9776     Register op2 = as_Register($src2$$reg);
9778     if (UseLoongsonISA) {
9779       __ gsdmult(dst, op1, op2);
9780     } else {
9781       __ dmult(op1, op2);
9782       __ mflo(dst);
9783     }
9784   %}
9785   ins_pipe( pipe_slow );
9786 %}
// divL_reg_reg: 64-bit long division via gsddiv or ddiv + mflo (LO holds the
// quotient). NOTE(review): unlike divI_Reg_Reg there is no teq zero-divisor
// trap here — presumably the zero check is emitted elsewhere for DivL;
// confirm against the ideal-graph lowering.
9788 instruct divL_reg_reg(mRegL dst, mRegL src1, mRegL src2) %{
9789   match(Set dst (DivL src1 src2));
9790   format %{ "divL  $dst, $src1, $src2 @divL_reg_reg" %}
9792   ins_encode %{
9793     Register dst = as_Register($dst$$reg);
9794     Register op1 = as_Register($src1$$reg);
9795     Register op2 = as_Register($src2$$reg);
9797     if (UseLoongsonISA) {
9798       __ gsddiv(dst, op1, op2);
9799     } else {
9800       __ ddiv(op1, op2);
9801       __ mflo(dst);
9802     }
9803   %}
9804   ins_pipe( pipe_slow );
9805 %}
// addF_reg_reg: single-precision FP add.
9807 instruct addF_reg_reg(regF dst, regF src1, regF src2) %{
9808   match(Set dst (AddF src1 src2));
9809   format %{ "AddF  $dst, $src1, $src2 @addF_reg_reg" %}
9810   ins_encode %{
9811     FloatRegister src1 = as_FloatRegister($src1$$reg);
9812     FloatRegister src2 = as_FloatRegister($src2$$reg);
9813     FloatRegister dst  = as_FloatRegister($dst$$reg);
9815     __ add_s(dst, src1, src2);
9816   %}
9817   ins_pipe( fpu_regF_regF );
9818 %}
// subF_reg_reg: single-precision FP subtract.
9820 instruct subF_reg_reg(regF dst, regF src1, regF src2) %{
9821   match(Set dst (SubF src1 src2));
9822   format %{ "SubF  $dst, $src1, $src2 @subF_reg_reg" %}
9823   ins_encode %{
9824     FloatRegister src1 = as_FloatRegister($src1$$reg);
9825     FloatRegister src2 = as_FloatRegister($src2$$reg);
9826     FloatRegister dst  = as_FloatRegister($dst$$reg);
9828     __ sub_s(dst, src1, src2);
9829   %}
9830   ins_pipe( fpu_regF_regF );
9831 %}
9832 instruct addD_reg_reg(regD dst, regD src1, regD src2) %{
9833 match(Set dst (AddD src1 src2));
9834 format %{ "AddD $dst, $src1, $src2 @addD_reg_reg" %}
9835 ins_encode %{
9836 FloatRegister src1 = as_FloatRegister($src1$$reg);
9837 FloatRegister src2 = as_FloatRegister($src2$$reg);
9838 FloatRegister dst = as_FloatRegister($dst$$reg);
9840 __ add_d(dst, src1, src2);
9841 %}
9842 ins_pipe( fpu_regF_regF );
9843 %}
9845 instruct subD_reg_reg(regD dst, regD src1, regD src2) %{
9846 match(Set dst (SubD src1 src2));
9847 format %{ "SubD $dst, $src1, $src2 @subD_reg_reg" %}
9848 ins_encode %{
9849 FloatRegister src1 = as_FloatRegister($src1$$reg);
9850 FloatRegister src2 = as_FloatRegister($src2$$reg);
9851 FloatRegister dst = as_FloatRegister($dst$$reg);
9853 __ sub_d(dst, src1, src2);
9854 %}
9855 ins_pipe( fpu_regF_regF );
9856 %}
9858 instruct negF_reg(regF dst, regF src) %{
9859 match(Set dst (NegF src));
9860 format %{ "negF $dst, $src @negF_reg" %}
9861 ins_encode %{
9862 FloatRegister src = as_FloatRegister($src$$reg);
9863 FloatRegister dst = as_FloatRegister($dst$$reg);
9865 __ neg_s(dst, src);
9866 %}
9867 ins_pipe( fpu_regF_regF );
9868 %}
9870 instruct negD_reg(regD dst, regD src) %{
9871 match(Set dst (NegD src));
9872 format %{ "negD $dst, $src @negD_reg" %}
9873 ins_encode %{
9874 FloatRegister src = as_FloatRegister($src$$reg);
9875 FloatRegister dst = as_FloatRegister($dst$$reg);
9877 __ neg_d(dst, src);
9878 %}
9879 ins_pipe( fpu_regF_regF );
9880 %}
// ---- Floating-point multiply / multiply-add --------------------------------
// Single-precision multiply: dst = src1 * src2.
9883 instruct mulF_reg_reg(regF dst, regF src1, regF src2) %{
9884 match(Set dst (MulF src1 src2));
9885 format %{ "MULF $dst, $src1, $src2 @mulF_reg_reg" %}
9886 ins_encode %{
9887 FloatRegister src1 = $src1$$FloatRegister;
9888 FloatRegister src2 = $src2$$FloatRegister;
9889 FloatRegister dst = $dst$$FloatRegister;
9891 __ mul_s(dst, src1, src2);
9892 %}
9893 ins_pipe( fpu_regF_regF );
9894 %}
// Single-precision multiply-add: dst = src1 * src2 + src3.
// The huge ins_cost makes the matcher prefer separate MulF/AddF rules;
// this instruct is effectively disabled (see comment below).
9896 instruct maddF_reg_reg(regF dst, regF src1, regF src2, regF src3) %{
9897 match(Set dst (AddF (MulF src1 src2) src3));
9898 // For compatibility reason (e.g. on the Loongson platform), disable this guy.
9899 ins_cost(44444);
9900 format %{ "maddF $dst, $src1, $src2, $src3 @maddF_reg_reg" %}
9901 ins_encode %{
9902 FloatRegister src1 = $src1$$FloatRegister;
9903 FloatRegister src2 = $src2$$FloatRegister;
9904 FloatRegister src3 = $src3$$FloatRegister;
9905 FloatRegister dst = $dst$$FloatRegister;
9907 __ madd_s(dst, src1, src2, src3);
9908 %}
9909 ins_pipe( fpu_regF_regF );
9910 %}
9912 // Mul two double precision floating piont number
// Double-precision multiply: dst = src1 * src2.
9913 instruct mulD_reg_reg(regD dst, regD src1, regD src2) %{
9914 match(Set dst (MulD src1 src2));
9915 format %{ "MULD $dst, $src1, $src2 @mulD_reg_reg" %}
9916 ins_encode %{
9917 FloatRegister src1 = $src1$$FloatRegister;
9918 FloatRegister src2 = $src2$$FloatRegister;
9919 FloatRegister dst = $dst$$FloatRegister;
9921 __ mul_d(dst, src1, src2);
9922 %}
9923 ins_pipe( fpu_regF_regF );
9924 %}
// Double-precision multiply-add: dst = src1 * src2 + src3.
// Disabled via the huge ins_cost, same as maddF_reg_reg above.
9926 instruct maddD_reg_reg(regD dst, regD src1, regD src2, regD src3) %{
9927 match(Set dst (AddD (MulD src1 src2) src3));
9928 // For compatibility reason (e.g. on the Loongson platform), disable this guy.
9929 ins_cost(44444);
9930 format %{ "maddD $dst, $src1, $src2, $src3 @maddD_reg_reg" %}
9931 ins_encode %{
9932 FloatRegister src1 = $src1$$FloatRegister;
9933 FloatRegister src2 = $src2$$FloatRegister;
9934 FloatRegister src3 = $src3$$FloatRegister;
9935 FloatRegister dst = $dst$$FloatRegister;
9937 __ madd_d(dst, src1, src2, src3);
9938 %}
9939 ins_pipe( fpu_regF_regF );
9940 %}
// ---- Floating-point absolute value / square root ---------------------------
// Single-precision absolute value: dst = |src|.
9942 instruct absF_reg(regF dst, regF src) %{
9943 match(Set dst (AbsF src));
9944 ins_cost(100);
9945 format %{ "absF $dst, $src @absF_reg" %}
9946 ins_encode %{
9947 FloatRegister src = as_FloatRegister($src$$reg);
9948 FloatRegister dst = as_FloatRegister($dst$$reg);
9950 __ abs_s(dst, src);
9951 %}
9952 ins_pipe( fpu_regF_regF );
9953 %}
9956 // intrinsics for math_native.
9957 // AbsD SqrtD CosD SinD TanD LogD Log10D
// Double-precision absolute value: dst = |src|.
9959 instruct absD_reg(regD dst, regD src) %{
9960 match(Set dst (AbsD src));
9961 ins_cost(100);
9962 format %{ "absD $dst, $src @absD_reg" %}
9963 ins_encode %{
9964 FloatRegister src = as_FloatRegister($src$$reg);
9965 FloatRegister dst = as_FloatRegister($dst$$reg);
9967 __ abs_d(dst, src);
9968 %}
9969 ins_pipe( fpu_regF_regF );
9970 %}
// Double-precision square root: dst = sqrt(src).
9972 instruct sqrtD_reg(regD dst, regD src) %{
9973 match(Set dst (SqrtD src));
9974 ins_cost(100);
9975 format %{ "SqrtD $dst, $src @sqrtD_reg" %}
9976 ins_encode %{
9977 FloatRegister src = as_FloatRegister($src$$reg);
9978 FloatRegister dst = as_FloatRegister($dst$$reg);
9980 __ sqrt_d(dst, src);
9981 %}
9982 ins_pipe( fpu_regF_regF );
9983 %}
// Single-precision square root, matched from the widened ideal form
// (float)sqrt((double)src) that C2 produces for Math.sqrt on floats.
9985 instruct sqrtF_reg(regF dst, regF src) %{
9986 match(Set dst (ConvD2F (SqrtD (ConvF2D src))));
9987 ins_cost(100);
9988 format %{ "SqrtF $dst, $src @sqrtF_reg" %}
9989 ins_encode %{
9990 FloatRegister src = as_FloatRegister($src$$reg);
9991 FloatRegister dst = as_FloatRegister($dst$$reg);
9993 __ sqrt_s(dst, src);
9994 %}
9995 ins_pipe( fpu_regF_regF );
9996 %}
9997 //----------------------------------Logical Instructions----------------------
9998 //__________________________________Integer Logical Instructions-------------
10000 //And Instuctions
10001 // And Register with Immediate
// Int AND with an arbitrary immediate: materialize the constant into the
// scratch register AT, then do a register-register AND.
10002 instruct andI_Reg_immI(mRegI dst, mRegI src1, immI src2) %{
10003 match(Set dst (AndI src1 src2));
10005 format %{ "and $dst, $src1, $src2 #@andI_Reg_immI" %}
10006 ins_encode %{
10007 Register dst = $dst$$Register;
10008 Register src = $src1$$Register;
10009 int val = $src2$$constant;
10011 __ move(AT, val);
10012 __ andr(dst, src, AT);
10013 %}
10014 ins_pipe( ialu_regI_regI );
10015 %}
// Int AND with an immediate that fits andi's 16-bit zero-extended field;
// cheaper (one instruction), hence the lower ins_cost.
10017 instruct andI_Reg_imm_0_65535(mRegI dst, mRegI src1, immI_0_65535 src2) %{
10018 match(Set dst (AndI src1 src2));
10019 ins_cost(60);
10021 format %{ "and $dst, $src1, $src2 #@andI_Reg_imm_0_65535" %}
10022 ins_encode %{
10023 Register dst = $dst$$Register;
10024 Register src = $src1$$Register;
10025 int val = $src2$$constant;
10027 __ andi(dst, src, val);
10028 %}
10029 ins_pipe( ialu_regI_regI );
10030 %}
// Int AND with a low-bit mask (2^k - 1): a single ext (bit-field extract)
// of the mask's k low bits replaces the AND.
10032 instruct andI_Reg_immI_nonneg_mask(mRegI dst, mRegI src1, immI_nonneg_mask mask) %{
10033 match(Set dst (AndI src1 mask));
10034 ins_cost(60);
10036 format %{ "and $dst, $src1, $mask #@andI_Reg_immI_nonneg_mask" %}
10037 ins_encode %{
10038 Register dst = $dst$$Register;
10039 Register src = $src1$$Register;
// is_int_mask returns the mask's width in bits (number of one-bits).
10040 int size = Assembler::is_int_mask($mask$$constant);
10042 __ ext(dst, src, 0, size);
10043 %}
10044 ins_pipe( ialu_regI_regI );
10045 %}
// Long AND with a low-bit mask: same trick with the 64-bit dext.
10047 instruct andL_Reg_immL_nonneg_mask(mRegL dst, mRegL src1, immL_nonneg_mask mask) %{
10048 match(Set dst (AndL src1 mask));
10049 ins_cost(60);
10051 format %{ "and $dst, $src1, $mask #@andL_Reg_immL_nonneg_mask" %}
10052 ins_encode %{
10053 Register dst = $dst$$Register;
10054 Register src = $src1$$Register;
10055 int size = Assembler::is_jlong_mask($mask$$constant);
10057 __ dext(dst, src, 0, size);
10058 %}
10059 ins_pipe( ialu_regI_regI );
10060 %}
// Int XOR with an immediate fitting xori's 16-bit zero-extended field.
10062 instruct xorI_Reg_imm_0_65535(mRegI dst, mRegI src1, immI_0_65535 src2) %{
10063 match(Set dst (XorI src1 src2));
10064 ins_cost(60);
10066 format %{ "xori $dst, $src1, $src2 #@xorI_Reg_imm_0_65535" %}
10067 ins_encode %{
10068 Register dst = $dst$$Register;
10069 Register src = $src1$$Register;
10070 int val = $src2$$constant;
10072 __ xori(dst, src, val);
10073 %}
10074 ins_pipe( ialu_regI_regI );
10075 %}
// Int XOR with -1 (bitwise NOT) on Loongson: gsorn computes
// dst = R0 | ~src, i.e. ~src, in a single instruction.
10077 instruct xorI_Reg_immI_M1(mRegI dst, mRegI src1, immI_M1 M1) %{
10078 match(Set dst (XorI src1 M1));
10079 predicate(UseLoongsonISA);
10080 ins_cost(60);
10082 format %{ "xor $dst, $src1, $M1 #@xorI_Reg_immI_M1" %}
10083 ins_encode %{
10084 Register dst = $dst$$Register;
10085 Register src = $src1$$Register;
10087 __ gsorn(dst, R0, src);
10088 %}
10089 ins_pipe( ialu_regI_regI );
10090 %}
// Bitwise NOT of a long narrowed to int.
// NOTE(review): gsorn inverts all 64 bits; the int result is presumably
// still correctly sign-extended because NOT of a sign-extended value
// stays sign-extended -- confirm against the register-state invariants.
10092 instruct xorL2I_Reg_immI_M1(mRegI dst, mRegL src1, immI_M1 M1) %{
10093 match(Set dst (XorI (ConvL2I src1) M1));
10094 predicate(UseLoongsonISA);
10095 ins_cost(60);
10097 format %{ "xor $dst, $src1, $M1 #@xorL2I_Reg_immI_M1" %}
10098 ins_encode %{
10099 Register dst = $dst$$Register;
10100 Register src = $src1$$Register;
10102 __ gsorn(dst, R0, src);
10103 %}
10104 ins_pipe( ialu_regI_regI );
10105 %}
// Long XOR with a 16-bit unsigned immediate.
10107 instruct xorL_Reg_imm_0_65535(mRegL dst, mRegL src1, immL_0_65535 src2) %{
10108 match(Set dst (XorL src1 src2));
10109 ins_cost(60);
10111 format %{ "xori $dst, $src1, $src2 #@xorL_Reg_imm_0_65535" %}
10112 ins_encode %{
10113 Register dst = $dst$$Register;
10114 Register src = $src1$$Register;
10115 long val = $src2$$constant;
10117 __ xori(dst, src, val);
10118 %}
10119 ins_pipe( ialu_regI_regI );
10120 %}
10122 /*
10123 instruct xorL_Reg_immL_M1(mRegL dst, mRegL src1, immL_M1 M1) %{
10124 match(Set dst (XorL src1 M1));
10125 predicate(UseLoongsonISA);
10126 ins_cost(60);
10128 format %{ "xor $dst, $src1, $M1 #@xorL_Reg_immL_M1" %}
10129 ins_encode %{
10130 Register dst = $dst$$Register;
10131 Register src = $src1$$Register;
10133 __ gsorn(dst, R0, src);
10134 %}
10135 ins_pipe( ialu_regI_regI );
10136 %}
10137 */
// Fold "0xFF & LoadB" into a single unsigned-byte load (lbu).
// Fix: the format string previously printed "lhu" (halfword load), but
// load_UB_enc emits an unsigned BYTE load -- print "lbu" to match.
10139 instruct lbu_and_lmask(mRegI dst, memory mem, immI_255 mask) %{
10140 match(Set dst (AndI mask (LoadB mem)));
10141 ins_cost(60);
10143 format %{ "lbu $dst, $mem #@lbu_and_lmask" %}
10144 ins_encode(load_UB_enc(dst, mem));
10145 ins_pipe( ialu_loadI );
10146 %}
// Fold "LoadB & 0xFF" (mask on the right) into a single unsigned-byte
// load (lbu). Fix: format string corrected from "lhu" to "lbu" -- the
// encoding load_UB_enc emits an unsigned BYTE load, not a halfword load.
10148 instruct lbu_and_rmask(mRegI dst, memory mem, immI_255 mask) %{
10149 match(Set dst (AndI (LoadB mem) mask));
10150 ins_cost(60);
10152 format %{ "lbu $dst, $mem #@lbu_and_rmask" %}
10153 ins_encode(load_UB_enc(dst, mem));
10154 ins_pipe( ialu_loadI );
10155 %}
// Int AND, register-register.
10157 instruct andI_Reg_Reg(mRegI dst, mRegI src1, mRegI src2) %{
10158 match(Set dst (AndI src1 src2));
10160 format %{ "and $dst, $src1, $src2 #@andI_Reg_Reg" %}
10161 ins_encode %{
10162 Register dst = $dst$$Register;
10163 Register src1 = $src1$$Register;
10164 Register src2 = $src2$$Register;
10165 __ andr(dst, src1, src2);
10166 %}
10167 ins_pipe( ialu_regI_regI );
10168 %}
// src1 & ~src2 folded into Loongson's gsandn (and-not) instruction.
10170 instruct andnI_Reg_nReg(mRegI dst, mRegI src1, mRegI src2, immI_M1 M1) %{
10171 match(Set dst (AndI src1 (XorI src2 M1)));
10172 predicate(UseLoongsonISA);
10174 format %{ "andn $dst, $src1, $src2 #@andnI_Reg_nReg" %}
10175 ins_encode %{
10176 Register dst = $dst$$Register;
10177 Register src1 = $src1$$Register;
10178 Register src2 = $src2$$Register;
10180 __ gsandn(dst, src1, src2);
10181 %}
10182 ins_pipe( ialu_regI_regI );
10183 %}
// src1 | ~src2 folded into Loongson's gsorn (or-not) instruction.
10185 instruct ornI_Reg_nReg(mRegI dst, mRegI src1, mRegI src2, immI_M1 M1) %{
10186 match(Set dst (OrI src1 (XorI src2 M1)));
10187 predicate(UseLoongsonISA);
10189 format %{ "orn $dst, $src1, $src2 #@ornI_Reg_nReg" %}
10190 ins_encode %{
10191 Register dst = $dst$$Register;
10192 Register src1 = $src1$$Register;
10193 Register src2 = $src2$$Register;
10195 __ gsorn(dst, src1, src2);
10196 %}
10197 ins_pipe( ialu_regI_regI );
10198 %}
// ~src1 & src2: same gsandn with the operands swapped (AND commutes).
10200 instruct andnI_nReg_Reg(mRegI dst, mRegI src1, mRegI src2, immI_M1 M1) %{
10201 match(Set dst (AndI (XorI src1 M1) src2));
10202 predicate(UseLoongsonISA);
10204 format %{ "andn $dst, $src2, $src1 #@andnI_nReg_Reg" %}
10205 ins_encode %{
10206 Register dst = $dst$$Register;
10207 Register src1 = $src1$$Register;
10208 Register src2 = $src2$$Register;
10210 __ gsandn(dst, src2, src1);
10211 %}
10212 ins_pipe( ialu_regI_regI );
10213 %}
// ~src1 | src2: gsorn with the operands swapped (OR commutes).
10215 instruct ornI_nReg_Reg(mRegI dst, mRegI src1, mRegI src2, immI_M1 M1) %{
10216 match(Set dst (OrI (XorI src1 M1) src2));
10217 predicate(UseLoongsonISA);
10219 format %{ "orn $dst, $src2, $src1 #@ornI_nReg_Reg" %}
10220 ins_encode %{
10221 Register dst = $dst$$Register;
10222 Register src1 = $src1$$Register;
10223 Register src2 = $src2$$Register;
10225 __ gsorn(dst, src2, src1);
10226 %}
10227 ins_pipe( ialu_regI_regI );
10228 %}
10230 // And Long Register with Register
// Long AND, register-register.
10231 instruct andL_Reg_Reg(mRegL dst, mRegL src1, mRegL src2) %{
10232 match(Set dst (AndL src1 src2));
10233 format %{ "AND $dst, $src1, $src2 @ andL_Reg_Reg\n\t" %}
10234 ins_encode %{
10235 Register dst_reg = as_Register($dst$$reg);
10236 Register src1_reg = as_Register($src1$$reg);
10237 Register src2_reg = as_Register($src2$$reg);
10239 __ andr(dst_reg, src1_reg, src2_reg);
10240 %}
10241 ins_pipe( ialu_regL_regL );
10242 %}
// Long AND with an int operand widened to long.
// NOTE(review): no explicit widening emitted -- presumably relies on int
// values being kept sign-extended in 64-bit registers; confirm.
10244 instruct andL_Reg_Reg_convI2L(mRegL dst, mRegL src1, mRegI src2) %{
10245 match(Set dst (AndL src1 (ConvI2L src2)));
10246 format %{ "AND $dst, $src1, $src2 @ andL_Reg_Reg_convI2L\n\t" %}
10247 ins_encode %{
10248 Register dst_reg = as_Register($dst$$reg);
10249 Register src1_reg = as_Register($src1$$reg);
10250 Register src2_reg = as_Register($src2$$reg);
10252 __ andr(dst_reg, src1_reg, src2_reg);
10253 %}
10254 ins_pipe( ialu_regL_regL );
10255 %}
// Long AND with a 16-bit unsigned immediate (single andi).
10257 instruct andL_Reg_imm_0_65535(mRegL dst, mRegL src1, immL_0_65535 src2) %{
10258 match(Set dst (AndL src1 src2));
10259 ins_cost(60);
10261 format %{ "and $dst, $src1, $src2 #@andL_Reg_imm_0_65535" %}
10262 ins_encode %{
10263 Register dst = $dst$$Register;
10264 Register src = $src1$$Register;
10265 long val = $src2$$constant;
10267 __ andi(dst, src, val);
10268 %}
10269 ins_pipe( ialu_regI_regI );
10270 %}
// (int)(src1 & imm16): the masked result already fits in 16 bits, so the
// narrowing conversion needs no extra code beyond the andi.
10272 instruct andL2I_Reg_imm_0_65535(mRegI dst, mRegL src1, immL_0_65535 src2) %{
10273 match(Set dst (ConvL2I (AndL src1 src2)));
10274 ins_cost(60);
10276 format %{ "and $dst, $src1, $src2 #@andL2I_Reg_imm_0_65535" %}
10277 ins_encode %{
10278 Register dst = $dst$$Register;
10279 Register src = $src1$$Register;
10280 long val = $src2$$constant;
10282 __ andi(dst, src, val);
10283 %}
10284 ins_pipe( ialu_regI_regI );
10285 %}
10287 /*
10288 instruct andnL_Reg_nReg(mRegL dst, mRegL src1, mRegL src2, immL_M1 M1) %{
10289 match(Set dst (AndL src1 (XorL src2 M1)));
10290 predicate(UseLoongsonISA);
10292 format %{ "andn $dst, $src1, $src2 #@andnL_Reg_nReg" %}
10293 ins_encode %{
10294 Register dst = $dst$$Register;
10295 Register src1 = $src1$$Register;
10296 Register src2 = $src2$$Register;
10298 __ gsandn(dst, src1, src2);
10299 %}
10300 ins_pipe( ialu_regI_regI );
10301 %}
10302 */
10304 /*
10305 instruct ornL_Reg_nReg(mRegL dst, mRegL src1, mRegL src2, immL_M1 M1) %{
10306 match(Set dst (OrL src1 (XorL src2 M1)));
10307 predicate(UseLoongsonISA);
10309 format %{ "orn $dst, $src1, $src2 #@ornL_Reg_nReg" %}
10310 ins_encode %{
10311 Register dst = $dst$$Register;
10312 Register src1 = $src1$$Register;
10313 Register src2 = $src2$$Register;
10315 __ gsorn(dst, src1, src2);
10316 %}
10317 ins_pipe( ialu_regI_regI );
10318 %}
10319 */
10321 /*
10322 instruct andnL_nReg_Reg(mRegL dst, mRegL src1, mRegL src2, immL_M1 M1) %{
10323 match(Set dst (AndL (XorL src1 M1) src2));
10324 predicate(UseLoongsonISA);
10326 format %{ "andn $dst, $src2, $src1 #@andnL_nReg_Reg" %}
10327 ins_encode %{
10328 Register dst = $dst$$Register;
10329 Register src1 = $src1$$Register;
10330 Register src2 = $src2$$Register;
10332 __ gsandn(dst, src2, src1);
10333 %}
10334 ins_pipe( ialu_regI_regI );
10335 %}
10336 */
10338 /*
10339 instruct ornL_nReg_Reg(mRegL dst, mRegL src1, mRegL src2, immL_M1 M1) %{
10340 match(Set dst (OrL (XorL src1 M1) src2));
10341 predicate(UseLoongsonISA);
10343 format %{ "orn $dst, $src2, $src1 #@ornL_nReg_Reg" %}
10344 ins_encode %{
10345 Register dst = $dst$$Register;
10346 Register src1 = $src1$$Register;
10347 Register src2 = $src2$$Register;
10349 __ gsorn(dst, src2, src1);
10350 %}
10351 ins_pipe( ialu_regI_regI );
10352 %}
10353 */
// The andL_Reg_immL_Mxx rules implement AND with a negative constant whose
// complement is a contiguous bit run, using dins (insert from R0) to clear
// exactly those bits in place.
// dst &= -8 (~0x7): clear bits [0,2].
10355 instruct andL_Reg_immL_M8(mRegL dst, immL_M8 M8) %{
10356 match(Set dst (AndL dst M8));
10357 ins_cost(60);
10359 format %{ "and $dst, $dst, $M8 #@andL_Reg_immL_M8" %}
10360 ins_encode %{
10361 Register dst = $dst$$Register;
10363 __ dins(dst, R0, 0, 3);
10364 %}
10365 ins_pipe( ialu_regI_regI );
10366 %}
// dst &= -5 (~0x4): clear bit 2 only.
10368 instruct andL_Reg_immL_M5(mRegL dst, immL_M5 M5) %{
10369 match(Set dst (AndL dst M5));
10370 ins_cost(60);
10372 format %{ "and $dst, $dst, $M5 #@andL_Reg_immL_M5" %}
10373 ins_encode %{
10374 Register dst = $dst$$Register;
10376 __ dins(dst, R0, 2, 1);
10377 %}
10378 ins_pipe( ialu_regI_regI );
10379 %}
// dst &= -7 (~0x6): clear bits [1,2].
10381 instruct andL_Reg_immL_M7(mRegL dst, immL_M7 M7) %{
10382 match(Set dst (AndL dst M7));
10383 ins_cost(60);
10385 format %{ "and $dst, $dst, $M7 #@andL_Reg_immL_M7" %}
10386 ins_encode %{
10387 Register dst = $dst$$Register;
10389 __ dins(dst, R0, 1, 2);
10390 %}
10391 ins_pipe( ialu_regI_regI );
10392 %}
// dst &= -4 (~0x3): clear bits [0,1].
10394 instruct andL_Reg_immL_M4(mRegL dst, immL_M4 M4) %{
10395 match(Set dst (AndL dst M4));
10396 ins_cost(60);
10398 format %{ "and $dst, $dst, $M4 #@andL_Reg_immL_M4" %}
10399 ins_encode %{
10400 Register dst = $dst$$Register;
10402 __ dins(dst, R0, 0, 2);
10403 %}
10404 ins_pipe( ialu_regI_regI );
10405 %}
// dst &= -121 (~0x78): clear bits [3,6].
10407 instruct andL_Reg_immL_M121(mRegL dst, immL_M121 M121) %{
10408 match(Set dst (AndL dst M121));
10409 ins_cost(60);
10411 format %{ "and $dst, $dst, $M121 #@andL_Reg_immL_M121" %}
10412 ins_encode %{
10413 Register dst = $dst$$Register;
10415 __ dins(dst, R0, 3, 4);
10416 %}
10417 ins_pipe( ialu_regI_regI );
10418 %}
10420 // Or Long Register with Register
// Long OR, register-register.
10421 instruct orL_Reg_Reg(mRegL dst, mRegL src1, mRegL src2) %{
10422 match(Set dst (OrL src1 src2));
10423 format %{ "OR $dst, $src1, $src2 @ orL_Reg_Reg\t" %}
10424 ins_encode %{
10425 Register dst_reg = $dst$$Register;
10426 Register src1_reg = $src1$$Register;
10427 Register src2_reg = $src2$$Register;
10429 __ orr(dst_reg, src1_reg, src2_reg);
10430 %}
10431 ins_pipe( ialu_regL_regL );
10432 %}
// Long OR where the first operand is a pointer viewed as raw bits
// (CastP2X); the pointer already lives in a GP register, so a plain OR.
10434 instruct orL_Reg_P2XReg(mRegL dst, mRegP src1, mRegL src2) %{
10435 match(Set dst (OrL (CastP2X src1) src2));
10436 format %{ "OR $dst, $src1, $src2 @ orL_Reg_P2XReg\t" %}
10437 ins_encode %{
10438 Register dst_reg = $dst$$Register;
10439 Register src1_reg = $src1$$Register;
10440 Register src2_reg = $src2$$Register;
10442 __ orr(dst_reg, src1_reg, src2_reg);
10443 %}
10444 ins_pipe( ialu_regL_regL );
10445 %}
10447 // Xor Long Register with Register
// Long XOR, register-register.
10448 instruct xorL_Reg_Reg(mRegL dst, mRegL src1, mRegL src2) %{
10449 match(Set dst (XorL src1 src2));
10450 format %{ "XOR $dst, $src1, $src2 @ xorL_Reg_Reg\t" %}
10451 ins_encode %{
10452 Register dst_reg = as_Register($dst$$reg);
10453 Register src1_reg = as_Register($src1$$reg);
10454 Register src2_reg = as_Register($src2$$reg);
10456 __ xorr(dst_reg, src1_reg, src2_reg);
10457 %}
10458 ins_pipe( ialu_regL_regL );
10459 %}
10461 // Shift Left by 8-bit immediate
// Int shift-left by immediate.
// NOTE(review): sll takes a 5-bit shamt; presumably the matcher only
// produces shift amounts already reduced mod 32 -- confirm.
10462 instruct salI_Reg_imm(mRegI dst, mRegI src, immI8 shift) %{
10463 match(Set dst (LShiftI src shift));
10465 format %{ "SHL $dst, $src, $shift #@salI_Reg_imm" %}
10466 ins_encode %{
10467 Register src = $src$$Register;
10468 Register dst = $dst$$Register;
10469 int shamt = $shift$$constant;
10471 __ sll(dst, src, shamt);
10472 %}
10473 ins_pipe( ialu_regI_regI );
10474 %}
// (int)src << shift for a long source: sll operates on the low 32 bits
// and sign-extends, which is exactly the ConvL2I + shift semantics.
10476 instruct salL2I_Reg_imm(mRegI dst, mRegL src, immI8 shift) %{
10477 match(Set dst (LShiftI (ConvL2I src) shift));
10479 format %{ "SHL $dst, $src, $shift #@salL2I_Reg_imm" %}
10480 ins_encode %{
10481 Register src = $src$$Register;
10482 Register dst = $dst$$Register;
10483 int shamt = $shift$$constant;
10485 __ sll(dst, src, shamt);
10486 %}
10487 ins_pipe( ialu_regI_regI );
10488 %}
// (src << 16) & 0xFFFF0000: the AND is a no-op after the 16-bit shift,
// so only the sll is emitted.
10490 instruct salI_Reg_imm_and_M65536(mRegI dst, mRegI src, immI_16 shift, immI_M65536 mask) %{
10491 match(Set dst (AndI (LShiftI src shift) mask));
10493 format %{ "SHL $dst, $src, $shift #@salI_Reg_imm_and_M65536" %}
10494 ins_encode %{
10495 Register src = $src$$Register;
10496 Register dst = $dst$$Register;
10498 __ sll(dst, src, 16);
10499 %}
10500 ins_pipe( ialu_regI_regI );
10501 %}
// ((int)(src & 7) << 16) >> 16: the value is at most 7, so the
// sign-extend round trip is a no-op and a single andi suffices.
10503 instruct land7_2_s(mRegI dst, mRegL src, immL7 seven, immI_16 sixteen)
10504 %{
10505 match(Set dst (RShiftI (LShiftI (ConvL2I (AndL src seven)) sixteen) sixteen));
10507 format %{ "andi $dst, $src, 7\t# @land7_2_s" %}
10508 ins_encode %{
10509 Register src = $src$$Register;
10510 Register dst = $dst$$Register;
10512 __ andi(dst, src, 7);
10513 %}
10514 ins_pipe(ialu_regI_regI);
10515 %}
// ((src1 | imm) << 16) >> 16 collapsed to a single ori.
// NOTE(review): this drops the explicit 16-bit sign-extension; it is only
// correct if the matched value is known to fit in a short (src2 is
// constrained to 0..32767) -- confirm src1's type constraints make the
// sign-extension a no-op here.
10517 instruct ori2s(mRegI dst, mRegI src1, immI_0_32767 src2, immI_16 sixteen)
10518 %{
10519 match(Set dst (RShiftI (LShiftI (OrI src1 src2) sixteen) sixteen));
10521 format %{ "ori $dst, $src1, $src2\t# @ori2s" %}
10522 ins_encode %{
10523 Register src = $src1$$Register;
10524 int val = $src2$$constant;
10525 Register dst = $dst$$Register;
10527 __ ori(dst, src, val);
10528 %}
10529 ins_pipe(ialu_regI_regI);
10530 %}
10532 // Logical Shift Right by 16, followed by Arithmetic Shift Left by 16.
10533 // This idiom is used by the compiler the i2s bytecode.
// Matched to the MIPS32r2 seh (sign-extend halfword) instruction.
10534 instruct i2s(mRegI dst, mRegI src, immI_16 sixteen)
10535 %{
10536 match(Set dst (RShiftI (LShiftI src sixteen) sixteen));
10538 format %{ "i2s $dst, $src\t# @i2s" %}
10539 ins_encode %{
10540 Register src = $src$$Register;
10541 Register dst = $dst$$Register;
10543 __ seh(dst, src);
10544 %}
10545 ins_pipe(ialu_regI_regI);
10546 %}
10548 // Logical Shift Right by 24, followed by Arithmetic Shift Left by 24.
10549 // This idiom is used by the compiler for the i2b bytecode.
// Matched to the MIPS32r2 seb (sign-extend byte) instruction.
10550 instruct i2b(mRegI dst, mRegI src, immI_24 twentyfour)
10551 %{
10552 match(Set dst (RShiftI (LShiftI src twentyfour) twentyfour));
10554 format %{ "i2b $dst, $src\t# @i2b" %}
10555 ins_encode %{
10556 Register src = $src$$Register;
10557 Register dst = $dst$$Register;
10559 __ seb(dst, src);
10560 %}
10561 ins_pipe(ialu_regI_regI);
10562 %}
// (int)src << shift for a long source.
// NOTE(review): this match pattern is identical to salL2I_Reg_imm earlier
// in this file -- one of the two appears redundant; verify which one ADLC
// actually selects and whether the duplicate should be removed.
10565 instruct salI_RegL2I_imm(mRegI dst, mRegL src, immI8 shift) %{
10566 match(Set dst (LShiftI (ConvL2I src) shift));
10568 format %{ "SHL $dst, $src, $shift #@salI_RegL2I_imm" %}
10569 ins_encode %{
10570 Register src = $src$$Register;
10571 Register dst = $dst$$Register;
10572 int shamt = $shift$$constant;
10574 __ sll(dst, src, shamt);
10575 %}
10576 ins_pipe( ialu_regI_regI );
10577 %}
10579 // Shift Left by 8-bit immediate
// Int shift-left by a register amount; sllv uses only the low 5 bits of
// the shift register, matching Java's << semantics for ints.
10580 instruct salI_Reg_Reg(mRegI dst, mRegI src, mRegI shift) %{
10581 match(Set dst (LShiftI src shift));
10583 format %{ "SHL $dst, $src, $shift #@salI_Reg_Reg" %}
10584 ins_encode %{
10585 Register src = $src$$Register;
10586 Register dst = $dst$$Register;
10587 Register shamt = $shift$$Register;
10588 __ sllv(dst, src, shamt);
10589 %}
10590 ins_pipe( ialu_regI_regI );
10591 %}
10594 // Shift Left Long
// Long shift-left by immediate. dsll encodes shamt 0..31; larger amounts
// use dsll32 with (shamt - 32). The is_simm(shamt, 5) test sends
// 16..31 down the else path too, where the sa < 32 branch still emits the
// correct dsll, so behavior is right either way.
10595 instruct salL_Reg_imm(mRegL dst, mRegL src, immI8 shift) %{
10596 //predicate(UseNewLongLShift);
10597 match(Set dst (LShiftL src shift));
10598 ins_cost(100);
10599 format %{ "salL $dst, $src, $shift @ salL_Reg_imm" %}
10600 ins_encode %{
10601 Register src_reg = as_Register($src$$reg);
10602 Register dst_reg = as_Register($dst$$reg);
10603 int shamt = $shift$$constant;
10605 if (__ is_simm(shamt, 5))
10606 __ dsll(dst_reg, src_reg, shamt);
10607 else
10608 {
// Reduce the shift amount mod 64, as Java's << does for longs.
10609 int sa = Assembler::low(shamt, 6);
10610 if (sa < 32) {
10611 __ dsll(dst_reg, src_reg, sa);
10612 } else {
10613 __ dsll32(dst_reg, src_reg, sa - 32);
10614 }
10615 }
10616 %}
10617 ins_pipe( ialu_regL_regL );
10618 %}
// (long)(int)src << shift.
// NOTE(review): this match pattern is identical to salL_convI2L_Reg_imm
// below -- one of the two appears redundant; verify which one ADLC
// selects. Also relies on src already being sign-extended in its 64-bit
// register (no explicit ConvI2L code emitted).
10620 instruct salL_RegI2L_imm(mRegL dst, mRegI src, immI8 shift) %{
10621 //predicate(UseNewLongLShift);
10622 match(Set dst (LShiftL (ConvI2L src) shift));
10623 ins_cost(100);
10624 format %{ "salL $dst, $src, $shift @ salL_RegI2L_imm" %}
10625 ins_encode %{
10626 Register src_reg = as_Register($src$$reg);
10627 Register dst_reg = as_Register($dst$$reg);
10628 int shamt = $shift$$constant;
10630 if (__ is_simm(shamt, 5))
10631 __ dsll(dst_reg, src_reg, shamt);
10632 else
10633 {
10634 int sa = Assembler::low(shamt, 6);
10635 if (sa < 32) {
10636 __ dsll(dst_reg, src_reg, sa);
10637 } else {
10638 __ dsll32(dst_reg, src_reg, sa - 32);
10639 }
10640 }
10641 %}
10642 ins_pipe( ialu_regL_regL );
10643 %}
10645 // Shift Left Long
// Long shift-left by a register amount; dsllv uses the low 6 bits of the
// shift register, matching Java's << semantics for longs.
10646 instruct salL_Reg_Reg(mRegL dst, mRegL src, mRegI shift) %{
10647 //predicate(UseNewLongLShift);
10648 match(Set dst (LShiftL src shift));
10649 ins_cost(100);
10650 format %{ "salL $dst, $src, $shift @ salL_Reg_Reg" %}
10651 ins_encode %{
10652 Register src_reg = as_Register($src$$reg);
10653 Register dst_reg = as_Register($dst$$reg);
10655 __ dsllv(dst_reg, src_reg, $shift$$Register);
10656 %}
10657 ins_pipe( ialu_regL_regL );
10658 %}
// (long)(int)src << shift -- see the duplicate-pattern note on
// salL_RegI2L_imm above.
10660 instruct salL_convI2L_Reg_imm(mRegL dst, mRegI src, immI8 shift) %{
10661 match(Set dst (LShiftL (ConvI2L src) shift));
10662 ins_cost(100);
10663 format %{ "salL $dst, $src, $shift @ salL_convI2L_Reg_imm" %}
10664 ins_encode %{
10665 Register src_reg = as_Register($src$$reg);
10666 Register dst_reg = as_Register($dst$$reg);
10667 int shamt = $shift$$constant;
10669 if (__ is_simm(shamt, 5)) {
10670 __ dsll(dst_reg, src_reg, shamt);
10671 } else {
10672 int sa = Assembler::low(shamt, 6);
10673 if (sa < 32) {
10674 __ dsll(dst_reg, src_reg, sa);
10675 } else {
10676 __ dsll32(dst_reg, src_reg, sa - 32);
10677 }
10678 }
10679 %}
10680 ins_pipe( ialu_regL_regL );
10681 %}
10683 // Shift Right Long
// Long arithmetic shift-right by immediate. Unlike salL_Reg_imm, the
// shift amount is masked with 0x3f up front (Java's >> mod-64 rule).
10684 instruct sarL_Reg_imm(mRegL dst, mRegL src, immI8 shift) %{
10685 match(Set dst (RShiftL src shift));
10686 ins_cost(100);
10687 format %{ "sarL $dst, $src, $shift @ sarL_Reg_imm" %}
10688 ins_encode %{
10689 Register src_reg = as_Register($src$$reg);
10690 Register dst_reg = as_Register($dst$$reg);
10691 int shamt = ($shift$$constant & 0x3f);
10692 if (__ is_simm(shamt, 5))
10693 __ dsra(dst_reg, src_reg, shamt);
10694 else {
10695 int sa = Assembler::low(shamt, 6);
10696 if (sa < 32) {
10697 __ dsra(dst_reg, src_reg, sa);
10698 } else {
// dsra32 handles shift amounts 32..63.
10699 __ dsra32(dst_reg, src_reg, sa - 32);
10700 }
10701 }
10702 %}
10703 ins_pipe( ialu_regL_regL );
10704 %}
// (int)(src >> shift) for shift in [32,63]: dsra32 leaves the result
// sign-extended, so the narrowing needs no extra code.
10706 instruct sarL2I_Reg_immI_32_63(mRegI dst, mRegL src, immI_32_63 shift) %{
10707 match(Set dst (ConvL2I (RShiftL src shift)));
10708 ins_cost(100);
10709 format %{ "sarL $dst, $src, $shift @ sarL2I_Reg_immI_32_63" %}
10710 ins_encode %{
10711 Register src_reg = as_Register($src$$reg);
10712 Register dst_reg = as_Register($dst$$reg);
10713 int shamt = $shift$$constant;
10715 __ dsra32(dst_reg, src_reg, shamt - 32);
10716 %}
10717 ins_pipe( ialu_regL_regL );
10718 %}
10720 // Shift Right Long arithmetically
// Long arithmetic shift-right by a register amount; dsrav uses the low
// 6 bits of the shift register.
10721 instruct sarL_Reg_Reg(mRegL dst, mRegL src, mRegI shift) %{
10722 //predicate(UseNewLongLShift);
10723 match(Set dst (RShiftL src shift));
10724 ins_cost(100);
10725 format %{ "sarL $dst, $src, $shift @ sarL_Reg_Reg" %}
10726 ins_encode %{
10727 Register src_reg = as_Register($src$$reg);
10728 Register dst_reg = as_Register($dst$$reg);
10730 __ dsrav(dst_reg, src_reg, $shift$$Register);
10731 %}
10732 ins_pipe( ialu_regL_regL );
10733 %}
10735 // Shift Right Long logically
// Long logical shift-right by a register amount; dsrlv uses the low
// 6 bits of the shift register (Java's >>> mod-64 rule).
10736 instruct slrL_Reg_Reg(mRegL dst, mRegL src, mRegI shift) %{
10737 match(Set dst (URShiftL src shift));
10738 ins_cost(100);
10739 format %{ "slrL $dst, $src, $shift @ slrL_Reg_Reg" %}
10740 ins_encode %{
10741 Register src_reg = as_Register($src$$reg);
10742 Register dst_reg = as_Register($dst$$reg);
10744 __ dsrlv(dst_reg, src_reg, $shift$$Register);
10745 %}
10746 ins_pipe( ialu_regL_regL );
10747 %}
// Long logical shift-right with immediate in dsrl's 0..31 range.
10749 instruct slrL_Reg_immI_0_31(mRegL dst, mRegL src, immI_0_31 shift) %{
10750 match(Set dst (URShiftL src shift));
10751 ins_cost(80);
10752 format %{ "slrL $dst, $src, $shift @ slrL_Reg_immI_0_31" %}
10753 ins_encode %{
10754 Register src_reg = as_Register($src$$reg);
10755 Register dst_reg = as_Register($dst$$reg);
10756 int shamt = $shift$$constant;
10758 __ dsrl(dst_reg, src_reg, shamt);
10759 %}
10760 ins_pipe( ialu_regL_regL );
10761 %}
// (int)(src >>> shift) & 0x7FFFFFFF folded into one dext extracting the
// 31 bits starting at bit `shift`; dext zero-fills, satisfying the mask.
10763 instruct slrL_Reg_immI_0_31_and_max_int(mRegI dst, mRegL src, immI_0_31 shift, immI_MaxI max_int) %{
10764 match(Set dst (AndI (ConvL2I (URShiftL src shift)) max_int));
10765 ins_cost(80);
10766 format %{ "dext $dst, $src, $shift, 31 @ slrL_Reg_immI_0_31_and_max_int" %}
10767 ins_encode %{
10768 Register src_reg = as_Register($src$$reg);
10769 Register dst_reg = as_Register($dst$$reg);
10770 int shamt = $shift$$constant;
10772 __ dext(dst_reg, src_reg, shamt, 31);
10773 %}
10774 ins_pipe( ialu_regL_regL );
10775 %}
// Pointer bits (CastP2X) logically shifted right by 0..31.
10777 instruct slrL_P2XReg_immI_0_31(mRegL dst, mRegP src, immI_0_31 shift) %{
10778 match(Set dst (URShiftL (CastP2X src) shift));
10779 ins_cost(80);
10780 format %{ "slrL $dst, $src, $shift @ slrL_P2XReg_immI_0_31" %}
10781 ins_encode %{
10782 Register src_reg = as_Register($src$$reg);
10783 Register dst_reg = as_Register($dst$$reg);
10784 int shamt = $shift$$constant;
10786 __ dsrl(dst_reg, src_reg, shamt);
10787 %}
10788 ins_pipe( ialu_regL_regL );
10789 %}
// Long logical shift-right by 32..63 via dsrl32.
10791 instruct slrL_Reg_immI_32_63(mRegL dst, mRegL src, immI_32_63 shift) %{
10792 match(Set dst (URShiftL src shift));
10793 ins_cost(80);
10794 format %{ "slrL $dst, $src, $shift @ slrL_Reg_immI_32_63" %}
10795 ins_encode %{
10796 Register src_reg = as_Register($src$$reg);
10797 Register dst_reg = as_Register($dst$$reg);
10798 int shamt = $shift$$constant;
10800 __ dsrl32(dst_reg, src_reg, shamt - 32);
10801 %}
10802 ins_pipe( ialu_regL_regL );
10803 %}
// (int)(src >>> shift) for shift strictly greater than 32: the result
// then fits in 31 bits, so the zero-filled dsrl32 output is already a
// correctly sign-extended (non-negative) int. shift == 32 is excluded by
// the predicate because bit 31 of that result could be set.
10805 instruct slrL_Reg_immI_convL2I(mRegI dst, mRegL src, immI_32_63 shift) %{
10806 match(Set dst (ConvL2I (URShiftL src shift)));
10807 predicate(n->in(1)->in(2)->get_int() > 32);
10808 ins_cost(80);
10809 format %{ "slrL $dst, $src, $shift @ slrL_Reg_immI_convL2I" %}
10810 ins_encode %{
10811 Register src_reg = as_Register($src$$reg);
10812 Register dst_reg = as_Register($dst$$reg);
10813 int shamt = $shift$$constant;
10815 __ dsrl32(dst_reg, src_reg, shamt - 32);
10816 %}
10817 ins_pipe( ialu_regL_regL );
10818 %}
// Pointer bits (CastP2X) logically shifted right by 32..63.
10820 instruct slrL_P2XReg_immI_32_63(mRegL dst, mRegP src, immI_32_63 shift) %{
10821 match(Set dst (URShiftL (CastP2X src) shift));
10822 ins_cost(80);
10823 format %{ "slrL $dst, $src, $shift @ slrL_P2XReg_immI_32_63" %}
10824 ins_encode %{
10825 Register src_reg = as_Register($src$$reg);
10826 Register dst_reg = as_Register($dst$$reg);
10827 int shamt = $shift$$constant;
10829 __ dsrl32(dst_reg, src_reg, shamt - 32);
10830 %}
10831 ins_pipe( ialu_regL_regL );
10832 %}
10834 // Xor Instructions
10835 // Xor Register with Register
// Int XOR, register-register. The trailing sll-by-0 re-sign-extends the
// low 32 bits into the 64-bit register.
// NOTE(review): XOR of two already sign-extended ints is itself
// sign-extended, so the sll looks redundant -- confirm before removing.
10836 instruct xorI_Reg_Reg(mRegI dst, mRegI src1, mRegI src2) %{
10837 match(Set dst (XorI src1 src2));
10839 format %{ "XOR $dst, $src1, $src2 #@xorI_Reg_Reg" %}
10841 ins_encode %{
10842 Register dst = $dst$$Register;
10843 Register src1 = $src1$$Register;
10844 Register src2 = $src2$$Register;
10845 __ xorr(dst, src1, src2);
10846 __ sll(dst, dst, 0); /* long -> int */
10847 %}
10849 ins_pipe( ialu_regI_regI );
10850 %}
10852 // Or Instructions
10853 // Or Register with Register
// Int OR, register-register.
10854 instruct orI_Reg_Reg(mRegI dst, mRegI src1, mRegI src2) %{
10855 match(Set dst (OrI src1 src2));
10857 format %{ "OR $dst, $src1, $src2 #@orI_Reg_Reg" %}
10858 ins_encode %{
10859 Register dst = $dst$$Register;
10860 Register src1 = $src1$$Register;
10861 Register src2 = $src2$$Register;
10862 __ orr(dst, src1, src2);
10863 %}
10865 ins_pipe( ialu_regI_regI );
10866 %}
// (src >>> rshift) | ((src & 1) << lshift) with rshift + lshift == 32:
// rotate the low bit to the top, then shift right the remaining amount.
10868 instruct rotI_shr_logical_Reg(mRegI dst, mRegI src, immI_0_31 rshift, immI_0_31 lshift, immI_1 one) %{
10869 match(Set dst (OrI (URShiftI src rshift) (LShiftI (AndI src one) lshift)));
10870 predicate(32 == ((n->in(1)->in(2)->get_int() + n->in(2)->in(2)->get_int())));
10872 format %{ "rotr $dst, $src, 1 ...\n\t"
10873 "srl $dst, $dst, ($rshift-1) @ rotI_shr_logical_Reg" %}
10874 ins_encode %{
10875 Register dst = $dst$$Register;
10876 Register src = $src$$Register;
10877 int rshift = $rshift$$constant;
10879 __ rotr(dst, src, 1);
// Skip the srl entirely when rshift == 1 (shift amount would be 0).
10880 if (rshift - 1) {
10881 __ srl(dst, dst, rshift - 1);
10882 }
10883 %}
10885 ins_pipe( ialu_regI_regI );
10886 %}
// OR of a value with a pointer's raw bits (CastP2X).
// NOTE(review): the node is OrI but dst/src1 are declared mRegL --
// verify this operand typing is intentional.
10888 instruct orI_Reg_castP2X(mRegL dst, mRegL src1, mRegP src2) %{
10889 match(Set dst (OrI src1 (CastP2X src2)));
10891 format %{ "OR $dst, $src1, $src2 #@orI_Reg_castP2X" %}
10892 ins_encode %{
10893 Register dst = $dst$$Register;
10894 Register src1 = $src1$$Register;
10895 Register src2 = $src2$$Register;
10896 __ orr(dst, src1, src2);
10897 %}
10899 ins_pipe( ialu_regI_regI );
10900 %}
10902 // Logical Shift Right by 8-bit immediate
10903 instruct shr_logical_Reg_imm(mRegI dst, mRegI src, immI8 shift) %{
10904 match(Set dst (URShiftI src shift));
10905 // effect(KILL cr);
10907 format %{ "SRL $dst, $src, $shift #@shr_logical_Reg_imm" %}
10908 ins_encode %{
10909 Register src = $src$$Register;
10910 Register dst = $dst$$Register;
10911 int shift = $shift$$constant;
10913 __ srl(dst, src, shift);
10914 %}
10915 ins_pipe( ialu_regI_regI );
10916 %}
10918 instruct shr_logical_Reg_imm_nonneg_mask(mRegI dst, mRegI src, immI_0_31 shift, immI_nonneg_mask mask) %{
10919 match(Set dst (AndI (URShiftI src shift) mask));
10921 format %{ "ext $dst, $src, $shift, one-bits($mask) #@shr_logical_Reg_imm_nonneg_mask" %}
10922 ins_encode %{
10923 Register src = $src$$Register;
10924 Register dst = $dst$$Register;
10925 int pos = $shift$$constant;
10926 int size = Assembler::is_int_mask($mask$$constant);
10928 __ ext(dst, src, pos, size);
10929 %}
10930 ins_pipe( ialu_regI_regI );
10931 %}
// Rotate-left int by immediate: (x << l) | (x >>> r) with (l + r) % 32 == 0
// is a rotate right by r.
instruct rolI_Reg_immI_0_31(mRegI dst, immI_0_31 lshift, immI_0_31 rshift)
%{
  predicate(0 == ((n->in(1)->in(2)->get_int() + n->in(2)->in(2)->get_int()) & 0x1f));
  match(Set dst (OrI (LShiftI dst lshift) (URShiftI dst rshift)));

  ins_cost(100);
  format %{ "rotr $dst, $dst, $rshift #@rolI_Reg_immI_0_31" %}
  ins_encode %{
    Register dst = $dst$$Register;
    int sa = $rshift$$constant;

    __ rotr(dst, dst, sa);
  %}
  ins_pipe( ialu_regI_regI );
%}

// Rotate-left long, right-rotate amount in [0, 31].
instruct rolL_Reg_immI_0_31(mRegL dst, immI_32_63 lshift, immI_0_31 rshift)
%{
  predicate(0 == ((n->in(1)->in(2)->get_int() + n->in(2)->in(2)->get_int()) & 0x3f));
  match(Set dst (OrL (LShiftL dst lshift) (URShiftL dst rshift)));

  ins_cost(100);
  format %{ "rotr $dst, $dst, $rshift #@rolL_Reg_immI_0_31" %}
  ins_encode %{
    Register dst = $dst$$Register;
    int sa = $rshift$$constant;

    __ drotr(dst, dst, sa);
  %}
  ins_pipe( ialu_regI_regI );
%}

// Rotate-left long, right-rotate amount in [32, 63]: drotr32 encodes the
// amount as sa - 32.
instruct rolL_Reg_immI_32_63(mRegL dst, immI_0_31 lshift, immI_32_63 rshift)
%{
  predicate(0 == ((n->in(1)->in(2)->get_int() + n->in(2)->in(2)->get_int()) & 0x3f));
  match(Set dst (OrL (LShiftL dst lshift) (URShiftL dst rshift)));

  ins_cost(100);
  format %{ "rotr $dst, $dst, $rshift #@rolL_Reg_immI_32_63" %}
  ins_encode %{
    Register dst = $dst$$Register;
    int sa = $rshift$$constant;

    __ drotr32(dst, dst, sa - 32);
  %}
  ins_pipe( ialu_regI_regI );
%}

// Rotate-right int by immediate (same rotr, operands matched in the other
// order than rolI above).
instruct rorI_Reg_immI_0_31(mRegI dst, immI_0_31 rshift, immI_0_31 lshift)
%{
  predicate(0 == ((n->in(1)->in(2)->get_int() + n->in(2)->in(2)->get_int()) & 0x1f));
  match(Set dst (OrI (URShiftI dst rshift) (LShiftI dst lshift)));

  ins_cost(100);
  format %{ "rotr $dst, $dst, $rshift #@rorI_Reg_immI_0_31" %}
  ins_encode %{
    Register dst = $dst$$Register;
    int sa = $rshift$$constant;

    __ rotr(dst, dst, sa);
  %}
  ins_pipe( ialu_regI_regI );
%}

// Rotate-right long, amount in [0, 31].
instruct rorL_Reg_immI_0_31(mRegL dst, immI_0_31 rshift, immI_32_63 lshift)
%{
  predicate(0 == ((n->in(1)->in(2)->get_int() + n->in(2)->in(2)->get_int()) & 0x3f));
  match(Set dst (OrL (URShiftL dst rshift) (LShiftL dst lshift)));

  ins_cost(100);
  format %{ "rotr $dst, $dst, $rshift #@rorL_Reg_immI_0_31" %}
  ins_encode %{
    Register dst = $dst$$Register;
    int sa = $rshift$$constant;

    __ drotr(dst, dst, sa);
  %}
  ins_pipe( ialu_regI_regI );
%}

// Rotate-right long, amount in [32, 63] (drotr32 takes amount - 32).
instruct rorL_Reg_immI_32_63(mRegL dst, immI_32_63 rshift, immI_0_31 lshift)
%{
  predicate(0 == ((n->in(1)->in(2)->get_int() + n->in(2)->in(2)->get_int()) & 0x3f));
  match(Set dst (OrL (URShiftL dst rshift) (LShiftL dst lshift)));

  ins_cost(100);
  format %{ "rotr $dst, $dst, $rshift #@rorL_Reg_immI_32_63" %}
  ins_encode %{
    Register dst = $dst$$Register;
    int sa = $rshift$$constant;

    __ drotr32(dst, dst, sa - 32);
  %}
  ins_pipe( ialu_regI_regI );
%}
// Logical Shift Right
// Variable-amount logical shift right: dst = src >>> (shift & 31).
instruct shr_logical_Reg_Reg(mRegI dst, mRegI src, mRegI shift) %{
  match(Set dst (URShiftI src shift));

  format %{ "SRL $dst, $src, $shift #@shr_logical_Reg_Reg" %}
  ins_encode %{
    Register src = $src$$Register;
    Register dst = $dst$$Register;
    Register shift = $shift$$Register;
    __ srlv(dst, src, shift);
  %}
  ins_pipe( ialu_regI_regI );
%}

// Arithmetic shift right by immediate: dst = src >> shift.
instruct shr_arith_Reg_imm(mRegI dst, mRegI src, immI8 shift) %{
  match(Set dst (RShiftI src shift));
  // effect(KILL cr);

  format %{ "SRA $dst, $src, $shift #@shr_arith_Reg_imm" %}
  ins_encode %{
    Register src = $src$$Register;
    Register dst = $dst$$Register;
    int shift = $shift$$constant;
    __ sra(dst, src, shift);
  %}
  ins_pipe( ialu_regI_regI );
%}

// Variable-amount arithmetic shift right.
instruct shr_arith_Reg_Reg(mRegI dst, mRegI src, mRegI shift) %{
  match(Set dst (RShiftI src shift));
  // effect(KILL cr);

  format %{ "SRA $dst, $src, $shift #@shr_arith_Reg_Reg" %}
  ins_encode %{
    Register src = $src$$Register;
    Register dst = $dst$$Register;
    Register shift = $shift$$Register;
    __ srav(dst, src, shift);
  %}
  ins_pipe( ialu_regI_regI );
%}
//----------Convert Int to Boolean---------------------------------------------

// Conv2B on an int: dst = (src != 0) ? 1 : 0.
instruct convI2B(mRegI dst, mRegI src) %{
  match(Set dst (Conv2B src));

  ins_cost(100);
  format %{ "convI2B $dst, $src @ convI2B" %}
  ins_encode %{
    Register dst = as_Register($dst$$reg);
    Register src = as_Register($src$$reg);

    if (dst != src) {
      __ daddiu(dst, R0, 1);
      __ movz(dst, R0, src);     // dst = 0 when src == 0
    } else {
      // dst aliases src: save src in AT before clobbering it.
      __ move(AT, src);
      __ daddiu(dst, R0, 1);
      __ movz(dst, R0, AT);
    }
  %}

  ins_pipe( ialu_regL_regL );
%}

// Sign-extend int to long; sll with amount 0 performs the sign extension.
instruct convI2L_reg( mRegL dst, mRegI src) %{
  match(Set dst (ConvI2L src));

  ins_cost(100);
  format %{ "SLL $dst, $src @ convI2L_reg\t" %}
  ins_encode %{
    Register dst = as_Register($dst$$reg);
    Register src = as_Register($src$$reg);

    // When dst == src the value is already in canonical form; emit nothing.
    if(dst != src) __ sll(dst, src, 0);
  %}
  ins_pipe( ialu_regL_regL );
%}

// Truncate long to int (keep low 32 bits, sign-extended).
instruct convL2I_reg( mRegI dst, mRegL src ) %{
  match(Set dst (ConvL2I src));

  format %{ "MOV $dst, $src @ convL2I_reg" %}
  ins_encode %{
    Register dst = as_Register($dst$$reg);
    Register src = as_Register($src$$reg);

    __ sll(dst, src, 0);
  %}

  ins_pipe( ialu_regI_regI );
%}

// ConvI2L(ConvL2I x): both conversions collapse to one sign extension.
instruct convL2I2L_reg( mRegL dst, mRegL src ) %{
  match(Set dst (ConvI2L (ConvL2I src)));

  format %{ "sll $dst, $src, 0 @ convL2I2L_reg" %}
  ins_encode %{
    Register dst = as_Register($dst$$reg);
    Register src = as_Register($src$$reg);

    __ sll(dst, src, 0);
  %}

  ins_pipe( ialu_regI_regI );
%}

// Long -> double: move the 64-bit value into the FPU, then convert.
instruct convL2D_reg( regD dst, mRegL src ) %{
  match(Set dst (ConvL2D src));
  format %{ "convL2D $dst, $src @ convL2D_reg" %}
  ins_encode %{
    Register src = as_Register($src$$reg);
    FloatRegister dst = as_FloatRegister($dst$$reg);

    __ dmtc1(src, dst);
    __ cvt_d_l(dst, dst);
  %}

  ins_pipe( pipe_slow );
%}
// Double -> long, fast path: trunc_l_d does the conversion; only when the
// result equals max_long (the saturated value trunc produces on overflow/NaN)
// do we fall into the fix-up code that distinguishes positive overflow,
// negative overflow and NaN.
instruct convD2L_reg_fast( mRegL dst, regD src ) %{
  match(Set dst (ConvD2L src));
  ins_cost(150);
  format %{ "convD2L $dst, $src @ convD2L_reg_fast" %}
  ins_encode %{
    Register dst = as_Register($dst$$reg);
    FloatRegister src = as_FloatRegister($src$$reg);

    Label Done;

    __ trunc_l_d(F30, src);
    // max_long: 0x7fffffffffffffff
    // __ set64(AT, 0x7fffffffffffffff);
    __ daddiu(AT, R0, -1);
    __ dsrl(AT, AT, 1);             // AT = 0x7fffffffffffffff (max_long)
    __ dmfc1(dst, F30);

    __ bne(dst, AT, Done);          // not saturated -> result is correct
    __ delayed()->mtc1(R0, F30);

    __ cvt_d_w(F30, F30);           // F30 = 0.0
    __ c_ult_d(src, F30);           // src < 0.0 (unordered counts as true)?
    __ bc1f(Done);                  // src >= 0: keep max_long
    __ delayed()->daddiu(T9, R0, -1);

    __ c_un_d(src, src); //NaN?
    __ subu(dst, T9, AT);           // dst = -1 - max_long = min_long
    __ movt(dst, R0);               // NaN -> 0
    __ bind(Done);
  %}

  ins_pipe( pipe_slow );
%}

// Double -> long, slow path: after trunc_l_d, check the FCSR (control reg 31)
// flag 0x10000 (presumably the invalid-operation flag — confirm) and fall
// back to SharedRuntime::d2l for the exceptional cases.
instruct convD2L_reg_slow( mRegL dst, regD src ) %{
  match(Set dst (ConvD2L src));
  ins_cost(250);
  format %{ "convD2L $dst, $src @ convD2L_reg_slow" %}
  ins_encode %{
    Register dst = as_Register($dst$$reg);
    FloatRegister src = as_FloatRegister($src$$reg);

    Label L;

    __ c_un_d(src, src); //NaN?
    __ bc1t(L);
    __ delayed();
    __ move(dst, R0);               // NaN -> 0 (also fills the delay slot)

    __ trunc_l_d(F30, src);
    __ cfc1(AT, 31);
    __ li(T9, 0x10000);
    __ andr(AT, AT, T9);
    __ beq(AT, R0, L);              // no FP exception -> dst from delay slot
    __ delayed()->dmfc1(dst, F30);

    __ mov_d(F12, src);             // F12: first FP argument register
    __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::d2l), 1);
    __ move(dst, V0);
    __ bind(L);
  %}

  ins_pipe( pipe_slow );
%}
// Float -> int, fast path: truncate, then force the result to 0 when the
// source is NaN (c_un_s sets the FP condition bit, movt clears dst).
instruct convF2I_reg_fast( mRegI dst, regF src ) %{
  match(Set dst (ConvF2I src));
  ins_cost(150);
  format %{ "convf2i $dst, $src @ convF2I_reg_fast" %}
  ins_encode %{
    Register dreg = $dst$$Register;
    FloatRegister fval = $src$$FloatRegister;

    __ trunc_w_s(F30, fval);
    __ mfc1(dreg, F30);
    __ c_un_s(fval, fval); //NaN?
    __ movt(dreg, R0);
  %}

  ins_pipe( pipe_slow );
%}

// Float -> int, slow path: check the FCSR flag 0x10000 after truncation and
// call SharedRuntime::f2i for the exceptional cases.
instruct convF2I_reg_slow( mRegI dst, regF src ) %{
  match(Set dst (ConvF2I src));
  ins_cost(250);
  format %{ "convf2i $dst, $src @ convF2I_reg_slow" %}
  ins_encode %{
    Register dreg = $dst$$Register;
    FloatRegister fval = $src$$FloatRegister;
    Label L;

    __ c_un_s(fval, fval); //NaN?
    __ bc1t(L);
    __ delayed();
    __ move(dreg, R0);              // NaN -> 0

    __ trunc_w_s(F30, fval);

    /* Call SharedRuntime:f2i() to do valid convention */
    __ cfc1(AT, 31);
    __ li(T9, 0x10000);
    __ andr(AT, AT, T9);
    __ beq(AT, R0, L);
    __ delayed()->mfc1(dreg, F30);

    __ mov_s(F12, fval);

    /* 2014/01/08 Fu : This bug was found when running ezDS's control-panel.
     * J 982 C2 javax.swing.text.BoxView.layoutMajorAxis(II[I[I)V (283 bytes) @ 0x000000555c46aa74
     *
     * An interger array index has been assigned to V0, and then changed from 1 to Integer.MAX_VALUE.
     * V0 is corrupted during call_VM_leaf(), and should be preserved.
     */
    if(dreg != V0) {
      __ push(V0);
    }
    __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::f2i), 1);
    if(dreg != V0) {
      __ move(dreg, V0);
      __ pop(V0);
    }
    __ bind(L);
  %}

  ins_pipe( pipe_slow );
%}

// Float -> long, fast path (same NaN-to-zero pattern as convF2I_reg_fast).
instruct convF2L_reg_fast( mRegL dst, regF src ) %{
  match(Set dst (ConvF2L src));
  ins_cost(150);
  format %{ "convf2l $dst, $src @ convF2L_reg_fast" %}
  ins_encode %{
    Register dreg = $dst$$Register;
    FloatRegister fval = $src$$FloatRegister;

    __ trunc_l_s(F30, fval);
    __ dmfc1(dreg, F30);
    __ c_un_s(fval, fval); //NaN?
    __ movt(dreg, R0);
  %}

  ins_pipe( pipe_slow );
%}

// Float -> long, slow path via SharedRuntime::f2l.
// NOTE(review): unlike convF2I_reg_slow above, V0 is not preserved around the
// leaf call when dst != V0 — looks inconsistent; verify it cannot be live.
instruct convF2L_reg_slow( mRegL dst, regF src ) %{
  match(Set dst (ConvF2L src));
  ins_cost(250);
  format %{ "convf2l $dst, $src @ convF2L_reg_slow" %}
  ins_encode %{
    Register dst = as_Register($dst$$reg);
    FloatRegister fval = $src$$FloatRegister;
    Label L;

    __ c_un_s(fval, fval); //NaN?
    __ bc1t(L);
    __ delayed();
    __ move(dst, R0);

    __ trunc_l_s(F30, fval);
    __ cfc1(AT, 31);
    __ li(T9, 0x10000);
    __ andr(AT, AT, T9);
    __ beq(AT, R0, L);
    __ delayed()->dmfc1(dst, F30);

    __ mov_s(F12, fval);
    __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::f2l), 1);
    __ move(dst, V0);
    __ bind(L);
  %}

  ins_pipe( pipe_slow );
%}
// Long -> float: move the 64-bit integer into the FPU, then convert.
// (Removed an unused local `Label L;` — no branch in this encoding uses it.)
instruct convL2F_reg( regF dst, mRegL src ) %{
  match(Set dst (ConvL2F src));
  format %{ "convl2f $dst, $src @ convL2F_reg" %}
  ins_encode %{
    FloatRegister dst = $dst$$FloatRegister;
    Register src = as_Register($src$$reg);

    __ dmtc1(src, dst);
    __ cvt_s_l(dst, dst);
  %}

  ins_pipe( pipe_slow );
%}
// Int -> float: move the 32-bit value into the FPU, then convert word->single.
instruct convI2F_reg( regF dst, mRegI src ) %{
  match(Set dst (ConvI2F src));
  format %{ "convi2f $dst, $src @ convI2F_reg" %}
  ins_encode %{
    Register src = $src$$Register;
    FloatRegister dst = $dst$$FloatRegister;

    __ mtc1(src, dst);
    __ cvt_s_w(dst, dst);
  %}

  ins_pipe( fpu_regF_regF );
%}
// CmpLTMask against zero: dst = (p < 0) ? -1 : 0, i.e. the sign bit smeared
// across the whole register via an arithmetic shift.
instruct cmpLTMask_immI0( mRegI dst, mRegI p, immI0 zero ) %{
  match(Set dst (CmpLTMask p zero));
  ins_cost(100);

  format %{ "sra $dst, $p, 31 @ cmpLTMask_immI0" %}
  ins_encode %{
    Register src = $p$$Register;
    Register dst = $dst$$Register;

    __ sra(dst, src, 31);
  %}
  ins_pipe( pipe_slow );
%}

// CmpLTMask: dst = (p < q) ? -1 : 0. slt yields 0/1; negating gives 0/-1.
instruct cmpLTMask( mRegI dst, mRegI p, mRegI q ) %{
  match(Set dst (CmpLTMask p q));
  ins_cost(400);

  format %{ "cmpLTMask $dst, $p, $q @ cmpLTMask" %}
  ins_encode %{
    Register p = $p$$Register;
    Register q = $q$$Register;
    Register dst = $dst$$Register;

    __ slt(dst, p, q);
    __ subu(dst, R0, dst);
  %}
  ins_pipe( pipe_slow );
%}

// Conv2B on a pointer: dst = (src != NULL) ? 1 : 0 (same pattern as convI2B).
instruct convP2B(mRegI dst, mRegP src) %{
  match(Set dst (Conv2B src));

  ins_cost(100);
  format %{ "convP2B $dst, $src @ convP2B" %}
  ins_encode %{
    Register dst = as_Register($dst$$reg);
    Register src = as_Register($src$$reg);

    if (dst != src) {
      __ daddiu(dst, R0, 1);
      __ movz(dst, R0, src);
    } else {
      // dst aliases src: save src in AT first.
      __ move(AT, src);
      __ daddiu(dst, R0, 1);
      __ movz(dst, R0, AT);
    }
  %}

  ins_pipe( ialu_regL_regL );
%}
// Int -> double: move to FPU, convert word->double.
instruct convI2D_reg_reg(regD dst, mRegI src) %{
  match(Set dst (ConvI2D src));
  format %{ "conI2D $dst, $src @convI2D_reg" %}
  ins_encode %{
    Register src = $src$$Register;
    FloatRegister dst = $dst$$FloatRegister;
    __ mtc1(src, dst);
    __ cvt_d_w(dst, dst);
  %}
  ins_pipe( fpu_regF_regF );
%}

// Float -> double widening conversion.
instruct convF2D_reg_reg(regD dst, regF src) %{
  match(Set dst (ConvF2D src));
  format %{ "convF2D $dst, $src\t# @convF2D_reg_reg" %}
  ins_encode %{
    FloatRegister dst = $dst$$FloatRegister;
    FloatRegister src = $src$$FloatRegister;

    __ cvt_d_s(dst, src);
  %}
  ins_pipe( fpu_regF_regF );
%}

// Double -> float narrowing conversion.
instruct convD2F_reg_reg(regF dst, regD src) %{
  match(Set dst (ConvD2F src));
  format %{ "convD2F $dst, $src\t# @convD2F_reg_reg" %}
  ins_encode %{
    FloatRegister dst = $dst$$FloatRegister;
    FloatRegister src = $src$$FloatRegister;

    __ cvt_s_d(dst, src);
  %}
  ins_pipe( fpu_regF_regF );
%}
// Convert a double to an int. If the double is a NAN, stuff a zero in instead.

// Fast path: trunc_w_d saturates to max_int on overflow/NaN; only then run
// the fix-up sequence that distinguishes the three exceptional cases.
instruct convD2I_reg_reg_fast( mRegI dst, regD src ) %{
  match(Set dst (ConvD2I src));

  ins_cost(150);
  format %{ "convD2I $dst, $src\t# @ convD2I_reg_reg_fast" %}

  ins_encode %{
    FloatRegister src = $src$$FloatRegister;
    Register dst = $dst$$Register;

    Label Done;

    __ trunc_w_d(F30, src);
    // max_int: 2147483647
    __ move(AT, 0x7fffffff);
    __ mfc1(dst, F30);

    __ bne(dst, AT, Done);          // not saturated -> result is correct
    __ delayed()->mtc1(R0, F30);

    __ cvt_d_w(F30, F30);           // F30 = 0.0
    __ c_ult_d(src, F30);
    __ bc1f(Done);                  // src >= 0: keep max_int
    __ delayed()->addiu(T9, R0, -1);

    __ c_un_d(src, src); //NaN?
    __ subu32(dst, T9, AT);         // dst = -1 - max_int = min_int
    __ movt(dst, R0);               // NaN -> 0

    __ bind(Done);
  %}
  ins_pipe( pipe_slow );
%}

// Slow path: check the FCSR flag 0x10000 after truncation and call
// SharedRuntime::d2i for exceptional inputs.
instruct convD2I_reg_reg_slow( mRegI dst, regD src ) %{
  match(Set dst (ConvD2I src));

  ins_cost(250);
  format %{ "convD2I $dst, $src\t# @ convD2I_reg_reg_slow" %}

  ins_encode %{
    FloatRegister src = $src$$FloatRegister;
    Register dst = $dst$$Register;
    Label L;

    __ trunc_w_d(F30, src);
    __ cfc1(AT, 31);
    __ li(T9, 0x10000);
    __ andr(AT, AT, T9);
    __ beq(AT, R0, L);
    __ delayed()->mfc1(dst, F30);

    __ mov_d(F12, src);
    __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::d2i), 1);
    __ move(dst, V0);
    __ bind(L);

  %}
  ins_pipe( pipe_slow );
%}
// Convert oop pointer into compressed form
// (maybe-null variant: encode_heap_oop handles NULL).
instruct encodeHeapOop(mRegN dst, mRegP src) %{
  predicate(n->bottom_type()->make_ptr()->ptr() != TypePtr::NotNull);
  match(Set dst (EncodeP src));
  format %{ "encode_heap_oop $dst,$src" %}
  ins_encode %{
    Register src = $src$$Register;
    Register dst = $dst$$Register;
    // encode_heap_oop works in place, so copy into dst first when needed.
    if (src != dst) {
      __ move(dst, src);
    }
    __ encode_heap_oop(dst);
  %}
  ins_pipe( ialu_regL_regL );
%}

// Compress an oop known to be non-null (cheaper: no null check needed).
instruct encodeHeapOop_not_null(mRegN dst, mRegP src) %{
  predicate(n->bottom_type()->make_ptr()->ptr() == TypePtr::NotNull);
  match(Set dst (EncodeP src));
  format %{ "encode_heap_oop_not_null $dst,$src @ encodeHeapOop_not_null" %}
  ins_encode %{
    __ encode_heap_oop_not_null($dst$$Register, $src$$Register);
  %}
  ins_pipe( ialu_regL_regL );
%}

// Decompress a narrow oop that may be null.
instruct decodeHeapOop(mRegP dst, mRegN src) %{
  predicate(n->bottom_type()->is_ptr()->ptr() != TypePtr::NotNull &&
            n->bottom_type()->is_ptr()->ptr() != TypePtr::Constant);
  match(Set dst (DecodeN src));
  format %{ "decode_heap_oop $dst,$src @ decodeHeapOop" %}
  ins_encode %{
    Register s = $src$$Register;
    Register d = $dst$$Register;
    // decode_heap_oop works in place; copy first when src and dst differ.
    if (s != d) {
      __ move(d, s);
    }
    __ decode_heap_oop(d);
  %}
  ins_pipe( ialu_regL_regL );
%}

// Decompress a narrow oop known to be non-null.
instruct decodeHeapOop_not_null(mRegP dst, mRegN src) %{
  predicate(n->bottom_type()->is_ptr()->ptr() == TypePtr::NotNull ||
            n->bottom_type()->is_ptr()->ptr() == TypePtr::Constant);
  match(Set dst (DecodeN src));
  format %{ "decode_heap_oop_not_null $dst,$src @ decodeHeapOop_not_null" %}
  ins_encode %{
    Register s = $src$$Register;
    Register d = $dst$$Register;
    if (s != d) {
      __ decode_heap_oop_not_null(d, s);
    } else {
      __ decode_heap_oop_not_null(d);
    }
  %}
  ins_pipe( ialu_regL_regL );
%}

// Compress a klass pointer (always non-null).
instruct encodeKlass_not_null(mRegN dst, mRegP src) %{
  match(Set dst (EncodePKlass src));
  format %{ "encode_heap_oop_not_null $dst,$src @ encodeKlass_not_null" %}
  ins_encode %{
    __ encode_klass_not_null($dst$$Register, $src$$Register);
  %}
  ins_pipe( ialu_regL_regL );
%}

// Decompress a narrow klass pointer (always non-null).
instruct decodeKlass_not_null(mRegP dst, mRegN src) %{
  match(Set dst (DecodeNKlass src));
  format %{ "decode_heap_klass_not_null $dst,$src" %}
  ins_encode %{
    Register s = $src$$Register;
    Register d = $dst$$Register;
    if (s != d) {
      __ decode_klass_not_null(d, s);
    } else {
      __ decode_klass_not_null(d);
    }
  %}
  ins_pipe( ialu_regL_regL );
%}
//FIXME
// Load the current JavaThread pointer. With OPT_THREAD the thread lives in a
// dedicated register (TREG); otherwise fetch it via get_thread().
instruct tlsLoadP(mRegP dst) %{
  match(Set dst (ThreadLocal));

  ins_cost(0);
  format %{ " get_thread in $dst #@tlsLoadP" %}
  ins_encode %{
    Register dst = $dst$$Register;
#ifdef OPT_THREAD
    __ move(dst, TREG);
#else
    __ get_thread(dst);
#endif
  %}

  ins_pipe( ialu_loadI );
%}

// CheckCastPP is a type assertion only — no code is emitted.
instruct checkCastPP( mRegP dst ) %{
  match(Set dst (CheckCastPP dst));

  format %{ "#checkcastPP of $dst (empty encoding) #@chekCastPP" %}
  ins_encode( /*empty encoding*/ );
  ins_pipe( empty );
%}

// CastPP is likewise a compile-time-only node; zero bytes of code.
instruct castPP(mRegP dst)
%{
  match(Set dst (CastPP dst));

  size(0);
  format %{ "# castPP of $dst" %}
  ins_encode(/* empty encoding */);
  ins_pipe(empty);
%}

// CastII: compile-time-only int cast, no code emitted.
instruct castII( mRegI dst ) %{
  match(Set dst (CastII dst));
  format %{ "#castII of $dst empty encoding" %}
  ins_encode( /*empty encoding*/ );
  ins_cost(0);
  ins_pipe( empty );
%}

// Return Instruction
// Remove the return address & jump to it.
instruct Ret() %{
  match(Return);
  format %{ "RET #@Ret" %}

  ins_encode %{
    __ jr(RA);
    __ nop();   // branch delay slot
  %}

  ins_pipe( pipe_jump );
%}
11651 /*
11652 // For Loongson CPUs, jr seems too slow, so this rule shouldn't be imported.
11653 instruct jumpXtnd(mRegL switch_val) %{
11654 match(Jump switch_val);
11656 ins_cost(350);
11658 format %{ "load T9 <-- [$constanttablebase, $switch_val, $constantoffset] @ jumpXtnd\n\t"
11659 "jr T9\n\t"
11660 "nop" %}
11661 ins_encode %{
11662 Register table_base = $constanttablebase;
11663 int con_offset = $constantoffset;
11664 Register switch_reg = $switch_val$$Register;
11666 if (UseLoongsonISA) {
11667 if (Assembler::is_simm(con_offset, 8)) {
11668 __ gsldx(T9, table_base, switch_reg, con_offset);
11669 } else if (Assembler::is_simm16(con_offset)) {
11670 __ daddu(T9, table_base, switch_reg);
11671 __ ld(T9, T9, con_offset);
11672 } else {
11673 __ move(T9, con_offset);
11674 __ daddu(AT, table_base, switch_reg);
11675 __ gsldx(T9, AT, T9, 0);
11676 }
11677 } else {
11678 if (Assembler::is_simm16(con_offset)) {
11679 __ daddu(T9, table_base, switch_reg);
11680 __ ld(T9, T9, con_offset);
11681 } else {
11682 __ move(T9, con_offset);
11683 __ daddu(AT, table_base, switch_reg);
11684 __ daddu(AT, T9, AT);
11685 __ ld(T9, AT, 0);
11686 }
11687 }
11689 __ jr(T9);
11690 __ nop();
11692 %}
11693 ins_pipe(pipe_jump);
11694 %}
11695 */
// Jump Direct - Label defines a relative address from JMP
instruct jmpDir(label labl) %{
  match(Goto);
  effect(USE labl);

  ins_cost(300);
  format %{ "JMP $labl #@jmpDir" %}

  ins_encode %{
    Label &L = *($labl$$label);
    // NOTE(review): `&L` tests whether the underlying label pointer was NULL
    // (unbound target yet to be patched) — standard AD-file idiom here.
    if(&L)
      __ b(L);
    else
      __ b(int(0));   // placeholder branch, to be back-patched
    __ nop();         // branch delay slot
  %}

  ins_pipe( pipe_jump );
  ins_pc_relative(1);
%}
// Tail Jump; remove the return address; jump to target.
// TailCall above leaves the return address around.
// TailJump is used in only one place, the rethrow_Java stub (fancy_jump=2).
// ex_oop (Exception Oop) is needed in %o0 at the jump. As there would be a
// "restore" before this instruction (in Epilogue), we need to materialize it
// in %i0.
//FIXME
instruct tailjmpInd(mRegP jump_target,mRegP ex_oop) %{
  match( TailJump jump_target ex_oop );
  ins_cost(200);
  format %{ "Jmp $jump_target ; ex_oop = $ex_oop #@tailjmpInd" %}
  ins_encode %{
    Register target = $jump_target$$Register;

    /* 2012/9/14 Jin: V0, V1 are indicated in:
     *  [stubGenerator_mips.cpp] generate_forward_exception()
     *  [runtime_mips.cpp] OptoRuntime::generate_exception_blob()
     */
    Register oop = $ex_oop$$Register;
    Register exception_oop = V0;
    Register exception_pc = V1;

    // Hand off the exception oop and the (return) pc in the registers the
    // exception-handling stubs expect, then jump to the target.
    __ move(exception_pc, RA);
    __ move(exception_oop, oop);

    __ jr(target);
    __ nop();   // branch delay slot
  %}
  ins_pipe( pipe_jump );
%}
// ============================================================================
// Procedure Call/Return Instructions
// Call Java Static Instruction
// Note: If this code changes, the corresponding ret_addr_offset() and
//       compute_padding() functions will have to be adjusted.
instruct CallStaticJavaDirect(method meth) %{
  match(CallStaticJava);
  effect(USE meth);

  ins_cost(300);
  format %{ "CALL,static #@CallStaticJavaDirect " %}
  ins_encode( Java_Static_Call( meth ) );
  ins_pipe( pipe_slow );
  ins_pc_relative(1);
  ins_alignment(16);
%}

// Call Java Dynamic Instruction
// Note: If this code changes, the corresponding ret_addr_offset() and
//       compute_padding() functions will have to be adjusted.
instruct CallDynamicJavaDirect(method meth) %{
  match(CallDynamicJava);
  effect(USE meth);

  ins_cost(300);
  format %{"MOV IC_Klass, (oop)-1 @ CallDynamicJavaDirect\n\t"
           "CallDynamic @ CallDynamicJavaDirect" %}
  ins_encode( Java_Dynamic_Call( meth ) );
  ins_pipe( pipe_slow );
  ins_pc_relative(1);
  ins_alignment(16);
%}

// Leaf call into the runtime with no floating-point arguments and no
// safepoint.
instruct CallLeafNoFPDirect(method meth) %{
  match(CallLeafNoFP);
  effect(USE meth);

  ins_cost(300);
  format %{ "CALL_LEAF_NOFP,runtime " %}
  ins_encode(Java_To_Runtime(meth));
  ins_pipe( pipe_slow );
  ins_pc_relative(1);
  ins_alignment(16);
%}
11796 // Prefetch instructions.
// Prefetch for read: compute the effective address base + (index << scale)
// + disp into AT and issue `pref` with hint 0 (load).
instruct prefetchrNTA( memory mem ) %{
  match(PrefetchRead mem);
  ins_cost(125);

  format %{ "pref $mem\t# Prefetch into non-temporal cache for read @ prefetchrNTA" %}
  ins_encode %{
    int base = $mem$$base;
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;

    // AT = base + (index << scale)
    if( index != 0 ) {
      if (scale == 0) {
        __ daddu(AT, as_Register(base), as_Register(index));
      } else {
        __ dsll(AT, as_Register(index), scale);
        __ daddu(AT, as_Register(base), AT);
      }
    } else {
      __ move(AT, as_Register(base));
    }
    // AT += disp.  The previous code restarted from `base` here (dropping the
    // index contribution computed above) and, in the simm16 case, added disp
    // twice — so the prefetch targeted the wrong address.  Accumulate onto AT.
    if( Assembler::is_simm16(disp) ) {
      __ daddiu(AT, AT, disp);
    } else {
      __ move(T9, disp);
      __ daddu(AT, AT, T9);
    }
    __ pref(0, AT, 0); //hint: 0:load
  %}
  ins_pipe(pipe_slow);
%}
// Prefetch for write: same effective-address computation as prefetchrNTA,
// but `pref` is issued with hint 1 (store).
instruct prefetchwNTA( memory mem ) %{
  match(PrefetchWrite mem);
  ins_cost(125);
  format %{ "pref $mem\t# Prefetch to non-temporal cache for write @ prefetchwNTA" %}
  ins_encode %{
    int base = $mem$$base;
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;

    // AT = base + (index << scale)
    if( index != 0 ) {
      if (scale == 0) {
        __ daddu(AT, as_Register(base), as_Register(index));
      } else {
        __ dsll(AT, as_Register(index), scale);
        __ daddu(AT, as_Register(base), AT);
      }
    } else {
      __ move(AT, as_Register(base));
    }
    // AT += disp.  The previous code restarted from `base` here (dropping the
    // index contribution) and added disp twice in the simm16 case, so the
    // prefetch targeted the wrong address.  Accumulate onto AT instead.
    if( Assembler::is_simm16(disp) ) {
      __ daddiu(AT, AT, disp);
    } else {
      __ move(T9, disp);
      __ daddu(AT, AT, T9);
    }
    __ pref(1, AT, 0); //hint: 1:store
  %}
  ins_pipe(pipe_slow);
%}
// Prefetch instructions for allocation.

// Allocation prefetch: implemented as a byte load into R0 (architectural
// zero register, so the loaded value is discarded but the line is brought
// into the cache).  Uses gslbx (Loongson indexed load) when available.
instruct prefetchAllocNTA( memory mem ) %{
  match(PrefetchAllocation mem);
  ins_cost(125);
  format %{ "pref $mem\t# Prefetch allocation @ prefetchAllocNTA" %}
  ins_encode %{
    int base = $mem$$base;
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;

    Register dst = R0;

    if( index != 0 ) {
      if( Assembler::is_simm16(disp) ) {
        if( UseLoongsonISA ) {
          if (scale == 0) {
            __ gslbx(dst, as_Register(base), as_Register(index), disp);
          } else {
            __ dsll(AT, as_Register(index), scale);
            __ gslbx(dst, as_Register(base), AT, disp);
          }
        } else {
          if (scale == 0) {
            __ addu(AT, as_Register(base), as_Register(index));
          } else {
            __ dsll(AT, as_Register(index), scale);
            __ addu(AT, as_Register(base), AT);
          }
          __ lb(dst, AT, disp);
        }
      } else {
        // Displacement does not fit in 16 bits: materialize it in T9.
        if (scale == 0) {
          __ addu(AT, as_Register(base), as_Register(index));
        } else {
          __ dsll(AT, as_Register(index), scale);
          __ addu(AT, as_Register(base), AT);
        }
        __ move(T9, disp);
        if( UseLoongsonISA ) {
          __ gslbx(dst, AT, T9, 0);
        } else {
          __ addu(AT, AT, T9);
          __ lb(dst, AT, 0);
        }
      }
    } else {
      if( Assembler::is_simm16(disp) ) {
        __ lb(dst, as_Register(base), disp);
      } else {
        __ move(T9, disp);
        if( UseLoongsonISA ) {
          __ gslbx(dst, as_Register(base), T9, 0);
        } else {
          __ addu(AT, as_Register(base), T9);
          __ lb(dst, AT, 0);
        }
      }
    }
  %}
  ins_pipe(pipe_slow);
%}


// Call runtime without safepoint
instruct CallLeafDirect(method meth) %{
  match(CallLeaf);
  effect(USE meth);

  ins_cost(300);
  format %{ "CALL_LEAF,runtime #@CallLeafDirect " %}
  ins_encode(Java_To_Runtime(meth));
  ins_pipe( pipe_slow );
  ins_pc_relative(1);
  ins_alignment(16);
%}
// Load Char (16bit unsigned)
instruct loadUS(mRegI dst, memory mem) %{
  match(Set dst (LoadUS mem));

  ins_cost(125);
  format %{ "loadUS $dst,$mem @ loadC" %}
  ins_encode(load_C_enc(dst, mem));
  ins_pipe( ialu_loadI );
%}

// Load unsigned char and widen to long; zero-extension makes the I2L free.
instruct loadUS_convI2L(mRegL dst, memory mem) %{
  match(Set dst (ConvI2L (LoadUS mem)));

  ins_cost(125);
  format %{ "loadUS $dst,$mem @ loadUS_convI2L" %}
  ins_encode(load_C_enc(dst, mem));
  ins_pipe( ialu_loadI );
%}

// Store Char (16bit unsigned)
instruct storeC(memory mem, mRegI src) %{
  match(Set mem (StoreC mem src));

  ins_cost(125);
  format %{ "storeC $src, $mem @ storeC" %}
  ins_encode(store_C_reg_enc(mem, src));
  ins_pipe( ialu_loadI );
%}

// Store zero char: special-cased to avoid materializing the constant.
instruct storeC0(memory mem, immI0 zero) %{
  match(Set mem (StoreC mem zero));

  ins_cost(125);
  format %{ "storeC $zero, $mem @ storeC0" %}
  ins_encode(store_C0_enc(mem));
  ins_pipe( ialu_loadI );
%}

// Load float constant 0.0f: move from the integer zero register.
instruct loadConF0(regF dst, immF0 zero) %{
  match(Set dst zero);
  ins_cost(100);

  format %{ "mov $dst, zero @ loadConF0\n"%}
  ins_encode %{
    FloatRegister dst = $dst$$FloatRegister;

    __ mtc1(R0, dst);
  %}
  ins_pipe( fpu_loadF );
%}

// Load float constant from the constant table.
instruct loadConF(regF dst, immF src) %{
  match(Set dst src);
  ins_cost(125);

  format %{ "lwc1 $dst, $constantoffset[$constanttablebase] # load FLOAT $src from table @ loadConF" %}
  ins_encode %{
    int con_offset = $constantoffset($src);

    if (Assembler::is_simm16(con_offset)) {
      __ lwc1($dst$$FloatRegister, $constanttablebase, con_offset);
    } else {
      // Offset does not fit in 16 bits: materialize in AT and use an
      // indexed load (Loongson) or explicit addition.
      __ set64(AT, con_offset);
      if (UseLoongsonISA) {
        __ gslwxc1($dst$$FloatRegister, $constanttablebase, AT, 0);
      } else {
        __ daddu(AT, $constanttablebase, AT);
        __ lwc1($dst$$FloatRegister, AT, 0);
      }
    }
  %}
  ins_pipe( fpu_loadF );
%}

// Load double constant 0.0: 64-bit move from the integer zero register.
instruct loadConD0(regD dst, immD0 zero) %{
  match(Set dst zero);
  ins_cost(100);

  format %{ "mov $dst, zero @ loadConD0"%}
  ins_encode %{
    FloatRegister dst = as_FloatRegister($dst$$reg);

    __ dmtc1(R0, dst);
  %}
  ins_pipe( fpu_loadF );
%}

// Load double constant from the constant table.
instruct loadConD(regD dst, immD src) %{
  match(Set dst src);
  ins_cost(125);

  format %{ "ldc1 $dst, $constantoffset[$constanttablebase] # load DOUBLE $src from table @ loadConD" %}
  ins_encode %{
    int con_offset = $constantoffset($src);

    if (Assembler::is_simm16(con_offset)) {
      __ ldc1($dst$$FloatRegister, $constanttablebase, con_offset);
    } else {
      __ set64(AT, con_offset);
      if (UseLoongsonISA) {
        __ gsldxc1($dst$$FloatRegister, $constanttablebase, AT, 0);
      } else {
        __ daddu(AT, $constanttablebase, AT);
        __ ldc1($dst$$FloatRegister, AT, 0);
      }
    }
  %}
  ins_pipe( fpu_loadF );
%}
12054 // Store register Float value (it is faster than store from FPU register)
12055 instruct storeF_reg( memory mem, regF src) %{
12056 match(Set mem (StoreF mem src));
12058 ins_cost(50);
12059 format %{ "store $mem, $src\t# store float @ storeF_reg" %}
12060 ins_encode(store_F_reg_enc(mem, src));
12061 ins_pipe( fpu_storeF );
12062 %}
// Store float constant 0.0 to memory.  +0.0f is the all-zero 32-bit
// pattern, so integer register R0 is stored with sw instead of routing
// the zero through an FPU register.
instruct storeF_imm0( memory mem, immF0 zero) %{
  match(Set mem (StoreF mem zero));

  ins_cost(40);
  format %{ "store $mem, zero\t# store float @ storeF_imm0" %}
  ins_encode %{
    int base = $mem$$base;
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;

    if( index != 0 ) {
      // Effective address = base + (index << scale) + disp.
      if(scale != 0) {
        __ dsll(T9, as_Register(index), scale);
        // NOTE(review): addu produces a sign-extended 32-bit sum while the
        // scale==0 path uses daddu -- presumably safe for the address
        // ranges in use, but verify the inconsistency.
        __ addu(AT, as_Register(base), T9);
      } else {
        __ daddu(AT, as_Register(base), as_Register(index));
      }
      if( Assembler::is_simm16(disp) ) {
        __ sw(R0, AT, disp);
      } else {
        // Displacement exceeds sw's 16-bit immediate: add it explicitly.
        __ move(T9, disp);
        __ addu(AT, AT, T9);
        __ sw(R0, AT, 0);
      }
    } else {
      if( Assembler::is_simm16(disp) ) {
        __ sw(R0, as_Register(base), disp);
      } else {
        __ move(T9, disp);
        __ addu(AT, as_Register(base), T9);
        __ sw(R0, AT, 0);
      }
    }
  %}
  ins_pipe( ialu_storeI );
%}
// Load Double
instruct loadD(regD dst, memory mem) %{
  match(Set dst (LoadD mem));

  ins_cost(150);
  format %{ "loadD $dst, $mem #@loadD" %}
  ins_encode(load_D_enc(dst, mem));
  ins_pipe( ialu_loadI );
%}

// Load Double - UNaligned
instruct loadD_unaligned(regD dst, memory mem ) %{
  match(Set dst (LoadD_unaligned mem));
  ins_cost(250);
  // FIXME: Jin: Need more effective ldl/ldr
  // Currently emitted with the same encoding as the aligned load.
  format %{ "loadD_unaligned $dst, $mem #@loadD_unaligned" %}
  ins_encode(load_D_enc(dst, mem));
  ins_pipe( ialu_loadI );
%}

// Store double from an FPU register.
// NOTE(review): the format text says "store float" but this is a double store.
instruct storeD_reg( memory mem, regD src) %{
  match(Set mem (StoreD mem src));

  ins_cost(50);
  format %{ "store $mem, $src\t# store float @ storeD_reg" %}
  ins_encode(store_D_reg_enc(mem, src));
  ins_pipe( fpu_storeF );
%}
// Store double constant 0.0 to memory.
// +0.0 (double) is the all-zero 64-bit pattern, so a single dmtc1 of R0
// into the scratch FPU register F30 suffices; the previous
// mtc1 + cvt.d.w pair was a redundant two-instruction way to build the
// same value (this is the same technique loadConD0 uses).
instruct storeD_imm0( memory mem, immD0 zero) %{
  match(Set mem (StoreD mem zero));

  ins_cost(40);
  format %{ "store $mem, zero\t# store double @ storeD_imm0" %}
  ins_encode %{
    int base = $mem$$base;
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;

    // Materialize 0.0 in the scratch FPU register.
    __ dmtc1(R0, F30);

    if( index != 0 ) {
      // Effective address = base + (index << scale) + disp.
      if(scale != 0) {
        __ dsll(T9, as_Register(index), scale);
        __ addu(AT, as_Register(base), T9);
      } else {
        __ daddu(AT, as_Register(base), as_Register(index));
      }
      if( Assembler::is_simm16(disp) ) {
        __ sdc1(F30, AT, disp);
      } else {
        // Displacement exceeds sdc1's 16-bit immediate: add it explicitly.
        __ move(T9, disp);
        __ addu(AT, AT, T9);
        __ sdc1(F30, AT, 0);
      }
    } else {
      if( Assembler::is_simm16(disp) ) {
        __ sdc1(F30, as_Register(base), disp);
      } else {
        __ move(T9, disp);
        __ addu(AT, as_Register(base), T9);
        __ sdc1(F30, AT, 0);
      }
    }
  %}
  ins_pipe( ialu_storeI );
%}
// ----- Stack-slot load/store instructions ---------------------------------
// Each of these moves a value between a machine register and a stack slot
// addressed as SP + disp.  The guarantee() asserts that the displacement
// fits the 16-bit signed immediate of the MIPS load/store instruction.

// Load int from stack slot.
instruct loadSSI(mRegI dst, stackSlotI src)
%{
  match(Set dst src);

  ins_cost(125);
  format %{ "lw $dst, $src\t# int stk @ loadSSI" %}
  ins_encode %{
    guarantee( Assembler::is_simm16($src$$disp), "disp too long (loadSSI) !");
    __ lw($dst$$Register, SP, $src$$disp);
  %}
  ins_pipe(ialu_loadI);
%}

// Store int to stack slot.
instruct storeSSI(stackSlotI dst, mRegI src)
%{
  match(Set dst src);

  ins_cost(100);
  format %{ "sw $dst, $src\t# int stk @ storeSSI" %}
  ins_encode %{
    guarantee( Assembler::is_simm16($dst$$disp), "disp too long (storeSSI) !");
    __ sw($src$$Register, SP, $dst$$disp);
  %}
  ins_pipe(ialu_storeI);
%}

// Load long from stack slot.
instruct loadSSL(mRegL dst, stackSlotL src)
%{
  match(Set dst src);

  ins_cost(125);
  format %{ "ld $dst, $src\t# long stk @ loadSSL" %}
  ins_encode %{
    guarantee( Assembler::is_simm16($src$$disp), "disp too long (loadSSL) !");
    __ ld($dst$$Register, SP, $src$$disp);
  %}
  ins_pipe(ialu_loadI);
%}

// Store long to stack slot.
instruct storeSSL(stackSlotL dst, mRegL src)
%{
  match(Set dst src);

  ins_cost(100);
  format %{ "sd $dst, $src\t# long stk @ storeSSL" %}
  ins_encode %{
    guarantee( Assembler::is_simm16($dst$$disp), "disp too long (storeSSL) !");
    __ sd($src$$Register, SP, $dst$$disp);
  %}
  ins_pipe(ialu_storeI);
%}

// Load pointer from stack slot (64-bit load, same as long).
instruct loadSSP(mRegP dst, stackSlotP src)
%{
  match(Set dst src);

  ins_cost(125);
  format %{ "ld $dst, $src\t# ptr stk @ loadSSP" %}
  ins_encode %{
    guarantee( Assembler::is_simm16($src$$disp), "disp too long (loadSSP) !");
    __ ld($dst$$Register, SP, $src$$disp);
  %}
  ins_pipe(ialu_loadI);
%}

// Store pointer to stack slot.
instruct storeSSP(stackSlotP dst, mRegP src)
%{
  match(Set dst src);

  ins_cost(100);
  format %{ "sd $dst, $src\t# ptr stk @ storeSSP" %}
  ins_encode %{
    guarantee( Assembler::is_simm16($dst$$disp), "disp too long (storeSSP) !");
    __ sd($src$$Register, SP, $dst$$disp);
  %}
  ins_pipe(ialu_storeI);
%}

// Load float from stack slot.
instruct loadSSF(regF dst, stackSlotF src)
%{
  match(Set dst src);

  ins_cost(125);
  format %{ "lwc1 $dst, $src\t# float stk @ loadSSF" %}
  ins_encode %{
    guarantee( Assembler::is_simm16($src$$disp), "disp too long (loadSSF) !");
    __ lwc1($dst$$FloatRegister, SP, $src$$disp);
  %}
  ins_pipe(ialu_loadI);
%}

// Store float to stack slot.
instruct storeSSF(stackSlotF dst, regF src)
%{
  match(Set dst src);

  ins_cost(100);
  format %{ "swc1 $dst, $src\t# float stk @ storeSSF" %}
  ins_encode %{
    guarantee( Assembler::is_simm16($dst$$disp), "disp too long (storeSSF) !");
    __ swc1($src$$FloatRegister, SP, $dst$$disp);
  %}
  ins_pipe(fpu_storeF);
%}

// Use the same format since predicate() can not be used here.
// Load double from stack slot.
instruct loadSSD(regD dst, stackSlotD src)
%{
  match(Set dst src);

  ins_cost(125);
  format %{ "ldc1 $dst, $src\t# double stk @ loadSSD" %}
  ins_encode %{
    guarantee( Assembler::is_simm16($src$$disp), "disp too long (loadSSD) !");
    __ ldc1($dst$$FloatRegister, SP, $src$$disp);
  %}
  ins_pipe(ialu_loadI);
%}

// Store double to stack slot.
instruct storeSSD(stackSlotD dst, regD src)
%{
  match(Set dst src);

  ins_cost(100);
  format %{ "sdc1 $dst, $src\t# double stk @ storeSSD" %}
  ins_encode %{
    guarantee( Assembler::is_simm16($dst$$disp), "disp too long (storeSSD) !");
    __ sdc1($src$$FloatRegister, SP, $dst$$disp);
  %}
  ins_pipe(fpu_storeF);
%}
// Inline fast path of monitor enter; the slow path is taken via runtime
// call when fast_lock fails.  box is USE_KILL: the lock-record slot is
// consumed by the locking code.
instruct cmpFastLock( FlagsReg cr, mRegP object, s0_RegP box, mRegI tmp, mRegP scr) %{
  match( Set cr (FastLock object box) );
  effect( TEMP tmp, TEMP scr, USE_KILL box );
  ins_cost(300);
  format %{ "FASTLOCK $cr $object, $box, $tmp #@ cmpFastLock" %}
  ins_encode %{
    // MacroAssembler::fast_lock emits the whole inline-lock sequence.
    __ fast_lock($object$$Register, $box$$Register, $tmp$$Register, $scr$$Register);
  %}

  ins_pipe( pipe_slow );
  ins_pc_relative(1);
%}

// Inline fast path of monitor exit (counterpart of cmpFastLock).
instruct cmpFastUnlock( FlagsReg cr, mRegP object, s0_RegP box, mRegP tmp ) %{
  match( Set cr (FastUnlock object box) );
  effect( TEMP tmp, USE_KILL box );
  ins_cost(300);
  format %{ "FASTUNLOCK $object, $box, $tmp #@cmpFastUnlock" %}
  ins_encode %{
    // MacroAssembler::fast_unlock emits the whole inline-unlock sequence.
    __ fast_unlock($object$$Register, $box$$Register, $tmp$$Register);
  %}

  ins_pipe( pipe_slow );
  ins_pc_relative(1);
%}

// Store CMS card-mark Immediate
instruct storeImmCM(memory mem, immI8 src) %{
  match(Set mem (StoreCM mem src));

  ins_cost(150);
  format %{ "MOV8 $mem,$src\t! CMS card-mark imm0" %}
  // opcode(0xC6);
  // Byte store with the required ordering for the card table.
  ins_encode(store_B_immI_enc_sync(mem, src));
  ins_pipe( ialu_storeI );
%}
// Die now
// Matches the Halt ideal node: control should never reach this point at
// runtime; abort with a diagnostic if it does.
instruct ShouldNotReachHere( )
%{
  match(Halt);
  ins_cost(300);

  // Use the following format syntax
  format %{ "ILLTRAP ;#@ShouldNotReachHere" %}
  ins_encode %{
    // Here we should emit illtrap !
    // Fixed typo in the diagnostic message (was "ShoudNotReachHere").
    __ stop("in ShouldNotReachHere");
  %}
  ins_pipe( pipe_jump );
%}
// Address computation (lea) for a narrow-oop base plus an 8-bit offset.
// Only valid when the narrow-oop shift is 0, i.e. the compressed pointer
// is already a plain address.
instruct leaP8Narrow(mRegP dst, indOffset8Narrow mem)
%{
  predicate(Universe::narrow_oop_shift() == 0);
  match(Set dst mem);

  ins_cost(110);
  format %{ "leaq $dst, $mem\t# ptr off8narrow @ leaP8Narrow" %}
  ins_encode %{
    Register  dst  = $dst$$Register;
    Register  base = as_Register($mem$$base);
    int       disp = $mem$$disp;

    // Single add-immediate: dst = base + disp.
    __ daddiu(dst, base, disp);
  %}
  ins_pipe( ialu_regI_imm16 );
%}

// lea: dst = base + (index << scale) + disp (disp fits 8 bits).
instruct leaPPosIdxScaleOff8(mRegP dst, basePosIndexScaleOffset8 mem)
%{
  match(Set dst mem);

  ins_cost(110);
  format %{ "leaq $dst, $mem\t# @ PosIdxScaleOff8" %}
  ins_encode %{
    Register  dst   = $dst$$Register;
    Register  base  = as_Register($mem$$base);
    Register  index = as_Register($mem$$index);
    int       scale = $mem$$scale;
    int       disp  = $mem$$disp;

    if (scale == 0) {
      __ daddu(AT, base, index);
      __ daddiu(dst, AT, disp);
    } else {
      // Scale the index first, then add base and displacement.
      __ dsll(AT, index, scale);
      __ daddu(AT, base, AT);
      __ daddiu(dst, AT, disp);
    }
  %}

  ins_pipe( ialu_regI_imm16 );
%}

// lea: dst = base + (index << scale), no displacement.
instruct leaPIdxScale(mRegP dst, indIndexScale mem)
%{
  match(Set dst mem);

  ins_cost(110);
  format %{ "leaq $dst, $mem\t# @ leaPIdxScale" %}
  ins_encode %{
    Register  dst   = $dst$$Register;
    Register  base  = as_Register($mem$$base);
    Register  index = as_Register($mem$$index);
    int       scale = $mem$$scale;

    if (scale == 0) {
      __ daddu(dst, base, index);
    } else {
      __ dsll(AT, index, scale);
      __ daddu(dst, base, AT);
    }
  %}

  ins_pipe( ialu_regI_imm16 );
%}
// Jump Direct Conditional - Label defines a relative address from Jcc+1
// Counted-loop back branch on a register/register int compare.  MIPS has
// only beq/bne register-compare branches, so the ordered conditions are
// synthesized with slt into AT followed by a branch on AT.
instruct jmpLoopEnd(cmpOp cop, mRegI src1, mRegI src2, label labl) %{
  match(CountedLoopEnd cop (CmpI src1 src2));
  effect(USE labl);

  ins_cost(300);
  format %{ "J$cop $src1, $src2, $labl\t# Loop end @ jmpLoopEnd" %}
  ins_encode %{
    Register op1 = $src1$$Register;
    Register op2 = $src2$$Register;
    // NOTE(review): &L below tests whether $labl$$label was non-null;
    // forming the reference from a null pointer would already be UB.
    // The pattern is kept as-is throughout this file.
    Label &L = *($labl$$label);
    int flag = $cop$$cmpcode;

    switch(flag)
    {
      case 0x01: //equal
        if (&L)
          __ beq(op1, op2, L);
        else
          __ beq(op1, op2, (int)0);
        break;
      case 0x02: //not_equal
        if (&L)
          __ bne(op1, op2, L);
        else
          __ bne(op1, op2, (int)0);
        break;
      case 0x03: //above
        // AT = (op2 < op1); branch if set.
        __ slt(AT, op2, op1);
        if(&L)
          __ bne(AT, R0, L);
        else
          __ bne(AT, R0, (int)0);
        break;
      case 0x04: //above_equal
        // !(op1 < op2)
        __ slt(AT, op1, op2);
        if(&L)
          __ beq(AT, R0, L);
        else
          __ beq(AT, R0, (int)0);
        break;
      case 0x05: //below
        __ slt(AT, op1, op2);
        if(&L)
          __ bne(AT, R0, L);
        else
          __ bne(AT, R0, (int)0);
        break;
      case 0x06: //below_equal
        // !(op2 < op1)
        __ slt(AT, op2, op1);
        if(&L)
          __ beq(AT, R0, L);
        else
          __ beq(AT, R0, (int)0);
        break;
      default:
        Unimplemented();
    }
    // Branch delay slot.
    __ nop();
  %}
  ins_pipe( pipe_jump );
  ins_pc_relative(1);
%}
// Counted-loop back branch against a 16-bit immediate.  Computes
// AT = src1 - src2 with a single addiu of the negated constant, then
// branches on the sign/zero of AT, saving the register materialization
// of the constant.
instruct jmpLoopEnd_reg_imm16_sub(cmpOp cop, mRegI src1, immI16_sub src2, label labl) %{
  match(CountedLoopEnd cop (CmpI src1 src2));
  effect(USE labl);

  ins_cost(250);
  format %{ "J$cop $src1, $src2, $labl\t# Loop end @ jmpLoopEnd_reg_imm16_sub" %}
  ins_encode %{
    Register op1 = $src1$$Register;
    int op2 = $src2$$constant;
    Label &L = *($labl$$label);
    int flag = $cop$$cmpcode;

    // AT = op1 - op2 (immI16_sub guarantees -op2 fits the immediate).
    __ addiu32(AT, op1, -1 * op2);

    switch(flag)
    {
      case 0x01: //equal
        if (&L)
          __ beq(AT, R0, L);
        else
          __ beq(AT, R0, (int)0);
        break;
      case 0x02: //not_equal
        if (&L)
          __ bne(AT, R0, L);
        else
          __ bne(AT, R0, (int)0);
        break;
      case 0x03: //above
        if(&L)
          __ bgtz(AT, L);
        else
          __ bgtz(AT, (int)0);
        break;
      case 0x04: //above_equal
        if(&L)
          __ bgez(AT, L);
        else
          __ bgez(AT,(int)0);
        break;
      case 0x05: //below
        if(&L)
          __ bltz(AT, L);
        else
          __ bltz(AT, (int)0);
        break;
      case 0x06: //below_equal
        if(&L)
          __ blez(AT, L);
        else
          __ blez(AT, (int)0);
        break;
      default:
        Unimplemented();
    }
    // Branch delay slot.
    __ nop();
  %}
  ins_pipe( pipe_jump );
  ins_pc_relative(1);
%}
12552 /*
12553 // Jump Direct Conditional - Label defines a relative address from Jcc+1
12554 instruct jmpLoopEndU(cmpOpU cop, eFlagsRegU cmp, label labl) %{
12555 match(CountedLoopEnd cop cmp);
12556 effect(USE labl);
12558 ins_cost(300);
12559 format %{ "J$cop,u $labl\t# Loop end" %}
12560 size(6);
12561 opcode(0x0F, 0x80);
12562 ins_encode( Jcc( cop, labl) );
12563 ins_pipe( pipe_jump );
12564 ins_pc_relative(1);
12565 %}
12567 instruct jmpLoopEndUCF(cmpOpUCF cop, eFlagsRegUCF cmp, label labl) %{
12568 match(CountedLoopEnd cop cmp);
12569 effect(USE labl);
12571 ins_cost(200);
12572 format %{ "J$cop,u $labl\t# Loop end" %}
12573 opcode(0x0F, 0x80);
12574 ins_encode( Jcc( cop, labl) );
12575 ins_pipe( pipe_jump );
12576 ins_pc_relative(1);
12577 %}
12578 */
// This match pattern is created for StoreIConditional since I cannot match IfNode without a RegFlags! fujie 2012/07/17
// Branch on the flag value left in AT by a preceding conditional store.
// NOTE(review): "equal" emits bne and "not equal" emits beq -- presumably
// because AT holds a non-zero success flag and the cmpcode refers to the
// comparison against success; confirm against storeIConditional's encoding.
instruct jmpCon_flags(cmpOp cop, FlagsReg cr, label labl) %{
  match(If cop cr);
  effect(USE labl);

  ins_cost(300);
  format %{ "J$cop $labl #mips uses AT as eflag @jmpCon_flags" %}

  ins_encode %{
    Label &L = *($labl$$label);
    switch($cop$$cmpcode)
    {
      case 0x01: //equal
        if (&L)
          __ bne(AT, R0, L);
        else
          __ bne(AT, R0, (int)0);
        break;
      case 0x02: //not equal
        if (&L)
          __ beq(AT, R0, L);
        else
          __ beq(AT, R0, (int)0);
        break;
      default:
        Unimplemented();
    }
    // Branch delay slot.
    __ nop();
  %}

  ins_pipe( pipe_jump );
  ins_pc_relative(1);
%}
// ============================================================================
// The 2nd slow-half of a subtype check. Scan the subklass's 2ndary superklass
// array for an instance of the superklass. Set a hidden internal cache on a
// hit (cache is checked with exposed code in gen_subtype_check()). Return
// NZ for a miss or zero for a hit. The encoding ALSO sets flags.
instruct partialSubtypeCheck( mRegP result, no_T8_mRegP sub, no_T8_mRegP super, mT8RegI tmp ) %{
  match(Set result (PartialSubtypeCheck sub super));
  // tmp (T8) is clobbered by the scan loop.
  effect(KILL tmp);
  ins_cost(1100);  // slightly larger than the next version
  format %{ "partialSubtypeCheck result=$result, sub=$sub, super=$super, tmp=$tmp " %}

  ins_encode( enc_PartialSubtypeCheck(result, sub, super, tmp) );
  ins_pipe( pipe_slow );
%}
// Conditional-store of an int value.
// ZF flag is set on success, reset otherwise. Implemented with a CMPXCHG on Intel.
// On MIPS this is an open-coded LL/SC loop; the success flag is left in AT
// (0xFF on success, 0 on failure) for the matching jmpCon_flags branch.
instruct storeIConditional( memory mem, mRegI oldval, mRegI newval, FlagsReg cr ) %{
  match(Set cr (StoreIConditional mem (Binary oldval newval)));
  // effect(KILL oldval);
  format %{ "CMPXCHG $newval, $mem, $oldval \t# @storeIConditional" %}

  ins_encode %{
    Register oldval = $oldval$$Register;
    Register newval = $newval$$Register;
    Address addr(as_Register($mem$$base), $mem$$disp);
    Label again, failure;

    // int base = $mem$$base;
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;

    // ll/sc take a 16-bit displacement only.
    guarantee(Assembler::is_simm16(disp), "");

    if( index != 0 ) {
      // Indexed form is not expected for this node.
      __ stop("in storeIConditional: index != 0");
    } else {
      __ bind(again);
      if(UseSyncLevel <= 1000) __ sync();
      __ ll(AT, addr);
      // Mismatch with expected value: fail with AT = 0 (set in delay slot).
      __ bne(AT, oldval, failure);
      __ delayed()->addu(AT, R0, R0);

      __ addu(AT, newval, R0);
      __ sc(AT, addr);
      // sc wrote 0 into AT if the store-conditional lost; retry.
      // On the fall-through (success) path the delay slot sets AT = 0xFF.
      __ beq(AT, R0, again);
      __ delayed()->addiu(AT, R0, 0xFF);
      __ bind(failure);
      __ sync();
    }
  %}

  ins_pipe( long_memory_op );
%}
// Conditional-store of a long value.
// ZF flag is set on success, reset otherwise. Implemented with a CMPXCHG.
// oldval is KILLed: MacroAssembler::cmpxchg clobbers it.
instruct storeLConditional(memory mem, t2RegL oldval, mRegL newval, FlagsReg cr )
%{
  match(Set cr (StoreLConditional mem (Binary oldval newval)));
  effect(KILL oldval);

  format %{ "cmpxchg $mem, $newval\t# If $oldval == $mem then store $newval into $mem" %}
  ins_encode%{
    Register oldval = $oldval$$Register;
    Register newval = $newval$$Register;
    Address addr((Register)$mem$$base, $mem$$disp);

    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;

    // lld/scd take a 16-bit displacement only.
    guarantee(Assembler::is_simm16(disp), "");

    if( index != 0 ) {
      // Fixed diagnostic: this instruct is storeLConditional, and the old
      // message wrongly named storeIConditional.
      __ stop("in storeLConditional: index != 0");
    } else {
      __ cmpxchg(newval, addr, oldval);
    }
  %}
  ins_pipe( long_memory_op );
%}
// 32-bit compare-and-swap.  cmpxchg32 runs the LL/SC retry loop and leaves
// the success flag in AT, which is copied into res.  oldval is KILLed
// because cmpxchg32 clobbers it.
// NOTE(review): the format text still describes an older branch-based
// implementation; the actual encoding is cmpxchg32 + move.
instruct compareAndSwapI( mRegI res, mRegP mem_ptr, mS2RegI oldval, mRegI newval) %{
  match(Set res (CompareAndSwapI mem_ptr (Binary oldval newval)));
  effect(KILL oldval);
  format %{ "CMPXCHG $newval, [$mem_ptr], $oldval @ compareAndSwapI\n\t"
            "MOV $res, 1 @ compareAndSwapI\n\t"
            "BNE AT, R0 @ compareAndSwapI\n\t"
            "MOV $res, 0 @ compareAndSwapI\n"
            "L:" %}
  ins_encode %{
    Register newval = $newval$$Register;
    Register oldval = $oldval$$Register;
    Register res    = $res$$Register;
    Address  addr($mem_ptr$$Register, 0);
    // Removed unused local `Label L;` left over from the old encoding.

    __ cmpxchg32(newval, addr, oldval);
    __ move(res, AT);
  %}
  ins_pipe( long_memory_op );
%}
//FIXME:
// 64-bit pointer compare-and-swap.  cmpxchg runs the LL/SC retry loop and
// leaves the success flag in AT, which is copied into res.  oldval is
// KILLed because cmpxchg clobbers it.
instruct compareAndSwapP( mRegI res, mRegP mem_ptr, s2_RegP oldval, mRegP newval) %{
  match(Set res (CompareAndSwapP mem_ptr (Binary oldval newval)));
  effect(KILL oldval);
  format %{ "CMPXCHG $newval, [$mem_ptr], $oldval @ compareAndSwapP\n\t"
            "MOV $res, AT @ compareAndSwapP\n\t"
            "L:" %}
  ins_encode %{
    Register newval = $newval$$Register;
    Register oldval = $oldval$$Register;
    Register res    = $res$$Register;
    Address  addr($mem_ptr$$Register, 0);
    // Removed unused local `Label L;` left over from an older encoding.

    __ cmpxchg(newval, addr, oldval);
    __ move(res, AT);
  %}
  ins_pipe( long_memory_op );
%}
// Narrow-oop (32-bit) compare-and-swap.  Like compareAndSwapI but the
// expected value must be sign-extended first (see comment below).
instruct compareAndSwapN( mRegI res, mRegP mem_ptr, t2_RegN oldval, mRegN newval) %{
  match(Set res (CompareAndSwapN mem_ptr (Binary oldval newval)));
  effect(KILL oldval);
  format %{ "CMPXCHG $newval, [$mem_ptr], $oldval @ compareAndSwapN\n\t"
            "MOV $res, AT @ compareAndSwapN\n\t"
            "L:" %}
  ins_encode %{
    Register newval = $newval$$Register;
    Register oldval = $oldval$$Register;
    Register res    = $res$$Register;
    Address  addr($mem_ptr$$Register, 0);
    // Removed unused local `Label L;` left over from an older encoding.

    // 2013/7/19 Jin: cmpxchg32 is implemented with ll/sc, which will do
    // sign extension.  Thus, we should extend oldval's sign for a correct
    // comparison.  (sll with shift 0 sign-extends the low 32 bits.)
    __ sll(oldval, oldval, 0);

    __ cmpxchg32(newval, addr, oldval);
    __ move(res, AT);
  %}
  ins_pipe( long_memory_op );
%}
12767 //----------Max and Min--------------------------------------------------------
12768 // Min Instructions
12769 ////
12770 // *** Min and Max using the conditional move are slower than the
12771 // *** branch version on a Pentium III.
12772 // // Conditional move for min
12773 //instruct cmovI_reg_lt( eRegI op2, eRegI op1, eFlagsReg cr ) %{
12774 // effect( USE_DEF op2, USE op1, USE cr );
12775 // format %{ "CMOVlt $op2,$op1\t! min" %}
12776 // opcode(0x4C,0x0F);
12777 // ins_encode( OpcS, OpcP, RegReg( op2, op1 ) );
12778 // ins_pipe( pipe_cmov_reg );
12779 //%}
12780 //
12781 //// Min Register with Register (P6 version)
12782 //instruct minI_eReg_p6( eRegI op1, eRegI op2 ) %{
12783 // predicate(VM_Version::supports_cmov() );
12784 // match(Set op2 (MinI op1 op2));
12785 // ins_cost(200);
12786 // expand %{
12787 // eFlagsReg cr;
12788 // compI_eReg(cr,op1,op2);
12789 // cmovI_reg_lt(op2,op1,cr);
12790 // %}
12791 //%}
// Min Register with Register (generic version)
// Branchless min: AT = (src < dst); movn conditionally moves src into dst
// when AT is non-zero, so dst ends up as min(dst, src).
instruct minI_Reg_Reg(mRegI dst, mRegI src) %{
  match(Set dst (MinI dst src));
  //effect(KILL flags);
  ins_cost(80);

  format %{ "MIN $dst, $src @minI_Reg_Reg" %}
  ins_encode %{
    Register dst = $dst$$Register;
    Register src = $src$$Register;

    __ slt(AT, src, dst);
    __ movn(dst, src, AT);

  %}

  ins_pipe( pipe_slow );
%}
12812 // Max Register with Register
12813 // *** Min and Max using the conditional move are slower than the
12814 // *** branch version on a Pentium III.
12815 // // Conditional move for max
12816 //instruct cmovI_reg_gt( eRegI op2, eRegI op1, eFlagsReg cr ) %{
12817 // effect( USE_DEF op2, USE op1, USE cr );
12818 // format %{ "CMOVgt $op2,$op1\t! max" %}
12819 // opcode(0x4F,0x0F);
12820 // ins_encode( OpcS, OpcP, RegReg( op2, op1 ) );
12821 // ins_pipe( pipe_cmov_reg );
12822 //%}
12823 //
12824 // // Max Register with Register (P6 version)
12825 //instruct maxI_eReg_p6( eRegI op1, eRegI op2 ) %{
12826 // predicate(VM_Version::supports_cmov() );
12827 // match(Set op2 (MaxI op1 op2));
12828 // ins_cost(200);
12829 // expand %{
12830 // eFlagsReg cr;
12831 // compI_eReg(cr,op1,op2);
12832 // cmovI_reg_gt(op2,op1,cr);
12833 // %}
12834 //%}
// Max Register with Register (generic version)
// Branchless max: AT = (dst < src); movn moves src into dst when AT is
// non-zero, so dst ends up as max(dst, src).
instruct maxI_Reg_Reg(mRegI dst, mRegI src) %{
  match(Set dst (MaxI dst src));
  ins_cost(80);

  format %{ "MAX $dst, $src @maxI_Reg_Reg" %}

  ins_encode %{
    Register dst = $dst$$Register;
    Register src = $src$$Register;

    __ slt(AT, dst, src);
    __ movn(dst, src, AT);

  %}

  ins_pipe( pipe_slow );
%}

// Max against zero: clamp negative values to 0 (dst = max(dst, 0)).
instruct maxI_Reg_zero(mRegI dst, immI0 zero) %{
  match(Set dst (MaxI dst zero));
  ins_cost(50);

  format %{ "MAX $dst, 0 @maxI_Reg_zero" %}

  ins_encode %{
    Register dst = $dst$$Register;

    // If dst < 0, replace it with 0.
    __ slt(AT, dst, R0);
    __ movn(dst, R0, AT);

  %}

  ins_pipe( pipe_slow );
%}
// Zero-extend the low 32 bits of a long: AndL with 0xFFFFFFFF is a single
// dext (extract bit field [0,32) zero-extended).
instruct zerox_long_reg_reg(mRegL dst, mRegL src, immL_32bits mask)
%{
  match(Set dst (AndL src mask));

  format %{ "movl $dst, $src\t# zero-extend long @ zerox_long_reg_reg" %}
  ins_encode %{
    Register dst = $dst$$Register;
    Register src = $src$$Register;

    __ dext(dst, src, 0, 32);
  %}
  ins_pipe(ialu_regI_regI);
%}

// Combine two ints into one long: src2 in the high 32 bits, src1 in the
// low 32 bits, using dins/dinsu bit-field inserts.  The three branches
// avoid clobbering a source that aliases dst.
instruct combine_i2l(mRegL dst, mRegI src1, immL_32bits mask, mRegI src2, immI_32 shift32)
%{
  match(Set dst (OrL (AndL (ConvI2L src1) mask) (LShiftL (ConvI2L src2) shift32)));

  format %{ "combine_i2l $dst, $src2(H), $src1(L) @ combine_i2l" %}
  ins_encode %{
    Register dst  = $dst$$Register;
    Register src1 = $src1$$Register;
    Register src2 = $src2$$Register;

    if (src1 == dst) {
      // Low half already in place; insert src2 into the high half.
      __ dinsu(dst, src2, 32, 32);
    } else if (src2 == dst) {
      // Move the high half up first, then insert the low half.
      __ dsll32(dst, dst, 0);
      __ dins(dst, src1, 0, 32);
    } else {
      __ dext(dst, src1, 0, 32);
      __ dinsu(dst, src2, 32, 32);
    }
  %}
  ins_pipe(ialu_regI_regI);
%}

// Zero-extend convert int to long
instruct convI2L_reg_reg_zex(mRegL dst, mRegI src, immL_32bits mask)
%{
  match(Set dst (AndL (ConvI2L src) mask));

  format %{ "movl $dst, $src\t# i2l zero-extend @ convI2L_reg_reg_zex" %}
  ins_encode %{
    Register dst = $dst$$Register;
    Register src = $src$$Register;

    __ dext(dst, src, 0, 32);
  %}
  ins_pipe(ialu_regI_regI);
%}

// Same as above but matching the L2I-then-I2L round trip under the mask.
instruct convL2I2L_reg_reg_zex(mRegL dst, mRegL src, immL_32bits mask)
%{
  match(Set dst (AndL (ConvI2L (ConvL2I src)) mask));

  format %{ "movl $dst, $src\t# i2l zero-extend @ convL2I2L_reg_reg_zex" %}
  ins_encode %{
    Register dst = $dst$$Register;
    Register src = $src$$Register;

    __ dext(dst, src, 0, 32);
  %}
  ins_pipe(ialu_regI_regI);
%}
// Match loading integer and casting it to unsigned int in long register.
// LoadI + ConvI2L + AndL 0xffffffff.
// Both orders of the AndL operands are matched; the encoding is a single
// lwu (zero-extending 32-bit load) via load_N_enc.
instruct loadUI2L_rmask(mRegL dst, memory mem, immL_32bits mask) %{
  match(Set dst (AndL (ConvI2L (LoadI mem)) mask));

  format %{ "lwu $dst, $mem \t// zero-extend to long @ loadUI2L_rmask" %}
  ins_encode (load_N_enc(dst, mem));
  ins_pipe(ialu_loadI);
%}

// Mask on the left-hand side (see loadUI2L_rmask).
instruct loadUI2L_lmask(mRegL dst, memory mem, immL_32bits mask) %{
  match(Set dst (AndL mask (ConvI2L (LoadI mem))));

  format %{ "lwu $dst, $mem \t// zero-extend to long @ loadUI2L_lmask" %}
  ins_encode (load_N_enc(dst, mem));
  ins_pipe(ialu_loadI);
%}
// ============================================================================
// Safepoint Instruction
// Reads the polling page: if the VM has armed the page, the load faults and
// the signal handler brings the thread to a safepoint.
instruct safePoint_poll(mRegP poll) %{
  match(SafePoint poll);
  effect(USE poll);

  ins_cost(125);
  format %{ "Safepoint @ [$poll] : poll for GC @ safePoint_poll" %}

  ins_encode %{
    Register poll_reg = $poll$$Register;

    __ block_comment("Safepoint:");
    // Record relocation so the VM can identify this as a poll instruction.
    __ relocate(relocInfo::poll_type);
    __ lw(AT, poll_reg, 0);
  %}

  ins_pipe( ialu_storeI );
%}
//----------Arithmetic Conversion Instructions---------------------------------

// RoundFloat/RoundDouble are no-ops on MIPS: FP registers always hold
// values at their declared precision, so no rounding instruction is needed.
instruct roundFloat_nop(regF dst)
%{
  match(Set dst (RoundFloat dst));

  ins_cost(0);
  ins_encode();
  ins_pipe(empty);
%}

instruct roundDouble_nop(regD dst)
%{
  match(Set dst (RoundDouble dst));

  ins_cost(0);
  ins_encode();
  ins_pipe(empty);
%}
//---------- Zeros Count Instructions ------------------------------------------
// CountLeadingZerosINode CountTrailingZerosINode
// Leading-zero counts use the standard MIPS clz/dclz; trailing-zero counts
// use the Loongson (gs) ctz/dctz extensions, hence the separate predicates.
instruct countLeadingZerosI(mRegI dst, mRegI src) %{
  predicate(UseCountLeadingZerosInstruction);
  match(Set dst (CountLeadingZerosI src));

  format %{ "clz $dst, $src\t# count leading zeros (int)" %}
  ins_encode %{
    __ clz($dst$$Register, $src$$Register);
  %}
  ins_pipe( ialu_regL_regL );
%}

instruct countLeadingZerosL(mRegI dst, mRegL src) %{
  predicate(UseCountLeadingZerosInstruction);
  match(Set dst (CountLeadingZerosL src));

  format %{ "dclz $dst, $src\t# count leading zeros (long)" %}
  ins_encode %{
    __ dclz($dst$$Register, $src$$Register);
  %}
  ins_pipe( ialu_regL_regL );
%}

instruct countTrailingZerosI(mRegI dst, mRegI src) %{
  predicate(UseCountTrailingZerosInstruction);
  match(Set dst (CountTrailingZerosI src));

  format %{ "ctz $dst, $src\t# count trailing zeros (int)" %}
  ins_encode %{
    // ctz and dctz is gs instructions.
    __ ctz($dst$$Register, $src$$Register);
  %}
  ins_pipe( ialu_regL_regL );
%}
// Count trailing zeros of a long using the Loongson (gs) dctz instruction.
instruct countTrailingZerosL(mRegI dst, mRegL src) %{
  predicate(UseCountTrailingZerosInstruction);
  match(Set dst (CountTrailingZerosL src));

  // Fixed format text: the emitted instruction is dctz (was typo "dcto").
  format %{ "dctz $dst, $src\t# count trailing zeros (long)" %}
  ins_encode %{
    __ dctz($dst$$Register, $src$$Register);
  %}
  ins_pipe( ialu_regL_regL );
%}
// ====================VECTOR INSTRUCTIONS=====================================

// Load vectors (8 bytes long)
// 8-byte vectors live in double FP registers, so loads/stores reuse the
// double-precision memory encodings.
instruct loadV8(vecD dst, memory mem) %{
  predicate(n->as_LoadVector()->memory_size() == 8);
  match(Set dst (LoadVector mem));
  ins_cost(125);
  format %{ "load $dst, $mem\t! load vector (8 bytes)" %}
  ins_encode(load_D_enc(dst, mem));
  ins_pipe( fpu_loadF );
%}

// Store vectors (8 bytes long)
instruct storeV8(memory mem, vecD src) %{
  predicate(n->as_StoreVector()->memory_size() == 8);
  match(Set mem (StoreVector mem src));
  ins_cost(145);
  format %{ "store $mem, $src\t! store vector (8 bytes)" %}
  ins_encode(store_D_reg_enc(mem, src));
  ins_pipe( fpu_storeF );
%}
// Replicate a byte scalar into all 8 lanes of a vecD.  replv_ob
// (Loongson multimedia extension) replicates the low byte across AT,
// then the 64-bit pattern is moved into the FP register.
instruct Repl8B(vecD dst, mRegI src) %{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (ReplicateB src));
  format %{ "replv_ob AT, $src\n\t"
            "dmtc1 AT, $dst\t! replicate8B" %}
  ins_encode %{
    __ replv_ob(AT, $src$$Register);
    __ dmtc1(AT, $dst$$FloatRegister);
  %}
  ins_pipe( pipe_mtc1 );
%}

// Replicate a byte immediate into all 8 lanes.
instruct Repl8B_imm(vecD dst, immI con) %{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (ReplicateB con));
  format %{ "repl_ob AT, [$con]\n\t"
            "dmtc1 AT, $dst,0x00\t! replicate8B($con)" %}
  ins_encode %{
    int val = $con$$constant;
    __ repl_ob(AT, val);
    __ dmtc1(AT, $dst$$FloatRegister);
  %}
  ins_pipe( pipe_mtc1 );
%}

// All-zero byte vector: a single move of R0.
instruct Repl8B_zero(vecD dst, immI0 zero) %{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (ReplicateB zero));
  format %{ "dmtc1 R0, $dst\t! replicate8B zero" %}
  ins_encode %{
    __ dmtc1(R0, $dst$$FloatRegister);
  %}
  ins_pipe( pipe_mtc1 );
%}

// All-ones byte vector: nor(R0, R0) produces 0xFFFF...FF in AT.
instruct Repl8B_M1(vecD dst, immI_M1 M1) %{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (ReplicateB M1));
  format %{ "dmtc1 -1, $dst\t! replicate8B -1" %}
  ins_encode %{
    __ nor(AT, R0, R0);
    __ dmtc1(AT, $dst$$FloatRegister);
  %}
  ins_pipe( pipe_mtc1 );
%}
// Replicate a short scalar into all 4 lanes of a vecD (Loongson replv_qh).
instruct Repl4S(vecD dst, mRegI src) %{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (ReplicateS src));
  format %{ "replv_qh AT, $src\n\t"
            "dmtc1 AT, $dst\t! replicate4S" %}
  ins_encode %{
    __ replv_qh(AT, $src$$Register);
    __ dmtc1(AT, $dst$$FloatRegister);
  %}
  ins_pipe( pipe_mtc1 );
%}

// Replicate a short immediate into all 4 lanes.
instruct Repl4S_imm(vecD dst, immI con) %{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (ReplicateS con));
  format %{ "replv_qh AT, [$con]\n\t"
            "dmtc1 AT, $dst\t! replicate4S($con)" %}
  ins_encode %{
    int val = $con$$constant;
    if ( Assembler::is_simm(val, 10)) {
      //repl_qh supports 10 bits immediate
      __ repl_qh(AT, val);
    } else {
      // Larger constants: materialize then replicate from the register.
      __ li32(AT, val);
      __ replv_qh(AT, AT);
    }
    __ dmtc1(AT, $dst$$FloatRegister);
  %}
  ins_pipe( pipe_mtc1 );
%}

// All-zero short vector.
instruct Repl4S_zero(vecD dst, immI0 zero) %{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (ReplicateS zero));
  format %{ "dmtc1 R0, $dst\t! replicate4S zero" %}
  ins_encode %{
    __ dmtc1(R0, $dst$$FloatRegister);
  %}
  ins_pipe( pipe_mtc1 );
%}

// All-ones short vector: nor(R0, R0) produces 0xFFFF...FF in AT.
instruct Repl4S_M1(vecD dst, immI_M1 M1) %{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (ReplicateS M1));
  format %{ "dmtc1 -1, $dst\t! replicate4S -1" %}
  ins_encode %{
    __ nor(AT, R0, R0);
    __ dmtc1(AT, $dst$$FloatRegister);
  %}
  ins_pipe( pipe_mtc1 );
%}
13164 // Replicate integer (4 byte) scalar to be vector
13165 instruct Repl2I(vecD dst, mRegI src) %{
13166 predicate(n->as_Vector()->length() == 2);
13167 match(Set dst (ReplicateI src));
13168 format %{ "dins AT, $src, 0, 32\n\t"
13169 "dinsu AT, $src, 32, 32\n\t"
13170 "dmtc1 AT, $dst\t! replicate2I" %}
13171 ins_encode %{
13172 __ dins(AT, $src$$Register, 0, 32);
13173 __ dinsu(AT, $src$$Register, 32, 32);
13174 __ dmtc1(AT, $dst$$FloatRegister);
13175 %}
13176 ins_pipe( pipe_mtc1 );
13177 %}
13179 // Replicate integer (4 byte) scalar immediate to be vector by loading from const table.
// Replicate an integer (4-byte) immediate into both 32-bit lanes.
// li32 materializes the 32-bit constant in AT, then replv_pw replicates the
// low word across both word lanes of AT before the GPR->FPR move.
// NOTE(review): effect(KILL tmp) marks A7 as killed, but the encoding below
// only writes AT — tmp looks vestigial (possibly left over from an older
// encoding that used A7); confirm before removing.
// NOTE(review): the trailing ", 32" in the li32 format text looks stray —
// format strings are debug output only, so it is left untouched here.
instruct Repl2I_imm(vecD dst, immI con, mA7RegI tmp) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (ReplicateI con));
  effect(KILL tmp);
  format %{ "li32 AT, [$con], 32\n\t"
            "replv_pw AT, AT\n\t"
            "dmtc1 AT, $dst\t! replicate2I($con)" %}
  ins_encode %{
    int val = $con$$constant;
    // AT = sign-/zero-appropriate 32-bit materialization of the constant.
    __ li32(AT, val);
    // Duplicate AT's low 32 bits into both word lanes.
    __ replv_pw(AT, AT);
    __ dmtc1(AT, $dst$$FloatRegister);
  %}
  ins_pipe( pipe_mtc1 );
%}
13196 // Replicate integer (4 byte) scalar zero to be vector
// Replicate the integer (4-byte) scalar zero into both 32-bit lanes.
// A single move of R0 (hardwired zero) clears the whole 64-bit FP register.
instruct Repl2I_zero(vecD dst, immI0 zero) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (ReplicateI zero));
  format %{ "dmtc1 R0, $dst\t! replicate2I zero" %}
  ins_encode %{
    // Both lanes become 0 via one GPR->FPR move of the zero register.
    __ dmtc1(R0, $dst$$FloatRegister);
  %}
  ins_pipe( pipe_mtc1 );
%}
13207 // Replicate integer (4 byte) scalar -1 to be vector
// Replicate the integer (4-byte) scalar -1 into both 32-bit lanes.
// nor(AT, R0, R0) gives the 64-bit all-ones pattern, i.e. -1 in each lane.
// Clobbers AT (reserved scratch), as the format comment notes.
instruct Repl2I_M1(vecD dst, immI_M1 M1) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (ReplicateI M1));
  format %{ "dmtc1 -1, $dst\t! replicate2I -1, use AT" %}
  ins_encode %{
    // AT = ~0 = 0xFFFFFFFFFFFFFFFF
    __ nor(AT, R0, R0);
    __ dmtc1(AT, $dst$$FloatRegister);
  %}
  ins_pipe( pipe_mtc1 );
%}
13219 // Replicate float (4 byte) scalar to be vector
// Replicate a float (4-byte) scalar FP register into both lanes of a
// paired-single (2 x float) vector: cvt.ps.s packs the two single operands
// (here the same register twice) into one paired-single destination.
instruct Repl2F(vecD dst, regF src) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (ReplicateF src));
  format %{ "cvt.ps $dst, $src, $src\t! replicate2F" %}
  ins_encode %{
    // Pack src into both the upper and lower single-precision halves.
    __ cvt_ps_s($dst$$FloatRegister, $src$$FloatRegister, $src$$FloatRegister);
  %}
  ins_pipe( pipe_slow );
%}
13230 // Replicate float (4 byte) scalar zero to be vector
// Replicate the float (4-byte) scalar +0.0 into both lanes.
// +0.0f has an all-zero bit pattern, so one dmtc1 of R0 zeroes both lanes.
instruct Repl2F_zero(vecD dst, immF0 zero) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (ReplicateF zero));
  format %{ "dmtc1 R0, $dst\t! replicate2F zero" %}
  ins_encode %{
    // Bit pattern of 2 x +0.0f is all zeros — reuse the zero register.
    __ dmtc1(R0, $dst$$FloatRegister);
  %}
  ins_pipe( pipe_mtc1 );
%}
13242 // ====================VECTOR ARITHMETIC=======================================
13244 // --------------------------------- ADD --------------------------------------
13246 // Floats vector add
// Packed 2-float add, two-operand form (dst += src) using paired-single
// add.ps.
// NOTE(review): this rule uses ins_pipe(pipe_slow) while the three-operand
// vadd2F3 below uses fpu_regF_regF for the same add.ps instruction — the
// pipe classes look inconsistent; confirm which scheduling class is intended.
instruct vadd2F(vecD dst, vecD src) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (AddVF dst src));
  format %{ "add.ps $dst,$src\t! add packed2F" %}
  ins_encode %{
    __ add_ps($dst$$FloatRegister, $dst$$FloatRegister, $src$$FloatRegister);
  %}
  ins_pipe( pipe_slow );
%}
// Packed 2-float add, three-operand form (dst = src1 + src2) using
// paired-single add.ps; lets the allocator pick a distinct destination.
instruct vadd2F3(vecD dst, vecD src1, vecD src2) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (AddVF src1 src2));
  format %{ "add.ps $dst,$src1,$src2\t! add packed2F" %}
  ins_encode %{
    __ add_ps($dst$$FloatRegister, $src1$$FloatRegister, $src2$$FloatRegister);
  %}
  ins_pipe( fpu_regF_regF );
%}
13267 // --------------------------------- SUB --------------------------------------
13269 // Floats vector sub
// Packed 2-float subtract (dst -= src) using paired-single sub.ps.
instruct vsub2F(vecD dst, vecD src) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (SubVF dst src));
  format %{ "sub.ps $dst,$src\t! sub packed2F" %}
  ins_encode %{
    __ sub_ps($dst$$FloatRegister, $dst$$FloatRegister, $src$$FloatRegister);
  %}
  ins_pipe( fpu_regF_regF );
%}
13280 // --------------------------------- MUL --------------------------------------
13282 // Floats vector mul
// Packed 2-float multiply, two-operand form (dst *= src) using
// paired-single mul.ps.
instruct vmul2F(vecD dst, vecD src) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (MulVF dst src));
  format %{ "mul.ps $dst, $src\t! mul packed2F" %}
  ins_encode %{
    __ mul_ps($dst$$FloatRegister, $dst$$FloatRegister, $src$$FloatRegister);
  %}
  ins_pipe( fpu_regF_regF );
%}
// Packed 2-float multiply, three-operand form (dst = src1 * src2) using
// paired-single mul.ps; lets the allocator pick a distinct destination.
instruct vmul2F3(vecD dst, vecD src1, vecD src2) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (MulVF src1 src2));
  format %{ "mul.ps $dst, $src1, $src2\t! mul packed2F" %}
  ins_encode %{
    __ mul_ps($dst$$FloatRegister, $src1$$FloatRegister, $src2$$FloatRegister);
  %}
  ins_pipe( fpu_regF_regF );
%}
13303 // --------------------------------- DIV --------------------------------------
13304 // MIPS do not have div.ps
13307 //----------PEEPHOLE RULES-----------------------------------------------------
13308 // These must follow all instruction definitions as they use the names
13309 // defined in the instructions definitions.
13310 //
// peepmatch ( root_instr_name [preceding_instruction]* );
13312 //
13313 // peepconstraint %{
13314 // (instruction_number.operand_name relational_op instruction_number.operand_name
13315 // [, ...] );
13316 // // instruction numbers are zero-based using left to right order in peepmatch
13317 //
13318 // peepreplace ( instr_name ( [instruction_number.operand_name]* ) );
13319 // // provide an instruction_number.operand_name for each operand that appears
13320 // // in the replacement instruction's match rule
13321 //
13322 // ---------VM FLAGS---------------------------------------------------------
13323 //
13324 // All peephole optimizations can be turned off using -XX:-OptoPeephole
13325 //
13326 // Each peephole rule is given an identifying number starting with zero and
13327 // increasing by one in the order seen by the parser. An individual peephole
13328 // can be enabled, and all others disabled, by using -XX:OptoPeepholeAt=#
13329 // on the command-line.
13330 //
13331 // ---------CURRENT LIMITATIONS----------------------------------------------
13332 //
13333 // Only match adjacent instructions in same basic block
13334 // Only equality constraints
13335 // Only constraints between operands, not (0.dest_reg == EAX_enc)
13336 // Only one replacement instruction
13337 //
13338 // ---------EXAMPLE----------------------------------------------------------
13339 //
13340 // // pertinent parts of existing instructions in architecture description
13341 // instruct movI(eRegI dst, eRegI src) %{
13342 // match(Set dst (CopyI src));
13343 // %}
13344 //
13345 // instruct incI_eReg(eRegI dst, immI1 src, eFlagsReg cr) %{
13346 // match(Set dst (AddI dst src));
13347 // effect(KILL cr);
13348 // %}
13349 //
13350 // // Change (inc mov) to lea
13351 // peephole %{
// // increment preceded by register-register move
13353 // peepmatch ( incI_eReg movI );
13354 // // require that the destination register of the increment
13355 // // match the destination register of the move
13356 // peepconstraint ( 0.dst == 1.dst );
13357 // // construct a replacement instruction that sets
13358 // // the destination to ( move's source register + one )
13359 // peepreplace ( leaI_eReg_immI( 0.dst 1.src 0.src ) );
13360 // %}
13361 //
13362 // Implementation no longer uses movX instructions since
13363 // machine-independent system no longer uses CopyX nodes.
13364 //
13365 // peephole %{
13366 // peepmatch ( incI_eReg movI );
13367 // peepconstraint ( 0.dst == 1.dst );
13368 // peepreplace ( leaI_eReg_immI( 0.dst 1.src 0.src ) );
13369 // %}
13370 //
13371 // peephole %{
13372 // peepmatch ( decI_eReg movI );
13373 // peepconstraint ( 0.dst == 1.dst );
13374 // peepreplace ( leaI_eReg_immI( 0.dst 1.src 0.src ) );
13375 // %}
13376 //
13377 // peephole %{
13378 // peepmatch ( addI_eReg_imm movI );
13379 // peepconstraint ( 0.dst == 1.dst );
13380 // peepreplace ( leaI_eReg_immI( 0.dst 1.src 0.src ) );
13381 // %}
13382 //
13383 // peephole %{
13384 // peepmatch ( addP_eReg_imm movP );
13385 // peepconstraint ( 0.dst == 1.dst );
13386 // peepreplace ( leaP_eReg_immI( 0.dst 1.src 0.src ) );
13387 // %}
13389 // // Change load of spilled value to only a spill
13390 // instruct storeI(memory mem, eRegI src) %{
13391 // match(Set mem (StoreI mem src));
13392 // %}
13393 //
13394 // instruct loadI(eRegI dst, memory mem) %{
13395 // match(Set dst (LoadI mem));
13396 // %}
13397 //
13398 //peephole %{
13399 // peepmatch ( loadI storeI );
13400 // peepconstraint ( 1.src == 0.dst, 1.mem == 0.mem );
13401 // peepreplace ( storeI( 1.mem 1.mem 1.src ) );
13402 //%}
13404 //----------SMARTSPILL RULES---------------------------------------------------
13405 // These must follow all instruction definitions as they use the names
13406 // defined in the instructions definitions.