Fri, 17 Feb 2017 20:37:47 +0800
[C2] Fix some problems in the last patch and use gsswx in store_N_reg_enc for Loongson CPUs.
1 //
2 // Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved.
3 // Copyright (c) 2015, 2016, Loongson Technology. All rights reserved.
4 // DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5 //
6 // This code is free software; you can redistribute it and/or modify it
7 // under the terms of the GNU General Public License version 2 only, as
8 // published by the Free Software Foundation.
9 //
10 // This code is distributed in the hope that it will be useful, but WITHOUT
11 // ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 // FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
13 // version 2 for more details (a copy is included in the LICENSE file that
14 // accompanied this code).
15 //
16 // You should have received a copy of the GNU General Public License version
17 // 2 along with this work; if not, write to the Free Software Foundation,
18 // Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
19 //
20 // Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
21 // or visit www.oracle.com if you need additional information or have any
22 // questions.
23 //
24 //
26 // GodSon3 Architecture Description File
28 //----------REGISTER DEFINITION BLOCK------------------------------------------
29 // This information is used by the matcher and the register allocator to
30 // describe individual registers and classes of registers within the target
// architecture.
33 // format:
34 // reg_def name (call convention, c-call convention, ideal type, encoding);
35 // call convention :
36 // NS = No-Save
37 // SOC = Save-On-Call
38 // SOE = Save-On-Entry
39 // AS = Always-Save
40 // ideal type :
41 // see opto/opcodes.hpp for more info
42 // reg_class name (reg, ...);
43 // alloc_class name (reg, ...);
44 register %{
46 // General Registers
47 // Integer Registers
48 reg_def R0 ( NS, NS, Op_RegI, 0, VMRegImpl::Bad());
49 reg_def AT ( NS, NS, Op_RegI, 1, AT->as_VMReg());
50 reg_def AT_H ( NS, NS, Op_RegI, 1, AT->as_VMReg()->next());
51 reg_def V0 (SOC, SOC, Op_RegI, 2, V0->as_VMReg());
52 reg_def V0_H (SOC, SOC, Op_RegI, 2, V0->as_VMReg()->next());
53 reg_def V1 (SOC, SOC, Op_RegI, 3, V1->as_VMReg());
54 reg_def V1_H (SOC, SOC, Op_RegI, 3, V1->as_VMReg()->next());
55 reg_def A0 (SOC, SOC, Op_RegI, 4, A0->as_VMReg());
56 reg_def A0_H (SOC, SOC, Op_RegI, 4, A0->as_VMReg()->next());
57 reg_def A1 (SOC, SOC, Op_RegI, 5, A1->as_VMReg());
58 reg_def A1_H (SOC, SOC, Op_RegI, 5, A1->as_VMReg()->next());
59 reg_def A2 (SOC, SOC, Op_RegI, 6, A2->as_VMReg());
60 reg_def A2_H (SOC, SOC, Op_RegI, 6, A2->as_VMReg()->next());
61 reg_def A3 (SOC, SOC, Op_RegI, 7, A3->as_VMReg());
62 reg_def A3_H (SOC, SOC, Op_RegI, 7, A3->as_VMReg()->next());
63 reg_def A4 (SOC, SOC, Op_RegI, 8, A4->as_VMReg());
64 reg_def A4_H (SOC, SOC, Op_RegI, 8, A4->as_VMReg()->next());
65 reg_def A5 (SOC, SOC, Op_RegI, 9, A5->as_VMReg());
66 reg_def A5_H (SOC, SOC, Op_RegI, 9, A5->as_VMReg()->next());
67 reg_def A6 (SOC, SOC, Op_RegI, 10, A6->as_VMReg());
68 reg_def A6_H (SOC, SOC, Op_RegI, 10, A6->as_VMReg()->next());
69 reg_def A7 (SOC, SOC, Op_RegI, 11, A7->as_VMReg());
70 reg_def A7_H (SOC, SOC, Op_RegI, 11, A7->as_VMReg()->next());
71 reg_def T0 (SOC, SOC, Op_RegI, 12, T0->as_VMReg());
72 reg_def T0_H (SOC, SOC, Op_RegI, 12, T0->as_VMReg()->next());
73 reg_def T1 (SOC, SOC, Op_RegI, 13, T1->as_VMReg());
74 reg_def T1_H (SOC, SOC, Op_RegI, 13, T1->as_VMReg()->next());
75 reg_def T2 (SOC, SOC, Op_RegI, 14, T2->as_VMReg());
76 reg_def T2_H (SOC, SOC, Op_RegI, 14, T2->as_VMReg()->next());
77 reg_def T3 (SOC, SOC, Op_RegI, 15, T3->as_VMReg());
78 reg_def T3_H (SOC, SOC, Op_RegI, 15, T3->as_VMReg()->next());
79 reg_def S0 (SOC, SOE, Op_RegI, 16, S0->as_VMReg());
80 reg_def S0_H (SOC, SOE, Op_RegI, 16, S0->as_VMReg()->next());
81 reg_def S1 (SOC, SOE, Op_RegI, 17, S1->as_VMReg());
82 reg_def S1_H (SOC, SOE, Op_RegI, 17, S1->as_VMReg()->next());
83 reg_def S2 (SOC, SOE, Op_RegI, 18, S2->as_VMReg());
84 reg_def S2_H (SOC, SOE, Op_RegI, 18, S2->as_VMReg()->next());
85 reg_def S3 (SOC, SOE, Op_RegI, 19, S3->as_VMReg());
86 reg_def S3_H (SOC, SOE, Op_RegI, 19, S3->as_VMReg()->next());
87 reg_def S4 (SOC, SOE, Op_RegI, 20, S4->as_VMReg());
88 reg_def S4_H (SOC, SOE, Op_RegI, 20, S4->as_VMReg()->next());
89 reg_def S5 (SOC, SOE, Op_RegI, 21, S5->as_VMReg());
90 reg_def S5_H (SOC, SOE, Op_RegI, 21, S5->as_VMReg()->next());
91 reg_def S6 (SOC, SOE, Op_RegI, 22, S6->as_VMReg());
92 reg_def S6_H (SOC, SOE, Op_RegI, 22, S6->as_VMReg()->next());
93 reg_def S7 (SOC, SOE, Op_RegI, 23, S7->as_VMReg());
94 reg_def S7_H (SOC, SOE, Op_RegI, 23, S7->as_VMReg()->next());
95 reg_def T8 (SOC, SOC, Op_RegI, 24, T8->as_VMReg());
96 reg_def T8_H (SOC, SOC, Op_RegI, 24, T8->as_VMReg()->next());
97 reg_def T9 (SOC, SOC, Op_RegI, 25, T9->as_VMReg());
98 reg_def T9_H (SOC, SOC, Op_RegI, 25, T9->as_VMReg()->next());
100 // Special Registers
101 reg_def K0 ( NS, NS, Op_RegI, 26, K0->as_VMReg());
102 reg_def K1 ( NS, NS, Op_RegI, 27, K1->as_VMReg());
103 reg_def GP ( NS, NS, Op_RegI, 28, GP->as_VMReg());
104 reg_def GP_H ( NS, NS, Op_RegI, 28, GP->as_VMReg()->next());
105 reg_def SP ( NS, NS, Op_RegI, 29, SP->as_VMReg());
106 reg_def SP_H ( NS, NS, Op_RegI, 29, SP->as_VMReg()->next());
107 reg_def FP ( NS, NS, Op_RegI, 30, FP->as_VMReg());
108 reg_def FP_H ( NS, NS, Op_RegI, 30, FP->as_VMReg()->next());
109 reg_def RA ( NS, NS, Op_RegI, 31, RA->as_VMReg());
110 reg_def RA_H ( NS, NS, Op_RegI, 31, RA->as_VMReg()->next());
112 // Floating registers.
113 reg_def F0 ( SOC, SOC, Op_RegF, 0, F0->as_VMReg());
114 reg_def F0_H ( SOC, SOC, Op_RegF, 0, F0->as_VMReg()->next());
115 reg_def F1 ( SOC, SOC, Op_RegF, 1, F1->as_VMReg());
116 reg_def F1_H ( SOC, SOC, Op_RegF, 1, F1->as_VMReg()->next());
117 reg_def F2 ( SOC, SOC, Op_RegF, 2, F2->as_VMReg());
118 reg_def F2_H ( SOC, SOC, Op_RegF, 2, F2->as_VMReg()->next());
119 reg_def F3 ( SOC, SOC, Op_RegF, 3, F3->as_VMReg());
120 reg_def F3_H ( SOC, SOC, Op_RegF, 3, F3->as_VMReg()->next());
121 reg_def F4 ( SOC, SOC, Op_RegF, 4, F4->as_VMReg());
122 reg_def F4_H ( SOC, SOC, Op_RegF, 4, F4->as_VMReg()->next());
123 reg_def F5 ( SOC, SOC, Op_RegF, 5, F5->as_VMReg());
124 reg_def F5_H ( SOC, SOC, Op_RegF, 5, F5->as_VMReg()->next());
125 reg_def F6 ( SOC, SOC, Op_RegF, 6, F6->as_VMReg());
126 reg_def F6_H ( SOC, SOC, Op_RegF, 6, F6->as_VMReg()->next());
127 reg_def F7 ( SOC, SOC, Op_RegF, 7, F7->as_VMReg());
128 reg_def F7_H ( SOC, SOC, Op_RegF, 7, F7->as_VMReg()->next());
129 reg_def F8 ( SOC, SOC, Op_RegF, 8, F8->as_VMReg());
130 reg_def F8_H ( SOC, SOC, Op_RegF, 8, F8->as_VMReg()->next());
131 reg_def F9 ( SOC, SOC, Op_RegF, 9, F9->as_VMReg());
132 reg_def F9_H ( SOC, SOC, Op_RegF, 9, F9->as_VMReg()->next());
133 reg_def F10 ( SOC, SOC, Op_RegF, 10, F10->as_VMReg());
134 reg_def F10_H ( SOC, SOC, Op_RegF, 10, F10->as_VMReg()->next());
135 reg_def F11 ( SOC, SOC, Op_RegF, 11, F11->as_VMReg());
136 reg_def F11_H ( SOC, SOC, Op_RegF, 11, F11->as_VMReg()->next());
137 reg_def F12 ( SOC, SOC, Op_RegF, 12, F12->as_VMReg());
138 reg_def F12_H ( SOC, SOC, Op_RegF, 12, F12->as_VMReg()->next());
139 reg_def F13 ( SOC, SOC, Op_RegF, 13, F13->as_VMReg());
140 reg_def F13_H ( SOC, SOC, Op_RegF, 13, F13->as_VMReg()->next());
141 reg_def F14 ( SOC, SOC, Op_RegF, 14, F14->as_VMReg());
142 reg_def F14_H ( SOC, SOC, Op_RegF, 14, F14->as_VMReg()->next());
143 reg_def F15 ( SOC, SOC, Op_RegF, 15, F15->as_VMReg());
144 reg_def F15_H ( SOC, SOC, Op_RegF, 15, F15->as_VMReg()->next());
145 reg_def F16 ( SOC, SOC, Op_RegF, 16, F16->as_VMReg());
146 reg_def F16_H ( SOC, SOC, Op_RegF, 16, F16->as_VMReg()->next());
147 reg_def F17 ( SOC, SOC, Op_RegF, 17, F17->as_VMReg());
148 reg_def F17_H ( SOC, SOC, Op_RegF, 17, F17->as_VMReg()->next());
149 reg_def F18 ( SOC, SOC, Op_RegF, 18, F18->as_VMReg());
150 reg_def F18_H ( SOC, SOC, Op_RegF, 18, F18->as_VMReg()->next());
151 reg_def F19 ( SOC, SOC, Op_RegF, 19, F19->as_VMReg());
152 reg_def F19_H ( SOC, SOC, Op_RegF, 19, F19->as_VMReg()->next());
153 reg_def F20 ( SOC, SOC, Op_RegF, 20, F20->as_VMReg());
154 reg_def F20_H ( SOC, SOC, Op_RegF, 20, F20->as_VMReg()->next());
155 reg_def F21 ( SOC, SOC, Op_RegF, 21, F21->as_VMReg());
156 reg_def F21_H ( SOC, SOC, Op_RegF, 21, F21->as_VMReg()->next());
157 reg_def F22 ( SOC, SOC, Op_RegF, 22, F22->as_VMReg());
158 reg_def F22_H ( SOC, SOC, Op_RegF, 22, F22->as_VMReg()->next());
159 reg_def F23 ( SOC, SOC, Op_RegF, 23, F23->as_VMReg());
160 reg_def F23_H ( SOC, SOC, Op_RegF, 23, F23->as_VMReg()->next());
161 reg_def F24 ( SOC, SOC, Op_RegF, 24, F24->as_VMReg());
162 reg_def F24_H ( SOC, SOC, Op_RegF, 24, F24->as_VMReg()->next());
163 reg_def F25 ( SOC, SOC, Op_RegF, 25, F25->as_VMReg());
164 reg_def F25_H ( SOC, SOC, Op_RegF, 25, F25->as_VMReg()->next());
165 reg_def F26 ( SOC, SOC, Op_RegF, 26, F26->as_VMReg());
166 reg_def F26_H ( SOC, SOC, Op_RegF, 26, F26->as_VMReg()->next());
167 reg_def F27 ( SOC, SOC, Op_RegF, 27, F27->as_VMReg());
168 reg_def F27_H ( SOC, SOC, Op_RegF, 27, F27->as_VMReg()->next());
169 reg_def F28 ( SOC, SOC, Op_RegF, 28, F28->as_VMReg());
170 reg_def F28_H ( SOC, SOC, Op_RegF, 28, F28->as_VMReg()->next());
171 reg_def F29 ( SOC, SOC, Op_RegF, 29, F29->as_VMReg());
172 reg_def F29_H ( SOC, SOC, Op_RegF, 29, F29->as_VMReg()->next());
173 reg_def F30 ( SOC, SOC, Op_RegF, 30, F30->as_VMReg());
174 reg_def F30_H ( SOC, SOC, Op_RegF, 30, F30->as_VMReg()->next());
175 reg_def F31 ( SOC, SOC, Op_RegF, 31, F31->as_VMReg());
176 reg_def F31_H ( SOC, SOC, Op_RegF, 31, F31->as_VMReg()->next());
179 // ----------------------------
180 // Special Registers
181 // Condition Codes Flag Registers
182 reg_def MIPS_FLAG (SOC, SOC, Op_RegFlags, 1, as_Register(1)->as_VMReg());
183 //S6 is used for get_thread(S6)
// S5 is used for the heap base of compressed oops
// Allocation order for the integer registers.  Callee-saved (S*)
// registers come first so the allocator prefers them; the special
// registers (GP, RA, SP, FP) come last and are effectively never
// handed out.
// Fixed: a comma was missing after GP_H, breaking the comma-separated
// register list.
alloc_class chunk0(
                   S7, S7_H,
                   S0, S0_H,
                   S1, S1_H,
                   S2, S2_H,
                   S4, S4_H,
                   S5, S5_H,
                   S6, S6_H,
                   S3, S3_H,
                   T2, T2_H,
                   T3, T3_H,
                   T8, T8_H,
                   T9, T9_H,
                   T1, T1_H, // inline_cache_reg
                   V1, V1_H,
                   A7, A7_H,
                   A6, A6_H,
                   A5, A5_H,
                   A4, A4_H,
                   V0, V0_H,
                   A3, A3_H,
                   A2, A2_H,
                   A1, A1_H,
                   A0, A0_H,
                   T0, T0_H,
                   GP, GP_H,
                   RA, RA_H,
                   SP, SP_H, // stack_pointer
                   FP, FP_H  // frame_pointer
                   );
216 alloc_class chunk1( F0, F0_H,
217 F1, F1_H,
218 F2, F2_H,
219 F3, F3_H,
220 F4, F4_H,
221 F5, F5_H,
222 F6, F6_H,
223 F7, F7_H,
224 F8, F8_H,
225 F9, F9_H,
226 F10, F10_H,
227 F11, F11_H,
228 F20, F20_H,
229 F21, F21_H,
230 F22, F22_H,
231 F23, F23_H,
232 F24, F24_H,
233 F25, F25_H,
234 F26, F26_H,
235 F27, F27_H,
236 F28, F28_H,
237 F19, F19_H,
238 F18, F18_H,
239 F17, F17_H,
240 F16, F16_H,
241 F15, F15_H,
242 F14, F14_H,
243 F13, F13_H,
244 F12, F12_H,
245 F29, F29_H,
246 F30, F30_H,
247 F31, F31_H);
249 alloc_class chunk2(MIPS_FLAG);
251 reg_class s_reg( S0, S1, S2, S3, S4, S5, S6, S7 );
252 reg_class s0_reg( S0 );
253 reg_class s1_reg( S1 );
254 reg_class s2_reg( S2 );
255 reg_class s3_reg( S3 );
256 reg_class s4_reg( S4 );
257 reg_class s5_reg( S5 );
258 reg_class s6_reg( S6 );
259 reg_class s7_reg( S7 );
261 reg_class t_reg( T0, T1, T2, T3, T8, T9 );
262 reg_class t0_reg( T0 );
263 reg_class t1_reg( T1 );
264 reg_class t2_reg( T2 );
265 reg_class t3_reg( T3 );
266 reg_class t8_reg( T8 );
267 reg_class t9_reg( T9 );
269 reg_class a_reg( A0, A1, A2, A3, A4, A5, A6, A7 );
270 reg_class a0_reg( A0 );
271 reg_class a1_reg( A1 );
272 reg_class a2_reg( A2 );
273 reg_class a3_reg( A3 );
274 reg_class a4_reg( A4 );
275 reg_class a5_reg( A5 );
276 reg_class a6_reg( A6 );
277 reg_class a7_reg( A7 );
279 reg_class v0_reg( V0 );
280 reg_class v1_reg( V1 );
282 reg_class sp_reg( SP, SP_H );
283 reg_class fp_reg( FP, FP_H );
285 reg_class mips_flags(MIPS_FLAG);
287 reg_class v0_long_reg( V0, V0_H );
288 reg_class v1_long_reg( V1, V1_H );
289 reg_class a0_long_reg( A0, A0_H );
290 reg_class a1_long_reg( A1, A1_H );
291 reg_class a2_long_reg( A2, A2_H );
292 reg_class a3_long_reg( A3, A3_H );
293 reg_class a4_long_reg( A4, A4_H );
294 reg_class a5_long_reg( A5, A5_H );
295 reg_class a6_long_reg( A6, A6_H );
296 reg_class a7_long_reg( A7, A7_H );
297 reg_class t0_long_reg( T0, T0_H );
298 reg_class t1_long_reg( T1, T1_H );
299 reg_class t2_long_reg( T2, T2_H );
300 reg_class t3_long_reg( T3, T3_H );
301 reg_class t8_long_reg( T8, T8_H );
302 reg_class t9_long_reg( T9, T9_H );
303 reg_class s0_long_reg( S0, S0_H );
304 reg_class s1_long_reg( S1, S1_H );
305 reg_class s2_long_reg( S2, S2_H );
306 reg_class s3_long_reg( S3, S3_H );
307 reg_class s4_long_reg( S4, S4_H );
308 reg_class s5_long_reg( S5, S5_H );
309 reg_class s6_long_reg( S6, S6_H );
310 reg_class s7_long_reg( S7, S7_H );
312 reg_class int_reg( S7, S0, S1, S2, S4, S3, T8, T2, T3, T1, V1, A7, A6, A5, A4, V0, A3, A2, A1, A0, T0 );
314 reg_class no_Ax_int_reg( S7, S0, S1, S2, S4, S3, T8, T2, T3, T1, V1, V0, T0 );
316 reg_class p_reg(
317 S7, S7_H,
318 S0, S0_H,
319 S1, S1_H,
320 S2, S2_H,
321 S4, S4_H,
322 S3, S3_H,
323 T8, T8_H,
324 T2, T2_H,
325 T3, T3_H,
326 T1, T1_H,
327 A7, A7_H,
328 A6, A6_H,
329 A5, A5_H,
330 A4, A4_H,
331 A3, A3_H,
332 A2, A2_H,
333 A1, A1_H,
334 A0, A0_H,
335 T0, T0_H
336 );
338 reg_class no_T8_p_reg(
339 S7, S7_H,
340 S0, S0_H,
341 S1, S1_H,
342 S2, S2_H,
343 S4, S4_H,
344 S3, S3_H,
345 T2, T2_H,
346 T3, T3_H,
347 T1, T1_H,
348 A7, A7_H,
349 A6, A6_H,
350 A5, A5_H,
351 A4, A4_H,
352 A3, A3_H,
353 A2, A2_H,
354 A1, A1_H,
355 A0, A0_H,
356 T0, T0_H
357 );
359 reg_class long_reg(
360 S7, S7_H,
361 S0, S0_H,
362 S1, S1_H,
363 S2, S2_H,
364 S4, S4_H,
365 S3, S3_H,
366 T8, T8_H,
367 T2, T2_H,
368 T3, T3_H,
369 T1, T1_H,
370 A7, A7_H,
371 A6, A6_H,
372 A5, A5_H,
373 A4, A4_H,
374 A3, A3_H,
375 A2, A2_H,
376 A1, A1_H,
377 A0, A0_H,
378 T0, T0_H
379 );
382 // Floating point registers.
383 // 2012/8/23 Fu: F30/F31 are used as temporary registers in D2I
// 2016/12/1 aoqi: F31 is no longer used as a temporary register in D2I
// Single-precision float registers available to the allocator.
// F30 is excluded (used as a temporary in D2I, per the note above);
// F31 is included.
// Fixed: a missing comma between F17 and F18.
reg_class flt_reg( F0, F1, F2, F3, F4, F5, F6, F7, F8, F9, F10, F11, F12, F13, F14, F15, F16, F17, F18, F19, F20, F21, F22, F23, F24, F25, F26, F27, F28, F29, F31);
386 reg_class dbl_reg( F0, F0_H,
387 F1, F1_H,
388 F2, F2_H,
389 F3, F3_H,
390 F4, F4_H,
391 F5, F5_H,
392 F6, F6_H,
393 F7, F7_H,
394 F8, F8_H,
395 F9, F9_H,
396 F10, F10_H,
397 F11, F11_H,
398 F12, F12_H,
399 F13, F13_H,
400 F14, F14_H,
401 F15, F15_H,
402 F16, F16_H,
403 F17, F17_H,
404 F18, F18_H,
405 F19, F19_H,
406 F20, F20_H,
407 F21, F21_H,
408 F22, F22_H,
409 F23, F23_H,
410 F24, F24_H,
411 F25, F25_H,
412 F26, F26_H,
413 F27, F27_H,
414 F28, F28_H,
415 F29, F29_H,
416 F31, F31_H);
418 reg_class flt_arg0( F12 );
419 reg_class dbl_arg0( F12, F12_H );
420 reg_class dbl_arg1( F14, F14_H );
422 %}
424 //----------DEFINITION BLOCK---------------------------------------------------
425 // Define name --> value mappings to inform the ADLC of an integer valued name
426 // Current support includes integer values in the range [0, 0x7FFFFFFF]
427 // Format:
428 // int_def <name> ( <int_value>, <expression>);
429 // Generated Code in ad_<arch>.hpp
430 // #define <name> (<expression>)
431 // // value == <int_value>
432 // Generated code in ad_<arch>.cpp adlc_verification()
433 // assert( <name> == <int_value>, "Expect (<expression>) to equal <int_value>");
434 //
435 definitions %{
436 int_def DEFAULT_COST ( 100, 100);
437 int_def HUGE_COST (1000000, 1000000);
439 // Memory refs are twice as expensive as run-of-the-mill.
440 int_def MEMORY_REF_COST ( 200, DEFAULT_COST * 2);
442 // Branches are even more expensive.
443 int_def BRANCH_COST ( 300, DEFAULT_COST * 3);
444 // we use jr instruction to construct call, so more expensive
445 // by yjl 2/28/2006
446 int_def CALL_COST ( 500, DEFAULT_COST * 5);
447 /*
448 int_def EQUAL ( 1, 1 );
449 int_def NOT_EQUAL ( 2, 2 );
450 int_def GREATER ( 3, 3 );
451 int_def GREATER_EQUAL ( 4, 4 );
452 int_def LESS ( 5, 5 );
453 int_def LESS_EQUAL ( 6, 6 );
454 */
455 %}
459 //----------SOURCE BLOCK-------------------------------------------------------
460 // This is a block of C++ code which provides values, functions, and
461 // definitions necessary in the rest of the architecture description
463 source_hpp %{
464 // Header information of the source block.
465 // Method declarations/definitions which are used outside
466 // the ad-scope can conveniently be defined here.
467 //
468 // To keep related declarations/definitions/uses close together,
469 // we switch between source %{ }% and source_hpp %{ }% freely as needed.
// Helper queried by Compile::shorten_branches.  This platform emits no
// call trampolines, so both queries report zero.
class CallStubImpl {

  //--------------------------------------------------------------
  //---<  Used for optimization in Compile::shorten_branches  >---
  //--------------------------------------------------------------

 public:
  // Size of call trampoline stub.
  static uint size_call_trampoline() {
    return 0; // no call trampolines on this platform
  }

  // Number of relocations needed by a call trampoline stub.
  static uint reloc_call_trampoline() {
    return 0; // no call trampolines on this platform
  }
};
// Emits and sizes the exception and deoptimization handler stubs.
// The emit_* methods are defined in the source block of this AD file.
class HandlerImpl {

 public:

  static int emit_exception_handler(CodeBuffer &cbuf);
  static int emit_deopt_handler(CodeBuffer& cbuf);

  static uint size_exception_handler() {
    // NativeCall instruction size is the same as NativeJump.
    // The exception handler starts out as a jump and can be patched to
    // a call by deoptimization.  (4932387)
    // Note that this value is also credited (in output.cpp) to
    // the size of the code section.
    // return NativeJump::instruction_size;
    int size = NativeCall::instruction_size;
    return round_to(size, 16); // keep the stub 16-byte aligned
  }

#ifdef _LP64
  static uint size_deopt_handler() {
    int size = NativeCall::instruction_size;
    return round_to(size, 16); // keep the stub 16-byte aligned
  }
#else
  static uint size_deopt_handler() {
    // NativeCall instruction size is the same as NativeJump.
    // The exception handler starts out as a jump and can be patched to
    // a call by deoptimization.  (4932387)
    // Note that this value is also credited (in output.cpp) to
    // the size of the code section.
    return 5 + NativeJump::instruction_size; // pushl(); jmp;
  }
#endif
};
524 %} // end source_hpp
526 source %{
528 #define NO_INDEX 0
529 #define RELOC_IMM64 Assembler::imm_operand
530 #define RELOC_DISP32 Assembler::disp32_operand
533 #define __ _masm.
536 // Emit exception handler code.
537 // Stuff framesize into a register and call a VM stub routine.
// Emit the exception handler stub for this compiled method.
// Materializes the entry point of the opto exception blob into T9 and
// jumps there (jr + delay-slot nop), padded to a 16-byte boundary.
// Returns the offset of the handler within the stub section, or 0 if
// stub allocation failed.
int HandlerImpl::emit_exception_handler(CodeBuffer& cbuf) {
  /*
  // Note that the code buffer's insts_mark is always relative to insts.
  // That's why we must use the macroassembler to generate a handler.
  MacroAssembler _masm(&cbuf);
  address base = __ start_a_stub(size_exception_handler());
  if (base == NULL)  return 0;  // CodeBuffer::expand failed
  int offset = __ offset();
  __ jump(RuntimeAddress(OptoRuntime::exception_blob()->entry_point()));
  assert(__ offset() - offset <= (int) size_exception_handler(), "overflow");
  __ end_a_stub();
  return offset;
  */
  // Note that the code buffer's insts_mark is always relative to insts.
  // That's why we must use the macroassembler to generate a handler.
  MacroAssembler _masm(&cbuf);
  address base =
  __ start_a_stub(size_exception_handler());
  if (base == NULL)  return 0;  // CodeBuffer::expand failed
  int offset = __ offset();

  __ block_comment("; emit_exception_handler");

  /* 2012/9/25 FIXME Jin: According to X86, we should use a direct jump.
   * However, this will trigger an assert after the 40th method:
   *
   *   39  b  java.lang.Throwable::<init> (25 bytes)
   *   --- ns java.lang.Throwable::fillInStackTrace
   *   40 !b  java.net.URLClassLoader::findClass (29 bytes)
   *   /vm/opto/runtime.cpp, 900, assert(caller.is_compiled_frame(), "must be")
   *   40   made not entrant  (2)  java.net.URLClassLoader::findClass (29 bytes)
   *
   * If we change from JR to JALR, the assert will disappear, but WebClient will
   * fail after the 403th method with unknown reason.
   */
  __ li48(T9, (long)OptoRuntime::exception_blob()->entry_point());
  __ jr(T9);
  __ delayed()->nop();
  __ align(16);
  assert(__ offset() - offset <= (int) size_exception_handler(), "overflow");
  __ end_a_stub();
  return offset;
}
582 // Emit deopt handler code.
// Emit the deoptimization handler stub for this compiled method.
// Loads the address of the deopt blob's unpack entry into T9 and calls
// it (jalr + delay-slot nop), padded to a 16-byte boundary.  Returns
// the offset of the handler within the stub section, or 0 if stub
// allocation failed.
int HandlerImpl::emit_deopt_handler(CodeBuffer& cbuf) {
  // Note that the code buffer's insts_mark is always relative to insts.
  // That's why we must use the macroassembler to generate a handler.
  MacroAssembler _masm(&cbuf);
  address base =
  __ start_a_stub(size_deopt_handler());

  // FIXME
  if (base == NULL) return 0;  // CodeBuffer::expand failed
  int offset = __ offset();

  __ block_comment("; emit_deopt_handler");

  // Mark the call below so relocation/patching code can find it.
  cbuf.set_insts_mark();
  __ relocate(relocInfo::runtime_call_type);

  __ li48(T9, (long)SharedRuntime::deopt_blob()->unpack());
  __ jalr(T9);
  __ delayed()->nop();
  __ align(16);
  assert(__ offset() - offset <= (int) size_deopt_handler(), "overflow");
  __ end_a_stub();
  return offset;
}
609 const bool Matcher::match_rule_supported(int opcode) {
610 if (!has_match_rule(opcode))
611 return false;
613 switch (opcode) {
614 //Op_CountLeadingZerosI Op_CountLeadingZerosL can be deleted, all MIPS CPUs support clz & dclz.
615 case Op_CountLeadingZerosI:
616 case Op_CountLeadingZerosL:
617 if (!UseCountLeadingZerosInstruction)
618 return false;
619 break;
620 case Op_CountTrailingZerosI:
621 case Op_CountTrailingZerosL:
622 if (!UseCountTrailingZerosInstruction)
623 return false;
624 break;
625 }
627 return true; // Per default match rules are supported.
628 }
630 //FIXME
631 // emit call stub, compiled java to interpreter
// Emit the static call stub (compiled Java -> interpreter).  The stub
// is fixed up when the corresponding call site is converted from
// calling compiled code to calling interpreted code.  Layout:
//   li48 S3, 0    ; methodOop placeholder, patched on resolution
//   li48 AT, -1   ; destination placeholder, patched on resolution
//   jr   AT
//   nop
void emit_java_to_interp(CodeBuffer &cbuf ) {
  // Stub is fixed up when the corresponding call is converted from calling
  // compiled code to calling interpreted code.
  // mov rbx,0
  // jmp -1

  address mark = cbuf.insts_mark();  // get mark within main instrs section

  // Note that the code buffer's insts_mark is always relative to insts.
  // That's why we must use the macroassembler to generate a stub.
  MacroAssembler _masm(&cbuf);

  address base =
  __ start_a_stub(Compile::MAX_stubs_size);
  if (base == NULL)  return;  // CodeBuffer::expand failed

  // static stub relocation stores the instruction address of the call
  __ relocate(static_stub_Relocation::spec(mark), 0);

  /* 2012/10/29 Jin: Rmethod contains methodOop, it should be relocated for GC */
  /*
  int oop_index = __ oop_recorder()->allocate_index(NULL);
  RelocationHolder rspec = oop_Relocation::spec(oop_index);
  __ relocate(rspec);
  */

  // static stub relocation also tags the methodOop in the code-stream.
  __ li48(S3, (long)0);  // methodOop placeholder, patched when resolved

  // This is recognized as unresolved by relocs/nativeInst/ic code
  __ relocate(relocInfo::runtime_call_type);

  cbuf.set_insts_mark();
  address call_pc = (address)-1;
  __ li48(AT, (long)call_pc);  // branch-target placeholder, patched later
  __ jr(AT);
  __ nop();

  __ align(16);
  __ end_a_stub();
  // Update current stubs pointer and restore code_end.
}
674 // size of call stub, compiled java to interpretor
675 uint size_java_to_interp() {
676 int size = 4 * 4 + NativeCall::instruction_size; // sizeof(li48) + NativeCall::instruction_size
677 return round_to(size, 16);
678 }
680 // relocation entries for call stub, compiled java to interpreter
681 uint reloc_java_to_interp() {
682 return 16; // in emit_java_to_interp + in Java_Static_Call
683 }
685 bool Matcher::is_short_branch_offset(int rule, int br_size, int offset) {
686 if( Assembler::is_simm16(offset) ) return true;
687 else
688 {
689 assert(false, "Not implemented yet !" );
690 Unimplemented();
691 }
692 }
695 // No additional cost for CMOVL.
696 const int Matcher::long_cmove_cost() { return 0; }
698 // No CMOVF/CMOVD with SSE2
699 const int Matcher::float_cmove_cost() { return ConditionalMoveLimit; }
701 // Does the CPU require late expand (see block.cpp for description of late expand)?
702 const bool Matcher::require_postalloc_expand = false;
704 // Should the Matcher clone shifts on addressing modes, expecting them
705 // to be subsumed into complex addressing expressions or compute them
706 // into registers? True for Intel but false for most RISCs
707 const bool Matcher::clone_shift_expressions = false;
709 // Do we need to mask the count passed to shift instructions or does
710 // the cpu only look at the lower 5/6 bits anyway?
711 const bool Matcher::need_masked_shift_count = false;
713 bool Matcher::narrow_oop_use_complex_address() {
714 NOT_LP64(ShouldNotCallThis());
715 assert(UseCompressedOops, "only for compressed oops code");
716 return false;
717 }
719 bool Matcher::narrow_klass_use_complex_address() {
720 NOT_LP64(ShouldNotCallThis());
721 assert(UseCompressedClassPointers, "only for compressed klass code");
722 return false;
723 }
725 // This is UltraSparc specific, true just means we have fast l2f conversion
726 const bool Matcher::convL2FSupported(void) {
727 return true;
728 }
730 // Max vector size in bytes. 0 if not supported.
731 const int Matcher::vector_width_in_bytes(BasicType bt) {
732 assert(MaxVectorSize == 8, "");
733 return 8;
734 }
736 // Vector ideal reg
737 const int Matcher::vector_ideal_reg(int size) {
738 assert(MaxVectorSize == 8, "");
739 switch(size) {
740 case 8: return Op_VecD;
741 }
742 ShouldNotReachHere();
743 return 0;
744 }
746 // Only lowest bits of xmm reg are used for vector shift count.
747 const int Matcher::vector_shift_count_ideal_reg(int size) {
748 fatal("vector shift is not supported");
749 return Node::NotAMachineReg;
750 }
752 // Limits on vector size (number of elements) loaded into vector.
753 const int Matcher::max_vector_size(const BasicType bt) {
754 assert(is_java_primitive(bt), "only primitive type vectors");
755 return vector_width_in_bytes(bt)/type2aelembytes(bt);
756 }
758 const int Matcher::min_vector_size(const BasicType bt) {
759 return max_vector_size(bt); // Same as max.
760 }
762 // MIPS supports misaligned vectors store/load? FIXME
763 const bool Matcher::misaligned_vectors_ok() {
764 return false;
765 //return !AlignVector; // can be changed by flag
766 }
768 // Register for DIVI projection of divmodI
769 RegMask Matcher::divI_proj_mask() {
770 ShouldNotReachHere();
771 return RegMask();
772 }
774 // Register for MODI projection of divmodI
775 RegMask Matcher::modI_proj_mask() {
776 ShouldNotReachHere();
777 return RegMask();
778 }
780 // Register for DIVL projection of divmodL
781 RegMask Matcher::divL_proj_mask() {
782 ShouldNotReachHere();
783 return RegMask();
784 }
786 int Matcher::regnum_to_fpu_offset(int regnum) {
787 return regnum - 32; // The FP registers are in the second chunk
788 }
791 const bool Matcher::isSimpleConstant64(jlong value) {
792 // Will one (StoreL ConL) be cheaper than two (StoreI ConI)?.
793 return true;
794 }
797 // Return whether or not this register is ever used as an argument. This
798 // function is used on startup to build the trampoline stubs in generateOptoStub.
799 // Registers not mentioned will be killed by the VM call in the trampoline, and
800 // arguments in those registers not be available to the callee.
801 bool Matcher::can_be_java_arg( int reg ) {
802 /* Refer to: [sharedRuntime_mips_64.cpp] SharedRuntime::java_calling_convention() */
803 if ( reg == T0_num || reg == T0_H_num
804 || reg == A0_num || reg == A0_H_num
805 || reg == A1_num || reg == A1_H_num
806 || reg == A2_num || reg == A2_H_num
807 || reg == A3_num || reg == A3_H_num
808 || reg == A4_num || reg == A4_H_num
809 || reg == A5_num || reg == A5_H_num
810 || reg == A6_num || reg == A6_H_num
811 || reg == A7_num || reg == A7_H_num )
812 return true;
814 if ( reg == F12_num || reg == F12_H_num
815 || reg == F13_num || reg == F13_H_num
816 || reg == F14_num || reg == F14_H_num
817 || reg == F15_num || reg == F15_H_num
818 || reg == F16_num || reg == F16_H_num
819 || reg == F17_num || reg == F17_H_num
820 || reg == F18_num || reg == F18_H_num
821 || reg == F19_num || reg == F19_H_num )
822 return true;
824 return false;
825 }
827 bool Matcher::is_spillable_arg( int reg ) {
828 return can_be_java_arg(reg);
829 }
831 bool Matcher::use_asm_for_ldiv_by_con( jlong divisor ) {
832 return false;
833 }
835 // Register for MODL projection of divmodL
836 RegMask Matcher::modL_proj_mask() {
837 ShouldNotReachHere();
838 return RegMask();
839 }
841 const RegMask Matcher::method_handle_invoke_SP_save_mask() {
842 return FP_REG_mask();
843 }
845 // MIPS doesn't support AES intrinsics
846 const bool Matcher::pass_original_key_for_aes() {
847 return false;
848 }
850 // The address of the call instruction needs to be 16-byte aligned to
851 // ensure that it does not span a cache line so that it can be patched.
// Pad so that the 6-instruction call sequence (lui/ori/dsll/ori/jalr/nop)
// emitted for this node starts on an alignment_required() boundary and
// therefore does not span a cache line (needed so it can be patched).
int CallStaticJavaDirectNode::compute_padding(int current_offset) const {
  //lui
  //ori
  //dsll
  //ori
  //jalr
  //nop
  return round_to(current_offset, alignment_required()) - current_offset;
}

// The address of the call instruction needs to be 16-byte aligned to
// ensure that it does not span a cache line so that it can be patched.
int CallDynamicJavaDirectNode::compute_padding(int current_offset) const {
  //li64 <--- skip
  //lui
  //ori
  //dsll
  //ori
  //jalr
  //nop
  // Alignment is computed for the call itself, so first account for the
  // li64 inline-cache setup (6 instructions = 24 bytes) that precedes it.
  current_offset += 4 * 6;    // skip li64
  return round_to(current_offset, alignment_required()) - current_offset;
}

// Same padding rule as CallStaticJavaDirectNode (no IC setup to skip).
int CallLeafNoFPDirectNode::compute_padding(int current_offset) const {
  //lui
  //ori
  //dsll
  //ori
  //jalr
  //nop
  return round_to(current_offset, alignment_required()) - current_offset;
}

// Same padding rule as CallStaticJavaDirectNode (no IC setup to skip).
int CallLeafDirectNode::compute_padding(int current_offset) const {
  //lui
  //ori
  //dsll
  //ori
  //jalr
  //nop
  return round_to(current_offset, alignment_required()) - current_offset;
}

// Same padding rule as CallStaticJavaDirectNode (no IC setup to skip).
int CallRuntimeDirectNode::compute_padding(int current_offset) const {
  //lui
  //ori
  //dsll
  //ori
  //jalr
  //nop
  return round_to(current_offset, alignment_required()) - current_offset;
}
918 // If CPU can load and store mis-aligned doubles directly then no fixup is
919 // needed. Else we split the double into 2 integer pieces and move it
920 // piece-by-piece. Only happens when passing doubles into C code as the
921 // Java calling convention forces doubles to be aligned.
922 const bool Matcher::misaligned_doubles_ok = false;
923 // Do floats take an entire double register or just half?
924 //const bool Matcher::float_in_double = true;
925 bool Matcher::float_in_double() { return false; }
926 // Threshold size for cleararray.
927 const int Matcher::init_array_short_size = 8 * BytesPerLong;
928 // Do ints take an entire long register or just half?
929 const bool Matcher::int_in_long = true;
930 // Is it better to copy float constants, or load them directly from memory?
931 // Intel can load a float constant from a direct address, requiring no
932 // extra registers. Most RISCs will have to materialize an address into a
933 // register first, so they would do better to copy the constant from stack.
934 const bool Matcher::rematerialize_float_constants = false;
935 // Advertise here if the CPU requires explicit rounding operations
936 // to implement the UseStrictFP mode.
937 const bool Matcher::strict_fp_requires_explicit_rounding = false;
938 // The ecx parameter to rep stos for the ClearArray node is in dwords.
939 const bool Matcher::init_array_count_is_in_bytes = false;
942 // Indicate if the safepoint node needs the polling page as an input.
943 // Since MIPS doesn't have absolute addressing, it needs.
944 bool SafePointNode::needs_polling_address_input() {
945 return true;
946 }
// !!!!! Special hack to get all type of calls to specify the byte offset
// from the start of the call to the point where the return address
// will point.
int MachCallStaticJavaNode::ret_addr_offset() {
  assert(NativeCall::instruction_size == 24, "in MachCallStaticJavaNode::ret_addr_offset");
  // The call sequence is 6 instructions = 24 bytes:
  //   lui
  //   ori
  //   dsll
  //   ori
  //   jalr
  //   nop   (branch delay slot)
  return NativeCall::instruction_size;
}
int MachCallDynamicJavaNode::ret_addr_offset() {
  /* 2012/9/10 Jin: must be kept in sync with Java_Dynamic_Call */
  // return NativeCall::instruction_size;
  assert(NativeCall::instruction_size == 24, "in MachCallDynamicJavaNode::ret_addr_offset");
  // IC-klass materialization followed by the 24-byte call sequence:
  //   lui  IC_Klass,
  //   ori  IC_Klass,
  //   dsll IC_Klass
  //   ori  IC_Klass
  //   lui  T9
  //   ori  T9
  //   dsll T9
  //   ori  T9
  //   jalr T9
  //   nop
  // NOTE(review): the listing above shows 4 IC-load instructions but the
  // code reserves 6*4 bytes for them; presumably the IC load is a full
  // 6-instruction li48-style sequence -- confirm against Java_Dynamic_Call.
  return 6 * 4 + NativeCall::instruction_size;
}
983 //=============================================================================
985 // Figure out which register class each belongs in: rc_int, rc_float, rc_stack
986 enum RC { rc_bad, rc_int, rc_float, rc_stack };
987 static enum RC rc_class( OptoReg::Name reg ) {
988 if( !OptoReg::is_valid(reg) ) return rc_bad;
989 if (OptoReg::is_stack(reg)) return rc_stack;
990 VMReg r = OptoReg::as_VMReg(reg);
991 if (r->is_Register()) return rc_int;
992 assert(r->is_FloatRegister(), "must be");
993 return rc_float;
994 }
// Shared worker for MachSpillCopyNode::format/emit/size.  Moves a value
// between the register classes computed by rc_class(): mem<->mem,
// mem<->gpr, mem<->fpr, gpr<->gpr, gpr<->fpr, fpr<->fpr.  When 'cbuf' is
// non-NULL the instructions are emitted; otherwise (non-PRODUCT, !do_size)
// a textual form is printed to 'st'.  Returns the code size in bytes.
// A pair (first,second) of adjacent even/odd register names denotes a
// 64-bit value; otherwise the move is 32-bit.
uint MachSpillCopyNode::implementation( CodeBuffer *cbuf, PhaseRegAlloc *ra_, bool do_size, outputStream* st ) const {
  // Get registers to move
  OptoReg::Name src_second = ra_->get_reg_second(in(1));
  OptoReg::Name src_first = ra_->get_reg_first(in(1));
  OptoReg::Name dst_second = ra_->get_reg_second(this );
  OptoReg::Name dst_first = ra_->get_reg_first(this );

  enum RC src_second_rc = rc_class(src_second);
  enum RC src_first_rc = rc_class(src_first);
  enum RC dst_second_rc = rc_class(dst_second);
  enum RC dst_first_rc = rc_class(dst_first);

  assert(OptoReg::is_valid(src_first) && OptoReg::is_valid(dst_first), "must move at least 1 register" );

  // Generate spill code!
  int size = 0;

  if( src_first == dst_first && src_second == dst_second )
    return 0; // Self copy, no move

  if (src_first_rc == rc_stack) {
    // mem ->
    if (dst_first_rc == rc_stack) {
      // mem -> mem : bounce through scratch register AT (2 instructions)
      assert(src_second != dst_first, "overlap");
      if ((src_first & 1) == 0 && src_first + 1 == src_second &&
          (dst_first & 1) == 0 && dst_first + 1 == dst_second) {
        // 64-bit
        int src_offset = ra_->reg2offset(src_first);
        int dst_offset = ra_->reg2offset(dst_first);
        if (cbuf) {
          MacroAssembler _masm(cbuf);
          __ ld(AT, Address(SP, src_offset));
          __ sd(AT, Address(SP, dst_offset));
#ifndef PRODUCT
        } else {
          if(!do_size){
            if (size != 0) st->print("\n\t");
            st->print("ld AT, [SP + #%d]\t# 64-bit mem-mem spill 1\n\t"
                      "sd AT, [SP + #%d]",
                      src_offset, dst_offset);
          }
#endif
        }
        size += 8;
      } else {
        // 32-bit
        assert(!((src_first & 1) == 0 && src_first + 1 == src_second), "no transform");
        assert(!((dst_first & 1) == 0 && dst_first + 1 == dst_second), "no transform");
        // No pushl/popl, so:
        int src_offset = ra_->reg2offset(src_first);
        int dst_offset = ra_->reg2offset(dst_first);
        if (cbuf) {
          MacroAssembler _masm(cbuf);
          __ lw(AT, Address(SP, src_offset));
          __ sw(AT, Address(SP, dst_offset));
#ifndef PRODUCT
        } else {
          if(!do_size){
            if (size != 0) st->print("\n\t");
            st->print("lw AT, [SP + #%d] spill 2\n\t"
                      "sw AT, [SP + #%d]\n\t",
                      src_offset, dst_offset);
          }
#endif
        }
        size += 8;
      }
      return size;
    } else if (dst_first_rc == rc_int) {
      // mem -> gpr
      if ((src_first & 1) == 0 && src_first + 1 == src_second &&
          (dst_first & 1) == 0 && dst_first + 1 == dst_second) {
        // 64-bit
        int offset = ra_->reg2offset(src_first);
        if (cbuf) {
          MacroAssembler _masm(cbuf);
          __ ld(as_Register(Matcher::_regEncode[dst_first]), Address(SP, offset));
#ifndef PRODUCT
        } else {
          if(!do_size){
            if (size != 0) st->print("\n\t");
            st->print("ld %s, [SP + #%d]\t# spill 3",
                      Matcher::regName[dst_first],
                      offset);
          }
#endif
        }
        size += 4;
      } else {
        // 32-bit: sign-extending lw for ints, zero-extending lwu otherwise
        assert(!((src_first & 1) == 0 && src_first + 1 == src_second), "no transform");
        assert(!((dst_first & 1) == 0 && dst_first + 1 == dst_second), "no transform");
        int offset = ra_->reg2offset(src_first);
        if (cbuf) {
          MacroAssembler _masm(cbuf);
          if (this->ideal_reg() == Op_RegI)
            __ lw(as_Register(Matcher::_regEncode[dst_first]), Address(SP, offset));
          else
            __ lwu(as_Register(Matcher::_regEncode[dst_first]), Address(SP, offset));
#ifndef PRODUCT
        } else {
          if(!do_size){
            if (size != 0) st->print("\n\t");
            if (this->ideal_reg() == Op_RegI)
              st->print("lw %s, [SP + #%d]\t# spill 4",
                        Matcher::regName[dst_first],
                        offset);
            else
              st->print("lwu %s, [SP + #%d]\t# spill 5",
                        Matcher::regName[dst_first],
                        offset);
          }
#endif
        }
        size += 4;
      }
      return size;
    } else if (dst_first_rc == rc_float) {
      // mem-> xmm  (comment inherited from x86; "xmm" here means FPU register)
      if ((src_first & 1) == 0 && src_first + 1 == src_second &&
          (dst_first & 1) == 0 && dst_first + 1 == dst_second) {
        // 64-bit
        int offset = ra_->reg2offset(src_first);
        if (cbuf) {
          MacroAssembler _masm(cbuf);
          __ ldc1( as_FloatRegister(Matcher::_regEncode[dst_first]), Address(SP, offset));
#ifndef PRODUCT
        } else {
          if(!do_size){
            if (size != 0) st->print("\n\t");
            st->print("ldc1 %s, [SP + #%d]\t# spill 6",
                      Matcher::regName[dst_first],
                      offset);
          }
#endif
        }
        size += 4;
      } else {
        // 32-bit
        assert(!((src_first & 1) == 0 && src_first + 1 == src_second), "no transform");
        assert(!((dst_first & 1) == 0 && dst_first + 1 == dst_second), "no transform");
        int offset = ra_->reg2offset(src_first);
        if (cbuf) {
          MacroAssembler _masm(cbuf);
          __ lwc1( as_FloatRegister(Matcher::_regEncode[dst_first]), Address(SP, offset));
#ifndef PRODUCT
        } else {
          if(!do_size){
            if (size != 0) st->print("\n\t");
            st->print("lwc1 %s, [SP + #%d]\t# spill 7",
                      Matcher::regName[dst_first],
                      offset);
          }
#endif
        }
        size += 4;
      }
      return size;
    }
  } else if (src_first_rc == rc_int) {
    // gpr ->
    if (dst_first_rc == rc_stack) {
      // gpr -> mem
      if ((src_first & 1) == 0 && src_first + 1 == src_second &&
          (dst_first & 1) == 0 && dst_first + 1 == dst_second) {
        // 64-bit
        int offset = ra_->reg2offset(dst_first);
        if (cbuf) {
          MacroAssembler _masm(cbuf);
          __ sd(as_Register(Matcher::_regEncode[src_first]), Address(SP, offset));
#ifndef PRODUCT
        } else {
          if(!do_size){
            if (size != 0) st->print("\n\t");
            st->print("sd %s, [SP + #%d] # spill 8",
                      Matcher::regName[src_first],
                      offset);
          }
#endif
        }
        size += 4;
      } else {
        // 32-bit
        assert(!((src_first & 1) == 0 && src_first + 1 == src_second), "no transform");
        assert(!((dst_first & 1) == 0 && dst_first + 1 == dst_second), "no transform");
        int offset = ra_->reg2offset(dst_first);
        if (cbuf) {
          MacroAssembler _masm(cbuf);
          __ sw(as_Register(Matcher::_regEncode[src_first]), Address(SP, offset));
#ifndef PRODUCT
        } else {
          if(!do_size){
            if (size != 0) st->print("\n\t");
            st->print("sw %s, [SP + #%d]\t# spill 9",
                      Matcher::regName[src_first], offset);
          }
#endif
        }
        size += 4;
      }
      return size;
    } else if (dst_first_rc == rc_int) {
      // gpr -> gpr
      if ((src_first & 1) == 0 && src_first + 1 == src_second &&
          (dst_first & 1) == 0 && dst_first + 1 == dst_second) {
        // 64-bit
        if (cbuf) {
          MacroAssembler _masm(cbuf);
          __ move(as_Register(Matcher::_regEncode[dst_first]),
                  as_Register(Matcher::_regEncode[src_first]));
#ifndef PRODUCT
        } else {
          if(!do_size){
            if (size != 0) st->print("\n\t");
            st->print("move(64bit) %s <-- %s\t# spill 10",
                      Matcher::regName[dst_first],
                      Matcher::regName[src_first]);
          }
#endif
        }
        size += 4;
        return size;
      } else {
        // 32-bit: truncate for ints, plain 64-bit copy (daddu rd, rs, R0) otherwise
        assert(!((src_first & 1) == 0 && src_first + 1 == src_second), "no transform");
        assert(!((dst_first & 1) == 0 && dst_first + 1 == dst_second), "no transform");
        if (cbuf) {
          MacroAssembler _masm(cbuf);
          if (this->ideal_reg() == Op_RegI)
            __ move_u32(as_Register(Matcher::_regEncode[dst_first]), as_Register(Matcher::_regEncode[src_first]));
          else
            __ daddu(as_Register(Matcher::_regEncode[dst_first]), as_Register(Matcher::_regEncode[src_first]), R0);

#ifndef PRODUCT
        } else {
          if(!do_size){
            if (size != 0) st->print("\n\t");
            st->print("move(32-bit) %s <-- %s\t# spill 11",
                      Matcher::regName[dst_first],
                      Matcher::regName[src_first]);
          }
#endif
        }
        size += 4;
        return size;
      }
    } else if (dst_first_rc == rc_float) {
      // gpr -> xmm
      if ((src_first & 1) == 0 && src_first + 1 == src_second &&
          (dst_first & 1) == 0 && dst_first + 1 == dst_second) {
        // 64-bit
        if (cbuf) {
          MacroAssembler _masm(cbuf);
          __ dmtc1(as_Register(Matcher::_regEncode[src_first]), as_FloatRegister(Matcher::_regEncode[dst_first]));
#ifndef PRODUCT
        } else {
          if(!do_size){
            if (size != 0) st->print("\n\t");
            // NOTE(review): the print lists dst before src while the
            // assembler call takes (src, dst) -- verify the intended
            // operand order of the textual form.
            st->print("dmtc1 %s, %s\t# spill 12",
                      Matcher::regName[dst_first],
                      Matcher::regName[src_first]);
          }
#endif
        }
        size += 4;
      } else {
        // 32-bit
        assert(!((src_first & 1) == 0 && src_first + 1 == src_second), "no transform");
        assert(!((dst_first & 1) == 0 && dst_first + 1 == dst_second), "no transform");
        if (cbuf) {
          MacroAssembler _masm(cbuf);
          __ mtc1( as_Register(Matcher::_regEncode[src_first]), as_FloatRegister(Matcher::_regEncode[dst_first]) );
#ifndef PRODUCT
        } else {
          if(!do_size){
            if (size != 0) st->print("\n\t");
            st->print("mtc1 %s, %s\t# spill 13",
                      Matcher::regName[dst_first],
                      Matcher::regName[src_first]);
          }
#endif
        }
        size += 4;
      }
      return size;
    }
  } else if (src_first_rc == rc_float) {
    // xmm ->
    if (dst_first_rc == rc_stack) {
      // xmm -> mem
      if ((src_first & 1) == 0 && src_first + 1 == src_second &&
          (dst_first & 1) == 0 && dst_first + 1 == dst_second) {
        // 64-bit
        int offset = ra_->reg2offset(dst_first);
        if (cbuf) {
          MacroAssembler _masm(cbuf);
          __ sdc1( as_FloatRegister(Matcher::_regEncode[src_first]), Address(SP, offset) );
#ifndef PRODUCT
        } else {
          if(!do_size){
            if (size != 0) st->print("\n\t");
            st->print("sdc1 %s, [SP + #%d]\t# spill 14",
                      Matcher::regName[src_first],
                      offset);
          }
#endif
        }
        size += 4;
      } else {
        // 32-bit
        assert(!((src_first & 1) == 0 && src_first + 1 == src_second), "no transform");
        assert(!((dst_first & 1) == 0 && dst_first + 1 == dst_second), "no transform");
        int offset = ra_->reg2offset(dst_first);
        if (cbuf) {
          MacroAssembler _masm(cbuf);
          __ swc1(as_FloatRegister(Matcher::_regEncode[src_first]), Address(SP, offset));
#ifndef PRODUCT
        } else {
          if(!do_size){
            if (size != 0) st->print("\n\t");
            st->print("swc1 %s, [SP + #%d]\t# spill 15",
                      Matcher::regName[src_first],
                      offset);
          }
#endif
        }
        size += 4;
      }
      return size;
    } else if (dst_first_rc == rc_int) {
      // xmm -> gpr
      if ((src_first & 1) == 0 && src_first + 1 == src_second &&
          (dst_first & 1) == 0 && dst_first + 1 == dst_second) {
        // 64-bit
        if (cbuf) {
          MacroAssembler _masm(cbuf);
          __ dmfc1( as_Register(Matcher::_regEncode[dst_first]), as_FloatRegister(Matcher::_regEncode[src_first]));
#ifndef PRODUCT
        } else {
          if(!do_size){
            if (size != 0) st->print("\n\t");
            st->print("dmfc1 %s, %s\t# spill 16",
                      Matcher::regName[dst_first],
                      Matcher::regName[src_first]);
          }
#endif
        }
        size += 4;
      } else {
        // 32-bit
        assert(!((src_first & 1) == 0 && src_first + 1 == src_second), "no transform");
        assert(!((dst_first & 1) == 0 && dst_first + 1 == dst_second), "no transform");
        if (cbuf) {
          MacroAssembler _masm(cbuf);
          __ mfc1( as_Register(Matcher::_regEncode[dst_first]), as_FloatRegister(Matcher::_regEncode[src_first]));
#ifndef PRODUCT
        } else {
          if(!do_size){
            if (size != 0) st->print("\n\t");
            st->print("mfc1 %s, %s\t# spill 17",
                      Matcher::regName[dst_first],
                      Matcher::regName[src_first]);
          }
#endif
        }
        size += 4;
      }
      return size;
    } else if (dst_first_rc == rc_float) {
      // xmm -> xmm
      if ((src_first & 1) == 0 && src_first + 1 == src_second &&
          (dst_first & 1) == 0 && dst_first + 1 == dst_second) {
        // 64-bit
        if (cbuf) {
          MacroAssembler _masm(cbuf);
          __ mov_d( as_FloatRegister(Matcher::_regEncode[dst_first]), as_FloatRegister(Matcher::_regEncode[src_first]));
#ifndef PRODUCT
        } else {
          if(!do_size){
            if (size != 0) st->print("\n\t");
            st->print("mov_d %s <-- %s\t# spill 18",
                      Matcher::regName[dst_first],
                      Matcher::regName[src_first]);
          }
#endif
        }
        size += 4;
      } else {
        // 32-bit
        assert(!((src_first & 1) == 0 && src_first + 1 == src_second), "no transform");
        assert(!((dst_first & 1) == 0 && dst_first + 1 == dst_second), "no transform");
        if (cbuf) {
          MacroAssembler _masm(cbuf);
          __ mov_s( as_FloatRegister(Matcher::_regEncode[dst_first]), as_FloatRegister(Matcher::_regEncode[src_first]));
#ifndef PRODUCT
        } else {
          if(!do_size){
            if (size != 0) st->print("\n\t");
            st->print("mov_s %s <-- %s\t# spill 19",
                      Matcher::regName[dst_first],
                      Matcher::regName[src_first]);
          }
#endif
        }
        size += 4;
      }
      return size;
    }
  }

  // Unhandled register-class combination: should never be reached.
  assert(0," foo ");
  Unimplemented();
  return size;

}
#ifndef PRODUCT
// Print the spill copy without emitting code (cbuf == NULL path).
void MachSpillCopyNode::format( PhaseRegAlloc *ra_, outputStream* st ) const {
  implementation( NULL, ra_, false, st );
}
#endif
// Emit the spill copy into the code buffer.
void MachSpillCopyNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
  implementation( &cbuf, ra_, false, NULL );
}
// Size in bytes of the spill copy, computed without emitting (do_size path).
uint MachSpillCopyNode::size(PhaseRegAlloc *ra_) const {
  return implementation( NULL, ra_, true, NULL );
}
1427 //=============================================================================
1428 #
#ifndef PRODUCT
// Textual form of the breakpoint node ("INT3" name kept from the x86 template).
void MachBreakpointNode::format( PhaseRegAlloc *, outputStream* st ) const {
  st->print("INT3");
}
#endif
// Emit a debugger breakpoint (the assembler's int3() maps it to the
// platform's trap instruction).
void MachBreakpointNode::emit(CodeBuffer &cbuf, PhaseRegAlloc* ra_) const {
  MacroAssembler _masm(&cbuf);
  __ int3();
}
// Size of the breakpoint node; falls back to the generic size computation.
uint MachBreakpointNode::size(PhaseRegAlloc* ra_) const {
  return MachNode::size(ra_);
}
1446 //=============================================================================
1447 #ifndef PRODUCT
1448 void MachEpilogNode::format( PhaseRegAlloc *ra_, outputStream* st ) const {
1449 Compile *C = ra_->C;
1450 int framesize = C->frame_size_in_bytes();
1452 assert((framesize & (StackAlignmentInBytes-1)) == 0, "frame size not aligned");
1454 st->print("daddiu SP, SP, %d # Rlease stack @ MachEpilogNode",framesize);
1455 st->cr(); st->print("\t");
1456 if (UseLoongsonISA) {
1457 st->print("gslq RA, FP, SP, %d # Restore FP & RA @ MachEpilogNode", -wordSize*2);
1458 } else {
1459 st->print("ld RA, SP, %d # Restore RA @ MachEpilogNode", -wordSize);
1460 st->cr(); st->print("\t");
1461 st->print("ld FP, SP, %d # Restore FP @ MachEpilogNode", -wordSize*2);
1462 }
1464 if( do_polling() && C->is_method_compilation() ) {
1465 st->print("Poll Safepoint # MachEpilogNode");
1466 }
1467 }
1468 #endif
// Emit the method epilog: pop the frame, restore RA/FP from below the new
// SP (paired gslq on Loongson), then optionally touch the polling page to
// trigger a return safepoint.
void MachEpilogNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
  Compile *C = ra_->C;
  MacroAssembler _masm(&cbuf);
  int framesize = C->frame_size_in_bytes();

  assert((framesize & (StackAlignmentInBytes-1)) == 0, "frame size not aligned");

  __ daddiu(SP, SP, framesize);

  if (UseLoongsonISA) {
    // Loongson load-pair: restores RA and FP in one instruction.
    __ gslq(RA, FP, SP, -wordSize*2);
  } else {
    __ ld(RA, SP, -wordSize );
    __ ld(FP, SP, -wordSize*2 );
  }

  /* 2012/11/19 Jin: The epilog in a RuntimeStub should not contain a safepoint */
  if( do_polling() && C->is_method_compilation() ) {
#ifndef OPT_SAFEPOINT
    // Materialize the polling-page address and load from it; the relocation
    // lets the runtime recognize this as a poll_return site.
    __ set64(AT, (long)os::get_polling_page());
    __ relocate(relocInfo::poll_return_type);
    __ lw(AT, AT, 0);
#else
    __ lui(AT, Assembler::split_high((intptr_t)os::get_polling_page()));
    __ relocate(relocInfo::poll_return_type);
    __ lw(AT, AT, Assembler::split_low((intptr_t)os::get_polling_page()));
#endif
  }
}
// Epilog size varies (Loongson vs. plain, polling); use the generic path.
uint MachEpilogNode::size(PhaseRegAlloc *ra_) const {
  return MachNode::size(ra_); // too many variables; just compute it the hard way
}
// Upper bound on relocation entries emitted by the epilog.
int MachEpilogNode::reloc() const {
  return 0; // a large enough number
}
// Use the default pipeline description for the epilog.
const Pipeline * MachEpilogNode::pipeline() const {
  return MachNode::pipeline_class();
}
1512 int MachEpilogNode::safepoint_offset() const { return 0; }
1514 //=============================================================================
#ifndef PRODUCT
// Textual form: the box-lock node computes reg = SP + offset of the
// monitor slot.
void BoxLockNode::format( PhaseRegAlloc *ra_, outputStream* st ) const {
  int offset = ra_->reg2offset(in_RegMask(0).find_first_elem());
  int reg = ra_->get_reg_first(this);
  st->print("ADDI %s, SP, %d @BoxLockNode",Matcher::regName[reg],offset);
}
#endif
// One addi instruction = 4 bytes.
uint BoxLockNode::size(PhaseRegAlloc *ra_) const {
  return 4;
}
// Materialize the stack address of the monitor box: reg = SP + offset.
void BoxLockNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
  MacroAssembler _masm(&cbuf);
  int offset = ra_->reg2offset(in_RegMask(0).find_first_elem());
  int reg = ra_->get_encode(this);

  // NOTE(review): addi takes a 16-bit signed immediate; presumably monitor
  // slot offsets always fit in simm16 -- confirm for very large frames.
  __ addi(as_Register(reg), SP, offset);
  // Dead x86 encoding kept from the template this port was derived from.
  /*
  if( offset >= 128 ) {
    emit_opcode(cbuf, 0x8D);      // LEA  reg,[SP+offset]
    emit_rm(cbuf, 0x2, reg, 0x04);
    emit_rm(cbuf, 0x0, 0x04, SP_enc);
    emit_d32(cbuf, offset);
  }
  else {
    emit_opcode(cbuf, 0x8D);      // LEA  reg,[SP+offset]
    emit_rm(cbuf, 0x1, reg, 0x04);
    emit_rm(cbuf, 0x0, 0x04, SP_enc);
    emit_d8(cbuf, offset);
  }
  */
}
//static int sizeof_FFree_Float_Stack_All = -1;

// Byte offset from call start to the return address: the 6-instruction
// (24-byte) li48 + jalr + delay-slot sequence.
int MachCallRuntimeNode::ret_addr_offset() {
  //lui
  //ori
  //dsll
  //ori
  //jalr
  //nop
  assert(NativeCall::instruction_size == 24, "in MachCallRuntimeNode::ret_addr_offset()");
  return NativeCall::instruction_size;
  // return 16;
}
1570 //=============================================================================
#ifndef PRODUCT
// Textual form of a nop run used for loop/call padding.
void MachNopNode::format( PhaseRegAlloc *, outputStream* st ) const {
  st->print("NOP \t# %d bytes pad for loops and calls", 4 * _count);
}
#endif
1577 void MachNopNode::emit(CodeBuffer &cbuf, PhaseRegAlloc * ) const {
1578 MacroAssembler _masm(&cbuf);
1579 int i = 0;
1580 for(i = 0; i < _count; i++)
1581 __ nop();
1582 }
// Each nop is one 4-byte instruction.
uint MachNopNode::size(PhaseRegAlloc *) const {
  return 4 * _count;
}
// Use the default pipeline description for nops.
const Pipeline* MachNopNode::pipeline() const {
  return MachNode::pipeline_class();
}
1591 //=============================================================================
1593 //=============================================================================
#ifndef PRODUCT
// Textual form of the unverified entry point (inline-cache check).
void MachUEPNode::format( PhaseRegAlloc *ra_, outputStream* st ) const {
  st->print_cr("load_klass(AT, T0)");
  st->print_cr("\tbeq(AT, iCache, L)");
  st->print_cr("\tnop");
  st->print_cr("\tjmp(SharedRuntime::get_ic_miss_stub(), relocInfo::runtime_call_type)");
  st->print_cr("\tnop");
  st->print_cr("\tnop");
  st->print_cr(" L:");
}
#endif
// Unverified entry point: compare the receiver's klass (receiver is in T0)
// against the inline-cache klass; on mismatch, tail-jump to the IC miss stub.
void MachUEPNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
  MacroAssembler _masm(&cbuf);
#ifdef ASSERT
  //uint code_size = cbuf.code_size();
#endif
  int ic_reg = Matcher::inline_cache_reg_encode();
  Label L;
  Register receiver = T0;
  Register iCache = as_Register(ic_reg);
  __ load_klass(AT, receiver);
  __ beq(AT, iCache, L);
  __ nop();   // branch delay slot

  __ relocate(relocInfo::runtime_call_type);
  __ li48(T9, (long)SharedRuntime::get_ic_miss_stub());
  __ jr(T9);
  __ nop();   // branch delay slot

  /* WARNING these NOPs are critical so that verified entry point is properly
     8 bytes aligned for patching by NativeJump::patch_verified_entry() */
  __ align(CodeEntryAlignment);
  __ bind(L);
}
// UEP size depends on alignment padding; use the generic computation.
uint MachUEPNode::size(PhaseRegAlloc *ra_) const {
  return MachNode::size(ra_);
}
//=============================================================================

// The constant-table base may live in any pointer register.
const RegMask& MachConstantBaseNode::_out_RegMask = P_REG_mask();

// Constants are addressed via a full 48-bit absolute address (li48), so the
// table base needs no bias.
int Compile::ConstantTable::calculate_table_base_offset() const {
  return 0;  // absolute addressing, no offset
}
// This port emits the constant-table base directly; no post-register-
// allocation expansion is needed, so postalloc_expand must never be called.
bool MachConstantBaseNode::requires_postalloc_expand() const { return false; }
void MachConstantBaseNode::postalloc_expand(GrowableArray <Node *> *nodes, PhaseRegAlloc *ra_) {
  ShouldNotReachHere();
}
// Materialize the absolute address of the constant-table base into the
// allocated register via a relocated li48 sequence.
void MachConstantBaseNode::emit(CodeBuffer& cbuf, PhaseRegAlloc* ra_) const {
  Compile* C = ra_->C;
  Compile::ConstantTable& constant_table = C->constant_table();
  MacroAssembler _masm(&cbuf);

  Register Rtoc = as_Register(ra_->get_encode(this));
  CodeSection* consts_section = __ code()->consts();
  int consts_size = consts_section->align_at_start(consts_section->size());
  assert(constant_table.size() == consts_size, "must be equal");

  if (consts_section->size()) {
    // Materialize the constant table base.
    address baseaddr = consts_section->start() + -(constant_table.table_base_offset());
    // RelocationHolder rspec = internal_word_Relocation::spec(baseaddr);
    __ relocate(relocInfo::internal_pc_type);
    __ li48(Rtoc, (long)baseaddr);
  }
}
// li48 expands to 4 instructions of 4 bytes each.
uint MachConstantBaseNode::size(PhaseRegAlloc* ra_) const {
  // li48 (4 insts)
  return 4 * 4;
}
#ifndef PRODUCT
// Textual form of the constant-table base materialization.
void MachConstantBaseNode::format(PhaseRegAlloc* ra_, outputStream* st) const {
  Register r = as_Register(ra_->get_encode(this));
  st->print("li48 %s, &constanttable (constant table base) @ MachConstantBaseNode", r->name());
}
#endif
1682 //=============================================================================
#ifndef PRODUCT
// Pretty-print the method prolog: optional stack bang, save RA/FP (as a
// single gssq pair on Loongson), establish FP, allocate the frame.
// Must mirror MachPrologNode::emit().
void MachPrologNode::format( PhaseRegAlloc *ra_, outputStream* st ) const {
  Compile* C = ra_->C;

  int framesize = C->frame_size_in_bytes();
  int bangsize = C->bang_size_in_bytes();
  assert((framesize & (StackAlignmentInBytes-1)) == 0, "frame size not aligned");

  // Calls to C2R adapters often do not accept exceptional returns.
  // We require that their callers must bang for them. But be careful, because
  // some VM calls (such as call site linkage) can use several kilobytes of
  // stack. But the stack safety zone should account for that.
  // See bugs 4446381, 4468289, 4497237.
  if (C->need_stack_bang(bangsize)) {
    st->print_cr("# stack bang"); st->print("\t");
  }
  if (UseLoongsonISA) {
    st->print("gssq RA, FP, %d(SP) @ MachPrologNode\n\t", -wordSize*2);
  } else {
    st->print("sd RA, %d(SP) @ MachPrologNode\n\t", -wordSize);
    st->print("sd FP, %d(SP) @ MachPrologNode\n\t", -wordSize*2);
  }
  st->print("daddiu FP, SP, -%d \n\t", wordSize*2);
  st->print("daddiu SP, SP, -%d \t",framesize);
}
#endif
1711 void MachPrologNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
1712 Compile* C = ra_->C;
1713 MacroAssembler _masm(&cbuf);
1715 int framesize = C->frame_size_in_bytes();
1716 int bangsize = C->bang_size_in_bytes();
1718 // __ verified_entry(framesize, C->need_stack_bang(bangsize)?bangsize:0, false);
1720 assert((framesize & (StackAlignmentInBytes-1)) == 0, "frame size not aligned");
1722 if (C->need_stack_bang(framesize)) {
1723 __ generate_stack_overflow_check(framesize);
1724 }
1726 if (UseLoongsonISA) {
1727 __ gssq(RA, FP, SP, -wordSize*2);
1728 } else {
1729 __ sd(RA, SP, -wordSize);
1730 __ sd(FP, SP, -wordSize*2);
1731 }
1732 __ daddiu(FP, SP, -wordSize*2);
1733 __ daddiu(SP, SP, -framesize);
1734 __ nop(); /* 2013.10.22 Jin: Make enough room for patch_verified_entry() */
1735 __ nop();
1737 C->set_frame_complete(cbuf.insts_size());
1738 if (C->has_mach_constant_base_node()) {
1739 // NOTE: We set the table base offset here because users might be
1740 // emitted before MachConstantBaseNode.
1741 Compile::ConstantTable& constant_table = C->constant_table();
1742 constant_table.set_table_base_offset(constant_table.calculate_table_base_offset());
1743 }
1745 }
// Prolog size varies (stack bang, Loongson pair vs. two stores); use the
// generic size computation.
uint MachPrologNode::size(PhaseRegAlloc *ra_) const {
  //fprintf(stderr, "\nPrologNode::size(ra_)= %d \n", MachNode::size(ra_));//fujie debug
  return MachNode::size(ra_); // too many variables; just compute it the hard way
}
// Upper bound on relocation entries emitted by the prolog.
int MachPrologNode::reloc() const {
  return 0; // a large enough number
}
1757 %}
1759 //----------ENCODING BLOCK-----------------------------------------------------
1760 // This block specifies the encoding classes used by the compiler to output
1761 // byte streams. Encoding classes generate functions which are called by
1762 // Machine Instruction Nodes in order to generate the bit encoding of the
1763 // instruction. Operands specify their base encoding interface with the
// interface keyword. Four interfaces are currently supported:
1765 // REG_INTER, CONST_INTER, MEMORY_INTER, & COND_INTER. REG_INTER causes an
1766 // operand to generate a function which returns its register number when
1767 // queried. CONST_INTER causes an operand to generate a function which
1768 // returns the value of the constant when queried. MEMORY_INTER causes an
1769 // operand to generate four functions which return the Base Register, the
1770 // Index Register, the Scale Value, and the Offset Value of the operand when
1771 // queried. COND_INTER causes an operand to generate six functions which
1772 // return the encoding code (ie - encoding bits for the instruction)
1773 // associated with each basic boolean condition for a conditional instruction.
1774 // Instructions specify two basic values for encoding. They use the
1775 // ins_encode keyword to specify their encoding class (which must be one of
1776 // the class names specified in the encoding block), and they use the
1777 // opcode keyword to specify, in order, their primary, secondary, and
1778 // tertiary opcode. Only the opcode sections which a particular instruction
1779 // needs for encoding need to be specified.
1780 encode %{
1781 /*
1782 Alias:
1783 1044 b java.io.ObjectInputStream::readHandle (130 bytes)
1784 118 B14: # B19 B15 <- B13 Freq: 0.899955
1785 118 add S1, S2, V0 #@addP_reg_reg
1786 11c lb S0, [S1 + #-8257524] #@loadB
1787 120 BReq S0, #3, B19 #@branchConI_reg_imm P=0.100000 C=-1.000000
1788 */
1789 //Load byte signed
  // Load byte (signed) from [base + index<<scale + disp].
  // Uses the Loongson gslbx (indexed load-byte) when available; otherwise
  // computes the effective address into AT/T9 and uses plain lb.
  enc_class load_B_enc (mRegI dst, memory mem) %{
    MacroAssembler _masm(&cbuf);
    int  dst = $dst$$reg;
    int  base = $mem$$base;
    int  index = $mem$$index;
    int  scale = $mem$$scale;
    int  disp = $mem$$disp;

    if( index != 0 ) {
      if( Assembler::is_simm16(disp) ) {
        if( UseLoongsonISA ) {
          // gslbx folds base + index + simm into one instruction.
          if (scale == 0) {
            __ gslbx(as_Register(dst), as_Register(base), as_Register(index), disp);
          } else {
            __ dsll(AT, as_Register(index), scale);
            __ gslbx(as_Register(dst), as_Register(base), AT, disp);
          }
        } else {
          if (scale == 0) {
            __ addu(AT, as_Register(base), as_Register(index));
          } else {
            __ dsll(AT, as_Register(index), scale);
            __ addu(AT, as_Register(base), AT);
          }
          __ lb(as_Register(dst), AT, disp);
        }
      } else {
        // Displacement does not fit in 16 bits: materialize it in T9.
        if (scale == 0) {
          __ addu(AT, as_Register(base), as_Register(index));
        } else {
          __ dsll(AT, as_Register(index), scale);
          __ addu(AT, as_Register(base), AT);
        }
        __ move(T9, disp);
        if( UseLoongsonISA ) {
          __ gslbx(as_Register(dst), AT, T9, 0);
        } else {
          __ addu(AT, AT, T9);
          __ lb(as_Register(dst), AT, 0);
        }
      }
    } else {
      // No index register.
      if( Assembler::is_simm16(disp) ) {
        __ lb(as_Register(dst), as_Register(base), disp);
      } else {
        __ move(T9, disp);
        if( UseLoongsonISA ) {
          __ gslbx(as_Register(dst), as_Register(base), T9, 0);
        } else {
          __ addu(AT, as_Register(base), T9);
          __ lb(as_Register(dst), AT, 0);
        }
      }
    }
  %}
1846 //Load byte unsigned
  // Load byte (zero-extended) from [base + index<<scale + disp].
  // NOTE(review): unlike load_B_enc this encoding has no UseLoongsonISA
  // gslbx-style fast path -- presumably an oversight; confirm whether a
  // gslbux path should be added for consistency.
  enc_class load_UB_enc (mRegI dst, memory mem) %{
    MacroAssembler _masm(&cbuf);
    int  dst = $dst$$reg;
    int  base = $mem$$base;
    int  index = $mem$$index;
    int  scale = $mem$$scale;
    int  disp = $mem$$disp;

    if( index != 0 ) {
      // Effective address = base + (index << scale), computed into AT.
      if (scale == 0) {
        __ daddu(AT, as_Register(base), as_Register(index));
      } else {
        __ dsll(AT, as_Register(index), scale);
        __ daddu(AT, as_Register(base), AT);
      }
      if( Assembler::is_simm16(disp) ) {
        __ lbu(as_Register(dst), AT, disp);
      } else {
        // Displacement does not fit in 16 bits: materialize it in T9.
        __ move(T9, disp);
        __ daddu(AT, AT, T9);
        __ lbu(as_Register(dst), AT, 0);
      }
    } else {
      if( Assembler::is_simm16(disp) ) {
        __ lbu(as_Register(dst), as_Register(base), disp);
      } else {
        __ move(T9, disp);
        __ daddu(AT, as_Register(base), T9);
        __ lbu(as_Register(dst), AT, 0);
      }
    }
  %}
  // Store byte register to [base + index<<scale + disp].
  // Uses the Loongson gssbx (indexed store-byte) where possible; note that
  // gssbx only accepts an 8-bit signed displacement, hence the is_simm(disp, 8)
  // checks before the simm16 fallbacks.
  enc_class store_B_reg_enc (memory mem, mRegI src) %{
    MacroAssembler _masm(&cbuf);
    int  src = $src$$reg;
    int  base = $mem$$base;
    int  index = $mem$$index;
    int  scale = $mem$$scale;
    int  disp = $mem$$disp;

    if( index != 0 ) {
      if (scale == 0) {
        if( Assembler::is_simm(disp, 8) ) {
          if (UseLoongsonISA) {
            __ gssbx(as_Register(src), as_Register(base), as_Register(index), disp);
          } else {
            __ addu(AT, as_Register(base), as_Register(index));
            __ sb(as_Register(src), AT, disp);
          }
        } else if( Assembler::is_simm16(disp) ) {
          __ addu(AT, as_Register(base), as_Register(index));
          __ sb(as_Register(src), AT, disp);
        } else {
          // Displacement does not fit in 16 bits: materialize it in T9.
          __ addu(AT, as_Register(base), as_Register(index));
          __ move(T9, disp);
          if (UseLoongsonISA) {
            __ gssbx(as_Register(src), AT, T9, 0);
          } else {
            __ addu(AT, AT, T9);
            __ sb(as_Register(src), AT, 0);
          }
        }
      } else {
        // Scaled index: shift into AT first.
        __ dsll(AT, as_Register(index), scale);
        if( Assembler::is_simm(disp, 8) ) {
          if (UseLoongsonISA) {
            __ gssbx(as_Register(src), AT, as_Register(base), disp);
          } else {
            __ addu(AT, as_Register(base), AT);
            __ sb(as_Register(src), AT, disp);
          }
        } else if( Assembler::is_simm16(disp) ) {
          __ addu(AT, as_Register(base), AT);
          __ sb(as_Register(src), AT, disp);
        } else {
          __ addu(AT, as_Register(base), AT);
          __ move(T9, disp);
          if (UseLoongsonISA) {
            __ gssbx(as_Register(src), AT, T9, 0);
          } else {
            __ addu(AT, AT, T9);
            __ sb(as_Register(src), AT, 0);
          }
        }
      }
    } else {
      // No index register.
      if( Assembler::is_simm16(disp) ) {
        __ sb(as_Register(src), as_Register(base), disp);
      } else {
        __ move(T9, disp);
        if (UseLoongsonISA) {
          __ gssbx(as_Register(src), as_Register(base), T9, 0);
        } else {
          __ addu(AT, as_Register(base), T9);
          __ sb(as_Register(src), AT, 0);
        }
      }
    }
  %}
  // Store an 8-bit immediate: *(int8_t*)(base + (index << scale) + disp) = value.
  // A zero value is stored straight from R0; any other constant is materialized
  // into a scratch register (T9 or AT) first. On Loongson CPUs the gssbx
  // (store-byte-indexed) instruction folds base + index + disp8 into a single
  // instruction; otherwise the effective address is formed in AT.
  enc_class store_B_immI_enc (memory mem, immI8 src) %{
    MacroAssembler _masm(&cbuf);
    int base = $mem$$base;
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;
    int value = $src$$constant;

    if( index != 0 ) {
      if (!UseLoongsonISA) {
        // Generic MIPS64: AT = base + (index << scale).
        if (scale == 0) {
          __ daddu(AT, as_Register(base), as_Register(index));
        } else {
          __ dsll(AT, as_Register(index), scale);
          __ daddu(AT, as_Register(base), AT);
        }
        if( Assembler::is_simm16(disp) ) {
          if (value == 0) {
            __ sb(R0, AT, disp);
          } else {
            __ move(T9, value);
            __ sb(T9, AT, disp);
          }
        } else {
          // disp does not fit the 16-bit offset field; add it via T9.
          if (value == 0) {
            __ move(T9, disp);
            __ daddu(AT, AT, T9);
            __ sb(R0, AT, 0);
          } else {
            __ move(T9, disp);
            __ daddu(AT, AT, T9);
            __ move(T9, value);
            __ sb(T9, AT, 0);
          }
        }
      } else {
        if (scale == 0) {
          if( Assembler::is_simm(disp, 8) ) {
            // gssbx carries an 8-bit immediate offset.
            if (value == 0) {
              __ gssbx(R0, as_Register(base), as_Register(index), disp);
            } else {
              __ move(T9, value);
              __ gssbx(T9, as_Register(base), as_Register(index), disp);
            }
          } else if( Assembler::is_simm16(disp) ) {
            __ daddu(AT, as_Register(base), as_Register(index));
            if (value == 0) {
              __ sb(R0, AT, disp);
            } else {
              __ move(T9, value);
              __ sb(T9, AT, disp);
            }
          } else {
            if (value == 0) {
              __ daddu(AT, as_Register(base), as_Register(index));
              __ move(T9, disp);
              __ gssbx(R0, AT, T9, 0);
            } else {
              // AT = base + disp, then indexed store at AT + index.
              __ move(AT, disp);
              __ move(T9, value);
              __ daddu(AT, as_Register(base), AT);
              __ gssbx(T9, AT, as_Register(index), 0);
            }
          }
        } else {
          if( Assembler::is_simm(disp, 8) ) {
            __ dsll(AT, as_Register(index), scale);
            if (value == 0) {
              __ gssbx(R0, as_Register(base), AT, disp);
            } else {
              __ move(T9, value);
              __ gssbx(T9, as_Register(base), AT, disp);
            }
          } else if( Assembler::is_simm16(disp) ) {
            __ dsll(AT, as_Register(index), scale);
            __ daddu(AT, as_Register(base), AT);
            if (value == 0) {
              __ sb(R0, AT, disp);
            } else {
              __ move(T9, value);
              __ sb(T9, AT, disp);
            }
          } else {
            __ dsll(AT, as_Register(index), scale);
            if (value == 0) {
              __ daddu(AT, as_Register(base), AT);
              __ move(T9, disp);
              __ gssbx(R0, AT, T9, 0);
            } else {
              // AT = (index << scale) + disp, then indexed store at base + AT.
              __ move(T9, disp);
              __ daddu(AT, AT, T9);
              __ move(T9, value);
              __ gssbx(T9, as_Register(base), AT, 0);
            }
          }
        }
      }
    } else {
      // No index register: effective address is base + disp.
      if( Assembler::is_simm16(disp) ) {
        if (value == 0) {
          __ sb(R0, as_Register(base), disp);
        } else {
          __ move(AT, value);
          __ sb(AT, as_Register(base), disp);
        }
      } else {
        if (value == 0) {
          __ move(T9, disp);
          if (UseLoongsonISA) {
            __ gssbx(R0, as_Register(base), T9, 0);
          } else {
            __ daddu(AT, as_Register(base), T9);
            __ sb(R0, AT, 0);
          }
        } else {
          __ move(T9, disp);
          if (UseLoongsonISA) {
            __ move(AT, value);
            __ gssbx(AT, as_Register(base), T9, 0);
          } else {
            __ daddu(AT, as_Register(base), T9);
            __ move(T9, value);
            __ sb(T9, AT, 0);
          }
        }
      }
    }
  %}
  // Store an 8-bit immediate followed by a full memory barrier:
  // *(int8_t*)(base + (index << scale) + disp) = value; sync().
  // Presumably used for volatile byte stores -- confirm against the matching
  // instruct rules. Unlike store_B_immI_enc this variant has no gssbx fast path.
  enc_class store_B_immI_enc_sync (memory mem, immI8 src) %{
    MacroAssembler _masm(&cbuf);
    int base = $mem$$base;
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;
    int value = $src$$constant;

    if( index != 0 ) {
      // AT = base + (index << scale).
      if (scale == 0) {
        __ daddu(AT, as_Register(base), as_Register(index));
      } else {
        __ dsll(AT, as_Register(index), scale);
        __ daddu(AT, as_Register(base), AT);
      }
      if( Assembler::is_simm16(disp) ) {
        if (value == 0) {
          __ sb(R0, AT, disp);
        } else {
          __ move(T9, value);
          __ sb(T9, AT, disp);
        }
      } else {
        // disp too large for the 16-bit offset field; add it via T9.
        if (value == 0) {
          __ move(T9, disp);
          __ daddu(AT, AT, T9);
          __ sb(R0, AT, 0);
        } else {
          __ move(T9, disp);
          __ daddu(AT, AT, T9);
          __ move(T9, value);
          __ sb(T9, AT, 0);
        }
      }
    } else {
      if( Assembler::is_simm16(disp) ) {
        if (value == 0) {
          __ sb(R0, as_Register(base), disp);
        } else {
          __ move(AT, value);
          __ sb(AT, as_Register(base), disp);
        }
      } else {
        if (value == 0) {
          __ move(T9, disp);
          __ daddu(AT, as_Register(base), T9);
          __ sb(R0, AT, 0);
        } else {
          __ move(T9, disp);
          __ daddu(AT, as_Register(base), T9);
          __ move(T9, value);
          __ sb(T9, AT, 0);
        }
      }
    }

    // Full barrier after the store.
    __ sync();
  %}
2140 // Load Short (16bit signed)
2141 enc_class load_S_enc (mRegI dst, memory mem) %{
2142 MacroAssembler _masm(&cbuf);
2143 int dst = $dst$$reg;
2144 int base = $mem$$base;
2145 int index = $mem$$index;
2146 int scale = $mem$$scale;
2147 int disp = $mem$$disp;
2149 if( index != 0 ) {
2150 if (scale == 0) {
2151 __ daddu(AT, as_Register(base), as_Register(index));
2152 } else {
2153 __ dsll(AT, as_Register(index), scale);
2154 __ daddu(AT, as_Register(base), AT);
2155 }
2156 if( Assembler::is_simm16(disp) ) {
2157 __ lh(as_Register(dst), AT, disp);
2158 } else {
2159 __ move(T9, disp);
2160 __ addu(AT, AT, T9);
2161 __ lh(as_Register(dst), AT, 0);
2162 }
2163 } else {
2164 if( Assembler::is_simm16(disp) ) {
2165 __ lh(as_Register(dst), as_Register(base), disp);
2166 } else {
2167 __ move(T9, disp);
2168 __ addu(AT, as_Register(base), T9);
2169 __ lh(as_Register(dst), AT, 0);
2170 }
2171 }
2172 %}
2174 // Load Char (16bit unsigned)
2175 enc_class load_C_enc (mRegI dst, memory mem) %{
2176 MacroAssembler _masm(&cbuf);
2177 int dst = $dst$$reg;
2178 int base = $mem$$base;
2179 int index = $mem$$index;
2180 int scale = $mem$$scale;
2181 int disp = $mem$$disp;
2183 if( index != 0 ) {
2184 if (scale == 0) {
2185 __ daddu(AT, as_Register(base), as_Register(index));
2186 } else {
2187 __ dsll(AT, as_Register(index), scale);
2188 __ daddu(AT, as_Register(base), AT);
2189 }
2190 if( Assembler::is_simm16(disp) ) {
2191 __ lhu(as_Register(dst), AT, disp);
2192 } else {
2193 __ move(T9, disp);
2194 __ addu(AT, AT, T9);
2195 __ lhu(as_Register(dst), AT, 0);
2196 }
2197 } else {
2198 if( Assembler::is_simm16(disp) ) {
2199 __ lhu(as_Register(dst), as_Register(base), disp);
2200 } else {
2201 __ move(T9, disp);
2202 __ daddu(AT, as_Register(base), T9);
2203 __ lhu(as_Register(dst), AT, 0);
2204 }
2205 }
2206 %}
  // Store Char (16bit unsigned)
  // *(int16_t*)(base + (index << scale) + disp) = low 16 bits of src.
  // On Loongson, gsshx (store-halfword-indexed) folds base + index + disp8.
  // NOTE(review): address arithmetic here uses addu (32-bit add) whereas the
  // byte/long/pointer encodings in this file use daddu; looks like these
  // should be daddu for 64-bit addresses -- confirm against the rest of the port.
  enc_class store_C_reg_enc (memory mem, mRegI src) %{
    MacroAssembler _masm(&cbuf);
    int src = $src$$reg;
    int base = $mem$$base;
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;

    if( index != 0 ) {
      if( Assembler::is_simm16(disp) ) {
        if( UseLoongsonISA && Assembler::is_simm(disp, 8) ) {
          // Single-instruction indexed store when disp fits 8 bits.
          if (scale == 0) {
            __ gsshx(as_Register(src), as_Register(base), as_Register(index), disp);
          } else {
            __ dsll(AT, as_Register(index), scale);
            __ gsshx(as_Register(src), as_Register(base), AT, disp);
          }
        } else {
          if (scale == 0) {
            __ addu(AT, as_Register(base), as_Register(index));
          } else {
            __ dsll(AT, as_Register(index), scale);
            __ addu(AT, as_Register(base), AT);
          }
          __ sh(as_Register(src), AT, disp);
        }
      } else {
        // disp too large for the 16-bit offset field.
        if (scale == 0) {
          __ addu(AT, as_Register(base), as_Register(index));
        } else {
          __ dsll(AT, as_Register(index), scale);
          __ addu(AT, as_Register(base), AT);
        }
        __ move(T9, disp);
        if( UseLoongsonISA ) {
          __ gsshx(as_Register(src), AT, T9, 0);
        } else {
          __ addu(AT, AT, T9);
          __ sh(as_Register(src), AT, 0);
        }
      }
    } else {
      if( Assembler::is_simm16(disp) ) {
        __ sh(as_Register(src), as_Register(base), disp);
      } else {
        __ move(T9, disp);
        if( UseLoongsonISA ) {
          __ gsshx(as_Register(src), as_Register(base), T9, 0);
        } else {
          __ addu(AT, as_Register(base), T9);
          __ sh(as_Register(src), AT, 0);
        }
      }
    }
  %}
  // Store a zero halfword: *(int16_t*)(base + (index << scale) + disp) = 0.
  // The value comes straight from the hard-wired zero register R0.
  // NOTE(review): as in store_C_reg_enc, the addu address adds here look like
  // they should be daddu on a 64-bit target -- confirm.
  enc_class store_C0_enc (memory mem) %{
    MacroAssembler _masm(&cbuf);
    int base = $mem$$base;
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;

    if( index != 0 ) {
      if( Assembler::is_simm16(disp) ) {
        if( UseLoongsonISA && Assembler::is_simm(disp, 8) ) {
          // Single gsshx when disp fits in 8 bits.
          if (scale == 0) {
            __ gsshx(R0, as_Register(base), as_Register(index), disp);
          } else {
            __ dsll(AT, as_Register(index), scale);
            __ gsshx(R0, as_Register(base), AT, disp);
          }
        } else {
          if (scale == 0) {
            __ addu(AT, as_Register(base), as_Register(index));
          } else {
            __ dsll(AT, as_Register(index), scale);
            __ addu(AT, as_Register(base), AT);
          }
          __ sh(R0, AT, disp);
        }
      } else {
        // disp too large for the 16-bit offset field.
        if (scale == 0) {
          __ addu(AT, as_Register(base), as_Register(index));
        } else {
          __ dsll(AT, as_Register(index), scale);
          __ addu(AT, as_Register(base), AT);
        }
        __ move(T9, disp);
        if( UseLoongsonISA ) {
          __ gsshx(R0, AT, T9, 0);
        } else {
          __ addu(AT, AT, T9);
          __ sh(R0, AT, 0);
        }
      }
    } else {
      if( Assembler::is_simm16(disp) ) {
        __ sh(R0, as_Register(base), disp);
      } else {
        __ move(T9, disp);
        if( UseLoongsonISA ) {
          __ gsshx(R0, as_Register(base), T9, 0);
        } else {
          __ addu(AT, as_Register(base), T9);
          __ sh(R0, AT, 0);
        }
      }
    }
  %}
  // Load Int (32-bit, sign-extended by lw):
  // dst = *(int32_t*)(base + (index << scale) + disp).
  // On Loongson, gslwx (load-word-indexed) folds base + index + disp8.
  // NOTE(review): addu is used for the address adds here; other encodings use
  // daddu -- looks like a 64-bit pointer issue, confirm against the port.
  enc_class load_I_enc (mRegI dst, memory mem) %{
    MacroAssembler _masm(&cbuf);
    int dst = $dst$$reg;
    int base = $mem$$base;
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;

    if( index != 0 ) {
      if( Assembler::is_simm16(disp) ) {
        if( UseLoongsonISA && Assembler::is_simm(disp, 8) ) {
          // Single indexed load when disp fits in 8 bits.
          if (scale == 0) {
            __ gslwx(as_Register(dst), as_Register(base), as_Register(index), disp);
          } else {
            __ dsll(AT, as_Register(index), scale);
            __ gslwx(as_Register(dst), as_Register(base), AT, disp);
          }
        } else {
          if (scale == 0) {
            __ addu(AT, as_Register(base), as_Register(index));
          } else {
            __ dsll(AT, as_Register(index), scale);
            __ addu(AT, as_Register(base), AT);
          }
          __ lw(as_Register(dst), AT, disp);
        }
      } else {
        // disp too large for the 16-bit offset field.
        if (scale == 0) {
          __ addu(AT, as_Register(base), as_Register(index));
        } else {
          __ dsll(AT, as_Register(index), scale);
          __ addu(AT, as_Register(base), AT);
        }
        __ move(T9, disp);
        if( UseLoongsonISA ) {
          __ gslwx(as_Register(dst), AT, T9, 0);
        } else {
          __ addu(AT, AT, T9);
          __ lw(as_Register(dst), AT, 0);
        }
      }
    } else {
      if( Assembler::is_simm16(disp) ) {
        __ lw(as_Register(dst), as_Register(base), disp);
      } else {
        __ move(T9, disp);
        if( UseLoongsonISA ) {
          __ gslwx(as_Register(dst), as_Register(base), T9, 0);
        } else {
          __ addu(AT, as_Register(base), T9);
          __ lw(as_Register(dst), AT, 0);
        }
      }
    }
  %}
  // Store Int (32-bit): *(int32_t*)(base + (index << scale) + disp) = src.
  // On Loongson, gsswx (store-word-indexed) folds base + index + disp8.
  // NOTE(review): addu used for address adds -- see the daddu/addu concern
  // noted on the sibling encodings; confirm for 64-bit addresses.
  enc_class store_I_reg_enc (memory mem, mRegI src) %{
    MacroAssembler _masm(&cbuf);
    int src = $src$$reg;
    int base = $mem$$base;
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;

    if( index != 0 ) {
      if( Assembler::is_simm16(disp) ) {
        if( UseLoongsonISA && Assembler::is_simm(disp, 8) ) {
          // Single indexed store when disp fits in 8 bits.
          if (scale == 0) {
            __ gsswx(as_Register(src), as_Register(base), as_Register(index), disp);
          } else {
            __ dsll(AT, as_Register(index), scale);
            __ gsswx(as_Register(src), as_Register(base), AT, disp);
          }
        } else {
          if (scale == 0) {
            __ addu(AT, as_Register(base), as_Register(index));
          } else {
            __ dsll(AT, as_Register(index), scale);
            __ addu(AT, as_Register(base), AT);
          }
          __ sw(as_Register(src), AT, disp);
        }
      } else {
        // disp too large for the 16-bit offset field.
        if (scale == 0) {
          __ addu(AT, as_Register(base), as_Register(index));
        } else {
          __ dsll(AT, as_Register(index), scale);
          __ addu(AT, as_Register(base), AT);
        }
        __ move(T9, disp);
        if( UseLoongsonISA ) {
          __ gsswx(as_Register(src), AT, T9, 0);
        } else {
          __ addu(AT, AT, T9);
          __ sw(as_Register(src), AT, 0);
        }
      }
    } else {
      if( Assembler::is_simm16(disp) ) {
        __ sw(as_Register(src), as_Register(base), disp);
      } else {
        __ move(T9, disp);
        if( UseLoongsonISA ) {
          __ gsswx(as_Register(src), as_Register(base), T9, 0);
        } else {
          __ addu(AT, as_Register(base), T9);
          __ sw(as_Register(src), AT, 0);
        }
      }
    }
  %}
2432 enc_class store_I_immI_enc (memory mem, immI src) %{
2433 MacroAssembler _masm(&cbuf);
2434 int base = $mem$$base;
2435 int index = $mem$$index;
2436 int scale = $mem$$scale;
2437 int disp = $mem$$disp;
2438 int value = $src$$constant;
2440 if( index != 0 ) {
2441 if (scale == 0) {
2442 __ daddu(AT, as_Register(base), as_Register(index));
2443 } else {
2444 __ dsll(AT, as_Register(index), scale);
2445 __ daddu(AT, as_Register(base), AT);
2446 }
2447 if( Assembler::is_simm16(disp) ) {
2448 if (value == 0) {
2449 __ sw(R0, AT, disp);
2450 } else {
2451 __ move(T9, value);
2452 __ sw(T9, AT, disp);
2453 }
2454 } else {
2455 if (value == 0) {
2456 __ move(T9, disp);
2457 __ addu(AT, AT, T9);
2458 __ sw(R0, AT, 0);
2459 } else {
2460 __ move(T9, disp);
2461 __ addu(AT, AT, T9);
2462 __ move(T9, value);
2463 __ sw(T9, AT, 0);
2464 }
2465 }
2466 } else {
2467 if( Assembler::is_simm16(disp) ) {
2468 if (value == 0) {
2469 __ sw(R0, as_Register(base), disp);
2470 } else {
2471 __ move(AT, value);
2472 __ sw(AT, as_Register(base), disp);
2473 }
2474 } else {
2475 if (value == 0) {
2476 __ move(T9, disp);
2477 __ addu(AT, as_Register(base), T9);
2478 __ sw(R0, AT, 0);
2479 } else {
2480 __ move(T9, disp);
2481 __ addu(AT, as_Register(base), T9);
2482 __ move(T9, value);
2483 __ sw(T9, AT, 0);
2484 }
2485 }
2486 }
2487 %}
  // Load a narrow (compressed) oop: dst = *(uint32_t*)(base + (index << scale)
  // + disp), zero-extended by lwu. A relocated displacement is not supported
  // here (asserted below).
  enc_class load_N_enc (mRegN dst, memory mem) %{
    MacroAssembler _masm(&cbuf);
    int dst = $dst$$reg;
    int base = $mem$$base;
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;
    relocInfo::relocType disp_reloc = $mem->disp_reloc();
    assert(disp_reloc == relocInfo::none, "cannot have disp");

    if( index != 0 ) {
      // AT = base + (index << scale).
      if (scale == 0) {
        __ daddu(AT, as_Register(base), as_Register(index));
      } else {
        __ dsll(AT, as_Register(index), scale);
        __ daddu(AT, as_Register(base), AT);
      }
      if( Assembler::is_simm16(disp) ) {
        __ lwu(as_Register(dst), AT, disp);
      } else {
        // disp too large for the 16-bit offset field.
        __ li(T9, disp);
        __ daddu(AT, AT, T9);
        __ lwu(as_Register(dst), AT, 0);
      }
    } else {
      if( Assembler::is_simm16(disp) ) {
        __ lwu(as_Register(dst), as_Register(base), disp);
      } else {
        __ li(T9, disp);
        __ daddu(AT, as_Register(base), T9);
        __ lwu(as_Register(dst), AT, 0);
      }
    }

  %}
  // Load Pointer (64-bit): dst = *(intptr_t*)(base + (index << scale) + disp).
  // On Loongson, gsldx (load-doubleword-indexed) folds reg + reg + disp8 into
  // one instruction. A relocated displacement is not supported (asserted).
  enc_class load_P_enc (mRegP dst, memory mem) %{
    MacroAssembler _masm(&cbuf);
    int dst = $dst$$reg;
    int base = $mem$$base;
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;
    relocInfo::relocType disp_reloc = $mem->disp_reloc();
    assert(disp_reloc == relocInfo::none, "cannot have disp");

    if( index != 0 ) {
      if ( UseLoongsonISA ) {
        if ( Assembler::is_simm(disp, 8) ) {
          // disp fits gsldx's 8-bit immediate: one indexed load.
          if ( scale != 0 ) {
            __ dsll(AT, as_Register(index), scale);
            __ gsldx(as_Register(dst), as_Register(base), AT, disp);
          } else {
            __ gsldx(as_Register(dst), as_Register(base), as_Register(index), disp);
          }
        } else if ( Assembler::is_simm16(disp) ){
          if ( scale != 0 ) {
            __ dsll(AT, as_Register(index), scale);
            __ daddu(AT, AT, as_Register(base));
          } else {
            __ daddu(AT, as_Register(index), as_Register(base));
          }
          __ ld(as_Register(dst), AT, disp);
        } else {
          // AT = (index << scale) + disp, then indexed load from base + AT.
          if ( scale != 0 ) {
            __ dsll(AT, as_Register(index), scale);
            __ move(T9, disp);
            __ daddu(AT, AT, T9);
          } else {
            __ move(T9, disp);
            __ daddu(AT, as_Register(index), T9);
          }
          __ gsldx(as_Register(dst), as_Register(base), AT, 0);
        }
      } else { //not use loongson isa
        if (scale == 0) {
          __ daddu(AT, as_Register(base), as_Register(index));
        } else {
          __ dsll(AT, as_Register(index), scale);
          __ daddu(AT, as_Register(base), AT);
        }
        if( Assembler::is_simm16(disp) ) {
          __ ld(as_Register(dst), AT, disp);
        } else {
          __ li(T9, disp);
          __ daddu(AT, AT, T9);
          __ ld(as_Register(dst), AT, 0);
        }
      }
    } else {
      if ( UseLoongsonISA ) {
        if ( Assembler::is_simm16(disp) ){
          __ ld(as_Register(dst), as_Register(base), disp);
        } else {
          __ li(T9, disp);
          __ gsldx(as_Register(dst), as_Register(base), T9, 0);
        }
      } else { //not use loongson isa
        if( Assembler::is_simm16(disp) ) {
          __ ld(as_Register(dst), as_Register(base), disp);
        } else {
          __ li(T9, disp);
          __ daddu(AT, as_Register(base), T9);
          __ ld(as_Register(dst), AT, 0);
        }
      }
    }
    // if( disp_reloc != relocInfo::none) __ ld(as_Register(dst), as_Register(dst), 0);
  %}
  // Store Pointer (64-bit): *(intptr_t*)(base + (index << scale) + disp) = src.
  // No Loongson gssdx fast path here (unlike store_P_immP0_enc below).
  enc_class store_P_reg_enc (memory mem, mRegP src) %{
    MacroAssembler _masm(&cbuf);
    int src = $src$$reg;
    int base = $mem$$base;
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;

    if( index != 0 ) {
      // AT = base + (index << scale).
      if (scale == 0) {
        __ daddu(AT, as_Register(base), as_Register(index));
      } else {
        __ dsll(AT, as_Register(index), scale);
        __ daddu(AT, as_Register(base), AT);
      }
      if( Assembler::is_simm16(disp) ) {
        __ sd(as_Register(src), AT, disp);
      } else {
        // disp too large for the 16-bit offset field.
        __ move(T9, disp);
        __ daddu(AT, AT, T9);
        __ sd(as_Register(src), AT, 0);
      }
    } else {
      if( Assembler::is_simm16(disp) ) {
        __ sd(as_Register(src), as_Register(base), disp);
      } else {
        __ move(T9, disp);
        __ daddu(AT, as_Register(base), T9);
        __ sd(as_Register(src), AT, 0);
      }
    }
  %}
2633 enc_class store_N_reg_enc (memory mem, mRegN src) %{
2634 MacroAssembler _masm(&cbuf);
2635 int src = $src$$reg;
2636 int base = $mem$$base;
2637 int index = $mem$$index;
2638 int scale = $mem$$scale;
2639 int disp = $mem$$disp;
2641 if( index != 0 ) {
2642 if ( UseLoongsonISA ){
2643 if ( Assembler::is_simm(disp, 8) ) {
2644 if ( scale == 0 ) {
2645 __ gsswx(as_Register(src), as_Register(base), as_Register(index), disp);
2646 } else {
2647 __ dsll(AT, as_Register(index), scale);
2648 __ gsswx(as_Register(src), as_Register(base), AT, disp);
2649 }
2650 } else if ( Assembler::is_simm16(disp) ) {
2651 if ( scale == 0 ) {
2652 __ daddu(AT, as_Register(base), as_Register(index));
2653 } else {
2654 __ dsll(AT, as_Register(index), scale);
2655 __ daddu(AT, as_Register(base), AT);
2656 }
2657 __ sw(as_Register(src), AT, disp);
2658 } else {
2659 if ( scale == 0 ) {
2660 __ move(T9, disp);
2661 __ daddu(AT, as_Register(index), T9);
2662 } else {
2663 __ dsll(AT, as_Register(index), scale);
2664 __ move(T9, disp);
2665 __ daddu(AT, AT, T9);
2666 }
2667 __ gsswx(as_Register(src), as_Register(base), AT, 0);
2668 }
2669 } else { //not use loongson isa
2670 if (scale == 0) {
2671 __ daddu(AT, as_Register(base), as_Register(index));
2672 } else {
2673 __ dsll(AT, as_Register(index), scale);
2674 __ daddu(AT, as_Register(base), AT);
2675 }
2676 if( Assembler::is_simm16(disp) ) {
2677 __ sw(as_Register(src), AT, disp);
2678 } else {
2679 __ move(T9, disp);
2680 __ addu(AT, AT, T9);
2681 __ sw(as_Register(src), AT, 0);
2682 }
2683 }
2684 } else {
2685 if ( UseLoongsonISA ) {
2686 if ( Assembler::is_simm16(disp) ) {
2687 __ sw(as_Register(src), as_Register(base), disp);
2688 } else {
2689 __ move(T9, disp);
2690 __ gsswx(as_Register(src), as_Register(base), T9, 0);
2691 }
2692 } else {
2693 if( Assembler::is_simm16(disp) ) {
2694 __ sw(as_Register(src), as_Register(base), disp);
2695 } else {
2696 __ move(T9, disp);
2697 __ addu(AT, as_Register(base), T9);
2698 __ sw(as_Register(src), AT, 0);
2699 }
2700 }
2701 }
2702 %}
  // Store a null pointer: *(intptr_t*)(base + (index << scale) + disp) = 0.
  // The zero comes straight from R0; on Loongson, gssdx
  // (store-doubleword-indexed) folds base + index + disp8.
  enc_class store_P_immP0_enc (memory mem) %{
    MacroAssembler _masm(&cbuf);
    int base = $mem$$base;
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;

    if( index != 0 ) {
      if (scale == 0) {
        if( Assembler::is_simm16(disp) ) {
          if (UseLoongsonISA && Assembler::is_simm(disp, 8)) {
            // Single gssdx when disp fits 8 bits.
            __ gssdx(R0, as_Register(base), as_Register(index), disp);
          } else {
            __ daddu(AT, as_Register(base), as_Register(index));
            __ sd(R0, AT, disp);
          }
        } else {
          __ daddu(AT, as_Register(base), as_Register(index));
          __ move(T9, disp);
          if(UseLoongsonISA) {
            __ gssdx(R0, AT, T9, 0);
          } else {
            __ daddu(AT, AT, T9);
            __ sd(R0, AT, 0);
          }
        }
      } else {
        __ dsll(AT, as_Register(index), scale);
        if( Assembler::is_simm16(disp) ) {
          if (UseLoongsonISA && Assembler::is_simm(disp, 8)) {
            __ gssdx(R0, as_Register(base), AT, disp);
          } else {
            __ daddu(AT, as_Register(base), AT);
            __ sd(R0, AT, disp);
          }
        } else {
          __ daddu(AT, as_Register(base), AT);
          __ move(T9, disp);
          if (UseLoongsonISA) {
            __ gssdx(R0, AT, T9, 0);
          } else {
            __ daddu(AT, AT, T9);
            __ sd(R0, AT, 0);
          }
        }
      }
    } else {
      if( Assembler::is_simm16(disp) ) {
        __ sd(R0, as_Register(base), disp);
      } else {
        __ move(T9, disp);
        if (UseLoongsonISA) {
          __ gssdx(R0, as_Register(base), T9, 0);
        } else {
          __ daddu(AT, as_Register(base), T9);
          __ sd(R0, AT, 0);
        }
      }
    }
  %}
  // Store a null narrow (compressed) oop:
  // *(uint32_t*)(base + (index << scale) + disp) = 0, via R0.
  // No Loongson gsswx fast path in this encoding.
  enc_class storeImmN0_enc(memory mem, ImmN0 src) %{
    MacroAssembler _masm(&cbuf);
    int base = $mem$$base;
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;

    if(index!=0){
      // AT = base + (index << scale).
      if (scale == 0) {
        __ daddu(AT, as_Register(base), as_Register(index));
      } else {
        __ dsll(AT, as_Register(index), scale);
        __ daddu(AT, as_Register(base), AT);
      }

      if( Assembler::is_simm16(disp) ) {
        __ sw(R0, AT, disp);
      } else {
        // disp too large for the 16-bit offset field.
        __ move(T9, disp);
        __ daddu(AT, AT, T9);
        __ sw(R0, AT, 0);
      }
    }
    else {
      if( Assembler::is_simm16(disp) ) {
        __ sw(R0, as_Register(base), disp);
      } else {
        __ move(T9, disp);
        __ daddu(AT, as_Register(base), T9);
        __ sw(R0, AT, 0);
      }
    }
  %}
  // Load Long (64-bit): dst = *(int64_t*)(base + (index << scale) + disp).
  enc_class load_L_enc (mRegL dst, memory mem) %{
    MacroAssembler _masm(&cbuf);
    int base = $mem$$base;
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;
    Register dst_reg = as_Register($dst$$reg);

    /*********************2013/03/27**************************
     * Jin: $base may contain a null object.
     * Server JIT force the exception_offset to be the pos of
     * the first instruction.
     * I insert such a 'null_check' at the beginning.
     *******************************************************/
    // Dummy load from base: if base is null this is the faulting (implicit
    // null check) instruction, positioned first as described above.
    __ lw(AT, as_Register(base), 0);

    /*********************2012/10/04**************************
     * Error case found in SortTest
     * 337  b  java.util.Arrays::sort1 (401 bytes)
     * B73:
     * d34    lw  T4.lo, [T4 + #16]  #@loadL-lo
     *        lw  T4.hi, [T4 + #16]+4  #@loadL-hi
     *
     * The original instructions generated here are :
     *  __ lw(dst_lo, as_Register(base), disp);
     *  __ lw(dst_hi, as_Register(base), disp + 4);
     *******************************************************/
    if( index != 0 ) {
      // AT = base + (index << scale).
      if (scale == 0) {
        __ daddu(AT, as_Register(base), as_Register(index));
      } else {
        __ dsll(AT, as_Register(index), scale);
        __ daddu(AT, as_Register(base), AT);
      }
      if( Assembler::is_simm16(disp) ) {
        __ ld(dst_reg, AT, disp);
      } else {
        __ move(T9, disp);
        __ daddu(AT, AT, T9);
        __ ld(dst_reg, AT, 0);
      }
    } else {
      if( Assembler::is_simm16(disp) ) {
        // NOTE(review): the copy of base into AT looks redundant (ld could use
        // base directly); kept as-is, presumably related to the history above.
        __ move(AT, as_Register(base));
        __ ld(dst_reg, AT, disp);
      } else {
        __ move(T9, disp);
        __ daddu(AT, as_Register(base), T9);
        __ ld(dst_reg, AT, 0);
      }
    }
  %}
  // Store Long (64-bit): *(int64_t*)(base + (index << scale) + disp) = src.
  enc_class store_L_reg_enc (memory mem, mRegL src) %{
    MacroAssembler _masm(&cbuf);
    int base = $mem$$base;
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;
    Register src_reg = as_Register($src$$reg);

    if( index != 0 ) {
      // AT = base + (index << scale).
      if (scale == 0) {
        __ daddu(AT, as_Register(base), as_Register(index));
      } else {
        __ dsll(AT, as_Register(index), scale);
        __ daddu(AT, as_Register(base), AT);
      }
      if( Assembler::is_simm16(disp) ) {
        __ sd(src_reg, AT, disp);
      } else {
        __ move(T9, disp);
        __ daddu(AT, AT, T9);
        __ sd(src_reg, AT, 0);
      }
    } else {
      if( Assembler::is_simm16(disp) ) {
        // NOTE(review): the copy of base into AT looks redundant (sd could use
        // base directly); kept as-is, mirrors load_L_enc.
        __ move(AT, as_Register(base));
        __ sd(src_reg, AT, disp);
      } else {
        __ move(T9, disp);
        __ daddu(AT, as_Register(base), T9);
        __ sd(src_reg, AT, 0);
      }
    }
  %}
2889 enc_class store_L_immL0_enc (memory mem, immL0 src) %{
2890 MacroAssembler _masm(&cbuf);
2891 int base = $mem$$base;
2892 int index = $mem$$index;
2893 int scale = $mem$$scale;
2894 int disp = $mem$$disp;
2896 if( index != 0 ) {
2897 if (scale == 0) {
2898 __ daddu(AT, as_Register(base), as_Register(index));
2899 } else {
2900 __ dsll(AT, as_Register(index), scale);
2901 __ daddu(AT, as_Register(base), AT);
2902 }
2903 if( Assembler::is_simm16(disp) ) {
2904 __ sd(R0, AT, disp);
2905 } else {
2906 __ move(T9, disp);
2907 __ addu(AT, AT, T9);
2908 __ sd(R0, AT, 0);
2909 }
2910 } else {
2911 if( Assembler::is_simm16(disp) ) {
2912 __ move(AT, as_Register(base));
2913 __ sd(R0, AT, disp);
2914 } else {
2915 __ move(T9, disp);
2916 __ addu(AT, as_Register(base), T9);
2917 __ sd(R0, AT, 0);
2918 }
2919 }
2920 %}
  // Load Float (32-bit): dst = *(float*)(base + (index << scale) + disp).
  // On Loongson, gslwxc1 (load-word-indexed to FPR) folds base + index + disp8.
  enc_class load_F_enc (regF dst, memory mem) %{
    MacroAssembler _masm(&cbuf);
    int base = $mem$$base;
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;
    FloatRegister dst = $dst$$FloatRegister;

    if( index != 0 ) {
      if( Assembler::is_simm16(disp) ) {
        if( UseLoongsonISA && Assembler::is_simm(disp, 8) ) {
          // Single indexed FP load when disp fits in 8 bits.
          if (scale == 0) {
            __ gslwxc1(dst, as_Register(base), as_Register(index), disp);
          } else {
            __ dsll(AT, as_Register(index), scale);
            __ gslwxc1(dst, as_Register(base), AT, disp);
          }
        } else {
          if (scale == 0) {
            __ daddu(AT, as_Register(base), as_Register(index));
          } else {
            __ dsll(AT, as_Register(index), scale);
            __ daddu(AT, as_Register(base), AT);
          }
          __ lwc1(dst, AT, disp);
        }
      } else {
        // disp too large for the 16-bit offset field.
        if (scale == 0) {
          __ daddu(AT, as_Register(base), as_Register(index));
        } else {
          __ dsll(AT, as_Register(index), scale);
          __ daddu(AT, as_Register(base), AT);
        }
        __ move(T9, disp);
        if( UseLoongsonISA ) {
          __ gslwxc1(dst, AT, T9, 0);
        } else {
          __ daddu(AT, AT, T9);
          __ lwc1(dst, AT, 0);
        }
      }
    } else {
      if( Assembler::is_simm16(disp) ) {
        __ lwc1(dst, as_Register(base), disp);
      } else {
        __ move(T9, disp);
        if( UseLoongsonISA ) {
          __ gslwxc1(dst, as_Register(base), T9, 0);
        } else {
          __ daddu(AT, as_Register(base), T9);
          __ lwc1(dst, AT, 0);
        }
      }
    }
  %}
2978 enc_class store_F_reg_enc (memory mem, regF src) %{
2979 MacroAssembler _masm(&cbuf);
2980 int base = $mem$$base;
2981 int index = $mem$$index;
2982 int scale = $mem$$scale;
2983 int disp = $mem$$disp;
2984 FloatRegister src = $src$$FloatRegister;
2986 if( index != 0 ) {
2987 if( Assembler::is_simm16(disp) ) {
2988 if( UseLoongsonISA && Assembler::is_simm(disp, 8) ) {
2989 if (scale == 0) {
2990 __ gsswxc1(src, as_Register(base), as_Register(index), disp);
2991 } else {
2992 __ dsll(AT, as_Register(index), scale);
2993 __ gsswxc1(src, as_Register(base), AT, disp);
2994 }
2995 } else {
2996 if (scale == 0) {
2997 __ daddu(AT, as_Register(base), as_Register(index));
2998 } else {
2999 __ dsll(AT, as_Register(index), scale);
3000 __ daddu(AT, as_Register(base), AT);
3001 }
3002 __ swc1(src, AT, disp);
3003 }
3004 } else {
3005 if (scale == 0) {
3006 __ daddu(AT, as_Register(base), as_Register(index));
3007 } else {
3008 __ dsll(AT, as_Register(index), scale);
3009 __ daddu(AT, as_Register(base), AT);
3010 }
3011 __ move(T9, disp);
3012 if( UseLoongsonISA ) {
3013 __ gsswxc1(src, AT, T9, 0);
3014 } else {
3015 __ daddu(AT, AT, T9);
3016 __ swc1(src, AT, 0);
3017 }
3018 }
3019 } else {
3020 if( Assembler::is_simm16(disp) ) {
3021 __ swc1(src, as_Register(base), disp);
3022 } else {
3023 __ move(T9, disp);
3024 if( UseLoongsonISA ) {
3025 __ gslwxc1(src, as_Register(base), T9, 0);
3026 } else {
3027 __ daddu(AT, as_Register(base), T9);
3028 __ swc1(src, AT, 0);
3029 }
3030 }
3031 }
3032 %}
3034 enc_class load_D_enc (regD dst, memory mem) %{
3035 MacroAssembler _masm(&cbuf);
3036 int base = $mem$$base;
3037 int index = $mem$$index;
3038 int scale = $mem$$scale;
3039 int disp = $mem$$disp;
3040 FloatRegister dst_reg = as_FloatRegister($dst$$reg);
3042 if( index != 0 ) {
3043 if( Assembler::is_simm16(disp) ) {
3044 if( UseLoongsonISA && Assembler::is_simm(disp, 8) ) {
3045 if (scale == 0) {
3046 __ gsldxc1(dst_reg, as_Register(base), as_Register(index), disp);
3047 } else {
3048 __ dsll(AT, as_Register(index), scale);
3049 __ gsldxc1(dst_reg, as_Register(base), AT, disp);
3050 }
3051 } else {
3052 if (scale == 0) {
3053 __ daddu(AT, as_Register(base), as_Register(index));
3054 } else {
3055 __ dsll(AT, as_Register(index), scale);
3056 __ daddu(AT, as_Register(base), AT);
3057 }
3058 __ ldc1(dst_reg, AT, disp);
3059 }
3060 } else {
3061 if (scale == 0) {
3062 __ daddu(AT, as_Register(base), as_Register(index));
3063 } else {
3064 __ dsll(AT, as_Register(index), scale);
3065 __ daddu(AT, as_Register(base), AT);
3066 }
3067 __ move(T9, disp);
3068 if( UseLoongsonISA ) {
3069 __ gsldxc1(dst_reg, AT, T9, 0);
3070 } else {
3071 __ addu(AT, AT, T9);
3072 __ ldc1(dst_reg, AT, 0);
3073 }
3074 }
3075 } else {
3076 if( Assembler::is_simm16(disp) ) {
3077 __ ldc1(dst_reg, as_Register(base), disp);
3078 } else {
3079 __ move(T9, disp);
3080 if( UseLoongsonISA ) {
3081 __ gsldxc1(dst_reg, as_Register(base), T9, 0);
3082 } else {
3083 __ addu(AT, as_Register(base), T9);
3084 __ ldc1(dst_reg, AT, 0);
3085 }
3086 }
3087 }
3088 %}
3090 enc_class store_D_reg_enc (memory mem, regD src) %{
3091 MacroAssembler _masm(&cbuf);
3092 int base = $mem$$base;
3093 int index = $mem$$index;
3094 int scale = $mem$$scale;
3095 int disp = $mem$$disp;
3096 FloatRegister src_reg = as_FloatRegister($src$$reg);
3098 if( index != 0 ) {
3099 if( Assembler::is_simm16(disp) ) {
3100 if( UseLoongsonISA && Assembler::is_simm(disp, 8) ) {
3101 if (scale == 0) {
3102 __ gssdxc1(src_reg, as_Register(base), as_Register(index), disp);
3103 } else {
3104 __ dsll(AT, as_Register(index), scale);
3105 __ gssdxc1(src_reg, as_Register(base), AT, disp);
3106 }
3107 } else {
3108 if (scale == 0) {
3109 __ daddu(AT, as_Register(base), as_Register(index));
3110 } else {
3111 __ dsll(AT, as_Register(index), scale);
3112 __ daddu(AT, as_Register(base), AT);
3113 }
3114 __ sdc1(src_reg, AT, disp);
3115 }
3116 } else {
3117 if (scale == 0) {
3118 __ daddu(AT, as_Register(base), as_Register(index));
3119 } else {
3120 __ dsll(AT, as_Register(index), scale);
3121 __ daddu(AT, as_Register(base), AT);
3122 }
3123 __ move(T9, disp);
3124 if( UseLoongsonISA ) {
3125 __ gssdxc1(src_reg, AT, T9, 0);
3126 } else {
3127 __ addu(AT, AT, T9);
3128 __ sdc1(src_reg, AT, 0);
3129 }
3130 }
3131 } else {
3132 if( Assembler::is_simm16(disp) ) {
3133 __ sdc1(src_reg, as_Register(base), disp);
3134 } else {
3135 __ move(T9, disp);
3136 if( UseLoongsonISA ) {
3137 __ gssdxc1(src_reg, as_Register(base), T9, 0);
3138 } else {
3139 __ addu(AT, as_Register(base), T9);
3140 __ sdc1(src_reg, AT, 0);
3141 }
3142 }
3143 }
3144 %}
  // Call from compiled code into the VM runtime (leaf or non-leaf). The call
  // site is marked and relocated as a runtime call; the target address is
  // materialized in T9 and called via jalr.
  enc_class Java_To_Runtime (method meth) %{ // CALL Java_To_Runtime, Java_To_Runtime_Leaf
    MacroAssembler _masm(&cbuf);
    // This is the instruction starting address for relocation info.
    __ block_comment("Java_To_Runtime");
    cbuf.set_insts_mark();
    __ relocate(relocInfo::runtime_call_type);
    // li48: load the 48-bit callee address -- presumably a fixed-length
    // sequence so the relocation can patch it; confirm against li48's impl.
    __ li48(T9, (long)$meth$$method);
    __ jalr(T9);
    __ nop();   // branch-delay slot
  %}
// Java static call.
// CALL to fixup routine.  Fixup routine uses ScopeDesc info to determine
// who we intended to call.
enc_class Java_Static_Call (method meth) %{ // JAVA STATIC CALL
  MacroAssembler _masm(&cbuf);
  cbuf.set_insts_mark();

  // Choose the relocation type: a null _method means a call into the
  // runtime; otherwise an optimized-virtual or a plain static Java call.
  if ( !_method ) {
    __ relocate(relocInfo::runtime_call_type);
  } else if(_optimized_virtual) {
    __ relocate(relocInfo::opt_virtual_call_type);
  } else {
    __ relocate(relocInfo::static_call_type);
  }

  // NOTE(review): Java_To_Runtime above uses the fixed-size li48 for a
  // patchable call site; confirm li() also emits a constant-length
  // sequence here, since this site is patched via the relocation.
  __ li(T9, $meth$$method);
  __ jalr(T9);
  __ nop();    // branch-delay slot
  if( _method ) {  // Emit stub for static call
    emit_java_to_interp(cbuf);
  }
%}
/*
 * [Ref: LIR_Assembler::ic_call() ]
 */
// Java dynamic (inline-cache) call: ic_call() emits the IC setup and the
// relocated call to the given method entry.
enc_class Java_Dynamic_Call (method meth) %{ // JAVA DYNAMIC CALL
  MacroAssembler _masm(&cbuf);
  __ block_comment("Java_Dynamic_Call");
  __ ic_call((address)$meth$$method);
%}
// Materialize a boolean flags value from the outcome of a fast lock/unlock:
// AT == 0 (success) leaves flags at 0; any non-zero AT sets flags to all
// ones.  Clobbers only the flags register.
enc_class Set_Flags_After_Fast_Lock_Unlock(FlagsReg cr) %{
  Register flags = $cr$$Register;
  Label L;

  MacroAssembler _masm(&cbuf);

  __ addu(flags, R0, R0);       // flags = 0
  __ beq(AT, R0, L);            // AT == 0 -> success, leave flags clear
  __ delayed()->nop();
  __ move(flags, 0xFFFFFFFF);   // failure: flags = -1
  __ bind(L);
%}
// Slow-path partial subtype check: sets result = 0 when 'sub' is a subtype
// of 'super', and result = 1 on a miss.  Uses T9 as scratch; 'tmp' holds the
// secondary-supers array length during the scan.
enc_class enc_PartialSubtypeCheck(mRegP result, mRegP sub, mRegP super, mRegI tmp) %{
  Register result = $result$$Register;
  Register sub    = $sub$$Register;
  Register super  = $super$$Register;
  Register length = $tmp$$Register;
  Register tmp    = T9;
  Label miss;

  /* 2012/9/28 Jin: result may be the same as sub
   * 47c B40: # B21 B41 <- B20  Freq: 0.155379
   * 47c   partialSubtypeCheck result=S1, sub=S1, super=S3, length=S0
   * 4bc   mov  S2, NULL #@loadConP
   * 4c0   beq  S1, S2, B21 #@branchConP P=0.999999 C=-1.000000
   */
  MacroAssembler _masm(&cbuf);
  Label done;
  __ check_klass_subtype_slow_path(sub, super, length, tmp,
                                   NULL, &miss,
                                   /*set_cond_codes:*/ true);
  /* 2013/7/22 Jin: Refer to X86_64's RDI */
  __ move(result, 0);   // hit: result = 0
  __ b(done);
  __ nop();             // branch-delay slot

  __ bind(miss);
  __ move(result, 1);   // miss: result = 1
  __ bind(done);
%}
3233 %}
3236 //---------MIPS FRAME--------------------------------------------------------------
3237 // Definition of frame structure and management information.
3238 //
3239 // S T A C K L A Y O U T Allocators stack-slot number
3240 // | (to get allocators register number
3241 // G Owned by | | v add SharedInfo::stack0)
3242 // r CALLER | |
3243 // o | +--------+ pad to even-align allocators stack-slot
3244 // w V | pad0 | numbers; owned by CALLER
3245 // t -----------+--------+----> Matcher::_in_arg_limit, unaligned
3246 // h ^ | in | 5
3247 // | | args | 4 Holes in incoming args owned by SELF
3248 // | | old | | 3
3249 // | | SP-+--------+----> Matcher::_old_SP, even aligned
3250 // v | | ret | 3 return address
3251 // Owned by +--------+
3252 // Self | pad2 | 2 pad to align old SP
3253 // | +--------+ 1
3254 // | | locks | 0
3255 // | +--------+----> SharedInfo::stack0, even aligned
3256 // | | pad1 | 11 pad to align new SP
3257 // | +--------+
3258 // | | | 10
3259 // | | spills | 9 spills
3260 // V | | 8 (pad0 slot for callee)
3261 // -----------+--------+----> Matcher::_out_arg_limit, unaligned
3262 // ^ | out | 7
3263 // | | args | 6 Holes in outgoing args owned by CALLEE
3264 // Owned by new | |
3265 // Callee SP-+--------+----> Matcher::_new_SP, even aligned
3266 // | |
3267 //
3268 // Note 1: Only region 8-11 is determined by the allocator. Region 0-5 is
3269 // known from SELF's arguments and the Java calling convention.
3270 // Region 6-7 is determined per call site.
3271 // Note 2: If the calling convention leaves holes in the incoming argument
3272 // area, those holes are owned by SELF. Holes in the outgoing area
// are owned by the CALLEE.  Holes should not be necessary in the
3274 // incoming area, as the Java calling convention is completely under
3275 // the control of the AD file. Doubles can be sorted and packed to
// avoid holes.  Holes in the outgoing arguments may be necessary for
3277 // varargs C calling conventions.
3278 // Note 3: Region 0-3 is even aligned, with pad2 as needed. Region 3-5 is
3279 // even aligned with pad0 as needed.
3280 // Region 6 is even aligned. Region 6-7 is NOT even aligned;
3281 // region 6-11 is even aligned; it may be padded out more so that
3282 // the region from SP to FP meets the minimum stack alignment.
3283 // Note 4: For I2C adapters, the incoming FP may not meet the minimum stack
3284 // alignment. Region 11, pad1, may be dynamically extended so that
3285 // SP meets the minimum alignment.
// Frame description: stack growth direction, special-purpose registers,
// slot accounting, and the Java/C calling-convention hooks used by the
// matcher and register allocator.
frame %{
  stack_direction(TOWARDS_LOW);   // MIPS stack grows toward lower addresses

  // These two registers define part of the calling convention
  // between compiled code and the interpreter.
  // SEE StartI2CNode::calling_convention & StartC2INode::calling_convention
  // & StartOSRNode::calling_convention for more information. by yjl 3/16/2006
  inline_cache_reg(T1);              // Inline Cache Register
  interpreter_method_oop_reg(S3);    // Method Oop Register when calling interpreter
  /*
  inline_cache_reg(T1);              // Inline Cache Register or methodOop for I2C
  interpreter_arg_ptr_reg(A0);       // Argument pointer for I2C adapters
  */

  // Optional: name the operand used by cisc-spilling to access [stack_pointer + offset]
  cisc_spilling_operand_name(indOffset32);

  // Number of stack slots consumed by locking an object.
  // generates Compile::sync_stack_slots
  // LP64 needs two 32-bit slots per monitor (64-bit lock word).
#ifdef _LP64
  sync_stack_slots(2);
#else
  sync_stack_slots(1);
#endif

  frame_pointer(SP);

  // Interpreter stores its frame pointer in a register which is
  // stored to the stack by I2CAdaptors.
  // I2CAdaptors convert from interpreted java to compiled java.
  interpreter_frame_pointer(FP);

  // generates Matcher::stack_alignment
  stack_alignment(StackAlignmentInBytes); //wordSize = sizeof(char*);

  // Number of stack slots between incoming argument block and the start of
  // a new frame.  The PROLOG must add this many slots to the stack.  The
  // EPILOG must remove this many slots.
  // generates Matcher::in_preserve_stack_slots
  //in_preserve_stack_slots(VerifyStackAtCalls + 2); //Now VerifyStackAtCalls is defined as false ! Leave one stack slot for ra and fp
  in_preserve_stack_slots(4); //Now VerifyStackAtCalls is defined as false ! Leave two stack slots for ra and fp

  // Number of outgoing stack slots killed above the out_preserve_stack_slots
  // for calls to C.  Supports the var-args backing area for register parms.
  varargs_C_out_slots_killed(0);

  // The after-PROLOG location of the return address.  Location of
  // return address specifies a type (REG or STACK) and a number
  // representing the register number (i.e. - use a register name) or
  // stack slot.
  // Ret Addr is on stack in slot 0 if no locks or verification or alignment.
  // Otherwise, it is above the locks and verification slot and alignment word
  //return_addr(STACK -1+ round_to(1+VerifyStackAtCalls+Compile::current()->sync()*Compile::current()->sync_stack_slots(),WordsPerLong));
  return_addr(REG RA);

  // Body of function which returns an integer array locating
  // arguments either in registers or in stack slots.  Passed an array
  // of ideal registers called "sig" and a "length" count.  Stack-slot
  // offsets are based on outgoing arguments, i.e. a CALLER setting up
  // arguments for a CALLEE.  Incoming stack arguments are
  // automatically biased by the preserve_stack_slots field above.

  // generated as Matcher::calling_convention(OptoRegPair *sig, uint length, bool is_outgoing)
  // StartNode::calling_convention calls this. by yjl 3/16/2006
  calling_convention %{
    SharedRuntime::java_calling_convention(sig_bt, regs, length, false);
  %}

  // Body of function which returns an integer array locating
  // arguments either in registers or in stack slots.  Passed an array
  // of ideal registers called "sig" and a "length" count.  Stack-slot
  // offsets are based on outgoing arguments, i.e. a CALLER setting up
  // arguments for a CALLEE.  Incoming stack arguments are
  // automatically biased by the preserve_stack_slots field above.

  // SEE CallRuntimeNode::calling_convention for more information. by yjl 3/16/2006
  c_calling_convention %{
    (void) SharedRuntime::c_calling_convention(sig_bt, regs, /*regs2=*/NULL, length);
  %}

  // Location of C & interpreter return values.
  // register(s) contain(s) return value for Op_StartI2C and Op_StartOSR.
  // SEE Matcher::match. by yjl 3/16/2006
  c_return_value %{
    assert( ideal_reg >= Op_RegI && ideal_reg <= Op_RegL, "only return normal values" );
    /* -- , -- , Op_RegN, Op_RegI, Op_RegP, Op_RegF, Op_RegD, Op_RegL */
    static int lo[Op_RegL+1] = { 0, 0, V0_num, V0_num, V0_num, F0_num, F0_num, V0_num };
    static int hi[Op_RegL+1] = { 0, 0, OptoReg::Bad, OptoReg::Bad, V0_H_num, OptoReg::Bad, F0_H_num, V0_H_num };
    return OptoRegPair(hi[ideal_reg],lo[ideal_reg]);
  %}

  // Location of compiled Java return values.
  // register(s) contain(s) return value for Op_StartC2I and Op_Start.
  // SEE Matcher::match. by yjl 3/16/2006
  return_value %{
    assert( ideal_reg >= Op_RegI && ideal_reg <= Op_RegL, "only return normal values" );
    /* -- , -- , Op_RegN, Op_RegI, Op_RegP, Op_RegF, Op_RegD, Op_RegL */
    static int lo[Op_RegL+1] = { 0, 0, V0_num, V0_num, V0_num, F0_num, F0_num, V0_num };
    static int hi[Op_RegL+1] = { 0, 0, OptoReg::Bad, OptoReg::Bad, V0_H_num, OptoReg::Bad, F0_H_num, V0_H_num};
    return OptoRegPair(hi[ideal_reg],lo[ideal_reg]);
  %}

%}
3403 //----------ATTRIBUTES---------------------------------------------------------
3404 //----------Operand Attributes-------------------------------------------------
// Default attributes applied to every operand/instruction unless overridden.
op_attrib op_cost(0);           // Required cost attribute

//----------Instruction Attributes---------------------------------------------
ins_attrib ins_cost(100);       // Required cost attribute
ins_attrib ins_size(32);        // Required size attribute (in bits)
ins_attrib ins_pc_relative(0);  // Required PC Relative flag
ins_attrib ins_short_branch(0); // Required flag: is this instruction a
                                // non-matching short branch variant of some
                                // long branch?
ins_attrib ins_alignment(4);    // Required alignment attribute (must be a power of 2)
                                // specifies the alignment that some part of the instruction (not
                                // necessarily the start) requires.  If > 1, a compute_padding()
                                // function must be provided for the instruction
3419 //----------OPERANDS-----------------------------------------------------------
3420 // Operand definitions must precede instruction definitions for correct parsing
3421 // in the ADLC because operands constitute user defined types which are used in
3422 // instruction definitions.
// Vectors
// 64-bit (double-width) vector operand, allocated from the double FP
// register class.
operand vecD() %{
  constraint(ALLOC_IN_RC(dbl_reg));
  match(VecD);

  format %{ %}
  interface(REG_INTER);
%}
// Flags register, used as output of compare instructions
operand FlagsReg() %{
  constraint(ALLOC_IN_RC(mips_flags));
  match(RegFlags);

  format %{ "EFLAGS" %}
  interface(REG_INTER);
%}
//----------Simple Operands----------------------------------------------------
// Immediate Operands
// Integer immediates.  Narrower / special-valued variants let the matcher
// pick shorter or fused instruction forms; op_cost biases selection toward
// the cheaper encodings.

// Generic integer immediate.
operand immI() %{
  match(ConI);
  //TODO: should not match immI8 here LEE
  match(immI8);

  op_cost(20);
  format %{ %}
  interface(CONST_INTER);
%}

// Long Immediate 8-bit
operand immL8()
%{
  predicate(-0x80L <= n->get_long() && n->get_long() < 0x80L);
  match(ConL);

  op_cost(5);
  format %{ %}
  interface(CONST_INTER);
%}

// Constant for test vs zero
operand immI0() %{
  predicate(n->get_int() == 0);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Constant for increment
operand immI1() %{
  predicate(n->get_int() == 1);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Constant for decrement
operand immI_M1() %{
  predicate(n->get_int() == -1);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Largest 32-bit signed value (Integer.MAX_VALUE).
operand immI_MaxI() %{
  predicate(n->get_int() == 2147483647);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Valid scale values for addressing modes
operand immI2() %{
  predicate(0 <= n->get_int() && (n->get_int() <= 3));
  match(ConI);

  format %{ %}
  interface(CONST_INTER);
%}

// 8-bit signed immediate.
operand immI8() %{
  predicate((-128 <= n->get_int()) && (n->get_int() <= 127));
  match(ConI);

  op_cost(5);
  format %{ %}
  interface(CONST_INTER);
%}

// 16-bit signed immediate (fits a MIPS I-type instruction).
operand immI16() %{
  predicate((-32768 <= n->get_int()) && (n->get_int() <= 32767));
  match(ConI);

  op_cost(10);
  format %{ %}
  interface(CONST_INTER);
%}

// Constant for long shifts
operand immI_32() %{
  predicate( n->get_int() == 32 );
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

operand immI_63() %{
  predicate( n->get_int() == 63 );
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Valid 32-bit shift amounts.
operand immI_0_31() %{
  predicate( n->get_int() >= 0 && n->get_int() <= 31 );
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Operand for non-negative integer mask
operand immI_nonneg_mask() %{
  predicate( (n->get_int() >= 0) && (Assembler::is_int_mask(n->get_int()) != -1) );
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Valid 64-bit shift amounts above 31.
operand immI_32_63() %{
  predicate( n->get_int() >= 32 && n->get_int() <= 63 );
  match(ConI);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}

// 16-bit range shifted by one so that the negated value still fits a
// 16-bit immediate (used to turn subtraction into addiu of -imm).
operand immI16_sub() %{
  predicate((-32767 <= n->get_int()) && (n->get_int() <= 32768));
  match(ConI);

  op_cost(10);
  format %{ %}
  interface(CONST_INTER);
%}

operand immI_0_32767() %{
  predicate( n->get_int() >= 0 && n->get_int() <= 32767 );
  match(ConI);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}

operand immI_0_65535() %{
  predicate( n->get_int() >= 0 && n->get_int() <= 65535 );
  match(ConI);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}

operand immI_1() %{
  predicate( n->get_int() == 1 );
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

operand immI_2() %{
  predicate( n->get_int() == 2 );
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

operand immI_3() %{
  predicate( n->get_int() == 3 );
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

operand immI_7() %{
  predicate( n->get_int() == 7 );
  match(ConI);

  format %{ %}
  interface(CONST_INTER);
%}

// Immediates for special shifts (sign extend)

// Constants for increment
operand immI_16() %{
  predicate( n->get_int() == 16 );
  match(ConI);

  format %{ %}
  interface(CONST_INTER);
%}

operand immI_24() %{
  predicate( n->get_int() == 24 );
  match(ConI);

  format %{ %}
  interface(CONST_INTER);
%}

// Constant for byte-wide masking
operand immI_255() %{
  predicate( n->get_int() == 255 );
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Constant for half-word-wide masking.
operand immI_65535() %{
  predicate( n->get_int() == 65535 );
  match(ConI);

  op_cost(5);
  format %{ %}
  interface(CONST_INTER);
%}

operand immI_65536() %{
  predicate( n->get_int() == 65536 );
  match(ConI);

  op_cost(5);
  format %{ %}
  interface(CONST_INTER);
%}

operand immI_M65536() %{
  predicate( n->get_int() == -65536 );
  match(ConI);

  op_cost(5);
  format %{ %}
  interface(CONST_INTER);
%}
// Pointer immediates.  The _set/_load/_no_oop_cheap split lets the matcher
// choose between materializing a constant inline and loading it from the
// constant table, based on oop-ness and materialization cost.

// Pointer Immediate
operand immP() %{
  match(ConP);

  op_cost(10);
  format %{ %}
  interface(CONST_INTER);
%}

// NULL Pointer Immediate
operand immP0() %{
  predicate( n->get_ptr() == 0 );
  match(ConP);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}

// Pointer Immediate: 64-bit
operand immP_set() %{
  match(ConP);

  op_cost(5);
  // formats are generated automatically for constants and base registers
  format %{ %}
  interface(CONST_INTER);
%}

// Pointer Immediate: 64-bit — an oop, or expensive to materialize inline
// (more than 3 instructions), so it is loaded instead of set.
operand immP_load() %{
  predicate(n->bottom_type()->isa_oop_ptr() || (MacroAssembler::insts_for_set64(n->get_ptr()) > 3));
  match(ConP);

  op_cost(5);
  // formats are generated automatically for constants and base registers
  format %{ %}
  interface(CONST_INTER);
%}

// Pointer Immediate: 64-bit — not an oop and cheap to materialize inline.
operand immP_no_oop_cheap() %{
  predicate(!n->bottom_type()->isa_oop_ptr() && (MacroAssembler::insts_for_set64(n->get_ptr()) <= 3));
  match(ConP);

  op_cost(5);
  // formats are generated automatically for constants and base registers
  format %{ %}
  interface(CONST_INTER);
%}

// Pointer for polling page
operand immP_poll() %{
  predicate(n->get_ptr() != 0 && n->get_ptr() == (intptr_t)os::get_polling_page());
  match(ConP);
  op_cost(5);

  format %{ %}
  interface(CONST_INTER);
%}

// Narrow-oop (compressed pointer) immediate.
operand immN() %{
  match(ConN);

  op_cost(10);
  format %{ %}
  interface(CONST_INTER);
%}

// Narrow-klass immediate.
operand immNKlass() %{
  match(ConNKlass);

  op_cost(10);
  format %{ %}
  interface(CONST_INTER);
%}

// NULL narrow-oop immediate.
operand immN0() %{
  predicate(n->get_narrowcon() == 0);
  match(ConN);

  op_cost(5);
  format %{ %}
  interface(CONST_INTER);
%}
// Long immediates.  Special negative values (-4, -5, -7, -8, -121) describe
// alignment masks with particular zero bits; cheap/expensive split by
// materialization cost.

// Long Immediate
operand immL() %{
  match(ConL);

  op_cost(20);
  format %{ %}
  interface(CONST_INTER);
%}

// Long Immediate zero
operand immL0() %{
  predicate( n->get_long() == 0L );
  match(ConL);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}

operand immL7() %{
  predicate( n->get_long() == 7L );
  match(ConL);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}

operand immL_M1() %{
  predicate( n->get_long() == -1L );
  match(ConL);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}

// Mask with bits 0..2 zero.
operand immL_M8() %{
  predicate( n->get_long() == -8L );
  match(ConL);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}

// Mask with bit 2 zero.
operand immL_M5() %{
  predicate( n->get_long() == -5L );
  match(ConL);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}

// Mask with bits 1..2 zero.
operand immL_M7() %{
  predicate( n->get_long() == -7L );
  match(ConL);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}

// Mask with bits 0..1 zero.
operand immL_M4() %{
  predicate( n->get_long() == -4L );
  match(ConL);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}

// Mask with bits 3..6 zero.
operand immL_M121() %{
  predicate( n->get_long() == -121L );
  match(ConL);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}

// Long immediate from 0 to 127.
// Used for a shorter form of long mul by 10.
operand immL_127() %{
  predicate((0 <= n->get_long()) && (n->get_long() <= 127));
  match(ConL);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}

// Operand for non-negative long mask
operand immL_nonneg_mask() %{
  predicate( (n->get_long() >= 0) && (Assembler::is_jlong_mask(n->get_long()) != -1) );
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

operand immL_0_65535() %{
  predicate( n->get_long() >= 0 && n->get_long() <= 65535 );
  match(ConL);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}

// Long Immediate: cheap (materialize in <= 3 instructions)
operand immL_cheap() %{
  predicate(MacroAssembler::insts_for_set64(n->get_long()) <= 3);
  match(ConL);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}

// Long Immediate: expensive (materialize in > 3 instructions)
operand immL_expensive() %{
  predicate(MacroAssembler::insts_for_set64(n->get_long()) > 3);
  match(ConL);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}

// 16-bit signed long immediate.
operand immL16() %{
  predicate((-32768 <= n->get_long()) && (n->get_long() <= 32767));
  match(ConL);

  op_cost(10);
  format %{ %}
  interface(CONST_INTER);
%}

// 16-bit range shifted by one so the negated value fits (sub -> daddiu).
operand immL16_sub() %{
  predicate((-32767 <= n->get_long()) && (n->get_long() <= 32768));
  match(ConL);

  op_cost(10);
  format %{ %}
  interface(CONST_INTER);
%}

// Long Immediate: low 32-bit mask
operand immL_32bits() %{
  predicate(n->get_long() == 0xFFFFFFFFL);
  match(ConL);
  op_cost(20);

  format %{ %}
  interface(CONST_INTER);
%}

// Long Immediate 32-bit signed
operand immL32()
%{
  predicate(n->get_long() == (int) (n->get_long()));
  match(ConL);

  op_cost(15);
  format %{ %}
  interface(CONST_INTER);
%}
// Floating-point immediates.  The zero predicates compare the raw bit
// pattern, so -0.0 (sign bit set) does NOT match the zero operands.

// single-precision floating-point zero
operand immF0() %{
  predicate(jint_cast(n->getf()) == 0);
  match(ConF);

  op_cost(5);
  format %{ %}
  interface(CONST_INTER);
%}

// single-precision floating-point immediate
operand immF() %{
  match(ConF);

  op_cost(20);
  format %{ %}
  interface(CONST_INTER);
%}

// double-precision floating-point zero
operand immD0() %{
  predicate(jlong_cast(n->getd()) == 0);
  match(ConD);

  op_cost(5);
  format %{ %}
  interface(CONST_INTER);
%}

// double-precision floating-point immediate
operand immD() %{
  match(ConD);

  op_cost(20);
  format %{ %}
  interface(CONST_INTER);
%}
// Register Operands
// Integer registers.  The single-register operands (mS0RegI, mT0RegI, ...)
// pin a value to one specific machine register for instructions with fixed
// register requirements; each also matches the generic mRegI.

// Any allocatable integer register.
operand mRegI() %{
  constraint(ALLOC_IN_RC(int_reg));
  match(RegI);

  format %{ %}
  interface(REG_INTER);
%}

// Integer register excluding the Ax registers.
operand no_Ax_mRegI() %{
  constraint(ALLOC_IN_RC(no_Ax_int_reg));
  match(RegI);
  match(mRegI);

  format %{ %}
  interface(REG_INTER);
%}

operand mS0RegI() %{
  constraint(ALLOC_IN_RC(s0_reg));
  match(RegI);
  match(mRegI);

  format %{ "S0" %}
  interface(REG_INTER);
%}

operand mS1RegI() %{
  constraint(ALLOC_IN_RC(s1_reg));
  match(RegI);
  match(mRegI);

  format %{ "S1" %}
  interface(REG_INTER);
%}

operand mS2RegI() %{
  constraint(ALLOC_IN_RC(s2_reg));
  match(RegI);
  match(mRegI);

  format %{ "S2" %}
  interface(REG_INTER);
%}

operand mS3RegI() %{
  constraint(ALLOC_IN_RC(s3_reg));
  match(RegI);
  match(mRegI);

  format %{ "S3" %}
  interface(REG_INTER);
%}

operand mS4RegI() %{
  constraint(ALLOC_IN_RC(s4_reg));
  match(RegI);
  match(mRegI);

  format %{ "S4" %}
  interface(REG_INTER);
%}

operand mS5RegI() %{
  constraint(ALLOC_IN_RC(s5_reg));
  match(RegI);
  match(mRegI);

  format %{ "S5" %}
  interface(REG_INTER);
%}

operand mS6RegI() %{
  constraint(ALLOC_IN_RC(s6_reg));
  match(RegI);
  match(mRegI);

  format %{ "S6" %}
  interface(REG_INTER);
%}

operand mS7RegI() %{
  constraint(ALLOC_IN_RC(s7_reg));
  match(RegI);
  match(mRegI);

  format %{ "S7" %}
  interface(REG_INTER);
%}

operand mT0RegI() %{
  constraint(ALLOC_IN_RC(t0_reg));
  match(RegI);
  match(mRegI);

  format %{ "T0" %}
  interface(REG_INTER);
%}

operand mT1RegI() %{
  constraint(ALLOC_IN_RC(t1_reg));
  match(RegI);
  match(mRegI);

  format %{ "T1" %}
  interface(REG_INTER);
%}

operand mT2RegI() %{
  constraint(ALLOC_IN_RC(t2_reg));
  match(RegI);
  match(mRegI);

  format %{ "T2" %}
  interface(REG_INTER);
%}

operand mT3RegI() %{
  constraint(ALLOC_IN_RC(t3_reg));
  match(RegI);
  match(mRegI);

  format %{ "T3" %}
  interface(REG_INTER);
%}

operand mT8RegI() %{
  constraint(ALLOC_IN_RC(t8_reg));
  match(RegI);
  match(mRegI);

  format %{ "T8" %}
  interface(REG_INTER);
%}

operand mT9RegI() %{
  constraint(ALLOC_IN_RC(t9_reg));
  match(RegI);
  match(mRegI);

  format %{ "T9" %}
  interface(REG_INTER);
%}

operand mA0RegI() %{
  constraint(ALLOC_IN_RC(a0_reg));
  match(RegI);
  match(mRegI);

  format %{ "A0" %}
  interface(REG_INTER);
%}

operand mA1RegI() %{
  constraint(ALLOC_IN_RC(a1_reg));
  match(RegI);
  match(mRegI);

  format %{ "A1" %}
  interface(REG_INTER);
%}

operand mA2RegI() %{
  constraint(ALLOC_IN_RC(a2_reg));
  match(RegI);
  match(mRegI);

  format %{ "A2" %}
  interface(REG_INTER);
%}

operand mA3RegI() %{
  constraint(ALLOC_IN_RC(a3_reg));
  match(RegI);
  match(mRegI);

  format %{ "A3" %}
  interface(REG_INTER);
%}

operand mA4RegI() %{
  constraint(ALLOC_IN_RC(a4_reg));
  match(RegI);
  match(mRegI);

  format %{ "A4" %}
  interface(REG_INTER);
%}

operand mA5RegI() %{
  constraint(ALLOC_IN_RC(a5_reg));
  match(RegI);
  match(mRegI);

  format %{ "A5" %}
  interface(REG_INTER);
%}

operand mA6RegI() %{
  constraint(ALLOC_IN_RC(a6_reg));
  match(RegI);
  match(mRegI);

  format %{ "A6" %}
  interface(REG_INTER);
%}

operand mA7RegI() %{
  constraint(ALLOC_IN_RC(a7_reg));
  match(RegI);
  match(mRegI);

  format %{ "A7" %}
  interface(REG_INTER);
%}

operand mV0RegI() %{
  constraint(ALLOC_IN_RC(v0_reg));
  match(RegI);
  match(mRegI);

  format %{ "V0" %}
  interface(REG_INTER);
%}

operand mV1RegI() %{
  constraint(ALLOC_IN_RC(v1_reg));
  match(RegI);
  match(mRegI);

  format %{ "V1" %}
  interface(REG_INTER);
%}
// Narrow-oop (compressed pointer) register operands.  Same single-register
// pattern as the integer operands above, matching RegN.

operand mRegN() %{
  constraint(ALLOC_IN_RC(int_reg));
  match(RegN);

  format %{ %}
  interface(REG_INTER);
%}

operand t0_RegN() %{
  constraint(ALLOC_IN_RC(t0_reg));
  match(RegN);
  match(mRegN);

  format %{ %}
  interface(REG_INTER);
%}

operand t1_RegN() %{
  constraint(ALLOC_IN_RC(t1_reg));
  match(RegN);
  match(mRegN);

  format %{ %}
  interface(REG_INTER);
%}

operand t2_RegN() %{
  constraint(ALLOC_IN_RC(t2_reg));
  match(RegN);
  match(mRegN);

  format %{ %}
  interface(REG_INTER);
%}

operand t3_RegN() %{
  constraint(ALLOC_IN_RC(t3_reg));
  match(RegN);
  match(mRegN);

  format %{ %}
  interface(REG_INTER);
%}

operand t8_RegN() %{
  constraint(ALLOC_IN_RC(t8_reg));
  match(RegN);
  match(mRegN);

  format %{ %}
  interface(REG_INTER);
%}

operand t9_RegN() %{
  constraint(ALLOC_IN_RC(t9_reg));
  match(RegN);
  match(mRegN);

  format %{ %}
  interface(REG_INTER);
%}

operand a0_RegN() %{
  constraint(ALLOC_IN_RC(a0_reg));
  match(RegN);
  match(mRegN);

  format %{ %}
  interface(REG_INTER);
%}

operand a1_RegN() %{
  constraint(ALLOC_IN_RC(a1_reg));
  match(RegN);
  match(mRegN);

  format %{ %}
  interface(REG_INTER);
%}

operand a2_RegN() %{
  constraint(ALLOC_IN_RC(a2_reg));
  match(RegN);
  match(mRegN);

  format %{ %}
  interface(REG_INTER);
%}

operand a3_RegN() %{
  constraint(ALLOC_IN_RC(a3_reg));
  match(RegN);
  match(mRegN);

  format %{ %}
  interface(REG_INTER);
%}

operand a4_RegN() %{
  constraint(ALLOC_IN_RC(a4_reg));
  match(RegN);
  match(mRegN);

  format %{ %}
  interface(REG_INTER);
%}

operand a5_RegN() %{
  constraint(ALLOC_IN_RC(a5_reg));
  match(RegN);
  match(mRegN);

  format %{ %}
  interface(REG_INTER);
%}

operand a6_RegN() %{
  constraint(ALLOC_IN_RC(a6_reg));
  match(RegN);
  match(mRegN);

  format %{ %}
  interface(REG_INTER);
%}

operand a7_RegN() %{
  constraint(ALLOC_IN_RC(a7_reg));
  match(RegN);
  match(mRegN);

  format %{ %}
  interface(REG_INTER);
%}

operand s0_RegN() %{
  constraint(ALLOC_IN_RC(s0_reg));
  match(RegN);
  match(mRegN);

  format %{ %}
  interface(REG_INTER);
%}

operand s1_RegN() %{
  constraint(ALLOC_IN_RC(s1_reg));
  match(RegN);
  match(mRegN);

  format %{ %}
  interface(REG_INTER);
%}

operand s2_RegN() %{
  constraint(ALLOC_IN_RC(s2_reg));
  match(RegN);
  match(mRegN);

  format %{ %}
  interface(REG_INTER);
%}

operand s3_RegN() %{
  constraint(ALLOC_IN_RC(s3_reg));
  match(RegN);
  match(mRegN);

  format %{ %}
  interface(REG_INTER);
%}

operand s4_RegN() %{
  constraint(ALLOC_IN_RC(s4_reg));
  match(RegN);
  match(mRegN);

  format %{ %}
  interface(REG_INTER);
%}

operand s5_RegN() %{
  constraint(ALLOC_IN_RC(s5_reg));
  match(RegN);
  match(mRegN);

  format %{ %}
  interface(REG_INTER);
%}

operand s6_RegN() %{
  constraint(ALLOC_IN_RC(s6_reg));
  match(RegN);
  match(mRegN);

  format %{ %}
  interface(REG_INTER);
%}

operand s7_RegN() %{
  constraint(ALLOC_IN_RC(s7_reg));
  match(RegN);
  match(mRegN);

  format %{ %}
  interface(REG_INTER);
%}

operand v0_RegN() %{
  constraint(ALLOC_IN_RC(v0_reg));
  match(RegN);
  match(mRegN);

  format %{ %}
  interface(REG_INTER);
%}

operand v1_RegN() %{
  constraint(ALLOC_IN_RC(v1_reg));
  match(RegN);
  match(mRegN);

  format %{ %}
  interface(REG_INTER);
%}
4461 // Pointer Register
4462 operand mRegP() %{
4463 constraint(ALLOC_IN_RC(p_reg));
4464 match(RegP);
4466 format %{ %}
4467 interface(REG_INTER);
4468 %}
4470 operand no_T8_mRegP() %{
4471 constraint(ALLOC_IN_RC(no_T8_p_reg));
4472 match(RegP);
4473 match(mRegP);
4475 format %{ %}
4476 interface(REG_INTER);
4477 %}
4479 operand s0_RegP()
4480 %{
4481 constraint(ALLOC_IN_RC(s0_long_reg));
4482 match(RegP);
4483 match(mRegP);
4484 match(no_T8_mRegP);
4486 format %{ %}
4487 interface(REG_INTER);
4488 %}
4490 operand s1_RegP()
4491 %{
4492 constraint(ALLOC_IN_RC(s1_long_reg));
4493 match(RegP);
4494 match(mRegP);
4495 match(no_T8_mRegP);
4497 format %{ %}
4498 interface(REG_INTER);
4499 %}
4501 operand s2_RegP()
4502 %{
4503 constraint(ALLOC_IN_RC(s2_long_reg));
4504 match(RegP);
4505 match(mRegP);
4506 match(no_T8_mRegP);
4508 format %{ %}
4509 interface(REG_INTER);
4510 %}
4512 operand s3_RegP()
4513 %{
4514 constraint(ALLOC_IN_RC(s3_long_reg));
4515 match(RegP);
4516 match(mRegP);
4517 match(no_T8_mRegP);
4519 format %{ %}
4520 interface(REG_INTER);
4521 %}
4523 operand s4_RegP()
4524 %{
4525 constraint(ALLOC_IN_RC(s4_long_reg));
4526 match(RegP);
4527 match(mRegP);
4528 match(no_T8_mRegP);
4530 format %{ %}
4531 interface(REG_INTER);
4532 %}
4534 operand s5_RegP()
4535 %{
4536 constraint(ALLOC_IN_RC(s5_long_reg));
4537 match(RegP);
4538 match(mRegP);
4539 match(no_T8_mRegP);
4541 format %{ %}
4542 interface(REG_INTER);
4543 %}
4545 operand s6_RegP()
4546 %{
4547 constraint(ALLOC_IN_RC(s6_long_reg));
4548 match(RegP);
4549 match(mRegP);
4550 match(no_T8_mRegP);
4552 format %{ %}
4553 interface(REG_INTER);
4554 %}
4556 operand s7_RegP()
4557 %{
4558 constraint(ALLOC_IN_RC(s7_long_reg));
4559 match(RegP);
4560 match(mRegP);
4561 match(no_T8_mRegP);
4563 format %{ %}
4564 interface(REG_INTER);
4565 %}
4567 operand t0_RegP()
4568 %{
4569 constraint(ALLOC_IN_RC(t0_long_reg));
4570 match(RegP);
4571 match(mRegP);
4572 match(no_T8_mRegP);
4574 format %{ %}
4575 interface(REG_INTER);
4576 %}
4578 operand t1_RegP()
4579 %{
4580 constraint(ALLOC_IN_RC(t1_long_reg));
4581 match(RegP);
4582 match(mRegP);
4583 match(no_T8_mRegP);
4585 format %{ %}
4586 interface(REG_INTER);
4587 %}
4589 operand t2_RegP()
4590 %{
4591 constraint(ALLOC_IN_RC(t2_long_reg));
4592 match(RegP);
4593 match(mRegP);
4594 match(no_T8_mRegP);
4596 format %{ %}
4597 interface(REG_INTER);
4598 %}
4600 operand t3_RegP()
4601 %{
4602 constraint(ALLOC_IN_RC(t3_long_reg));
4603 match(RegP);
4604 match(mRegP);
4605 match(no_T8_mRegP);
4607 format %{ %}
4608 interface(REG_INTER);
4609 %}
// T8 pointer register operand.
// NOTE(review): unlike its sibling *_RegP operands, this one deliberately
// omits match(no_T8_mRegP) -- T8 is exactly the register excluded from the
// no_T8_p_reg class, so t8_RegP must never be accepted where a
// no_T8_mRegP operand is required.
4611 operand t8_RegP()
4612 %{
4613 constraint(ALLOC_IN_RC(t8_long_reg));
4614 match(RegP);
4615 match(mRegP);
4617 format %{ %}
4618 interface(REG_INTER);
4619 %}
4621 operand t9_RegP()
4622 %{
4623 constraint(ALLOC_IN_RC(t9_long_reg));
4624 match(RegP);
4625 match(mRegP);
4626 match(no_T8_mRegP);
4628 format %{ %}
4629 interface(REG_INTER);
4630 %}
4632 operand a0_RegP()
4633 %{
4634 constraint(ALLOC_IN_RC(a0_long_reg));
4635 match(RegP);
4636 match(mRegP);
4637 match(no_T8_mRegP);
4639 format %{ %}
4640 interface(REG_INTER);
4641 %}
4643 operand a1_RegP()
4644 %{
4645 constraint(ALLOC_IN_RC(a1_long_reg));
4646 match(RegP);
4647 match(mRegP);
4648 match(no_T8_mRegP);
4650 format %{ %}
4651 interface(REG_INTER);
4652 %}
4654 operand a2_RegP()
4655 %{
4656 constraint(ALLOC_IN_RC(a2_long_reg));
4657 match(RegP);
4658 match(mRegP);
4659 match(no_T8_mRegP);
4661 format %{ %}
4662 interface(REG_INTER);
4663 %}
4665 operand a3_RegP()
4666 %{
4667 constraint(ALLOC_IN_RC(a3_long_reg));
4668 match(RegP);
4669 match(mRegP);
4670 match(no_T8_mRegP);
4672 format %{ %}
4673 interface(REG_INTER);
4674 %}
4676 operand a4_RegP()
4677 %{
4678 constraint(ALLOC_IN_RC(a4_long_reg));
4679 match(RegP);
4680 match(mRegP);
4681 match(no_T8_mRegP);
4683 format %{ %}
4684 interface(REG_INTER);
4685 %}
4688 operand a5_RegP()
4689 %{
4690 constraint(ALLOC_IN_RC(a5_long_reg));
4691 match(RegP);
4692 match(mRegP);
4693 match(no_T8_mRegP);
4695 format %{ %}
4696 interface(REG_INTER);
4697 %}
4699 operand a6_RegP()
4700 %{
4701 constraint(ALLOC_IN_RC(a6_long_reg));
4702 match(RegP);
4703 match(mRegP);
4704 match(no_T8_mRegP);
4706 format %{ %}
4707 interface(REG_INTER);
4708 %}
4710 operand a7_RegP()
4711 %{
4712 constraint(ALLOC_IN_RC(a7_long_reg));
4713 match(RegP);
4714 match(mRegP);
4715 match(no_T8_mRegP);
4717 format %{ %}
4718 interface(REG_INTER);
4719 %}
4721 operand v0_RegP()
4722 %{
4723 constraint(ALLOC_IN_RC(v0_long_reg));
4724 match(RegP);
4725 match(mRegP);
4726 match(no_T8_mRegP);
4728 format %{ %}
4729 interface(REG_INTER);
4730 %}
4732 operand v1_RegP()
4733 %{
4734 constraint(ALLOC_IN_RC(v1_long_reg));
4735 match(RegP);
4736 match(mRegP);
4737 match(no_T8_mRegP);
4739 format %{ %}
4740 interface(REG_INTER);
4741 %}
4743 /*
4744 operand mSPRegP(mRegP reg) %{
4745 constraint(ALLOC_IN_RC(sp_reg));
4746 match(reg);
4748 format %{ "SP" %}
4749 interface(REG_INTER);
4750 %}
4752 operand mFPRegP(mRegP reg) %{
4753 constraint(ALLOC_IN_RC(fp_reg));
4754 match(reg);
4756 format %{ "FP" %}
4757 interface(REG_INTER);
4758 %}
4759 */
4761 operand mRegL() %{
4762 constraint(ALLOC_IN_RC(long_reg));
4763 match(RegL);
4765 format %{ %}
4766 interface(REG_INTER);
4767 %}
4769 operand v0RegL() %{
4770 constraint(ALLOC_IN_RC(v0_long_reg));
4771 match(RegL);
4772 match(mRegL);
4774 format %{ %}
4775 interface(REG_INTER);
4776 %}
4778 operand v1RegL() %{
4779 constraint(ALLOC_IN_RC(v1_long_reg));
4780 match(RegL);
4781 match(mRegL);
4783 format %{ %}
4784 interface(REG_INTER);
4785 %}
// A0 long register operand.
// NOTE(review): this is the only *RegL operand with a non-empty format
// string ("A0"); all sibling operands use an empty format.  Harmless
// (format only affects disassembly text), but inconsistent -- confirm
// whether it should be emptied to match the others.
4787 operand a0RegL() %{
4788 constraint(ALLOC_IN_RC(a0_long_reg));
4789 match(RegL);
4790 match(mRegL);
4792 format %{ "A0" %}
4793 interface(REG_INTER);
4794 %}
4796 operand a1RegL() %{
4797 constraint(ALLOC_IN_RC(a1_long_reg));
4798 match(RegL);
4799 match(mRegL);
4801 format %{ %}
4802 interface(REG_INTER);
4803 %}
4805 operand a2RegL() %{
4806 constraint(ALLOC_IN_RC(a2_long_reg));
4807 match(RegL);
4808 match(mRegL);
4810 format %{ %}
4811 interface(REG_INTER);
4812 %}
4814 operand a3RegL() %{
4815 constraint(ALLOC_IN_RC(a3_long_reg));
4816 match(RegL);
4817 match(mRegL);
4819 format %{ %}
4820 interface(REG_INTER);
4821 %}
4823 operand t0RegL() %{
4824 constraint(ALLOC_IN_RC(t0_long_reg));
4825 match(RegL);
4826 match(mRegL);
4828 format %{ %}
4829 interface(REG_INTER);
4830 %}
4832 operand t1RegL() %{
4833 constraint(ALLOC_IN_RC(t1_long_reg));
4834 match(RegL);
4835 match(mRegL);
4837 format %{ %}
4838 interface(REG_INTER);
4839 %}
4841 operand t2RegL() %{
4842 constraint(ALLOC_IN_RC(t2_long_reg));
4843 match(RegL);
4844 match(mRegL);
4846 format %{ %}
4847 interface(REG_INTER);
4848 %}
4850 operand t3RegL() %{
4851 constraint(ALLOC_IN_RC(t3_long_reg));
4852 match(RegL);
4853 match(mRegL);
4855 format %{ %}
4856 interface(REG_INTER);
4857 %}
4859 operand t8RegL() %{
4860 constraint(ALLOC_IN_RC(t8_long_reg));
4861 match(RegL);
4862 match(mRegL);
4864 format %{ %}
4865 interface(REG_INTER);
4866 %}
4868 operand a4RegL() %{
4869 constraint(ALLOC_IN_RC(a4_long_reg));
4870 match(RegL);
4871 match(mRegL);
4873 format %{ %}
4874 interface(REG_INTER);
4875 %}
4877 operand a5RegL() %{
4878 constraint(ALLOC_IN_RC(a5_long_reg));
4879 match(RegL);
4880 match(mRegL);
4882 format %{ %}
4883 interface(REG_INTER);
4884 %}
4886 operand a6RegL() %{
4887 constraint(ALLOC_IN_RC(a6_long_reg));
4888 match(RegL);
4889 match(mRegL);
4891 format %{ %}
4892 interface(REG_INTER);
4893 %}
4895 operand a7RegL() %{
4896 constraint(ALLOC_IN_RC(a7_long_reg));
4897 match(RegL);
4898 match(mRegL);
4900 format %{ %}
4901 interface(REG_INTER);
4902 %}
4904 operand s0RegL() %{
4905 constraint(ALLOC_IN_RC(s0_long_reg));
4906 match(RegL);
4907 match(mRegL);
4909 format %{ %}
4910 interface(REG_INTER);
4911 %}
4913 operand s1RegL() %{
4914 constraint(ALLOC_IN_RC(s1_long_reg));
4915 match(RegL);
4916 match(mRegL);
4918 format %{ %}
4919 interface(REG_INTER);
4920 %}
4922 operand s2RegL() %{
4923 constraint(ALLOC_IN_RC(s2_long_reg));
4924 match(RegL);
4925 match(mRegL);
4927 format %{ %}
4928 interface(REG_INTER);
4929 %}
4931 operand s3RegL() %{
4932 constraint(ALLOC_IN_RC(s3_long_reg));
4933 match(RegL);
4934 match(mRegL);
4936 format %{ %}
4937 interface(REG_INTER);
4938 %}
4940 operand s4RegL() %{
4941 constraint(ALLOC_IN_RC(s4_long_reg));
4942 match(RegL);
4943 match(mRegL);
4945 format %{ %}
4946 interface(REG_INTER);
4947 %}
4949 operand s7RegL() %{
4950 constraint(ALLOC_IN_RC(s7_long_reg));
4951 match(RegL);
4952 match(mRegL);
4954 format %{ %}
4955 interface(REG_INTER);
4956 %}
4958 // Floating register operands
4959 operand regF() %{
4960 constraint(ALLOC_IN_RC(flt_reg));
4961 match(RegF);
4963 format %{ %}
4964 interface(REG_INTER);
4965 %}
4967 //Double Precision Floating register operands
4968 operand regD() %{
4969 constraint(ALLOC_IN_RC(dbl_reg));
4970 match(RegD);
4972 format %{ %}
4973 interface(REG_INTER);
4974 %}
4976 //----------Memory Operands----------------------------------------------------
4977 // Indirect Memory Operand
4978 operand indirect(mRegP reg) %{
4979 constraint(ALLOC_IN_RC(p_reg));
4980 match(reg);
4982 format %{ "[$reg] @ indirect" %}
4983 interface(MEMORY_INTER) %{
4984 base($reg);
4985 index(0x0); /* NO_INDEX */
4986 scale(0x0);
4987 disp(0x0);
4988 %}
4989 %}
4991 // Indirect Memory Plus Short Offset Operand
4992 operand indOffset8(mRegP reg, immL8 off)
4993 %{
4994 constraint(ALLOC_IN_RC(p_reg));
4995 match(AddP reg off);
4997 format %{ "[$reg + $off (8-bit)] @ indOffset8" %}
4998 interface(MEMORY_INTER) %{
4999 base($reg);
5000 index(0x0); /* NO_INDEX */
5001 scale(0x0);
5002 disp($off);
5003 %}
5004 %}
5006 // Indirect Memory Times Scale Plus Index Register
5007 operand indIndexScale(mRegP reg, mRegL lreg, immI2 scale)
5008 %{
5009 constraint(ALLOC_IN_RC(p_reg));
5010 match(AddP reg (LShiftL lreg scale));
5012 op_cost(10);
5013 format %{"[$reg + $lreg << $scale] @ indIndexScale" %}
5014 interface(MEMORY_INTER) %{
5015 base($reg);
5016 index($lreg);
5017 scale($scale);
5018 disp(0x0);
5019 %}
5020 %}
5023 // [base + index + offset]
5024 operand baseIndexOffset8(mRegP base, mRegL index, immL8 off)
5025 %{
5026 constraint(ALLOC_IN_RC(p_reg));
5027 op_cost(5);
5028 match(AddP (AddP base index) off);
5030 format %{ "[$base + $index + $off (8-bit)] @ baseIndexOffset8" %}
5031 interface(MEMORY_INTER) %{
5032 base($base);
5033 index($index);
5034 scale(0x0);
5035 disp($off);
5036 %}
5037 %}
5039 // [base + index + offset]
5040 operand baseIndexOffset8_convI2L(mRegP base, mRegI index, immL8 off)
5041 %{
5042 constraint(ALLOC_IN_RC(p_reg));
5043 op_cost(5);
5044 match(AddP (AddP base (ConvI2L index)) off);
5046 format %{ "[$base + $index + $off (8-bit)] @ baseIndexOffset8_convI2L" %}
5047 interface(MEMORY_INTER) %{
5048 base($base);
5049 index($index);
5050 scale(0x0);
5051 disp($off);
5052 %}
5053 %}
5055 // Indirect Memory Times Scale Plus Index Register Plus Offset Operand
5056 operand indIndexScaleOffset8(mRegP reg, immL8 off, mRegL lreg, immI2 scale)
5057 %{
5058 constraint(ALLOC_IN_RC(p_reg));
5059 match(AddP (AddP reg (LShiftL lreg scale)) off);
5061 op_cost(10);
5062 format %{"[$reg + $off + $lreg << $scale] @ indIndexScaleOffset8" %}
5063 interface(MEMORY_INTER) %{
5064 base($reg);
5065 index($lreg);
5066 scale($scale);
5067 disp($off);
5068 %}
5069 %}
5071 operand indIndexScaleOffset8_convI2L(mRegP reg, immL8 off, mRegI ireg, immI2 scale)
5072 %{
5073 constraint(ALLOC_IN_RC(p_reg));
5074 match(AddP (AddP reg (LShiftL (ConvI2L ireg) scale)) off);
5076 op_cost(10);
5077 format %{"[$reg + $off + $ireg << $scale] @ indIndexScaleOffset8_convI2L" %}
5078 interface(MEMORY_INTER) %{
5079 base($reg);
5080 index($ireg);
5081 scale($scale);
5082 disp($off);
5083 %}
5084 %}
5086 // [base + index<<scale + offset]
// Memory operand: [base + (long)index << scale + off], with an int index
// widened via ConvI2L.
// NOTE(review): the name says "Pos" (positive index), but the predicate
// that would enforce index >= 0 is commented out below, so non-negativity
// of $index is NOT actually checked by the matcher -- confirm whether the
// encodings that use this operand tolerate a negative index.
5087 operand basePosIndexScaleOffset8(mRegP base, mRegI index, immL8 off, immI_0_31 scale)
5088 %{
5089 constraint(ALLOC_IN_RC(p_reg));
5090 //predicate(n->in(2)->in(3)->in(1)->as_Type()->type()->is_long()->_lo >= 0);
5091 op_cost(10);
5092 match(AddP (AddP base (LShiftL (ConvI2L index) scale)) off);
5094 format %{ "[$base + $index << $scale + $off (8-bit)] @ basePosIndexScaleOffset8" %}
5095 interface(MEMORY_INTER) %{
5096 base($base);
5097 index($index);
5098 scale($scale);
5099 disp($off);
5100 %}
5101 %}
5103 // Indirect Memory Times Scale Plus Index Register Plus Offset Operand
5104 operand indIndexScaleOffsetNarrow(mRegN reg, immL8 off, mRegL lreg, immI2 scale)
5105 %{
5106 predicate(Universe::narrow_oop_shift() == 0);
5107 constraint(ALLOC_IN_RC(p_reg));
5108 match(AddP (AddP (DecodeN reg) (LShiftL lreg scale)) off);
5110 op_cost(10);
5111 format %{"[$reg + $off + $lreg << $scale] @ indIndexScaleOffsetNarrow" %}
5112 interface(MEMORY_INTER) %{
5113 base($reg);
5114 index($lreg);
5115 scale($scale);
5116 disp($off);
5117 %}
5118 %}
5120 // [base + index<<scale + offset] for compressed oops
// Memory operand for a compressed-oop base: [(DecodeN base) + (long)index
// << scale + off].  Only legal when narrow_oop_shift() == 0, i.e. the
// narrow oop value is the address and DecodeN is a no-op.
// NOTE(review): as with basePosIndexScaleOffset8, the "Pos" in the name
// is not enforced -- the index >= 0 part of the predicate is commented
// out; only the narrow_oop_shift() == 0 check is active.
5121 operand indPosIndexI2LScaleOffset8Narrow(mRegN base, mRegI index, immL8 off, immI_0_31 scale)
5122 %{
5123 constraint(ALLOC_IN_RC(p_reg));
5124 //predicate(Universe::narrow_oop_shift() == 0 && n->in(2)->in(3)->in(1)->as_Type()->type()->is_long()->_lo >= 0);
5125 predicate(Universe::narrow_oop_shift() == 0);
5126 op_cost(10);
5127 match(AddP (AddP (DecodeN base) (LShiftL (ConvI2L index) scale)) off);
5129 format %{ "[$base + $index << $scale + $off (8-bit)] @ indPosIndexI2LScaleOffset8Narrow" %}
5130 interface(MEMORY_INTER) %{
5131 base($base);
5132 index($index);
5133 scale($scale);
5134 disp($off);
5135 %}
5136 %}
5138 //FIXME: I think it's better to limit the immI to be 16-bit at most!
5139 // Indirect Memory Plus Long Offset Operand
5140 operand indOffset32(mRegP reg, immL32 off) %{
5141 constraint(ALLOC_IN_RC(p_reg));
5142 op_cost(20);
5143 match(AddP reg off);
5145 format %{ "[$reg + $off (32-bit)] @ indOffset32" %}
5146 interface(MEMORY_INTER) %{
5147 base($reg);
5148 index(0x0); /* NO_INDEX */
5149 scale(0x0);
5150 disp($off);
5151 %}
5152 %}
5154 // Indirect Memory Plus Index Register
5155 operand indIndex(mRegP addr, mRegL index) %{
5156 constraint(ALLOC_IN_RC(p_reg));
5157 match(AddP addr index);
5159 op_cost(20);
5160 format %{"[$addr + $index] @ indIndex" %}
5161 interface(MEMORY_INTER) %{
5162 base($addr);
5163 index($index);
5164 scale(0x0);
5165 disp(0x0);
5166 %}
5167 %}
5169 operand indirectNarrowKlass(mRegN reg)
5170 %{
5171 predicate(Universe::narrow_klass_shift() == 0);
5172 constraint(ALLOC_IN_RC(p_reg));
5173 op_cost(10);
5174 match(DecodeNKlass reg);
5176 format %{ "[$reg] @ indirectNarrowKlass" %}
5177 interface(MEMORY_INTER) %{
5178 base($reg);
5179 index(0x0);
5180 scale(0x0);
5181 disp(0x0);
5182 %}
5183 %}
5185 operand indOffset8NarrowKlass(mRegN reg, immL8 off)
5186 %{
5187 predicate(Universe::narrow_klass_shift() == 0);
5188 constraint(ALLOC_IN_RC(p_reg));
5189 op_cost(10);
5190 match(AddP (DecodeNKlass reg) off);
5192 format %{ "[$reg + $off (8-bit)] @ indOffset8NarrowKlass" %}
5193 interface(MEMORY_INTER) %{
5194 base($reg);
5195 index(0x0);
5196 scale(0x0);
5197 disp($off);
5198 %}
5199 %}
5201 operand indOffset32NarrowKlass(mRegN reg, immL32 off)
5202 %{
5203 predicate(Universe::narrow_klass_shift() == 0);
5204 constraint(ALLOC_IN_RC(p_reg));
5205 op_cost(10);
5206 match(AddP (DecodeNKlass reg) off);
5208 format %{ "[$reg + $off (32-bit)] @ indOffset32NarrowKlass" %}
5209 interface(MEMORY_INTER) %{
5210 base($reg);
5211 index(0x0);
5212 scale(0x0);
5213 disp($off);
5214 %}
5215 %}
5217 operand indIndexOffsetNarrowKlass(mRegN reg, mRegL lreg, immL32 off)
5218 %{
5219 predicate(Universe::narrow_klass_shift() == 0);
5220 constraint(ALLOC_IN_RC(p_reg));
5221 match(AddP (AddP (DecodeNKlass reg) lreg) off);
5223 op_cost(10);
5224 format %{"[$reg + $off + $lreg] @ indIndexOffsetNarrowKlass" %}
5225 interface(MEMORY_INTER) %{
5226 base($reg);
5227 index($lreg);
5228 scale(0x0);
5229 disp($off);
5230 %}
5231 %}
5233 operand indIndexNarrowKlass(mRegN reg, mRegL lreg)
5234 %{
5235 predicate(Universe::narrow_klass_shift() == 0);
5236 constraint(ALLOC_IN_RC(p_reg));
5237 match(AddP (DecodeNKlass reg) lreg);
5239 op_cost(10);
5240 format %{"[$reg + $lreg] @ indIndexNarrowKlass" %}
5241 interface(MEMORY_INTER) %{
5242 base($reg);
5243 index($lreg);
5244 scale(0x0);
5245 disp(0x0);
5246 %}
5247 %}
5249 // Indirect Memory Operand
5250 operand indirectNarrow(mRegN reg)
5251 %{
5252 predicate(Universe::narrow_oop_shift() == 0);
5253 constraint(ALLOC_IN_RC(p_reg));
5254 op_cost(10);
5255 match(DecodeN reg);
5257 format %{ "[$reg] @ indirectNarrow" %}
5258 interface(MEMORY_INTER) %{
5259 base($reg);
5260 index(0x0);
5261 scale(0x0);
5262 disp(0x0);
5263 %}
5264 %}
5266 // Indirect Memory Plus Short Offset Operand
5267 operand indOffset8Narrow(mRegN reg, immL8 off)
5268 %{
5269 predicate(Universe::narrow_oop_shift() == 0);
5270 constraint(ALLOC_IN_RC(p_reg));
5271 op_cost(10);
5272 match(AddP (DecodeN reg) off);
5274 format %{ "[$reg + $off (8-bit)] @ indOffset8Narrow" %}
5275 interface(MEMORY_INTER) %{
5276 base($reg);
5277 index(0x0);
5278 scale(0x0);
5279 disp($off);
5280 %}
5281 %}
5283 // Indirect Memory Plus Index Register Plus Offset Operand
5284 operand indIndexOffset8Narrow(mRegN reg, mRegL lreg, immL8 off)
5285 %{
5286 predicate(Universe::narrow_oop_shift() == 0);
5287 constraint(ALLOC_IN_RC(p_reg));
5288 match(AddP (AddP (DecodeN reg) lreg) off);
5290 op_cost(10);
5291 format %{"[$reg + $off + $lreg] @ indIndexOffset8Narrow" %}
5292 interface(MEMORY_INTER) %{
5293 base($reg);
5294 index($lreg);
5295 scale(0x0);
5296 disp($off);
5297 %}
5298 %}
5300 //----------Load Long Memory Operands------------------------------------------
5301 // The load-long idiom will use its address expression again after loading
5302 // the first word of the long. If the load-long destination overlaps with
5303 // registers used in the addressing expression, the 2nd half will be loaded
5304 // from a clobbered address. Fix this by requiring that load-long use
5305 // address registers that do not overlap with the load-long target.
5307 // load-long support
5308 operand load_long_RegP() %{
5309 constraint(ALLOC_IN_RC(p_reg));
5310 match(RegP);
5311 match(mRegP);
5312 op_cost(100);
5313 format %{ %}
5314 interface(REG_INTER);
5315 %}
5317 // Indirect Memory Operand Long
5318 operand load_long_indirect(load_long_RegP reg) %{
5319 constraint(ALLOC_IN_RC(p_reg));
5320 match(reg);
5322 format %{ "[$reg]" %}
5323 interface(MEMORY_INTER) %{
5324 base($reg);
5325 index(0x0);
5326 scale(0x0);
5327 disp(0x0);
5328 %}
5329 %}
5331 // Indirect Memory Plus Long Offset Operand
5332 operand load_long_indOffset32(load_long_RegP reg, immL32 off) %{
5333 match(AddP reg off);
5335 format %{ "[$reg + $off]" %}
5336 interface(MEMORY_INTER) %{
5337 base($reg);
5338 index(0x0);
5339 scale(0x0);
5340 disp($off);
5341 %}
5342 %}
5344 //----------Conditional Branch Operands----------------------------------------
5345 // Comparison Op - This is the operation of the comparison, and is limited to
5346 // the following set of codes:
5347 // L (<), LE (<=), G (>), GE (>=), E (==), NE (!=)
5348 //
5349 // Other attributes of the comparison, such as unsignedness, are specified
5350 // by the comparison instruction that sets a condition code flags register.
5351 // That result is represented by a flags operand whose subtype is appropriate
5352 // to the unsignedness (etc.) of the comparison.
5353 //
5354 // Later, the instruction which matches both the Comparison Op (a Bool) and
5355 // the flags (produced by the Cmp) specifies the coding of the comparison op
5356 // by matching a specific subtype of Bool operand below, such as cmpOpU.
5358 // Comparison Code
// Condition-code operand matched against an ideal Bool node.  The hex
// values are the encodings an instruction that matches this operand
// receives for each comparison condition; they must stay in sync with
// every encode class that interprets a $cop argument.
5359 operand cmpOp() %{
5360 match(Bool);
5362 format %{ "" %}
5363 interface(COND_INTER) %{
5364 equal(0x01);
5365 not_equal(0x02);
5366 greater(0x03);
5367 greater_equal(0x04);
5368 less(0x05);
5369 less_equal(0x06);
5370 overflow(0x7);
5371 no_overflow(0x8);
5372 %}
5373 %}
5376 // Comparison Code
5377 // Comparison Code, unsigned compare. Used by FP also, with
5378 // C2 (unordered) turned into GT or LT already. The other bits
5379 // C0 and C3 are turned into Carry & Zero flags.
// Unsigned-comparison condition operand.  Encodings are intentionally
// identical to cmpOp: per the comments above, unsignedness is a property
// of the compare instruction that produces the flags, not of the
// condition code itself, so the two operands share one encoding table.
5380 operand cmpOpU() %{
5381 match(Bool);
5383 format %{ "" %}
5384 interface(COND_INTER) %{
5385 equal(0x01);
5386 not_equal(0x02);
5387 greater(0x03);
5388 greater_equal(0x04);
5389 less(0x05);
5390 less_equal(0x06);
5391 overflow(0x7);
5392 no_overflow(0x8);
5393 %}
5394 %}
5396 /*
5397 // Comparison Code, unsigned compare. Used by FP also, with
5398 // C2 (unordered) turned into GT or LT already. The other bits
5399 // C0 and C3 are turned into Carry & Zero flags.
5400 operand cmpOpU() %{
5401 match(Bool);
5403 format %{ "" %}
5404 interface(COND_INTER) %{
5405 equal(0x4);
5406 not_equal(0x5);
5407 less(0x2);
5408 greater_equal(0x3);
5409 less_equal(0x6);
5410 greater(0x7);
5411 %}
5412 %}
5413 */
5414 /*
5415 // Comparison Code for FP conditional move
5416 operand cmpOp_fcmov() %{
5417 match(Bool);
5419 format %{ "" %}
5420 interface(COND_INTER) %{
5421 equal (0x01);
5422 not_equal (0x02);
5423 greater (0x03);
5424 greater_equal(0x04);
5425 less (0x05);
5426 less_equal (0x06);
5427 %}
5428 %}
5430 // Comparison Code used in long compares
5431 operand cmpOp_commute() %{
5432 match(Bool);
5434 format %{ "" %}
5435 interface(COND_INTER) %{
5436 equal(0x4);
5437 not_equal(0x5);
5438 less(0xF);
5439 greater_equal(0xE);
5440 less_equal(0xD);
5441 greater(0xC);
5442 %}
5443 %}
5444 */
5446 //----------Special Memory Operands--------------------------------------------
5447 // Stack Slot Operand - This operand is used for loading and storing temporary
5448 // values on the stack where a match requires a value to
5449 // flow through memory.
// Pointer stack-slot operand: a value forced through memory during
// matching.  Addressed as SP (encoding 0x1d = register 29) plus the
// slot's frame offset; $reg supplies the offset, not a register.
5450 operand stackSlotP(sRegP reg) %{
5451 constraint(ALLOC_IN_RC(stack_slots));
5452 // No match rule because this operand is only generated in matching
5453 op_cost(50);
5454 format %{ "[$reg]" %}
5455 interface(MEMORY_INTER) %{
5456 base(0x1d); // SP
5457 index(0x0); // No Index
5458 scale(0x0); // No Scale
5459 disp($reg); // Stack Offset
5460 %}
5461 %}
5463 operand stackSlotI(sRegI reg) %{
5464 constraint(ALLOC_IN_RC(stack_slots));
5465 // No match rule because this operand is only generated in matching
5466 op_cost(50);
5467 format %{ "[$reg]" %}
5468 interface(MEMORY_INTER) %{
5469 base(0x1d); // SP
5470 index(0x0); // No Index
5471 scale(0x0); // No Scale
5472 disp($reg); // Stack Offset
5473 %}
5474 %}
5476 operand stackSlotF(sRegF reg) %{
5477 constraint(ALLOC_IN_RC(stack_slots));
5478 // No match rule because this operand is only generated in matching
5479 op_cost(50);
5480 format %{ "[$reg]" %}
5481 interface(MEMORY_INTER) %{
5482 base(0x1d); // SP
5483 index(0x0); // No Index
5484 scale(0x0); // No Scale
5485 disp($reg); // Stack Offset
5486 %}
5487 %}
5489 operand stackSlotD(sRegD reg) %{
5490 constraint(ALLOC_IN_RC(stack_slots));
5491 // No match rule because this operand is only generated in matching
5492 op_cost(50);
5493 format %{ "[$reg]" %}
5494 interface(MEMORY_INTER) %{
5495 base(0x1d); // SP
5496 index(0x0); // No Index
5497 scale(0x0); // No Scale
5498 disp($reg); // Stack Offset
5499 %}
5500 %}
5502 operand stackSlotL(sRegL reg) %{
5503 constraint(ALLOC_IN_RC(stack_slots));
5504 // No match rule because this operand is only generated in matching
5505 op_cost(50);
5506 format %{ "[$reg]" %}
5507 interface(MEMORY_INTER) %{
5508 base(0x1d); // SP
5509 index(0x0); // No Index
5510 scale(0x0); // No Scale
5511 disp($reg); // Stack Offset
5512 %}
5513 %}
5516 //------------------------OPERAND CLASSES--------------------------------------
5517 //opclass memory( direct, indirect, indOffset16, indOffset32, indOffset32X, indIndexOffset );
5518 opclass memory( indirect, indirectNarrow, indOffset8, indOffset32, indIndex, indIndexScale, load_long_indirect, load_long_indOffset32, baseIndexOffset8, baseIndexOffset8_convI2L, indIndexScaleOffset8, indIndexScaleOffset8_convI2L, basePosIndexScaleOffset8, indIndexScaleOffsetNarrow, indPosIndexI2LScaleOffset8Narrow, indOffset8Narrow, indIndexOffset8Narrow);
5521 //----------PIPELINE-----------------------------------------------------------
5522 // Rules which define the behavior of the target architectures pipeline.
5524 pipeline %{
5526 //----------ATTRIBUTES---------------------------------------------------------
5527 attributes %{
5528 fixed_size_instructions; // Fixed size instructions
5529 branch_has_delay_slot; // branch have delay slot in gs2
5530 max_instructions_per_bundle = 1; // 1 instruction per bundle
5531 max_bundles_per_cycle = 4; // Up to 4 bundles per cycle
5532 bundle_unit_size=4;
5533 instruction_unit_size = 4; // An instruction is 4 bytes long
5534 instruction_fetch_unit_size = 16; // The processor fetches one line
5535 instruction_fetch_units = 1; // of 16 bytes
5537 // List of nop instructions
5538 nops( MachNop );
5539 %}
5541 //----------RESOURCES----------------------------------------------------------
5542 // Resources are the functional units available to the machine
5544 resources(D1, D2, D3, D4, DECODE = D1 | D2 | D3| D4, ALU1, ALU2, ALU = ALU1 | ALU2, FPU1, FPU2, FPU = FPU1 | FPU2, MEM, BR);
5546 //----------PIPELINE DESCRIPTION-----------------------------------------------
5547 // Pipeline Description specifies the stages in the machine's pipeline
5549 // IF: fetch
5550 // ID: decode
5551 // RD: read
5552 // CA: calculate
5553 // WB: write back
5554 // CM: commit
5556 pipe_desc(IF, ID, RD, CA, WB, CM);
5559 //----------PIPELINE CLASSES---------------------------------------------------
5560 // Pipeline Classes describe the stages in which input and output are
5561 // referenced by the hardware pipeline.
5563 //No.1 Integer ALU reg-reg operation : dst <-- reg1 op reg2
5564 pipe_class ialu_regI_regI(mRegI dst, mRegI src1, mRegI src2) %{
5565 single_instruction;
5566 src1 : RD(read);
5567 src2 : RD(read);
5568 dst : WB(write)+1;
5569 DECODE : ID;
5570 ALU : CA;
5571 %}
5573 //No.19 Integer mult operation : dst <-- reg1 mult reg2
5574 pipe_class ialu_mult(mRegI dst, mRegI src1, mRegI src2) %{
5575 src1 : RD(read);
5576 src2 : RD(read);
5577 dst : WB(write)+5;
5578 DECODE : ID;
5579 ALU2 : CA;
5580 %}
5582 pipe_class mulL_reg_reg(mRegL dst, mRegL src1, mRegL src2) %{
5583 src1 : RD(read);
5584 src2 : RD(read);
5585 dst : WB(write)+10;
5586 DECODE : ID;
5587 ALU2 : CA;
5588 %}
5590 //No.19 Integer div operation : dst <-- reg1 div reg2
5591 pipe_class ialu_div(mRegI dst, mRegI src1, mRegI src2) %{
5592 src1 : RD(read);
5593 src2 : RD(read);
5594 dst : WB(write)+10;
5595 DECODE : ID;
5596 ALU2 : CA;
5597 %}
5599 //No.19 Integer mod operation : dst <-- reg1 mod reg2
5600 pipe_class ialu_mod(mRegI dst, mRegI src1, mRegI src2) %{
5601 instruction_count(2);
5602 src1 : RD(read);
5603 src2 : RD(read);
5604 dst : WB(write)+10;
5605 DECODE : ID;
5606 ALU2 : CA;
5607 %}
5609 //No.15 Long ALU reg-reg operation : dst <-- reg1 op reg2
5610 pipe_class ialu_regL_regL(mRegL dst, mRegL src1, mRegL src2) %{
5611 instruction_count(2);
5612 src1 : RD(read);
5613 src2 : RD(read);
5614 dst : WB(write);
5615 DECODE : ID;
5616 ALU : CA;
5617 %}
5619 //No.18 Long ALU reg-imm16 operation : dst <-- reg1 op imm16
5620 pipe_class ialu_regL_imm16(mRegL dst, mRegL src) %{
5621 instruction_count(2);
5622 src : RD(read);
5623 dst : WB(write);
5624 DECODE : ID;
5625 ALU : CA;
5626 %}
5628 //no.16 load Long from memory :
5629 pipe_class ialu_loadL(mRegL dst, memory mem) %{
5630 instruction_count(2);
5631 mem : RD(read);
5632 dst : WB(write)+5;
5633 DECODE : ID;
5634 MEM : RD;
5635 %}
5637 //No.17 Store Long to Memory :
5638 pipe_class ialu_storeL(mRegL src, memory mem) %{
5639 instruction_count(2);
5640 mem : RD(read);
5641 src : RD(read);
5642 DECODE : ID;
5643 MEM : RD;
5644 %}
5646 //No.2 Integer ALU reg-imm16 operation : dst <-- reg1 op imm16
5647 pipe_class ialu_regI_imm16(mRegI dst, mRegI src) %{
5648 single_instruction;
5649 src : RD(read);
5650 dst : WB(write);
5651 DECODE : ID;
5652 ALU : CA;
5653 %}
5655 //No.3 Integer move operation : dst <-- reg
5656 pipe_class ialu_regI_mov(mRegI dst, mRegI src) %{
5657 src : RD(read);
5658 dst : WB(write);
5659 DECODE : ID;
5660 ALU : CA;
5661 %}
5663 //No.4 No instructions : do nothing
5664 pipe_class empty( ) %{
5665 instruction_count(0);
5666 %}
5668 //No.5 UnConditional branch :
5669 pipe_class pipe_jump( label labl ) %{
5670 multiple_bundles;
5671 DECODE : ID;
5672 BR : RD;
5673 %}
5675 //No.6 ALU Conditional branch :
5676 pipe_class pipe_alu_branch(mRegI src1, mRegI src2, label labl ) %{
5677 multiple_bundles;
5678 src1 : RD(read);
5679 src2 : RD(read);
5680 DECODE : ID;
5681 BR : RD;
5682 %}
5684 //no.7 load integer from memory :
5685 pipe_class ialu_loadI(mRegI dst, memory mem) %{
5686 mem : RD(read);
5687 dst : WB(write)+3;
5688 DECODE : ID;
5689 MEM : RD;
5690 %}
5692 //No.8 Store Integer to Memory :
5693 pipe_class ialu_storeI(mRegI src, memory mem) %{
5694 mem : RD(read);
5695 src : RD(read);
5696 DECODE : ID;
5697 MEM : RD;
5698 %}
5701 //No.10 Floating FPU reg-reg operation : dst <-- reg1 op reg2
5702 pipe_class fpu_regF_regF(regF dst, regF src1, regF src2) %{
5703 src1 : RD(read);
5704 src2 : RD(read);
5705 dst : WB(write);
5706 DECODE : ID;
5707 FPU : CA;
5708 %}
5710 //No.22 Floating div operation : dst <-- reg1 div reg2
5711 pipe_class fpu_div(regF dst, regF src1, regF src2) %{
5712 src1 : RD(read);
5713 src2 : RD(read);
5714 dst : WB(write);
5715 DECODE : ID;
5716 FPU2 : CA;
5717 %}
5719 pipe_class fcvt_I2D(regD dst, mRegI src) %{
5720 src : RD(read);
5721 dst : WB(write);
5722 DECODE : ID;
5723 FPU1 : CA;
5724 %}
5726 pipe_class fcvt_D2I(mRegI dst, regD src) %{
5727 src : RD(read);
5728 dst : WB(write);
5729 DECODE : ID;
5730 FPU1 : CA;
5731 %}
5733 pipe_class pipe_mfc1(mRegI dst, regD src) %{
5734 src : RD(read);
5735 dst : WB(write);
5736 DECODE : ID;
5737 MEM : RD;
5738 %}
5740 pipe_class pipe_mtc1(regD dst, mRegI src) %{
5741 src : RD(read);
5742 dst : WB(write);
5743 DECODE : ID;
5744 MEM : RD(5);
5745 %}
5747 //No.23 Floating sqrt operation : dst <-- reg1 sqrt reg2
5748 pipe_class fpu_sqrt(regF dst, regF src1, regF src2) %{
5749 multiple_bundles;
5750 src1 : RD(read);
5751 src2 : RD(read);
5752 dst : WB(write);
5753 DECODE : ID;
5754 FPU2 : CA;
5755 %}
5757 //No.11 Load Floating from Memory :
5758 pipe_class fpu_loadF(regF dst, memory mem) %{
5759 instruction_count(1);
5760 mem : RD(read);
5761 dst : WB(write)+3;
5762 DECODE : ID;
5763 MEM : RD;
5764 %}
5766 //No.12 Store Floating to Memory :
5767 pipe_class fpu_storeF(regF src, memory mem) %{
5768 instruction_count(1);
5769 mem : RD(read);
5770 src : RD(read);
5771 DECODE : ID;
5772 MEM : RD;
5773 %}
5775 //No.13 FPU Conditional branch :
5776 pipe_class pipe_fpu_branch(regF src1, regF src2, label labl ) %{
5777 multiple_bundles;
5778 src1 : RD(read);
5779 src2 : RD(read);
5780 DECODE : ID;
5781 BR : RD;
5782 %}
5784 //No.14 Floating FPU reg operation : dst <-- op reg
5785 pipe_class fpu1_regF(regF dst, regF src) %{
5786 src : RD(read);
5787 dst : WB(write);
5788 DECODE : ID;
5789 FPU : CA;
5790 %}
5792 pipe_class long_memory_op() %{
5793 instruction_count(10); multiple_bundles; force_serialization;
5794 fixed_latency(30);
5795 %}
5797 pipe_class simple_call() %{
5798 instruction_count(10); multiple_bundles; force_serialization;
5799 fixed_latency(200);
5800 BR : RD;
5801 %}
5803 pipe_class call() %{
5804 instruction_count(10); multiple_bundles; force_serialization;
5805 fixed_latency(200);
5806 %}
5808 //FIXME:
5809 //No.9 Pipe slow : for multi-instructions
5810 pipe_class pipe_slow( ) %{
5811 instruction_count(20);
5812 force_serialization;
5813 multiple_bundles;
5814 fixed_latency(50);
5815 %}
5817 %}
5821 //----------INSTRUCTIONS-------------------------------------------------------
5822 //
5823 // match -- States which machine-independent subtree may be replaced
5824 // by this instruction.
5825 // ins_cost -- The estimated cost of this instruction is used by instruction
5826 // selection to identify a minimum cost tree of machine
5827 // instructions that matches a tree of machine-independent
5828 // instructions.
5829 // format -- A string providing the disassembly for this instruction.
5830 // The value of an instruction's operand may be inserted
5831 // by referring to it with a '$' prefix.
5832 // opcode -- Three instruction opcodes may be provided. These are referred
5833 // to within an encode class as $primary, $secondary, and $tertiary
5834 // respectively. The primary opcode is commonly used to
5835 // indicate the type of machine instruction, while secondary
5836 // and tertiary are often used for prefix options or addressing
5837 // modes.
5838 // ins_encode -- A list of encode classes with parameters. The encode class
5839 // name must have been defined in an 'enc_class' specification
5840 // in the encode section of the architecture description.
5843 // Load Integer
5844 instruct loadI(mRegI dst, memory mem) %{
5845 match(Set dst (LoadI mem));
// 32-bit integer load; the actual instruction sequence is produced by the
// load_I_enc encode class (presumably lw, or a multi-instruction sequence
// for the more complex addressing modes -- see the encode section).
5847 ins_cost(125);
5848 format %{ "lw $dst, $mem #@loadI" %}
5849 ins_encode (load_I_enc(dst, mem));
5850 ins_pipe( ialu_loadI );
5851 %}
// Fused LoadI + ConvI2L: the sign extension to long is folded into the
// load itself (on MIPS64, lw sign-extends the loaded word to 64 bits, so
// no separate extension instruction is needed).
5853 instruct loadI_convI2L(mRegL dst, memory mem) %{
5854 match(Set dst (ConvI2L (LoadI mem)));
5856 ins_cost(125);
5857 format %{ "lw $dst, $mem #@loadI_convI2L" %}
5858 ins_encode (load_I_enc(dst, mem));
5859 ins_pipe( ialu_loadI );
5860 %}
5862 // Load Integer (32 bit signed) to Byte (8 bit signed)
// (int << 24) >> 24 over a LoadI collapses to a single signed byte load (lb).
5863 instruct loadI2B(mRegI dst, memory mem, immI_24 twentyfour) %{
5864 match(Set dst (RShiftI (LShiftI (LoadI mem) twentyfour) twentyfour));
5866 ins_cost(125);
5867 format %{ "lb $dst, $mem\t# int -> byte #@loadI2B" %}
5868 ins_encode(load_B_enc(dst, mem));
5869 ins_pipe(ialu_loadI);
5870 %}
5872 // Load Integer (32 bit signed) to Unsigned Byte (8 bit UNsigned)
// LoadI masked with 0xFF collapses to an unsigned byte load (lbu).
5873 instruct loadI2UB(mRegI dst, memory mem, immI_255 mask) %{
5874 match(Set dst (AndI (LoadI mem) mask));
5876 ins_cost(125);
5877 format %{ "lbu $dst, $mem\t# int -> ubyte #@loadI2UB" %}
5878 ins_encode(load_UB_enc(dst, mem));
5879 ins_pipe(ialu_loadI);
5880 %}
5882 // Load Integer (32 bit signed) to Short (16 bit signed)
// (int << 16) >> 16 over a LoadI collapses to a single signed halfword load (lh).
5883 instruct loadI2S(mRegI dst, memory mem, immI_16 sixteen) %{
5884 match(Set dst (RShiftI (LShiftI (LoadI mem) sixteen) sixteen));
5886 ins_cost(125);
5887 format %{ "lh $dst, $mem\t# int -> short #@loadI2S" %}
5888 ins_encode(load_S_enc(dst, mem));
5889 ins_pipe(ialu_loadI);
5890 %}
5892 // Load Integer (32 bit signed) to Unsigned Short/Char (16 bit UNsigned)
// LoadI masked with 0xFFFF collapses to an unsigned halfword load (lhu).
5893 instruct loadI2US(mRegI dst, memory mem, immI_65535 mask) %{
5894 match(Set dst (AndI (LoadI mem) mask));
5896 ins_cost(125);
5897 format %{ "lhu $dst, $mem\t# int -> ushort/char #@loadI2US" %}
5898 ins_encode(load_C_enc(dst, mem));
5899 ins_pipe(ialu_loadI);
5900 %}
5902 // Load Long.
// Load a 64-bit long with ld. The atomic-access predicate is disabled:
// a single aligned ld is presumably atomic on this target — TODO confirm.
5903 instruct loadL(mRegL dst, memory mem) %{
5904 // predicate(!((LoadLNode*)n)->require_atomic_access());
5905 match(Set dst (LoadL mem));
5907 ins_cost(250);
5908 format %{ "ld $dst, $mem #@loadL" %}
5909 ins_encode(load_L_enc(dst, mem));
5910 ins_pipe( ialu_loadL );
5911 %}
5913 // Load Long - UNaligned
// Unaligned long load; currently emitted as a plain ld (same encoding as
// loadL) at a higher cost to discourage selection.
5914 instruct loadL_unaligned(mRegL dst, memory mem) %{
5915 match(Set dst (LoadL_unaligned mem));
5917 // FIXME: Jin: a more efficient ldl/ldr pair could be used here
5918 ins_cost(450);
5919 format %{ "ld $dst, $mem #@loadL_unaligned\n\t" %}
5920 ins_encode(load_L_enc(dst, mem));
5921 ins_pipe( ialu_loadL );
5922 %}
5924 // Store Long
// Store a 64-bit long register to memory with sd.
5925 instruct storeL_reg(memory mem, mRegL src) %{
5926 match(Set mem (StoreL mem src));
5928 ins_cost(200);
5929 format %{ "sd $mem, $src #@storeL_reg\n" %}
5930 ins_encode(store_L_reg_enc(mem, src));
5931 ins_pipe( ialu_storeL );
5932 %}
// Store long-zero: sd of R0, cheaper than materializing the constant first.
5935 instruct storeL_immL0(memory mem, immL0 zero) %{
5936 match(Set mem (StoreL mem zero));
5938 ins_cost(180);
5939 format %{ "sd $mem, zero #@storeL_immL0" %}
5940 ins_encode(store_L_immL0_enc(mem, zero));
5941 ins_pipe( ialu_storeL );
5942 %}
5944 // Load Compressed Pointer
// Load a compressed (narrow) oop: 32-bit zero-extending load (lwu).
5945 instruct loadN(mRegN dst, memory mem)
5946 %{
5947 match(Set dst (LoadN mem));
5949 ins_cost(125); // XXX
5950 format %{ "lwu $dst, $mem\t# compressed ptr @ loadN" %}
5951 ins_encode (load_N_enc(dst, mem));
5952 ins_pipe( ialu_loadI ); // XXX
5953 %}
5955 // Load Pointer
// Load a full-width (64-bit) pointer with ld.
5956 instruct loadP(mRegP dst, memory mem) %{
5957 match(Set dst (LoadP mem));
5959 ins_cost(125);
5960 format %{ "ld $dst, $mem #@loadP" %}
5961 ins_encode (load_P_enc(dst, mem));
5962 ins_pipe( ialu_loadI );
5963 %}
5965 // Load Klass Pointer
// Load an uncompressed klass pointer; same encoding as a plain pointer load.
5966 instruct loadKlass(mRegP dst, memory mem) %{
5967 match(Set dst (LoadKlass mem));
5969 ins_cost(125);
5970 format %{ "MOV $dst,$mem @ loadKlass" %}
5971 ins_encode (load_P_enc(dst, mem));
5972 ins_pipe( ialu_loadI );
5973 %}
5975 // Load narrow Klass Pointer
// Load a compressed (narrow) klass pointer: 32-bit zero-extending load (lwu).
5976 instruct loadNKlass(mRegN dst, memory mem)
5977 %{
5978 match(Set dst (LoadNKlass mem));
5980 ins_cost(125); // XXX
5981 format %{ "lwu $dst, $mem\t# compressed klass ptr @ loadNKlass" %}
5982 ins_encode (load_N_enc(dst, mem));
5983 ins_pipe( ialu_loadI ); // XXX
5984 %}
5986 // Load Constant
// Materialize an int constant into a register via MacroAssembler::move,
// which picks the shortest instruction sequence for the value.
5987 instruct loadConI(mRegI dst, immI src) %{
5988 match(Set dst src);
5990 ins_cost(150);
5991 format %{ "mov $dst, $src #@loadConI" %}
5992 ins_encode %{
5993 Register dst = $dst$$Register;
5994 int value = $src$$constant;
5995 __ move(dst, value);
5996 %}
5997 ins_pipe( ialu_regI_regI );
5998 %}
// Materialize an arbitrary 64-bit long constant with set64 (multi-instruction
// immediate build).
6001 instruct loadConL_set64(mRegL dst, immL src) %{
6002 match(Set dst src);
6003 ins_cost(120);
6004 format %{ "li $dst, $src @ loadConL_set64" %}
6005 ins_encode %{
6006 __ set64($dst$$Register, $src$$constant);
6007 %}
6008 ins_pipe(ialu_regL_regL);
6009 %}
6011 /*
6012 // Load long value from constant table (predicated by immL_expensive).
6013 instruct loadConL_load(mRegL dst, immL_expensive src) %{
6014 match(Set dst src);
6015 ins_cost(150);
6016 format %{ "ld $dst, $constantoffset[$constanttablebase] # load long $src from table @ loadConL_ldx" %}
6017 ins_encode %{
6018 int con_offset = $constantoffset($src);
6020 if (Assembler::is_simm16(con_offset)) {
6021 __ ld($dst$$Register, $constanttablebase, con_offset);
6022 } else {
6023 __ set64(AT, con_offset);
6024 if (UseLoongsonISA) {
6025 __ gsldx($dst$$Register, $constanttablebase, AT, 0);
6026 } else {
6027 __ daddu(AT, $constanttablebase, AT);
6028 __ ld($dst$$Register, AT, 0);
6029 }
6030 }
6031 %}
6032 ins_pipe(ialu_loadI);
6033 %}
6034 */
// Long constant that fits in a signed 16-bit immediate: single daddiu from R0.
// Cheaper than loadConL_set64, so preferred by the matcher via lower cost.
6036 instruct loadConL16(mRegL dst, immL16 src) %{
6037 match(Set dst src);
6038 ins_cost(105);
6039 format %{ "mov $dst, $src #@loadConL16" %}
6040 ins_encode %{
6041 Register dst_reg = as_Register($dst$$reg);
6042 int value = $src$$constant;
6043 __ daddiu(dst_reg, R0, value);
6044 %}
6045 ins_pipe( ialu_regL_regL );
6046 %}
// Long zero: daddu dst, R0, R0 — the cheapest form, lowest cost of the family.
6049 instruct loadConL0(mRegL dst, immL0 src) %{
6050 match(Set dst src);
6051 ins_cost(100);
6052 format %{ "mov $dst, zero #@loadConL0" %}
6053 ins_encode %{
6054 Register dst_reg = as_Register($dst$$reg);
6055 __ daddu(dst_reg, R0, R0);
6056 %}
6057 ins_pipe( ialu_regL_regL );
6058 %}
6060 // Load Range
// Load an array length (LoadRange); encoded as an ordinary 32-bit int load.
6061 instruct loadRange(mRegI dst, memory mem) %{
6062 match(Set dst (LoadRange mem));
6064 ins_cost(125);
6065 format %{ "MOV $dst,$mem @ loadRange" %}
6066 ins_encode(load_I_enc(dst, mem));
6067 ins_pipe( ialu_loadI );
6068 %}
// Store a full-width pointer register to memory with sd.
6071 instruct storeP(memory mem, mRegP src ) %{
6072 match(Set mem (StoreP mem src));
6074 ins_cost(125);
6075 format %{ "sd $src, $mem #@storeP" %}
6076 ins_encode(store_P_reg_enc(mem, src));
6077 ins_pipe( ialu_storeI );
6078 %}
6080 // Store NULL Pointer, mark word, or other simple pointer constant.
// Store a null pointer constant: sd of R0, no constant materialization needed.
6081 instruct storeImmP0(memory mem, immP0 zero) %{
6082 match(Set mem (StoreP mem zero));
6084 ins_cost(125);
6085 format %{ "mov $mem, $zero #@storeImmP0" %}
6086 ins_encode(store_P_immP0_enc(mem));
6087 ins_pipe( ialu_storeI );
6088 %}
6090 // Store Byte Immediate
// Store an 8-bit immediate byte to memory (constant goes through a scratch
// register inside the encode class).
6091 instruct storeImmB(memory mem, immI8 src) %{
6092 match(Set mem (StoreB mem src));
6094 ins_cost(150);
6095 format %{ "movb $mem, $src #@storeImmB" %}
6096 ins_encode(store_B_immI_enc(mem, src));
6097 ins_pipe( ialu_storeI );
6098 %}
6100 // Store Compressed Pointer
// Store a compressed oop: 32-bit store via store_N_reg_enc (which, per this
// patch, uses gsswx on Loongson CPUs).
6101 instruct storeN(memory mem, mRegN src)
6102 %{
6103 match(Set mem (StoreN mem src));
6105 ins_cost(125); // XXX
6106 format %{ "sw $mem, $src\t# compressed ptr @ storeN" %}
6107 ins_encode(store_N_reg_enc(mem, src));
6108 ins_pipe( ialu_storeI );
6109 %}
// Store a compressed klass pointer; shares the storeN encoding.
6111 instruct storeNKlass(memory mem, mRegN src)
6112 %{
6113 match(Set mem (StoreNKlass mem src));
6115 ins_cost(125); // XXX
6116 format %{ "sw $mem, $src\t# compressed klass ptr @ storeNKlass" %}
6117 ins_encode(store_N_reg_enc(mem, src));
6118 ins_pipe( ialu_storeI );
6119 %}
// Store narrow-oop null as a plain zero word. Only legal when both heap and
// klass bases are NULL, so encoded null == numeric zero — hence the predicate.
6121 instruct storeImmN0(memory mem, immN0 zero)
6122 %{
6123 predicate(Universe::narrow_oop_base() == NULL && Universe::narrow_klass_base() == NULL);
6124 match(Set mem (StoreN mem zero));
6126 ins_cost(125); // XXX
6127 format %{ "storeN0 $mem, R12\t# compressed ptr" %}
6128 ins_encode(storeImmN0_enc(mem, zero));
6129 ins_pipe( ialu_storeI );
6130 %}
6132 // Store Byte
// Store the low byte of an int register with sb.
6133 instruct storeB(memory mem, mRegI src) %{
6134 match(Set mem (StoreB mem src));
6136 ins_cost(125);
6137 format %{ "sb $src, $mem #@storeB" %}
6138 ins_encode(store_B_reg_enc(mem, src));
6139 ins_pipe( ialu_storeI );
6140 %}
// StoreB of (ConvL2I long): sb only writes the low byte, so the narrowing
// conversion is free and the long register is stored directly.
6142 instruct storeB_convL2I(memory mem, mRegL src) %{
6143 match(Set mem (StoreB mem (ConvL2I src)));
6145 ins_cost(125);
6146 format %{ "sb $src, $mem #@storeB_convL2I" %}
6147 ins_encode(store_B_reg_enc(mem, src));
6148 ins_pipe( ialu_storeI );
6149 %}
6151 // Load Byte (8bit signed)
// Load a signed byte with lb.
6152 instruct loadB(mRegI dst, memory mem) %{
6153 match(Set dst (LoadB mem));
6155 ins_cost(125);
6156 format %{ "lb $dst, $mem #@loadB" %}
6157 ins_encode(load_B_enc(dst, mem));
6158 ins_pipe( ialu_loadI );
6159 %}
// Fused LoadB + ConvI2L: lb sign-extends to 64 bits, so conversion is free.
6161 instruct loadB_convI2L(mRegL dst, memory mem) %{
6162 match(Set dst (ConvI2L (LoadB mem)));
6164 ins_cost(125);
6165 format %{ "lb $dst, $mem #@loadB_convI2L" %}
6166 ins_encode(load_B_enc(dst, mem));
6167 ins_pipe( ialu_loadI );
6168 %}
6170 // Load Byte (8bit UNsigned)
// Load an unsigned byte with lbu (zero-extending).
6171 instruct loadUB(mRegI dst, memory mem) %{
6172 match(Set dst (LoadUB mem));
6174 ins_cost(125);
6175 format %{ "lbu $dst, $mem #@loadUB" %}
6176 ins_encode(load_UB_enc(dst, mem));
6177 ins_pipe( ialu_loadI );
6178 %}
// Fused LoadUB + ConvI2L: lbu zero-extends, so widening to long is free.
6180 instruct loadUB_convI2L(mRegL dst, memory mem) %{
6181 match(Set dst (ConvI2L (LoadUB mem)));
6183 ins_cost(125);
6184 format %{ "lbu $dst, $mem #@loadUB_convI2L" %}
6185 ins_encode(load_UB_enc(dst, mem));
6186 ins_pipe( ialu_loadI );
6187 %}
6189 // Load Short (16bit signed)
// Load a signed 16-bit short with lh.
6190 instruct loadS(mRegI dst, memory mem) %{
6191 match(Set dst (LoadS mem));
6193 ins_cost(125);
6194 format %{ "lh $dst, $mem #@loadS" %}
6195 ins_encode(load_S_enc(dst, mem));
6196 ins_pipe( ialu_loadI );
6197 %}
6199 // Load Short (16 bit signed) to Byte (8 bit signed)
// (short << 24) >> 24 over a LoadS collapses to a single signed byte load (lb).
6200 instruct loadS2B(mRegI dst, memory mem, immI_24 twentyfour) %{
6201 match(Set dst (RShiftI (LShiftI (LoadS mem) twentyfour) twentyfour));
6203 ins_cost(125);
6204 format %{ "lb $dst, $mem\t# short -> byte #@loadS2B" %}
6205 ins_encode(load_B_enc(dst, mem));
6206 ins_pipe(ialu_loadI);
6207 %}
// Fused LoadS + ConvI2L: lh sign-extends to 64 bits, so conversion is free.
6209 instruct loadS_convI2L(mRegL dst, memory mem) %{
6210 match(Set dst (ConvI2L (LoadS mem)));
6212 ins_cost(125);
6213 format %{ "lh $dst, $mem #@loadS_convI2L" %}
6214 ins_encode(load_S_enc(dst, mem));
6215 ins_pipe( ialu_loadI );
6216 %}
6218 // Store Integer Immediate
// Store an int immediate to memory (constant materialized inside the encode class).
6219 instruct storeImmI(memory mem, immI src) %{
6220 match(Set mem (StoreI mem src));
6222 ins_cost(150);
6223 format %{ "mov $mem, $src #@storeImmI" %}
6224 ins_encode(store_I_immI_enc(mem, src));
6225 ins_pipe( ialu_storeI );
6226 %}
6228 // Store Integer
// Store a 32-bit int register to memory with sw.
6229 instruct storeI(memory mem, mRegI src) %{
6230 match(Set mem (StoreI mem src));
6232 ins_cost(125);
6233 format %{ "sw $mem, $src #@storeI" %}
6234 ins_encode(store_I_reg_enc(mem, src));
6235 ins_pipe( ialu_storeI );
6236 %}
// StoreI of (ConvL2I long): sw writes only the low 32 bits, so the narrowing
// conversion is free and the long register is stored directly.
6238 instruct storeI_convL2I(memory mem, mRegL src) %{
6239 match(Set mem (StoreI mem (ConvL2I src)));
6241 ins_cost(125);
6242 format %{ "sw $mem, $src #@storeI_convL2I" %}
6243 ins_encode(store_I_reg_enc(mem, src));
6244 ins_pipe( ialu_storeI );
6245 %}
6247 // Load Float
// Load a single-precision float from memory into an FP register.
6248 instruct loadF(regF dst, memory mem) %{
6249 match(Set dst (LoadF mem));
6251 ins_cost(150);
6252 format %{ "loadF $dst, $mem #@loadF" %}
6253 ins_encode(load_F_enc(dst, mem));
6254 ins_pipe( ialu_loadI );
6255 %}
// Materialize a pointer constant, recording relocation info when the constant
// is an oop or metadata so GC / class redefinition can patch it. The fixed-
// length li48 is used for relocatable values (patchable, constant size);
// plain values use set64 (shortest sequence).
6257 instruct loadConP_general(mRegP dst, immP src) %{
6258 match(Set dst src);
6260 ins_cost(120);
6261 format %{ "li $dst, $src #@loadConP_general" %}
6263 ins_encode %{
6264 Register dst = $dst$$Register;
6265 long* value = (long*)$src$$constant;
6267 if($src->constant_reloc() == relocInfo::metadata_type){
// Metadata (Klass*) constant: register it and emit a metadata relocation.
6268 int klass_index = __ oop_recorder()->find_index((Klass*)value);
6269 RelocationHolder rspec = metadata_Relocation::spec(klass_index);
6271 __ relocate(rspec);
6272 __ li48(dst, (long)value);
6273 }else if($src->constant_reloc() == relocInfo::oop_type){
// Oop constant: register the jobject and emit an oop relocation.
6274 int oop_index = __ oop_recorder()->find_index((jobject)value);
6275 RelocationHolder rspec = oop_Relocation::spec(oop_index);
6277 __ relocate(rspec);
6278 __ li48(dst, (long)value);
6279 } else if ($src->constant_reloc() == relocInfo::none) {
// No relocation needed: any other reloc type falls through emitting nothing —
// NOTE(review): presumably unreachable for immP here; confirm.
6280 __ set64(dst, (long)value);
6281 }
6282 %}
6284 ins_pipe( ialu_regI_regI );
6285 %}
6287 /*
6288 instruct loadConP_load(mRegP dst, immP_load src) %{
6289 match(Set dst src);
6291 ins_cost(100);
6292 format %{ "ld $dst, [$constanttablebase + $constantoffset] load from constant table: ptr=$src @ loadConP_load" %}
6294 ins_encode %{
6296 int con_offset = $constantoffset($src);
6298 if (Assembler::is_simm16(con_offset)) {
6299 __ ld($dst$$Register, $constanttablebase, con_offset);
6300 } else {
6301 __ set64(AT, con_offset);
6302 if (UseLoongsonISA) {
6303 __ gsldx($dst$$Register, $constanttablebase, AT, 0);
6304 } else {
6305 __ daddu(AT, $constanttablebase, AT);
6306 __ ld($dst$$Register, AT, 0);
6307 }
6308 }
6309 %}
6311 ins_pipe(ialu_loadI);
6312 %}
6313 */
// Non-oop pointer constant that is cheap to build inline with set64
// (no relocation record required).
6315 instruct loadConP_no_oop_cheap(mRegP dst, immP_no_oop_cheap src) %{
6316 match(Set dst src);
6318 ins_cost(80);
6319 format %{ "li $dst, $src @ loadConP_no_oop_cheap" %}
6321 ins_encode %{
6322 __ set64($dst$$Register, $src$$constant);
6323 %}
6325 ins_pipe(ialu_regI_regI);
6326 %}
// Materialize the safepoint polling-page address.
6329 instruct loadConP_poll(mRegP dst, immP_poll src) %{
6330 match(Set dst src);
6332 ins_cost(50);
6333 format %{ "li $dst, $src #@loadConP_poll" %}
6335 ins_encode %{
6336 Register dst = $dst$$Register;
6337 intptr_t value = (intptr_t)$src$$constant;
6339 __ set64(dst, (jlong)value);
6340 %}
6342 ins_pipe( ialu_regI_regI );
6343 %}
// Null pointer constant: daddu dst, R0, R0.
6345 instruct loadConP0(mRegP dst, immP0 src)
6346 %{
6347 match(Set dst src);
6349 ins_cost(50);
6350 format %{ "mov $dst, R0\t# ptr" %}
6351 ins_encode %{
6352 Register dst_reg = $dst$$Register;
6353 __ daddu(dst_reg, R0, R0);
6354 %}
6355 ins_pipe( ialu_regI_regI );
6356 %}
// Compressed null: a narrow null always encodes as numeric zero.
6358 instruct loadConN0(mRegN dst, immN0 src) %{
6359 match(Set dst src);
6360 format %{ "move $dst, R0\t# compressed NULL ptr" %}
6361 ins_encode %{
6362 __ move($dst$$Register, R0);
6363 %}
6364 ins_pipe( ialu_regI_regI );
6365 %}
// Materialize a narrow-oop constant; set_narrow_oop records the oop
// relocation so the GC can patch the embedded value.
6367 instruct loadConN(mRegN dst, immN src) %{
6368 match(Set dst src);
6370 ins_cost(125);
6371 format %{ "li $dst, $src\t# compressed ptr @ loadConN" %}
6372 ins_encode %{
6373 Register dst = $dst$$Register;
6374 __ set_narrow_oop(dst, (jobject)$src$$constant);
6375 %}
6376 ins_pipe( ialu_regI_regI ); // XXX
6377 %}
// Materialize a narrow-klass constant; set_narrow_klass records the
// metadata relocation.
6379 instruct loadConNKlass(mRegN dst, immNKlass src) %{
6380 match(Set dst src);
6382 ins_cost(125);
6383 format %{ "li $dst, $src\t# compressed klass ptr @ loadConNKlass" %}
6384 ins_encode %{
6385 Register dst = $dst$$Register;
6386 __ set_narrow_klass(dst, (Klass*)$src$$constant);
6387 %}
6388 ins_pipe( ialu_regI_regI ); // XXX
6389 %}
6391 //FIXME
6392 // Tail Call; Jump from runtime stub to Java code.
6393 // Also known as an 'interprocedural jump'.
6394 // Target of jump will eventually return to caller.
6395 // TailJump below removes the return address.
// Tail call: jump (not call) from a runtime stub into Java code.
// The method oop is handed over in S3; RA is pushed first because
// generate_forward_exception() expects the return address on the stack.
6396 instruct TailCalljmpInd(mRegP jump_target, mRegP method_oop) %{
6397 match(TailCall jump_target method_oop );
6398 ins_cost(300);
6399 format %{ "JMP $jump_target \t# @TailCalljmpInd" %}
6401 ins_encode %{
6402 Register target = $jump_target$$Register;
6403 Register oop = $method_oop$$Register;
6405 /* 2012/10/12 Jin: RA will be used in generate_forward_exception() */
6406 __ push(RA);
6408 __ move(S3, oop);
6409 __ jr(target);
// Branch-delay slot of jr must be filled.
6410 __ nop();
6411 %}
6413 ins_pipe( pipe_jump );
6414 %}
6416 // Create exception oop: created by stack-crawling runtime code.
6417 // Created exception is now available to this handler, and is setup
6418 // just prior to jumping to this handler. No code emitted.
// Pseudo-instruction: the exception oop is already in A0 when control
// reaches the handler, so no code is emitted (only a block comment).
6419 instruct CreateException( a0_RegP ex_oop )
6420 %{
6421 match(Set ex_oop (CreateEx));
6423 // use the following format syntax
6424 format %{ "# exception oop is in A0; no code emitted @CreateException" %}
6425 ins_encode %{
6426 /* Jin: X86 leaves this function empty */
6427 __ block_comment("CreateException is empty in X86/MIPS");
6428 %}
6429 ins_pipe( empty );
6430 // ins_pipe( pipe_jump );
6431 %}
6434 /* 2012/9/14 Jin: The mechanism of exception handling is clear now.
6436 - Common try/catch:
6437 2012/9/14 Jin: [stubGenerator_mips.cpp] generate_forward_exception()
6438 |- V0, V1 are created
6439 |- T9 <= SharedRuntime::exception_handler_for_return_address
6440 `- jr T9
6441 `- the caller's exception_handler
6442 `- jr OptoRuntime::exception_blob
6443 `- here
6444 - Rethrow(e.g. 'unwind'):
6445 * The callee:
6446 |- an exception is triggered during execution
6447 `- exits the callee method through RethrowException node
6448 |- The callee pushes exception_oop(T0) and exception_pc(RA)
6449 `- The callee jumps to OptoRuntime::rethrow_stub()
6450 * In OptoRuntime::rethrow_stub:
6451 |- The VM calls _rethrow_Java to determine the return address in the caller method
6452 `- exits the stub with tailjmpInd
6453 |- pops exception_oop(V0) and exception_pc(V1)
6454 `- jumps to the return address(usually an exception_handler)
6455 * The caller:
6456 `- continues processing the exception_blob with V0/V1
6457 */
6459 /*
6460 Disassembling OptoRuntime::rethrow_stub()
6462 ; locals
6463 0x2d3bf320: addiu sp, sp, 0xfffffff8
6464 0x2d3bf324: sw ra, 0x4(sp)
6465 0x2d3bf328: sw fp, 0x0(sp)
6466 0x2d3bf32c: addu fp, sp, zero
6467 0x2d3bf330: addiu sp, sp, 0xfffffff0
6468 0x2d3bf334: sw ra, 0x8(sp)
6469 0x2d3bf338: sw t0, 0x4(sp)
6470 0x2d3bf33c: sw sp, 0x0(sp)
6472 ; get_thread(S2)
6473 0x2d3bf340: addu s2, sp, zero
6474 0x2d3bf344: srl s2, s2, 12
6475 0x2d3bf348: sll s2, s2, 2
6476 0x2d3bf34c: lui at, 0x2c85
6477 0x2d3bf350: addu at, at, s2
6478 0x2d3bf354: lw s2, 0xffffcc80(at)
6480 0x2d3bf358: lw s0, 0x0(sp)
6481 0x2d3bf35c: sw s0, 0x118(s2) // last_sp -> thread
6482 0x2d3bf360: sw s2, 0xc(sp)
6484 ; OptoRuntime::rethrow_C(oopDesc* exception, JavaThread* thread, address ret_pc)
6485 0x2d3bf364: lw a0, 0x4(sp)
6486 0x2d3bf368: lw a1, 0xc(sp)
6487 0x2d3bf36c: lw a2, 0x8(sp)
6488 ;; Java_To_Runtime
6489 0x2d3bf370: lui t9, 0x2c34
6490 0x2d3bf374: addiu t9, t9, 0xffff8a48
6491 0x2d3bf378: jalr t9
6492 0x2d3bf37c: nop
6494 0x2d3bf380: addu s3, v0, zero ; S3: SharedRuntime::raw_exception_handler_for_return_address()
6496 0x2d3bf384: lw s0, 0xc(sp)
6497 0x2d3bf388: sw zero, 0x118(s0)
6498 0x2d3bf38c: sw zero, 0x11c(s0)
6499 0x2d3bf390: lw s1, 0x144(s0) ; ex_oop: S1
6500 0x2d3bf394: addu s2, s0, zero
6501 0x2d3bf398: sw zero, 0x144(s2)
6502 0x2d3bf39c: lw s0, 0x4(s2)
6503 0x2d3bf3a0: addiu s4, zero, 0x0
6504 0x2d3bf3a4: bne s0, s4, 0x2d3bf3d4
6505 0x2d3bf3a8: nop
6506 0x2d3bf3ac: addiu sp, sp, 0x10
6507 0x2d3bf3b0: addiu sp, sp, 0x8
6508 0x2d3bf3b4: lw ra, 0xfffffffc(sp)
6509 0x2d3bf3b8: lw fp, 0xfffffff8(sp)
6510 0x2d3bf3bc: lui at, 0x2b48
6511 0x2d3bf3c0: lw at, 0x100(at)
6513 ; tailjmpInd: Restores exception_oop & exception_pc
6514 0x2d3bf3c4: addu v1, ra, zero
6515 0x2d3bf3c8: addu v0, s1, zero
6516 0x2d3bf3cc: jr s3
6517 0x2d3bf3d0: nop
6518 ; Exception:
6519 0x2d3bf3d4: lui s1, 0x2cc8 ; generate_forward_exception()
6520 0x2d3bf3d8: addiu s1, s1, 0x40
6521 0x2d3bf3dc: addiu s2, zero, 0x0
6522 0x2d3bf3e0: addiu sp, sp, 0x10
6523 0x2d3bf3e4: addiu sp, sp, 0x8
6524 0x2d3bf3e8: lw ra, 0xfffffffc(sp)
6525 0x2d3bf3ec: lw fp, 0xfffffff8(sp)
6526 0x2d3bf3f0: lui at, 0x2b48
6527 0x2d3bf3f4: lw at, 0x100(at)
6528 ; TailCalljmpInd
6529 __ push(RA); ; to be used in generate_forward_exception()
6530 0x2d3bf3f8: addu t7, s2, zero
6531 0x2d3bf3fc: jr s1
6532 0x2d3bf400: nop
6533 */
6534 // Rethrow exception:
6535 // The exception oop will come in the first argument position.
6536 // Then JUMP (not call) to the rethrow stub code.
// Rethrow: the exception oop is in the first argument register; JUMP (not
// call) to OptoRuntime::rethrow_stub(), which finds the handler in the caller.
6537 instruct RethrowException()
6538 %{
6539 match(Rethrow);
6541 // use the following format syntax
6542 format %{ "JMP rethrow_stub #@RethrowException" %}
6543 ins_encode %{
6544 __ block_comment("@ RethrowException");
// Mark and relocate the call site so the runtime-call target can be patched.
6546 cbuf.set_insts_mark();
6547 cbuf.relocate(cbuf.insts_mark(), runtime_call_Relocation::spec());
6549 // call OptoRuntime::rethrow_stub to get the exception handler in parent method
6550 __ li(T9, OptoRuntime::rethrow_stub());
6551 __ jr(T9);
6552 __ nop();
6553 %}
6554 ins_pipe( pipe_jump );
6555 %}
// Branch on pointer compared against NULL. Only eq/ne are meaningful for a
// null compare; the unsigned orderings are commented out below.
// NOTE: `&L` recovers the label pointer from the reference; it can be null
// during C2's size-estimation pass, in which case a branch with offset 0 is
// emitted as a same-size placeholder — do not "simplify" the guard away.
6557 instruct branchConP_zero(cmpOpU cmp, mRegP op1, immP0 zero, label labl) %{
6558 match(If cmp (CmpP op1 zero));
6559 effect(USE labl);
6561 ins_cost(180);
6562 format %{ "b$cmp $op1, R0, $labl #@branchConP_zero" %}
6564 ins_encode %{
6565 Register op1 = $op1$$Register;
6566 Register op2 = R0;
6567 Label &L = *($labl$$label);
6568 int flag = $cmp$$cmpcode;
6570 switch(flag)
6571 {
6572 case 0x01: //equal
6573 if (&L)
6574 __ beq(op1, op2, L);
6575 else
6576 __ beq(op1, op2, (int)0);
6577 break;
6578 case 0x02: //not_equal
6579 if (&L)
6580 __ bne(op1, op2, L);
6581 else
6582 __ bne(op1, op2, (int)0);
6583 break;
6584 /*
6585 case 0x03: //above
6586 __ sltu(AT, op2, op1);
6587 if(&L)
6588 __ bne(R0, AT, L);
6589 else
6590 __ bne(R0, AT, (int)0);
6591 break;
6592 case 0x04: //above_equal
6593 __ sltu(AT, op1, op2);
6594 if(&L)
6595 __ beq(AT, R0, L);
6596 else
6597 __ beq(AT, R0, (int)0);
6598 break;
6599 case 0x05: //below
6600 __ sltu(AT, op1, op2);
6601 if(&L)
6602 __ bne(R0, AT, L);
6603 else
6604 __ bne(R0, AT, (int)0);
6605 break;
6606 case 0x06: //below_equal
6607 __ sltu(AT, op2, op1);
6608 if(&L)
6609 __ beq(AT, R0, L);
6610 else
6611 __ beq(AT, R0, (int)0);
6612 break;
6613 */
6614 default:
6615 Unimplemented();
6616 }
// Fill the branch-delay slot.
6617 __ nop();
6618 %}
6620 ins_pc_relative(1);
6621 ins_pipe( pipe_alu_branch );
6622 %}
// Branch on unsigned pointer comparison of two registers. Ordered cases
// materialize the condition into scratch register AT with sltu, then branch
// on AT against R0. `&L` may be null during size estimation (see
// branchConP_zero), hence the guarded branch forms.
6625 instruct branchConP(cmpOpU cmp, mRegP op1, mRegP op2, label labl) %{
6626 match(If cmp (CmpP op1 op2));
6627 // predicate(can_branch_register(_kids[0]->_leaf, _kids[1]->_leaf));
6628 effect(USE labl);
6630 ins_cost(200);
6631 format %{ "b$cmp $op1, $op2, $labl #@branchConP" %}
6633 ins_encode %{
6634 Register op1 = $op1$$Register;
6635 Register op2 = $op2$$Register;
6636 Label &L = *($labl$$label);
6637 int flag = $cmp$$cmpcode;
6639 switch(flag)
6640 {
6641 case 0x01: //equal
6642 if (&L)
6643 __ beq(op1, op2, L);
6644 else
6645 __ beq(op1, op2, (int)0);
6646 break;
6647 case 0x02: //not_equal
6648 if (&L)
6649 __ bne(op1, op2, L);
6650 else
6651 __ bne(op1, op2, (int)0);
6652 break;
6653 case 0x03: //above
6654 __ sltu(AT, op2, op1);
6655 if(&L)
6656 __ bne(R0, AT, L);
6657 else
6658 __ bne(R0, AT, (int)0);
6659 break;
6660 case 0x04: //above_equal
6661 __ sltu(AT, op1, op2);
6662 if(&L)
6663 __ beq(AT, R0, L);
6664 else
6665 __ beq(AT, R0, (int)0);
6666 break;
6667 case 0x05: //below
6668 __ sltu(AT, op1, op2);
6669 if(&L)
6670 __ bne(R0, AT, L);
6671 else
6672 __ bne(R0, AT, (int)0);
6673 break;
6674 case 0x06: //below_equal
6675 __ sltu(AT, op2, op1);
6676 if(&L)
6677 __ beq(AT, R0, L);
6678 else
6679 __ beq(AT, R0, (int)0);
6680 break;
6681 default:
6682 Unimplemented();
6683 }
// Fill the branch-delay slot.
6684 __ nop();
6685 %}
6687 ins_pc_relative(1);
6688 ins_pipe( pipe_alu_branch );
6689 %}
// Branch on a compressed oop compared against narrow-null (numeric zero);
// only eq/ne apply. The null-label guard follows the same size-estimation
// convention as branchConP_zero.
6691 instruct cmpN_null_branch(cmpOp cmp, mRegN op1, immN0 null, label labl) %{
6692 match(If cmp (CmpN op1 null));
6693 effect(USE labl);
6695 ins_cost(180);
6696 format %{ "CMP $op1,0\t! compressed ptr\n\t"
6697 "BP$cmp $labl @ cmpN_null_branch" %}
6698 ins_encode %{
6699 Register op1 = $op1$$Register;
6700 Register op2 = R0;
6701 Label &L = *($labl$$label);
6702 int flag = $cmp$$cmpcode;
6704 switch(flag)
6705 {
6706 case 0x01: //equal
6707 if (&L)
6708 __ beq(op1, op2, L);
6709 else
6710 __ beq(op1, op2, (int)0);
6711 break;
6712 case 0x02: //not_equal
6713 if (&L)
6714 __ bne(op1, op2, L);
6715 else
6716 __ bne(op1, op2, (int)0);
6717 break;
6718 default:
6719 Unimplemented();
6720 }
// Fill the branch-delay slot.
6721 __ nop();
6722 %}
6723 //TODO: pipe_branchP or create pipe_branchN LEE
6724 ins_pc_relative(1);
6725 ins_pipe( pipe_alu_branch );
6726 %}
// Branch on comparison of two compressed oops. Ordered cases use unsigned
// sltu into scratch AT (compressed oops compare as unsigned 32-bit values).
6728 instruct cmpN_reg_branch(cmpOp cmp, mRegN op1, mRegN op2, label labl) %{
6729 match(If cmp (CmpN op1 op2));
6730 effect(USE labl);
6732 ins_cost(180);
6733 format %{ "CMP $op1,$op2\t! compressed ptr\n\t"
6734 "BP$cmp $labl" %}
6735 ins_encode %{
6736 Register op1_reg = $op1$$Register;
6737 Register op2_reg = $op2$$Register;
6738 Label &L = *($labl$$label);
6739 int flag = $cmp$$cmpcode;
6741 switch(flag)
6742 {
6743 case 0x01: //equal
6744 if (&L)
6745 __ beq(op1_reg, op2_reg, L);
6746 else
6747 __ beq(op1_reg, op2_reg, (int)0);
6748 break;
6749 case 0x02: //not_equal
6750 if (&L)
6751 __ bne(op1_reg, op2_reg, L);
6752 else
6753 __ bne(op1_reg, op2_reg, (int)0);
6754 break;
6755 case 0x03: //above
6756 __ sltu(AT, op2_reg, op1_reg);
6757 if(&L)
6758 __ bne(R0, AT, L);
6759 else
6760 __ bne(R0, AT, (int)0);
6761 break;
6762 case 0x04: //above_equal
6763 __ sltu(AT, op1_reg, op2_reg);
6764 if(&L)
6765 __ beq(AT, R0, L);
6766 else
6767 __ beq(AT, R0, (int)0);
6768 break;
6769 case 0x05: //below
6770 __ sltu(AT, op1_reg, op2_reg);
6771 if(&L)
6772 __ bne(R0, AT, L);
6773 else
6774 __ bne(R0, AT, (int)0);
6775 break;
6776 case 0x06: //below_equal
6777 __ sltu(AT, op2_reg, op1_reg);
6778 if(&L)
6779 __ beq(AT, R0, L);
6780 else
6781 __ beq(AT, R0, (int)0);
6782 break;
6783 default:
6784 Unimplemented();
6785 }
// Fill the branch-delay slot.
6786 __ nop();
6787 %}
6788 ins_pc_relative(1);
6789 ins_pipe( pipe_alu_branch );
6790 %}
// Branch on UNSIGNED int comparison of two registers (CmpU): sltu into AT,
// then branch on AT. The null-label guard mirrors branchConP_zero.
6792 instruct branchConIU_reg_reg(cmpOpU cmp, mRegI src1, mRegI src2, label labl) %{
6793 match( If cmp (CmpU src1 src2) );
6794 effect(USE labl);
6795 format %{ "BR$cmp $src1, $src2, $labl #@branchConIU_reg_reg" %}
6797 ins_encode %{
6798 Register op1 = $src1$$Register;
6799 Register op2 = $src2$$Register;
6800 Label &L = *($labl$$label);
6801 int flag = $cmp$$cmpcode;
6803 switch(flag)
6804 {
6805 case 0x01: //equal
6806 if (&L)
6807 __ beq(op1, op2, L);
6808 else
6809 __ beq(op1, op2, (int)0);
6810 break;
6811 case 0x02: //not_equal
6812 if (&L)
6813 __ bne(op1, op2, L);
6814 else
6815 __ bne(op1, op2, (int)0);
6816 break;
6817 case 0x03: //above
6818 __ sltu(AT, op2, op1);
6819 if(&L)
6820 __ bne(AT, R0, L);
6821 else
6822 __ bne(AT, R0, (int)0);
6823 break;
6824 case 0x04: //above_equal
6825 __ sltu(AT, op1, op2);
6826 if(&L)
6827 __ beq(AT, R0, L);
6828 else
6829 __ beq(AT, R0, (int)0);
6830 break;
6831 case 0x05: //below
6832 __ sltu(AT, op1, op2);
6833 if(&L)
6834 __ bne(AT, R0, L);
6835 else
6836 __ bne(AT, R0, (int)0);
6837 break;
6838 case 0x06: //below_equal
6839 __ sltu(AT, op2, op1);
6840 if(&L)
6841 __ beq(AT, R0, L);
6842 else
6843 __ beq(AT, R0, (int)0);
6844 break;
6845 default:
6846 Unimplemented();
6847 }
// Fill the branch-delay slot.
6848 __ nop();
6849 %}
6851 ins_pc_relative(1);
6852 ins_pipe( pipe_alu_branch );
6853 %}
// Branch on UNSIGNED int compared with an immediate. The immediate is first
// materialized into AT with move(); ordered cases then overwrite AT with the
// sltu result (AT is the designated assembler scratch register).
6856 instruct branchConIU_reg_imm(cmpOpU cmp, mRegI src1, immI src2, label labl) %{
6857 match( If cmp (CmpU src1 src2) );
6858 effect(USE labl);
6859 format %{ "BR$cmp $src1, $src2, $labl #@branchConIU_reg_imm" %}
6861 ins_encode %{
6862 Register op1 = $src1$$Register;
6863 int val = $src2$$constant;
6864 Label &L = *($labl$$label);
6865 int flag = $cmp$$cmpcode;
6867 __ move(AT, val);
6868 switch(flag)
6869 {
6870 case 0x01: //equal
6871 if (&L)
6872 __ beq(op1, AT, L);
6873 else
6874 __ beq(op1, AT, (int)0);
6875 break;
6876 case 0x02: //not_equal
6877 if (&L)
6878 __ bne(op1, AT, L);
6879 else
6880 __ bne(op1, AT, (int)0);
6881 break;
6882 case 0x03: //above
6883 __ sltu(AT, AT, op1);
6884 if(&L)
6885 __ bne(R0, AT, L);
6886 else
6887 __ bne(R0, AT, (int)0);
6888 break;
6889 case 0x04: //above_equal
6890 __ sltu(AT, op1, AT);
6891 if(&L)
6892 __ beq(AT, R0, L);
6893 else
6894 __ beq(AT, R0, (int)0);
6895 break;
6896 case 0x05: //below
6897 __ sltu(AT, op1, AT);
6898 if(&L)
6899 __ bne(R0, AT, L);
6900 else
6901 __ bne(R0, AT, (int)0);
6902 break;
6903 case 0x06: //below_equal
6904 __ sltu(AT, AT, op1);
6905 if(&L)
6906 __ beq(AT, R0, L);
6907 else
6908 __ beq(AT, R0, (int)0);
6909 break;
6910 default:
6911 Unimplemented();
6912 }
// Fill the branch-delay slot.
6913 __ nop();
6914 %}
6916 ins_pc_relative(1);
6917 ins_pipe( pipe_alu_branch );
6918 %}
// Branch on SIGNED int comparison of two registers (CmpI): slt (signed
// set-less-than) into AT, then branch on AT against R0.
6920 instruct branchConI_reg_reg(cmpOp cmp, mRegI src1, mRegI src2, label labl) %{
6921 match( If cmp (CmpI src1 src2) );
6922 effect(USE labl);
6923 format %{ "BR$cmp $src1, $src2, $labl #@branchConI_reg_reg" %}
6925 ins_encode %{
6926 Register op1 = $src1$$Register;
6927 Register op2 = $src2$$Register;
6928 Label &L = *($labl$$label);
6929 int flag = $cmp$$cmpcode;
6931 switch(flag)
6932 {
6933 case 0x01: //equal
6934 if (&L)
6935 __ beq(op1, op2, L);
6936 else
6937 __ beq(op1, op2, (int)0);
6938 break;
6939 case 0x02: //not_equal
6940 if (&L)
6941 __ bne(op1, op2, L);
6942 else
6943 __ bne(op1, op2, (int)0);
6944 break;
6945 case 0x03: //above
6946 __ slt(AT, op2, op1);
6947 if(&L)
6948 __ bne(R0, AT, L);
6949 else
6950 __ bne(R0, AT, (int)0);
6951 break;
6952 case 0x04: //above_equal
6953 __ slt(AT, op1, op2);
6954 if(&L)
6955 __ beq(AT, R0, L);
6956 else
6957 __ beq(AT, R0, (int)0);
6958 break;
6959 case 0x05: //below
6960 __ slt(AT, op1, op2);
6961 if(&L)
6962 __ bne(R0, AT, L);
6963 else
6964 __ bne(R0, AT, (int)0);
6965 break;
6966 case 0x06: //below_equal
6967 __ slt(AT, op2, op1);
6968 if(&L)
6969 __ beq(AT, R0, L);
6970 else
6971 __ beq(AT, R0, (int)0);
6972 break;
6973 default:
6974 Unimplemented();
6975 }
// Fill the branch-delay slot.
6976 __ nop();
6977 %}
6979 ins_pc_relative(1);
6980 ins_pipe( pipe_alu_branch );
6981 %}
// Branch on SIGNED int compared with zero: MIPS has dedicated compare-with-
// zero branches (bgtz/bgez/bltz/blez), so no scratch register is needed.
6983 instruct branchConI_reg_imm0(cmpOp cmp, mRegI src1, immI0 src2, label labl) %{
6984 match( If cmp (CmpI src1 src2) );
6985 effect(USE labl);
6986 ins_cost(170);
6987 format %{ "BR$cmp $src1, $src2, $labl #@branchConI_reg_imm0" %}
6989 ins_encode %{
6990 Register op1 = $src1$$Register;
6991 // int val = $src2$$constant;
6992 Label &L = *($labl$$label);
6993 int flag = $cmp$$cmpcode;
6995 //__ move(AT, val);
6996 switch(flag)
6997 {
6998 case 0x01: //equal
6999 if (&L)
7000 __ beq(op1, R0, L);
7001 else
7002 __ beq(op1, R0, (int)0);
7003 break;
7004 case 0x02: //not_equal
7005 if (&L)
7006 __ bne(op1, R0, L);
7007 else
7008 __ bne(op1, R0, (int)0);
7009 break;
7010 case 0x03: //greater
7011 if(&L)
7012 __ bgtz(op1, L);
7013 else
7014 __ bgtz(op1, (int)0);
7015 break;
7016 case 0x04: //greater_equal
7017 if(&L)
7018 __ bgez(op1, L);
7019 else
7020 __ bgez(op1, (int)0);
7021 break;
7022 case 0x05: //less
7023 if(&L)
7024 __ bltz(op1, L);
7025 else
7026 __ bltz(op1, (int)0);
7027 break;
7028 case 0x06: //less_equal
7029 if(&L)
7030 __ blez(op1, L);
7031 else
7032 __ blez(op1, (int)0);
7033 break;
7034 default:
7035 Unimplemented();
7036 }
// Fill the branch-delay slot.
7037 __ nop();
7038 %}
7040 ins_pc_relative(1);
7041 ins_pipe( pipe_alu_branch );
7042 %}
// Branch on SIGNED int compared with a general immediate: materialize the
// constant into AT, then slt/branch as in branchConI_reg_reg. Ordered cases
// overwrite AT with the slt result.
7045 instruct branchConI_reg_imm(cmpOp cmp, mRegI src1, immI src2, label labl) %{
7046 match( If cmp (CmpI src1 src2) );
7047 effect(USE labl);
7048 ins_cost(200);
7049 format %{ "BR$cmp $src1, $src2, $labl #@branchConI_reg_imm" %}
7051 ins_encode %{
7052 Register op1 = $src1$$Register;
7053 int val = $src2$$constant;
7054 Label &L = *($labl$$label);
7055 int flag = $cmp$$cmpcode;
7057 __ move(AT, val);
7058 switch(flag)
7059 {
7060 case 0x01: //equal
7061 if (&L)
7062 __ beq(op1, AT, L);
7063 else
7064 __ beq(op1, AT, (int)0);
7065 break;
7066 case 0x02: //not_equal
7067 if (&L)
7068 __ bne(op1, AT, L);
7069 else
7070 __ bne(op1, AT, (int)0);
7071 break;
7072 case 0x03: //greater
7073 __ slt(AT, AT, op1);
7074 if(&L)
7075 __ bne(R0, AT, L);
7076 else
7077 __ bne(R0, AT, (int)0);
7078 break;
7079 case 0x04: //greater_equal
7080 __ slt(AT, op1, AT);
7081 if(&L)
7082 __ beq(AT, R0, L);
7083 else
7084 __ beq(AT, R0, (int)0);
7085 break;
7086 case 0x05: //less
7087 __ slt(AT, op1, AT);
7088 if(&L)
7089 __ bne(R0, AT, L);
7090 else
7091 __ bne(R0, AT, (int)0);
7092 break;
7093 case 0x06: //less_equal
7094 __ slt(AT, AT, op1);
7095 if(&L)
7096 __ beq(AT, R0, L);
7097 else
7098 __ beq(AT, R0, (int)0);
7099 break;
7100 default:
7101 Unimplemented();
7102 }
// Fill the branch-delay slot.
7103 __ nop();
7104 %}
7106 ins_pc_relative(1);
7107 ins_pipe( pipe_alu_branch );
7108 %}
// Branch on UNSIGNED int compared with zero. Several cases degenerate:
// above (>0u) == not-equal; above_equal (>=0u) is always taken (beq R0,R0);
// below (<0u) is never taken, so nothing is emitted and the encode returns
// early (also skipping the delay-slot nop); below_equal (<=0u) == equal.
7110 instruct branchConIU_reg_imm0(cmpOpU cmp, mRegI src1, immI0 zero, label labl) %{
7111 match( If cmp (CmpU src1 zero) );
7112 effect(USE labl);
7113 format %{ "BR$cmp $src1, zero, $labl #@branchConIU_reg_imm0" %}
7115 ins_encode %{
7116 Register op1 = $src1$$Register;
7117 Label &L = *($labl$$label);
7118 int flag = $cmp$$cmpcode;
7120 switch(flag)
7121 {
7122 case 0x01: //equal
7123 if (&L)
7124 __ beq(op1, R0, L);
7125 else
7126 __ beq(op1, R0, (int)0);
7127 break;
7128 case 0x02: //not_equal
7129 if (&L)
7130 __ bne(op1, R0, L);
7131 else
7132 __ bne(op1, R0, (int)0);
7133 break;
7134 case 0x03: //above
7135 if(&L)
7136 __ bne(R0, op1, L);
7137 else
7138 __ bne(R0, op1, (int)0);
7139 break;
7140 case 0x04: //above_equal
7141 if(&L)
7142 __ beq(R0, R0, L);
7143 else
7144 __ beq(R0, R0, (int)0);
7145 break;
7146 case 0x05: //below
// Unsigned value < 0 is impossible: emit no code at all.
7147 return;
7148 break;
7149 case 0x06: //below_equal
7150 if(&L)
7151 __ beq(op1, R0, L);
7152 else
7153 __ beq(op1, R0, (int)0);
7154 break;
7155 default:
7156 Unimplemented();
7157 }
// Fill the branch-delay slot.
7158 __ nop();
7159 %}
7161 ins_pc_relative(1);
7162 ins_pipe( pipe_alu_branch );
7163 %}
// Branch on unsigned 32-bit compare of a register against a 16-bit
// immediate.  above_equal/below use sltiu directly on the immediate; the
// other cases first materialize the immediate into AT.
instruct branchConIU_reg_immI16(cmpOpU cmp, mRegI src1, immI16 src2, label labl) %{
  match( If cmp (CmpU src1 src2) );
  effect(USE labl);
  ins_cost(180);
  format %{ "BR$cmp $src1, $src2, $labl #@branchConIU_reg_immI16" %}

  ins_encode %{
    Register op1  = $src1$$Register;
    int      val  = $src2$$constant;
    // Test the Label pointer directly: the old `Label &L = *(...); if (&L)`
    // pattern was an always-true null check (UB when the pointer is null).
    Label*   L    = $labl$$label;
    int      flag = $cmp$$cmpcode;

    switch(flag) {
      case 0x01: //equal
        __ move(AT, val);
        if (L)
          __ beq(op1, AT, *L);
        else
          __ beq(op1, AT, (int)0);
        break;
      case 0x02: //not_equal
        __ move(AT, val);
        if (L)
          __ bne(op1, AT, *L);
        else
          __ bne(op1, AT, (int)0);
        break;
      case 0x03: //above
        __ move(AT, val);
        __ sltu(AT, AT, op1);
        if (L)
          __ bne(R0, AT, *L);
        else
          __ bne(R0, AT, (int)0);
        break;
      case 0x04: //above_equal
        __ sltiu(AT, op1, val);
        if (L)
          __ beq(AT, R0, *L);
        else
          __ beq(AT, R0, (int)0);
        break;
      case 0x05: //below
        __ sltiu(AT, op1, val);
        if (L)
          __ bne(R0, AT, *L);
        else
          __ bne(R0, AT, (int)0);
        break;
      case 0x06: //below_equal
        __ move(AT, val);
        __ sltu(AT, AT, op1);
        if (L)
          __ beq(AT, R0, *L);
        else
          __ beq(AT, R0, (int)0);
        break;
      default:
        Unimplemented();
    }
    __ nop();   // branch delay slot
  %}

  ins_pc_relative(1);
  ins_pipe( pipe_alu_branch );
%}
// Branch on signed 64-bit (long) register-register compare.  Each case emits
// its own delayed()->nop() to fill the branch delay slot.
instruct branchConL_regL_regL(cmpOp cmp, mRegL src1, mRegL src2, label labl) %{
  match( If cmp (CmpL src1 src2) );
  effect(USE labl);
  format %{ "BR$cmp $src1, $src2, $labl #@branchConL_regL_regL" %}
  ins_cost(250);

  ins_encode %{
    Register opr1_reg = as_Register($src1$$reg);
    Register opr2_reg = as_Register($src2$$reg);
    // Test the Label pointer directly: the old `Label &target = *(...);
    // if (&target)` pattern was an always-true null check (UB when the
    // pointer is null).
    Label*   target   = $labl$$label;
    int      flag     = $cmp$$cmpcode;

    switch(flag) {
      case 0x01: //equal
        if (target)
          __ beq(opr1_reg, opr2_reg, *target);
        else
          __ beq(opr1_reg, opr2_reg, (int)0);
        __ delayed()->nop();
        break;

      case 0x02: //not_equal
        if (target)
          __ bne(opr1_reg, opr2_reg, *target);
        else
          __ bne(opr1_reg, opr2_reg, (int)0);
        __ delayed()->nop();
        break;

      case 0x03: //greater
        __ slt(AT, opr2_reg, opr1_reg);
        if (target)
          __ bne(AT, R0, *target);
        else
          __ bne(AT, R0, (int)0);
        __ delayed()->nop();
        break;

      case 0x04: //greater_equal
        __ slt(AT, opr1_reg, opr2_reg);
        if (target)
          __ beq(AT, R0, *target);
        else
          __ beq(AT, R0, (int)0);
        __ delayed()->nop();
        break;

      case 0x05: //less
        __ slt(AT, opr1_reg, opr2_reg);
        if (target)
          __ bne(AT, R0, *target);
        else
          __ bne(AT, R0, (int)0);
        __ delayed()->nop();
        break;

      case 0x06: //less_equal
        __ slt(AT, opr2_reg, opr1_reg);
        if (target)
          __ beq(AT, R0, *target);
        else
          __ beq(AT, R0, (int)0);
        __ delayed()->nop();
        break;

      default:
        Unimplemented();
    }
  %}

  ins_pc_relative(1);
  ins_pipe( pipe_alu_branch );
%}
// Branch on signed 64-bit compare of a register against an immediate whose
// negation fits in 16 bits: AT = src1 - val is computed once with daddiu,
// then the sign of AT drives a compare-against-zero branch.
instruct branchConL_reg_immL16_sub(cmpOp cmp, mRegL src1, immL16_sub src2, label labl) %{
  match( If cmp (CmpL src1 src2) );
  effect(USE labl);
  ins_cost(180);
  format %{ "BR$cmp $src1, $src2, $labl #@branchConL_reg_immL16_sub" %}

  ins_encode %{
    Register op1  = $src1$$Register;
    int      val  = $src2$$constant;
    // Test the Label pointer directly: the old `Label &L = *(...); if (&L)`
    // pattern was an always-true null check (UB when the pointer is null).
    Label*   L    = $labl$$label;
    int      flag = $cmp$$cmpcode;

    __ daddiu(AT, op1, -1 * val);   // AT = src1 - src2 (64-bit)
    switch(flag) {
      case 0x01: //equal
        if (L)
          __ beq(R0, AT, *L);
        else
          __ beq(R0, AT, (int)0);
        break;
      case 0x02: //not_equal
        if (L)
          __ bne(R0, AT, *L);
        else
          __ bne(R0, AT, (int)0);
        break;
      case 0x03: //greater
        if (L)
          __ bgtz(AT, *L);
        else
          __ bgtz(AT, (int)0);
        break;
      case 0x04: //greater_equal
        if (L)
          __ bgez(AT, *L);
        else
          __ bgez(AT, (int)0);
        break;
      case 0x05: //less
        if (L)
          __ bltz(AT, *L);
        else
          __ bltz(AT, (int)0);
        break;
      case 0x06: //less_equal
        if (L)
          __ blez(AT, *L);
        else
          __ blez(AT, (int)0);
        break;
      default:
        Unimplemented();
    }
    __ nop();   // branch delay slot
  %}

  ins_pc_relative(1);
  ins_pipe( pipe_alu_branch );
%}
// Branch on signed 32-bit compare of a register against an immediate whose
// negation fits in 16 bits: AT = src1 - val is computed once with the 32-bit
// addiu32, then the sign of AT drives a compare-against-zero branch.
instruct branchConI_reg_imm16_sub(cmpOp cmp, mRegI src1, immI16_sub src2, label labl) %{
  match( If cmp (CmpI src1 src2) );
  effect(USE labl);
  ins_cost(180);
  format %{ "BR$cmp $src1, $src2, $labl #@branchConI_reg_imm16_sub" %}

  ins_encode %{
    Register op1  = $src1$$Register;
    int      val  = $src2$$constant;
    // Test the Label pointer directly: the old `Label &L = *(...); if (&L)`
    // pattern was an always-true null check (UB when the pointer is null).
    Label*   L    = $labl$$label;
    int      flag = $cmp$$cmpcode;

    __ addiu32(AT, op1, -1 * val);   // AT = src1 - src2 (32-bit)
    switch(flag) {
      case 0x01: //equal
        if (L)
          __ beq(R0, AT, *L);
        else
          __ beq(R0, AT, (int)0);
        break;
      case 0x02: //not_equal
        if (L)
          __ bne(R0, AT, *L);
        else
          __ bne(R0, AT, (int)0);
        break;
      case 0x03: //greater
        if (L)
          __ bgtz(AT, *L);
        else
          __ bgtz(AT, (int)0);
        break;
      case 0x04: //greater_equal
        if (L)
          __ bgez(AT, *L);
        else
          __ bgez(AT, (int)0);
        break;
      case 0x05: //less
        if (L)
          __ bltz(AT, *L);
        else
          __ bltz(AT, (int)0);
        break;
      case 0x06: //less_equal
        if (L)
          __ blez(AT, *L);
        else
          __ blez(AT, (int)0);
        break;
      default:
        Unimplemented();
    }
    __ nop();   // branch delay slot
  %}

  ins_pc_relative(1);
  ins_pipe( pipe_alu_branch );
%}
// Branch on signed 64-bit compare of a register against zero, using the
// MIPS compare-with-zero branches (bgtz/bgez/blez) where available.  A
// single delayed()->nop() after the switch fills the branch delay slot.
instruct branchConL_regL_immL0(cmpOp cmp, mRegL src1, immL0 zero, label labl) %{
  match( If cmp (CmpL src1 zero) );
  effect(USE labl);
  format %{ "BR$cmp $src1, zero, $labl #@branchConL_regL_immL0" %}
  ins_cost(150);

  ins_encode %{
    Register opr1_reg = as_Register($src1$$reg);
    // Test the Label pointer directly: the old `Label &target = *(...);
    // if (&target)` pattern was an always-true null check (UB when the
    // pointer is null).
    Label*   target   = $labl$$label;
    int      flag     = $cmp$$cmpcode;

    switch(flag) {
      case 0x01: //equal
        if (target)
          __ beq(opr1_reg, R0, *target);
        else
          __ beq(opr1_reg, R0, (int)0);
        break;

      case 0x02: //not_equal
        if (target)
          __ bne(opr1_reg, R0, *target);
        else
          __ bne(opr1_reg, R0, (int)0);
        break;

      case 0x03: //greater
        if (target)
          __ bgtz(opr1_reg, *target);
        else
          __ bgtz(opr1_reg, (int)0);
        break;

      case 0x04: //greater_equal
        if (target)
          __ bgez(opr1_reg, *target);
        else
          __ bgez(opr1_reg, (int)0);
        break;

      case 0x05: //less
        __ slt(AT, opr1_reg, R0);
        if (target)
          __ bne(AT, R0, *target);
        else
          __ bne(AT, R0, (int)0);
        break;

      case 0x06: //less_equal
        if (target)
          __ blez(opr1_reg, *target);
        else
          __ blez(opr1_reg, (int)0);
        break;

      default:
        Unimplemented();
    }
    __ delayed()->nop();   // branch delay slot
  %}

  ins_pc_relative(1);
  ins_pipe( pipe_alu_branch );
%}
//FIXME
// Branch on single-precision float compare.  A c.cond.s compare sets the FPU
// condition flag; bc1t/bc1f branch on that flag.  The unordered forms
// (c_ule_s/c_ult_s) make greater/greater_equal fall through when either
// operand is NaN, as required by Java semantics.
instruct branchConF_reg_reg(cmpOp cmp, regF src1, regF src2, label labl) %{
  match( If cmp (CmpF src1 src2) );
  effect(USE labl);
  format %{ "BR$cmp $src1, $src2, $labl #@branchConF_reg_reg" %}

  ins_encode %{
    FloatRegister reg_op1 = $src1$$FloatRegister;
    FloatRegister reg_op2 = $src2$$FloatRegister;
    // Test the Label pointer directly: the old `Label &L = *(...); if (&L)`
    // pattern was an always-true null check (UB when the pointer is null).
    Label*        L       = $labl$$label;
    int           flag    = $cmp$$cmpcode;

    switch(flag) {
      case 0x01: //equal
        __ c_eq_s(reg_op1, reg_op2);
        if (L)
          __ bc1t(*L);
        else
          __ bc1t((int)0);
        break;
      case 0x02: //not_equal
        __ c_eq_s(reg_op1, reg_op2);
        if (L)
          __ bc1f(*L);
        else
          __ bc1f((int)0);
        break;
      case 0x03: //greater
        __ c_ule_s(reg_op1, reg_op2);
        if (L)
          __ bc1f(*L);
        else
          __ bc1f((int)0);
        break;
      case 0x04: //greater_equal
        __ c_ult_s(reg_op1, reg_op2);
        if (L)
          __ bc1f(*L);
        else
          __ bc1f((int)0);
        break;
      case 0x05: //less
        __ c_ult_s(reg_op1, reg_op2);
        if (L)
          __ bc1t(*L);
        else
          __ bc1t((int)0);
        break;
      case 0x06: //less_equal
        __ c_ule_s(reg_op1, reg_op2);
        if (L)
          __ bc1t(*L);
        else
          __ bc1t((int)0);
        break;
      default:
        Unimplemented();
    }
    __ nop();   // branch delay slot
  %}

  ins_pc_relative(1);
  ins_pipe(pipe_slow);
%}
// Branch on double-precision float compare.  A c.cond.d compare sets the FPU
// condition flag; bc1t/bc1f branch on that flag.
instruct branchConD_reg_reg(cmpOp cmp, regD src1, regD src2, label labl) %{
  match( If cmp (CmpD src1 src2) );
  effect(USE labl);
  format %{ "BR$cmp $src1, $src2, $labl #@branchConD_reg_reg" %}

  ins_encode %{
    FloatRegister reg_op1 = $src1$$FloatRegister;
    FloatRegister reg_op2 = $src2$$FloatRegister;
    // Test the Label pointer directly: the old `Label &L = *(...); if (&L)`
    // pattern was an always-true null check (UB when the pointer is null).
    Label*        L       = $labl$$label;
    int           flag    = $cmp$$cmpcode;

    switch(flag) {
      case 0x01: //equal
        __ c_eq_d(reg_op1, reg_op2);
        if (L)
          __ bc1t(*L);
        else
          __ bc1t((int)0);
        break;
      case 0x02: //not_equal
        // 2016/4/19 aoqi: c_ueq_d cannot distinguish NaN from equal.
        // Double.isNaN(Double) is implemented by 'f != f', so the use of
        // c_ueq_d causes bugs.
        __ c_eq_d(reg_op1, reg_op2);
        if (L)
          __ bc1f(*L);
        else
          __ bc1f((int)0);
        break;
      case 0x03: //greater
        __ c_ule_d(reg_op1, reg_op2);
        if (L)
          __ bc1f(*L);
        else
          __ bc1f((int)0);
        break;
      case 0x04: //greater_equal
        __ c_ult_d(reg_op1, reg_op2);
        if (L)
          __ bc1f(*L);
        else
          __ bc1f((int)0);
        break;
      case 0x05: //less
        __ c_ult_d(reg_op1, reg_op2);
        if (L)
          __ bc1t(*L);
        else
          __ bc1t((int)0);
        break;
      case 0x06: //less_equal
        __ c_ule_d(reg_op1, reg_op2);
        if (L)
          __ bc1t(*L);
        else
          __ bc1t((int)0);
        break;
      default:
        Unimplemented();
    }
    __ nop();   // branch delay slot
  %}

  ins_pc_relative(1);
  ins_pipe(pipe_slow);
%}
// Call Runtime Instruction
// Direct call into the VM runtime; the Java_To_Runtime encoding class emits
// the actual call sequence for the target method address.
instruct CallRuntimeDirect(method meth) %{
  match(CallRuntime );
  effect(USE meth);

  ins_cost(300);
  format %{ "CALL,runtime #@CallRuntimeDirect" %}
  ins_encode( Java_To_Runtime( meth ) );
  ins_pipe( pipe_slow );
  ins_alignment(16);
%}
//------------------------MemBar Instructions-------------------------------
//Memory barrier flavors

// MemBarAcquire: empty encoding (size 0) — no instruction is emitted for a
// plain acquire barrier in this port.
instruct membar_acquire() %{
  match(MemBarAcquire);
  ins_cost(0);

  size(0);
  format %{ "MEMBAR-acquire (empty) @ membar_acquire" %}
  ins_encode();
  ins_pipe(empty);
%}
// LoadFence: emits a full SYNC instruction.
instruct load_fence() %{
  match(LoadFence);
  ins_cost(400);

  format %{ "MEMBAR @ load_fence" %}
  ins_encode %{
    __ sync();
  %}
  ins_pipe(pipe_slow);
%}
// MemBarAcquireLock: empty — the acquire semantics are already provided by
// the CAS inside the preceding FastLock, so no instruction is emitted.
instruct membar_acquire_lock()
%{
  match(MemBarAcquireLock);
  ins_cost(0);

  size(0);
  format %{ "MEMBAR-acquire (acquire as part of CAS in prior FastLock so empty encoding) @ membar_acquire_lock" %}
  ins_encode();
  ins_pipe(empty);
%}
// MemBarRelease: empty encoding (size 0) — no instruction is emitted for a
// plain release barrier in this port.
instruct membar_release() %{
  match(MemBarRelease);
  ins_cost(0);

  size(0);
  format %{ "MEMBAR-release (empty) @ membar_release" %}
  ins_encode();
  ins_pipe(empty);
%}
// StoreFence: emits a full SYNC instruction.
instruct store_fence() %{
  match(StoreFence);
  ins_cost(400);

  format %{ "MEMBAR @ store_fence" %}

  ins_encode %{
    __ sync();
  %}

  ins_pipe(pipe_slow);
%}
// MemBarReleaseLock: empty — the release semantics are already provided by
// the preceding FastUnlock, so no instruction is emitted.
instruct membar_release_lock()
%{
  match(MemBarReleaseLock);
  ins_cost(0);

  size(0);
  format %{ "MEMBAR-release-lock (release in FastUnlock so empty) @ membar_release_lock" %}
  ins_encode();
  ins_pipe(empty);
%}
// MemBarVolatile: full SYNC, skipped entirely on a uniprocessor system.
instruct membar_volatile() %{
  match(MemBarVolatile);
  ins_cost(400);

  format %{ "MEMBAR-volatile" %}
  ins_encode %{
    if( !os::is_MP() ) return;     // Not needed on single CPU
    __ sync();

  %}
  ins_pipe(pipe_slow);
%}
// MemBarVolatile that the matcher has proven redundant (a store-load barrier
// already follows, per Matcher::post_store_load_barrier): empty encoding.
instruct unnecessary_membar_volatile() %{
  match(MemBarVolatile);
  predicate(Matcher::post_store_load_barrier(n));
  ins_cost(0);

  size(0);
  format %{ "MEMBAR-volatile (unnecessary so empty encoding) @ unnecessary_membar_volatile" %}
  ins_encode( );
  ins_pipe(empty);
%}
// MemBarStoreStore: empty encoding (size 0) — no instruction is emitted for
// a store-store barrier in this port.
instruct membar_storestore() %{
  match(MemBarStoreStore);

  ins_cost(0);
  size(0);
  format %{ "MEMBAR-storestore (empty encoding) @ membar_storestore" %}
  ins_encode( );
  ins_pipe(empty);
%}
//----------Move Instructions--------------------------------------------------
// Reinterpret a long value as a pointer: a plain register move, elided
// entirely when source and destination registers already coincide.
instruct castX2P(mRegP dst, mRegL src) %{
  match(Set dst (CastX2P src));

  format %{ "castX2P $dst, $src @ castX2P" %}
  ins_encode %{
    Register from = $src$$Register;
    Register to   = $dst$$Register;

    if (from != to) {
      __ move(to, from);
    }
  %}
  ins_cost(10);
  ins_pipe( ialu_regI_mov );
%}
// Reinterpret a pointer as a long value: a plain register move, elided
// entirely when source and destination registers already coincide.
instruct castP2X(mRegL dst, mRegP src ) %{
  match(Set dst (CastP2X src));

  format %{ "mov $dst, $src\t #@castP2X" %}
  ins_encode %{
    Register from = $src$$Register;
    Register to   = $dst$$Register;

    if (from != to) {
      __ move(to, from);
    }
  %}
  ins_pipe( ialu_regI_mov );
%}
// Move the raw 32-bit pattern of a float register into an int register
// (mfc1), without any conversion.
instruct MoveF2I_reg_reg(mRegI dst, regF src) %{
  match(Set dst (MoveF2I src));
  effect(DEF dst, USE src);
  ins_cost(85);
  format %{ "MoveF2I $dst, $src @ MoveF2I_reg_reg" %}
  ins_encode %{
    FloatRegister fpr = as_FloatRegister($src$$reg);
    Register      gpr = as_Register($dst$$reg);

    __ mfc1(gpr, fpr);
  %}
  ins_pipe( pipe_slow );
%}
// Move the raw 32-bit pattern of an int register into a float register
// (mtc1), without any conversion.
instruct MoveI2F_reg_reg(regF dst, mRegI src) %{
  match(Set dst (MoveI2F src));
  effect(DEF dst, USE src);
  ins_cost(85);
  format %{ "MoveI2F $dst, $src @ MoveI2F_reg_reg" %}
  ins_encode %{
    FloatRegister fpr = as_FloatRegister($dst$$reg);
    Register      gpr = as_Register($src$$reg);

    __ mtc1(gpr, fpr);
  %}
  ins_pipe( pipe_slow );
%}
// Move the raw 64-bit pattern of a double register into a long register
// (dmfc1), without any conversion.
instruct MoveD2L_reg_reg(mRegL dst, regD src) %{
  match(Set dst (MoveD2L src));
  effect(DEF dst, USE src);
  ins_cost(85);
  format %{ "MoveD2L $dst, $src @ MoveD2L_reg_reg" %}
  ins_encode %{
    FloatRegister fpr = as_FloatRegister($src$$reg);
    Register      gpr = as_Register($dst$$reg);

    __ dmfc1(gpr, fpr);
  %}
  ins_pipe( pipe_slow );
%}
// Move the raw 64-bit pattern of a long register into a double register
// (dmtc1), without any conversion.
instruct MoveL2D_reg_reg(regD dst, mRegL src) %{
  match(Set dst (MoveL2D src));
  effect(DEF dst, USE src);
  ins_cost(85);
  format %{ "MoveL2D $dst, $src @ MoveL2D_reg_reg" %}
  ins_encode %{
    Register      gpr = as_Register($src$$reg);
    FloatRegister fpr = as_FloatRegister($dst$$reg);

    __ dmtc1(gpr, fpr);
  %}
  ins_pipe( pipe_slow );
%}
//----------Conditional Move---------------------------------------------------
// Conditional move
// Select $src into $dst when the signed 32-bit compare $tmp1 <cop> $tmp2
// holds; otherwise $dst keeps its value.  The compare result goes into AT
// and the select is branch-free: movz moves when AT == 0, movn when AT != 0.
instruct cmovI_cmpI_reg_reg(mRegI dst, mRegI src, mRegI tmp1, mRegI tmp2, cmpOp cop ) %{
  match(Set dst (CMoveI (Binary cop (CmpI tmp1 tmp2)) (Binary dst src)));
  ins_cost(80);
  format %{
           "CMP$cop $tmp1, $tmp2\t @cmovI_cmpI_reg_reg\n"
           "\tCMOV $dst,$src \t @cmovI_cmpI_reg_reg"
  %}

  ins_encode %{
    Register op1 = $tmp1$$Register;
    Register op2 = $tmp2$$Register;
    Register dst = $dst$$Register;
    Register src = $src$$Register;
    int flag = $cop$$cmpcode;

    switch(flag) {
      case 0x01: //equal
        __ subu32(AT, op1, op2);   // AT == 0 iff op1 == op2
        __ movz(dst, src, AT);
        break;

      case 0x02: //not_equal
        __ subu32(AT, op1, op2);
        __ movn(dst, src, AT);
        break;

      case 0x03: //greater
        __ slt(AT, op2, op1);
        __ movn(dst, src, AT);
        break;

      case 0x04: //greater_equal
        __ slt(AT, op1, op2);
        __ movz(dst, src, AT);
        break;

      case 0x05: //less
        __ slt(AT, op1, op2);
        __ movn(dst, src, AT);
        break;

      case 0x06: //less_equal
        __ slt(AT, op2, op1);
        __ movz(dst, src, AT);
        break;

      default:
        Unimplemented();
    }
  %}

  ins_pipe( pipe_slow );
%}
// Select $src into $dst when the unsigned pointer compare $tmp1 <cop> $tmp2
// holds; branch-free via sltu + movz/movn (movz: AT == 0, movn: AT != 0).
instruct cmovI_cmpP_reg_reg(mRegI dst, mRegI src, mRegP tmp1, mRegP tmp2, cmpOpU cop ) %{
  match(Set dst (CMoveI (Binary cop (CmpP tmp1 tmp2)) (Binary dst src)));
  ins_cost(80);
  format %{
           "CMPU$cop $tmp1,$tmp2\t @cmovI_cmpP_reg_reg\n\t"
           "CMOV $dst,$src\t @cmovI_cmpP_reg_reg"
  %}
  ins_encode %{
    Register op1 = $tmp1$$Register;
    Register op2 = $tmp2$$Register;
    Register dst = $dst$$Register;
    Register src = $src$$Register;
    int flag = $cop$$cmpcode;

    switch(flag) {
      case 0x01: //equal
        __ subu(AT, op1, op2);   // AT == 0 iff op1 == op2
        __ movz(dst, src, AT);
        break;

      case 0x02: //not_equal
        __ subu(AT, op1, op2);
        __ movn(dst, src, AT);
        break;

      case 0x03: //above
        __ sltu(AT, op2, op1);
        __ movn(dst, src, AT);
        break;

      case 0x04: //above_equal
        __ sltu(AT, op1, op2);
        __ movz(dst, src, AT);
        break;

      case 0x05: //below
        __ sltu(AT, op1, op2);
        __ movn(dst, src, AT);
        break;

      case 0x06: //below_equal
        __ sltu(AT, op2, op1);
        __ movz(dst, src, AT);
        break;

      default:
        Unimplemented();
    }
  %}

  ins_pipe( pipe_slow );
%}
// Select $src into $dst when the unsigned compare of two narrow oops
// $tmp1 <cop> $tmp2 holds; branch-free via sltu + movz/movn.
instruct cmovI_cmpN_reg_reg(mRegI dst, mRegI src, mRegN tmp1, mRegN tmp2, cmpOpU cop ) %{
  match(Set dst (CMoveI (Binary cop (CmpN tmp1 tmp2)) (Binary dst src)));
  ins_cost(80);
  format %{
           "CMPU$cop $tmp1,$tmp2\t @cmovI_cmpN_reg_reg\n\t"
           "CMOV $dst,$src\t @cmovI_cmpN_reg_reg"
  %}
  ins_encode %{
    Register op1 = $tmp1$$Register;
    Register op2 = $tmp2$$Register;
    Register dst = $dst$$Register;
    Register src = $src$$Register;
    int flag = $cop$$cmpcode;

    switch(flag) {
      case 0x01: //equal
        __ subu32(AT, op1, op2);   // AT == 0 iff op1 == op2
        __ movz(dst, src, AT);
        break;

      case 0x02: //not_equal
        __ subu32(AT, op1, op2);
        __ movn(dst, src, AT);
        break;

      case 0x03: //above
        __ sltu(AT, op2, op1);
        __ movn(dst, src, AT);
        break;

      case 0x04: //above_equal
        __ sltu(AT, op1, op2);
        __ movz(dst, src, AT);
        break;

      case 0x05: //below
        __ sltu(AT, op1, op2);
        __ movn(dst, src, AT);
        break;

      case 0x06: //below_equal
        __ sltu(AT, op2, op1);
        __ movz(dst, src, AT);
        break;

      default:
        Unimplemented();
    }
  %}

  ins_pipe( pipe_slow );
%}
// Select pointer $src into $dst when the unsigned compare of two narrow
// oops $tmp1 <cop> $tmp2 holds; branch-free via sltu + movz/movn.
instruct cmovP_cmpN_reg_reg(mRegP dst, mRegP src, mRegN tmp1, mRegN tmp2, cmpOpU cop ) %{
  match(Set dst (CMoveP (Binary cop (CmpN tmp1 tmp2)) (Binary dst src)));
  ins_cost(80);
  format %{
           "CMPU$cop $tmp1,$tmp2\t @cmovP_cmpN_reg_reg\n\t"
           "CMOV $dst,$src\t @cmovP_cmpN_reg_reg"
  %}
  ins_encode %{
    Register op1 = $tmp1$$Register;
    Register op2 = $tmp2$$Register;
    Register dst = $dst$$Register;
    Register src = $src$$Register;
    int flag = $cop$$cmpcode;

    switch(flag) {
      case 0x01: //equal
        __ subu32(AT, op1, op2);   // AT == 0 iff op1 == op2
        __ movz(dst, src, AT);
        break;

      case 0x02: //not_equal
        __ subu32(AT, op1, op2);
        __ movn(dst, src, AT);
        break;

      case 0x03: //above
        __ sltu(AT, op2, op1);
        __ movn(dst, src, AT);
        break;

      case 0x04: //above_equal
        __ sltu(AT, op1, op2);
        __ movz(dst, src, AT);
        break;

      case 0x05: //below
        __ sltu(AT, op1, op2);
        __ movn(dst, src, AT);
        break;

      case 0x06: //below_equal
        __ sltu(AT, op2, op1);
        __ movz(dst, src, AT);
        break;

      default:
        Unimplemented();
    }
  %}

  ins_pipe( pipe_slow );
%}
// Select narrow oop $src into $dst when the unsigned pointer compare
// $tmp1 <cop> $tmp2 holds; branch-free via sltu + movz/movn.
instruct cmovN_cmpP_reg_reg(mRegN dst, mRegN src, mRegP tmp1, mRegP tmp2, cmpOpU cop ) %{
  match(Set dst (CMoveN (Binary cop (CmpP tmp1 tmp2)) (Binary dst src)));
  ins_cost(80);
  format %{
           "CMPU$cop $tmp1,$tmp2\t @cmovN_cmpP_reg_reg\n\t"
           "CMOV $dst,$src\t @cmovN_cmpP_reg_reg"
  %}
  ins_encode %{
    Register op1 = $tmp1$$Register;
    Register op2 = $tmp2$$Register;
    Register dst = $dst$$Register;
    Register src = $src$$Register;
    int flag = $cop$$cmpcode;

    switch(flag) {
      case 0x01: //equal
        __ subu(AT, op1, op2);   // AT == 0 iff op1 == op2
        __ movz(dst, src, AT);
        break;

      case 0x02: //not_equal
        __ subu(AT, op1, op2);
        __ movn(dst, src, AT);
        break;

      case 0x03: //above
        __ sltu(AT, op2, op1);
        __ movn(dst, src, AT);
        break;

      case 0x04: //above_equal
        __ sltu(AT, op1, op2);
        __ movz(dst, src, AT);
        break;

      case 0x05: //below
        __ sltu(AT, op1, op2);
        __ movn(dst, src, AT);
        break;

      case 0x06: //below_equal
        __ sltu(AT, op2, op1);
        __ movz(dst, src, AT);
        break;

      default:
        Unimplemented();
    }
  %}

  ins_pipe( pipe_slow );
%}
// Select pointer $src into $dst when the double compare $tmp1 <cop> $tmp2
// holds.  A c.cond.d compare sets the FPU condition flag; movt moves when
// the flag is true, movf when it is false.
instruct cmovP_cmpD_reg_reg(mRegP dst, mRegP src, regD tmp1, regD tmp2, cmpOp cop ) %{
  match(Set dst (CMoveP (Binary cop (CmpD tmp1 tmp2)) (Binary dst src)));
  ins_cost(80);
  format %{
           "CMP$cop $tmp1, $tmp2\t @cmovP_cmpD_reg_reg\n"
           "\tCMOV $dst,$src \t @cmovP_cmpD_reg_reg"
  %}
  ins_encode %{
    FloatRegister reg_op1 = as_FloatRegister($tmp1$$reg);
    FloatRegister reg_op2 = as_FloatRegister($tmp2$$reg);
    Register dst = as_Register($dst$$reg);
    Register src = as_Register($src$$reg);

    int flag = $cop$$cmpcode;

    switch(flag) {
      case 0x01: //equal
        __ c_eq_d(reg_op1, reg_op2);
        __ movt(dst, src);
        break;
      case 0x02: //not_equal
        __ c_eq_d(reg_op1, reg_op2);
        __ movf(dst, src);
        break;
      case 0x03: //greater
        __ c_ole_d(reg_op1, reg_op2);
        __ movf(dst, src);
        break;
      case 0x04: //greater_equal
        __ c_olt_d(reg_op1, reg_op2);
        __ movf(dst, src);
        break;
      case 0x05: //less
        __ c_ult_d(reg_op1, reg_op2);
        __ movt(dst, src);
        break;
      case 0x06: //less_equal
        __ c_ule_d(reg_op1, reg_op2);
        __ movt(dst, src);
        break;
      default:
        Unimplemented();
    }
  %}

  ins_pipe( pipe_slow );
%}
// Select narrow oop $src into $dst when the unsigned compare of two narrow
// oops $tmp1 <cop> $tmp2 holds; branch-free via sltu + movz/movn.
instruct cmovN_cmpN_reg_reg(mRegN dst, mRegN src, mRegN tmp1, mRegN tmp2, cmpOpU cop ) %{
  match(Set dst (CMoveN (Binary cop (CmpN tmp1 tmp2)) (Binary dst src)));
  ins_cost(80);
  format %{
           "CMPU$cop $tmp1,$tmp2\t @cmovN_cmpN_reg_reg\n\t"
           "CMOV $dst,$src\t @cmovN_cmpN_reg_reg"
  %}
  ins_encode %{
    Register op1 = $tmp1$$Register;
    Register op2 = $tmp2$$Register;
    Register dst = $dst$$Register;
    Register src = $src$$Register;
    int flag = $cop$$cmpcode;

    switch(flag) {
      case 0x01: //equal
        __ subu32(AT, op1, op2);   // AT == 0 iff op1 == op2
        __ movz(dst, src, AT);
        break;

      case 0x02: //not_equal
        __ subu32(AT, op1, op2);
        __ movn(dst, src, AT);
        break;

      case 0x03: //above
        __ sltu(AT, op2, op1);
        __ movn(dst, src, AT);
        break;

      case 0x04: //above_equal
        __ sltu(AT, op1, op2);
        __ movz(dst, src, AT);
        break;

      case 0x05: //below
        __ sltu(AT, op1, op2);
        __ movn(dst, src, AT);
        break;

      case 0x06: //below_equal
        __ sltu(AT, op2, op1);
        __ movz(dst, src, AT);
        break;

      default:
        Unimplemented();
    }
  %}

  ins_pipe( pipe_slow );
%}
// Select $src into $dst when the unsigned 32-bit compare $tmp1 <cop> $tmp2
// holds; branch-free via sltu + movz/movn.
instruct cmovI_cmpU_reg_reg(mRegI dst, mRegI src, mRegI tmp1, mRegI tmp2, cmpOpU cop ) %{
  match(Set dst (CMoveI (Binary cop (CmpU tmp1 tmp2)) (Binary dst src)));
  ins_cost(80);
  format %{
           "CMPU$cop $tmp1,$tmp2\t @cmovI_cmpU_reg_reg\n\t"
           "CMOV $dst,$src\t @cmovI_cmpU_reg_reg"
  %}
  ins_encode %{
    Register op1 = $tmp1$$Register;
    Register op2 = $tmp2$$Register;
    Register dst = $dst$$Register;
    Register src = $src$$Register;
    int flag = $cop$$cmpcode;

    switch(flag) {
      case 0x01: //equal
        __ subu(AT, op1, op2);   // AT == 0 iff op1 == op2
        __ movz(dst, src, AT);
        break;

      case 0x02: //not_equal
        __ subu(AT, op1, op2);
        __ movn(dst, src, AT);
        break;

      case 0x03: //above
        __ sltu(AT, op2, op1);
        __ movn(dst, src, AT);
        break;

      case 0x04: //above_equal
        __ sltu(AT, op1, op2);
        __ movz(dst, src, AT);
        break;

      case 0x05: //below
        __ sltu(AT, op1, op2);
        __ movn(dst, src, AT);
        break;

      case 0x06: //below_equal
        __ sltu(AT, op2, op1);
        __ movz(dst, src, AT);
        break;

      default:
        Unimplemented();
    }
  %}

  ins_pipe( pipe_slow );
%}
// Select $src into $dst when the signed 64-bit (long) compare
// $tmp1 <cop> $tmp2 holds; branch-free via slt + movz/movn.
instruct cmovI_cmpL_reg_reg(mRegI dst, mRegI src, mRegL tmp1, mRegL tmp2, cmpOp cop ) %{
  match(Set dst (CMoveI (Binary cop (CmpL tmp1 tmp2)) (Binary dst src)));
  ins_cost(80);
  format %{
           "CMP$cop $tmp1, $tmp2\t @cmovI_cmpL_reg_reg\n"
           "\tCMOV $dst,$src \t @cmovI_cmpL_reg_reg"
  %}
  ins_encode %{
    Register opr1 = as_Register($tmp1$$reg);
    Register opr2 = as_Register($tmp2$$reg);
    Register dst  = $dst$$Register;
    Register src  = $src$$Register;
    int flag = $cop$$cmpcode;

    switch(flag) {
      case 0x01: //equal
        __ subu(AT, opr1, opr2);   // AT == 0 iff opr1 == opr2
        __ movz(dst, src, AT);
        break;

      case 0x02: //not_equal
        __ subu(AT, opr1, opr2);
        __ movn(dst, src, AT);
        break;

      case 0x03: //greater
        __ slt(AT, opr2, opr1);
        __ movn(dst, src, AT);
        break;

      case 0x04: //greater_equal
        __ slt(AT, opr1, opr2);
        __ movz(dst, src, AT);
        break;

      case 0x05: //less
        __ slt(AT, opr1, opr2);
        __ movn(dst, src, AT);
        break;

      case 0x06: //less_equal
        __ slt(AT, opr2, opr1);
        __ movz(dst, src, AT);
        break;

      default:
        Unimplemented();
    }
  %}

  ins_pipe( pipe_slow );
%}
// Select pointer $src into $dst when the signed 64-bit (long) compare
// $tmp1 <cop> $tmp2 holds; branch-free via slt + movz/movn.
instruct cmovP_cmpL_reg_reg(mRegP dst, mRegP src, mRegL tmp1, mRegL tmp2, cmpOp cop ) %{
  match(Set dst (CMoveP (Binary cop (CmpL tmp1 tmp2)) (Binary dst src)));
  ins_cost(80);
  format %{
           "CMP$cop $tmp1, $tmp2\t @cmovP_cmpL_reg_reg\n"
           "\tCMOV $dst,$src \t @cmovP_cmpL_reg_reg"
  %}
  ins_encode %{
    Register opr1 = as_Register($tmp1$$reg);
    Register opr2 = as_Register($tmp2$$reg);
    Register dst  = $dst$$Register;
    Register src  = $src$$Register;
    int flag = $cop$$cmpcode;

    switch(flag) {
      case 0x01: //equal
        __ subu(AT, opr1, opr2);   // AT == 0 iff opr1 == opr2
        __ movz(dst, src, AT);
        break;

      case 0x02: //not_equal
        __ subu(AT, opr1, opr2);
        __ movn(dst, src, AT);
        break;

      case 0x03: //greater
        __ slt(AT, opr2, opr1);
        __ movn(dst, src, AT);
        break;

      case 0x04: //greater_equal
        __ slt(AT, opr1, opr2);
        __ movz(dst, src, AT);
        break;

      case 0x05: //less
        __ slt(AT, opr1, opr2);
        __ movn(dst, src, AT);
        break;

      case 0x06: //less_equal
        __ slt(AT, opr2, opr1);
        __ movz(dst, src, AT);
        break;

      default:
        Unimplemented();
    }
  %}

  ins_pipe( pipe_slow );
%}
// Select $src into $dst when the double compare $tmp1 <cop> $tmp2 holds.
// A c.cond.d compare sets the FPU condition flag; movt moves when the flag
// is true, movf when it is false.
instruct cmovI_cmpD_reg_reg(mRegI dst, mRegI src, regD tmp1, regD tmp2, cmpOp cop ) %{
  match(Set dst (CMoveI (Binary cop (CmpD tmp1 tmp2)) (Binary dst src)));
  ins_cost(80);
  format %{
           "CMP$cop $tmp1, $tmp2\t @cmovI_cmpD_reg_reg\n"
           "\tCMOV $dst,$src \t @cmovI_cmpD_reg_reg"
  %}
  ins_encode %{
    FloatRegister reg_op1 = as_FloatRegister($tmp1$$reg);
    FloatRegister reg_op2 = as_FloatRegister($tmp2$$reg);
    Register dst = as_Register($dst$$reg);
    Register src = as_Register($src$$reg);

    int flag = $cop$$cmpcode;

    switch(flag) {
      case 0x01: //equal
        __ c_eq_d(reg_op1, reg_op2);
        __ movt(dst, src);
        break;
      case 0x02: //not_equal
        // 2016/4/19 aoqi: See instruct branchConD_reg_reg.  The change in
        // branchConD_reg_reg fixed a bug (c_ueq_d cannot distinguish NaN
        // from equal).  It seems similar here, so I made the same change.
        __ c_eq_d(reg_op1, reg_op2);
        __ movf(dst, src);
        break;
      case 0x03: //greater
        __ c_ole_d(reg_op1, reg_op2);
        __ movf(dst, src);
        break;
      case 0x04: //greater_equal
        __ c_olt_d(reg_op1, reg_op2);
        __ movf(dst, src);
        break;
      case 0x05: //less
        __ c_ult_d(reg_op1, reg_op2);
        __ movt(dst, src);
        break;
      case 0x06: //less_equal
        __ c_ule_d(reg_op1, reg_op2);
        __ movt(dst, src);
        break;
      default:
        Unimplemented();
    }
  %}

  ins_pipe( pipe_slow );
%}
// Select pointer $src into $dst when the unsigned pointer compare
// $tmp1 <cop> $tmp2 holds; branch-free via sltu + movz/movn.
instruct cmovP_cmpP_reg_reg(mRegP dst, mRegP src, mRegP tmp1, mRegP tmp2, cmpOpU cop ) %{
  match(Set dst (CMoveP (Binary cop (CmpP tmp1 tmp2)) (Binary dst src)));
  ins_cost(80);
  format %{
           "CMPU$cop $tmp1,$tmp2\t @cmovP_cmpP_reg_reg\n\t"
           "CMOV $dst,$src\t @cmovP_cmpP_reg_reg"
  %}
  ins_encode %{
    Register op1 = $tmp1$$Register;
    Register op2 = $tmp2$$Register;
    Register dst = $dst$$Register;
    Register src = $src$$Register;
    int flag = $cop$$cmpcode;

    switch(flag) {
      case 0x01: //equal
        __ subu(AT, op1, op2);   // AT == 0 iff op1 == op2
        __ movz(dst, src, AT);
        break;

      case 0x02: //not_equal
        __ subu(AT, op1, op2);
        __ movn(dst, src, AT);
        break;

      case 0x03: //above
        __ sltu(AT, op2, op1);
        __ movn(dst, src, AT);
        break;

      case 0x04: //above_equal
        __ sltu(AT, op1, op2);
        __ movz(dst, src, AT);
        break;

      case 0x05: //below
        __ sltu(AT, op1, op2);
        __ movn(dst, src, AT);
        break;

      case 0x06: //below_equal
        __ sltu(AT, op2, op1);
        __ movz(dst, src, AT);
        break;

      default:
        Unimplemented();
    }
  %}

  ins_pipe( pipe_slow );
%}
// Select pointer $src into $dst when the signed 32-bit compare
// $tmp1 <cop> $tmp2 holds; branch-free via slt + movz/movn.
// NOTE: the compare is signed (cmpOp/slt); the old case comments said
// above/above_equal/below/below_equal, which are unsigned names — corrected
// to the signed greater/less family.
instruct cmovP_cmpI_reg_reg(mRegP dst, mRegP src, mRegI tmp1, mRegI tmp2, cmpOp cop ) %{
  match(Set dst (CMoveP (Binary cop (CmpI tmp1 tmp2)) (Binary dst src)));
  ins_cost(80);
  format %{
           "CMP$cop $tmp1,$tmp2\t @cmovP_cmpI_reg_reg\n\t"
           "CMOV $dst,$src\t @cmovP_cmpI_reg_reg"
  %}
  ins_encode %{
    Register op1 = $tmp1$$Register;
    Register op2 = $tmp2$$Register;
    Register dst = $dst$$Register;
    Register src = $src$$Register;
    int flag = $cop$$cmpcode;

    switch(flag) {
      case 0x01: //equal
        __ subu32(AT, op1, op2);   // AT == 0 iff op1 == op2
        __ movz(dst, src, AT);
        break;

      case 0x02: //not_equal
        __ subu32(AT, op1, op2);
        __ movn(dst, src, AT);
        break;

      case 0x03: //greater
        __ slt(AT, op2, op1);
        __ movn(dst, src, AT);
        break;

      case 0x04: //greater_equal
        __ slt(AT, op1, op2);
        __ movz(dst, src, AT);
        break;

      case 0x05: //less
        __ slt(AT, op1, op2);
        __ movn(dst, src, AT);
        break;

      case 0x06: //less_equal
        __ slt(AT, op2, op1);
        __ movz(dst, src, AT);
        break;

      default:
        Unimplemented();
    }
  %}

  ins_pipe( pipe_slow );
%}
// Select narrow oop $src into $dst when the signed 32-bit compare
// $tmp1 <cop> $tmp2 holds; branch-free via slt + movz/movn.
// NOTE: the compare is signed (cmpOp/slt); the old case comments said
// above/above_equal/below/below_equal, which are unsigned names — corrected
// to the signed greater/less family.
instruct cmovN_cmpI_reg_reg(mRegN dst, mRegN src, mRegI tmp1, mRegI tmp2, cmpOp cop ) %{
  match(Set dst (CMoveN (Binary cop (CmpI tmp1 tmp2)) (Binary dst src)));
  ins_cost(80);
  format %{
           "CMP$cop $tmp1,$tmp2\t @cmovN_cmpI_reg_reg\n\t"
           "CMOV $dst,$src\t @cmovN_cmpI_reg_reg"
  %}
  ins_encode %{
    Register op1 = $tmp1$$Register;
    Register op2 = $tmp2$$Register;
    Register dst = $dst$$Register;
    Register src = $src$$Register;
    int flag = $cop$$cmpcode;

    switch(flag) {
      case 0x01: //equal
        __ subu32(AT, op1, op2);   // AT == 0 iff op1 == op2
        __ movz(dst, src, AT);
        break;

      case 0x02: //not_equal
        __ subu32(AT, op1, op2);
        __ movn(dst, src, AT);
        break;

      case 0x03: //greater
        __ slt(AT, op2, op1);
        __ movn(dst, src, AT);
        break;

      case 0x04: //greater_equal
        __ slt(AT, op1, op2);
        __ movz(dst, src, AT);
        break;

      case 0x05: //less
        __ slt(AT, op1, op2);
        __ movn(dst, src, AT);
        break;

      case 0x06: //less_equal
        __ slt(AT, op2, op1);
        __ movz(dst, src, AT);
        break;

      default:
        Unimplemented();
    }
  %}

  ins_pipe( pipe_slow );
%}
// Conditional move of a long register selected by a signed 32-bit integer
// comparison.  Condition goes into AT; the move is branch-free via
// movz (move if AT == 0) / movn (move if AT != 0).
instruct cmovL_cmpI_reg_reg(mRegL dst, mRegL src, mRegI tmp1, mRegI tmp2, cmpOp cop ) %{
  match(Set dst (CMoveL (Binary cop (CmpI tmp1 tmp2)) (Binary dst src)));
  ins_cost(80);
  format %{
    "CMP$cop $tmp1, $tmp2\t @cmovL_cmpI_reg_reg\n"
    "\tCMOV $dst,$src \t @cmovL_cmpI_reg_reg"
  %}

  ins_encode %{
    Register op1 = $tmp1$$Register;
    Register op2 = $tmp2$$Register;
    Register dst = as_Register($dst$$reg);
    Register src = as_Register($src$$reg);
    int flag = $cop$$cmpcode;

    switch(flag)
    {
      case 0x01: //equal
        // 32-bit subtraction: zero iff the int operands are equal.
        __ subu32(AT, op1, op2);
        __ movz(dst, src, AT);
        break;

      case 0x02: //not_equal
        __ subu32(AT, op1, op2);
        __ movn(dst, src, AT);
        break;

      case 0x03: //great
        // AT = (op2 < op1) signed; move when op1 > op2.
        __ slt(AT, op2, op1);
        __ movn(dst, src, AT);
        break;

      case 0x04: //great_equal
        __ slt(AT, op1, op2);
        __ movz(dst, src, AT);
        break;

      case 0x05: //less
        __ slt(AT, op1, op2);
        __ movn(dst, src, AT);
        break;

      case 0x06: //less_equal
        __ slt(AT, op2, op1);
        __ movz(dst, src, AT);
        break;

      default:
        Unimplemented();
    }
  %}

  ins_pipe( pipe_slow );
%}
// Conditional move of a long register selected by a signed 64-bit long
// comparison.  Uses the full-width subu/slt (64-bit in this port's naming,
// cf. subu32 for the 32-bit variant) and branch-free movz/movn.
instruct cmovL_cmpL_reg_reg(mRegL dst, mRegL src, mRegL tmp1, mRegL tmp2, cmpOp cop ) %{
  match(Set dst (CMoveL (Binary cop (CmpL tmp1 tmp2)) (Binary dst src)));
  ins_cost(80);
  format %{
    "CMP$cop $tmp1, $tmp2\t @cmovL_cmpL_reg_reg\n"
    "\tCMOV $dst,$src \t @cmovL_cmpL_reg_reg"
  %}
  ins_encode %{
    Register opr1 = as_Register($tmp1$$reg);
    Register opr2 = as_Register($tmp2$$reg);
    Register dst = as_Register($dst$$reg);
    Register src = as_Register($src$$reg);
    int flag = $cop$$cmpcode;

    switch(flag)
    {
      case 0x01: //equal
        // Subtraction result is zero iff equal; safe for equality even
        // if the subtraction wraps.
        __ subu(AT, opr1, opr2);
        __ movz(dst, src, AT);
        break;

      case 0x02: //not_equal
        __ subu(AT, opr1, opr2);
        __ movn(dst, src, AT);
        break;

      case 0x03: //greater
        // slt performs a signed 64-bit compare; no overflow hazard.
        __ slt(AT, opr2, opr1);
        __ movn(dst, src, AT);
        break;

      case 0x04: //greater_equal
        __ slt(AT, opr1, opr2);
        __ movz(dst, src, AT);
        break;

      case 0x05: //less
        __ slt(AT, opr1, opr2);
        __ movn(dst, src, AT);
        break;

      case 0x06: //less_equal
        __ slt(AT, opr2, opr1);
        __ movz(dst, src, AT);
        break;

      default:
        Unimplemented();
    }
  %}

  ins_pipe( pipe_slow );
%}
// Conditional move of a long register selected by an *unsigned* comparison
// of two narrow-oop (compressed pointer) registers; hence cmpOpU and sltu.
// Condition in AT, branch-free move via movz/movn.
instruct cmovL_cmpN_reg_reg(mRegL dst, mRegL src, mRegN tmp1, mRegN tmp2, cmpOpU cop ) %{
  match(Set dst (CMoveL (Binary cop (CmpN tmp1 tmp2)) (Binary dst src)));
  ins_cost(80);
  format %{
    "CMPU$cop $tmp1,$tmp2\t @cmovL_cmpN_reg_reg\n\t"
    "CMOV $dst,$src\t @cmovL_cmpN_reg_reg"
  %}
  ins_encode %{
    Register op1 = $tmp1$$Register;
    Register op2 = $tmp2$$Register;
    Register dst = $dst$$Register;
    Register src = $src$$Register;
    int flag = $cop$$cmpcode;

    switch(flag)
    {
      case 0x01: //equal
        // 32-bit subtraction: zero iff the narrow oops are identical.
        __ subu32(AT, op1, op2);
        __ movz(dst, src, AT);
        break;

      case 0x02: //not_equal
        __ subu32(AT, op1, op2);
        __ movn(dst, src, AT);
        break;

      case 0x03: //above
        // sltu: unsigned compare, correct for compressed-oop bit patterns.
        __ sltu(AT, op2, op1);
        __ movn(dst, src, AT);
        break;

      case 0x04: //above_equal
        __ sltu(AT, op1, op2);
        __ movz(dst, src, AT);
        break;

      case 0x05: //below
        __ sltu(AT, op1, op2);
        __ movn(dst, src, AT);
        break;

      case 0x06: //below_equal
        __ sltu(AT, op2, op1);
        __ movz(dst, src, AT);
        break;

      default:
        Unimplemented();
    }
  %}

  ins_pipe( pipe_slow );
%}
// Conditional move of a long register selected by a double comparison.
// The FP compare (c.cond.d) sets the FPU condition flag; movt moves the GPR
// when the flag is true, movf when it is false -- no branch is emitted.
// NOTE(review): the choice of ordered vs unordered predicate per case
// (c_eq/c_ole/c_olt/c_ult/c_ule) determines the NaN behaviour; assumed to
// match C2's cmpOp semantics for CmpD -- verify against branchConD.
instruct cmovL_cmpD_reg_reg(mRegL dst, mRegL src, regD tmp1, regD tmp2, cmpOp cop ) %{
  match(Set dst (CMoveL (Binary cop (CmpD tmp1 tmp2)) (Binary dst src)));
  ins_cost(80);
  format %{
    "CMP$cop $tmp1, $tmp2\t @cmovL_cmpD_reg_reg\n"
    "\tCMOV $dst,$src \t @cmovL_cmpD_reg_reg"
  %}
  ins_encode %{
    FloatRegister reg_op1 = as_FloatRegister($tmp1$$reg);
    FloatRegister reg_op2 = as_FloatRegister($tmp2$$reg);
    Register dst = as_Register($dst$$reg);
    Register src = as_Register($src$$reg);

    int flag = $cop$$cmpcode;

    switch(flag)
    {
      case 0x01: //equal
        __ c_eq_d(reg_op1, reg_op2);
        __ movt(dst, src);
        break;
      case 0x02: //not_equal
        // Same predicate as 'equal', inverted via movf.
        __ c_eq_d(reg_op1, reg_op2);
        __ movf(dst, src);
        break;
      case 0x03: //greater
        // !(op1 <= op2, ordered)  ==>  op1 > op2 or unordered.
        __ c_ole_d(reg_op1, reg_op2);
        __ movf(dst, src);
        break;
      case 0x04: //greater_equal
        __ c_olt_d(reg_op1, reg_op2);
        __ movf(dst, src);
        break;
      case 0x05: //less
        __ c_ult_d(reg_op1, reg_op2);
        __ movt(dst, src);
        break;
      case 0x06: //less_equal
        __ c_ule_d(reg_op1, reg_op2);
        __ movt(dst, src);
        break;
      default:
        Unimplemented();
    }
  %}

  ins_pipe( pipe_slow );
%}
// Conditional move of a double register selected by a double comparison.
// FP registers have no movt/movf conditional-move form in this port, so the
// move is implemented with a short forward branch over the mov_d: the FP
// compare sets the condition flag, bc1f/bc1t skips the move when the
// condition does NOT hold, and the nop fills the branch delay slot.
instruct cmovD_cmpD_reg_reg(regD dst, regD src, regD tmp1, regD tmp2, cmpOp cop ) %{
  match(Set dst (CMoveD (Binary cop (CmpD tmp1 tmp2)) (Binary dst src)));
  ins_cost(200);
  format %{
    "CMP$cop $tmp1, $tmp2\t @cmovD_cmpD_reg_reg\n"
    "\tCMOV $dst,$src \t @cmovD_cmpD_reg_reg"
  %}
  ins_encode %{
    FloatRegister reg_op1 = as_FloatRegister($tmp1$$reg);
    FloatRegister reg_op2 = as_FloatRegister($tmp2$$reg);
    FloatRegister dst = as_FloatRegister($dst$$reg);
    FloatRegister src = as_FloatRegister($src$$reg);

    int flag = $cop$$cmpcode;

    // A single label suffices: exactly one case body is emitted.
    Label L;

    switch(flag)
    {
      case 0x01: //equal
        __ c_eq_d(reg_op1, reg_op2);
        __ bc1f(L);    // skip the move when not equal
        __ nop();
        __ mov_d(dst, src);
        __ bind(L);
        break;
      case 0x02: //not_equal
        //2016/4/19 aoqi: See instruct branchConD_reg_reg. The change in branchConD_reg_reg fixed a bug. It seems similar here, so I made the same change.
        __ c_eq_d(reg_op1, reg_op2);
        __ bc1t(L);    // skip the move when equal
        __ nop();
        __ mov_d(dst, src);
        __ bind(L);
        break;
      case 0x03: //greater
        // Skip when op1 <= op2 (ordered); NaN inputs fall through to the move.
        __ c_ole_d(reg_op1, reg_op2);
        __ bc1t(L);
        __ nop();
        __ mov_d(dst, src);
        __ bind(L);
        break;
      case 0x04: //greater_equal
        __ c_olt_d(reg_op1, reg_op2);
        __ bc1t(L);
        __ nop();
        __ mov_d(dst, src);
        __ bind(L);
        break;
      case 0x05: //less
        __ c_ult_d(reg_op1, reg_op2);
        __ bc1f(L);
        __ nop();
        __ mov_d(dst, src);
        __ bind(L);
        break;
      case 0x06: //less_equal
        __ c_ule_d(reg_op1, reg_op2);
        __ bc1f(L);
        __ nop();
        __ mov_d(dst, src);
        __ bind(L);
        break;
      default:
        Unimplemented();
    }
  %}

  ins_pipe( pipe_slow );
%}
// Conditional move of a float register selected by a signed 32-bit integer
// comparison.  FP destinations cannot use movz/movn, so a forward branch
// skips the mov_s when the condition does not hold; each branch is followed
// by a nop to fill the delay slot.
instruct cmovF_cmpI_reg_reg(regF dst, regF src, mRegI tmp1, mRegI tmp2, cmpOp cop ) %{
  match(Set dst (CMoveF (Binary cop (CmpI tmp1 tmp2)) (Binary dst src)));
  ins_cost(200);
  format %{
    "CMP$cop $tmp1, $tmp2\t @cmovF_cmpI_reg_reg\n"
    "\tCMOV $dst, $src \t @cmovF_cmpI_reg_reg"
  %}

  ins_encode %{
    Register op1 = $tmp1$$Register;
    Register op2 = $tmp2$$Register;
    FloatRegister dst = as_FloatRegister($dst$$reg);
    FloatRegister src = as_FloatRegister($src$$reg);
    int flag = $cop$$cmpcode;
    Label L;

    switch(flag)
    {
      case 0x01: //equal
        // Branch on the inverse condition to skip the move.
        __ bne(op1, op2, L);
        __ nop();
        __ mov_s(dst, src);
        __ bind(L);
        break;
      case 0x02: //not_equal
        __ beq(op1, op2, L);
        __ nop();
        __ mov_s(dst, src);
        __ bind(L);
        break;
      case 0x03: //great
        __ slt(AT, op2, op1);
        __ beq(AT, R0, L);
        __ nop();
        __ mov_s(dst, src);
        __ bind(L);
        break;
      case 0x04: //great_equal
        __ slt(AT, op1, op2);
        __ bne(AT, R0, L);
        __ nop();
        __ mov_s(dst, src);
        __ bind(L);
        break;
      case 0x05: //less
        __ slt(AT, op1, op2);
        __ beq(AT, R0, L);
        __ nop();
        __ mov_s(dst, src);
        __ bind(L);
        break;
      case 0x06: //less_equal
        __ slt(AT, op2, op1);
        __ bne(AT, R0, L);
        __ nop();
        __ mov_s(dst, src);
        __ bind(L);
        break;
      default:
        Unimplemented();
    }
  %}

  ins_pipe( pipe_slow );
%}
// Conditional move of a double register selected by a signed 32-bit integer
// comparison.  Same branch-over-move scheme as cmovF_cmpI_reg_reg, with
// mov_d instead of mov_s; nops fill the branch delay slots.
instruct cmovD_cmpI_reg_reg(regD dst, regD src, mRegI tmp1, mRegI tmp2, cmpOp cop ) %{
  match(Set dst (CMoveD (Binary cop (CmpI tmp1 tmp2)) (Binary dst src)));
  ins_cost(200);
  format %{
    "CMP$cop $tmp1, $tmp2\t @cmovD_cmpI_reg_reg\n"
    "\tCMOV $dst, $src \t @cmovD_cmpI_reg_reg"
  %}

  ins_encode %{
    Register op1 = $tmp1$$Register;
    Register op2 = $tmp2$$Register;
    FloatRegister dst = as_FloatRegister($dst$$reg);
    FloatRegister src = as_FloatRegister($src$$reg);
    int flag = $cop$$cmpcode;
    Label L;

    switch(flag)
    {
      case 0x01: //equal
        // Branch on the inverse condition to skip the move.
        __ bne(op1, op2, L);
        __ nop();
        __ mov_d(dst, src);
        __ bind(L);
        break;
      case 0x02: //not_equal
        __ beq(op1, op2, L);
        __ nop();
        __ mov_d(dst, src);
        __ bind(L);
        break;
      case 0x03: //great
        __ slt(AT, op2, op1);
        __ beq(AT, R0, L);
        __ nop();
        __ mov_d(dst, src);
        __ bind(L);
        break;
      case 0x04: //great_equal
        __ slt(AT, op1, op2);
        __ bne(AT, R0, L);
        __ nop();
        __ mov_d(dst, src);
        __ bind(L);
        break;
      case 0x05: //less
        __ slt(AT, op1, op2);
        __ beq(AT, R0, L);
        __ nop();
        __ mov_d(dst, src);
        __ bind(L);
        break;
      case 0x06: //less_equal
        __ slt(AT, op2, op1);
        __ bne(AT, R0, L);
        __ nop();
        __ mov_d(dst, src);
        __ bind(L);
        break;
      default:
        Unimplemented();
    }
  %}

  ins_pipe( pipe_slow );
%}
// Conditional move of a double register selected by a pointer comparison.
// Branch-over-move scheme (see cmovD_cmpI_reg_reg); nops fill the delay
// slots.
// NOTE(review): the ordered cases use signed slt on pointer values; if an
// unsigned pointer ordering is ever required here, sltu would be needed --
// verify which cmpOp codes C2 can actually emit for CmpP.
instruct cmovD_cmpP_reg_reg(regD dst, regD src, mRegP tmp1, mRegP tmp2, cmpOp cop ) %{
  match(Set dst (CMoveD (Binary cop (CmpP tmp1 tmp2)) (Binary dst src)));
  ins_cost(200);
  format %{
    "CMP$cop $tmp1, $tmp2\t @cmovD_cmpP_reg_reg\n"
    "\tCMOV $dst, $src \t @cmovD_cmpP_reg_reg"
  %}

  ins_encode %{
    Register op1 = $tmp1$$Register;
    Register op2 = $tmp2$$Register;
    FloatRegister dst = as_FloatRegister($dst$$reg);
    FloatRegister src = as_FloatRegister($src$$reg);
    int flag = $cop$$cmpcode;
    Label L;

    switch(flag)
    {
      case 0x01: //equal
        __ bne(op1, op2, L);
        __ nop();
        __ mov_d(dst, src);
        __ bind(L);
        break;
      case 0x02: //not_equal
        __ beq(op1, op2, L);
        __ nop();
        __ mov_d(dst, src);
        __ bind(L);
        break;
      case 0x03: //great
        __ slt(AT, op2, op1);
        __ beq(AT, R0, L);
        __ nop();
        __ mov_d(dst, src);
        __ bind(L);
        break;
      case 0x04: //great_equal
        __ slt(AT, op1, op2);
        __ bne(AT, R0, L);
        __ nop();
        __ mov_d(dst, src);
        __ bind(L);
        break;
      case 0x05: //less
        __ slt(AT, op1, op2);
        __ beq(AT, R0, L);
        __ nop();
        __ mov_d(dst, src);
        __ bind(L);
        break;
      case 0x06: //less_equal
        __ slt(AT, op2, op1);
        __ bne(AT, R0, L);
        __ nop();
        __ mov_d(dst, src);
        __ bind(L);
        break;
      default:
        Unimplemented();
    }
  %}

  ins_pipe( pipe_slow );
%}
//FIXME
// Conditional move of an int register selected by a float comparison.
// The single-precision FP compare (c.cond.s) sets the FPU condition flag;
// movt/movf then move the GPR branch-free on the flag being true/false.
// NOTE(review): per-case ordered/unordered predicate choice fixes the NaN
// behaviour; assumed to mirror cmovL_cmpD_reg_reg -- verify.
instruct cmovI_cmpF_reg_reg(mRegI dst, mRegI src, regF tmp1, regF tmp2, cmpOp cop ) %{
  match(Set dst (CMoveI (Binary cop (CmpF tmp1 tmp2)) (Binary dst src)));
  ins_cost(80);
  format %{
    "CMP$cop $tmp1, $tmp2\t @cmovI_cmpF_reg_reg\n"
    "\tCMOV $dst,$src \t @cmovI_cmpF_reg_reg"
  %}

  ins_encode %{
    FloatRegister reg_op1 = $tmp1$$FloatRegister;
    FloatRegister reg_op2 = $tmp2$$FloatRegister;
    Register dst = $dst$$Register;
    Register src = $src$$Register;
    int flag = $cop$$cmpcode;

    switch(flag)
    {
      case 0x01: //equal
        __ c_eq_s(reg_op1, reg_op2);
        __ movt(dst, src);
        break;
      case 0x02: //not_equal
        // Same predicate as 'equal', inverted via movf.
        __ c_eq_s(reg_op1, reg_op2);
        __ movf(dst, src);
        break;
      case 0x03: //greater
        __ c_ole_s(reg_op1, reg_op2);
        __ movf(dst, src);
        break;
      case 0x04: //greater_equal
        __ c_olt_s(reg_op1, reg_op2);
        __ movf(dst, src);
        break;
      case 0x05: //less
        __ c_ult_s(reg_op1, reg_op2);
        __ movt(dst, src);
        break;
      case 0x06: //less_equal
        __ c_ule_s(reg_op1, reg_op2);
        __ movt(dst, src);
        break;
      default:
        Unimplemented();
    }
  %}
  ins_pipe( pipe_slow );
%}
// Conditional move of a float register selected by a float comparison.
// FP destinations cannot use movt/movf, so the FP compare sets the
// condition flag and a bc1f/bc1t forward branch skips the mov_s when the
// condition does not hold; nops fill the branch delay slots.
instruct cmovF_cmpF_reg_reg(regF dst, regF src, regF tmp1, regF tmp2, cmpOp cop ) %{
  match(Set dst (CMoveF (Binary cop (CmpF tmp1 tmp2)) (Binary dst src)));
  ins_cost(200);
  format %{
    "CMP$cop $tmp1, $tmp2\t @cmovF_cmpF_reg_reg\n"
    "\tCMOV $dst,$src \t @cmovF_cmpF_reg_reg"
  %}

  ins_encode %{
    FloatRegister reg_op1 = $tmp1$$FloatRegister;
    FloatRegister reg_op2 = $tmp2$$FloatRegister;
    FloatRegister dst = $dst$$FloatRegister;
    FloatRegister src = $src$$FloatRegister;
    // One label is enough: only a single case body is emitted.
    Label L;
    int flag = $cop$$cmpcode;

    switch(flag)
    {
      case 0x01: //equal
        __ c_eq_s(reg_op1, reg_op2);
        __ bc1f(L);    // skip the move when not equal
        __ nop();
        __ mov_s(dst, src);
        __ bind(L);
        break;
      case 0x02: //not_equal
        __ c_eq_s(reg_op1, reg_op2);
        __ bc1t(L);    // skip the move when equal
        __ nop();
        __ mov_s(dst, src);
        __ bind(L);
        break;
      case 0x03: //greater
        __ c_ole_s(reg_op1, reg_op2);
        __ bc1t(L);
        __ nop();
        __ mov_s(dst, src);
        __ bind(L);
        break;
      case 0x04: //greater_equal
        __ c_olt_s(reg_op1, reg_op2);
        __ bc1t(L);
        __ nop();
        __ mov_s(dst, src);
        __ bind(L);
        break;
      case 0x05: //less
        __ c_ult_s(reg_op1, reg_op2);
        __ bc1f(L);
        __ nop();
        __ mov_s(dst, src);
        __ bind(L);
        break;
      case 0x06: //less_equal
        __ c_ule_s(reg_op1, reg_op2);
        __ bc1f(L);
        __ nop();
        __ mov_s(dst, src);
        __ bind(L);
        break;
      default:
        Unimplemented();
    }
  %}
  ins_pipe( pipe_slow );
%}
9194 // Manifest a CmpL result in an integer register. Very painful.
9195 // This is the test to avoid.
9196 instruct cmpL3_reg_reg(mRegI dst, mRegL src1, mRegL src2) %{
9197 match(Set dst (CmpL3 src1 src2));
9198 ins_cost(1000);
9199 format %{ "cmpL3 $dst, $src1, $src2 @ cmpL3_reg_reg" %}
9200 ins_encode %{
9201 Register opr1 = as_Register($src1$$reg);
9202 Register opr2 = as_Register($src2$$reg);
9203 Register dst = as_Register($dst$$reg);
9205 Label Done;
9207 __ subu(AT, opr1, opr2);
9208 __ bltz(AT, Done);
9209 __ delayed()->daddiu(dst, R0, -1);
9211 __ move(dst, 1);
9212 __ movz(dst, R0, AT);
9214 __ bind(Done);
9215 %}
9216 ins_pipe( pipe_slow );
9217 %}
//
// less_result = -1
// greater_result = 1
// equal_result = 0
// nan_result = -1
//
instruct cmpF3_reg_reg(mRegI dst, regF src1, regF src2) %{
  match(Set dst (CmpF3 src1 src2));
  ins_cost(1000);
  format %{ "cmpF3 $dst, $src1, $src2 @ cmpF3_reg_reg" %}
  ins_encode %{
    FloatRegister src1 = as_FloatRegister($src1$$reg);
    FloatRegister src2 = as_FloatRegister($src2$$reg);
    Register dst = as_Register($dst$$reg);

    Label Done;

    // c.ult.s is true when src1 < src2 OR the operands are unordered
    // (NaN), which yields the required nan_result of -1 in one compare.
    __ c_ult_s(src1, src2);
    __ bc1t(Done);
    // Delay slot always executes: dst is preloaded with -1 regardless of
    // whether the branch is taken.
    __ delayed()->daddiu(dst, R0, -1);

    // Fall-through: ordered and src1 >= src2.  Assume greater (1), then
    // clear to 0 via movt if they compare equal.
    __ c_eq_s(src1, src2);
    __ move(dst, 1);
    __ movt(dst, R0);

    __ bind(Done);
  %}
  ins_pipe( pipe_slow );
%}
// Manifest a CmpD result in an integer register: -1 (less or NaN),
// 0 (equal), 1 (greater).  Same scheme as cmpF3_reg_reg, double precision.
instruct cmpD3_reg_reg(mRegI dst, regD src1, regD src2) %{
  match(Set dst (CmpD3 src1 src2));
  ins_cost(1000);
  format %{ "cmpD3 $dst, $src1, $src2 @ cmpD3_reg_reg" %}
  ins_encode %{
    FloatRegister src1 = as_FloatRegister($src1$$reg);
    FloatRegister src2 = as_FloatRegister($src2$$reg);
    Register dst = as_Register($dst$$reg);

    Label Done;

    // c.ult.d: true for src1 < src2 or unordered (NaN) -- both yield -1.
    __ c_ult_d(src1, src2);
    __ bc1t(Done);
    // Delay slot always executes: preload dst = -1 either way.
    __ delayed()->daddiu(dst, R0, -1);

    // Ordered and src1 >= src2: 1 unless equal, in which case movt zeroes it.
    __ c_eq_d(src1, src2);
    __ move(dst, 1);
    __ movt(dst, R0);

    __ bind(Done);
  %}
  ins_pipe( pipe_slow );
%}
instruct clear_array(mRegL cnt, mRegP base, Universe dummy) %{
  match(Set dummy (ClearArray cnt base));
  format %{ "CLEAR_ARRAY base = $base, cnt = $cnt # Clear doublewords" %}
  ins_encode %{
    //Assume cnt is the number of bytes in an array to be cleared,
    //and base points to the starting address of the array.
    // NOTE(review): the comment above contradicts the loop below and the
    // 2012/9/21 note -- the loop stores one doubleword (sd) per count and
    // advances by wordSize, so $cnt is in 8-byte words, not bytes.
    Register base = $base$$Register;
    Register num = $cnt$$Register;
    Label Loop, done;

    /* 2012/9/21 Jin: according to X86, $cnt is caculated by doublewords(8 bytes) */
    __ move(T9, num); /* T9 = words */
    __ beq(T9, R0, done);     // nothing to clear
    __ nop();                 // branch delay slot
    __ move(AT, base);        // AT = running store address

    // Store zero, advance one word, decrement the remaining count.
    __ bind(Loop);
    __ sd(R0, Address(AT, 0));
    __ daddi(AT, AT, wordSize);
    __ daddi(T9, T9, -1);
    __ bne(T9, R0, Loop);
    __ delayed()->nop();
    __ bind(done);
  %}
  ins_pipe( pipe_slow );
%}
// Lexicographic UTF-16 string compare: returns the difference of the first
// mismatching characters, or the difference of the lengths when one string
// is a prefix of the other.
instruct string_compare(a4_RegP str1, mA5RegI cnt1, a6_RegP str2, mA7RegI cnt2, no_Ax_mRegI result) %{
  match(Set result (StrComp (Binary str1 cnt1) (Binary str2 cnt2)));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2);

  format %{ "String Compare $str1[len: $cnt1], $str2[len: $cnt2] -> $result @ string_compare" %}
  ins_encode %{
    // Get the first character position in both strings
    // [8] char array, [12] offset, [16] count
    Register str1 = $str1$$Register;
    Register str2 = $str2$$Register;
    Register cnt1 = $cnt1$$Register;
    Register cnt2 = $cnt2$$Register;
    Register result = $result$$Register;

    Label L, Loop, haveResult, done;

    // compute the difference of the two lengths (in result); this is the
    // final answer when the shorter string is a prefix of the longer one
    __ subu(result, cnt1, cnt2); // result holds the difference of two lengths

    // compute the shorter length (in cnt1)
    __ slt(AT, cnt2, cnt1);
    __ movn(cnt1, cnt2, AT);

    // Now the shorter length is in cnt1 and cnt2 can be used as a tmp register
    __ bind(Loop);                        // Loop begin
    __ beq(cnt1, R0, done);
    // Delay slot: preload the current char of str1 (harmless on exit).
    __ delayed()->lhu(AT, str1, 0);;

    // compare current character
    __ lhu(cnt2, str2, 0);
    __ bne(AT, cnt2, haveResult);
    // NOTE(review): addi is a 32-bit add used on 64-bit pointers here --
    // presumably relies on canonical sign-extended addresses; confirm
    // daddiu is not required.
    __ delayed()->addi(str1, str1, 2);
    __ addi(str2, str2, 2);
    __ b(Loop);
    __ delayed()->addi(cnt1, cnt1, -1);   // Loop end

    // Mismatch: result is the difference of the two differing chars.
    __ bind(haveResult);
    __ subu(result, AT, cnt2);

    __ bind(done);
  %}

  ins_pipe( pipe_slow );
%}
// intrinsic optimization
// UTF-16 string equality over cnt chars: result = 1 when equal, 0 on the
// first mismatch.  Relies on branch delay slots to maintain result: the
// instruction after each branch executes whether or not the branch is taken.
instruct string_equals(a4_RegP str1, a5_RegP str2, mA6RegI cnt, mA7RegI temp, no_Ax_mRegI result) %{
  match(Set result (StrEquals (Binary str1 str2) cnt));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt, KILL temp);

  format %{ "String Equal $str1, $str2, len:$cnt tmp:$temp -> $result @ string_equals" %}
  ins_encode %{
    // Get the first character position in both strings
    // [8] char array, [12] offset, [16] count
    Register str1 = $str1$$Register;
    Register str2 = $str2$$Register;
    Register cnt = $cnt$$Register;
    Register tmp = $temp$$Register;
    Register result = $result$$Register;

    Label Loop, done;

    // Identical char[] pointers are trivially equal; the delay slot sets
    // result = 1 regardless of whether the branch is taken.
    __ beq(str1, str2, done); // same char[] ?
    __ daddiu(result, R0, 1);

    __ bind(Loop);            // Loop begin
    __ beq(cnt, R0, done);
    // Delay slot (always executed): re-assert result = 1 each iteration so
    // exhausting the count exits reporting equality.
    __ daddiu(result, R0, 1); // count == 0

    // compare current character
    __ lhu(AT, str1, 0);;
    __ lhu(tmp, str2, 0);
    __ bne(AT, tmp, done);
    // Delay slot: preload result = 0 for the mismatch exit.
    __ delayed()->daddi(result, R0, 0);
    __ addi(str1, str1, 2);
    __ addi(str2, str2, 2);
    __ b(Loop);
    __ delayed()->addi(cnt, cnt, -1); // Loop end

    __ bind(done);
  %}

  ins_pipe( pipe_slow );
%}
9386 //----------Arithmetic Instructions-------------------------------------------
9387 //----------Addition Instructions---------------------------------------------
9388 instruct addI_Reg_Reg(mRegI dst, mRegI src1, mRegI src2) %{
9389 match(Set dst (AddI src1 src2));
9391 format %{ "add $dst, $src1, $src2 #@addI_Reg_Reg" %}
9392 ins_encode %{
9393 Register dst = $dst$$Register;
9394 Register src1 = $src1$$Register;
9395 Register src2 = $src2$$Register;
9396 __ addu32(dst, src1, src2);
9397 %}
9398 ins_pipe( ialu_regI_regI );
9399 %}
9401 instruct addI_Reg_imm(mRegI dst, mRegI src1, immI src2) %{
9402 match(Set dst (AddI src1 src2));
9404 format %{ "add $dst, $src1, $src2 #@addI_Reg_imm" %}
9405 ins_encode %{
9406 Register dst = $dst$$Register;
9407 Register src1 = $src1$$Register;
9408 int imm = $src2$$constant;
9410 if(Assembler::is_simm16(imm)) {
9411 __ addiu32(dst, src1, imm);
9412 } else {
9413 __ move(AT, imm);
9414 __ addu32(dst, src1, AT);
9415 }
9416 %}
9417 ins_pipe( ialu_regI_regI );
9418 %}
9420 instruct addP_reg_reg(mRegP dst, mRegP src1, mRegL src2) %{
9421 match(Set dst (AddP src1 src2));
9423 format %{ "dadd $dst, $src1, $src2 #@addP_reg_reg" %}
9425 ins_encode %{
9426 Register dst = $dst$$Register;
9427 Register src1 = $src1$$Register;
9428 Register src2 = $src2$$Register;
9429 __ daddu(dst, src1, src2);
9430 %}
9432 ins_pipe( ialu_regI_regI );
9433 %}
9435 instruct addP_reg_reg_convI2L(mRegP dst, mRegP src1, mRegI src2) %{
9436 match(Set dst (AddP src1 (ConvI2L src2)));
9438 format %{ "dadd $dst, $src1, $src2 #@addP_reg_reg_convI2L" %}
9440 ins_encode %{
9441 Register dst = $dst$$Register;
9442 Register src1 = $src1$$Register;
9443 Register src2 = $src2$$Register;
9444 __ daddu(dst, src1, src2);
9445 %}
9447 ins_pipe( ialu_regI_regI );
9448 %}
9450 instruct addP_reg_imm(mRegP dst, mRegP src1, immL src2) %{
9451 match(Set dst (AddP src1 src2));
9453 format %{ "daddi $dst, $src1, $src2 #@addP_reg_imm" %}
9454 ins_encode %{
9455 Register src1 = $src1$$Register;
9456 long src2 = $src2$$constant;
9457 Register dst = $dst$$Register;
9459 if(Assembler::is_simm16(src2)) {
9460 __ daddiu(dst, src1, src2);
9461 } else {
9462 __ set64(AT, src2);
9463 __ daddu(dst, src1, AT);
9464 }
9465 %}
9466 ins_pipe( ialu_regI_imm16 );
9467 %}
9469 // Add Long Register with Register
9470 instruct addL_Reg_Reg(mRegL dst, mRegL src1, mRegL src2) %{
9471 match(Set dst (AddL src1 src2));
9472 ins_cost(200);
9473 format %{ "ADD $dst, $src1, $src2 #@addL_Reg_Reg\t" %}
9475 ins_encode %{
9476 Register dst_reg = as_Register($dst$$reg);
9477 Register src1_reg = as_Register($src1$$reg);
9478 Register src2_reg = as_Register($src2$$reg);
9480 __ daddu(dst_reg, src1_reg, src2_reg);
9481 %}
9483 ins_pipe( ialu_regL_regL );
9484 %}
9486 instruct addL_Reg_imm(mRegL dst, mRegL src1, immL16 src2)
9487 %{
9488 match(Set dst (AddL src1 src2));
9490 format %{ "ADD $dst, $src1, $src2 #@addL_Reg_imm " %}
9491 ins_encode %{
9492 Register dst_reg = as_Register($dst$$reg);
9493 Register src1_reg = as_Register($src1$$reg);
9494 int src2_imm = $src2$$constant;
9496 __ daddiu(dst_reg, src1_reg, src2_imm);
9497 %}
9499 ins_pipe( ialu_regL_regL );
9500 %}
9502 instruct addL_RegI2L_imm(mRegL dst, mRegI src1, immL16 src2)
9503 %{
9504 match(Set dst (AddL (ConvI2L src1) src2));
9506 format %{ "ADD $dst, $src1, $src2 #@addL_RegI2L_imm " %}
9507 ins_encode %{
9508 Register dst_reg = as_Register($dst$$reg);
9509 Register src1_reg = as_Register($src1$$reg);
9510 int src2_imm = $src2$$constant;
9512 __ daddiu(dst_reg, src1_reg, src2_imm);
9513 %}
9515 ins_pipe( ialu_regL_regL );
9516 %}
9518 instruct addL_RegI2L_Reg(mRegL dst, mRegI src1, mRegL src2) %{
9519 match(Set dst (AddL (ConvI2L src1) src2));
9520 ins_cost(200);
9521 format %{ "ADD $dst, $src1, $src2 #@addL_RegI2L_Reg\t" %}
9523 ins_encode %{
9524 Register dst_reg = as_Register($dst$$reg);
9525 Register src1_reg = as_Register($src1$$reg);
9526 Register src2_reg = as_Register($src2$$reg);
9528 __ daddu(dst_reg, src1_reg, src2_reg);
9529 %}
9531 ins_pipe( ialu_regL_regL );
9532 %}
9534 instruct addL_RegI2L_RegI2L(mRegL dst, mRegI src1, mRegI src2) %{
9535 match(Set dst (AddL (ConvI2L src1) (ConvI2L src2)));
9536 ins_cost(200);
9537 format %{ "ADD $dst, $src1, $src2 #@addL_RegI2L_RegI2L\t" %}
9539 ins_encode %{
9540 Register dst_reg = as_Register($dst$$reg);
9541 Register src1_reg = as_Register($src1$$reg);
9542 Register src2_reg = as_Register($src2$$reg);
9544 __ daddu(dst_reg, src1_reg, src2_reg);
9545 %}
9547 ins_pipe( ialu_regL_regL );
9548 %}
9550 instruct addL_Reg_RegI2L(mRegL dst, mRegL src1, mRegI src2) %{
9551 match(Set dst (AddL src1 (ConvI2L src2)));
9552 ins_cost(200);
9553 format %{ "ADD $dst, $src1, $src2 #@addL_Reg_RegI2L\t" %}
9555 ins_encode %{
9556 Register dst_reg = as_Register($dst$$reg);
9557 Register src1_reg = as_Register($src1$$reg);
9558 Register src2_reg = as_Register($src2$$reg);
9560 __ daddu(dst_reg, src1_reg, src2_reg);
9561 %}
9563 ins_pipe( ialu_regL_regL );
9564 %}
9566 //----------Subtraction Instructions-------------------------------------------
9567 // Integer Subtraction Instructions
9568 instruct subI_Reg_Reg(mRegI dst, mRegI src1, mRegI src2) %{
9569 match(Set dst (SubI src1 src2));
9570 ins_cost(100);
9572 format %{ "sub $dst, $src1, $src2 #@subI_Reg_Reg" %}
9573 ins_encode %{
9574 Register dst = $dst$$Register;
9575 Register src1 = $src1$$Register;
9576 Register src2 = $src2$$Register;
9577 __ subu32(dst, src1, src2);
9578 %}
9579 ins_pipe( ialu_regI_regI );
9580 %}
9582 instruct subI_Reg_immI16_sub(mRegI dst, mRegI src1, immI16_sub src2) %{
9583 match(Set dst (SubI src1 src2));
9584 ins_cost(80);
9586 format %{ "sub $dst, $src1, $src2 #@subI_Reg_immI16_sub" %}
9587 ins_encode %{
9588 Register dst = $dst$$Register;
9589 Register src1 = $src1$$Register;
9590 __ addiu32(dst, src1, -1 * $src2$$constant);
9591 %}
9592 ins_pipe( ialu_regI_regI );
9593 %}
9595 instruct negI_Reg(mRegI dst, immI0 zero, mRegI src) %{
9596 match(Set dst (SubI zero src));
9597 ins_cost(80);
9599 format %{ "neg $dst, $src #@negI_Reg" %}
9600 ins_encode %{
9601 Register dst = $dst$$Register;
9602 Register src = $src$$Register;
9603 __ subu32(dst, R0, src);
9604 %}
9605 ins_pipe( ialu_regI_regI );
9606 %}
9608 instruct negL_Reg(mRegL dst, immL0 zero, mRegL src) %{
9609 match(Set dst (SubL zero src));
9610 ins_cost(80);
9612 format %{ "neg $dst, $src #@negL_Reg" %}
9613 ins_encode %{
9614 Register dst = $dst$$Register;
9615 Register src = $src$$Register;
9616 __ subu(dst, R0, src);
9617 %}
9618 ins_pipe( ialu_regI_regI );
9619 %}
9621 instruct subL_Reg_immL16_sub(mRegL dst, mRegL src1, immL16_sub src2) %{
9622 match(Set dst (SubL src1 src2));
9623 ins_cost(80);
9625 format %{ "sub $dst, $src1, $src2 #@subL_Reg_immL16_sub" %}
9626 ins_encode %{
9627 Register dst = $dst$$Register;
9628 Register src1 = $src1$$Register;
9629 __ daddiu(dst, src1, -1 * $src2$$constant);
9630 %}
9631 ins_pipe( ialu_regI_regI );
9632 %}
9634 // Subtract Long Register with Register.
9635 instruct subL_Reg_Reg(mRegL dst, mRegL src1, mRegL src2) %{
9636 match(Set dst (SubL src1 src2));
9637 ins_cost(100);
9638 format %{ "SubL $dst, $src1, $src2 @ subL_Reg_Reg" %}
9639 ins_encode %{
9640 Register dst = as_Register($dst$$reg);
9641 Register src1 = as_Register($src1$$reg);
9642 Register src2 = as_Register($src2$$reg);
9644 __ subu(dst, src1, src2);
9645 %}
9646 ins_pipe( ialu_regL_regL );
9647 %}
9649 instruct subL_Reg_RegI2L(mRegL dst, mRegL src1, mRegI src2) %{
9650 match(Set dst (SubL src1 (ConvI2L src2)));
9651 ins_cost(100);
9652 format %{ "SubL $dst, $src1, $src2 @ subL_Reg_RegI2L" %}
9653 ins_encode %{
9654 Register dst = as_Register($dst$$reg);
9655 Register src1 = as_Register($src1$$reg);
9656 Register src2 = as_Register($src2$$reg);
9658 __ subu(dst, src1, src2);
9659 %}
9660 ins_pipe( ialu_regL_regL );
9661 %}
9663 instruct subL_RegI2L_Reg(mRegL dst, mRegI src1, mRegL src2) %{
9664 match(Set dst (SubL (ConvI2L src1) src2));
9665 ins_cost(200);
9666 format %{ "SubL $dst, $src1, $src2 @ subL_RegI2L_Reg" %}
9667 ins_encode %{
9668 Register dst = as_Register($dst$$reg);
9669 Register src1 = as_Register($src1$$reg);
9670 Register src2 = as_Register($src2$$reg);
9672 __ subu(dst, src1, src2);
9673 %}
9674 ins_pipe( ialu_regL_regL );
9675 %}
9677 instruct subL_RegI2L_RegI2L(mRegL dst, mRegI src1, mRegI src2) %{
9678 match(Set dst (SubL (ConvI2L src1) (ConvI2L src2)));
9679 ins_cost(200);
9680 format %{ "SubL $dst, $src1, $src2 @ subL_RegI2L_RegI2L" %}
9681 ins_encode %{
9682 Register dst = as_Register($dst$$reg);
9683 Register src1 = as_Register($src1$$reg);
9684 Register src2 = as_Register($src2$$reg);
9686 __ subu(dst, src1, src2);
9687 %}
9688 ins_pipe( ialu_regL_regL );
9689 %}
// Integer MOD with Register
instruct modI_Reg_Reg(mRegI dst, mRegI src1, mRegI src2) %{
  match(Set dst (ModI src1 src2));
  ins_cost(300);
  format %{ "modi $dst, $src1, $src2 @ modI_Reg_Reg" %}
  ins_encode %{
    Register dst = $dst$$Register;
    Register src1 = $src1$$Register;
    Register src2 = $src2$$Register;

    // The Loongson gsmod path is intentionally disabled (if (0)) but kept
    // for reference; see the 2016.08.10 note below.
    //if (UseLoongsonISA) {
    if (0) {
      // 2016.08.10
      // Experiments show that gsmod is slower that div+mfhi.
      // So I just disable it here.
      __ gsmod(dst, src1, src2);
    } else {
      // Classic MIPS: div leaves the quotient in LO and the remainder in HI.
      __ div(src1, src2);
      __ mfhi(dst);
    }
    // NOTE(review): unlike divI_Reg_Reg there is no teq trap for a zero
    // divisor here -- verify division-by-zero is guarded elsewhere.
  %}

  //ins_pipe( ialu_mod );
  ins_pipe( ialu_regI_regI );
%}
// Long MOD: remainder of a signed 64-bit division.
instruct modL_reg_reg(mRegL dst, mRegL src1, mRegL src2) %{
  match(Set dst (ModL src1 src2));
  format %{ "modL $dst, $src1, $src2 @modL_reg_reg" %}

  ins_encode %{
    Register dst = as_Register($dst$$reg);
    Register op1 = as_Register($src1$$reg);
    Register op2 = as_Register($src2$$reg);

    if (UseLoongsonISA) {
      // Loongson three-operand remainder: writes dst directly.
      __ gsdmod(dst, op1, op2);
    } else {
      // Classic MIPS64: ddiv leaves the remainder in HI.
      __ ddiv(op1, op2);
      __ mfhi(dst);
    }
  %}
  ins_pipe( pipe_slow );
%}
9736 instruct mulI_Reg_Reg(mRegI dst, mRegI src1, mRegI src2) %{
9737 match(Set dst (MulI src1 src2));
9739 ins_cost(300);
9740 format %{ "mul $dst, $src1, $src2 @ mulI_Reg_Reg" %}
9741 ins_encode %{
9742 Register src1 = $src1$$Register;
9743 Register src2 = $src2$$Register;
9744 Register dst = $dst$$Register;
9746 __ mul(dst, src1, src2);
9747 %}
9748 ins_pipe( ialu_mult );
9749 %}
// Fused multiply-add: dst = src1 * src2 + src3 (int).
instruct maddI_Reg_Reg(mRegI dst, mRegI src1, mRegI src2, mRegI src3) %{
  match(Set dst (AddI (MulI src1 src2) src3));

  ins_cost(999);
  format %{ "madd $dst, $src1 * $src2 + $src3 #@maddI_Reg_Reg" %}
  ins_encode %{
    Register src1 = $src1$$Register;
    Register src2 = $src2$$Register;
    Register src3 = $src3$$Register;
    Register dst = $dst$$Register;

    // Seed LO with the addend, then madd accumulates src1*src2 into the
    // HI:LO pair.  Only the low 32 bits are read back via mflo, so the
    // stale HI contents cannot affect the int-sized result.
    __ mtlo(src3);
    __ madd(src1, src2);
    __ mflo(dst);
  %}
  ins_pipe( ialu_mult );
%}
// Signed 32-bit integer division.
instruct divI_Reg_Reg(mRegI dst, mRegI src1, mRegI src2) %{
  match(Set dst (DivI src1 src2));

  ins_cost(300);
  format %{ "div $dst, $src1, $src2 @ divI_Reg_Reg" %}
  ins_encode %{
    Register src1 = $src1$$Register;
    Register src2 = $src2$$Register;
    Register dst = $dst$$Register;

    /* 2012/4/21 Jin: In MIPS, div does not cause exception.
       We must trap an exception manually. */
    // teq raises a trap (code 0x7) when src2 == R0, i.e. divide by zero.
    __ teq(R0, src2, 0x7);

    if (UseLoongsonISA) {
      // Loongson three-operand divide: quotient written directly to dst.
      __ gsdiv(dst, src1, src2);
    } else {
      __ div(src1, src2);

      // NOTE(review): the two nops presumably cover the HI/LO result
      // latency before mflo -- confirm against the target pipeline rules.
      __ nop();
      __ nop();
      __ mflo(dst);
    }
  %}
  ins_pipe( ialu_mod );
%}
9796 instruct divF_Reg_Reg(regF dst, regF src1, regF src2) %{
9797 match(Set dst (DivF src1 src2));
9799 ins_cost(300);
9800 format %{ "divF $dst, $src1, $src2 @ divF_Reg_Reg" %}
9801 ins_encode %{
9802 FloatRegister src1 = $src1$$FloatRegister;
9803 FloatRegister src2 = $src2$$FloatRegister;
9804 FloatRegister dst = $dst$$FloatRegister;
9806 /* Here do we need to trap an exception manually ? */
9807 __ div_s(dst, src1, src2);
9808 %}
9809 ins_pipe( pipe_slow );
9810 %}
instruct divD_Reg_Reg(regD dst, regD src1, regD src2) %{
  match(Set dst (DivD src1 src2));

  ins_cost(300);
  format %{ "divD $dst, $src1, $src2 @ divD_Reg_Reg" %}
  ins_encode %{
    // Double-precision FP divide. No manual zero-divisor trap is emitted
    // here (the original left that as an open question).
    __ div_d($dst$$FloatRegister, $src1$$FloatRegister, $src2$$FloatRegister);
  %}
  ins_pipe( pipe_slow );
%}
// 64-bit multiply: dst = src1 * src2 (low 64 bits).
9828 instruct mulL_reg_reg(mRegL dst, mRegL src1, mRegL src2) %{
9829 match(Set dst (MulL src1 src2));
9830 format %{ "mulL $dst, $src1, $src2 @mulL_reg_reg" %}
9831 ins_encode %{
9832 Register dst = as_Register($dst$$reg);
9833 Register op1 = as_Register($src1$$reg);
9834 Register op2 = as_Register($src2$$reg);
9836 if (UseLoongsonISA) {
// Loongson three-operand form avoids the HI/LO round trip.
9837 __ gsdmult(dst, op1, op2);
9838 } else {
9839 __ dmult(op1, op2);
9840 __ mflo(dst);
9841 }
9842 %}
9843 ins_pipe( pipe_slow );
9844 %}
// 64-bit multiply with an int operand widened by ConvI2L.
// NOTE(review): no explicit sign-extension of src2 is emitted — this relies
// on int values being kept sign-extended in 64-bit registers; confirm that
// invariant holds for all producers.
9846 instruct mulL_reg_regI2L(mRegL dst, mRegL src1, mRegI src2) %{
9847 match(Set dst (MulL src1 (ConvI2L src2)));
9848 format %{ "mulL $dst, $src1, $src2 @mulL_reg_regI2L" %}
9849 ins_encode %{
9850 Register dst = as_Register($dst$$reg);
9851 Register op1 = as_Register($src1$$reg);
9852 Register op2 = as_Register($src2$$reg);
9854 if (UseLoongsonISA) {
9855 __ gsdmult(dst, op1, op2);
9856 } else {
9857 __ dmult(op1, op2);
9858 __ mflo(dst);
9859 }
9860 %}
9861 ins_pipe( pipe_slow );
9862 %}
// Signed 64-bit division: dst = src1 / src2.
// NOTE(review): unlike divI_Reg_Reg above there is no teq zero-divisor trap
// here — presumably handled elsewhere (e.g. an explicit zero check before
// the divide); confirm.
9864 instruct divL_reg_reg(mRegL dst, mRegL src1, mRegL src2) %{
9865 match(Set dst (DivL src1 src2));
9866 format %{ "divL $dst, $src1, $src2 @divL_reg_reg" %}
9868 ins_encode %{
9869 Register dst = as_Register($dst$$reg);
9870 Register op1 = as_Register($src1$$reg);
9871 Register op2 = as_Register($src2$$reg);
9873 if (UseLoongsonISA) {
9874 __ gsddiv(dst, op1, op2);
9875 } else {
9876 __ ddiv(op1, op2);
9877 __ mflo(dst);
9878 }
9879 %}
9880 ins_pipe( pipe_slow );
9881 %}
instruct addF_reg_reg(regF dst, regF src1, regF src2) %{
  match(Set dst (AddF src1 src2));
  format %{ "AddF $dst, $src1, $src2 @addF_reg_reg" %}
  ins_encode %{
    // Single-precision FP add, register-to-register.
    __ add_s($dst$$FloatRegister, $src1$$FloatRegister, $src2$$FloatRegister);
  %}
  ins_pipe( fpu_regF_regF );
%}
instruct subF_reg_reg(regF dst, regF src1, regF src2) %{
  match(Set dst (SubF src1 src2));
  format %{ "SubF $dst, $src1, $src2 @subF_reg_reg" %}
  ins_encode %{
    // Single-precision FP subtract, register-to-register.
    __ sub_s($dst$$FloatRegister, $src1$$FloatRegister, $src2$$FloatRegister);
  %}
  ins_pipe( fpu_regF_regF );
%}
instruct addD_reg_reg(regD dst, regD src1, regD src2) %{
  match(Set dst (AddD src1 src2));
  format %{ "AddD $dst, $src1, $src2 @addD_reg_reg" %}
  ins_encode %{
    // Double-precision FP add, register-to-register.
    __ add_d($dst$$FloatRegister, $src1$$FloatRegister, $src2$$FloatRegister);
  %}
  ins_pipe( fpu_regF_regF );
%}
instruct subD_reg_reg(regD dst, regD src1, regD src2) %{
  match(Set dst (SubD src1 src2));
  format %{ "SubD $dst, $src1, $src2 @subD_reg_reg" %}
  ins_encode %{
    // Double-precision FP subtract, register-to-register.
    __ sub_d($dst$$FloatRegister, $src1$$FloatRegister, $src2$$FloatRegister);
  %}
  ins_pipe( fpu_regF_regF );
%}
instruct negF_reg(regF dst, regF src) %{
  match(Set dst (NegF src));
  format %{ "negF $dst, $src @negF_reg" %}
  ins_encode %{
    // Single-precision FP negate (flips the sign bit).
    __ neg_s($dst$$FloatRegister, $src$$FloatRegister);
  %}
  ins_pipe( fpu_regF_regF );
%}
instruct negD_reg(regD dst, regD src) %{
  match(Set dst (NegD src));
  format %{ "negD $dst, $src @negD_reg" %}
  ins_encode %{
    // Double-precision FP negate (flips the sign bit).
    __ neg_d($dst$$FloatRegister, $src$$FloatRegister);
  %}
  ins_pipe( fpu_regF_regF );
%}
instruct mulF_reg_reg(regF dst, regF src1, regF src2) %{
  match(Set dst (MulF src1 src2));
  format %{ "MULF $dst, $src1, $src2 @mulF_reg_reg" %}
  ins_encode %{
    // Single-precision FP multiply, register-to-register.
    __ mul_s($dst$$FloatRegister, $src1$$FloatRegister, $src2$$FloatRegister);
  %}
  ins_pipe( fpu_regF_regF );
%}
// Fused single-precision multiply-add: dst = src1 * src2 + src3.
// NOTE(review): the huge ins_cost effectively disables this rule — the fused
// madd_s presumably rounds once where separate mul+add round twice, which
// would change strictfp-visible results; confirm that is the compatibility
// issue referenced below.
9972 instruct maddF_reg_reg(regF dst, regF src1, regF src2, regF src3) %{
9973 match(Set dst (AddF (MulF src1 src2) src3));
9974 // For compatibility reason (e.g. on the Loongson platform), disable this guy.
9975 ins_cost(44444);
9976 format %{ "maddF $dst, $src1, $src2, $src3 @maddF_reg_reg" %}
9977 ins_encode %{
9978 FloatRegister src1 = $src1$$FloatRegister;
9979 FloatRegister src2 = $src2$$FloatRegister;
9980 FloatRegister src3 = $src3$$FloatRegister;
9981 FloatRegister dst = $dst$$FloatRegister;
9983 __ madd_s(dst, src1, src2, src3);
9984 %}
9985 ins_pipe( fpu_regF_regF );
9986 %}
// Multiply two double-precision floating point numbers.
instruct mulD_reg_reg(regD dst, regD src1, regD src2) %{
  match(Set dst (MulD src1 src2));
  format %{ "MULD $dst, $src1, $src2 @mulD_reg_reg" %}
  ins_encode %{
    __ mul_d($dst$$FloatRegister, $src1$$FloatRegister, $src2$$FloatRegister);
  %}
  ins_pipe( fpu_regF_regF );
%}
// Fused double-precision multiply-add: dst = src1 * src2 + src3.
// NOTE(review): disabled via the huge ins_cost, mirroring maddF_reg_reg —
// presumably because fused rounding differs from separate mul+add; confirm.
10002 instruct maddD_reg_reg(regD dst, regD src1, regD src2, regD src3) %{
10003 match(Set dst (AddD (MulD src1 src2) src3));
10004 // For compatibility reason (e.g. on the Loongson platform), disable this guy.
10005 ins_cost(44444);
10006 format %{ "maddD $dst, $src1, $src2, $src3 @maddD_reg_reg" %}
10007 ins_encode %{
10008 FloatRegister src1 = $src1$$FloatRegister;
10009 FloatRegister src2 = $src2$$FloatRegister;
10010 FloatRegister src3 = $src3$$FloatRegister;
10011 FloatRegister dst = $dst$$FloatRegister;
10013 __ madd_d(dst, src1, src2, src3);
10014 %}
10015 ins_pipe( fpu_regF_regF );
10016 %}
instruct absF_reg(regF dst, regF src) %{
  match(Set dst (AbsF src));
  ins_cost(100);
  format %{ "absF $dst, $src @absF_reg" %}
  ins_encode %{
    // Single-precision absolute value (clears the sign bit).
    __ abs_s($dst$$FloatRegister, $src$$FloatRegister);
  %}
  ins_pipe( fpu_regF_regF );
%}
10032 // intrinsics for math_native.
10033 // AbsD SqrtD CosD SinD TanD LogD Log10D
instruct absD_reg(regD dst, regD src) %{
  match(Set dst (AbsD src));
  ins_cost(100);
  format %{ "absD $dst, $src @absD_reg" %}
  ins_encode %{
    // Double-precision absolute value (clears the sign bit).
    __ abs_d($dst$$FloatRegister, $src$$FloatRegister);
  %}
  ins_pipe( fpu_regF_regF );
%}
instruct sqrtD_reg(regD dst, regD src) %{
  match(Set dst (SqrtD src));
  ins_cost(100);
  format %{ "SqrtD $dst, $src @sqrtD_reg" %}
  ins_encode %{
    // Double-precision square root.
    __ sqrt_d($dst$$FloatRegister, $src$$FloatRegister);
  %}
  ins_pipe( fpu_regF_regF );
%}
// Single-precision square root. Java exposes only Math.sqrt(double), so the
// ideal graph is ConvD2F(SqrtD(ConvF2D src)); that round trip is strength-
// reduced here to one sqrt_s on the float value directly.
10061 instruct sqrtF_reg(regF dst, regF src) %{
10062 match(Set dst (ConvD2F (SqrtD (ConvF2D src))));
10063 ins_cost(100);
10064 format %{ "SqrtF $dst, $src @sqrtF_reg" %}
10065 ins_encode %{
10066 FloatRegister src = as_FloatRegister($src$$reg);
10067 FloatRegister dst = as_FloatRegister($dst$$reg);
10069 __ sqrt_s(dst, src);
10070 %}
10071 ins_pipe( fpu_regF_regF );
10072 %}
10073 //----------------------------------Logical Instructions----------------------
10074 //__________________________________Integer Logical Instructions-------------
10076 //And Instructions
10077 // And Register with Immediate
// General 32-bit AND with an arbitrary immediate: materialize the constant
// in the scratch register AT, then AND. The cheaper rule below handles
// immediates that fit andi's zero-extended 16-bit field.
10078 instruct andI_Reg_immI(mRegI dst, mRegI src1, immI src2) %{
10079 match(Set dst (AndI src1 src2));
10081 format %{ "and $dst, $src1, $src2 #@andI_Reg_immI" %}
10082 ins_encode %{
10083 Register dst = $dst$$Register;
10084 Register src = $src1$$Register;
10085 int val = $src2$$constant;
10087 __ move(AT, val);
10088 __ andr(dst, src, AT);
10089 %}
10090 ins_pipe( ialu_regI_regI );
10091 %}
instruct andI_Reg_imm_0_65535(mRegI dst, mRegI src1, immI_0_65535 src2) %{
  match(Set dst (AndI src1 src2));
  ins_cost(60);

  format %{ "and $dst, $src1, $src2 #@andI_Reg_imm_0_65535" %}
  ins_encode %{
    // Immediate fits andi's zero-extended 16-bit field; no scratch needed.
    __ andi($dst$$Register, $src1$$Register, $src2$$constant);
  %}
  ins_pipe( ialu_regI_regI );
%}
// AND with a low-bit mask of the form (1 << size) - 1, implemented as a
// bit-field extract: ext copies the low `size` bits and zeroes the rest.
// is_int_mask returns the width of the mask in bits.
10108 instruct andI_Reg_immI_nonneg_mask(mRegI dst, mRegI src1, immI_nonneg_mask mask) %{
10109 match(Set dst (AndI src1 mask));
10110 ins_cost(60);
10112 format %{ "and $dst, $src1, $mask #@andI_Reg_immI_nonneg_mask" %}
10113 ins_encode %{
10114 Register dst = $dst$$Register;
10115 Register src = $src1$$Register;
10116 int size = Assembler::is_int_mask($mask$$constant);
10118 __ ext(dst, src, 0, size);
10119 %}
10120 ins_pipe( ialu_regI_regI );
10121 %}
// 64-bit variant of the rule above, using the doubleword extract dext.
10123 instruct andL_Reg_immL_nonneg_mask(mRegL dst, mRegL src1, immL_nonneg_mask mask) %{
10124 match(Set dst (AndL src1 mask));
10125 ins_cost(60);
10127 format %{ "and $dst, $src1, $mask #@andL_Reg_immL_nonneg_mask" %}
10128 ins_encode %{
10129 Register dst = $dst$$Register;
10130 Register src = $src1$$Register;
10131 int size = Assembler::is_jlong_mask($mask$$constant);
10133 __ dext(dst, src, 0, size);
10134 %}
10135 ins_pipe( ialu_regI_regI );
10136 %}
instruct xorI_Reg_imm_0_65535(mRegI dst, mRegI src1, immI_0_65535 src2) %{
  match(Set dst (XorI src1 src2));
  ins_cost(60);

  format %{ "xori $dst, $src1, $src2 #@xorI_Reg_imm_0_65535" %}
  ins_encode %{
    // Immediate fits xori's zero-extended 16-bit field.
    __ xori($dst$$Register, $src1$$Register, $src2$$constant);
  %}
  ins_pipe( ialu_regI_regI );
%}
// XOR with -1 is bitwise NOT; on Loongson this is one instruction:
// gsorn(dst, R0, src) computes R0 | ~src == ~src.
10153 instruct xorI_Reg_immI_M1(mRegI dst, mRegI src1, immI_M1 M1) %{
10154 match(Set dst (XorI src1 M1));
10155 predicate(UseLoongsonISA);
10156 ins_cost(60);
10158 format %{ "xor $dst, $src1, $M1 #@xorI_Reg_immI_M1" %}
10159 ins_encode %{
10160 Register dst = $dst$$Register;
10161 Register src = $src1$$Register;
10163 __ gsorn(dst, R0, src);
10164 %}
10165 ins_pipe( ialu_regI_regI );
10166 %}
// Same NOT idiom with the source narrowed from long. No explicit truncation
// is emitted: complementing a sign-extended value stays sign-extended, so the
// register already holds a canonical int.
10168 instruct xorL2I_Reg_immI_M1(mRegI dst, mRegL src1, immI_M1 M1) %{
10169 match(Set dst (XorI (ConvL2I src1) M1));
10170 predicate(UseLoongsonISA);
10171 ins_cost(60);
10173 format %{ "xor $dst, $src1, $M1 #@xorL2I_Reg_immI_M1" %}
10174 ins_encode %{
10175 Register dst = $dst$$Register;
10176 Register src = $src1$$Register;
10178 __ gsorn(dst, R0, src);
10179 %}
10180 ins_pipe( ialu_regI_regI );
10181 %}
instruct xorL_Reg_imm_0_65535(mRegL dst, mRegL src1, immL_0_65535 src2) %{
  match(Set dst (XorL src1 src2));
  ins_cost(60);

  format %{ "xori $dst, $src1, $src2 #@xorL_Reg_imm_0_65535" %}
  ins_encode %{
    // 64-bit XOR whose immediate fits xori's zero-extended 16-bit field.
    __ xori($dst$$Register, $src1$$Register, $src2$$constant);
  %}
  ins_pipe( ialu_regI_regI );
%}
10198 /*
10199 instruct xorL_Reg_immL_M1(mRegL dst, mRegL src1, immL_M1 M1) %{
10200 match(Set dst (XorL src1 M1));
10201 predicate(UseLoongsonISA);
10202 ins_cost(60);
10204 format %{ "xor $dst, $src1, $M1 #@xorL_Reg_immL_M1" %}
10205 ins_encode %{
10206 Register dst = $dst$$Register;
10207 Register src = $src1$$Register;
10209 __ gsorn(dst, R0, src);
10210 %}
10211 ins_pipe( ialu_regI_regI );
10212 %}
10213 */
// (0xFF & LoadB mem) is exactly a zero-extending byte load.
10215 instruct lbu_and_lmask(mRegI dst, memory mem, immI_255 mask) %{
10216 match(Set dst (AndI mask (LoadB mem)));
10217 ins_cost(60);
// Fixed: the format string previously said "lhu" (halfword load) although
// this rule emits an unsigned BYTE load (load_UB_enc).
10219 format %{ "lbu $dst, $mem #@lbu_and_lmask" %}
10220 ins_encode(load_UB_enc(dst, mem));
10221 ins_pipe( ialu_loadI );
10222 %}
// Commuted form of lbu_and_lmask: (LoadB mem) & 0xFF as a zero-extending
// byte load.
10224 instruct lbu_and_rmask(mRegI dst, memory mem, immI_255 mask) %{
10225 match(Set dst (AndI (LoadB mem) mask));
10226 ins_cost(60);
// Fixed: the format string previously said "lhu" (halfword load) although
// this rule emits an unsigned BYTE load (load_UB_enc).
10228 format %{ "lbu $dst, $mem #@lbu_and_rmask" %}
10229 ins_encode(load_UB_enc(dst, mem));
10230 ins_pipe( ialu_loadI );
10231 %}
instruct andI_Reg_Reg(mRegI dst, mRegI src1, mRegI src2) %{
  match(Set dst (AndI src1 src2));

  format %{ "and $dst, $src1, $src2 #@andI_Reg_Reg" %}
  ins_encode %{
    // Plain three-register bitwise AND.
    __ andr($dst$$Register, $src1$$Register, $src2$$Register);
  %}
  ins_pipe( ialu_regI_regI );
%}
// Loongson and-not / or-not fusions. In all four rules the operand passed
// SECOND to gsandn/gsorn is the complemented one (consistent across both
// operand orders below) — confirm against the Loongson assembler definition.
// dst = src1 & ~src2
10246 instruct andnI_Reg_nReg(mRegI dst, mRegI src1, mRegI src2, immI_M1 M1) %{
10247 match(Set dst (AndI src1 (XorI src2 M1)));
10248 predicate(UseLoongsonISA);
10250 format %{ "andn $dst, $src1, $src2 #@andnI_Reg_nReg" %}
10251 ins_encode %{
10252 Register dst = $dst$$Register;
10253 Register src1 = $src1$$Register;
10254 Register src2 = $src2$$Register;
10256 __ gsandn(dst, src1, src2);
10257 %}
10258 ins_pipe( ialu_regI_regI );
10259 %}
// dst = src1 | ~src2
10261 instruct ornI_Reg_nReg(mRegI dst, mRegI src1, mRegI src2, immI_M1 M1) %{
10262 match(Set dst (OrI src1 (XorI src2 M1)));
10263 predicate(UseLoongsonISA);
10265 format %{ "orn $dst, $src1, $src2 #@ornI_Reg_nReg" %}
10266 ins_encode %{
10267 Register dst = $dst$$Register;
10268 Register src1 = $src1$$Register;
10269 Register src2 = $src2$$Register;
10271 __ gsorn(dst, src1, src2);
10272 %}
10273 ins_pipe( ialu_regI_regI );
10274 %}
// dst = ~src1 & src2 (operands swapped when emitting).
10276 instruct andnI_nReg_Reg(mRegI dst, mRegI src1, mRegI src2, immI_M1 M1) %{
10277 match(Set dst (AndI (XorI src1 M1) src2));
10278 predicate(UseLoongsonISA);
10280 format %{ "andn $dst, $src2, $src1 #@andnI_nReg_Reg" %}
10281 ins_encode %{
10282 Register dst = $dst$$Register;
10283 Register src1 = $src1$$Register;
10284 Register src2 = $src2$$Register;
10286 __ gsandn(dst, src2, src1);
10287 %}
10288 ins_pipe( ialu_regI_regI );
10289 %}
// dst = ~src1 | src2 (operands swapped when emitting).
10291 instruct ornI_nReg_Reg(mRegI dst, mRegI src1, mRegI src2, immI_M1 M1) %{
10292 match(Set dst (OrI (XorI src1 M1) src2));
10293 predicate(UseLoongsonISA);
10295 format %{ "orn $dst, $src2, $src1 #@ornI_nReg_Reg" %}
10296 ins_encode %{
10297 Register dst = $dst$$Register;
10298 Register src1 = $src1$$Register;
10299 Register src2 = $src2$$Register;
10301 __ gsorn(dst, src2, src1);
10302 %}
10303 ins_pipe( ialu_regI_regI );
10304 %}
// And Long Register with Register
instruct andL_Reg_Reg(mRegL dst, mRegL src1, mRegL src2) %{
  match(Set dst (AndL src1 src2));
  format %{ "AND $dst, $src1, $src2 @ andL_Reg_Reg\n\t" %}
  ins_encode %{
    // 64-bit three-register bitwise AND.
    __ andr($dst$$Register, $src1$$Register, $src2$$Register);
  %}
  ins_pipe( ialu_regL_regL );
%}
// 64-bit AND with an int operand widened by ConvI2L.
// NOTE(review): no explicit sign-extension of src2 is emitted — relies on
// int registers holding sign-extended values; confirm the invariant.
10320 instruct andL_Reg_Reg_convI2L(mRegL dst, mRegL src1, mRegI src2) %{
10321 match(Set dst (AndL src1 (ConvI2L src2)));
10322 format %{ "AND $dst, $src1, $src2 @ andL_Reg_Reg_convI2L\n\t" %}
10323 ins_encode %{
10324 Register dst_reg = as_Register($dst$$reg);
10325 Register src1_reg = as_Register($src1$$reg);
10326 Register src2_reg = as_Register($src2$$reg);
10328 __ andr(dst_reg, src1_reg, src2_reg);
10329 %}
10330 ins_pipe( ialu_regL_regL );
10331 %}
instruct andL_Reg_imm_0_65535(mRegL dst, mRegL src1, immL_0_65535 src2) %{
  match(Set dst (AndL src1 src2));
  ins_cost(60);

  format %{ "and $dst, $src1, $src2 #@andL_Reg_imm_0_65535" %}
  ins_encode %{
    // 64-bit AND whose immediate fits andi's zero-extended 16-bit field.
    __ andi($dst$$Register, $src1$$Register, $src2$$constant);
  %}
  ins_pipe( ialu_regI_regI );
%}
// ConvL2I(AndL src, imm16): masking with a 16-bit immediate already yields a
// value that is a canonical (zero-upper-bits, non-negative) int, so a plain
// andi suffices and the narrowing needs no extra instruction.
10348 instruct andL2I_Reg_imm_0_65535(mRegI dst, mRegL src1, immL_0_65535 src2) %{
10349 match(Set dst (ConvL2I (AndL src1 src2)));
10350 ins_cost(60);
10352 format %{ "and $dst, $src1, $src2 #@andL2I_Reg_imm_0_65535" %}
10353 ins_encode %{
10354 Register dst = $dst$$Register;
10355 Register src = $src1$$Register;
10356 long val = $src2$$constant;
10358 __ andi(dst, src, val);
10359 %}
10360 ins_pipe( ialu_regI_regI );
10361 %}
10363 /*
10364 instruct andnL_Reg_nReg(mRegL dst, mRegL src1, mRegL src2, immL_M1 M1) %{
10365 match(Set dst (AndL src1 (XorL src2 M1)));
10366 predicate(UseLoongsonISA);
10368 format %{ "andn $dst, $src1, $src2 #@andnL_Reg_nReg" %}
10369 ins_encode %{
10370 Register dst = $dst$$Register;
10371 Register src1 = $src1$$Register;
10372 Register src2 = $src2$$Register;
10374 __ gsandn(dst, src1, src2);
10375 %}
10376 ins_pipe( ialu_regI_regI );
10377 %}
10378 */
10380 /*
10381 instruct ornL_Reg_nReg(mRegL dst, mRegL src1, mRegL src2, immL_M1 M1) %{
10382 match(Set dst (OrL src1 (XorL src2 M1)));
10383 predicate(UseLoongsonISA);
10385 format %{ "orn $dst, $src1, $src2 #@ornL_Reg_nReg" %}
10386 ins_encode %{
10387 Register dst = $dst$$Register;
10388 Register src1 = $src1$$Register;
10389 Register src2 = $src2$$Register;
10391 __ gsorn(dst, src1, src2);
10392 %}
10393 ins_pipe( ialu_regI_regI );
10394 %}
10395 */
10397 /*
10398 instruct andnL_nReg_Reg(mRegL dst, mRegL src1, mRegL src2, immL_M1 M1) %{
10399 match(Set dst (AndL (XorL src1 M1) src2));
10400 predicate(UseLoongsonISA);
10402 format %{ "andn $dst, $src2, $src1 #@andnL_nReg_Reg" %}
10403 ins_encode %{
10404 Register dst = $dst$$Register;
10405 Register src1 = $src1$$Register;
10406 Register src2 = $src2$$Register;
10408 __ gsandn(dst, src2, src1);
10409 %}
10410 ins_pipe( ialu_regI_regI );
10411 %}
10412 */
10414 /*
10415 instruct ornL_nReg_Reg(mRegL dst, mRegL src1, mRegL src2, immL_M1 M1) %{
10416 match(Set dst (OrL (XorL src1 M1) src2));
10417 predicate(UseLoongsonISA);
10419 format %{ "orn $dst, $src2, $src1 #@ornL_nReg_Reg" %}
10420 ins_encode %{
10421 Register dst = $dst$$Register;
10422 Register src1 = $src1$$Register;
10423 Register src2 = $src2$$Register;
10425 __ gsorn(dst, src2, src1);
10426 %}
10427 ins_pipe( ialu_regI_regI );
10428 %}
10429 */
// AND with specific negative constants, implemented by inserting zeros into
// the cleared bit positions with dins(dst, R0, pos, size).
// -8 = ~0b111: clear bits 0..2.
10431 instruct andL_Reg_immL_M8(mRegL dst, immL_M8 M8) %{
10432 match(Set dst (AndL dst M8));
10433 ins_cost(60);
10435 format %{ "and $dst, $dst, $M8 #@andL_Reg_immL_M8" %}
10436 ins_encode %{
10437 Register dst = $dst$$Register;
10439 __ dins(dst, R0, 0, 3);
10440 %}
10441 ins_pipe( ialu_regI_regI );
10442 %}
// -5 = ~0b100: clear bit 2 only.
10444 instruct andL_Reg_immL_M5(mRegL dst, immL_M5 M5) %{
10445 match(Set dst (AndL dst M5));
10446 ins_cost(60);
10448 format %{ "and $dst, $dst, $M5 #@andL_Reg_immL_M5" %}
10449 ins_encode %{
10450 Register dst = $dst$$Register;
10452 __ dins(dst, R0, 2, 1);
10453 %}
10454 ins_pipe( ialu_regI_regI );
10455 %}
// -7 = ~0b110: clear bits 1..2.
10457 instruct andL_Reg_immL_M7(mRegL dst, immL_M7 M7) %{
10458 match(Set dst (AndL dst M7));
10459 ins_cost(60);
10461 format %{ "and $dst, $dst, $M7 #@andL_Reg_immL_M7" %}
10462 ins_encode %{
10463 Register dst = $dst$$Register;
10465 __ dins(dst, R0, 1, 2);
10466 %}
10467 ins_pipe( ialu_regI_regI );
10468 %}
// -4 = ~0b11: clear bits 0..1.
10470 instruct andL_Reg_immL_M4(mRegL dst, immL_M4 M4) %{
10471 match(Set dst (AndL dst M4));
10472 ins_cost(60);
10474 format %{ "and $dst, $dst, $M4 #@andL_Reg_immL_M4" %}
10475 ins_encode %{
10476 Register dst = $dst$$Register;
10478 __ dins(dst, R0, 0, 2);
10479 %}
10480 ins_pipe( ialu_regI_regI );
10481 %}
// -121 = ~0b1111000: clear bits 3..6.
10483 instruct andL_Reg_immL_M121(mRegL dst, immL_M121 M121) %{
10484 match(Set dst (AndL dst M121));
10485 ins_cost(60);
10487 format %{ "and $dst, $dst, $M121 #@andL_Reg_immL_M121" %}
10488 ins_encode %{
10489 Register dst = $dst$$Register;
10491 __ dins(dst, R0, 3, 4);
10492 %}
10493 ins_pipe( ialu_regI_regI );
10494 %}
// Or Long Register with Register
instruct orL_Reg_Reg(mRegL dst, mRegL src1, mRegL src2) %{
  match(Set dst (OrL src1 src2));
  format %{ "OR $dst, $src1, $src2 @ orL_Reg_Reg\t" %}
  ins_encode %{
    // 64-bit three-register bitwise OR.
    __ orr($dst$$Register, $src1$$Register, $src2$$Register);
  %}
  ins_pipe( ialu_regL_regL );
%}
instruct orL_Reg_P2XReg(mRegL dst, mRegP src1, mRegL src2) %{
  match(Set dst (OrL (CastP2X src1) src2));
  format %{ "OR $dst, $src1, $src2 @ orL_Reg_P2XReg\t" %}
  ins_encode %{
    // CastP2X is a no-op at the register level; emit a plain 64-bit OR.
    __ orr($dst$$Register, $src1$$Register, $src2$$Register);
  %}
  ins_pipe( ialu_regL_regL );
%}
// Xor Long Register with Register
instruct xorL_Reg_Reg(mRegL dst, mRegL src1, mRegL src2) %{
  match(Set dst (XorL src1 src2));
  format %{ "XOR $dst, $src1, $src2 @ xorL_Reg_Reg\t" %}
  ins_encode %{
    // 64-bit three-register bitwise XOR.
    __ xorr($dst$$Register, $src1$$Register, $src2$$Register);
  %}
  ins_pipe( ialu_regL_regL );
%}
10537 // Shift Left by 8-bit immediate
// 32-bit shift left by constant. The sll instruction's sa field is 5 bits;
// presumably the matcher only produces shamt in 0..31 here — confirm, since
// immI8 admits larger values.
10538 instruct salI_Reg_imm(mRegI dst, mRegI src, immI8 shift) %{
10539 match(Set dst (LShiftI src shift));
10541 format %{ "SHL $dst, $src, $shift #@salI_Reg_imm" %}
10542 ins_encode %{
10543 Register src = $src$$Register;
10544 Register dst = $dst$$Register;
10545 int shamt = $shift$$constant;
10547 __ sll(dst, src, shamt);
10548 %}
10549 ins_pipe( ialu_regI_regI );
10550 %}
// Same shift with the source narrowed from long; sll also produces a
// canonical sign-extended int, so no separate truncation is needed.
// NOTE(review): appears to duplicate salI_RegL2I_imm further below — confirm
// whether one of the two is dead.
10552 instruct salL2I_Reg_imm(mRegI dst, mRegL src, immI8 shift) %{
10553 match(Set dst (LShiftI (ConvL2I src) shift));
10555 format %{ "SHL $dst, $src, $shift #@salL2I_Reg_imm" %}
10556 ins_encode %{
10557 Register src = $src$$Register;
10558 Register dst = $dst$$Register;
10559 int shamt = $shift$$constant;
10561 __ sll(dst, src, shamt);
10562 %}
10563 ins_pipe( ialu_regI_regI );
10564 %}
// (src << 16) & 0xFFFF0000: the AND is redundant after a 16-bit left shift,
// so a single sll covers the whole pattern.
10566 instruct salI_Reg_imm_and_M65536(mRegI dst, mRegI src, immI_16 shift, immI_M65536 mask) %{
10567 match(Set dst (AndI (LShiftI src shift) mask));
10569 format %{ "SHL $dst, $src, $shift #@salI_Reg_imm_and_M65536" %}
10570 ins_encode %{
10571 Register src = $src$$Register;
10572 Register dst = $dst$$Register;
10574 __ sll(dst, src, 16);
10575 %}
10576 ins_pipe( ialu_regI_regI );
10577 %}
// (((int)(src & 7)) << 16) >> 16: the masked value fits in 3 bits, so the
// shift pair (sign-extend-short idiom) is a no-op and andi alone suffices.
10579 instruct land7_2_s(mRegI dst, mRegL src, immL7 seven, immI_16 sixteen)
10580 %{
10581 match(Set dst (RShiftI (LShiftI (ConvL2I (AndL src seven)) sixteen) sixteen));
10583 format %{ "andi $dst, $src, 7\t# @land7_2_s" %}
10584 ins_encode %{
10585 Register src = $src$$Register;
10586 Register dst = $dst$$Register;
10588 __ andi(dst, src, 7);
10589 %}
10590 ins_pipe(ialu_regI_regI);
10591 %}
// ((src1 | imm) << 16) >> 16 folded to a single ori.
// NOTE(review): ori alone does not sign-extend bit 15 the way the matched
// shift pair would; this is only equivalent when bit 15 of (src1 | imm) and
// src1's upper bits are clear — presumably guaranteed by the contexts C2
// generates this in (imm is limited to 0..32767), but confirm.
10593 instruct ori2s(mRegI dst, mRegI src1, immI_0_32767 src2, immI_16 sixteen)
10594 %{
10595 match(Set dst (RShiftI (LShiftI (OrI src1 src2) sixteen) sixteen));
10597 format %{ "ori $dst, $src1, $src2\t# @ori2s" %}
10598 ins_encode %{
10599 Register src = $src1$$Register;
10600 int val = $src2$$constant;
10601 Register dst = $dst$$Register;
10603 __ ori(dst, src, val);
10604 %}
10605 ins_pipe(ialu_regI_regI);
10606 %}
10608 // Logical Shift Left by 16, followed by Arithmetic Shift Right by 16.
10609 // This idiom is used by the compiler for the i2s bytecode; seh does the
// sign-extend-halfword in one instruction.
10610 instruct i2s(mRegI dst, mRegI src, immI_16 sixteen)
10611 %{
10612 match(Set dst (RShiftI (LShiftI src sixteen) sixteen));
10614 format %{ "i2s $dst, $src\t# @i2s" %}
10615 ins_encode %{
10616 Register src = $src$$Register;
10617 Register dst = $dst$$Register;
10619 __ seh(dst, src);
10620 %}
10621 ins_pipe(ialu_regI_regI);
10622 %}
10624 // Logical Shift Left by 24, followed by Arithmetic Shift Right by 24.
10625 // This idiom is used by the compiler for the i2b bytecode; seb does the
// sign-extend-byte in one instruction.
10626 instruct i2b(mRegI dst, mRegI src, immI_24 twentyfour)
10627 %{
10628 match(Set dst (RShiftI (LShiftI src twentyfour) twentyfour));
10630 format %{ "i2b $dst, $src\t# @i2b" %}
10631 ins_encode %{
10632 Register src = $src$$Register;
10633 Register dst = $dst$$Register;
10635 __ seb(dst, src);
10636 %}
10637 ins_pipe(ialu_regI_regI);
10638 %}
// Shift-left of a long narrowed to int.
// NOTE(review): appears to duplicate salL2I_Reg_imm above — confirm whether
// one of the two is dead.
10641 instruct salI_RegL2I_imm(mRegI dst, mRegL src, immI8 shift) %{
10642 match(Set dst (LShiftI (ConvL2I src) shift));
10644 format %{ "SHL $dst, $src, $shift #@salI_RegL2I_imm" %}
10645 ins_encode %{
10646 Register src = $src$$Register;
10647 Register dst = $dst$$Register;
10648 int shamt = $shift$$constant;
10650 __ sll(dst, src, shamt);
10651 %}
10652 ins_pipe( ialu_regI_regI );
10653 %}
10655 // Shift Left by variable amount held in a register (sllv uses the low
// 5 bits of the shift register).
10656 instruct salI_Reg_Reg(mRegI dst, mRegI src, mRegI shift) %{
10657 match(Set dst (LShiftI src shift));
10659 format %{ "SHL $dst, $src, $shift #@salI_Reg_Reg" %}
10660 ins_encode %{
10661 Register src = $src$$Register;
10662 Register dst = $dst$$Register;
10663 Register shamt = $shift$$Register;
10664 __ sllv(dst, src, shamt);
10665 %}
10666 ins_pipe( ialu_regI_regI );
10667 %}
10670 // Shift Left Long
// 64-bit shift left by constant: dsll covers sa 0..31, dsll32 covers 32..63.
// NOTE(review): is_simm(shamt, 5) is a SIGNED 5-bit check (-16..15), so
// shamt 16..31 takes the else branch — harmless, since low(shamt, 6)
// dispatches correctly there too, but the fast-path test looks unintended.
10671 instruct salL_Reg_imm(mRegL dst, mRegL src, immI8 shift) %{
10672 //predicate(UseNewLongLShift);
10673 match(Set dst (LShiftL src shift));
10674 ins_cost(100);
10675 format %{ "salL $dst, $src, $shift @ salL_Reg_imm" %}
10676 ins_encode %{
10677 Register src_reg = as_Register($src$$reg);
10678 Register dst_reg = as_Register($dst$$reg);
10679 int shamt = $shift$$constant;
10681 if (__ is_simm(shamt, 5))
10682 __ dsll(dst_reg, src_reg, shamt);
10683 else
10684 {
10685 int sa = Assembler::low(shamt, 6);
10686 if (sa < 32) {
10687 __ dsll(dst_reg, src_reg, sa);
10688 } else {
10689 __ dsll32(dst_reg, src_reg, sa - 32);
10690 }
10691 }
10692 %}
10693 ins_pipe( ialu_regL_regL );
10694 %}
// Same shift with an int source widened by ConvI2L; a left shift makes any
// stale upper bits irrelevant only when shamt >= 32, otherwise this relies
// on src being kept sign-extended in its register.
// NOTE(review): duplicates salL_convI2L_Reg_imm below — confirm which one
// the matcher actually uses.
10696 instruct salL_RegI2L_imm(mRegL dst, mRegI src, immI8 shift) %{
10697 //predicate(UseNewLongLShift);
10698 match(Set dst (LShiftL (ConvI2L src) shift));
10699 ins_cost(100);
10700 format %{ "salL $dst, $src, $shift @ salL_RegI2L_imm" %}
10701 ins_encode %{
10702 Register src_reg = as_Register($src$$reg);
10703 Register dst_reg = as_Register($dst$$reg);
10704 int shamt = $shift$$constant;
10706 if (__ is_simm(shamt, 5))
10707 __ dsll(dst_reg, src_reg, shamt);
10708 else
10709 {
10710 int sa = Assembler::low(shamt, 6);
10711 if (sa < 32) {
10712 __ dsll(dst_reg, src_reg, sa);
10713 } else {
10714 __ dsll32(dst_reg, src_reg, sa - 32);
10715 }
10716 }
10717 %}
10718 ins_pipe( ialu_regL_regL );
10719 %}
10721 // Shift Left Long
// Variable 64-bit shift left: dsllv uses the low 6 bits of the register.
10722 instruct salL_Reg_Reg(mRegL dst, mRegL src, mRegI shift) %{
10723 //predicate(UseNewLongLShift);
10724 match(Set dst (LShiftL src shift));
10725 ins_cost(100);
10726 format %{ "salL $dst, $src, $shift @ salL_Reg_Reg" %}
10727 ins_encode %{
10728 Register src_reg = as_Register($src$$reg);
10729 Register dst_reg = as_Register($dst$$reg);
10731 __ dsllv(dst_reg, src_reg, $shift$$Register);
10732 %}
10733 ins_pipe( ialu_regL_regL );
10734 %}
// NOTE(review): identical match rule and encoding to salL_RegI2L_imm above.
10736 instruct salL_convI2L_Reg_imm(mRegL dst, mRegI src, immI8 shift) %{
10737 match(Set dst (LShiftL (ConvI2L src) shift));
10738 ins_cost(100);
10739 format %{ "salL $dst, $src, $shift @ salL_convI2L_Reg_imm" %}
10740 ins_encode %{
10741 Register src_reg = as_Register($src$$reg);
10742 Register dst_reg = as_Register($dst$$reg);
10743 int shamt = $shift$$constant;
10745 if (__ is_simm(shamt, 5)) {
10746 __ dsll(dst_reg, src_reg, shamt);
10747 } else {
10748 int sa = Assembler::low(shamt, 6);
10749 if (sa < 32) {
10750 __ dsll(dst_reg, src_reg, sa);
10751 } else {
10752 __ dsll32(dst_reg, src_reg, sa - 32);
10753 }
10754 }
10755 %}
10756 ins_pipe( ialu_regL_regL );
10757 %}
10759 // Shift Right Long
// 64-bit arithmetic shift right by constant; dsra covers sa 0..31,
// dsra32 covers 32..63. Note this rule masks shamt with 0x3f up front,
// unlike the salL rules above — same net effect, inconsistent style.
10760 instruct sarL_Reg_imm(mRegL dst, mRegL src, immI8 shift) %{
10761 match(Set dst (RShiftL src shift));
10762 ins_cost(100);
10763 format %{ "sarL $dst, $src, $shift @ sarL_Reg_imm" %}
10764 ins_encode %{
10765 Register src_reg = as_Register($src$$reg);
10766 Register dst_reg = as_Register($dst$$reg);
10767 int shamt = ($shift$$constant & 0x3f);
10768 if (__ is_simm(shamt, 5))
10769 __ dsra(dst_reg, src_reg, shamt);
10770 else {
10771 int sa = Assembler::low(shamt, 6);
10772 if (sa < 32) {
10773 __ dsra(dst_reg, src_reg, sa);
10774 } else {
10775 __ dsra32(dst_reg, src_reg, sa - 32);
10776 }
10777 }
10778 %}
10779 ins_pipe( ialu_regL_regL );
10780 %}
// ConvL2I(src >> shift) for shift in 32..63: dsra32 leaves a sign-extended
// 32-bit value, which is already a canonical int register value.
10782 instruct sarL2I_Reg_immI_32_63(mRegI dst, mRegL src, immI_32_63 shift) %{
10783 match(Set dst (ConvL2I (RShiftL src shift)));
10784 ins_cost(100);
10785 format %{ "sarL $dst, $src, $shift @ sarL2I_Reg_immI_32_63" %}
10786 ins_encode %{
10787 Register src_reg = as_Register($src$$reg);
10788 Register dst_reg = as_Register($dst$$reg);
10789 int shamt = $shift$$constant;
10791 __ dsra32(dst_reg, src_reg, shamt - 32);
10792 %}
10793 ins_pipe( ialu_regL_regL );
10794 %}
10796 // Shift Right Long arithmetically
// Variable form: dsrav uses the low 6 bits of the shift register.
10797 instruct sarL_Reg_Reg(mRegL dst, mRegL src, mRegI shift) %{
10798 //predicate(UseNewLongLShift);
10799 match(Set dst (RShiftL src shift));
10800 ins_cost(100);
10801 format %{ "sarL $dst, $src, $shift @ sarL_Reg_Reg" %}
10802 ins_encode %{
10803 Register src_reg = as_Register($src$$reg);
10804 Register dst_reg = as_Register($dst$$reg);
10806 __ dsrav(dst_reg, src_reg, $shift$$Register);
10807 %}
10808 ins_pipe( ialu_regL_regL );
10809 %}
10811 // Shift Right Long logically
// Variable form: dsrlv uses the low 6 bits of the shift register.
10812 instruct slrL_Reg_Reg(mRegL dst, mRegL src, mRegI shift) %{
10813 match(Set dst (URShiftL src shift));
10814 ins_cost(100);
10815 format %{ "slrL $dst, $src, $shift @ slrL_Reg_Reg" %}
10816 ins_encode %{
10817 Register src_reg = as_Register($src$$reg);
10818 Register dst_reg = as_Register($dst$$reg);
10820 __ dsrlv(dst_reg, src_reg, $shift$$Register);
10821 %}
10822 ins_pipe( ialu_regL_regL );
10823 %}
// Constant logical shift right, shamt 0..31: single dsrl.
10825 instruct slrL_Reg_immI_0_31(mRegL dst, mRegL src, immI_0_31 shift) %{
10826 match(Set dst (URShiftL src shift));
10827 ins_cost(80);
10828 format %{ "slrL $dst, $src, $shift @ slrL_Reg_immI_0_31" %}
10829 ins_encode %{
10830 Register src_reg = as_Register($src$$reg);
10831 Register dst_reg = as_Register($dst$$reg);
10832 int shamt = $shift$$constant;
10834 __ dsrl(dst_reg, src_reg, shamt);
10835 %}
10836 ins_pipe( ialu_regL_regL );
10837 %}
// ((int)(src >>> shamt)) & 0x7FFFFFFF: dext extracts 31 bits starting at
// shamt — the mask keeps bit 31 clear so the result is a valid int directly.
10839 instruct slrL_Reg_immI_0_31_and_max_int(mRegI dst, mRegL src, immI_0_31 shift, immI_MaxI max_int) %{
10840 match(Set dst (AndI (ConvL2I (URShiftL src shift)) max_int));
10841 ins_cost(80);
10842 format %{ "dext $dst, $src, $shift, 31 @ slrL_Reg_immI_0_31_and_max_int" %}
10843 ins_encode %{
10844 Register src_reg = as_Register($src$$reg);
10845 Register dst_reg = as_Register($dst$$reg);
10846 int shamt = $shift$$constant;
10848 __ dext(dst_reg, src_reg, shamt, 31);
10849 %}
10850 ins_pipe( ialu_regL_regL );
10851 %}
// Pointer bits shifted right; CastP2X is a register-level no-op.
10853 instruct slrL_P2XReg_immI_0_31(mRegL dst, mRegP src, immI_0_31 shift) %{
10854 match(Set dst (URShiftL (CastP2X src) shift));
10855 ins_cost(80);
10856 format %{ "slrL $dst, $src, $shift @ slrL_P2XReg_immI_0_31" %}
10857 ins_encode %{
10858 Register src_reg = as_Register($src$$reg);
10859 Register dst_reg = as_Register($dst$$reg);
10860 int shamt = $shift$$constant;
10862 __ dsrl(dst_reg, src_reg, shamt);
10863 %}
10864 ins_pipe( ialu_regL_regL );
10865 %}
// Constant logical shift right, shamt 32..63: dsrl32 with shamt - 32.
10867 instruct slrL_Reg_immI_32_63(mRegL dst, mRegL src, immI_32_63 shift) %{
10868 match(Set dst (URShiftL src shift));
10869 ins_cost(80);
10870 format %{ "slrL $dst, $src, $shift @ slrL_Reg_immI_32_63" %}
10871 ins_encode %{
10872 Register src_reg = as_Register($src$$reg);
10873 Register dst_reg = as_Register($dst$$reg);
10874 int shamt = $shift$$constant;
10876 __ dsrl32(dst_reg, src_reg, shamt - 32);
10877 %}
10878 ins_pipe( ialu_regL_regL );
10879 %}
// ConvL2I(src >>> shamt) for shamt STRICTLY greater than 32: the result then
// fits in 31 bits, so bit 31 is clear and dsrl32's zero-extended output is
// already a canonical int. shamt == 32 is excluded because bit 31 could be
// set, which would require an explicit sign-extension.
10881 instruct slrL_Reg_immI_convL2I(mRegI dst, mRegL src, immI_32_63 shift) %{
10882 match(Set dst (ConvL2I (URShiftL src shift)));
10883 predicate(n->in(1)->in(2)->get_int() > 32);
10884 ins_cost(80);
10885 format %{ "slrL $dst, $src, $shift @ slrL_Reg_immI_convL2I" %}
10886 ins_encode %{
10887 Register src_reg = as_Register($src$$reg);
10888 Register dst_reg = as_Register($dst$$reg);
10889 int shamt = $shift$$constant;
10891 __ dsrl32(dst_reg, src_reg, shamt - 32);
10892 %}
10893 ins_pipe( ialu_regL_regL );
10894 %}
// Pointer bits shifted right by 32..63.
10896 instruct slrL_P2XReg_immI_32_63(mRegL dst, mRegP src, immI_32_63 shift) %{
10897 match(Set dst (URShiftL (CastP2X src) shift));
10898 ins_cost(80);
10899 format %{ "slrL $dst, $src, $shift @ slrL_P2XReg_immI_32_63" %}
10900 ins_encode %{
10901 Register src_reg = as_Register($src$$reg);
10902 Register dst_reg = as_Register($dst$$reg);
10903 int shamt = $shift$$constant;
10905 __ dsrl32(dst_reg, src_reg, shamt - 32);
10906 %}
10907 ins_pipe( ialu_regL_regL );
10908 %}
10910 // Xor Instructions
10911 // Xor Register with Register
// 32-bit XOR.  On MIPS64 ints live in 64-bit registers in sign-extended form;
// sll(dst, dst, 0) re-canonicalizes (sign-extends) the low 32 bits.
10912 instruct xorI_Reg_Reg(mRegI dst, mRegI src1, mRegI src2) %{
10913 match(Set dst (XorI src1 src2));
10915 format %{ "XOR $dst, $src1, $src2 #@xorI_Reg_Reg" %}
10917 ins_encode %{
10918 Register dst = $dst$$Register;
10919 Register src1 = $src1$$Register;
10920 Register src2 = $src2$$Register;
10921 __ xorr(dst, src1, src2);
10922 __ sll(dst, dst, 0); /* long -> int */
10923 %}
10925 ins_pipe( ialu_regI_regI );
10926 %}
10928 // Or Instructions
10929 // Or Register with Register
// 32-bit OR.  No re-sign-extension is needed: OR of two sign-extended values
// is itself sign-extended.
10930 instruct orI_Reg_Reg(mRegI dst, mRegI src1, mRegI src2) %{
10931 match(Set dst (OrI src1 src2));
10933 format %{ "OR $dst, $src1, $src2 #@orI_Reg_Reg" %}
10934 ins_encode %{
10935 Register dst = $dst$$Register;
10936 Register src1 = $src1$$Register;
10937 Register src2 = $src2$$Register;
10938 __ orr(dst, src1, src2);
10939 %}
10941 ins_pipe( ialu_regI_regI );
10942 %}
// Matches (src >>> rshift) | ((src & 1) << lshift) where rshift + lshift == 32
// (enforced by the predicate).  Implemented as a rotate-right by 1 (moving the
// low bit to the top) followed by a logical shift by rshift-1, skipped when
// rshift == 1.
10944 instruct rotI_shr_logical_Reg(mRegI dst, mRegI src, immI_0_31 rshift, immI_0_31 lshift, immI_1 one) %{
10945 match(Set dst (OrI (URShiftI src rshift) (LShiftI (AndI src one) lshift)));
10946 predicate(32 == ((n->in(1)->in(2)->get_int() + n->in(2)->in(2)->get_int())));
10948 format %{ "rotr $dst, $src, 1 ...\n\t"
10949 "srl $dst, $dst, ($rshift-1) @ rotI_shr_logical_Reg" %}
10950 ins_encode %{
10951 Register dst = $dst$$Register;
10952 Register src = $src$$Register;
10953 int rshift = $rshift$$constant;
10955 __ rotr(dst, src, 1);
10956 if (rshift - 1) {
10957 __ srl(dst, dst, rshift - 1);
10958 }
10959 %}
10961 ins_pipe( ialu_regI_regI );
10962 %}
// OR of a long with a pointer reinterpreted as a long (CastP2X).
// NOTE(review): matched ideal op is OrI although all operands are 64-bit
// register classes and the emitted orr is a full-width OR — confirm intended.
10964 instruct orI_Reg_castP2X(mRegL dst, mRegL src1, mRegP src2) %{
10965 match(Set dst (OrI src1 (CastP2X src2)));
10967 format %{ "OR $dst, $src1, $src2 #@orI_Reg_castP2X" %}
10968 ins_encode %{
10969 Register dst = $dst$$Register;
10970 Register src1 = $src1$$Register;
10971 Register src2 = $src2$$Register;
10972 __ orr(dst, src1, src2);
10973 %}
10975 ins_pipe( ialu_regI_regI );
10976 %}
10978 // Logical Shift Right by 8-bit immediate
// 32-bit logical right shift by a constant.
10979 instruct shr_logical_Reg_imm(mRegI dst, mRegI src, immI8 shift) %{
10980 match(Set dst (URShiftI src shift));
10981 // effect(KILL cr);
10983 format %{ "SRL $dst, $src, $shift #@shr_logical_Reg_imm" %}
10984 ins_encode %{
10985 Register src = $src$$Register;
10986 Register dst = $dst$$Register;
10987 int shift = $shift$$constant;
10989 __ srl(dst, src, shift);
10990 %}
10991 ins_pipe( ialu_regI_regI );
10992 %}
// (src >>> shift) & mask where mask is a contiguous low-bit mask
// (immI_nonneg_mask): collapsed into a single MIPS 'ext' bit-field extract.
// is_int_mask() yields the width (number of one-bits) of the mask.
10994 instruct shr_logical_Reg_imm_nonneg_mask(mRegI dst, mRegI src, immI_0_31 shift, immI_nonneg_mask mask) %{
10995 match(Set dst (AndI (URShiftI src shift) mask));
10997 format %{ "ext $dst, $src, $shift, one-bits($mask) #@shr_logical_Reg_imm_nonneg_mask" %}
10998 ins_encode %{
10999 Register src = $src$$Register;
11000 Register dst = $dst$$Register;
11001 int pos = $shift$$constant;
11002 int size = Assembler::is_int_mask($mask$$constant);
11004 __ ext(dst, src, pos, size);
11005 %}
11006 ins_pipe( ialu_regI_regI );
11007 %}
// Rotate instructions.  C2 has no Rotate ideal node here; rotates are
// recognized as (x << l) | (x >>> r) with l + r == 32 (int) or 64 (long),
// which each predicate below enforces.  All forms are emitted as a
// rotate-right by the right-shift amount; drotr32 covers amounts >= 32
// (MIPS encodes only 5 bits of shift amount).
//
// Rotate-left of an int, matched from the (LShiftI, URShiftI) OR.
11009 instruct rolI_Reg_immI_0_31(mRegI dst, immI_0_31 lshift, immI_0_31 rshift)
11010 %{
11011 predicate(0 == ((n->in(1)->in(2)->get_int() + n->in(2)->in(2)->get_int()) & 0x1f));
11012 match(Set dst (OrI (LShiftI dst lshift) (URShiftI dst rshift)));
11014 ins_cost(100);
11015 format %{ "rotr $dst, $dst, $rshift #@rolI_Reg_immI_0_31" %}
11016 ins_encode %{
11017 Register dst = $dst$$Register;
11018 int sa = $rshift$$constant;
11020 __ rotr(dst, dst, sa);
11021 %}
11022 ins_pipe( ialu_regI_regI );
11023 %}
// Rotate-left of a long; right-shift amount in [0,31].
11025 instruct rolL_Reg_immI_0_31(mRegL dst, immI_32_63 lshift, immI_0_31 rshift)
11026 %{
11027 predicate(0 == ((n->in(1)->in(2)->get_int() + n->in(2)->in(2)->get_int()) & 0x3f));
11028 match(Set dst (OrL (LShiftL dst lshift) (URShiftL dst rshift)));
11030 ins_cost(100);
11031 format %{ "rotr $dst, $dst, $rshift #@rolL_Reg_immI_0_31" %}
11032 ins_encode %{
11033 Register dst = $dst$$Register;
11034 int sa = $rshift$$constant;
11036 __ drotr(dst, dst, sa);
11037 %}
11038 ins_pipe( ialu_regI_regI );
11039 %}
// Rotate-left of a long; right-shift amount in [32,63] -> drotr32.
11041 instruct rolL_Reg_immI_32_63(mRegL dst, immI_0_31 lshift, immI_32_63 rshift)
11042 %{
11043 predicate(0 == ((n->in(1)->in(2)->get_int() + n->in(2)->in(2)->get_int()) & 0x3f));
11044 match(Set dst (OrL (LShiftL dst lshift) (URShiftL dst rshift)));
11046 ins_cost(100);
11047 format %{ "rotr $dst, $dst, $rshift #@rolL_Reg_immI_32_63" %}
11048 ins_encode %{
11049 Register dst = $dst$$Register;
11050 int sa = $rshift$$constant;
11052 __ drotr32(dst, dst, sa - 32);
11053 %}
11054 ins_pipe( ialu_regI_regI );
11055 %}
// Rotate-right of an int (same pattern with the OR operands swapped).
11057 instruct rorI_Reg_immI_0_31(mRegI dst, immI_0_31 rshift, immI_0_31 lshift)
11058 %{
11059 predicate(0 == ((n->in(1)->in(2)->get_int() + n->in(2)->in(2)->get_int()) & 0x1f));
11060 match(Set dst (OrI (URShiftI dst rshift) (LShiftI dst lshift)));
11062 ins_cost(100);
11063 format %{ "rotr $dst, $dst, $rshift #@rorI_Reg_immI_0_31" %}
11064 ins_encode %{
11065 Register dst = $dst$$Register;
11066 int sa = $rshift$$constant;
11068 __ rotr(dst, dst, sa);
11069 %}
11070 ins_pipe( ialu_regI_regI );
11071 %}
// Rotate-right of a long; amount in [0,31].
11073 instruct rorL_Reg_immI_0_31(mRegL dst, immI_0_31 rshift, immI_32_63 lshift)
11074 %{
11075 predicate(0 == ((n->in(1)->in(2)->get_int() + n->in(2)->in(2)->get_int()) & 0x3f));
11076 match(Set dst (OrL (URShiftL dst rshift) (LShiftL dst lshift)));
11078 ins_cost(100);
11079 format %{ "rotr $dst, $dst, $rshift #@rorL_Reg_immI_0_31" %}
11080 ins_encode %{
11081 Register dst = $dst$$Register;
11082 int sa = $rshift$$constant;
11084 __ drotr(dst, dst, sa);
11085 %}
11086 ins_pipe( ialu_regI_regI );
11087 %}
// Rotate-right of a long; amount in [32,63] -> drotr32.
11089 instruct rorL_Reg_immI_32_63(mRegL dst, immI_32_63 rshift, immI_0_31 lshift)
11090 %{
11091 predicate(0 == ((n->in(1)->in(2)->get_int() + n->in(2)->in(2)->get_int()) & 0x3f));
11092 match(Set dst (OrL (URShiftL dst rshift) (LShiftL dst lshift)));
11094 ins_cost(100);
11095 format %{ "rotr $dst, $dst, $rshift #@rorL_Reg_immI_32_63" %}
11096 ins_encode %{
11097 Register dst = $dst$$Register;
11098 int sa = $rshift$$constant;
11100 __ drotr32(dst, dst, sa - 32);
11101 %}
11102 ins_pipe( ialu_regI_regI );
11103 %}
11105 // Logical Shift Right
// 32-bit logical right shift by a register amount (srlv masks to 5 bits in HW).
11106 instruct shr_logical_Reg_Reg(mRegI dst, mRegI src, mRegI shift) %{
11107 match(Set dst (URShiftI src shift));
11109 format %{ "SRL $dst, $src, $shift #@shr_logical_Reg_Reg" %}
11110 ins_encode %{
11111 Register src = $src$$Register;
11112 Register dst = $dst$$Register;
11113 Register shift = $shift$$Register;
11114 __ srlv(dst, src, shift);
11115 %}
11116 ins_pipe( ialu_regI_regI );
11117 %}
// 32-bit arithmetic right shift by a constant.
11120 instruct shr_arith_Reg_imm(mRegI dst, mRegI src, immI8 shift) %{
11121 match(Set dst (RShiftI src shift));
11122 // effect(KILL cr);
11124 format %{ "SRA $dst, $src, $shift #@shr_arith_Reg_imm" %}
11125 ins_encode %{
11126 Register src = $src$$Register;
11127 Register dst = $dst$$Register;
11128 int shift = $shift$$constant;
11129 __ sra(dst, src, shift);
11130 %}
11131 ins_pipe( ialu_regI_regI );
11132 %}
// 32-bit arithmetic right shift by a register amount.
11134 instruct shr_arith_Reg_Reg(mRegI dst, mRegI src, mRegI shift) %{
11135 match(Set dst (RShiftI src shift));
11136 // effect(KILL cr);
11138 format %{ "SRA $dst, $src, $shift #@shr_arith_Reg_Reg" %}
11139 ins_encode %{
11140 Register src = $src$$Register;
11141 Register dst = $dst$$Register;
11142 Register shift = $shift$$Register;
11143 __ srav(dst, src, shift);
11144 %}
11145 ins_pipe( ialu_regI_regI );
11146 %}
11148 //----------Convert Int to Boolean---------------------------------------------
// Conv2B: dst = (src != 0) ? 1 : 0, using movz (move-if-zero).  When dst and
// src alias, src is first saved in AT so the daddiu does not clobber it.
11150 instruct convI2B(mRegI dst, mRegI src) %{
11151 match(Set dst (Conv2B src));
11153 ins_cost(100);
11154 format %{ "convI2B $dst, $src @ convI2B" %}
11155 ins_encode %{
11156 Register dst = as_Register($dst$$reg);
11157 Register src = as_Register($src$$reg);
11159 if (dst != src) {
11160 __ daddiu(dst, R0, 1);
11161 __ movz(dst, R0, src);
11162 } else {
11163 __ move(AT, src);
11164 __ daddiu(dst, R0, 1);
11165 __ movz(dst, R0, AT);
11166 }
11167 %}
11169 ins_pipe( ialu_regL_regL );
11170 %}
// ConvI2L: ints are kept sign-extended in 64-bit registers, so sll by 0
// (a sign-extending no-op shift) suffices; elided entirely when dst == src.
11172 instruct convI2L_reg( mRegL dst, mRegI src) %{
11173 match(Set dst (ConvI2L src));
11175 ins_cost(100);
11176 format %{ "SLL $dst, $src @ convI2L_reg\t" %}
11177 ins_encode %{
11178 Register dst = as_Register($dst$$reg);
11179 Register src = as_Register($src$$reg);
11181 if(dst != src) __ sll(dst, src, 0);
11182 %}
11183 ins_pipe( ialu_regL_regL );
11184 %}
// ConvL2I: truncate to 32 bits and sign-extend, again via sll by 0.
11187 instruct convL2I_reg( mRegI dst, mRegL src ) %{
11188 match(Set dst (ConvL2I src));
11190 format %{ "MOV $dst, $src @ convL2I_reg" %}
11191 ins_encode %{
11192 Register dst = as_Register($dst$$reg);
11193 Register src = as_Register($src$$reg);
11195 __ sll(dst, src, 0);
11196 %}
11198 ins_pipe( ialu_regI_regI );
11199 %}
// ConvI2L(ConvL2I(x)): collapse the round trip to a single sign-extension.
11201 instruct convL2I2L_reg( mRegL dst, mRegL src ) %{
11202 match(Set dst (ConvI2L (ConvL2I src)));
11204 format %{ "sll $dst, $src, 0 @ convL2I2L_reg" %}
11205 ins_encode %{
11206 Register dst = as_Register($dst$$reg);
11207 Register src = as_Register($src$$reg);
11209 __ sll(dst, src, 0);
11210 %}
11212 ins_pipe( ialu_regI_regI );
11213 %}
// ConvL2D: move the long into an FPR (dmtc1) and convert with cvt.d.l.
11215 instruct convL2D_reg( regD dst, mRegL src ) %{
11216 match(Set dst (ConvL2D src));
11217 format %{ "convL2D $dst, $src @ convL2D_reg" %}
11218 ins_encode %{
11219 Register src = as_Register($src$$reg);
11220 FloatRegister dst = as_FloatRegister($dst$$reg);
11222 __ dmtc1(src, dst);
11223 __ cvt_d_l(dst, dst);
11224 %}
11226 ins_pipe( pipe_slow );
11227 %}
// ConvD2L, fast path: trunc.l.d produces max_long (0x7fffffffffffffff) on
// overflow/NaN, so that sentinel in dst triggers the fix-up code; otherwise
// the truncated value is used directly.  The fix-up distinguishes
// "src < 0.0" (-> min_long via T9 - AT = -1 - max_long) from NaN (-> 0 via
// movt after c.un.d).  Clobbers AT, T9, F30.
// NOTE(review): the fix-up uses cvt_d_w on F30 (holding integer 0), i.e. a
// word->double convert of zero — fine for producing 0.0, though cvt_d_l would
// read more naturally here.  Confirm intent.
11229 instruct convD2L_reg_fast( mRegL dst, regD src ) %{
11230 match(Set dst (ConvD2L src));
11231 ins_cost(150);
11232 format %{ "convD2L $dst, $src @ convD2L_reg_fast" %}
11233 ins_encode %{
11234 Register dst = as_Register($dst$$reg);
11235 FloatRegister src = as_FloatRegister($src$$reg);
11237 Label Done;
11239 __ trunc_l_d(F30, src);
11240 // max_long: 0x7fffffffffffffff
11241 // __ set64(AT, 0x7fffffffffffffff);
11242 __ daddiu(AT, R0, -1);
11243 __ dsrl(AT, AT, 1);
11244 __ dmfc1(dst, F30);
11246 __ bne(dst, AT, Done);
11247 __ delayed()->mtc1(R0, F30);
11249 __ cvt_d_w(F30, F30);
11250 __ c_ult_d(src, F30);
11251 __ bc1f(Done);
11252 __ delayed()->daddiu(T9, R0, -1);
11254 __ c_un_d(src, src); //NaN?
11255 __ subu(dst, T9, AT);
11256 __ movt(dst, R0);
11258 __ bind(Done);
11259 %}
11261 ins_pipe( pipe_slow );
11262 %}
// ConvD2L, slow path: returns 0 for NaN, otherwise truncates; if the FCSR
// invalid-operation flag (bit 16 read via cfc1 $31) was raised by trunc.l.d,
// falls back to the SharedRuntime::d2l runtime call for a correct result.
11264 instruct convD2L_reg_slow( mRegL dst, regD src ) %{
11265 match(Set dst (ConvD2L src));
11266 ins_cost(250);
11267 format %{ "convD2L $dst, $src @ convD2L_reg_slow" %}
11268 ins_encode %{
11269 Register dst = as_Register($dst$$reg);
11270 FloatRegister src = as_FloatRegister($src$$reg);
11272 Label L;
11274 __ c_un_d(src, src); //NaN?
11275 __ bc1t(L);
11276 __ delayed();
11277 __ move(dst, R0);
11279 __ trunc_l_d(F30, src);
11280 __ cfc1(AT, 31);
11281 __ li(T9, 0x10000);
11282 __ andr(AT, AT, T9);
11283 __ beq(AT, R0, L);
11284 __ delayed()->dmfc1(dst, F30);
11286 __ mov_d(F12, src);
11287 __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::d2l), 1);
11288 __ move(dst, V0);
11289 __ bind(L);
11290 %}
11292 ins_pipe( pipe_slow );
11293 %}
// ConvF2I, fast path: trunc.w.s then zero the result if src is NaN
// (c.un.s sets the FP condition; movt conditionally clears dst).
// Clobbers F30.
11295 instruct convF2I_reg_fast( mRegI dst, regF src ) %{
11296 match(Set dst (ConvF2I src));
11297 ins_cost(150);
11298 format %{ "convf2i $dst, $src @ convF2I_reg_fast" %}
11299 ins_encode %{
11300 Register dreg = $dst$$Register;
11301 FloatRegister fval = $src$$FloatRegister;
11303 __ trunc_w_s(F30, fval);
11304 __ mfc1(dreg, F30);
11305 __ c_un_s(fval, fval); //NaN?
11306 __ movt(dreg, R0);
11307 %}
11309 ins_pipe( pipe_slow );
11310 %}
// ConvF2I, slow path: 0 for NaN; on an FCSR invalid-operation flag from
// trunc.w.s, call SharedRuntime::f2i.  V0 is preserved around the runtime
// call unless it is the destination (see the 2014/01/08 note below).
11312 instruct convF2I_reg_slow( mRegI dst, regF src ) %{
11313 match(Set dst (ConvF2I src));
11314 ins_cost(250);
11315 format %{ "convf2i $dst, $src @ convF2I_reg_slow" %}
11316 ins_encode %{
11317 Register dreg = $dst$$Register;
11318 FloatRegister fval = $src$$FloatRegister;
11319 Label L;
11321 __ c_un_s(fval, fval); //NaN?
11322 __ bc1t(L);
11323 __ delayed();
11324 __ move(dreg, R0);
11326 __ trunc_w_s(F30, fval);
11328 /* Call SharedRuntime:f2i() to do valid convention */
11329 __ cfc1(AT, 31);
11330 __ li(T9, 0x10000);
11331 __ andr(AT, AT, T9);
11332 __ beq(AT, R0, L);
11333 __ delayed()->mfc1(dreg, F30);
11335 __ mov_s(F12, fval);
11337 /* 2014/01/08 Fu : This bug was found when running ezDS's control-panel.
11338 * J 982 C2 javax.swing.text.BoxView.layoutMajorAxis(II[I[I)V (283 bytes) @ 0x000000555c46aa74
11339 *
11340 * An interger array index has been assigned to V0, and then changed from 1 to Integer.MAX_VALUE.
11341 * V0 is corrupted during call_VM_leaf(), and should be preserved.
11342 */
11343 if(dreg != V0) {
11344 __ push(V0);
11345 }
11346 __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::f2i), 1);
11347 if(dreg != V0) {
11348 __ move(dreg, V0);
11349 __ pop(V0);
11350 }
11351 __ bind(L);
11352 %}
11354 ins_pipe( pipe_slow );
11355 %}
// ConvF2L, fast path: trunc.l.s, with the result zeroed if src is NaN.
// Clobbers F30.
11357 instruct convF2L_reg_fast( mRegL dst, regF src ) %{
11358 match(Set dst (ConvF2L src));
11359 ins_cost(150);
11360 format %{ "convf2l $dst, $src @ convF2L_reg_fast" %}
11361 ins_encode %{
11362 Register dreg = $dst$$Register;
11363 FloatRegister fval = $src$$FloatRegister;
11365 __ trunc_l_s(F30, fval);
11366 __ dmfc1(dreg, F30);
11367 __ c_un_s(fval, fval); //NaN?
11368 __ movt(dreg, R0);
11369 %}
11371 ins_pipe( pipe_slow );
11372 %}
// ConvF2L, slow path: 0 for NaN; on an FCSR invalid-operation flag from
// trunc.l.s, call SharedRuntime::f2l for a spec-correct result.
// NOTE(review): unlike convF2I_reg_slow, V0 is not preserved here when
// dst != V0 — presumably safe because dst is always V0-compatible, but
// worth confirming against the register allocator.
11374 instruct convF2L_reg_slow( mRegL dst, regF src ) %{
11375 match(Set dst (ConvF2L src));
11376 ins_cost(250);
11377 format %{ "convf2l $dst, $src @ convF2L_reg_slow" %}
11378 ins_encode %{
11379 Register dst = as_Register($dst$$reg);
11380 FloatRegister fval = $src$$FloatRegister;
11381 Label L;
11383 __ c_un_s(fval, fval); //NaN?
11384 __ bc1t(L);
11385 __ delayed();
11386 __ move(dst, R0);
11388 __ trunc_l_s(F30, fval);
11389 __ cfc1(AT, 31);
11390 __ li(T9, 0x10000);
11391 __ andr(AT, AT, T9);
11392 __ beq(AT, R0, L);
11393 __ delayed()->dmfc1(dst, F30);
11395 __ mov_s(F12, fval);
11396 __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::f2l), 1);
11397 __ move(dst, V0);
11398 __ bind(L);
11399 %}
11401 ins_pipe( pipe_slow );
11402 %}
11404 instruct convL2F_reg( regF dst, mRegL src ) %{
11405 match(Set dst (ConvL2F src));
11406 format %{ "convl2f $dst, $src @ convL2F_reg" %}
11407 ins_encode %{
11408 FloatRegister dst = $dst$$FloatRegister;
11409 Register src = as_Register($src$$reg);
11410 Label L;
11412 __ dmtc1(src, dst);
11413 __ cvt_s_l(dst, dst);
11414 %}
11416 ins_pipe( pipe_slow );
11417 %}
// ConvI2F: move the int into an FPR (mtc1) and convert with cvt.s.w.
11419 instruct convI2F_reg( regF dst, mRegI src ) %{
11420 match(Set dst (ConvI2F src));
11421 format %{ "convi2f $dst, $src @ convI2F_reg" %}
11422 ins_encode %{
11423 Register src = $src$$Register;
11424 FloatRegister dst = $dst$$FloatRegister;
11426 __ mtc1(src, dst);
11427 __ cvt_s_w(dst, dst);
11428 %}
11430 ins_pipe( fpu_regF_regF );
11431 %}
// CmpLTMask with zero: dst = (p < 0) ? -1 : 0, i.e. an arithmetic shift of
// the sign bit across the whole word.
11433 instruct cmpLTMask_immI0( mRegI dst, mRegI p, immI0 zero ) %{
11434 match(Set dst (CmpLTMask p zero));
11435 ins_cost(100);
11437 format %{ "sra $dst, $p, 31 @ cmpLTMask_immI0" %}
11438 ins_encode %{
11439 Register src = $p$$Register;
11440 Register dst = $dst$$Register;
11442 __ sra(dst, src, 31);
11443 %}
11444 ins_pipe( pipe_slow );
11445 %}
// General CmpLTMask: dst = (p < q) ? -1 : 0.  slt yields 0/1; negating
// turns 1 into an all-ones mask.
11448 instruct cmpLTMask( mRegI dst, mRegI p, mRegI q ) %{
11449 match(Set dst (CmpLTMask p q));
11450 ins_cost(400);
11452 format %{ "cmpLTMask $dst, $p, $q @ cmpLTMask" %}
11453 ins_encode %{
11454 Register p = $p$$Register;
11455 Register q = $q$$Register;
11456 Register dst = $dst$$Register;
11458 __ slt(dst, p, q);
11459 __ subu(dst, R0, dst);
11460 %}
11461 ins_pipe( pipe_slow );
11462 %}
// Conv2B on a pointer: dst = (src != NULL) ? 1 : 0, via movz; same aliasing
// precaution as convI2B when dst == src (src saved in AT first).
11464 instruct convP2B(mRegI dst, mRegP src) %{
11465 match(Set dst (Conv2B src));
11467 ins_cost(100);
11468 format %{ "convP2B $dst, $src @ convP2B" %}
11469 ins_encode %{
11470 Register dst = as_Register($dst$$reg);
11471 Register src = as_Register($src$$reg);
11473 if (dst != src) {
11474 __ daddiu(dst, R0, 1);
11475 __ movz(dst, R0, src);
11476 } else {
11477 __ move(AT, src);
11478 __ daddiu(dst, R0, 1);
11479 __ movz(dst, R0, AT);
11480 }
11481 %}
11483 ins_pipe( ialu_regL_regL );
11484 %}
// ConvI2D: mtc1 then cvt.d.w.
11487 instruct convI2D_reg_reg(regD dst, mRegI src) %{
11488 match(Set dst (ConvI2D src));
11489 format %{ "conI2D $dst, $src @convI2D_reg" %}
11490 ins_encode %{
11491 Register src = $src$$Register;
11492 FloatRegister dst = $dst$$FloatRegister;
11493 __ mtc1(src, dst);
11494 __ cvt_d_w(dst, dst);
11495 %}
11496 ins_pipe( fpu_regF_regF );
11497 %}
// ConvF2D: widening is exact; a single cvt.d.s suffices.
11499 instruct convF2D_reg_reg(regD dst, regF src) %{
11500 match(Set dst (ConvF2D src));
11501 format %{ "convF2D $dst, $src\t# @convF2D_reg_reg" %}
11502 ins_encode %{
11503 FloatRegister dst = $dst$$FloatRegister;
11504 FloatRegister src = $src$$FloatRegister;
11506 __ cvt_d_s(dst, src);
11507 %}
11508 ins_pipe( fpu_regF_regF );
11509 %}
// ConvD2F: narrowing cvt.s.d (rounds per current FCSR rounding mode).
11511 instruct convD2F_reg_reg(regF dst, regD src) %{
11512 match(Set dst (ConvD2F src));
11513 format %{ "convD2F $dst, $src\t# @convD2F_reg_reg" %}
11514 ins_encode %{
11515 FloatRegister dst = $dst$$FloatRegister;
11516 FloatRegister src = $src$$FloatRegister;
11518 __ cvt_s_d(dst, src);
11519 %}
11520 ins_pipe( fpu_regF_regF );
11521 %}
11523 // Convert a double to an int. If the double is a NAN, stuff a zero in instead.
// Fast path: trunc.w.d saturates to max_int (0x7fffffff) on overflow/NaN;
// that sentinel triggers a fix-up that yields min_int for "too negative"
// (T9 - AT = -1 - max_int) and 0 for NaN.  Clobbers AT, T9, F30.
11524 instruct convD2I_reg_reg_fast( mRegI dst, regD src ) %{
11525 match(Set dst (ConvD2I src));
11527 ins_cost(150);
11528 format %{ "convD2I $dst, $src\t# @ convD2I_reg_reg_fast" %}
11530 ins_encode %{
11531 FloatRegister src = $src$$FloatRegister;
11532 Register dst = $dst$$Register;
11534 Label Done;
11536 __ trunc_w_d(F30, src);
11537 // max_int: 2147483647
11538 __ move(AT, 0x7fffffff);
11539 __ mfc1(dst, F30);
11541 __ bne(dst, AT, Done);
11542 __ delayed()->mtc1(R0, F30);
11544 __ cvt_d_w(F30, F30);
11545 __ c_ult_d(src, F30);
11546 __ bc1f(Done);
11547 __ delayed()->addiu(T9, R0, -1);
11549 __ c_un_d(src, src); //NaN?
11550 __ subu32(dst, T9, AT);
11551 __ movt(dst, R0);
11553 __ bind(Done);
11554 %}
11555 ins_pipe( pipe_slow );
11556 %}
// Slow path: on an FCSR invalid-operation flag from trunc.w.d, fall back to
// SharedRuntime::d2i.
11558 instruct convD2I_reg_reg_slow( mRegI dst, regD src ) %{
11559 match(Set dst (ConvD2I src));
11561 ins_cost(250);
11562 format %{ "convD2I $dst, $src\t# @ convD2I_reg_reg_slow" %}
11564 ins_encode %{
11565 FloatRegister src = $src$$FloatRegister;
11566 Register dst = $dst$$Register;
11567 Label L;
11569 __ trunc_w_d(F30, src);
11570 __ cfc1(AT, 31);
11571 __ li(T9, 0x10000);
11572 __ andr(AT, AT, T9);
11573 __ beq(AT, R0, L);
11574 __ delayed()->mfc1(dst, F30);
11576 __ mov_d(F12, src);
11577 __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::d2i), 1);
11578 __ move(dst, V0);
11579 __ bind(L);
11581 %}
11582 ins_pipe( pipe_slow );
11583 %}
11585 // Convert oop pointer into compressed form
// EncodeP for possibly-null oops (null-checked encode); the not-null variant
// below is selected by predicate when C2 proves the value non-null.
11586 instruct encodeHeapOop(mRegN dst, mRegP src) %{
11587 predicate(n->bottom_type()->make_ptr()->ptr() != TypePtr::NotNull);
11588 match(Set dst (EncodeP src));
11589 format %{ "encode_heap_oop $dst,$src" %}
11590 ins_encode %{
11591 Register src = $src$$Register;
11592 Register dst = $dst$$Register;
11593 if (src != dst) {
11594 __ move(dst, src);
11595 }
11596 __ encode_heap_oop(dst);
11597 %}
11598 ins_pipe( ialu_regL_regL );
11599 %}
// EncodeP for provably non-null oops: skips the null check.
11601 instruct encodeHeapOop_not_null(mRegN dst, mRegP src) %{
11602 predicate(n->bottom_type()->make_ptr()->ptr() == TypePtr::NotNull);
11603 match(Set dst (EncodeP src));
11604 format %{ "encode_heap_oop_not_null $dst,$src @ encodeHeapOop_not_null" %}
11605 ins_encode %{
11606 __ encode_heap_oop_not_null($dst$$Register, $src$$Register);
11607 %}
11608 ins_pipe( ialu_regL_regL );
11609 %}
// DecodeN for possibly-null narrow oops.
11611 instruct decodeHeapOop(mRegP dst, mRegN src) %{
11612 predicate(n->bottom_type()->is_ptr()->ptr() != TypePtr::NotNull &&
11613 n->bottom_type()->is_ptr()->ptr() != TypePtr::Constant);
11614 match(Set dst (DecodeN src));
11615 format %{ "decode_heap_oop $dst,$src @ decodeHeapOop" %}
11616 ins_encode %{
11617 Register s = $src$$Register;
11618 Register d = $dst$$Register;
11619 if (s != d) {
11620 __ move(d, s);
11621 }
11622 __ decode_heap_oop(d);
11623 %}
11624 ins_pipe( ialu_regL_regL );
11625 %}
// DecodeN for non-null (or constant) narrow oops: the two-register macro
// form is used when src and dst differ, the in-place form otherwise.
11627 instruct decodeHeapOop_not_null(mRegP dst, mRegN src) %{
11628 predicate(n->bottom_type()->is_ptr()->ptr() == TypePtr::NotNull ||
11629 n->bottom_type()->is_ptr()->ptr() == TypePtr::Constant);
11630 match(Set dst (DecodeN src));
11631 format %{ "decode_heap_oop_not_null $dst,$src @ decodeHeapOop_not_null" %}
11632 ins_encode %{
11633 Register s = $src$$Register;
11634 Register d = $dst$$Register;
11635 if (s != d) {
11636 __ decode_heap_oop_not_null(d, s);
11637 } else {
11638 __ decode_heap_oop_not_null(d);
11639 }
11640 %}
11641 ins_pipe( ialu_regL_regL );
11642 %}
// Klass pointers are never null, so only not-null encode/decode forms exist.
11644 instruct encodeKlass_not_null(mRegN dst, mRegP src) %{
11645 match(Set dst (EncodePKlass src));
11646 format %{ "encode_heap_oop_not_null $dst,$src @ encodeKlass_not_null" %}
11647 ins_encode %{
11648 __ encode_klass_not_null($dst$$Register, $src$$Register);
11649 %}
11650 ins_pipe( ialu_regL_regL );
11651 %}
11653 instruct decodeKlass_not_null(mRegP dst, mRegN src) %{
11654 match(Set dst (DecodeNKlass src));
11655 format %{ "decode_heap_klass_not_null $dst,$src" %}
11656 ins_encode %{
11657 Register s = $src$$Register;
11658 Register d = $dst$$Register;
11659 if (s != d) {
11660 __ decode_klass_not_null(d, s);
11661 } else {
11662 __ decode_klass_not_null(d);
11663 }
11664 %}
11665 ins_pipe( ialu_regL_regL );
11666 %}
11668 //FIXME
// ThreadLocal: load the current JavaThread pointer.  With OPT_THREAD the
// thread is pinned in register TREG; otherwise get_thread() computes it.
11669 instruct tlsLoadP(mRegP dst) %{
11670 match(Set dst (ThreadLocal));
11672 ins_cost(0);
11673 format %{ " get_thread in $dst #@tlsLoadP" %}
11674 ins_encode %{
11675 Register dst = $dst$$Register;
11676 #ifdef OPT_THREAD
11677 __ move(dst, TREG);
11678 #else
11679 __ get_thread(dst);
11680 #endif
11681 %}
11683 ins_pipe( ialu_loadI );
11684 %}
// CheckCastPP/CastPP/CastII are compile-time type assertions only; they
// generate no code (dst aliases the input register).
11687 instruct checkCastPP( mRegP dst ) %{
11688 match(Set dst (CheckCastPP dst));
11690 format %{ "#checkcastPP of $dst (empty encoding) #@chekCastPP" %}
11691 ins_encode( /*empty encoding*/ );
11692 ins_pipe( empty );
11693 %}
11695 instruct castPP(mRegP dst)
11696 %{
11697 match(Set dst (CastPP dst));
11699 size(0);
11700 format %{ "# castPP of $dst" %}
11701 ins_encode(/* empty encoding */);
11702 ins_pipe(empty);
11703 %}
11705 instruct castII( mRegI dst ) %{
11706 match(Set dst (CastII dst));
11707 format %{ "#castII of $dst empty encoding" %}
11708 ins_encode( /*empty encoding*/ );
11709 ins_cost(0);
11710 ins_pipe( empty );
11711 %}
11713 // Return Instruction
11714 // Remove the return address & jump to it.
// jr RA plus the mandatory branch-delay-slot nop.
11715 instruct Ret() %{
11716 match(Return);
11717 format %{ "RET #@Ret" %}
11719 ins_encode %{
11720 __ jr(RA);
11721 __ nop();
11722 %}
11724 ins_pipe( pipe_jump );
11725 %}
11727 /*
11728 // For Loongson CPUs, jr seems too slow, so this rule shouldn't be imported.
11729 instruct jumpXtnd(mRegL switch_val) %{
11730 match(Jump switch_val);
11732 ins_cost(350);
11734 format %{ "load T9 <-- [$constanttablebase, $switch_val, $constantoffset] @ jumpXtnd\n\t"
11735 "jr T9\n\t"
11736 "nop" %}
11737 ins_encode %{
11738 Register table_base = $constanttablebase;
11739 int con_offset = $constantoffset;
11740 Register switch_reg = $switch_val$$Register;
11742 if (UseLoongsonISA) {
11743 if (Assembler::is_simm(con_offset, 8)) {
11744 __ gsldx(T9, table_base, switch_reg, con_offset);
11745 } else if (Assembler::is_simm16(con_offset)) {
11746 __ daddu(T9, table_base, switch_reg);
11747 __ ld(T9, T9, con_offset);
11748 } else {
11749 __ move(T9, con_offset);
11750 __ daddu(AT, table_base, switch_reg);
11751 __ gsldx(T9, AT, T9, 0);
11752 }
11753 } else {
11754 if (Assembler::is_simm16(con_offset)) {
11755 __ daddu(T9, table_base, switch_reg);
11756 __ ld(T9, T9, con_offset);
11757 } else {
11758 __ move(T9, con_offset);
11759 __ daddu(AT, table_base, switch_reg);
11760 __ daddu(AT, T9, AT);
11761 __ ld(T9, AT, 0);
11762 }
11763 }
11765 __ jr(T9);
11766 __ nop();
11768 %}
11769 ins_pipe(pipe_jump);
11770 %}
11771 */
11773 // Jump Direct - Label defines a relative address from JMP
// Unconditional branch to a label, with delay-slot nop.
// NOTE(review): '&L' after dereferencing $labl$$label tests whether the label
// pointer was non-null — it relies on the reference never being formed from a
// null pointer being diagnosed; this is a long-standing HotSpot .ad idiom but
// technically UB if the label pointer is null.  Confirm $labl$$label nullness.
11774 instruct jmpDir(label labl) %{
11775 match(Goto);
11776 effect(USE labl);
11778 ins_cost(300);
11779 format %{ "JMP $labl #@jmpDir" %}
11781 ins_encode %{
11782 Label &L = *($labl$$label);
11783 if(&L)
11784 __ b(L);
11785 else
11786 __ b(int(0));
11787 __ nop();
11788 %}
11790 ins_pipe( pipe_jump );
11791 ins_pc_relative(1);
11792 %}
11796 // Tail Jump; remove the return address; jump to target.
11797 // TailCall above leaves the return address around.
11798 // TailJump is used in only one place, the rethrow_Java stub (fancy_jump=2).
11799 // ex_oop (Exception Oop) is needed in %o0 at the jump. As there would be a
11800 // "restore" before this instruction (in Epilogue), we need to materialize it
11801 // in %i0.
11802 //FIXME
// Places the exception oop in V0 and the exception pc (RA) in V1, per the
// conventions of generate_forward_exception()/generate_exception_blob(),
// then jumps indirectly through the target register.
11803 instruct tailjmpInd(mRegP jump_target,mRegP ex_oop) %{
11804 match( TailJump jump_target ex_oop );
11805 ins_cost(200);
11806 format %{ "Jmp $jump_target ; ex_oop = $ex_oop #@tailjmpInd" %}
11807 ins_encode %{
11808 Register target = $jump_target$$Register;
11810 /* 2012/9/14 Jin: V0, V1 are indicated in:
11811 * [stubGenerator_mips.cpp] generate_forward_exception()
11812 * [runtime_mips.cpp] OptoRuntime::generate_exception_blob()
11813 */
11814 Register oop = $ex_oop$$Register;
11815 Register exception_oop = V0;
11816 Register exception_pc = V1;
11818 __ move(exception_pc, RA);
11819 __ move(exception_oop, oop);
11821 __ jr(target);
11822 __ nop();
11823 %}
11824 ins_pipe( pipe_jump );
11825 %}
11827 // ============================================================================
11828 // Procedure Call/Return Instructions
11829 // Call Java Static Instruction
11830 // Note: If this code changes, the corresponding ret_addr_offset() and
11831 // compute_padding() functions will have to be adjusted.
// Direct static Java call; the actual emission lives in the shared
// Java_Static_Call encoding class.  16-byte alignment keeps the call site
// patchable.
11832 instruct CallStaticJavaDirect(method meth) %{
11833 match(CallStaticJava);
11834 effect(USE meth);
11836 ins_cost(300);
11837 format %{ "CALL,static #@CallStaticJavaDirect " %}
11838 ins_encode( Java_Static_Call( meth ) );
11839 ins_pipe( pipe_slow );
11840 ins_pc_relative(1);
11841 ins_alignment(16);
11842 %}
11844 // Call Java Dynamic Instruction
11845 // Note: If this code changes, the corresponding ret_addr_offset() and
11846 // compute_padding() functions will have to be adjusted.
// Dynamic (inline-cache) Java call via the Java_Dynamic_Call encoding class.
11847 instruct CallDynamicJavaDirect(method meth) %{
11848 match(CallDynamicJava);
11849 effect(USE meth);
11851 ins_cost(300);
11852 format %{"MOV IC_Klass, (oop)-1 @ CallDynamicJavaDirect\n\t"
11853 "CallDynamic @ CallDynamicJavaDirect" %}
11854 ins_encode( Java_Dynamic_Call( meth ) );
11855 ins_pipe( pipe_slow );
11856 ins_pc_relative(1);
11857 ins_alignment(16);
11858 %}
// Runtime leaf call that uses no FP arguments; shares Java_To_Runtime.
11860 instruct CallLeafNoFPDirect(method meth) %{
11861 match(CallLeafNoFP);
11862 effect(USE meth);
11864 ins_cost(300);
11865 format %{ "CALL_LEAF_NOFP,runtime " %}
11866 ins_encode(Java_To_Runtime(meth));
11867 ins_pipe( pipe_slow );
11868 ins_pc_relative(1);
11869 ins_alignment(16);
11870 %}
11872 // Prefetch instructions.
11874 instruct prefetchrNTA( memory mem ) %{
11875 match(PrefetchRead mem);
11876 ins_cost(125);
11878 format %{ "pref $mem\t# Prefetch into non-temporal cache for read @ prefetchrNTA" %}
11879 ins_encode %{
11880 int base = $mem$$base;
11881 int index = $mem$$index;
11882 int scale = $mem$$scale;
11883 int disp = $mem$$disp;
11885 if( index != 0 ) {
11886 if (scale == 0) {
11887 __ daddu(AT, as_Register(base), as_Register(index));
11888 } else {
11889 __ dsll(AT, as_Register(index), scale);
11890 __ daddu(AT, as_Register(base), AT);
11891 }
11892 } else {
11893 __ move(AT, as_Register(base));
11894 }
11895 if( Assembler::is_simm16(disp) ) {
11896 __ daddiu(AT, as_Register(base), disp);
11897 __ daddiu(AT, AT, disp);
11898 } else {
11899 __ move(T9, disp);
11900 __ daddu(AT, as_Register(base), T9);
11901 }
11902 __ pref(0, AT, 0); //hint: 0:load
11903 %}
11904 ins_pipe(pipe_slow);
11905 %}
11907 instruct prefetchwNTA( memory mem ) %{
11908 match(PrefetchWrite mem);
11909 ins_cost(125);
11910 format %{ "pref $mem\t# Prefetch to non-temporal cache for write @ prefetchwNTA" %}
11911 ins_encode %{
11912 int base = $mem$$base;
11913 int index = $mem$$index;
11914 int scale = $mem$$scale;
11915 int disp = $mem$$disp;
11917 if( index != 0 ) {
11918 if (scale == 0) {
11919 __ daddu(AT, as_Register(base), as_Register(index));
11920 } else {
11921 __ dsll(AT, as_Register(index), scale);
11922 __ daddu(AT, as_Register(base), AT);
11923 }
11924 } else {
11925 __ move(AT, as_Register(base));
11926 }
11927 if( Assembler::is_simm16(disp) ) {
11928 __ daddiu(AT, as_Register(base), disp);
11929 __ daddiu(AT, AT, disp);
11930 } else {
11931 __ move(T9, disp);
11932 __ daddu(AT, as_Register(base), T9);
11933 }
11934 __ pref(1, AT, 0); //hint: 1:store
11935 %}
11936 ins_pipe(pipe_slow);
11937 %}
11939 // Prefetch instructions for allocation.
// Prefetch for allocation: implemented as a discarded byte load (lb/gslbx
// into R0) of the target address, which pulls the line into cache.  On
// Loongson the fused gslbx form (base + index + imm) saves the address adds.
11941 instruct prefetchAllocNTA( memory mem ) %{
11942 match(PrefetchAllocation mem);
11943 ins_cost(125);
11944 format %{ "pref $mem\t# Prefetch allocation @ prefetchAllocNTA" %}
11945 ins_encode %{
11946 int base = $mem$$base;
11947 int index = $mem$$index;
11948 int scale = $mem$$scale;
11949 int disp = $mem$$disp;
11951 Register dst = R0;
11953 if( index != 0 ) {
11954 if( Assembler::is_simm16(disp) ) {
11955 if( UseLoongsonISA ) {
11956 if (scale == 0) {
11957 __ gslbx(dst, as_Register(base), as_Register(index), disp);
11958 } else {
11959 __ dsll(AT, as_Register(index), scale);
11960 __ gslbx(dst, as_Register(base), AT, disp);
11961 }
11962 } else {
11963 if (scale == 0) {
11964 __ addu(AT, as_Register(base), as_Register(index));
11965 } else {
11966 __ dsll(AT, as_Register(index), scale);
11967 __ addu(AT, as_Register(base), AT);
11968 }
11969 __ lb(dst, AT, disp);
11970 }
11971 } else {
// disp does not fit simm16: materialize it in T9.
11972 if (scale == 0) {
11973 __ addu(AT, as_Register(base), as_Register(index));
11974 } else {
11975 __ dsll(AT, as_Register(index), scale);
11976 __ addu(AT, as_Register(base), AT);
11977 }
11978 __ move(T9, disp);
11979 if( UseLoongsonISA ) {
11980 __ gslbx(dst, AT, T9, 0);
11981 } else {
11982 __ addu(AT, AT, T9);
11983 __ lb(dst, AT, 0);
11984 }
11985 }
11986 } else {
// No index register: base + disp only.
11987 if( Assembler::is_simm16(disp) ) {
11988 __ lb(dst, as_Register(base), disp);
11989 } else {
11990 __ move(T9, disp);
11991 if( UseLoongsonISA ) {
11992 __ gslbx(dst, as_Register(base), T9, 0);
11993 } else {
11994 __ addu(AT, as_Register(base), T9);
11995 __ lb(dst, AT, 0);
11996 }
11997 }
11998 }
11999 %}
12000 ins_pipe(pipe_slow);
12001 %}
12004 // Call runtime without safepoint
// Runtime leaf call (may pass FP args); shares the Java_To_Runtime encoding.
12005 instruct CallLeafDirect(method meth) %{
12006 match(CallLeaf);
12007 effect(USE meth);
12009 ins_cost(300);
12010 format %{ "CALL_LEAF,runtime #@CallLeafDirect " %}
12011 ins_encode(Java_To_Runtime(meth));
12012 ins_pipe( pipe_slow );
12013 ins_pc_relative(1);
12014 ins_alignment(16);
12015 %}
12017 // Load Char (16bit unsigned)
// LoadUS: zero-extending 16-bit load via the shared load_C_enc encoding.
12018 instruct loadUS(mRegI dst, memory mem) %{
12019 match(Set dst (LoadUS mem));
12021 ins_cost(125);
12022 format %{ "loadUS $dst,$mem @ loadC" %}
12023 ins_encode(load_C_enc(dst, mem));
12024 ins_pipe( ialu_loadI );
12025 %}
// LoadUS folded with ConvI2L: a zero-extended 16-bit value is already a
// valid long, so the same encoding is reused.
12027 instruct loadUS_convI2L(mRegL dst, memory mem) %{
12028 match(Set dst (ConvI2L (LoadUS mem)));
12030 ins_cost(125);
12031 format %{ "loadUS $dst,$mem @ loadUS_convI2L" %}
12032 ins_encode(load_C_enc(dst, mem));
12033 ins_pipe( ialu_loadI );
12034 %}
12036 // Store Char (16bit unsigned)
12037 instruct storeC(memory mem, mRegI src) %{
12038 match(Set mem (StoreC mem src));
12040 ins_cost(125);
12041 format %{ "storeC $src, $mem @ storeC" %}
12042 ins_encode(store_C_reg_enc(mem, src));
12043 ins_pipe( ialu_loadI );
12044 %}
// StoreC of constant zero: dedicated encoding stores R0 directly.
12046 instruct storeC0(memory mem, immI0 zero) %{
12047 match(Set mem (StoreC mem zero));
12049 ins_cost(125);
12050 format %{ "storeC $zero, $mem @ storeC0" %}
12051 ins_encode(store_C0_enc(mem));
12052 ins_pipe( ialu_loadI );
12053 %}
// Materialize float constant 0.0f: move R0's zero bit pattern (== +0.0f)
// into the FP register, avoiding a constant-table load.
instruct loadConF0(regF dst, immF0 zero) %{
  match(Set dst zero);
  ins_cost(100);

  format %{ "mov $dst, zero @ loadConF0\n"%}
  ins_encode %{
    FloatRegister dst = $dst$$FloatRegister;

    __ mtc1(R0, dst);
  %}
  ins_pipe( fpu_loadF );
%}
// Load a float constant from the constant table.  Small offsets use lwc1
// directly; large offsets use the Loongson indexed load gslwxc1 when
// available, otherwise compute the address in AT first.
instruct loadConF(regF dst, immF src) %{
  match(Set dst src);
  ins_cost(125);

  format %{ "lwc1 $dst, $constantoffset[$constanttablebase] # load FLOAT $src from table @ loadConF" %}
  ins_encode %{
    int con_offset = $constantoffset($src);

    if (Assembler::is_simm16(con_offset)) {
      __ lwc1($dst$$FloatRegister, $constanttablebase, con_offset);
    } else {
      __ set64(AT, con_offset);
      if (UseLoongsonISA) {
        // Indexed FP load: base + AT in one instruction.
        __ gslwxc1($dst$$FloatRegister, $constanttablebase, AT, 0);
      } else {
        __ daddu(AT, $constanttablebase, AT);
        __ lwc1($dst$$FloatRegister, AT, 0);
      }
    }
  %}
  ins_pipe( fpu_loadF );
%}
// Materialize double constant 0.0: dmtc1 moves R0's 64 zero bits (== +0.0)
// into the FP register, avoiding a constant-table load.
instruct loadConD0(regD dst, immD0 zero) %{
  match(Set dst zero);
  ins_cost(100);

  format %{ "mov $dst, zero @ loadConD0"%}
  ins_encode %{
    FloatRegister dst = as_FloatRegister($dst$$reg);

    __ dmtc1(R0, dst);
  %}
  ins_pipe( fpu_loadF );
%}
// Load a double constant from the constant table.  Small offsets use ldc1
// directly; large offsets use the Loongson indexed load gsldxc1 when
// available, otherwise compute the address in AT first.
instruct loadConD(regD dst, immD src) %{
  match(Set dst src);
  ins_cost(125);

  format %{ "ldc1 $dst, $constantoffset[$constanttablebase] # load DOUBLE $src from table @ loadConD" %}
  ins_encode %{
    int con_offset = $constantoffset($src);

    if (Assembler::is_simm16(con_offset)) {
      __ ldc1($dst$$FloatRegister, $constanttablebase, con_offset);
    } else {
      __ set64(AT, con_offset);
      if (UseLoongsonISA) {
        // Indexed FP load: base + AT in one instruction.
        __ gsldxc1($dst$$FloatRegister, $constanttablebase, AT, 0);
      } else {
        __ daddu(AT, $constanttablebase, AT);
        __ ldc1($dst$$FloatRegister, AT, 0);
      }
    }
  %}
  ins_pipe( fpu_loadF );
%}
// Store register Float value (it is faster than store from FPU register)
instruct storeF_reg( memory mem, regF src) %{
  match(Set mem (StoreF mem src));

  ins_cost(50);
  format %{ "store $mem, $src\t# store float @ storeF_reg" %}
  ins_encode(store_F_reg_enc(mem, src));
  ins_pipe( fpu_storeF );
%}
// Store float 0.0f: store R0 with an integer sw, avoiding an FP register.
// Large displacements now use the Loongson indexed store gsswx when
// available, matching the gslbx/gslwxc1 pattern used by the loads in this
// file (and this patch's use of gsswx in store_N_reg_enc).
instruct storeF_imm0( memory mem, immF0 zero) %{
  match(Set mem (StoreF mem zero));

  ins_cost(40);
  format %{ "store $mem, zero\t# store float @ storeF_imm0" %}
  ins_encode %{
    int      base = $mem$$base;
    int     index = $mem$$index;
    int     scale = $mem$$scale;
    int      disp = $mem$$disp;

    if( index != 0 ) {
      // AT = base + (index << scale)
      if(scale != 0) {
        __ dsll(T9, as_Register(index), scale);
        __ addu(AT, as_Register(base), T9);
      } else {
        __ daddu(AT, as_Register(base), as_Register(index));
      }
      if( Assembler::is_simm16(disp) ) {
        __ sw(R0, AT, disp);
      } else {
        __ move(T9, disp);
        if( UseLoongsonISA ) {
          __ gsswx(R0, AT, T9, 0);
        } else {
          __ addu(AT, AT, T9);
          __ sw(R0, AT, 0);
        }
      }
    } else {
      if( Assembler::is_simm16(disp) ) {
        __ sw(R0, as_Register(base), disp);
      } else {
        __ move(T9, disp);
        if( UseLoongsonISA ) {
          __ gsswx(R0, as_Register(base), T9, 0);
        } else {
          __ addu(AT, as_Register(base), T9);
          __ sw(R0, AT, 0);
        }
      }
    }
  %}
  ins_pipe( ialu_storeI );
%}
// Load Double
instruct loadD(regD dst, memory mem) %{
  match(Set dst (LoadD mem));

  ins_cost(150);
  format %{ "loadD $dst, $mem #@loadD" %}
  ins_encode(load_D_enc(dst, mem));
  ins_pipe( ialu_loadI );
%}
// Load Double - UNaligned
// Currently emitted identically to loadD (same encoding); the higher cost
// discourages the matcher from preferring it.
instruct loadD_unaligned(regD dst, memory mem ) %{
  match(Set dst (LoadD_unaligned mem));
  ins_cost(250);
  // FIXME: Jin: Need more effective ldl/ldr
  format %{ "loadD_unaligned $dst, $mem #@loadD_unaligned" %}
  ins_encode(load_D_enc(dst, mem));
  ins_pipe( ialu_loadI );
%}
// Store Double from an FP register.
instruct storeD_reg( memory mem, regD src) %{
  match(Set mem (StoreD mem src));

  ins_cost(50);
  // NOTE: wording fixed — this stores a double, not a float (debug text only).
  format %{ "store $mem, $src\t# store double @ storeD_reg" %}
  ins_encode(store_D_reg_enc(mem, src));
  ins_pipe( fpu_storeF );
%}
// Store double 0.0: materialize +0.0 in scratch register F30 and sdc1 it.
// dmtc1(R0, F30) writes 64 zero bits, which is exactly +0.0 — the same
// single-instruction idiom loadConD0 uses — replacing the previous
// mtc1 + cvt_d_w pair.  Large displacements use the Loongson indexed
// store gssdxc1 when available, matching the gsldxc1 load pattern.
instruct storeD_imm0( memory mem, immD0 zero) %{
  match(Set mem (StoreD mem zero));

  ins_cost(40);
  format %{ "store $mem, zero\t# store double @ storeD_imm0" %}
  ins_encode %{
    int      base = $mem$$base;
    int     index = $mem$$index;
    int     scale = $mem$$scale;
    int      disp = $mem$$disp;

    // F30 := +0.0 (all-zero 64-bit pattern), one instruction.
    __ dmtc1(R0, F30);

    if( index != 0 ) {
      // AT = base + (index << scale)
      if(scale != 0) {
        __ dsll(T9, as_Register(index), scale);
        __ addu(AT, as_Register(base), T9);
      } else {
        __ daddu(AT, as_Register(base), as_Register(index));
      }
      if( Assembler::is_simm16(disp) ) {
        __ sdc1(F30, AT, disp);
      } else {
        __ move(T9, disp);
        if( UseLoongsonISA ) {
          __ gssdxc1(F30, AT, T9, 0);
        } else {
          __ addu(AT, AT, T9);
          __ sdc1(F30, AT, 0);
        }
      }
    } else {
      if( Assembler::is_simm16(disp) ) {
        __ sdc1(F30, as_Register(base), disp);
      } else {
        __ move(T9, disp);
        if( UseLoongsonISA ) {
          __ gssdxc1(F30, as_Register(base), T9, 0);
        } else {
          __ addu(AT, as_Register(base), T9);
          __ sdc1(F30, AT, 0);
        }
      }
    }
  %}
  ins_pipe( ialu_storeI );
%}
// Load an int from a stack slot (SP-relative); spill-code helper.
instruct loadSSI(mRegI dst, stackSlotI src)
%{
  match(Set dst src);

  ins_cost(125);
  format %{ "lw    $dst, $src\t# int stk @ loadSSI" %}
  ins_encode %{
    // Stack-slot displacements are expected to fit in a 16-bit immediate.
    guarantee( Assembler::is_simm16($src$$disp), "disp too long (loadSSI) !");
    __ lw($dst$$Register, SP, $src$$disp);
  %}
  ins_pipe(ialu_loadI);
%}
// Store an int to a stack slot (SP-relative); spill-code helper.
instruct storeSSI(stackSlotI dst, mRegI src)
%{
  match(Set dst src);

  ins_cost(100);
  format %{ "sw    $dst, $src\t# int stk @ storeSSI" %}
  ins_encode %{
    guarantee( Assembler::is_simm16($dst$$disp), "disp too long (storeSSI) !");
    __ sw($src$$Register, SP, $dst$$disp);
  %}
  ins_pipe(ialu_storeI);
%}
// Load a long from a stack slot (SP-relative); spill-code helper.
instruct loadSSL(mRegL dst, stackSlotL src)
%{
  match(Set dst src);

  ins_cost(125);
  format %{ "ld    $dst, $src\t# long stk @ loadSSL" %}
  ins_encode %{
    guarantee( Assembler::is_simm16($src$$disp), "disp too long (loadSSL) !");
    __ ld($dst$$Register, SP, $src$$disp);
  %}
  ins_pipe(ialu_loadI);
%}
// Store a long to a stack slot (SP-relative); spill-code helper.
instruct storeSSL(stackSlotL dst, mRegL src)
%{
  match(Set dst src);

  ins_cost(100);
  format %{ "sd    $dst, $src\t# long stk @ storeSSL" %}
  ins_encode %{
    guarantee( Assembler::is_simm16($dst$$disp), "disp too long (storeSSL) !");
    __ sd($src$$Register, SP, $dst$$disp);
  %}
  ins_pipe(ialu_storeI);
%}
// Load a pointer from a stack slot (SP-relative); spill-code helper.
instruct loadSSP(mRegP dst, stackSlotP src)
%{
  match(Set dst src);

  ins_cost(125);
  format %{ "ld    $dst, $src\t# ptr stk @ loadSSP" %}
  ins_encode %{
    guarantee( Assembler::is_simm16($src$$disp), "disp too long (loadSSP) !");
    __ ld($dst$$Register, SP, $src$$disp);
  %}
  ins_pipe(ialu_loadI);
%}
// Store a pointer to a stack slot (SP-relative); spill-code helper.
instruct storeSSP(stackSlotP dst, mRegP src)
%{
  match(Set dst src);

  ins_cost(100);
  format %{ "sd    $dst, $src\t# ptr stk @ storeSSP" %}
  ins_encode %{
    guarantee( Assembler::is_simm16($dst$$disp), "disp too long (storeSSP) !");
    __ sd($src$$Register, SP, $dst$$disp);
  %}
  ins_pipe(ialu_storeI);
%}
// Load a float from a stack slot (SP-relative); spill-code helper.
instruct loadSSF(regF dst, stackSlotF src)
%{
  match(Set dst src);

  ins_cost(125);
  format %{ "lwc1   $dst, $src\t# float stk @ loadSSF" %}
  ins_encode %{
    guarantee( Assembler::is_simm16($src$$disp), "disp too long (loadSSF) !");
    __ lwc1($dst$$FloatRegister, SP, $src$$disp);
  %}
  ins_pipe(ialu_loadI);
%}
// Store a float to a stack slot (SP-relative); spill-code helper.
instruct storeSSF(stackSlotF dst, regF src)
%{
  match(Set dst src);

  ins_cost(100);
  format %{ "swc1   $dst, $src\t# float stk @ storeSSF" %}
  ins_encode %{
    guarantee( Assembler::is_simm16($dst$$disp), "disp too long (storeSSF) !");
    __ swc1($src$$FloatRegister, SP, $dst$$disp);
  %}
  ins_pipe(fpu_storeF);
%}
// Use the same format since predicate() can not be used here.
// Load a double from a stack slot (SP-relative); spill-code helper.
instruct loadSSD(regD dst, stackSlotD src)
%{
  match(Set dst src);

  ins_cost(125);
  format %{ "ldc1   $dst, $src\t# double stk @ loadSSD" %}
  ins_encode %{
    guarantee( Assembler::is_simm16($src$$disp), "disp too long (loadSSD) !");
    __ ldc1($dst$$FloatRegister, SP, $src$$disp);
  %}
  ins_pipe(ialu_loadI);
%}
// Store a double to a stack slot (SP-relative); spill-code helper.
instruct storeSSD(stackSlotD dst, regD src)
%{
  match(Set dst src);

  ins_cost(100);
  format %{ "sdc1   $dst, $src\t# double stk @ storeSSD" %}
  ins_encode %{
    guarantee( Assembler::is_simm16($dst$$disp), "disp too long (storeSSD) !");
    __ sdc1($src$$FloatRegister, SP, $dst$$disp);
  %}
  ins_pipe(fpu_storeF);
%}
// Fast-path monitor enter: delegates to MacroAssembler::fast_lock, which
// sets the condition result consumed via the FlagsReg (AT) convention.
// box is USE_KILL'd (clobbered), tmp/scr are temporaries.
instruct cmpFastLock( FlagsReg cr, mRegP object, s0_RegP box, mRegI tmp, mRegP scr) %{
  match( Set cr (FastLock object box) );
  effect( TEMP tmp, TEMP scr, USE_KILL box );
  ins_cost(300);
  format %{ "FASTLOCK $cr $object, $box, $tmp #@ cmpFastLock" %}
  ins_encode %{
    __ fast_lock($object$$Register, $box$$Register, $tmp$$Register, $scr$$Register);
  %}

  ins_pipe( pipe_slow );
  ins_pc_relative(1);
%}
// Fast-path monitor exit: delegates to MacroAssembler::fast_unlock.
// box is USE_KILL'd (clobbered), tmp is a temporary.
instruct cmpFastUnlock( FlagsReg cr, mRegP object, s0_RegP box, mRegP tmp ) %{
  match( Set cr (FastUnlock object box) );
  effect( TEMP tmp, USE_KILL box );
  ins_cost(300);
  format %{ "FASTUNLOCK $object, $box, $tmp #@cmpFastUnlock" %}
  ins_encode %{
    __ fast_unlock($object$$Register, $box$$Register, $tmp$$Register);
  %}

  ins_pipe( pipe_slow );
  ins_pc_relative(1);
%}
// Store CMS card-mark Immediate
// Byte store of an immediate with a memory barrier in the encoding
// (store_B_immI_enc_sync) — used for card-table dirtying.
instruct storeImmCM(memory mem, immI8 src) %{
  match(Set mem (StoreCM mem src));

  ins_cost(150);
  format %{ "MOV8   $mem,$src\t! CMS card-mark imm0" %}
// opcode(0xC6);
  ins_encode(store_B_immI_enc_sync(mem, src));
  ins_pipe( ialu_storeI );
%}
// Die now
// Matches the Halt ideal node; emits a fatal stop so execution can never
// fall through.  (The debug stop stands in for a real illtrap for now.)
instruct ShouldNotReachHere( )
%{
  match(Halt);
  ins_cost(300);

  // Use the following format syntax
  format %{ "ILLTRAP   ;#@ShouldNotReachHere" %}
  ins_encode %{
    // Here we should emit illtrap !
    // Typo fixed in the stop message: "ShoudNotReachHere" -> "ShouldNotReachHere".
    __ stop("in ShouldNotReachHere");
  %}
  ins_pipe( pipe_jump );
%}
// Address computation (lea) for base + small offset when compressed oops
// use no shift: a single daddiu suffices.
instruct leaP8Narrow(mRegP dst, indOffset8Narrow mem)
%{
  predicate(Universe::narrow_oop_shift() == 0);
  match(Set dst mem);

  ins_cost(110);
  format %{ "leaq    $dst, $mem\t# ptr off8narrow @ leaP8Narrow" %}
  ins_encode %{
    Register  dst  = $dst$$Register;
    Register  base = as_Register($mem$$base);
    int       disp = $mem$$disp;

    __ daddiu(dst, base, disp);
  %}
  ins_pipe( ialu_regI_imm16 );
%}
// Address computation (lea): dst = base + (index << scale) + disp,
// using AT as scratch for the intermediate sum.
instruct leaPPosIdxScaleOff8(mRegP dst, basePosIndexScaleOffset8 mem)
%{
  match(Set dst mem);

  ins_cost(110);
  format %{ "leaq    $dst, $mem\t# @ PosIdxScaleOff8" %}
  ins_encode %{
    Register  dst   = $dst$$Register;
    Register  base  = as_Register($mem$$base);
    Register  index = as_Register($mem$$index);
    int       scale = $mem$$scale;
    int       disp  = $mem$$disp;

    if (scale == 0) {
      __ daddu(AT, base, index);
      __ daddiu(dst, AT, disp);
    } else {
      __ dsll(AT, index, scale);
      __ daddu(AT, base, AT);
      __ daddiu(dst, AT, disp);
    }
  %}

  ins_pipe( ialu_regI_imm16 );
%}
// Address computation (lea): dst = base + (index << scale), no displacement.
instruct leaPIdxScale(mRegP dst, indIndexScale mem)
%{
  match(Set dst mem);

  ins_cost(110);
  format %{ "leaq    $dst, $mem\t# @ leaPIdxScale" %}
  ins_encode %{
    Register  dst   = $dst$$Register;
    Register  base  = as_Register($mem$$base);
    Register  index = as_Register($mem$$index);
    int       scale = $mem$$scale;

    if (scale == 0) {
      __ daddu(dst, base, index);
    } else {
      __ dsll(AT, index, scale);
      __ daddu(dst, base, AT);
    }
  %}

  ins_pipe( ialu_regI_imm16 );
%}
// Jump Direct Conditional - Label defines a relative address from Jcc+1
// Counted-loop back edge on a register/register int compare.  cmpcode
// selects the condition; unsigned-style conditions (above/below) are
// synthesized with slt into AT followed by a branch on AT.
// The "if (&L)" checks guard against an unbound (null) label pointer:
// when no label is available a zero-offset branch is emitted — presumably
// for size estimation; confirm against the matcher's label handling.
instruct jmpLoopEnd(cmpOp cop, mRegI src1, mRegI src2, label labl) %{
  match(CountedLoopEnd cop (CmpI src1 src2));
  effect(USE labl);

  ins_cost(300);
  format %{ "J$cop  $src1, $src2, $labl\t# Loop end @ jmpLoopEnd" %}
  ins_encode %{
    Register op1 = $src1$$Register;
    Register op2 = $src2$$Register;
    Label     &L = *($labl$$label);
    int     flag = $cop$$cmpcode;

    switch(flag)
    {
      case 0x01: //equal
        if (&L)
          __ beq(op1, op2, L);
        else
          __ beq(op1, op2, (int)0);
        break;
      case 0x02: //not_equal
        if (&L)
          __ bne(op1, op2, L);
        else
          __ bne(op1, op2, (int)0);
        break;
      case 0x03: //above
        __ slt(AT, op2, op1);
        if(&L)
          __ bne(AT, R0, L);
        else
          __ bne(AT, R0, (int)0);
        break;
      case 0x04: //above_equal
        __ slt(AT, op1, op2);
        if(&L)
          __ beq(AT, R0, L);
        else
          __ beq(AT, R0, (int)0);
        break;
      case 0x05: //below
        __ slt(AT, op1, op2);
        if(&L)
          __ bne(AT, R0, L);
        else
          __ bne(AT, R0, (int)0);
        break;
      case 0x06: //below_equal
        __ slt(AT, op2, op1);
        if(&L)
          __ beq(AT, R0, L);
        else
          __ beq(AT, R0, (int)0);
        break;
      default:
        Unimplemented();
    }
    // Fill the MIPS branch delay slot.
    __ nop();
  %}
  ins_pipe( pipe_jump );
  ins_pc_relative(1);
%}
// Counted-loop back edge comparing a register against a small immediate:
// computes AT = src1 - src2 (addiu32 with negated immediate), then branches
// on AT's sign/zero relative to 0, which encodes the original comparison.
instruct jmpLoopEnd_reg_imm16_sub(cmpOp cop, mRegI src1, immI16_sub src2, label labl) %{
  match(CountedLoopEnd cop (CmpI src1 src2));
  effect(USE labl);

  ins_cost(250);
  format %{ "J$cop  $src1, $src2, $labl\t# Loop end @ jmpLoopEnd_reg_imm16_sub" %}
  ins_encode %{
    Register op1 = $src1$$Register;
    int      op2 = $src2$$constant;
    Label     &L = *($labl$$label);
    int     flag = $cop$$cmpcode;

    // AT = op1 - op2; the branch below then tests AT against zero.
    __ addiu32(AT, op1, -1 * op2);

    switch(flag)
    {
      case 0x01: //equal
        if (&L)
          __ beq(AT, R0, L);
        else
          __ beq(AT, R0, (int)0);
        break;
      case 0x02: //not_equal
        if (&L)
          __ bne(AT, R0, L);
        else
          __ bne(AT, R0, (int)0);
        break;
      case 0x03: //above
        if(&L)
          __ bgtz(AT, L);
        else
          __ bgtz(AT, (int)0);
        break;
      case 0x04: //above_equal
        if(&L)
          __ bgez(AT, L);
        else
          __ bgez(AT,(int)0);
        break;
      case 0x05: //below
        if(&L)
          __ bltz(AT, L);
        else
          __ bltz(AT, (int)0);
        break;
      case 0x06: //below_equal
        if(&L)
          __ blez(AT, L);
        else
          __ blez(AT, (int)0);
        break;
      default:
        Unimplemented();
    }
    // Fill the MIPS branch delay slot.
    __ nop();
  %}
  ins_pipe( pipe_jump );
  ins_pc_relative(1);
%}
12628 /*
12629 // Jump Direct Conditional - Label defines a relative address from Jcc+1
12630 instruct jmpLoopEndU(cmpOpU cop, eFlagsRegU cmp, label labl) %{
12631 match(CountedLoopEnd cop cmp);
12632 effect(USE labl);
12634 ins_cost(300);
12635 format %{ "J$cop,u $labl\t# Loop end" %}
12636 size(6);
12637 opcode(0x0F, 0x80);
12638 ins_encode( Jcc( cop, labl) );
12639 ins_pipe( pipe_jump );
12640 ins_pc_relative(1);
12641 %}
12643 instruct jmpLoopEndUCF(cmpOpUCF cop, eFlagsRegUCF cmp, label labl) %{
12644 match(CountedLoopEnd cop cmp);
12645 effect(USE labl);
12647 ins_cost(200);
12648 format %{ "J$cop,u $labl\t# Loop end" %}
12649 opcode(0x0F, 0x80);
12650 ins_encode( Jcc( cop, labl) );
12651 ins_pipe( pipe_jump );
12652 ins_pc_relative(1);
12653 %}
12654 */
// This match pattern is created for StoreIConditional since I cannot match IfNode without a RegFlags! fujie 2012/07/17
// Branches on the flag value left in AT by the producing instruct (e.g.
// storeIConditional leaves AT != 0 on success).  Note the mapping is
// inverted: "equal" (success) branches when AT != 0.
instruct jmpCon_flags(cmpOp cop, FlagsReg cr, label labl) %{
  match(If cop cr);
  effect(USE labl);

  ins_cost(300);
  format %{ "J$cop $labl #mips uses AT as eflag @jmpCon_flags" %}

  ins_encode %{
    Label    &L =  *($labl$$label);
    switch($cop$$cmpcode)
    {
      case 0x01: //equal
        if (&L)
          __ bne(AT, R0, L);
        else
          __ bne(AT, R0, (int)0);
        break;
      case 0x02: //not equal
        if (&L)
          __ beq(AT, R0, L);
        else
          __ beq(AT, R0, (int)0);
        break;
      default:
        Unimplemented();
    }
    // Fill the MIPS branch delay slot.
    __ nop();
  %}

  ins_pipe( pipe_jump );
  ins_pc_relative(1);
%}
12691 // ============================================================================
12692 // The 2nd slow-half of a subtype check. Scan the subklass's 2ndary superklass
12693 // array for an instance of the superklass. Set a hidden internal cache on a
12694 // hit (cache is checked with exposed code in gen_subtype_check()). Return
12695 // NZ for a miss or zero for a hit. The encoding ALSO sets flags.
12696 instruct partialSubtypeCheck( mRegP result, no_T8_mRegP sub, no_T8_mRegP super, mT8RegI tmp ) %{
12697 match(Set result (PartialSubtypeCheck sub super));
12698 effect(KILL tmp);
12699 ins_cost(1100); // slightly larger than the next version
12700 format %{ "partialSubtypeCheck result=$result, sub=$sub, super=$super, tmp=$tmp " %}
12702 ins_encode( enc_PartialSubtypeCheck(result, sub, super, tmp) );
12703 ins_pipe( pipe_slow );
12704 %}
// Conditional-store of an int value.
// ZF flag is set on success, reset otherwise. Implemented with a CMPXCHG on Intel.
// MIPS implementation: ll/sc retry loop.  On exit AT != 0 (0xFF) means the
// store succeeded and AT == 0 means the compare failed; jmpCon_flags
// consumes AT as the flag register.
instruct storeIConditional( memory mem, mRegI oldval, mRegI newval, FlagsReg cr ) %{
  match(Set cr (StoreIConditional mem (Binary oldval newval)));
//  effect(KILL oldval);
  format %{ "CMPXCHG $newval, $mem, $oldval \t# @storeIConditional" %}

  ins_encode %{
    Register oldval = $oldval$$Register;
    Register newval = $newval$$Register;
    Address  addr(as_Register($mem$$base), $mem$$disp);
    Label    again, failure;

//    int      base = $mem$$base;
    int     index = $mem$$index;
    int     scale = $mem$$scale;
    int      disp = $mem$$disp;

    // ll/sc take a 16-bit offset; larger displacements are not handled here.
    guarantee(Assembler::is_simm16(disp), "");

    if( index != 0 ) {
      __ stop("in storeIConditional: index != 0");
    } else {
      __ bind(again);
      if(UseSyncLevel <= 1000) __ sync();
      __ ll(AT, addr);
      // If the loaded value differs from oldval, fail with AT = 0
      // (set in the branch delay slot).
      __ bne(AT, oldval, failure);
      __ delayed()->addu(AT, R0, R0);

      __ addu(AT, newval, R0);
      __ sc(AT, addr);
      // sc wrote AT = 0 if it lost the reservation; retry in that case.
      // On success the delay slot sets AT = 0xFF as the success flag.
      __ beq(AT, R0, again);
      __ delayed()->addiu(AT, R0, 0xFF);
      __ bind(failure);
      __ sync();
    }
  %}

  ins_pipe( long_memory_op );
%}
// Conditional-store of a long value.
// ZF flag is set on success, reset otherwise. Implemented with a CMPXCHG.
// Delegates to MacroAssembler::cmpxchg; oldval is clobbered (KILL).
instruct storeLConditional(memory mem, t2RegL oldval, mRegL newval, FlagsReg cr )
%{
  match(Set cr (StoreLConditional mem (Binary oldval newval)));
  effect(KILL oldval);

  format %{ "cmpxchg $mem, $newval\t# If $oldval == $mem then store $newval into $mem" %}
  ins_encode%{
    Register oldval = $oldval$$Register;
    Register newval = $newval$$Register;
    Address  addr((Register)$mem$$base, $mem$$disp);

    int     index = $mem$$index;
    int     scale = $mem$$scale;
    int      disp = $mem$$disp;

    guarantee(Assembler::is_simm16(disp), "");

    if( index != 0 ) {
      // Fixed copy-paste: the message previously said "storeIConditional".
      __ stop("in storeLConditional: index != 0");
    } else {
      __ cmpxchg(newval, addr, oldval);
    }
  %}
  ins_pipe( long_memory_op );
%}
// CAS of a 32-bit int: cmpxchg32 leaves its success flag in AT, which is
// copied into res.  oldval is clobbered (KILL).
instruct compareAndSwapI( mRegI res, mRegP mem_ptr, mS2RegI oldval, mRegI newval) %{
  match(Set res (CompareAndSwapI mem_ptr (Binary oldval newval)));
  effect(KILL oldval);
//  match(CompareAndSwapI mem_ptr (Binary oldval newval));
  // Format updated to describe the code actually emitted (cmpxchg32 + move),
  // matching the compareAndSwapP/N style; the old text described a branch
  // sequence that is no longer generated.
  format %{ "CMPXCHG $newval, [$mem_ptr], $oldval @ compareAndSwapI\n\t"
            "MOV $res, AT @ compareAndSwapI\n"
            "L:" %}
  ins_encode %{
    Register newval = $newval$$Register;
    Register oldval = $oldval$$Register;
    Register res    = $res$$Register;
    Address  addr($mem_ptr$$Register, 0);

    __ cmpxchg32(newval, addr, oldval);
    __ move(res, AT);
  %}
  ins_pipe( long_memory_op );
%}
//FIXME:
// CAS of a 64-bit pointer: cmpxchg leaves its success flag in AT, which is
// copied into res.  oldval is clobbered (KILL).
instruct compareAndSwapP( mRegI res,  mRegP mem_ptr, s2_RegP oldval, mRegP newval) %{
  match(Set res (CompareAndSwapP mem_ptr (Binary oldval newval)));
  effect(KILL oldval);
  format %{ "CMPXCHG $newval, [$mem_ptr], $oldval @ compareAndSwapP\n\t"
            "MOV $res, AT @ compareAndSwapP\n\t"
            "L:" %}
  ins_encode %{
    Register newval = $newval$$Register;
    Register oldval = $oldval$$Register;
    Register res    = $res$$Register;
    Address  addr($mem_ptr$$Register, 0);
    Label L;  // NOTE(review): unused — candidate for removal.

    __ cmpxchg(newval, addr, oldval);
    __ move(res, AT);
  %}
  ins_pipe( long_memory_op );
%}
// CAS of a compressed (narrow) oop: done as a 32-bit cmpxchg32; AT holds
// the success flag, copied into res.  oldval is clobbered (KILL).
instruct compareAndSwapN( mRegI res, mRegP mem_ptr, t2_RegN oldval, mRegN newval) %{
  match(Set res (CompareAndSwapN mem_ptr (Binary oldval newval)));
  effect(KILL oldval);
  format %{ "CMPXCHG $newval, [$mem_ptr], $oldval @ compareAndSwapN\n\t"
            "MOV $res, AT @ compareAndSwapN\n\t"
            "L:" %}
  ins_encode %{
    Register newval = $newval$$Register;
    Register oldval = $oldval$$Register;
    Register res    = $res$$Register;
    Address  addr($mem_ptr$$Register, 0);
    Label L;  // NOTE(review): unused — candidate for removal.

    /* 2013/7/19 Jin: cmpxchg32 is implemented with ll/sc, which will do sign extension.
     *      Thus, we should extend oldval's sign for correct comparison.
     */
    __ sll(oldval, oldval, 0);

    __ cmpxchg32(newval, addr, oldval);
    __ move(res, AT);
  %}
  ins_pipe( long_memory_op );
%}
12843 //----------Max and Min--------------------------------------------------------
12844 // Min Instructions
12845 ////
12846 // *** Min and Max using the conditional move are slower than the
12847 // *** branch version on a Pentium III.
12848 // // Conditional move for min
12849 //instruct cmovI_reg_lt( eRegI op2, eRegI op1, eFlagsReg cr ) %{
12850 // effect( USE_DEF op2, USE op1, USE cr );
12851 // format %{ "CMOVlt $op2,$op1\t! min" %}
12852 // opcode(0x4C,0x0F);
12853 // ins_encode( OpcS, OpcP, RegReg( op2, op1 ) );
12854 // ins_pipe( pipe_cmov_reg );
12855 //%}
12856 //
12857 //// Min Register with Register (P6 version)
12858 //instruct minI_eReg_p6( eRegI op1, eRegI op2 ) %{
12859 // predicate(VM_Version::supports_cmov() );
12860 // match(Set op2 (MinI op1 op2));
12861 // ins_cost(200);
12862 // expand %{
12863 // eFlagsReg cr;
12864 // compI_eReg(cr,op1,op2);
12865 // cmovI_reg_lt(op2,op1,cr);
12866 // %}
12867 //%}
// Min Register with Register (generic version)
// dst = min(dst, src): set AT if src < dst, then conditionally move src
// into dst (movn moves when AT != 0).
instruct minI_Reg_Reg(mRegI dst, mRegI src) %{
  match(Set dst (MinI dst src));
  //effect(KILL flags);
  ins_cost(80);

  format %{ "MIN    $dst, $src @minI_Reg_Reg" %}
  ins_encode %{
    Register dst   = $dst$$Register;
    Register src   = $src$$Register;

    __ slt(AT, src, dst);
    __ movn(dst, src, AT);

  %}

  ins_pipe( pipe_slow );
%}
12888 // Max Register with Register
12889 // *** Min and Max using the conditional move are slower than the
12890 // *** branch version on a Pentium III.
12891 // // Conditional move for max
12892 //instruct cmovI_reg_gt( eRegI op2, eRegI op1, eFlagsReg cr ) %{
12893 // effect( USE_DEF op2, USE op1, USE cr );
12894 // format %{ "CMOVgt $op2,$op1\t! max" %}
12895 // opcode(0x4F,0x0F);
12896 // ins_encode( OpcS, OpcP, RegReg( op2, op1 ) );
12897 // ins_pipe( pipe_cmov_reg );
12898 //%}
12899 //
12900 // // Max Register with Register (P6 version)
12901 //instruct maxI_eReg_p6( eRegI op1, eRegI op2 ) %{
12902 // predicate(VM_Version::supports_cmov() );
12903 // match(Set op2 (MaxI op1 op2));
12904 // ins_cost(200);
12905 // expand %{
12906 // eFlagsReg cr;
12907 // compI_eReg(cr,op1,op2);
12908 // cmovI_reg_gt(op2,op1,cr);
12909 // %}
12910 //%}
// Max Register with Register (generic version)
// dst = max(dst, src): set AT if dst < src, then conditionally move src
// into dst (movn moves when AT != 0).
instruct maxI_Reg_Reg(mRegI dst, mRegI src) %{
  match(Set dst (MaxI dst src));
  ins_cost(80);

  format %{ "MAX    $dst, $src @maxI_Reg_Reg" %}

  ins_encode %{
    Register dst   = $dst$$Register;
    Register src   = $src$$Register;

    __ slt(AT, dst, src);
    __ movn(dst, src, AT);

  %}

  ins_pipe( pipe_slow );
%}
// dst = max(dst, 0): clamp negative values to zero via slt + movn.
instruct maxI_Reg_zero(mRegI dst, immI0 zero) %{
  match(Set dst (MaxI dst zero));
  ins_cost(50);

  format %{ "MAX    $dst, 0 @maxI_Reg_zero" %}

  ins_encode %{
    Register dst   = $dst$$Register;

    __ slt(AT, dst, R0);
    __ movn(dst, R0, AT);

  %}

  ins_pipe( pipe_slow );
%}
// Zero-extend the low 32 bits of a long: AndL with 0xFFFFFFFF becomes a
// single dext (extract bits [0,32)).
instruct zerox_long_reg_reg(mRegL dst, mRegL src, immL_32bits mask)
%{
  match(Set dst (AndL src mask));

  format %{ "movl    $dst, $src\t# zero-extend long @ zerox_long_reg_reg" %}
  ins_encode %{
    Register dst = $dst$$Register;
    Register src = $src$$Register;

    __ dext(dst, src, 0, 32);
  %}
  ins_pipe(ialu_regI_regI);
%}
// Combine two ints into one long: src1 supplies the low 32 bits, src2 the
// high 32 bits.  The three cases handle register aliasing so that neither
// source is overwritten before it is consumed.
instruct combine_i2l(mRegL dst, mRegI src1, immL_32bits mask, mRegI src2, immI_32 shift32)
%{
  match(Set dst (OrL (AndL (ConvI2L src1) mask) (LShiftL (ConvI2L src2) shift32)));

  format %{ "combine_i2l    $dst, $src2(H), $src1(L) @ combine_i2l" %}
  ins_encode %{
    Register dst  = $dst$$Register;
    Register src1 = $src1$$Register;
    Register src2 = $src2$$Register;

    if (src1 == dst) {
      // Low half already in place; insert src2 into the high half.
      __ dinsu(dst, src2, 32, 32);
    } else if (src2 == dst) {
      // Shift dst's own low bits into the high half first, then insert src1.
      __ dsll32(dst, dst, 0);
      __ dins(dst, src1, 0, 32);
    } else {
      __ dext(dst, src1, 0, 32);
      __ dinsu(dst, src2, 32, 32);
    }
  %}
  ins_pipe(ialu_regI_regI);
%}
// Zero-extend convert int to long
// (ConvI2L src) & 0xFFFFFFFF collapses to a single dext.
instruct convI2L_reg_reg_zex(mRegL dst, mRegI src, immL_32bits mask)
%{
  match(Set dst (AndL (ConvI2L src) mask));

  format %{ "movl    $dst, $src\t# i2l zero-extend @ convI2L_reg_reg_zex" %}
  ins_encode %{
    Register dst = $dst$$Register;
    Register src = $src$$Register;

    __ dext(dst, src, 0, 32);
  %}
  ins_pipe(ialu_regI_regI);
%}
// long -> int -> long round trip masked to 32 bits: also a single dext.
instruct convL2I2L_reg_reg_zex(mRegL dst, mRegL src, immL_32bits mask)
%{
  match(Set dst (AndL (ConvI2L (ConvL2I src)) mask));

  format %{ "movl    $dst, $src\t# i2l zero-extend @ convL2I2L_reg_reg_zex" %}
  ins_encode %{
    Register dst = $dst$$Register;
    Register src = $src$$Register;

    __ dext(dst, src, 0, 32);
  %}
  ins_pipe(ialu_regI_regI);
%}
// Match loading integer and casting it to unsigned int in long register.
// LoadI + ConvI2L + AndL 0xffffffff.
// Emitted as a zero-extending 32-bit load (shares load_N_enc).
instruct loadUI2L_rmask(mRegL dst, memory mem, immL_32bits mask) %{
  match(Set dst (AndL (ConvI2L (LoadI mem)) mask));

  format %{ "lwu     $dst, $mem \t// zero-extend to long @ loadUI2L_rmask" %}
  ins_encode (load_N_enc(dst, mem));
  ins_pipe(ialu_loadI);
%}
// Same as loadUI2L_rmask but with the mask on the left of the AndL.
instruct loadUI2L_lmask(mRegL dst, memory mem, immL_32bits mask) %{
  match(Set dst (AndL mask (ConvI2L (LoadI mem))));

  format %{ "lwu     $dst, $mem \t// zero-extend to long @ loadUI2L_lmask" %}
  ins_encode (load_N_enc(dst, mem));
  ins_pipe(ialu_loadI);
%}
13033 // ============================================================================
13034 // Safepoint Instruction
13035 instruct safePoint_poll(mRegP poll) %{
13036 match(SafePoint poll);
13037 effect(USE poll);
13039 ins_cost(125);
13040 format %{ "Safepoint @ [$poll] : poll for GC @ safePoint_poll" %}
13042 ins_encode %{
13043 Register poll_reg = $poll$$Register;
13045 __ block_comment("Safepoint:");
13046 __ relocate(relocInfo::poll_type);
13047 __ lw(AT, poll_reg, 0);
13048 %}
13050 ins_pipe( ialu_storeI );
13051 %}
//----------Arithmetic Conversion Instructions---------------------------------

// RoundFloat is a no-op on this target: no code emitted, zero cost.
instruct roundFloat_nop(regF dst)
%{
  match(Set dst (RoundFloat dst));

  ins_cost(0);
  ins_encode();
  ins_pipe(empty);
%}
// RoundDouble is a no-op on this target: no code emitted, zero cost.
instruct roundDouble_nop(regD dst)
%{
  match(Set dst (RoundDouble dst));

  ins_cost(0);
  ins_encode();
  ins_pipe(empty);
%}
//---------- Zeros Count Instructions ------------------------------------------
// CountLeadingZerosINode CountTrailingZerosINode

// 32-bit count-leading-zeros via the clz instruction; gated on the
// UseCountLeadingZerosInstruction flag.
instruct countLeadingZerosI(mRegI dst, mRegI src) %{
  predicate(UseCountLeadingZerosInstruction);
  match(Set dst (CountLeadingZerosI src));

  format %{ "clz    $dst, $src\t# count leading zeros (int)" %}
  ins_encode %{
    __ clz($dst$$Register, $src$$Register);
  %}
  ins_pipe( ialu_regL_regL );
%}
// 64-bit count-leading-zeros via the dclz instruction.
instruct countLeadingZerosL(mRegI dst, mRegL src) %{
  predicate(UseCountLeadingZerosInstruction);
  match(Set dst (CountLeadingZerosL src));

  format %{ "dclz    $dst, $src\t# count leading zeros (long)" %}
  ins_encode %{
    __ dclz($dst$$Register, $src$$Register);
  %}
  ins_pipe( ialu_regL_regL );
%}
// 32-bit count-trailing-zeros; gated on UseCountTrailingZerosInstruction.
instruct countTrailingZerosI(mRegI dst, mRegI src) %{
  predicate(UseCountTrailingZerosInstruction);
  match(Set dst (CountTrailingZerosI src));

  format %{ "ctz    $dst, $src\t# count trailing zeros (int)" %}
  ins_encode %{
    // ctz and dctz is gs instructions.
    __ ctz($dst$$Register, $src$$Register);
  %}
  ins_pipe( ialu_regL_regL );
%}
// 64-bit count-trailing-zeros via the (Loongson) dctz instruction.
instruct countTrailingZerosL(mRegI dst, mRegL src) %{
  predicate(UseCountTrailingZerosInstruction);
  match(Set dst (CountTrailingZerosL src));

  // Fixed format typo: the emitted instruction is dctz, not "dcto"
  // (debug output only).
  format %{ "dctz    $dst, $src\t# count trailing zeros (long)" %}
  ins_encode %{
    __ dctz($dst$$Register, $src$$Register);
  %}
  ins_pipe( ialu_regL_regL );
%}
// ====================VECTOR INSTRUCTIONS=====================================

// Load vectors (8 bytes long)
// An 8-byte vector lives in a double FP register (vecD); reuse the
// double-load encoding.
instruct loadV8(vecD dst, memory mem) %{
  predicate(n->as_LoadVector()->memory_size() == 8);
  match(Set dst (LoadVector mem));
  ins_cost(125);
  format %{ "load    $dst, $mem\t! load vector (8 bytes)" %}
  ins_encode(load_D_enc(dst, mem));
  ins_pipe( fpu_loadF );
%}
// Store vectors (8 bytes long)
// Reuses the double-store encoding for the 8-byte vecD register.
instruct storeV8(memory mem, vecD src) %{
  predicate(n->as_StoreVector()->memory_size() == 8);
  match(Set mem (StoreVector mem src));
  ins_cost(145);
  format %{ "store    $mem, $src\t! store vector (8 bytes)" %}
  ins_encode(store_D_reg_enc(mem, src));
  ins_pipe( fpu_storeF );
%}
// Replicate a byte scalar into all 8 lanes of a vecD: replv_ob builds the
// replicated pattern in AT, then dmtc1 moves it into the FP register.
instruct Repl8B(vecD dst, mRegI src) %{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (ReplicateB src));
  format %{ "replv_ob    AT, $src\n\t"
            "dmtc1 AT, $dst\t! replicate8B" %}
  ins_encode %{
    __ replv_ob(AT, $src$$Register);
    __ dmtc1(AT, $dst$$FloatRegister);
  %}
  ins_pipe( pipe_mtc1 );
%}
// Replicate an immediate byte into all 8 lanes of a vecD via repl_ob.
instruct Repl8B_imm(vecD dst, immI con) %{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (ReplicateB con));
  format %{ "repl_ob    AT, [$con]\n\t"
            "dmtc1 AT, $dst,0x00\t! replicate8B($con)" %}
  ins_encode %{
    int      val = $con$$constant;
    __ repl_ob(AT, val);
    __ dmtc1(AT, $dst$$FloatRegister);
  %}
  ins_pipe( pipe_mtc1 );
%}
// All-zero byte vector: move R0's 64 zero bits straight into the FP register.
instruct Repl8B_zero(vecD dst, immI0 zero) %{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (ReplicateB zero));
  format %{ "dmtc1    R0, $dst\t! replicate8B zero" %}
  ins_encode %{
    __ dmtc1(R0, $dst$$FloatRegister);
  %}
  ins_pipe( pipe_mtc1 );
%}
// All-ones byte vector (-1 in every lane): nor(AT, R0, R0) sets AT to all
// ones, then dmtc1 moves it into the FP register.
instruct Repl8B_M1(vecD dst, immI_M1 M1) %{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (ReplicateB M1));
  format %{ "dmtc1    -1, $dst\t! replicate8B -1" %}
  ins_encode %{
    __ nor(AT, R0, R0);
    __ dmtc1(AT, $dst$$FloatRegister);
  %}
  ins_pipe( pipe_mtc1 );
%}
// Replicate a short scalar into all 4 lanes of a vecD via replv_qh.
instruct Repl4S(vecD dst, mRegI src) %{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (ReplicateS src));
  format %{ "replv_qh    AT, $src\n\t"
            "dmtc1 AT, $dst\t! replicate4S" %}
  ins_encode %{
    __ replv_qh(AT, $src$$Register);
    __ dmtc1(AT, $dst$$FloatRegister);
  %}
  ins_pipe( pipe_mtc1 );
%}
// Replicate a halfword immediate into all 4 halfword lanes of a vecD.
// Small immediates use the immediate form repl_qh; wider ones are first
// materialized into AT with li32 and replicated register-to-register.
// NOTE(review): the format string shows replv_qh, but the small-immediate
// path actually emits repl_qh — cosmetic mismatch in the disassembly text.
13200 instruct Repl4S_imm(vecD dst, immI con) %{
13201 predicate(n->as_Vector()->length() == 4);
13202 match(Set dst (ReplicateS con));
13203 format %{ "replv_qh AT, [$con]\n\t"
13204 "dmtc1 AT, $dst\t! replicate4S($con)" %}
13205 ins_encode %{
13206 int val = $con$$constant;
13207 if ( Assembler::is_simm(val, 10)) {
13208 //repl_qh supports 10 bits immediate
13209 __ repl_qh(AT, val);
13210 } else {
// Immediate too wide for repl_qh: load it into AT, then replicate.
13211 __ li32(AT, val);
13212 __ replv_qh(AT, AT);
13213 }
13214 __ dmtc1(AT, $dst$$FloatRegister);
13215 %}
13216 ins_pipe( pipe_mtc1 );
13217 %}
// Replicate zero into all 4 halfword lanes: one dmtc1 of R0 clears the
// whole 64-bit FP destination.
13219 instruct Repl4S_zero(vecD dst, immI0 zero) %{
13220 predicate(n->as_Vector()->length() == 4);
13221 match(Set dst (ReplicateS zero));
13222 format %{ "dmtc1 R0, $dst\t! replicate4S zero" %}
13223 ins_encode %{
13224 __ dmtc1(R0, $dst$$FloatRegister);
13225 %}
13226 ins_pipe( pipe_mtc1 );
13227 %}
// Replicate -1 (0xFFFF) into all 4 halfword lanes via the all-ones
// pattern from nor(AT, R0, R0). Clobbers AT.
13229 instruct Repl4S_M1(vecD dst, immI_M1 M1) %{
13230 predicate(n->as_Vector()->length() == 4);
13231 match(Set dst (ReplicateS M1));
13232 format %{ "dmtc1 -1, $dst\t! replicate4S -1" %}
13233 ins_encode %{
13234 __ nor(AT, R0, R0);
13235 __ dmtc1(AT, $dst$$FloatRegister);
13236 %}
13237 ins_pipe( pipe_mtc1 );
13238 %}
13240 // Replicate integer (4 byte) scalar to be vector
// Replicate a 32-bit GPR value into both word lanes of a vecD.
// dins writes bits 0..31 of AT from src and dinsu writes bits 32..63,
// so together they define all 64 bits of AT regardless of its old value.
13241 instruct Repl2I(vecD dst, mRegI src) %{
13242 predicate(n->as_Vector()->length() == 2);
13243 match(Set dst (ReplicateI src));
13244 format %{ "dins AT, $src, 0, 32\n\t"
13245 "dinsu AT, $src, 32, 32\n\t"
13246 "dmtc1 AT, $dst\t! replicate2I" %}
13247 ins_encode %{
13248 __ dins(AT, $src$$Register, 0, 32);
13249 __ dinsu(AT, $src$$Register, 32, 32);
13250 __ dmtc1(AT, $dst$$FloatRegister);
13251 %}
13252 ins_pipe( pipe_mtc1 );
13253 %}
13255 // Replicate integer (4 byte) scalar immediate to be vector by loading from const table.
// Replicate a 32-bit immediate into both word lanes of a vecD:
// materialize the constant in AT (li32), replicate with replv_pw,
// then move the 64-bit result into the FP destination.
// NOTE(review): tmp (A7) is declared and KILLed but never referenced in
// the encoding — possibly a leftover; confirm whether li32/replv_pw can
// clobber A7, otherwise the KILL may be unnecessary.
13256 instruct Repl2I_imm(vecD dst, immI con, mA7RegI tmp) %{
13257 predicate(n->as_Vector()->length() == 2);
13258 match(Set dst (ReplicateI con));
13259 effect(KILL tmp);
13260 format %{ "li32 AT, [$con], 32\n\t"
13261 "replv_pw AT, AT\n\t"
13262 "dmtc1 AT, $dst\t! replicate2I($con)" %}
13263 ins_encode %{
13264 int val = $con$$constant;
13265 __ li32(AT, val);
13266 __ replv_pw(AT, AT);
13267 __ dmtc1(AT, $dst$$FloatRegister);
13268 %}
13269 ins_pipe( pipe_mtc1 );
13270 %}
13272 // Replicate integer (4 byte) scalar zero to be vector
// Replicate zero into both word lanes: one dmtc1 of R0 writes the full
// 64-bit FP destination.
13273 instruct Repl2I_zero(vecD dst, immI0 zero) %{
13274 predicate(n->as_Vector()->length() == 2);
13275 match(Set dst (ReplicateI zero));
13276 format %{ "dmtc1 R0, $dst\t! replicate2I zero" %}
13277 ins_encode %{
13278 __ dmtc1(R0, $dst$$FloatRegister);
13279 %}
13280 ins_pipe( pipe_mtc1 );
13281 %}
13283 // Replicate integer (4 byte) scalar -1 to be vector
// Replicate -1 into both word lanes via the all-ones pattern produced
// by nor(AT, R0, R0). Clobbers AT.
13284 instruct Repl2I_M1(vecD dst, immI_M1 M1) %{
13285 predicate(n->as_Vector()->length() == 2);
13286 match(Set dst (ReplicateI M1));
13287 format %{ "dmtc1 -1, $dst\t! replicate2I -1, use AT" %}
13288 ins_encode %{
13289 __ nor(AT, R0, R0);
13290 __ dmtc1(AT, $dst$$FloatRegister);
13291 %}
13292 ins_pipe( pipe_mtc1 );
13293 %}
13295 // Replicate float (4 byte) scalar to be vector
// Replicate a single-precision float into both lanes of a vecD using
// cvt.ps.s, which packs two singles into one paired-single register
// (here the same source twice).
13296 instruct Repl2F(vecD dst, regF src) %{
13297 predicate(n->as_Vector()->length() == 2);
13298 match(Set dst (ReplicateF src));
13299 format %{ "cvt.ps $dst, $src, $src\t! replicate2F" %}
13300 ins_encode %{
13301 __ cvt_ps_s($dst$$FloatRegister, $src$$FloatRegister, $src$$FloatRegister);
13302 %}
13303 ins_pipe( pipe_slow );
13304 %}
13306 // Replicate float (4 byte) scalar zero to be vector
// Replicate +0.0f into both float lanes: the all-zero 64-bit pattern
// from dmtc1 of R0 is exactly two packed +0.0f values.
13307 instruct Repl2F_zero(vecD dst, immF0 zero) %{
13308 predicate(n->as_Vector()->length() == 2);
13309 match(Set dst (ReplicateF zero));
13310 format %{ "dmtc1 R0, $dst\t! replicate2F zero" %}
13311 ins_encode %{
13312 __ dmtc1(R0, $dst$$FloatRegister);
13313 %}
13314 ins_pipe( pipe_mtc1 );
13315 %}
13318 // ====================VECTOR ARITHMETIC=======================================
13320 // --------------------------------- ADD --------------------------------------
13322 // Floats vector add
// Packed 2xfloat add, two-operand form (dst += src) via add.ps.
// NOTE(review): this variant uses pipe_slow while the three-operand
// vadd2F3 below uses fpu_regF_regF — confirm which pipeline class is
// intended; the asymmetry looks accidental.
13323 instruct vadd2F(vecD dst, vecD src) %{
13324 predicate(n->as_Vector()->length() == 2);
13325 match(Set dst (AddVF dst src));
13326 format %{ "add.ps $dst,$src\t! add packed2F" %}
13327 ins_encode %{
13328 __ add_ps($dst$$FloatRegister, $dst$$FloatRegister, $src$$FloatRegister);
13329 %}
13330 ins_pipe( pipe_slow );
13331 %}
// Packed 2xfloat add, three-operand form (dst = src1 + src2) via add.ps.
13333 instruct vadd2F3(vecD dst, vecD src1, vecD src2) %{
13334 predicate(n->as_Vector()->length() == 2);
13335 match(Set dst (AddVF src1 src2));
13336 format %{ "add.ps $dst,$src1,$src2\t! add packed2F" %}
13337 ins_encode %{
13338 __ add_ps($dst$$FloatRegister, $src1$$FloatRegister, $src2$$FloatRegister);
13339 %}
13340 ins_pipe( fpu_regF_regF );
13341 %}
13343 // --------------------------------- SUB --------------------------------------
13345 // Floats vector sub
// Packed 2xfloat subtract, two-operand form (dst -= src) via sub.ps.
// Only the two-operand form is provided here (no vsub2F3 variant,
// unlike add/mul).
13346 instruct vsub2F(vecD dst, vecD src) %{
13347 predicate(n->as_Vector()->length() == 2);
13348 match(Set dst (SubVF dst src));
13349 format %{ "sub.ps $dst,$src\t! sub packed2F" %}
13350 ins_encode %{
13351 __ sub_ps($dst$$FloatRegister, $dst$$FloatRegister, $src$$FloatRegister);
13352 %}
13353 ins_pipe( fpu_regF_regF );
13354 %}
13356 // --------------------------------- MUL --------------------------------------
13358 // Floats vector mul
// Packed 2xfloat multiply, two-operand form (dst *= src) via mul.ps.
13359 instruct vmul2F(vecD dst, vecD src) %{
13360 predicate(n->as_Vector()->length() == 2);
13361 match(Set dst (MulVF dst src));
13362 format %{ "mul.ps $dst, $src\t! mul packed2F" %}
13363 ins_encode %{
13364 __ mul_ps($dst$$FloatRegister, $dst$$FloatRegister, $src$$FloatRegister);
13365 %}
13366 ins_pipe( fpu_regF_regF );
13367 %}
// Packed 2xfloat multiply, three-operand form (dst = src1 * src2) via mul.ps.
13369 instruct vmul2F3(vecD dst, vecD src1, vecD src2) %{
13370 predicate(n->as_Vector()->length() == 2);
13371 match(Set dst (MulVF src1 src2));
13372 format %{ "mul.ps $dst, $src1, $src2\t! mul packed2F" %}
13373 ins_encode %{
13374 __ mul_ps($dst$$FloatRegister, $src1$$FloatRegister, $src2$$FloatRegister);
13375 %}
13376 ins_pipe( fpu_regF_regF );
13377 %}
13379 // --------------------------------- DIV --------------------------------------
13380 // MIPS do not have div.ps
13383 //----------PEEPHOLE RULES-----------------------------------------------------
13384 // These must follow all instruction definitions as they use the names
13385 // defined in the instructions definitions.
13386 //
13387 // peepmatch ( root_instr_name [preceding_instruction]* );
13388 //
13389 // peepconstraint %{
13390 // (instruction_number.operand_name relational_op instruction_number.operand_name
13391 // [, ...] );
13392 // // instruction numbers are zero-based using left to right order in peepmatch
13393 //
13394 // peepreplace ( instr_name ( [instruction_number.operand_name]* ) );
13395 // // provide an instruction_number.operand_name for each operand that appears
13396 // // in the replacement instruction's match rule
13397 //
13398 // ---------VM FLAGS---------------------------------------------------------
13399 //
13400 // All peephole optimizations can be turned off using -XX:-OptoPeephole
13401 //
13402 // Each peephole rule is given an identifying number starting with zero and
13403 // increasing by one in the order seen by the parser. An individual peephole
13404 // can be enabled, and all others disabled, by using -XX:OptoPeepholeAt=#
13405 // on the command-line.
13406 //
13407 // ---------CURRENT LIMITATIONS----------------------------------------------
13408 //
13409 // Only match adjacent instructions in same basic block
13410 // Only equality constraints
13411 // Only constraints between operands, not (0.dest_reg == EAX_enc)
13412 // Only one replacement instruction
13413 //
13414 // ---------EXAMPLE----------------------------------------------------------
13415 //
13416 // // pertinent parts of existing instructions in architecture description
13417 // instruct movI(eRegI dst, eRegI src) %{
13418 // match(Set dst (CopyI src));
13419 // %}
13420 //
13421 // instruct incI_eReg(eRegI dst, immI1 src, eFlagsReg cr) %{
13422 // match(Set dst (AddI dst src));
13423 // effect(KILL cr);
13424 // %}
13425 //
13426 // // Change (inc mov) to lea
13427 // peephole %{
13428 // // increment preceded by register-register move
13429 // peepmatch ( incI_eReg movI );
13430 // // require that the destination register of the increment
13431 // // match the destination register of the move
13432 // peepconstraint ( 0.dst == 1.dst );
13433 // // construct a replacement instruction that sets
13434 // // the destination to ( move's source register + one )
13435 // peepreplace ( leaI_eReg_immI( 0.dst 1.src 0.src ) );
13436 // %}
13437 //
13438 // Implementation no longer uses movX instructions since
13439 // machine-independent system no longer uses CopyX nodes.
13440 //
13441 // peephole %{
13442 // peepmatch ( incI_eReg movI );
13443 // peepconstraint ( 0.dst == 1.dst );
13444 // peepreplace ( leaI_eReg_immI( 0.dst 1.src 0.src ) );
13445 // %}
13446 //
13447 // peephole %{
13448 // peepmatch ( decI_eReg movI );
13449 // peepconstraint ( 0.dst == 1.dst );
13450 // peepreplace ( leaI_eReg_immI( 0.dst 1.src 0.src ) );
13451 // %}
13452 //
13453 // peephole %{
13454 // peepmatch ( addI_eReg_imm movI );
13455 // peepconstraint ( 0.dst == 1.dst );
13456 // peepreplace ( leaI_eReg_immI( 0.dst 1.src 0.src ) );
13457 // %}
13458 //
13459 // peephole %{
13460 // peepmatch ( addP_eReg_imm movP );
13461 // peepconstraint ( 0.dst == 1.dst );
13462 // peepreplace ( leaP_eReg_immI( 0.dst 1.src 0.src ) );
13463 // %}
13465 // // Change load of spilled value to only a spill
13466 // instruct storeI(memory mem, eRegI src) %{
13467 // match(Set mem (StoreI mem src));
13468 // %}
13469 //
13470 // instruct loadI(eRegI dst, memory mem) %{
13471 // match(Set dst (LoadI mem));
13472 // %}
13473 //
13474 //peephole %{
13475 // peepmatch ( loadI storeI );
13476 // peepconstraint ( 1.src == 0.dst, 1.mem == 0.mem );
13477 // peepreplace ( storeI( 1.mem 1.mem 1.src ) );
13478 //%}
13480 //----------SMARTSPILL RULES---------------------------------------------------
13481 // These must follow all instruction definitions as they use the names
13482 // defined in the instructions definitions.