Fri, 14 Apr 2017 17:18:26 +0800
#5321 added missing breaks, which were removed mistakenly in 516:812d6f6a9b6c.
1 //
2 // Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved.
3 // Copyright (c) 2015, 2016, Loongson Technology. All rights reserved.
4 // DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5 //
6 // This code is free software; you can redistribute it and/or modify it
7 // under the terms of the GNU General Public License version 2 only, as
8 // published by the Free Software Foundation.
9 //
10 // This code is distributed in the hope that it will be useful, but WITHOUT
11 // ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 // FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
13 // version 2 for more details (a copy is included in the LICENSE file that
14 // accompanied this code).
15 //
16 // You should have received a copy of the GNU General Public License version
17 // 2 along with this work; if not, write to the Free Software Foundation,
18 // Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
19 //
20 // Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
21 // or visit www.oracle.com if you need additional information or have any
22 // questions.
23 //
24 //
26 // GodSon3 Architecture Description File
28 //----------REGISTER DEFINITION BLOCK------------------------------------------
29 // This information is used by the matcher and the register allocator to
30 // describe individual registers and classes of registers within the target
31 // architecture.
33 // format:
34 // reg_def name (call convention, c-call convention, ideal type, encoding);
35 // call convention :
36 // NS = No-Save
37 // SOC = Save-On-Call
38 // SOE = Save-On-Entry
39 // AS = Always-Save
40 // ideal type :
41 // see opto/opcodes.hpp for more info
42 // reg_class name (reg, ...);
43 // alloc_class name (reg, ...);
44 register %{
46 // General Registers
47 // Integer Registers
48 reg_def R0 ( NS, NS, Op_RegI, 0, VMRegImpl::Bad());
49 reg_def AT ( NS, NS, Op_RegI, 1, AT->as_VMReg());
50 reg_def AT_H ( NS, NS, Op_RegI, 1, AT->as_VMReg()->next());
51 reg_def V0 (SOC, SOC, Op_RegI, 2, V0->as_VMReg());
52 reg_def V0_H (SOC, SOC, Op_RegI, 2, V0->as_VMReg()->next());
53 reg_def V1 (SOC, SOC, Op_RegI, 3, V1->as_VMReg());
54 reg_def V1_H (SOC, SOC, Op_RegI, 3, V1->as_VMReg()->next());
55 reg_def A0 (SOC, SOC, Op_RegI, 4, A0->as_VMReg());
56 reg_def A0_H (SOC, SOC, Op_RegI, 4, A0->as_VMReg()->next());
57 reg_def A1 (SOC, SOC, Op_RegI, 5, A1->as_VMReg());
58 reg_def A1_H (SOC, SOC, Op_RegI, 5, A1->as_VMReg()->next());
59 reg_def A2 (SOC, SOC, Op_RegI, 6, A2->as_VMReg());
60 reg_def A2_H (SOC, SOC, Op_RegI, 6, A2->as_VMReg()->next());
61 reg_def A3 (SOC, SOC, Op_RegI, 7, A3->as_VMReg());
62 reg_def A3_H (SOC, SOC, Op_RegI, 7, A3->as_VMReg()->next());
63 reg_def A4 (SOC, SOC, Op_RegI, 8, A4->as_VMReg());
64 reg_def A4_H (SOC, SOC, Op_RegI, 8, A4->as_VMReg()->next());
65 reg_def A5 (SOC, SOC, Op_RegI, 9, A5->as_VMReg());
66 reg_def A5_H (SOC, SOC, Op_RegI, 9, A5->as_VMReg()->next());
67 reg_def A6 (SOC, SOC, Op_RegI, 10, A6->as_VMReg());
68 reg_def A6_H (SOC, SOC, Op_RegI, 10, A6->as_VMReg()->next());
69 reg_def A7 (SOC, SOC, Op_RegI, 11, A7->as_VMReg());
70 reg_def A7_H (SOC, SOC, Op_RegI, 11, A7->as_VMReg()->next());
71 reg_def T0 (SOC, SOC, Op_RegI, 12, T0->as_VMReg());
72 reg_def T0_H (SOC, SOC, Op_RegI, 12, T0->as_VMReg()->next());
73 reg_def T1 (SOC, SOC, Op_RegI, 13, T1->as_VMReg());
74 reg_def T1_H (SOC, SOC, Op_RegI, 13, T1->as_VMReg()->next());
75 reg_def T2 (SOC, SOC, Op_RegI, 14, T2->as_VMReg());
76 reg_def T2_H (SOC, SOC, Op_RegI, 14, T2->as_VMReg()->next());
77 reg_def T3 (SOC, SOC, Op_RegI, 15, T3->as_VMReg());
78 reg_def T3_H (SOC, SOC, Op_RegI, 15, T3->as_VMReg()->next());
79 reg_def S0 (SOC, SOE, Op_RegI, 16, S0->as_VMReg());
80 reg_def S0_H (SOC, SOE, Op_RegI, 16, S0->as_VMReg()->next());
81 reg_def S1 (SOC, SOE, Op_RegI, 17, S1->as_VMReg());
82 reg_def S1_H (SOC, SOE, Op_RegI, 17, S1->as_VMReg()->next());
83 reg_def S2 (SOC, SOE, Op_RegI, 18, S2->as_VMReg());
84 reg_def S2_H (SOC, SOE, Op_RegI, 18, S2->as_VMReg()->next());
85 reg_def S3 (SOC, SOE, Op_RegI, 19, S3->as_VMReg());
86 reg_def S3_H (SOC, SOE, Op_RegI, 19, S3->as_VMReg()->next());
87 reg_def S4 (SOC, SOE, Op_RegI, 20, S4->as_VMReg());
88 reg_def S4_H (SOC, SOE, Op_RegI, 20, S4->as_VMReg()->next());
89 reg_def S5 (SOC, SOE, Op_RegI, 21, S5->as_VMReg());
90 reg_def S5_H (SOC, SOE, Op_RegI, 21, S5->as_VMReg()->next());
91 reg_def S6 (SOC, SOE, Op_RegI, 22, S6->as_VMReg());
92 reg_def S6_H (SOC, SOE, Op_RegI, 22, S6->as_VMReg()->next());
93 reg_def S7 (SOC, SOE, Op_RegI, 23, S7->as_VMReg());
94 reg_def S7_H (SOC, SOE, Op_RegI, 23, S7->as_VMReg()->next());
95 reg_def T8 (SOC, SOC, Op_RegI, 24, T8->as_VMReg());
96 reg_def T8_H (SOC, SOC, Op_RegI, 24, T8->as_VMReg()->next());
97 reg_def T9 (SOC, SOC, Op_RegI, 25, T9->as_VMReg());
98 reg_def T9_H (SOC, SOC, Op_RegI, 25, T9->as_VMReg()->next());
100 // Special Registers
101 reg_def K0 ( NS, NS, Op_RegI, 26, K0->as_VMReg());
102 reg_def K1 ( NS, NS, Op_RegI, 27, K1->as_VMReg());
103 reg_def GP ( NS, NS, Op_RegI, 28, GP->as_VMReg());
104 reg_def GP_H ( NS, NS, Op_RegI, 28, GP->as_VMReg()->next());
105 reg_def SP ( NS, NS, Op_RegI, 29, SP->as_VMReg());
106 reg_def SP_H ( NS, NS, Op_RegI, 29, SP->as_VMReg()->next());
107 reg_def FP ( NS, NS, Op_RegI, 30, FP->as_VMReg());
108 reg_def FP_H ( NS, NS, Op_RegI, 30, FP->as_VMReg()->next());
109 reg_def RA ( NS, NS, Op_RegI, 31, RA->as_VMReg());
110 reg_def RA_H ( NS, NS, Op_RegI, 31, RA->as_VMReg()->next());
112 // Floating registers.
113 reg_def F0 ( SOC, SOC, Op_RegF, 0, F0->as_VMReg());
114 reg_def F0_H ( SOC, SOC, Op_RegF, 0, F0->as_VMReg()->next());
115 reg_def F1 ( SOC, SOC, Op_RegF, 1, F1->as_VMReg());
116 reg_def F1_H ( SOC, SOC, Op_RegF, 1, F1->as_VMReg()->next());
117 reg_def F2 ( SOC, SOC, Op_RegF, 2, F2->as_VMReg());
118 reg_def F2_H ( SOC, SOC, Op_RegF, 2, F2->as_VMReg()->next());
119 reg_def F3 ( SOC, SOC, Op_RegF, 3, F3->as_VMReg());
120 reg_def F3_H ( SOC, SOC, Op_RegF, 3, F3->as_VMReg()->next());
121 reg_def F4 ( SOC, SOC, Op_RegF, 4, F4->as_VMReg());
122 reg_def F4_H ( SOC, SOC, Op_RegF, 4, F4->as_VMReg()->next());
123 reg_def F5 ( SOC, SOC, Op_RegF, 5, F5->as_VMReg());
124 reg_def F5_H ( SOC, SOC, Op_RegF, 5, F5->as_VMReg()->next());
125 reg_def F6 ( SOC, SOC, Op_RegF, 6, F6->as_VMReg());
126 reg_def F6_H ( SOC, SOC, Op_RegF, 6, F6->as_VMReg()->next());
127 reg_def F7 ( SOC, SOC, Op_RegF, 7, F7->as_VMReg());
128 reg_def F7_H ( SOC, SOC, Op_RegF, 7, F7->as_VMReg()->next());
129 reg_def F8 ( SOC, SOC, Op_RegF, 8, F8->as_VMReg());
130 reg_def F8_H ( SOC, SOC, Op_RegF, 8, F8->as_VMReg()->next());
131 reg_def F9 ( SOC, SOC, Op_RegF, 9, F9->as_VMReg());
132 reg_def F9_H ( SOC, SOC, Op_RegF, 9, F9->as_VMReg()->next());
133 reg_def F10 ( SOC, SOC, Op_RegF, 10, F10->as_VMReg());
134 reg_def F10_H ( SOC, SOC, Op_RegF, 10, F10->as_VMReg()->next());
135 reg_def F11 ( SOC, SOC, Op_RegF, 11, F11->as_VMReg());
136 reg_def F11_H ( SOC, SOC, Op_RegF, 11, F11->as_VMReg()->next());
137 reg_def F12 ( SOC, SOC, Op_RegF, 12, F12->as_VMReg());
138 reg_def F12_H ( SOC, SOC, Op_RegF, 12, F12->as_VMReg()->next());
139 reg_def F13 ( SOC, SOC, Op_RegF, 13, F13->as_VMReg());
140 reg_def F13_H ( SOC, SOC, Op_RegF, 13, F13->as_VMReg()->next());
141 reg_def F14 ( SOC, SOC, Op_RegF, 14, F14->as_VMReg());
142 reg_def F14_H ( SOC, SOC, Op_RegF, 14, F14->as_VMReg()->next());
143 reg_def F15 ( SOC, SOC, Op_RegF, 15, F15->as_VMReg());
144 reg_def F15_H ( SOC, SOC, Op_RegF, 15, F15->as_VMReg()->next());
145 reg_def F16 ( SOC, SOC, Op_RegF, 16, F16->as_VMReg());
146 reg_def F16_H ( SOC, SOC, Op_RegF, 16, F16->as_VMReg()->next());
147 reg_def F17 ( SOC, SOC, Op_RegF, 17, F17->as_VMReg());
148 reg_def F17_H ( SOC, SOC, Op_RegF, 17, F17->as_VMReg()->next());
149 reg_def F18 ( SOC, SOC, Op_RegF, 18, F18->as_VMReg());
150 reg_def F18_H ( SOC, SOC, Op_RegF, 18, F18->as_VMReg()->next());
151 reg_def F19 ( SOC, SOC, Op_RegF, 19, F19->as_VMReg());
152 reg_def F19_H ( SOC, SOC, Op_RegF, 19, F19->as_VMReg()->next());
153 reg_def F20 ( SOC, SOC, Op_RegF, 20, F20->as_VMReg());
154 reg_def F20_H ( SOC, SOC, Op_RegF, 20, F20->as_VMReg()->next());
155 reg_def F21 ( SOC, SOC, Op_RegF, 21, F21->as_VMReg());
156 reg_def F21_H ( SOC, SOC, Op_RegF, 21, F21->as_VMReg()->next());
157 reg_def F22 ( SOC, SOC, Op_RegF, 22, F22->as_VMReg());
158 reg_def F22_H ( SOC, SOC, Op_RegF, 22, F22->as_VMReg()->next());
159 reg_def F23 ( SOC, SOC, Op_RegF, 23, F23->as_VMReg());
160 reg_def F23_H ( SOC, SOC, Op_RegF, 23, F23->as_VMReg()->next());
161 reg_def F24 ( SOC, SOC, Op_RegF, 24, F24->as_VMReg());
162 reg_def F24_H ( SOC, SOC, Op_RegF, 24, F24->as_VMReg()->next());
163 reg_def F25 ( SOC, SOC, Op_RegF, 25, F25->as_VMReg());
164 reg_def F25_H ( SOC, SOC, Op_RegF, 25, F25->as_VMReg()->next());
165 reg_def F26 ( SOC, SOC, Op_RegF, 26, F26->as_VMReg());
166 reg_def F26_H ( SOC, SOC, Op_RegF, 26, F26->as_VMReg()->next());
167 reg_def F27 ( SOC, SOC, Op_RegF, 27, F27->as_VMReg());
168 reg_def F27_H ( SOC, SOC, Op_RegF, 27, F27->as_VMReg()->next());
169 reg_def F28 ( SOC, SOC, Op_RegF, 28, F28->as_VMReg());
170 reg_def F28_H ( SOC, SOC, Op_RegF, 28, F28->as_VMReg()->next());
171 reg_def F29 ( SOC, SOC, Op_RegF, 29, F29->as_VMReg());
172 reg_def F29_H ( SOC, SOC, Op_RegF, 29, F29->as_VMReg()->next());
173 reg_def F30 ( SOC, SOC, Op_RegF, 30, F30->as_VMReg());
174 reg_def F30_H ( SOC, SOC, Op_RegF, 30, F30->as_VMReg()->next());
175 reg_def F31 ( SOC, SOC, Op_RegF, 31, F31->as_VMReg());
176 reg_def F31_H ( SOC, SOC, Op_RegF, 31, F31->as_VMReg()->next());
179 // ----------------------------
180 // Special Registers
181 // Condition Codes Flag Registers
182 reg_def MIPS_FLAG (SOC, SOC, Op_RegFlags, 1, as_Register(1)->as_VMReg());
183 //S6 is used for get_thread(S6)
184 // S5 is used for the heap base of compressed oops
// Allocation order for the integer register chunk. Callee-saved (S*) registers
// come first so the allocator prefers them; SP/FP are last and effectively
// reserved (stack/frame pointers).
185 alloc_class chunk0(
186 S7, S7_H,
187 S0, S0_H,
188 S1, S1_H,
189 S2, S2_H,
190 S4, S4_H,
191 S5, S5_H,
192 S6, S6_H,
193 S3, S3_H,
194 T2, T2_H,
195 T3, T3_H,
196 T8, T8_H,
197 T9, T9_H,
198 T1, T1_H, // inline_cache_reg
199 V1, V1_H,
200 A7, A7_H,
201 A6, A6_H,
202 A5, A5_H,
203 A4, A4_H,
204 V0, V0_H,
205 A3, A3_H,
206 A2, A2_H,
207 A1, A1_H,
208 A0, A0_H,
209 T0, T0_H,
210 GP, GP_H, // FIX: comma was missing after GP_H, splicing GP_H and RA into one malformed list entry
211 RA, RA_H,
212 SP, SP_H, // stack_pointer
213 FP, FP_H // frame_pointer
214 );
216 alloc_class chunk1( F0, F0_H,
217 F1, F1_H,
218 F2, F2_H,
219 F3, F3_H,
220 F4, F4_H,
221 F5, F5_H,
222 F6, F6_H,
223 F7, F7_H,
224 F8, F8_H,
225 F9, F9_H,
226 F10, F10_H,
227 F11, F11_H,
228 F20, F20_H,
229 F21, F21_H,
230 F22, F22_H,
231 F23, F23_H,
232 F24, F24_H,
233 F25, F25_H,
234 F26, F26_H,
235 F27, F27_H,
236 F28, F28_H,
237 F19, F19_H,
238 F18, F18_H,
239 F17, F17_H,
240 F16, F16_H,
241 F15, F15_H,
242 F14, F14_H,
243 F13, F13_H,
244 F12, F12_H,
245 F29, F29_H,
246 F30, F30_H,
247 F31, F31_H);
249 alloc_class chunk2(MIPS_FLAG);
251 reg_class s_reg( S0, S1, S2, S3, S4, S5, S6, S7 );
252 reg_class s0_reg( S0 );
253 reg_class s1_reg( S1 );
254 reg_class s2_reg( S2 );
255 reg_class s3_reg( S3 );
256 reg_class s4_reg( S4 );
257 reg_class s5_reg( S5 );
258 reg_class s6_reg( S6 );
259 reg_class s7_reg( S7 );
261 reg_class t_reg( T0, T1, T2, T3, T8, T9 );
262 reg_class t0_reg( T0 );
263 reg_class t1_reg( T1 );
264 reg_class t2_reg( T2 );
265 reg_class t3_reg( T3 );
266 reg_class t8_reg( T8 );
267 reg_class t9_reg( T9 );
269 reg_class a_reg( A0, A1, A2, A3, A4, A5, A6, A7 );
270 reg_class a0_reg( A0 );
271 reg_class a1_reg( A1 );
272 reg_class a2_reg( A2 );
273 reg_class a3_reg( A3 );
274 reg_class a4_reg( A4 );
275 reg_class a5_reg( A5 );
276 reg_class a6_reg( A6 );
277 reg_class a7_reg( A7 );
279 reg_class v0_reg( V0 );
280 reg_class v1_reg( V1 );
282 reg_class sp_reg( SP, SP_H );
283 reg_class fp_reg( FP, FP_H );
285 reg_class mips_flags(MIPS_FLAG);
287 reg_class v0_long_reg( V0, V0_H );
288 reg_class v1_long_reg( V1, V1_H );
289 reg_class a0_long_reg( A0, A0_H );
290 reg_class a1_long_reg( A1, A1_H );
291 reg_class a2_long_reg( A2, A2_H );
292 reg_class a3_long_reg( A3, A3_H );
293 reg_class a4_long_reg( A4, A4_H );
294 reg_class a5_long_reg( A5, A5_H );
295 reg_class a6_long_reg( A6, A6_H );
296 reg_class a7_long_reg( A7, A7_H );
297 reg_class t0_long_reg( T0, T0_H );
298 reg_class t1_long_reg( T1, T1_H );
299 reg_class t2_long_reg( T2, T2_H );
300 reg_class t3_long_reg( T3, T3_H );
301 reg_class t8_long_reg( T8, T8_H );
302 reg_class t9_long_reg( T9, T9_H );
303 reg_class s0_long_reg( S0, S0_H );
304 reg_class s1_long_reg( S1, S1_H );
305 reg_class s2_long_reg( S2, S2_H );
306 reg_class s3_long_reg( S3, S3_H );
307 reg_class s4_long_reg( S4, S4_H );
308 reg_class s5_long_reg( S5, S5_H );
309 reg_class s6_long_reg( S6, S6_H );
310 reg_class s7_long_reg( S7, S7_H );
312 reg_class int_reg( S7, S0, S1, S2, S4, S3, T8, T2, T3, T1, V1, A7, A6, A5, A4, V0, A3, A2, A1, A0, T0 );
314 reg_class no_Ax_int_reg( S7, S0, S1, S2, S4, S3, T8, T2, T3, T1, V1, V0, T0 );
316 reg_class p_reg(
317 S7, S7_H,
318 S0, S0_H,
319 S1, S1_H,
320 S2, S2_H,
321 S4, S4_H,
322 S3, S3_H,
323 T8, T8_H,
324 T2, T2_H,
325 T3, T3_H,
326 T1, T1_H,
327 A7, A7_H,
328 A6, A6_H,
329 A5, A5_H,
330 A4, A4_H,
331 A3, A3_H,
332 A2, A2_H,
333 A1, A1_H,
334 A0, A0_H,
335 T0, T0_H
336 );
338 reg_class no_T8_p_reg(
339 S7, S7_H,
340 S0, S0_H,
341 S1, S1_H,
342 S2, S2_H,
343 S4, S4_H,
344 S3, S3_H,
345 T2, T2_H,
346 T3, T3_H,
347 T1, T1_H,
348 A7, A7_H,
349 A6, A6_H,
350 A5, A5_H,
351 A4, A4_H,
352 A3, A3_H,
353 A2, A2_H,
354 A1, A1_H,
355 A0, A0_H,
356 T0, T0_H
357 );
359 reg_class long_reg(
360 S7, S7_H,
361 S0, S0_H,
362 S1, S1_H,
363 S2, S2_H,
364 S4, S4_H,
365 S3, S3_H,
366 T8, T8_H,
367 T2, T2_H,
368 T3, T3_H,
369 T1, T1_H,
370 A7, A7_H,
371 A6, A6_H,
372 A5, A5_H,
373 A4, A4_H,
374 A3, A3_H,
375 A2, A2_H,
376 A1, A1_H,
377 A0, A0_H,
378 T0, T0_H
379 );
382 // Floating point registers.
383 // 2012/8/23 Fu: F30/F31 are used as temporary registers in D2I
384 // 2016/12/1 aoqi: F31 are not used as temporary registers in D2I
// Allocatable single-precision FP registers. F30 is excluded (used as a
// temporary in D2I per the note above); F31 is included per the 2016/12/1 note.
// FIX: the comma between F17 and F18 was missing, merging them into one
// malformed token in the ADL register list.
385 reg_class flt_reg( F0, F1, F2, F3, F4, F5, F6, F7, F8, F9, F10, F11, F12, F13, F14, F15, F16, F17, F18, F19, F20, F21, F22, F23, F24, F25, F26, F27, F28, F29, F31);
386 reg_class dbl_reg( F0, F0_H,
387 F1, F1_H,
388 F2, F2_H,
389 F3, F3_H,
390 F4, F4_H,
391 F5, F5_H,
392 F6, F6_H,
393 F7, F7_H,
394 F8, F8_H,
395 F9, F9_H,
396 F10, F10_H,
397 F11, F11_H,
398 F12, F12_H,
399 F13, F13_H,
400 F14, F14_H,
401 F15, F15_H,
402 F16, F16_H,
403 F17, F17_H,
404 F18, F18_H,
405 F19, F19_H,
406 F20, F20_H,
407 F21, F21_H,
408 F22, F22_H,
409 F23, F23_H,
410 F24, F24_H,
411 F25, F25_H,
412 F26, F26_H,
413 F27, F27_H,
414 F28, F28_H,
415 F29, F29_H,
416 F31, F31_H);
418 reg_class flt_arg0( F12 );
419 reg_class dbl_arg0( F12, F12_H );
420 reg_class dbl_arg1( F14, F14_H );
422 %}
424 //----------DEFINITION BLOCK---------------------------------------------------
425 // Define name --> value mappings to inform the ADLC of an integer valued name
426 // Current support includes integer values in the range [0, 0x7FFFFFFF]
427 // Format:
428 // int_def <name> ( <int_value>, <expression>);
429 // Generated Code in ad_<arch>.hpp
430 // #define <name> (<expression>)
431 // // value == <int_value>
432 // Generated code in ad_<arch>.cpp adlc_verification()
433 // assert( <name> == <int_value>, "Expect (<expression>) to equal <int_value>");
434 //
435 definitions %{
436 int_def DEFAULT_COST ( 100, 100);
437 int_def HUGE_COST (1000000, 1000000);
439 // Memory refs are twice as expensive as run-of-the-mill.
440 int_def MEMORY_REF_COST ( 200, DEFAULT_COST * 2);
442 // Branches are even more expensive.
443 int_def BRANCH_COST ( 300, DEFAULT_COST * 3);
444 // we use jr instruction to construct call, so more expensive
445 // by yjl 2/28/2006
446 int_def CALL_COST ( 500, DEFAULT_COST * 5);
447 /*
448 int_def EQUAL ( 1, 1 );
449 int_def NOT_EQUAL ( 2, 2 );
450 int_def GREATER ( 3, 3 );
451 int_def GREATER_EQUAL ( 4, 4 );
452 int_def LESS ( 5, 5 );
453 int_def LESS_EQUAL ( 6, 6 );
454 */
455 %}
459 //----------SOURCE BLOCK-------------------------------------------------------
460 // This is a block of C++ code which provides values, functions, and
461 // definitions necessary in the rest of the architecture description
463 source_hpp %{
464 // Header information of the source block.
465 // Method declarations/definitions which are used outside
466 // the ad-scope can conveniently be defined here.
467 //
468 // To keep related declarations/definitions/uses close together,
469 // we switch between source %{ }% and source_hpp %{ }% freely as needed.
// Hooks queried by Compile::shorten_branches(); this platform emits no call
// trampolines, so both size and relocation-count queries report zero.
471 class CallStubImpl {
473 //--------------------------------------------------------------
474 //---< Used for optimization in Compile::shorten_branches >---
475 //--------------------------------------------------------------
477 public:
478 // Size of call trampoline stub.
479 static uint size_call_trampoline() {
480 return 0; // no call trampolines on this platform
481 }
483 // number of relocations needed by a call trampoline stub
484 static uint reloc_call_trampoline() {
485 return 0; // no call trampolines on this platform
486 }
487 };
// Sizing and emission entry points for the exception and deopt handler stubs.
// The emit_* bodies live in the source %{ }% block below.
489 class HandlerImpl {
491 public:
493 static int emit_exception_handler(CodeBuffer &cbuf);
494 static int emit_deopt_handler(CodeBuffer& cbuf);
496 static uint size_exception_handler() {
497 // NativeCall instruction size is the same as NativeJump.
498 // exception handler starts out as jump and can be patched to
499 // a call by deoptimization. (4932387)
500 // Note that this value is also credited (in output.cpp) to
501 // the size of the code section.
502 // return NativeJump::instruction_size;
503 int size = NativeCall::instruction_size;
504 return round_to(size, 16); // emit_exception_handler aligns the stub to 16 bytes
505 }
507 #ifdef _LP64
508 static uint size_deopt_handler() {
509 int size = NativeCall::instruction_size;
510 return round_to(size, 16);
511 }
512 #else
513 static uint size_deopt_handler() {
514 // NativeCall instruction size is the same as NativeJump.
515 // exception handler starts out as jump and can be patched to
516 // a call by deoptimization. (4932387)
517 // Note that this value is also credited (in output.cpp) to
518 // the size of the code section.
519 return 5 + NativeJump::instruction_size; // pushl(); jmp;
520 }
521 #endif
522 };
524 %} // end source_hpp
526 source %{
528 #define NO_INDEX 0
529 #define RELOC_IMM64 Assembler::imm_operand
530 #define RELOC_DISP32 Assembler::disp32_operand
533 #define __ _masm.
536 // Emit exception handler code.
537 // Stuff framesize into a register and call a VM stub routine.
536 // Emit exception handler code.
537 // Stuff framesize into a register and call a VM stub routine.
// Returns the offset of the handler within the stub section, or 0 if the
// code buffer could not be expanded.
538 int HandlerImpl::emit_exception_handler(CodeBuffer& cbuf) {
539 // Note that the code buffer's insts_mark is always relative to insts.
540 // That's why we must use the macroassembler to generate a handler.
541 MacroAssembler _masm(&cbuf);
542 address base =
543 __ start_a_stub(size_exception_handler());
544 if (base == NULL) return 0; // CodeBuffer::expand failed
545 int offset = __ offset();
547 __ block_comment("; emit_exception_handler");
// Jump to the shared exception blob; patchable so deopt can redirect it later.
549 cbuf.set_insts_mark();
550 __ relocate(relocInfo::runtime_call_type);
551 __ patchable_jump((address)OptoRuntime::exception_blob()->entry_point());
552 __ align(16);
// size_exception_handler() rounds to 16, matching the align(16) above.
553 assert(__ offset() - offset <= (int) size_exception_handler(), "overflow");
554 __ end_a_stub();
555 return offset;
556 }
558 // Emit deopt handler code.
558 // Emit deopt handler code.
// Emits a patchable call to the deopt blob's unpack entry. Returns the offset
// of the handler within the stub section, or 0 on CodeBuffer::expand failure.
559 int HandlerImpl::emit_deopt_handler(CodeBuffer& cbuf) {
560 // Note that the code buffer's insts_mark is always relative to insts.
561 // That's why we must use the macroassembler to generate a handler.
562 MacroAssembler _masm(&cbuf);
563 address base =
564 __ start_a_stub(size_deopt_handler());
566 // FIXME
567 if (base == NULL) return 0; // CodeBuffer::expand failed
568 int offset = __ offset();
570 __ block_comment("; emit_deopt_handler");
// A call (not a jump) so the return address identifies the deopt site.
572 cbuf.set_insts_mark();
573 __ relocate(relocInfo::runtime_call_type);
574 __ patchable_call(SharedRuntime::deopt_blob()->unpack());
575 __ align(16);
576 assert(__ offset() - offset <= (int) size_deopt_handler(), "overflow");
577 __ end_a_stub();
578 return offset;
579 }
// Tell the matcher whether an ideal opcode may be matched on this CPU.
// Rules with no match entry are rejected outright; the count-zeros rules are
// additionally gated on their UseCount*ZerosInstruction flags.
582 const bool Matcher::match_rule_supported(int opcode) {
583 if (!has_match_rule(opcode))
584 return false;
586 switch (opcode) {
587 //Op_CountLeadingZerosI Op_CountLeadingZerosL can be deleted, all MIPS CPUs support clz & dclz.
588 case Op_CountLeadingZerosI:
589 case Op_CountLeadingZerosL:
590 if (!UseCountLeadingZerosInstruction)
591 return false;
592 break;
593 case Op_CountTrailingZerosI:
594 case Op_CountTrailingZerosL:
595 if (!UseCountTrailingZerosInstruction)
596 return false;
597 break;
598 }
600 return true; // Per default match rules are supported.
601 }
603 //FIXME
604 // emit call stub, compiled java to interpreter
603 //FIXME
604 // emit call stub, compiled java to interpreter
// Emits the static call stub: materialize the (initially unresolved) methodOop
// into S3, then a patchable jump whose target is fixed up when the call is
// converted between compiled and interpreted entry points.
605 void emit_java_to_interp(CodeBuffer &cbuf ) {
606 // Stub is fixed up when the corresponding call is converted from calling
607 // compiled code to calling interpreted code.
608 // mov rbx,0
609 // jmp -1
611 address mark = cbuf.insts_mark(); // get mark within main instrs section
613 // Note that the code buffer's insts_mark is always relative to insts.
614 // That's why we must use the macroassembler to generate a stub.
615 MacroAssembler _masm(&cbuf);
617 address base =
618 __ start_a_stub(Compile::MAX_stubs_size);
619 if (base == NULL) return; // CodeBuffer::expand failed
620 // static stub relocation stores the instruction address of the call
622 __ relocate(static_stub_Relocation::spec(mark), 0);
624 // static stub relocation also tags the methodOop in the code-stream.
625 __ patchable_set48(S3, (long)0);
626 // This is recognized as unresolved by relocs/nativeInst/ic code
628 __ relocate(relocInfo::runtime_call_type);
630 cbuf.set_insts_mark();
// -1 is a placeholder destination; the real target is patched in later.
631 address call_pc = (address)-1;
632 __ patchable_jump(call_pc);
633 __ align(16);
634 __ end_a_stub();
635 // Update current stubs pointer and restore code_end.
636 }
638 // size of call stub, compiled java to interpreter
// Worst-case size of the stub emitted by emit_java_to_interp(), rounded to the
// 16-byte alignment that stub ends with.
639 uint size_java_to_interp() {
640 int size = 4 * 4 + NativeCall::instruction_size; // sizeof(li48) + NativeCall::instruction_size
641 return round_to(size, 16);
642 }
644 // relocation entries for call stub, compiled java to interpreter
645 uint reloc_java_to_interp() {
646 return 16; // in emit_java_to_interp + in Java_Static_Call
647 }
// Return true when 'offset' fits the signed 16-bit immediate of a MIPS
// conditional branch (the only short-branch form implemented); larger
// offsets are not supported yet and abort via Unimplemented().
649 bool Matcher::is_short_branch_offset(int rule, int br_size, int offset) {
650 if( Assembler::is_simm16(offset) ) return true;
651 else {
652 assert(false, "Not implemented yet !" );
653 Unimplemented();
654 }
// FIX: the function previously fell off the end on the else path, which is
// undefined behavior for a non-void function; return an explicit value.
return false;
655 }
658 // No additional cost for CMOVL.
659 const int Matcher::long_cmove_cost() { return 0; }
661 // No CMOVF/CMOVD with SSE2
662 const int Matcher::float_cmove_cost() { return ConditionalMoveLimit; }
664 // Does the CPU require late expand (see block.cpp for description of late expand)?
665 const bool Matcher::require_postalloc_expand = false;
667 // Should the Matcher clone shifts on addressing modes, expecting them
668 // to be subsumed into complex addressing expressions or compute them
669 // into registers? True for Intel but false for most RISCs
670 const bool Matcher::clone_shift_expressions = false;
672 // Do we need to mask the count passed to shift instructions or does
673 // the cpu only look at the lower 5/6 bits anyway?
674 const bool Matcher::need_masked_shift_count = false;
676 bool Matcher::narrow_oop_use_complex_address() {
677 NOT_LP64(ShouldNotCallThis());
678 assert(UseCompressedOops, "only for compressed oops code");
679 return false;
680 }
682 bool Matcher::narrow_klass_use_complex_address() {
683 NOT_LP64(ShouldNotCallThis());
684 assert(UseCompressedClassPointers, "only for compressed klass code");
685 return false;
686 }
688 // This is UltraSparc specific, true just means we have fast l2f conversion
689 const bool Matcher::convL2FSupported(void) {
690 return true;
691 }
693 // Max vector size in bytes. 0 if not supported.
694 const int Matcher::vector_width_in_bytes(BasicType bt) {
695 assert(MaxVectorSize == 8, "");
696 return 8;
697 }
699 // Vector ideal reg
700 const int Matcher::vector_ideal_reg(int size) {
701 assert(MaxVectorSize == 8, "");
702 switch(size) {
703 case 8: return Op_VecD;
704 }
705 ShouldNotReachHere();
706 return 0;
707 }
709 // Only lowest bits of xmm reg are used for vector shift count.
710 const int Matcher::vector_shift_count_ideal_reg(int size) {
711 fatal("vector shift is not supported");
712 return Node::NotAMachineReg;
713 }
715 // Limits on vector size (number of elements) loaded into vector.
716 const int Matcher::max_vector_size(const BasicType bt) {
717 assert(is_java_primitive(bt), "only primitive type vectors");
718 return vector_width_in_bytes(bt)/type2aelembytes(bt);
719 }
721 const int Matcher::min_vector_size(const BasicType bt) {
722 return max_vector_size(bt); // Same as max.
723 }
725 // MIPS supports misaligned vectors store/load? FIXME
726 const bool Matcher::misaligned_vectors_ok() {
727 return false;
728 //return !AlignVector; // can be changed by flag
729 }
731 // Register for DIVI projection of divmodI
732 RegMask Matcher::divI_proj_mask() {
733 ShouldNotReachHere();
734 return RegMask();
735 }
737 // Register for MODI projection of divmodI
738 RegMask Matcher::modI_proj_mask() {
739 ShouldNotReachHere();
740 return RegMask();
741 }
743 // Register for DIVL projection of divmodL
744 RegMask Matcher::divL_proj_mask() {
745 ShouldNotReachHere();
746 return RegMask();
747 }
749 int Matcher::regnum_to_fpu_offset(int regnum) {
750 return regnum - 32; // The FP registers are in the second chunk
751 }
754 const bool Matcher::isSimpleConstant64(jlong value) {
755 // Will one (StoreL ConL) be cheaper than two (StoreI ConI)?.
756 return true;
757 }
760 // Return whether or not this register is ever used as an argument. This
761 // function is used on startup to build the trampoline stubs in generateOptoStub.
762 // Registers not mentioned will be killed by the VM call in the trampoline, and
763 // arguments in those registers not be available to the callee.
764 bool Matcher::can_be_java_arg( int reg ) {
765 /* Refer to: [sharedRuntime_mips_64.cpp] SharedRuntime::java_calling_convention() */
766 if ( reg == T0_num || reg == T0_H_num
767 || reg == A0_num || reg == A0_H_num
768 || reg == A1_num || reg == A1_H_num
769 || reg == A2_num || reg == A2_H_num
770 || reg == A3_num || reg == A3_H_num
771 || reg == A4_num || reg == A4_H_num
772 || reg == A5_num || reg == A5_H_num
773 || reg == A6_num || reg == A6_H_num
774 || reg == A7_num || reg == A7_H_num )
775 return true;
777 if ( reg == F12_num || reg == F12_H_num
778 || reg == F13_num || reg == F13_H_num
779 || reg == F14_num || reg == F14_H_num
780 || reg == F15_num || reg == F15_H_num
781 || reg == F16_num || reg == F16_H_num
782 || reg == F17_num || reg == F17_H_num
783 || reg == F18_num || reg == F18_H_num
784 || reg == F19_num || reg == F19_H_num )
785 return true;
787 return false;
788 }
790 bool Matcher::is_spillable_arg( int reg ) {
791 return can_be_java_arg(reg);
792 }
794 bool Matcher::use_asm_for_ldiv_by_con( jlong divisor ) {
795 return false;
796 }
798 // Register for MODL projection of divmodL
799 RegMask Matcher::modL_proj_mask() {
800 ShouldNotReachHere();
801 return RegMask();
802 }
804 const RegMask Matcher::method_handle_invoke_SP_save_mask() {
805 return FP_REG_mask();
806 }
808 // MIPS doesn't support AES intrinsics
809 const bool Matcher::pass_original_key_for_aes() {
810 return false;
811 }
813 int CallLeafNoFPDirectNode::compute_padding(int current_offset) const {
814 //lui
815 //ori
816 //dsll
817 //ori
819 //jalr
820 //nop
822 return round_to(current_offset, alignment_required()) - current_offset;
823 }
825 int CallLeafDirectNode::compute_padding(int current_offset) const {
826 //lui
827 //ori
828 //dsll
829 //ori
831 //jalr
832 //nop
834 return round_to(current_offset, alignment_required()) - current_offset;
835 }
837 int CallRuntimeDirectNode::compute_padding(int current_offset) const {
838 //lui
839 //ori
840 //dsll
841 //ori
843 //jalr
844 //nop
846 return round_to(current_offset, alignment_required()) - current_offset;
847 }
849 // If CPU can load and store mis-aligned doubles directly then no fixup is
850 // needed. Else we split the double into 2 integer pieces and move it
851 // piece-by-piece. Only happens when passing doubles into C code as the
852 // Java calling convention forces doubles to be aligned.
853 const bool Matcher::misaligned_doubles_ok = false;
854 // Do floats take an entire double register or just half?
855 //const bool Matcher::float_in_double = true;
856 bool Matcher::float_in_double() { return false; }
857 // Threshold size for cleararray.
858 const int Matcher::init_array_short_size = 8 * BytesPerLong;
859 // Do ints take an entire long register or just half?
860 const bool Matcher::int_in_long = true;
861 // Is it better to copy float constants, or load them directly from memory?
862 // Intel can load a float constant from a direct address, requiring no
863 // extra registers. Most RISCs will have to materialize an address into a
864 // register first, so they would do better to copy the constant from stack.
865 const bool Matcher::rematerialize_float_constants = false;
866 // Advertise here if the CPU requires explicit rounding operations
867 // to implement the UseStrictFP mode.
868 const bool Matcher::strict_fp_requires_explicit_rounding = false;
869 // The ecx parameter to rep stos for the ClearArray node is in dwords.
870 const bool Matcher::init_array_count_is_in_bytes = false;
873 // Indicate if the safepoint node needs the polling page as an input.
874 // NOTE(review): this comment claimed MIPS needs it (no absolute addressing),
874 // but the function below returns false — confirm which is intended.
// Safepoint polls on this port do not take the polling page as a node input.
875 bool SafePointNode::needs_polling_address_input() {
876 return false;
877 }
879 // !!!!! Special hack to get all type of calls to specify the byte offset
880 // from the start of the call to the point where the return address
881 // will point.
882 int MachCallStaticJavaNode::ret_addr_offset() {
883 //lui
884 //ori
885 //nop
886 //nop
887 //jalr
888 //nop
889 return 24;
890 }
892 int MachCallDynamicJavaNode::ret_addr_offset() {
893 //lui IC_Klass,
894 //ori IC_Klass,
895 //dsll IC_Klass
896 //ori IC_Klass
898 //lui T9
899 //ori T9
900 //nop
901 //nop
902 //jalr T9
903 //nop
904 return 4 * 4 + 4 * 6;
905 }
907 //=============================================================================
909 // Figure out which register class each belongs in: rc_int, rc_float, rc_stack
910 enum RC { rc_bad, rc_int, rc_float, rc_stack };
// Classify an allocator register into int / float / stack, or rc_bad when the
// register name is invalid. Used by MachSpillCopyNode to pick a spill strategy.
911 static enum RC rc_class( OptoReg::Name reg ) {
912 if( !OptoReg::is_valid(reg) ) return rc_bad;
913 if (OptoReg::is_stack(reg)) return rc_stack;
914 VMReg r = OptoReg::as_VMReg(reg);
915 if (r->is_Register()) return rc_int;
// Only GPRs, FPRs and stack slots exist here, so anything else must be an FPR.
916 assert(r->is_FloatRegister(), "must be");
917 return rc_float;
918 }
// Shared worker behind format()/emit()/size() for spill copies.  Depending on
// the arguments it either emits the move into 'cbuf', prints a textual form
// to 'st' (cbuf == NULL, non-PRODUCT), or only accumulates the encoded size
// (do_size).  Returns the size of the emitted code in bytes.  An even first
// register with first+1 == second denotes a 64-bit (paired) value.
uint MachSpillCopyNode::implementation( CodeBuffer *cbuf, PhaseRegAlloc *ra_, bool do_size, outputStream* st ) const {
  // Get registers to move
  OptoReg::Name src_second = ra_->get_reg_second(in(1));
  OptoReg::Name src_first = ra_->get_reg_first(in(1));
  OptoReg::Name dst_second = ra_->get_reg_second(this );
  OptoReg::Name dst_first = ra_->get_reg_first(this );

  enum RC src_second_rc = rc_class(src_second);
  enum RC src_first_rc = rc_class(src_first);
  enum RC dst_second_rc = rc_class(dst_second);
  enum RC dst_first_rc = rc_class(dst_first);

  assert(OptoReg::is_valid(src_first) && OptoReg::is_valid(dst_first), "must move at least 1 register" );

  // Generate spill code!
  int size = 0;

  if( src_first == dst_first && src_second == dst_second )
    return 0;            // Self copy, no move

  if (src_first_rc == rc_stack) {
    // mem ->
    if (dst_first_rc == rc_stack) {
      // mem -> mem: bounce through scratch register AT.
      assert(src_second != dst_first, "overlap");
      if ((src_first & 1) == 0 && src_first + 1 == src_second &&
          (dst_first & 1) == 0 && dst_first + 1 == dst_second) {
        // 64-bit
        int src_offset = ra_->reg2offset(src_first);
        int dst_offset = ra_->reg2offset(dst_first);
        if (cbuf) {
          MacroAssembler _masm(cbuf);
          __ ld(AT, Address(SP, src_offset));
          __ sd(AT, Address(SP, dst_offset));
#ifndef PRODUCT
        } else {
          if(!do_size){
            if (size != 0) st->print("\n\t");
            st->print("ld AT, [SP + #%d]\t# 64-bit mem-mem spill 1\n\t"
                      "sd AT, [SP + #%d]",
                      src_offset, dst_offset);
          }
#endif
        }
        size += 8;
      } else {
        // 32-bit
        assert(!((src_first & 1) == 0 && src_first + 1 == src_second), "no transform");
        assert(!((dst_first & 1) == 0 && dst_first + 1 == dst_second), "no transform");
        // No pushl/popl, so:
        int src_offset = ra_->reg2offset(src_first);
        int dst_offset = ra_->reg2offset(dst_first);
        if (cbuf) {
          MacroAssembler _masm(cbuf);
          __ lw(AT, Address(SP, src_offset));
          __ sw(AT, Address(SP, dst_offset));
#ifndef PRODUCT
        } else {
          if(!do_size){
            if (size != 0) st->print("\n\t");
            st->print("lw AT, [SP + #%d] spill 2\n\t"
                      "sw AT, [SP + #%d]\n\t",
                      src_offset, dst_offset);
          }
#endif
        }
        size += 8;
      }
      return size;
    } else if (dst_first_rc == rc_int) {
      // mem -> gpr
      if ((src_first & 1) == 0 && src_first + 1 == src_second &&
          (dst_first & 1) == 0 && dst_first + 1 == dst_second) {
        // 64-bit
        int offset = ra_->reg2offset(src_first);
        if (cbuf) {
          MacroAssembler _masm(cbuf);
          __ ld(as_Register(Matcher::_regEncode[dst_first]), Address(SP, offset));
#ifndef PRODUCT
        } else {
          if(!do_size){
            if (size != 0) st->print("\n\t");
            st->print("ld %s, [SP + #%d]\t# spill 3",
                      Matcher::regName[dst_first],
                      offset);
          }
#endif
        }
        size += 4;
      } else {
        // 32-bit: lw sign-extends for ints, lwu zero-extends for narrow oops etc.
        assert(!((src_first & 1) == 0 && src_first + 1 == src_second), "no transform");
        assert(!((dst_first & 1) == 0 && dst_first + 1 == dst_second), "no transform");
        int offset = ra_->reg2offset(src_first);
        if (cbuf) {
          MacroAssembler _masm(cbuf);
          if (this->ideal_reg() == Op_RegI)
            __ lw(as_Register(Matcher::_regEncode[dst_first]), Address(SP, offset));
          else
            __ lwu(as_Register(Matcher::_regEncode[dst_first]), Address(SP, offset));
#ifndef PRODUCT
        } else {
          if(!do_size){
            if (size != 0) st->print("\n\t");
            if (this->ideal_reg() == Op_RegI)
              st->print("lw %s, [SP + #%d]\t# spill 4",
                        Matcher::regName[dst_first],
                        offset);
            else
              st->print("lwu %s, [SP + #%d]\t# spill 5",
                        Matcher::regName[dst_first],
                        offset);
          }
#endif
        }
        size += 4;
      }
      return size;
    } else if (dst_first_rc == rc_float) {
      // mem-> xmm
      if ((src_first & 1) == 0 && src_first + 1 == src_second &&
          (dst_first & 1) == 0 && dst_first + 1 == dst_second) {
        // 64-bit
        int offset = ra_->reg2offset(src_first);
        if (cbuf) {
          MacroAssembler _masm(cbuf);
          __ ldc1( as_FloatRegister(Matcher::_regEncode[dst_first]), Address(SP, offset));
#ifndef PRODUCT
        } else {
          if(!do_size){
            if (size != 0) st->print("\n\t");
            st->print("ldc1 %s, [SP + #%d]\t# spill 6",
                      Matcher::regName[dst_first],
                      offset);
          }
#endif
        }
        size += 4;
      } else {
        // 32-bit
        assert(!((src_first & 1) == 0 && src_first + 1 == src_second), "no transform");
        assert(!((dst_first & 1) == 0 && dst_first + 1 == dst_second), "no transform");
        int offset = ra_->reg2offset(src_first);
        if (cbuf) {
          MacroAssembler _masm(cbuf);
          __ lwc1( as_FloatRegister(Matcher::_regEncode[dst_first]), Address(SP, offset));
#ifndef PRODUCT
        } else {
          if(!do_size){
            if (size != 0) st->print("\n\t");
            st->print("lwc1 %s, [SP + #%d]\t# spill 7",
                      Matcher::regName[dst_first],
                      offset);
          }
#endif
        }
        size += 4;
      }
      return size;
    }
  } else if (src_first_rc == rc_int) {
    // gpr ->
    if (dst_first_rc == rc_stack) {
      // gpr -> mem
      if ((src_first & 1) == 0 && src_first + 1 == src_second &&
          (dst_first & 1) == 0 && dst_first + 1 == dst_second) {
        // 64-bit
        int offset = ra_->reg2offset(dst_first);
        if (cbuf) {
          MacroAssembler _masm(cbuf);
          __ sd(as_Register(Matcher::_regEncode[src_first]), Address(SP, offset));
#ifndef PRODUCT
        } else {
          if(!do_size){
            if (size != 0) st->print("\n\t");
            st->print("sd %s, [SP + #%d] # spill 8",
                      Matcher::regName[src_first],
                      offset);
          }
#endif
        }
        size += 4;
      } else {
        // 32-bit
        assert(!((src_first & 1) == 0 && src_first + 1 == src_second), "no transform");
        assert(!((dst_first & 1) == 0 && dst_first + 1 == dst_second), "no transform");
        int offset = ra_->reg2offset(dst_first);
        if (cbuf) {
          MacroAssembler _masm(cbuf);
          __ sw(as_Register(Matcher::_regEncode[src_first]), Address(SP, offset));
#ifndef PRODUCT
        } else {
          if(!do_size){
            if (size != 0) st->print("\n\t");
            st->print("sw %s, [SP + #%d]\t# spill 9",
                      Matcher::regName[src_first], offset);
          }
#endif
        }
        size += 4;
      }
      return size;
    } else if (dst_first_rc == rc_int) {
      // gpr -> gpr
      if ((src_first & 1) == 0 && src_first + 1 == src_second &&
          (dst_first & 1) == 0 && dst_first + 1 == dst_second) {
        // 64-bit
        if (cbuf) {
          MacroAssembler _masm(cbuf);
          __ move(as_Register(Matcher::_regEncode[dst_first]),
                  as_Register(Matcher::_regEncode[src_first]));
#ifndef PRODUCT
        } else {
          if(!do_size){
            if (size != 0) st->print("\n\t");
            st->print("move(64bit) %s <-- %s\t# spill 10",
                      Matcher::regName[dst_first],
                      Matcher::regName[src_first]);
          }
#endif
        }
        size += 4;
        return size;
      } else {
        // 32-bit: move_u32 zero-extends ints; daddu with R0 copies other types.
        assert(!((src_first & 1) == 0 && src_first + 1 == src_second), "no transform");
        assert(!((dst_first & 1) == 0 && dst_first + 1 == dst_second), "no transform");
        if (cbuf) {
          MacroAssembler _masm(cbuf);
          if (this->ideal_reg() == Op_RegI)
            __ move_u32(as_Register(Matcher::_regEncode[dst_first]), as_Register(Matcher::_regEncode[src_first]));
          else
            __ daddu(as_Register(Matcher::_regEncode[dst_first]), as_Register(Matcher::_regEncode[src_first]), R0);

#ifndef PRODUCT
        } else {
          if(!do_size){
            if (size != 0) st->print("\n\t");
            st->print("move(32-bit) %s <-- %s\t# spill 11",
                      Matcher::regName[dst_first],
                      Matcher::regName[src_first]);
          }
#endif
        }
        size += 4;
        return size;
      }
    } else if (dst_first_rc == rc_float) {
      // gpr -> xmm
      if ((src_first & 1) == 0 && src_first + 1 == src_second &&
          (dst_first & 1) == 0 && dst_first + 1 == dst_second) {
        // 64-bit
        if (cbuf) {
          MacroAssembler _masm(cbuf);
          __ dmtc1(as_Register(Matcher::_regEncode[src_first]), as_FloatRegister(Matcher::_regEncode[dst_first]));
#ifndef PRODUCT
        } else {
          if(!do_size){
            if (size != 0) st->print("\n\t");
            // NOTE(review): the emitted dmtc1 takes (gpr src, fpr dst) but this
            // debug string prints dst before src -- verify the intended operand
            // order of the textual form against the assembler mnemonic.
            st->print("dmtc1 %s, %s\t# spill 12",
                      Matcher::regName[dst_first],
                      Matcher::regName[src_first]);
          }
#endif
        }
        size += 4;
      } else {
        // 32-bit
        assert(!((src_first & 1) == 0 && src_first + 1 == src_second), "no transform");
        assert(!((dst_first & 1) == 0 && dst_first + 1 == dst_second), "no transform");
        if (cbuf) {
          MacroAssembler _masm(cbuf);
          __ mtc1( as_Register(Matcher::_regEncode[src_first]), as_FloatRegister(Matcher::_regEncode[dst_first]) );
#ifndef PRODUCT
        } else {
          if(!do_size){
            if (size != 0) st->print("\n\t");
            st->print("mtc1 %s, %s\t# spill 13",
                      Matcher::regName[dst_first],
                      Matcher::regName[src_first]);
          }
#endif
        }
        size += 4;
      }
      return size;
    }
  } else if (src_first_rc == rc_float) {
    // xmm ->
    if (dst_first_rc == rc_stack) {
      // xmm -> mem
      if ((src_first & 1) == 0 && src_first + 1 == src_second &&
          (dst_first & 1) == 0 && dst_first + 1 == dst_second) {
        // 64-bit
        int offset = ra_->reg2offset(dst_first);
        if (cbuf) {
          MacroAssembler _masm(cbuf);
          __ sdc1( as_FloatRegister(Matcher::_regEncode[src_first]), Address(SP, offset) );
#ifndef PRODUCT
        } else {
          if(!do_size){
            if (size != 0) st->print("\n\t");
            st->print("sdc1 %s, [SP + #%d]\t# spill 14",
                      Matcher::regName[src_first],
                      offset);
          }
#endif
        }
        size += 4;
      } else {
        // 32-bit
        assert(!((src_first & 1) == 0 && src_first + 1 == src_second), "no transform");
        assert(!((dst_first & 1) == 0 && dst_first + 1 == dst_second), "no transform");
        int offset = ra_->reg2offset(dst_first);
        if (cbuf) {
          MacroAssembler _masm(cbuf);
          __ swc1(as_FloatRegister(Matcher::_regEncode[src_first]), Address(SP, offset));
#ifndef PRODUCT
        } else {
          if(!do_size){
            if (size != 0) st->print("\n\t");
            st->print("swc1 %s, [SP + #%d]\t# spill 15",
                      Matcher::regName[src_first],
                      offset);
          }
#endif
        }
        size += 4;
      }
      return size;
    } else if (dst_first_rc == rc_int) {
      // xmm -> gpr
      if ((src_first & 1) == 0 && src_first + 1 == src_second &&
          (dst_first & 1) == 0 && dst_first + 1 == dst_second) {
        // 64-bit
        if (cbuf) {
          MacroAssembler _masm(cbuf);
          __ dmfc1( as_Register(Matcher::_regEncode[dst_first]), as_FloatRegister(Matcher::_regEncode[src_first]));
#ifndef PRODUCT
        } else {
          if(!do_size){
            if (size != 0) st->print("\n\t");
            st->print("dmfc1 %s, %s\t# spill 16",
                      Matcher::regName[dst_first],
                      Matcher::regName[src_first]);
          }
#endif
        }
        size += 4;
      } else {
        // 32-bit
        assert(!((src_first & 1) == 0 && src_first + 1 == src_second), "no transform");
        assert(!((dst_first & 1) == 0 && dst_first + 1 == dst_second), "no transform");
        if (cbuf) {
          MacroAssembler _masm(cbuf);
          __ mfc1( as_Register(Matcher::_regEncode[dst_first]), as_FloatRegister(Matcher::_regEncode[src_first]));
#ifndef PRODUCT
        } else {
          if(!do_size){
            if (size != 0) st->print("\n\t");
            st->print("mfc1 %s, %s\t# spill 17",
                      Matcher::regName[dst_first],
                      Matcher::regName[src_first]);
          }
#endif
        }
        size += 4;
      }
      return size;
    } else if (dst_first_rc == rc_float) {
      // xmm -> xmm
      if ((src_first & 1) == 0 && src_first + 1 == src_second &&
          (dst_first & 1) == 0 && dst_first + 1 == dst_second) {
        // 64-bit
        if (cbuf) {
          MacroAssembler _masm(cbuf);
          __ mov_d( as_FloatRegister(Matcher::_regEncode[dst_first]), as_FloatRegister(Matcher::_regEncode[src_first]));
#ifndef PRODUCT
        } else {
          if(!do_size){
            if (size != 0) st->print("\n\t");
            st->print("mov_d %s <-- %s\t# spill 18",
                      Matcher::regName[dst_first],
                      Matcher::regName[src_first]);
          }
#endif
        }
        size += 4;
      } else {
        // 32-bit
        assert(!((src_first & 1) == 0 && src_first + 1 == src_second), "no transform");
        assert(!((dst_first & 1) == 0 && dst_first + 1 == dst_second), "no transform");
        if (cbuf) {
          MacroAssembler _masm(cbuf);
          __ mov_s( as_FloatRegister(Matcher::_regEncode[dst_first]), as_FloatRegister(Matcher::_regEncode[src_first]));
#ifndef PRODUCT
        } else {
          if(!do_size){
            if (size != 0) st->print("\n\t");
            st->print("mov_s %s <-- %s\t# spill 19",
                      Matcher::regName[dst_first],
                      Matcher::regName[src_first]);
          }
#endif
        }
        size += 4;
      }
      return size;
    }
  }

  // No src/dst class pairing matched above: unsupported spill combination.
  assert(0," foo ");
  Unimplemented();
  return size;

}
1337 #ifndef PRODUCT
// Print the spill copy: run the shared worker in text-only mode (no CodeBuffer).
void MachSpillCopyNode::format( PhaseRegAlloc *ra_, outputStream* st ) const {
  implementation( NULL, ra_, false, st );
}
1341 #endif
// Emit the spill copy into the code buffer via the shared worker.
void MachSpillCopyNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
  implementation( &cbuf, ra_, false, NULL );
}
// Size query: run the shared worker in do_size mode; returns bytes.
uint MachSpillCopyNode::size(PhaseRegAlloc *ra_) const {
  return implementation( NULL, ra_, true, NULL );
}
1351 //=============================================================================
1352 #
1354 #ifndef PRODUCT
// Textual form of the breakpoint pseudo-instruction.
void MachBreakpointNode::format( PhaseRegAlloc *, outputStream* st ) const {
  st->print("INT3");
}
1358 #endif
// Emit a debugger breakpoint (int3 is the port's breakpoint helper).
void MachBreakpointNode::emit(CodeBuffer &cbuf, PhaseRegAlloc* ra_) const {
  MacroAssembler _masm(&cbuf);
  __ int3();
}
// Size in bytes; falls back to the generic emit-and-measure path.
uint MachBreakpointNode::size(PhaseRegAlloc* ra_) const {
  return MachNode::size(ra_);
}
1370 //=============================================================================
1371 #ifndef PRODUCT
// Textual form of the method epilog: pop the frame, restore RA/FP
// (fused gslq on Loongson, two ld otherwise), optional return poll.
void MachEpilogNode::format( PhaseRegAlloc *ra_, outputStream* st ) const {
  Compile *C = ra_->C;
  int framesize = C->frame_size_in_bytes();

  assert((framesize & (StackAlignmentInBytes-1)) == 0, "frame size not aligned");

  // Note: "Rlease" below is the historical runtime string; left untouched.
  st->print("daddiu SP, SP, %d # Rlease stack @ MachEpilogNode",framesize);
  st->cr(); st->print("\t");
  if (UseLoongsonISA) {
    st->print("gslq RA, FP, SP, %d # Restore FP & RA @ MachEpilogNode", -wordSize*2);
  } else {
    st->print("ld RA, SP, %d # Restore RA @ MachEpilogNode", -wordSize);
    st->cr(); st->print("\t");
    st->print("ld FP, SP, %d # Restore FP @ MachEpilogNode", -wordSize*2);
  }

  if( do_polling() && C->is_method_compilation() ) {
    st->print("Poll Safepoint # MachEpilogNode");
  }
}
1392 #endif
// Emit the method epilog: release the frame, restore RA/FP from below the
// (already popped) SP, then a safepoint return poll when required.
void MachEpilogNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
  Compile *C = ra_->C;
  MacroAssembler _masm(&cbuf);
  int framesize = C->frame_size_in_bytes();

  assert((framesize & (StackAlignmentInBytes-1)) == 0, "frame size not aligned");

  __ daddiu(SP, SP, framesize);

  if (UseLoongsonISA) {
    // Fused quad-word load restores both RA and FP in one instruction.
    __ gslq(RA, FP, SP, -wordSize*2);
  } else {
    __ ld(RA, SP, -wordSize );
    __ ld(FP, SP, -wordSize*2 );
  }

  if( do_polling() && C->is_method_compilation() ) {
    // Return poll: read the polling page; a protected page traps into the VM.
    __ set64(AT, (long)os::get_polling_page());
    __ relocate(relocInfo::poll_return_type);
    __ lw(AT, AT, 0);
  }
}
// Epilog size varies (poll, ISA variant); use generic emit-and-measure.
uint MachEpilogNode::size(PhaseRegAlloc *ra_) const {
  return MachNode::size(ra_); // too many variables; just compute it the hard way fujie debug
}
// Worst-case relocation entry count for the epilog.
int MachEpilogNode::reloc() const {
  return 0; // a large enough number
}
// Scheduling info: use the default pipeline class.
const Pipeline * MachEpilogNode::pipeline() const {
  return MachNode::pipeline_class();
}
// Offset of the safepoint poll within the epilog (poll is at offset 0 here).
int MachEpilogNode::safepoint_offset() const { return 0; }
1431 //=============================================================================
1433 #ifndef PRODUCT
// Textual form: the box-lock address is SP + offset of the lock slot.
void BoxLockNode::format( PhaseRegAlloc *ra_, outputStream* st ) const {
  int offset = ra_->reg2offset(in_RegMask(0).find_first_elem());
  int reg = ra_->get_reg_first(this);
  st->print("ADDI %s, SP, %d @BoxLockNode",Matcher::regName[reg],offset);
}
1439 #endif
// One 4-byte addi instruction.
uint BoxLockNode::size(PhaseRegAlloc *ra_) const {
  return 4;
}
1446 void BoxLockNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
1447 MacroAssembler _masm(&cbuf);
1448 int offset = ra_->reg2offset(in_RegMask(0).find_first_elem());
1449 int reg = ra_->get_encode(this);
1451 __ addi(as_Register(reg), SP, offset);
1452 /*
1453 if( offset >= 128 ) {
1454 emit_opcode(cbuf, 0x8D); // LEA reg,[SP+offset]
1455 emit_rm(cbuf, 0x2, reg, 0x04);
1456 emit_rm(cbuf, 0x0, 0x04, SP_enc);
1457 emit_d32(cbuf, offset);
1458 }
1459 else {
1460 emit_opcode(cbuf, 0x8D); // LEA reg,[SP+offset]
1461 emit_rm(cbuf, 0x1, reg, 0x04);
1462 emit_rm(cbuf, 0x0, 0x04, SP_enc);
1463 emit_d8(cbuf, offset);
1464 }
1465 */
1466 }
1469 //static int sizeof_FFree_Float_Stack_All = -1;
// Offset of the return address for a runtime call: the full NativeCall
// sequence (lui/ori/dsll/ori/jalr/nop = 6 insts = 24 bytes).
int MachCallRuntimeNode::ret_addr_offset() {
  //lui
  //ori
  //dsll
  //ori
  //jalr
  //nop
  assert(NativeCall::instruction_size == 24, "in MachCallRuntimeNode::ret_addr_offset()");
  return NativeCall::instruction_size;
  // return 16;
}
1487 //=============================================================================
1488 #ifndef PRODUCT
// Textual form: total padding is 4 bytes per requested nop.
void MachNopNode::format( PhaseRegAlloc *, outputStream* st ) const {
  st->print("NOP \t# %d bytes pad for loops and calls", 4 * _count);
}
1492 #endif
1494 void MachNopNode::emit(CodeBuffer &cbuf, PhaseRegAlloc * ) const {
1495 MacroAssembler _masm(&cbuf);
1496 int i = 0;
1497 for(i = 0; i < _count; i++)
1498 __ nop();
1499 }
// 4 bytes per nop.
uint MachNopNode::size(PhaseRegAlloc *) const {
  return 4 * _count;
}
// Scheduling info: default pipeline class.
const Pipeline* MachNopNode::pipeline() const {
  return MachNode::pipeline_class();
}
1508 //=============================================================================
1510 //=============================================================================
1511 #ifndef PRODUCT
// Textual form of the Unverified Entry Point: inline-cache klass check
// followed by a jump to the IC-miss stub on mismatch.
void MachUEPNode::format( PhaseRegAlloc *ra_, outputStream* st ) const {
  st->print_cr("load_klass(T9, T0)");
  st->print_cr("\tbeq(T9, iCache, L)");
  st->print_cr("\tnop");
  st->print_cr("\tjmp(SharedRuntime::get_ic_miss_stub(), relocInfo::runtime_call_type)");
  st->print_cr("\tnop");
  st->print_cr("\tnop");
  st->print_cr(" L:");
}
1521 #endif
// Emit the Unverified Entry Point: compare the receiver's klass (in T0)
// against the inline-cache register; on mismatch jump to the IC-miss stub.
// The trailing alignment is required so the verified entry that follows can
// be patched atomically.
void MachUEPNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
  MacroAssembler _masm(&cbuf);
#ifdef ASSERT
  //uint code_size = cbuf.code_size();
#endif
  int ic_reg = Matcher::inline_cache_reg_encode();
  Label L;
  Register receiver = T0;
  Register iCache = as_Register(ic_reg);
  __ load_klass(T9, receiver);
  __ beq(T9, iCache, L);
  __ nop();  // branch delay slot

  __ relocate(relocInfo::runtime_call_type);
  __ patchable_jump((address)SharedRuntime::get_ic_miss_stub());

  /* WARNING these NOPs are critical so that verified entry point is properly
   * 8 bytes aligned for patching by NativeJump::patch_verified_entry() */
  __ align(CodeEntryAlignment);
  __ bind(L);
}
// UEP size depends on alignment padding; use generic emit-and-measure.
uint MachUEPNode::size(PhaseRegAlloc *ra_) const {
  return MachNode::size(ra_);
}
1552 //=============================================================================
// The constant-table base may live in any pointer register.
const RegMask& MachConstantBaseNode::_out_RegMask = P_REG_mask();
// The table base is addressed absolutely on this port, so no bias is needed.
int Compile::ConstantTable::calculate_table_base_offset() const {
  return 0; // absolute addressing, no offset
}
// This port emits the constant base directly; no post-allocation expansion.
bool MachConstantBaseNode::requires_postalloc_expand() const { return false; }
// Unused on this port (see requires_postalloc_expand above).
void MachConstantBaseNode::postalloc_expand(GrowableArray <Node *> *nodes, PhaseRegAlloc *ra_) {
  ShouldNotReachHere();
}
// Materialize the constant-table base address into the allocated register
// with a patchable 48-bit immediate load; emitted only if the table is
// non-empty.
void MachConstantBaseNode::emit(CodeBuffer& cbuf, PhaseRegAlloc* ra_) const {
  Compile* C = ra_->C;
  Compile::ConstantTable& constant_table = C->constant_table();
  MacroAssembler _masm(&cbuf);

  Register Rtoc = as_Register(ra_->get_encode(this));
  CodeSection* consts_section = __ code()->consts();
  int consts_size = consts_section->align_at_start(consts_section->size());
  assert(constant_table.size() == consts_size, "must be equal");

  if (consts_section->size()) {
    // Materialize the constant table base.
    address baseaddr = consts_section->start() + -(constant_table.table_base_offset());
    // RelocationHolder rspec = internal_word_Relocation::spec(baseaddr);
    __ relocate(relocInfo::internal_pc_type);
    __ patchable_set48(Rtoc, (long)baseaddr);
  }
}
// patchable_set48 always emits a fixed 4-instruction sequence (16 bytes).
uint MachConstantBaseNode::size(PhaseRegAlloc* ra_) const {
  // patchable_set48 (4 insts)
  return 4 * 4;
}
1589 #ifndef PRODUCT
// Textual form of the constant-table base load.
void MachConstantBaseNode::format(PhaseRegAlloc* ra_, outputStream* st) const {
  Register r = as_Register(ra_->get_encode(this));
  st->print("patchable_set48 %s, &constanttable (constant table base) @ MachConstantBaseNode", r->name());
}
1594 #endif
1597 //=============================================================================
1598 #ifndef PRODUCT
// Textual form of the method prolog: optional stack bang, save RA/FP
// (fused gssq on Loongson), establish FP, then allocate the frame.
void MachPrologNode::format( PhaseRegAlloc *ra_, outputStream* st ) const {
  Compile* C = ra_->C;

  int framesize = C->frame_size_in_bytes();
  int bangsize = C->bang_size_in_bytes();
  assert((framesize & (StackAlignmentInBytes-1)) == 0, "frame size not aligned");

  // Calls to C2R adapters often do not accept exceptional returns.
  // We require that their callers must bang for them. But be careful, because
  // some VM calls (such as call site linkage) can use several kilobytes of
  // stack. But the stack safety zone should account for that.
  // See bugs 4446381, 4468289, 4497237.
  if (C->need_stack_bang(bangsize)) {
    st->print_cr("# stack bang"); st->print("\t");
  }
  if (UseLoongsonISA) {
    st->print("gssq RA, FP, %d(SP) @ MachPrologNode\n\t", -wordSize*2);
  } else {
    st->print("sd RA, %d(SP) @ MachPrologNode\n\t", -wordSize);
    st->print("sd FP, %d(SP) @ MachPrologNode\n\t", -wordSize*2);
  }
  st->print("daddiu FP, SP, -%d \n\t", wordSize*2);
  st->print("daddiu SP, SP, -%d \t",framesize);
}
1623 #endif
// Emit the method prolog: stack-overflow bang when needed, save RA/FP below
// the incoming SP, establish FP, allocate the frame, and leave two nops of
// patch room for patch_verified_entry().
void MachPrologNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
  Compile* C = ra_->C;
  MacroAssembler _masm(&cbuf);

  int framesize = C->frame_size_in_bytes();
  int bangsize = C->bang_size_in_bytes();

  // __ verified_entry(framesize, C->need_stack_bang(bangsize)?bangsize:0, false);

  assert((framesize & (StackAlignmentInBytes-1)) == 0, "frame size not aligned");

  // NOTE(review): 'bangsize' is computed above and used by format(), but the
  // bang decision here tests 'framesize' instead -- confirm which size
  // need_stack_bang()/generate_stack_overflow_check() should receive.
  if (C->need_stack_bang(framesize)) {
    __ generate_stack_overflow_check(framesize);
  }

  if (UseLoongsonISA) {
    __ gssq(RA, FP, SP, -wordSize*2);
  } else {
    __ sd(RA, SP, -wordSize);
    __ sd(FP, SP, -wordSize*2);
  }
  __ daddiu(FP, SP, -wordSize*2);
  __ daddiu(SP, SP, -framesize);
  __ nop(); /* 2013.10.22 Jin: Make enough room for patch_verified_entry() */
  __ nop();

  C->set_frame_complete(cbuf.insts_size());
  if (C->has_mach_constant_base_node()) {
    // NOTE: We set the table base offset here because users might be
    // emitted before MachConstantBaseNode.
    Compile::ConstantTable& constant_table = C->constant_table();
    constant_table.set_table_base_offset(constant_table.calculate_table_base_offset());
  }

}
// Prolog size varies (bang, ISA variant); use generic emit-and-measure.
uint MachPrologNode::size(PhaseRegAlloc *ra_) const {
  //fprintf(stderr, "\nPrologNode::size(ra_)= %d \n", MachNode::size(ra_));//fujie debug
  return MachNode::size(ra_); // too many variables; just compute it the hard way
}
// Worst-case relocation entry count for the prolog.
int MachPrologNode::reloc() const {
  return 0; // a large enough number
}
1672 %}
1674 //----------ENCODING BLOCK-----------------------------------------------------
1675 // This block specifies the encoding classes used by the compiler to output
1676 // byte streams. Encoding classes generate functions which are called by
1677 // Machine Instruction Nodes in order to generate the bit encoding of the
1678 // instruction. Operands specify their base encoding interface with the
1679 // interface keyword. There are currently supported four interfaces,
1680 // REG_INTER, CONST_INTER, MEMORY_INTER, & COND_INTER. REG_INTER causes an
1681 // operand to generate a function which returns its register number when
1682 // queried. CONST_INTER causes an operand to generate a function which
1683 // returns the value of the constant when queried. MEMORY_INTER causes an
1684 // operand to generate four functions which return the Base Register, the
1685 // Index Register, the Scale Value, and the Offset Value of the operand when
1686 // queried. COND_INTER causes an operand to generate six functions which
1687 // return the encoding code (ie - encoding bits for the instruction)
1688 // associated with each basic boolean condition for a conditional instruction.
1689 // Instructions specify two basic values for encoding. They use the
1690 // ins_encode keyword to specify their encoding class (which must be one of
1691 // the class names specified in the encoding block), and they use the
1692 // opcode keyword to specify, in order, their primary, secondary, and
1693 // tertiary opcode. Only the opcode sections which a particular instruction
1694 // needs for encoding need to be specified.
1695 encode %{
1697 //Load byte signed
// Signed byte load from [base + (index << scale) + disp].  Uses the fused
// Loongson gslbx where available; otherwise computes the address in AT and
// uses plain lb.  T9 holds a displacement too large for simm16.
enc_class load_B_enc (mRegI dst, memory mem) %{
  MacroAssembler _masm(&cbuf);
  int  dst = $dst$$reg;
  int  base = $mem$$base;
  int  index = $mem$$index;
  int  scale = $mem$$scale;
  int  disp = $mem$$disp;

  if( index != 0 ) {
    if( Assembler::is_simm16(disp) ) {
      if( UseLoongsonISA ) {
        if (scale == 0) {
          __ gslbx(as_Register(dst), as_Register(base), as_Register(index), disp);
        } else {
          __ dsll(AT, as_Register(index), scale);
          __ gslbx(as_Register(dst), as_Register(base), AT, disp);
        }
      } else {
        if (scale == 0) {
          __ addu(AT, as_Register(base), as_Register(index));
        } else {
          __ dsll(AT, as_Register(index), scale);
          __ addu(AT, as_Register(base), AT);
        }
        __ lb(as_Register(dst), AT, disp);
      }
    } else {
      // Displacement does not fit simm16: materialize it in T9.
      if (scale == 0) {
        __ addu(AT, as_Register(base), as_Register(index));
      } else {
        __ dsll(AT, as_Register(index), scale);
        __ addu(AT, as_Register(base), AT);
      }
      __ move(T9, disp);
      if( UseLoongsonISA ) {
        __ gslbx(as_Register(dst), AT, T9, 0);
      } else {
        __ addu(AT, AT, T9);
        __ lb(as_Register(dst), AT, 0);
      }
    }
  } else {
    // No index register.
    if( Assembler::is_simm16(disp) ) {
      __ lb(as_Register(dst), as_Register(base), disp);
    } else {
      __ move(T9, disp);
      if( UseLoongsonISA ) {
        __ gslbx(as_Register(dst), as_Register(base), T9, 0);
      } else {
        __ addu(AT, as_Register(base), T9);
        __ lb(as_Register(dst), AT, 0);
      }
    }
  }
%}
1754 //Load byte unsigned
// Unsigned byte load from [base + (index << scale) + disp] via lbu.
// Address is always composed in AT; T9 carries a non-simm16 displacement.
enc_class load_UB_enc (mRegI dst, memory mem) %{
  MacroAssembler _masm(&cbuf);
  int  dst = $dst$$reg;
  int  base = $mem$$base;
  int  index = $mem$$index;
  int  scale = $mem$$scale;
  int  disp = $mem$$disp;

  if( index != 0 ) {
    if (scale == 0) {
      __ daddu(AT, as_Register(base), as_Register(index));
    } else {
      __ dsll(AT, as_Register(index), scale);
      __ daddu(AT, as_Register(base), AT);
    }
    if( Assembler::is_simm16(disp) ) {
      __ lbu(as_Register(dst), AT, disp);
    } else {
      __ move(T9, disp);
      __ daddu(AT, AT, T9);
      __ lbu(as_Register(dst), AT, 0);
    }
  } else {
    if( Assembler::is_simm16(disp) ) {
      __ lbu(as_Register(dst), as_Register(base), disp);
    } else {
      __ move(T9, disp);
      __ daddu(AT, as_Register(base), T9);
      __ lbu(as_Register(dst), AT, 0);
    }
  }
%}
// Store a byte register to [base + (index << scale) + disp].  Uses the fused
// Loongson gssbx (whose immediate is only simm8) where it fits; otherwise
// builds the address in AT, with T9 for a non-simm16 displacement.
enc_class store_B_reg_enc (memory mem, mRegI src) %{
  MacroAssembler _masm(&cbuf);
  int  src = $src$$reg;
  int  base = $mem$$base;
  int  index = $mem$$index;
  int  scale = $mem$$scale;
  int  disp = $mem$$disp;

  if( index != 0 ) {
    if (scale == 0) {
      if( Assembler::is_simm(disp, 8) ) {
        // gssbx's displacement field is 8 bits.
        if (UseLoongsonISA) {
          __ gssbx(as_Register(src), as_Register(base), as_Register(index), disp);
        } else {
          __ addu(AT, as_Register(base), as_Register(index));
          __ sb(as_Register(src), AT, disp);
        }
      } else if( Assembler::is_simm16(disp) ) {
        __ addu(AT, as_Register(base), as_Register(index));
        __ sb(as_Register(src), AT, disp);
      } else {
        __ addu(AT, as_Register(base), as_Register(index));
        __ move(T9, disp);
        if (UseLoongsonISA) {
          __ gssbx(as_Register(src), AT, T9, 0);
        } else {
          __ addu(AT, AT, T9);
          __ sb(as_Register(src), AT, 0);
        }
      }
    } else {
      __ dsll(AT, as_Register(index), scale);
      if( Assembler::is_simm(disp, 8) ) {
        if (UseLoongsonISA) {
          __ gssbx(as_Register(src), AT, as_Register(base), disp);
        } else {
          __ addu(AT, as_Register(base), AT);
          __ sb(as_Register(src), AT, disp);
        }
      } else if( Assembler::is_simm16(disp) ) {
        __ addu(AT, as_Register(base), AT);
        __ sb(as_Register(src), AT, disp);
      } else {
        __ addu(AT, as_Register(base), AT);
        __ move(T9, disp);
        if (UseLoongsonISA) {
          __ gssbx(as_Register(src), AT, T9, 0);
        } else {
          __ addu(AT, AT, T9);
          __ sb(as_Register(src), AT, 0);
        }
      }
    }
  } else {
    // No index register.
    if( Assembler::is_simm16(disp) ) {
      __ sb(as_Register(src), as_Register(base), disp);
    } else {
      __ move(T9, disp);
      if (UseLoongsonISA) {
        __ gssbx(as_Register(src), as_Register(base), T9, 0);
      } else {
        __ addu(AT, as_Register(base), T9);
        __ sb(as_Register(src), AT, 0);
      }
    }
  }
%}
// Store an 8-bit immediate to [base + (index << scale) + disp].  R0 is used
// directly when the immediate is zero; otherwise the value goes through T9
// (or AT).  Loongson gssbx is used where its simm8 displacement fits.
enc_class store_B_immI_enc (memory mem, immI8 src) %{
  MacroAssembler _masm(&cbuf);
  int  base = $mem$$base;
  int  index = $mem$$index;
  int  scale = $mem$$scale;
  int  disp = $mem$$disp;
  int value = $src$$constant;

  if( index != 0 ) {
    if (!UseLoongsonISA) {
      // Plain MIPS path: address in AT, value (if non-zero) in T9.
      if (scale == 0) {
        __ daddu(AT, as_Register(base), as_Register(index));
      } else {
        __ dsll(AT, as_Register(index), scale);
        __ daddu(AT, as_Register(base), AT);
      }
      if( Assembler::is_simm16(disp) ) {
        if (value == 0) {
          __ sb(R0, AT, disp);
        } else {
          __ move(T9, value);
          __ sb(T9, AT, disp);
        }
      } else {
        if (value == 0) {
          __ move(T9, disp);
          __ daddu(AT, AT, T9);
          __ sb(R0, AT, 0);
        } else {
          __ move(T9, disp);
          __ daddu(AT, AT, T9);
          __ move(T9, value);
          __ sb(T9, AT, 0);
        }
      }
    } else {
      // Loongson path: prefer the fused gssbx store.
      if (scale == 0) {
        if( Assembler::is_simm(disp, 8) ) {
          if (value == 0) {
            __ gssbx(R0, as_Register(base), as_Register(index), disp);
          } else {
            __ move(T9, value);
            __ gssbx(T9, as_Register(base), as_Register(index), disp);
          }
        } else if( Assembler::is_simm16(disp) ) {
          __ daddu(AT, as_Register(base), as_Register(index));
          if (value == 0) {
            __ sb(R0, AT, disp);
          } else {
            __ move(T9, value);
            __ sb(T9, AT, disp);
          }
        } else {
          if (value == 0) {
            __ daddu(AT, as_Register(base), as_Register(index));
            __ move(T9, disp);
            __ gssbx(R0, AT, T9, 0);
          } else {
            __ move(AT, disp);
            __ move(T9, value);
            __ daddu(AT, as_Register(base), AT);
            __ gssbx(T9, AT, as_Register(index), 0);
          }
        }

      } else {

        if( Assembler::is_simm(disp, 8) ) {
          __ dsll(AT, as_Register(index), scale);
          if (value == 0) {
            __ gssbx(R0, as_Register(base), AT, disp);
          } else {
            __ move(T9, value);
            __ gssbx(T9, as_Register(base), AT, disp);
          }
        } else if( Assembler::is_simm16(disp) ) {
          __ dsll(AT, as_Register(index), scale);
          __ daddu(AT, as_Register(base), AT);
          if (value == 0) {
            __ sb(R0, AT, disp);
          } else {
            __ move(T9, value);
            __ sb(T9, AT, disp);
          }
        } else {
          __ dsll(AT, as_Register(index), scale);
          if (value == 0) {
            __ daddu(AT, as_Register(base), AT);
            __ move(T9, disp);
            __ gssbx(R0, AT, T9, 0);
          } else {
            __ move(T9, disp);
            __ daddu(AT, AT, T9);
            __ move(T9, value);
            __ gssbx(T9, as_Register(base), AT, 0);
          }
        }
      }
    }
  } else {
    // No index register.
    if( Assembler::is_simm16(disp) ) {
      if (value == 0) {
        __ sb(R0, as_Register(base), disp);
      } else {
        __ move(AT, value);
        __ sb(AT, as_Register(base), disp);
      }
    } else {
      if (value == 0) {
        __ move(T9, disp);
        if (UseLoongsonISA) {
          __ gssbx(R0, as_Register(base), T9, 0);
        } else {
          __ daddu(AT, as_Register(base), T9);
          __ sb(R0, AT, 0);
        }
      } else {
        __ move(T9, disp);
        if (UseLoongsonISA) {
          __ move(AT, value);
          __ gssbx(AT, as_Register(base), T9, 0);
        } else {
          __ daddu(AT, as_Register(base), T9);
          __ move(T9, value);
          __ sb(T9, AT, 0);
        }
      }
    }
  }
%}
// Byte-store-immediate encoding, "_sync" variant: stores the 8-bit constant
// 'value' to [base + (index << scale) + disp] and then emits a sync()
// memory barrier after the store.
// AT and T9 are used as scratch registers throughout; a zero constant is
// stored directly from R0 so no scratch register needs loading.
// Dispatch structure: indexed vs. non-indexed addressing, Loongson ISA
// (gssbx = indexed store-byte, takes an 8-bit immediate offset per the
// is_simm(disp, 8) guard) vs. plain MIPS (daddu + sb, 16-bit offset).
1989   enc_class store_B_immI_enc_sync (memory mem, immI8 src) %{
1990     MacroAssembler _masm(&cbuf);
1991     int  base = $mem$$base;
1992     int  index = $mem$$index;
1993     int  scale = $mem$$scale;
1994     int  disp = $mem$$disp;
1995     int value = $src$$constant;
1997     if( index != 0 ) {
1998       if ( UseLoongsonISA ) {
           // disp fits the 8-bit gssbx offset: store in at most two instructions.
1999         if ( Assembler::is_simm(disp,8) ) {
2000           if ( scale == 0 ) {
2001             if ( value == 0 ) {
2002               __ gssbx(R0, as_Register(base), as_Register(index), disp);
2003             } else {
2004               __ move(AT, value);
2005               __ gssbx(AT, as_Register(base), as_Register(index), disp);
2006             }
2007           } else {
2008             __ dsll(AT, as_Register(index), scale);
2009             if ( value == 0 ) {
2010               __ gssbx(R0, as_Register(base), AT, disp);
2011             } else {
2012               __ move(T9, value);
2013               __ gssbx(T9, as_Register(base), AT, disp);
2014             }
2015           }
           // disp fits the 16-bit sb offset: materialize base+index in AT.
2016         } else if ( Assembler::is_simm16(disp) ) {
2017           if ( scale == 0 ) {
2018             __ daddu(AT, as_Register(base), as_Register(index));
2019             if ( value == 0 ){
2020               __ sb(R0, AT, disp);
2021             } else {
2022               __ move(T9, value);
2023               __ sb(T9, AT, disp);
2024             }
2025           } else {
2026             __ dsll(AT, as_Register(index), scale);
2027             __ daddu(AT, as_Register(base), AT);
2028             if ( value == 0 ) {
2029               __ sb(R0, AT, disp);
2030             } else {
2031               __ move(T9, value);
2032               __ sb(T9, AT, disp);
2033             }
2034           }
           // Large disp: fold disp into the index side, then gssbx with offset 0.
2035         } else {
2036           if ( scale == 0 ) {
2037             __ move(AT, disp);
2038             __ daddu(AT, as_Register(index), AT);
2039             if ( value == 0 ) {
2040               __ gssbx(R0, as_Register(base), AT, 0);
2041             } else {
2042               __ move(T9, value);
2043               __ gssbx(T9, as_Register(base), AT, 0);
2044             }
2045           } else {
2046             __ dsll(AT, as_Register(index), scale);
2047             __ move(T9, disp);
2048             __ daddu(AT, AT, T9);
2049             if ( value == 0 ) {
2050               __ gssbx(R0, as_Register(base), AT, 0);
2051             } else {
2052               __ move(T9, value);
2053               __ gssbx(T9, as_Register(base), AT, 0);
2054             }
2055           }
2056         }
2057       } else { //not use loongson isa
         // Plain MIPS path: AT = base + (index << scale), then sb.
2058         if (scale == 0) {
2059           __ daddu(AT, as_Register(base), as_Register(index));
2060         } else {
2061           __ dsll(AT, as_Register(index), scale);
2062           __ daddu(AT, as_Register(base), AT);
2063         }
2064         if( Assembler::is_simm16(disp) ) {
2065           if (value == 0) {
2066             __ sb(R0, AT, disp);
2067           } else {
2068             __ move(T9, value);
2069             __ sb(T9, AT, disp);
2070           }
2071         } else {
2072           if (value == 0) {
2073             __ move(T9, disp);
2074             __ daddu(AT, AT, T9);
2075             __ sb(R0, AT, 0);
2076           } else {
2077             __ move(T9, disp);
2078             __ daddu(AT, AT, T9);
2079             __ move(T9, value);
2080             __ sb(T9, AT, 0);
2081           }
2082         }
2083       }
2084     } else {
       // No index register: address is base + disp only.
2085       if ( UseLoongsonISA ){
2086         if ( Assembler::is_simm16(disp) ){
2087           if ( value == 0 ) {
2088             __ sb(R0, as_Register(base), disp);
2089           } else {
2090             __ move(AT, value);
2091             __ sb(AT, as_Register(base), disp);
2092           }
2093         } else {
2094           __ move(AT, disp);
2095           if ( value == 0 ) {
2096             __ gssbx(R0, as_Register(base), AT, 0);
2097           } else {
2098             __ move(T9, value);
2099             __ gssbx(T9, as_Register(base), AT, 0);
2100           }
2101         }
2102       } else {
2103         if( Assembler::is_simm16(disp) ) {
2104           if (value == 0) {
2105             __ sb(R0, as_Register(base), disp);
2106           } else {
2107             __ move(AT, value);
2108             __ sb(AT, as_Register(base), disp);
2109           }
2110         } else {
2111           if (value == 0) {
2112             __ move(T9, disp);
2113             __ daddu(AT, as_Register(base), T9);
2114             __ sb(R0, AT, 0);
2115           } else {
2116             __ move(T9, disp);
2117             __ daddu(AT, as_Register(base), T9);
2118             __ move(T9, value);
2119             __ sb(T9, AT, 0);
2120           }
2121         }
2122       }
2123     }
     // Barrier after the store -- this is what distinguishes the _sync variant.
2125     __ sync();
2126   %}
2128 // Load Short (16bit signed)
// Load Short (16-bit signed) encoding: dst = sign-extended halfword at
// [base + (index << scale) + disp].
// Loongson path uses gslhx (indexed load-halfword, 8-bit immediate offset);
// otherwise the address is materialized into the AT scratch register and a
// plain lh is emitted. T9 is a second scratch for large displacements.
2129   enc_class load_S_enc (mRegI dst, memory mem) %{
2130     MacroAssembler _masm(&cbuf);
2131     int  dst = $dst$$reg;
2132     int  base = $mem$$base;
2133     int  index = $mem$$index;
2134     int  scale = $mem$$scale;
2135     int  disp = $mem$$disp;
2137     if( index != 0 ) {
2138       if ( UseLoongsonISA ) {
           // disp fits gslhx's 8-bit offset: one or two instructions.
2139         if ( Assembler::is_simm(disp, 8) ) {
2140           if (scale == 0) {
2141             __ gslhx(as_Register(dst), as_Register(base), as_Register(index), disp);
2142           } else {
2143             __ dsll(AT, as_Register(index), scale);
2144             __ gslhx(as_Register(dst), as_Register(base), AT, disp);
2145           }
2146         } else if ( Assembler::is_simm16(disp) ) {
2147           if (scale == 0) {
2148             __ daddu(AT, as_Register(base), as_Register(index));
2149             __ lh(as_Register(dst), AT, disp);
2150           } else {
2151             __ dsll(AT, as_Register(index), scale);
2152             __ daddu(AT, as_Register(base), AT);
2153             __ lh(as_Register(dst), AT, disp);
2154           }
2155         } else {
           // Large disp: fold disp into the index side, gslhx with offset 0.
2156           if (scale == 0) {
2157             __ move(AT, disp);
2158             __ daddu(AT, as_Register(index), AT);
2159             __ gslhx(as_Register(dst), as_Register(base), AT, 0);
2160           } else {
2161             __ dsll(AT, as_Register(index), scale);
2162             __ move(T9, disp);
2163             __ daddu(AT, AT, T9);
2164             __ gslhx(as_Register(dst), as_Register(base), AT, 0);
2165           }
2166         }
2167       } else { // not use loongson isa
2168         if (scale == 0) {
2169           __ daddu(AT, as_Register(base), as_Register(index));
2170         } else {
2171           __ dsll(AT, as_Register(index), scale);
2172           __ daddu(AT, as_Register(base), AT);
2173         }
2174         if( Assembler::is_simm16(disp) ) {
2175           __ lh(as_Register(dst), AT, disp);
2176         } else {
2177           __ move(T9, disp);
2178           __ daddu(AT, AT, T9);
2179           __ lh(as_Register(dst), AT, 0);
2180         }
2181       }
2182     } else { // index is 0
2183       if ( UseLoongsonISA ) {
2184         if ( Assembler::is_simm16(disp) ) {
2185           __ lh(as_Register(dst), as_Register(base), disp);
2186         } else {
2187           __ move(T9, disp);
2188           __ gslhx(as_Register(dst), as_Register(base), T9, 0);
2189         }
2190       } else { //not use loongson isa
2191         if( Assembler::is_simm16(disp) ) {
2192           __ lh(as_Register(dst), as_Register(base), disp);
2193         } else {
2194           __ move(T9, disp);
2195           __ daddu(AT, as_Register(base), T9);
2196           __ lh(as_Register(dst), AT, 0);
2197         }
2198       }
2199     }
2200   %}
2202 // Load Char (16bit unsigned)
// Load Char (16-bit unsigned) encoding: dst = zero-extended halfword (lhu) at
// [base + (index << scale) + disp]. Unlike load_S_enc above, there is no
// Loongson gslhx fast path here -- the address is always formed in AT.
2203   enc_class load_C_enc (mRegI dst, memory mem) %{
2204     MacroAssembler _masm(&cbuf);
2205     int  dst = $dst$$reg;
2206     int  base = $mem$$base;
2207     int  index = $mem$$index;
2208     int  scale = $mem$$scale;
2209     int  disp = $mem$$disp;
2211     if( index != 0 ) {
2212       if (scale == 0) {
2213         __ daddu(AT, as_Register(base), as_Register(index));
2214       } else {
2215         __ dsll(AT, as_Register(index), scale);
2216         __ daddu(AT, as_Register(base), AT);
2217       }
2218       if( Assembler::is_simm16(disp) ) {
2219         __ lhu(as_Register(dst), AT, disp);
2220       } else {
2221         __ move(T9, disp);
           // NOTE(review): 32-bit addu here, but the same computation in the
           // base-only branch below uses daddu -- confirm the 64-bit address
           // always fits a sign-extended 32-bit sum, or make this daddu too.
2222         __ addu(AT, AT, T9);
2223         __ lhu(as_Register(dst), AT, 0);
2224       }
2225     } else {
2226       if( Assembler::is_simm16(disp) ) {
2227         __ lhu(as_Register(dst), as_Register(base), disp);
2228       } else {
2229         __ move(T9, disp);
2230         __ daddu(AT, as_Register(base), T9);
2231         __ lhu(as_Register(dst), AT, 0);
2232       }
2233     }
2234   %}
2236 // Store Char (16bit unsigned)
// Store Char (16-bit) from register: emits sh (or Loongson gsshx, the
// indexed store-halfword with an 8-bit immediate offset) of 'src' to
// [base + (index << scale) + disp]. AT/T9 are scratch.
// NOTE(review): address arithmetic in this encoding uses 32-bit addu, while
// the byte/short load encodings above use daddu -- verify this is intended
// for 64-bit addresses.
2237   enc_class store_C_reg_enc (memory mem, mRegI src) %{
2238     MacroAssembler _masm(&cbuf);
2239     int  src = $src$$reg;
2240     int  base = $mem$$base;
2241     int  index = $mem$$index;
2242     int  scale = $mem$$scale;
2243     int  disp = $mem$$disp;
2245     if( index != 0 ) {
2246       if( Assembler::is_simm16(disp) ) {
           // gsshx only takes an 8-bit offset, hence the extra guard.
2247         if( UseLoongsonISA && Assembler::is_simm(disp, 8) ) {
2248           if (scale == 0) {
2249             __ gsshx(as_Register(src), as_Register(base), as_Register(index), disp);
2250           } else {
2251             __ dsll(AT, as_Register(index), scale);
2252             __ gsshx(as_Register(src), as_Register(base), AT, disp);
2253           }
2254         } else {
2255           if (scale == 0) {
2256             __ addu(AT, as_Register(base), as_Register(index));
2257           } else {
2258             __ dsll(AT, as_Register(index), scale);
2259             __ addu(AT, as_Register(base), AT);
2260           }
2261           __ sh(as_Register(src), AT, disp);
2262         }
2263       } else {
2264         if (scale == 0) {
2265           __ addu(AT, as_Register(base), as_Register(index));
2266         } else {
2267           __ dsll(AT, as_Register(index), scale);
2268           __ addu(AT, as_Register(base), AT);
2269         }
2270         __ move(T9, disp);
2271         if( UseLoongsonISA ) {
2272           __ gsshx(as_Register(src), AT, T9, 0);
2273         } else {
2274           __ addu(AT, AT, T9);
2275           __ sh(as_Register(src), AT, 0);
2276         }
2277       }
2278     } else {
2279       if( Assembler::is_simm16(disp) ) {
2280         __ sh(as_Register(src), as_Register(base), disp);
2281       } else {
2282         __ move(T9, disp);
2283         if( UseLoongsonISA ) {
2284           __ gsshx(as_Register(src), as_Register(base), T9, 0);
2285         } else {
2286           __ addu(AT, as_Register(base), T9);
2287           __ sh(as_Register(src), AT, 0);
2288         }
2289       }
2290     }
2291   %}
// Store Char zero: same addressing structure as store_C_reg_enc above, but
// the stored value is the hard-wired zero register R0, so no scratch register
// is needed for the value itself.
2293   enc_class store_C0_enc (memory mem) %{
2294     MacroAssembler _masm(&cbuf);
2295     int  base = $mem$$base;
2296     int  index = $mem$$index;
2297     int  scale = $mem$$scale;
2298     int  disp = $mem$$disp;
2300     if( index != 0 ) {
2301       if( Assembler::is_simm16(disp) ) {
2302         if( UseLoongsonISA && Assembler::is_simm(disp, 8) ) {
2303           if (scale == 0) {
2304             __ gsshx(R0, as_Register(base), as_Register(index), disp);
2305           } else {
2306             __ dsll(AT, as_Register(index), scale);
2307             __ gsshx(R0, as_Register(base), AT, disp);
2308           }
2309         } else {
2310           if (scale == 0) {
2311             __ addu(AT, as_Register(base), as_Register(index));
2312           } else {
2313             __ dsll(AT, as_Register(index), scale);
2314             __ addu(AT, as_Register(base), AT);
2315           }
2316           __ sh(R0, AT, disp);
2317         }
2318       } else {
2319         if (scale == 0) {
2320           __ addu(AT, as_Register(base), as_Register(index));
2321         } else {
2322           __ dsll(AT, as_Register(index), scale);
2323           __ addu(AT, as_Register(base), AT);
2324         }
2325         __ move(T9, disp);
2326         if( UseLoongsonISA ) {
2327           __ gsshx(R0, AT, T9, 0);
2328         } else {
2329           __ addu(AT, AT, T9);
2330           __ sh(R0, AT, 0);
2331         }
2332       }
2333     } else {
2334       if( Assembler::is_simm16(disp) ) {
2335         __ sh(R0, as_Register(base), disp);
2336       } else {
2337         __ move(T9, disp);
2338         if( UseLoongsonISA ) {
2339           __ gsshx(R0, as_Register(base), T9, 0);
2340         } else {
2341           __ addu(AT, as_Register(base), T9);
2342           __ sh(R0, AT, 0);
2343         }
2344       }
2345     }
2346   %}
// Load Int (32-bit signed) encoding: dst = lw (or Loongson gslwx, indexed
// load-word with 8-bit immediate offset) from [base + (index << scale) + disp].
// Same dispatch shape as store_C_reg_enc: simm8 -> gslwx, simm16 -> lw,
// otherwise materialize disp in T9.
2348   enc_class load_I_enc (mRegI dst, memory mem) %{
2349     MacroAssembler _masm(&cbuf);
2350     int  dst = $dst$$reg;
2351     int  base = $mem$$base;
2352     int  index = $mem$$index;
2353     int  scale = $mem$$scale;
2354     int  disp = $mem$$disp;
2356     if( index != 0 ) {
2357       if( Assembler::is_simm16(disp) ) {
2358         if( UseLoongsonISA && Assembler::is_simm(disp, 8) ) {
2359           if (scale == 0) {
2360             __ gslwx(as_Register(dst), as_Register(base), as_Register(index), disp);
2361           } else {
2362             __ dsll(AT, as_Register(index), scale);
2363             __ gslwx(as_Register(dst), as_Register(base), AT, disp);
2364           }
2365         } else {
2366           if (scale == 0) {
2367             __ addu(AT, as_Register(base), as_Register(index));
2368           } else {
2369             __ dsll(AT, as_Register(index), scale);
2370             __ addu(AT, as_Register(base), AT);
2371           }
2372           __ lw(as_Register(dst), AT, disp);
2373         }
2374       } else {
2375         if (scale == 0) {
2376           __ addu(AT, as_Register(base), as_Register(index));
2377         } else {
2378           __ dsll(AT, as_Register(index), scale);
2379           __ addu(AT, as_Register(base), AT);
2380         }
2381         __ move(T9, disp);
2382         if( UseLoongsonISA ) {
2383           __ gslwx(as_Register(dst), AT, T9, 0);
2384         } else {
2385           __ addu(AT, AT, T9);
2386           __ lw(as_Register(dst), AT, 0);
2387         }
2388       }
2389     } else {
2390       if( Assembler::is_simm16(disp) ) {
2391         __ lw(as_Register(dst), as_Register(base), disp);
2392       } else {
2393         __ move(T9, disp);
2394         if( UseLoongsonISA ) {
2395           __ gslwx(as_Register(dst), as_Register(base), T9, 0);
2396         } else {
2397           __ addu(AT, as_Register(base), T9);
2398           __ lw(as_Register(dst), AT, 0);
2399         }
2400       }
2401     }
2402   %}
// Store Int from register: mirror of load_I_enc using sw / Loongson gsswx
// (indexed store-word, 8-bit immediate offset). AT/T9 are scratch.
2404   enc_class store_I_reg_enc (memory mem, mRegI src) %{
2405     MacroAssembler _masm(&cbuf);
2406     int  src = $src$$reg;
2407     int  base = $mem$$base;
2408     int  index = $mem$$index;
2409     int  scale = $mem$$scale;
2410     int  disp = $mem$$disp;
2412     if( index != 0 ) {
2413       if( Assembler::is_simm16(disp) ) {
2414         if( UseLoongsonISA && Assembler::is_simm(disp, 8) ) {
2415           if (scale == 0) {
2416             __ gsswx(as_Register(src), as_Register(base), as_Register(index), disp);
2417           } else {
2418             __ dsll(AT, as_Register(index), scale);
2419             __ gsswx(as_Register(src), as_Register(base), AT, disp);
2420           }
2421         } else {
2422           if (scale == 0) {
2423             __ addu(AT, as_Register(base), as_Register(index));
2424           } else {
2425             __ dsll(AT, as_Register(index), scale);
2426             __ addu(AT, as_Register(base), AT);
2427           }
2428           __ sw(as_Register(src), AT, disp);
2429         }
2430       } else {
2431         if (scale == 0) {
2432           __ addu(AT, as_Register(base), as_Register(index));
2433         } else {
2434           __ dsll(AT, as_Register(index), scale);
2435           __ addu(AT, as_Register(base), AT);
2436         }
2437         __ move(T9, disp);
2438         if( UseLoongsonISA ) {
2439           __ gsswx(as_Register(src), AT, T9, 0);
2440         } else {
2441           __ addu(AT, AT, T9);
2442           __ sw(as_Register(src), AT, 0);
2443         }
2444       }
2445     } else {
2446       if( Assembler::is_simm16(disp) ) {
2447         __ sw(as_Register(src), as_Register(base), disp);
2448       } else {
2449         __ move(T9, disp);
2450         if( UseLoongsonISA ) {
2451           __ gsswx(as_Register(src), as_Register(base), T9, 0);
2452         } else {
2453           __ addu(AT, as_Register(base), T9);
2454           __ sw(as_Register(src), AT, 0);
2455         }
2456       }
2457     }
2458   %}
// Store Int immediate: stores the 32-bit constant 'value' to
// [base + (index << scale) + disp]. A zero constant is stored straight from
// R0; otherwise the constant is first loaded into a scratch register (T9, or
// AT when AT is not already holding the address).
// Same three-tier dispatch as the other immediate stores:
// Loongson simm8 -> gsswx, simm16 -> sw, large disp -> fold disp into index.
2460   enc_class store_I_immI_enc (memory mem, immI src) %{
2461     MacroAssembler _masm(&cbuf);
2462     int  base = $mem$$base;
2463     int  index = $mem$$index;
2464     int  scale = $mem$$scale;
2465     int  disp = $mem$$disp;
2466     int value = $src$$constant;
2468     if( index != 0 ) {
2469       if ( UseLoongsonISA ) {
2470         if ( Assembler::is_simm(disp, 8) ) {
2471           if ( scale == 0 ) {
2472             if ( value == 0 ) {
2473               __ gsswx(R0, as_Register(base), as_Register(index), disp);
2474             } else {
2475               __ move(T9, value);
2476               __ gsswx(T9, as_Register(base), as_Register(index), disp);
2477             }
2478           } else {
2479             __ dsll(AT, as_Register(index), scale);
2480             if ( value == 0 ) {
2481               __ gsswx(R0, as_Register(base), AT, disp);
2482             } else {
2483               __ move(T9, value);
2484               __ gsswx(T9, as_Register(base), AT, disp);
2485             }
2486           }
2487         } else if ( Assembler::is_simm16(disp) ) {
2488           if ( scale == 0 ) {
2489             __ daddu(AT, as_Register(base), as_Register(index));
2490             if ( value == 0 ) {
2491               __ sw(R0, AT, disp);
2492             } else {
2493               __ move(T9, value);
2494               __ sw(T9, AT, disp);
2495             }
2496           } else {
2497             __ dsll(AT, as_Register(index), scale);
2498             __ daddu(AT, as_Register(base), AT);
2499             if ( value == 0 ) {
2500               __ sw(R0, AT, disp);
2501             } else {
2502               __ move(T9, value);
2503               __ sw(T9, AT, disp);
2504             }
2505           }
2506         } else {
           // Large disp: AT = (index << scale) + disp, T9 is reused first for
           // disp and then for the value, so the order of moves matters.
2507           if ( scale == 0 ) {
2508             __ move(T9, disp);
2509             __ daddu(AT, as_Register(index), T9);
2510             if ( value ==0 ) {
2511               __ gsswx(R0, as_Register(base), AT, 0);
2512             } else {
2513               __ move(T9, value);
2514               __ gsswx(T9, as_Register(base), AT, 0);
2515             }
2516           } else {
2517             __ dsll(AT, as_Register(index), scale);
2518             __ move(T9, disp);
2519             __ daddu(AT, AT, T9);
2520             if ( value == 0 ) {
2521               __ gsswx(R0, as_Register(base), AT, 0);
2522             } else {
2523               __ move(T9, value);
2524               __ gsswx(T9, as_Register(base), AT, 0);
2525             }
2526           }
2527         }
2528       } else { //not use loongson isa
2529         if (scale == 0) {
2530           __ daddu(AT, as_Register(base), as_Register(index));
2531         } else {
2532           __ dsll(AT, as_Register(index), scale);
2533           __ daddu(AT, as_Register(base), AT);
2534         }
2535         if( Assembler::is_simm16(disp) ) {
2536           if (value == 0) {
2537             __ sw(R0, AT, disp);
2538           } else {
2539             __ move(T9, value);
2540             __ sw(T9, AT, disp);
2541           }
2542         } else {
2543           if (value == 0) {
2544             __ move(T9, disp);
2545             __ daddu(AT, AT, T9);
2546             __ sw(R0, AT, 0);
2547           } else {
2548             __ move(T9, disp);
2549             __ daddu(AT, AT, T9);
2550             __ move(T9, value);
2551             __ sw(T9, AT, 0);
2552           }
2553         }
2554       }
2555     } else {
       // No index register: base + disp addressing only.
2556       if ( UseLoongsonISA ) {
2557         if ( Assembler::is_simm16(disp) ) {
2558           if ( value == 0 ) {
2559             __ sw(R0, as_Register(base), disp);
2560           } else {
2561             __ move(AT, value);
2562             __ sw(AT, as_Register(base), disp);
2563           }
2564         } else {
2565           __ move(T9, disp);
2566           if ( value == 0 ) {
2567             __ gsswx(R0, as_Register(base), T9, 0);
2568           } else {
2569             __ move(AT, value);
2570             __ gsswx(AT, as_Register(base), T9, 0);
2571           }
2572         }
2573       } else {
2574         if( Assembler::is_simm16(disp) ) {
2575           if (value == 0) {
2576             __ sw(R0, as_Register(base), disp);
2577           } else {
2578             __ move(AT, value);
2579             __ sw(AT, as_Register(base), disp);
2580           }
2581         } else {
2582           if (value == 0) {
2583             __ move(T9, disp);
2584             __ daddu(AT, as_Register(base), T9);
2585             __ sw(R0, AT, 0);
2586           } else {
2587             __ move(T9, disp);
2588             __ daddu(AT, as_Register(base), T9);
2589             __ move(T9, value);
2590             __ sw(T9, AT, 0);
2591           }
2592         }
2593       }
2594     }
2595   %}
// Load narrow oop (compressed pointer, mRegN): dst = zero-extended 32-bit
// word (lwu) at [base + (index << scale) + disp]. The memory operand may not
// carry a relocation (asserted below). Large displacements are materialized
// with set64 into T9.
2597   enc_class load_N_enc (mRegN dst, memory mem) %{
2598     MacroAssembler _masm(&cbuf);
2599     int  dst = $dst$$reg;
2600     int  base = $mem$$base;
2601     int  index = $mem$$index;
2602     int  scale = $mem$$scale;
2603     int  disp = $mem$$disp;
2604     relocInfo::relocType disp_reloc = $mem->disp_reloc();
2605     assert(disp_reloc == relocInfo::none, "cannot have disp");
2607     if( index != 0 ) {
2608       if (scale == 0) {
2609         __ daddu(AT, as_Register(base), as_Register(index));
2610       } else {
2611         __ dsll(AT, as_Register(index), scale);
2612         __ daddu(AT, as_Register(base), AT);
2613       }
2614       if( Assembler::is_simm16(disp) ) {
2615         __ lwu(as_Register(dst), AT, disp);
2616       } else {
2617         __ set64(T9, disp);
2618         __ daddu(AT, AT, T9);
2619         __ lwu(as_Register(dst), AT, 0);
2620       }
2621     } else {
2622       if( Assembler::is_simm16(disp) ) {
2623         __ lwu(as_Register(dst), as_Register(base), disp);
2624       } else {
2625         __ set64(T9, disp);
2626         __ daddu(AT, as_Register(base), T9);
2627         __ lwu(as_Register(dst), AT, 0);
2628       }
2629     }
2631   %}
// Load Pointer (64-bit, mRegP): dst = ld (or Loongson gsldx, indexed
// load-doubleword with 8-bit immediate offset) from
// [base + (index << scale) + disp]. The memory operand may not carry a
// relocation (asserted). Note the scale tests here are written as
// 'scale != 0' with the simple case in the else branch, unlike the other
// encodings -- behavior is the same.
2634   enc_class load_P_enc (mRegP dst, memory mem) %{
2635     MacroAssembler _masm(&cbuf);
2636     int  dst = $dst$$reg;
2637     int  base = $mem$$base;
2638     int  index = $mem$$index;
2639     int  scale = $mem$$scale;
2640     int  disp = $mem$$disp;
2641     relocInfo::relocType disp_reloc = $mem->disp_reloc();
2642     assert(disp_reloc == relocInfo::none, "cannot have disp");
2644     if( index != 0 ) {
2645       if ( UseLoongsonISA ) {
2646         if ( Assembler::is_simm(disp, 8) ) {
2647           if ( scale != 0 ) {
2648             __ dsll(AT, as_Register(index), scale);
2649             __ gsldx(as_Register(dst), as_Register(base), AT, disp);
2650           } else {
2651             __ gsldx(as_Register(dst), as_Register(base), as_Register(index), disp);
2652           }
2653         } else if ( Assembler::is_simm16(disp) ){
2654           if ( scale != 0 ) {
2655             __ dsll(AT, as_Register(index), scale);
2656             __ daddu(AT, AT, as_Register(base));
2657           } else {
2658             __ daddu(AT, as_Register(index), as_Register(base));
2659           }
2660           __ ld(as_Register(dst), AT, disp);
2661         } else {
           // Large disp: AT = (index << scale) + disp, then indexed load.
2662           if ( scale != 0 ) {
2663             __ dsll(AT, as_Register(index), scale);
2664             __ move(T9, disp);
2665             __ daddu(AT, AT, T9);
2666           } else {
2667             __ move(T9, disp);
2668             __ daddu(AT, as_Register(index), T9);
2669           }
2670           __ gsldx(as_Register(dst), as_Register(base), AT, 0);
2671         }
2672       } else { //not use loongson isa
2673         if (scale == 0) {
2674           __ daddu(AT, as_Register(base), as_Register(index));
2675         } else {
2676           __ dsll(AT, as_Register(index), scale);
2677           __ daddu(AT, as_Register(base), AT);
2678         }
2679         if( Assembler::is_simm16(disp) ) {
2680           __ ld(as_Register(dst), AT, disp);
2681         } else {
2682           __ set64(T9, disp);
2683           __ daddu(AT, AT, T9);
2684           __ ld(as_Register(dst), AT, 0);
2685         }
2686       }
2687     } else {
2688       if ( UseLoongsonISA ) {
2689         if ( Assembler::is_simm16(disp) ){
2690           __ ld(as_Register(dst), as_Register(base), disp);
2691         } else {
2692           __ set64(T9, disp);
2693           __ gsldx(as_Register(dst), as_Register(base), T9, 0);
2694         }
2695       } else { //not use loongson isa
2696         if( Assembler::is_simm16(disp) ) {
2697           __ ld(as_Register(dst), as_Register(base), disp);
2698         } else {
2699           __ set64(T9, disp);
2700           __ daddu(AT, as_Register(base), T9);
2701           __ ld(as_Register(dst), AT, 0);
2702         }
2703       }
2704     }
     // Disabled extra indirection kept for reference:
2705 //     if( disp_reloc != relocInfo::none) __ ld(as_Register(dst), as_Register(dst), 0);
2706   %}
// Store Pointer (64-bit) from register: sd (or Loongson gssdx, indexed
// store-doubleword with 8-bit immediate offset) of 'src' to
// [base + (index << scale) + disp]. AT/T9 are scratch.
2708   enc_class store_P_reg_enc (memory mem, mRegP src) %{
2709     MacroAssembler _masm(&cbuf);
2710     int  src = $src$$reg;
2711     int  base = $mem$$base;
2712     int  index = $mem$$index;
2713     int  scale = $mem$$scale;
2714     int  disp = $mem$$disp;
2716     if( index != 0 ) {
2717       if ( UseLoongsonISA ){
2718         if ( Assembler::is_simm(disp, 8) ) {
2719           if ( scale == 0 ) {
2720             __ gssdx(as_Register(src), as_Register(base), as_Register(index), disp);
2721           } else {
2722             __ dsll(AT, as_Register(index), scale);
2723             __ gssdx(as_Register(src), as_Register(base), AT, disp);
2724           }
2725         } else if ( Assembler::is_simm16(disp) ) {
2726           if ( scale == 0 ) {
2727             __ daddu(AT, as_Register(base), as_Register(index));
2728           } else {
2729             __ dsll(AT, as_Register(index), scale);
2730             __ daddu(AT, as_Register(base), AT);
2731           }
2732           __ sd(as_Register(src), AT, disp);
2733         } else {
           // Large disp: fold disp into the index side, then indexed store.
2734           if ( scale == 0 ) {
2735             __ move(T9, disp);
2736             __ daddu(AT, as_Register(index), T9);
2737           } else {
2738             __ dsll(AT, as_Register(index), scale);
2739             __ move(T9, disp);
2740             __ daddu(AT, AT, T9);
2741           }
2742           __ gssdx(as_Register(src), as_Register(base), AT, 0);
2743         }
2744       } else { //not use loongson isa
2745         if (scale == 0) {
2746           __ daddu(AT, as_Register(base), as_Register(index));
2747         } else {
2748           __ dsll(AT, as_Register(index), scale);
2749           __ daddu(AT, as_Register(base), AT);
2750         }
2751         if( Assembler::is_simm16(disp) ) {
2752           __ sd(as_Register(src), AT, disp);
2753         } else {
2754           __ move(T9, disp);
2755           __ daddu(AT, AT, T9);
2756           __ sd(as_Register(src), AT, 0);
2757         }
2758       }
2759     } else {
2760       if ( UseLoongsonISA ) {
2761         if ( Assembler::is_simm16(disp) ) {
2762           __ sd(as_Register(src), as_Register(base), disp);
2763         } else {
2764           __ move(T9, disp);
2765           __ gssdx(as_Register(src), as_Register(base), T9, 0);
2766         }
2767       } else {
2768         if( Assembler::is_simm16(disp) ) {
2769           __ sd(as_Register(src), as_Register(base), disp);
2770         } else {
2771           __ move(T9, disp);
2772           __ daddu(AT, as_Register(base), T9);
2773           __ sd(as_Register(src), AT, 0);
2774         }
2775       }
2776     }
2777   %}
// Store narrow oop (compressed pointer, mRegN) from register: identical
// structure to store_P_reg_enc above but emits 32-bit word stores
// (sw / gsswx) instead of doubleword stores.
2779   enc_class store_N_reg_enc (memory mem, mRegN src) %{
2780     MacroAssembler _masm(&cbuf);
2781     int  src = $src$$reg;
2782     int  base = $mem$$base;
2783     int  index = $mem$$index;
2784     int  scale = $mem$$scale;
2785     int  disp = $mem$$disp;
2787     if( index != 0 ) {
2788       if ( UseLoongsonISA ){
2789         if ( Assembler::is_simm(disp, 8) ) {
2790           if ( scale == 0 ) {
2791             __ gsswx(as_Register(src), as_Register(base), as_Register(index), disp);
2792           } else {
2793             __ dsll(AT, as_Register(index), scale);
2794             __ gsswx(as_Register(src), as_Register(base), AT, disp);
2795           }
2796         } else if ( Assembler::is_simm16(disp) ) {
2797           if ( scale == 0 ) {
2798             __ daddu(AT, as_Register(base), as_Register(index));
2799           } else {
2800             __ dsll(AT, as_Register(index), scale);
2801             __ daddu(AT, as_Register(base), AT);
2802           }
2803           __ sw(as_Register(src), AT, disp);
2804         } else {
2805           if ( scale == 0 ) {
2806             __ move(T9, disp);
2807             __ daddu(AT, as_Register(index), T9);
2808           } else {
2809             __ dsll(AT, as_Register(index), scale);
2810             __ move(T9, disp);
2811             __ daddu(AT, AT, T9);
2812           }
2813           __ gsswx(as_Register(src), as_Register(base), AT, 0);
2814         }
2815       } else { //not use loongson isa
2816         if (scale == 0) {
2817           __ daddu(AT, as_Register(base), as_Register(index));
2818         } else {
2819           __ dsll(AT, as_Register(index), scale);
2820           __ daddu(AT, as_Register(base), AT);
2821         }
2822         if( Assembler::is_simm16(disp) ) {
2823           __ sw(as_Register(src), AT, disp);
2824         } else {
2825           __ move(T9, disp);
2826           __ daddu(AT, AT, T9);
2827           __ sw(as_Register(src), AT, 0);
2828         }
2829       }
2830     } else {
2831       if ( UseLoongsonISA ) {
2832         if ( Assembler::is_simm16(disp) ) {
2833           __ sw(as_Register(src), as_Register(base), disp);
2834         } else {
2835           __ move(T9, disp);
2836           __ gsswx(as_Register(src), as_Register(base), T9, 0);
2837         }
2838       } else {
2839         if( Assembler::is_simm16(disp) ) {
2840           __ sw(as_Register(src), as_Register(base), disp);
2841         } else {
2842           __ move(T9, disp);
2843           __ daddu(AT, as_Register(base), T9);
2844           __ sw(as_Register(src), AT, 0);
2845         }
2846       }
2847     }
2848   %}
// Store null Pointer (immP0): stores a 64-bit zero (from R0) to
// [base + (index << scale) + disp] via sd or Loongson gssdx. Unlike the
// other pointer encodings, this one branches on scale at the top level.
2850   enc_class store_P_immP0_enc (memory mem) %{
2851     MacroAssembler _masm(&cbuf);
2852     int  base = $mem$$base;
2853     int  index = $mem$$index;
2854     int  scale = $mem$$scale;
2855     int  disp = $mem$$disp;
2857     if( index != 0 ) {
2858       if (scale == 0) {
2859         if( Assembler::is_simm16(disp) ) {
             // gssdx takes only an 8-bit offset, hence the narrower guard.
2860           if (UseLoongsonISA && Assembler::is_simm(disp, 8)) {
2861             __ gssdx(R0, as_Register(base), as_Register(index), disp);
2862           } else {
2863             __ daddu(AT, as_Register(base), as_Register(index));
2864             __ sd(R0, AT, disp);
2865           }
2866         } else {
2867           __ daddu(AT, as_Register(base), as_Register(index));
2868           __ move(T9, disp);
2869           if(UseLoongsonISA) {
2870             __ gssdx(R0, AT, T9, 0);
2871           } else {
2872             __ daddu(AT, AT, T9);
2873             __ sd(R0, AT, 0);
2874           }
2875         }
2876       } else {
2877         __ dsll(AT, as_Register(index), scale);
2878         if( Assembler::is_simm16(disp) ) {
2879           if (UseLoongsonISA && Assembler::is_simm(disp, 8)) {
2880             __ gssdx(R0, as_Register(base), AT, disp);
2881           } else {
2882             __ daddu(AT, as_Register(base), AT);
2883             __ sd(R0, AT, disp);
2884           }
2885         } else {
2886           __ daddu(AT, as_Register(base), AT);
2887           __ move(T9, disp);
2888           if (UseLoongsonISA) {
2889             __ gssdx(R0, AT, T9, 0);
2890           } else {
2891             __ daddu(AT, AT, T9);
2892             __ sd(R0, AT, 0);
2893           }
2894         }
2895       }
2896     } else {
2897       if( Assembler::is_simm16(disp) ) {
2898         __ sd(R0, as_Register(base), disp);
2899       } else {
2900         __ move(T9, disp);
2901         if (UseLoongsonISA) {
2902           __ gssdx(R0, as_Register(base), T9, 0);
2903         } else {
2904           __ daddu(AT, as_Register(base), T9);
2905           __ sd(R0, AT, 0);
2906         }
2907       }
2908     }
2909   %}
// Store Pointer immediate (immP31, a 31-bit constant pointer value): stores
// the constant 'value' to [base + (index << scale) + disp] with sd. No
// Loongson fast path in this encoding; a zero constant uses R0 directly.
2911   enc_class store_P_immP_enc (memory mem, immP31 src) %{
2912     MacroAssembler _masm(&cbuf);
2913     int  base = $mem$$base;
2914     int  index = $mem$$index;
2915     int  scale = $mem$$scale;
2916     int  disp = $mem$$disp;
2917     long value = $src$$constant;
2919     if( index != 0 ) {
2920       if (scale == 0) {
2921         __ daddu(AT, as_Register(base), as_Register(index));
2922       } else {
2923         __ dsll(AT, as_Register(index), scale);
2924         __ daddu(AT, as_Register(base), AT);
2925       }
2926       if( Assembler::is_simm16(disp) ) {
2927         if (value == 0) {
2928           __ sd(R0, AT, disp);
2929         } else {
2930           __ move(T9, value);
2931           __ sd(T9, AT, disp);
2932         }
2933       } else {
2934         if (value == 0) {
2935           __ move(T9, disp);
2936           __ daddu(AT, AT, T9);
2937           __ sd(R0, AT, 0);
2938         } else {
           // T9 holds disp first, then is reused for the value -- order matters.
2939           __ move(T9, disp);
2940           __ daddu(AT, AT, T9);
2941           __ move(T9, value);
2942           __ sd(T9, AT, 0);
2943         }
2944       }
2945     } else {
2946       if( Assembler::is_simm16(disp) ) {
2947         if (value == 0) {
2948           __ sd(R0, as_Register(base), disp);
2949         } else {
2950           __ move(AT, value);
2951           __ sd(AT, as_Register(base), disp);
2952         }
2953       } else {
2954         if (value == 0) {
2955           __ move(T9, disp);
2956           __ daddu(AT, as_Register(base), T9);
2957           __ sd(R0, AT, 0);
2958         } else {
2959           __ move(T9, disp);
2960           __ daddu(AT, as_Register(base), T9);
2961           __ move(T9, value);
2962           __ sd(T9, AT, 0);
2963         }
2964       }
2965     }
2966   %}
// Store narrow-oop null (ImmN0): writes a 32-bit zero (sw from R0) to
// [base + (index << scale) + disp]. Simple daddu/sw only -- no Loongson
// indexed-store fast path here.
2968   enc_class storeImmN0_enc(memory mem, ImmN0 src) %{
2969     MacroAssembler _masm(&cbuf);
2970     int  base = $mem$$base;
2971     int  index = $mem$$index;
2972     int  scale = $mem$$scale;
2973     int  disp = $mem$$disp;
2975     if(index!=0){
2976         if (scale == 0) {
2977            __ daddu(AT, as_Register(base), as_Register(index));
2978         } else {
2979            __ dsll(AT, as_Register(index), scale);
2980            __ daddu(AT, as_Register(base), AT);
2981         }
2983        if( Assembler::is_simm16(disp) ) {
2984           __ sw(R0, AT, disp);
2985        } else {
2986           __ move(T9, disp);
2987           __ daddu(AT, AT, T9);
2988           __ sw(R0, AT, 0);
2989        }
2990     }
2991     else {
2992        if( Assembler::is_simm16(disp) ) {
2993           __ sw(R0, as_Register(base), disp);
2994        } else {
2995           __ move(T9, disp);
2996           __ daddu(AT, as_Register(base), T9);
2997           __ sw(R0, AT, 0);
2998        }
2999     }
3000   %}
// Store narrow-oop immediate (immN): stores a compressed oop constant to
// [base + (index << scale) + disp] as a 32-bit word.
// A NULL constant takes an early-return fast path that stores R0 (and
// requires a simm16 disp, guaranteed below). A non-null constant is
// registered with the OopRecorder and, when a relocation is required,
// emitted with relocate + patchable_set48 so the GC can patch it.
// NOTE(review): the no-relocation branches emit set64(T9, oop_index) --
// storing the recorder index rather than the narrow-oop bits; the Klass
// variant below stores 'narrowp' in the same position. Confirm this
// asymmetry is intentional.
3002   enc_class storeImmN_enc (memory mem, immN src) %{
3003     MacroAssembler _masm(&cbuf);
3004     int  base = $mem$$base;
3005     int  index = $mem$$index;
3006     int  scale = $mem$$scale;
3007     int  disp = $mem$$disp;
3008     long * value = (long *)$src$$constant;
3010     if (value == NULL) {
       // Null constant: plain zero store, no relocation bookkeeping needed.
3011        guarantee(Assembler::is_simm16(disp), "FIXME: disp is not simm16!");
3012        if (index == 0) {
3013           __ sw(R0, as_Register(base), disp);
3014        } else {
3015           if (scale == 0) {
3016              __ daddu(AT, as_Register(base), as_Register(index));
3017           } else {
3018              __ dsll(AT, as_Register(index), scale);
3019              __ daddu(AT, as_Register(base), AT);
3020           }
3021           __ sw(R0, AT, disp);
3022        }
3024        return;
3025     }
3027     int oop_index = __ oop_recorder()->find_index((jobject)value);
3028     RelocationHolder rspec = oop_Relocation::spec(oop_index);
3030     guarantee(scale == 0, "FIXME: scale is not zero !");
3031     guarantee(value != 0, "FIXME: value is zero !");
     // scale is guaranteed 0 above, so the dsll branch below is effectively dead.
3033     if (index != 0) {
3034        if (scale == 0) {
3035           __ daddu(AT, as_Register(base), as_Register(index));
3036        } else {
3037           __ dsll(AT, as_Register(index), scale);
3038           __ daddu(AT, as_Register(base), AT);
3039        }
3040        if( Assembler::is_simm16(disp) ) {
3041           if(rspec.type() != relocInfo::none) {
3042              __ relocate(rspec, Assembler::narrow_oop_operand);
3043              __ patchable_set48(T9, oop_index);
3044           } else {
3045              __ set64(T9, oop_index);
3046           }
3047           __ sw(T9, AT, disp);
3048        } else {
3049           __ move(T9, disp);
             // NOTE(review): 32-bit addu for a 64-bit address; siblings use
             // daddu -- verify.
3050           __ addu(AT, AT, T9);
3052           if(rspec.type() != relocInfo::none) {
3053              __ relocate(rspec, Assembler::narrow_oop_operand);
3054              __ patchable_set48(T9, oop_index);
3055           } else {
3056              __ set64(T9, oop_index);
3057           }
3058           __ sw(T9, AT, 0);
3059        }
3060     }
3061     else {
3062        if( Assembler::is_simm16(disp) ) {
3063           if($src->constant_reloc() != relocInfo::none) {
3064              __ relocate(rspec, Assembler::narrow_oop_operand);
3065              __ patchable_set48(T9, oop_index);
3066           } else {
3067              __ set64(T9, oop_index);
3068           }
3069           __ sw(T9, as_Register(base), disp);
3070        } else {
3071           __ move(T9, disp);
3072           __ daddu(AT, as_Register(base), T9);
3074           if($src->constant_reloc() != relocInfo::none){
3075              __ relocate(rspec, Assembler::narrow_oop_operand);
3076              __ patchable_set48(T9, oop_index);
3077           } else {
3078              __ set64(T9, oop_index);
3079           }
3080           __ sw(T9, AT, 0);
3081        }
3082     }
3083   %}
// Store narrow-Klass immediate (immNKlass): stores the compressed class
// pointer ('narrowp' = Klass::encode_klass(value)) to
// [base + (index << scale) + disp] as a 32-bit word. The Klass is
// registered with the OopRecorder; when a metadata relocation applies, the
// constant is emitted with relocate + patchable_set48 so it can be patched.
// Requires UseCompressedOops and an OopRecorder (asserted).
3085   enc_class storeImmNKlass_enc (memory mem, immNKlass src) %{
3086     MacroAssembler _masm(&cbuf);
3088     assert (UseCompressedOops, "should only be used for compressed headers");
3089     assert (__ oop_recorder() != NULL, "this assembler needs an OopRecorder");
3091     int  base = $mem$$base;
3092     int  index = $mem$$index;
3093     int  scale = $mem$$scale;
3094     int  disp = $mem$$disp;
3095     long value = $src$$constant;
3097     int klass_index = __ oop_recorder()->find_index((Klass*)value);
3098     RelocationHolder rspec = metadata_Relocation::spec(klass_index);
3099     long narrowp = Klass::encode_klass((Klass*)value);
3101     if(index!=0){
3102        if (scale == 0) {
3103           __ daddu(AT, as_Register(base), as_Register(index));
3104        } else {
3105           __ dsll(AT, as_Register(index), scale);
3106           __ daddu(AT, as_Register(base), AT);
3107        }
3109        if( Assembler::is_simm16(disp) ) {
3110           if(rspec.type() != relocInfo::none){
3111              __ relocate(rspec, Assembler::narrow_oop_operand);
3112              __ patchable_set48(T9, narrowp);
3113           } else {
3114              __ set64(T9, narrowp);
3115           }
3116           __ sw(T9, AT, disp);
3117        } else {
3118           __ move(T9, disp);
3119           __ daddu(AT, AT, T9);
3121           if(rspec.type() != relocInfo::none){
3122              __ relocate(rspec, Assembler::narrow_oop_operand);
3123              __ patchable_set48(T9, narrowp);
3124           } else {
3125              __ set64(T9, narrowp);
3126           }
3128           __ sw(T9, AT, 0);
3129        }
3130     } else {
3131        if( Assembler::is_simm16(disp) ) {
3132           if(rspec.type() != relocInfo::none){
3133              __ relocate(rspec, Assembler::narrow_oop_operand);
3134              __ patchable_set48(T9, narrowp);
3135           }
3136           else {
3137              __ set64(T9, narrowp);
3138           }
3139           __ sw(T9, as_Register(base), disp);
3140        } else {
3141           __ move(T9, disp);
3142           __ daddu(AT, as_Register(base), T9);
3144           if(rspec.type() != relocInfo::none){
3145              __ relocate(rspec, Assembler::narrow_oop_operand);
3146              __ patchable_set48(T9, narrowp);
3147           } else {
3148              __ set64(T9, narrowp);
3149           }
3150           __ sw(T9, AT, 0);
3151        }
3152     }
3153   %}
// Load Long (64-bit): dst = ld from [base + (index << scale) + disp].
// A probing lb from [base + 0] is emitted first so that a null base faults
// at the start of the pattern (implicit null check), since the real load may
// go through the derived address in AT.
3155   enc_class load_L_enc (mRegL dst, memory mem) %{
3156     MacroAssembler _masm(&cbuf);
3157     int  base = $mem$$base;
3158     int  index = $mem$$index;
3159     int  scale = $mem$$scale;
3160     int  disp = $mem$$disp;
3161     Register  dst_reg = as_Register($dst$$reg);
3163     // For implicit null check
3164     __ lb(AT, as_Register(base), 0);
3166     if( index != 0 ) {
3167        if (scale == 0) {
3168           __ daddu(AT, as_Register(base), as_Register(index));
3169        } else {
3170           __ dsll(AT, as_Register(index), scale);
3171           __ daddu(AT, as_Register(base), AT);
3172        }
3173        if( Assembler::is_simm16(disp) ) {
3174           __ ld(dst_reg, AT, disp);
3175        } else {
3176           __ move(T9, disp);
3177           __ daddu(AT, AT, T9);
3178           __ ld(dst_reg, AT, 0);
3179        }
3180     } else {
3181        if( Assembler::is_simm16(disp) ) {
3182           __ ld(dst_reg, as_Register(base), disp);
3183        } else {
3184           __ move(T9, disp);
3185           __ daddu(AT, as_Register(base), T9);
3186           __ ld(dst_reg, AT, 0);
3187        }
3188     }
3189   %}
3191 enc_class store_L_reg_enc (memory mem, mRegL src) %{
3192 MacroAssembler _masm(&cbuf);
3193 int base = $mem$$base;
3194 int index = $mem$$index;
3195 int scale = $mem$$scale;
3196 int disp = $mem$$disp;
3197 Register src_reg = as_Register($src$$reg);
3199 if( index != 0 ) {
3200 if (scale == 0) {
3201 __ daddu(AT, as_Register(base), as_Register(index));
3202 } else {
3203 __ dsll(AT, as_Register(index), scale);
3204 __ daddu(AT, as_Register(base), AT);
3205 }
3206 if( Assembler::is_simm16(disp) ) {
3207 __ sd(src_reg, AT, disp);
3208 } else {
3209 __ move(T9, disp);
3210 __ daddu(AT, AT, T9);
3211 __ sd(src_reg, AT, 0);
3212 }
3213 } else {
3214 if( Assembler::is_simm16(disp) ) {
3215 __ sd(src_reg, as_Register(base), disp);
3216 } else {
3217 __ move(T9, disp);
3218 __ daddu(AT, as_Register(base), T9);
3219 __ sd(src_reg, AT, 0);
3220 }
3221 }
3222 %}
3224 enc_class store_L_immL0_enc (memory mem, immL0 src) %{
3225 MacroAssembler _masm(&cbuf);
3226 int base = $mem$$base;
3227 int index = $mem$$index;
3228 int scale = $mem$$scale;
3229 int disp = $mem$$disp;
3231 if( index != 0 ) {
3232 // For implicit null check
3233 __ lb(AT, as_Register(base), 0);
3235 if (scale == 0) {
3236 __ daddu(AT, as_Register(base), as_Register(index));
3237 } else {
3238 __ dsll(AT, as_Register(index), scale);
3239 __ daddu(AT, as_Register(base), AT);
3240 }
3241 if( Assembler::is_simm16(disp) ) {
3242 __ sd(R0, AT, disp);
3243 } else {
3244 __ move(T9, disp);
3245 __ addu(AT, AT, T9);
3246 __ sd(R0, AT, 0);
3247 }
3248 } else {
3249 if( Assembler::is_simm16(disp) ) {
3250 __ sd(R0, as_Register(base), disp);
3251 } else {
3252 __ move(T9, disp);
3253 __ addu(AT, as_Register(base), T9);
3254 __ sd(R0, AT, 0);
3255 }
3256 }
3257 %}
3259 enc_class store_L_immL_enc (memory mem, immL src) %{
3260 MacroAssembler _masm(&cbuf);
3261 int base = $mem$$base;
3262 int index = $mem$$index;
3263 int scale = $mem$$scale;
3264 int disp = $mem$$disp;
3265 long imm = $src$$constant;
3267 if( index != 0 ) {
3268 if (scale == 0) {
3269 __ daddu(AT, as_Register(base), as_Register(index));
3270 } else {
3271 __ dsll(AT, as_Register(index), scale);
3272 __ daddu(AT, as_Register(base), AT);
3273 }
3274 if( Assembler::is_simm16(disp) ) {
3275 __ set64(T9, imm);
3276 __ sd(T9, AT, disp);
3277 } else {
3278 __ move(T9, disp);
3279 __ addu(AT, AT, T9);
3280 __ set64(T9, imm);
3281 __ sd(T9, AT, 0);
3282 }
3283 } else {
3284 if( Assembler::is_simm16(disp) ) {
3285 __ move(AT, as_Register(base));
3286 __ set64(T9, imm);
3287 __ sd(T9, AT, disp);
3288 } else {
3289 __ move(T9, disp);
3290 __ addu(AT, as_Register(base), T9);
3291 __ set64(T9, imm);
3292 __ sd(T9, AT, 0);
3293 }
3294 }
3295 %}
  // Load a 32-bit float from the memory operand into FP register 'dst'.
  // Uses the Loongson indexed load (gslwxc1) where the displacement fits
  // its 8-bit immediate; otherwise falls back to computing the effective
  // address in AT and issuing a plain lwc1.
  enc_class load_F_enc (regF dst, memory mem) %{
    MacroAssembler _masm(&cbuf);
    int base  = $mem$$base;
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp  = $mem$$disp;
    FloatRegister dst = $dst$$FloatRegister;

    if( index != 0 ) {
       if( Assembler::is_simm16(disp) ) {
          if( UseLoongsonISA && Assembler::is_simm(disp, 8) ) {
             // One-instruction base+index+disp8 form.
             if (scale == 0) {
                __ gslwxc1(dst, as_Register(base), as_Register(index), disp);
             } else {
                __ dsll(AT, as_Register(index), scale);
                __ gslwxc1(dst, as_Register(base), AT, disp);
             }
          } else {
             // AT = base + (index << scale); disp fits lwc1's 16-bit field.
             if (scale == 0) {
                __ daddu(AT, as_Register(base), as_Register(index));
             } else {
                __ dsll(AT, as_Register(index), scale);
                __ daddu(AT, as_Register(base), AT);
             }
             __ lwc1(dst, AT, disp);
          }
       } else {
          // Large displacement: materialize it in T9 first.
          if (scale == 0) {
             __ daddu(AT, as_Register(base), as_Register(index));
          } else {
             __ dsll(AT, as_Register(index), scale);
             __ daddu(AT, as_Register(base), AT);
          }
          __ move(T9, disp);
          if( UseLoongsonISA ) {
             __ gslwxc1(dst, AT, T9, 0);
          } else {
             __ daddu(AT, AT, T9);
             __ lwc1(dst, AT, 0);
          }
       }
    } else {
       if( Assembler::is_simm16(disp) ) {
          __ lwc1(dst, as_Register(base), disp);
       } else {
          __ move(T9, disp);
          if( UseLoongsonISA ) {
             __ gslwxc1(dst, as_Register(base), T9, 0);
          } else {
             __ daddu(AT, as_Register(base), T9);
             __ lwc1(dst, AT, 0);
          }
       }
    }
  %}
3353 enc_class store_F_reg_enc (memory mem, regF src) %{
3354 MacroAssembler _masm(&cbuf);
3355 int base = $mem$$base;
3356 int index = $mem$$index;
3357 int scale = $mem$$scale;
3358 int disp = $mem$$disp;
3359 FloatRegister src = $src$$FloatRegister;
3361 if( index != 0 ) {
3362 if( Assembler::is_simm16(disp) ) {
3363 if( UseLoongsonISA && Assembler::is_simm(disp, 8) ) {
3364 if (scale == 0) {
3365 __ gsswxc1(src, as_Register(base), as_Register(index), disp);
3366 } else {
3367 __ dsll(AT, as_Register(index), scale);
3368 __ gsswxc1(src, as_Register(base), AT, disp);
3369 }
3370 } else {
3371 if (scale == 0) {
3372 __ daddu(AT, as_Register(base), as_Register(index));
3373 } else {
3374 __ dsll(AT, as_Register(index), scale);
3375 __ daddu(AT, as_Register(base), AT);
3376 }
3377 __ swc1(src, AT, disp);
3378 }
3379 } else {
3380 if (scale == 0) {
3381 __ daddu(AT, as_Register(base), as_Register(index));
3382 } else {
3383 __ dsll(AT, as_Register(index), scale);
3384 __ daddu(AT, as_Register(base), AT);
3385 }
3386 __ move(T9, disp);
3387 if( UseLoongsonISA ) {
3388 __ gsswxc1(src, AT, T9, 0);
3389 } else {
3390 __ daddu(AT, AT, T9);
3391 __ swc1(src, AT, 0);
3392 }
3393 }
3394 } else {
3395 if( Assembler::is_simm16(disp) ) {
3396 __ swc1(src, as_Register(base), disp);
3397 } else {
3398 __ move(T9, disp);
3399 if( UseLoongsonISA ) {
3400 __ gslwxc1(src, as_Register(base), T9, 0);
3401 } else {
3402 __ daddu(AT, as_Register(base), T9);
3403 __ swc1(src, AT, 0);
3404 }
3405 }
3406 }
3407 %}
3409 enc_class load_D_enc (regD dst, memory mem) %{
3410 MacroAssembler _masm(&cbuf);
3411 int base = $mem$$base;
3412 int index = $mem$$index;
3413 int scale = $mem$$scale;
3414 int disp = $mem$$disp;
3415 FloatRegister dst_reg = as_FloatRegister($dst$$reg);
3417 if( index != 0 ) {
3418 if( Assembler::is_simm16(disp) ) {
3419 if( UseLoongsonISA && Assembler::is_simm(disp, 8) ) {
3420 if (scale == 0) {
3421 __ gsldxc1(dst_reg, as_Register(base), as_Register(index), disp);
3422 } else {
3423 __ dsll(AT, as_Register(index), scale);
3424 __ gsldxc1(dst_reg, as_Register(base), AT, disp);
3425 }
3426 } else {
3427 if (scale == 0) {
3428 __ daddu(AT, as_Register(base), as_Register(index));
3429 } else {
3430 __ dsll(AT, as_Register(index), scale);
3431 __ daddu(AT, as_Register(base), AT);
3432 }
3433 __ ldc1(dst_reg, AT, disp);
3434 }
3435 } else {
3436 if (scale == 0) {
3437 __ daddu(AT, as_Register(base), as_Register(index));
3438 } else {
3439 __ dsll(AT, as_Register(index), scale);
3440 __ daddu(AT, as_Register(base), AT);
3441 }
3442 __ move(T9, disp);
3443 if( UseLoongsonISA ) {
3444 __ gsldxc1(dst_reg, AT, T9, 0);
3445 } else {
3446 __ addu(AT, AT, T9);
3447 __ ldc1(dst_reg, AT, 0);
3448 }
3449 }
3450 } else {
3451 if( Assembler::is_simm16(disp) ) {
3452 __ ldc1(dst_reg, as_Register(base), disp);
3453 } else {
3454 __ move(T9, disp);
3455 if( UseLoongsonISA ) {
3456 __ gsldxc1(dst_reg, as_Register(base), T9, 0);
3457 } else {
3458 __ addu(AT, as_Register(base), T9);
3459 __ ldc1(dst_reg, AT, 0);
3460 }
3461 }
3462 }
3463 %}
3465 enc_class store_D_reg_enc (memory mem, regD src) %{
3466 MacroAssembler _masm(&cbuf);
3467 int base = $mem$$base;
3468 int index = $mem$$index;
3469 int scale = $mem$$scale;
3470 int disp = $mem$$disp;
3471 FloatRegister src_reg = as_FloatRegister($src$$reg);
3473 if( index != 0 ) {
3474 if( Assembler::is_simm16(disp) ) {
3475 if( UseLoongsonISA && Assembler::is_simm(disp, 8) ) {
3476 if (scale == 0) {
3477 __ gssdxc1(src_reg, as_Register(base), as_Register(index), disp);
3478 } else {
3479 __ dsll(AT, as_Register(index), scale);
3480 __ gssdxc1(src_reg, as_Register(base), AT, disp);
3481 }
3482 } else {
3483 if (scale == 0) {
3484 __ daddu(AT, as_Register(base), as_Register(index));
3485 } else {
3486 __ dsll(AT, as_Register(index), scale);
3487 __ daddu(AT, as_Register(base), AT);
3488 }
3489 __ sdc1(src_reg, AT, disp);
3490 }
3491 } else {
3492 if (scale == 0) {
3493 __ daddu(AT, as_Register(base), as_Register(index));
3494 } else {
3495 __ dsll(AT, as_Register(index), scale);
3496 __ daddu(AT, as_Register(base), AT);
3497 }
3498 __ move(T9, disp);
3499 if( UseLoongsonISA ) {
3500 __ gssdxc1(src_reg, AT, T9, 0);
3501 } else {
3502 __ addu(AT, AT, T9);
3503 __ sdc1(src_reg, AT, 0);
3504 }
3505 }
3506 } else {
3507 if( Assembler::is_simm16(disp) ) {
3508 __ sdc1(src_reg, as_Register(base), disp);
3509 } else {
3510 __ move(T9, disp);
3511 if( UseLoongsonISA ) {
3512 __ gssdxc1(src_reg, as_Register(base), T9, 0);
3513 } else {
3514 __ addu(AT, as_Register(base), T9);
3515 __ sdc1(src_reg, AT, 0);
3516 }
3517 }
3518 }
3519 %}
  // Call into the VM runtime (CallRuntime / CallLeaf nodes).
  enc_class Java_To_Runtime (method meth) %{ // CALL Java_To_Runtime, Java_To_Runtime_Leaf
    MacroAssembler _masm(&cbuf);
    // This is the instruction starting address for relocation info.
    __ block_comment("Java_To_Runtime");
    // Mark the call site BEFORE emitting, so the relocation below is
    // attached to the first instruction of the call sequence.
    cbuf.set_insts_mark();
    __ relocate(relocInfo::runtime_call_type);

    // Patchable so the target can be rewritten after code relocation.
    __ patchable_call((address)$meth$$method);
  %}
  // Statically-bound Java call site.
  enc_class Java_Static_Call (method meth) %{ // JAVA STATIC CALL
    // CALL to fixup routine. Fixup routine uses ScopeDesc info to determine
    // who we intended to call.
    MacroAssembler _masm(&cbuf);
    // Mark the call site so the relocation attaches to the call sequence.
    cbuf.set_insts_mark();

    // Choose the relocation type that tells the patching machinery what
    // kind of call site this is.
    if ( !_method ) {
      // No Java target: this is a call to a runtime stub.
      __ relocate(relocInfo::runtime_call_type);
    } else if(_optimized_virtual) {
      __ relocate(relocInfo::opt_virtual_call_type);
    } else {
      __ relocate(relocInfo::static_call_type);
    }

    __ patchable_call((address)($meth$$method));
    if( _method ) { // Emit stub for static call
      emit_java_to_interp(cbuf);
    }
  %}
  /*
   * Dynamically-bound (inline-cache) Java call site.
   * [Ref: LIR_Assembler::ic_call() ]
   */
  enc_class Java_Dynamic_Call (method meth) %{ // JAVA DYNAMIC CALL
    MacroAssembler _masm(&cbuf);
    __ block_comment("Java_Dynamic_Call");
    // ic_call emits the inline-cache sequence (IC holder + patchable call).
    __ ic_call((address)$meth$$method);
  %}
  // Materialize a flags value from the outcome left in AT by the preceding
  // fast-lock/fast-unlock sequence: flags == 0 when AT == 0 (success),
  // all-ones otherwise.
  // NOTE(review): assumes AT holds the lock/unlock result at this point —
  // confirm against the fast_lock/fast_unlock instruct definitions.
  enc_class Set_Flags_After_Fast_Lock_Unlock(FlagsReg cr) %{
    Register flags = $cr$$Register;
    Label L;

    MacroAssembler _masm(&cbuf);

    __ addu(flags, R0, R0);       // flags = 0 (assume success)
    __ beq(AT, R0, L);
    __ delayed()->nop();          // branch delay slot
    __ move(flags, 0xFFFFFFFF);   // AT != 0: mark failure
    __ bind(L);
  %}
  // Partial subtype check: sets 'result' to 0 when 'sub' is a subtype of
  // 'super', 1 otherwise, using the shared slow-path scan of the secondary
  // supers array.  'tmp' is a caller-supplied scratch; T9 is a second scratch.
  enc_class enc_PartialSubtypeCheck(mRegP result, mRegP sub, mRegP super, mRegI tmp) %{
    Register result = $result$$Register;
    Register sub = $sub$$Register;
    Register super = $super$$Register;
    Register length = $tmp$$Register;
    Register tmp = T9;
    Label miss;

    /* 2012/9/28 Jin: result may be the same as sub
     * 47c B40: # B21 B41 <- B20 Freq: 0.155379
     * 47c partialSubtypeCheck result=S1, sub=S1, super=S3, length=S0
     * 4bc mov S2, NULL #@loadConP
     * 4c0 beq S1, S2, B21 #@branchConP P=0.999999 C=-1.000000
     */
    MacroAssembler _masm(&cbuf);
    Label done;
    // Falls through on a hit, branches to 'miss' on failure.
    __ check_klass_subtype_slow_path(sub, super, length, tmp,
                                     NULL, &miss,
                                     /*set_cond_codes:*/ true);
    /* 2013/7/22 Jin: Refer to X86_64's RDI */
    // result == 0 encodes "is a subtype" (mirrors the x86_64 convention).
    __ move(result, 0);
    __ b(done);
    __ nop();

    __ bind(miss);
    __ move(result, 1);
    __ bind(done);
  %}
3604 %}
3607 //---------MIPS FRAME--------------------------------------------------------------
3608 // Definition of frame structure and management information.
3609 //
3610 // S T A C K L A Y O U T Allocators stack-slot number
3611 // | (to get allocators register number
3612 // G Owned by | | v add SharedInfo::stack0)
3613 // r CALLER | |
3614 // o | +--------+ pad to even-align allocators stack-slot
3615 // w V | pad0 | numbers; owned by CALLER
3616 // t -----------+--------+----> Matcher::_in_arg_limit, unaligned
3617 // h ^ | in | 5
3618 // | | args | 4 Holes in incoming args owned by SELF
3619 // | | old | | 3
3620 // | | SP-+--------+----> Matcher::_old_SP, even aligned
3621 // v | | ret | 3 return address
3622 // Owned by +--------+
3623 // Self | pad2 | 2 pad to align old SP
3624 // | +--------+ 1
3625 // | | locks | 0
3626 // | +--------+----> SharedInfo::stack0, even aligned
3627 // | | pad1 | 11 pad to align new SP
3628 // | +--------+
3629 // | | | 10
3630 // | | spills | 9 spills
3631 // V | | 8 (pad0 slot for callee)
3632 // -----------+--------+----> Matcher::_out_arg_limit, unaligned
3633 // ^ | out | 7
3634 // | | args | 6 Holes in outgoing args owned by CALLEE
3635 // Owned by new | |
3636 // Callee SP-+--------+----> Matcher::_new_SP, even aligned
3637 // | |
3638 //
3639 // Note 1: Only region 8-11 is determined by the allocator. Region 0-5 is
3640 // known from SELF's arguments and the Java calling convention.
3641 // Region 6-7 is determined per call site.
3642 // Note 2: If the calling convention leaves holes in the incoming argument
3643 // area, those holes are owned by SELF. Holes in the outgoing area
// are owned by the CALLEE. Holes should not be necessary in the
3645 // incoming area, as the Java calling convention is completely under
3646 // the control of the AD file. Doubles can be sorted and packed to
// avoid holes. Holes in the outgoing arguments may be necessary for
3648 // varargs C calling conventions.
3649 // Note 3: Region 0-3 is even aligned, with pad2 as needed. Region 3-5 is
3650 // even aligned with pad0 as needed.
3651 // Region 6 is even aligned. Region 6-7 is NOT even aligned;
3652 // region 6-11 is even aligned; it may be padded out more so that
3653 // the region from SP to FP meets the minimum stack alignment.
3654 // Note 4: For I2C adapters, the incoming FP may not meet the minimum stack
3655 // alignment. Region 11, pad1, may be dynamically extended so that
3656 // SP meets the minimum alignment.
frame %{

  // Stack grows toward lower addresses.
  stack_direction(TOWARDS_LOW);

  // These two registers define part of the calling convention
  // between compiled code and the interpreter.
  // SEE StartI2CNode::calling_convention & StartC2INode::calling_convention & StartOSRNode::calling_convention
  // for more information. by yjl 3/16/2006

  inline_cache_reg(T1);              // Inline Cache Register
  interpreter_method_oop_reg(S3);    // Method Oop Register when calling interpreter
  /*
  inline_cache_reg(T1);              // Inline Cache Register or methodOop for I2C
  interpreter_arg_ptr_reg(A0);       // Argument pointer for I2C adapters
  */

  // Optional: name the operand used by cisc-spilling to access [stack_pointer + offset]
  cisc_spilling_operand_name(indOffset32);

  // Number of stack slots consumed by locking an object
  // generate Compile::sync_stack_slots
#ifdef _LP64
  sync_stack_slots(2);
#else
  sync_stack_slots(1);
#endif

  frame_pointer(SP);

  // Interpreter stores its frame pointer in a register which is
  // stored to the stack by I2CAdaptors.
  // I2CAdaptors convert from interpreted java to compiled java.

  interpreter_frame_pointer(FP);

  // generate Matcher::stack_alignment
  stack_alignment(StackAlignmentInBytes); //wordSize = sizeof(char*);

  // Number of stack slots between incoming argument block and the start of
  // a new frame. The PROLOG must add this many slots to the stack. The
  // EPILOG must remove this many slots. Intel needs one slot for
  // return address.
  // generate Matcher::in_preserve_stack_slots
  //in_preserve_stack_slots(VerifyStackAtCalls + 2); //Now VerifyStackAtCalls is defined as false ! Leave one stack slot for ra and fp
  in_preserve_stack_slots(4); //Now VerifyStackAtCalls is defined as false ! Leave two stack slots for ra and fp

  // Number of outgoing stack slots killed above the out_preserve_stack_slots
  // for calls to C. Supports the var-args backing area for register parms.
  varargs_C_out_slots_killed(0);

  // The after-PROLOG location of the return address. Location of
  // return address specifies a type (REG or STACK) and a number
  // representing the register number (i.e. - use a register name) or
  // stack slot.
  // Ret Addr is on stack in slot 0 if no locks or verification or alignment.
  // Otherwise, it is above the locks and verification slot and alignment word
  //return_addr(STACK -1+ round_to(1+VerifyStackAtCalls+Compile::current()->sync()*Compile::current()->sync_stack_slots(),WordsPerLong));
  return_addr(REG RA);

  // Body of function which returns an integer array locating
  // arguments either in registers or in stack slots. Passed an array
  // of ideal registers called "sig" and a "length" count. Stack-slot
  // offsets are based on outgoing arguments, i.e. a CALLER setting up
  // arguments for a CALLEE. Incoming stack arguments are
  // automatically biased by the preserve_stack_slots field above.

  // will generated to Matcher::calling_convention(OptoRegPair *sig, uint length, bool is_outgoing)
  // StartNode::calling_convention call this. by yjl 3/16/2006
  calling_convention %{
    SharedRuntime::java_calling_convention(sig_bt, regs, length, false);
  %}

  // Body of function which returns an integer array locating
  // arguments either in registers or in stack slots. Passed an array
  // of ideal registers called "sig" and a "length" count. Stack-slot
  // offsets are based on outgoing arguments, i.e. a CALLER setting up
  // arguments for a CALLEE. Incoming stack arguments are
  // automatically biased by the preserve_stack_slots field above.

  // SEE CallRuntimeNode::calling_convention for more information. by yjl 3/16/2006
  c_calling_convention %{
    (void) SharedRuntime::c_calling_convention(sig_bt, regs, /*regs2=*/NULL, length);
  %}

  // Location of C & interpreter return values
  // register(s) contain(s) return value for Op_StartI2C and Op_StartOSR.
  // SEE Matcher::match. by yjl 3/16/2006
  c_return_value %{
    assert( ideal_reg >= Op_RegI && ideal_reg <= Op_RegL, "only return normal values" );
    /* -- , -- , Op_RegN, Op_RegI, Op_RegP, Op_RegF, Op_RegD, Op_RegL */
    static int lo[Op_RegL+1] = { 0, 0, V0_num, V0_num, V0_num, F0_num, F0_num, V0_num };
    static int hi[Op_RegL+1] = { 0, 0, OptoReg::Bad, OptoReg::Bad, V0_H_num, OptoReg::Bad, F0_H_num, V0_H_num };
    return OptoRegPair(hi[ideal_reg],lo[ideal_reg]);
  %}

  // Location of return values
  // register(s) contain(s) return value for Op_StartC2I and Op_Start.
  // SEE Matcher::match. by yjl 3/16/2006

  return_value %{
    assert( ideal_reg >= Op_RegI && ideal_reg <= Op_RegL, "only return normal values" );
    /* -- , -- , Op_RegN, Op_RegI, Op_RegP, Op_RegF, Op_RegD, Op_RegL */
    static int lo[Op_RegL+1] = { 0, 0, V0_num, V0_num, V0_num, F0_num, F0_num, V0_num };
    static int hi[Op_RegL+1] = { 0, 0, OptoReg::Bad, OptoReg::Bad, V0_H_num, OptoReg::Bad, F0_H_num, V0_H_num};
    return OptoRegPair(hi[ideal_reg],lo[ideal_reg]);
  %}

%}
//----------ATTRIBUTES---------------------------------------------------------
//----------Operand Attributes-------------------------------------------------
op_attrib op_cost(0);          // Required cost attribute

//----------Instruction Attributes---------------------------------------------
ins_attrib ins_cost(100);      // Required cost attribute
ins_attrib ins_size(32);       // Required size attribute (in bits)
ins_attrib ins_pc_relative(0); // Required PC Relative flag
ins_attrib ins_short_branch(0); // Required flag: is this instruction a
                               // non-matching short branch variant of some
                               // long branch?
ins_attrib ins_alignment(4);   // Required alignment attribute (must be a power of 2)
                               // specifies the alignment that some part of the instruction (not
                               // necessarily the start) requires. If > 1, a compute_padding()
                               // function must be provided for the instruction
//----------OPERANDS-----------------------------------------------------------
// Operand definitions must precede instruction definitions for correct parsing
// in the ADLC because operands constitute user defined types which are used in
// instruction definitions.

// Vectors
// 64-bit vector, allocated in the double-precision FP register class.
operand vecD() %{
  constraint(ALLOC_IN_RC(dbl_reg));
  match(VecD);

  format %{ %}
  interface(REG_INTER);
%}

// Flags register, used as output of compare instructions
operand FlagsReg() %{
  constraint(ALLOC_IN_RC(mips_flags));
  match(RegFlags);

  format %{ "EFLAGS" %}
  interface(REG_INTER);
%}
//----------Simple Operands----------------------------------------------------
//TODO: Should we need to define some more special immediate number ?
// Immediate Operands
// Integer Immediate
operand immI() %{
  match(ConI);
  //TODO: should not match immI8 here LEE
  match(immI8);

  op_cost(20);
  format %{ %}
  interface(CONST_INTER);
%}

// Long Immediate 8-bit
// NOTE(review): a long immediate placed among the int immediates — presumably
// kept here for historical reasons.
operand immL8()
%{
  predicate(-0x80L <= n->get_long() && n->get_long() < 0x80L);
  match(ConL);

  op_cost(5);
  format %{ %}
  interface(CONST_INTER);
%}

// Constant for test vs zero
operand immI0() %{
  predicate(n->get_int() == 0);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Constant for increment
operand immI1() %{
  predicate(n->get_int() == 1);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Constant for decrement
operand immI_M1() %{
  predicate(n->get_int() == -1);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Integer maximum (2^31 - 1)
operand immI_MaxI() %{
  predicate(n->get_int() == 2147483647);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Valid scale values for addressing modes
operand immI2() %{
  predicate(0 <= n->get_int() && (n->get_int() <= 3));
  match(ConI);

  format %{ %}
  interface(CONST_INTER);
%}

// 8-bit signed immediate
operand immI8() %{
  predicate((-128 <= n->get_int()) && (n->get_int() <= 127));
  match(ConI);

  op_cost(5);
  format %{ %}
  interface(CONST_INTER);
%}

// 16-bit signed immediate (fits MIPS 16-bit instruction immediates)
operand immI16() %{
  predicate((-32768 <= n->get_int()) && (n->get_int() <= 32767));
  match(ConI);

  op_cost(10);
  format %{ %}
  interface(CONST_INTER);
%}

// Constant for long shifts
operand immI_32() %{
  predicate( n->get_int() == 32 );
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

operand immI_63() %{
  predicate( n->get_int() == 63 );
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Shift amount in the 32-bit-shift range
operand immI_0_31() %{
  predicate( n->get_int() >= 0 && n->get_int() <= 31 );
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Operand for non-negative integer mask
operand immI_nonneg_mask() %{
  predicate( (n->get_int() >= 0) && (Assembler::is_int_mask(n->get_int()) != -1) );
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Shift amount in the 64-bit-shift-upper range
operand immI_32_63() %{
  predicate( n->get_int() >= 32 && n->get_int() <= 63 );
  match(ConI);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}

// 16-bit range whose NEGATION also fits 16 bits — presumably used so a
// subtract can be emitted as an add of the negated immediate; verify at
// the matching instruct definitions.
operand immI16_sub() %{
  predicate((-32767 <= n->get_int()) && (n->get_int() <= 32768));
  match(ConI);

  op_cost(10);
  format %{ %}
  interface(CONST_INTER);
%}

// Non-negative 15-bit immediate
operand immI_0_32767() %{
  predicate( n->get_int() >= 0 && n->get_int() <= 32767 );
  match(ConI);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}

// Non-negative 16-bit (unsigned) immediate
operand immI_0_65535() %{
  predicate( n->get_int() >= 0 && n->get_int() <= 65535 );
  match(ConI);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}

// NOTE(review): duplicates immI1 above — candidates for consolidation.
operand immI_1() %{
  predicate( n->get_int() == 1 );
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

operand immI_2() %{
  predicate( n->get_int() == 2 );
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

operand immI_3() %{
  predicate( n->get_int() == 3 );
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

operand immI_7() %{
  predicate( n->get_int() == 7 );
  match(ConI);

  format %{ %}
  interface(CONST_INTER);
%}

// Immediates for special shifts (sign extend)

// Constants for increment
operand immI_16() %{
  predicate( n->get_int() == 16 );
  match(ConI);

  format %{ %}
  interface(CONST_INTER);
%}

operand immI_24() %{
  predicate( n->get_int() == 24 );
  match(ConI);

  format %{ %}
  interface(CONST_INTER);
%}

// Constant for byte-wide masking
operand immI_255() %{
  predicate( n->get_int() == 255 );
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Constant for half-word-wide masking
operand immI_65535() %{
  predicate( n->get_int() == 65535 );
  match(ConI);

  op_cost(5);
  format %{ %}
  interface(CONST_INTER);
%}

operand immI_65536() %{
  predicate( n->get_int() == 65536 );
  match(ConI);

  op_cost(5);
  format %{ %}
  interface(CONST_INTER);
%}

operand immI_M65536() %{
  predicate( n->get_int() == -65536 );
  match(ConI);

  op_cost(5);
  format %{ %}
  interface(CONST_INTER);
%}
// Pointer Immediate
operand immP() %{
  match(ConP);

  op_cost(10);
  format %{ %}
  interface(CONST_INTER);
%}

// Pointer Immediate: fits in 31 bits and not relocatable
operand immP31()
%{
  predicate(n->as_Type()->type()->reloc() == relocInfo::none
            && (n->get_ptr() >> 31) == 0);
  match(ConP);

  op_cost(5);
  format %{ %}
  interface(CONST_INTER);
%}

// NULL Pointer Immediate
operand immP0() %{
  predicate( n->get_ptr() == 0 );
  match(ConP);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}

// Pointer Immediate: 64-bit
operand immP_set() %{
  match(ConP);

  op_cost(5);
  // formats are generated automatically for constants and base registers
  format %{ %}
  interface(CONST_INTER);
%}

// Pointer Immediate: 64-bit
// Either an oop (which must be loaded patchably) or a constant that is
// expensive to materialize (more than 3 instructions via set64).
operand immP_load() %{
  predicate(n->bottom_type()->isa_oop_ptr() || (MacroAssembler::insts_for_set64(n->get_ptr()) > 3));
  match(ConP);

  op_cost(5);
  // formats are generated automatically for constants and base registers
  format %{ %}
  interface(CONST_INTER);
%}

// Pointer Immediate: 64-bit
// Not an oop and cheap to materialize (at most 3 instructions via set64).
operand immP_no_oop_cheap() %{
  predicate(!n->bottom_type()->isa_oop_ptr() && (MacroAssembler::insts_for_set64(n->get_ptr()) <= 3));
  match(ConP);

  op_cost(5);
  // formats are generated automatically for constants and base registers
  format %{ %}
  interface(CONST_INTER);
%}

// Pointer for polling page
operand immP_poll() %{
  predicate(n->get_ptr() != 0 && n->get_ptr() == (intptr_t)os::get_polling_page());
  match(ConP);
  op_cost(5);

  format %{ %}
  interface(CONST_INTER);
%}

// Narrow (compressed oop) Pointer Immediate
operand immN() %{
  match(ConN);

  op_cost(10);
  format %{ %}
  interface(CONST_INTER);
%}

// Narrow (compressed) Klass Immediate
operand immNKlass() %{
  match(ConNKlass);

  op_cost(10);
  format %{ %}
  interface(CONST_INTER);
%}

// NULL narrow Pointer Immediate
operand immN0() %{
  predicate(n->get_narrowcon() == 0);
  match(ConN);

  op_cost(5);
  format %{ %}
  interface(CONST_INTER);
%}
// Long Immediate
operand immL() %{
  match(ConL);

  op_cost(20);
  format %{ %}
  interface(CONST_INTER);
%}

// Long Immediate zero
operand immL0() %{
  predicate( n->get_long() == 0L );
  match(ConL);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}

operand immL7() %{
  predicate( n->get_long() == 7L );
  match(ConL);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}

operand immL_M1() %{
  predicate( n->get_long() == -1L );
  match(ConL);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}

// bit 0..2 zero
operand immL_M8() %{
  predicate( n->get_long() == -8L );
  match(ConL);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}

// bit 2 zero
operand immL_M5() %{
  predicate( n->get_long() == -5L );
  match(ConL);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}

// bit 1..2 zero
operand immL_M7() %{
  predicate( n->get_long() == -7L );
  match(ConL);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}

// bit 0..1 zero
operand immL_M4() %{
  predicate( n->get_long() == -4L );
  match(ConL);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}

// bit 3..6 zero
operand immL_M121() %{
  predicate( n->get_long() == -121L );
  match(ConL);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}

// Long immediate from 0 to 127.
// Used for a shorter form of long mul by 10.
operand immL_127() %{
  predicate((0 <= n->get_long()) && (n->get_long() <= 127));
  match(ConL);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}

// Operand for non-negative long mask
operand immL_nonneg_mask() %{
  predicate( (n->get_long() >= 0) && (Assembler::is_jlong_mask(n->get_long()) != -1) );
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Non-negative 16-bit (unsigned) long immediate
operand immL_0_65535() %{
  predicate( n->get_long() >= 0 && n->get_long() <= 65535 );
  match(ConL);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}

// Long Immediate: cheap (materialize in <= 3 instructions)
operand immL_cheap() %{
  predicate(MacroAssembler::insts_for_set64(n->get_long()) <= 3);
  match(ConL);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}

// Long Immediate: expensive (materialize in > 3 instructions)
operand immL_expensive() %{
  predicate(MacroAssembler::insts_for_set64(n->get_long()) > 3);
  match(ConL);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}

// 16-bit signed long immediate
operand immL16() %{
  predicate((-32768 <= n->get_long()) && (n->get_long() <= 32767));
  match(ConL);

  op_cost(10);
  format %{ %}
  interface(CONST_INTER);
%}

// 16-bit range whose NEGATION also fits 16 bits (see immI16_sub).
operand immL16_sub() %{
  predicate((-32767 <= n->get_long()) && (n->get_long() <= 32768));
  match(ConL);

  op_cost(10);
  format %{ %}
  interface(CONST_INTER);
%}

// Long Immediate: low 32-bit mask
operand immL_32bits() %{
  predicate(n->get_long() == 0xFFFFFFFFL);
  match(ConL);
  op_cost(20);

  format %{ %}
  interface(CONST_INTER);
%}

// Long Immediate 32-bit signed
operand immL32()
%{
  predicate(n->get_long() == (int) (n->get_long()));
  match(ConL);

  op_cost(15);
  format %{ %}
  interface(CONST_INTER);
%}
// single-precision floating-point zero (compared by bit pattern, so -0.0f
// does NOT match — jint_cast of -0.0f is 0x80000000)
operand immF0() %{
  predicate(jint_cast(n->getf()) == 0);
  match(ConF);

  op_cost(5);
  format %{ %}
  interface(CONST_INTER);
%}

// single-precision floating-point immediate
operand immF() %{
  match(ConF);

  op_cost(20);
  format %{ %}
  interface(CONST_INTER);
%}

// double-precision floating-point zero (bit-pattern compare; -0.0 excluded)
operand immD0() %{
  predicate(jlong_cast(n->getd()) == 0);
  match(ConD);

  op_cost(5);
  format %{ %}
  interface(CONST_INTER);
%}

// double-precision floating-point immediate
operand immD() %{
  match(ConD);

  op_cost(20);
  format %{ %}
  interface(CONST_INTER);
%}
4383 // Register Operands
4384 // Integer Register
// mRegI: any allocatable integer register (int_reg class). The per-register
// operands below (mS0RegI..mV1RegI) each constrain allocation to a single
// named register; their extra match(mRegI) makes them usable anywhere an
// mRegI is accepted. The quoted format strings ("S0", "T1", ...) are only
// debug/print names.
4385 operand mRegI() %{
4386 constraint(ALLOC_IN_RC(int_reg));
4387 match(RegI);
4389 format %{ %}
4390 interface(REG_INTER);
4391 %}
// Integer register excluding the Ax registers (no_Ax_int_reg class).
4393 operand no_Ax_mRegI() %{
4394 constraint(ALLOC_IN_RC(no_Ax_int_reg));
4395 match(RegI);
4396 match(mRegI);
4398 format %{ %}
4399 interface(REG_INTER);
4400 %}
4402 operand mS0RegI() %{
4403 constraint(ALLOC_IN_RC(s0_reg));
4404 match(RegI);
4405 match(mRegI);
4407 format %{ "S0" %}
4408 interface(REG_INTER);
4409 %}
4411 operand mS1RegI() %{
4412 constraint(ALLOC_IN_RC(s1_reg));
4413 match(RegI);
4414 match(mRegI);
4416 format %{ "S1" %}
4417 interface(REG_INTER);
4418 %}
4420 operand mS2RegI() %{
4421 constraint(ALLOC_IN_RC(s2_reg));
4422 match(RegI);
4423 match(mRegI);
4425 format %{ "S2" %}
4426 interface(REG_INTER);
4427 %}
4429 operand mS3RegI() %{
4430 constraint(ALLOC_IN_RC(s3_reg));
4431 match(RegI);
4432 match(mRegI);
4434 format %{ "S3" %}
4435 interface(REG_INTER);
4436 %}
4438 operand mS4RegI() %{
4439 constraint(ALLOC_IN_RC(s4_reg));
4440 match(RegI);
4441 match(mRegI);
4443 format %{ "S4" %}
4444 interface(REG_INTER);
4445 %}
4447 operand mS5RegI() %{
4448 constraint(ALLOC_IN_RC(s5_reg));
4449 match(RegI);
4450 match(mRegI);
4452 format %{ "S5" %}
4453 interface(REG_INTER);
4454 %}
4456 operand mS6RegI() %{
4457 constraint(ALLOC_IN_RC(s6_reg));
4458 match(RegI);
4459 match(mRegI);
4461 format %{ "S6" %}
4462 interface(REG_INTER);
4463 %}
4465 operand mS7RegI() %{
4466 constraint(ALLOC_IN_RC(s7_reg));
4467 match(RegI);
4468 match(mRegI);
4470 format %{ "S7" %}
4471 interface(REG_INTER);
4472 %}
// Temporary registers T0-T3, T8, T9.
4475 operand mT0RegI() %{
4476 constraint(ALLOC_IN_RC(t0_reg));
4477 match(RegI);
4478 match(mRegI);
4480 format %{ "T0" %}
4481 interface(REG_INTER);
4482 %}
4484 operand mT1RegI() %{
4485 constraint(ALLOC_IN_RC(t1_reg));
4486 match(RegI);
4487 match(mRegI);
4489 format %{ "T1" %}
4490 interface(REG_INTER);
4491 %}
4493 operand mT2RegI() %{
4494 constraint(ALLOC_IN_RC(t2_reg));
4495 match(RegI);
4496 match(mRegI);
4498 format %{ "T2" %}
4499 interface(REG_INTER);
4500 %}
4502 operand mT3RegI() %{
4503 constraint(ALLOC_IN_RC(t3_reg));
4504 match(RegI);
4505 match(mRegI);
4507 format %{ "T3" %}
4508 interface(REG_INTER);
4509 %}
4511 operand mT8RegI() %{
4512 constraint(ALLOC_IN_RC(t8_reg));
4513 match(RegI);
4514 match(mRegI);
4516 format %{ "T8" %}
4517 interface(REG_INTER);
4518 %}
4520 operand mT9RegI() %{
4521 constraint(ALLOC_IN_RC(t9_reg));
4522 match(RegI);
4523 match(mRegI);
4525 format %{ "T9" %}
4526 interface(REG_INTER);
4527 %}
// Argument registers A0-A7.
4529 operand mA0RegI() %{
4530 constraint(ALLOC_IN_RC(a0_reg));
4531 match(RegI);
4532 match(mRegI);
4534 format %{ "A0" %}
4535 interface(REG_INTER);
4536 %}
4538 operand mA1RegI() %{
4539 constraint(ALLOC_IN_RC(a1_reg));
4540 match(RegI);
4541 match(mRegI);
4543 format %{ "A1" %}
4544 interface(REG_INTER);
4545 %}
4547 operand mA2RegI() %{
4548 constraint(ALLOC_IN_RC(a2_reg));
4549 match(RegI);
4550 match(mRegI);
4552 format %{ "A2" %}
4553 interface(REG_INTER);
4554 %}
4556 operand mA3RegI() %{
4557 constraint(ALLOC_IN_RC(a3_reg));
4558 match(RegI);
4559 match(mRegI);
4561 format %{ "A3" %}
4562 interface(REG_INTER);
4563 %}
4565 operand mA4RegI() %{
4566 constraint(ALLOC_IN_RC(a4_reg));
4567 match(RegI);
4568 match(mRegI);
4570 format %{ "A4" %}
4571 interface(REG_INTER);
4572 %}
4574 operand mA5RegI() %{
4575 constraint(ALLOC_IN_RC(a5_reg));
4576 match(RegI);
4577 match(mRegI);
4579 format %{ "A5" %}
4580 interface(REG_INTER);
4581 %}
4583 operand mA6RegI() %{
4584 constraint(ALLOC_IN_RC(a6_reg));
4585 match(RegI);
4586 match(mRegI);
4588 format %{ "A6" %}
4589 interface(REG_INTER);
4590 %}
4592 operand mA7RegI() %{
4593 constraint(ALLOC_IN_RC(a7_reg));
4594 match(RegI);
4595 match(mRegI);
4597 format %{ "A7" %}
4598 interface(REG_INTER);
4599 %}
// Return-value registers V0, V1.
4601 operand mV0RegI() %{
4602 constraint(ALLOC_IN_RC(v0_reg));
4603 match(RegI);
4604 match(mRegI);
4606 format %{ "V0" %}
4607 interface(REG_INTER);
4608 %}
4610 operand mV1RegI() %{
4611 constraint(ALLOC_IN_RC(v1_reg));
4612 match(RegI);
4613 match(mRegI);
4615 format %{ "V1" %}
4616 interface(REG_INTER);
4617 %}
// ---- Narrow-oop (compressed pointer) register operands -------------------
// mRegN: any integer register holding a compressed oop (RegN). The
// per-register variants below pin allocation to a single register; each
// extra match(mRegN) lets them be used wherever an mRegN is accepted.
4619 operand mRegN() %{
4620 constraint(ALLOC_IN_RC(int_reg));
4621 match(RegN);
4623 format %{ %}
4624 interface(REG_INTER);
4625 %}
4627 operand t0_RegN() %{
4628 constraint(ALLOC_IN_RC(t0_reg));
4629 match(RegN);
4630 match(mRegN);
4632 format %{ %}
4633 interface(REG_INTER);
4634 %}
4636 operand t1_RegN() %{
4637 constraint(ALLOC_IN_RC(t1_reg));
4638 match(RegN);
4639 match(mRegN);
4641 format %{ %}
4642 interface(REG_INTER);
4643 %}
4645 operand t2_RegN() %{
4646 constraint(ALLOC_IN_RC(t2_reg));
4647 match(RegN);
4648 match(mRegN);
4650 format %{ %}
4651 interface(REG_INTER);
4652 %}
4654 operand t3_RegN() %{
4655 constraint(ALLOC_IN_RC(t3_reg));
4656 match(RegN);
4657 match(mRegN);
4659 format %{ %}
4660 interface(REG_INTER);
4661 %}
4663 operand t8_RegN() %{
4664 constraint(ALLOC_IN_RC(t8_reg));
4665 match(RegN);
4666 match(mRegN);
4668 format %{ %}
4669 interface(REG_INTER);
4670 %}
4672 operand t9_RegN() %{
4673 constraint(ALLOC_IN_RC(t9_reg));
4674 match(RegN);
4675 match(mRegN);
4677 format %{ %}
4678 interface(REG_INTER);
4679 %}
4681 operand a0_RegN() %{
4682 constraint(ALLOC_IN_RC(a0_reg));
4683 match(RegN);
4684 match(mRegN);
4686 format %{ %}
4687 interface(REG_INTER);
4688 %}
4690 operand a1_RegN() %{
4691 constraint(ALLOC_IN_RC(a1_reg));
4692 match(RegN);
4693 match(mRegN);
4695 format %{ %}
4696 interface(REG_INTER);
4697 %}
4699 operand a2_RegN() %{
4700 constraint(ALLOC_IN_RC(a2_reg));
4701 match(RegN);
4702 match(mRegN);
4704 format %{ %}
4705 interface(REG_INTER);
4706 %}
4708 operand a3_RegN() %{
4709 constraint(ALLOC_IN_RC(a3_reg));
4710 match(RegN);
4711 match(mRegN);
4713 format %{ %}
4714 interface(REG_INTER);
4715 %}
4717 operand a4_RegN() %{
4718 constraint(ALLOC_IN_RC(a4_reg));
4719 match(RegN);
4720 match(mRegN);
4722 format %{ %}
4723 interface(REG_INTER);
4724 %}
4726 operand a5_RegN() %{
4727 constraint(ALLOC_IN_RC(a5_reg));
4728 match(RegN);
4729 match(mRegN);
4731 format %{ %}
4732 interface(REG_INTER);
4733 %}
4735 operand a6_RegN() %{
4736 constraint(ALLOC_IN_RC(a6_reg));
4737 match(RegN);
4738 match(mRegN);
4740 format %{ %}
4741 interface(REG_INTER);
4742 %}
4744 operand a7_RegN() %{
4745 constraint(ALLOC_IN_RC(a7_reg));
4746 match(RegN);
4747 match(mRegN);
4749 format %{ %}
4750 interface(REG_INTER);
4751 %}
4753 operand s0_RegN() %{
4754 constraint(ALLOC_IN_RC(s0_reg));
4755 match(RegN);
4756 match(mRegN);
4758 format %{ %}
4759 interface(REG_INTER);
4760 %}
4762 operand s1_RegN() %{
4763 constraint(ALLOC_IN_RC(s1_reg));
4764 match(RegN);
4765 match(mRegN);
4767 format %{ %}
4768 interface(REG_INTER);
4769 %}
4771 operand s2_RegN() %{
4772 constraint(ALLOC_IN_RC(s2_reg));
4773 match(RegN);
4774 match(mRegN);
4776 format %{ %}
4777 interface(REG_INTER);
4778 %}
4780 operand s3_RegN() %{
4781 constraint(ALLOC_IN_RC(s3_reg));
4782 match(RegN);
4783 match(mRegN);
4785 format %{ %}
4786 interface(REG_INTER);
4787 %}
4789 operand s4_RegN() %{
4790 constraint(ALLOC_IN_RC(s4_reg));
4791 match(RegN);
4792 match(mRegN);
4794 format %{ %}
4795 interface(REG_INTER);
4796 %}
4798 operand s5_RegN() %{
4799 constraint(ALLOC_IN_RC(s5_reg));
4800 match(RegN);
4801 match(mRegN);
4803 format %{ %}
4804 interface(REG_INTER);
4805 %}
4807 operand s6_RegN() %{
4808 constraint(ALLOC_IN_RC(s6_reg));
4809 match(RegN);
4810 match(mRegN);
4812 format %{ %}
4813 interface(REG_INTER);
4814 %}
4816 operand s7_RegN() %{
4817 constraint(ALLOC_IN_RC(s7_reg));
4818 match(RegN);
4819 match(mRegN);
4821 format %{ %}
4822 interface(REG_INTER);
4823 %}
4825 operand v0_RegN() %{
4826 constraint(ALLOC_IN_RC(v0_reg));
4827 match(RegN);
4828 match(mRegN);
4830 format %{ %}
4831 interface(REG_INTER);
4832 %}
4834 operand v1_RegN() %{
4835 constraint(ALLOC_IN_RC(v1_reg));
4836 match(RegN);
4837 match(mRegN);
4839 format %{ %}
4840 interface(REG_INTER);
4841 %}
4843 // Pointer Register
// mRegP: any allocatable pointer register (p_reg class). no_T8_mRegP
// excludes T8 (no_T8_p_reg class) — presumably because T8 is reserved as a
// scratch register in some instruction patterns; TODO confirm at use sites.
// Per-register variants pin allocation to a single register and also match
// mRegP / no_T8_mRegP so they can stand in for either.
4844 operand mRegP() %{
4845 constraint(ALLOC_IN_RC(p_reg));
4846 match(RegP);
4848 format %{ %}
4849 interface(REG_INTER);
4850 %}
4852 operand no_T8_mRegP() %{
4853 constraint(ALLOC_IN_RC(no_T8_p_reg));
4854 match(RegP);
4855 match(mRegP);
4857 format %{ %}
4858 interface(REG_INTER);
4859 %}
4861 operand s0_RegP()
4862 %{
4863 constraint(ALLOC_IN_RC(s0_long_reg));
4864 match(RegP);
4865 match(mRegP);
4866 match(no_T8_mRegP);
4868 format %{ %}
4869 interface(REG_INTER);
4870 %}
4872 operand s1_RegP()
4873 %{
4874 constraint(ALLOC_IN_RC(s1_long_reg));
4875 match(RegP);
4876 match(mRegP);
4877 match(no_T8_mRegP);
4879 format %{ %}
4880 interface(REG_INTER);
4881 %}
4883 operand s2_RegP()
4884 %{
4885 constraint(ALLOC_IN_RC(s2_long_reg));
4886 match(RegP);
4887 match(mRegP);
4888 match(no_T8_mRegP);
4890 format %{ %}
4891 interface(REG_INTER);
4892 %}
4894 operand s3_RegP()
4895 %{
4896 constraint(ALLOC_IN_RC(s3_long_reg));
4897 match(RegP);
4898 match(mRegP);
4899 match(no_T8_mRegP);
4901 format %{ %}
4902 interface(REG_INTER);
4903 %}
4905 operand s4_RegP()
4906 %{
4907 constraint(ALLOC_IN_RC(s4_long_reg));
4908 match(RegP);
4909 match(mRegP);
4910 match(no_T8_mRegP);
4912 format %{ %}
4913 interface(REG_INTER);
4914 %}
4916 operand s5_RegP()
4917 %{
4918 constraint(ALLOC_IN_RC(s5_long_reg));
4919 match(RegP);
4920 match(mRegP);
4921 match(no_T8_mRegP);
4923 format %{ %}
4924 interface(REG_INTER);
4925 %}
4927 operand s6_RegP()
4928 %{
4929 constraint(ALLOC_IN_RC(s6_long_reg));
4930 match(RegP);
4931 match(mRegP);
4932 match(no_T8_mRegP);
4934 format %{ %}
4935 interface(REG_INTER);
4936 %}
4938 operand s7_RegP()
4939 %{
4940 constraint(ALLOC_IN_RC(s7_long_reg));
4941 match(RegP);
4942 match(mRegP);
4943 match(no_T8_mRegP);
4945 format %{ %}
4946 interface(REG_INTER);
4947 %}
4949 operand t0_RegP()
4950 %{
4951 constraint(ALLOC_IN_RC(t0_long_reg));
4952 match(RegP);
4953 match(mRegP);
4954 match(no_T8_mRegP);
4956 format %{ %}
4957 interface(REG_INTER);
4958 %}
4960 operand t1_RegP()
4961 %{
4962 constraint(ALLOC_IN_RC(t1_long_reg));
4963 match(RegP);
4964 match(mRegP);
4965 match(no_T8_mRegP);
4967 format %{ %}
4968 interface(REG_INTER);
4969 %}
4971 operand t2_RegP()
4972 %{
4973 constraint(ALLOC_IN_RC(t2_long_reg));
4974 match(RegP);
4975 match(mRegP);
4976 match(no_T8_mRegP);
4978 format %{ %}
4979 interface(REG_INTER);
4980 %}
4982 operand t3_RegP()
4983 %{
4984 constraint(ALLOC_IN_RC(t3_long_reg));
4985 match(RegP);
4986 match(mRegP);
4987 match(no_T8_mRegP);
4989 format %{ %}
4990 interface(REG_INTER);
4991 %}
// NOTE: t8_RegP intentionally has no match(no_T8_mRegP) — T8 is the register
// that class excludes.
4993 operand t8_RegP()
4994 %{
4995 constraint(ALLOC_IN_RC(t8_long_reg));
4996 match(RegP);
4997 match(mRegP);
4999 format %{ %}
5000 interface(REG_INTER);
5001 %}
5003 operand t9_RegP()
5004 %{
5005 constraint(ALLOC_IN_RC(t9_long_reg));
5006 match(RegP);
5007 match(mRegP);
5008 match(no_T8_mRegP);
5010 format %{ %}
5011 interface(REG_INTER);
5012 %}
5014 operand a0_RegP()
5015 %{
5016 constraint(ALLOC_IN_RC(a0_long_reg));
5017 match(RegP);
5018 match(mRegP);
5019 match(no_T8_mRegP);
5021 format %{ %}
5022 interface(REG_INTER);
5023 %}
5025 operand a1_RegP()
5026 %{
5027 constraint(ALLOC_IN_RC(a1_long_reg));
5028 match(RegP);
5029 match(mRegP);
5030 match(no_T8_mRegP);
5032 format %{ %}
5033 interface(REG_INTER);
5034 %}
5036 operand a2_RegP()
5037 %{
5038 constraint(ALLOC_IN_RC(a2_long_reg));
5039 match(RegP);
5040 match(mRegP);
5041 match(no_T8_mRegP);
5043 format %{ %}
5044 interface(REG_INTER);
5045 %}
5047 operand a3_RegP()
5048 %{
5049 constraint(ALLOC_IN_RC(a3_long_reg));
5050 match(RegP);
5051 match(mRegP);
5052 match(no_T8_mRegP);
5054 format %{ %}
5055 interface(REG_INTER);
5056 %}
5058 operand a4_RegP()
5059 %{
5060 constraint(ALLOC_IN_RC(a4_long_reg));
5061 match(RegP);
5062 match(mRegP);
5063 match(no_T8_mRegP);
5065 format %{ %}
5066 interface(REG_INTER);
5067 %}
5070 operand a5_RegP()
5071 %{
5072 constraint(ALLOC_IN_RC(a5_long_reg));
5073 match(RegP);
5074 match(mRegP);
5075 match(no_T8_mRegP);
5077 format %{ %}
5078 interface(REG_INTER);
5079 %}
5081 operand a6_RegP()
5082 %{
5083 constraint(ALLOC_IN_RC(a6_long_reg));
5084 match(RegP);
5085 match(mRegP);
5086 match(no_T8_mRegP);
5088 format %{ %}
5089 interface(REG_INTER);
5090 %}
5092 operand a7_RegP()
5093 %{
5094 constraint(ALLOC_IN_RC(a7_long_reg));
5095 match(RegP);
5096 match(mRegP);
5097 match(no_T8_mRegP);
5099 format %{ %}
5100 interface(REG_INTER);
5101 %}
5103 operand v0_RegP()
5104 %{
5105 constraint(ALLOC_IN_RC(v0_long_reg));
5106 match(RegP);
5107 match(mRegP);
5108 match(no_T8_mRegP);
5110 format %{ %}
5111 interface(REG_INTER);
5112 %}
5114 operand v1_RegP()
5115 %{
5116 constraint(ALLOC_IN_RC(v1_long_reg));
5117 match(RegP);
5118 match(mRegP);
5119 match(no_T8_mRegP);
5121 format %{ %}
5122 interface(REG_INTER);
5123 %}
// The SP/FP pointer operands below are disabled (commented out in the
// original source); kept for reference.
5125 /*
5126 operand mSPRegP(mRegP reg) %{
5127 constraint(ALLOC_IN_RC(sp_reg));
5128 match(reg);
5130 format %{ "SP" %}
5131 interface(REG_INTER);
5132 %}
5134 operand mFPRegP(mRegP reg) %{
5135 constraint(ALLOC_IN_RC(fp_reg));
5136 match(reg);
5138 format %{ "FP" %}
5139 interface(REG_INTER);
5140 %}
5141 */
// ---- Long register operands ----------------------------------------------
// mRegL: any allocatable 64-bit register (long_reg class); per-register
// variants pin a single register. NOTE(review): s5RegL/s6RegL are absent —
// presumably those registers are reserved for VM-internal use (e.g. thread
// or heap-base register); confirm against the register definitions above.
5143 operand mRegL() %{
5144 constraint(ALLOC_IN_RC(long_reg));
5145 match(RegL);
5147 format %{ %}
5148 interface(REG_INTER);
5149 %}
5151 operand v0RegL() %{
5152 constraint(ALLOC_IN_RC(v0_long_reg));
5153 match(RegL);
5154 match(mRegL);
5156 format %{ %}
5157 interface(REG_INTER);
5158 %}
5160 operand v1RegL() %{
5161 constraint(ALLOC_IN_RC(v1_long_reg));
5162 match(RegL);
5163 match(mRegL);
5165 format %{ %}
5166 interface(REG_INTER);
5167 %}
// NOTE(review): a0RegL is the only member of this family with a non-empty
// format string ("A0"); siblings use an empty format. Debug-print
// inconsistency only — harmless, but worth normalizing someday.
5169 operand a0RegL() %{
5170 constraint(ALLOC_IN_RC(a0_long_reg));
5171 match(RegL);
5172 match(mRegL);
5174 format %{ "A0" %}
5175 interface(REG_INTER);
5176 %}
5178 operand a1RegL() %{
5179 constraint(ALLOC_IN_RC(a1_long_reg));
5180 match(RegL);
5181 match(mRegL);
5183 format %{ %}
5184 interface(REG_INTER);
5185 %}
5187 operand a2RegL() %{
5188 constraint(ALLOC_IN_RC(a2_long_reg));
5189 match(RegL);
5190 match(mRegL);
5192 format %{ %}
5193 interface(REG_INTER);
5194 %}
5196 operand a3RegL() %{
5197 constraint(ALLOC_IN_RC(a3_long_reg));
5198 match(RegL);
5199 match(mRegL);
5201 format %{ %}
5202 interface(REG_INTER);
5203 %}
5205 operand t0RegL() %{
5206 constraint(ALLOC_IN_RC(t0_long_reg));
5207 match(RegL);
5208 match(mRegL);
5210 format %{ %}
5211 interface(REG_INTER);
5212 %}
5214 operand t1RegL() %{
5215 constraint(ALLOC_IN_RC(t1_long_reg));
5216 match(RegL);
5217 match(mRegL);
5219 format %{ %}
5220 interface(REG_INTER);
5221 %}
5223 operand t2RegL() %{
5224 constraint(ALLOC_IN_RC(t2_long_reg));
5225 match(RegL);
5226 match(mRegL);
5228 format %{ %}
5229 interface(REG_INTER);
5230 %}
5232 operand t3RegL() %{
5233 constraint(ALLOC_IN_RC(t3_long_reg));
5234 match(RegL);
5235 match(mRegL);
5237 format %{ %}
5238 interface(REG_INTER);
5239 %}
5241 operand t8RegL() %{
5242 constraint(ALLOC_IN_RC(t8_long_reg));
5243 match(RegL);
5244 match(mRegL);
5246 format %{ %}
5247 interface(REG_INTER);
5248 %}
5250 operand a4RegL() %{
5251 constraint(ALLOC_IN_RC(a4_long_reg));
5252 match(RegL);
5253 match(mRegL);
5255 format %{ %}
5256 interface(REG_INTER);
5257 %}
5259 operand a5RegL() %{
5260 constraint(ALLOC_IN_RC(a5_long_reg));
5261 match(RegL);
5262 match(mRegL);
5264 format %{ %}
5265 interface(REG_INTER);
5266 %}
5268 operand a6RegL() %{
5269 constraint(ALLOC_IN_RC(a6_long_reg));
5270 match(RegL);
5271 match(mRegL);
5273 format %{ %}
5274 interface(REG_INTER);
5275 %}
5277 operand a7RegL() %{
5278 constraint(ALLOC_IN_RC(a7_long_reg));
5279 match(RegL);
5280 match(mRegL);
5282 format %{ %}
5283 interface(REG_INTER);
5284 %}
5286 operand s0RegL() %{
5287 constraint(ALLOC_IN_RC(s0_long_reg));
5288 match(RegL);
5289 match(mRegL);
5291 format %{ %}
5292 interface(REG_INTER);
5293 %}
5295 operand s1RegL() %{
5296 constraint(ALLOC_IN_RC(s1_long_reg));
5297 match(RegL);
5298 match(mRegL);
5300 format %{ %}
5301 interface(REG_INTER);
5302 %}
5304 operand s2RegL() %{
5305 constraint(ALLOC_IN_RC(s2_long_reg));
5306 match(RegL);
5307 match(mRegL);
5309 format %{ %}
5310 interface(REG_INTER);
5311 %}
5313 operand s3RegL() %{
5314 constraint(ALLOC_IN_RC(s3_long_reg));
5315 match(RegL);
5316 match(mRegL);
5318 format %{ %}
5319 interface(REG_INTER);
5320 %}
5322 operand s4RegL() %{
5323 constraint(ALLOC_IN_RC(s4_long_reg));
5324 match(RegL);
5325 match(mRegL);
5327 format %{ %}
5328 interface(REG_INTER);
5329 %}
5331 operand s7RegL() %{
5332 constraint(ALLOC_IN_RC(s7_long_reg));
5333 match(RegL);
5334 match(mRegL);
5336 format %{ %}
5337 interface(REG_INTER);
5338 %}
5340 // Floating register operands
5341 operand regF() %{
5342 constraint(ALLOC_IN_RC(flt_reg));
5343 match(RegF);
5345 format %{ %}
5346 interface(REG_INTER);
5347 %}
5349 //Double Precision Floating register operands
5350 operand regD() %{
5351 constraint(ALLOC_IN_RC(dbl_reg));
5352 match(RegD);
5354 format %{ %}
5355 interface(REG_INTER);
5356 %}
5358 //----------Memory Operands----------------------------------------------------
// Each memory operand matches an address-computation subtree (AddP chains,
// optionally through DecodeN/DecodeNKlass for compressed oops/klasses) and
// exposes it to the matcher via MEMORY_INTER as base/index/scale/disp.
// The Narrow* variants require narrow_oop_shift()/narrow_klass_shift() == 0
// so the compressed value can be used directly as a base address.
5359 // Indirect Memory Operand
5360 operand indirect(mRegP reg) %{
5361 constraint(ALLOC_IN_RC(p_reg));
5362 match(reg);
5364 format %{ "[$reg] @ indirect" %}
5365 interface(MEMORY_INTER) %{
5366 base($reg);
5367 index(0x0); /* NO_INDEX */
5368 scale(0x0);
5369 disp(0x0);
5370 %}
5371 %}
5373 // Indirect Memory Plus Short Offset Operand
5374 operand indOffset8(mRegP reg, immL8 off)
5375 %{
5376 constraint(ALLOC_IN_RC(p_reg));
5377 match(AddP reg off);
5379 op_cost(10);
5380 format %{ "[$reg + $off (8-bit)] @ indOffset8" %}
5381 interface(MEMORY_INTER) %{
5382 base($reg);
5383 index(0x0); /* NO_INDEX */
5384 scale(0x0);
5385 disp($off);
5386 %}
5387 %}
5389 // Indirect Memory Times Scale Plus Index Register
5390 operand indIndexScale(mRegP reg, mRegL lreg, immI2 scale)
5391 %{
5392 constraint(ALLOC_IN_RC(p_reg));
5393 match(AddP reg (LShiftL lreg scale));
5395 op_cost(10);
5396 format %{"[$reg + $lreg << $scale] @ indIndexScale" %}
5397 interface(MEMORY_INTER) %{
5398 base($reg);
5399 index($lreg);
5400 scale($scale);
5401 disp(0x0);
5402 %}
5403 %}
5406 // [base + index + offset]
5407 operand baseIndexOffset8(mRegP base, mRegL index, immL8 off)
5408 %{
5409 constraint(ALLOC_IN_RC(p_reg));
5410 op_cost(5);
5411 match(AddP (AddP base index) off);
5413 format %{ "[$base + $index + $off (8-bit)] @ baseIndexOffset8" %}
5414 interface(MEMORY_INTER) %{
5415 base($base);
5416 index($index);
5417 scale(0x0);
5418 disp($off);
5419 %}
5420 %}
5422 // [base + index + offset]
// Same as baseIndexOffset8 but the index arrives as an int widened by
// ConvI2L in the ideal graph.
5423 operand baseIndexOffset8_convI2L(mRegP base, mRegI index, immL8 off)
5424 %{
5425 constraint(ALLOC_IN_RC(p_reg));
5426 op_cost(5);
5427 match(AddP (AddP base (ConvI2L index)) off);
5429 format %{ "[$base + $index + $off (8-bit)] @ baseIndexOffset8_convI2L" %}
5430 interface(MEMORY_INTER) %{
5431 base($base);
5432 index($index);
5433 scale(0x0);
5434 disp($off);
5435 %}
5436 %}
5438 // Indirect Memory Times Scale Plus Index Register Plus Offset Operand
5439 operand indIndexScaleOffset8(mRegP reg, immL8 off, mRegL lreg, immI2 scale)
5440 %{
5441 constraint(ALLOC_IN_RC(p_reg));
5442 match(AddP (AddP reg (LShiftL lreg scale)) off);
5444 op_cost(10);
5445 format %{"[$reg + $off + $lreg << $scale] @ indIndexScaleOffset8" %}
5446 interface(MEMORY_INTER) %{
5447 base($reg);
5448 index($lreg);
5449 scale($scale);
5450 disp($off);
5451 %}
5452 %}
5454 operand indIndexScaleOffset8_convI2L(mRegP reg, immL8 off, mRegI ireg, immI2 scale)
5455 %{
5456 constraint(ALLOC_IN_RC(p_reg));
5457 match(AddP (AddP reg (LShiftL (ConvI2L ireg) scale)) off);
5459 op_cost(10);
5460 format %{"[$reg + $off + $ireg << $scale] @ indIndexScaleOffset8_convI2L" %}
5461 interface(MEMORY_INTER) %{
5462 base($reg);
5463 index($ireg);
5464 scale($scale);
5465 disp($off);
5466 %}
5467 %}
5469 // [base + index<<scale + offset]
5470 operand basePosIndexScaleOffset8(mRegP base, mRegI index, immL8 off, immI_0_31 scale)
5471 %{
5472 constraint(ALLOC_IN_RC(p_reg));
5473 //predicate(n->in(2)->in(3)->in(1)->as_Type()->type()->is_long()->_lo >= 0);
5474 op_cost(10);
5475 match(AddP (AddP base (LShiftL (ConvI2L index) scale)) off);
5477 format %{ "[$base + $index << $scale + $off (8-bit)] @ basePosIndexScaleOffset8" %}
5478 interface(MEMORY_INTER) %{
5479 base($base);
5480 index($index);
5481 scale($scale);
5482 disp($off);
5483 %}
5484 %}
5486 // Indirect Memory Times Scale Plus Index Register Plus Offset Operand
5487 operand indIndexScaleOffsetNarrow(mRegN reg, immL8 off, mRegL lreg, immI2 scale)
5488 %{
5489 predicate(Universe::narrow_oop_shift() == 0);
5490 constraint(ALLOC_IN_RC(p_reg));
5491 match(AddP (AddP (DecodeN reg) (LShiftL lreg scale)) off);
5493 op_cost(10);
5494 format %{"[$reg + $off + $lreg << $scale] @ indIndexScaleOffsetNarrow" %}
5495 interface(MEMORY_INTER) %{
5496 base($reg);
5497 index($lreg);
5498 scale($scale);
5499 disp($off);
5500 %}
5501 %}
5503 // [base + index<<scale + offset] for compressd Oops
5504 operand indPosIndexI2LScaleOffset8Narrow(mRegN base, mRegI index, immL8 off, immI_0_31 scale)
5505 %{
5506 constraint(ALLOC_IN_RC(p_reg));
5507 //predicate(Universe::narrow_oop_shift() == 0 && n->in(2)->in(3)->in(1)->as_Type()->type()->is_long()->_lo >= 0);
5508 predicate(Universe::narrow_oop_shift() == 0);
5509 op_cost(10);
5510 match(AddP (AddP (DecodeN base) (LShiftL (ConvI2L index) scale)) off);
5512 format %{ "[$base + $index << $scale + $off (8-bit)] @ indPosIndexI2LScaleOffset8Narrow" %}
5513 interface(MEMORY_INTER) %{
5514 base($base);
5515 index($index);
5516 scale($scale);
5517 disp($off);
5518 %}
5519 %}
5521 //FIXME: I think it's better to limit the immI to be 16-bit at most!
5522 // Indirect Memory Plus Long Offset Operand
5523 operand indOffset32(mRegP reg, immL32 off) %{
5524 constraint(ALLOC_IN_RC(p_reg));
5525 op_cost(20);
5526 match(AddP reg off);
5528 format %{ "[$reg + $off (32-bit)] @ indOffset32" %}
5529 interface(MEMORY_INTER) %{
5530 base($reg);
5531 index(0x0); /* NO_INDEX */
5532 scale(0x0);
5533 disp($off);
5534 %}
5535 %}
5537 // Indirect Memory Plus Index Register
5538 operand indIndex(mRegP addr, mRegL index) %{
5539 constraint(ALLOC_IN_RC(p_reg));
5540 match(AddP addr index);
5542 op_cost(20);
5543 format %{"[$addr + $index] @ indIndex" %}
5544 interface(MEMORY_INTER) %{
5545 base($addr);
5546 index($index);
5547 scale(0x0);
5548 disp(0x0);
5549 %}
5550 %}
// Compressed-klass addressing forms (DecodeNKlass); valid only when the
// klass shift is zero so the narrow value is the address itself.
5552 operand indirectNarrowKlass(mRegN reg)
5553 %{
5554 predicate(Universe::narrow_klass_shift() == 0);
5555 constraint(ALLOC_IN_RC(p_reg));
5556 op_cost(10);
5557 match(DecodeNKlass reg);
5559 format %{ "[$reg] @ indirectNarrowKlass" %}
5560 interface(MEMORY_INTER) %{
5561 base($reg);
5562 index(0x0);
5563 scale(0x0);
5564 disp(0x0);
5565 %}
5566 %}
5568 operand indOffset8NarrowKlass(mRegN reg, immL8 off)
5569 %{
5570 predicate(Universe::narrow_klass_shift() == 0);
5571 constraint(ALLOC_IN_RC(p_reg));
5572 op_cost(10);
5573 match(AddP (DecodeNKlass reg) off);
5575 format %{ "[$reg + $off (8-bit)] @ indOffset8NarrowKlass" %}
5576 interface(MEMORY_INTER) %{
5577 base($reg);
5578 index(0x0);
5579 scale(0x0);
5580 disp($off);
5581 %}
5582 %}
5584 operand indOffset32NarrowKlass(mRegN reg, immL32 off)
5585 %{
5586 predicate(Universe::narrow_klass_shift() == 0);
5587 constraint(ALLOC_IN_RC(p_reg));
5588 op_cost(10);
5589 match(AddP (DecodeNKlass reg) off);
5591 format %{ "[$reg + $off (32-bit)] @ indOffset32NarrowKlass" %}
5592 interface(MEMORY_INTER) %{
5593 base($reg);
5594 index(0x0);
5595 scale(0x0);
5596 disp($off);
5597 %}
5598 %}
5600 operand indIndexOffsetNarrowKlass(mRegN reg, mRegL lreg, immL32 off)
5601 %{
5602 predicate(Universe::narrow_klass_shift() == 0);
5603 constraint(ALLOC_IN_RC(p_reg));
5604 match(AddP (AddP (DecodeNKlass reg) lreg) off);
5606 op_cost(10);
5607 format %{"[$reg + $off + $lreg] @ indIndexOffsetNarrowKlass" %}
5608 interface(MEMORY_INTER) %{
5609 base($reg);
5610 index($lreg);
5611 scale(0x0);
5612 disp($off);
5613 %}
5614 %}
5616 operand indIndexNarrowKlass(mRegN reg, mRegL lreg)
5617 %{
5618 predicate(Universe::narrow_klass_shift() == 0);
5619 constraint(ALLOC_IN_RC(p_reg));
5620 match(AddP (DecodeNKlass reg) lreg);
5622 op_cost(10);
5623 format %{"[$reg + $lreg] @ indIndexNarrowKlass" %}
5624 interface(MEMORY_INTER) %{
5625 base($reg);
5626 index($lreg);
5627 scale(0x0);
5628 disp(0x0);
5629 %}
5630 %}
5632 // Indirect Memory Operand
// Compressed-oop addressing forms (DecodeN); valid only when the oop shift
// is zero.
5633 operand indirectNarrow(mRegN reg)
5634 %{
5635 predicate(Universe::narrow_oop_shift() == 0);
5636 constraint(ALLOC_IN_RC(p_reg));
5637 op_cost(10);
5638 match(DecodeN reg);
5640 format %{ "[$reg] @ indirectNarrow" %}
5641 interface(MEMORY_INTER) %{
5642 base($reg);
5643 index(0x0);
5644 scale(0x0);
5645 disp(0x0);
5646 %}
5647 %}
5649 // Indirect Memory Plus Short Offset Operand
5650 operand indOffset8Narrow(mRegN reg, immL8 off)
5651 %{
5652 predicate(Universe::narrow_oop_shift() == 0);
5653 constraint(ALLOC_IN_RC(p_reg));
5654 op_cost(10);
5655 match(AddP (DecodeN reg) off);
5657 format %{ "[$reg + $off (8-bit)] @ indOffset8Narrow" %}
5658 interface(MEMORY_INTER) %{
5659 base($reg);
5660 index(0x0);
5661 scale(0x0);
5662 disp($off);
5663 %}
5664 %}
5666 // Indirect Memory Plus Index Register Plus Offset Operand
5667 operand indIndexOffset8Narrow(mRegN reg, mRegL lreg, immL8 off)
5668 %{
5669 predicate(Universe::narrow_oop_shift() == 0);
5670 constraint(ALLOC_IN_RC(p_reg));
5671 match(AddP (AddP (DecodeN reg) lreg) off);
5673 op_cost(10);
5674 format %{"[$reg + $off + $lreg] @ indIndexOffset8Narrow" %}
5675 interface(MEMORY_INTER) %{
5676 base($reg);
5677 index($lreg);
5678 scale(0x0);
5679 disp($off);
5680 %}
5681 %}
5683 //----------Load Long Memory Operands------------------------------------------
5684 // The load-long idiom will use it's address expression again after loading
5685 // the first word of the long. If the load-long destination overlaps with
5686 // registers used in the addressing expression, the 2nd half will be loaded
5687 // from a clobbered address. Fix this by requiring that load-long use
5688 // address registers that do not overlap with the load-long target.
5690 // load-long support
// Pointer register for load-long addressing; op_cost(100) steers the
// allocator away from it unless needed (see overlap hazard above).
5691 operand load_long_RegP() %{
5692 constraint(ALLOC_IN_RC(p_reg));
5693 match(RegP);
5694 match(mRegP);
5695 op_cost(100);
5696 format %{ %}
5697 interface(REG_INTER);
5698 %}
5700 // Indirect Memory Operand Long
5701 operand load_long_indirect(load_long_RegP reg) %{
5702 constraint(ALLOC_IN_RC(p_reg));
5703 match(reg);
5705 format %{ "[$reg]" %}
5706 interface(MEMORY_INTER) %{
5707 base($reg);
5708 index(0x0);
5709 scale(0x0);
5710 disp(0x0);
5711 %}
5712 %}
5714 // Indirect Memory Plus Long Offset Operand
5715 operand load_long_indOffset32(load_long_RegP reg, immL32 off) %{
5716 match(AddP reg off);
5718 format %{ "[$reg + $off]" %}
5719 interface(MEMORY_INTER) %{
5720 base($reg);
5721 index(0x0);
5722 scale(0x0);
5723 disp($off);
5724 %}
5725 %}
5727 //----------Conditional Branch Operands----------------------------------------
5728 // Comparison Op - This is the operation of the comparison, and is limited to
5729 // the following set of codes:
5730 // L (<), LE (<=), G (>), GE (>=), E (==), NE (!=)
5731 //
5732 // Other attributes of the comparison, such as unsignedness, are specified
5733 // by the comparison instruction that sets a condition code flags register.
5734 // That result is represented by a flags operand whose subtype is appropriate
5735 // to the unsignedness (etc.) of the comparison.
5736 //
5737 // Later, the instruction which matches both the Comparison Op (a Bool) and
5738 // the flags (produced by the Cmp) specifies the coding of the comparison op
5739 // by matching a specific subtype of Bool operand below, such as cmpOpU.
5741 // Comparision Code
5742 operand cmpOp() %{
5743 match(Bool);
5745 format %{ "" %}
5746 interface(COND_INTER) %{
5747 equal(0x01);
5748 not_equal(0x02);
5749 greater(0x03);
5750 greater_equal(0x04);
5751 less(0x05);
5752 less_equal(0x06);
5753 overflow(0x7);
5754 no_overflow(0x8);
5755 %}
5756 %}
5759 // Comparision Code
5760 // Comparison Code, unsigned compare. Used by FP also, with
5761 // C2 (unordered) turned into GT or LT already. The other bits
5762 // C0 and C3 are turned into Carry & Zero flags.
// NOTE: cmpOpU uses the same encodings as cmpOp; on this port the
// signed/unsigned distinction is carried by the comparing instruction,
// not by the condition encoding (see the header comment above).
5763 operand cmpOpU() %{
5764 match(Bool);
5766 format %{ "" %}
5767 interface(COND_INTER) %{
5768 equal(0x01);
5769 not_equal(0x02);
5770 greater(0x03);
5771 greater_equal(0x04);
5772 less(0x05);
5773 less_equal(0x06);
5774 overflow(0x7);
5775 no_overflow(0x8);
5776 %}
5777 %}
// The alternative cmpOpU / cmpOp_fcmov / cmpOp_commute definitions below
// are disabled (commented out in the original source); kept for reference.
5779 /*
5780 // Comparison Code, unsigned compare. Used by FP also, with
5781 // C2 (unordered) turned into GT or LT already. The other bits
5782 // C0 and C3 are turned into Carry & Zero flags.
5783 operand cmpOpU() %{
5784 match(Bool);
5786 format %{ "" %}
5787 interface(COND_INTER) %{
5788 equal(0x4);
5789 not_equal(0x5);
5790 less(0x2);
5791 greater_equal(0x3);
5792 less_equal(0x6);
5793 greater(0x7);
5794 %}
5795 %}
5796 */
5797 /*
5798 // Comparison Code for FP conditional move
5799 operand cmpOp_fcmov() %{
5800 match(Bool);
5802 format %{ "" %}
5803 interface(COND_INTER) %{
5804 equal (0x01);
5805 not_equal (0x02);
5806 greater (0x03);
5807 greater_equal(0x04);
5808 less (0x05);
5809 less_equal (0x06);
5810 %}
5811 %}
5813 // Comparision Code used in long compares
5814 operand cmpOp_commute() %{
5815 match(Bool);
5817 format %{ "" %}
5818 interface(COND_INTER) %{
5819 equal(0x4);
5820 not_equal(0x5);
5821 less(0xF);
5822 greater_equal(0xE);
5823 less_equal(0xD);
5824 greater(0xC);
5825 %}
5826 %}
5827 */
5829 //----------Special Memory Operands--------------------------------------------
5830 // Stack Slot Operand - This operand is used for loading and storing temporary
5831 // values on the stack where a match requires a value to
5832 // flow through memory.
// All five stack-slot operands share the same shape: SP-based (base 0x1d),
// no index/scale, with the slot's offset as displacement. They differ only
// in the ideal type of the spilled value (P/I/F/D/L).
5833 operand stackSlotP(sRegP reg) %{
5834 constraint(ALLOC_IN_RC(stack_slots));
5835 // No match rule because this operand is only generated in matching
5836 op_cost(50);
5837 format %{ "[$reg]" %}
5838 interface(MEMORY_INTER) %{
5839 base(0x1d); // SP
5840 index(0x0); // No Index
5841 scale(0x0); // No Scale
5842 disp($reg); // Stack Offset
5843 %}
5844 %}
5846 operand stackSlotI(sRegI reg) %{
5847 constraint(ALLOC_IN_RC(stack_slots));
5848 // No match rule because this operand is only generated in matching
5849 op_cost(50);
5850 format %{ "[$reg]" %}
5851 interface(MEMORY_INTER) %{
5852 base(0x1d); // SP
5853 index(0x0); // No Index
5854 scale(0x0); // No Scale
5855 disp($reg); // Stack Offset
5856 %}
5857 %}
5859 operand stackSlotF(sRegF reg) %{
5860 constraint(ALLOC_IN_RC(stack_slots));
5861 // No match rule because this operand is only generated in matching
5862 op_cost(50);
5863 format %{ "[$reg]" %}
5864 interface(MEMORY_INTER) %{
5865 base(0x1d); // SP
5866 index(0x0); // No Index
5867 scale(0x0); // No Scale
5868 disp($reg); // Stack Offset
5869 %}
5870 %}
5872 operand stackSlotD(sRegD reg) %{
5873 constraint(ALLOC_IN_RC(stack_slots));
5874 // No match rule because this operand is only generated in matching
5875 op_cost(50);
5876 format %{ "[$reg]" %}
5877 interface(MEMORY_INTER) %{
5878 base(0x1d); // SP
5879 index(0x0); // No Index
5880 scale(0x0); // No Scale
5881 disp($reg); // Stack Offset
5882 %}
5883 %}
5885 operand stackSlotL(sRegL reg) %{
5886 constraint(ALLOC_IN_RC(stack_slots));
5887 // No match rule because this operand is only generated in matching
5888 op_cost(50);
5889 format %{ "[$reg]" %}
5890 interface(MEMORY_INTER) %{
5891 base(0x1d); // SP
5892 index(0x0); // No Index
5893 scale(0x0); // No Scale
5894 disp($reg); // Stack Offset
5895 %}
5896 %}
5899 //------------------------OPERAND CLASSES--------------------------------------
// The 'memory' opclass groups every addressing-mode operand defined above so
// instruction patterns can accept any of them interchangeably.
5900 //opclass memory( direct, indirect, indOffset16, indOffset32, indOffset32X, indIndexOffset );
5901 opclass memory( indirect, indirectNarrow, indOffset8, indOffset32, indIndex, indIndexScale, load_long_indirect, load_long_indOffset32, baseIndexOffset8, baseIndexOffset8_convI2L, indIndexScaleOffset8, indIndexScaleOffset8_convI2L, basePosIndexScaleOffset8, indIndexScaleOffsetNarrow, indPosIndexI2LScaleOffset8Narrow, indOffset8Narrow, indIndexOffset8Narrow);
5904 //----------PIPELINE-----------------------------------------------------------
5905 // Rules which define the behavior of the target architectures pipeline.
5907 pipeline %{
5909 //----------ATTRIBUTES---------------------------------------------------------
5910 attributes %{
5911 fixed_size_instructions; // Fixed size instructions
5912 branch_has_delay_slot; // branches have a delay slot on gs2
5913 max_instructions_per_bundle = 1; // 1 instruction per bundle
5914 max_bundles_per_cycle = 4; // Up to 4 bundles per cycle
5915 bundle_unit_size=4;
5916 instruction_unit_size = 4; // An instruction is 4 bytes long
5917 instruction_fetch_unit_size = 16; // The processor fetches one line
5918 instruction_fetch_units = 1; // of 16 bytes
5920 // List of nop instructions
5921 nops( MachNop );
5922 %}
5924 //----------RESOURCES----------------------------------------------------------
5925 // Resources are the functional units available to the machine
5927 resources(D1, D2, D3, D4, DECODE = D1 | D2 | D3| D4, ALU1, ALU2, ALU = ALU1 | ALU2, FPU1, FPU2, FPU = FPU1 | FPU2, MEM, BR);
5929 //----------PIPELINE DESCRIPTION-----------------------------------------------
5930 // Pipeline Description specifies the stages in the machine's pipeline
5932 // IF: fetch
5933 // ID: decode
5934 // RD: read
5935 // CA: calculate
5936 // WB: write back
5937 // CM: commit
5939 pipe_desc(IF, ID, RD, CA, WB, CM);
5942 //----------PIPELINE CLASSES---------------------------------------------------
5943 // Pipeline Classes describe the stages in which input and output are
5944 // referenced by the hardware pipeline.
5946 //No.1 Integer ALU reg-reg operation : dst <-- reg1 op reg2
5947 pipe_class ialu_regI_regI(mRegI dst, mRegI src1, mRegI src2) %{
5948 single_instruction;
5949 src1 : RD(read);
5950 src2 : RD(read);
5951 dst : WB(write)+1;
5952 DECODE : ID;
5953 ALU : CA;
5954 %}
5956 //No.19 Integer mult operation : dst <-- reg1 mult reg2
5957 pipe_class ialu_mult(mRegI dst, mRegI src1, mRegI src2) %{
5958 src1 : RD(read);
5959 src2 : RD(read);
5960 dst : WB(write)+5;
5961 DECODE : ID;
5962 ALU2 : CA;
5963 %}
// Long multiply: same shape as ialu_mult but with a longer result latency
// (+10 cycles past WB) and executed only on ALU2.
5965 pipe_class mulL_reg_reg(mRegL dst, mRegL src1, mRegL src2) %{
5966 src1 : RD(read);
5967 src2 : RD(read);
5968 dst : WB(write)+10;
5969 DECODE : ID;
5970 ALU2 : CA;
5971 %}
5973 //No.19 Integer div operation : dst <-- reg1 div reg2
5974 pipe_class ialu_div(mRegI dst, mRegI src1, mRegI src2) %{
5975 src1 : RD(read);
5976 src2 : RD(read);
5977 dst : WB(write)+10;
5978 DECODE : ID;
5979 ALU2 : CA;
5980 %}
5982 //No.19 Integer mod operation : dst <-- reg1 mod reg2
5983 pipe_class ialu_mod(mRegI dst, mRegI src1, mRegI src2) %{
5984 instruction_count(2);
5985 src1 : RD(read);
5986 src2 : RD(read);
5987 dst : WB(write)+10;
5988 DECODE : ID;
5989 ALU2 : CA;
5990 %}
5992 //No.15 Long ALU reg-reg operation : dst <-- reg1 op reg2
5993 pipe_class ialu_regL_regL(mRegL dst, mRegL src1, mRegL src2) %{
5994 instruction_count(2);
5995 src1 : RD(read);
5996 src2 : RD(read);
5997 dst : WB(write);
5998 DECODE : ID;
5999 ALU : CA;
6000 %}
6002 //No.18 Long ALU reg-imm16 operation : dst <-- reg1 op imm16
6003 pipe_class ialu_regL_imm16(mRegL dst, mRegL src) %{
6004 instruction_count(2);
6005 src : RD(read);
6006 dst : WB(write);
6007 DECODE : ID;
6008 ALU : CA;
6009 %}
6011 //no.16 load Long from memory :
6012 pipe_class ialu_loadL(mRegL dst, memory mem) %{
6013 instruction_count(2);
6014 mem : RD(read);
6015 dst : WB(write)+5;
6016 DECODE : ID;
6017 MEM : RD;
6018 %}
6020 //No.17 Store Long to Memory :
6021 pipe_class ialu_storeL(mRegL src, memory mem) %{
6022 instruction_count(2);
6023 mem : RD(read);
6024 src : RD(read);
6025 DECODE : ID;
6026 MEM : RD;
6027 %}
6029 //No.2 Integer ALU reg-imm16 operation : dst <-- reg1 op imm16
6030 pipe_class ialu_regI_imm16(mRegI dst, mRegI src) %{
6031 single_instruction;
6032 src : RD(read);
6033 dst : WB(write);
6034 DECODE : ID;
6035 ALU : CA;
6036 %}
6038 //No.3 Integer move operation : dst <-- reg
6039 pipe_class ialu_regI_mov(mRegI dst, mRegI src) %{
6040 src : RD(read);
6041 dst : WB(write);
6042 DECODE : ID;
6043 ALU : CA;
6044 %}
6046 //No.4 No instructions : do nothing
6047 pipe_class empty( ) %{
6048 instruction_count(0);
6049 %}
6051 //No.5 UnConditional branch :
6052 pipe_class pipe_jump( label labl ) %{
6053 multiple_bundles;
6054 DECODE : ID;
6055 BR : RD;
6056 %}
6058 //No.6 ALU Conditional branch :
6059 pipe_class pipe_alu_branch(mRegI src1, mRegI src2, label labl ) %{
6060 multiple_bundles;
6061 src1 : RD(read);
6062 src2 : RD(read);
6063 DECODE : ID;
6064 BR : RD;
6065 %}
6067 //no.7 load integer from memory :
6068 pipe_class ialu_loadI(mRegI dst, memory mem) %{
6069 mem : RD(read);
6070 dst : WB(write)+3;
6071 DECODE : ID;
6072 MEM : RD;
6073 %}
6075 //No.8 Store Integer to Memory :
6076 pipe_class ialu_storeI(mRegI src, memory mem) %{
6077 mem : RD(read);
6078 src : RD(read);
6079 DECODE : ID;
6080 MEM : RD;
6081 %}
6084 //No.10 Floating FPU reg-reg operation : dst <-- reg1 op reg2
6085 pipe_class fpu_regF_regF(regF dst, regF src1, regF src2) %{
6086 src1 : RD(read);
6087 src2 : RD(read);
6088 dst : WB(write);
6089 DECODE : ID;
6090 FPU : CA;
6091 %}
6093 //No.22 Floating div operation : dst <-- reg1 div reg2
6094 pipe_class fpu_div(regF dst, regF src1, regF src2) %{
6095 src1 : RD(read);
6096 src2 : RD(read);
6097 dst : WB(write);
6098 DECODE : ID;
6099 FPU2 : CA;
6100 %}
// Int -> double conversion; issues on FPU1 only.
6102 pipe_class fcvt_I2D(regD dst, mRegI src) %{
6103 src : RD(read);
6104 dst : WB(write);
6105 DECODE : ID;
6106 FPU1 : CA;
6107 %}
// Double -> int conversion; issues on FPU1 only.
6109 pipe_class fcvt_D2I(mRegI dst, regD src) %{
6110 src : RD(read);
6111 dst : WB(write);
6112 DECODE : ID;
6113 FPU1 : CA;
6114 %}
// FPR -> GPR move (mfc1-style); modeled on the MEM unit.
6116 pipe_class pipe_mfc1(mRegI dst, regD src) %{
6117 src : RD(read);
6118 dst : WB(write);
6119 DECODE : ID;
6120 MEM : RD;
6121 %}
// GPR -> FPR move (mtc1-style); occupies MEM for 5 cycles from RD.
6123 pipe_class pipe_mtc1(regD dst, mRegI src) %{
6124 src : RD(read);
6125 dst : WB(write);
6126 DECODE : ID;
6127 MEM : RD(5);
6128 %}
6130 //No.23 Floating sqrt operation : dst <-- reg1 sqrt reg2
6131 pipe_class fpu_sqrt(regF dst, regF src1, regF src2) %{
6132 multiple_bundles;
6133 src1 : RD(read);
6134 src2 : RD(read);
6135 dst : WB(write);
6136 DECODE : ID;
6137 FPU2 : CA;
6138 %}
6140 //No.11 Load Floating from Memory :
6141 pipe_class fpu_loadF(regF dst, memory mem) %{
6142 instruction_count(1);
6143 mem : RD(read);
6144 dst : WB(write)+3;
6145 DECODE : ID;
6146 MEM : RD;
6147 %}
6149 //No.12 Store Floating to Memory :
6150 pipe_class fpu_storeF(regF src, memory mem) %{
6151 instruction_count(1);
6152 mem : RD(read);
6153 src : RD(read);
6154 DECODE : ID;
6155 MEM : RD;
6156 %}
6158 //No.13 FPU Conditional branch :
6159 pipe_class pipe_fpu_branch(regF src1, regF src2, label labl ) %{
6160 multiple_bundles;
6161 src1 : RD(read);
6162 src2 : RD(read);
6163 DECODE : ID;
6164 BR : RD;
6165 %}
6167 //No.14 Floating FPU reg operation : dst <-- op reg
6168 pipe_class fpu1_regF(regF dst, regF src) %{
6169 src : RD(read);
6170 dst : WB(write);
6171 DECODE : ID;
6172 FPU : CA;
6173 %}
// Multi-instruction memory sequence (e.g. atomics); serializes the pipe.
6175 pipe_class long_memory_op() %{
6176 instruction_count(10); multiple_bundles; force_serialization;
6177 fixed_latency(30);
6178 %}
// Call through the branch unit; high fixed latency, fully serialized.
6180 pipe_class simple_call() %{
6181 instruction_count(10); multiple_bundles; force_serialization;
6182 fixed_latency(200);
6183 BR : RD;
6184 %}
// Generic call; like simple_call but does not claim the BR unit.
6186 pipe_class call() %{
6187 instruction_count(10); multiple_bundles; force_serialization;
6188 fixed_latency(200);
6189 %}
6191 //FIXME:
6192 //No.9 Pipe slow : for multi-instructions
6193 pipe_class pipe_slow( ) %{
6194 instruction_count(20);
6195 force_serialization;
6196 multiple_bundles;
6197 fixed_latency(50);
6198 %}
6200 %}
6204 //----------INSTRUCTIONS-------------------------------------------------------
6205 //
6206 // match -- States which machine-independent subtree may be replaced
6207 // by this instruction.
6208 // ins_cost -- The estimated cost of this instruction is used by instruction
6209 // selection to identify a minimum cost tree of machine
6210 // instructions that matches a tree of machine-independent
6211 // instructions.
6212 // format -- A string providing the disassembly for this instruction.
6213 // The value of an instruction's operand may be inserted
6214 // by referring to it with a '$' prefix.
6215 // opcode -- Three instruction opcodes may be provided. These are referred
6216 // to within an encode class as $primary, $secondary, and $tertiary
6217 // respectively. The primary opcode is commonly used to
6218 // indicate the type of machine instruction, while secondary
6219 // and tertiary are often used for prefix options or addressing
6220 // modes.
6221 // ins_encode -- A list of encode classes with parameters. The encode class
6222 // name must have been defined in an 'enc_class' specification
6223 // in the encode section of the architecture description.
6226 // Load Integer
6227 instruct loadI(mRegI dst, memory mem) %{
6228 match(Set dst (LoadI mem));
6230 ins_cost(125);
6231 format %{ "lw $dst, $mem #@loadI" %}
6232 ins_encode (load_I_enc(dst, mem));
6233 ins_pipe( ialu_loadI );
6234 %}
// ConvI2L folded into the load: the same lw encoding is used as loadI,
// so the int->long widening costs no extra instruction.
6236 instruct loadI_convI2L(mRegL dst, memory mem) %{
6237 match(Set dst (ConvI2L (LoadI mem)));
6239 ins_cost(125);
6240 format %{ "lw $dst, $mem #@loadI_convI2L" %}
6241 ins_encode (load_I_enc(dst, mem));
6242 ins_pipe( ialu_loadI );
6243 %}
6245 // Load Integer (32 bit signed) to Byte (8 bit signed)
// Matches (x << 24) >> 24 of a loaded int and replaces it with one lb.
6246 instruct loadI2B(mRegI dst, memory mem, immI_24 twentyfour) %{
6247 match(Set dst (RShiftI (LShiftI (LoadI mem) twentyfour) twentyfour));
6249 ins_cost(125);
6250 format %{ "lb $dst, $mem\t# int -> byte #@loadI2B" %}
6251 ins_encode(load_B_enc(dst, mem));
6252 ins_pipe(ialu_loadI);
6253 %}
6255 // Load Integer (32 bit signed) to Unsigned Byte (8 bit UNsigned)
// Matches (load & 0xFF) and replaces it with one lbu.
6256 instruct loadI2UB(mRegI dst, memory mem, immI_255 mask) %{
6257 match(Set dst (AndI (LoadI mem) mask));
6259 ins_cost(125);
6260 format %{ "lbu $dst, $mem\t# int -> ubyte #@loadI2UB" %}
6261 ins_encode(load_UB_enc(dst, mem));
6262 ins_pipe(ialu_loadI);
6263 %}
6265 // Load Integer (32 bit signed) to Short (16 bit signed)
// Matches (x << 16) >> 16 of a loaded int and replaces it with one lh.
6266 instruct loadI2S(mRegI dst, memory mem, immI_16 sixteen) %{
6267 match(Set dst (RShiftI (LShiftI (LoadI mem) sixteen) sixteen));
6269 ins_cost(125);
6270 format %{ "lh $dst, $mem\t# int -> short #@loadI2S" %}
6271 ins_encode(load_S_enc(dst, mem));
6272 ins_pipe(ialu_loadI);
6273 %}
6275 // Load Integer (32 bit signed) to Unsigned Short/Char (16 bit UNsigned)
// Matches (load & 0xFFFF) and replaces it with one lhu.
6276 instruct loadI2US(mRegI dst, memory mem, immI_65535 mask) %{
6277 match(Set dst (AndI (LoadI mem) mask));
6279 ins_cost(125);
6280 format %{ "lhu $dst, $mem\t# int -> ushort/char #@loadI2US" %}
6281 ins_encode(load_C_enc(dst, mem));
6282 ins_pipe(ialu_loadI);
6283 %}
6285 // Load Long.
6286 instruct loadL(mRegL dst, memory mem) %{
6287 // predicate(!((LoadLNode*)n)->require_atomic_access());
6288 match(Set dst (LoadL mem));
6290 ins_cost(250);
6291 format %{ "ld $dst, $mem #@loadL" %}
6292 ins_encode(load_L_enc(dst, mem));
6293 ins_pipe( ialu_loadL );
6294 %}
6296 // Load Long - UNaligned
// NOTE(review): same encoding as loadL, only costed higher; the unaligned
// case presumably relies on the plain ld path — see the FIXME below.
6297 instruct loadL_unaligned(mRegL dst, memory mem) %{
6298 match(Set dst (LoadL_unaligned mem));
6300 // FIXME: Jin: Need more effective ldl/ldr
6301 ins_cost(450);
6302 format %{ "ld $dst, $mem #@loadL_unaligned\n\t" %}
6303 ins_encode(load_L_enc(dst, mem));
6304 ins_pipe( ialu_loadL );
6305 %}
6307 // Store Long
6308 instruct storeL_reg(memory mem, mRegL src) %{
6309 match(Set mem (StoreL mem src));
6311 ins_cost(200);
6312 format %{ "sd $mem, $src #@storeL_reg\n" %}
6313 ins_encode(store_L_reg_enc(mem, src));
6314 ins_pipe( ialu_storeL );
6315 %}
// Store of long zero: cheaper (cost 180) since no constant materialization
// is needed — the zero register is stored directly.
6317 instruct storeL_immL0(memory mem, immL0 zero) %{
6318 match(Set mem (StoreL mem zero));
6320 ins_cost(180);
6321 format %{ "sd zero, $mem #@storeL_immL0" %}
6322 ins_encode(store_L_immL0_enc(mem, zero));
6323 ins_pipe( ialu_storeL );
6324 %}
// Store of a general long immediate; the constant is materialized by the
// store_L_immL_enc encoding.
6326 instruct storeL_imm(memory mem, immL src) %{
6327 match(Set mem (StoreL mem src));
6329 ins_cost(200);
6330 format %{ "sd $src, $mem #@storeL_imm" %}
6331 ins_encode(store_L_immL_enc(mem, src));
6332 ins_pipe( ialu_storeL );
6333 %}
6335 // Load Compressed Pointer
6336 instruct loadN(mRegN dst, memory mem)
6337 %{
6338 match(Set dst (LoadN mem));
6340 ins_cost(125); // XXX
6341 format %{ "lwu $dst, $mem\t# compressed ptr @ loadN" %}
6342 ins_encode (load_N_enc(dst, mem));
6343 ins_pipe( ialu_loadI ); // XXX
6344 %}
// Load + decode of a compressed oop in one instruction. Only legal when the
// heap base is NULL and the shift is 0, i.e. when decoding is a no-op and
// lwu (zero-extending load) already yields the full pointer.
6346 instruct loadN2P(mRegP dst, memory mem)
6347 %{
6348 match(Set dst (DecodeN (LoadN mem)));
6349 predicate(Universe::narrow_oop_base() == NULL && Universe::narrow_oop_shift() == 0);
6351 ins_cost(125); // XXX
6352 format %{ "lwu $dst, $mem\t# @ loadN2P" %}
6353 ins_encode (load_N_enc(dst, mem));
6354 ins_pipe( ialu_loadI ); // XXX
6355 %}
6357 // Load Pointer
6358 instruct loadP(mRegP dst, memory mem) %{
6359 match(Set dst (LoadP mem));
6361 ins_cost(125);
6362 format %{ "ld $dst, $mem #@loadP" %}
6363 ins_encode (load_P_enc(dst, mem));
6364 ins_pipe( ialu_loadI );
6365 %}
6367 // Load Klass Pointer
6368 instruct loadKlass(mRegP dst, memory mem) %{
6369 match(Set dst (LoadKlass mem));
6371 ins_cost(125);
6372 format %{ "MOV $dst,$mem @ loadKlass" %}
6373 ins_encode (load_P_enc(dst, mem));
6374 ins_pipe( ialu_loadI );
6375 %}
6377 // Load narrow Klass Pointer
6378 instruct loadNKlass(mRegN dst, memory mem)
6379 %{
6380 match(Set dst (LoadNKlass mem));
6382 ins_cost(125); // XXX
6383 format %{ "lwu $dst, $mem\t# compressed klass ptr @ loadNKlass" %}
6384 ins_encode (load_N_enc(dst, mem));
6385 ins_pipe( ialu_loadI ); // XXX
6386 %}
// Load + decode of a compressed klass pointer in one instruction; legal only
// when klass decoding is a no-op (base NULL, shift 0), mirroring loadN2P.
6388 instruct loadN2PKlass(mRegP dst, memory mem)
6389 %{
6390 match(Set dst (DecodeNKlass (LoadNKlass mem)));
6391 predicate(Universe::narrow_klass_base() == NULL && Universe::narrow_klass_shift() == 0);
6393 ins_cost(125); // XXX
6394 format %{ "lwu $dst, $mem\t# compressed klass ptr @ loadN2PKlass" %}
6395 ins_encode (load_N_enc(dst, mem));
6396 ins_pipe( ialu_loadI ); // XXX
6397 %}
6399 // Load Constant
6400 instruct loadConI(mRegI dst, immI src) %{
6401 match(Set dst src);
6403 ins_cost(150);
6404 format %{ "mov $dst, $src #@loadConI" %}
6405 ins_encode %{
6406 Register dst = $dst$$Register;
6407 int value = $src$$constant;
6408 __ move(dst, value);
6409 %}
6410 ins_pipe( ialu_regI_regI );
6411 %}
// Materialize an arbitrary 64-bit long constant via set64.
6414 instruct loadConL_set64(mRegL dst, immL src) %{
6415 match(Set dst src);
6416 ins_cost(120);
6417 format %{ "li $dst, $src @ loadConL_set64" %}
6418 ins_encode %{
6419 __ set64($dst$$Register, $src$$constant);
6420 %}
6421 ins_pipe(ialu_regL_regL);
6422 %}
6424 /*
6425 // Load long value from constant table (predicated by immL_expensive).
6426 instruct loadConL_load(mRegL dst, immL_expensive src) %{
6427 match(Set dst src);
6428 ins_cost(150);
6429 format %{ "ld $dst, $constantoffset[$constanttablebase] # load long $src from table @ loadConL_ldx" %}
6430 ins_encode %{
6431 int con_offset = $constantoffset($src);
6433 if (Assembler::is_simm16(con_offset)) {
6434 __ ld($dst$$Register, $constanttablebase, con_offset);
6435 } else {
6436 __ set64(AT, con_offset);
6437 if (UseLoongsonISA) {
6438 __ gsldx($dst$$Register, $constanttablebase, AT, 0);
6439 } else {
6440 __ daddu(AT, $constanttablebase, AT);
6441 __ ld($dst$$Register, AT, 0);
6442 }
6443 }
6444 %}
6445 ins_pipe(ialu_loadI);
6446 %}
6447 */
// Long constant that fits in a signed 16-bit immediate: a single daddiu
// from the zero register, cheaper than the general set64 path.
6449 instruct loadConL16(mRegL dst, immL16 src) %{
6450 match(Set dst src);
6451 ins_cost(105);
6452 format %{ "mov $dst, $src #@loadConL16" %}
6453 ins_encode %{
6454 Register dst_reg = as_Register($dst$$reg);
6455 int value = $src$$constant;
6456 __ daddiu(dst_reg, R0, value);
6457 %}
6458 ins_pipe( ialu_regL_regL );
6459 %}
// Long constant zero: cheapest form, just a move from the zero register.
6462 instruct loadConL0(mRegL dst, immL0 src) %{
6463 match(Set dst src);
6464 ins_cost(100);
6465 format %{ "mov $dst, zero #@loadConL0" %}
6466 ins_encode %{
6467 Register dst_reg = as_Register($dst$$reg);
6468 __ daddu(dst_reg, R0, R0);
6469 %}
6470 ins_pipe( ialu_regL_regL );
6471 %}
6473 // Load Range
6474 instruct loadRange(mRegI dst, memory mem) %{
6475 match(Set dst (LoadRange mem));
6477 ins_cost(125);
6478 format %{ "MOV $dst,$mem @ loadRange" %}
6479 ins_encode(load_I_enc(dst, mem));
6480 ins_pipe( ialu_loadI );
6481 %}
// Store Pointer
6484 instruct storeP(memory mem, mRegP src ) %{
6485 match(Set mem (StoreP mem src));
6487 ins_cost(125);
6488 format %{ "sd $src, $mem #@storeP" %}
6489 ins_encode(store_P_reg_enc(mem, src));
6490 ins_pipe( ialu_storeI );
6491 %}
6493 // Store NULL Pointer, mark word, or other simple pointer constant.
6494 instruct storeImmP0(memory mem, immP0 zero) %{
6495 match(Set mem (StoreP mem zero));
6497 ins_cost(125);
6498 format %{ "mov $mem, $zero #@storeImmP0" %}
6499 ins_encode(store_P_immP0_enc(mem));
6500 ins_pipe( ialu_storeI );
6501 %}
6503 // Store NULL Pointer, mark word, or other simple pointer constant.
6504 instruct storeImmP(memory mem, immP31 src) %{
6505 match(Set mem (StoreP mem src));
6507 ins_cost(150);
6508 format %{ "mov $mem, $src #@storeImmP" %}
6509 ins_encode(store_P_immP_enc(mem, src));
6510 ins_pipe( ialu_storeI );
6511 %}
6513 // Store Byte Immediate
6514 instruct storeImmB(memory mem, immI8 src) %{
6515 match(Set mem (StoreB mem src));
6517 ins_cost(150);
6518 format %{ "movb $mem, $src #@storeImmB" %}
6519 ins_encode(store_B_immI_enc(mem, src));
6520 ins_pipe( ialu_storeI );
6521 %}
6523 // Store Compressed Pointer
6524 instruct storeN(memory mem, mRegN src)
6525 %{
6526 match(Set mem (StoreN mem src));
6528 ins_cost(125); // XXX
6529 format %{ "sw $mem, $src\t# compressed ptr @ storeN" %}
6530 ins_encode(store_N_reg_enc(mem, src));
6531 ins_pipe( ialu_storeI );
6532 %}
// Encode + store folded into one sw; legal only when oop encoding is a
// no-op (heap base NULL, shift 0) so the low 32 bits are the narrow oop.
6534 instruct storeP2N(memory mem, mRegP src)
6535 %{
6536 match(Set mem (StoreN mem (EncodeP src)));
6537 predicate(Universe::narrow_oop_base() == NULL && Universe::narrow_oop_shift() == 0);
6539 ins_cost(125); // XXX
6540 format %{ "sw $mem, $src\t# @ storeP2N" %}
6541 ins_encode(store_N_reg_enc(mem, src));
6542 ins_pipe( ialu_storeI );
6543 %}
6545 instruct storeNKlass(memory mem, mRegN src)
6546 %{
6547 match(Set mem (StoreNKlass mem src));
6549 ins_cost(125); // XXX
6550 format %{ "sw $mem, $src\t# compressed klass ptr @ storeNKlass" %}
6551 ins_encode(store_N_reg_enc(mem, src));
6552 ins_pipe( ialu_storeI );
6553 %}
// Klass-encode + store folded into one sw; mirrors storeP2N for klasses.
6555 instruct storeP2NKlass(memory mem, mRegP src)
6556 %{
6557 match(Set mem (StoreNKlass mem (EncodePKlass src)));
6558 predicate(Universe::narrow_klass_base() == NULL && Universe::narrow_klass_shift() == 0);
6560 ins_cost(125); // XXX
6561 format %{ "sw $mem, $src\t# @ storeP2NKlass" %}
6562 ins_encode(store_N_reg_enc(mem, src));
6563 ins_pipe( ialu_storeI );
6564 %}
// Store compressed NULL: zero register stored directly, no materialization.
6566 instruct storeImmN0(memory mem, immN0 zero)
6567 %{
6568 match(Set mem (StoreN mem zero));
6570 ins_cost(125); // XXX
6571 format %{ "storeN0 zero, $mem\t# compressed ptr" %}
6572 ins_encode(storeImmN0_enc(mem, zero));
6573 ins_pipe( ialu_storeI );
6574 %}
6576 instruct storeImmN(memory mem, immN src)
6577 %{
6578 match(Set mem (StoreN mem src));
6580 ins_cost(150);
6581 format %{ "storeImmN $mem, $src\t# compressed ptr @ storeImmN" %}
6582 ins_encode(storeImmN_enc(mem, src));
6583 ins_pipe( ialu_storeI );
6584 %}
6586 instruct storeImmNKlass(memory mem, immNKlass src)
6587 %{
6588 match(Set mem (StoreNKlass mem src));
6590 ins_cost(150); // XXX
6591 format %{ "sw $mem, $src\t# compressed klass ptr @ storeImmNKlass" %}
6592 ins_encode(storeImmNKlass_enc(mem, src));
6593 ins_pipe( ialu_storeI );
6594 %}
6596 // Store Byte
6597 instruct storeB(memory mem, mRegI src) %{
6598 match(Set mem (StoreB mem src));
6600 ins_cost(125);
6601 format %{ "sb $src, $mem #@storeB" %}
6602 ins_encode(store_B_reg_enc(mem, src));
6603 ins_pipe( ialu_storeI );
6604 %}
// ConvL2I folded into the byte store: sb only writes the low byte, so the
// narrowing conversion needs no extra instruction.
6606 instruct storeB_convL2I(memory mem, mRegL src) %{
6607 match(Set mem (StoreB mem (ConvL2I src)));
6609 ins_cost(125);
6610 format %{ "sb $src, $mem #@storeB_convL2I" %}
6611 ins_encode(store_B_reg_enc(mem, src));
6612 ins_pipe( ialu_storeI );
6613 %}
6615 // Load Byte (8bit signed)
6616 instruct loadB(mRegI dst, memory mem) %{
6617 match(Set dst (LoadB mem));
6619 ins_cost(125);
6620 format %{ "lb $dst, $mem #@loadB" %}
6621 ins_encode(load_B_enc(dst, mem));
6622 ins_pipe( ialu_loadI );
6623 %}
// Byte load with ConvI2L folded in (same lb encoding as loadB).
6625 instruct loadB_convI2L(mRegL dst, memory mem) %{
6626 match(Set dst (ConvI2L (LoadB mem)));
6628 ins_cost(125);
6629 format %{ "lb $dst, $mem #@loadB_convI2L" %}
6630 ins_encode(load_B_enc(dst, mem));
6631 ins_pipe( ialu_loadI );
6632 %}
6634 // Load Byte (8bit UNsigned)
6635 instruct loadUB(mRegI dst, memory mem) %{
6636 match(Set dst (LoadUB mem));
6638 ins_cost(125);
6639 format %{ "lbu $dst, $mem #@loadUB" %}
6640 ins_encode(load_UB_enc(dst, mem));
6641 ins_pipe( ialu_loadI );
6642 %}
// Unsigned byte load with ConvI2L folded in (same lbu encoding as loadUB).
6644 instruct loadUB_convI2L(mRegL dst, memory mem) %{
6645 match(Set dst (ConvI2L (LoadUB mem)));
6647 ins_cost(125);
6648 format %{ "lbu $dst, $mem #@loadUB_convI2L" %}
6649 ins_encode(load_UB_enc(dst, mem));
6650 ins_pipe( ialu_loadI );
6651 %}
6653 // Load Short (16bit signed)
6654 instruct loadS(mRegI dst, memory mem) %{
6655 match(Set dst (LoadS mem));
6657 ins_cost(125);
6658 format %{ "lh $dst, $mem #@loadS" %}
6659 ins_encode(load_S_enc(dst, mem));
6660 ins_pipe( ialu_loadI );
6661 %}
6663 // Load Short (16 bit signed) to Byte (8 bit signed)
// Matches (x << 24) >> 24 of a loaded short and replaces it with one lb.
6664 instruct loadS2B(mRegI dst, memory mem, immI_24 twentyfour) %{
6665 match(Set dst (RShiftI (LShiftI (LoadS mem) twentyfour) twentyfour));
6667 ins_cost(125);
6668 format %{ "lb $dst, $mem\t# short -> byte #@loadS2B" %}
6669 ins_encode(load_B_enc(dst, mem));
6670 ins_pipe(ialu_loadI);
6671 %}
// Short load with ConvI2L folded in (same lh encoding as loadS).
6673 instruct loadS_convI2L(mRegL dst, memory mem) %{
6674 match(Set dst (ConvI2L (LoadS mem)));
6676 ins_cost(125);
6677 format %{ "lh $dst, $mem #@loadS_convI2L" %}
6678 ins_encode(load_S_enc(dst, mem));
6679 ins_pipe( ialu_loadI );
6680 %}
6682 // Store Integer Immediate
6683 instruct storeImmI(memory mem, immI src) %{
6684 match(Set mem (StoreI mem src));
6686 ins_cost(150);
6687 format %{ "mov $mem, $src #@storeImmI" %}
6688 ins_encode(store_I_immI_enc(mem, src));
6689 ins_pipe( ialu_storeI );
6690 %}
6692 // Store Integer
6693 instruct storeI(memory mem, mRegI src) %{
6694 match(Set mem (StoreI mem src));
6696 ins_cost(125);
6697 format %{ "sw $mem, $src #@storeI" %}
6698 ins_encode(store_I_reg_enc(mem, src));
6699 ins_pipe( ialu_storeI );
6700 %}
// ConvL2I folded into the int store: sw writes the low 32 bits only.
6702 instruct storeI_convL2I(memory mem, mRegL src) %{
6703 match(Set mem (StoreI mem (ConvL2I src)));
6705 ins_cost(125);
6706 format %{ "sw $mem, $src #@storeI_convL2I" %}
6707 ins_encode(store_I_reg_enc(mem, src));
6708 ins_pipe( ialu_storeI );
6709 %}
6711 // Load Float
6712 instruct loadF(regF dst, memory mem) %{
6713 match(Set dst (LoadF mem));
6715 ins_cost(150);
6716 format %{ "loadF $dst, $mem #@loadF" %}
6717 ins_encode(load_F_enc(dst, mem));
6718 ins_pipe( ialu_loadI );
6719 %}
// Materialize a pointer constant, emitting relocation info when the
// constant is an oop or metadata pointer so the GC / class redefinition
// machinery can later patch the embedded value.
6721 instruct loadConP_general(mRegP dst, immP src) %{
6722 match(Set dst src);
6724 ins_cost(120);
6725 format %{ "li $dst, $src #@loadConP_general" %}
6727 ins_encode %{
6728 Register dst = $dst$$Register;
6729 long* value = (long*)$src$$constant;
6731 if($src->constant_reloc() == relocInfo::metadata_type){
6732 int klass_index = __ oop_recorder()->find_index((Klass*)value);
6733 RelocationHolder rspec = metadata_Relocation::spec(klass_index);
6735 __ relocate(rspec);
6736 __ patchable_set48(dst, (long)value);
6737 }else if($src->constant_reloc() == relocInfo::oop_type){
6738 int oop_index = __ oop_recorder()->find_index((jobject)value);
6739 RelocationHolder rspec = oop_Relocation::spec(oop_index);
6741 __ relocate(rspec);
6742 __ patchable_set48(dst, (long)value);
6743 } else if ($src->constant_reloc() == relocInfo::none) {
6744 __ set64(dst, (long)value);
6745 }
6746 %}
6748 ins_pipe( ialu_regI_regI );
6749 %}
6751 /*
6752 instruct loadConP_load(mRegP dst, immP_load src) %{
6753 match(Set dst src);
6755 ins_cost(100);
6756 format %{ "ld $dst, [$constanttablebase + $constantoffset] load from constant table: ptr=$src @ loadConP_load" %}
6758 ins_encode %{
6760 int con_offset = $constantoffset($src);
6762 if (Assembler::is_simm16(con_offset)) {
6763 __ ld($dst$$Register, $constanttablebase, con_offset);
6764 } else {
6765 __ set64(AT, con_offset);
6766 if (UseLoongsonISA) {
6767 __ gsldx($dst$$Register, $constanttablebase, AT, 0);
6768 } else {
6769 __ daddu(AT, $constanttablebase, AT);
6770 __ ld($dst$$Register, AT, 0);
6771 }
6772 }
6773 %}
6775 ins_pipe(ialu_loadI);
6776 %}
6777 */
// Cheap non-oop pointer constant: no relocation needed, plain set64.
6779 instruct loadConP_no_oop_cheap(mRegP dst, immP_no_oop_cheap src) %{
6780 match(Set dst src);
6782 ins_cost(80);
6783 format %{ "li $dst, $src @ loadConP_no_oop_cheap" %}
6785 ins_encode %{
6786 __ set64($dst$$Register, $src$$constant);
6787 %}
6789 ins_pipe(ialu_regI_regI);
6790 %}
// Safepoint-polling page address constant.
6793 instruct loadConP_poll(mRegP dst, immP_poll src) %{
6794 match(Set dst src);
6796 ins_cost(50);
6797 format %{ "li $dst, $src #@loadConP_poll" %}
6799 ins_encode %{
6800 Register dst = $dst$$Register;
6801 intptr_t value = (intptr_t)$src$$constant;
6803 __ set64(dst, (jlong)value);
6804 %}
6806 ins_pipe( ialu_regI_regI );
6807 %}
// NULL pointer constant: copy the zero register.
6809 instruct loadConP0(mRegP dst, immP0 src)
6810 %{
6811 match(Set dst src);
6813 ins_cost(50);
6814 format %{ "mov $dst, R0\t# ptr" %}
6815 ins_encode %{
6816 Register dst_reg = $dst$$Register;
6817 __ daddu(dst_reg, R0, R0);
6818 %}
6819 ins_pipe( ialu_regI_regI );
6820 %}
6822 instruct loadConN0(mRegN dst, immN0 src) %{
6823 match(Set dst src);
6824 format %{ "move $dst, R0\t# compressed NULL ptr" %}
6825 ins_encode %{
6826 __ move($dst$$Register, R0);
6827 %}
6828 ins_pipe( ialu_regI_regI );
6829 %}
// Non-null narrow-oop constant: set_narrow_oop also records the relocation.
6831 instruct loadConN(mRegN dst, immN src) %{
6832 match(Set dst src);
6834 ins_cost(125);
6835 format %{ "li $dst, $src\t# compressed ptr @ loadConN" %}
6836 ins_encode %{
6837 Register dst = $dst$$Register;
6838 __ set_narrow_oop(dst, (jobject)$src$$constant);
6839 %}
6840 ins_pipe( ialu_regI_regI ); // XXX
6841 %}
// Narrow-klass constant: set_narrow_klass also records the relocation.
6843 instruct loadConNKlass(mRegN dst, immNKlass src) %{
6844 match(Set dst src);
6846 ins_cost(125);
6847 format %{ "li $dst, $src\t# compressed klass ptr @ loadConNKlass" %}
6848 ins_encode %{
6849 Register dst = $dst$$Register;
6850 __ set_narrow_klass(dst, (Klass*)$src$$constant);
6851 %}
6852 ins_pipe( ialu_regI_regI ); // XXX
6853 %}
6855 //FIXME
6856 // Tail Call; Jump from runtime stub to Java code.
6857 // Also known as an 'interprocedural jump'.
6858 // Target of jump will eventually return to caller.
6859 // TailJump below removes the return address.
6860 instruct TailCalljmpInd(mRegP jump_target, mRegP method_oop) %{
6861 match(TailCall jump_target method_oop );
6862 ins_cost(300);
6863 format %{ "JMP $jump_target \t# @TailCalljmpInd" %}
6865 ins_encode %{
6866 Register target = $jump_target$$Register;
6867 Register oop = $method_oop$$Register;
6869 /* 2012/10/12 Jin: RA will be used in generate_forward_exception() */
6870 __ push(RA);
// Method oop is handed over in S3; the nop fills the jr delay slot.
6872 __ move(S3, oop);
6873 __ jr(target);
6874 __ nop();
6875 %}
6877 ins_pipe( pipe_jump );
6878 %}
6880 // Create exception oop: created by stack-crawling runtime code.
6881 // Created exception is now available to this handler, and is setup
6882 // just prior to jumping to this handler. No code emitted.
6883 instruct CreateException( a0_RegP ex_oop )
6884 %{
6885 match(Set ex_oop (CreateEx));
6887 // use the following format syntax
6888 format %{ "# exception oop is in A0; no code emitted @CreateException" %}
6889 ins_encode %{
6890 /* Jin: X86 leaves this function empty */
6891 __ block_comment("CreateException is empty in X86/MIPS");
6892 %}
6893 ins_pipe( empty );
6894 // ins_pipe( pipe_jump );
6895 %}
6898 /* 2012/9/14 Jin: The mechanism of exception handling is clear now.
6900 - Common try/catch:
6901 2012/9/14 Jin: [stubGenerator_mips.cpp] generate_forward_exception()
6902 |- V0, V1 are created
6903 |- T9 <= SharedRuntime::exception_handler_for_return_address
6904 `- jr T9
6905 `- the caller's exception_handler
6906 `- jr OptoRuntime::exception_blob
6907 `- here
6908 - Rethrow(e.g. 'unwind'):
6909 * The callee:
6910 |- an exception is triggered during execution
6911 `- exits the callee method through RethrowException node
6912 |- The callee pushes exception_oop(T0) and exception_pc(RA)
6913 `- The callee jumps to OptoRuntime::rethrow_stub()
6914 * In OptoRuntime::rethrow_stub:
6915 |- The VM calls _rethrow_Java to determine the return address in the caller method
6916 `- exits the stub with tailjmpInd
6917 |- pops exception_oop(V0) and exception_pc(V1)
6918 `- jumps to the return address(usually an exception_handler)
6919 * The caller:
6920 `- continues processing the exception_blob with V0/V1
6921 */
6923 /*
6924 Disassembling OptoRuntime::rethrow_stub()
6926 ; locals
6927 0x2d3bf320: addiu sp, sp, 0xfffffff8
6928 0x2d3bf324: sw ra, 0x4(sp)
6929 0x2d3bf328: sw fp, 0x0(sp)
6930 0x2d3bf32c: addu fp, sp, zero
6931 0x2d3bf330: addiu sp, sp, 0xfffffff0
6932 0x2d3bf334: sw ra, 0x8(sp)
6933 0x2d3bf338: sw t0, 0x4(sp)
6934 0x2d3bf33c: sw sp, 0x0(sp)
6936 ; get_thread(S2)
6937 0x2d3bf340: addu s2, sp, zero
6938 0x2d3bf344: srl s2, s2, 12
6939 0x2d3bf348: sll s2, s2, 2
6940 0x2d3bf34c: lui at, 0x2c85
6941 0x2d3bf350: addu at, at, s2
6942 0x2d3bf354: lw s2, 0xffffcc80(at)
6944 0x2d3bf358: lw s0, 0x0(sp)
6945 0x2d3bf35c: sw s0, 0x118(s2) // last_sp -> threa
6946 0x2d3bf360: sw s2, 0xc(sp)
6948 ; OptoRuntime::rethrow_C(oopDesc* exception, JavaThread* thread, address ret_pc)
6949 0x2d3bf364: lw a0, 0x4(sp)
6950 0x2d3bf368: lw a1, 0xc(sp)
6951 0x2d3bf36c: lw a2, 0x8(sp)
6952 ;; Java_To_Runtime
6953 0x2d3bf370: lui t9, 0x2c34
6954 0x2d3bf374: addiu t9, t9, 0xffff8a48
6955 0x2d3bf378: jalr t9
6956 0x2d3bf37c: nop
6958 0x2d3bf380: addu s3, v0, zero ; S3: SharedRuntime::raw_exception_handler_for_return_address()
6960 0x2d3bf384: lw s0, 0xc(sp)
6961 0x2d3bf388: sw zero, 0x118(s0)
6962 0x2d3bf38c: sw zero, 0x11c(s0)
6963 0x2d3bf390: lw s1, 0x144(s0) ; ex_oop: S1
6964 0x2d3bf394: addu s2, s0, zero
6965 0x2d3bf398: sw zero, 0x144(s2)
6966 0x2d3bf39c: lw s0, 0x4(s2)
6967 0x2d3bf3a0: addiu s4, zero, 0x0
6968 0x2d3bf3a4: bne s0, s4, 0x2d3bf3d4
6969 0x2d3bf3a8: nop
6970 0x2d3bf3ac: addiu sp, sp, 0x10
6971 0x2d3bf3b0: addiu sp, sp, 0x8
6972 0x2d3bf3b4: lw ra, 0xfffffffc(sp)
6973 0x2d3bf3b8: lw fp, 0xfffffff8(sp)
6974 0x2d3bf3bc: lui at, 0x2b48
6975 0x2d3bf3c0: lw at, 0x100(at)
6977 ; tailjmpInd: Restores exception_oop & exception_pc
6978 0x2d3bf3c4: addu v1, ra, zero
6979 0x2d3bf3c8: addu v0, s1, zero
6980 0x2d3bf3cc: jr s3
6981 0x2d3bf3d0: nop
6982 ; Exception:
6983 0x2d3bf3d4: lui s1, 0x2cc8 ; generate_forward_exception()
6984 0x2d3bf3d8: addiu s1, s1, 0x40
6985 0x2d3bf3dc: addiu s2, zero, 0x0
6986 0x2d3bf3e0: addiu sp, sp, 0x10
6987 0x2d3bf3e4: addiu sp, sp, 0x8
6988 0x2d3bf3e8: lw ra, 0xfffffffc(sp)
6989 0x2d3bf3ec: lw fp, 0xfffffff8(sp)
6990 0x2d3bf3f0: lui at, 0x2b48
6991 0x2d3bf3f4: lw at, 0x100(at)
6992 ; TailCalljmpInd
6993 __ push(RA); ; to be used in generate_forward_exception()
6994 0x2d3bf3f8: addu t7, s2, zero
6995 0x2d3bf3fc: jr s1
6996 0x2d3bf400: nop
6997 */
6998 // Rethrow exception:
6999 // The exception oop will come in the first argument position.
7000 // Then JUMP (not call) to the rethrow stub code.
7001 instruct RethrowException()
7002 %{
7003 match(Rethrow);
7005 // use the following format syntax
7006 format %{ "JMP rethrow_stub #@RethrowException" %}
7007 ins_encode %{
7008 __ block_comment("@ RethrowException");
// Mark and relocate so the runtime-call target can be patched later.
7010 cbuf.set_insts_mark();
7011 cbuf.relocate(cbuf.insts_mark(), runtime_call_Relocation::spec());
7013 // call OptoRuntime::rethrow_stub to get the exception handler in parent method
7014 __ patchable_jump((address)OptoRuntime::rethrow_stub());
7015 %}
7016 ins_pipe( pipe_jump );
7017 %}
// Conditional branch on pointer-vs-NULL comparison (eq/ne only).
// Fix: the encoding previously did `Label &L = *($labl$$label);` and then
// tested `if (&L)`. Binding a reference dereferences a possibly-NULL label
// pointer (undefined behavior), and the address of a reference is never
// null, so compilers warn (-Waddress) and may fold the check away. Keep
// the raw pointer and test it before dereferencing instead.
7019 instruct branchConP_zero(cmpOpU cmp, mRegP op1, immP0 zero, label labl) %{
7020 match(If cmp (CmpP op1 zero));
7021 effect(USE labl);
7023 ins_cost(180);
7024 format %{ "b$cmp $op1, R0, $labl #@branchConP_zero" %}
7026 ins_encode %{
7027 Register op1 = $op1$$Register;
7028 Register op2 = R0;
7029 Label* L = $labl$$label;
7030 int flag = $cmp$$cmpcode;
7032 switch(flag)
7033 {
7034 case 0x01: //equal
7035 if (L)
7036 __ beq(op1, op2, *L);
7037 else
7038 __ beq(op1, op2, (int)0);
7039 break;
7040 case 0x02: //not_equal
7041 if (L)
7042 __ bne(op1, op2, *L);
7043 else
7044 __ bne(op1, op2, (int)0);
7045 break;
7046 /*
7047 case 0x03: //above
7048 __ sltu(AT, op2, op1);
7049 if (L)
7050 __ bne(R0, AT, *L);
7051 else
7052 __ bne(R0, AT, (int)0);
7053 break;
7054 case 0x04: //above_equal
7055 __ sltu(AT, op1, op2);
7056 if (L)
7057 __ beq(AT, R0, *L);
7058 else
7059 __ beq(AT, R0, (int)0);
7060 break;
7061 case 0x05: //below
7062 __ sltu(AT, op1, op2);
7063 if (L)
7064 __ bne(R0, AT, *L);
7065 else
7066 __ bne(R0, AT, (int)0);
7067 break;
7068 case 0x06: //below_equal
7069 __ sltu(AT, op2, op1);
7070 if (L)
7071 __ beq(AT, R0, *L);
7072 else
7073 __ beq(AT, R0, (int)0);
7074 break;
7075 */
7076 default:
7077 Unimplemented();
7078 }
// Fill the branch delay slot (branch_has_delay_slot in the pipeline).
7079 __ nop();
7080 %}
7082 ins_pc_relative(1);
7083 ins_pipe( pipe_alu_branch );
7084 %}
// Branch on decoded narrow-oop compare against NULL.  Valid only when the
// heap base is NULL and the shift is 0 (predicate), so the compressed and
// decoded forms are bit-identical and the compare can be done on the narrow
// register directly.  Only eq/ne apply; nop() fills the branch delay slot.
7086 instruct branchConN2P_zero(cmpOpU cmp, mRegN op1, immP0 zero, label labl) %{
7087 match(If cmp (CmpP (DecodeN op1) zero));
7088 predicate(Universe::narrow_oop_base() == NULL && Universe::narrow_oop_shift() == 0);
7089 effect(USE labl);
7091 ins_cost(180);
7092 format %{ "b$cmp $op1, R0, $labl #@branchConN2P_zero" %}
7094 ins_encode %{
7095 Register op1 = $op1$$Register;
7096 Register op2 = R0;
7097 Label &L = *($labl$$label);
7098 int flag = $cmp$$cmpcode;
7100 switch(flag)
7101 {
7102 case 0x01: //equal
7103 if (&L)
7104 __ beq(op1, op2, L);
7105 else
7106 __ beq(op1, op2, (int)0);
7107 break;
7108 case 0x02: //not_equal
7109 if (&L)
7110 __ bne(op1, op2, L);
7111 else
7112 __ bne(op1, op2, (int)0);
7113 break;
7114 default:
7115 Unimplemented();
7116 }
7117 __ nop();
7118 %}
7120 ins_pc_relative(1);
7121 ins_pipe( pipe_alu_branch );
7122 %}
// Branch on unsigned pointer compare of two registers (If cmp (CmpP p1 p2)).
// eq/ne branch directly; the ordered cases first set AT via sltu (unsigned
// set-less-than) and branch on AT vs R0.  nop() fills the branch delay slot.
// `if (&L)` selects label form vs. patchable 0-offset form (label may be null).
7125 instruct branchConP(cmpOpU cmp, mRegP op1, mRegP op2, label labl) %{
7126 match(If cmp (CmpP op1 op2));
7127 // predicate(can_branch_register(_kids[0]->_leaf, _kids[1]->_leaf));
7128 effect(USE labl);
7130 ins_cost(200);
7131 format %{ "b$cmp $op1, $op2, $labl #@branchConP" %}
7133 ins_encode %{
7134 Register op1 = $op1$$Register;
7135 Register op2 = $op2$$Register;
7136 Label &L = *($labl$$label);
7137 int flag = $cmp$$cmpcode;
7139 switch(flag)
7140 {
7141 case 0x01: //equal
7142 if (&L)
7143 __ beq(op1, op2, L);
7144 else
7145 __ beq(op1, op2, (int)0);
7146 break;
7147 case 0x02: //not_equal
7148 if (&L)
7149 __ bne(op1, op2, L);
7150 else
7151 __ bne(op1, op2, (int)0);
7152 break;
7153 case 0x03: //above
7154 __ sltu(AT, op2, op1);
7155 if(&L)
7156 __ bne(R0, AT, L);
7157 else
7158 __ bne(R0, AT, (int)0);
7159 break;
7160 case 0x04: //above_equal
7161 __ sltu(AT, op1, op2);
7162 if(&L)
7163 __ beq(AT, R0, L);
7164 else
7165 __ beq(AT, R0, (int)0);
7166 break;
7167 case 0x05: //below
7168 __ sltu(AT, op1, op2);
7169 if(&L)
7170 __ bne(R0, AT, L);
7171 else
7172 __ bne(R0, AT, (int)0);
7173 break;
7174 case 0x06: //below_equal
7175 __ sltu(AT, op2, op1);
7176 if(&L)
7177 __ beq(AT, R0, L);
7178 else
7179 __ beq(AT, R0, (int)0);
7180 break;
7181 default:
7182 Unimplemented();
7183 }
7184 __ nop();
7185 %}
7187 ins_pc_relative(1);
7188 ins_pipe( pipe_alu_branch );
7189 %}
// Branch on compressed-oop compare against the narrow null (CmpN op1 0).
// The narrow null encodes as 0, so the compare is against R0; only eq/ne are
// supported.  nop() fills the branch delay slot.
7191 instruct cmpN_null_branch(cmpOp cmp, mRegN op1, immN0 null, label labl) %{
7192 match(If cmp (CmpN op1 null));
7193 effect(USE labl);
7195 ins_cost(180);
7196 format %{ "CMP $op1,0\t! compressed ptr\n\t"
7197 "BP$cmp $labl @ cmpN_null_branch" %}
7198 ins_encode %{
7199 Register op1 = $op1$$Register;
7200 Register op2 = R0;
7201 Label &L = *($labl$$label);
7202 int flag = $cmp$$cmpcode;
7204 switch(flag)
7205 {
7206 case 0x01: //equal
7207 if (&L)
7208 __ beq(op1, op2, L);
7209 else
7210 __ beq(op1, op2, (int)0);
7211 break;
7212 case 0x02: //not_equal
7213 if (&L)
7214 __ bne(op1, op2, L);
7215 else
7216 __ bne(op1, op2, (int)0);
7217 break;
7218 default:
7219 Unimplemented();
7220 }
7221 __ nop();
7222 %}
7223 //TODO: pipe_branchP or create pipe_branchN LEE
7224 ins_pc_relative(1);
7225 ins_pipe( pipe_alu_branch );
7226 %}
// Branch on compressed-oop compare of two narrow registers (CmpN op1 op2).
// eq/ne branch directly; ordered cases use unsigned sltu into AT, then branch
// on AT vs R0.  nop() fills the branch delay slot.
7228 instruct cmpN_reg_branch(cmpOp cmp, mRegN op1, mRegN op2, label labl) %{
7229 match(If cmp (CmpN op1 op2));
7230 effect(USE labl);
7232 ins_cost(180);
7233 format %{ "CMP $op1,$op2\t! compressed ptr\n\t"
7234 "BP$cmp $labl" %}
7235 ins_encode %{
7236 Register op1_reg = $op1$$Register;
7237 Register op2_reg = $op2$$Register;
7238 Label &L = *($labl$$label);
7239 int flag = $cmp$$cmpcode;
7241 switch(flag)
7242 {
7243 case 0x01: //equal
7244 if (&L)
7245 __ beq(op1_reg, op2_reg, L);
7246 else
7247 __ beq(op1_reg, op2_reg, (int)0);
7248 break;
7249 case 0x02: //not_equal
7250 if (&L)
7251 __ bne(op1_reg, op2_reg, L);
7252 else
7253 __ bne(op1_reg, op2_reg, (int)0);
7254 break;
7255 case 0x03: //above
7256 __ sltu(AT, op2_reg, op1_reg);
7257 if(&L)
7258 __ bne(R0, AT, L);
7259 else
7260 __ bne(R0, AT, (int)0);
7261 break;
7262 case 0x04: //above_equal
7263 __ sltu(AT, op1_reg, op2_reg);
7264 if(&L)
7265 __ beq(AT, R0, L);
7266 else
7267 __ beq(AT, R0, (int)0);
7268 break;
7269 case 0x05: //below
7270 __ sltu(AT, op1_reg, op2_reg);
7271 if(&L)
7272 __ bne(R0, AT, L);
7273 else
7274 __ bne(R0, AT, (int)0);
7275 break;
7276 case 0x06: //below_equal
7277 __ sltu(AT, op2_reg, op1_reg);
7278 if(&L)
7279 __ beq(AT, R0, L);
7280 else
7281 __ beq(AT, R0, (int)0);
7282 break;
7283 default:
7284 Unimplemented();
7285 }
7286 __ nop();
7287 %}
7288 ins_pc_relative(1);
7289 ins_pipe( pipe_alu_branch );
7290 %}
// Branch on UNSIGNED int compare of two registers (If cmp (CmpU s1 s2)).
// eq/ne branch directly; above/below variants compute sltu into AT first.
// nop() fills the branch delay slot.
7292 instruct branchConIU_reg_reg(cmpOpU cmp, mRegI src1, mRegI src2, label labl) %{
7293 match( If cmp (CmpU src1 src2) );
7294 effect(USE labl);
7295 format %{ "BR$cmp $src1, $src2, $labl #@branchConIU_reg_reg" %}
7297 ins_encode %{
7298 Register op1 = $src1$$Register;
7299 Register op2 = $src2$$Register;
7300 Label &L = *($labl$$label);
7301 int flag = $cmp$$cmpcode;
7303 switch(flag)
7304 {
7305 case 0x01: //equal
7306 if (&L)
7307 __ beq(op1, op2, L);
7308 else
7309 __ beq(op1, op2, (int)0);
7310 break;
7311 case 0x02: //not_equal
7312 if (&L)
7313 __ bne(op1, op2, L);
7314 else
7315 __ bne(op1, op2, (int)0);
7316 break;
7317 case 0x03: //above
7318 __ sltu(AT, op2, op1);
7319 if(&L)
7320 __ bne(AT, R0, L);
7321 else
7322 __ bne(AT, R0, (int)0);
7323 break;
7324 case 0x04: //above_equal
7325 __ sltu(AT, op1, op2);
7326 if(&L)
7327 __ beq(AT, R0, L);
7328 else
7329 __ beq(AT, R0, (int)0);
7330 break;
7331 case 0x05: //below
7332 __ sltu(AT, op1, op2);
7333 if(&L)
7334 __ bne(AT, R0, L);
7335 else
7336 __ bne(AT, R0, (int)0);
7337 break;
7338 case 0x06: //below_equal
7339 __ sltu(AT, op2, op1);
7340 if(&L)
7341 __ beq(AT, R0, L);
7342 else
7343 __ beq(AT, R0, (int)0);
7344 break;
7345 default:
7346 Unimplemented();
7347 }
7348 __ nop();
7349 %}
7351 ins_pc_relative(1);
7352 ins_pipe( pipe_alu_branch );
7353 %}
// Branch on UNSIGNED int compare, register vs. arbitrary immediate.  The
// immediate is first materialized into AT with move(); note the ordered
// cases then REUSE AT as both the sltu destination and one operand — safe
// because AT is read before being overwritten.  nop() fills the delay slot.
7356 instruct branchConIU_reg_imm(cmpOpU cmp, mRegI src1, immI src2, label labl) %{
7357 match( If cmp (CmpU src1 src2) );
7358 effect(USE labl);
7359 format %{ "BR$cmp $src1, $src2, $labl #@branchConIU_reg_imm" %}
7361 ins_encode %{
7362 Register op1 = $src1$$Register;
7363 int val = $src2$$constant;
7364 Label &L = *($labl$$label);
7365 int flag = $cmp$$cmpcode;
7367 __ move(AT, val);
7368 switch(flag)
7369 {
7370 case 0x01: //equal
7371 if (&L)
7372 __ beq(op1, AT, L);
7373 else
7374 __ beq(op1, AT, (int)0);
7375 break;
7376 case 0x02: //not_equal
7377 if (&L)
7378 __ bne(op1, AT, L);
7379 else
7380 __ bne(op1, AT, (int)0);
7381 break;
7382 case 0x03: //above
7383 __ sltu(AT, AT, op1);
7384 if(&L)
7385 __ bne(R0, AT, L);
7386 else
7387 __ bne(R0, AT, (int)0);
7388 break;
7389 case 0x04: //above_equal
7390 __ sltu(AT, op1, AT);
7391 if(&L)
7392 __ beq(AT, R0, L);
7393 else
7394 __ beq(AT, R0, (int)0);
7395 break;
7396 case 0x05: //below
7397 __ sltu(AT, op1, AT);
7398 if(&L)
7399 __ bne(R0, AT, L);
7400 else
7401 __ bne(R0, AT, (int)0);
7402 break;
7403 case 0x06: //below_equal
7404 __ sltu(AT, AT, op1);
7405 if(&L)
7406 __ beq(AT, R0, L);
7407 else
7408 __ beq(AT, R0, (int)0);
7409 break;
7410 default:
7411 Unimplemented();
7412 }
7413 __ nop();
7414 %}
7416 ins_pc_relative(1);
7417 ins_pipe( pipe_alu_branch );
7418 %}
// Branch on SIGNED int compare of two registers (If cmp (CmpI s1 s2)).
// Same shape as branchConIU_reg_reg but uses slt (signed set-less-than)
// for the ordered cases.  nop() fills the branch delay slot.
7420 instruct branchConI_reg_reg(cmpOp cmp, mRegI src1, mRegI src2, label labl) %{
7421 match( If cmp (CmpI src1 src2) );
7422 effect(USE labl);
7423 format %{ "BR$cmp $src1, $src2, $labl #@branchConI_reg_reg" %}
7425 ins_encode %{
7426 Register op1 = $src1$$Register;
7427 Register op2 = $src2$$Register;
7428 Label &L = *($labl$$label);
7429 int flag = $cmp$$cmpcode;
7431 switch(flag)
7432 {
7433 case 0x01: //equal
7434 if (&L)
7435 __ beq(op1, op2, L);
7436 else
7437 __ beq(op1, op2, (int)0);
7438 break;
7439 case 0x02: //not_equal
7440 if (&L)
7441 __ bne(op1, op2, L);
7442 else
7443 __ bne(op1, op2, (int)0);
7444 break;
7445 case 0x03: //above
7446 __ slt(AT, op2, op1);
7447 if(&L)
7448 __ bne(R0, AT, L);
7449 else
7450 __ bne(R0, AT, (int)0);
7451 break;
7452 case 0x04: //above_equal
7453 __ slt(AT, op1, op2);
7454 if(&L)
7455 __ beq(AT, R0, L);
7456 else
7457 __ beq(AT, R0, (int)0);
7458 break;
7459 case 0x05: //below
7460 __ slt(AT, op1, op2);
7461 if(&L)
7462 __ bne(R0, AT, L);
7463 else
7464 __ bne(R0, AT, (int)0);
7465 break;
7466 case 0x06: //below_equal
7467 __ slt(AT, op2, op1);
7468 if(&L)
7469 __ beq(AT, R0, L);
7470 else
7471 __ beq(AT, R0, (int)0);
7472 break;
7473 default:
7474 Unimplemented();
7475 }
7476 __ nop();
7477 %}
7479 ins_pc_relative(1);
7480 ins_pipe( pipe_alu_branch );
7481 %}
// Branch on SIGNED int compare against immediate zero.  Uses the MIPS
// compare-with-zero branches (bgtz/bgez/bltz/blez) directly, so no AT
// scratch compare is needed — hence the lower cost (170).  nop() fills
// the branch delay slot.
7483 instruct branchConI_reg_imm0(cmpOp cmp, mRegI src1, immI0 src2, label labl) %{
7484 match( If cmp (CmpI src1 src2) );
7485 effect(USE labl);
7486 ins_cost(170);
7487 format %{ "BR$cmp $src1, $src2, $labl #@branchConI_reg_imm0" %}
7489 ins_encode %{
7490 Register op1 = $src1$$Register;
7491 // int val = $src2$$constant;
7492 Label &L = *($labl$$label);
7493 int flag = $cmp$$cmpcode;
7495 //__ move(AT, val);
7496 switch(flag)
7497 {
7498 case 0x01: //equal
7499 if (&L)
7500 __ beq(op1, R0, L);
7501 else
7502 __ beq(op1, R0, (int)0);
7503 break;
7504 case 0x02: //not_equal
7505 if (&L)
7506 __ bne(op1, R0, L);
7507 else
7508 __ bne(op1, R0, (int)0);
7509 break;
7510 case 0x03: //greater
7511 if(&L)
7512 __ bgtz(op1, L);
7513 else
7514 __ bgtz(op1, (int)0);
7515 break;
7516 case 0x04: //greater_equal
7517 if(&L)
7518 __ bgez(op1, L);
7519 else
7520 __ bgez(op1, (int)0);
7521 break;
7522 case 0x05: //less
7523 if(&L)
7524 __ bltz(op1, L);
7525 else
7526 __ bltz(op1, (int)0);
7527 break;
7528 case 0x06: //less_equal
7529 if(&L)
7530 __ blez(op1, L);
7531 else
7532 __ blez(op1, (int)0);
7533 break;
7534 default:
7535 Unimplemented();
7536 }
7537 __ nop();
7538 %}
7540 ins_pc_relative(1);
7541 ins_pipe( pipe_alu_branch );
7542 %}
// Branch on SIGNED int compare, register vs. arbitrary immediate.  The
// immediate is materialized into AT, then slt computes the ordered result
// into AT (AT is read before written, so the reuse is safe).  nop() fills
// the branch delay slot.
7545 instruct branchConI_reg_imm(cmpOp cmp, mRegI src1, immI src2, label labl) %{
7546 match( If cmp (CmpI src1 src2) );
7547 effect(USE labl);
7548 ins_cost(200);
7549 format %{ "BR$cmp $src1, $src2, $labl #@branchConI_reg_imm" %}
7551 ins_encode %{
7552 Register op1 = $src1$$Register;
7553 int val = $src2$$constant;
7554 Label &L = *($labl$$label);
7555 int flag = $cmp$$cmpcode;
7557 __ move(AT, val);
7558 switch(flag)
7559 {
7560 case 0x01: //equal
7561 if (&L)
7562 __ beq(op1, AT, L);
7563 else
7564 __ beq(op1, AT, (int)0);
7565 break;
7566 case 0x02: //not_equal
7567 if (&L)
7568 __ bne(op1, AT, L);
7569 else
7570 __ bne(op1, AT, (int)0);
7571 break;
7572 case 0x03: //greater
7573 __ slt(AT, AT, op1);
7574 if(&L)
7575 __ bne(R0, AT, L);
7576 else
7577 __ bne(R0, AT, (int)0);
7578 break;
7579 case 0x04: //greater_equal
7580 __ slt(AT, op1, AT);
7581 if(&L)
7582 __ beq(AT, R0, L);
7583 else
7584 __ beq(AT, R0, (int)0);
7585 break;
7586 case 0x05: //less
7587 __ slt(AT, op1, AT);
7588 if(&L)
7589 __ bne(R0, AT, L);
7590 else
7591 __ bne(R0, AT, (int)0);
7592 break;
7593 case 0x06: //less_equal
7594 __ slt(AT, AT, op1);
7595 if(&L)
7596 __ beq(AT, R0, L);
7597 else
7598 __ beq(AT, R0, (int)0);
7599 break;
7600 default:
7601 Unimplemented();
7602 }
7603 __ nop();
7604 %}
7606 ins_pc_relative(1);
7607 ins_pipe( pipe_alu_branch );
7608 %}
// Branch on UNSIGNED int compare against zero.  Degenerate cases are
// resolved at code-gen time:
//   above_equal (x >= 0u) is always true  -> unconditional beq(R0, R0, ...)
//   below       (x <  0u) is never  true  -> emit nothing; the early
//     `return` also skips the trailing delay-slot nop (no branch emitted).
//   below_equal (x <= 0u) degenerates to equality with zero.
7610 instruct branchConIU_reg_imm0(cmpOpU cmp, mRegI src1, immI0 zero, label labl) %{
7611 match( If cmp (CmpU src1 zero) );
7612 effect(USE labl);
7613 format %{ "BR$cmp $src1, zero, $labl #@branchConIU_reg_imm0" %}
7615 ins_encode %{
7616 Register op1 = $src1$$Register;
7617 Label &L = *($labl$$label);
7618 int flag = $cmp$$cmpcode;
7620 switch(flag)
7621 {
7622 case 0x01: //equal
7623 if (&L)
7624 __ beq(op1, R0, L);
7625 else
7626 __ beq(op1, R0, (int)0);
7627 break;
7628 case 0x02: //not_equal
7629 if (&L)
7630 __ bne(op1, R0, L);
7631 else
7632 __ bne(op1, R0, (int)0);
7633 break;
7634 case 0x03: //above
7635 if(&L)
7636 __ bne(R0, op1, L);
7637 else
7638 __ bne(R0, op1, (int)0);
7639 break;
7640 case 0x04: //above_equal
7641 if(&L)
7642 __ beq(R0, R0, L);
7643 else
7644 __ beq(R0, R0, (int)0);
7645 break;
7646 case 0x05: //below
7647 return;
7648 break;
7649 case 0x06: //below_equal
7650 if(&L)
7651 __ beq(op1, R0, L);
7652 else
7653 __ beq(op1, R0, (int)0);
7654 break;
7655 default:
7656 Unimplemented();
7657 }
7658 __ nop();
7659 %}
7661 ins_pc_relative(1);
7662 ins_pipe( pipe_alu_branch );
7663 %}
// Branch on UNSIGNED int compare, register vs. 16-bit immediate.  Where the
// operand order allows it (above_equal/below) the single-instruction sltiu
// is used; the other ordered cases must materialize the immediate into AT
// first.  nop() fills the branch delay slot.
7666 instruct branchConIU_reg_immI16(cmpOpU cmp, mRegI src1, immI16 src2, label labl) %{
7667 match( If cmp (CmpU src1 src2) );
7668 effect(USE labl);
7669 ins_cost(180);
7670 format %{ "BR$cmp $src1, $src2, $labl #@branchConIU_reg_immI16" %}
7672 ins_encode %{
7673 Register op1 = $src1$$Register;
7674 int val = $src2$$constant;
7675 Label &L = *($labl$$label);
7676 int flag = $cmp$$cmpcode;
7678 switch(flag)
7679 {
7680 case 0x01: //equal
7681 __ move(AT, val);
7682 if (&L)
7683 __ beq(op1, AT, L);
7684 else
7685 __ beq(op1, AT, (int)0);
7686 break;
7687 case 0x02: //not_equal
7688 __ move(AT, val);
7689 if (&L)
7690 __ bne(op1, AT, L);
7691 else
7692 __ bne(op1, AT, (int)0);
7693 break;
7694 case 0x03: //above
7695 __ move(AT, val);
7696 __ sltu(AT, AT, op1);
7697 if(&L)
7698 __ bne(R0, AT, L);
7699 else
7700 __ bne(R0, AT, (int)0);
7701 break;
7702 case 0x04: //above_equal
7703 __ sltiu(AT, op1, val);
7704 if(&L)
7705 __ beq(AT, R0, L);
7706 else
7707 __ beq(AT, R0, (int)0);
7708 break;
7709 case 0x05: //below
7710 __ sltiu(AT, op1, val);
7711 if(&L)
7712 __ bne(R0, AT, L);
7713 else
7714 __ bne(R0, AT, (int)0);
7715 break;
7716 case 0x06: //below_equal
7717 __ move(AT, val);
7718 __ sltu(AT, AT, op1);
7719 if(&L)
7720 __ beq(AT, R0, L);
7721 else
7722 __ beq(AT, R0, (int)0);
7723 break;
7724 default:
7725 Unimplemented();
7726 }
7727 __ nop();
7728 %}
7730 ins_pc_relative(1);
7731 ins_pipe( pipe_alu_branch );
7732 %}
// Branch on SIGNED long compare of two registers.  Unlike the int variants
// above, each case explicitly fills its own delay slot with
// __ delayed()->nop() instead of a single trailing nop after the switch.
7735 instruct branchConL_regL_regL(cmpOp cmp, mRegL src1, mRegL src2, label labl) %{
7736 match( If cmp (CmpL src1 src2) );
7737 effect(USE labl);
7738 format %{ "BR$cmp $src1, $src2, $labl #@branchConL_regL_regL" %}
7739 ins_cost(250);
7741 ins_encode %{
7742 Register opr1_reg = as_Register($src1$$reg);
7743 Register opr2_reg = as_Register($src2$$reg);
7745 Label &target = *($labl$$label);
7746 int flag = $cmp$$cmpcode;
7748 switch(flag)
7749 {
7750 case 0x01: //equal
7751 if (&target)
7752 __ beq(opr1_reg, opr2_reg, target);
7753 else
7754 __ beq(opr1_reg, opr2_reg, (int)0);
7755 __ delayed()->nop();
7756 break;
7758 case 0x02: //not_equal
7759 if(&target)
7760 __ bne(opr1_reg, opr2_reg, target);
7761 else
7762 __ bne(opr1_reg, opr2_reg, (int)0);
7763 __ delayed()->nop();
7764 break;
7766 case 0x03: //greater
7767 __ slt(AT, opr2_reg, opr1_reg);
7768 if(&target)
7769 __ bne(AT, R0, target);
7770 else
7771 __ bne(AT, R0, (int)0);
7772 __ delayed()->nop();
7773 break;
7775 case 0x04: //greater_equal
7776 __ slt(AT, opr1_reg, opr2_reg);
7777 if(&target)
7778 __ beq(AT, R0, target);
7779 else
7780 __ beq(AT, R0, (int)0);
7781 __ delayed()->nop();
7783 break;
7785 case 0x05: //less
7786 __ slt(AT, opr1_reg, opr2_reg);
7787 if(&target)
7788 __ bne(AT, R0, target);
7789 else
7790 __ bne(AT, R0, (int)0);
7791 __ delayed()->nop();
7793 break;
7795 case 0x06: //less_equal
7796 __ slt(AT, opr2_reg, opr1_reg);
7798 if(&target)
7799 __ beq(AT, R0, target);
7800 else
7801 __ beq(AT, R0, (int)0);
7802 __ delayed()->nop();
7804 break;
7806 default:
7807 Unimplemented();
7808 }
7809 %}
7812 ins_pc_relative(1);
7813 ins_pipe( pipe_alu_branch );
7814 %}
// Branch on SIGNED long compare against an immediate whose negation fits
// 16 bits (immL16_sub): computes AT = op1 - val with one daddiu, then uses
// the compare-with-zero branches on AT.  nop() fills the delay slot.
// NOTE(review): correct only while op1 - val cannot overflow 64-bit signed
// range — presumably guaranteed by the immL16_sub operand; confirm.
7816 instruct branchConL_reg_immL16_sub(cmpOp cmp, mRegL src1, immL16_sub src2, label labl) %{
7817 match( If cmp (CmpL src1 src2) );
7818 effect(USE labl);
7819 ins_cost(180);
7820 format %{ "BR$cmp $src1, $src2, $labl #@branchConL_reg_immL16_sub" %}
7822 ins_encode %{
7823 Register op1 = $src1$$Register;
7824 int val = $src2$$constant;
7825 Label &L = *($labl$$label);
7826 int flag = $cmp$$cmpcode;
7828 __ daddiu(AT, op1, -1 * val);
7829 switch(flag)
7830 {
7831 case 0x01: //equal
7832 if (&L)
7833 __ beq(R0, AT, L);
7834 else
7835 __ beq(R0, AT, (int)0);
7836 break;
7837 case 0x02: //not_equal
7838 if (&L)
7839 __ bne(R0, AT, L);
7840 else
7841 __ bne(R0, AT, (int)0);
7842 break;
7843 case 0x03: //greater
7844 if(&L)
7845 __ bgtz(AT, L);
7846 else
7847 __ bgtz(AT, (int)0);
7848 break;
7849 case 0x04: //greater_equal
7850 if(&L)
7851 __ bgez(AT, L);
7852 else
7853 __ bgez(AT, (int)0);
7854 break;
7855 case 0x05: //less
7856 if(&L)
7857 __ bltz(AT, L);
7858 else
7859 __ bltz(AT, (int)0);
7860 break;
7861 case 0x06: //less_equal
7862 if(&L)
7863 __ blez(AT, L);
7864 else
7865 __ blez(AT, (int)0);
7866 break;
7867 default:
7868 Unimplemented();
7869 }
7870 __ nop();
7871 %}
7873 ins_pc_relative(1);
7874 ins_pipe( pipe_alu_branch );
7875 %}
// Branch on SIGNED int compare against an immediate whose negation fits
// 16 bits: AT = op1 - val via one addiu32, then compare-with-zero branches
// on AT.  32-bit analogue of branchConL_reg_immL16_sub.  nop() fills the
// delay slot.
7878 instruct branchConI_reg_imm16_sub(cmpOp cmp, mRegI src1, immI16_sub src2, label labl) %{
7879 match( If cmp (CmpI src1 src2) );
7880 effect(USE labl);
7881 ins_cost(180);
7882 format %{ "BR$cmp $src1, $src2, $labl #@branchConI_reg_imm16_sub" %}
7884 ins_encode %{
7885 Register op1 = $src1$$Register;
7886 int val = $src2$$constant;
7887 Label &L = *($labl$$label);
7888 int flag = $cmp$$cmpcode;
7890 __ addiu32(AT, op1, -1 * val);
7891 switch(flag)
7892 {
7893 case 0x01: //equal
7894 if (&L)
7895 __ beq(R0, AT, L);
7896 else
7897 __ beq(R0, AT, (int)0);
7898 break;
7899 case 0x02: //not_equal
7900 if (&L)
7901 __ bne(R0, AT, L);
7902 else
7903 __ bne(R0, AT, (int)0);
7904 break;
7905 case 0x03: //greater
7906 if(&L)
7907 __ bgtz(AT, L);
7908 else
7909 __ bgtz(AT, (int)0);
7910 break;
7911 case 0x04: //greater_equal
7912 if(&L)
7913 __ bgez(AT, L);
7914 else
7915 __ bgez(AT, (int)0);
7916 break;
7917 case 0x05: //less
7918 if(&L)
7919 __ bltz(AT, L);
7920 else
7921 __ bltz(AT, (int)0);
7922 break;
7923 case 0x06: //less_equal
7924 if(&L)
7925 __ blez(AT, L);
7926 else
7927 __ blez(AT, (int)0);
7928 break;
7929 default:
7930 Unimplemented();
7931 }
7932 __ nop();
7933 %}
7935 ins_pc_relative(1);
7936 ins_pipe( pipe_alu_branch );
7937 %}
// Branch on SIGNED long compare against immediate zero.  Mostly uses the
// direct compare-with-zero branches; the `less` case goes through slt+bne
// rather than bltz.  A single trailing delayed()->nop() fills the delay
// slot for whichever branch was emitted.
7939 instruct branchConL_regL_immL0(cmpOp cmp, mRegL src1, immL0 zero, label labl) %{
7940 match( If cmp (CmpL src1 zero) );
7941 effect(USE labl);
7942 format %{ "BR$cmp $src1, zero, $labl #@branchConL_regL_immL0" %}
7943 ins_cost(150);
7945 ins_encode %{
7946 Register opr1_reg = as_Register($src1$$reg);
7947 Label &target = *($labl$$label);
7948 int flag = $cmp$$cmpcode;
7950 switch(flag)
7951 {
7952 case 0x01: //equal
7953 if (&target)
7954 __ beq(opr1_reg, R0, target);
7955 else
7956 __ beq(opr1_reg, R0, int(0));
7957 break;
7959 case 0x02: //not_equal
7960 if(&target)
7961 __ bne(opr1_reg, R0, target);
7962 else
7963 __ bne(opr1_reg, R0, (int)0);
7964 break;
7966 case 0x03: //greater
7967 if(&target)
7968 __ bgtz(opr1_reg, target);
7969 else
7970 __ bgtz(opr1_reg, (int)0);
7971 break;
7973 case 0x04: //greater_equal
7974 if(&target)
7975 __ bgez(opr1_reg, target);
7976 else
7977 __ bgez(opr1_reg, (int)0);
7978 break;
7980 case 0x05: //less
7981 __ slt(AT, opr1_reg, R0);
7982 if(&target)
7983 __ bne(AT, R0, target);
7984 else
7985 __ bne(AT, R0, (int)0);
7986 break;
7988 case 0x06: //less_equal
7989 if (&target)
7990 __ blez(opr1_reg, target);
7991 else
7992 __ blez(opr1_reg, int(0));
7993 break;
7995 default:
7996 Unimplemented();
7997 }
7998 __ delayed()->nop();
7999 %}
8002 ins_pc_relative(1);
8003 ins_pipe( pipe_alu_branch );
8004 %}
8007 //FIXME
// Branch on single-precision float compare.  Uses FPU condition-code
// compares (c_eq_s / c_ule_s / c_ult_s) plus bc1t/bc1f.  The ordered cases
// use the UNORDERED predicates with bc1f so NaN makes the branch fall
// through (IEEE: ordered comparisons with NaN are false).  nop() fills the
// branch delay slot.
8008 instruct branchConF_reg_reg(cmpOp cmp, regF src1, regF src2, label labl) %{
8009 match( If cmp (CmpF src1 src2) );
8010 effect(USE labl);
8011 format %{ "BR$cmp $src1, $src2, $labl #@branchConF_reg_reg" %}
8013 ins_encode %{
8014 FloatRegister reg_op1 = $src1$$FloatRegister;
8015 FloatRegister reg_op2 = $src2$$FloatRegister;
8016 Label &L = *($labl$$label);
8017 int flag = $cmp$$cmpcode;
8019 switch(flag)
8020 {
8021 case 0x01: //equal
8022 __ c_eq_s(reg_op1, reg_op2);
8023 if (&L)
8024 __ bc1t(L);
8025 else
8026 __ bc1t((int)0);
8027 break;
8028 case 0x02: //not_equal
8029 __ c_eq_s(reg_op1, reg_op2);
8030 if (&L)
8031 __ bc1f(L);
8032 else
8033 __ bc1f((int)0);
8034 break;
8035 case 0x03: //greater
8036 __ c_ule_s(reg_op1, reg_op2);
8037 if(&L)
8038 __ bc1f(L);
8039 else
8040 __ bc1f((int)0);
8041 break;
8042 case 0x04: //greater_equal
8043 __ c_ult_s(reg_op1, reg_op2);
8044 if(&L)
8045 __ bc1f(L);
8046 else
8047 __ bc1f((int)0);
8048 break;
8049 case 0x05: //less
8050 __ c_ult_s(reg_op1, reg_op2);
8051 if(&L)
8052 __ bc1t(L);
8053 else
8054 __ bc1t((int)0);
8055 break;
8056 case 0x06: //less_equal
8057 __ c_ule_s(reg_op1, reg_op2);
8058 if(&L)
8059 __ bc1t(L);
8060 else
8061 __ bc1t((int)0);
8062 break;
8063 default:
8064 Unimplemented();
8065 }
8066 __ nop();
8067 %}
8069 ins_pc_relative(1);
8070 ins_pipe(pipe_slow);
8071 %}
// Branch on double-precision float compare.  Same scheme as
// branchConF_reg_reg but with the _d FPU compares.  not_equal deliberately
// uses c_eq_d + bc1f (not c_ueq_d) so that NaN != NaN is true — see the
// 2016/4/19 note: Double.isNaN is implemented as 'f != f'.  nop() fills
// the branch delay slot.
8073 instruct branchConD_reg_reg(cmpOp cmp, regD src1, regD src2, label labl) %{
8074 match( If cmp (CmpD src1 src2) );
8075 effect(USE labl);
8076 format %{ "BR$cmp $src1, $src2, $labl #@branchConD_reg_reg" %}
8078 ins_encode %{
8079 FloatRegister reg_op1 = $src1$$FloatRegister;
8080 FloatRegister reg_op2 = $src2$$FloatRegister;
8081 Label &L = *($labl$$label);
8082 int flag = $cmp$$cmpcode;
8084 switch(flag)
8085 {
8086 case 0x01: //equal
8087 __ c_eq_d(reg_op1, reg_op2);
8088 if (&L)
8089 __ bc1t(L);
8090 else
8091 __ bc1t((int)0);
8092 break;
8093 case 0x02: //not_equal
8094 //2016/4/19 aoqi: c_ueq_d cannot distinguish NaN from equal. Double.isNaN(Double) is implemented by 'f != f', so the use of c_ueq_d causes bugs.
8095 __ c_eq_d(reg_op1, reg_op2);
8096 if (&L)
8097 __ bc1f(L);
8098 else
8099 __ bc1f((int)0);
8100 break;
8101 case 0x03: //greater
8102 __ c_ule_d(reg_op1, reg_op2);
8103 if(&L)
8104 __ bc1f(L);
8105 else
8106 __ bc1f((int)0);
8107 break;
8108 case 0x04: //greater_equal
8109 __ c_ult_d(reg_op1, reg_op2);
8110 if(&L)
8111 __ bc1f(L);
8112 else
8113 __ bc1f((int)0);
8114 break;
8115 case 0x05: //less
8116 __ c_ult_d(reg_op1, reg_op2);
8117 if(&L)
8118 __ bc1t(L);
8119 else
8120 __ bc1t((int)0);
8121 break;
8122 case 0x06: //less_equal
8123 __ c_ule_d(reg_op1, reg_op2);
8124 if(&L)
8125 __ bc1t(L);
8126 else
8127 __ bc1t((int)0);
8128 break;
8129 default:
8130 Unimplemented();
8131 }
8132 __ nop();
8133 %}
8135 ins_pc_relative(1);
8136 ins_pipe(pipe_slow);
8137 %}
8140 // Call Runtime Instruction
// Direct call into the VM runtime; emission is delegated to the shared
// Java_To_Runtime encoding.  16-byte alignment is requested for the call
// site (ins_alignment).
8141 instruct CallRuntimeDirect(method meth) %{
8142 match(CallRuntime );
8143 effect(USE meth);
8145 ins_cost(300);
8146 format %{ "CALL,runtime #@CallRuntimeDirect" %}
8147 ins_encode( Java_To_Runtime( meth ) );
8148 ins_pipe( pipe_slow );
8149 ins_alignment(16);
8150 %}
8154 //------------------------MemBar Instructions-------------------------------
8155 //Memory barrier flavors
// MemBarAcquire: empty encoding — no instruction is emitted for acquire
// on this target (size(0)).
8157 instruct membar_acquire() %{
8158 match(MemBarAcquire);
8159 ins_cost(0);
8161 size(0);
8162 format %{ "MEMBAR-acquire (empty) @ membar_acquire" %}
8163 ins_encode();
8164 ins_pipe(empty);
8165 %}
// LoadFence: emits a full MIPS sync instruction.
8167 instruct load_fence() %{
8168 match(LoadFence);
8169 ins_cost(400);
8171 format %{ "MEMBAR @ load_fence" %}
8172 ins_encode %{
8173 __ sync();
8174 %}
8175 ins_pipe(pipe_slow);
8176 %}
// MemBarAcquireLock: empty — the acquire semantics are already provided by
// the CAS in the preceding FastLock.
8178 instruct membar_acquire_lock()
8179 %{
8180 match(MemBarAcquireLock);
8181 ins_cost(0);
8183 size(0);
8184 format %{ "MEMBAR-acquire (acquire as part of CAS in prior FastLock so empty encoding) @ membar_acquire_lock" %}
8185 ins_encode();
8186 ins_pipe(empty);
8187 %}
// MemBarRelease: emits a full sync.  The inline warning is original — the
// barrier is required for correctness.
8189 instruct membar_release() %{
8190 match(MemBarRelease);
8191 ins_cost(400);
8193 format %{ "MEMBAR-release @ membar_release" %}
8195 ins_encode %{
8196 // Attention: DO NOT DELETE THIS GUY!
8197 __ sync();
8198 %}
8200 ins_pipe(pipe_slow);
8201 %}
// StoreFence: emits a full sync instruction.
8203 instruct store_fence() %{
8204 match(StoreFence);
8205 ins_cost(400);
8207 format %{ "MEMBAR @ store_fence" %}
8209 ins_encode %{
8210 __ sync();
8211 %}
8213 ins_pipe(pipe_slow);
8214 %}
// MemBarReleaseLock: empty — release semantics are provided by FastUnlock.
8216 instruct membar_release_lock()
8217 %{
8218 match(MemBarReleaseLock);
8219 ins_cost(0);
8221 size(0);
8222 format %{ "MEMBAR-release-lock (release in FastUnlock so empty) @ membar_release_lock" %}
8223 ins_encode();
8224 ins_pipe(empty);
8225 %}
// MemBarVolatile: full sync, skipped entirely on uniprocessor systems
// (the early return emits nothing when !os::is_MP()).
8228 instruct membar_volatile() %{
8229 match(MemBarVolatile);
8230 ins_cost(400);
8232 format %{ "MEMBAR-volatile" %}
8233 ins_encode %{
8234 if( !os::is_MP() ) return; // Not needed on single CPU
8235 __ sync();
8237 %}
8238 ins_pipe(pipe_slow);
8239 %}
// MemBarVolatile elided when the matcher proves a store/load barrier is
// already present (Matcher::post_store_load_barrier); empty encoding.
8241 instruct unnecessary_membar_volatile() %{
8242 match(MemBarVolatile);
8243 predicate(Matcher::post_store_load_barrier(n));
8244 ins_cost(0);
8246 size(0);
8247 format %{ "MEMBAR-volatile (unnecessary so empty encoding) @ unnecessary_membar_volatile" %}
8248 ins_encode( );
8249 ins_pipe(empty);
8250 %}
// MemBarStoreStore: empty encoding on this target.
8252 instruct membar_storestore() %{
8253 match(MemBarStoreStore);
8255 ins_cost(0);
8256 size(0);
8257 format %{ "MEMBAR-storestore (empty encoding) @ membar_storestore" %}
8258 ins_encode( );
8259 ins_pipe(empty);
8260 %}
8262 //----------Move Instructions--------------------------------------------------
// CastX2P (machine word -> pointer): a plain register move, elided when
// source and destination already coincide.
8263 instruct castX2P(mRegP dst, mRegL src) %{
8264 match(Set dst (CastX2P src));
8265 format %{ "castX2P $dst, $src @ castX2P" %}
8266 ins_encode %{
8267 Register src = $src$$Register;
8268 Register dst = $dst$$Register;
8270 if(src != dst)
8271 __ move(dst, src);
8272 %}
8273 ins_cost(10);
8274 ins_pipe( ialu_regI_mov );
8275 %}
// CastP2X (pointer -> machine word): a plain register move, elided when
// source and destination already coincide.
8277 instruct castP2X(mRegL dst, mRegP src ) %{
8278 match(Set dst (CastP2X src));
8280 format %{ "mov $dst, $src\t #@castP2X" %}
8281 ins_encode %{
8282 Register src = $src$$Register;
8283 Register dst = $dst$$Register;
8285 if(src != dst)
8286 __ move(dst, src);
8287 %}
8288 ins_pipe( ialu_regI_mov );
8289 %}
// MoveF2I: raw 32-bit bit move FPU -> GPR via mfc1 (no conversion).
8291 instruct MoveF2I_reg_reg(mRegI dst, regF src) %{
8292 match(Set dst (MoveF2I src));
8293 effect(DEF dst, USE src);
8294 ins_cost(85);
8295 format %{ "MoveF2I $dst, $src @ MoveF2I_reg_reg" %}
8296 ins_encode %{
8297 Register dst = as_Register($dst$$reg);
8298 FloatRegister src = as_FloatRegister($src$$reg);
8300 __ mfc1(dst, src);
8301 %}
8302 ins_pipe( pipe_slow );
8303 %}
// MoveI2F: raw 32-bit bit move GPR -> FPU via mtc1 (no conversion).
8305 instruct MoveI2F_reg_reg(regF dst, mRegI src) %{
8306 match(Set dst (MoveI2F src));
8307 effect(DEF dst, USE src);
8308 ins_cost(85);
8309 format %{ "MoveI2F $dst, $src @ MoveI2F_reg_reg" %}
8310 ins_encode %{
8311 Register src = as_Register($src$$reg);
8312 FloatRegister dst = as_FloatRegister($dst$$reg);
8314 __ mtc1(src, dst);
8315 %}
8316 ins_pipe( pipe_slow );
8317 %}
// MoveD2L: raw 64-bit bit move FPU -> GPR via dmfc1 (no conversion).
8319 instruct MoveD2L_reg_reg(mRegL dst, regD src) %{
8320 match(Set dst (MoveD2L src));
8321 effect(DEF dst, USE src);
8322 ins_cost(85);
8323 format %{ "MoveD2L $dst, $src @ MoveD2L_reg_reg" %}
8324 ins_encode %{
8325 Register dst = as_Register($dst$$reg);
8326 FloatRegister src = as_FloatRegister($src$$reg);
8328 __ dmfc1(dst, src);
8329 %}
8330 ins_pipe( pipe_slow );
8331 %}
// MoveL2D: raw 64-bit bit move GPR -> FPU via dmtc1 (no conversion).
8333 instruct MoveL2D_reg_reg(regD dst, mRegL src) %{
8334 match(Set dst (MoveL2D src));
8335 effect(DEF dst, USE src);
8336 ins_cost(85);
8337 format %{ "MoveL2D $dst, $src @ MoveL2D_reg_reg" %}
8338 ins_encode %{
8339 FloatRegister dst = as_FloatRegister($dst$$reg);
8340 Register src = as_Register($src$$reg);
8342 __ dmtc1(src, dst);
8343 %}
8344 ins_pipe( pipe_slow );
8345 %}
8347 //----------Conditional Move---------------------------------------------------
8348 // Conditional move
// Branchless conditional move of an int, predicated on a signed int compare.
// The compare result (difference or slt flag) goes into AT, then movz/movn
// copies $src into $dst only when the condition holds; $dst otherwise keeps
// its prior value (it appears on both sides of the Binary, so it is live-in).
8349 instruct cmovI_cmpI_reg_reg(mRegI dst, mRegI src, mRegI tmp1, mRegI tmp2, cmpOp cop ) %{
8350 match(Set dst (CMoveI (Binary cop (CmpI tmp1 tmp2)) (Binary dst src)));
8351 ins_cost(80);
8352 format %{
8353 "CMP$cop $tmp1, $tmp2\t @cmovI_cmpI_reg_reg\n"
8354 "\tCMOV $dst,$src \t @cmovI_cmpI_reg_reg"
8355 %}
8357 ins_encode %{
8358 Register op1 = $tmp1$$Register;
8359 Register op2 = $tmp2$$Register;
8360 Register dst = $dst$$Register;
8361 Register src = $src$$Register;
8362 int flag = $cop$$cmpcode;
8364 switch(flag)
8365 {
8366 case 0x01: //equal
8367 __ subu32(AT, op1, op2);
8368 __ movz(dst, src, AT);
8369 break;
8371 case 0x02: //not_equal
8372 __ subu32(AT, op1, op2);
8373 __ movn(dst, src, AT);
8374 break;
8376 case 0x03: //great
8377 __ slt(AT, op2, op1);
8378 __ movn(dst, src, AT);
8379 break;
8381 case 0x04: //great_equal
8382 __ slt(AT, op1, op2);
8383 __ movz(dst, src, AT);
8384 break;
8386 case 0x05: //less
8387 __ slt(AT, op1, op2);
8388 __ movn(dst, src, AT);
8389 break;
8391 case 0x06: //less_equal
8392 __ slt(AT, op2, op1);
8393 __ movz(dst, src, AT);
8394 break;
8396 default:
8397 Unimplemented();
8398 }
8399 %}
8401 ins_pipe( pipe_slow );
8402 %}
// Branchless conditional move of an int, predicated on an unsigned pointer
// compare.  eq/ne use a full-width subu of the pointers into AT; ordered
// cases use sltu.  movz/movn then selects $src into $dst.
8404 instruct cmovI_cmpP_reg_reg(mRegI dst, mRegI src, mRegP tmp1, mRegP tmp2, cmpOpU cop ) %{
8405 match(Set dst (CMoveI (Binary cop (CmpP tmp1 tmp2)) (Binary dst src)));
8406 ins_cost(80);
8407 format %{
8408 "CMPU$cop $tmp1,$tmp2\t @cmovI_cmpP_reg_reg\n\t"
8409 "CMOV $dst,$src\t @cmovI_cmpP_reg_reg"
8410 %}
8411 ins_encode %{
8412 Register op1 = $tmp1$$Register;
8413 Register op2 = $tmp2$$Register;
8414 Register dst = $dst$$Register;
8415 Register src = $src$$Register;
8416 int flag = $cop$$cmpcode;
8418 switch(flag)
8419 {
8420 case 0x01: //equal
8421 __ subu(AT, op1, op2);
8422 __ movz(dst, src, AT);
8423 break;
8425 case 0x02: //not_equal
8426 __ subu(AT, op1, op2);
8427 __ movn(dst, src, AT);
8428 break;
8430 case 0x03: //above
8431 __ sltu(AT, op2, op1);
8432 __ movn(dst, src, AT);
8433 break;
8435 case 0x04: //above_equal
8436 __ sltu(AT, op1, op2);
8437 __ movz(dst, src, AT);
8438 break;
8440 case 0x05: //below
8441 __ sltu(AT, op1, op2);
8442 __ movn(dst, src, AT);
8443 break;
8445 case 0x06: //below_equal
8446 __ sltu(AT, op2, op1);
8447 __ movz(dst, src, AT);
8448 break;
8450 default:
8451 Unimplemented();
8452 }
8453 %}
8455 ins_pipe( pipe_slow );
8456 %}
// Branchless conditional move of an int, predicated on an unsigned
// compressed-oop compare.  eq/ne use 32-bit subu32 (narrow oops are 32-bit);
// ordered cases use sltu.  movz/movn selects $src into $dst.
8458 instruct cmovI_cmpN_reg_reg(mRegI dst, mRegI src, mRegN tmp1, mRegN tmp2, cmpOpU cop ) %{
8459 match(Set dst (CMoveI (Binary cop (CmpN tmp1 tmp2)) (Binary dst src)));
8460 ins_cost(80);
8461 format %{
8462 "CMPU$cop $tmp1,$tmp2\t @cmovI_cmpN_reg_reg\n\t"
8463 "CMOV $dst,$src\t @cmovI_cmpN_reg_reg"
8464 %}
8465 ins_encode %{
8466 Register op1 = $tmp1$$Register;
8467 Register op2 = $tmp2$$Register;
8468 Register dst = $dst$$Register;
8469 Register src = $src$$Register;
8470 int flag = $cop$$cmpcode;
8472 switch(flag)
8473 {
8474 case 0x01: //equal
8475 __ subu32(AT, op1, op2);
8476 __ movz(dst, src, AT);
8477 break;
8479 case 0x02: //not_equal
8480 __ subu32(AT, op1, op2);
8481 __ movn(dst, src, AT);
8482 break;
8484 case 0x03: //above
8485 __ sltu(AT, op2, op1);
8486 __ movn(dst, src, AT);
8487 break;
8489 case 0x04: //above_equal
8490 __ sltu(AT, op1, op2);
8491 __ movz(dst, src, AT);
8492 break;
8494 case 0x05: //below
8495 __ sltu(AT, op1, op2);
8496 __ movn(dst, src, AT);
8497 break;
8499 case 0x06: //below_equal
8500 __ sltu(AT, op2, op1);
8501 __ movz(dst, src, AT);
8502 break;
8504 default:
8505 Unimplemented();
8506 }
8507 %}
8509 ins_pipe( pipe_slow );
8510 %}
// Branchless conditional move of a pointer, predicated on an unsigned
// compressed-oop compare.  Same compare scheme as cmovI_cmpN_reg_reg
// (subu32 for eq/ne, sltu for ordered); movz/movn selects $src into $dst.
8512 instruct cmovP_cmpN_reg_reg(mRegP dst, mRegP src, mRegN tmp1, mRegN tmp2, cmpOpU cop ) %{
8513 match(Set dst (CMoveP (Binary cop (CmpN tmp1 tmp2)) (Binary dst src)));
8514 ins_cost(80);
8515 format %{
8516 "CMPU$cop $tmp1,$tmp2\t @cmovP_cmpN_reg_reg\n\t"
8517 "CMOV $dst,$src\t @cmovP_cmpN_reg_reg"
8518 %}
8519 ins_encode %{
8520 Register op1 = $tmp1$$Register;
8521 Register op2 = $tmp2$$Register;
8522 Register dst = $dst$$Register;
8523 Register src = $src$$Register;
8524 int flag = $cop$$cmpcode;
8526 switch(flag)
8527 {
8528 case 0x01: //equal
8529 __ subu32(AT, op1, op2);
8530 __ movz(dst, src, AT);
8531 break;
8533 case 0x02: //not_equal
8534 __ subu32(AT, op1, op2);
8535 __ movn(dst, src, AT);
8536 break;
8538 case 0x03: //above
8539 __ sltu(AT, op2, op1);
8540 __ movn(dst, src, AT);
8541 break;
8543 case 0x04: //above_equal
8544 __ sltu(AT, op1, op2);
8545 __ movz(dst, src, AT);
8546 break;
8548 case 0x05: //below
8549 __ sltu(AT, op1, op2);
8550 __ movn(dst, src, AT);
8551 break;
8553 case 0x06: //below_equal
8554 __ sltu(AT, op2, op1);
8555 __ movz(dst, src, AT);
8556 break;
8558 default:
8559 Unimplemented();
8560 }
8561 %}
8563 ins_pipe( pipe_slow );
8564 %}
// Conditional move of a compressed-oop register guarded by an unsigned
// compare of two full-width pointer registers (hence 64-bit subu / sltu).
// Branchless via AT + movz/movn.  Clobbers AT.
instruct cmovN_cmpP_reg_reg(mRegN dst, mRegN src, mRegP tmp1, mRegP tmp2, cmpOpU cop ) %{
  match(Set dst (CMoveN (Binary cop (CmpP tmp1 tmp2)) (Binary dst src)));
  ins_cost(80);
  format %{
    "CMPU$cop $tmp1,$tmp2\t @cmovN_cmpP_reg_reg\n\t"
    "CMOV $dst,$src\t @cmovN_cmpP_reg_reg"
  %}
  ins_encode %{
    Register op1 = $tmp1$$Register;
    Register op2 = $tmp2$$Register;
    Register dst = $dst$$Register;
    Register src = $src$$Register;
    int flag = $cop$$cmpcode;

    switch(flag)
    {
      case 0x01: //equal
        __ subu(AT, op1, op2);
        __ movz(dst, src, AT);
        break;

      case 0x02: //not_equal
        __ subu(AT, op1, op2);
        __ movn(dst, src, AT);
        break;

      case 0x03: //above
        __ sltu(AT, op2, op1);
        __ movn(dst, src, AT);
        break;

      case 0x04: //above_equal
        __ sltu(AT, op1, op2);
        __ movz(dst, src, AT);
        break;

      case 0x05: //below
        __ sltu(AT, op1, op2);
        __ movn(dst, src, AT);
        break;

      case 0x06: //below_equal
        __ sltu(AT, op2, op1);
        __ movz(dst, src, AT);
        break;

      default:
        Unimplemented();
    }
  %}

  ins_pipe( pipe_slow );
%}
// Conditional move of a pointer register guarded by a double compare.
// c.<cond>.d sets the FPU condition flag; movt (move if flag set) / movf
// (move if flag clear) commits src into dst.  The mix of ordered
// (c_ole/c_olt) and unordered (c_eq/c_ult/c_ule) compare variants fixes
// the behavior for NaN (unordered) operands in each case.
instruct cmovP_cmpD_reg_reg(mRegP dst, mRegP src, regD tmp1, regD tmp2, cmpOp cop ) %{
  match(Set dst (CMoveP (Binary cop (CmpD tmp1 tmp2)) (Binary dst src)));
  ins_cost(80);
  format %{
    "CMP$cop $tmp1, $tmp2\t @cmovP_cmpD_reg_reg\n"
    "\tCMOV $dst,$src \t @cmovP_cmpD_reg_reg"
  %}
  ins_encode %{
    FloatRegister reg_op1 = as_FloatRegister($tmp1$$reg);
    FloatRegister reg_op2 = as_FloatRegister($tmp2$$reg);
    Register dst = as_Register($dst$$reg);
    Register src = as_Register($src$$reg);

    int flag = $cop$$cmpcode;

    switch(flag)
    {
      case 0x01: //equal
        __ c_eq_d(reg_op1, reg_op2);
        __ movt(dst, src);
        break;
      case 0x02: //not_equal: c.eq is false on NaN, so movf treats NaN as "not equal"
        __ c_eq_d(reg_op1, reg_op2);
        __ movf(dst, src);
        break;
      case 0x03: //greater: !(op1 <= op2), ordered
        __ c_ole_d(reg_op1, reg_op2);
        __ movf(dst, src);
        break;
      case 0x04: //greater_equal: !(op1 < op2), ordered
        __ c_olt_d(reg_op1, reg_op2);
        __ movf(dst, src);
        break;
      case 0x05: //less
        __ c_ult_d(reg_op1, reg_op2);
        __ movt(dst, src);
        break;
      case 0x06: //less_equal
        __ c_ule_d(reg_op1, reg_op2);
        __ movt(dst, src);
        break;
      default:
        Unimplemented();
    }
  %}

  ins_pipe( pipe_slow );
%}
// Conditional move of a compressed-oop register guarded by an unsigned
// compare of two compressed-oop registers (32-bit values, hence subu32).
// Branchless via AT + movz/movn.  Clobbers AT.
instruct cmovN_cmpN_reg_reg(mRegN dst, mRegN src, mRegN tmp1, mRegN tmp2, cmpOpU cop ) %{
  match(Set dst (CMoveN (Binary cop (CmpN tmp1 tmp2)) (Binary dst src)));
  ins_cost(80);
  format %{
    "CMPU$cop $tmp1,$tmp2\t @cmovN_cmpN_reg_reg\n\t"
    "CMOV $dst,$src\t @cmovN_cmpN_reg_reg"
  %}
  ins_encode %{
    Register op1 = $tmp1$$Register;
    Register op2 = $tmp2$$Register;
    Register dst = $dst$$Register;
    Register src = $src$$Register;
    int flag = $cop$$cmpcode;

    switch(flag)
    {
      case 0x01: //equal
        __ subu32(AT, op1, op2);
        __ movz(dst, src, AT);
        break;

      case 0x02: //not_equal
        __ subu32(AT, op1, op2);
        __ movn(dst, src, AT);
        break;

      case 0x03: //above
        __ sltu(AT, op2, op1);
        __ movn(dst, src, AT);
        break;

      case 0x04: //above_equal
        __ sltu(AT, op1, op2);
        __ movz(dst, src, AT);
        break;

      case 0x05: //below
        __ sltu(AT, op1, op2);
        __ movn(dst, src, AT);
        break;

      case 0x06: //below_equal
        __ sltu(AT, op2, op1);
        __ movz(dst, src, AT);
        break;

      default:
        Unimplemented();
    }
  %}

  ins_pipe( pipe_slow );
%}
// Conditional move of an int register guarded by an unsigned int compare.
// Branchless via AT + movz/movn.  Clobbers AT.
// NOTE(review): equality uses subu rather than subu32 (cf. the CmpN rules);
// AT is only tested against zero, which is correct as long as both operands
// carry consistently sign-extended 32-bit values -- confirm against the
// port's register conventions.
instruct cmovI_cmpU_reg_reg(mRegI dst, mRegI src, mRegI tmp1, mRegI tmp2, cmpOpU cop ) %{
  match(Set dst (CMoveI (Binary cop (CmpU tmp1 tmp2)) (Binary dst src)));
  ins_cost(80);
  format %{
    "CMPU$cop $tmp1,$tmp2\t @cmovI_cmpU_reg_reg\n\t"
    "CMOV $dst,$src\t @cmovI_cmpU_reg_reg"
  %}
  ins_encode %{
    Register op1 = $tmp1$$Register;
    Register op2 = $tmp2$$Register;
    Register dst = $dst$$Register;
    Register src = $src$$Register;
    int flag = $cop$$cmpcode;

    switch(flag)
    {
      case 0x01: //equal
        __ subu(AT, op1, op2);
        __ movz(dst, src, AT);
        break;

      case 0x02: //not_equal
        __ subu(AT, op1, op2);
        __ movn(dst, src, AT);
        break;

      case 0x03: //above
        __ sltu(AT, op2, op1);
        __ movn(dst, src, AT);
        break;

      case 0x04: //above_equal
        __ sltu(AT, op1, op2);
        __ movz(dst, src, AT);
        break;

      case 0x05: //below
        __ sltu(AT, op1, op2);
        __ movn(dst, src, AT);
        break;

      case 0x06: //below_equal
        __ sltu(AT, op2, op1);
        __ movz(dst, src, AT);
        break;

      default:
        Unimplemented();
    }
  %}

  ins_pipe( pipe_slow );
%}
// Conditional move of an int register guarded by a signed long compare.
// Branchless: slt / subu materialize the test in AT, then movz (move if
// AT == 0) / movn (move if AT != 0) commits src.  Clobbers AT.
// The equality cases only test AT against zero, so overflow in the
// subtraction cannot produce a wrong answer here.
instruct cmovI_cmpL_reg_reg(mRegI dst, mRegI src, mRegL tmp1, mRegL tmp2, cmpOp cop ) %{
  match(Set dst (CMoveI (Binary cop (CmpL tmp1 tmp2)) (Binary dst src)));
  ins_cost(80);
  format %{
    "CMP$cop $tmp1, $tmp2\t @cmovI_cmpL_reg_reg\n"
    "\tCMOV $dst,$src \t @cmovI_cmpL_reg_reg"
  %}
  ins_encode %{
    Register opr1 = as_Register($tmp1$$reg);
    Register opr2 = as_Register($tmp2$$reg);
    Register dst = $dst$$Register;
    Register src = $src$$Register;
    int flag = $cop$$cmpcode;

    switch(flag)
    {
      case 0x01: //equal
        __ subu(AT, opr1, opr2);
        __ movz(dst, src, AT);
        break;

      case 0x02: //not_equal
        __ subu(AT, opr1, opr2);
        __ movn(dst, src, AT);
        break;

      case 0x03: //greater
        __ slt(AT, opr2, opr1);
        __ movn(dst, src, AT);
        break;

      case 0x04: //greater_equal
        __ slt(AT, opr1, opr2);
        __ movz(dst, src, AT);
        break;

      case 0x05: //less
        __ slt(AT, opr1, opr2);
        __ movn(dst, src, AT);
        break;

      case 0x06: //less_equal
        __ slt(AT, opr2, opr1);
        __ movz(dst, src, AT);
        break;

      default:
        Unimplemented();
    }
  %}

  ins_pipe( pipe_slow );
%}
// Conditional move of a pointer register guarded by a signed long compare.
// Branchless via AT + movz/movn (same scheme as cmovI_cmpL_reg_reg).
// Clobbers AT.
instruct cmovP_cmpL_reg_reg(mRegP dst, mRegP src, mRegL tmp1, mRegL tmp2, cmpOp cop ) %{
  match(Set dst (CMoveP (Binary cop (CmpL tmp1 tmp2)) (Binary dst src)));
  ins_cost(80);
  format %{
    "CMP$cop $tmp1, $tmp2\t @cmovP_cmpL_reg_reg\n"
    "\tCMOV $dst,$src \t @cmovP_cmpL_reg_reg"
  %}
  ins_encode %{
    Register opr1 = as_Register($tmp1$$reg);
    Register opr2 = as_Register($tmp2$$reg);
    Register dst = $dst$$Register;
    Register src = $src$$Register;
    int flag = $cop$$cmpcode;

    switch(flag)
    {
      case 0x01: //equal
        __ subu(AT, opr1, opr2);
        __ movz(dst, src, AT);
        break;

      case 0x02: //not_equal
        __ subu(AT, opr1, opr2);
        __ movn(dst, src, AT);
        break;

      case 0x03: //greater
        __ slt(AT, opr2, opr1);
        __ movn(dst, src, AT);
        break;

      case 0x04: //greater_equal
        __ slt(AT, opr1, opr2);
        __ movz(dst, src, AT);
        break;

      case 0x05: //less
        __ slt(AT, opr1, opr2);
        __ movn(dst, src, AT);
        break;

      case 0x06: //less_equal
        __ slt(AT, opr2, opr1);
        __ movz(dst, src, AT);
        break;

      default:
        Unimplemented();
    }
  %}

  ins_pipe( pipe_slow );
%}
// Conditional move of an int register guarded by a double compare.
// c.<cond>.d sets the FPU condition flag; movt / movf move src into dst
// when the flag is set / clear.  Ordered vs. unordered compare variants
// determine each case's NaN behavior.
instruct cmovI_cmpD_reg_reg(mRegI dst, mRegI src, regD tmp1, regD tmp2, cmpOp cop ) %{
  match(Set dst (CMoveI (Binary cop (CmpD tmp1 tmp2)) (Binary dst src)));
  ins_cost(80);
  format %{
    "CMP$cop $tmp1, $tmp2\t @cmovI_cmpD_reg_reg\n"
    "\tCMOV $dst,$src \t @cmovI_cmpD_reg_reg"
  %}
  ins_encode %{
    FloatRegister reg_op1 = as_FloatRegister($tmp1$$reg);
    FloatRegister reg_op2 = as_FloatRegister($tmp2$$reg);
    Register dst = as_Register($dst$$reg);
    Register src = as_Register($src$$reg);

    int flag = $cop$$cmpcode;

    switch(flag)
    {
      case 0x01: //equal
        __ c_eq_d(reg_op1, reg_op2);
        __ movt(dst, src);
        break;
      case 0x02: //not_equal
        // See branchConD_reg_reg: compare with c_eq_d and move on a CLEAR
        // flag (movf), so unordered (NaN) operands count as "not equal".
        __ c_eq_d(reg_op1, reg_op2);
        __ movf(dst, src);
        break;
      case 0x03: //greater
        __ c_ole_d(reg_op1, reg_op2);
        __ movf(dst, src);
        break;
      case 0x04: //greater_equal
        __ c_olt_d(reg_op1, reg_op2);
        __ movf(dst, src);
        break;
      case 0x05: //less
        __ c_ult_d(reg_op1, reg_op2);
        __ movt(dst, src);
        break;
      case 0x06: //less_equal
        __ c_ule_d(reg_op1, reg_op2);
        __ movt(dst, src);
        break;
      default:
        Unimplemented();
    }
  %}

  ins_pipe( pipe_slow );
%}
// Conditional move of a pointer register guarded by an unsigned pointer
// compare (64-bit subu / sltu).  Branchless via AT + movz/movn.
// Clobbers AT.
instruct cmovP_cmpP_reg_reg(mRegP dst, mRegP src, mRegP tmp1, mRegP tmp2, cmpOpU cop ) %{
  match(Set dst (CMoveP (Binary cop (CmpP tmp1 tmp2)) (Binary dst src)));
  ins_cost(80);
  format %{
    "CMPU$cop $tmp1,$tmp2\t @cmovP_cmpP_reg_reg\n\t"
    "CMOV $dst,$src\t @cmovP_cmpP_reg_reg"
  %}
  ins_encode %{
    Register op1 = $tmp1$$Register;
    Register op2 = $tmp2$$Register;
    Register dst = $dst$$Register;
    Register src = $src$$Register;
    int flag = $cop$$cmpcode;

    switch(flag)
    {
      case 0x01: //equal
        __ subu(AT, op1, op2);
        __ movz(dst, src, AT);
        break;

      case 0x02: //not_equal
        __ subu(AT, op1, op2);
        __ movn(dst, src, AT);
        break;

      case 0x03: //above
        __ sltu(AT, op2, op1);
        __ movn(dst, src, AT);
        break;

      case 0x04: //above_equal
        __ sltu(AT, op1, op2);
        __ movz(dst, src, AT);
        break;

      case 0x05: //below
        __ sltu(AT, op1, op2);
        __ movn(dst, src, AT);
        break;

      case 0x06: //below_equal
        __ sltu(AT, op2, op1);
        __ movz(dst, src, AT);
        break;

      default:
        Unimplemented();
    }
  %}

  ins_pipe( pipe_slow );
%}
// Conditional move of a pointer register guarded by a SIGNED int compare
// (cmpOp + slt).  Branchless via AT + movz/movn.  Clobbers AT.
// (Case comments previously said above/below, which are the unsigned
// names; corrected to the signed greater/less this code implements.)
instruct cmovP_cmpI_reg_reg(mRegP dst, mRegP src, mRegI tmp1, mRegI tmp2, cmpOp cop ) %{
  match(Set dst (CMoveP (Binary cop (CmpI tmp1 tmp2)) (Binary dst src)));
  ins_cost(80);
  format %{
    "CMP$cop $tmp1,$tmp2\t @cmovP_cmpI_reg_reg\n\t"
    "CMOV $dst,$src\t @cmovP_cmpI_reg_reg"
  %}
  ins_encode %{
    Register op1 = $tmp1$$Register;
    Register op2 = $tmp2$$Register;
    Register dst = $dst$$Register;
    Register src = $src$$Register;
    int flag = $cop$$cmpcode;

    switch(flag)
    {
      case 0x01: //equal
        __ subu32(AT, op1, op2);
        __ movz(dst, src, AT);
        break;

      case 0x02: //not_equal
        __ subu32(AT, op1, op2);
        __ movn(dst, src, AT);
        break;

      case 0x03: //greater
        __ slt(AT, op2, op1);
        __ movn(dst, src, AT);
        break;

      case 0x04: //greater_equal
        __ slt(AT, op1, op2);
        __ movz(dst, src, AT);
        break;

      case 0x05: //less
        __ slt(AT, op1, op2);
        __ movn(dst, src, AT);
        break;

      case 0x06: //less_equal
        __ slt(AT, op2, op1);
        __ movz(dst, src, AT);
        break;

      default:
        Unimplemented();
    }
  %}

  ins_pipe( pipe_slow );
%}
// Conditional move of a compressed-oop register guarded by a SIGNED int
// compare (cmpOp + slt).  Branchless via AT + movz/movn.  Clobbers AT.
// (Case comments corrected from the unsigned above/below names to the
// signed greater/less this code implements.)
instruct cmovN_cmpI_reg_reg(mRegN dst, mRegN src, mRegI tmp1, mRegI tmp2, cmpOp cop ) %{
  match(Set dst (CMoveN (Binary cop (CmpI tmp1 tmp2)) (Binary dst src)));
  ins_cost(80);
  format %{
    "CMP$cop $tmp1,$tmp2\t @cmovN_cmpI_reg_reg\n\t"
    "CMOV $dst,$src\t @cmovN_cmpI_reg_reg"
  %}
  ins_encode %{
    Register op1 = $tmp1$$Register;
    Register op2 = $tmp2$$Register;
    Register dst = $dst$$Register;
    Register src = $src$$Register;
    int flag = $cop$$cmpcode;

    switch(flag)
    {
      case 0x01: //equal
        __ subu32(AT, op1, op2);
        __ movz(dst, src, AT);
        break;

      case 0x02: //not_equal
        __ subu32(AT, op1, op2);
        __ movn(dst, src, AT);
        break;

      case 0x03: //greater
        __ slt(AT, op2, op1);
        __ movn(dst, src, AT);
        break;

      case 0x04: //greater_equal
        __ slt(AT, op1, op2);
        __ movz(dst, src, AT);
        break;

      case 0x05: //less
        __ slt(AT, op1, op2);
        __ movn(dst, src, AT);
        break;

      case 0x06: //less_equal
        __ slt(AT, op2, op1);
        __ movz(dst, src, AT);
        break;

      default:
        Unimplemented();
    }
  %}

  ins_pipe( pipe_slow );
%}
// Conditional move of a long register guarded by a signed int compare.
// Branchless via AT + movz/movn.  Clobbers AT.
instruct cmovL_cmpI_reg_reg(mRegL dst, mRegL src, mRegI tmp1, mRegI tmp2, cmpOp cop ) %{
  match(Set dst (CMoveL (Binary cop (CmpI tmp1 tmp2)) (Binary dst src)));
  ins_cost(80);
  format %{
    "CMP$cop $tmp1, $tmp2\t @cmovL_cmpI_reg_reg\n"
    "\tCMOV $dst,$src \t @cmovL_cmpI_reg_reg"
  %}

  ins_encode %{
    Register op1 = $tmp1$$Register;
    Register op2 = $tmp2$$Register;
    Register dst = as_Register($dst$$reg);
    Register src = as_Register($src$$reg);
    int flag = $cop$$cmpcode;

    switch(flag)
    {
      case 0x01: //equal
        __ subu32(AT, op1, op2);
        __ movz(dst, src, AT);
        break;

      case 0x02: //not_equal
        __ subu32(AT, op1, op2);
        __ movn(dst, src, AT);
        break;

      case 0x03: //greater
        __ slt(AT, op2, op1);
        __ movn(dst, src, AT);
        break;

      case 0x04: //greater_equal
        __ slt(AT, op1, op2);
        __ movz(dst, src, AT);
        break;

      case 0x05: //less
        __ slt(AT, op1, op2);
        __ movn(dst, src, AT);
        break;

      case 0x06: //less_equal
        __ slt(AT, op2, op1);
        __ movz(dst, src, AT);
        break;

      default:
        Unimplemented();
    }
  %}

  ins_pipe( pipe_slow );
%}
// Conditional move of a long register guarded by a signed long compare.
// Branchless via AT + movz/movn.  Clobbers AT.
instruct cmovL_cmpL_reg_reg(mRegL dst, mRegL src, mRegL tmp1, mRegL tmp2, cmpOp cop ) %{
  match(Set dst (CMoveL (Binary cop (CmpL tmp1 tmp2)) (Binary dst src)));
  ins_cost(80);
  format %{
    "CMP$cop $tmp1, $tmp2\t @cmovL_cmpL_reg_reg\n"
    "\tCMOV $dst,$src \t @cmovL_cmpL_reg_reg"
  %}
  ins_encode %{
    Register opr1 = as_Register($tmp1$$reg);
    Register opr2 = as_Register($tmp2$$reg);
    Register dst = as_Register($dst$$reg);
    Register src = as_Register($src$$reg);
    int flag = $cop$$cmpcode;

    switch(flag)
    {
      case 0x01: //equal
        __ subu(AT, opr1, opr2);
        __ movz(dst, src, AT);
        break;

      case 0x02: //not_equal
        __ subu(AT, opr1, opr2);
        __ movn(dst, src, AT);
        break;

      case 0x03: //greater
        __ slt(AT, opr2, opr1);
        __ movn(dst, src, AT);
        break;

      case 0x04: //greater_equal
        __ slt(AT, opr1, opr2);
        __ movz(dst, src, AT);
        break;

      case 0x05: //less
        __ slt(AT, opr1, opr2);
        __ movn(dst, src, AT);
        break;

      case 0x06: //less_equal
        __ slt(AT, opr2, opr1);
        __ movz(dst, src, AT);
        break;

      default:
        Unimplemented();
    }
  %}

  ins_pipe( pipe_slow );
%}
// Conditional move of a long register guarded by an unsigned compare of
// two compressed-oop (32-bit) registers.  Branchless via AT + movz/movn.
// Clobbers AT.
instruct cmovL_cmpN_reg_reg(mRegL dst, mRegL src, mRegN tmp1, mRegN tmp2, cmpOpU cop ) %{
  match(Set dst (CMoveL (Binary cop (CmpN tmp1 tmp2)) (Binary dst src)));
  ins_cost(80);
  format %{
    "CMPU$cop $tmp1,$tmp2\t @cmovL_cmpN_reg_reg\n\t"
    "CMOV $dst,$src\t @cmovL_cmpN_reg_reg"
  %}
  ins_encode %{
    Register op1 = $tmp1$$Register;
    Register op2 = $tmp2$$Register;
    Register dst = $dst$$Register;
    Register src = $src$$Register;
    int flag = $cop$$cmpcode;

    switch(flag)
    {
      case 0x01: //equal
        __ subu32(AT, op1, op2);
        __ movz(dst, src, AT);
        break;

      case 0x02: //not_equal
        __ subu32(AT, op1, op2);
        __ movn(dst, src, AT);
        break;

      case 0x03: //above
        __ sltu(AT, op2, op1);
        __ movn(dst, src, AT);
        break;

      case 0x04: //above_equal
        __ sltu(AT, op1, op2);
        __ movz(dst, src, AT);
        break;

      case 0x05: //below
        __ sltu(AT, op1, op2);
        __ movn(dst, src, AT);
        break;

      case 0x06: //below_equal
        __ sltu(AT, op2, op1);
        __ movz(dst, src, AT);
        break;

      default:
        Unimplemented();
    }
  %}

  ins_pipe( pipe_slow );
%}
// Conditional move of a long register guarded by a double compare.
// c.<cond>.d sets the FPU condition flag; movt / movf move src into dst
// when the flag is set / clear.  Ordered vs. unordered compare variants
// determine each case's NaN behavior (cf. cmovI_cmpD_reg_reg).
instruct cmovL_cmpD_reg_reg(mRegL dst, mRegL src, regD tmp1, regD tmp2, cmpOp cop ) %{
  match(Set dst (CMoveL (Binary cop (CmpD tmp1 tmp2)) (Binary dst src)));
  ins_cost(80);
  format %{
    "CMP$cop $tmp1, $tmp2\t @cmovL_cmpD_reg_reg\n"
    "\tCMOV $dst,$src \t @cmovL_cmpD_reg_reg"
  %}
  ins_encode %{
    FloatRegister reg_op1 = as_FloatRegister($tmp1$$reg);
    FloatRegister reg_op2 = as_FloatRegister($tmp2$$reg);
    Register dst = as_Register($dst$$reg);
    Register src = as_Register($src$$reg);

    int flag = $cop$$cmpcode;

    switch(flag)
    {
      case 0x01: //equal
        __ c_eq_d(reg_op1, reg_op2);
        __ movt(dst, src);
        break;
      case 0x02: //not_equal
        __ c_eq_d(reg_op1, reg_op2);
        __ movf(dst, src);
        break;
      case 0x03: //greater
        __ c_ole_d(reg_op1, reg_op2);
        __ movf(dst, src);
        break;
      case 0x04: //greater_equal
        __ c_olt_d(reg_op1, reg_op2);
        __ movf(dst, src);
        break;
      case 0x05: //less
        __ c_ult_d(reg_op1, reg_op2);
        __ movt(dst, src);
        break;
      case 0x06: //less_equal
        __ c_ule_d(reg_op1, reg_op2);
        __ movt(dst, src);
        break;
      default:
        Unimplemented();
    }
  %}

  ins_pipe( pipe_slow );
%}
// Conditional move of a double register guarded by a double compare.
// c.<cond>.d sets the FPU condition flag; movt_d / movf_d move the FP
// source into dst when the flag is set / clear.
instruct cmovD_cmpD_reg_reg(regD dst, regD src, regD tmp1, regD tmp2, cmpOp cop ) %{
  match(Set dst (CMoveD (Binary cop (CmpD tmp1 tmp2)) (Binary dst src)));
  ins_cost(200);
  format %{
    "CMP$cop $tmp1, $tmp2\t @cmovD_cmpD_reg_reg\n"
    "\tCMOV $dst,$src \t @cmovD_cmpD_reg_reg"
  %}
  ins_encode %{
    FloatRegister reg_op1 = as_FloatRegister($tmp1$$reg);
    FloatRegister reg_op2 = as_FloatRegister($tmp2$$reg);
    FloatRegister dst = as_FloatRegister($dst$$reg);
    FloatRegister src = as_FloatRegister($src$$reg);

    int flag = $cop$$cmpcode;

    switch(flag)
    {
      case 0x01: //equal
        __ c_eq_d(reg_op1, reg_op2);
        __ movt_d(dst, src);
        break;
      case 0x02: //not_equal
        __ c_eq_d(reg_op1, reg_op2);
        __ movf_d(dst, src);
        break;
      case 0x03: //greater
        __ c_ole_d(reg_op1, reg_op2);
        __ movf_d(dst, src);
        break;
      case 0x04: //greater_equal
        __ c_olt_d(reg_op1, reg_op2);
        __ movf_d(dst, src);
        break;
      case 0x05: //less
        __ c_ult_d(reg_op1, reg_op2);
        __ movt_d(dst, src);
        break;
      case 0x06: //less_equal
        __ c_ule_d(reg_op1, reg_op2);
        __ movt_d(dst, src);
        break;
      default:
        Unimplemented();
    }
  %}

  ins_pipe( pipe_slow );
%}
// Conditional move of a float register guarded by a signed int compare.
// FPR destinations have no movz/movn form, so each case branches AROUND
// the mov_s on the inverted condition; a nop fills the branch delay slot.
// Clobbers AT in the slt-based cases.
instruct cmovF_cmpI_reg_reg(regF dst, regF src, mRegI tmp1, mRegI tmp2, cmpOp cop ) %{
  match(Set dst (CMoveF (Binary cop (CmpI tmp1 tmp2)) (Binary dst src)));
  ins_cost(200);
  format %{
    "CMP$cop $tmp1, $tmp2\t @cmovF_cmpI_reg_reg\n"
    "\tCMOV $dst, $src \t @cmovF_cmpI_reg_reg"
  %}

  ins_encode %{
    Register op1 = $tmp1$$Register;
    Register op2 = $tmp2$$Register;
    FloatRegister dst = as_FloatRegister($dst$$reg);
    FloatRegister src = as_FloatRegister($src$$reg);
    int flag = $cop$$cmpcode;
    Label L;

    switch(flag)
    {
      case 0x01: //equal
        __ bne(op1, op2, L);
        __ nop();
        __ mov_s(dst, src);
        __ bind(L);
        break;
      case 0x02: //not_equal
        __ beq(op1, op2, L);
        __ nop();
        __ mov_s(dst, src);
        __ bind(L);
        break;
      case 0x03: //greater
        __ slt(AT, op2, op1);
        __ beq(AT, R0, L);
        __ nop();
        __ mov_s(dst, src);
        __ bind(L);
        break;
      case 0x04: //greater_equal
        __ slt(AT, op1, op2);
        __ bne(AT, R0, L);
        __ nop();
        __ mov_s(dst, src);
        __ bind(L);
        break;
      case 0x05: //less
        __ slt(AT, op1, op2);
        __ beq(AT, R0, L);
        __ nop();
        __ mov_s(dst, src);
        __ bind(L);
        break;
      case 0x06: //less_equal
        __ slt(AT, op2, op1);
        __ bne(AT, R0, L);
        __ nop();
        __ mov_s(dst, src);
        __ bind(L);
        break;
      default:
        Unimplemented();
    }
  %}

  ins_pipe( pipe_slow );
%}
// Conditional move of a double register guarded by a signed int compare.
// Branches around the mov_d on the inverted condition (nop fills the
// branch delay slot).  Clobbers AT in the slt-based cases.
instruct cmovD_cmpI_reg_reg(regD dst, regD src, mRegI tmp1, mRegI tmp2, cmpOp cop ) %{
  match(Set dst (CMoveD (Binary cop (CmpI tmp1 tmp2)) (Binary dst src)));
  ins_cost(200);
  format %{
    "CMP$cop $tmp1, $tmp2\t @cmovD_cmpI_reg_reg\n"
    "\tCMOV $dst, $src \t @cmovD_cmpI_reg_reg"
  %}

  ins_encode %{
    Register op1 = $tmp1$$Register;
    Register op2 = $tmp2$$Register;
    FloatRegister dst = as_FloatRegister($dst$$reg);
    FloatRegister src = as_FloatRegister($src$$reg);
    int flag = $cop$$cmpcode;
    Label L;

    switch(flag)
    {
      case 0x01: //equal
        __ bne(op1, op2, L);
        __ nop();
        __ mov_d(dst, src);
        __ bind(L);
        break;
      case 0x02: //not_equal
        __ beq(op1, op2, L);
        __ nop();
        __ mov_d(dst, src);
        __ bind(L);
        break;
      case 0x03: //greater
        __ slt(AT, op2, op1);
        __ beq(AT, R0, L);
        __ nop();
        __ mov_d(dst, src);
        __ bind(L);
        break;
      case 0x04: //greater_equal
        __ slt(AT, op1, op2);
        __ bne(AT, R0, L);
        __ nop();
        __ mov_d(dst, src);
        __ bind(L);
        break;
      case 0x05: //less
        __ slt(AT, op1, op2);
        __ beq(AT, R0, L);
        __ nop();
        __ mov_d(dst, src);
        __ bind(L);
        break;
      case 0x06: //less_equal
        __ slt(AT, op2, op1);
        __ bne(AT, R0, L);
        __ nop();
        __ mov_d(dst, src);
        __ bind(L);
        break;
      default:
        Unimplemented();
    }
  %}

  ins_pipe( pipe_slow );
%}
// Conditional move of a double register guarded by a pointer compare.
// Branches around the mov_d on the inverted condition (nop fills the
// branch delay slot).  Clobbers AT in the slt-based cases.
// NOTE(review): this rule uses SIGNED slt on pointer operands while the
// other CmpP rules in this file use sltu -- harmless if only eq/ne are
// ever generated for pointers here, but worth confirming.
instruct cmovD_cmpP_reg_reg(regD dst, regD src, mRegP tmp1, mRegP tmp2, cmpOp cop ) %{
  match(Set dst (CMoveD (Binary cop (CmpP tmp1 tmp2)) (Binary dst src)));
  ins_cost(200);
  format %{
    "CMP$cop $tmp1, $tmp2\t @cmovD_cmpP_reg_reg\n"
    "\tCMOV $dst, $src \t @cmovD_cmpP_reg_reg"
  %}

  ins_encode %{
    Register op1 = $tmp1$$Register;
    Register op2 = $tmp2$$Register;
    FloatRegister dst = as_FloatRegister($dst$$reg);
    FloatRegister src = as_FloatRegister($src$$reg);
    int flag = $cop$$cmpcode;
    Label L;

    switch(flag)
    {
      case 0x01: //equal
        __ bne(op1, op2, L);
        __ nop();
        __ mov_d(dst, src);
        __ bind(L);
        break;
      case 0x02: //not_equal
        __ beq(op1, op2, L);
        __ nop();
        __ mov_d(dst, src);
        __ bind(L);
        break;
      case 0x03: //greater
        __ slt(AT, op2, op1);
        __ beq(AT, R0, L);
        __ nop();
        __ mov_d(dst, src);
        __ bind(L);
        break;
      case 0x04: //greater_equal
        __ slt(AT, op1, op2);
        __ bne(AT, R0, L);
        __ nop();
        __ mov_d(dst, src);
        __ bind(L);
        break;
      case 0x05: //less
        __ slt(AT, op1, op2);
        __ beq(AT, R0, L);
        __ nop();
        __ mov_d(dst, src);
        __ bind(L);
        break;
      case 0x06: //less_equal
        __ slt(AT, op2, op1);
        __ bne(AT, R0, L);
        __ nop();
        __ mov_d(dst, src);
        __ bind(L);
        break;
      default:
        Unimplemented();
    }
  %}

  ins_pipe( pipe_slow );
%}
9561 //FIXME
// Conditional move of an int register guarded by a float compare.
// c.<cond>.s sets the FPU condition flag; movt / movf move src into dst
// when the flag is set / clear.  Ordered vs. unordered compare variants
// determine each case's NaN behavior (cf. cmovI_cmpD_reg_reg).
instruct cmovI_cmpF_reg_reg(mRegI dst, mRegI src, regF tmp1, regF tmp2, cmpOp cop ) %{
  match(Set dst (CMoveI (Binary cop (CmpF tmp1 tmp2)) (Binary dst src)));
  ins_cost(80);
  format %{
    "CMP$cop $tmp1, $tmp2\t @cmovI_cmpF_reg_reg\n"
    "\tCMOV $dst,$src \t @cmovI_cmpF_reg_reg"
  %}

  ins_encode %{
    FloatRegister reg_op1 = $tmp1$$FloatRegister;
    FloatRegister reg_op2 = $tmp2$$FloatRegister;
    Register dst = $dst$$Register;
    Register src = $src$$Register;
    int flag = $cop$$cmpcode;

    switch(flag)
    {
      case 0x01: //equal
        __ c_eq_s(reg_op1, reg_op2);
        __ movt(dst, src);
        break;
      case 0x02: //not_equal
        __ c_eq_s(reg_op1, reg_op2);
        __ movf(dst, src);
        break;
      case 0x03: //greater
        __ c_ole_s(reg_op1, reg_op2);
        __ movf(dst, src);
        break;
      case 0x04: //greater_equal
        __ c_olt_s(reg_op1, reg_op2);
        __ movf(dst, src);
        break;
      case 0x05: //less
        __ c_ult_s(reg_op1, reg_op2);
        __ movt(dst, src);
        break;
      case 0x06: //less_equal
        __ c_ule_s(reg_op1, reg_op2);
        __ movt(dst, src);
        break;
      default:
        Unimplemented();
    }
  %}
  ins_pipe( pipe_slow );
%}
// Conditional move of a float register guarded by a float compare.
// c.<cond>.s sets the FPU condition flag; movt_s / movf_s move the FP
// source into dst when the flag is set / clear.
instruct cmovF_cmpF_reg_reg(regF dst, regF src, regF tmp1, regF tmp2, cmpOp cop ) %{
  match(Set dst (CMoveF (Binary cop (CmpF tmp1 tmp2)) (Binary dst src)));
  ins_cost(200);
  format %{
    "CMP$cop $tmp1, $tmp2\t @cmovF_cmpF_reg_reg\n"
    "\tCMOV $dst,$src \t @cmovF_cmpF_reg_reg"
  %}

  ins_encode %{
    FloatRegister reg_op1 = $tmp1$$FloatRegister;
    FloatRegister reg_op2 = $tmp2$$FloatRegister;
    FloatRegister dst = $dst$$FloatRegister;
    FloatRegister src = $src$$FloatRegister;
    int flag = $cop$$cmpcode;

    switch(flag)
    {
      case 0x01: //equal
        __ c_eq_s(reg_op1, reg_op2);
        __ movt_s(dst, src);
        break;
      case 0x02: //not_equal
        __ c_eq_s(reg_op1, reg_op2);
        __ movf_s(dst, src);
        break;
      case 0x03: //greater
        __ c_ole_s(reg_op1, reg_op2);
        __ movf_s(dst, src);
        break;
      case 0x04: //greater_equal
        __ c_olt_s(reg_op1, reg_op2);
        __ movf_s(dst, src);
        break;
      case 0x05: //less
        __ c_ult_s(reg_op1, reg_op2);
        __ movt_s(dst, src);
        break;
      case 0x06: //less_equal
        __ c_ule_s(reg_op1, reg_op2);
        __ movt_s(dst, src);
        break;
      default:
        Unimplemented();
    }
  %}
  ins_pipe( pipe_slow );
%}
9658 // Manifest a CmpL result in an integer register. Very painful.
9659 // This is the test to avoid.
9660 instruct cmpL3_reg_reg(mRegI dst, mRegL src1, mRegL src2) %{
9661 match(Set dst (CmpL3 src1 src2));
9662 ins_cost(1000);
9663 format %{ "cmpL3 $dst, $src1, $src2 @ cmpL3_reg_reg" %}
9664 ins_encode %{
9665 Register opr1 = as_Register($src1$$reg);
9666 Register opr2 = as_Register($src2$$reg);
9667 Register dst = as_Register($dst$$reg);
9669 Label Done;
9671 __ subu(AT, opr1, opr2);
9672 __ bltz(AT, Done);
9673 __ delayed()->daddiu(dst, R0, -1);
9675 __ move(dst, 1);
9676 __ movz(dst, R0, AT);
9678 __ bind(Done);
9679 %}
9680 ins_pipe( pipe_slow );
9681 %}
//
// less_result    = -1
// greater_result = 1
// equal_result   = 0
// nan_result     = -1
//
// Manifest the three-way result of a float compare in an int register:
// -1 if src1 < src2 or unordered (NaN), 0 if equal, 1 if src1 > src2.
instruct cmpF3_reg_reg(mRegI dst, regF src1, regF src2) %{
  match(Set dst (CmpF3 src1 src2));
  ins_cost(1000);
  format %{ "cmpF3 $dst, $src1, $src2 @ cmpF3_reg_reg" %}
  ins_encode %{
    FloatRegister src1 = as_FloatRegister($src1$$reg);
    FloatRegister src2 = as_FloatRegister($src2$$reg);
    Register dst = as_Register($dst$$reg);

    Label Done;

    // c.ult.s is true for (src1 < src2) OR unordered, so one taken branch
    // covers both the "less" and the NaN result (-1).  The delay-slot
    // daddiu pre-loads dst with -1; when the branch falls through it is
    // harmless, because dst is rewritten below.
    __ c_ult_s(src1, src2);
    __ bc1t(Done);
    __ delayed()->daddiu(dst, R0, -1);

    // Fall-through: src1 >= src2 and ordered.  Default to 1, then clear
    // to 0 when the operands compare equal.
    __ c_eq_s(src1, src2);
    __ move(dst, 1);
    __ movt(dst, R0);

    __ bind(Done);
  %}
  ins_pipe( pipe_slow );
%}
// Manifest the three-way result of a double compare in an int register:
// -1 if src1 < src2 or unordered (NaN), 0 if equal, 1 if src1 > src2.
// Same scheme as cmpF3_reg_reg, using the double-precision compares.
instruct cmpD3_reg_reg(mRegI dst, regD src1, regD src2) %{
  match(Set dst (CmpD3 src1 src2));
  ins_cost(1000);
  format %{ "cmpD3 $dst, $src1, $src2 @ cmpD3_reg_reg" %}
  ins_encode %{
    FloatRegister src1 = as_FloatRegister($src1$$reg);
    FloatRegister src2 = as_FloatRegister($src2$$reg);
    Register dst = as_Register($dst$$reg);

    Label Done;

    // c.ult.d is true for "less" OR unordered -> branch with -1 in the
    // delay slot handles both cases.
    __ c_ult_d(src1, src2);
    __ bc1t(Done);
    __ delayed()->daddiu(dst, R0, -1);

    // Fall-through: ordered and src1 >= src2.  1 unless equal (then 0).
    __ c_eq_d(src1, src2);
    __ move(dst, 1);
    __ movt(dst, R0);

    __ bind(Done);
  %}
  ins_pipe( pipe_slow );
%}
// Zero an array region: stores R0 doubleword-by-doubleword.
instruct clear_array(mRegL cnt, mRegP base, Universe dummy) %{
  match(Set dummy (ClearArray cnt base));
  format %{ "CLEAR_ARRAY base = $base, cnt = $cnt # Clear doublewords" %}
  ins_encode %{
    // $cnt is the number of doublewords (8-byte units) to clear -- each
    // iteration stores one doubleword (sd) and advances by wordSize --
    // and $base points to the start of the region.  Clobbers AT and T9.
    Register base = $base$$Register;
    Register num = $cnt$$Register;
    Label Loop, done;

    // Nothing to do for a zero count; the delay slot copies base into AT
    // (the running store pointer) on both paths.
    __ beq(num, R0, done);
    __ delayed()->daddu(AT, base, R0);

    __ move(T9, num); /* T9 = remaining doubleword count */

    __ bind(Loop);
    __ sd(R0, AT, 0);
    __ daddi(T9, T9, -1);
    __ bne(T9, R0, Loop);
    __ delayed()->daddi(AT, AT, wordSize); // advance pointer in the delay slot

    __ bind(done);
  %}
  ins_pipe( pipe_slow );
%}
// Lexicographic comparison of two UTF-16 strings (lhu = 16-bit char loads).
// result < 0 / == 0 / > 0 like String.compareTo: if one string is a prefix
// of the other the length difference decides; otherwise the difference of
// the first mismatching characters decides.  All four inputs are killed;
// AT is clobbered.
instruct string_compare(a4_RegP str1, mA5RegI cnt1, a6_RegP str2, mA7RegI cnt2, no_Ax_mRegI result) %{
  match(Set result (StrComp (Binary str1 cnt1) (Binary str2 cnt2)));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2);

  format %{ "String Compare $str1[len: $cnt1], $str2[len: $cnt2] -> $result @ string_compare" %}
  ins_encode %{
    // Get the first character position in both strings
    // [8] char array, [12] offset, [16] count
    Register str1 = $str1$$Register;
    Register str2 = $str2$$Register;
    Register cnt1 = $cnt1$$Register;
    Register cnt2 = $cnt2$$Register;
    Register result = $result$$Register;

    Label L, Loop, haveResult, done;

    // compute the difference of lengths (in result); this is the answer
    // when the loop exhausts the shorter string without a mismatch
    __ subu(result, cnt1, cnt2);

    // compute the shorter length (in cnt1)
    __ slt(AT, cnt2, cnt1);
    __ movn(cnt1, cnt2, AT);

    // Now the shorter length is in cnt1 and cnt2 can be used as a tmp register
    __ bind(Loop);                     // Loop begin
    __ beq(cnt1, R0, done);
    __ delayed()->lhu(AT, str1, 0);;   // load current char of str1 in the delay slot

    // compare current character
    __ lhu(cnt2, str2, 0);
    __ bne(AT, cnt2, haveResult);
    __ delayed()->addi(str1, str1, 2); // advance str1 (delay slot, both paths)
    __ addi(str2, str2, 2);
    __ b(Loop);
    __ delayed()->addi(cnt1, cnt1, -1); // Loop end

    __ bind(haveResult);
    // mismatch: answer is char1 - char2 (AT holds char1, cnt2 holds char2)
    __ subu(result, AT, cnt2);

    __ bind(done);
  %}

  ins_pipe( pipe_slow );
%}
9808 // intrinsic optimization
// Equality test of two UTF-16 char sequences of the same length $cnt:
// result = 1 when equal (or same array), 0 on the first mismatch.
// str1/str2/cnt are killed, temp is clobbered, AT is used as scratch.
instruct string_equals(a4_RegP str1, a5_RegP str2, mA6RegI cnt, mA7RegI temp, no_Ax_mRegI result) %{
  match(Set result (StrEquals (Binary str1 str2) cnt));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt, KILL temp);

  format %{ "String Equal $str1, $str2, len:$cnt tmp:$temp -> $result @ string_equals" %}
  ins_encode %{
    // Get the first character position in both strings
    // [8] char array, [12] offset, [16] count
    Register str1 = $str1$$Register;
    Register str2 = $str2$$Register;
    Register cnt = $cnt$$Register;
    Register tmp = $temp$$Register;
    Register result = $result$$Register;

    Label Loop, done;

    // identical array reference -> equal; the daddiu after each beq sits
    // in the branch delay slot and so sets result = 1 on BOTH paths
    // (harmless when the branch falls through, since result is rewritten).
    __ beq(str1, str2, done);          // same char[] ?
    __ daddiu(result, R0, 1);

    __ bind(Loop);                     // Loop begin
    __ beq(cnt, R0, done);
    __ daddiu(result, R0, 1);          // count == 0 -> all chars matched

    // compare current character
    __ lhu(AT, str1, 0);;
    __ lhu(tmp, str2, 0);
    __ bne(AT, tmp, done);
    __ delayed()->daddi(result, R0, 0); // mismatch -> 0 (delay slot, both paths)
    __ addi(str1, str1, 2);
    __ addi(str2, str2, 2);
    __ b(Loop);
    __ delayed()->addi(cnt, cnt, -1);  // Loop end

    __ bind(done);
  %}

  ins_pipe( pipe_slow );
%}
9849 //----------Arithmetic Instructions-------------------------------------------
9850 //----------Addition Instructions---------------------------------------------
// Integer add, register + register: 32-bit add with sign-extended result.
instruct addI_Reg_Reg(mRegI dst, mRegI src1, mRegI src2) %{
  match(Set dst (AddI src1 src2));

  format %{ "add $dst, $src1, $src2 #@addI_Reg_Reg" %}
  ins_encode %{
    Register dst = $dst$$Register;
    Register src1 = $src1$$Register;
    Register src2 = $src2$$Register;
    __ addu32(dst, src1, src2);
  %}
  ins_pipe( ialu_regI_regI );
%}
// Integer add, register + arbitrary immediate.
// Uses the one-instruction addiu32 form when the constant fits a signed
// 16-bit field; otherwise materializes it in AT first.
instruct addI_Reg_imm(mRegI dst, mRegI src1, immI src2) %{
  match(Set dst (AddI src1 src2));

  format %{ "add $dst, $src1, $src2 #@addI_Reg_imm" %}
  ins_encode %{
    Register dst = $dst$$Register;
    Register src1 = $src1$$Register;
    int imm = $src2$$constant;

    if(Assembler::is_simm16(imm)) {
      __ addiu32(dst, src1, imm);
    } else {
      __ move(AT, imm);           // constant too wide: stage it in AT
      __ addu32(dst, src1, AT);
    }
  %}
  ins_pipe( ialu_regI_regI );
%}
9883 instruct addP_reg_reg(mRegP dst, mRegP src1, mRegL src2) %{
9884 match(Set dst (AddP src1 src2));
9886 format %{ "dadd $dst, $src1, $src2 #@addP_reg_reg" %}
9888 ins_encode %{
9889 Register dst = $dst$$Register;
9890 Register src1 = $src1$$Register;
9891 Register src2 = $src2$$Register;
9892 __ daddu(dst, src1, src2);
9893 %}
9895 ins_pipe( ialu_regI_regI );
9896 %}
// Pointer add with an int offset widened to long (ConvI2L folded away).
// NOTE(review): relies on mRegI values already being kept sign-extended
// in 64-bit registers by this port's conventions — confirm.
instruct addP_reg_reg_convI2L(mRegP dst, mRegP src1, mRegI src2) %{
  match(Set dst (AddP src1 (ConvI2L src2)));

  format %{ "dadd $dst, $src1, $src2 #@addP_reg_reg_convI2L" %}

  ins_encode %{
    Register dst = $dst$$Register;
    Register src1 = $src1$$Register;
    Register src2 = $src2$$Register;
    __ daddu(dst, src1, src2);
  %}

  ins_pipe( ialu_regI_regI );
%}
// Pointer add, register + long immediate.
// Single daddiu when the constant is a signed 16-bit value; otherwise the
// full 64-bit constant is built in AT with set64.
instruct addP_reg_imm(mRegP dst, mRegP src1, immL src2) %{
  match(Set dst (AddP src1 src2));

  format %{ "daddi $dst, $src1, $src2 #@addP_reg_imm" %}
  ins_encode %{
    Register src1 = $src1$$Register;
    long src2 = $src2$$constant;
    Register dst = $dst$$Register;

    if(Assembler::is_simm16(src2)) {
      __ daddiu(dst, src1, src2);
    } else {
      __ set64(AT, src2);         // wide constant: materialize in AT
      __ daddu(dst, src1, AT);
    }
  %}
  ins_pipe( ialu_regI_imm16 );
%}
9932 // Add Long Register with Register
9933 instruct addL_Reg_Reg(mRegL dst, mRegL src1, mRegL src2) %{
9934 match(Set dst (AddL src1 src2));
9935 ins_cost(200);
9936 format %{ "ADD $dst, $src1, $src2 #@addL_Reg_Reg\t" %}
9938 ins_encode %{
9939 Register dst_reg = as_Register($dst$$reg);
9940 Register src1_reg = as_Register($src1$$reg);
9941 Register src2_reg = as_Register($src2$$reg);
9943 __ daddu(dst_reg, src1_reg, src2_reg);
9944 %}
9946 ins_pipe( ialu_regL_regL );
9947 %}
// Long add, register + 16-bit immediate (immL16 guarantees daddiu fits).
instruct addL_Reg_imm(mRegL dst, mRegL src1, immL16 src2)
%{
  match(Set dst (AddL src1 src2));

  format %{ "ADD $dst, $src1, $src2 #@addL_Reg_imm " %}
  ins_encode %{
    Register dst_reg = as_Register($dst$$reg);
    Register src1_reg = as_Register($src1$$reg);
    int src2_imm = $src2$$constant;

    __ daddiu(dst_reg, src1_reg, src2_imm);
  %}

  ins_pipe( ialu_regL_regL );
%}
// Long add of a widened int and a 16-bit immediate; ConvI2L is folded
// away (int values presumed kept sign-extended — see port convention).
instruct addL_RegI2L_imm(mRegL dst, mRegI src1, immL16 src2)
%{
  match(Set dst (AddL (ConvI2L src1) src2));

  format %{ "ADD $dst, $src1, $src2 #@addL_RegI2L_imm " %}
  ins_encode %{
    Register dst_reg = as_Register($dst$$reg);
    Register src1_reg = as_Register($src1$$reg);
    int src2_imm = $src2$$constant;

    __ daddiu(dst_reg, src1_reg, src2_imm);
  %}

  ins_pipe( ialu_regL_regL );
%}
// Long add: widened int + long register (ConvI2L folded away).
instruct addL_RegI2L_Reg(mRegL dst, mRegI src1, mRegL src2) %{
  match(Set dst (AddL (ConvI2L src1) src2));
  ins_cost(200);
  format %{ "ADD $dst, $src1, $src2 #@addL_RegI2L_Reg\t" %}

  ins_encode %{
    Register dst_reg = as_Register($dst$$reg);
    Register src1_reg = as_Register($src1$$reg);
    Register src2_reg = as_Register($src2$$reg);

    __ daddu(dst_reg, src1_reg, src2_reg);
  %}

  ins_pipe( ialu_regL_regL );
%}
// Long add of two widened ints (both ConvI2L folded away).
instruct addL_RegI2L_RegI2L(mRegL dst, mRegI src1, mRegI src2) %{
  match(Set dst (AddL (ConvI2L src1) (ConvI2L src2)));
  ins_cost(200);
  format %{ "ADD $dst, $src1, $src2 #@addL_RegI2L_RegI2L\t" %}

  ins_encode %{
    Register dst_reg = as_Register($dst$$reg);
    Register src1_reg = as_Register($src1$$reg);
    Register src2_reg = as_Register($src2$$reg);

    __ daddu(dst_reg, src1_reg, src2_reg);
  %}

  ins_pipe( ialu_regL_regL );
%}
10013 instruct addL_Reg_RegI2L(mRegL dst, mRegL src1, mRegI src2) %{
10014 match(Set dst (AddL src1 (ConvI2L src2)));
10015 ins_cost(200);
10016 format %{ "ADD $dst, $src1, $src2 #@addL_Reg_RegI2L\t" %}
10018 ins_encode %{
10019 Register dst_reg = as_Register($dst$$reg);
10020 Register src1_reg = as_Register($src1$$reg);
10021 Register src2_reg = as_Register($src2$$reg);
10023 __ daddu(dst_reg, src1_reg, src2_reg);
10024 %}
10026 ins_pipe( ialu_regL_regL );
10027 %}
10029 //----------Subtraction Instructions-------------------------------------------
10030 // Integer Subtraction Instructions
10031 instruct subI_Reg_Reg(mRegI dst, mRegI src1, mRegI src2) %{
10032 match(Set dst (SubI src1 src2));
10033 ins_cost(100);
10035 format %{ "sub $dst, $src1, $src2 #@subI_Reg_Reg" %}
10036 ins_encode %{
10037 Register dst = $dst$$Register;
10038 Register src1 = $src1$$Register;
10039 Register src2 = $src2$$Register;
10040 __ subu32(dst, src1, src2);
10041 %}
10042 ins_pipe( ialu_regI_regI );
10043 %}
// Integer subtract of an immediate, strength-reduced to an add of its
// negation (immI16_sub: a constant whose negation fits simm16).
instruct subI_Reg_immI16_sub(mRegI dst, mRegI src1, immI16_sub src2) %{
  match(Set dst (SubI src1 src2));
  ins_cost(80);

  format %{ "sub $dst, $src1, $src2 #@subI_Reg_immI16_sub" %}
  ins_encode %{
    Register dst = $dst$$Register;
    Register src1 = $src1$$Register;
    __ addiu32(dst, src1, -1 * $src2$$constant);  // dst = src1 + (-imm)
  %}
  ins_pipe( ialu_regI_regI );
%}
10058 instruct negI_Reg(mRegI dst, immI0 zero, mRegI src) %{
10059 match(Set dst (SubI zero src));
10060 ins_cost(80);
10062 format %{ "neg $dst, $src #@negI_Reg" %}
10063 ins_encode %{
10064 Register dst = $dst$$Register;
10065 Register src = $src$$Register;
10066 __ subu32(dst, R0, src);
10067 %}
10068 ins_pipe( ialu_regI_regI );
10069 %}
10071 instruct negL_Reg(mRegL dst, immL0 zero, mRegL src) %{
10072 match(Set dst (SubL zero src));
10073 ins_cost(80);
10075 format %{ "neg $dst, $src #@negL_Reg" %}
10076 ins_encode %{
10077 Register dst = $dst$$Register;
10078 Register src = $src$$Register;
10079 __ subu(dst, R0, src);
10080 %}
10081 ins_pipe( ialu_regI_regI );
10082 %}
// Long subtract of an immediate, emitted as daddiu with the negated
// constant (immL16_sub: negation fits simm16).
instruct subL_Reg_immL16_sub(mRegL dst, mRegL src1, immL16_sub src2) %{
  match(Set dst (SubL src1 src2));
  ins_cost(80);

  format %{ "sub $dst, $src1, $src2 #@subL_Reg_immL16_sub" %}
  ins_encode %{
    Register dst = $dst$$Register;
    Register src1 = $src1$$Register;
    __ daddiu(dst, src1, -1 * $src2$$constant);  // dst = src1 + (-imm)
  %}
  ins_pipe( ialu_regI_regI );
%}
10097 // Subtract Long Register with Register.
10098 instruct subL_Reg_Reg(mRegL dst, mRegL src1, mRegL src2) %{
10099 match(Set dst (SubL src1 src2));
10100 ins_cost(100);
10101 format %{ "SubL $dst, $src1, $src2 @ subL_Reg_Reg" %}
10102 ins_encode %{
10103 Register dst = as_Register($dst$$reg);
10104 Register src1 = as_Register($src1$$reg);
10105 Register src2 = as_Register($src2$$reg);
10107 __ subu(dst, src1, src2);
10108 %}
10109 ins_pipe( ialu_regL_regL );
10110 %}
// Long subtract: long - widened int (ConvI2L folded away).
instruct subL_Reg_RegI2L(mRegL dst, mRegL src1, mRegI src2) %{
  match(Set dst (SubL src1 (ConvI2L src2)));
  ins_cost(100);
  format %{ "SubL $dst, $src1, $src2 @ subL_Reg_RegI2L" %}
  ins_encode %{
    Register dst = as_Register($dst$$reg);
    Register src1 = as_Register($src1$$reg);
    Register src2 = as_Register($src2$$reg);

    __ subu(dst, src1, src2);
  %}
  ins_pipe( ialu_regL_regL );
%}
// Long subtract: widened int - long (ConvI2L folded away).
instruct subL_RegI2L_Reg(mRegL dst, mRegI src1, mRegL src2) %{
  match(Set dst (SubL (ConvI2L src1) src2));
  ins_cost(200);
  format %{ "SubL $dst, $src1, $src2 @ subL_RegI2L_Reg" %}
  ins_encode %{
    Register dst = as_Register($dst$$reg);
    Register src1 = as_Register($src1$$reg);
    Register src2 = as_Register($src2$$reg);

    __ subu(dst, src1, src2);
  %}
  ins_pipe( ialu_regL_regL );
%}
// Long subtract of two widened ints (both ConvI2L folded away).
instruct subL_RegI2L_RegI2L(mRegL dst, mRegI src1, mRegI src2) %{
  match(Set dst (SubL (ConvI2L src1) (ConvI2L src2)));
  ins_cost(200);
  format %{ "SubL $dst, $src1, $src2 @ subL_RegI2L_RegI2L" %}
  ins_encode %{
    Register dst = as_Register($dst$$reg);
    Register src1 = as_Register($src1$$reg);
    Register src2 = as_Register($src2$$reg);

    __ subu(dst, src1, src2);
  %}
  ins_pipe( ialu_regL_regL );
%}
10154 // Integer MOD with Register
// Integer remainder: div leaves the remainder in HI, fetched with mfhi.
// The Loongson gsmod path is intentionally disabled (if (0)) — kept for
// reference; see the measurement note below.
instruct modI_Reg_Reg(mRegI dst, mRegI src1, mRegI src2) %{
  match(Set dst (ModI src1 src2));
  ins_cost(300);
  format %{ "modi $dst, $src1, $src2 @ modI_Reg_Reg" %}
  ins_encode %{
    Register dst = $dst$$Register;
    Register src1 = $src1$$Register;
    Register src2 = $src2$$Register;

    //if (UseLoongsonISA) {
    if (0) {
      // 2016.08.10
      // Experiments show that gsmod is slower that div+mfhi.
      // So I just disable it here.
      __ gsmod(dst, src1, src2);
    } else {
      __ div(src1, src2);
      __ mfhi(dst);    // remainder lives in HI
    }
  %}

  //ins_pipe( ialu_mod );
  ins_pipe( ialu_regI_regI );
%}
// Long remainder: Loongson gsdmod when available, else ddiv + mfhi
// (remainder in HI).
instruct modL_reg_reg(mRegL dst, mRegL src1, mRegL src2) %{
  match(Set dst (ModL src1 src2));
  format %{ "modL $dst, $src1, $src2 @modL_reg_reg" %}

  ins_encode %{
    Register dst = as_Register($dst$$reg);
    Register op1 = as_Register($src1$$reg);
    Register op2 = as_Register($src2$$reg);

    if (UseLoongsonISA) {
      __ gsdmod(dst, op1, op2);
    } else {
      __ ddiv(op1, op2);
      __ mfhi(dst);
    }
  %}
  ins_pipe( pipe_slow );
%}
10199 instruct mulI_Reg_Reg(mRegI dst, mRegI src1, mRegI src2) %{
10200 match(Set dst (MulI src1 src2));
10202 ins_cost(300);
10203 format %{ "mul $dst, $src1, $src2 @ mulI_Reg_Reg" %}
10204 ins_encode %{
10205 Register src1 = $src1$$Register;
10206 Register src2 = $src2$$Register;
10207 Register dst = $dst$$Register;
10209 __ mul(dst, src1, src2);
10210 %}
10211 ins_pipe( ialu_mult );
10212 %}
// Fused multiply-add on int: dst = src1 * src2 + src3, using the HI/LO
// accumulator (mtlo seeds LO with the addend, madd accumulates, mflo
// fetches the low word). Instruction order is essential here.
instruct maddI_Reg_Reg(mRegI dst, mRegI src1, mRegI src2, mRegI src3) %{
  match(Set dst (AddI (MulI src1 src2) src3));

  ins_cost(999);
  format %{ "madd $dst, $src1 * $src2 + $src3 #@maddI_Reg_Reg" %}
  ins_encode %{
    Register src1 = $src1$$Register;
    Register src2 = $src2$$Register;
    Register src3 = $src3$$Register;
    Register dst = $dst$$Register;

    __ mtlo(src3);         // LO = addend
    __ madd(src1, src2);   // {HI,LO} += src1 * src2
    __ mflo(dst);          // dst = low 32 bits of result
  %}
  ins_pipe( ialu_mult );
%}
// Integer divide. MIPS div never traps on divide-by-zero, so a trap is
// raised explicitly first (teq traps when src2 == R0, code 0x7).
instruct divI_Reg_Reg(mRegI dst, mRegI src1, mRegI src2) %{
  match(Set dst (DivI src1 src2));

  ins_cost(300);
  format %{ "div $dst, $src1, $src2 @ divI_Reg_Reg" %}
  ins_encode %{
    Register src1 = $src1$$Register;
    Register src2 = $src2$$Register;
    Register dst = $dst$$Register;

    /* 2012/4/21 Jin: In MIPS, div does not cause exception.
         We must trap an exception manually. */
    __ teq(R0, src2, 0x7);    // trap if divisor == 0

    if (UseLoongsonISA) {
      __ gsdiv(dst, src1, src2);
    } else {
      __ div(src1, src2);

      // NOTE(review): nops presumably cover the HI/LO result latency
      // before mflo — confirm against the target pipeline.
      __ nop();
      __ nop();
      __ mflo(dst);           // quotient lives in LO
    }
  %}
  ins_pipe( ialu_mod );
%}
10259 instruct divF_Reg_Reg(regF dst, regF src1, regF src2) %{
10260 match(Set dst (DivF src1 src2));
10262 ins_cost(300);
10263 format %{ "divF $dst, $src1, $src2 @ divF_Reg_Reg" %}
10264 ins_encode %{
10265 FloatRegister src1 = $src1$$FloatRegister;
10266 FloatRegister src2 = $src2$$FloatRegister;
10267 FloatRegister dst = $dst$$FloatRegister;
10269 /* Here do we need to trap an exception manually ? */
10270 __ div_s(dst, src1, src2);
10271 %}
10272 ins_pipe( pipe_slow );
10273 %}
10275 instruct divD_Reg_Reg(regD dst, regD src1, regD src2) %{
10276 match(Set dst (DivD src1 src2));
10278 ins_cost(300);
10279 format %{ "divD $dst, $src1, $src2 @ divD_Reg_Reg" %}
10280 ins_encode %{
10281 FloatRegister src1 = $src1$$FloatRegister;
10282 FloatRegister src2 = $src2$$FloatRegister;
10283 FloatRegister dst = $dst$$FloatRegister;
10285 /* Here do we need to trap an exception manually ? */
10286 __ div_d(dst, src1, src2);
10287 %}
10288 ins_pipe( pipe_slow );
10289 %}
// Long multiply: Loongson gsdmult when available, else dmult + mflo
// (low 64 bits of the product are in LO).
instruct mulL_reg_reg(mRegL dst, mRegL src1, mRegL src2) %{
  match(Set dst (MulL src1 src2));
  format %{ "mulL $dst, $src1, $src2 @mulL_reg_reg" %}
  ins_encode %{
    Register dst = as_Register($dst$$reg);
    Register op1 = as_Register($src1$$reg);
    Register op2 = as_Register($src2$$reg);

    if (UseLoongsonISA) {
      __ gsdmult(dst, op1, op2);
    } else {
      __ dmult(op1, op2);
      __ mflo(dst);
    }
  %}
  ins_pipe( pipe_slow );
%}
// Long multiply by a widened int (ConvI2L folded away); same emission
// as mulL_reg_reg.
instruct mulL_reg_regI2L(mRegL dst, mRegL src1, mRegI src2) %{
  match(Set dst (MulL src1 (ConvI2L src2)));
  format %{ "mulL $dst, $src1, $src2 @mulL_reg_regI2L" %}
  ins_encode %{
    Register dst = as_Register($dst$$reg);
    Register op1 = as_Register($src1$$reg);
    Register op2 = as_Register($src2$$reg);

    if (UseLoongsonISA) {
      __ gsdmult(dst, op1, op2);
    } else {
      __ dmult(op1, op2);
      __ mflo(dst);
    }
  %}
  ins_pipe( pipe_slow );
%}
// Long divide: Loongson gsddiv when available, else ddiv + mflo
// (quotient in LO). NOTE(review): unlike divI_Reg_Reg, no explicit
// divide-by-zero trap is emitted here — confirm this is handled elsewhere.
instruct divL_reg_reg(mRegL dst, mRegL src1, mRegL src2) %{
  match(Set dst (DivL src1 src2));
  format %{ "divL $dst, $src1, $src2 @divL_reg_reg" %}

  ins_encode %{
    Register dst = as_Register($dst$$reg);
    Register op1 = as_Register($src1$$reg);
    Register op2 = as_Register($src2$$reg);

    if (UseLoongsonISA) {
      __ gsddiv(dst, op1, op2);
    } else {
      __ ddiv(op1, op2);
      __ mflo(dst);
    }
  %}
  ins_pipe( pipe_slow );
%}
10346 instruct addF_reg_reg(regF dst, regF src1, regF src2) %{
10347 match(Set dst (AddF src1 src2));
10348 format %{ "AddF $dst, $src1, $src2 @addF_reg_reg" %}
10349 ins_encode %{
10350 FloatRegister src1 = as_FloatRegister($src1$$reg);
10351 FloatRegister src2 = as_FloatRegister($src2$$reg);
10352 FloatRegister dst = as_FloatRegister($dst$$reg);
10354 __ add_s(dst, src1, src2);
10355 %}
10356 ins_pipe( fpu_regF_regF );
10357 %}
10359 instruct subF_reg_reg(regF dst, regF src1, regF src2) %{
10360 match(Set dst (SubF src1 src2));
10361 format %{ "SubF $dst, $src1, $src2 @subF_reg_reg" %}
10362 ins_encode %{
10363 FloatRegister src1 = as_FloatRegister($src1$$reg);
10364 FloatRegister src2 = as_FloatRegister($src2$$reg);
10365 FloatRegister dst = as_FloatRegister($dst$$reg);
10367 __ sub_s(dst, src1, src2);
10368 %}
10369 ins_pipe( fpu_regF_regF );
10370 %}
10371 instruct addD_reg_reg(regD dst, regD src1, regD src2) %{
10372 match(Set dst (AddD src1 src2));
10373 format %{ "AddD $dst, $src1, $src2 @addD_reg_reg" %}
10374 ins_encode %{
10375 FloatRegister src1 = as_FloatRegister($src1$$reg);
10376 FloatRegister src2 = as_FloatRegister($src2$$reg);
10377 FloatRegister dst = as_FloatRegister($dst$$reg);
10379 __ add_d(dst, src1, src2);
10380 %}
10381 ins_pipe( fpu_regF_regF );
10382 %}
10384 instruct subD_reg_reg(regD dst, regD src1, regD src2) %{
10385 match(Set dst (SubD src1 src2));
10386 format %{ "SubD $dst, $src1, $src2 @subD_reg_reg" %}
10387 ins_encode %{
10388 FloatRegister src1 = as_FloatRegister($src1$$reg);
10389 FloatRegister src2 = as_FloatRegister($src2$$reg);
10390 FloatRegister dst = as_FloatRegister($dst$$reg);
10392 __ sub_d(dst, src1, src2);
10393 %}
10394 ins_pipe( fpu_regF_regF );
10395 %}
10397 instruct negF_reg(regF dst, regF src) %{
10398 match(Set dst (NegF src));
10399 format %{ "negF $dst, $src @negF_reg" %}
10400 ins_encode %{
10401 FloatRegister src = as_FloatRegister($src$$reg);
10402 FloatRegister dst = as_FloatRegister($dst$$reg);
10404 __ neg_s(dst, src);
10405 %}
10406 ins_pipe( fpu_regF_regF );
10407 %}
10409 instruct negD_reg(regD dst, regD src) %{
10410 match(Set dst (NegD src));
10411 format %{ "negD $dst, $src @negD_reg" %}
10412 ins_encode %{
10413 FloatRegister src = as_FloatRegister($src$$reg);
10414 FloatRegister dst = as_FloatRegister($dst$$reg);
10416 __ neg_d(dst, src);
10417 %}
10418 ins_pipe( fpu_regF_regF );
10419 %}
10422 instruct mulF_reg_reg(regF dst, regF src1, regF src2) %{
10423 match(Set dst (MulF src1 src2));
10424 format %{ "MULF $dst, $src1, $src2 @mulF_reg_reg" %}
10425 ins_encode %{
10426 FloatRegister src1 = $src1$$FloatRegister;
10427 FloatRegister src2 = $src2$$FloatRegister;
10428 FloatRegister dst = $dst$$FloatRegister;
10430 __ mul_s(dst, src1, src2);
10431 %}
10432 ins_pipe( fpu_regF_regF );
10433 %}
// Fused single-precision multiply-add: dst = src1 * src2 + src3.
// Effectively disabled via a huge cost (see comment below).
instruct maddF_reg_reg(regF dst, regF src1, regF src2, regF src3) %{
  match(Set dst (AddF (MulF src1 src2) src3));
  // For compatibility reason (e.g. on the Loongson platform), disable this guy.
  ins_cost(44444);
  format %{ "maddF $dst, $src1, $src2, $src3 @maddF_reg_reg" %}
  ins_encode %{
    FloatRegister src1 = $src1$$FloatRegister;
    FloatRegister src2 = $src2$$FloatRegister;
    FloatRegister src3 = $src3$$FloatRegister;
    FloatRegister dst = $dst$$FloatRegister;

    __ madd_s(dst, src1, src2, src3);
  %}
  ins_pipe( fpu_regF_regF );
%}
10451 // Mul two double precision floating piont number
10452 instruct mulD_reg_reg(regD dst, regD src1, regD src2) %{
10453 match(Set dst (MulD src1 src2));
10454 format %{ "MULD $dst, $src1, $src2 @mulD_reg_reg" %}
10455 ins_encode %{
10456 FloatRegister src1 = $src1$$FloatRegister;
10457 FloatRegister src2 = $src2$$FloatRegister;
10458 FloatRegister dst = $dst$$FloatRegister;
10460 __ mul_d(dst, src1, src2);
10461 %}
10462 ins_pipe( fpu_regF_regF );
10463 %}
// Fused double-precision multiply-add: dst = src1 * src2 + src3.
// Effectively disabled via a huge cost (see comment below).
instruct maddD_reg_reg(regD dst, regD src1, regD src2, regD src3) %{
  match(Set dst (AddD (MulD src1 src2) src3));
  // For compatibility reason (e.g. on the Loongson platform), disable this guy.
  ins_cost(44444);
  format %{ "maddD $dst, $src1, $src2, $src3 @maddD_reg_reg" %}
  ins_encode %{
    FloatRegister src1 = $src1$$FloatRegister;
    FloatRegister src2 = $src2$$FloatRegister;
    FloatRegister src3 = $src3$$FloatRegister;
    FloatRegister dst = $dst$$FloatRegister;

    __ madd_d(dst, src1, src2, src3);
  %}
  ins_pipe( fpu_regF_regF );
%}
10481 instruct absF_reg(regF dst, regF src) %{
10482 match(Set dst (AbsF src));
10483 ins_cost(100);
10484 format %{ "absF $dst, $src @absF_reg" %}
10485 ins_encode %{
10486 FloatRegister src = as_FloatRegister($src$$reg);
10487 FloatRegister dst = as_FloatRegister($dst$$reg);
10489 __ abs_s(dst, src);
10490 %}
10491 ins_pipe( fpu_regF_regF );
10492 %}
10495 // intrinsics for math_native.
10496 // AbsD SqrtD CosD SinD TanD LogD Log10D
10498 instruct absD_reg(regD dst, regD src) %{
10499 match(Set dst (AbsD src));
10500 ins_cost(100);
10501 format %{ "absD $dst, $src @absD_reg" %}
10502 ins_encode %{
10503 FloatRegister src = as_FloatRegister($src$$reg);
10504 FloatRegister dst = as_FloatRegister($dst$$reg);
10506 __ abs_d(dst, src);
10507 %}
10508 ins_pipe( fpu_regF_regF );
10509 %}
10511 instruct sqrtD_reg(regD dst, regD src) %{
10512 match(Set dst (SqrtD src));
10513 ins_cost(100);
10514 format %{ "SqrtD $dst, $src @sqrtD_reg" %}
10515 ins_encode %{
10516 FloatRegister src = as_FloatRegister($src$$reg);
10517 FloatRegister dst = as_FloatRegister($dst$$reg);
10519 __ sqrt_d(dst, src);
10520 %}
10521 ins_pipe( fpu_regF_regF );
10522 %}
10524 instruct sqrtF_reg(regF dst, regF src) %{
10525 match(Set dst (ConvD2F (SqrtD (ConvF2D src))));
10526 ins_cost(100);
10527 format %{ "SqrtF $dst, $src @sqrtF_reg" %}
10528 ins_encode %{
10529 FloatRegister src = as_FloatRegister($src$$reg);
10530 FloatRegister dst = as_FloatRegister($dst$$reg);
10532 __ sqrt_s(dst, src);
10533 %}
10534 ins_pipe( fpu_regF_regF );
10535 %}
10536 //----------------------------------Logical Instructions----------------------
10537 //__________________________________Integer Logical Instructions-------------
10539 //And Instuctions
10540 // And Register with Immediate
// Integer AND with an arbitrary immediate: the constant is materialized
// in AT first (the 16-bit andi form is handled by a cheaper rule below).
instruct andI_Reg_immI(mRegI dst, mRegI src1, immI src2) %{
  match(Set dst (AndI src1 src2));

  format %{ "and $dst, $src1, $src2 #@andI_Reg_immI" %}
  ins_encode %{
    Register dst = $dst$$Register;
    Register src = $src1$$Register;
    int val = $src2$$constant;

    __ move(AT, val);
    __ andr(dst, src, AT);
  %}
  ins_pipe( ialu_regI_regI );
%}
// Integer AND with a constant in [0, 65535]: single andi (zero-extended
// 16-bit immediate).
instruct andI_Reg_imm_0_65535(mRegI dst, mRegI src1, immI_0_65535 src2) %{
  match(Set dst (AndI src1 src2));
  ins_cost(60);

  format %{ "and $dst, $src1, $src2 #@andI_Reg_imm_0_65535" %}
  ins_encode %{
    Register dst = $dst$$Register;
    Register src = $src1$$Register;
    int val = $src2$$constant;

    __ andi(dst, src, val);
  %}
  ins_pipe( ialu_regI_regI );
%}
// Integer AND with a low-bit mask (2^k - 1): emitted as a bit-field
// extract of the low k bits (is_int_mask returns k).
instruct andI_Reg_immI_nonneg_mask(mRegI dst, mRegI src1, immI_nonneg_mask mask) %{
  match(Set dst (AndI src1 mask));
  ins_cost(60);

  format %{ "and $dst, $src1, $mask #@andI_Reg_immI_nonneg_mask" %}
  ins_encode %{
    Register dst = $dst$$Register;
    Register src = $src1$$Register;
    int size = Assembler::is_int_mask($mask$$constant);

    __ ext(dst, src, 0, size);   // dst = src & ((1 << size) - 1)
  %}
  ins_pipe( ialu_regI_regI );
%}
// Long AND with a low-bit mask (2^k - 1): 64-bit bit-field extract.
instruct andL_Reg_immL_nonneg_mask(mRegL dst, mRegL src1, immL_nonneg_mask mask) %{
  match(Set dst (AndL src1 mask));
  ins_cost(60);

  format %{ "and $dst, $src1, $mask #@andL_Reg_immL_nonneg_mask" %}
  ins_encode %{
    Register dst = $dst$$Register;
    Register src = $src1$$Register;
    int size = Assembler::is_jlong_mask($mask$$constant);

    __ dext(dst, src, 0, size);  // dst = src & ((1L << size) - 1)
  %}
  ins_pipe( ialu_regI_regI );
%}
// Integer XOR with a constant in [0, 65535]: single xori.
instruct xorI_Reg_imm_0_65535(mRegI dst, mRegI src1, immI_0_65535 src2) %{
  match(Set dst (XorI src1 src2));
  ins_cost(60);

  format %{ "xori $dst, $src1, $src2 #@xorI_Reg_imm_0_65535" %}
  ins_encode %{
    Register dst = $dst$$Register;
    Register src = $src1$$Register;
    int val = $src2$$constant;

    __ xori(dst, src, val);
  %}
  ins_pipe( ialu_regI_regI );
%}
// Integer bitwise NOT (xor with -1), using the Loongson or-not
// instruction: dst = ~(R0 | src) = ~src. Guarded on 3A2000 ISA support.
instruct xorI_Reg_immI_M1(mRegI dst, mRegI src1, immI_M1 M1) %{
  match(Set dst (XorI src1 M1));
  predicate(UseLoongsonISA && Use3A2000);
  ins_cost(60);

  format %{ "xor $dst, $src1, $M1 #@xorI_Reg_immI_M1" %}
  ins_encode %{
    Register dst = $dst$$Register;
    Register src = $src1$$Register;

    __ gsorn(dst, R0, src);
  %}
  ins_pipe( ialu_regI_regI );
%}
// Bitwise NOT of a narrowed long (ConvL2I folded away): dst = ~src via
// Loongson gsorn. Guarded on 3A2000 ISA support.
instruct xorL2I_Reg_immI_M1(mRegI dst, mRegL src1, immI_M1 M1) %{
  match(Set dst (XorI (ConvL2I src1) M1));
  predicate(UseLoongsonISA && Use3A2000);
  ins_cost(60);

  format %{ "xor $dst, $src1, $M1 #@xorL2I_Reg_immI_M1" %}
  ins_encode %{
    Register dst = $dst$$Register;
    Register src = $src1$$Register;

    __ gsorn(dst, R0, src);
  %}
  ins_pipe( ialu_regI_regI );
%}
// Long XOR with a constant in [0, 65535]: single xori.
instruct xorL_Reg_imm_0_65535(mRegL dst, mRegL src1, immL_0_65535 src2) %{
  match(Set dst (XorL src1 src2));
  ins_cost(60);

  format %{ "xori $dst, $src1, $src2 #@xorL_Reg_imm_0_65535" %}
  ins_encode %{
    Register dst = $dst$$Register;
    Register src = $src1$$Register;
    int val = $src2$$constant;

    __ xori(dst, src, val);
  %}
  ins_pipe( ialu_regI_regI );
%}
10661 /*
10662 instruct xorL_Reg_immL_M1(mRegL dst, mRegL src1, immL_M1 M1) %{
10663 match(Set dst (XorL src1 M1));
10664 predicate(UseLoongsonISA);
10665 ins_cost(60);
10667 format %{ "xor $dst, $src1, $M1 #@xorL_Reg_immL_M1" %}
10668 ins_encode %{
10669 Register dst = $dst$$Register;
10670 Register src = $src1$$Register;
10672 __ gsorn(dst, R0, src);
10673 %}
10674 ins_pipe( ialu_regI_regI );
10675 %}
10676 */
10678 instruct lbu_and_lmask(mRegI dst, memory mem, immI_255 mask) %{
10679 match(Set dst (AndI mask (LoadB mem)));
10680 ins_cost(60);
10682 format %{ "lhu $dst, $mem #@lbu_and_lmask" %}
10683 ins_encode(load_UB_enc(dst, mem));
10684 ins_pipe( ialu_loadI );
10685 %}
10687 instruct lbu_and_rmask(mRegI dst, memory mem, immI_255 mask) %{
10688 match(Set dst (AndI (LoadB mem) mask));
10689 ins_cost(60);
10691 format %{ "lhu $dst, $mem #@lbu_and_rmask" %}
10692 ins_encode(load_UB_enc(dst, mem));
10693 ins_pipe( ialu_loadI );
10694 %}
10696 instruct andI_Reg_Reg(mRegI dst, mRegI src1, mRegI src2) %{
10697 match(Set dst (AndI src1 src2));
10699 format %{ "and $dst, $src1, $src2 #@andI_Reg_Reg" %}
10700 ins_encode %{
10701 Register dst = $dst$$Register;
10702 Register src1 = $src1$$Register;
10703 Register src2 = $src2$$Register;
10704 __ andr(dst, src1, src2);
10705 %}
10706 ins_pipe( ialu_regI_regI );
10707 %}
// And-not fusion: (src1 & ~src2) via Loongson gsandn, matched from the
// xor-with-minus-one idiom. Guarded on 3A2000 ISA support.
instruct andnI_Reg_nReg(mRegI dst, mRegI src1, mRegI src2, immI_M1 M1) %{
  match(Set dst (AndI src1 (XorI src2 M1)));
  predicate(UseLoongsonISA && Use3A2000);

  format %{ "andn $dst, $src1, $src2 #@andnI_Reg_nReg" %}
  ins_encode %{
    Register dst = $dst$$Register;
    Register src1 = $src1$$Register;
    Register src2 = $src2$$Register;

    __ gsandn(dst, src1, src2);
  %}
  ins_pipe( ialu_regI_regI );
%}
// Or-not fusion: (src1 | ~src2) via Loongson gsorn, matched from the
// xor-with-minus-one idiom. Guarded on 3A2000 ISA support.
instruct ornI_Reg_nReg(mRegI dst, mRegI src1, mRegI src2, immI_M1 M1) %{
  match(Set dst (OrI src1 (XorI src2 M1)));
  predicate(UseLoongsonISA && Use3A2000);

  format %{ "orn $dst, $src1, $src2 #@ornI_Reg_nReg" %}
  ins_encode %{
    Register dst = $dst$$Register;
    Register src1 = $src1$$Register;
    Register src2 = $src2$$Register;

    __ gsorn(dst, src1, src2);
  %}
  ins_pipe( ialu_regI_regI );
%}
// And-not fusion, negation on the left: (~src1 & src2). AND commutes,
// so operands are swapped into gsandn(dst, src2, src1).
instruct andnI_nReg_Reg(mRegI dst, mRegI src1, mRegI src2, immI_M1 M1) %{
  match(Set dst (AndI (XorI src1 M1) src2));
  predicate(UseLoongsonISA && Use3A2000);

  format %{ "andn $dst, $src2, $src1 #@andnI_nReg_Reg" %}
  ins_encode %{
    Register dst = $dst$$Register;
    Register src1 = $src1$$Register;
    Register src2 = $src2$$Register;

    __ gsandn(dst, src2, src1);
  %}
  ins_pipe( ialu_regI_regI );
%}
// Or-not fusion, negation on the left: (~src1 | src2). OR commutes,
// so operands are swapped into gsorn(dst, src2, src1).
instruct ornI_nReg_Reg(mRegI dst, mRegI src1, mRegI src2, immI_M1 M1) %{
  match(Set dst (OrI (XorI src1 M1) src2));
  predicate(UseLoongsonISA && Use3A2000);

  format %{ "orn $dst, $src2, $src1 #@ornI_nReg_Reg" %}
  ins_encode %{
    Register dst = $dst$$Register;
    Register src1 = $src1$$Register;
    Register src2 = $src2$$Register;

    __ gsorn(dst, src2, src1);
  %}
  ins_pipe( ialu_regI_regI );
%}
10769 // And Long Register with Register
10770 instruct andL_Reg_Reg(mRegL dst, mRegL src1, mRegL src2) %{
10771 match(Set dst (AndL src1 src2));
10772 format %{ "AND $dst, $src1, $src2 @ andL_Reg_Reg\n\t" %}
10773 ins_encode %{
10774 Register dst_reg = as_Register($dst$$reg);
10775 Register src1_reg = as_Register($src1$$reg);
10776 Register src2_reg = as_Register($src2$$reg);
10778 __ andr(dst_reg, src1_reg, src2_reg);
10779 %}
10780 ins_pipe( ialu_regL_regL );
10781 %}
// Long AND with a widened int (ConvI2L folded away).
instruct andL_Reg_Reg_convI2L(mRegL dst, mRegL src1, mRegI src2) %{
  match(Set dst (AndL src1 (ConvI2L src2)));
  format %{ "AND $dst, $src1, $src2 @ andL_Reg_Reg_convI2L\n\t" %}
  ins_encode %{
    Register dst_reg = as_Register($dst$$reg);
    Register src1_reg = as_Register($src1$$reg);
    Register src2_reg = as_Register($src2$$reg);

    __ andr(dst_reg, src1_reg, src2_reg);
  %}
  ins_pipe( ialu_regL_regL );
%}
// Long AND with a constant in [0, 65535]: single andi.
instruct andL_Reg_imm_0_65535(mRegL dst, mRegL src1, immL_0_65535 src2) %{
  match(Set dst (AndL src1 src2));
  ins_cost(60);

  format %{ "and $dst, $src1, $src2 #@andL_Reg_imm_0_65535" %}
  ins_encode %{
    Register dst = $dst$$Register;
    Register src = $src1$$Register;
    long val = $src2$$constant;

    __ andi(dst, src, val);
  %}
  ins_pipe( ialu_regI_regI );
%}
// Narrowing AND: (int)(src1 & imm16). The andi result already fits an
// int since the mask is at most 16 bits, so ConvL2I is folded away.
instruct andL2I_Reg_imm_0_65535(mRegI dst, mRegL src1, immL_0_65535 src2) %{
  match(Set dst (ConvL2I (AndL src1 src2)));
  ins_cost(60);

  format %{ "and $dst, $src1, $src2 #@andL2I_Reg_imm_0_65535" %}
  ins_encode %{
    Register dst = $dst$$Register;
    Register src = $src1$$Register;
    long val = $src2$$constant;

    __ andi(dst, src, val);
  %}
  ins_pipe( ialu_regI_regI );
%}
10826 /*
10827 instruct andnL_Reg_nReg(mRegL dst, mRegL src1, mRegL src2, immL_M1 M1) %{
10828 match(Set dst (AndL src1 (XorL src2 M1)));
10829 predicate(UseLoongsonISA);
10831 format %{ "andn $dst, $src1, $src2 #@andnL_Reg_nReg" %}
10832 ins_encode %{
10833 Register dst = $dst$$Register;
10834 Register src1 = $src1$$Register;
10835 Register src2 = $src2$$Register;
10837 __ gsandn(dst, src1, src2);
10838 %}
10839 ins_pipe( ialu_regI_regI );
10840 %}
10841 */
10843 /*
10844 instruct ornL_Reg_nReg(mRegL dst, mRegL src1, mRegL src2, immL_M1 M1) %{
10845 match(Set dst (OrL src1 (XorL src2 M1)));
10846 predicate(UseLoongsonISA);
10848 format %{ "orn $dst, $src1, $src2 #@ornL_Reg_nReg" %}
10849 ins_encode %{
10850 Register dst = $dst$$Register;
10851 Register src1 = $src1$$Register;
10852 Register src2 = $src2$$Register;
10854 __ gsorn(dst, src1, src2);
10855 %}
10856 ins_pipe( ialu_regI_regI );
10857 %}
10858 */
10860 /*
10861 instruct andnL_nReg_Reg(mRegL dst, mRegL src1, mRegL src2, immL_M1 M1) %{
10862 match(Set dst (AndL (XorL src1 M1) src2));
10863 predicate(UseLoongsonISA);
10865 format %{ "andn $dst, $src2, $src1 #@andnL_nReg_Reg" %}
10866 ins_encode %{
10867 Register dst = $dst$$Register;
10868 Register src1 = $src1$$Register;
10869 Register src2 = $src2$$Register;
10871 __ gsandn(dst, src2, src1);
10872 %}
10873 ins_pipe( ialu_regI_regI );
10874 %}
10875 */
10877 /*
10878 instruct ornL_nReg_Reg(mRegL dst, mRegL src1, mRegL src2, immL_M1 M1) %{
10879 match(Set dst (OrL (XorL src1 M1) src2));
10880 predicate(UseLoongsonISA);
10882 format %{ "orn $dst, $src2, $src1 #@ornL_nReg_Reg" %}
10883 ins_encode %{
10884 Register dst = $dst$$Register;
10885 Register src1 = $src1$$Register;
10886 Register src2 = $src2$$Register;
10888 __ gsorn(dst, src2, src1);
10889 %}
10890 ins_pipe( ialu_regI_regI );
10891 %}
10892 */
// Long AND with -8 (clear the low 3 bits), emitted as a dins of zeros
// into bits [0..2]. Read-modify-write: dst is both input and output.
instruct andL_Reg_immL_M8(mRegL dst, immL_M8 M8) %{
  match(Set dst (AndL dst M8));
  ins_cost(60);

  format %{ "and $dst, $dst, $M8 #@andL_Reg_immL_M8" %}
  ins_encode %{
    Register dst = $dst$$Register;

    __ dins(dst, R0, 0, 3);   // zero bits 0-2
  %}
  ins_pipe( ialu_regI_regI );
%}
// Long AND with -5 (clear bit 2 only; -5 = ...11111011), via dins.
instruct andL_Reg_immL_M5(mRegL dst, immL_M5 M5) %{
  match(Set dst (AndL dst M5));
  ins_cost(60);

  format %{ "and $dst, $dst, $M5 #@andL_Reg_immL_M5" %}
  ins_encode %{
    Register dst = $dst$$Register;

    __ dins(dst, R0, 2, 1);   // zero bit 2
  %}
  ins_pipe( ialu_regI_regI );
%}
// Long AND with -7 (clear bits 1-2; -7 = ...11111001), via dins.
instruct andL_Reg_immL_M7(mRegL dst, immL_M7 M7) %{
  match(Set dst (AndL dst M7));
  ins_cost(60);

  format %{ "and $dst, $dst, $M7 #@andL_Reg_immL_M7" %}
  ins_encode %{
    Register dst = $dst$$Register;

    __ dins(dst, R0, 1, 2);   // zero bits 1-2
  %}
  ins_pipe( ialu_regI_regI );
%}
// Long AND with -4 (clear the low 2 bits), via dins.
instruct andL_Reg_immL_M4(mRegL dst, immL_M4 M4) %{
  match(Set dst (AndL dst M4));
  ins_cost(60);

  format %{ "and $dst, $dst, $M4 #@andL_Reg_immL_M4" %}
  ins_encode %{
    Register dst = $dst$$Register;

    __ dins(dst, R0, 0, 2);   // zero bits 0-1
  %}
  ins_pipe( ialu_regI_regI );
%}
// Long AND with -121 (clear bits 3-6; -121 = ~120, 120 = 0b1111000),
// via dins.
instruct andL_Reg_immL_M121(mRegL dst, immL_M121 M121) %{
  match(Set dst (AndL dst M121));
  ins_cost(60);

  format %{ "and $dst, $dst, $M121 #@andL_Reg_immL_M121" %}
  ins_encode %{
    Register dst = $dst$$Register;

    __ dins(dst, R0, 3, 4);   // zero bits 3-6
  %}
  ins_pipe( ialu_regI_regI );
%}
// Or Long Register with Register
// dst = src1 | src2 (64-bit).
instruct orL_Reg_Reg(mRegL dst, mRegL src1, mRegL src2) %{
  match(Set dst (OrL src1 src2));
  format %{ "OR $dst, $src1, $src2 @ orL_Reg_Reg\t" %}
  ins_encode %{
    Register dst_reg = $dst$$Register;
    Register src1_reg = $src1$$Register;
    Register src2_reg = $src2$$Register;

    __ orr(dst_reg, src1_reg, src2_reg);
  %}
  ins_pipe( ialu_regL_regL );
%}

// Same as above but src1 is a pointer reinterpreted as a long; CastP2X
// is a bit-level no-op on the register, so a plain OR suffices.
instruct orL_Reg_P2XReg(mRegL dst, mRegP src1, mRegL src2) %{
  match(Set dst (OrL (CastP2X src1) src2));
  format %{ "OR $dst, $src1, $src2 @ orL_Reg_P2XReg\t" %}
  ins_encode %{
    Register dst_reg = $dst$$Register;
    Register src1_reg = $src1$$Register;
    Register src2_reg = $src2$$Register;

    __ orr(dst_reg, src1_reg, src2_reg);
  %}
  ins_pipe( ialu_regL_regL );
%}

// Xor Long Register with Register
// dst = src1 ^ src2 (64-bit).
instruct xorL_Reg_Reg(mRegL dst, mRegL src1, mRegL src2) %{
  match(Set dst (XorL src1 src2));
  format %{ "XOR $dst, $src1, $src2 @ xorL_Reg_Reg\t" %}
  ins_encode %{
    Register dst_reg = as_Register($dst$$reg);
    Register src1_reg = as_Register($src1$$reg);
    Register src2_reg = as_Register($src2$$reg);

    __ xorr(dst_reg, src1_reg, src2_reg);
  %}
  ins_pipe( ialu_regL_regL );
%}
// Shift Left by 8-bit immediate
// 32-bit left shift by constant; sll sign-extends its 32-bit result.
instruct salI_Reg_imm(mRegI dst, mRegI src, immI8 shift) %{
  match(Set dst (LShiftI src shift));

  format %{ "SHL $dst, $src, $shift #@salI_Reg_imm" %}
  ins_encode %{
    Register src = $src$$Register;
    Register dst = $dst$$Register;
    int shamt = $shift$$constant;

    __ sll(dst, src, shamt);
  %}
  ins_pipe( ialu_regI_regI );
%}

// (int)(long)src << shift: sll only reads the low 32 bits, so the
// ConvL2I needs no separate instruction.
instruct salL2I_Reg_imm(mRegI dst, mRegL src, immI8 shift) %{
  match(Set dst (LShiftI (ConvL2I src) shift));

  format %{ "SHL $dst, $src, $shift #@salL2I_Reg_imm" %}
  ins_encode %{
    Register src = $src$$Register;
    Register dst = $dst$$Register;
    int shamt = $shift$$constant;

    __ sll(dst, src, shamt);
  %}
  ins_pipe( ialu_regI_regI );
%}

// (src << 16) & 0xFFFF0000: the left shift by 16 already zeroes the low
// 16 bits, so the mask is implicit and a single sll does the job.
instruct salI_Reg_imm_and_M65536(mRegI dst, mRegI src, immI_16 shift, immI_M65536 mask) %{
  match(Set dst (AndI (LShiftI src shift) mask));

  format %{ "SHL $dst, $src, $shift #@salI_Reg_imm_and_M65536" %}
  ins_encode %{
    Register src = $src$$Register;
    Register dst = $dst$$Register;

    __ sll(dst, src, 16);
  %}
  ins_pipe( ialu_regI_regI );
%}
// (int)(((long)(src & 7) << 16) >> 16): the AND-7 result (0..7) is
// unchanged by the <<16 >>16 sign-extension pair, so a single andi
// implements the whole tree.
instruct land7_2_s(mRegI dst, mRegL src, immL7 seven, immI_16 sixteen)
%{
  match(Set dst (RShiftI (LShiftI (ConvL2I (AndL src seven)) sixteen) sixteen));

  format %{ "andi $dst, $src, 7\t# @land7_2_s" %}
  ins_encode %{
    Register src = $src$$Register;
    Register dst = $dst$$Register;

    __ andi(dst, src, 7);
  %}
  ins_pipe(ialu_regI_regI);
%}

// ((src1 | imm) << 16) >> 16 with imm in [0, 32767]: the i2s round trip
// is collapsed into the plain ori (imm fits in the low half-word).
// NOTE(review): relies on the matcher only forming this tree where the
// sign-extension is value-preserving.
instruct ori2s(mRegI dst, mRegI src1, immI_0_32767 src2, immI_16 sixteen)
%{
  match(Set dst (RShiftI (LShiftI (OrI src1 src2) sixteen) sixteen));

  format %{ "ori $dst, $src1, $src2\t# @ori2s" %}
  ins_encode %{
    Register src = $src1$$Register;
    int val = $src2$$constant;
    Register dst = $dst$$Register;

    __ ori(dst, src, val);
  %}
  ins_pipe(ialu_regI_regI);
%}

// Logical Shift Right by 16, followed by Arithmetic Shift Left by 16.
// This idiom is used by the compiler the i2s bytecode.
// Implemented with seh (sign-extend halfword).
instruct i2s(mRegI dst, mRegI src, immI_16 sixteen)
%{
  match(Set dst (RShiftI (LShiftI src sixteen) sixteen));

  format %{ "i2s $dst, $src\t# @i2s" %}
  ins_encode %{
    Register src = $src$$Register;
    Register dst = $dst$$Register;

    __ seh(dst, src);
  %}
  ins_pipe(ialu_regI_regI);
%}

// Logical Shift Right by 24, followed by Arithmetic Shift Left by 24.
// This idiom is used by the compiler for the i2b bytecode.
// Implemented with seb (sign-extend byte).
instruct i2b(mRegI dst, mRegI src, immI_24 twentyfour)
%{
  match(Set dst (RShiftI (LShiftI src twentyfour) twentyfour));

  format %{ "i2b $dst, $src\t# @i2b" %}
  ins_encode %{
    Register src = $src$$Register;
    Register dst = $dst$$Register;

    __ seb(dst, src);
  %}
  ins_pipe(ialu_regI_regI);
%}
// (int)(long)src << shift — same pattern/encoding as salL2I_Reg_imm
// above (duplicate rule; the matcher picks either).
instruct salI_RegL2I_imm(mRegI dst, mRegL src, immI8 shift) %{
  match(Set dst (LShiftI (ConvL2I src) shift));

  format %{ "SHL $dst, $src, $shift #@salI_RegL2I_imm" %}
  ins_encode %{
    Register src = $src$$Register;
    Register dst = $dst$$Register;
    int shamt = $shift$$constant;

    __ sll(dst, src, shamt);
  %}
  ins_pipe( ialu_regI_regI );
%}

// Shift Left by 8-bit immediate
// 32-bit left shift by a register amount (sllv uses low 5 bits of shamt).
instruct salI_Reg_Reg(mRegI dst, mRegI src, mRegI shift) %{
  match(Set dst (LShiftI src shift));

  format %{ "SHL $dst, $src, $shift #@salI_Reg_Reg" %}
  ins_encode %{
    Register src = $src$$Register;
    Register dst = $dst$$Register;
    Register shamt = $shift$$Register;
    __ sllv(dst, src, shamt);
  %}
  ins_pipe( ialu_regI_regI );
%}
// Shift Left Long
// 64-bit left shift by constant.  dsll encodes shift amounts 0..31;
// larger amounts (mod 64) use dsll32 with (sa - 32).
instruct salL_Reg_imm(mRegL dst, mRegL src, immI8 shift) %{
  //predicate(UseNewLongLShift);
  match(Set dst (LShiftL src shift));
  ins_cost(100);
  format %{ "salL $dst, $src, $shift @ salL_Reg_imm" %}
  ins_encode %{
    Register src_reg = as_Register($src$$reg);
    Register dst_reg = as_Register($dst$$reg);
    int shamt = $shift$$constant;

    // Small amounts encode directly; otherwise reduce mod 64 and pick
    // dsll/dsll32 by whether the amount crosses 32.
    if (__ is_simm(shamt, 5))
      __ dsll(dst_reg, src_reg, shamt);
    else
    {
      int sa = Assembler::low(shamt, 6);
      if (sa < 32) {
        __ dsll(dst_reg, src_reg, sa);
      } else {
        __ dsll32(dst_reg, src_reg, sa - 32);
      }
    }
  %}
  ins_pipe( ialu_regL_regL );
%}

// ((long)src) << shift: dsll ignores the upper bits the ConvI2L would
// have produced only when the left shift discards them; encoding is the
// same shape as salL_Reg_imm.
instruct salL_RegI2L_imm(mRegL dst, mRegI src, immI8 shift) %{
  //predicate(UseNewLongLShift);
  match(Set dst (LShiftL (ConvI2L src) shift));
  ins_cost(100);
  format %{ "salL $dst, $src, $shift @ salL_RegI2L_imm" %}
  ins_encode %{
    Register src_reg = as_Register($src$$reg);
    Register dst_reg = as_Register($dst$$reg);
    int shamt = $shift$$constant;

    if (__ is_simm(shamt, 5))
      __ dsll(dst_reg, src_reg, shamt);
    else
    {
      int sa = Assembler::low(shamt, 6);
      if (sa < 32) {
        __ dsll(dst_reg, src_reg, sa);
      } else {
        __ dsll32(dst_reg, src_reg, sa - 32);
      }
    }
  %}
  ins_pipe( ialu_regL_regL );
%}

// Shift Left Long
// 64-bit left shift by a register amount (dsllv uses low 6 bits).
instruct salL_Reg_Reg(mRegL dst, mRegL src, mRegI shift) %{
  //predicate(UseNewLongLShift);
  match(Set dst (LShiftL src shift));
  ins_cost(100);
  format %{ "salL $dst, $src, $shift @ salL_Reg_Reg" %}
  ins_encode %{
    Register src_reg = as_Register($src$$reg);
    Register dst_reg = as_Register($dst$$reg);

    __ dsllv(dst_reg, src_reg, $shift$$Register);
  %}
  ins_pipe( ialu_regL_regL );
%}

// Duplicate of salL_RegI2L_imm (same match rule, braced control flow).
instruct salL_convI2L_Reg_imm(mRegL dst, mRegI src, immI8 shift) %{
  match(Set dst (LShiftL (ConvI2L src) shift));
  ins_cost(100);
  format %{ "salL $dst, $src, $shift @ salL_convI2L_Reg_imm" %}
  ins_encode %{
    Register src_reg = as_Register($src$$reg);
    Register dst_reg = as_Register($dst$$reg);
    int shamt = $shift$$constant;

    if (__ is_simm(shamt, 5)) {
      __ dsll(dst_reg, src_reg, shamt);
    } else {
      int sa = Assembler::low(shamt, 6);
      if (sa < 32) {
        __ dsll(dst_reg, src_reg, sa);
      } else {
        __ dsll32(dst_reg, src_reg, sa - 32);
      }
    }
  %}
  ins_pipe( ialu_regL_regL );
%}
// Shift Right Long
// 64-bit arithmetic right shift by constant; amount taken mod 64,
// dsra for 0..31, dsra32 for 32..63.
instruct sarL_Reg_imm(mRegL dst, mRegL src, immI8 shift) %{
  match(Set dst (RShiftL src shift));
  ins_cost(100);
  format %{ "sarL $dst, $src, $shift @ sarL_Reg_imm" %}
  ins_encode %{
    Register src_reg = as_Register($src$$reg);
    Register dst_reg = as_Register($dst$$reg);
    int shamt = ($shift$$constant & 0x3f);
    if (__ is_simm(shamt, 5))
      __ dsra(dst_reg, src_reg, shamt);
    else {
      int sa = Assembler::low(shamt, 6);
      if (sa < 32) {
        __ dsra(dst_reg, src_reg, sa);
      } else {
        __ dsra32(dst_reg, src_reg, sa - 32);
      }
    }
  %}
  ins_pipe( ialu_regL_regL );
%}

// (int)(src >> shift) with shift in [32, 63]: result fits in 32 bits,
// so a single dsra32 produces the already-sign-extended int.
instruct sarL2I_Reg_immI_32_63(mRegI dst, mRegL src, immI_32_63 shift) %{
  match(Set dst (ConvL2I (RShiftL src shift)));
  ins_cost(100);
  format %{ "sarL $dst, $src, $shift @ sarL2I_Reg_immI_32_63" %}
  ins_encode %{
    Register src_reg = as_Register($src$$reg);
    Register dst_reg = as_Register($dst$$reg);
    int shamt = $shift$$constant;

    __ dsra32(dst_reg, src_reg, shamt - 32);
  %}
  ins_pipe( ialu_regL_regL );
%}

// Shift Right Long arithmetically
// 64-bit arithmetic right shift by a register amount (low 6 bits).
instruct sarL_Reg_Reg(mRegL dst, mRegL src, mRegI shift) %{
  //predicate(UseNewLongLShift);
  match(Set dst (RShiftL src shift));
  ins_cost(100);
  format %{ "sarL $dst, $src, $shift @ sarL_Reg_Reg" %}
  ins_encode %{
    Register src_reg = as_Register($src$$reg);
    Register dst_reg = as_Register($dst$$reg);

    __ dsrav(dst_reg, src_reg, $shift$$Register);
  %}
  ins_pipe( ialu_regL_regL );
%}
// Shift Right Long logically
// 64-bit logical right shift by a register amount (low 6 bits).
instruct slrL_Reg_Reg(mRegL dst, mRegL src, mRegI shift) %{
  match(Set dst (URShiftL src shift));
  ins_cost(100);
  format %{ "slrL $dst, $src, $shift @ slrL_Reg_Reg" %}
  ins_encode %{
    Register src_reg = as_Register($src$$reg);
    Register dst_reg = as_Register($dst$$reg);

    __ dsrlv(dst_reg, src_reg, $shift$$Register);
  %}
  ins_pipe( ialu_regL_regL );
%}

// Logical right shift by constant in [0, 31]: single dsrl.
instruct slrL_Reg_immI_0_31(mRegL dst, mRegL src, immI_0_31 shift) %{
  match(Set dst (URShiftL src shift));
  ins_cost(80);
  format %{ "slrL $dst, $src, $shift @ slrL_Reg_immI_0_31" %}
  ins_encode %{
    Register src_reg = as_Register($src$$reg);
    Register dst_reg = as_Register($dst$$reg);
    int shamt = $shift$$constant;

    __ dsrl(dst_reg, src_reg, shamt);
  %}
  ins_pipe( ialu_regL_regL );
%}

// (int)(src >>> shift) & 0x7FFFFFFF: a 31-bit field extract starting at
// the shift amount does the shift and mask in one dext.
instruct slrL_Reg_immI_0_31_and_max_int(mRegI dst, mRegL src, immI_0_31 shift, immI_MaxI max_int) %{
  match(Set dst (AndI (ConvL2I (URShiftL src shift)) max_int));
  ins_cost(80);
  format %{ "dext $dst, $src, $shift, 31 @ slrL_Reg_immI_0_31_and_max_int" %}
  ins_encode %{
    Register src_reg = as_Register($src$$reg);
    Register dst_reg = as_Register($dst$$reg);
    int shamt = $shift$$constant;

    __ dext(dst_reg, src_reg, shamt, 31);
  %}
  ins_pipe( ialu_regL_regL );
%}

// Pointer bits shifted right; CastP2X is a register-level no-op.
instruct slrL_P2XReg_immI_0_31(mRegL dst, mRegP src, immI_0_31 shift) %{
  match(Set dst (URShiftL (CastP2X src) shift));
  ins_cost(80);
  format %{ "slrL $dst, $src, $shift @ slrL_P2XReg_immI_0_31" %}
  ins_encode %{
    Register src_reg = as_Register($src$$reg);
    Register dst_reg = as_Register($dst$$reg);
    int shamt = $shift$$constant;

    __ dsrl(dst_reg, src_reg, shamt);
  %}
  ins_pipe( ialu_regL_regL );
%}

// Logical right shift by constant in [32, 63]: dsrl32 with (shamt - 32).
instruct slrL_Reg_immI_32_63(mRegL dst, mRegL src, immI_32_63 shift) %{
  match(Set dst (URShiftL src shift));
  ins_cost(80);
  format %{ "slrL $dst, $src, $shift @ slrL_Reg_immI_32_63" %}
  ins_encode %{
    Register src_reg = as_Register($src$$reg);
    Register dst_reg = as_Register($dst$$reg);
    int shamt = $shift$$constant;

    __ dsrl32(dst_reg, src_reg, shamt - 32);
  %}
  ins_pipe( ialu_regL_regL );
%}

// (int)(src >>> shift) for shift > 32: result fits in 31 bits so the
// narrowing ConvL2I is free.
instruct slrL_Reg_immI_convL2I(mRegI dst, mRegL src, immI_32_63 shift) %{
  match(Set dst (ConvL2I (URShiftL src shift)));
  predicate(n->in(1)->in(2)->get_int() > 32);
  ins_cost(80);
  format %{ "slrL $dst, $src, $shift @ slrL_Reg_immI_convL2I" %}
  ins_encode %{
    Register src_reg = as_Register($src$$reg);
    Register dst_reg = as_Register($dst$$reg);
    int shamt = $shift$$constant;

    __ dsrl32(dst_reg, src_reg, shamt - 32);
  %}
  ins_pipe( ialu_regL_regL );
%}

// Pointer variant of the [32, 63] logical right shift.
instruct slrL_P2XReg_immI_32_63(mRegL dst, mRegP src, immI_32_63 shift) %{
  match(Set dst (URShiftL (CastP2X src) shift));
  ins_cost(80);
  format %{ "slrL $dst, $src, $shift @ slrL_P2XReg_immI_32_63" %}
  ins_encode %{
    Register src_reg = as_Register($src$$reg);
    Register dst_reg = as_Register($dst$$reg);
    int shamt = $shift$$constant;

    __ dsrl32(dst_reg, src_reg, shamt - 32);
  %}
  ins_pipe( ialu_regL_regL );
%}
// Xor Instructions
// Xor Register with Register
// 32-bit XOR; the trailing sll-by-0 re-sign-extends the 32-bit result
// into the 64-bit register (int canonical form).
instruct xorI_Reg_Reg(mRegI dst, mRegI src1, mRegI src2) %{
  match(Set dst (XorI src1 src2));

  format %{ "XOR $dst, $src1, $src2 #@xorI_Reg_Reg" %}

  ins_encode %{
    Register dst = $dst$$Register;
    Register src1 = $src1$$Register;
    Register src2 = $src2$$Register;
    __ xorr(dst, src1, src2);
    __ sll(dst, dst, 0); /* long -> int */
  %}

  ins_pipe( ialu_regI_regI );
%}

// Or Instructions
// Or Register with Register
instruct orI_Reg_Reg(mRegI dst, mRegI src1, mRegI src2) %{
  match(Set dst (OrI src1 src2));

  format %{ "OR $dst, $src1, $src2 #@orI_Reg_Reg" %}
  ins_encode %{
    Register dst = $dst$$Register;
    Register src1 = $src1$$Register;
    Register src2 = $src2$$Register;
    __ orr(dst, src1, src2);
  %}

  ins_pipe( ialu_regI_regI );
%}

// (src >>> rshift) | ((src & 1) << lshift) with rshift + lshift == 32
// (enforced by the predicate): equals rotr(src, 1) >>> (rshift - 1).
instruct rotI_shr_logical_Reg(mRegI dst, mRegI src, immI_0_31 rshift, immI_0_31 lshift, immI_1 one) %{
  match(Set dst (OrI (URShiftI src rshift) (LShiftI (AndI src one) lshift)));
  predicate(32 == ((n->in(1)->in(2)->get_int() + n->in(2)->in(2)->get_int())));

  format %{ "rotr $dst, $src, 1 ...\n\t"
            "srl $dst, $dst, ($rshift-1) @ rotI_shr_logical_Reg" %}
  ins_encode %{
    Register dst = $dst$$Register;
    Register src = $src$$Register;
    int rshift = $rshift$$constant;

    __ rotr(dst, src, 1);
    // rshift == 1 needs no follow-up shift.
    if (rshift - 1) {
      __ srl(dst, dst, rshift - 1);
    }
  %}

  ins_pipe( ialu_regI_regI );
%}

// OR of a long with pointer bits (CastP2X is a register-level no-op).
instruct orI_Reg_castP2X(mRegL dst, mRegL src1, mRegP src2) %{
  match(Set dst (OrI src1 (CastP2X src2)));

  format %{ "OR $dst, $src1, $src2 #@orI_Reg_castP2X" %}
  ins_encode %{
    Register dst = $dst$$Register;
    Register src1 = $src1$$Register;
    Register src2 = $src2$$Register;
    __ orr(dst, src1, src2);
  %}

  ins_pipe( ialu_regI_regI );
%}
// Logical Shift Right by 8-bit immediate
instruct shr_logical_Reg_imm(mRegI dst, mRegI src, immI8 shift) %{
  match(Set dst (URShiftI src shift));
  // effect(KILL cr);

  format %{ "SRL $dst, $src, $shift #@shr_logical_Reg_imm" %}
  ins_encode %{
    Register src = $src$$Register;
    Register dst = $dst$$Register;
    int shift = $shift$$constant;

    __ srl(dst, src, shift);
  %}
  ins_pipe( ialu_regI_regI );
%}

// (src >>> shift) & mask where mask is a contiguous low-bit mask:
// a single ext field-extract does both.  is_int_mask yields the mask
// width in bits (presumably; defined in the assembler — verify there).
instruct shr_logical_Reg_imm_nonneg_mask(mRegI dst, mRegI src, immI_0_31 shift, immI_nonneg_mask mask) %{
  match(Set dst (AndI (URShiftI src shift) mask));

  format %{ "ext $dst, $src, $shift, one-bits($mask) #@shr_logical_Reg_imm_nonneg_mask" %}
  ins_encode %{
    Register src = $src$$Register;
    Register dst = $dst$$Register;
    int pos = $shift$$constant;
    int size = Assembler::is_int_mask($mask$$constant);

    __ ext(dst, src, pos, size);
  %}
  ins_pipe( ialu_regI_regI );
%}
// Rotate left int: (dst << lshift) | (dst >>> rshift) with
// lshift + rshift == 0 (mod 32), implemented as rotate-right by rshift.
instruct rolI_Reg_immI_0_31(mRegI dst, immI_0_31 lshift, immI_0_31 rshift)
%{
  predicate(0 == ((n->in(1)->in(2)->get_int() + n->in(2)->in(2)->get_int()) & 0x1f));
  match(Set dst (OrI (LShiftI dst lshift) (URShiftI dst rshift)));

  ins_cost(100);
  format %{ "rotr $dst, $dst, $rshift #@rolI_Reg_immI_0_31" %}
  ins_encode %{
    Register dst = $dst$$Register;
    int sa = $rshift$$constant;

    __ rotr(dst, dst, sa);
  %}
  ins_pipe( ialu_regI_regI );
%}

// Rotate left long, rshift in [0, 31]: drotr.
instruct rolL_Reg_immI_0_31(mRegL dst, immI_32_63 lshift, immI_0_31 rshift)
%{
  predicate(0 == ((n->in(1)->in(2)->get_int() + n->in(2)->in(2)->get_int()) & 0x3f));
  match(Set dst (OrL (LShiftL dst lshift) (URShiftL dst rshift)));

  ins_cost(100);
  format %{ "rotr $dst, $dst, $rshift #@rolL_Reg_immI_0_31" %}
  ins_encode %{
    Register dst = $dst$$Register;
    int sa = $rshift$$constant;

    __ drotr(dst, dst, sa);
  %}
  ins_pipe( ialu_regI_regI );
%}

// Rotate left long, rshift in [32, 63]: drotr32 with (sa - 32).
instruct rolL_Reg_immI_32_63(mRegL dst, immI_0_31 lshift, immI_32_63 rshift)
%{
  predicate(0 == ((n->in(1)->in(2)->get_int() + n->in(2)->in(2)->get_int()) & 0x3f));
  match(Set dst (OrL (LShiftL dst lshift) (URShiftL dst rshift)));

  ins_cost(100);
  format %{ "rotr $dst, $dst, $rshift #@rolL_Reg_immI_32_63" %}
  ins_encode %{
    Register dst = $dst$$Register;
    int sa = $rshift$$constant;

    __ drotr32(dst, dst, sa - 32);
  %}
  ins_pipe( ialu_regI_regI );
%}

// Rotate right int (operands in URShift-first order).
instruct rorI_Reg_immI_0_31(mRegI dst, immI_0_31 rshift, immI_0_31 lshift)
%{
  predicate(0 == ((n->in(1)->in(2)->get_int() + n->in(2)->in(2)->get_int()) & 0x1f));
  match(Set dst (OrI (URShiftI dst rshift) (LShiftI dst lshift)));

  ins_cost(100);
  format %{ "rotr $dst, $dst, $rshift #@rorI_Reg_immI_0_31" %}
  ins_encode %{
    Register dst = $dst$$Register;
    int sa = $rshift$$constant;

    __ rotr(dst, dst, sa);
  %}
  ins_pipe( ialu_regI_regI );
%}

// Rotate right long, rshift in [0, 31].
instruct rorL_Reg_immI_0_31(mRegL dst, immI_0_31 rshift, immI_32_63 lshift)
%{
  predicate(0 == ((n->in(1)->in(2)->get_int() + n->in(2)->in(2)->get_int()) & 0x3f));
  match(Set dst (OrL (URShiftL dst rshift) (LShiftL dst lshift)));

  ins_cost(100);
  format %{ "rotr $dst, $dst, $rshift #@rorL_Reg_immI_0_31" %}
  ins_encode %{
    Register dst = $dst$$Register;
    int sa = $rshift$$constant;

    __ drotr(dst, dst, sa);
  %}
  ins_pipe( ialu_regI_regI );
%}

// Rotate right long, rshift in [32, 63].
instruct rorL_Reg_immI_32_63(mRegL dst, immI_32_63 rshift, immI_0_31 lshift)
%{
  predicate(0 == ((n->in(1)->in(2)->get_int() + n->in(2)->in(2)->get_int()) & 0x3f));
  match(Set dst (OrL (URShiftL dst rshift) (LShiftL dst lshift)));

  ins_cost(100);
  format %{ "rotr $dst, $dst, $rshift #@rorL_Reg_immI_32_63" %}
  ins_encode %{
    Register dst = $dst$$Register;
    int sa = $rshift$$constant;

    __ drotr32(dst, dst, sa - 32);
  %}
  ins_pipe( ialu_regI_regI );
%}
// Logical Shift Right
// 32-bit logical right shift by a register amount.
instruct shr_logical_Reg_Reg(mRegI dst, mRegI src, mRegI shift) %{
  match(Set dst (URShiftI src shift));

  format %{ "SRL $dst, $src, $shift #@shr_logical_Reg_Reg" %}
  ins_encode %{
    Register src = $src$$Register;
    Register dst = $dst$$Register;
    Register shift = $shift$$Register;
    __ srlv(dst, src, shift);
  %}
  ins_pipe( ialu_regI_regI );
%}

// 32-bit arithmetic right shift by constant.
instruct shr_arith_Reg_imm(mRegI dst, mRegI src, immI8 shift) %{
  match(Set dst (RShiftI src shift));
  // effect(KILL cr);

  format %{ "SRA $dst, $src, $shift #@shr_arith_Reg_imm" %}
  ins_encode %{
    Register src = $src$$Register;
    Register dst = $dst$$Register;
    int shift = $shift$$constant;
    __ sra(dst, src, shift);
  %}
  ins_pipe( ialu_regI_regI );
%}

// 32-bit arithmetic right shift by a register amount.
instruct shr_arith_Reg_Reg(mRegI dst, mRegI src, mRegI shift) %{
  match(Set dst (RShiftI src shift));
  // effect(KILL cr);

  format %{ "SRA $dst, $src, $shift #@shr_arith_Reg_Reg" %}
  ins_encode %{
    Register src = $src$$Register;
    Register dst = $dst$$Register;
    Register shift = $shift$$Register;
    __ srav(dst, src, shift);
  %}
  ins_pipe( ialu_regI_regI );
%}
//----------Convert Int to Boolean---------------------------------------------

// dst = (src != 0) ? 1 : 0, via movz (move-if-zero).  When dst aliases
// src, src is saved in AT first so the daddiu does not clobber it.
instruct convI2B(mRegI dst, mRegI src) %{
  match(Set dst (Conv2B src));

  ins_cost(100);
  format %{ "convI2B $dst, $src @ convI2B" %}
  ins_encode %{
    Register dst = as_Register($dst$$reg);
    Register src = as_Register($src$$reg);

    if (dst != src) {
      __ daddiu(dst, R0, 1);
      __ movz(dst, R0, src);
    } else {
      __ move(AT, src);
      __ daddiu(dst, R0, 1);
      __ movz(dst, R0, AT);
    }
  %}

  ins_pipe( ialu_regL_regL );
%}

// Sign-extend int to long via sll-by-0; skipped when dst == src
// (NOTE(review): assumes int values are already kept sign-extended in
// 64-bit registers — confirm against the register convention).
instruct convI2L_reg( mRegL dst, mRegI src) %{
  match(Set dst (ConvI2L src));

  ins_cost(100);
  format %{ "SLL $dst, $src @ convI2L_reg\t" %}
  ins_encode %{
    Register dst = as_Register($dst$$reg);
    Register src = as_Register($src$$reg);

    if(dst != src) __ sll(dst, src, 0);
  %}
  ins_pipe( ialu_regL_regL );
%}

// Truncate long to int: sll-by-0 keeps the low 32 bits, sign-extended.
instruct convL2I_reg( mRegI dst, mRegL src ) %{
  match(Set dst (ConvL2I src));

  format %{ "MOV $dst, $src @ convL2I_reg" %}
  ins_encode %{
    Register dst = as_Register($dst$$reg);
    Register src = as_Register($src$$reg);

    __ sll(dst, src, 0);
  %}

  ins_pipe( ialu_regI_regI );
%}

// (long)(int)src in one instruction: same sll-by-0 sign-extension.
instruct convL2I2L_reg( mRegL dst, mRegL src ) %{
  match(Set dst (ConvI2L (ConvL2I src)));

  format %{ "sll $dst, $src, 0 @ convL2I2L_reg" %}
  ins_encode %{
    Register dst = as_Register($dst$$reg);
    Register src = as_Register($src$$reg);

    __ sll(dst, src, 0);
  %}

  ins_pipe( ialu_regI_regI );
%}
// long -> double: move the 64-bit integer into an FPU register, then
// convert in place.
instruct convL2D_reg( regD dst, mRegL src ) %{
  match(Set dst (ConvL2D src));
  format %{ "convL2D $dst, $src @ convL2D_reg" %}
  ins_encode %{
    Register src = as_Register($src$$reg);
    FloatRegister dst = as_FloatRegister($dst$$reg);

    __ dmtc1(src, dst);
    __ cvt_d_l(dst, dst);
  %}

  ins_pipe( pipe_slow );
%}
// double -> long, fast path.  trunc_l_d handles in-range values; the
// tail distinguishes the out-of-range saturation value (max_long) from
// a genuine max_long result, and maps NaN to 0.  Clobbers AT, T9, F30.
instruct convD2L_reg_fast( mRegL dst, regD src ) %{
  match(Set dst (ConvD2L src));
  ins_cost(150);
  format %{ "convD2L $dst, $src @ convD2L_reg_fast" %}
  ins_encode %{
    Register dst = as_Register($dst$$reg);
    FloatRegister src = as_FloatRegister($src$$reg);

    Label Done;

    __ trunc_l_d(F30, src);
    // max_long: 0x7fffffffffffffff
    // __ set64(AT, 0x7fffffffffffffff);
    // Build max_long in AT without a constant pool: -1 >>> 1.
    __ daddiu(AT, R0, -1);
    __ dsrl(AT, AT, 1);
    __ dmfc1(dst, F30);

    // Result != max_long: truncation was exact enough — done.
    __ bne(dst, AT, Done);
    __ delayed()->mtc1(R0, F30);

    // Compare src against 0.0 to pick min_long vs max_long for overflow.
    __ cvt_d_w(F30, F30);
    __ c_ult_d(src, F30);
    __ bc1f(Done);
    __ delayed()->daddiu(T9, R0, -1);

    __ c_un_d(src, src); //NaN?
    __ subu(dst, T9, AT);
    __ movt(dst, R0);

    __ bind(Done);
  %}

  ins_pipe( pipe_slow );
%}
// double -> long, slow path: NaN yields 0; otherwise trunc_l_d is used
// and, when the FPU flags the conversion as invalid (FCSR cause bit
// 0x10000), the result is recomputed by the SharedRuntime::d2l stub.
// Clobbers AT, T9, F12, F30.
instruct convD2L_reg_slow( mRegL dst, regD src ) %{
  match(Set dst (ConvD2L src));
  ins_cost(250);
  format %{ "convD2L $dst, $src @ convD2L_reg_slow" %}
  ins_encode %{
    Register dst = as_Register($dst$$reg);
    FloatRegister src = as_FloatRegister($src$$reg);

    Label L;

    __ c_un_d(src, src); //NaN?
    __ bc1t(L);
    __ delayed();
    __ move(dst, R0);

    __ trunc_l_d(F30, src);
    __ cfc1(AT, 31);       // read FCSR
    __ li(T9, 0x10000);    // invalid-operation cause bit
    __ andr(AT, AT, T9);
    __ beq(AT, R0, L);
    __ delayed()->dmfc1(dst, F30);

    __ mov_d(F12, src);
    // call_VM_leaf clobbers V0, which may hold a live value when it is
    // not our destination — preserve it, mirroring the fix already
    // applied in convF2I_reg_slow (see the 2014/01/08 note there).
    if(dst != V0) {
      __ push(V0);
    }
    __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::d2l), 1);
    if(dst != V0) {
      __ move(dst, V0);
      __ pop(V0);
    }
    __ bind(L);
  %}

  ins_pipe( pipe_slow );
%}
// float -> int, fast path: truncate, then overwrite with 0 if the input
// was NaN (c_un_s sets the condition movt tests).  Clobbers F30.
instruct convF2I_reg_fast( mRegI dst, regF src ) %{
  match(Set dst (ConvF2I src));
  ins_cost(150);
  format %{ "convf2i $dst, $src @ convF2I_reg_fast" %}
  ins_encode %{
    Register dreg = $dst$$Register;
    FloatRegister fval = $src$$FloatRegister;

    __ trunc_w_s(F30, fval);
    __ mfc1(dreg, F30);
    __ c_un_s(fval, fval); //NaN?
    __ movt(dreg, R0);
  %}

  ins_pipe( pipe_slow );
%}
// float -> int, slow path: NaN yields 0; on an invalid-operation flag
// from trunc_w_s the result is recomputed via SharedRuntime::f2i, with
// V0 preserved across the call (see the 2014/01/08 note below).
// Clobbers AT, T9, F12, F30.
instruct convF2I_reg_slow( mRegI dst, regF src ) %{
  match(Set dst (ConvF2I src));
  ins_cost(250);
  format %{ "convf2i $dst, $src @ convF2I_reg_slow" %}
  ins_encode %{
    Register dreg = $dst$$Register;
    FloatRegister fval = $src$$FloatRegister;
    Label L;

    __ c_un_s(fval, fval); //NaN?
    __ bc1t(L);
    __ delayed();
    __ move(dreg, R0);

    __ trunc_w_s(F30, fval);

    /* Call SharedRuntime:f2i() to do valid convention */
    __ cfc1(AT, 31);
    __ li(T9, 0x10000);
    __ andr(AT, AT, T9);
    __ beq(AT, R0, L);
    __ delayed()->mfc1(dreg, F30);

    __ mov_s(F12, fval);

    /* 2014/01/08 Fu : This bug was found when running ezDS's control-panel.
     * J 982 C2 javax.swing.text.BoxView.layoutMajorAxis(II[I[I)V (283 bytes) @ 0x000000555c46aa74
     *
     * An interger array index has been assigned to V0, and then changed from 1 to Integer.MAX_VALUE.
     * V0 is corrupted during call_VM_leaf(), and should be preserved.
     */
    if(dreg != V0) {
      __ push(V0);
    }
    __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::f2i), 1);
    if(dreg != V0) {
      __ move(dreg, V0);
      __ pop(V0);
    }
    __ bind(L);
  %}

  ins_pipe( pipe_slow );
%}
// float -> long, fast path: truncate, then overwrite with 0 if the
// input was NaN.  Clobbers F30.
instruct convF2L_reg_fast( mRegL dst, regF src ) %{
  match(Set dst (ConvF2L src));
  ins_cost(150);
  format %{ "convf2l $dst, $src @ convF2L_reg_fast" %}
  ins_encode %{
    Register dreg = $dst$$Register;
    FloatRegister fval = $src$$FloatRegister;

    __ trunc_l_s(F30, fval);
    __ dmfc1(dreg, F30);
    __ c_un_s(fval, fval); //NaN?
    __ movt(dreg, R0);
  %}

  ins_pipe( pipe_slow );
%}
// float -> long, slow path: NaN yields 0; on an invalid-operation flag
// from trunc_l_s (FCSR cause bit 0x10000) the result is recomputed by
// the SharedRuntime::f2l stub.  Clobbers AT, T9, F12, F30.
instruct convF2L_reg_slow( mRegL dst, regF src ) %{
  match(Set dst (ConvF2L src));
  ins_cost(250);
  format %{ "convf2l $dst, $src @ convF2L_reg_slow" %}
  ins_encode %{
    Register dst = as_Register($dst$$reg);
    FloatRegister fval = $src$$FloatRegister;
    Label L;

    __ c_un_s(fval, fval); //NaN?
    __ bc1t(L);
    __ delayed();
    __ move(dst, R0);

    __ trunc_l_s(F30, fval);
    __ cfc1(AT, 31);       // read FCSR
    __ li(T9, 0x10000);    // invalid-operation cause bit
    __ andr(AT, AT, T9);
    __ beq(AT, R0, L);
    __ delayed()->dmfc1(dst, F30);

    __ mov_s(F12, fval);
    // call_VM_leaf clobbers V0, which may hold a live value when it is
    // not our destination — preserve it, mirroring the fix already
    // applied in convF2I_reg_slow (see the 2014/01/08 note there).
    if(dst != V0) {
      __ push(V0);
    }
    __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::f2l), 1);
    if(dst != V0) {
      __ move(dst, V0);
      __ pop(V0);
    }
    __ bind(L);
  %}

  ins_pipe( pipe_slow );
%}
// long -> float: move the 64-bit integer into the FPU register and
// convert in place.  (An unused local `Label L` has been removed.)
instruct convL2F_reg( regF dst, mRegL src ) %{
  match(Set dst (ConvL2F src));
  format %{ "convl2f $dst, $src @ convL2F_reg" %}
  ins_encode %{
    FloatRegister dst = $dst$$FloatRegister;
    Register src = as_Register($src$$reg);

    __ dmtc1(src, dst);
    __ cvt_s_l(dst, dst);
  %}

  ins_pipe( pipe_slow );
%}
// int -> float: move into the FPU register and convert word to single.
instruct convI2F_reg( regF dst, mRegI src ) %{
  match(Set dst (ConvI2F src));
  format %{ "convi2f $dst, $src @ convI2F_reg" %}
  ins_encode %{
    Register src = $src$$Register;
    FloatRegister dst = $dst$$FloatRegister;

    __ mtc1(src, dst);
    __ cvt_s_w(dst, dst);
  %}

  ins_pipe( fpu_regF_regF );
%}
// CmpLTMask against zero: dst = (p < 0) ? -1 : 0, i.e. the sign bit
// smeared across the word by an arithmetic shift of 31.
instruct cmpLTMask_immI0( mRegI dst, mRegI p, immI0 zero ) %{
  match(Set dst (CmpLTMask p zero));
  ins_cost(100);

  format %{ "sra $dst, $p, 31 @ cmpLTMask_immI0" %}
  ins_encode %{
    Register src = $p$$Register;
    Register dst = $dst$$Register;

    __ sra(dst, src, 31);
  %}
  ins_pipe( pipe_slow );
%}

// General CmpLTMask: dst = (p < q) ? -1 : 0.  slt gives 0/1; negating
// turns 1 into all-ones.
instruct cmpLTMask( mRegI dst, mRegI p, mRegI q ) %{
  match(Set dst (CmpLTMask p q));
  ins_cost(400);

  format %{ "cmpLTMask $dst, $p, $q @ cmpLTMask" %}
  ins_encode %{
    Register p = $p$$Register;
    Register q = $q$$Register;
    Register dst = $dst$$Register;

    __ slt(dst, p, q);
    __ subu(dst, R0, dst);
  %}
  ins_pipe( pipe_slow );
%}
// Pointer -> boolean: dst = (src != NULL) ? 1 : 0.  Same movz scheme as
// convI2B, with AT as scratch when dst aliases src.
instruct convP2B(mRegI dst, mRegP src) %{
  match(Set dst (Conv2B src));

  ins_cost(100);
  format %{ "convP2B $dst, $src @ convP2B" %}
  ins_encode %{
    Register dst = as_Register($dst$$reg);
    Register src = as_Register($src$$reg);

    if (dst != src) {
      __ daddiu(dst, R0, 1);
      __ movz(dst, R0, src);
    } else {
      __ move(AT, src);
      __ daddiu(dst, R0, 1);
      __ movz(dst, R0, AT);
    }
  %}

  ins_pipe( ialu_regL_regL );
%}
// int -> double: move into the FPU register and convert word to double.
instruct convI2D_reg_reg(regD dst, mRegI src) %{
  match(Set dst (ConvI2D src));
  format %{ "conI2D $dst, $src @convI2D_reg" %}
  ins_encode %{
    Register src = $src$$Register;
    FloatRegister dst = $dst$$FloatRegister;
    __ mtc1(src, dst);
    __ cvt_d_w(dst, dst);
  %}
  ins_pipe( fpu_regF_regF );
%}

// float -> double widening conversion.
instruct convF2D_reg_reg(regD dst, regF src) %{
  match(Set dst (ConvF2D src));
  format %{ "convF2D $dst, $src\t# @convF2D_reg_reg" %}
  ins_encode %{
    FloatRegister dst = $dst$$FloatRegister;
    FloatRegister src = $src$$FloatRegister;

    __ cvt_d_s(dst, src);
  %}
  ins_pipe( fpu_regF_regF );
%}

// double -> float narrowing conversion.
instruct convD2F_reg_reg(regF dst, regD src) %{
  match(Set dst (ConvD2F src));
  format %{ "convD2F $dst, $src\t# @convD2F_reg_reg" %}
  ins_encode %{
    FloatRegister dst = $dst$$FloatRegister;
    FloatRegister src = $src$$FloatRegister;

    __ cvt_s_d(dst, src);
  %}
  ins_pipe( fpu_regF_regF );
%}
// Convert a double to an int. If the double is a NAN, stuff a zero in instead.

// Fast path: trunc_w_d handles in-range values; the tail distinguishes
// the saturated max_int result from a genuine max_int, and maps NaN to 0.
// Clobbers AT, T9, F30.
instruct convD2I_reg_reg_fast( mRegI dst, regD src ) %{
  match(Set dst (ConvD2I src));

  ins_cost(150);
  format %{ "convD2I $dst, $src\t# @ convD2I_reg_reg_fast" %}

  ins_encode %{
    FloatRegister src = $src$$FloatRegister;
    Register dst = $dst$$Register;

    Label Done;

    __ trunc_w_d(F30, src);
    // max_int: 2147483647
    __ move(AT, 0x7fffffff);
    __ mfc1(dst, F30);

    // Result != max_int: truncation succeeded — done.
    __ bne(dst, AT, Done);
    __ delayed()->mtc1(R0, F30);

    // Compare src against 0.0 to pick min_int vs max_int for overflow.
    __ cvt_d_w(F30, F30);
    __ c_ult_d(src, F30);
    __ bc1f(Done);
    __ delayed()->addiu(T9, R0, -1);

    __ c_un_d(src, src); //NaN?
    __ subu32(dst, T9, AT);
    __ movt(dst, R0);

    __ bind(Done);
  %}
  ins_pipe( pipe_slow );
%}
// double -> int, slow path: trunc_w_d, falling back to the
// SharedRuntime::d2i stub when the FPU raises the invalid-operation
// cause bit (0x10000 in FCSR).  Clobbers AT, T9, F12, F30.
instruct convD2I_reg_reg_slow( mRegI dst, regD src ) %{
  match(Set dst (ConvD2I src));

  ins_cost(250);
  format %{ "convD2I $dst, $src\t# @ convD2I_reg_reg_slow" %}

  ins_encode %{
    FloatRegister src = $src$$FloatRegister;
    Register dst = $dst$$Register;
    Label L;

    __ trunc_w_d(F30, src);
    __ cfc1(AT, 31);       // read FCSR
    __ li(T9, 0x10000);    // invalid-operation cause bit
    __ andr(AT, AT, T9);
    __ beq(AT, R0, L);
    __ delayed()->mfc1(dst, F30);

    __ mov_d(F12, src);
    // call_VM_leaf clobbers V0, which may hold a live value when it is
    // not our destination — preserve it, mirroring the fix already
    // applied in convF2I_reg_slow (see the 2014/01/08 note there).
    if(dst != V0) {
      __ push(V0);
    }
    __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::d2i), 1);
    if(dst != V0) {
      __ move(dst, V0);
      __ pop(V0);
    }
    __ bind(L);

  %}
  ins_pipe( pipe_slow );
%}
12048 // Convert oop pointer into compressed form
12049 instruct encodeHeapOop(mRegN dst, mRegP src) %{
12050 predicate(n->bottom_type()->make_ptr()->ptr() != TypePtr::NotNull);
12051 match(Set dst (EncodeP src));
12052 format %{ "encode_heap_oop $dst,$src" %}
12053 ins_encode %{
12054 Register src = $src$$Register;
12055 Register dst = $dst$$Register;
12057 __ encode_heap_oop(dst, src);
12058 %}
12059 ins_pipe( ialu_regL_regL );
12060 %}
12062 instruct encodeHeapOop_not_null(mRegN dst, mRegP src) %{
12063 predicate(n->bottom_type()->make_ptr()->ptr() == TypePtr::NotNull);
12064 match(Set dst (EncodeP src));
12065 format %{ "encode_heap_oop_not_null $dst,$src @ encodeHeapOop_not_null" %}
12066 ins_encode %{
12067 __ encode_heap_oop_not_null($dst$$Register, $src$$Register);
12068 %}
12069 ins_pipe( ialu_regL_regL );
12070 %}
// Decompress a narrow oop that may be null (predicate excludes the NotNull
// and Constant cases, which have dedicated rules).
12072 instruct decodeHeapOop(mRegP dst, mRegN src) %{
12073 predicate(n->bottom_type()->is_ptr()->ptr() != TypePtr::NotNull &&
12074 n->bottom_type()->is_ptr()->ptr() != TypePtr::Constant);
12075 match(Set dst (DecodeN src));
12076 format %{ "decode_heap_oop $dst,$src @ decodeHeapOop" %}
12077 ins_encode %{
12078 Register s = $src$$Register;
12079 Register d = $dst$$Register;
12081 __ decode_heap_oop(d, s);
12082 %}
12083 ins_pipe( ialu_regL_regL );
12084 %}
// Decompress a narrow oop known non-null; the MacroAssembler has a distinct
// in-place form for src == dst, so dispatch on register identity.
12086 instruct decodeHeapOop_not_null(mRegP dst, mRegN src) %{
12087 predicate(n->bottom_type()->is_ptr()->ptr() == TypePtr::NotNull ||
12088 n->bottom_type()->is_ptr()->ptr() == TypePtr::Constant);
12089 match(Set dst (DecodeN src));
12090 format %{ "decode_heap_oop_not_null $dst,$src @ decodeHeapOop_not_null" %}
12091 ins_encode %{
12092 Register s = $src$$Register;
12093 Register d = $dst$$Register;
12094 if (s != d) {
12095 __ decode_heap_oop_not_null(d, s);
12096 } else {
12097 __ decode_heap_oop_not_null(d);
12098 }
12099 %}
12100 ins_pipe( ialu_regL_regL );
12101 %}
// Compress a (never-null) Klass pointer into narrow-klass form.
12103 instruct encodeKlass_not_null(mRegN dst, mRegP src) %{
12104 match(Set dst (EncodePKlass src));
// Fix: the format string previously read "encode_heap_oop_not_null", which
// misdescribed this rule in -XX:+PrintOptoAssembly output; it encodes a
// Klass, not a heap oop.
12105 format %{ "encode_klass_not_null $dst,$src @ encodeKlass_not_null" %}
12106 ins_encode %{
12107 __ encode_klass_not_null($dst$$Register, $src$$Register);
12108 %}
12109 ins_pipe( ialu_regL_regL );
12110 %}
// Decompress a narrow Klass pointer (never null); uses the in-place helper
// form when source and destination are the same register.
12112 instruct decodeKlass_not_null(mRegP dst, mRegN src) %{
12113 match(Set dst (DecodeNKlass src));
12114 format %{ "decode_heap_klass_not_null $dst,$src" %}
12115 ins_encode %{
12116 Register s = $src$$Register;
12117 Register d = $dst$$Register;
12118 if (s != d) {
12119 __ decode_klass_not_null(d, s);
12120 } else {
12121 __ decode_klass_not_null(d);
12122 }
12123 %}
12124 ins_pipe( ialu_regL_regL );
12125 %}
12127 //FIXME
// Materialize the current JavaThread pointer. With OPT_THREAD the thread is
// pinned in the TREG register; otherwise fall back to the slower
// get_thread() helper.
12128 instruct tlsLoadP(mRegP dst) %{
12129 match(Set dst (ThreadLocal));
12131 ins_cost(0);
12132 format %{ " get_thread in $dst #@tlsLoadP" %}
12133 ins_encode %{
12134 Register dst = $dst$$Register;
12135 #ifdef OPT_THREAD
12136 __ move(dst, TREG);
12137 #else
12138 __ get_thread(dst);
12139 #endif
12140 %}
12141 ins_pipe( ialu_loadI );
12143 %}
// CheckCastPP is a no-op at the machine level: it only narrows the compiler's
// type information, so the encoding is intentionally empty.
12146 instruct checkCastPP( mRegP dst ) %{
12147 match(Set dst (CheckCastPP dst));
// Fix: corrected the "chekCastPP" typo in the debug format string.
12149 format %{ "#checkcastPP of $dst (empty encoding) #@checkCastPP" %}
12150 ins_encode( /*empty encoding*/ );
12151 ins_pipe( empty );
12152 %}
// CastPP is purely a compiler-type operation; no machine code is emitted.
12154 instruct castPP(mRegP dst)
12155 %{
12156 match(Set dst (CastPP dst));
12158 size(0);
12159 format %{ "# castPP of $dst" %}
12160 ins_encode(/* empty encoding */);
12161 ins_pipe(empty);
12162 %}
// CastII narrows an integer's ideal type only; emits no instructions.
12164 instruct castII( mRegI dst ) %{
12165 match(Set dst (CastII dst));
12166 format %{ "#castII of $dst empty encoding" %}
12167 ins_encode( /*empty encoding*/ );
12168 ins_cost(0);
12169 ins_pipe( empty );
12170 %}
12172 // Return Instruction
12173 // Remove the return address & jump to it.
// Jump through RA; the nop fills the MIPS branch delay slot.
12174 instruct Ret() %{
12175 match(Return);
12176 format %{ "RET #@Ret" %}
12178 ins_encode %{
12179 __ jr(RA);
12180 __ nop();
12181 %}
12183 ins_pipe( pipe_jump );
12184 %}
12186 /*
12187 // For Loongson CPUs, jr seems too slow, so this rule shouldn't be imported.
12188 instruct jumpXtnd(mRegL switch_val) %{
12189 match(Jump switch_val);
12191 ins_cost(350);
12193 format %{ "load T9 <-- [$constanttablebase, $switch_val, $constantoffset] @ jumpXtnd\n\t"
12194 "jr T9\n\t"
12195 "nop" %}
12196 ins_encode %{
12197 Register table_base = $constanttablebase;
12198 int con_offset = $constantoffset;
12199 Register switch_reg = $switch_val$$Register;
12201 if (UseLoongsonISA) {
12202 if (Assembler::is_simm(con_offset, 8)) {
12203 __ gsldx(T9, table_base, switch_reg, con_offset);
12204 } else if (Assembler::is_simm16(con_offset)) {
12205 __ daddu(T9, table_base, switch_reg);
12206 __ ld(T9, T9, con_offset);
12207 } else {
12208 __ move(T9, con_offset);
12209 __ daddu(AT, table_base, switch_reg);
12210 __ gsldx(T9, AT, T9, 0);
12211 }
12212 } else {
12213 if (Assembler::is_simm16(con_offset)) {
12214 __ daddu(T9, table_base, switch_reg);
12215 __ ld(T9, T9, con_offset);
12216 } else {
12217 __ move(T9, con_offset);
12218 __ daddu(AT, table_base, switch_reg);
12219 __ daddu(AT, T9, AT);
12220 __ ld(T9, AT, 0);
12221 }
12222 }
12224 __ jr(T9);
12225 __ nop();
12227 %}
12228 ins_pipe(pipe_jump);
12229 %}
12230 */
12232 // Jump Direct - Label defines a relative address from JMP
// Unconditional branch. When the label is not yet bound (&L is null) a
// placeholder displacement of 0 is emitted for later patching; the nop fills
// the branch delay slot.
12233 instruct jmpDir(label labl) %{
12234 match(Goto);
12235 effect(USE labl);
12237 ins_cost(300);
12238 format %{ "JMP $labl #@jmpDir" %}
12240 ins_encode %{
12241 Label &L = *($labl$$label);
12242 if(&L)
12243 __ b(L);
12244 else
12245 __ b(int(0));
12246 __ nop();
12247 %}
12249 ins_pipe( pipe_jump );
12250 ins_pc_relative(1);
12251 %}
12255 // Tail Jump; remove the return address; jump to target.
12256 // TailCall above leaves the return address around.
12257 // TailJump is used in only one place, the rethrow_Java stub (fancy_jump=2).
12258 // ex_oop (Exception Oop) is needed in %o0 at the jump. As there would be a
12259 // "restore" before this instruction (in Epilogue), we need to materialize it
12260 // in %i0.
12261 //FIXME
// Places the exception oop in V0 and the exception pc (taken from RA) in V1
// before jumping, matching the register contract of the exception stubs
// referenced below.
12262 instruct tailjmpInd(mRegP jump_target,mRegP ex_oop) %{
12263 match( TailJump jump_target ex_oop );
12264 ins_cost(200);
12265 format %{ "Jmp $jump_target ; ex_oop = $ex_oop #@tailjmpInd" %}
12266 ins_encode %{
12267 Register target = $jump_target$$Register;
12269 /* 2012/9/14 Jin: V0, V1 are indicated in:
12270 * [stubGenerator_mips.cpp] generate_forward_exception()
12271 * [runtime_mips.cpp] OptoRuntime::generate_exception_blob()
12272 */
12273 Register oop = $ex_oop$$Register;
12274 Register exception_oop = V0;
12275 Register exception_pc = V1;
12277 __ move(exception_pc, RA);
12278 __ move(exception_oop, oop);
12280 __ jr(target);
12281 __ nop();
12282 %}
12283 ins_pipe( pipe_jump );
12284 %}
12286 // ============================================================================
12287 // Procedure Call/Return Instructions
12288 // Call Java Static Instruction
12289 // Note: If this code changes, the corresponding ret_addr_offset() and
12290 // compute_padding() functions will have to be adjusted.
// Direct call to a statically-bound Java method; the actual code is produced
// by the Java_Static_Call encoding class defined elsewhere in this file.
12291 instruct CallStaticJavaDirect(method meth) %{
12292 match(CallStaticJava);
12293 effect(USE meth);
12295 ins_cost(300);
12296 format %{ "CALL,static #@CallStaticJavaDirect " %}
12297 ins_encode( Java_Static_Call( meth ) );
12298 ins_pipe( pipe_slow );
12299 ins_pc_relative(1);
12300 %}
12302 // Call Java Dynamic Instruction
12303 // Note: If this code changes, the corresponding ret_addr_offset() and
12304 // compute_padding() functions will have to be adjusted.
// Inline-cache call: loads the IC klass sentinel then calls; code is emitted
// by the Java_Dynamic_Call encoding class defined elsewhere in this file.
12305 instruct CallDynamicJavaDirect(method meth) %{
12306 match(CallDynamicJava);
12307 effect(USE meth);
12309 ins_cost(300);
12310 format %{"MOV IC_Klass, (oop)-1\n\t"
12311 "CallDynamic @ CallDynamicJavaDirect" %}
12312 ins_encode( Java_Dynamic_Call( meth ) );
12313 ins_pipe( pipe_slow );
12314 ins_pc_relative(1);
12315 %}
// Leaf runtime call that does not use/preserve FP state; no safepoint poll.
12317 instruct CallLeafNoFPDirect(method meth) %{
12318 match(CallLeafNoFP);
12319 effect(USE meth);
12321 ins_cost(300);
12322 format %{ "CALL_LEAF_NOFP,runtime " %}
12323 ins_encode(Java_To_Runtime(meth));
12324 ins_pipe( pipe_slow );
12325 ins_pc_relative(1);
12326 ins_alignment(16);
12327 %}
12329 // Prefetch instructions.
// Read prefetch: compute the effective address base + (index << scale) + disp
// into AT and issue `pref 0` (load hint).
12331 instruct prefetchrNTA( memory mem ) %{
12332 match(PrefetchRead mem);
12333 ins_cost(125);
12335 format %{ "pref $mem\t# Prefetch into non-temporal cache for read @ prefetchrNTA" %}
12336 ins_encode %{
12337 int base = $mem$$base;
12338 int index = $mem$$index;
12339 int scale = $mem$$scale;
12340 int disp = $mem$$disp;
// AT = base + (index << scale), or just base when there is no index.
12342 if( index != 0 ) {
12343 if (scale == 0) {
12344 __ daddu(AT, as_Register(base), as_Register(index));
12345 } else {
12346 __ dsll(AT, as_Register(index), scale);
12347 __ daddu(AT, as_Register(base), AT);
12348 }
12349 } else {
12350 __ move(AT, as_Register(base));
12351 }
// Add the displacement to the address already accumulated in AT.
// Fix: the previous code restarted from `base` (discarding the scaled
// index) and added `disp` twice in the simm16 case. Only harmless because
// pref is a hint — but it prefetched the wrong address.
12352 if( Assembler::is_simm16(disp) ) {
12353 __ daddiu(AT, AT, disp);
12355 } else {
12356 __ move(T9, disp);
12357 __ daddu(AT, AT, T9);
12358 }
12359 __ pref(0, AT, 0); //hint: 0:load
12360 %}
12361 ins_pipe(pipe_slow);
12362 %}
// Write prefetch: compute the effective address base + (index << scale) + disp
// into AT and issue `pref 1` (store hint).
12364 instruct prefetchwNTA( memory mem ) %{
12365 match(PrefetchWrite mem);
12366 ins_cost(125);
12367 format %{ "pref $mem\t# Prefetch to non-temporal cache for write @ prefetchwNTA" %}
12368 ins_encode %{
12369 int base = $mem$$base;
12370 int index = $mem$$index;
12371 int scale = $mem$$scale;
12372 int disp = $mem$$disp;
// AT = base + (index << scale), or just base when there is no index.
12374 if( index != 0 ) {
12375 if (scale == 0) {
12376 __ daddu(AT, as_Register(base), as_Register(index));
12377 } else {
12378 __ dsll(AT, as_Register(index), scale);
12379 __ daddu(AT, as_Register(base), AT);
12380 }
12381 } else {
12382 __ move(AT, as_Register(base));
12383 }
// Add the displacement to the address already accumulated in AT.
// Fix: mirrors prefetchrNTA — the previous code restarted from `base`
// (discarding the scaled index) and added `disp` twice in the simm16
// case, prefetching the wrong address.
12384 if( Assembler::is_simm16(disp) ) {
12385 __ daddiu(AT, AT, disp);
12387 } else {
12388 __ move(T9, disp);
12389 __ daddu(AT, AT, T9);
12390 }
12391 __ pref(1, AT, 0); //hint: 1:store
12392 %}
12393 ins_pipe(pipe_slow);
12394 %}
12396 // Prefetch instructions for allocation.
// Implements the allocation prefetch as an actual byte load into R0 (the
// hardwired zero register), which touches the cache line without changing
// architectural state. Uses gslbx fused loads when the Loongson extension
// is available.
12398 instruct prefetchAllocNTA( memory mem ) %{
12399 match(PrefetchAllocation mem);
12400 ins_cost(125);
12401 format %{ "pref $mem\t# Prefetch allocation @ prefetchAllocNTA" %}
12402 ins_encode %{
12403 int base = $mem$$base;
12404 int index = $mem$$index;
12405 int scale = $mem$$scale;
12406 int disp = $mem$$disp;
12408 Register dst = R0;
// Indexed addressing: fold disp into the load when it fits, otherwise
// materialize it in T9.
12410 if( index != 0 ) {
12411 if( Assembler::is_simm16(disp) ) {
12412 if( UseLoongsonISA ) {
12413 if (scale == 0) {
12414 __ gslbx(dst, as_Register(base), as_Register(index), disp);
12415 } else {
12416 __ dsll(AT, as_Register(index), scale);
12417 __ gslbx(dst, as_Register(base), AT, disp);
12418 }
12419 } else {
12420 if (scale == 0) {
12421 __ addu(AT, as_Register(base), as_Register(index));
12422 } else {
12423 __ dsll(AT, as_Register(index), scale);
12424 __ addu(AT, as_Register(base), AT);
12425 }
12426 __ lb(dst, AT, disp);
12427 }
12428 } else {
12429 if (scale == 0) {
12430 __ addu(AT, as_Register(base), as_Register(index));
12431 } else {
12432 __ dsll(AT, as_Register(index), scale);
12433 __ addu(AT, as_Register(base), AT);
12434 }
12435 __ move(T9, disp);
12436 if( UseLoongsonISA ) {
12437 __ gslbx(dst, AT, T9, 0);
12438 } else {
12439 __ addu(AT, AT, T9);
12440 __ lb(dst, AT, 0);
12441 }
12442 }
// No index register: base + disp only.
12443 } else {
12444 if( Assembler::is_simm16(disp) ) {
12445 __ lb(dst, as_Register(base), disp);
12446 } else {
12447 __ move(T9, disp);
12448 if( UseLoongsonISA ) {
12449 __ gslbx(dst, as_Register(base), T9, 0);
12450 } else {
12451 __ addu(AT, as_Register(base), T9);
12452 __ lb(dst, AT, 0);
12453 }
12454 }
12455 }
12456 %}
12457 ins_pipe(pipe_slow);
12458 %}
12461 // Call runtime without safepoint
// Leaf runtime call (may use FP); code emitted by the Java_To_Runtime
// encoding class defined elsewhere in this file.
12462 instruct CallLeafDirect(method meth) %{
12463 match(CallLeaf);
12464 effect(USE meth);
12466 ins_cost(300);
12467 format %{ "CALL_LEAF,runtime #@CallLeafDirect " %}
12468 ins_encode(Java_To_Runtime(meth));
12469 ins_pipe( pipe_slow );
12470 ins_pc_relative(1);
12471 ins_alignment(16);
12472 %}
12474 // Load Char (16bit unsigned)
// Zero-extending 16-bit load via the shared load_C_enc encoding class.
12475 instruct loadUS(mRegI dst, memory mem) %{
12476 match(Set dst (LoadUS mem));
12478 ins_cost(125);
12479 format %{ "loadUS $dst,$mem @ loadC" %}
12480 ins_encode(load_C_enc(dst, mem));
12481 ins_pipe( ialu_loadI );
12482 %}
// Fused LoadUS + ConvI2L: a zero-extended 16-bit value needs no extra
// widening, so the same encoding as loadUS suffices.
12484 instruct loadUS_convI2L(mRegL dst, memory mem) %{
12485 match(Set dst (ConvI2L (LoadUS mem)));
12487 ins_cost(125);
12488 format %{ "loadUS $dst,$mem @ loadUS_convI2L" %}
12489 ins_encode(load_C_enc(dst, mem));
12490 ins_pipe( ialu_loadI );
12491 %}
12493 // Store Char (16bit unsigned)
// 16-bit store from a register, via the shared store_C_reg_enc encoding.
12494 instruct storeC(memory mem, mRegI src) %{
12495 match(Set mem (StoreC mem src));
12497 ins_cost(125);
12498 format %{ "storeC $src, $mem @ storeC" %}
12499 ins_encode(store_C_reg_enc(mem, src));
12500 ins_pipe( ialu_loadI );
12501 %}
// 16-bit store of the constant zero; a dedicated encoding avoids
// materializing 0 in a register.
12503 instruct storeC0(memory mem, immI0 zero) %{
12504 match(Set mem (StoreC mem zero));
12506 ins_cost(125);
12507 format %{ "storeC $zero, $mem @ storeC0" %}
12508 ins_encode(store_C0_enc(mem));
12509 ins_pipe( ialu_loadI );
12510 %}
// Load float constant 0.0 by moving the integer zero register into the FPU;
// cheaper than a constant-table load.
12513 instruct loadConF0(regF dst, immF0 zero) %{
12514 match(Set dst zero);
12515 ins_cost(100);
12517 format %{ "mov $dst, zero @ loadConF0\n"%}
12518 ins_encode %{
12519 FloatRegister dst = $dst$$FloatRegister;
12521 __ mtc1(R0, dst);
12522 %}
12523 ins_pipe( fpu_loadF );
12524 %}
// Load a float constant from the constant table; handles constant-table
// offsets too large for a 16-bit immediate, using gslwxc1 on Loongson.
12527 instruct loadConF(regF dst, immF src) %{
12528 match(Set dst src);
12529 ins_cost(125);
12531 format %{ "lwc1 $dst, $constantoffset[$constanttablebase] # load FLOAT $src from table @ loadConF" %}
12532 ins_encode %{
12533 int con_offset = $constantoffset($src);
12535 if (Assembler::is_simm16(con_offset)) {
12536 __ lwc1($dst$$FloatRegister, $constanttablebase, con_offset);
12537 } else {
12538 __ set64(AT, con_offset);
12539 if (UseLoongsonISA) {
12540 __ gslwxc1($dst$$FloatRegister, $constanttablebase, AT, 0);
12541 } else {
12542 __ daddu(AT, $constanttablebase, AT);
12543 __ lwc1($dst$$FloatRegister, AT, 0);
12544 }
12545 }
12546 %}
12547 ins_pipe( fpu_loadF );
12548 %}
// Load double constant 0.0 with a 64-bit move of the zero register into the
// FPU; cheaper than a constant-table load.
12551 instruct loadConD0(regD dst, immD0 zero) %{
12552 match(Set dst zero);
12553 ins_cost(100);
12555 format %{ "mov $dst, zero @ loadConD0"%}
12556 ins_encode %{
12557 FloatRegister dst = as_FloatRegister($dst$$reg);
12559 __ dmtc1(R0, dst);
12560 %}
12561 ins_pipe( fpu_loadF );
12562 %}
// Load a double constant from the constant table; mirrors loadConF but with
// 64-bit loads (ldc1 / gsldxc1).
12564 instruct loadConD(regD dst, immD src) %{
12565 match(Set dst src);
12566 ins_cost(125);
12568 format %{ "ldc1 $dst, $constantoffset[$constanttablebase] # load DOUBLE $src from table @ loadConD" %}
12569 ins_encode %{
12570 int con_offset = $constantoffset($src);
12572 if (Assembler::is_simm16(con_offset)) {
12573 __ ldc1($dst$$FloatRegister, $constanttablebase, con_offset);
12574 } else {
12575 __ set64(AT, con_offset);
12576 if (UseLoongsonISA) {
12577 __ gsldxc1($dst$$FloatRegister, $constanttablebase, AT, 0);
12578 } else {
12579 __ daddu(AT, $constanttablebase, AT);
12580 __ ldc1($dst$$FloatRegister, AT, 0);
12581 }
12582 }
12583 %}
12584 ins_pipe( fpu_loadF );
12585 %}
12587 // Store register Float value (it is faster than store from FPU register)
// Store a float from an FPU register via the shared store_F_reg_enc encoding.
12588 instruct storeF_reg( memory mem, regF src) %{
12589 match(Set mem (StoreF mem src));
12591 ins_cost(50);
12592 format %{ "store $mem, $src\t# store float @ storeF_reg" %}
12593 ins_encode(store_F_reg_enc(mem, src));
12594 ins_pipe( fpu_storeF );
12595 %}
// Store float constant 0.0 by storing the integer zero register (bit pattern
// of +0.0f is all zeros), avoiding any FPU involvement. The branchy encoding
// picks the cheapest addressing form for base/index/scale/disp, preferring
// the fused gsswx on Loongson.
12597 instruct storeF_imm0( memory mem, immF0 zero) %{
12598 match(Set mem (StoreF mem zero));
12600 ins_cost(40);
12601 format %{ "store $mem, zero\t# store float @ storeF_imm0" %}
12602 ins_encode %{
12603 int base = $mem$$base;
12604 int index = $mem$$index;
12605 int scale = $mem$$scale;
12606 int disp = $mem$$disp;
12608 if( index != 0 ) {
12609 if ( UseLoongsonISA ) {
// gsswx takes only an 8-bit immediate offset; fall through to wider
// forms as disp grows.
12610 if ( Assembler::is_simm(disp, 8) ) {
12611 if ( scale == 0 ) {
12612 __ gsswx(R0, as_Register(base), as_Register(index), disp);
12613 } else {
12614 __ dsll(T9, as_Register(index), scale);
12615 __ gsswx(R0, as_Register(base), T9, disp);
12616 }
12617 } else if ( Assembler::is_simm16(disp) ) {
12618 if ( scale == 0 ) {
12619 __ daddu(AT, as_Register(base), as_Register(index));
12620 } else {
12621 __ dsll(T9, as_Register(index), scale);
12622 __ daddu(AT, as_Register(base), T9);
12623 }
12624 __ sw(R0, AT, disp);
12625 } else {
12626 if ( scale == 0 ) {
12627 __ move(T9, disp);
12628 __ daddu(AT, as_Register(index), T9);
12629 __ gsswx(R0, as_Register(base), AT, 0);
12630 } else {
12631 __ dsll(T9, as_Register(index), scale);
12632 __ move(AT, disp);
12633 __ daddu(AT, AT, T9);
12634 __ gsswx(R0, as_Register(base), AT, 0);
12635 }
12636 }
12637 } else { //not use loongson isa
12638 if(scale != 0) {
12639 __ dsll(T9, as_Register(index), scale);
12640 __ daddu(AT, as_Register(base), T9);
12641 } else {
12642 __ daddu(AT, as_Register(base), as_Register(index));
12643 }
12644 if( Assembler::is_simm16(disp) ) {
12645 __ sw(R0, AT, disp);
12646 } else {
12647 __ move(T9, disp);
12648 __ daddu(AT, AT, T9);
12649 __ sw(R0, AT, 0);
12650 }
12651 }
12652 } else { //index is 0
12653 if ( UseLoongsonISA ) {
12654 if ( Assembler::is_simm16(disp) ) {
12655 __ sw(R0, as_Register(base), disp);
12656 } else {
12657 __ move(T9, disp);
12658 __ gsswx(R0, as_Register(base), T9, 0);
12659 }
12660 } else {
12661 if( Assembler::is_simm16(disp) ) {
12662 __ sw(R0, as_Register(base), disp);
12663 } else {
12664 __ move(T9, disp);
12665 __ daddu(AT, as_Register(base), T9);
12666 __ sw(R0, AT, 0);
12667 }
12668 }
12669 }
12670 %}
12671 ins_pipe( ialu_storeI );
12672 %}
12674 // Load Double
// 64-bit FP load via the shared load_D_enc encoding class.
12675 instruct loadD(regD dst, memory mem) %{
12676 match(Set dst (LoadD mem));
12678 ins_cost(150);
12679 format %{ "loadD $dst, $mem #@loadD" %}
12680 ins_encode(load_D_enc(dst, mem));
12681 ins_pipe( ialu_loadI );
12682 %}
12684 // Load Double - UNaligned
// Currently reuses the aligned load_D_enc encoding; see FIXME below about a
// dedicated ldl/ldr sequence for genuinely unaligned addresses.
12685 instruct loadD_unaligned(regD dst, memory mem ) %{
12686 match(Set dst (LoadD_unaligned mem));
12687 ins_cost(250);
12688 // FIXME: Jin: Need more effective ldl/ldr
12689 format %{ "loadD_unaligned $dst, $mem #@loadD_unaligned" %}
12690 ins_encode(load_D_enc(dst, mem));
12691 ins_pipe( ialu_loadI );
12692 %}
// 64-bit FP store via the shared store_D_reg_enc encoding class.
12694 instruct storeD_reg( memory mem, regD src) %{
12695 match(Set mem (StoreD mem src));
12697 ins_cost(50);
12698 format %{ "store $mem, $src\t# store float @ storeD_reg" %}
12699 ins_encode(store_D_reg_enc(mem, src));
12700 ins_pipe( fpu_storeF );
12701 %}
// Store double constant 0.0: materialize 0.0 in scratch FP register F30
// (mtc1 + cvt.d.w of integer zero) and store it with sdc1 / gssdxc1,
// selecting the cheapest addressing form for base/index/scale/disp.
12703 instruct storeD_imm0( memory mem, immD0 zero) %{
12704 match(Set mem (StoreD mem zero));
12706 ins_cost(40);
12707 format %{ "store $mem, zero\t# store float @ storeD_imm0" %}
12708 ins_encode %{
12709 int base = $mem$$base;
12710 int index = $mem$$index;
12711 int scale = $mem$$scale;
12712 int disp = $mem$$disp;
// F30 := (double) 0 — the value to store.
12714 __ mtc1(R0, F30);
12715 __ cvt_d_w(F30, F30);
12717 if( index != 0 ) {
12718 if ( UseLoongsonISA ) {
// gssdxc1 takes only an 8-bit immediate offset.
12719 if ( Assembler::is_simm(disp, 8) ) {
12720 if (scale == 0) {
12721 __ gssdxc1(F30, as_Register(base), as_Register(index), disp);
12722 } else {
12723 __ dsll(T9, as_Register(index), scale);
12724 __ gssdxc1(F30, as_Register(base), T9, disp);
12725 }
12726 } else if ( Assembler::is_simm16(disp) ) {
12727 if (scale == 0) {
12728 __ daddu(AT, as_Register(base), as_Register(index));
12729 __ sdc1(F30, AT, disp);
12730 } else {
12731 __ dsll(T9, as_Register(index), scale);
12732 __ daddu(AT, as_Register(base), T9);
12733 __ sdc1(F30, AT, disp);
12734 }
12735 } else {
12736 if (scale == 0) {
12737 __ move(T9, disp);
12738 __ daddu(AT, as_Register(index), T9);
12739 __ gssdxc1(F30, as_Register(base), AT, 0);
12740 } else {
12741 __ move(T9, disp);
12742 __ dsll(AT, as_Register(index), scale);
12743 __ daddu(AT, AT, T9);
12744 __ gssdxc1(F30, as_Register(base), AT, 0);
12745 }
12746 }
12747 } else { // not use loongson isa
12748 if(scale != 0) {
12749 __ dsll(T9, as_Register(index), scale);
12750 __ daddu(AT, as_Register(base), T9);
12751 } else {
12752 __ daddu(AT, as_Register(base), as_Register(index));
12753 }
12754 if( Assembler::is_simm16(disp) ) {
12755 __ sdc1(F30, AT, disp);
12756 } else {
12757 __ move(T9, disp);
12758 __ daddu(AT, AT, T9);
12759 __ sdc1(F30, AT, 0);
12760 }
12761 }
12762 } else {// index is 0
12763 if ( UseLoongsonISA ) {
12764 if ( Assembler::is_simm16(disp) ) {
12765 __ sdc1(F30, as_Register(base), disp);
12766 } else {
12767 __ move(T9, disp);
12768 __ gssdxc1(F30, as_Register(base), T9, 0);
12769 }
12770 } else {
12771 if( Assembler::is_simm16(disp) ) {
12772 __ sdc1(F30, as_Register(base), disp);
12773 } else {
12774 __ move(T9, disp);
12775 __ daddu(AT, as_Register(base), T9);
12776 __ sdc1(F30, AT, 0);
12777 }
12778 }
12779 }
12780 %}
12781 ins_pipe( ialu_storeI );
12782 %}
// Load an int from a stack slot (SP-relative); the guarantee documents that
// spill offsets are expected to fit a signed 16-bit displacement.
12784 instruct loadSSI(mRegI dst, stackSlotI src)
12785 %{
12786 match(Set dst src);
12788 ins_cost(125);
12789 format %{ "lw $dst, $src\t# int stk @ loadSSI" %}
12790 ins_encode %{
12791 guarantee( Assembler::is_simm16($src$$disp), "disp too long (loadSSI) !");
12792 __ lw($dst$$Register, SP, $src$$disp);
12793 %}
12794 ins_pipe(ialu_loadI);
12795 %}
// Store an int to a stack slot (SP-relative, simm16 displacement assumed).
12797 instruct storeSSI(stackSlotI dst, mRegI src)
12798 %{
12799 match(Set dst src);
12801 ins_cost(100);
12802 format %{ "sw $dst, $src\t# int stk @ storeSSI" %}
12803 ins_encode %{
12804 guarantee( Assembler::is_simm16($dst$$disp), "disp too long (storeSSI) !");
12805 __ sw($src$$Register, SP, $dst$$disp);
12806 %}
12807 ins_pipe(ialu_storeI);
12808 %}
// Load a long from a stack slot (SP-relative, simm16 displacement assumed).
12810 instruct loadSSL(mRegL dst, stackSlotL src)
12811 %{
12812 match(Set dst src);
12814 ins_cost(125);
12815 format %{ "ld $dst, $src\t# long stk @ loadSSL" %}
12816 ins_encode %{
12817 guarantee( Assembler::is_simm16($src$$disp), "disp too long (loadSSL) !");
12818 __ ld($dst$$Register, SP, $src$$disp);
12819 %}
12820 ins_pipe(ialu_loadI);
12821 %}
// Store a long to a stack slot (SP-relative, simm16 displacement assumed).
12823 instruct storeSSL(stackSlotL dst, mRegL src)
12824 %{
12825 match(Set dst src);
12827 ins_cost(100);
12828 format %{ "sd $dst, $src\t# long stk @ storeSSL" %}
12829 ins_encode %{
12830 guarantee( Assembler::is_simm16($dst$$disp), "disp too long (storeSSL) !");
12831 __ sd($src$$Register, SP, $dst$$disp);
12832 %}
12833 ins_pipe(ialu_storeI);
12834 %}
// Load a pointer from a stack slot (SP-relative, simm16 displacement assumed).
12836 instruct loadSSP(mRegP dst, stackSlotP src)
12837 %{
12838 match(Set dst src);
12840 ins_cost(125);
12841 format %{ "ld $dst, $src\t# ptr stk @ loadSSP" %}
12842 ins_encode %{
12843 guarantee( Assembler::is_simm16($src$$disp), "disp too long (loadSSP) !");
12844 __ ld($dst$$Register, SP, $src$$disp);
12845 %}
12846 ins_pipe(ialu_loadI);
12847 %}
// Store a pointer to a stack slot (SP-relative, simm16 displacement assumed).
12849 instruct storeSSP(stackSlotP dst, mRegP src)
12850 %{
12851 match(Set dst src);
12853 ins_cost(100);
12854 format %{ "sd $dst, $src\t# ptr stk @ storeSSP" %}
12855 ins_encode %{
12856 guarantee( Assembler::is_simm16($dst$$disp), "disp too long (storeSSP) !");
12857 __ sd($src$$Register, SP, $dst$$disp);
12858 %}
12859 ins_pipe(ialu_storeI);
12860 %}
// Load a float from a stack slot (SP-relative, simm16 displacement assumed).
12862 instruct loadSSF(regF dst, stackSlotF src)
12863 %{
12864 match(Set dst src);
12866 ins_cost(125);
12867 format %{ "lwc1 $dst, $src\t# float stk @ loadSSF" %}
12868 ins_encode %{
12869 guarantee( Assembler::is_simm16($src$$disp), "disp too long (loadSSF) !");
12870 __ lwc1($dst$$FloatRegister, SP, $src$$disp);
12871 %}
12872 ins_pipe(ialu_loadI);
12873 %}
// Store a float to a stack slot (SP-relative, simm16 displacement assumed).
12875 instruct storeSSF(stackSlotF dst, regF src)
12876 %{
12877 match(Set dst src);
12879 ins_cost(100);
12880 format %{ "swc1 $dst, $src\t# float stk @ storeSSF" %}
12881 ins_encode %{
12882 guarantee( Assembler::is_simm16($dst$$disp), "disp too long (storeSSF) !");
12883 __ swc1($src$$FloatRegister, SP, $dst$$disp);
12884 %}
12885 ins_pipe(fpu_storeF);
12886 %}
12888 // Use the same format since predicate() can not be used here.
// Load a double from a stack slot (SP-relative, simm16 displacement assumed).
12889 instruct loadSSD(regD dst, stackSlotD src)
12890 %{
12891 match(Set dst src);
12893 ins_cost(125);
12894 format %{ "ldc1 $dst, $src\t# double stk @ loadSSD" %}
12895 ins_encode %{
12896 guarantee( Assembler::is_simm16($src$$disp), "disp too long (loadSSD) !");
12897 __ ldc1($dst$$FloatRegister, SP, $src$$disp);
12898 %}
12899 ins_pipe(ialu_loadI);
12900 %}
// Store a double to a stack slot (SP-relative, simm16 displacement assumed).
12902 instruct storeSSD(stackSlotD dst, regD src)
12903 %{
12904 match(Set dst src);
12906 ins_cost(100);
12907 format %{ "sdc1 $dst, $src\t# double stk @ storeSSD" %}
12908 ins_encode %{
12909 guarantee( Assembler::is_simm16($dst$$disp), "disp too long (storeSSD) !");
12910 __ sdc1($src$$FloatRegister, SP, $dst$$disp);
12911 %}
12912 ins_pipe(fpu_storeF);
12913 %}
// Fast-path monitor enter: delegates to MacroAssembler::fast_lock. The box
// register is killed (USE_KILL) and tmp/scr are scratch temporaries.
12915 instruct cmpFastLock( FlagsReg cr, mRegP object, s0_RegP box, mRegI tmp, mRegP scr) %{
12916 match( Set cr (FastLock object box) );
12917 effect( TEMP tmp, TEMP scr, USE_KILL box );
12918 ins_cost(300);
12919 format %{ "FASTLOCK $cr $object, $box, $tmp #@ cmpFastLock" %}
12920 ins_encode %{
12921 __ fast_lock($object$$Register, $box$$Register, $tmp$$Register, $scr$$Register);
12922 %}
12924 ins_pipe( pipe_slow );
12925 ins_pc_relative(1);
12926 %}
// Fast-path monitor exit: delegates to MacroAssembler::fast_unlock; box is
// killed, tmp is a scratch temporary.
12928 instruct cmpFastUnlock( FlagsReg cr, mRegP object, s0_RegP box, mRegP tmp ) %{
12929 match( Set cr (FastUnlock object box) );
12930 effect( TEMP tmp, USE_KILL box );
12931 ins_cost(300);
12932 format %{ "FASTUNLOCK $object, $box, $tmp #@cmpFastUnlock" %}
12933 ins_encode %{
12934 __ fast_unlock($object$$Register, $box$$Register, $tmp$$Register);
12935 %}
12937 ins_pipe( pipe_slow );
12938 ins_pc_relative(1);
12939 %}
12941 // Store CMS card-mark Immediate
// Card-table byte store with ordering, via the store_B_immI_enc_sync
// encoding class defined elsewhere in this file.
12942 instruct storeImmCM(memory mem, immI8 src) %{
12943 match(Set mem (StoreCM mem src));
12945 ins_cost(150);
12946 format %{ "MOV8 $mem,$src\t! CMS card-mark imm0" %}
12947 // opcode(0xC6);
12948 ins_encode(store_B_immI_enc_sync(mem, src));
12949 ins_pipe( ialu_storeI );
12950 %}
12952 // Die now
// Halt node: emits a fatal stop; control must never actually reach here.
12953 instruct ShouldNotReachHere( )
12954 %{
12955 match(Halt);
12956 ins_cost(300);
12958 // Use the following format syntax
12959 format %{ "ILLTRAP ;#@ShouldNotReachHere" %}
12960 ins_encode %{
12961 // Here we should emit illtrap !
// Fix: corrected the typo in the diagnostic text ("ShoudNotReachHere"),
// which would have made hs_err output harder to grep.
12963 __ stop("in ShouldNotReachHere");
12965 %}
12966 ins_pipe( pipe_jump );
12967 %}
// Address computation (lea) for base + small displacement; only valid when
// the narrow-oop shift is zero, per the predicate.
12969 instruct leaP8Narrow(mRegP dst, indOffset8Narrow mem)
12970 %{
12971 predicate(Universe::narrow_oop_shift() == 0);
12972 match(Set dst mem);
12974 ins_cost(110);
12975 format %{ "leaq $dst, $mem\t# ptr off8narrow @ leaP8Narrow" %}
12976 ins_encode %{
12977 Register dst = $dst$$Register;
12978 Register base = as_Register($mem$$base);
12979 int disp = $mem$$disp;
12981 __ daddiu(dst, base, disp);
12982 %}
12983 ins_pipe( ialu_regI_imm16 );
12984 %}
// Address computation: dst = base + (index << scale) + disp, using AT as
// scratch; no memory access is performed.
12986 instruct leaPPosIdxScaleOff8(mRegP dst, basePosIndexScaleOffset8 mem)
12987 %{
12988 match(Set dst mem);
12990 ins_cost(110);
12991 format %{ "leaq $dst, $mem\t# @ PosIdxScaleOff8" %}
12992 ins_encode %{
12993 Register dst = $dst$$Register;
12994 Register base = as_Register($mem$$base);
12995 Register index = as_Register($mem$$index);
12996 int scale = $mem$$scale;
12997 int disp = $mem$$disp;
12999 if (scale == 0) {
13000 __ daddu(AT, base, index);
13001 __ daddiu(dst, AT, disp);
13002 } else {
13003 __ dsll(AT, index, scale);
13004 __ daddu(AT, base, AT);
13005 __ daddiu(dst, AT, disp);
13006 }
13007 %}
13009 ins_pipe( ialu_regI_imm16 );
13010 %}
// Address computation: dst = base + (index << scale), no displacement.
13012 instruct leaPIdxScale(mRegP dst, indIndexScale mem)
13013 %{
13014 match(Set dst mem);
13016 ins_cost(110);
13017 format %{ "leaq $dst, $mem\t# @ leaPIdxScale" %}
13018 ins_encode %{
13019 Register dst = $dst$$Register;
13020 Register base = as_Register($mem$$base);
13021 Register index = as_Register($mem$$index);
13022 int scale = $mem$$scale;
13024 if (scale == 0) {
13025 __ daddu(dst, base, index);
13026 } else {
13027 __ dsll(AT, index, scale);
13028 __ daddu(dst, base, AT);
13029 }
13030 %}
13032 ins_pipe( ialu_regI_imm16 );
13033 %}
13035 // Jump Direct Conditional - Label defines a relative address from Jcc+1
// Counted-loop back edge on a register/register compare. Dispatches on the
// cmpOp code; unsigned-looking names (above/below) are implemented with the
// signed slt here, consistent with the signed CmpI semantics of this rule.
// An unbound label (&L null) emits displacement 0 for later patching; the
// trailing nop fills the branch delay slot.
13036 instruct jmpLoopEnd(cmpOp cop, mRegI src1, mRegI src2, label labl) %{
13037 match(CountedLoopEnd cop (CmpI src1 src2));
13038 effect(USE labl);
13040 ins_cost(300);
13041 format %{ "J$cop $src1, $src2, $labl\t# Loop end @ jmpLoopEnd" %}
13042 ins_encode %{
13043 Register op1 = $src1$$Register;
13044 Register op2 = $src2$$Register;
13045 Label &L = *($labl$$label);
13046 int flag = $cop$$cmpcode;
13048 switch(flag)
13049 {
13050 case 0x01: //equal
13051 if (&L)
13052 __ beq(op1, op2, L);
13053 else
13054 __ beq(op1, op2, (int)0);
13055 break;
13056 case 0x02: //not_equal
13057 if (&L)
13058 __ bne(op1, op2, L);
13059 else
13060 __ bne(op1, op2, (int)0);
13061 break;
13062 case 0x03: //above
13063 __ slt(AT, op2, op1);
13064 if(&L)
13065 __ bne(AT, R0, L);
13066 else
13067 __ bne(AT, R0, (int)0);
13068 break;
13069 case 0x04: //above_equal
13070 __ slt(AT, op1, op2);
13071 if(&L)
13072 __ beq(AT, R0, L);
13073 else
13074 __ beq(AT, R0, (int)0);
13075 break;
13076 case 0x05: //below
13077 __ slt(AT, op1, op2);
13078 if(&L)
13079 __ bne(AT, R0, L);
13080 else
13081 __ bne(AT, R0, (int)0);
13082 break;
13083 case 0x06: //below_equal
13084 __ slt(AT, op2, op1);
13085 if(&L)
13086 __ beq(AT, R0, L);
13087 else
13088 __ beq(AT, R0, (int)0);
13089 break;
13090 default:
13091 Unimplemented();
13092 }
13093 __ nop();
13094 %}
13095 ins_pipe( pipe_jump );
13096 ins_pc_relative(1);
13097 %}
// Counted-loop back edge against a 16-bit immediate: computes
// AT = src1 - src2 once (addiu32 with the negated immediate) and then
// branches on the sign/zero of AT with the compact bgtz/bgez/bltz/blez
// forms. NOTE(review): this relies on src1 - src2 not overflowing 32 bits;
// presumably guaranteed by the immI16_sub operand's range — confirm.
13100 instruct jmpLoopEnd_reg_imm16_sub(cmpOp cop, mRegI src1, immI16_sub src2, label labl) %{
13101 match(CountedLoopEnd cop (CmpI src1 src2));
13102 effect(USE labl);
13104 ins_cost(250);
13105 format %{ "J$cop $src1, $src2, $labl\t# Loop end @ jmpLoopEnd_reg_imm16_sub" %}
13106 ins_encode %{
13107 Register op1 = $src1$$Register;
13108 int op2 = $src2$$constant;
13109 Label &L = *($labl$$label);
13110 int flag = $cop$$cmpcode;
13112 __ addiu32(AT, op1, -1 * op2);
13114 switch(flag)
13115 {
13116 case 0x01: //equal
13117 if (&L)
13118 __ beq(AT, R0, L);
13119 else
13120 __ beq(AT, R0, (int)0);
13121 break;
13122 case 0x02: //not_equal
13123 if (&L)
13124 __ bne(AT, R0, L);
13125 else
13126 __ bne(AT, R0, (int)0);
13127 break;
13128 case 0x03: //above
13129 if(&L)
13130 __ bgtz(AT, L);
13131 else
13132 __ bgtz(AT, (int)0);
13133 break;
13134 case 0x04: //above_equal
13135 if(&L)
13136 __ bgez(AT, L);
13137 else
13138 __ bgez(AT,(int)0);
13139 break;
13140 case 0x05: //below
13141 if(&L)
13142 __ bltz(AT, L);
13143 else
13144 __ bltz(AT, (int)0);
13145 break;
13146 case 0x06: //below_equal
13147 if(&L)
13148 __ blez(AT, L);
13149 else
13150 __ blez(AT, (int)0);
13151 break;
13152 default:
13153 Unimplemented();
13154 }
13155 __ nop();
13156 %}
13157 ins_pipe( pipe_jump );
13158 ins_pc_relative(1);
13159 %}
13162 /*
13163 // Jump Direct Conditional - Label defines a relative address from Jcc+1
13164 instruct jmpLoopEndU(cmpOpU cop, eFlagsRegU cmp, label labl) %{
13165 match(CountedLoopEnd cop cmp);
13166 effect(USE labl);
13168 ins_cost(300);
13169 format %{ "J$cop,u $labl\t# Loop end" %}
13170 size(6);
13171 opcode(0x0F, 0x80);
13172 ins_encode( Jcc( cop, labl) );
13173 ins_pipe( pipe_jump );
13174 ins_pc_relative(1);
13175 %}
13177 instruct jmpLoopEndUCF(cmpOpUCF cop, eFlagsRegUCF cmp, label labl) %{
13178 match(CountedLoopEnd cop cmp);
13179 effect(USE labl);
13181 ins_cost(200);
13182 format %{ "J$cop,u $labl\t# Loop end" %}
13183 opcode(0x0F, 0x80);
13184 ins_encode( Jcc( cop, labl) );
13185 ins_pipe( pipe_jump );
13186 ins_pc_relative(1);
13187 %}
13188 */
13190 // This match pattern is created for StoreIConditional since I cannot match IfNode without a RegFlags! fujie 2012/07/17
// Branch on the AT pseudo-flag produced by a preceding flag-setting rule
// (e.g. storeIConditional). Note the branch sense is inverted relative to
// the condition code: "equal" tests AT != 0 because AT holds the
// success/failure flag, not a comparison result.
13191 instruct jmpCon_flags(cmpOp cop, FlagsReg cr, label labl) %{
13192 match(If cop cr);
13193 effect(USE labl);
13195 ins_cost(300);
13196 format %{ "J$cop $labl #mips uses AT as eflag @jmpCon_flags" %}
13198 ins_encode %{
13199 Label &L = *($labl$$label);
13200 switch($cop$$cmpcode)
13201 {
13202 case 0x01: //equal
13203 if (&L)
13204 __ bne(AT, R0, L);
13205 else
13206 __ bne(AT, R0, (int)0);
13207 break;
13208 case 0x02: //not equal
13209 if (&L)
13210 __ beq(AT, R0, L);
13211 else
13212 __ beq(AT, R0, (int)0);
13213 break;
13214 default:
13215 Unimplemented();
13216 }
13217 __ nop();
13218 %}
13220 ins_pipe( pipe_jump );
13221 ins_pc_relative(1);
13222 %}
13225 // ============================================================================
13226 // The 2nd slow-half of a subtype check. Scan the subklass's 2ndary superklass
13227 // array for an instance of the superklass. Set a hidden internal cache on a
13228 // hit (cache is checked with exposed code in gen_subtype_check()). Return
13229 // NZ for a miss or zero for a hit. The encoding ALSO sets flags.
// Code is produced by the enc_PartialSubtypeCheck encoding class defined
// elsewhere in this file; T8 is reserved as the scratch (hence the no_T8
// operand classes).
13230 instruct partialSubtypeCheck( mRegP result, no_T8_mRegP sub, no_T8_mRegP super, mT8RegI tmp ) %{
13231 match(Set result (PartialSubtypeCheck sub super));
13232 effect(KILL tmp);
13233 ins_cost(1100); // slightly larger than the next version
13234 format %{ "partialSubtypeCheck result=$result, sub=$sub, super=$super, tmp=$tmp " %}
13236 ins_encode( enc_PartialSubtypeCheck(result, sub, super, tmp) );
13237 ins_pipe( pipe_slow );
13238 %}
13241 // Conditional-store of an int value.
13242 // ZF flag is set on success, reset otherwise. Implemented with a CMPXCHG on Intel.
// MIPS implementation: an LL/SC retry loop. On exit AT holds the success
// flag (0xFF on success, 0 on mismatch) for a following jmpCon_flags branch.
// Indexed addressing is not supported (hard stop), and disp must be simm16.
13243 instruct storeIConditional( memory mem, mRegI oldval, mRegI newval, FlagsReg cr ) %{
13244 match(Set cr (StoreIConditional mem (Binary oldval newval)));
13245 // effect(KILL oldval);
13246 format %{ "CMPXCHG $newval, $mem, $oldval \t# @storeIConditional" %}
13248 ins_encode %{
13249 Register oldval = $oldval$$Register;
13250 Register newval = $newval$$Register;
13251 Address addr(as_Register($mem$$base), $mem$$disp);
13252 Label again, failure;
13254 // int base = $mem$$base;
13255 int index = $mem$$index;
13256 int scale = $mem$$scale;
13257 int disp = $mem$$disp;
13259 guarantee(Assembler::is_simm16(disp), "");
13261 if( index != 0 ) {
13262 __ stop("in storeIConditional: index != 0");
13263 } else {
13264 __ bind(again);
// Memory barrier before LL on pre-3A2000 parts.
13265 if(!Use3A2000) __ sync();
13266 __ ll(AT, addr);
// Mismatch: branch to failure with AT cleared in the delay slot.
13267 __ bne(AT, oldval, failure);
13268 __ delayed()->addu(AT, R0, R0);
13270 __ addu(AT, newval, R0);
13271 __ sc(AT, addr);
// SC failed (AT == 0): retry; the delay slot preloads the success value.
13272 __ beq(AT, R0, again);
13273 __ delayed()->addiu(AT, R0, 0xFF);
13274 __ bind(failure);
13275 __ sync();
13276 }
13277 %}
13279 ins_pipe( long_memory_op );
13280 %}
13282 // Conditional-store of a long value.
13283 // ZF flag is set on success, reset otherwise. Implemented with a CMPXCHG.
13284 instruct storeLConditional(memory mem, t2RegL oldval, mRegL newval, FlagsReg cr )
13285 %{
13286 match(Set cr (StoreLConditional mem (Binary oldval newval)));
13287 effect(KILL oldval);
13289 format %{ "cmpxchg $mem, $newval\t# If $oldval == $mem then store $newval into $mem" %}
13290 ins_encode%{
13291 Register oldval = $oldval$$Register;
13292 Register newval = $newval$$Register;
13293 Address addr((Register)$mem$$base, $mem$$disp);
13295 int index = $mem$$index;
13296 int scale = $mem$$scale;
13297 int disp = $mem$$disp;
13299 guarantee(Assembler::is_simm16(disp), "");
13301 if( index != 0 ) {
13302 __ stop("in storeIConditional: index != 0");
13303 } else {
13304 __ cmpxchg(newval, addr, oldval);
13305 }
13306 %}
13307 ins_pipe( long_memory_op );
13308 %}
13311 instruct compareAndSwapI( mRegI res, mRegP mem_ptr, mS2RegI oldval, mRegI newval) %{
13312 match(Set res (CompareAndSwapI mem_ptr (Binary oldval newval)));
13313 effect(KILL oldval);
13314 // match(CompareAndSwapI mem_ptr (Binary oldval newval));
13315 format %{ "CMPXCHG $newval, [$mem_ptr], $oldval @ compareAndSwapI\n\t"
13316 "MOV $res, 1 @ compareAndSwapI\n\t"
13317 "BNE AT, R0 @ compareAndSwapI\n\t"
13318 "MOV $res, 0 @ compareAndSwapI\n"
13319 "L:" %}
13320 ins_encode %{
13321 Register newval = $newval$$Register;
13322 Register oldval = $oldval$$Register;
13323 Register res = $res$$Register;
13324 Address addr($mem_ptr$$Register, 0);
13325 Label L;
13327 __ cmpxchg32(newval, addr, oldval);
13328 __ move(res, AT);
13329 %}
13330 ins_pipe( long_memory_op );
13331 %}
13333 //FIXME:
13334 instruct compareAndSwapP( mRegI res, mRegP mem_ptr, s2_RegP oldval, mRegP newval) %{
13335 match(Set res (CompareAndSwapP mem_ptr (Binary oldval newval)));
13336 effect(KILL oldval);
13337 format %{ "CMPXCHG $newval, [$mem_ptr], $oldval @ compareAndSwapP\n\t"
13338 "MOV $res, AT @ compareAndSwapP\n\t"
13339 "L:" %}
13340 ins_encode %{
13341 Register newval = $newval$$Register;
13342 Register oldval = $oldval$$Register;
13343 Register res = $res$$Register;
13344 Address addr($mem_ptr$$Register, 0);
13345 Label L;
13347 __ cmpxchg(newval, addr, oldval);
13348 __ move(res, AT);
13349 %}
13350 ins_pipe( long_memory_op );
13351 %}
13353 instruct compareAndSwapN( mRegI res, mRegP mem_ptr, t2_RegN oldval, mRegN newval) %{
13354 match(Set res (CompareAndSwapN mem_ptr (Binary oldval newval)));
13355 effect(KILL oldval);
13356 format %{ "CMPXCHG $newval, [$mem_ptr], $oldval @ compareAndSwapN\n\t"
13357 "MOV $res, AT @ compareAndSwapN\n\t"
13358 "L:" %}
13359 ins_encode %{
13360 Register newval = $newval$$Register;
13361 Register oldval = $oldval$$Register;
13362 Register res = $res$$Register;
13363 Address addr($mem_ptr$$Register, 0);
13364 Label L;
13366 /* 2013/7/19 Jin: cmpxchg32 is implemented with ll/sc, which will do sign extension.
13367 * Thus, we should extend oldval's sign for correct comparision.
13368 */
13369 __ sll(oldval, oldval, 0);
13371 __ cmpxchg32(newval, addr, oldval);
13372 __ move(res, AT);
13373 %}
13374 ins_pipe( long_memory_op );
13375 %}
13377 //----------Max and Min--------------------------------------------------------
13378 // Min Instructions
13379 ////
13380 // *** Min and Max using the conditional move are slower than the
13381 // *** branch version on a Pentium III.
13382 // // Conditional move for min
13383 //instruct cmovI_reg_lt( eRegI op2, eRegI op1, eFlagsReg cr ) %{
13384 // effect( USE_DEF op2, USE op1, USE cr );
13385 // format %{ "CMOVlt $op2,$op1\t! min" %}
13386 // opcode(0x4C,0x0F);
13387 // ins_encode( OpcS, OpcP, RegReg( op2, op1 ) );
13388 // ins_pipe( pipe_cmov_reg );
13389 //%}
13390 //
13391 //// Min Register with Register (P6 version)
13392 //instruct minI_eReg_p6( eRegI op1, eRegI op2 ) %{
13393 // predicate(VM_Version::supports_cmov() );
13394 // match(Set op2 (MinI op1 op2));
13395 // ins_cost(200);
13396 // expand %{
13397 // eFlagsReg cr;
13398 // compI_eReg(cr,op1,op2);
13399 // cmovI_reg_lt(op2,op1,cr);
13400 // %}
13401 //%}
// Min Register with Register (generic version)
// dst = min(dst, src), branch-free via slt + conditional move.
instruct minI_Reg_Reg(mRegI dst, mRegI src) %{
  match(Set dst (MinI dst src));
  //effect(KILL flags);
  ins_cost(80);

  format %{ "MIN $dst, $src @minI_Reg_Reg" %}
  ins_encode %{
    Register dst = $dst$$Register;
    Register src = $src$$Register;

    __ slt(AT, src, dst);    // AT = (src < dst)
    __ movn(dst, src, AT);   // if AT != 0, dst = src

  %}

  ins_pipe( pipe_slow );
%}
13422 // Max Register with Register
13423 // *** Min and Max using the conditional move are slower than the
13424 // *** branch version on a Pentium III.
13425 // // Conditional move for max
13426 //instruct cmovI_reg_gt( eRegI op2, eRegI op1, eFlagsReg cr ) %{
13427 // effect( USE_DEF op2, USE op1, USE cr );
13428 // format %{ "CMOVgt $op2,$op1\t! max" %}
13429 // opcode(0x4F,0x0F);
13430 // ins_encode( OpcS, OpcP, RegReg( op2, op1 ) );
13431 // ins_pipe( pipe_cmov_reg );
13432 //%}
13433 //
13434 // // Max Register with Register (P6 version)
13435 //instruct maxI_eReg_p6( eRegI op1, eRegI op2 ) %{
13436 // predicate(VM_Version::supports_cmov() );
13437 // match(Set op2 (MaxI op1 op2));
13438 // ins_cost(200);
13439 // expand %{
13440 // eFlagsReg cr;
13441 // compI_eReg(cr,op1,op2);
13442 // cmovI_reg_gt(op2,op1,cr);
13443 // %}
13444 //%}
// Max Register with Register (generic version)
// dst = max(dst, src), branch-free via slt + conditional move.
instruct maxI_Reg_Reg(mRegI dst, mRegI src) %{
  match(Set dst (MaxI dst src));
  ins_cost(80);

  format %{ "MAX $dst, $src @maxI_Reg_Reg" %}

  ins_encode %{
    Register dst = $dst$$Register;
    Register src = $src$$Register;

    __ slt(AT, dst, src);    // AT = (dst < src)
    __ movn(dst, src, AT);   // if AT != 0, dst = src

  %}

  ins_pipe( pipe_slow );
%}
// dst = max(dst, 0): clamp negative values to zero.
instruct maxI_Reg_zero(mRegI dst, immI0 zero) %{
  match(Set dst (MaxI dst zero));
  ins_cost(50);

  format %{ "MAX $dst, 0 @maxI_Reg_zero" %}

  ins_encode %{
    Register dst = $dst$$Register;

    __ slt(AT, dst, R0);    // AT = (dst < 0)
    __ movn(dst, R0, AT);   // if negative, dst = 0

  %}

  ins_pipe( pipe_slow );
%}
// Zero-extend the low 32 bits of a long: (src & 0xFFFFFFFF).
instruct zerox_long_reg_reg(mRegL dst, mRegL src, immL_32bits mask)
%{
  match(Set dst (AndL src mask));

  format %{ "movl $dst, $src\t# zero-extend long @ zerox_long_reg_reg" %}
  ins_encode %{
    Register dst = $dst$$Register;
    Register src = $src$$Register;

    // dext extracts bits [0,32) of src into dst, zero-filling the rest.
    __ dext(dst, src, 0, 32);
  %}
  ins_pipe(ialu_regI_regI);
%}
// Combine two ints into one long: dst = (src2 << 32) | zext(src1).
// Matches (zext(src1) | (i2l(src2) << 32)) and uses dins/dinsu bitfield
// inserts, handling the register-aliasing cases separately.
instruct combine_i2l(mRegL dst, mRegI src1, immL_32bits mask, mRegI src2, immI_32 shift32)
%{
  match(Set dst (OrL (AndL (ConvI2L src1) mask) (LShiftL (ConvI2L src2) shift32)));

  format %{ "combine_i2l $dst, $src2(H), $src1(L) @ combine_i2l" %}
  ins_encode %{
    Register dst  = $dst$$Register;
    Register src1 = $src1$$Register;
    Register src2 = $src2$$Register;

    if (src1 == dst) {
      // Low half is already in place; insert src2 into the high 32 bits.
      __ dinsu(dst, src2, 32, 32);
    } else if (src2 == dst) {
      // Shift the high half into position, then insert the low half.
      __ dsll32(dst, dst, 0);
      __ dins(dst, src1, 0, 32);
    } else {
      // Distinct registers: extract low half, then insert high half.
      __ dext(dst, src1, 0, 32);
      __ dinsu(dst, src2, 32, 32);
    }
  %}
  ins_pipe(ialu_regI_regI);
%}
// Zero-extend convert int to long
// Matches (i2l(src) & 0xFFFFFFFF), i.e. an unsigned int-to-long conversion.
instruct convI2L_reg_reg_zex(mRegL dst, mRegI src, immL_32bits mask)
%{
  match(Set dst (AndL (ConvI2L src) mask));

  format %{ "movl $dst, $src\t# i2l zero-extend @ convI2L_reg_reg_zex" %}
  ins_encode %{
    Register dst = $dst$$Register;
    Register src = $src$$Register;

    // Extract bits [0,32) zero-filled — one instruction, no sign extension.
    __ dext(dst, src, 0, 32);
  %}
  ins_pipe(ialu_regI_regI);
%}
// Matches (i2l(l2i(src)) & 0xFFFFFFFF): a long truncated to int and
// zero-extended back — equivalent to masking the low 32 bits.
instruct convL2I2L_reg_reg_zex(mRegL dst, mRegL src, immL_32bits mask)
%{
  match(Set dst (AndL (ConvI2L (ConvL2I src)) mask));

  format %{ "movl $dst, $src\t# i2l zero-extend @ convL2I2L_reg_reg_zex" %}
  ins_encode %{
    Register dst = $dst$$Register;
    Register src = $src$$Register;

    // Extract bits [0,32) zero-filled.
    __ dext(dst, src, 0, 32);
  %}
  ins_pipe(ialu_regI_regI);
%}
// Match loading integer and casting it to unsigned int in long register.
// LoadI + ConvI2L + AndL 0xffffffff.
// load_N_enc emits an unsigned 32-bit load (lwu), so load and zero-extend
// fold into a single instruction.
instruct loadUI2L_rmask(mRegL dst, memory mem, immL_32bits mask) %{
  match(Set dst (AndL (ConvI2L (LoadI mem)) mask));

  format %{ "lwu $dst, $mem \t// zero-extend to long @ loadUI2L_rmask" %}
  ins_encode (load_N_enc(dst, mem));
  ins_pipe(ialu_loadI);
%}
// Same as loadUI2L_rmask but with the mask on the left side of the AndL;
// the matcher treats the two operand orders as distinct patterns.
instruct loadUI2L_lmask(mRegL dst, memory mem, immL_32bits mask) %{
  match(Set dst (AndL mask (ConvI2L (LoadI mem))));

  format %{ "lwu $dst, $mem \t// zero-extend to long @ loadUI2L_lmask" %}
  ins_encode (load_N_enc(dst, mem));
  ins_pipe(ialu_loadI);
%}
// ============================================================================
// Safepoint Instruction
// Poll-with-register form: reads from the polling page whose address is
// already in $poll. Currently disabled (predicate(false)).
instruct safePoint_poll_reg(mRegP poll) %{
  match(SafePoint poll);
  predicate(false);
  effect(USE poll);

  ins_cost(125);
  format %{ "Safepoint @ [$poll] : poll for GC @ safePoint_poll_reg" %}

  ins_encode %{
    Register poll_reg = $poll$$Register;

    __ block_comment("Safepoint:");
    // The relocation lets the VM identify this load as a safepoint poll;
    // the load faults when the polling page is protected.
    __ relocate(relocInfo::poll_type);
    __ lw(AT, poll_reg, 0);
  %}

  ins_pipe( ialu_storeI );
%}
// Safepoint poll: materialize the polling-page address in T9 and load from
// it. The load faults when the VM protects the page, stopping this thread.
instruct safePoint_poll() %{
  match(SafePoint);

  ins_cost(105);
  format %{ "poll for GC @ safePoint_poll" %}

  ins_encode %{
    __ block_comment("Safepoint:");
    __ set64(T9, (long)os::get_polling_page());
    // Relocation marks this load as a safepoint poll for the VM.
    __ relocate(relocInfo::poll_type);
    __ lw(AT, T9, 0);
  %}

  ins_pipe( ialu_storeI );
%}
13604 //----------Arithmetic Conversion Instructions---------------------------------
// RoundFloat is a no-op on this target: float values already have the
// required precision in the FP registers, so no code is emitted.
instruct roundFloat_nop(regF dst)
%{
  match(Set dst (RoundFloat dst));

  ins_cost(0);
  ins_encode();
  ins_pipe(empty);
%}
// RoundDouble is a no-op on this target: double values already have the
// required precision in the FP registers, so no code is emitted.
instruct roundDouble_nop(regD dst)
%{
  match(Set dst (RoundDouble dst));

  ins_cost(0);
  ins_encode();
  ins_pipe(empty);
%}
13624 //---------- Zeros Count Instructions ------------------------------------------
13625 // CountLeadingZerosINode CountTrailingZerosINode
// Count leading zeros of an int via the clz instruction; guarded by the
// UseCountLeadingZerosInstruction flag.
instruct countLeadingZerosI(mRegI dst, mRegI src) %{
  predicate(UseCountLeadingZerosInstruction);
  match(Set dst (CountLeadingZerosI src));

  format %{ "clz $dst, $src\t# count leading zeros (int)" %}
  ins_encode %{
    __ clz($dst$$Register, $src$$Register);
  %}
  ins_pipe( ialu_regL_regL );
%}
// Count leading zeros of a long via the dclz instruction; guarded by the
// UseCountLeadingZerosInstruction flag.
instruct countLeadingZerosL(mRegI dst, mRegL src) %{
  predicate(UseCountLeadingZerosInstruction);
  match(Set dst (CountLeadingZerosL src));

  format %{ "dclz $dst, $src\t# count leading zeros (long)" %}
  ins_encode %{
    __ dclz($dst$$Register, $src$$Register);
  %}
  ins_pipe( ialu_regL_regL );
%}
// Count trailing zeros of an int; guarded by the
// UseCountTrailingZerosInstruction flag.
instruct countTrailingZerosI(mRegI dst, mRegI src) %{
  predicate(UseCountTrailingZerosInstruction);
  match(Set dst (CountTrailingZerosI src));

  format %{ "ctz $dst, $src\t# count trailing zeros (int)" %}
  ins_encode %{
    // ctz and dctz is gs instructions.
    __ ctz($dst$$Register, $src$$Register);
  %}
  ins_pipe( ialu_regL_regL );
%}
13660 instruct countTrailingZerosL(mRegI dst, mRegL src) %{
13661 predicate(UseCountTrailingZerosInstruction);
13662 match(Set dst (CountTrailingZerosL src));
13664 format %{ "dcto $dst, $src\t# count trailing zeros (long)" %}
13665 ins_encode %{
13666 __ dctz($dst$$Register, $src$$Register);
13667 %}
13668 ins_pipe( ialu_regL_regL );
13669 %}
13671 // ====================VECTOR INSTRUCTIONS=====================================
// Load vectors (8 bytes long)
// An 8-byte vector lives in a double FP register; reuse the double-load
// encoding.
instruct loadV8(vecD dst, memory mem) %{
  predicate(n->as_LoadVector()->memory_size() == 8);
  match(Set dst (LoadVector mem));
  ins_cost(125);
  format %{ "load $dst, $mem\t! load vector (8 bytes)" %}
  ins_encode(load_D_enc(dst, mem));
  ins_pipe( fpu_loadF );
%}
// Store vectors (8 bytes long)
// An 8-byte vector lives in a double FP register; reuse the double-store
// encoding.
instruct storeV8(memory mem, vecD src) %{
  predicate(n->as_StoreVector()->memory_size() == 8);
  match(Set mem (StoreVector mem src));
  ins_cost(145);
  format %{ "store $mem, $src\t! store vector (8 bytes)" %}
  ins_encode(store_D_reg_enc(mem, src));
  ins_pipe( fpu_storeF );
%}
// Replicate a byte scalar into all 8 lanes, using the Loongson 3A2000 DSP
// replv_ob instruction (cheaper than the generic dins sequence below).
instruct Repl8B_DSP(vecD dst, mRegI src) %{
  predicate(n->as_Vector()->length() == 8 && Use3A2000);
  match(Set dst (ReplicateB src));
  ins_cost(100);
  format %{ "replv_ob AT, $src\n\t"
            "dmtc1 AT, $dst\t! replicate8B" %}
  ins_encode %{
    __ replv_ob(AT, $src$$Register);
    __ dmtc1(AT, $dst$$FloatRegister);  // move the packed value into the FP reg
  %}
  ins_pipe( pipe_mtc1 );
%}
// Replicate a byte scalar into all 8 lanes (generic version): widen the
// byte with successive bitfield inserts, doubling the filled width each
// step (8 -> 16 -> 32 -> 64 bits).
instruct Repl8B(vecD dst, mRegI src) %{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (ReplicateB src));
  ins_cost(140);
  format %{ "move AT, $src\n\t"
            "dins AT, AT, 8, 8\n\t"
            "dins AT, AT, 16, 16\n\t"
            "dinsu AT, AT, 32, 32\n\t"
            "dmtc1 AT, $dst\t! replicate8B" %}
  ins_encode %{
    __ move(AT, $src$$Register);
    __ dins(AT, AT, 8, 8);      // bits [8,16) = bits [0,8)
    __ dins(AT, AT, 16, 16);    // bits [16,32) = bits [0,16)
    __ dinsu(AT, AT, 32, 32);   // bits [32,64) = bits [0,32)
    __ dmtc1(AT, $dst$$FloatRegister);
  %}
  ins_pipe( pipe_mtc1 );
%}
// Replicate a byte immediate into all 8 lanes, using the Loongson 3A2000
// DSP repl_ob instruction.
instruct Repl8B_imm_DSP(vecD dst, immI con) %{
  predicate(n->as_Vector()->length() == 8 && Use3A2000);
  match(Set dst (ReplicateB con));
  ins_cost(110);
  format %{ "repl_ob AT, [$con]\n\t"
            "dmtc1 AT, $dst,0x00\t! replicate8B($con)" %}
  ins_encode %{
    int val = $con$$constant;
    __ repl_ob(AT, val);
    __ dmtc1(AT, $dst$$FloatRegister);
  %}
  ins_pipe( pipe_mtc1 );
%}
// Replicate a byte immediate into all 8 lanes (generic version): same
// doubling dins sequence as Repl8B, starting from the materialized constant.
instruct Repl8B_imm(vecD dst, immI con) %{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (ReplicateB con));
  ins_cost(150);
  format %{ "move AT, [$con]\n\t"
            "dins AT, AT, 8, 8\n\t"
            "dins AT, AT, 16, 16\n\t"
            "dinsu AT, AT, 32, 32\n\t"
            "dmtc1 AT, $dst,0x00\t! replicate8B($con)" %}
  ins_encode %{
    __ move(AT, $con$$constant);
    __ dins(AT, AT, 8, 8);      // bits [8,16) = bits [0,8)
    __ dins(AT, AT, 16, 16);    // bits [16,32) = bits [0,16)
    __ dinsu(AT, AT, 32, 32);   // bits [32,64) = bits [0,32)
    __ dmtc1(AT, $dst$$FloatRegister);
  %}
  ins_pipe( pipe_mtc1 );
%}
// Replicate byte zero into all 8 lanes: just move R0 into the FP register.
instruct Repl8B_zero(vecD dst, immI0 zero) %{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (ReplicateB zero));
  ins_cost(90);
  format %{ "dmtc1 R0, $dst\t! replicate8B zero" %}
  ins_encode %{
    __ dmtc1(R0, $dst$$FloatRegister);
  %}
  ins_pipe( pipe_mtc1 );
%}
// Replicate byte -1 into all 8 lanes: nor(R0, R0) yields all-ones.
instruct Repl8B_M1(vecD dst, immI_M1 M1) %{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (ReplicateB M1));
  ins_cost(80);
  format %{ "dmtc1 -1, $dst\t! replicate8B -1" %}
  ins_encode %{
    __ nor(AT, R0, R0);   // AT = ~0
    __ dmtc1(AT, $dst$$FloatRegister);
  %}
  ins_pipe( pipe_mtc1 );
%}
// Replicate a short scalar into all 4 lanes, using the Loongson 3A2000 DSP
// replv_qh instruction.
instruct Repl4S_DSP(vecD dst, mRegI src) %{
  predicate(n->as_Vector()->length() == 4 && Use3A2000);
  match(Set dst (ReplicateS src));
  ins_cost(100);
  format %{ "replv_qh AT, $src\n\t"
            "dmtc1 AT, $dst\t! replicate4S" %}
  ins_encode %{
    __ replv_qh(AT, $src$$Register);
    __ dmtc1(AT, $dst$$FloatRegister);
  %}
  ins_pipe( pipe_mtc1 );
%}
// Replicate a short scalar into all 4 lanes (generic version): doubling
// bitfield inserts (16 -> 32 -> 64 bits).
instruct Repl4S(vecD dst, mRegI src) %{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (ReplicateS src));
  ins_cost(120);
  format %{ "move AT, $src \n\t"
            "dins AT, AT, 16, 16\n\t"
            "dinsu AT, AT, 32, 32\n\t"
            "dmtc1 AT, $dst\t! replicate4S" %}
  ins_encode %{
    __ move(AT, $src$$Register);
    __ dins(AT, AT, 16, 16);    // bits [16,32) = bits [0,16)
    __ dinsu(AT, AT, 32, 32);   // bits [32,64) = bits [0,32)
    __ dmtc1(AT, $dst$$FloatRegister);
  %}
  ins_pipe( pipe_mtc1 );
%}
// Replicate a short immediate into all 4 lanes (Loongson 3A2000 DSP).
// repl_qh takes a 10-bit immediate; larger constants are materialized with
// li32 and replicated from the register with replv_qh.
instruct Repl4S_imm_DSP(vecD dst, immI con) %{
  predicate(n->as_Vector()->length() == 4 && Use3A2000);
  match(Set dst (ReplicateS con));
  ins_cost(100);
  format %{ "replv_qh AT, [$con]\n\t"
            "dmtc1 AT, $dst\t! replicate4S($con)" %}
  ins_encode %{
    int val = $con$$constant;
    if ( Assembler::is_simm(val, 10)) {
      //repl_qh supports 10 bits immediate
      __ repl_qh(AT, val);
    } else {
      __ li32(AT, val);
      __ replv_qh(AT, AT);
    }
    __ dmtc1(AT, $dst$$FloatRegister);
  %}
  ins_pipe( pipe_mtc1 );
%}
// Replicate a short immediate into all 4 lanes (generic version).
instruct Repl4S_imm(vecD dst, immI con) %{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (ReplicateS con));
  ins_cost(110);
  format %{ "move AT, [$con]\n\t"
            "dins AT, AT, 16, 16\n\t"
            "dinsu AT, AT, 32, 32\n\t"
            "dmtc1 AT, $dst\t! replicate4S($con)" %}
  ins_encode %{
    __ move(AT, $con$$constant);
    __ dins(AT, AT, 16, 16);    // bits [16,32) = bits [0,16)
    __ dinsu(AT, AT, 32, 32);   // bits [32,64) = bits [0,32)
    __ dmtc1(AT, $dst$$FloatRegister);
  %}
  ins_pipe( pipe_mtc1 );
%}
// Replicate short zero into all 4 lanes: just move R0 into the FP register.
instruct Repl4S_zero(vecD dst, immI0 zero) %{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (ReplicateS zero));
  format %{ "dmtc1 R0, $dst\t! replicate4S zero" %}
  ins_encode %{
    __ dmtc1(R0, $dst$$FloatRegister);
  %}
  ins_pipe( pipe_mtc1 );
%}
// Replicate short -1 into all 4 lanes: nor(R0, R0) yields all-ones.
instruct Repl4S_M1(vecD dst, immI_M1 M1) %{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (ReplicateS M1));
  format %{ "dmtc1 -1, $dst\t! replicate4S -1" %}
  ins_encode %{
    __ nor(AT, R0, R0);   // AT = ~0
    __ dmtc1(AT, $dst$$FloatRegister);
  %}
  ins_pipe( pipe_mtc1 );
%}
// Replicate integer (4 byte) scalar to be vector
// Copies src into both 32-bit halves of AT via bitfield inserts, then moves
// the 64-bit result into the FP register.
instruct Repl2I(vecD dst, mRegI src) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (ReplicateI src));
  format %{ "dins AT, $src, 0, 32\n\t"
            "dinsu AT, $src, 32, 32\n\t"
            "dmtc1 AT, $dst\t! replicate2I" %}
  ins_encode %{
    __ dins(AT, $src$$Register, 0, 32);    // low 32 bits
    __ dinsu(AT, $src$$Register, 32, 32);  // high 32 bits
    __ dmtc1(AT, $dst$$FloatRegister);
  %}
  ins_pipe( pipe_mtc1 );
%}
13884 // Replicate integer (4 byte) scalar immediate to be vector by loading from const table.
13885 instruct Repl2I_imm(vecD dst, immI con, mA7RegI tmp) %{
13886 predicate(n->as_Vector()->length() == 2);
13887 match(Set dst (ReplicateI con));
13888 effect(KILL tmp);
13889 format %{ "li32 AT, [$con], 32\n\t"
13890 "dinsu AT, AT\n\t"
13891 "dmtc1 AT, $dst\t! replicate2I($con)" %}
13892 ins_encode %{
13893 int val = $con$$constant;
13894 __ li32(AT, val);
13895 __ dinsu(AT, AT, 32, 32);
13896 __ dmtc1(AT, $dst$$FloatRegister);
13897 %}
13898 ins_pipe( pipe_mtc1 );
13899 %}
// Replicate integer (4 byte) scalar zero to be vector
// Just move R0 into the FP register.
instruct Repl2I_zero(vecD dst, immI0 zero) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (ReplicateI zero));
  format %{ "dmtc1 R0, $dst\t! replicate2I zero" %}
  ins_encode %{
    __ dmtc1(R0, $dst$$FloatRegister);
  %}
  ins_pipe( pipe_mtc1 );
%}
// Replicate integer (4 byte) scalar -1 to be vector
// nor(R0, R0) yields all-ones in AT.
instruct Repl2I_M1(vecD dst, immI_M1 M1) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (ReplicateI M1));
  format %{ "dmtc1 -1, $dst\t! replicate2I -1, use AT" %}
  ins_encode %{
    __ nor(AT, R0, R0);   // AT = ~0
    __ dmtc1(AT, $dst$$FloatRegister);
  %}
  ins_pipe( pipe_mtc1 );
%}
// Replicate float (4 byte) scalar to be vector
// cvt.ps.s packs two singles into one paired-single register.
instruct Repl2F(vecD dst, regF src) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (ReplicateF src));
  format %{ "cvt.ps $dst, $src, $src\t! replicate2F" %}
  ins_encode %{
    __ cvt_ps_s($dst$$FloatRegister, $src$$FloatRegister, $src$$FloatRegister);
  %}
  ins_pipe( pipe_slow );
%}
// Replicate float (4 byte) scalar zero to be vector
// +0.0f is all-zero bits, so moving R0 into the FP register suffices.
instruct Repl2F_zero(vecD dst, immF0 zero) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (ReplicateF zero));
  format %{ "dmtc1 R0, $dst\t! replicate2F zero" %}
  ins_encode %{
    __ dmtc1(R0, $dst$$FloatRegister);
  %}
  ins_pipe( pipe_mtc1 );
%}
13947 // ====================VECTOR ARITHMETIC=======================================
13949 // --------------------------------- ADD --------------------------------------
13951 // Floats vector add
13952 // kernel does not have emulation of PS instructions yet, so PS instructions is disabled.
// Packed-single float vector add, two-operand form: dst += src.
instruct vadd2F(vecD dst, vecD src) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (AddVF dst src));
  format %{ "add.ps $dst,$src\t! add packed2F" %}
  ins_encode %{
    __ add_ps($dst$$FloatRegister, $dst$$FloatRegister, $src$$FloatRegister);
  %}
  ins_pipe( pipe_slow );
%}
// Packed-single float vector add, three-operand form: dst = src1 + src2.
instruct vadd2F3(vecD dst, vecD src1, vecD src2) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (AddVF src1 src2));
  format %{ "add.ps $dst,$src1,$src2\t! add packed2F" %}
  ins_encode %{
    __ add_ps($dst$$FloatRegister, $src1$$FloatRegister, $src2$$FloatRegister);
  %}
  ins_pipe( fpu_regF_regF );
%}
13973 // --------------------------------- SUB --------------------------------------
13975 // Floats vector sub
// Packed-single float vector subtract: dst -= src.
instruct vsub2F(vecD dst, vecD src) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (SubVF dst src));
  format %{ "sub.ps $dst,$src\t! sub packed2F" %}
  ins_encode %{
    __ sub_ps($dst$$FloatRegister, $dst$$FloatRegister, $src$$FloatRegister);
  %}
  ins_pipe( fpu_regF_regF );
%}
13986 // --------------------------------- MUL --------------------------------------
13988 // Floats vector mul
// Packed-single float vector multiply, two-operand form: dst *= src.
instruct vmul2F(vecD dst, vecD src) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (MulVF dst src));
  format %{ "mul.ps $dst, $src\t! mul packed2F" %}
  ins_encode %{
    __ mul_ps($dst$$FloatRegister, $dst$$FloatRegister, $src$$FloatRegister);
  %}
  ins_pipe( fpu_regF_regF );
%}
// Packed-single float vector multiply, three-operand form: dst = src1 * src2.
instruct vmul2F3(vecD dst, vecD src1, vecD src2) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (MulVF src1 src2));
  format %{ "mul.ps $dst, $src1, $src2\t! mul packed2F" %}
  ins_encode %{
    __ mul_ps($dst$$FloatRegister, $src1$$FloatRegister, $src2$$FloatRegister);
  %}
  ins_pipe( fpu_regF_regF );
%}
14009 // --------------------------------- DIV --------------------------------------
14010 // MIPS do not have div.ps
14012 // --------------------------------- MADD --------------------------------------
14013 // Floats vector madd
14014 //instruct vmadd2F(vecD dst, vecD src1, vecD src2, vecD src3) %{
14015 // predicate(n->as_Vector()->length() == 2);
14016 // match(Set dst (AddVF (MulVF src1 src2) src3));
14017 // ins_cost(50);
14018 // format %{ "madd.ps $dst, $src3, $src1, $src2\t! madd packed2F" %}
14019 // ins_encode %{
14020 // __ madd_ps($dst$$FloatRegister, $src3$$FloatRegister, $src1$$FloatRegister, $src2$$FloatRegister);
14021 // %}
14022 // ins_pipe( fpu_regF_regF );
14023 //%}
14026 //----------PEEPHOLE RULES-----------------------------------------------------
14027 // These must follow all instruction definitions as they use the names
14028 // defined in the instructions definitions.
14029 //
14030 // peepmatch ( root_instr_name [preceeding_instruction]* );
14031 //
14032 // peepconstraint %{
14033 // (instruction_number.operand_name relational_op instruction_number.operand_name
14034 // [, ...] );
14035 // // instruction numbers are zero-based using left to right order in peepmatch
14036 //
14037 // peepreplace ( instr_name ( [instruction_number.operand_name]* ) );
14038 // // provide an instruction_number.operand_name for each operand that appears
14039 // // in the replacement instruction's match rule
14040 //
14041 // ---------VM FLAGS---------------------------------------------------------
14042 //
14043 // All peephole optimizations can be turned off using -XX:-OptoPeephole
14044 //
14045 // Each peephole rule is given an identifying number starting with zero and
14046 // increasing by one in the order seen by the parser. An individual peephole
14047 // can be enabled, and all others disabled, by using -XX:OptoPeepholeAt=#
14048 // on the command-line.
14049 //
14050 // ---------CURRENT LIMITATIONS----------------------------------------------
14051 //
14052 // Only match adjacent instructions in same basic block
14053 // Only equality constraints
14054 // Only constraints between operands, not (0.dest_reg == EAX_enc)
14055 // Only one replacement instruction
14056 //
14057 // ---------EXAMPLE----------------------------------------------------------
14058 //
14059 // // pertinent parts of existing instructions in architecture description
14060 // instruct movI(eRegI dst, eRegI src) %{
14061 // match(Set dst (CopyI src));
14062 // %}
14063 //
14064 // instruct incI_eReg(eRegI dst, immI1 src, eFlagsReg cr) %{
14065 // match(Set dst (AddI dst src));
14066 // effect(KILL cr);
14067 // %}
14068 //
14069 // // Change (inc mov) to lea
14070 // peephole %{
14071 // // increment preceeded by register-register move
14072 // peepmatch ( incI_eReg movI );
14073 // // require that the destination register of the increment
14074 // // match the destination register of the move
14075 // peepconstraint ( 0.dst == 1.dst );
14076 // // construct a replacement instruction that sets
14077 // // the destination to ( move's source register + one )
14078 // peepreplace ( leaI_eReg_immI( 0.dst 1.src 0.src ) );
14079 // %}
14080 //
14081 // Implementation no longer uses movX instructions since
14082 // machine-independent system no longer uses CopyX nodes.
14083 //
14084 // peephole %{
14085 // peepmatch ( incI_eReg movI );
14086 // peepconstraint ( 0.dst == 1.dst );
14087 // peepreplace ( leaI_eReg_immI( 0.dst 1.src 0.src ) );
14088 // %}
14089 //
14090 // peephole %{
14091 // peepmatch ( decI_eReg movI );
14092 // peepconstraint ( 0.dst == 1.dst );
14093 // peepreplace ( leaI_eReg_immI( 0.dst 1.src 0.src ) );
14094 // %}
14095 //
14096 // peephole %{
14097 // peepmatch ( addI_eReg_imm movI );
14098 // peepconstraint ( 0.dst == 1.dst );
14099 // peepreplace ( leaI_eReg_immI( 0.dst 1.src 0.src ) );
14100 // %}
14101 //
14102 // peephole %{
14103 // peepmatch ( addP_eReg_imm movP );
14104 // peepconstraint ( 0.dst == 1.dst );
14105 // peepreplace ( leaP_eReg_immI( 0.dst 1.src 0.src ) );
14106 // %}
14108 // // Change load of spilled value to only a spill
14109 // instruct storeI(memory mem, eRegI src) %{
14110 // match(Set mem (StoreI mem src));
14111 // %}
14112 //
14113 // instruct loadI(eRegI dst, memory mem) %{
14114 // match(Set dst (LoadI mem));
14115 // %}
14116 //
14117 //peephole %{
14118 // peepmatch ( loadI storeI );
14119 // peepconstraint ( 1.src == 0.dst, 1.mem == 0.mem );
14120 // peepreplace ( storeI( 1.mem 1.mem 1.src ) );
14121 //%}
14123 //----------SMARTSPILL RULES---------------------------------------------------
14124 // These must follow all instruction definitions as they use the names
14125 // defined in the instructions definitions.