Tue, 16 May 2017 11:53:48 -0400
#5401 Fix compiler/7116216/StackOverflow.java for MIPS.
1 //
2 // Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved.
3 // Copyright (c) 2015, 2016, Loongson Technology. All rights reserved.
4 // DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5 //
6 // This code is free software; you can redistribute it and/or modify it
7 // under the terms of the GNU General Public License version 2 only, as
8 // published by the Free Software Foundation.
9 //
10 // This code is distributed in the hope that it will be useful, but WITHOUT
11 // ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 // FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
13 // version 2 for more details (a copy is included in the LICENSE file that
14 // accompanied this code).
15 //
16 // You should have received a copy of the GNU General Public License version
17 // 2 along with this work; if not, write to the Free Software Foundation,
18 // Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
19 //
20 // Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
21 // or visit www.oracle.com if you need additional information or have any
22 // questions.
23 //
24 //
26 // Godson3 Architecture Description File
28 //----------REGISTER DEFINITION BLOCK------------------------------------------
29 // This information is used by the matcher and the register allocator to
30 // describe individual registers and classes of registers within the target
31 // architecture.
33 // format:
34 // reg_def name (call convention, c-call convention, ideal type, encoding);
35 // call convention :
36 // NS = No-Save
37 // SOC = Save-On-Call
38 // SOE = Save-On-Entry
39 // AS = Always-Save
40 // ideal type :
41 // see opto/opcodes.hpp for more info
42 // reg_class name (reg, ...);
43 // alloc_class name (reg, ...);
44 register %{
46 // General Registers
47 // Integer Registers
48 reg_def R0 ( NS, NS, Op_RegI, 0, VMRegImpl::Bad());
49 reg_def AT ( NS, NS, Op_RegI, 1, AT->as_VMReg());
50 reg_def AT_H ( NS, NS, Op_RegI, 1, AT->as_VMReg()->next());
51 reg_def V0 (SOC, SOC, Op_RegI, 2, V0->as_VMReg());
52 reg_def V0_H (SOC, SOC, Op_RegI, 2, V0->as_VMReg()->next());
53 reg_def V1 (SOC, SOC, Op_RegI, 3, V1->as_VMReg());
54 reg_def V1_H (SOC, SOC, Op_RegI, 3, V1->as_VMReg()->next());
55 reg_def A0 (SOC, SOC, Op_RegI, 4, A0->as_VMReg());
56 reg_def A0_H (SOC, SOC, Op_RegI, 4, A0->as_VMReg()->next());
57 reg_def A1 (SOC, SOC, Op_RegI, 5, A1->as_VMReg());
58 reg_def A1_H (SOC, SOC, Op_RegI, 5, A1->as_VMReg()->next());
59 reg_def A2 (SOC, SOC, Op_RegI, 6, A2->as_VMReg());
60 reg_def A2_H (SOC, SOC, Op_RegI, 6, A2->as_VMReg()->next());
61 reg_def A3 (SOC, SOC, Op_RegI, 7, A3->as_VMReg());
62 reg_def A3_H (SOC, SOC, Op_RegI, 7, A3->as_VMReg()->next());
63 reg_def A4 (SOC, SOC, Op_RegI, 8, A4->as_VMReg());
64 reg_def A4_H (SOC, SOC, Op_RegI, 8, A4->as_VMReg()->next());
65 reg_def A5 (SOC, SOC, Op_RegI, 9, A5->as_VMReg());
66 reg_def A5_H (SOC, SOC, Op_RegI, 9, A5->as_VMReg()->next());
67 reg_def A6 (SOC, SOC, Op_RegI, 10, A6->as_VMReg());
68 reg_def A6_H (SOC, SOC, Op_RegI, 10, A6->as_VMReg()->next());
69 reg_def A7 (SOC, SOC, Op_RegI, 11, A7->as_VMReg());
70 reg_def A7_H (SOC, SOC, Op_RegI, 11, A7->as_VMReg()->next());
71 reg_def T0 (SOC, SOC, Op_RegI, 12, T0->as_VMReg());
72 reg_def T0_H (SOC, SOC, Op_RegI, 12, T0->as_VMReg()->next());
73 reg_def T1 (SOC, SOC, Op_RegI, 13, T1->as_VMReg());
74 reg_def T1_H (SOC, SOC, Op_RegI, 13, T1->as_VMReg()->next());
75 reg_def T2 (SOC, SOC, Op_RegI, 14, T2->as_VMReg());
76 reg_def T2_H (SOC, SOC, Op_RegI, 14, T2->as_VMReg()->next());
77 reg_def T3 (SOC, SOC, Op_RegI, 15, T3->as_VMReg());
78 reg_def T3_H (SOC, SOC, Op_RegI, 15, T3->as_VMReg()->next());
79 reg_def S0 (SOC, SOE, Op_RegI, 16, S0->as_VMReg());
80 reg_def S0_H (SOC, SOE, Op_RegI, 16, S0->as_VMReg()->next());
81 reg_def S1 (SOC, SOE, Op_RegI, 17, S1->as_VMReg());
82 reg_def S1_H (SOC, SOE, Op_RegI, 17, S1->as_VMReg()->next());
83 reg_def S2 (SOC, SOE, Op_RegI, 18, S2->as_VMReg());
84 reg_def S2_H (SOC, SOE, Op_RegI, 18, S2->as_VMReg()->next());
85 reg_def S3 (SOC, SOE, Op_RegI, 19, S3->as_VMReg());
86 reg_def S3_H (SOC, SOE, Op_RegI, 19, S3->as_VMReg()->next());
87 reg_def S4 (SOC, SOE, Op_RegI, 20, S4->as_VMReg());
88 reg_def S4_H (SOC, SOE, Op_RegI, 20, S4->as_VMReg()->next());
89 reg_def S5 (SOC, SOE, Op_RegI, 21, S5->as_VMReg());
90 reg_def S5_H (SOC, SOE, Op_RegI, 21, S5->as_VMReg()->next());
91 reg_def S6 (SOC, SOE, Op_RegI, 22, S6->as_VMReg());
92 reg_def S6_H (SOC, SOE, Op_RegI, 22, S6->as_VMReg()->next());
93 reg_def S7 (SOC, SOE, Op_RegI, 23, S7->as_VMReg());
94 reg_def S7_H (SOC, SOE, Op_RegI, 23, S7->as_VMReg()->next());
95 reg_def T8 (SOC, SOC, Op_RegI, 24, T8->as_VMReg());
96 reg_def T8_H (SOC, SOC, Op_RegI, 24, T8->as_VMReg()->next());
97 reg_def T9 (SOC, SOC, Op_RegI, 25, T9->as_VMReg());
98 reg_def T9_H (SOC, SOC, Op_RegI, 25, T9->as_VMReg()->next());
100 // Special Registers
101 reg_def K0 ( NS, NS, Op_RegI, 26, K0->as_VMReg());
102 reg_def K1 ( NS, NS, Op_RegI, 27, K1->as_VMReg());
103 reg_def GP ( NS, NS, Op_RegI, 28, GP->as_VMReg());
104 reg_def GP_H ( NS, NS, Op_RegI, 28, GP->as_VMReg()->next());
105 reg_def SP ( NS, NS, Op_RegI, 29, SP->as_VMReg());
106 reg_def SP_H ( NS, NS, Op_RegI, 29, SP->as_VMReg()->next());
107 reg_def FP ( NS, NS, Op_RegI, 30, FP->as_VMReg());
108 reg_def FP_H ( NS, NS, Op_RegI, 30, FP->as_VMReg()->next());
109 reg_def RA ( NS, NS, Op_RegI, 31, RA->as_VMReg());
110 reg_def RA_H ( NS, NS, Op_RegI, 31, RA->as_VMReg()->next());
112 // Floating registers.
113 reg_def F0 ( SOC, SOC, Op_RegF, 0, F0->as_VMReg());
114 reg_def F0_H ( SOC, SOC, Op_RegF, 0, F0->as_VMReg()->next());
115 reg_def F1 ( SOC, SOC, Op_RegF, 1, F1->as_VMReg());
116 reg_def F1_H ( SOC, SOC, Op_RegF, 1, F1->as_VMReg()->next());
117 reg_def F2 ( SOC, SOC, Op_RegF, 2, F2->as_VMReg());
118 reg_def F2_H ( SOC, SOC, Op_RegF, 2, F2->as_VMReg()->next());
119 reg_def F3 ( SOC, SOC, Op_RegF, 3, F3->as_VMReg());
120 reg_def F3_H ( SOC, SOC, Op_RegF, 3, F3->as_VMReg()->next());
121 reg_def F4 ( SOC, SOC, Op_RegF, 4, F4->as_VMReg());
122 reg_def F4_H ( SOC, SOC, Op_RegF, 4, F4->as_VMReg()->next());
123 reg_def F5 ( SOC, SOC, Op_RegF, 5, F5->as_VMReg());
124 reg_def F5_H ( SOC, SOC, Op_RegF, 5, F5->as_VMReg()->next());
125 reg_def F6 ( SOC, SOC, Op_RegF, 6, F6->as_VMReg());
126 reg_def F6_H ( SOC, SOC, Op_RegF, 6, F6->as_VMReg()->next());
127 reg_def F7 ( SOC, SOC, Op_RegF, 7, F7->as_VMReg());
128 reg_def F7_H ( SOC, SOC, Op_RegF, 7, F7->as_VMReg()->next());
129 reg_def F8 ( SOC, SOC, Op_RegF, 8, F8->as_VMReg());
130 reg_def F8_H ( SOC, SOC, Op_RegF, 8, F8->as_VMReg()->next());
131 reg_def F9 ( SOC, SOC, Op_RegF, 9, F9->as_VMReg());
132 reg_def F9_H ( SOC, SOC, Op_RegF, 9, F9->as_VMReg()->next());
133 reg_def F10 ( SOC, SOC, Op_RegF, 10, F10->as_VMReg());
134 reg_def F10_H ( SOC, SOC, Op_RegF, 10, F10->as_VMReg()->next());
135 reg_def F11 ( SOC, SOC, Op_RegF, 11, F11->as_VMReg());
136 reg_def F11_H ( SOC, SOC, Op_RegF, 11, F11->as_VMReg()->next());
137 reg_def F12 ( SOC, SOC, Op_RegF, 12, F12->as_VMReg());
138 reg_def F12_H ( SOC, SOC, Op_RegF, 12, F12->as_VMReg()->next());
139 reg_def F13 ( SOC, SOC, Op_RegF, 13, F13->as_VMReg());
140 reg_def F13_H ( SOC, SOC, Op_RegF, 13, F13->as_VMReg()->next());
141 reg_def F14 ( SOC, SOC, Op_RegF, 14, F14->as_VMReg());
142 reg_def F14_H ( SOC, SOC, Op_RegF, 14, F14->as_VMReg()->next());
143 reg_def F15 ( SOC, SOC, Op_RegF, 15, F15->as_VMReg());
144 reg_def F15_H ( SOC, SOC, Op_RegF, 15, F15->as_VMReg()->next());
145 reg_def F16 ( SOC, SOC, Op_RegF, 16, F16->as_VMReg());
146 reg_def F16_H ( SOC, SOC, Op_RegF, 16, F16->as_VMReg()->next());
147 reg_def F17 ( SOC, SOC, Op_RegF, 17, F17->as_VMReg());
148 reg_def F17_H ( SOC, SOC, Op_RegF, 17, F17->as_VMReg()->next());
149 reg_def F18 ( SOC, SOC, Op_RegF, 18, F18->as_VMReg());
150 reg_def F18_H ( SOC, SOC, Op_RegF, 18, F18->as_VMReg()->next());
151 reg_def F19 ( SOC, SOC, Op_RegF, 19, F19->as_VMReg());
152 reg_def F19_H ( SOC, SOC, Op_RegF, 19, F19->as_VMReg()->next());
153 reg_def F20 ( SOC, SOC, Op_RegF, 20, F20->as_VMReg());
154 reg_def F20_H ( SOC, SOC, Op_RegF, 20, F20->as_VMReg()->next());
155 reg_def F21 ( SOC, SOC, Op_RegF, 21, F21->as_VMReg());
156 reg_def F21_H ( SOC, SOC, Op_RegF, 21, F21->as_VMReg()->next());
157 reg_def F22 ( SOC, SOC, Op_RegF, 22, F22->as_VMReg());
158 reg_def F22_H ( SOC, SOC, Op_RegF, 22, F22->as_VMReg()->next());
159 reg_def F23 ( SOC, SOC, Op_RegF, 23, F23->as_VMReg());
160 reg_def F23_H ( SOC, SOC, Op_RegF, 23, F23->as_VMReg()->next());
161 reg_def F24 ( SOC, SOC, Op_RegF, 24, F24->as_VMReg());
162 reg_def F24_H ( SOC, SOC, Op_RegF, 24, F24->as_VMReg()->next());
163 reg_def F25 ( SOC, SOC, Op_RegF, 25, F25->as_VMReg());
164 reg_def F25_H ( SOC, SOC, Op_RegF, 25, F25->as_VMReg()->next());
165 reg_def F26 ( SOC, SOC, Op_RegF, 26, F26->as_VMReg());
166 reg_def F26_H ( SOC, SOC, Op_RegF, 26, F26->as_VMReg()->next());
167 reg_def F27 ( SOC, SOC, Op_RegF, 27, F27->as_VMReg());
168 reg_def F27_H ( SOC, SOC, Op_RegF, 27, F27->as_VMReg()->next());
169 reg_def F28 ( SOC, SOC, Op_RegF, 28, F28->as_VMReg());
170 reg_def F28_H ( SOC, SOC, Op_RegF, 28, F28->as_VMReg()->next());
171 reg_def F29 ( SOC, SOC, Op_RegF, 29, F29->as_VMReg());
172 reg_def F29_H ( SOC, SOC, Op_RegF, 29, F29->as_VMReg()->next());
173 reg_def F30 ( SOC, SOC, Op_RegF, 30, F30->as_VMReg());
174 reg_def F30_H ( SOC, SOC, Op_RegF, 30, F30->as_VMReg()->next());
175 reg_def F31 ( SOC, SOC, Op_RegF, 31, F31->as_VMReg());
176 reg_def F31_H ( SOC, SOC, Op_RegF, 31, F31->as_VMReg()->next());
179 // ----------------------------
180 // Special Registers
181 // Condition Codes Flag Registers
182 reg_def MIPS_FLAG (SOC, SOC, Op_RegFlags, 1, as_Register(1)->as_VMReg());
183 //S6 is used for get_thread(S6)
184 // S5 is used as the heap base register for compressed oops
185 alloc_class chunk0(
186 S7, S7_H,
187 S0, S0_H,
188 S1, S1_H,
189 S2, S2_H,
190 S4, S4_H,
191 S5, S5_H,
192 S6, S6_H,
193 S3, S3_H,
194 T2, T2_H,
195 T3, T3_H,
196 T8, T8_H,
197 T9, T9_H,
198 T1, T1_H, // inline_cache_reg
199 V1, V1_H,
200 A7, A7_H,
201 A6, A6_H,
202 A5, A5_H,
203 A4, A4_H,
204 V0, V0_H,
205 A3, A3_H,
206 A2, A2_H,
207 A1, A1_H,
208 A0, A0_H,
209 T0, T0_H,
210 GP, GP_H, // fixed: list separator was missing after GP_H
211 RA, RA_H,
212 SP, SP_H, // stack_pointer
213 FP, FP_H // frame_pointer
214 );
216 alloc_class chunk1( F0, F0_H,
217 F1, F1_H,
218 F2, F2_H,
219 F3, F3_H,
220 F4, F4_H,
221 F5, F5_H,
222 F6, F6_H,
223 F7, F7_H,
224 F8, F8_H,
225 F9, F9_H,
226 F10, F10_H,
227 F11, F11_H,
228 F20, F20_H,
229 F21, F21_H,
230 F22, F22_H,
231 F23, F23_H,
232 F24, F24_H,
233 F25, F25_H,
234 F26, F26_H,
235 F27, F27_H,
236 F28, F28_H,
237 F19, F19_H,
238 F18, F18_H,
239 F17, F17_H,
240 F16, F16_H,
241 F15, F15_H,
242 F14, F14_H,
243 F13, F13_H,
244 F12, F12_H,
245 F29, F29_H,
246 F30, F30_H,
247 F31, F31_H);
249 alloc_class chunk2(MIPS_FLAG);
251 reg_class s_reg( S0, S1, S2, S3, S4, S5, S6, S7 );
252 reg_class s0_reg( S0 );
253 reg_class s1_reg( S1 );
254 reg_class s2_reg( S2 );
255 reg_class s3_reg( S3 );
256 reg_class s4_reg( S4 );
257 reg_class s5_reg( S5 );
258 reg_class s6_reg( S6 );
259 reg_class s7_reg( S7 );
261 reg_class t_reg( T0, T1, T2, T3, T8, T9 );
262 reg_class t0_reg( T0 );
263 reg_class t1_reg( T1 );
264 reg_class t2_reg( T2 );
265 reg_class t3_reg( T3 );
266 reg_class t8_reg( T8 );
267 reg_class t9_reg( T9 );
269 reg_class a_reg( A0, A1, A2, A3, A4, A5, A6, A7 );
270 reg_class a0_reg( A0 );
271 reg_class a1_reg( A1 );
272 reg_class a2_reg( A2 );
273 reg_class a3_reg( A3 );
274 reg_class a4_reg( A4 );
275 reg_class a5_reg( A5 );
276 reg_class a6_reg( A6 );
277 reg_class a7_reg( A7 );
279 reg_class v0_reg( V0 );
280 reg_class v1_reg( V1 );
282 reg_class sp_reg( SP, SP_H );
283 reg_class fp_reg( FP, FP_H );
285 reg_class mips_flags(MIPS_FLAG);
287 reg_class v0_long_reg( V0, V0_H );
288 reg_class v1_long_reg( V1, V1_H );
289 reg_class a0_long_reg( A0, A0_H );
290 reg_class a1_long_reg( A1, A1_H );
291 reg_class a2_long_reg( A2, A2_H );
292 reg_class a3_long_reg( A3, A3_H );
293 reg_class a4_long_reg( A4, A4_H );
294 reg_class a5_long_reg( A5, A5_H );
295 reg_class a6_long_reg( A6, A6_H );
296 reg_class a7_long_reg( A7, A7_H );
297 reg_class t0_long_reg( T0, T0_H );
298 reg_class t1_long_reg( T1, T1_H );
299 reg_class t2_long_reg( T2, T2_H );
300 reg_class t3_long_reg( T3, T3_H );
301 reg_class t8_long_reg( T8, T8_H );
302 reg_class t9_long_reg( T9, T9_H );
303 reg_class s0_long_reg( S0, S0_H );
304 reg_class s1_long_reg( S1, S1_H );
305 reg_class s2_long_reg( S2, S2_H );
306 reg_class s3_long_reg( S3, S3_H );
307 reg_class s4_long_reg( S4, S4_H );
308 reg_class s5_long_reg( S5, S5_H );
309 reg_class s6_long_reg( S6, S6_H );
310 reg_class s7_long_reg( S7, S7_H );
312 reg_class int_reg( S7, S0, S1, S2, S4, S3, T8, T2, T3, T1, V1, A7, A6, A5, A4, V0, A3, A2, A1, A0, T0 );
314 reg_class no_Ax_int_reg( S7, S0, S1, S2, S4, S3, T8, T2, T3, T1, V1, V0, T0 );
316 reg_class p_reg(
317 S7, S7_H,
318 S0, S0_H,
319 S1, S1_H,
320 S2, S2_H,
321 S4, S4_H,
322 S3, S3_H,
323 T8, T8_H,
324 T2, T2_H,
325 T3, T3_H,
326 T1, T1_H,
327 A7, A7_H,
328 A6, A6_H,
329 A5, A5_H,
330 A4, A4_H,
331 A3, A3_H,
332 A2, A2_H,
333 A1, A1_H,
334 A0, A0_H,
335 T0, T0_H
336 );
338 reg_class no_T8_p_reg(
339 S7, S7_H,
340 S0, S0_H,
341 S1, S1_H,
342 S2, S2_H,
343 S4, S4_H,
344 S3, S3_H,
345 T2, T2_H,
346 T3, T3_H,
347 T1, T1_H,
348 A7, A7_H,
349 A6, A6_H,
350 A5, A5_H,
351 A4, A4_H,
352 A3, A3_H,
353 A2, A2_H,
354 A1, A1_H,
355 A0, A0_H,
356 T0, T0_H
357 );
359 reg_class long_reg(
360 S7, S7_H,
361 S0, S0_H,
362 S1, S1_H,
363 S2, S2_H,
364 S4, S4_H,
365 S3, S3_H,
366 T8, T8_H,
367 T2, T2_H,
368 T3, T3_H,
369 T1, T1_H,
370 A7, A7_H,
371 A6, A6_H,
372 A5, A5_H,
373 A4, A4_H,
374 A3, A3_H,
375 A2, A2_H,
376 A1, A1_H,
377 A0, A0_H,
378 T0, T0_H
379 );
382 // Floating point registers.
383 // 2012/8/23 Fu: F30/F31 are used as temporary registers in D2I
384 // 2016/12/1 aoqi: F31 are not used as temporary registers in D2I
385 reg_class flt_reg( F0, F1, F2, F3, F4, F5, F6, F7, F8, F9, F10, F11, F12, F13, F14, F15, F16, F17, F18, F19, F20, F21, F22, F23, F24, F25, F26, F27, F28, F29, F31); // fixed: comma was missing between F17 and F18
386 reg_class dbl_reg( F0, F0_H,
387 F1, F1_H,
388 F2, F2_H,
389 F3, F3_H,
390 F4, F4_H,
391 F5, F5_H,
392 F6, F6_H,
393 F7, F7_H,
394 F8, F8_H,
395 F9, F9_H,
396 F10, F10_H,
397 F11, F11_H,
398 F12, F12_H,
399 F13, F13_H,
400 F14, F14_H,
401 F15, F15_H,
402 F16, F16_H,
403 F17, F17_H,
404 F18, F18_H,
405 F19, F19_H,
406 F20, F20_H,
407 F21, F21_H,
408 F22, F22_H,
409 F23, F23_H,
410 F24, F24_H,
411 F25, F25_H,
412 F26, F26_H,
413 F27, F27_H,
414 F28, F28_H,
415 F29, F29_H,
416 F31, F31_H);
418 reg_class flt_arg0( F12 );
419 reg_class dbl_arg0( F12, F12_H );
420 reg_class dbl_arg1( F14, F14_H );
422 %}
424 //----------DEFINITION BLOCK---------------------------------------------------
425 // Define name --> value mappings to inform the ADLC of an integer valued name
426 // Current support includes integer values in the range [0, 0x7FFFFFFF]
427 // Format:
428 // int_def <name> ( <int_value>, <expression>);
429 // Generated Code in ad_<arch>.hpp
430 // #define <name> (<expression>)
431 // // value == <int_value>
432 // Generated code in ad_<arch>.cpp adlc_verification()
433 // assert( <name> == <int_value>, "Expect (<expression>) to equal <int_value>");
434 //
435 definitions %{
436 int_def DEFAULT_COST ( 100, 100);
437 int_def HUGE_COST (1000000, 1000000);
439 // Memory refs are twice as expensive as run-of-the-mill.
440 int_def MEMORY_REF_COST ( 200, DEFAULT_COST * 2);
442 // Branches are even more expensive.
443 int_def BRANCH_COST ( 300, DEFAULT_COST * 3);
444 // we use jr instruction to construct call, so more expensive
445 // by yjl 2/28/2006
446 int_def CALL_COST ( 500, DEFAULT_COST * 5);
447 /*
448 int_def EQUAL ( 1, 1 );
449 int_def NOT_EQUAL ( 2, 2 );
450 int_def GREATER ( 3, 3 );
451 int_def GREATER_EQUAL ( 4, 4 );
452 int_def LESS ( 5, 5 );
453 int_def LESS_EQUAL ( 6, 6 );
454 */
455 %}
459 //----------SOURCE BLOCK-------------------------------------------------------
460 // This is a block of C++ code which provides values, functions, and
461 // definitions necessary in the rest of the architecture description
463 source_hpp %{
464 // Header information of the source block.
465 // Method declarations/definitions which are used outside
466 // the ad-scope can conveniently be defined here.
467 //
468 // To keep related declarations/definitions/uses close together,
469 // we switch between source %{ }% and source_hpp %{ }% freely as needed.
471 class CallStubImpl {
473 //--------------------------------------------------------------
474 //---< Used for optimization in Compile::shorten_branches >---
475 //--------------------------------------------------------------
477 public:
478 // Size of call trampoline stub.
479 static uint size_call_trampoline() {
480 return 0; // no call trampolines on this platform
481 }
483 // number of relocations needed by a call trampoline stub
484 static uint reloc_call_trampoline() {
485 return 0; // no call trampolines on this platform
486 }
487 };
// Sizes and emitters for the exception and deoptimization handler stubs
// appended to each nmethod. The emitters are defined in the source %{ %}
// block below; only the size queries are inline here because output.cpp
// needs them while laying out the code section.
489 class HandlerImpl {
491 public:
493 static int emit_exception_handler(CodeBuffer &cbuf);
494 static int emit_deopt_handler(CodeBuffer& cbuf);
496 static uint size_exception_handler() {
497 // NativeCall instruction size is the same as NativeJump.
498 // exception handler starts out as jump and can be patched to
// a call by deoptimization. (4932387)
500 // Note that this value is also credited (in output.cpp) to
501 // the size of the code section.
502 // return NativeJump::instruction_size;
503 int size = NativeCall::instruction_size;
// Emitters align the stub end to 16 bytes, so the budget must too.
504 return round_to(size, 16);
505 }
507 #ifdef _LP64
508 static uint size_deopt_handler() {
509 int size = NativeCall::instruction_size;
510 return round_to(size, 16);
511 }
512 #else
513 static uint size_deopt_handler() {
514 // NativeCall instruction size is the same as NativeJump.
515 // exception handler starts out as jump and can be patched to
// a call by deoptimization. (4932387)
517 // Note that this value is also credited (in output.cpp) to
518 // the size of the code section.
// NOTE(review): "pushl(); jmp" is x86 wording — confirm this 32-bit
// size is ever used on MIPS.
519 return 5 + NativeJump::instruction_size; // pushl(); jmp;
520 }
521 #endif
522 };
524 %} // end source_hpp
526 source %{
528 #define NO_INDEX 0
529 #define RELOC_IMM64 Assembler::imm_operand
530 #define RELOC_DISP32 Assembler::disp32_operand
533 #define __ _masm.
536 // Emit exception handler code.
537 // Stuff framesize into a register and call a VM stub routine.
// Returns the offset of the handler within the stubs section, or 0 if
// the code buffer could not be expanded.
538 int HandlerImpl::emit_exception_handler(CodeBuffer& cbuf) {
539 // Note that the code buffer's insts_mark is always relative to insts.
540 // That's why we must use the macroassembler to generate a handler.
541 MacroAssembler _masm(&cbuf);
542 address base =
543 __ start_a_stub(size_exception_handler());
544 if (base == NULL) return 0; // CodeBuffer::expand failed
545 int offset = __ offset();
547 __ block_comment("; emit_exception_handler");
// Mark the jump site so the runtime-call relocation attaches to it.
549 cbuf.set_insts_mark();
550 __ relocate(relocInfo::runtime_call_type);
551 __ patchable_jump((address)OptoRuntime::exception_blob()->entry_point());
// Pad to the 16-byte boundary assumed by size_exception_handler().
552 __ align(16);
553 assert(__ offset() - offset <= (int) size_exception_handler(), "overflow");
554 __ end_a_stub();
555 return offset;
556 }
558 // Emit deopt handler code.
// Emits a patchable call to the deopt blob's unpack entry; returns the
// handler's offset within the stubs section, or 0 on buffer expansion
// failure.
559 int HandlerImpl::emit_deopt_handler(CodeBuffer& cbuf) {
560 // Note that the code buffer's insts_mark is always relative to insts.
561 // That's why we must use the macroassembler to generate a handler.
562 MacroAssembler _masm(&cbuf);
563 address base =
564 __ start_a_stub(size_deopt_handler());
566 // FIXME
567 if (base == NULL) return 0; // CodeBuffer::expand failed
568 int offset = __ offset();
570 __ block_comment("; emit_deopt_handler");
// Mark the call site so the runtime-call relocation attaches to it.
572 cbuf.set_insts_mark();
573 __ relocate(relocInfo::runtime_call_type);
574 __ patchable_call(SharedRuntime::deopt_blob()->unpack());
// Pad to the 16-byte boundary assumed by size_deopt_handler().
575 __ align(16);
576 assert(__ offset() - offset <= (int) size_deopt_handler(), "overflow");
577 __ end_a_stub();
578 return offset;
579 }
582 const bool Matcher::match_rule_supported(int opcode) {
583 if (!has_match_rule(opcode))
584 return false;
586 switch (opcode) {
587 //Op_CountLeadingZerosI Op_CountLeadingZerosL can be deleted, all MIPS CPUs support clz & dclz.
588 case Op_CountLeadingZerosI:
589 case Op_CountLeadingZerosL:
590 if (!UseCountLeadingZerosInstruction)
591 return false;
592 break;
593 case Op_CountTrailingZerosI:
594 case Op_CountTrailingZerosL:
595 if (!UseCountTrailingZerosInstruction)
596 return false;
597 break;
598 }
600 return true; // Per default match rules are supported.
601 }
603 //FIXME
604 // emit call stub, compiled java to interpreter
// Emits the static call stub that a compiled static call is re-pointed at
// when it must fall back to the interpreter. The stub materializes a
// (not-yet-resolved) methodOop into S3 and jumps to a patched-in target.
605 void emit_java_to_interp(CodeBuffer &cbuf ) {
606 // Stub is fixed up when the corresponding call is converted from calling
607 // compiled code to calling interpreted code.
// NOTE(review): the two mnemonics below are x86 leftovers describing the
// stub's shape (load constant, then jump); on MIPS it is li48 + jump.
608 // mov rbx,0
609 // jmp -1
611 address mark = cbuf.insts_mark(); // get mark within main instrs section
613 // Note that the code buffer's insts_mark is always relative to insts.
614 // That's why we must use the macroassembler to generate a stub.
615 MacroAssembler _masm(&cbuf);
617 address base =
618 __ start_a_stub(Compile::MAX_stubs_size);
619 if (base == NULL) return; // CodeBuffer::expand failed
620 // static stub relocation stores the instruction address of the call
622 __ relocate(static_stub_Relocation::spec(mark), 0);
624 // static stub relocation also tags the methodOop in the code-stream.
// The 0 placeholder is patched with the real methodOop when resolved.
625 __ patchable_set48(S3, (long)0);
626 // This is recognized as unresolved by relocs/nativeInst/ic code
628 __ relocate(relocInfo::runtime_call_type);
630 cbuf.set_insts_mark();
// -1 is the "unresolved" jump target, patched at call resolution time.
631 address call_pc = (address)-1;
632 __ patchable_jump(call_pc);
633 __ align(16);
634 __ end_a_stub();
635 // Update current stubs pointer and restore code_end.
636 }
638 // size of call stub, compiled java to interpretor
639 uint size_java_to_interp() {
640 int size = 4 * 4 + NativeCall::instruction_size; // sizeof(li48) + NativeCall::instruction_size
641 return round_to(size, 16);
642 }
// Number of relocation entries needed by the compiled-Java-to-interpreter
// call stub: those emitted in emit_java_to_interp plus the ones at the
// Java_Static_Call site.
uint reloc_java_to_interp() {
  return 16;
}
// Return true when the given branch displacement fits the short-branch
// encoding. MIPS conditional branches take a signed 16-bit word offset,
// so anything representable as simm16 qualifies; larger offsets are not
// handled yet.
649 bool Matcher::is_short_branch_offset(int rule, int br_size, int offset) {
650 if( Assembler::is_simm16(offset) ) return true;
651 else {
652 assert(false, "Not implemented yet !" );
653 Unimplemented();
654 }
// Unimplemented() is fatal, but without an explicit return the function
// falls off the end of a non-void function (undefined behavior and a
// compiler warning in product builds).
return false;
655 }
658 // No additional cost for CMOVL.
659 const int Matcher::long_cmove_cost() { return 0; }
// NOTE(review): the SSE2 comment below is an x86 leftover; the effect on
// MIPS is the same — returning ConditionalMoveLimit disables float cmove.
661 // No CMOVF/CMOVD with SSE2
662 const int Matcher::float_cmove_cost() { return ConditionalMoveLimit; }
664 // Does the CPU require late expand (see block.cpp for description of late expand)?
665 const bool Matcher::require_postalloc_expand = false;
667 // Should the Matcher clone shifts on addressing modes, expecting them
668 // to be subsumed into complex addressing expressions or compute them
669 // into registers? True for Intel but false for most RISCs
670 const bool Matcher::clone_shift_expressions = false;
672 // Do we need to mask the count passed to shift instructions or does
673 // the cpu only look at the lower 5/6 bits anyway?
674 const bool Matcher::need_masked_shift_count = false;
// Whether decode of a narrow oop may be folded into an addressing mode.
// MIPS has no complex addressing, so the matcher never folds the decode.
676 bool Matcher::narrow_oop_use_complex_address() {
677 NOT_LP64(ShouldNotCallThis());
678 assert(UseCompressedOops, "only for compressed oops code");
679 return false;
680 }
// Same as above, for compressed class pointers.
682 bool Matcher::narrow_klass_use_complex_address() {
683 NOT_LP64(ShouldNotCallThis());
684 assert(UseCompressedClassPointers, "only for compressed klass code");
685 return false;
686 }
688 // This is UltraSparc specific, true just means we have fast l2f conversion
689 const bool Matcher::convL2FSupported(void) {
690 return true;
691 }
693 // Max vector size in bytes. 0 if not supported.
// This port only supports 8-byte (64-bit) vectors; MaxVectorSize is
// asserted to match.
694 const int Matcher::vector_width_in_bytes(BasicType bt) {
695 assert(MaxVectorSize == 8, "");
696 return 8;
697 }
699 // Vector ideal reg
// Maps a vector size in bytes to its ideal register class; only the
// 8-byte VecD case exists on this platform.
700 const int Matcher::vector_ideal_reg(int size) {
701 assert(MaxVectorSize == 8, "");
702 switch(size) {
703 case 8: return Op_VecD;
704 }
705 ShouldNotReachHere();
706 return 0;
707 }
// NOTE(review): "xmm reg" below is x86 wording; vector shifts are simply
// unsupported here and this query must never be reached.
709 // Only lowest bits of xmm reg are used for vector shift count.
710 const int Matcher::vector_shift_count_ideal_reg(int size) {
711 fatal("vector shift is not supported");
712 return Node::NotAMachineReg;
713 }
715 // Limits on vector size (number of elements) loaded into vector.
716 const int Matcher::max_vector_size(const BasicType bt) {
717 assert(is_java_primitive(bt), "only primitive type vectors");
718 return vector_width_in_bytes(bt)/type2aelembytes(bt);
719 }
721 const int Matcher::min_vector_size(const BasicType bt) {
722 return max_vector_size(bt); // Same as max.
723 }
725 // MIPS supports misaligned vectors store/load? FIXME
726 const bool Matcher::misaligned_vectors_ok() {
727 return false;
728 //return !AlignVector; // can be changed by flag
729 }
731 // Register for DIVI projection of divmodI
// Fused div/mod nodes are not generated on MIPS, so none of these
// projection masks may ever be requested.
732 RegMask Matcher::divI_proj_mask() {
733 ShouldNotReachHere();
734 return RegMask();
735 }
737 // Register for MODI projection of divmodI
738 RegMask Matcher::modI_proj_mask() {
739 ShouldNotReachHere();
740 return RegMask();
741 }
743 // Register for DIVL projection of divmodL
744 RegMask Matcher::divL_proj_mask() {
745 ShouldNotReachHere();
746 return RegMask();
747 }
// Map a register number to its offset within the FPU save area.
749 int Matcher::regnum_to_fpu_offset(int regnum) {
750 return regnum - 32; // The FP registers are in the second chunk
751 }
// A 64-bit constant store is always at least as cheap as two 32-bit ones.
754 const bool Matcher::isSimpleConstant64(jlong value) {
755 // Will one (StoreL ConL) be cheaper than two (StoreI ConI)?.
756 return true;
757 }
760 // Return whether or not this register is ever used as an argument. This
761 // function is used on startup to build the trampoline stubs in generateOptoStub.
762 // Registers not mentioned will be killed by the VM call in the trampoline, and
763 // arguments in those registers not be available to the callee.
764 bool Matcher::can_be_java_arg( int reg ) {
765 /* Refer to: [sharedRuntime_mips_64.cpp] SharedRuntime::java_calling_convention() */
// Integer argument registers: T0 plus A0..A7 (both halves of each pair).
766 if ( reg == T0_num || reg == T0_H_num
767 || reg == A0_num || reg == A0_H_num
768 || reg == A1_num || reg == A1_H_num
769 || reg == A2_num || reg == A2_H_num
770 || reg == A3_num || reg == A3_H_num
771 || reg == A4_num || reg == A4_H_num
772 || reg == A5_num || reg == A5_H_num
773 || reg == A6_num || reg == A6_H_num
774 || reg == A7_num || reg == A7_H_num )
775 return true;
// Floating-point argument registers: F12..F19 (both halves of each pair).
777 if ( reg == F12_num || reg == F12_H_num
778 || reg == F13_num || reg == F13_H_num
779 || reg == F14_num || reg == F14_H_num
780 || reg == F15_num || reg == F15_H_num
781 || reg == F16_num || reg == F16_H_num
782 || reg == F17_num || reg == F17_H_num
783 || reg == F18_num || reg == F18_H_num
784 || reg == F19_num || reg == F19_H_num )
785 return true;
787 return false;
788 }
// An argument register may also be used as a spill target.
790 bool Matcher::is_spillable_arg( int reg ) {
791 return can_be_java_arg(reg);
792 }
// Never use inline-assembly magic-number division for long constants.
794 bool Matcher::use_asm_for_ldiv_by_con( jlong divisor ) {
795 return false;
796 }
798 // Register for MODL projection of divmodL
// Fused div/mod is not generated on MIPS; must never be reached.
799 RegMask Matcher::modL_proj_mask() {
800 ShouldNotReachHere();
801 return RegMask();
802 }
// SP is saved in FP around a method-handle invoke.
804 const RegMask Matcher::method_handle_invoke_SP_save_mask() {
805 return FP_REG_mask();
806 }
808 // MIPS doesn't support AES intrinsics
809 const bool Matcher::pass_original_key_for_aes() {
810 return false;
811 }
// Padding (in bytes) required before a direct leaf call (no FP) so the
// call sequence listed below meets alignment_required().
813 int CallLeafNoFPDirectNode::compute_padding(int current_offset) const {
814 //lui
815 //ori
816 //dsll
817 //ori
819 //jalr
820 //nop
822 return round_to(current_offset, alignment_required()) - current_offset;
823 }
// Same padding computation for a direct leaf call.
825 int CallLeafDirectNode::compute_padding(int current_offset) const {
826 //lui
827 //ori
828 //dsll
829 //ori
831 //jalr
832 //nop
834 return round_to(current_offset, alignment_required()) - current_offset;
835 }
// Same padding computation for a direct runtime call.
837 int CallRuntimeDirectNode::compute_padding(int current_offset) const {
838 //lui
839 //ori
840 //dsll
841 //ori
843 //jalr
844 //nop
846 return round_to(current_offset, alignment_required()) - current_offset;
847 }
849 // If CPU can load and store mis-aligned doubles directly then no fixup is
850 // needed. Else we split the double into 2 integer pieces and move it
851 // piece-by-piece. Only happens when passing doubles into C code as the
852 // Java calling convention forces doubles to be aligned.
853 const bool Matcher::misaligned_doubles_ok = false;
854 // Do floats take an entire double register or just half?
855 //const bool Matcher::float_in_double = true;
856 bool Matcher::float_in_double() { return false; }
857 // Threshold size for cleararray.
858 const int Matcher::init_array_short_size = 8 * BytesPerLong;
859 // Do ints take an entire long register or just half?
860 const bool Matcher::int_in_long = true;
861 // Is it better to copy float constants, or load them directly from memory?
862 // Intel can load a float constant from a direct address, requiring no
863 // extra registers. Most RISCs will have to materialize an address into a
864 // register first, so they would do better to copy the constant from stack.
865 const bool Matcher::rematerialize_float_constants = false;
866 // Advertise here if the CPU requires explicit rounding operations
867 // to implement the UseStrictFP mode.
868 const bool Matcher::strict_fp_requires_explicit_rounding = false;
// NOTE(review): "ecx parameter to rep stos" is x86 wording; the flag only
// says the ClearArray count is in words, not bytes.
869 // The ecx parameter to rep stos for the ClearArray node is in dwords.
870 const bool Matcher::init_array_count_is_in_bytes = false;
873 // Indicate if the safepoint node needs the polling page as an input.
// NOTE(review): the code returns false, i.e. the polling-page address is
// NOT passed as an input (it is presumably materialized by the emitted
// instructions themselves); the original comment claimed the opposite
// ("Since MIPS doesn't have absolute addressing, it needs") — confirm
// which is intended.
875 bool SafePointNode::needs_polling_address_input() {
876 return false;
877 }
879 // !!!!! Special hack to get all type of calls to specify the byte offset
880 // from the start of the call to the point where the return address
881 // will point.
// Static call sequence is six 4-byte instructions, so the return address
// lands 24 bytes after the call start.
882 int MachCallStaticJavaNode::ret_addr_offset() {
883 //lui
884 //ori
885 //nop
886 //nop
887 //jalr
888 //nop
889 return 24;
890 }
// Dynamic call: four instructions to materialize the IC klass plus six
// for the call itself = 10 instructions * 4 bytes.
892 int MachCallDynamicJavaNode::ret_addr_offset() {
893 //lui IC_Klass,
894 //ori IC_Klass,
895 //dsll IC_Klass
896 //ori IC_Klass
898 //lui T9
899 //ori T9
900 //nop
901 //nop
902 //jalr T9
903 //nop
904 return 4 * 4 + 4 * 6;
905 }
907 //=============================================================================
909 // Figure out which register class each belongs in: rc_int, rc_float, rc_stack
910 enum RC { rc_bad, rc_int, rc_float, rc_stack };
// Classify an allocator register name: invalid -> rc_bad, stack slot ->
// rc_stack, GPR -> rc_int, otherwise it must be an FPR -> rc_float.
911 static enum RC rc_class( OptoReg::Name reg ) {
912 if( !OptoReg::is_valid(reg) ) return rc_bad;
913 if (OptoReg::is_stack(reg)) return rc_stack;
914 VMReg r = OptoReg::as_VMReg(reg);
915 if (r->is_Register()) return rc_int;
916 assert(r->is_FloatRegister(), "must be");
917 return rc_float;
918 }
// Emit, describe, or measure a spill copy between two register-allocator
// locations (stack slot, integer register, float register).
// Exactly one of three modes is active per call:
//   cbuf != NULL           : emit the instructions into the code buffer
//   cbuf == NULL, !do_size : print an assembly-like listing to 'st'
//                            (listing code is compiled out in PRODUCT builds)
//   cbuf == NULL, do_size  : only accumulate and return the byte size
// Returns the number of code bytes emitted/required; 0 for a self copy.
// AT and T9 are used as scratch registers in the memory paths.
// A 64-bit (two-slot) value is recognized by an even first register whose
// pair immediately follows it: (first & 1) == 0 && first + 1 == second.
uint MachSpillCopyNode::implementation( CodeBuffer *cbuf, PhaseRegAlloc *ra_, bool do_size, outputStream* st ) const {
  // Get registers to move
  OptoReg::Name src_second = ra_->get_reg_second(in(1));
  OptoReg::Name src_first = ra_->get_reg_first(in(1));
  OptoReg::Name dst_second = ra_->get_reg_second(this );
  OptoReg::Name dst_first = ra_->get_reg_first(this );

  enum RC src_second_rc = rc_class(src_second);
  enum RC src_first_rc = rc_class(src_first);
  enum RC dst_second_rc = rc_class(dst_second);
  enum RC dst_first_rc = rc_class(dst_first);

  assert(OptoReg::is_valid(src_first) && OptoReg::is_valid(dst_first), "must move at least 1 register" );

  // Generate spill code!
  int size = 0;

  if( src_first == dst_first && src_second == dst_second )
    return 0;            // Self copy, no move

  if (src_first_rc == rc_stack) {
    // mem ->
    if (dst_first_rc == rc_stack) {
      // mem -> mem: bounce through scratch register AT
      assert(src_second != dst_first, "overlap");
      if ((src_first & 1) == 0 && src_first + 1 == src_second &&
          (dst_first & 1) == 0 && dst_first + 1 == dst_second) {
        // 64-bit
        int src_offset = ra_->reg2offset(src_first);
        int dst_offset = ra_->reg2offset(dst_first);
        if (cbuf) {
          MacroAssembler _masm(cbuf);
          __ ld(AT, Address(SP, src_offset));
          __ sd(AT, Address(SP, dst_offset));
#ifndef PRODUCT
        } else {
          if(!do_size){
            if (size != 0) st->print("\n\t");
            st->print("ld AT, [SP + #%d]\t# 64-bit mem-mem spill 1\n\t"
                      "sd AT, [SP + #%d]",
                      src_offset, dst_offset);
          }
#endif
        }
        size += 8;   // two 4-byte instructions
      } else {
        // 32-bit
        assert(!((src_first & 1) == 0 && src_first + 1 == src_second), "no transform");
        assert(!((dst_first & 1) == 0 && dst_first + 1 == dst_second), "no transform");
        // No pushl/popl, so:
        int src_offset = ra_->reg2offset(src_first);
        int dst_offset = ra_->reg2offset(dst_first);
        if (cbuf) {
          MacroAssembler _masm(cbuf);
          __ lw(AT, Address(SP, src_offset));
          __ sw(AT, Address(SP, dst_offset));
#ifndef PRODUCT
        } else {
          if(!do_size){
            if (size != 0) st->print("\n\t");
            st->print("lw AT, [SP + #%d] spill 2\n\t"
                      "sw AT, [SP + #%d]\n\t",
                      src_offset, dst_offset);
          }
#endif
        }
        size += 8;
      }
      return size;
    } else if (dst_first_rc == rc_int) {
      // mem -> gpr
      if ((src_first & 1) == 0 && src_first + 1 == src_second &&
          (dst_first & 1) == 0 && dst_first + 1 == dst_second) {
        // 64-bit
        int offset = ra_->reg2offset(src_first);
        if (cbuf) {
          MacroAssembler _masm(cbuf);
          __ ld(as_Register(Matcher::_regEncode[dst_first]), Address(SP, offset));
#ifndef PRODUCT
        } else {
          if(!do_size){
            if (size != 0) st->print("\n\t");
            st->print("ld %s, [SP + #%d]\t# spill 3",
                      Matcher::regName[dst_first],
                      offset);
          }
#endif
        }
        size += 4;
      } else {
        // 32-bit: lw (sign-extend) for ints, lwu (zero-extend) otherwise
        assert(!((src_first & 1) == 0 && src_first + 1 == src_second), "no transform");
        assert(!((dst_first & 1) == 0 && dst_first + 1 == dst_second), "no transform");
        int offset = ra_->reg2offset(src_first);
        if (cbuf) {
          MacroAssembler _masm(cbuf);
          if (this->ideal_reg() == Op_RegI)
            __ lw(as_Register(Matcher::_regEncode[dst_first]), Address(SP, offset));
          else
            __ lwu(as_Register(Matcher::_regEncode[dst_first]), Address(SP, offset));
#ifndef PRODUCT
        } else {
          if(!do_size){
            if (size != 0) st->print("\n\t");
            if (this->ideal_reg() == Op_RegI)
              st->print("lw %s, [SP + #%d]\t# spill 4",
                        Matcher::regName[dst_first],
                        offset);
            else
              st->print("lwu %s, [SP + #%d]\t# spill 5",
                        Matcher::regName[dst_first],
                        offset);
          }
#endif
        }
        size += 4;
      }
      return size;
    } else if (dst_first_rc == rc_float) {
      // mem-> xmm (FPU register)
      if ((src_first & 1) == 0 && src_first + 1 == src_second &&
          (dst_first & 1) == 0 && dst_first + 1 == dst_second) {
        // 64-bit
        int offset = ra_->reg2offset(src_first);
        if (cbuf) {
          MacroAssembler _masm(cbuf);
          __ ldc1( as_FloatRegister(Matcher::_regEncode[dst_first]), Address(SP, offset));
#ifndef PRODUCT
        } else {
          if(!do_size){
            if (size != 0) st->print("\n\t");
            st->print("ldc1 %s, [SP + #%d]\t# spill 6",
                      Matcher::regName[dst_first],
                      offset);
          }
#endif
        }
        size += 4;
      } else {
        // 32-bit
        assert(!((src_first & 1) == 0 && src_first + 1 == src_second), "no transform");
        assert(!((dst_first & 1) == 0 && dst_first + 1 == dst_second), "no transform");
        int offset = ra_->reg2offset(src_first);
        if (cbuf) {
          MacroAssembler _masm(cbuf);
          __ lwc1( as_FloatRegister(Matcher::_regEncode[dst_first]), Address(SP, offset));
#ifndef PRODUCT
        } else {
          if(!do_size){
            if (size != 0) st->print("\n\t");
            st->print("lwc1 %s, [SP + #%d]\t# spill 7",
                      Matcher::regName[dst_first],
                      offset);
          }
#endif
        }
        size += 4;
      }
      return size;
    }
  } else if (src_first_rc == rc_int) {
    // gpr ->
    if (dst_first_rc == rc_stack) {
      // gpr -> mem
      if ((src_first & 1) == 0 && src_first + 1 == src_second &&
          (dst_first & 1) == 0 && dst_first + 1 == dst_second) {
        // 64-bit
        int offset = ra_->reg2offset(dst_first);
        if (cbuf) {
          MacroAssembler _masm(cbuf);
          __ sd(as_Register(Matcher::_regEncode[src_first]), Address(SP, offset));
#ifndef PRODUCT
        } else {
          if(!do_size){
            if (size != 0) st->print("\n\t");
            st->print("sd %s, [SP + #%d] # spill 8",
                      Matcher::regName[src_first],
                      offset);
          }
#endif
        }
        size += 4;
      } else {
        // 32-bit
        assert(!((src_first & 1) == 0 && src_first + 1 == src_second), "no transform");
        assert(!((dst_first & 1) == 0 && dst_first + 1 == dst_second), "no transform");
        int offset = ra_->reg2offset(dst_first);
        if (cbuf) {
          MacroAssembler _masm(cbuf);
          __ sw(as_Register(Matcher::_regEncode[src_first]), Address(SP, offset));
#ifndef PRODUCT
        } else {
          if(!do_size){
            if (size != 0) st->print("\n\t");
            st->print("sw %s, [SP + #%d]\t# spill 9",
                      Matcher::regName[src_first], offset);
          }
#endif
        }
        size += 4;
      }
      return size;
    } else if (dst_first_rc == rc_int) {
      // gpr -> gpr
      if ((src_first & 1) == 0 && src_first + 1 == src_second &&
          (dst_first & 1) == 0 && dst_first + 1 == dst_second) {
        // 64-bit
        if (cbuf) {
          MacroAssembler _masm(cbuf);
          __ move(as_Register(Matcher::_regEncode[dst_first]),
                  as_Register(Matcher::_regEncode[src_first]));
#ifndef PRODUCT
        } else {
          if(!do_size){
            if (size != 0) st->print("\n\t");
            st->print("move(64bit) %s <-- %s\t# spill 10",
                      Matcher::regName[dst_first],
                      Matcher::regName[src_first]);
          }
#endif
        }
        size += 4;
        return size;
      } else {
        // 32-bit: move_u32 for ints, full-width daddu-with-zero otherwise
        // (see MacroAssembler for move_u32's exact extension behavior)
        assert(!((src_first & 1) == 0 && src_first + 1 == src_second), "no transform");
        assert(!((dst_first & 1) == 0 && dst_first + 1 == dst_second), "no transform");
        if (cbuf) {
          MacroAssembler _masm(cbuf);
          if (this->ideal_reg() == Op_RegI)
            __ move_u32(as_Register(Matcher::_regEncode[dst_first]), as_Register(Matcher::_regEncode[src_first]));
          else
            __ daddu(as_Register(Matcher::_regEncode[dst_first]), as_Register(Matcher::_regEncode[src_first]), R0);
#ifndef PRODUCT
        } else {
          if(!do_size){
            if (size != 0) st->print("\n\t");
            st->print("move(32-bit) %s <-- %s\t# spill 11",
                      Matcher::regName[dst_first],
                      Matcher::regName[src_first]);
          }
#endif
        }
        size += 4;
        return size;
      }
    } else if (dst_first_rc == rc_float) {
      // gpr -> xmm (FPU register)
      if ((src_first & 1) == 0 && src_first + 1 == src_second &&
          (dst_first & 1) == 0 && dst_first + 1 == dst_second) {
        // 64-bit
        if (cbuf) {
          MacroAssembler _masm(cbuf);
          __ dmtc1(as_Register(Matcher::_regEncode[src_first]), as_FloatRegister(Matcher::_regEncode[dst_first]));
#ifndef PRODUCT
        } else {
          if(!do_size){
            if (size != 0) st->print("\n\t");
            st->print("dmtc1 %s, %s\t# spill 12",
                      Matcher::regName[dst_first],
                      Matcher::regName[src_first]);
          }
#endif
        }
        size += 4;
      } else {
        // 32-bit
        assert(!((src_first & 1) == 0 && src_first + 1 == src_second), "no transform");
        assert(!((dst_first & 1) == 0 && dst_first + 1 == dst_second), "no transform");
        if (cbuf) {
          MacroAssembler _masm(cbuf);
          __ mtc1( as_Register(Matcher::_regEncode[src_first]), as_FloatRegister(Matcher::_regEncode[dst_first]) );
#ifndef PRODUCT
        } else {
          if(!do_size){
            if (size != 0) st->print("\n\t");
            st->print("mtc1 %s, %s\t# spill 13",
                      Matcher::regName[dst_first],
                      Matcher::regName[src_first]);
          }
#endif
        }
        size += 4;
      }
      return size;
    }
  } else if (src_first_rc == rc_float) {
    // xmm (FPU register) ->
    if (dst_first_rc == rc_stack) {
      // xmm -> mem
      if ((src_first & 1) == 0 && src_first + 1 == src_second &&
          (dst_first & 1) == 0 && dst_first + 1 == dst_second) {
        // 64-bit
        int offset = ra_->reg2offset(dst_first);
        if (cbuf) {
          MacroAssembler _masm(cbuf);
          __ sdc1( as_FloatRegister(Matcher::_regEncode[src_first]), Address(SP, offset) );
#ifndef PRODUCT
        } else {
          if(!do_size){
            if (size != 0) st->print("\n\t");
            st->print("sdc1 %s, [SP + #%d]\t# spill 14",
                      Matcher::regName[src_first],
                      offset);
          }
#endif
        }
        size += 4;
      } else {
        // 32-bit
        assert(!((src_first & 1) == 0 && src_first + 1 == src_second), "no transform");
        assert(!((dst_first & 1) == 0 && dst_first + 1 == dst_second), "no transform");
        int offset = ra_->reg2offset(dst_first);
        if (cbuf) {
          MacroAssembler _masm(cbuf);
          __ swc1(as_FloatRegister(Matcher::_regEncode[src_first]), Address(SP, offset));
#ifndef PRODUCT
        } else {
          if(!do_size){
            if (size != 0) st->print("\n\t");
            st->print("swc1 %s, [SP + #%d]\t# spill 15",
                      Matcher::regName[src_first],
                      offset);
          }
#endif
        }
        size += 4;
      }
      return size;
    } else if (dst_first_rc == rc_int) {
      // xmm -> gpr
      if ((src_first & 1) == 0 && src_first + 1 == src_second &&
          (dst_first & 1) == 0 && dst_first + 1 == dst_second) {
        // 64-bit
        if (cbuf) {
          MacroAssembler _masm(cbuf);
          __ dmfc1( as_Register(Matcher::_regEncode[dst_first]), as_FloatRegister(Matcher::_regEncode[src_first]));
#ifndef PRODUCT
        } else {
          if(!do_size){
            if (size != 0) st->print("\n\t");
            st->print("dmfc1 %s, %s\t# spill 16",
                      Matcher::regName[dst_first],
                      Matcher::regName[src_first]);
          }
#endif
        }
        size += 4;
      } else {
        // 32-bit
        assert(!((src_first & 1) == 0 && src_first + 1 == src_second), "no transform");
        assert(!((dst_first & 1) == 0 && dst_first + 1 == dst_second), "no transform");
        if (cbuf) {
          MacroAssembler _masm(cbuf);
          __ mfc1( as_Register(Matcher::_regEncode[dst_first]), as_FloatRegister(Matcher::_regEncode[src_first]));
#ifndef PRODUCT
        } else {
          if(!do_size){
            if (size != 0) st->print("\n\t");
            st->print("mfc1 %s, %s\t# spill 17",
                      Matcher::regName[dst_first],
                      Matcher::regName[src_first]);
          }
#endif
        }
        size += 4;
      }
      return size;
    } else if (dst_first_rc == rc_float) {
      // xmm -> xmm
      if ((src_first & 1) == 0 && src_first + 1 == src_second &&
          (dst_first & 1) == 0 && dst_first + 1 == dst_second) {
        // 64-bit
        if (cbuf) {
          MacroAssembler _masm(cbuf);
          __ mov_d( as_FloatRegister(Matcher::_regEncode[dst_first]), as_FloatRegister(Matcher::_regEncode[src_first]));
#ifndef PRODUCT
        } else {
          if(!do_size){
            if (size != 0) st->print("\n\t");
            st->print("mov_d %s <-- %s\t# spill 18",
                      Matcher::regName[dst_first],
                      Matcher::regName[src_first]);
          }
#endif
        }
        size += 4;
      } else {
        // 32-bit
        assert(!((src_first & 1) == 0 && src_first + 1 == src_second), "no transform");
        assert(!((dst_first & 1) == 0 && dst_first + 1 == dst_second), "no transform");
        if (cbuf) {
          MacroAssembler _masm(cbuf);
          __ mov_s( as_FloatRegister(Matcher::_regEncode[dst_first]), as_FloatRegister(Matcher::_regEncode[src_first]));
#ifndef PRODUCT
        } else {
          if(!do_size){
            if (size != 0) st->print("\n\t");
            st->print("mov_s %s <-- %s\t# spill 19",
                      Matcher::regName[dst_first],
                      Matcher::regName[src_first]);
          }
#endif
        }
        size += 4;
      }
      return size;
    }
  }

  // Fall-through: register-class combination not handled above
  // (e.g. a rc_bad destination).  Should never happen.
  assert(0," foo ");
  Unimplemented();
  return size;

}
#ifndef PRODUCT
// Listing mode: print the spill copy without emitting code.
void MachSpillCopyNode::format( PhaseRegAlloc *ra_, outputStream* st ) const {
  implementation( NULL, ra_, false, st );
}
#endif

// Emit mode: generate the spill-copy instructions into 'cbuf'.
void MachSpillCopyNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
  implementation( &cbuf, ra_, false, NULL );
}

// Size mode: return the byte size of the spill copy without emitting.
uint MachSpillCopyNode::size(PhaseRegAlloc *ra_) const {
  return implementation( NULL, ra_, true, NULL );
}
1351 //=============================================================================
1352 #
#ifndef PRODUCT
void MachBreakpointNode::format( PhaseRegAlloc *, outputStream* st ) const {
  st->print("INT3");
}
#endif

// Emit a breakpoint (MacroAssembler::int3 on this port).
void MachBreakpointNode::emit(CodeBuffer &cbuf, PhaseRegAlloc* ra_) const {
  MacroAssembler _masm(&cbuf);
  __ int3();
}

uint MachBreakpointNode::size(PhaseRegAlloc* ra_) const {
  // Variable-size node: let the generic implementation measure it.
  return MachNode::size(ra_);
}
1370 //=============================================================================
1371 #ifndef PRODUCT
1372 void MachEpilogNode::format( PhaseRegAlloc *ra_, outputStream* st ) const {
1373 Compile *C = ra_->C;
1374 int framesize = C->frame_size_in_bytes();
1376 assert((framesize & (StackAlignmentInBytes-1)) == 0, "frame size not aligned");
1378 st->print("daddiu SP, SP, %d # Rlease stack @ MachEpilogNode",framesize);
1379 st->cr(); st->print("\t");
1380 if (UseLoongsonISA) {
1381 st->print("gslq RA, FP, SP, %d # Restore FP & RA @ MachEpilogNode", -wordSize*2);
1382 } else {
1383 st->print("ld RA, SP, %d # Restore RA @ MachEpilogNode", -wordSize);
1384 st->cr(); st->print("\t");
1385 st->print("ld FP, SP, %d # Restore FP @ MachEpilogNode", -wordSize*2);
1386 }
1388 if( do_polling() && C->is_method_compilation() ) {
1389 st->print("Poll Safepoint # MachEpilogNode");
1390 }
1391 }
1392 #endif
// Emit the method epilogue: pop the frame, restore RA and FP from just
// above the new SP, and (for method compilations that poll) read the
// polling page to trigger a safepoint on return.
void MachEpilogNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
  Compile *C = ra_->C;
  MacroAssembler _masm(&cbuf);
  int framesize = C->frame_size_in_bytes();

  assert((framesize & (StackAlignmentInBytes-1)) == 0, "frame size not aligned");

  // Release the frame.
  __ daddiu(SP, SP, framesize);

  // Restore RA/FP; gslq loads the pair in one instruction on Loongson.
  if (UseLoongsonISA) {
    __ gslq(RA, FP, SP, -wordSize*2);
  } else {
    __ ld(RA, SP, -wordSize );
    __ ld(FP, SP, -wordSize*2 );
  }

  if( do_polling() && C->is_method_compilation() ) {
    // Return-poll: load from the polling page; the relocation marks the
    // access for the safepoint mechanism.
    __ set64(AT, (long)os::get_polling_page());
    __ relocate(relocInfo::poll_return_type);
    __ lw(AT, AT, 0);
  }
}
uint MachEpilogNode::size(PhaseRegAlloc *ra_) const {
  // Too many variables (polling, ISA variant); just compute it the hard way.
  return MachNode::size(ra_);
}

int MachEpilogNode::reloc() const {
  return 0; // a large enough number
}

const Pipeline * MachEpilogNode::pipeline() const {
  return MachNode::pipeline_class();
}

// Offset of the safepoint poll within the epilogue; 0 on this port.
int MachEpilogNode::safepoint_offset() const { return 0; }
1431 //=============================================================================
#ifndef PRODUCT
// Print the box-lock address computation: reg = SP + offset of the
// monitor box stack slot.
void BoxLockNode::format( PhaseRegAlloc *ra_, outputStream* st ) const {
  int offset = ra_->reg2offset(in_RegMask(0).find_first_elem());
  int reg = ra_->get_reg_first(this);
  st->print("ADDI %s, SP, %d @BoxLockNode",Matcher::regName[reg],offset);
}
#endif

uint BoxLockNode::size(PhaseRegAlloc *ra_) const {
  // One 4-byte addi (see BoxLockNode::emit).  NOTE(review): this assumes
  // the slot offset always fits in the instruction's immediate -- confirm.
  return 4;
}
1446 void BoxLockNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
1447 MacroAssembler _masm(&cbuf);
1448 int offset = ra_->reg2offset(in_RegMask(0).find_first_elem());
1449 int reg = ra_->get_encode(this);
1451 __ addi(as_Register(reg), SP, offset);
1452 /*
1453 if( offset >= 128 ) {
1454 emit_opcode(cbuf, 0x8D); // LEA reg,[SP+offset]
1455 emit_rm(cbuf, 0x2, reg, 0x04);
1456 emit_rm(cbuf, 0x0, 0x04, SP_enc);
1457 emit_d32(cbuf, offset);
1458 }
1459 else {
1460 emit_opcode(cbuf, 0x8D); // LEA reg,[SP+offset]
1461 emit_rm(cbuf, 0x1, reg, 0x04);
1462 emit_rm(cbuf, 0x0, 0x04, SP_enc);
1463 emit_d8(cbuf, offset);
1464 }
1465 */
1466 }
1469 //static int sizeof_FFree_Float_Stack_All = -1;
// Offset from the start of a runtime-call site to its return address:
// the full NativeCall sequence (lui/ori/dsll/ori/jalr/nop = 24 bytes).
int MachCallRuntimeNode::ret_addr_offset() {
  //lui
  //ori
  //dsll
  //ori
  //jalr
  //nop
  assert(NativeCall::instruction_size == 24, "in MachCallRuntimeNode::ret_addr_offset()");
  return NativeCall::instruction_size;
}
1487 //=============================================================================
#ifndef PRODUCT
// Describe the nop padding; _count is the number of 4-byte nops.
void MachNopNode::format( PhaseRegAlloc *, outputStream* st ) const {
  st->print("NOP \t# %d bytes pad for loops and calls", 4 * _count);
}
#endif
1494 void MachNopNode::emit(CodeBuffer &cbuf, PhaseRegAlloc * ) const {
1495 MacroAssembler _masm(&cbuf);
1496 int i = 0;
1497 for(i = 0; i < _count; i++)
1498 __ nop();
1499 }
// Each nop is one 4-byte instruction.
uint MachNopNode::size(PhaseRegAlloc *) const {
  return 4 * _count;
}
const Pipeline* MachNopNode::pipeline() const {
  return MachNode::pipeline_class();
}
1508 //=============================================================================
1510 //=============================================================================
#ifndef PRODUCT
// Print the unverified entry point: inline-cache check that falls through
// to L on a hit and jumps to the IC-miss stub otherwise.
void MachUEPNode::format( PhaseRegAlloc *ra_, outputStream* st ) const {
  st->print_cr("load_klass(T9, T0)");
  st->print_cr("\tbeq(T9, iCache, L)");
  st->print_cr("\tnop");
  st->print_cr("\tjmp(SharedRuntime::get_ic_miss_stub(), relocInfo::runtime_call_type)");
  st->print_cr("\tnop");
  st->print_cr("\tnop");
  st->print_cr(" L:");
}
#endif
// Emit the unverified entry point: compare the receiver's klass (loaded
// into T9 from receiver T0) against the inline-cache register; on mismatch
// jump to the shared IC-miss stub, otherwise fall through to the verified
// entry at label L, which is aligned for NativeJump patching.
void MachUEPNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
  MacroAssembler _masm(&cbuf);
#ifdef ASSERT
  //uint code_size = cbuf.code_size();
#endif
  int ic_reg = Matcher::inline_cache_reg_encode();
  Label L;
  Register receiver = T0;
  Register iCache = as_Register(ic_reg);
  __ load_klass(T9, receiver);
  __ beq(T9, iCache, L);
  __ nop();          // branch delay slot

  __ relocate(relocInfo::runtime_call_type);
  __ patchable_jump((address)SharedRuntime::get_ic_miss_stub());

  /* WARNING these NOPs are critical so that verified entry point is properly
   * 8 bytes aligned for patching by NativeJump::patch_verified_entry() */
  __ align(CodeEntryAlignment);
  __ bind(L);
}
uint MachUEPNode::size(PhaseRegAlloc *ra_) const {
  // Variable-size (alignment padding); measure via the generic path.
  return MachNode::size(ra_);
}
1552 //=============================================================================
// The constant-table base may live in any register of the P class.
const RegMask& MachConstantBaseNode::_out_RegMask = P_REG_mask();

int Compile::ConstantTable::calculate_table_base_offset() const {
  return 0;  // absolute addressing, no offset
}

// This port emits the base materialization directly; no post-allocation
// expansion is needed (and must never be requested).
bool MachConstantBaseNode::requires_postalloc_expand() const { return false; }
void MachConstantBaseNode::postalloc_expand(GrowableArray <Node *> *nodes, PhaseRegAlloc *ra_) {
  ShouldNotReachHere();
}
// Materialize the constant-table base address into this node's register
// with a patchable 48-bit immediate sequence, tagged with an internal-pc
// relocation.  Nothing is emitted if the constants section is empty.
void MachConstantBaseNode::emit(CodeBuffer& cbuf, PhaseRegAlloc* ra_) const {
  Compile* C = ra_->C;
  Compile::ConstantTable& constant_table = C->constant_table();
  MacroAssembler _masm(&cbuf);

  Register Rtoc = as_Register(ra_->get_encode(this));
  CodeSection* consts_section = __ code()->consts();
  int consts_size = consts_section->align_at_start(consts_section->size());
  assert(constant_table.size() == consts_size, "must be equal");

  if (consts_section->size()) {
    // Materialize the constant table base.
    address baseaddr = consts_section->start() + -(constant_table.table_base_offset());
    // RelocationHolder rspec = internal_word_Relocation::spec(baseaddr);
    __ relocate(relocInfo::internal_pc_type);
    __ patchable_set48(Rtoc, (long)baseaddr);
  }
}

uint MachConstantBaseNode::size(PhaseRegAlloc* ra_) const {
  // patchable_set48 (4 insts)
  return 4 * 4;
}
#ifndef PRODUCT
// Print the constant-table base materialization (see emit()).
void MachConstantBaseNode::format(PhaseRegAlloc* ra_, outputStream* st) const {
  Register r = as_Register(ra_->get_encode(this));
  st->print("patchable_set48 %s, &constanttable (constant table base) @ MachConstantBaseNode", r->name());
}
#endif
1597 //=============================================================================
#ifndef PRODUCT
// Print the prologue sequence mirrored by MachPrologNode::emit():
// optional stack bang, save RA/FP below SP (paired gssq on Loongson),
// establish FP, then allocate the frame.
void MachPrologNode::format( PhaseRegAlloc *ra_, outputStream* st ) const {
  Compile* C = ra_->C;

  int framesize = C->frame_size_in_bytes();
  int bangsize = C->bang_size_in_bytes();
  assert((framesize & (StackAlignmentInBytes-1)) == 0, "frame size not aligned");

  // Calls to C2R adapters often do not accept exceptional returns.
  // We require that their callers must bang for them.  But be careful,
  // because some VM calls (such as call site linkage) can use several
  // kilobytes of stack.  But the stack safety zone should account for that.
  // See bugs 4446381, 4468289, 4497237.
  if (C->need_stack_bang(bangsize)) {
    st->print_cr("# stack bang"); st->print("\t");
  }
  if (UseLoongsonISA) {
    st->print("gssq RA, FP, %d(SP) @ MachPrologNode\n\t", -wordSize*2);
  } else {
    st->print("sd RA, %d(SP) @ MachPrologNode\n\t", -wordSize);
    st->print("sd FP, %d(SP) @ MachPrologNode\n\t", -wordSize*2);
  }
  st->print("daddiu FP, SP, -%d \n\t", wordSize*2);
  st->print("daddiu SP, SP, -%d \t",framesize);
}
#endif
// Emit the method prologue: optional stack-overflow bang, save RA and FP
// below the incoming SP, establish the new FP, allocate the frame, and
// leave two nops so NativeJump::patch_verified_entry() has room to patch.
void MachPrologNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
  Compile* C = ra_->C;
  MacroAssembler _masm(&cbuf);

  int framesize = C->frame_size_in_bytes();
  int bangsize = C->bang_size_in_bytes();

  assert((framesize & (StackAlignmentInBytes-1)) == 0, "frame size not aligned");

  if (C->need_stack_bang(bangsize)) {
    __ generate_stack_overflow_check(bangsize);
  }

  // Save RA/FP; gssq stores the pair in one instruction on Loongson.
  if (UseLoongsonISA) {
    __ gssq(RA, FP, SP, -wordSize*2);
  } else {
    __ sd(RA, SP, -wordSize);
    __ sd(FP, SP, -wordSize*2);
  }
  __ daddiu(FP, SP, -wordSize*2);
  __ daddiu(SP, SP, -framesize);
  __ nop(); /* Make enough room for patch_verified_entry() */
  __ nop();

  C->set_frame_complete(cbuf.insts_size());
  if (C->has_mach_constant_base_node()) {
    // NOTE: We set the table base offset here because users might be
    // emitted before MachConstantBaseNode.
    Compile::ConstantTable& constant_table = C->constant_table();
    constant_table.set_table_base_offset(constant_table.calculate_table_base_offset());
  }

}
uint MachPrologNode::size(PhaseRegAlloc *ra_) const {
  return MachNode::size(ra_); // too many variables; just compute it the hard way
}

int MachPrologNode::reloc() const {
  return 0; // a large enough number
}
1669 %}
1671 //----------ENCODING BLOCK-----------------------------------------------------
1672 // This block specifies the encoding classes used by the compiler to output
1673 // byte streams. Encoding classes generate functions which are called by
1674 // Machine Instruction Nodes in order to generate the bit encoding of the
1675 // instruction. Operands specify their base encoding interface with the
1676 // interface keyword. There are currently supported four interfaces,
1677 // REG_INTER, CONST_INTER, MEMORY_INTER, & COND_INTER. REG_INTER causes an
1678 // operand to generate a function which returns its register number when
1679 // queried. CONST_INTER causes an operand to generate a function which
1680 // returns the value of the constant when queried. MEMORY_INTER causes an
1681 // operand to generate four functions which return the Base Register, the
1682 // Index Register, the Scale Value, and the Offset Value of the operand when
1683 // queried. COND_INTER causes an operand to generate six functions which
1684 // return the encoding code (ie - encoding bits for the instruction)
1685 // associated with each basic boolean condition for a conditional instruction.
1686 // Instructions specify two basic values for encoding. They use the
1687 // ins_encode keyword to specify their encoding class (which must be one of
1688 // the class names specified in the encoding block), and they use the
1689 // opcode keyword to specify, in order, their primary, secondary, and
1690 // tertiary opcode. Only the opcode sections which a particular instruction
1691 // needs for encoding need to be specified.
1692 encode %{
1694 //Load byte signed
  // Load byte signed (lb / Loongson gslbx).
  // Address = base + (index << scale) + disp; AT and T9 are scratch.
  // The shape dispatches on: indexed vs. non-indexed, disp fitting in a
  // signed 16-bit immediate, and availability of the Loongson extended
  // indexed-load instruction.
  enc_class load_B_enc (mRegI dst, memory mem) %{
    MacroAssembler _masm(&cbuf);
    int dst = $dst$$reg;
    int base = $mem$$base;
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;

    if( index != 0 ) {
      if( Assembler::is_simm16(disp) ) {
        if( UseLoongsonISA ) {
          if (scale == 0) {
            __ gslbx(as_Register(dst), as_Register(base), as_Register(index), disp);
          } else {
            __ dsll(AT, as_Register(index), scale);
            __ gslbx(as_Register(dst), as_Register(base), AT, disp);
          }
        } else {
          if (scale == 0) {
            __ addu(AT, as_Register(base), as_Register(index));
          } else {
            __ dsll(AT, as_Register(index), scale);
            __ addu(AT, as_Register(base), AT);
          }
          __ lb(as_Register(dst), AT, disp);
        }
      } else {
        // disp does not fit in 16 bits: materialize it in T9.
        if (scale == 0) {
          __ addu(AT, as_Register(base), as_Register(index));
        } else {
          __ dsll(AT, as_Register(index), scale);
          __ addu(AT, as_Register(base), AT);
        }
        __ move(T9, disp);
        if( UseLoongsonISA ) {
          __ gslbx(as_Register(dst), AT, T9, 0);
        } else {
          __ addu(AT, AT, T9);
          __ lb(as_Register(dst), AT, 0);
        }
      }
    } else {
      if( Assembler::is_simm16(disp) ) {
        __ lb(as_Register(dst), as_Register(base), disp);
      } else {
        __ move(T9, disp);
        if( UseLoongsonISA ) {
          __ gslbx(as_Register(dst), as_Register(base), T9, 0);
        } else {
          __ addu(AT, as_Register(base), T9);
          __ lb(as_Register(dst), AT, 0);
        }
      }
    }
  %}
1751 //Load byte unsigned
  // Load byte unsigned (lbu).
  // Address = base + (index << scale) + disp; AT and T9 are scratch.
  // Unlike load_B_enc, this variant never uses the Loongson indexed form.
  enc_class load_UB_enc (mRegI dst, memory mem) %{
    MacroAssembler _masm(&cbuf);
    int dst = $dst$$reg;
    int base = $mem$$base;
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;

    if( index != 0 ) {
      if (scale == 0) {
        __ daddu(AT, as_Register(base), as_Register(index));
      } else {
        __ dsll(AT, as_Register(index), scale);
        __ daddu(AT, as_Register(base), AT);
      }
      if( Assembler::is_simm16(disp) ) {
        __ lbu(as_Register(dst), AT, disp);
      } else {
        // disp does not fit in 16 bits: materialize it in T9.
        __ move(T9, disp);
        __ daddu(AT, AT, T9);
        __ lbu(as_Register(dst), AT, 0);
      }
    } else {
      if( Assembler::is_simm16(disp) ) {
        __ lbu(as_Register(dst), as_Register(base), disp);
      } else {
        __ move(T9, disp);
        __ daddu(AT, as_Register(base), T9);
        __ lbu(as_Register(dst), AT, 0);
      }
    }
  %}
  // Store byte from register (sb / Loongson gssbx).
  // Address = base + (index << scale) + disp; AT and T9 are scratch.
  // The Loongson gssbx form is only used when disp fits in its 8-bit
  // immediate (is_simm(disp, 8)) or after disp has been folded into a
  // register; otherwise the address is composed in AT and sb is used.
  enc_class store_B_reg_enc (memory mem, mRegI src) %{
    MacroAssembler _masm(&cbuf);
    int src = $src$$reg;
    int base = $mem$$base;
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;

    if( index != 0 ) {
      if (scale == 0) {
        if( Assembler::is_simm(disp, 8) ) {
          if (UseLoongsonISA) {
            __ gssbx(as_Register(src), as_Register(base), as_Register(index), disp);
          } else {
            __ addu(AT, as_Register(base), as_Register(index));
            __ sb(as_Register(src), AT, disp);
          }
        } else if( Assembler::is_simm16(disp) ) {
          __ addu(AT, as_Register(base), as_Register(index));
          __ sb(as_Register(src), AT, disp);
        } else {
          __ addu(AT, as_Register(base), as_Register(index));
          __ move(T9, disp);
          if (UseLoongsonISA) {
            __ gssbx(as_Register(src), AT, T9, 0);
          } else {
            __ addu(AT, AT, T9);
            __ sb(as_Register(src), AT, 0);
          }
        }
      } else {
        __ dsll(AT, as_Register(index), scale);
        if( Assembler::is_simm(disp, 8) ) {
          if (UseLoongsonISA) {
            __ gssbx(as_Register(src), AT, as_Register(base), disp);
          } else {
            __ addu(AT, as_Register(base), AT);
            __ sb(as_Register(src), AT, disp);
          }
        } else if( Assembler::is_simm16(disp) ) {
          __ addu(AT, as_Register(base), AT);
          __ sb(as_Register(src), AT, disp);
        } else {
          __ addu(AT, as_Register(base), AT);
          __ move(T9, disp);
          if (UseLoongsonISA) {
            __ gssbx(as_Register(src), AT, T9, 0);
          } else {
            __ addu(AT, AT, T9);
            __ sb(as_Register(src), AT, 0);
          }
        }
      }
    } else {
      if( Assembler::is_simm16(disp) ) {
        __ sb(as_Register(src), as_Register(base), disp);
      } else {
        __ move(T9, disp);
        if (UseLoongsonISA) {
          __ gssbx(as_Register(src), as_Register(base), T9, 0);
        } else {
          __ addu(AT, as_Register(base), T9);
          __ sb(as_Register(src), AT, 0);
        }
      }
    }
  %}
  // Store immediate byte (sb / Loongson gssbx).
  // Address = base + (index << scale) + disp; AT and T9 are scratch.
  // value == 0 is special-cased to store from R0 (the hardwired zero
  // register) so no immediate needs to be materialized.
  enc_class store_B_immI_enc (memory mem, immI8 src) %{
    MacroAssembler _masm(&cbuf);
    int base = $mem$$base;
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;
    int value = $src$$constant;

    if( index != 0 ) {
      if (!UseLoongsonISA) {
        // Plain MIPS: compose the address in AT, store with sb.
        if (scale == 0) {
          __ daddu(AT, as_Register(base), as_Register(index));
        } else {
          __ dsll(AT, as_Register(index), scale);
          __ daddu(AT, as_Register(base), AT);
        }
        if( Assembler::is_simm16(disp) ) {
          if (value == 0) {
            __ sb(R0, AT, disp);
          } else {
            __ move(T9, value);
            __ sb(T9, AT, disp);
          }
        } else {
          if (value == 0) {
            __ move(T9, disp);
            __ daddu(AT, AT, T9);
            __ sb(R0, AT, 0);
          } else {
            __ move(T9, disp);
            __ daddu(AT, AT, T9);
            __ move(T9, value);
            __ sb(T9, AT, 0);
          }
        }
      } else {
        // Loongson: prefer the indexed gssbx when disp fits its 8-bit
        // immediate; fall back to sb or a register-folded gssbx otherwise.
        if (scale == 0) {
          if( Assembler::is_simm(disp, 8) ) {
            if (value == 0) {
              __ gssbx(R0, as_Register(base), as_Register(index), disp);
            } else {
              __ move(T9, value);
              __ gssbx(T9, as_Register(base), as_Register(index), disp);
            }
          } else if( Assembler::is_simm16(disp) ) {
            __ daddu(AT, as_Register(base), as_Register(index));
            if (value == 0) {
              __ sb(R0, AT, disp);
            } else {
              __ move(T9, value);
              __ sb(T9, AT, disp);
            }
          } else {
            if (value == 0) {
              __ daddu(AT, as_Register(base), as_Register(index));
              __ move(T9, disp);
              __ gssbx(R0, AT, T9, 0);
            } else {
              __ move(AT, disp);
              __ move(T9, value);
              __ daddu(AT, as_Register(base), AT);
              __ gssbx(T9, AT, as_Register(index), 0);
            }
          }
        } else {
          if( Assembler::is_simm(disp, 8) ) {
            __ dsll(AT, as_Register(index), scale);
            if (value == 0) {
              __ gssbx(R0, as_Register(base), AT, disp);
            } else {
              __ move(T9, value);
              __ gssbx(T9, as_Register(base), AT, disp);
            }
          } else if( Assembler::is_simm16(disp) ) {
            __ dsll(AT, as_Register(index), scale);
            __ daddu(AT, as_Register(base), AT);
            if (value == 0) {
              __ sb(R0, AT, disp);
            } else {
              __ move(T9, value);
              __ sb(T9, AT, disp);
            }
          } else {
            __ dsll(AT, as_Register(index), scale);
            if (value == 0) {
              __ daddu(AT, as_Register(base), AT);
              __ move(T9, disp);
              __ gssbx(R0, AT, T9, 0);
            } else {
              __ move(T9, disp);
              __ daddu(AT, AT, T9);
              __ move(T9, value);
              __ gssbx(T9, as_Register(base), AT, 0);
            }
          }
        }
      }
    } else {
      // No index register: base + disp only.
      if( Assembler::is_simm16(disp) ) {
        if (value == 0) {
          __ sb(R0, as_Register(base), disp);
        } else {
          __ move(AT, value);
          __ sb(AT, as_Register(base), disp);
        }
      } else {
        if (value == 0) {
          __ move(T9, disp);
          if (UseLoongsonISA) {
            __ gssbx(R0, as_Register(base), T9, 0);
          } else {
            __ daddu(AT, as_Register(base), T9);
            __ sb(R0, AT, 0);
          }
        } else {
          __ move(T9, disp);
          if (UseLoongsonISA) {
            __ move(AT, value);
            __ gssbx(AT, as_Register(base), T9, 0);
          } else {
            __ daddu(AT, as_Register(base), T9);
            __ move(T9, value);
            __ sb(T9, AT, 0);
          }
        }
      }
    }
  %}
  // Volatile store of a byte immediate:
  //   *(int8_t*)(base + (index << scale) + disp) = value; followed by SYNC.
  // AT and T9 serve as scratch registers for the address and the value.
  // A zero value is stored directly from R0 (hard-wired zero) to save the
  // immediate load. With UseLoongsonISA the gssbx (base + index-register +
  // simm8) addressing form is preferred; otherwise plain MIPS sb with a
  // simm16 offset, falling back to explicit address materialization when
  // disp does not fit the immediate field.
  enc_class store_B_immI_enc_sync (memory mem, immI8 src) %{
    MacroAssembler _masm(&cbuf);
    int base = $mem$$base;
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;
    int value = $src$$constant;

    if( index != 0 ) {
      if ( UseLoongsonISA ) {
        if ( Assembler::is_simm(disp,8) ) {
          // disp fits gssbx's 8-bit immediate: store via base + (scaled) index.
          if ( scale == 0 ) {
            if ( value == 0 ) {
              __ gssbx(R0, as_Register(base), as_Register(index), disp);
            } else {
              __ move(AT, value);
              __ gssbx(AT, as_Register(base), as_Register(index), disp);
            }
          } else {
            __ dsll(AT, as_Register(index), scale);
            if ( value == 0 ) {
              __ gssbx(R0, as_Register(base), AT, disp);
            } else {
              __ move(T9, value);
              __ gssbx(T9, as_Register(base), AT, disp);
            }
          }
        } else if ( Assembler::is_simm16(disp) ) {
          // disp fits sb's 16-bit immediate: compute base + scaled index in AT.
          if ( scale == 0 ) {
            __ daddu(AT, as_Register(base), as_Register(index));
            if ( value == 0 ){
              __ sb(R0, AT, disp);
            } else {
              __ move(T9, value);
              __ sb(T9, AT, disp);
            }
          } else {
            __ dsll(AT, as_Register(index), scale);
            __ daddu(AT, as_Register(base), AT);
            if ( value == 0 ) {
              __ sb(R0, AT, disp);
            } else {
              __ move(T9, value);
              __ sb(T9, AT, disp);
            }
          }
        } else {
          // Large disp: fold disp into the index side, then gssbx with offset 0.
          if ( scale == 0 ) {
            __ move(AT, disp);
            __ daddu(AT, as_Register(index), AT);
            if ( value == 0 ) {
              __ gssbx(R0, as_Register(base), AT, 0);
            } else {
              __ move(T9, value);
              __ gssbx(T9, as_Register(base), AT, 0);
            }
          } else {
            __ dsll(AT, as_Register(index), scale);
            __ move(T9, disp);
            __ daddu(AT, AT, T9);
            if ( value == 0 ) {
              __ gssbx(R0, as_Register(base), AT, 0);
            } else {
              __ move(T9, value);
              __ gssbx(T9, as_Register(base), AT, 0);
            }
          }
        }
      } else { //not use loongson isa
        if (scale == 0) {
          __ daddu(AT, as_Register(base), as_Register(index));
        } else {
          __ dsll(AT, as_Register(index), scale);
          __ daddu(AT, as_Register(base), AT);
        }
        if( Assembler::is_simm16(disp) ) {
          if (value == 0) {
            __ sb(R0, AT, disp);
          } else {
            __ move(T9, value);
            __ sb(T9, AT, disp);
          }
        } else {
          if (value == 0) {
            __ move(T9, disp);
            __ daddu(AT, AT, T9);
            __ sb(R0, AT, 0);
          } else {
            __ move(T9, disp);
            __ daddu(AT, AT, T9);
            __ move(T9, value);
            __ sb(T9, AT, 0);
          }
        }
      }
    } else {
      // No index register: address is base + disp only.
      if ( UseLoongsonISA ){
        if ( Assembler::is_simm16(disp) ){
          if ( value == 0 ) {
            __ sb(R0, as_Register(base), disp);
          } else {
            __ move(AT, value);
            __ sb(AT, as_Register(base), disp);
          }
        } else {
          __ move(AT, disp);
          if ( value == 0 ) {
            __ gssbx(R0, as_Register(base), AT, 0);
          } else {
            __ move(T9, value);
            __ gssbx(T9, as_Register(base), AT, 0);
          }
        }
      } else {
        if( Assembler::is_simm16(disp) ) {
          if (value == 0) {
            __ sb(R0, as_Register(base), disp);
          } else {
            __ move(AT, value);
            __ sb(AT, as_Register(base), disp);
          }
        } else {
          if (value == 0) {
            __ move(T9, disp);
            __ daddu(AT, as_Register(base), T9);
            __ sb(R0, AT, 0);
          } else {
            __ move(T9, disp);
            __ daddu(AT, as_Register(base), T9);
            __ move(T9, value);
            __ sb(T9, AT, 0);
          }
        }
      }
    }

    // Memory barrier required by the volatile store semantics.
    __ sync();
  %}
2125 // Load Short (16bit signed)
  // Load a signed 16-bit short into dst:
  //   dst = *(int16_t*)(base + (index << scale) + disp), sign-extended by lh.
  // With UseLoongsonISA the gslhx (base + index-register + simm8) form is
  // preferred; otherwise the effective address is built in AT (scratch, with
  // T9) and a plain lh with a simm16 offset is emitted.
  enc_class load_S_enc (mRegI dst, memory mem) %{
    MacroAssembler _masm(&cbuf);
    int dst = $dst$$reg;
    int base = $mem$$base;
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;

    if( index != 0 ) {
      if ( UseLoongsonISA ) {
        if ( Assembler::is_simm(disp, 8) ) {
          // disp fits gslhx's 8-bit immediate.
          if (scale == 0) {
            __ gslhx(as_Register(dst), as_Register(base), as_Register(index), disp);
          } else {
            __ dsll(AT, as_Register(index), scale);
            __ gslhx(as_Register(dst), as_Register(base), AT, disp);
          }
        } else if ( Assembler::is_simm16(disp) ) {
          // disp fits lh's 16-bit immediate.
          if (scale == 0) {
            __ daddu(AT, as_Register(base), as_Register(index));
            __ lh(as_Register(dst), AT, disp);
          } else {
            __ dsll(AT, as_Register(index), scale);
            __ daddu(AT, as_Register(base), AT);
            __ lh(as_Register(dst), AT, disp);
          }
        } else {
          // Large disp: fold it into the index side, gslhx with offset 0.
          if (scale == 0) {
            __ move(AT, disp);
            __ daddu(AT, as_Register(index), AT);
            __ gslhx(as_Register(dst), as_Register(base), AT, 0);
          } else {
            __ dsll(AT, as_Register(index), scale);
            __ move(T9, disp);
            __ daddu(AT, AT, T9);
            __ gslhx(as_Register(dst), as_Register(base), AT, 0);
          }
        }
      } else { // not use loongson isa
        if (scale == 0) {
          __ daddu(AT, as_Register(base), as_Register(index));
        } else {
          __ dsll(AT, as_Register(index), scale);
          __ daddu(AT, as_Register(base), AT);
        }
        if( Assembler::is_simm16(disp) ) {
          __ lh(as_Register(dst), AT, disp);
        } else {
          __ move(T9, disp);
          __ daddu(AT, AT, T9);
          __ lh(as_Register(dst), AT, 0);
        }
      }
    } else { // index is 0
      if ( UseLoongsonISA ) {
        if ( Assembler::is_simm16(disp) ) {
          __ lh(as_Register(dst), as_Register(base), disp);
        } else {
          __ move(T9, disp);
          __ gslhx(as_Register(dst), as_Register(base), T9, 0);
        }
      } else { //not use loongson isa
        if( Assembler::is_simm16(disp) ) {
          __ lh(as_Register(dst), as_Register(base), disp);
        } else {
          __ move(T9, disp);
          __ daddu(AT, as_Register(base), T9);
          __ lh(as_Register(dst), AT, 0);
        }
      }
    }
  %}
2199 // Load Char (16bit unsigned)
2200 enc_class load_C_enc (mRegI dst, memory mem) %{
2201 MacroAssembler _masm(&cbuf);
2202 int dst = $dst$$reg;
2203 int base = $mem$$base;
2204 int index = $mem$$index;
2205 int scale = $mem$$scale;
2206 int disp = $mem$$disp;
2208 if( index != 0 ) {
2209 if (scale == 0) {
2210 __ daddu(AT, as_Register(base), as_Register(index));
2211 } else {
2212 __ dsll(AT, as_Register(index), scale);
2213 __ daddu(AT, as_Register(base), AT);
2214 }
2215 if( Assembler::is_simm16(disp) ) {
2216 __ lhu(as_Register(dst), AT, disp);
2217 } else {
2218 __ move(T9, disp);
2219 __ addu(AT, AT, T9);
2220 __ lhu(as_Register(dst), AT, 0);
2221 }
2222 } else {
2223 if( Assembler::is_simm16(disp) ) {
2224 __ lhu(as_Register(dst), as_Register(base), disp);
2225 } else {
2226 __ move(T9, disp);
2227 __ daddu(AT, as_Register(base), T9);
2228 __ lhu(as_Register(dst), AT, 0);
2229 }
2230 }
2231 %}
2233 // Store Char (16bit unsigned)
2234 enc_class store_C_reg_enc (memory mem, mRegI src) %{
2235 MacroAssembler _masm(&cbuf);
2236 int src = $src$$reg;
2237 int base = $mem$$base;
2238 int index = $mem$$index;
2239 int scale = $mem$$scale;
2240 int disp = $mem$$disp;
2242 if( index != 0 ) {
2243 if( Assembler::is_simm16(disp) ) {
2244 if( UseLoongsonISA && Assembler::is_simm(disp, 8) ) {
2245 if (scale == 0) {
2246 __ gsshx(as_Register(src), as_Register(base), as_Register(index), disp);
2247 } else {
2248 __ dsll(AT, as_Register(index), scale);
2249 __ gsshx(as_Register(src), as_Register(base), AT, disp);
2250 }
2251 } else {
2252 if (scale == 0) {
2253 __ addu(AT, as_Register(base), as_Register(index));
2254 } else {
2255 __ dsll(AT, as_Register(index), scale);
2256 __ addu(AT, as_Register(base), AT);
2257 }
2258 __ sh(as_Register(src), AT, disp);
2259 }
2260 } else {
2261 if (scale == 0) {
2262 __ addu(AT, as_Register(base), as_Register(index));
2263 } else {
2264 __ dsll(AT, as_Register(index), scale);
2265 __ addu(AT, as_Register(base), AT);
2266 }
2267 __ move(T9, disp);
2268 if( UseLoongsonISA ) {
2269 __ gsshx(as_Register(src), AT, T9, 0);
2270 } else {
2271 __ addu(AT, AT, T9);
2272 __ sh(as_Register(src), AT, 0);
2273 }
2274 }
2275 } else {
2276 if( Assembler::is_simm16(disp) ) {
2277 __ sh(as_Register(src), as_Register(base), disp);
2278 } else {
2279 __ move(T9, disp);
2280 if( UseLoongsonISA ) {
2281 __ gsshx(as_Register(src), as_Register(base), T9, 0);
2282 } else {
2283 __ addu(AT, as_Register(base), T9);
2284 __ sh(as_Register(src), AT, 0);
2285 }
2286 }
2287 }
2288 %}
2290 enc_class store_C0_enc (memory mem) %{
2291 MacroAssembler _masm(&cbuf);
2292 int base = $mem$$base;
2293 int index = $mem$$index;
2294 int scale = $mem$$scale;
2295 int disp = $mem$$disp;
2297 if( index != 0 ) {
2298 if( Assembler::is_simm16(disp) ) {
2299 if( UseLoongsonISA && Assembler::is_simm(disp, 8) ) {
2300 if (scale == 0) {
2301 __ gsshx(R0, as_Register(base), as_Register(index), disp);
2302 } else {
2303 __ dsll(AT, as_Register(index), scale);
2304 __ gsshx(R0, as_Register(base), AT, disp);
2305 }
2306 } else {
2307 if (scale == 0) {
2308 __ addu(AT, as_Register(base), as_Register(index));
2309 } else {
2310 __ dsll(AT, as_Register(index), scale);
2311 __ addu(AT, as_Register(base), AT);
2312 }
2313 __ sh(R0, AT, disp);
2314 }
2315 } else {
2316 if (scale == 0) {
2317 __ addu(AT, as_Register(base), as_Register(index));
2318 } else {
2319 __ dsll(AT, as_Register(index), scale);
2320 __ addu(AT, as_Register(base), AT);
2321 }
2322 __ move(T9, disp);
2323 if( UseLoongsonISA ) {
2324 __ gsshx(R0, AT, T9, 0);
2325 } else {
2326 __ addu(AT, AT, T9);
2327 __ sh(R0, AT, 0);
2328 }
2329 }
2330 } else {
2331 if( Assembler::is_simm16(disp) ) {
2332 __ sh(R0, as_Register(base), disp);
2333 } else {
2334 __ move(T9, disp);
2335 if( UseLoongsonISA ) {
2336 __ gsshx(R0, as_Register(base), T9, 0);
2337 } else {
2338 __ addu(AT, as_Register(base), T9);
2339 __ sh(R0, AT, 0);
2340 }
2341 }
2342 }
2343 %}
2345 enc_class load_I_enc (mRegI dst, memory mem) %{
2346 MacroAssembler _masm(&cbuf);
2347 int dst = $dst$$reg;
2348 int base = $mem$$base;
2349 int index = $mem$$index;
2350 int scale = $mem$$scale;
2351 int disp = $mem$$disp;
2353 if( index != 0 ) {
2354 if( Assembler::is_simm16(disp) ) {
2355 if( UseLoongsonISA && Assembler::is_simm(disp, 8) ) {
2356 if (scale == 0) {
2357 __ gslwx(as_Register(dst), as_Register(base), as_Register(index), disp);
2358 } else {
2359 __ dsll(AT, as_Register(index), scale);
2360 __ gslwx(as_Register(dst), as_Register(base), AT, disp);
2361 }
2362 } else {
2363 if (scale == 0) {
2364 __ addu(AT, as_Register(base), as_Register(index));
2365 } else {
2366 __ dsll(AT, as_Register(index), scale);
2367 __ addu(AT, as_Register(base), AT);
2368 }
2369 __ lw(as_Register(dst), AT, disp);
2370 }
2371 } else {
2372 if (scale == 0) {
2373 __ addu(AT, as_Register(base), as_Register(index));
2374 } else {
2375 __ dsll(AT, as_Register(index), scale);
2376 __ addu(AT, as_Register(base), AT);
2377 }
2378 __ move(T9, disp);
2379 if( UseLoongsonISA ) {
2380 __ gslwx(as_Register(dst), AT, T9, 0);
2381 } else {
2382 __ addu(AT, AT, T9);
2383 __ lw(as_Register(dst), AT, 0);
2384 }
2385 }
2386 } else {
2387 if( Assembler::is_simm16(disp) ) {
2388 __ lw(as_Register(dst), as_Register(base), disp);
2389 } else {
2390 __ move(T9, disp);
2391 if( UseLoongsonISA ) {
2392 __ gslwx(as_Register(dst), as_Register(base), T9, 0);
2393 } else {
2394 __ addu(AT, as_Register(base), T9);
2395 __ lw(as_Register(dst), AT, 0);
2396 }
2397 }
2398 }
2399 %}
2401 enc_class store_I_reg_enc (memory mem, mRegI src) %{
2402 MacroAssembler _masm(&cbuf);
2403 int src = $src$$reg;
2404 int base = $mem$$base;
2405 int index = $mem$$index;
2406 int scale = $mem$$scale;
2407 int disp = $mem$$disp;
2409 if( index != 0 ) {
2410 if( Assembler::is_simm16(disp) ) {
2411 if( UseLoongsonISA && Assembler::is_simm(disp, 8) ) {
2412 if (scale == 0) {
2413 __ gsswx(as_Register(src), as_Register(base), as_Register(index), disp);
2414 } else {
2415 __ dsll(AT, as_Register(index), scale);
2416 __ gsswx(as_Register(src), as_Register(base), AT, disp);
2417 }
2418 } else {
2419 if (scale == 0) {
2420 __ addu(AT, as_Register(base), as_Register(index));
2421 } else {
2422 __ dsll(AT, as_Register(index), scale);
2423 __ addu(AT, as_Register(base), AT);
2424 }
2425 __ sw(as_Register(src), AT, disp);
2426 }
2427 } else {
2428 if (scale == 0) {
2429 __ addu(AT, as_Register(base), as_Register(index));
2430 } else {
2431 __ dsll(AT, as_Register(index), scale);
2432 __ addu(AT, as_Register(base), AT);
2433 }
2434 __ move(T9, disp);
2435 if( UseLoongsonISA ) {
2436 __ gsswx(as_Register(src), AT, T9, 0);
2437 } else {
2438 __ addu(AT, AT, T9);
2439 __ sw(as_Register(src), AT, 0);
2440 }
2441 }
2442 } else {
2443 if( Assembler::is_simm16(disp) ) {
2444 __ sw(as_Register(src), as_Register(base), disp);
2445 } else {
2446 __ move(T9, disp);
2447 if( UseLoongsonISA ) {
2448 __ gsswx(as_Register(src), as_Register(base), T9, 0);
2449 } else {
2450 __ addu(AT, as_Register(base), T9);
2451 __ sw(as_Register(src), AT, 0);
2452 }
2453 }
2454 }
2455 %}
  // Store a 32-bit int immediate:
  //   *(int32_t*)(base + (index << scale) + disp) = value.
  // A zero value is stored straight from R0 to save the immediate load.
  // With UseLoongsonISA the gsswx (base + index-register + simm8) form is
  // preferred; otherwise sw with a simm16 offset, building the address in
  // AT/T9 (scratch) when disp does not fit an immediate field.
  enc_class store_I_immI_enc (memory mem, immI src) %{
    MacroAssembler _masm(&cbuf);
    int base = $mem$$base;
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;
    int value = $src$$constant;

    if( index != 0 ) {
      if ( UseLoongsonISA ) {
        if ( Assembler::is_simm(disp, 8) ) {
          // disp fits gsswx's 8-bit immediate.
          if ( scale == 0 ) {
            if ( value == 0 ) {
              __ gsswx(R0, as_Register(base), as_Register(index), disp);
            } else {
              __ move(T9, value);
              __ gsswx(T9, as_Register(base), as_Register(index), disp);
            }
          } else {
            __ dsll(AT, as_Register(index), scale);
            if ( value == 0 ) {
              __ gsswx(R0, as_Register(base), AT, disp);
            } else {
              __ move(T9, value);
              __ gsswx(T9, as_Register(base), AT, disp);
            }
          }
        } else if ( Assembler::is_simm16(disp) ) {
          // disp fits sw's 16-bit immediate.
          if ( scale == 0 ) {
            __ daddu(AT, as_Register(base), as_Register(index));
            if ( value == 0 ) {
              __ sw(R0, AT, disp);
            } else {
              __ move(T9, value);
              __ sw(T9, AT, disp);
            }
          } else {
            __ dsll(AT, as_Register(index), scale);
            __ daddu(AT, as_Register(base), AT);
            if ( value == 0 ) {
              __ sw(R0, AT, disp);
            } else {
              __ move(T9, value);
              __ sw(T9, AT, disp);
            }
          }
        } else {
          // Large disp: fold it into the index side, gsswx with offset 0.
          if ( scale == 0 ) {
            __ move(T9, disp);
            __ daddu(AT, as_Register(index), T9);
            if ( value ==0 ) {
              __ gsswx(R0, as_Register(base), AT, 0);
            } else {
              __ move(T9, value);
              __ gsswx(T9, as_Register(base), AT, 0);
            }
          } else {
            __ dsll(AT, as_Register(index), scale);
            __ move(T9, disp);
            __ daddu(AT, AT, T9);
            if ( value == 0 ) {
              __ gsswx(R0, as_Register(base), AT, 0);
            } else {
              __ move(T9, value);
              __ gsswx(T9, as_Register(base), AT, 0);
            }
          }
        }
      } else { //not use loongson isa
        if (scale == 0) {
          __ daddu(AT, as_Register(base), as_Register(index));
        } else {
          __ dsll(AT, as_Register(index), scale);
          __ daddu(AT, as_Register(base), AT);
        }
        if( Assembler::is_simm16(disp) ) {
          if (value == 0) {
            __ sw(R0, AT, disp);
          } else {
            __ move(T9, value);
            __ sw(T9, AT, disp);
          }
        } else {
          if (value == 0) {
            __ move(T9, disp);
            __ daddu(AT, AT, T9);
            __ sw(R0, AT, 0);
          } else {
            __ move(T9, disp);
            __ daddu(AT, AT, T9);
            __ move(T9, value);
            __ sw(T9, AT, 0);
          }
        }
      }
    } else {
      // No index register: address is base + disp only.
      if ( UseLoongsonISA ) {
        if ( Assembler::is_simm16(disp) ) {
          if ( value == 0 ) {
            __ sw(R0, as_Register(base), disp);
          } else {
            __ move(AT, value);
            __ sw(AT, as_Register(base), disp);
          }
        } else {
          __ move(T9, disp);
          if ( value == 0 ) {
            __ gsswx(R0, as_Register(base), T9, 0);
          } else {
            __ move(AT, value);
            __ gsswx(AT, as_Register(base), T9, 0);
          }
        }
      } else {
        if( Assembler::is_simm16(disp) ) {
          if (value == 0) {
            __ sw(R0, as_Register(base), disp);
          } else {
            __ move(AT, value);
            __ sw(AT, as_Register(base), disp);
          }
        } else {
          if (value == 0) {
            __ move(T9, disp);
            __ daddu(AT, as_Register(base), T9);
            __ sw(R0, AT, 0);
          } else {
            __ move(T9, disp);
            __ daddu(AT, as_Register(base), T9);
            __ move(T9, value);
            __ sw(T9, AT, 0);
          }
        }
      }
    }
  %}
  // Load a narrow (compressed) oop into dst:
  //   dst = *(uint32_t*)(base + (index << scale) + disp), zero-extended by lwu.
  // The memory operand must carry no displacement relocation (asserted).
  // Large displacements are materialized with set64 into T9 (scratch).
  enc_class load_N_enc (mRegN dst, memory mem) %{
    MacroAssembler _masm(&cbuf);
    int dst = $dst$$reg;
    int base = $mem$$base;
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;
    relocInfo::relocType disp_reloc = $mem->disp_reloc();
    assert(disp_reloc == relocInfo::none, "cannot have disp");

    if( index != 0 ) {
      if (scale == 0) {
        __ daddu(AT, as_Register(base), as_Register(index));
      } else {
        __ dsll(AT, as_Register(index), scale);
        __ daddu(AT, as_Register(base), AT);
      }
      if( Assembler::is_simm16(disp) ) {
        __ lwu(as_Register(dst), AT, disp);
      } else {
        __ set64(T9, disp);
        __ daddu(AT, AT, T9);
        __ lwu(as_Register(dst), AT, 0);
      }
    } else {
      if( Assembler::is_simm16(disp) ) {
        __ lwu(as_Register(dst), as_Register(base), disp);
      } else {
        __ set64(T9, disp);
        __ daddu(AT, as_Register(base), T9);
        __ lwu(as_Register(dst), AT, 0);
      }
    }

  %}
  // Load a 64-bit pointer into dst:
  //   dst = *(intptr_t*)(base + (index << scale) + disp).
  // The memory operand must carry no displacement relocation (asserted).
  // With UseLoongsonISA the gsldx (base + index-register + simm8) form is
  // preferred; otherwise ld with the address built in AT/T9 (scratch).
  enc_class load_P_enc (mRegP dst, memory mem) %{
    MacroAssembler _masm(&cbuf);
    int dst = $dst$$reg;
    int base = $mem$$base;
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;
    relocInfo::relocType disp_reloc = $mem->disp_reloc();
    assert(disp_reloc == relocInfo::none, "cannot have disp");

    if( index != 0 ) {
      if ( UseLoongsonISA ) {
        if ( Assembler::is_simm(disp, 8) ) {
          // disp fits gsldx's 8-bit immediate.
          if ( scale != 0 ) {
            __ dsll(AT, as_Register(index), scale);
            __ gsldx(as_Register(dst), as_Register(base), AT, disp);
          } else {
            __ gsldx(as_Register(dst), as_Register(base), as_Register(index), disp);
          }
        } else if ( Assembler::is_simm16(disp) ){
          // disp fits ld's 16-bit immediate.
          if ( scale != 0 ) {
            __ dsll(AT, as_Register(index), scale);
            __ daddu(AT, AT, as_Register(base));
          } else {
            __ daddu(AT, as_Register(index), as_Register(base));
          }
          __ ld(as_Register(dst), AT, disp);
        } else {
          // Large disp: fold it into the index side, gsldx with offset 0.
          if ( scale != 0 ) {
            __ dsll(AT, as_Register(index), scale);
            __ move(T9, disp);
            __ daddu(AT, AT, T9);
          } else {
            __ move(T9, disp);
            __ daddu(AT, as_Register(index), T9);
          }
          __ gsldx(as_Register(dst), as_Register(base), AT, 0);
        }
      } else { //not use loongson isa
        if (scale == 0) {
          __ daddu(AT, as_Register(base), as_Register(index));
        } else {
          __ dsll(AT, as_Register(index), scale);
          __ daddu(AT, as_Register(base), AT);
        }
        if( Assembler::is_simm16(disp) ) {
          __ ld(as_Register(dst), AT, disp);
        } else {
          __ set64(T9, disp);
          __ daddu(AT, AT, T9);
          __ ld(as_Register(dst), AT, 0);
        }
      }
    } else {
      if ( UseLoongsonISA ) {
        if ( Assembler::is_simm16(disp) ){
          __ ld(as_Register(dst), as_Register(base), disp);
        } else {
          __ set64(T9, disp);
          __ gsldx(as_Register(dst), as_Register(base), T9, 0);
        }
      } else { //not use loongson isa
        if( Assembler::is_simm16(disp) ) {
          __ ld(as_Register(dst), as_Register(base), disp);
        } else {
          __ set64(T9, disp);
          __ daddu(AT, as_Register(base), T9);
          __ ld(as_Register(dst), AT, 0);
        }
      }
    }
    // if( disp_reloc != relocInfo::none) __ ld(as_Register(dst), as_Register(dst), 0);
  %}
  // Store a 64-bit pointer register:
  //   *(intptr_t*)(base + (index << scale) + disp) = src.
  // With UseLoongsonISA the gssdx (base + index-register + simm8) form is
  // preferred; otherwise sd with the address built in AT/T9 (scratch).
  enc_class store_P_reg_enc (memory mem, mRegP src) %{
    MacroAssembler _masm(&cbuf);
    int src = $src$$reg;
    int base = $mem$$base;
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;

    if( index != 0 ) {
      if ( UseLoongsonISA ){
        if ( Assembler::is_simm(disp, 8) ) {
          // disp fits gssdx's 8-bit immediate.
          if ( scale == 0 ) {
            __ gssdx(as_Register(src), as_Register(base), as_Register(index), disp);
          } else {
            __ dsll(AT, as_Register(index), scale);
            __ gssdx(as_Register(src), as_Register(base), AT, disp);
          }
        } else if ( Assembler::is_simm16(disp) ) {
          // disp fits sd's 16-bit immediate.
          if ( scale == 0 ) {
            __ daddu(AT, as_Register(base), as_Register(index));
          } else {
            __ dsll(AT, as_Register(index), scale);
            __ daddu(AT, as_Register(base), AT);
          }
          __ sd(as_Register(src), AT, disp);
        } else {
          // Large disp: fold it into the index side, gssdx with offset 0.
          if ( scale == 0 ) {
            __ move(T9, disp);
            __ daddu(AT, as_Register(index), T9);
          } else {
            __ dsll(AT, as_Register(index), scale);
            __ move(T9, disp);
            __ daddu(AT, AT, T9);
          }
          __ gssdx(as_Register(src), as_Register(base), AT, 0);
        }
      } else { //not use loongson isa
        if (scale == 0) {
          __ daddu(AT, as_Register(base), as_Register(index));
        } else {
          __ dsll(AT, as_Register(index), scale);
          __ daddu(AT, as_Register(base), AT);
        }
        if( Assembler::is_simm16(disp) ) {
          __ sd(as_Register(src), AT, disp);
        } else {
          __ move(T9, disp);
          __ daddu(AT, AT, T9);
          __ sd(as_Register(src), AT, 0);
        }
      }
    } else {
      if ( UseLoongsonISA ) {
        if ( Assembler::is_simm16(disp) ) {
          __ sd(as_Register(src), as_Register(base), disp);
        } else {
          __ move(T9, disp);
          __ gssdx(as_Register(src), as_Register(base), T9, 0);
        }
      } else {
        if( Assembler::is_simm16(disp) ) {
          __ sd(as_Register(src), as_Register(base), disp);
        } else {
          __ move(T9, disp);
          __ daddu(AT, as_Register(base), T9);
          __ sd(as_Register(src), AT, 0);
        }
      }
    }
  %}
  // Store a narrow (compressed) oop register as a 32-bit word:
  //   *(uint32_t*)(base + (index << scale) + disp) = src.
  // Same structure as store_P_reg_enc but with 32-bit sw/gsswx stores.
  enc_class store_N_reg_enc (memory mem, mRegN src) %{
    MacroAssembler _masm(&cbuf);
    int src = $src$$reg;
    int base = $mem$$base;
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;

    if( index != 0 ) {
      if ( UseLoongsonISA ){
        if ( Assembler::is_simm(disp, 8) ) {
          // disp fits gsswx's 8-bit immediate.
          if ( scale == 0 ) {
            __ gsswx(as_Register(src), as_Register(base), as_Register(index), disp);
          } else {
            __ dsll(AT, as_Register(index), scale);
            __ gsswx(as_Register(src), as_Register(base), AT, disp);
          }
        } else if ( Assembler::is_simm16(disp) ) {
          // disp fits sw's 16-bit immediate.
          if ( scale == 0 ) {
            __ daddu(AT, as_Register(base), as_Register(index));
          } else {
            __ dsll(AT, as_Register(index), scale);
            __ daddu(AT, as_Register(base), AT);
          }
          __ sw(as_Register(src), AT, disp);
        } else {
          // Large disp: fold it into the index side, gsswx with offset 0.
          if ( scale == 0 ) {
            __ move(T9, disp);
            __ daddu(AT, as_Register(index), T9);
          } else {
            __ dsll(AT, as_Register(index), scale);
            __ move(T9, disp);
            __ daddu(AT, AT, T9);
          }
          __ gsswx(as_Register(src), as_Register(base), AT, 0);
        }
      } else { //not use loongson isa
        if (scale == 0) {
          __ daddu(AT, as_Register(base), as_Register(index));
        } else {
          __ dsll(AT, as_Register(index), scale);
          __ daddu(AT, as_Register(base), AT);
        }
        if( Assembler::is_simm16(disp) ) {
          __ sw(as_Register(src), AT, disp);
        } else {
          __ move(T9, disp);
          __ daddu(AT, AT, T9);
          __ sw(as_Register(src), AT, 0);
        }
      }
    } else {
      if ( UseLoongsonISA ) {
        if ( Assembler::is_simm16(disp) ) {
          __ sw(as_Register(src), as_Register(base), disp);
        } else {
          __ move(T9, disp);
          __ gsswx(as_Register(src), as_Register(base), T9, 0);
        }
      } else {
        if( Assembler::is_simm16(disp) ) {
          __ sw(as_Register(src), as_Register(base), disp);
        } else {
          __ move(T9, disp);
          __ daddu(AT, as_Register(base), T9);
          __ sw(as_Register(src), AT, 0);
        }
      }
    }
  %}
  // Store a null pointer (64 bits of zero from R0):
  //   *(intptr_t*)(base + (index << scale) + disp) = 0.
  // Chooses Loongson gssdx when disp fits simm8, otherwise sd, building the
  // address in AT/T9 (scratch) as needed.
  enc_class store_P_immP0_enc (memory mem) %{
    MacroAssembler _masm(&cbuf);
    int base = $mem$$base;
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;

    if( index != 0 ) {
      if (scale == 0) {
        if( Assembler::is_simm16(disp) ) {
          if (UseLoongsonISA && Assembler::is_simm(disp, 8)) {
            __ gssdx(R0, as_Register(base), as_Register(index), disp);
          } else {
            __ daddu(AT, as_Register(base), as_Register(index));
            __ sd(R0, AT, disp);
          }
        } else {
          // Large disp: materialize it in T9 and add explicitly.
          __ daddu(AT, as_Register(base), as_Register(index));
          __ move(T9, disp);
          if(UseLoongsonISA) {
            __ gssdx(R0, AT, T9, 0);
          } else {
            __ daddu(AT, AT, T9);
            __ sd(R0, AT, 0);
          }
        }
      } else {
        __ dsll(AT, as_Register(index), scale);
        if( Assembler::is_simm16(disp) ) {
          if (UseLoongsonISA && Assembler::is_simm(disp, 8)) {
            __ gssdx(R0, as_Register(base), AT, disp);
          } else {
            __ daddu(AT, as_Register(base), AT);
            __ sd(R0, AT, disp);
          }
        } else {
          __ daddu(AT, as_Register(base), AT);
          __ move(T9, disp);
          if (UseLoongsonISA) {
            __ gssdx(R0, AT, T9, 0);
          } else {
            __ daddu(AT, AT, T9);
            __ sd(R0, AT, 0);
          }
        }
      }
    } else {
      if( Assembler::is_simm16(disp) ) {
        __ sd(R0, as_Register(base), disp);
      } else {
        __ move(T9, disp);
        if (UseLoongsonISA) {
          __ gssdx(R0, as_Register(base), T9, 0);
        } else {
          __ daddu(AT, as_Register(base), T9);
          __ sd(R0, AT, 0);
        }
      }
    }
  %}
  // Store a pointer immediate (immP31, fits 31 bits):
  //   *(intptr_t*)(base + (index << scale) + disp) = value.
  // A zero value is stored straight from R0; otherwise the value is
  // materialized in AT/T9 (scratch). No Loongson fast path here.
  enc_class store_P_immP_enc (memory mem, immP31 src) %{
    MacroAssembler _masm(&cbuf);
    int base = $mem$$base;
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;
    long value = $src$$constant;

    if( index != 0 ) {
      if (scale == 0) {
        __ daddu(AT, as_Register(base), as_Register(index));
      } else {
        __ dsll(AT, as_Register(index), scale);
        __ daddu(AT, as_Register(base), AT);
      }
      if( Assembler::is_simm16(disp) ) {
        if (value == 0) {
          __ sd(R0, AT, disp);
        } else {
          __ move(T9, value);
          __ sd(T9, AT, disp);
        }
      } else {
        if (value == 0) {
          __ move(T9, disp);
          __ daddu(AT, AT, T9);
          __ sd(R0, AT, 0);
        } else {
          __ move(T9, disp);
          __ daddu(AT, AT, T9);
          __ move(T9, value);
          __ sd(T9, AT, 0);
        }
      }
    } else {
      if( Assembler::is_simm16(disp) ) {
        if (value == 0) {
          __ sd(R0, as_Register(base), disp);
        } else {
          __ move(AT, value);
          __ sd(AT, as_Register(base), disp);
        }
      } else {
        if (value == 0) {
          __ move(T9, disp);
          __ daddu(AT, as_Register(base), T9);
          __ sd(R0, AT, 0);
        } else {
          __ move(T9, disp);
          __ daddu(AT, as_Register(base), T9);
          __ move(T9, value);
          __ sd(T9, AT, 0);
        }
      }
    }
  %}
  // Store a null narrow (compressed) oop — 32 bits of zero from R0:
  //   *(uint32_t*)(base + (index << scale) + disp) = 0.
  // Address is built in AT/T9 (scratch) when needed.
  enc_class storeImmN0_enc(memory mem, ImmN0 src) %{
    MacroAssembler _masm(&cbuf);
    int base = $mem$$base;
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;

    if(index!=0){
      if (scale == 0) {
        __ daddu(AT, as_Register(base), as_Register(index));
      } else {
        __ dsll(AT, as_Register(index), scale);
        __ daddu(AT, as_Register(base), AT);
      }

      if( Assembler::is_simm16(disp) ) {
        __ sw(R0, AT, disp);
      } else {
        __ move(T9, disp);
        __ daddu(AT, AT, T9);
        __ sw(R0, AT, 0);
      }
    }
    else {
      if( Assembler::is_simm16(disp) ) {
        __ sw(R0, as_Register(base), disp);
      } else {
        __ move(T9, disp);
        __ daddu(AT, as_Register(base), T9);
        __ sw(R0, AT, 0);
      }
    }
  %}
2999 enc_class storeImmN_enc (memory mem, immN src) %{
3000 MacroAssembler _masm(&cbuf);
3001 int base = $mem$$base;
3002 int index = $mem$$index;
3003 int scale = $mem$$scale;
3004 int disp = $mem$$disp;
3005 long * value = (long *)$src$$constant;
3007 if (value == NULL) {
3008 guarantee(Assembler::is_simm16(disp), "FIXME: disp is not simm16!");
3009 if (index == 0) {
3010 __ sw(R0, as_Register(base), disp);
3011 } else {
3012 if (scale == 0) {
3013 __ daddu(AT, as_Register(base), as_Register(index));
3014 } else {
3015 __ dsll(AT, as_Register(index), scale);
3016 __ daddu(AT, as_Register(base), AT);
3017 }
3018 __ sw(R0, AT, disp);
3019 }
3021 return;
3022 }
3024 int oop_index = __ oop_recorder()->find_index((jobject)value);
3025 RelocationHolder rspec = oop_Relocation::spec(oop_index);
3027 guarantee(scale == 0, "FIXME: scale is not zero !");
3028 guarantee(value != 0, "FIXME: value is zero !");
3030 if (index != 0) {
3031 if (scale == 0) {
3032 __ daddu(AT, as_Register(base), as_Register(index));
3033 } else {
3034 __ dsll(AT, as_Register(index), scale);
3035 __ daddu(AT, as_Register(base), AT);
3036 }
3037 if( Assembler::is_simm16(disp) ) {
3038 if(rspec.type() != relocInfo::none) {
3039 __ relocate(rspec, Assembler::narrow_oop_operand);
3040 __ patchable_set48(T9, oop_index);
3041 } else {
3042 __ set64(T9, oop_index);
3043 }
3044 __ sw(T9, AT, disp);
3045 } else {
3046 __ move(T9, disp);
3047 __ addu(AT, AT, T9);
3049 if(rspec.type() != relocInfo::none) {
3050 __ relocate(rspec, Assembler::narrow_oop_operand);
3051 __ patchable_set48(T9, oop_index);
3052 } else {
3053 __ set64(T9, oop_index);
3054 }
3055 __ sw(T9, AT, 0);
3056 }
3057 }
3058 else {
3059 if( Assembler::is_simm16(disp) ) {
3060 if($src->constant_reloc() != relocInfo::none) {
3061 __ relocate(rspec, Assembler::narrow_oop_operand);
3062 __ patchable_set48(T9, oop_index);
3063 } else {
3064 __ set64(T9, oop_index);
3065 }
3066 __ sw(T9, as_Register(base), disp);
3067 } else {
3068 __ move(T9, disp);
3069 __ daddu(AT, as_Register(base), T9);
3071 if($src->constant_reloc() != relocInfo::none){
3072 __ relocate(rspec, Assembler::narrow_oop_operand);
3073 __ patchable_set48(T9, oop_index);
3074 } else {
3075 __ set64(T9, oop_index);
3076 }
3077 __ sw(T9, AT, 0);
3078 }
3079 }
3080 %}
  // Store a narrow (compressed) Klass immediate: the Klass pointer constant
  // is compressed with Klass::encode_klass, recorded in the metadata table,
  // and emitted as a relocated patchable_set48 (or plain set64 when no
  // relocation applies) before the 32-bit sw. AT/T9 are scratch.
  enc_class storeImmNKlass_enc (memory mem, immNKlass src) %{
    MacroAssembler _masm(&cbuf);

    assert (UseCompressedOops, "should only be used for compressed headers");
    assert (__ oop_recorder() != NULL, "this assembler needs an OopRecorder");

    int base = $mem$$base;
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;
    long value = $src$$constant;

    int klass_index = __ oop_recorder()->find_index((Klass*)value);
    RelocationHolder rspec = metadata_Relocation::spec(klass_index);
    long narrowp = Klass::encode_klass((Klass*)value);

    if(index!=0){
      if (scale == 0) {
        __ daddu(AT, as_Register(base), as_Register(index));
      } else {
        __ dsll(AT, as_Register(index), scale);
        __ daddu(AT, as_Register(base), AT);
      }

      if( Assembler::is_simm16(disp) ) {
        if(rspec.type() != relocInfo::none){
          __ relocate(rspec, Assembler::narrow_oop_operand);
          __ patchable_set48(T9, narrowp);
        } else {
          __ set64(T9, narrowp);
        }
        __ sw(T9, AT, disp);
      } else {
        __ move(T9, disp);
        __ daddu(AT, AT, T9);

        if(rspec.type() != relocInfo::none){
          __ relocate(rspec, Assembler::narrow_oop_operand);
          __ patchable_set48(T9, narrowp);
        } else {
          __ set64(T9, narrowp);
        }

        __ sw(T9, AT, 0);
      }
    } else {
      if( Assembler::is_simm16(disp) ) {
        if(rspec.type() != relocInfo::none){
          __ relocate(rspec, Assembler::narrow_oop_operand);
          __ patchable_set48(T9, narrowp);
        }
        else {
          __ set64(T9, narrowp);
        }
        __ sw(T9, as_Register(base), disp);
      } else {
        __ move(T9, disp);
        __ daddu(AT, as_Register(base), T9);

        if(rspec.type() != relocInfo::none){
          __ relocate(rspec, Assembler::narrow_oop_operand);
          __ patchable_set48(T9, narrowp);
        } else {
          __ set64(T9, narrowp);
        }
        __ sw(T9, AT, 0);
      }
    }
  %}
  // Load a 64-bit long into dst:
  //   dst = *(int64_t*)(base + (index << scale) + disp).
  // A dummy lb from base+0 is emitted first so a null base faults at the
  // start of the pattern (implicit null check). Address built in AT/T9.
  enc_class load_L_enc (mRegL dst, memory mem) %{
    MacroAssembler _masm(&cbuf);
    int base = $mem$$base;
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;
    Register dst_reg = as_Register($dst$$reg);

    // For implicit null check
    __ lb(AT, as_Register(base), 0);

    if( index != 0 ) {
      if (scale == 0) {
        __ daddu(AT, as_Register(base), as_Register(index));
      } else {
        __ dsll(AT, as_Register(index), scale);
        __ daddu(AT, as_Register(base), AT);
      }
      if( Assembler::is_simm16(disp) ) {
        __ ld(dst_reg, AT, disp);
      } else {
        __ move(T9, disp);
        __ daddu(AT, AT, T9);
        __ ld(dst_reg, AT, 0);
      }
    } else {
      if( Assembler::is_simm16(disp) ) {
        __ ld(dst_reg, as_Register(base), disp);
      } else {
        __ move(T9, disp);
        __ daddu(AT, as_Register(base), T9);
        __ ld(dst_reg, AT, 0);
      }
    }
  %}
  // Store a 64-bit long from src to memory.
  // Effective address = base + (index << scale) + disp.
  // AT and T9 are scratch registers.
  enc_class store_L_reg_enc (memory mem, mRegL src) %{
    MacroAssembler _masm(&cbuf);
    int base = $mem$$base;
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;
    Register src_reg = as_Register($src$$reg);

    if( index != 0 ) {
      // AT = base + (index << scale)
      if (scale == 0) {
        __ daddu(AT, as_Register(base), as_Register(index));
      } else {
        __ dsll(AT, as_Register(index), scale);
        __ daddu(AT, as_Register(base), AT);
      }
      if( Assembler::is_simm16(disp) ) {
        __ sd(src_reg, AT, disp);
      } else {
        // Displacement too large for the 16-bit offset field: add via T9.
        __ move(T9, disp);
        __ daddu(AT, AT, T9);
        __ sd(src_reg, AT, 0);
      }
    } else {
      if( Assembler::is_simm16(disp) ) {
        __ sd(src_reg, as_Register(base), disp);
      } else {
        __ move(T9, disp);
        __ daddu(AT, as_Register(base), T9);
        __ sd(src_reg, AT, 0);
      }
    }
  %}
3221 enc_class store_L_immL0_enc (memory mem, immL0 src) %{
3222 MacroAssembler _masm(&cbuf);
3223 int base = $mem$$base;
3224 int index = $mem$$index;
3225 int scale = $mem$$scale;
3226 int disp = $mem$$disp;
3228 if( index != 0 ) {
3229 // For implicit null check
3230 __ lb(AT, as_Register(base), 0);
3232 if (scale == 0) {
3233 __ daddu(AT, as_Register(base), as_Register(index));
3234 } else {
3235 __ dsll(AT, as_Register(index), scale);
3236 __ daddu(AT, as_Register(base), AT);
3237 }
3238 if( Assembler::is_simm16(disp) ) {
3239 __ sd(R0, AT, disp);
3240 } else {
3241 __ move(T9, disp);
3242 __ addu(AT, AT, T9);
3243 __ sd(R0, AT, 0);
3244 }
3245 } else {
3246 if( Assembler::is_simm16(disp) ) {
3247 __ sd(R0, as_Register(base), disp);
3248 } else {
3249 __ move(T9, disp);
3250 __ addu(AT, as_Register(base), T9);
3251 __ sd(R0, AT, 0);
3252 }
3253 }
3254 %}
3256 enc_class store_L_immL_enc (memory mem, immL src) %{
3257 MacroAssembler _masm(&cbuf);
3258 int base = $mem$$base;
3259 int index = $mem$$index;
3260 int scale = $mem$$scale;
3261 int disp = $mem$$disp;
3262 long imm = $src$$constant;
3264 if( index != 0 ) {
3265 if (scale == 0) {
3266 __ daddu(AT, as_Register(base), as_Register(index));
3267 } else {
3268 __ dsll(AT, as_Register(index), scale);
3269 __ daddu(AT, as_Register(base), AT);
3270 }
3271 if( Assembler::is_simm16(disp) ) {
3272 __ set64(T9, imm);
3273 __ sd(T9, AT, disp);
3274 } else {
3275 __ move(T9, disp);
3276 __ addu(AT, AT, T9);
3277 __ set64(T9, imm);
3278 __ sd(T9, AT, 0);
3279 }
3280 } else {
3281 if( Assembler::is_simm16(disp) ) {
3282 __ move(AT, as_Register(base));
3283 __ set64(T9, imm);
3284 __ sd(T9, AT, disp);
3285 } else {
3286 __ move(T9, disp);
3287 __ addu(AT, as_Register(base), T9);
3288 __ set64(T9, imm);
3289 __ sd(T9, AT, 0);
3290 }
3291 }
3292 %}
  // Load a 32-bit float from memory into dst.
  // Effective address = base + (index << scale) + disp.
  // On Loongson, the indexed load gslwxc1 (base + index + simm8 offset)
  // folds the address computation into one instruction where possible.
  enc_class load_F_enc (regF dst, memory mem) %{
    MacroAssembler _masm(&cbuf);
    int base = $mem$$base;
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;
    FloatRegister dst = $dst$$FloatRegister;

    if( index != 0 ) {
      if( Assembler::is_simm16(disp) ) {
        // gslwxc1 only carries an 8-bit signed offset, hence the extra check.
        if( UseLoongsonISA && Assembler::is_simm(disp, 8) ) {
          if (scale == 0) {
            __ gslwxc1(dst, as_Register(base), as_Register(index), disp);
          } else {
            __ dsll(AT, as_Register(index), scale);
            __ gslwxc1(dst, as_Register(base), AT, disp);
          }
        } else {
          if (scale == 0) {
            __ daddu(AT, as_Register(base), as_Register(index));
          } else {
            __ dsll(AT, as_Register(index), scale);
            __ daddu(AT, as_Register(base), AT);
          }
          __ lwc1(dst, AT, disp);
        }
      } else {
        // Large displacement: form base + scaled index in AT, disp in T9.
        if (scale == 0) {
          __ daddu(AT, as_Register(base), as_Register(index));
        } else {
          __ dsll(AT, as_Register(index), scale);
          __ daddu(AT, as_Register(base), AT);
        }
        __ move(T9, disp);
        if( UseLoongsonISA ) {
          __ gslwxc1(dst, AT, T9, 0);
        } else {
          __ daddu(AT, AT, T9);
          __ lwc1(dst, AT, 0);
        }
      }
    } else {
      if( Assembler::is_simm16(disp) ) {
        __ lwc1(dst, as_Register(base), disp);
      } else {
        __ move(T9, disp);
        if( UseLoongsonISA ) {
          __ gslwxc1(dst, as_Register(base), T9, 0);
        } else {
          __ daddu(AT, as_Register(base), T9);
          __ lwc1(dst, AT, 0);
        }
      }
    }
  %}
3350 enc_class store_F_reg_enc (memory mem, regF src) %{
3351 MacroAssembler _masm(&cbuf);
3352 int base = $mem$$base;
3353 int index = $mem$$index;
3354 int scale = $mem$$scale;
3355 int disp = $mem$$disp;
3356 FloatRegister src = $src$$FloatRegister;
3358 if( index != 0 ) {
3359 if( Assembler::is_simm16(disp) ) {
3360 if( UseLoongsonISA && Assembler::is_simm(disp, 8) ) {
3361 if (scale == 0) {
3362 __ gsswxc1(src, as_Register(base), as_Register(index), disp);
3363 } else {
3364 __ dsll(AT, as_Register(index), scale);
3365 __ gsswxc1(src, as_Register(base), AT, disp);
3366 }
3367 } else {
3368 if (scale == 0) {
3369 __ daddu(AT, as_Register(base), as_Register(index));
3370 } else {
3371 __ dsll(AT, as_Register(index), scale);
3372 __ daddu(AT, as_Register(base), AT);
3373 }
3374 __ swc1(src, AT, disp);
3375 }
3376 } else {
3377 if (scale == 0) {
3378 __ daddu(AT, as_Register(base), as_Register(index));
3379 } else {
3380 __ dsll(AT, as_Register(index), scale);
3381 __ daddu(AT, as_Register(base), AT);
3382 }
3383 __ move(T9, disp);
3384 if( UseLoongsonISA ) {
3385 __ gsswxc1(src, AT, T9, 0);
3386 } else {
3387 __ daddu(AT, AT, T9);
3388 __ swc1(src, AT, 0);
3389 }
3390 }
3391 } else {
3392 if( Assembler::is_simm16(disp) ) {
3393 __ swc1(src, as_Register(base), disp);
3394 } else {
3395 __ move(T9, disp);
3396 if( UseLoongsonISA ) {
3397 __ gslwxc1(src, as_Register(base), T9, 0);
3398 } else {
3399 __ daddu(AT, as_Register(base), T9);
3400 __ swc1(src, AT, 0);
3401 }
3402 }
3403 }
3404 %}
3406 enc_class load_D_enc (regD dst, memory mem) %{
3407 MacroAssembler _masm(&cbuf);
3408 int base = $mem$$base;
3409 int index = $mem$$index;
3410 int scale = $mem$$scale;
3411 int disp = $mem$$disp;
3412 FloatRegister dst_reg = as_FloatRegister($dst$$reg);
3414 if( index != 0 ) {
3415 if( Assembler::is_simm16(disp) ) {
3416 if( UseLoongsonISA && Assembler::is_simm(disp, 8) ) {
3417 if (scale == 0) {
3418 __ gsldxc1(dst_reg, as_Register(base), as_Register(index), disp);
3419 } else {
3420 __ dsll(AT, as_Register(index), scale);
3421 __ gsldxc1(dst_reg, as_Register(base), AT, disp);
3422 }
3423 } else {
3424 if (scale == 0) {
3425 __ daddu(AT, as_Register(base), as_Register(index));
3426 } else {
3427 __ dsll(AT, as_Register(index), scale);
3428 __ daddu(AT, as_Register(base), AT);
3429 }
3430 __ ldc1(dst_reg, AT, disp);
3431 }
3432 } else {
3433 if (scale == 0) {
3434 __ daddu(AT, as_Register(base), as_Register(index));
3435 } else {
3436 __ dsll(AT, as_Register(index), scale);
3437 __ daddu(AT, as_Register(base), AT);
3438 }
3439 __ move(T9, disp);
3440 if( UseLoongsonISA ) {
3441 __ gsldxc1(dst_reg, AT, T9, 0);
3442 } else {
3443 __ addu(AT, AT, T9);
3444 __ ldc1(dst_reg, AT, 0);
3445 }
3446 }
3447 } else {
3448 if( Assembler::is_simm16(disp) ) {
3449 __ ldc1(dst_reg, as_Register(base), disp);
3450 } else {
3451 __ move(T9, disp);
3452 if( UseLoongsonISA ) {
3453 __ gsldxc1(dst_reg, as_Register(base), T9, 0);
3454 } else {
3455 __ addu(AT, as_Register(base), T9);
3456 __ ldc1(dst_reg, AT, 0);
3457 }
3458 }
3459 }
3460 %}
3462 enc_class store_D_reg_enc (memory mem, regD src) %{
3463 MacroAssembler _masm(&cbuf);
3464 int base = $mem$$base;
3465 int index = $mem$$index;
3466 int scale = $mem$$scale;
3467 int disp = $mem$$disp;
3468 FloatRegister src_reg = as_FloatRegister($src$$reg);
3470 if( index != 0 ) {
3471 if( Assembler::is_simm16(disp) ) {
3472 if( UseLoongsonISA && Assembler::is_simm(disp, 8) ) {
3473 if (scale == 0) {
3474 __ gssdxc1(src_reg, as_Register(base), as_Register(index), disp);
3475 } else {
3476 __ dsll(AT, as_Register(index), scale);
3477 __ gssdxc1(src_reg, as_Register(base), AT, disp);
3478 }
3479 } else {
3480 if (scale == 0) {
3481 __ daddu(AT, as_Register(base), as_Register(index));
3482 } else {
3483 __ dsll(AT, as_Register(index), scale);
3484 __ daddu(AT, as_Register(base), AT);
3485 }
3486 __ sdc1(src_reg, AT, disp);
3487 }
3488 } else {
3489 if (scale == 0) {
3490 __ daddu(AT, as_Register(base), as_Register(index));
3491 } else {
3492 __ dsll(AT, as_Register(index), scale);
3493 __ daddu(AT, as_Register(base), AT);
3494 }
3495 __ move(T9, disp);
3496 if( UseLoongsonISA ) {
3497 __ gssdxc1(src_reg, AT, T9, 0);
3498 } else {
3499 __ addu(AT, AT, T9);
3500 __ sdc1(src_reg, AT, 0);
3501 }
3502 }
3503 } else {
3504 if( Assembler::is_simm16(disp) ) {
3505 __ sdc1(src_reg, as_Register(base), disp);
3506 } else {
3507 __ move(T9, disp);
3508 if( UseLoongsonISA ) {
3509 __ gssdxc1(src_reg, as_Register(base), T9, 0);
3510 } else {
3511 __ addu(AT, as_Register(base), T9);
3512 __ sdc1(src_reg, AT, 0);
3513 }
3514 }
3515 }
3516 %}
  // Emit a patchable call to a VM runtime entry point, preceded by
  // runtime_call relocation info so the target can be patched later.
  enc_class Java_To_Runtime (method meth) %{ // CALL Java_To_Runtime, Java_To_Runtime_Leaf
    MacroAssembler _masm(&cbuf);
    // This is the instruction starting address for relocation info.
    __ block_comment("Java_To_Runtime");
    cbuf.set_insts_mark();
    __ relocate(relocInfo::runtime_call_type);

    __ patchable_call((address)$meth$$method);
  %}
  enc_class Java_Static_Call (method meth) %{ // JAVA STATIC CALL
    // CALL to fixup routine. Fixup routine uses ScopeDesc info to determine
    // who we intended to call.
    MacroAssembler _masm(&cbuf);
    cbuf.set_insts_mark();

    // Pick the relocation type: a runtime stub when there is no Java
    // target method, opt_virtual for statically-bound virtual calls,
    // otherwise a plain static call.
    if ( !_method ) {
      __ relocate(relocInfo::runtime_call_type);
    } else if(_optimized_virtual) {
      __ relocate(relocInfo::opt_virtual_call_type);
    } else {
      __ relocate(relocInfo::static_call_type);
    }

    __ patchable_call((address)($meth$$method));
    if( _method ) { // Emit stub for static call
      emit_java_to_interp(cbuf);
    }
  %}
3549 /*
3550 * [Ref: LIR_Assembler::ic_call() ]
3551 */
  // Emit an inline-cache call (IC register setup + patchable call).
  enc_class Java_Dynamic_Call (method meth) %{ // JAVA DYNAMIC CALL
    MacroAssembler _masm(&cbuf);
    __ block_comment("Java_Dynamic_Call");
    __ ic_call((address)$meth$$method);
  %}
  // Materialize a flags value after a fast lock/unlock sequence:
  //   flags = 0           if AT == 0
  //   flags = 0xFFFFFFFF  otherwise
  // NOTE(review): AT is presumed to hold the result of the preceding
  // fast_lock/fast_unlock emission — confirm at the instruct sites.
  enc_class Set_Flags_After_Fast_Lock_Unlock(FlagsReg cr) %{
    Register flags = $cr$$Register;
    Label L;

    MacroAssembler _masm(&cbuf);

    // flags = 0 (addu with R0, R0 zeroes the register)
    __ addu(flags, R0, R0);
    __ beq(AT, R0, L);
    __ delayed()->nop();
    __ move(flags, 0xFFFFFFFF);
    __ bind(L);
  %}
  // Slow-path subtype check: result = 0 if sub is a subtype of super,
  // 1 on a miss. T9 is used as an extra temp.
  enc_class enc_PartialSubtypeCheck(mRegP result, mRegP sub, mRegP super, mRegI tmp) %{
    Register result = $result$$Register;
    Register sub = $sub$$Register;
    Register super = $super$$Register;
    Register length = $tmp$$Register;
    Register tmp = T9;
    Label miss;

    /* 2012/9/28 Jin: result may be the same as sub,
     * so result must not be written before the check completes.
     * 47c B40: # B21 B41 <- B20 Freq: 0.155379
     * 47c partialSubtypeCheck result=S1, sub=S1, super=S3, length=S0
     * 4bc mov S2, NULL #@loadConP
     * 4c0 beq S1, S2, B21 #@branchConP P=0.999999 C=-1.000000
     */
    MacroAssembler _masm(&cbuf);
    Label done;
    // Falls through on success, branches to miss on failure.
    __ check_klass_subtype_slow_path(sub, super, length, tmp,
                                     NULL, &miss,
                                     /*set_cond_codes:*/ true);
    /* 2013/7/22 Jin: Refer to X86_64's RDI: 0 means "is a subtype". */
    __ move(result, 0);
    __ b(done);
    __ nop();

    __ bind(miss);
    __ move(result, 1);
    __ bind(done);
  %}
3601 %}
3604 //---------MIPS FRAME--------------------------------------------------------------
3605 // Definition of frame structure and management information.
3606 //
3607 // S T A C K L A Y O U T Allocators stack-slot number
3608 // | (to get allocators register number
3609 // G Owned by | | v add SharedInfo::stack0)
3610 // r CALLER | |
3611 // o | +--------+ pad to even-align allocators stack-slot
3612 // w V | pad0 | numbers; owned by CALLER
3613 // t -----------+--------+----> Matcher::_in_arg_limit, unaligned
3614 // h ^ | in | 5
3615 // | | args | 4 Holes in incoming args owned by SELF
3616 // | | old | | 3
3617 // | | SP-+--------+----> Matcher::_old_SP, even aligned
3618 // v | | ret | 3 return address
3619 // Owned by +--------+
3620 // Self | pad2 | 2 pad to align old SP
3621 // | +--------+ 1
3622 // | | locks | 0
3623 // | +--------+----> SharedInfo::stack0, even aligned
3624 // | | pad1 | 11 pad to align new SP
3625 // | +--------+
3626 // | | | 10
3627 // | | spills | 9 spills
3628 // V | | 8 (pad0 slot for callee)
3629 // -----------+--------+----> Matcher::_out_arg_limit, unaligned
3630 // ^ | out | 7
3631 // | | args | 6 Holes in outgoing args owned by CALLEE
3632 // Owned by new | |
3633 // Callee SP-+--------+----> Matcher::_new_SP, even aligned
3634 // | |
3635 //
3636 // Note 1: Only region 8-11 is determined by the allocator. Region 0-5 is
3637 // known from SELF's arguments and the Java calling convention.
3638 // Region 6-7 is determined per call site.
3639 // Note 2: If the calling convention leaves holes in the incoming argument
3640 // area, those holes are owned by SELF. Holes in the outgoing area
// are owned by the CALLEE. Holes should not be necessary in the
3642 // incoming area, as the Java calling convention is completely under
3643 // the control of the AD file. Doubles can be sorted and packed to
// avoid holes. Holes in the outgoing arguments may be necessary for
3645 // varargs C calling conventions.
3646 // Note 3: Region 0-3 is even aligned, with pad2 as needed. Region 3-5 is
3647 // even aligned with pad0 as needed.
3648 // Region 6 is even aligned. Region 6-7 is NOT even aligned;
3649 // region 6-11 is even aligned; it may be padded out more so that
3650 // the region from SP to FP meets the minimum stack alignment.
3651 // Note 4: For I2C adapters, the incoming FP may not meet the minimum stack
3652 // alignment. Region 11, pad1, may be dynamically extended so that
3653 // SP meets the minimum alignment.
// C2 frame description for the MIPS port: stack growth direction, the
// registers fixed by the compiled<->interpreter calling convention, stack
// alignment, and the Java/C argument- and return-value conventions.
frame %{

  stack_direction(TOWARDS_LOW);

  // These two registers define part of the calling convention
  // between compiled code and the interpreter.
  // SEE StartI2CNode::calling_convention & StartC2INode::calling_convention & StartOSRNode::calling_convention
  // for more information. by yjl 3/16/2006

  inline_cache_reg(T1); // Inline Cache Register
  interpreter_method_oop_reg(S3); // Method Oop Register when calling interpreter
  /*
  inline_cache_reg(T1); // Inline Cache Register or methodOop for I2C
  interpreter_arg_ptr_reg(A0); // Argument pointer for I2C adapters
  */

  // Optional: name the operand used by cisc-spilling to access [stack_pointer + offset]
  cisc_spilling_operand_name(indOffset32);

  // Number of stack slots consumed by locking an object
  // generate Compile::sync_stack_slots
#ifdef _LP64
  sync_stack_slots(2);
#else
  sync_stack_slots(1);
#endif

  frame_pointer(SP);

  // Interpreter stores its frame pointer in a register which is
  // stored to the stack by I2CAdaptors.
  // I2CAdaptors convert from interpreted java to compiled java.

  interpreter_frame_pointer(FP);

  // generate Matcher::stack_alignment
  stack_alignment(StackAlignmentInBytes); //wordSize = sizeof(char*);

  // Number of stack slots between incoming argument block and the start of
  // a new frame. The PROLOG must add this many slots to the stack. The
  // EPILOG must remove this many slots. Intel needs one slot for
  // return address.
  // generate Matcher::in_preserve_stack_slots
  //in_preserve_stack_slots(VerifyStackAtCalls + 2); //Now VerifyStackAtCalls is defined as false ! Leave one stack slot for ra and fp
  in_preserve_stack_slots(4); //Now VerifyStackAtCalls is defined as false ! Leave two stack slots for ra and fp

  // Number of outgoing stack slots killed above the out_preserve_stack_slots
  // for calls to C. Supports the var-args backing area for register parms.
  varargs_C_out_slots_killed(0);

  // The after-PROLOG location of the return address. Location of
  // return address specifies a type (REG or STACK) and a number
  // representing the register number (i.e. - use a register name) or
  // stack slot.
  // Ret Addr is on stack in slot 0 if no locks or verification or alignment.
  // Otherwise, it is above the locks and verification slot and alignment word
  //return_addr(STACK -1+ round_to(1+VerifyStackAtCalls+Compile::current()->sync()*Compile::current()->sync_stack_slots(),WordsPerLong));
  return_addr(REG RA);

  // Body of function which returns an integer array locating
  // arguments either in registers or in stack slots. Passed an array
  // of ideal registers called "sig" and a "length" count. Stack-slot
  // offsets are based on outgoing arguments, i.e. a CALLER setting up
  // arguments for a CALLEE. Incoming stack arguments are
  // automatically biased by the preserve_stack_slots field above.

  // will generated to Matcher::calling_convention(OptoRegPair *sig, uint length, bool is_outgoing)
  // StartNode::calling_convention call this. by yjl 3/16/2006
  calling_convention %{
    SharedRuntime::java_calling_convention(sig_bt, regs, length, false);
  %}

  // Body of function which returns an integer array locating
  // arguments either in registers or in stack slots. Passed an array
  // of ideal registers called "sig" and a "length" count. Stack-slot
  // offsets are based on outgoing arguments, i.e. a CALLER setting up
  // arguments for a CALLEE. Incoming stack arguments are
  // automatically biased by the preserve_stack_slots field above.

  // SEE CallRuntimeNode::calling_convention for more information. by yjl 3/16/2006
  c_calling_convention %{
    (void) SharedRuntime::c_calling_convention(sig_bt, regs, /*regs2=*/NULL, length);
  %}

  // Location of C & interpreter return values
  // register(s) contain(s) return value for Op_StartI2C and Op_StartOSR.
  // SEE Matcher::match. by yjl 3/16/2006
  // Integer/pointer results in V0(+V0_H), float/double results in F0(+F0_H).
  c_return_value %{
    assert( ideal_reg >= Op_RegI && ideal_reg <= Op_RegL, "only return normal values" );
    /* -- , -- , Op_RegN, Op_RegI, Op_RegP, Op_RegF, Op_RegD, Op_RegL */
    static int lo[Op_RegL+1] = { 0, 0, V0_num, V0_num, V0_num, F0_num, F0_num, V0_num };
    static int hi[Op_RegL+1] = { 0, 0, OptoReg::Bad, OptoReg::Bad, V0_H_num, OptoReg::Bad, F0_H_num, V0_H_num };
    return OptoRegPair(hi[ideal_reg],lo[ideal_reg]);
  %}

  // Location of return values
  // register(s) contain(s) return value for Op_StartC2I and Op_Start.
  // SEE Matcher::match. by yjl 3/16/2006
  // Same layout as c_return_value above.

  return_value %{
    assert( ideal_reg >= Op_RegI && ideal_reg <= Op_RegL, "only return normal values" );
    /* -- , -- , Op_RegN, Op_RegI, Op_RegP, Op_RegF, Op_RegD, Op_RegL */
    static int lo[Op_RegL+1] = { 0, 0, V0_num, V0_num, V0_num, F0_num, F0_num, V0_num };
    static int hi[Op_RegL+1] = { 0, 0, OptoReg::Bad, OptoReg::Bad, V0_H_num, OptoReg::Bad, F0_H_num, V0_H_num};
    return OptoRegPair(hi[ideal_reg],lo[ideal_reg]);
  %}

%}
3771 //----------ATTRIBUTES---------------------------------------------------------
3772 //----------Operand Attributes-------------------------------------------------
3773 op_attrib op_cost(0); // Required cost attribute
3775 //----------Instruction Attributes---------------------------------------------
3776 ins_attrib ins_cost(100); // Required cost attribute
3777 ins_attrib ins_size(32); // Required size attribute (in bits)
3778 ins_attrib ins_pc_relative(0); // Required PC Relative flag
3779 ins_attrib ins_short_branch(0); // Required flag: is this instruction a
3780 // non-matching short branch variant of some
3781 // long branch?
3782 ins_attrib ins_alignment(4); // Required alignment attribute (must be a power of 2)
3783 // specifies the alignment that some part of the instruction (not
3784 // necessarily the start) requires. If > 1, a compute_padding()
3785 // function must be provided for the instruction
3787 //----------OPERANDS-----------------------------------------------------------
3788 // Operand definitions must precede instruction definitions for correct parsing
3789 // in the ADLC because operands constitute user defined types which are used in
3790 // instruction definitions.
3792 // Vectors
// 64-bit vector operand, allocated in the double FP register class.
operand vecD() %{
  constraint(ALLOC_IN_RC(dbl_reg));
  match(VecD);

  format %{ %}
  interface(REG_INTER);
%}

// Flags register, used as output of compare instructions
operand FlagsReg() %{
  constraint(ALLOC_IN_RC(mips_flags));
  match(RegFlags);

  format %{ "EFLAGS" %}
  interface(REG_INTER);
%}
3810 //----------Simple Operands----------------------------------------------------
3811 //TODO: Should we need to define some more special immediate number ?
3812 // Immediate Operands
3813 // Integer Immediate
// Generic 32-bit integer immediate
operand immI() %{
  match(ConI);
  //TODO: should not match immI8 here LEE
  match(immI8);

  op_cost(20);
  format %{ %}
  interface(CONST_INTER);
%}

// Long Immediate 8-bit
operand immL8()
%{
  predicate(-0x80L <= n->get_long() && n->get_long() < 0x80L);
  match(ConL);

  op_cost(5);
  format %{ %}
  interface(CONST_INTER);
%}

// Constant for test vs zero
operand immI0() %{
  predicate(n->get_int() == 0);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Constant for increment
operand immI1() %{
  predicate(n->get_int() == 1);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Constant for decrement
operand immI_M1() %{
  predicate(n->get_int() == -1);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Constant for Integer.MAX_VALUE (0x7fffffff)
operand immI_MaxI() %{
  predicate(n->get_int() == 2147483647);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Valid scale values for addressing modes
operand immI2() %{
  predicate(0 <= n->get_int() && (n->get_int() <= 3));
  match(ConI);

  format %{ %}
  interface(CONST_INTER);
%}

// 8-bit signed integer immediate
operand immI8() %{
  predicate((-128 <= n->get_int()) && (n->get_int() <= 127));
  match(ConI);

  op_cost(5);
  format %{ %}
  interface(CONST_INTER);
%}

// 16-bit signed integer immediate (fits MIPS simm16 fields)
operand immI16() %{
  predicate((-32768 <= n->get_int()) && (n->get_int() <= 32767));
  match(ConI);

  op_cost(10);
  format %{ %}
  interface(CONST_INTER);
%}

// Constant for long shifts
operand immI_32() %{
  predicate( n->get_int() == 32 );
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

operand immI_63() %{
  predicate( n->get_int() == 63 );
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Shift amount in the 32-bit range
operand immI_0_31() %{
  predicate( n->get_int() >= 0 && n->get_int() <= 31 );
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Operand for non-negative integer mask
operand immI_nonneg_mask() %{
  predicate( (n->get_int() >= 0) && (Assembler::is_int_mask(n->get_int()) != -1) );
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Shift amount in the upper half of a 64-bit shift
operand immI_32_63() %{
  predicate( n->get_int() >= 32 && n->get_int() <= 63 );
  match(ConI);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}

// Immediate whose negation still fits in a signed 16-bit field
// (range shifted by one vs immI16: [-32767, 32768])
operand immI16_sub() %{
  predicate((-32767 <= n->get_int()) && (n->get_int() <= 32768));
  match(ConI);

  op_cost(10);
  format %{ %}
  interface(CONST_INTER);
%}

operand immI_0_32767() %{
  predicate( n->get_int() >= 0 && n->get_int() <= 32767 );
  match(ConI);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}

operand immI_0_65535() %{
  predicate( n->get_int() >= 0 && n->get_int() <= 65535 );
  match(ConI);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}

operand immI_1() %{
  predicate( n->get_int() == 1 );
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

operand immI_2() %{
  predicate( n->get_int() == 2 );
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

operand immI_3() %{
  predicate( n->get_int() == 3 );
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

operand immI_7() %{
  predicate( n->get_int() == 7 );
  match(ConI);

  format %{ %}
  interface(CONST_INTER);
%}

// Immediates for special shifts (sign extend)

// Constants for increment
operand immI_16() %{
  predicate( n->get_int() == 16 );
  match(ConI);

  format %{ %}
  interface(CONST_INTER);
%}

operand immI_24() %{
  predicate( n->get_int() == 24 );
  match(ConI);

  format %{ %}
  interface(CONST_INTER);
%}

// Constant for byte-wide masking
operand immI_255() %{
  predicate( n->get_int() == 255 );
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Constant for short-wide masking
operand immI_65535() %{
  predicate( n->get_int() == 65535 );
  match(ConI);

  op_cost(5);
  format %{ %}
  interface(CONST_INTER);
%}

operand immI_65536() %{
  predicate( n->get_int() == 65536 );
  match(ConI);

  op_cost(5);
  format %{ %}
  interface(CONST_INTER);
%}

operand immI_M65536() %{
  predicate( n->get_int() == -65536 );
  match(ConI);

  op_cost(5);
  format %{ %}
  interface(CONST_INTER);
%}
// Pointer Immediate
operand immP() %{
  match(ConP);

  op_cost(10);
  format %{ %}
  interface(CONST_INTER);
%}

// Pointer immediate that fits in 31 bits and needs no relocation
operand immP31()
%{
  predicate(n->as_Type()->type()->reloc() == relocInfo::none
            && (n->get_ptr() >> 31) == 0);
  match(ConP);

  op_cost(5);
  format %{ %}
  interface(CONST_INTER);
%}

// NULL Pointer Immediate
operand immP0() %{
  predicate( n->get_ptr() == 0 );
  match(ConP);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}

// Pointer Immediate: 64-bit, materialized with set64
operand immP_set() %{
  match(ConP);

  op_cost(5);
  // formats are generated automatically for constants and base registers
  format %{ %}
  interface(CONST_INTER);
%}

// Pointer Immediate: 64-bit, loaded from the constant table
// (oops, or constants too expensive to materialize inline)
operand immP_load() %{
  predicate(n->bottom_type()->isa_oop_ptr() || (MacroAssembler::insts_for_set64(n->get_ptr()) > 3));
  match(ConP);

  op_cost(5);
  // formats are generated automatically for constants and base registers
  format %{ %}
  interface(CONST_INTER);
%}

// Pointer Immediate: 64-bit, non-oop and cheap to materialize (<= 3 insts)
operand immP_no_oop_cheap() %{
  predicate(!n->bottom_type()->isa_oop_ptr() && (MacroAssembler::insts_for_set64(n->get_ptr()) <= 3));
  match(ConP);

  op_cost(5);
  // formats are generated automatically for constants and base registers
  format %{ %}
  interface(CONST_INTER);
%}

// Pointer for polling page
operand immP_poll() %{
  predicate(n->get_ptr() != 0 && n->get_ptr() == (intptr_t)os::get_polling_page());
  match(ConP);
  op_cost(5);

  format %{ %}
  interface(CONST_INTER);
%}

// Narrow oop immediate
operand immN() %{
  match(ConN);

  op_cost(10);
  format %{ %}
  interface(CONST_INTER);
%}

// Narrow klass immediate
operand immNKlass() %{
  match(ConNKlass);

  op_cost(10);
  format %{ %}
  interface(CONST_INTER);
%}

// NULL narrow oop immediate
operand immN0() %{
  predicate(n->get_narrowcon() == 0);
  match(ConN);

  op_cost(5);
  format %{ %}
  interface(CONST_INTER);
%}
// Long Immediate
operand immL() %{
  match(ConL);

  op_cost(20);
  format %{ %}
  interface(CONST_INTER);
%}

// Long Immediate zero
operand immL0() %{
  predicate( n->get_long() == 0L );
  match(ConL);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}

operand immL7() %{
  predicate( n->get_long() == 7L );
  match(ConL);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}

operand immL_M1() %{
  predicate( n->get_long() == -1L );
  match(ConL);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}

// bit 0..2 zero
operand immL_M8() %{
  predicate( n->get_long() == -8L );
  match(ConL);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}

// bit 2 zero
operand immL_M5() %{
  predicate( n->get_long() == -5L );
  match(ConL);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}

// bit 1..2 zero
operand immL_M7() %{
  predicate( n->get_long() == -7L );
  match(ConL);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}

// bit 0..1 zero
operand immL_M4() %{
  predicate( n->get_long() == -4L );
  match(ConL);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}

// bit 3..6 zero
operand immL_M121() %{
  predicate( n->get_long() == -121L );
  match(ConL);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}

// Long immediate from 0 to 127.
// Used for a shorter form of long mul by 10.
operand immL_127() %{
  predicate((0 <= n->get_long()) && (n->get_long() <= 127));
  match(ConL);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}

// Operand for non-negative long mask
operand immL_nonneg_mask() %{
  predicate( (n->get_long() >= 0) && (Assembler::is_jlong_mask(n->get_long()) != -1) );
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

operand immL_0_65535() %{
  predicate( n->get_long() >= 0 && n->get_long() <= 65535 );
  match(ConL);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}

// Long Immediate: cheap (materialize in <= 3 instructions)
operand immL_cheap() %{
  predicate(MacroAssembler::insts_for_set64(n->get_long()) <= 3);
  match(ConL);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}

// Long Immediate: expensive (materialize in > 3 instructions)
operand immL_expensive() %{
  predicate(MacroAssembler::insts_for_set64(n->get_long()) > 3);
  match(ConL);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}

// 16-bit signed long immediate (fits MIPS simm16 fields)
operand immL16() %{
  predicate((-32768 <= n->get_long()) && (n->get_long() <= 32767));
  match(ConL);

  op_cost(10);
  format %{ %}
  interface(CONST_INTER);
%}

// Long immediate whose negation still fits in a signed 16-bit field
operand immL16_sub() %{
  predicate((-32767 <= n->get_long()) && (n->get_long() <= 32768));
  match(ConL);

  op_cost(10);
  format %{ %}
  interface(CONST_INTER);
%}

// Long Immediate: low 32-bit mask
operand immL_32bits() %{
  predicate(n->get_long() == 0xFFFFFFFFL);
  match(ConL);
  op_cost(20);

  format %{ %}
  interface(CONST_INTER);
%}

// Long Immediate 32-bit signed
operand immL32()
%{
  predicate(n->get_long() == (int) (n->get_long()));
  match(ConL);

  op_cost(15);
  format %{ %}
  interface(CONST_INTER);
%}
//single-precision floating-point zero (compared via its raw bit pattern)
operand immF0() %{
  predicate(jint_cast(n->getf()) == 0);
  match(ConF);

  op_cost(5);
  format %{ %}
  interface(CONST_INTER);
%}

//single-precision floating-point immediate
operand immF() %{
  match(ConF);

  op_cost(20);
  format %{ %}
  interface(CONST_INTER);
%}

//double-precision floating-point zero (compared via its raw bit pattern)
operand immD0() %{
  predicate(jlong_cast(n->getd()) == 0);
  match(ConD);

  op_cost(5);
  format %{ %}
  interface(CONST_INTER);
%}

//double-precision floating-point immediate
operand immD() %{
  match(ConD);

  op_cost(20);
  format %{ %}
  interface(CONST_INTER);
%}
4380 // Register Operands
4381 // Integer Register
4382 operand mRegI() %{
4383 constraint(ALLOC_IN_RC(int_reg));
4384 match(RegI);
4386 format %{ %}
4387 interface(REG_INTER);
4388 %}
4390 operand no_Ax_mRegI() %{
4391 constraint(ALLOC_IN_RC(no_Ax_int_reg));
4392 match(RegI);
4393 match(mRegI);
4395 format %{ %}
4396 interface(REG_INTER);
4397 %}
4399 operand mS0RegI() %{
4400 constraint(ALLOC_IN_RC(s0_reg));
4401 match(RegI);
4402 match(mRegI);
4404 format %{ "S0" %}
4405 interface(REG_INTER);
4406 %}
4408 operand mS1RegI() %{
4409 constraint(ALLOC_IN_RC(s1_reg));
4410 match(RegI);
4411 match(mRegI);
4413 format %{ "S1" %}
4414 interface(REG_INTER);
4415 %}
4417 operand mS2RegI() %{
4418 constraint(ALLOC_IN_RC(s2_reg));
4419 match(RegI);
4420 match(mRegI);
4422 format %{ "S2" %}
4423 interface(REG_INTER);
4424 %}
4426 operand mS3RegI() %{
4427 constraint(ALLOC_IN_RC(s3_reg));
4428 match(RegI);
4429 match(mRegI);
4431 format %{ "S3" %}
4432 interface(REG_INTER);
4433 %}
4435 operand mS4RegI() %{
4436 constraint(ALLOC_IN_RC(s4_reg));
4437 match(RegI);
4438 match(mRegI);
4440 format %{ "S4" %}
4441 interface(REG_INTER);
4442 %}
4444 operand mS5RegI() %{
4445 constraint(ALLOC_IN_RC(s5_reg));
4446 match(RegI);
4447 match(mRegI);
4449 format %{ "S5" %}
4450 interface(REG_INTER);
4451 %}
4453 operand mS6RegI() %{
4454 constraint(ALLOC_IN_RC(s6_reg));
4455 match(RegI);
4456 match(mRegI);
4458 format %{ "S6" %}
4459 interface(REG_INTER);
4460 %}
4462 operand mS7RegI() %{
4463 constraint(ALLOC_IN_RC(s7_reg));
4464 match(RegI);
4465 match(mRegI);
4467 format %{ "S7" %}
4468 interface(REG_INTER);
4469 %}
4472 operand mT0RegI() %{
4473 constraint(ALLOC_IN_RC(t0_reg));
4474 match(RegI);
4475 match(mRegI);
4477 format %{ "T0" %}
4478 interface(REG_INTER);
4479 %}
4481 operand mT1RegI() %{
4482 constraint(ALLOC_IN_RC(t1_reg));
4483 match(RegI);
4484 match(mRegI);
4486 format %{ "T1" %}
4487 interface(REG_INTER);
4488 %}
4490 operand mT2RegI() %{
4491 constraint(ALLOC_IN_RC(t2_reg));
4492 match(RegI);
4493 match(mRegI);
4495 format %{ "T2" %}
4496 interface(REG_INTER);
4497 %}
4499 operand mT3RegI() %{
4500 constraint(ALLOC_IN_RC(t3_reg));
4501 match(RegI);
4502 match(mRegI);
4504 format %{ "T3" %}
4505 interface(REG_INTER);
4506 %}
4508 operand mT8RegI() %{
4509 constraint(ALLOC_IN_RC(t8_reg));
4510 match(RegI);
4511 match(mRegI);
4513 format %{ "T8" %}
4514 interface(REG_INTER);
4515 %}
4517 operand mT9RegI() %{
4518 constraint(ALLOC_IN_RC(t9_reg));
4519 match(RegI);
4520 match(mRegI);
4522 format %{ "T9" %}
4523 interface(REG_INTER);
4524 %}
4526 operand mA0RegI() %{
4527 constraint(ALLOC_IN_RC(a0_reg));
4528 match(RegI);
4529 match(mRegI);
4531 format %{ "A0" %}
4532 interface(REG_INTER);
4533 %}
4535 operand mA1RegI() %{
4536 constraint(ALLOC_IN_RC(a1_reg));
4537 match(RegI);
4538 match(mRegI);
4540 format %{ "A1" %}
4541 interface(REG_INTER);
4542 %}
4544 operand mA2RegI() %{
4545 constraint(ALLOC_IN_RC(a2_reg));
4546 match(RegI);
4547 match(mRegI);
4549 format %{ "A2" %}
4550 interface(REG_INTER);
4551 %}
4553 operand mA3RegI() %{
4554 constraint(ALLOC_IN_RC(a3_reg));
4555 match(RegI);
4556 match(mRegI);
4558 format %{ "A3" %}
4559 interface(REG_INTER);
4560 %}
4562 operand mA4RegI() %{
4563 constraint(ALLOC_IN_RC(a4_reg));
4564 match(RegI);
4565 match(mRegI);
4567 format %{ "A4" %}
4568 interface(REG_INTER);
4569 %}
4571 operand mA5RegI() %{
4572 constraint(ALLOC_IN_RC(a5_reg));
4573 match(RegI);
4574 match(mRegI);
4576 format %{ "A5" %}
4577 interface(REG_INTER);
4578 %}
4580 operand mA6RegI() %{
4581 constraint(ALLOC_IN_RC(a6_reg));
4582 match(RegI);
4583 match(mRegI);
4585 format %{ "A6" %}
4586 interface(REG_INTER);
4587 %}
4589 operand mA7RegI() %{
4590 constraint(ALLOC_IN_RC(a7_reg));
4591 match(RegI);
4592 match(mRegI);
4594 format %{ "A7" %}
4595 interface(REG_INTER);
4596 %}
4598 operand mV0RegI() %{
4599 constraint(ALLOC_IN_RC(v0_reg));
4600 match(RegI);
4601 match(mRegI);
4603 format %{ "V0" %}
4604 interface(REG_INTER);
4605 %}
4607 operand mV1RegI() %{
4608 constraint(ALLOC_IN_RC(v1_reg));
4609 match(RegI);
4610 match(mRegI);
4612 format %{ "V1" %}
4613 interface(REG_INTER);
4614 %}
4616 operand mRegN() %{
4617 constraint(ALLOC_IN_RC(int_reg));
4618 match(RegN);
4620 format %{ %}
4621 interface(REG_INTER);
4622 %}
4624 operand t0_RegN() %{
4625 constraint(ALLOC_IN_RC(t0_reg));
4626 match(RegN);
4627 match(mRegN);
4629 format %{ %}
4630 interface(REG_INTER);
4631 %}
4633 operand t1_RegN() %{
4634 constraint(ALLOC_IN_RC(t1_reg));
4635 match(RegN);
4636 match(mRegN);
4638 format %{ %}
4639 interface(REG_INTER);
4640 %}
4642 operand t2_RegN() %{
4643 constraint(ALLOC_IN_RC(t2_reg));
4644 match(RegN);
4645 match(mRegN);
4647 format %{ %}
4648 interface(REG_INTER);
4649 %}
4651 operand t3_RegN() %{
4652 constraint(ALLOC_IN_RC(t3_reg));
4653 match(RegN);
4654 match(mRegN);
4656 format %{ %}
4657 interface(REG_INTER);
4658 %}
4660 operand t8_RegN() %{
4661 constraint(ALLOC_IN_RC(t8_reg));
4662 match(RegN);
4663 match(mRegN);
4665 format %{ %}
4666 interface(REG_INTER);
4667 %}
4669 operand t9_RegN() %{
4670 constraint(ALLOC_IN_RC(t9_reg));
4671 match(RegN);
4672 match(mRegN);
4674 format %{ %}
4675 interface(REG_INTER);
4676 %}
4678 operand a0_RegN() %{
4679 constraint(ALLOC_IN_RC(a0_reg));
4680 match(RegN);
4681 match(mRegN);
4683 format %{ %}
4684 interface(REG_INTER);
4685 %}
4687 operand a1_RegN() %{
4688 constraint(ALLOC_IN_RC(a1_reg));
4689 match(RegN);
4690 match(mRegN);
4692 format %{ %}
4693 interface(REG_INTER);
4694 %}
4696 operand a2_RegN() %{
4697 constraint(ALLOC_IN_RC(a2_reg));
4698 match(RegN);
4699 match(mRegN);
4701 format %{ %}
4702 interface(REG_INTER);
4703 %}
4705 operand a3_RegN() %{
4706 constraint(ALLOC_IN_RC(a3_reg));
4707 match(RegN);
4708 match(mRegN);
4710 format %{ %}
4711 interface(REG_INTER);
4712 %}
4714 operand a4_RegN() %{
4715 constraint(ALLOC_IN_RC(a4_reg));
4716 match(RegN);
4717 match(mRegN);
4719 format %{ %}
4720 interface(REG_INTER);
4721 %}
4723 operand a5_RegN() %{
4724 constraint(ALLOC_IN_RC(a5_reg));
4725 match(RegN);
4726 match(mRegN);
4728 format %{ %}
4729 interface(REG_INTER);
4730 %}
4732 operand a6_RegN() %{
4733 constraint(ALLOC_IN_RC(a6_reg));
4734 match(RegN);
4735 match(mRegN);
4737 format %{ %}
4738 interface(REG_INTER);
4739 %}
4741 operand a7_RegN() %{
4742 constraint(ALLOC_IN_RC(a7_reg));
4743 match(RegN);
4744 match(mRegN);
4746 format %{ %}
4747 interface(REG_INTER);
4748 %}
4750 operand s0_RegN() %{
4751 constraint(ALLOC_IN_RC(s0_reg));
4752 match(RegN);
4753 match(mRegN);
4755 format %{ %}
4756 interface(REG_INTER);
4757 %}
4759 operand s1_RegN() %{
4760 constraint(ALLOC_IN_RC(s1_reg));
4761 match(RegN);
4762 match(mRegN);
4764 format %{ %}
4765 interface(REG_INTER);
4766 %}
4768 operand s2_RegN() %{
4769 constraint(ALLOC_IN_RC(s2_reg));
4770 match(RegN);
4771 match(mRegN);
4773 format %{ %}
4774 interface(REG_INTER);
4775 %}
4777 operand s3_RegN() %{
4778 constraint(ALLOC_IN_RC(s3_reg));
4779 match(RegN);
4780 match(mRegN);
4782 format %{ %}
4783 interface(REG_INTER);
4784 %}
4786 operand s4_RegN() %{
4787 constraint(ALLOC_IN_RC(s4_reg));
4788 match(RegN);
4789 match(mRegN);
4791 format %{ %}
4792 interface(REG_INTER);
4793 %}
4795 operand s5_RegN() %{
4796 constraint(ALLOC_IN_RC(s5_reg));
4797 match(RegN);
4798 match(mRegN);
4800 format %{ %}
4801 interface(REG_INTER);
4802 %}
4804 operand s6_RegN() %{
4805 constraint(ALLOC_IN_RC(s6_reg));
4806 match(RegN);
4807 match(mRegN);
4809 format %{ %}
4810 interface(REG_INTER);
4811 %}
4813 operand s7_RegN() %{
4814 constraint(ALLOC_IN_RC(s7_reg));
4815 match(RegN);
4816 match(mRegN);
4818 format %{ %}
4819 interface(REG_INTER);
4820 %}
4822 operand v0_RegN() %{
4823 constraint(ALLOC_IN_RC(v0_reg));
4824 match(RegN);
4825 match(mRegN);
4827 format %{ %}
4828 interface(REG_INTER);
4829 %}
4831 operand v1_RegN() %{
4832 constraint(ALLOC_IN_RC(v1_reg));
4833 match(RegN);
4834 match(mRegN);
4836 format %{ %}
4837 interface(REG_INTER);
4838 %}
4840 // Pointer Register
4841 operand mRegP() %{
4842 constraint(ALLOC_IN_RC(p_reg));
4843 match(RegP);
4845 format %{ %}
4846 interface(REG_INTER);
4847 %}
4849 operand no_T8_mRegP() %{
4850 constraint(ALLOC_IN_RC(no_T8_p_reg));
4851 match(RegP);
4852 match(mRegP);
4854 format %{ %}
4855 interface(REG_INTER);
4856 %}
4858 operand s0_RegP()
4859 %{
4860 constraint(ALLOC_IN_RC(s0_long_reg));
4861 match(RegP);
4862 match(mRegP);
4863 match(no_T8_mRegP);
4865 format %{ %}
4866 interface(REG_INTER);
4867 %}
4869 operand s1_RegP()
4870 %{
4871 constraint(ALLOC_IN_RC(s1_long_reg));
4872 match(RegP);
4873 match(mRegP);
4874 match(no_T8_mRegP);
4876 format %{ %}
4877 interface(REG_INTER);
4878 %}
4880 operand s2_RegP()
4881 %{
4882 constraint(ALLOC_IN_RC(s2_long_reg));
4883 match(RegP);
4884 match(mRegP);
4885 match(no_T8_mRegP);
4887 format %{ %}
4888 interface(REG_INTER);
4889 %}
4891 operand s3_RegP()
4892 %{
4893 constraint(ALLOC_IN_RC(s3_long_reg));
4894 match(RegP);
4895 match(mRegP);
4896 match(no_T8_mRegP);
4898 format %{ %}
4899 interface(REG_INTER);
4900 %}
4902 operand s4_RegP()
4903 %{
4904 constraint(ALLOC_IN_RC(s4_long_reg));
4905 match(RegP);
4906 match(mRegP);
4907 match(no_T8_mRegP);
4909 format %{ %}
4910 interface(REG_INTER);
4911 %}
4913 operand s5_RegP()
4914 %{
4915 constraint(ALLOC_IN_RC(s5_long_reg));
4916 match(RegP);
4917 match(mRegP);
4918 match(no_T8_mRegP);
4920 format %{ %}
4921 interface(REG_INTER);
4922 %}
4924 operand s6_RegP()
4925 %{
4926 constraint(ALLOC_IN_RC(s6_long_reg));
4927 match(RegP);
4928 match(mRegP);
4929 match(no_T8_mRegP);
4931 format %{ %}
4932 interface(REG_INTER);
4933 %}
4935 operand s7_RegP()
4936 %{
4937 constraint(ALLOC_IN_RC(s7_long_reg));
4938 match(RegP);
4939 match(mRegP);
4940 match(no_T8_mRegP);
4942 format %{ %}
4943 interface(REG_INTER);
4944 %}
4946 operand t0_RegP()
4947 %{
4948 constraint(ALLOC_IN_RC(t0_long_reg));
4949 match(RegP);
4950 match(mRegP);
4951 match(no_T8_mRegP);
4953 format %{ %}
4954 interface(REG_INTER);
4955 %}
4957 operand t1_RegP()
4958 %{
4959 constraint(ALLOC_IN_RC(t1_long_reg));
4960 match(RegP);
4961 match(mRegP);
4962 match(no_T8_mRegP);
4964 format %{ %}
4965 interface(REG_INTER);
4966 %}
4968 operand t2_RegP()
4969 %{
4970 constraint(ALLOC_IN_RC(t2_long_reg));
4971 match(RegP);
4972 match(mRegP);
4973 match(no_T8_mRegP);
4975 format %{ %}
4976 interface(REG_INTER);
4977 %}
4979 operand t3_RegP()
4980 %{
4981 constraint(ALLOC_IN_RC(t3_long_reg));
4982 match(RegP);
4983 match(mRegP);
4984 match(no_T8_mRegP);
4986 format %{ %}
4987 interface(REG_INTER);
4988 %}
4990 operand t8_RegP()
4991 %{
4992 constraint(ALLOC_IN_RC(t8_long_reg));
4993 match(RegP);
4994 match(mRegP);
4996 format %{ %}
4997 interface(REG_INTER);
4998 %}
5000 operand t9_RegP()
5001 %{
5002 constraint(ALLOC_IN_RC(t9_long_reg));
5003 match(RegP);
5004 match(mRegP);
5005 match(no_T8_mRegP);
5007 format %{ %}
5008 interface(REG_INTER);
5009 %}
5011 operand a0_RegP()
5012 %{
5013 constraint(ALLOC_IN_RC(a0_long_reg));
5014 match(RegP);
5015 match(mRegP);
5016 match(no_T8_mRegP);
5018 format %{ %}
5019 interface(REG_INTER);
5020 %}
5022 operand a1_RegP()
5023 %{
5024 constraint(ALLOC_IN_RC(a1_long_reg));
5025 match(RegP);
5026 match(mRegP);
5027 match(no_T8_mRegP);
5029 format %{ %}
5030 interface(REG_INTER);
5031 %}
5033 operand a2_RegP()
5034 %{
5035 constraint(ALLOC_IN_RC(a2_long_reg));
5036 match(RegP);
5037 match(mRegP);
5038 match(no_T8_mRegP);
5040 format %{ %}
5041 interface(REG_INTER);
5042 %}
5044 operand a3_RegP()
5045 %{
5046 constraint(ALLOC_IN_RC(a3_long_reg));
5047 match(RegP);
5048 match(mRegP);
5049 match(no_T8_mRegP);
5051 format %{ %}
5052 interface(REG_INTER);
5053 %}
5055 operand a4_RegP()
5056 %{
5057 constraint(ALLOC_IN_RC(a4_long_reg));
5058 match(RegP);
5059 match(mRegP);
5060 match(no_T8_mRegP);
5062 format %{ %}
5063 interface(REG_INTER);
5064 %}
5067 operand a5_RegP()
5068 %{
5069 constraint(ALLOC_IN_RC(a5_long_reg));
5070 match(RegP);
5071 match(mRegP);
5072 match(no_T8_mRegP);
5074 format %{ %}
5075 interface(REG_INTER);
5076 %}
5078 operand a6_RegP()
5079 %{
5080 constraint(ALLOC_IN_RC(a6_long_reg));
5081 match(RegP);
5082 match(mRegP);
5083 match(no_T8_mRegP);
5085 format %{ %}
5086 interface(REG_INTER);
5087 %}
5089 operand a7_RegP()
5090 %{
5091 constraint(ALLOC_IN_RC(a7_long_reg));
5092 match(RegP);
5093 match(mRegP);
5094 match(no_T8_mRegP);
5096 format %{ %}
5097 interface(REG_INTER);
5098 %}
5100 operand v0_RegP()
5101 %{
5102 constraint(ALLOC_IN_RC(v0_long_reg));
5103 match(RegP);
5104 match(mRegP);
5105 match(no_T8_mRegP);
5107 format %{ %}
5108 interface(REG_INTER);
5109 %}
5111 operand v1_RegP()
5112 %{
5113 constraint(ALLOC_IN_RC(v1_long_reg));
5114 match(RegP);
5115 match(mRegP);
5116 match(no_T8_mRegP);
5118 format %{ %}
5119 interface(REG_INTER);
5120 %}
5122 /*
5123 operand mSPRegP(mRegP reg) %{
5124 constraint(ALLOC_IN_RC(sp_reg));
5125 match(reg);
5127 format %{ "SP" %}
5128 interface(REG_INTER);
5129 %}
5131 operand mFPRegP(mRegP reg) %{
5132 constraint(ALLOC_IN_RC(fp_reg));
5133 match(reg);
5135 format %{ "FP" %}
5136 interface(REG_INTER);
5137 %}
5138 */
5140 operand mRegL() %{
5141 constraint(ALLOC_IN_RC(long_reg));
5142 match(RegL);
5144 format %{ %}
5145 interface(REG_INTER);
5146 %}
5148 operand v0RegL() %{
5149 constraint(ALLOC_IN_RC(v0_long_reg));
5150 match(RegL);
5151 match(mRegL);
5153 format %{ %}
5154 interface(REG_INTER);
5155 %}
5157 operand v1RegL() %{
5158 constraint(ALLOC_IN_RC(v1_long_reg));
5159 match(RegL);
5160 match(mRegL);
5162 format %{ %}
5163 interface(REG_INTER);
5164 %}
5166 operand a0RegL() %{
5167 constraint(ALLOC_IN_RC(a0_long_reg));
5168 match(RegL);
5169 match(mRegL);
5171 format %{ "A0" %}
5172 interface(REG_INTER);
5173 %}
5175 operand a1RegL() %{
5176 constraint(ALLOC_IN_RC(a1_long_reg));
5177 match(RegL);
5178 match(mRegL);
5180 format %{ %}
5181 interface(REG_INTER);
5182 %}
5184 operand a2RegL() %{
5185 constraint(ALLOC_IN_RC(a2_long_reg));
5186 match(RegL);
5187 match(mRegL);
5189 format %{ %}
5190 interface(REG_INTER);
5191 %}
5193 operand a3RegL() %{
5194 constraint(ALLOC_IN_RC(a3_long_reg));
5195 match(RegL);
5196 match(mRegL);
5198 format %{ %}
5199 interface(REG_INTER);
5200 %}
5202 operand t0RegL() %{
5203 constraint(ALLOC_IN_RC(t0_long_reg));
5204 match(RegL);
5205 match(mRegL);
5207 format %{ %}
5208 interface(REG_INTER);
5209 %}
5211 operand t1RegL() %{
5212 constraint(ALLOC_IN_RC(t1_long_reg));
5213 match(RegL);
5214 match(mRegL);
5216 format %{ %}
5217 interface(REG_INTER);
5218 %}
5220 operand t2RegL() %{
5221 constraint(ALLOC_IN_RC(t2_long_reg));
5222 match(RegL);
5223 match(mRegL);
5225 format %{ %}
5226 interface(REG_INTER);
5227 %}
5229 operand t3RegL() %{
5230 constraint(ALLOC_IN_RC(t3_long_reg));
5231 match(RegL);
5232 match(mRegL);
5234 format %{ %}
5235 interface(REG_INTER);
5236 %}
5238 operand t8RegL() %{
5239 constraint(ALLOC_IN_RC(t8_long_reg));
5240 match(RegL);
5241 match(mRegL);
5243 format %{ %}
5244 interface(REG_INTER);
5245 %}
5247 operand a4RegL() %{
5248 constraint(ALLOC_IN_RC(a4_long_reg));
5249 match(RegL);
5250 match(mRegL);
5252 format %{ %}
5253 interface(REG_INTER);
5254 %}
5256 operand a5RegL() %{
5257 constraint(ALLOC_IN_RC(a5_long_reg));
5258 match(RegL);
5259 match(mRegL);
5261 format %{ %}
5262 interface(REG_INTER);
5263 %}
5265 operand a6RegL() %{
5266 constraint(ALLOC_IN_RC(a6_long_reg));
5267 match(RegL);
5268 match(mRegL);
5270 format %{ %}
5271 interface(REG_INTER);
5272 %}
5274 operand a7RegL() %{
5275 constraint(ALLOC_IN_RC(a7_long_reg));
5276 match(RegL);
5277 match(mRegL);
5279 format %{ %}
5280 interface(REG_INTER);
5281 %}
5283 operand s0RegL() %{
5284 constraint(ALLOC_IN_RC(s0_long_reg));
5285 match(RegL);
5286 match(mRegL);
5288 format %{ %}
5289 interface(REG_INTER);
5290 %}
5292 operand s1RegL() %{
5293 constraint(ALLOC_IN_RC(s1_long_reg));
5294 match(RegL);
5295 match(mRegL);
5297 format %{ %}
5298 interface(REG_INTER);
5299 %}
5301 operand s2RegL() %{
5302 constraint(ALLOC_IN_RC(s2_long_reg));
5303 match(RegL);
5304 match(mRegL);
5306 format %{ %}
5307 interface(REG_INTER);
5308 %}
5310 operand s3RegL() %{
5311 constraint(ALLOC_IN_RC(s3_long_reg));
5312 match(RegL);
5313 match(mRegL);
5315 format %{ %}
5316 interface(REG_INTER);
5317 %}
5319 operand s4RegL() %{
5320 constraint(ALLOC_IN_RC(s4_long_reg));
5321 match(RegL);
5322 match(mRegL);
5324 format %{ %}
5325 interface(REG_INTER);
5326 %}
5328 operand s7RegL() %{
5329 constraint(ALLOC_IN_RC(s7_long_reg));
5330 match(RegL);
5331 match(mRegL);
5333 format %{ %}
5334 interface(REG_INTER);
5335 %}
5337 // Floating register operands
5338 operand regF() %{
5339 constraint(ALLOC_IN_RC(flt_reg));
5340 match(RegF);
5342 format %{ %}
5343 interface(REG_INTER);
5344 %}
5346 //Double Precision Floating register operands
5347 operand regD() %{
5348 constraint(ALLOC_IN_RC(dbl_reg));
5349 match(RegD);
5351 format %{ %}
5352 interface(REG_INTER);
5353 %}
5355 //----------Memory Operands----------------------------------------------------
5356 // Indirect Memory Operand
5357 operand indirect(mRegP reg) %{
5358 constraint(ALLOC_IN_RC(p_reg));
5359 match(reg);
5361 format %{ "[$reg] @ indirect" %}
5362 interface(MEMORY_INTER) %{
5363 base($reg);
5364 index(0x0); /* NO_INDEX */
5365 scale(0x0);
5366 disp(0x0);
5367 %}
5368 %}
5370 // Indirect Memory Plus Short Offset Operand
5371 operand indOffset8(mRegP reg, immL8 off)
5372 %{
5373 constraint(ALLOC_IN_RC(p_reg));
5374 match(AddP reg off);
5376 op_cost(10);
5377 format %{ "[$reg + $off (8-bit)] @ indOffset8" %}
5378 interface(MEMORY_INTER) %{
5379 base($reg);
5380 index(0x0); /* NO_INDEX */
5381 scale(0x0);
5382 disp($off);
5383 %}
5384 %}
5386 // Indirect Memory Times Scale Plus Index Register
5387 operand indIndexScale(mRegP reg, mRegL lreg, immI2 scale)
5388 %{
5389 constraint(ALLOC_IN_RC(p_reg));
5390 match(AddP reg (LShiftL lreg scale));
5392 op_cost(10);
5393 format %{"[$reg + $lreg << $scale] @ indIndexScale" %}
5394 interface(MEMORY_INTER) %{
5395 base($reg);
5396 index($lreg);
5397 scale($scale);
5398 disp(0x0);
5399 %}
5400 %}
5403 // [base + index + offset]
5404 operand baseIndexOffset8(mRegP base, mRegL index, immL8 off)
5405 %{
5406 constraint(ALLOC_IN_RC(p_reg));
5407 op_cost(5);
5408 match(AddP (AddP base index) off);
5410 format %{ "[$base + $index + $off (8-bit)] @ baseIndexOffset8" %}
5411 interface(MEMORY_INTER) %{
5412 base($base);
5413 index($index);
5414 scale(0x0);
5415 disp($off);
5416 %}
5417 %}
5419 // [base + index + offset]
5420 operand baseIndexOffset8_convI2L(mRegP base, mRegI index, immL8 off)
5421 %{
5422 constraint(ALLOC_IN_RC(p_reg));
5423 op_cost(5);
5424 match(AddP (AddP base (ConvI2L index)) off);
5426 format %{ "[$base + $index + $off (8-bit)] @ baseIndexOffset8_convI2L" %}
5427 interface(MEMORY_INTER) %{
5428 base($base);
5429 index($index);
5430 scale(0x0);
5431 disp($off);
5432 %}
5433 %}
5435 // Indirect Memory Times Scale Plus Index Register Plus Offset Operand
5436 operand indIndexScaleOffset8(mRegP reg, immL8 off, mRegL lreg, immI2 scale)
5437 %{
5438 constraint(ALLOC_IN_RC(p_reg));
5439 match(AddP (AddP reg (LShiftL lreg scale)) off);
5441 op_cost(10);
5442 format %{"[$reg + $off + $lreg << $scale] @ indIndexScaleOffset8" %}
5443 interface(MEMORY_INTER) %{
5444 base($reg);
5445 index($lreg);
5446 scale($scale);
5447 disp($off);
5448 %}
5449 %}
5451 operand indIndexScaleOffset8_convI2L(mRegP reg, immL8 off, mRegI ireg, immI2 scale)
5452 %{
5453 constraint(ALLOC_IN_RC(p_reg));
5454 match(AddP (AddP reg (LShiftL (ConvI2L ireg) scale)) off);
5456 op_cost(10);
5457 format %{"[$reg + $off + $ireg << $scale] @ indIndexScaleOffset8_convI2L" %}
5458 interface(MEMORY_INTER) %{
5459 base($reg);
5460 index($ireg);
5461 scale($scale);
5462 disp($off);
5463 %}
5464 %}
5466 // [base + index<<scale + offset]
5467 operand basePosIndexScaleOffset8(mRegP base, mRegI index, immL8 off, immI_0_31 scale)
5468 %{
5469 constraint(ALLOC_IN_RC(p_reg));
5470 //predicate(n->in(2)->in(3)->in(1)->as_Type()->type()->is_long()->_lo >= 0);
5471 op_cost(10);
5472 match(AddP (AddP base (LShiftL (ConvI2L index) scale)) off);
5474 format %{ "[$base + $index << $scale + $off (8-bit)] @ basePosIndexScaleOffset8" %}
5475 interface(MEMORY_INTER) %{
5476 base($base);
5477 index($index);
5478 scale($scale);
5479 disp($off);
5480 %}
5481 %}
5483 // Indirect Memory Times Scale Plus Index Register Plus Offset Operand
5484 operand indIndexScaleOffsetNarrow(mRegN reg, immL8 off, mRegL lreg, immI2 scale)
5485 %{
5486 predicate(Universe::narrow_oop_shift() == 0);
5487 constraint(ALLOC_IN_RC(p_reg));
5488 match(AddP (AddP (DecodeN reg) (LShiftL lreg scale)) off);
5490 op_cost(10);
5491 format %{"[$reg + $off + $lreg << $scale] @ indIndexScaleOffsetNarrow" %}
5492 interface(MEMORY_INTER) %{
5493 base($reg);
5494 index($lreg);
5495 scale($scale);
5496 disp($off);
5497 %}
5498 %}
5500 // [base + index<<scale + offset] for compressd Oops
5501 operand indPosIndexI2LScaleOffset8Narrow(mRegN base, mRegI index, immL8 off, immI_0_31 scale)
5502 %{
5503 constraint(ALLOC_IN_RC(p_reg));
5504 //predicate(Universe::narrow_oop_shift() == 0 && n->in(2)->in(3)->in(1)->as_Type()->type()->is_long()->_lo >= 0);
5505 predicate(Universe::narrow_oop_shift() == 0);
5506 op_cost(10);
5507 match(AddP (AddP (DecodeN base) (LShiftL (ConvI2L index) scale)) off);
5509 format %{ "[$base + $index << $scale + $off (8-bit)] @ indPosIndexI2LScaleOffset8Narrow" %}
5510 interface(MEMORY_INTER) %{
5511 base($base);
5512 index($index);
5513 scale($scale);
5514 disp($off);
5515 %}
5516 %}
5518 //FIXME: I think it's better to limit the immI to be 16-bit at most!
5519 // Indirect Memory Plus Long Offset Operand
5520 operand indOffset32(mRegP reg, immL32 off) %{
5521 constraint(ALLOC_IN_RC(p_reg));
5522 op_cost(20);
5523 match(AddP reg off);
5525 format %{ "[$reg + $off (32-bit)] @ indOffset32" %}
5526 interface(MEMORY_INTER) %{
5527 base($reg);
5528 index(0x0); /* NO_INDEX */
5529 scale(0x0);
5530 disp($off);
5531 %}
5532 %}
5534 // Indirect Memory Plus Index Register
5535 operand indIndex(mRegP addr, mRegL index) %{
5536 constraint(ALLOC_IN_RC(p_reg));
5537 match(AddP addr index);
5539 op_cost(20);
5540 format %{"[$addr + $index] @ indIndex" %}
5541 interface(MEMORY_INTER) %{
5542 base($addr);
5543 index($index);
5544 scale(0x0);
5545 disp(0x0);
5546 %}
5547 %}
5549 operand indirectNarrowKlass(mRegN reg)
5550 %{
5551 predicate(Universe::narrow_klass_shift() == 0);
5552 constraint(ALLOC_IN_RC(p_reg));
5553 op_cost(10);
5554 match(DecodeNKlass reg);
5556 format %{ "[$reg] @ indirectNarrowKlass" %}
5557 interface(MEMORY_INTER) %{
5558 base($reg);
5559 index(0x0);
5560 scale(0x0);
5561 disp(0x0);
5562 %}
5563 %}
5565 operand indOffset8NarrowKlass(mRegN reg, immL8 off)
5566 %{
5567 predicate(Universe::narrow_klass_shift() == 0);
5568 constraint(ALLOC_IN_RC(p_reg));
5569 op_cost(10);
5570 match(AddP (DecodeNKlass reg) off);
5572 format %{ "[$reg + $off (8-bit)] @ indOffset8NarrowKlass" %}
5573 interface(MEMORY_INTER) %{
5574 base($reg);
5575 index(0x0);
5576 scale(0x0);
5577 disp($off);
5578 %}
5579 %}
5581 operand indOffset32NarrowKlass(mRegN reg, immL32 off)
5582 %{
5583 predicate(Universe::narrow_klass_shift() == 0);
5584 constraint(ALLOC_IN_RC(p_reg));
5585 op_cost(10);
5586 match(AddP (DecodeNKlass reg) off);
5588 format %{ "[$reg + $off (32-bit)] @ indOffset32NarrowKlass" %}
5589 interface(MEMORY_INTER) %{
5590 base($reg);
5591 index(0x0);
5592 scale(0x0);
5593 disp($off);
5594 %}
5595 %}
5597 operand indIndexOffsetNarrowKlass(mRegN reg, mRegL lreg, immL32 off)
5598 %{
5599 predicate(Universe::narrow_klass_shift() == 0);
5600 constraint(ALLOC_IN_RC(p_reg));
5601 match(AddP (AddP (DecodeNKlass reg) lreg) off);
5603 op_cost(10);
5604 format %{"[$reg + $off + $lreg] @ indIndexOffsetNarrowKlass" %}
5605 interface(MEMORY_INTER) %{
5606 base($reg);
5607 index($lreg);
5608 scale(0x0);
5609 disp($off);
5610 %}
5611 %}
5613 operand indIndexNarrowKlass(mRegN reg, mRegL lreg)
5614 %{
5615 predicate(Universe::narrow_klass_shift() == 0);
5616 constraint(ALLOC_IN_RC(p_reg));
5617 match(AddP (DecodeNKlass reg) lreg);
5619 op_cost(10);
5620 format %{"[$reg + $lreg] @ indIndexNarrowKlass" %}
5621 interface(MEMORY_INTER) %{
5622 base($reg);
5623 index($lreg);
5624 scale(0x0);
5625 disp(0x0);
5626 %}
5627 %}
5629 // Indirect Memory Operand
5630 operand indirectNarrow(mRegN reg)
5631 %{
5632 predicate(Universe::narrow_oop_shift() == 0);
5633 constraint(ALLOC_IN_RC(p_reg));
5634 op_cost(10);
5635 match(DecodeN reg);
5637 format %{ "[$reg] @ indirectNarrow" %}
5638 interface(MEMORY_INTER) %{
5639 base($reg);
5640 index(0x0);
5641 scale(0x0);
5642 disp(0x0);
5643 %}
5644 %}
5646 // Indirect Memory Plus Short Offset Operand
5647 operand indOffset8Narrow(mRegN reg, immL8 off)
5648 %{
5649 predicate(Universe::narrow_oop_shift() == 0);
5650 constraint(ALLOC_IN_RC(p_reg));
5651 op_cost(10);
5652 match(AddP (DecodeN reg) off);
5654 format %{ "[$reg + $off (8-bit)] @ indOffset8Narrow" %}
5655 interface(MEMORY_INTER) %{
5656 base($reg);
5657 index(0x0);
5658 scale(0x0);
5659 disp($off);
5660 %}
5661 %}
5663 // Indirect Memory Plus Index Register Plus Offset Operand
5664 operand indIndexOffset8Narrow(mRegN reg, mRegL lreg, immL8 off)
5665 %{
5666 predicate(Universe::narrow_oop_shift() == 0);
5667 constraint(ALLOC_IN_RC(p_reg));
5668 match(AddP (AddP (DecodeN reg) lreg) off);
5670 op_cost(10);
5671 format %{"[$reg + $off + $lreg] @ indIndexOffset8Narrow" %}
5672 interface(MEMORY_INTER) %{
5673 base($reg);
5674 index($lreg);
5675 scale(0x0);
5676 disp($off);
5677 %}
5678 %}
5680 //----------Load Long Memory Operands------------------------------------------
5681 // The load-long idiom will use it's address expression again after loading
5682 // the first word of the long. If the load-long destination overlaps with
5683 // registers used in the addressing expression, the 2nd half will be loaded
5684 // from a clobbered address. Fix this by requiring that load-long use
5685 // address registers that do not overlap with the load-long target.
5687 // load-long support
5688 operand load_long_RegP() %{
5689 constraint(ALLOC_IN_RC(p_reg));
5690 match(RegP);
5691 match(mRegP);
5692 op_cost(100);
5693 format %{ %}
5694 interface(REG_INTER);
5695 %}
5697 // Indirect Memory Operand Long
5698 operand load_long_indirect(load_long_RegP reg) %{
5699 constraint(ALLOC_IN_RC(p_reg));
5700 match(reg);
5702 format %{ "[$reg]" %}
5703 interface(MEMORY_INTER) %{
5704 base($reg);
5705 index(0x0);
5706 scale(0x0);
5707 disp(0x0);
5708 %}
5709 %}
5711 // Indirect Memory Plus Long Offset Operand
5712 operand load_long_indOffset32(load_long_RegP reg, immL32 off) %{
5713 match(AddP reg off);
5715 format %{ "[$reg + $off]" %}
5716 interface(MEMORY_INTER) %{
5717 base($reg);
5718 index(0x0);
5719 scale(0x0);
5720 disp($off);
5721 %}
5722 %}
5724 //----------Conditional Branch Operands----------------------------------------
5725 // Comparison Op - This is the operation of the comparison, and is limited to
5726 // the following set of codes:
5727 // L (<), LE (<=), G (>), GE (>=), E (==), NE (!=)
5728 //
5729 // Other attributes of the comparison, such as unsignedness, are specified
5730 // by the comparison instruction that sets a condition code flags register.
5731 // That result is represented by a flags operand whose subtype is appropriate
5732 // to the unsignedness (etc.) of the comparison.
5733 //
5734 // Later, the instruction which matches both the Comparison Op (a Bool) and
5735 // the flags (produced by the Cmp) specifies the coding of the comparison op
5736 // by matching a specific subtype of Bool operand below, such as cmpOpU.
5738 // Comparision Code
5739 operand cmpOp() %{
5740 match(Bool);
5742 format %{ "" %}
5743 interface(COND_INTER) %{
5744 equal(0x01);
5745 not_equal(0x02);
5746 greater(0x03);
5747 greater_equal(0x04);
5748 less(0x05);
5749 less_equal(0x06);
5750 overflow(0x7);
5751 no_overflow(0x8);
5752 %}
5753 %}
5756 // Comparision Code
5757 // Comparison Code, unsigned compare. Used by FP also, with
5758 // C2 (unordered) turned into GT or LT already. The other bits
5759 // C0 and C3 are turned into Carry & Zero flags.
5760 operand cmpOpU() %{
5761 match(Bool);
5763 format %{ "" %}
5764 interface(COND_INTER) %{
5765 equal(0x01);
5766 not_equal(0x02);
5767 greater(0x03);
5768 greater_equal(0x04);
5769 less(0x05);
5770 less_equal(0x06);
5771 overflow(0x7);
5772 no_overflow(0x8);
5773 %}
5774 %}
5776 /*
5777 // Comparison Code, unsigned compare. Used by FP also, with
5778 // C2 (unordered) turned into GT or LT already. The other bits
5779 // C0 and C3 are turned into Carry & Zero flags.
5780 operand cmpOpU() %{
5781 match(Bool);
5783 format %{ "" %}
5784 interface(COND_INTER) %{
5785 equal(0x4);
5786 not_equal(0x5);
5787 less(0x2);
5788 greater_equal(0x3);
5789 less_equal(0x6);
5790 greater(0x7);
5791 %}
5792 %}
5793 */
5794 /*
5795 // Comparison Code for FP conditional move
5796 operand cmpOp_fcmov() %{
5797 match(Bool);
5799 format %{ "" %}
5800 interface(COND_INTER) %{
5801 equal (0x01);
5802 not_equal (0x02);
5803 greater (0x03);
5804 greater_equal(0x04);
5805 less (0x05);
5806 less_equal (0x06);
5807 %}
5808 %}
5810 // Comparision Code used in long compares
5811 operand cmpOp_commute() %{
5812 match(Bool);
5814 format %{ "" %}
5815 interface(COND_INTER) %{
5816 equal(0x4);
5817 not_equal(0x5);
5818 less(0xF);
5819 greater_equal(0xE);
5820 less_equal(0xD);
5821 greater(0xC);
5822 %}
5823 %}
5824 */
5826 //----------Special Memory Operands--------------------------------------------
5827 // Stack Slot Operand - This operand is used for loading and storing temporary
5828 // values on the stack where a match requires a value to
5829 // flow through memory.
5830 operand stackSlotP(sRegP reg) %{
5831 constraint(ALLOC_IN_RC(stack_slots));
5832 // No match rule because this operand is only generated in matching
5833 op_cost(50);
5834 format %{ "[$reg]" %}
5835 interface(MEMORY_INTER) %{
5836 base(0x1d); // SP
5837 index(0x0); // No Index
5838 scale(0x0); // No Scale
5839 disp($reg); // Stack Offset
5840 %}
5841 %}
5843 operand stackSlotI(sRegI reg) %{
5844 constraint(ALLOC_IN_RC(stack_slots));
5845 // No match rule because this operand is only generated in matching
5846 op_cost(50);
5847 format %{ "[$reg]" %}
5848 interface(MEMORY_INTER) %{
5849 base(0x1d); // SP
5850 index(0x0); // No Index
5851 scale(0x0); // No Scale
5852 disp($reg); // Stack Offset
5853 %}
5854 %}
5856 operand stackSlotF(sRegF reg) %{
5857 constraint(ALLOC_IN_RC(stack_slots));
5858 // No match rule because this operand is only generated in matching
5859 op_cost(50);
5860 format %{ "[$reg]" %}
5861 interface(MEMORY_INTER) %{
5862 base(0x1d); // SP
5863 index(0x0); // No Index
5864 scale(0x0); // No Scale
5865 disp($reg); // Stack Offset
5866 %}
5867 %}
5869 operand stackSlotD(sRegD reg) %{
5870 constraint(ALLOC_IN_RC(stack_slots));
5871 // No match rule because this operand is only generated in matching
5872 op_cost(50);
5873 format %{ "[$reg]" %}
5874 interface(MEMORY_INTER) %{
5875 base(0x1d); // SP
5876 index(0x0); // No Index
5877 scale(0x0); // No Scale
5878 disp($reg); // Stack Offset
5879 %}
5880 %}
5882 operand stackSlotL(sRegL reg) %{
5883 constraint(ALLOC_IN_RC(stack_slots));
5884 // No match rule because this operand is only generated in matching
5885 op_cost(50);
5886 format %{ "[$reg]" %}
5887 interface(MEMORY_INTER) %{
5888 base(0x1d); // SP
5889 index(0x0); // No Index
5890 scale(0x0); // No Scale
5891 disp($reg); // Stack Offset
5892 %}
5893 %}
5896 //------------------------OPERAND CLASSES--------------------------------------
5897 //opclass memory( direct, indirect, indOffset16, indOffset32, indOffset32X, indIndexOffset );
5898 opclass memory( indirect, indirectNarrow, indOffset8, indOffset32, indIndex, indIndexScale, load_long_indirect, load_long_indOffset32, baseIndexOffset8, baseIndexOffset8_convI2L, indIndexScaleOffset8, indIndexScaleOffset8_convI2L, basePosIndexScaleOffset8, indIndexScaleOffsetNarrow, indPosIndexI2LScaleOffset8Narrow, indOffset8Narrow, indIndexOffset8Narrow);
5901 //----------PIPELINE-----------------------------------------------------------
5902 // Rules which define the behavior of the target architectures pipeline.
// Scheduling model for the target: fixed-width 4-byte MIPS instructions,
// branch delay slots, 4-wide issue, and the functional units / pipe classes
// the instruction selector refers to via ins_pipe(...).
5904 pipeline %{
5906 //----------ATTRIBUTES---------------------------------------------------------
5907 attributes %{
5908 fixed_size_instructions; // Fixed size instructions
5909 branch_has_delay_slot; // branches have a delay slot on gs2
5910 max_instructions_per_bundle = 1; // 1 instruction per bundle
5911 max_bundles_per_cycle = 4; // Up to 4 bundles per cycle
5912 bundle_unit_size=4;
5913 instruction_unit_size = 4; // An instruction is 4 bytes long
5914 instruction_fetch_unit_size = 16; // The processor fetches one line
5915 instruction_fetch_units = 1; // of 16 bytes
5917 // List of nop instructions
5918 nops( MachNop );
5919 %}
5921 //----------RESOURCES----------------------------------------------------------
5922 // Resources are the functional units available to the machine
// 4 decoders, 2 integer ALUs, 2 FPUs, one memory port, one branch unit.
5924 resources(D1, D2, D3, D4, DECODE = D1 | D2 | D3| D4, ALU1, ALU2, ALU = ALU1 | ALU2, FPU1, FPU2, FPU = FPU1 | FPU2, MEM, BR);
5926 //----------PIPELINE DESCRIPTION-----------------------------------------------
5927 // Pipeline Description specifies the stages in the machine's pipeline
5929 // IF: fetch
5930 // ID: decode
5931 // RD: read
5932 // CA: calculate
5933 // WB: write back
5934 // CM: commit
5936 pipe_desc(IF, ID, RD, CA, WB, CM);
5939 //----------PIPELINE CLASSES---------------------------------------------------
5940 // Pipeline Classes describe the stages in which input and output are
5941 // referenced by the hardware pipeline.
5943 //No.1 Integer ALU reg-reg operation : dst <-- reg1 op reg2
5944 pipe_class ialu_regI_regI(mRegI dst, mRegI src1, mRegI src2) %{
5945 single_instruction;
5946 src1 : RD(read);
5947 src2 : RD(read);
5948 dst : WB(write)+1;
5949 DECODE : ID;
5950 ALU : CA;
5951 %}
5953 //No.19 Integer mult operation : dst <-- reg1 mult reg2
5954 pipe_class ialu_mult(mRegI dst, mRegI src1, mRegI src2) %{
5955 src1 : RD(read);
5956 src2 : RD(read);
5957 dst : WB(write)+5;
5958 DECODE : ID;
5959 ALU2 : CA;
5960 %}
// Long multiply: longer result latency (+10) than the 32-bit multiply above.
5962 pipe_class mulL_reg_reg(mRegL dst, mRegL src1, mRegL src2) %{
5963 src1 : RD(read);
5964 src2 : RD(read);
5965 dst : WB(write)+10;
5966 DECODE : ID;
5967 ALU2 : CA;
5968 %}
5970 //No.19 Integer div operation : dst <-- reg1 div reg2
5971 pipe_class ialu_div(mRegI dst, mRegI src1, mRegI src2) %{
5972 src1 : RD(read);
5973 src2 : RD(read);
5974 dst : WB(write)+10;
5975 DECODE : ID;
5976 ALU2 : CA;
5977 %}
5979 //No.19 Integer mod operation : dst <-- reg1 mod reg2
5980 pipe_class ialu_mod(mRegI dst, mRegI src1, mRegI src2) %{
5981 instruction_count(2);
5982 src1 : RD(read);
5983 src2 : RD(read);
5984 dst : WB(write)+10;
5985 DECODE : ID;
5986 ALU2 : CA;
5987 %}
5989 //No.15 Long ALU reg-reg operation : dst <-- reg1 op reg2
5990 pipe_class ialu_regL_regL(mRegL dst, mRegL src1, mRegL src2) %{
5991 instruction_count(2);
5992 src1 : RD(read);
5993 src2 : RD(read);
5994 dst : WB(write);
5995 DECODE : ID;
5996 ALU : CA;
5997 %}
5999 //No.18 Long ALU reg-imm16 operation : dst <-- reg1 op imm16
6000 pipe_class ialu_regL_imm16(mRegL dst, mRegL src) %{
6001 instruction_count(2);
6002 src : RD(read);
6003 dst : WB(write);
6004 DECODE : ID;
6005 ALU : CA;
6006 %}
6008 //no.16 load Long from memory :
6009 pipe_class ialu_loadL(mRegL dst, memory mem) %{
6010 instruction_count(2);
6011 mem : RD(read);
6012 dst : WB(write)+5;
6013 DECODE : ID;
6014 MEM : RD;
6015 %}
6017 //No.17 Store Long to Memory :
6018 pipe_class ialu_storeL(mRegL src, memory mem) %{
6019 instruction_count(2);
6020 mem : RD(read);
6021 src : RD(read);
6022 DECODE : ID;
6023 MEM : RD;
6024 %}
6026 //No.2 Integer ALU reg-imm16 operation : dst <-- reg1 op imm16
6027 pipe_class ialu_regI_imm16(mRegI dst, mRegI src) %{
6028 single_instruction;
6029 src : RD(read);
6030 dst : WB(write);
6031 DECODE : ID;
6032 ALU : CA;
6033 %}
6035 //No.3 Integer move operation : dst <-- reg
6036 pipe_class ialu_regI_mov(mRegI dst, mRegI src) %{
6037 src : RD(read);
6038 dst : WB(write);
6039 DECODE : ID;
6040 ALU : CA;
6041 %}
6043 //No.4 No instructions : do nothing
6044 pipe_class empty( ) %{
6045 instruction_count(0);
6046 %}
6048 //No.5 UnConditional branch :
6049 pipe_class pipe_jump( label labl ) %{
6050 multiple_bundles;
6051 DECODE : ID;
6052 BR : RD;
6053 %}
6055 //No.6 ALU Conditional branch :
6056 pipe_class pipe_alu_branch(mRegI src1, mRegI src2, label labl ) %{
6057 multiple_bundles;
6058 src1 : RD(read);
6059 src2 : RD(read);
6060 DECODE : ID;
6061 BR : RD;
6062 %}
6064 //no.7 load integer from memory :
6065 pipe_class ialu_loadI(mRegI dst, memory mem) %{
6066 mem : RD(read);
6067 dst : WB(write)+3;
6068 DECODE : ID;
6069 MEM : RD;
6070 %}
6072 //No.8 Store Integer to Memory :
6073 pipe_class ialu_storeI(mRegI src, memory mem) %{
6074 mem : RD(read);
6075 src : RD(read);
6076 DECODE : ID;
6077 MEM : RD;
6078 %}
6081 //No.10 Floating FPU reg-reg operation : dst <-- reg1 op reg2
6082 pipe_class fpu_regF_regF(regF dst, regF src1, regF src2) %{
6083 src1 : RD(read);
6084 src2 : RD(read);
6085 dst : WB(write);
6086 DECODE : ID;
6087 FPU : CA;
6088 %}
6090 //No.22 Floating div operation : dst <-- reg1 div reg2
6091 pipe_class fpu_div(regF dst, regF src1, regF src2) %{
6092 src1 : RD(read);
6093 src2 : RD(read);
6094 dst : WB(write);
6095 DECODE : ID;
6096 FPU2 : CA;
6097 %}
// int -> double conversion; runs on FPU1.
6099 pipe_class fcvt_I2D(regD dst, mRegI src) %{
6100 src : RD(read);
6101 dst : WB(write);
6102 DECODE : ID;
6103 FPU1 : CA;
6104 %}
// double -> int conversion; runs on FPU1.
6106 pipe_class fcvt_D2I(mRegI dst, regD src) %{
6107 src : RD(read);
6108 dst : WB(write);
6109 DECODE : ID;
6110 FPU1 : CA;
6111 %}
// FPR -> GPR move (mfc1); uses the memory port resource.
6113 pipe_class pipe_mfc1(mRegI dst, regD src) %{
6114 src : RD(read);
6115 dst : WB(write);
6116 DECODE : ID;
6117 MEM : RD;
6118 %}
// GPR -> FPR move (mtc1); occupies the memory port for 5 cycles.
6120 pipe_class pipe_mtc1(regD dst, mRegI src) %{
6121 src : RD(read);
6122 dst : WB(write);
6123 DECODE : ID;
6124 MEM : RD(5);
6125 %}
6127 //No.23 Floating sqrt operation : dst <-- reg1 sqrt reg2
6128 pipe_class fpu_sqrt(regF dst, regF src1, regF src2) %{
6129 multiple_bundles;
6130 src1 : RD(read);
6131 src2 : RD(read);
6132 dst : WB(write);
6133 DECODE : ID;
6134 FPU2 : CA;
6135 %}
6137 //No.11 Load Floating from Memory :
6138 pipe_class fpu_loadF(regF dst, memory mem) %{
6139 instruction_count(1);
6140 mem : RD(read);
6141 dst : WB(write)+3;
6142 DECODE : ID;
6143 MEM : RD;
6144 %}
6146 //No.12 Store Floating to Memory :
6147 pipe_class fpu_storeF(regF src, memory mem) %{
6148 instruction_count(1);
6149 mem : RD(read);
6150 src : RD(read);
6151 DECODE : ID;
6152 MEM : RD;
6153 %}
6155 //No.13 FPU Conditional branch :
6156 pipe_class pipe_fpu_branch(regF src1, regF src2, label labl ) %{
6157 multiple_bundles;
6158 src1 : RD(read);
6159 src2 : RD(read);
6160 DECODE : ID;
6161 BR : RD;
6162 %}
6164 //No.14 Floating FPU reg operation : dst <-- op reg
6165 pipe_class fpu1_regF(regF dst, regF src) %{
6166 src : RD(read);
6167 dst : WB(write);
6168 DECODE : ID;
6169 FPU : CA;
6170 %}
// Multi-instruction memory sequence (e.g. atomics); serializes the pipeline.
6172 pipe_class long_memory_op() %{
6173 instruction_count(10); multiple_bundles; force_serialization;
6174 fixed_latency(30);
6175 %}
// Call that occupies the branch unit; large fixed latency.
6177 pipe_class simple_call() %{
6178 instruction_count(10); multiple_bundles; force_serialization;
6179 fixed_latency(200);
6180 BR : RD;
6181 %}
6183 pipe_class call() %{
6184 instruction_count(10); multiple_bundles; force_serialization;
6185 fixed_latency(200);
6186 %}
6188 //FIXME:
6189 //No.9 Pipe slow : catch-all for multi-instruction sequences
6190 pipe_class pipe_slow( ) %{
6191 instruction_count(20);
6192 force_serialization;
6193 multiple_bundles;
6194 fixed_latency(50);
6195 %}
6197 %}
6201 //----------INSTRUCTIONS-------------------------------------------------------
6202 //
6203 // match -- States which machine-independent subtree may be replaced
6204 // by this instruction.
6205 // ins_cost -- The estimated cost of this instruction is used by instruction
6206 // selection to identify a minimum cost tree of machine
6207 // instructions that matches a tree of machine-independent
6208 // instructions.
6209 // format -- A string providing the disassembly for this instruction.
6210 // The value of an instruction's operand may be inserted
6211 // by referring to it with a '$' prefix.
6212 // opcode -- Three instruction opcodes may be provided. These are referred
6213 // to within an encode class as $primary, $secondary, and $tertiary
6214 // respectively. The primary opcode is commonly used to
6215 // indicate the type of machine instruction, while secondary
6216 // and tertiary are often used for prefix options or addressing
6217 // modes.
6218 // ins_encode -- A list of encode classes with parameters. The encode class
6219 // name must have been defined in an 'enc_class' specification
6220 // in the encode section of the architecture description.
6223 // Load Integer
// Load a 32-bit int from memory ('lw', sign-extending).
6224 instruct loadI(mRegI dst, memory mem) %{
6225 match(Set dst (LoadI mem));
6227 ins_cost(125);
6228 format %{ "lw $dst, $mem #@loadI" %}
6229 ins_encode (load_I_enc(dst, mem));
6230 ins_pipe( ialu_loadI );
6231 %}
// Fold ConvI2L(LoadI): 'lw' already sign-extends to 64 bits on MIPS64,
// so the int load alone yields the long value.
6233 instruct loadI_convI2L(mRegL dst, memory mem) %{
6234 match(Set dst (ConvI2L (LoadI mem)));
6236 ins_cost(125);
6237 format %{ "lw $dst, $mem #@loadI_convI2L" %}
6238 ins_encode (load_I_enc(dst, mem));
6239 ins_pipe( ialu_loadI );
6240 %}
6242 // Load Integer (32 bit signed) to Byte (8 bit signed)
// Fold (int << 24) >> 24 over a LoadI into a single sign-extending byte load.
6243 instruct loadI2B(mRegI dst, memory mem, immI_24 twentyfour) %{
6244 match(Set dst (RShiftI (LShiftI (LoadI mem) twentyfour) twentyfour));
6246 ins_cost(125);
6247 format %{ "lb $dst, $mem\t# int -> byte #@loadI2B" %}
6248 ins_encode(load_B_enc(dst, mem));
6249 ins_pipe(ialu_loadI);
6250 %}
6252 // Load Integer (32 bit signed) to Unsigned Byte (8 bit UNsigned)
// Fold (LoadI & 0xFF) into a single zero-extending byte load.
6253 instruct loadI2UB(mRegI dst, memory mem, immI_255 mask) %{
6254 match(Set dst (AndI (LoadI mem) mask));
6256 ins_cost(125);
6257 format %{ "lbu $dst, $mem\t# int -> ubyte #@loadI2UB" %}
6258 ins_encode(load_UB_enc(dst, mem));
6259 ins_pipe(ialu_loadI);
6260 %}
6262 // Load Integer (32 bit signed) to Short (16 bit signed)
// Fold (int << 16) >> 16 over a LoadI into a single sign-extending halfword load.
6263 instruct loadI2S(mRegI dst, memory mem, immI_16 sixteen) %{
6264 match(Set dst (RShiftI (LShiftI (LoadI mem) sixteen) sixteen));
6266 ins_cost(125);
6267 format %{ "lh $dst, $mem\t# int -> short #@loadI2S" %}
6268 ins_encode(load_S_enc(dst, mem));
6269 ins_pipe(ialu_loadI);
6270 %}
6272 // Load Integer (32 bit signed) to Unsigned Short/Char (16 bit UNsigned)
// Fold (LoadI & 0xFFFF) into a single zero-extending halfword load.
6273 instruct loadI2US(mRegI dst, memory mem, immI_65535 mask) %{
6274 match(Set dst (AndI (LoadI mem) mask));
6276 ins_cost(125);
6277 format %{ "lhu $dst, $mem\t# int -> ushort/char #@loadI2US" %}
6278 ins_encode(load_C_enc(dst, mem));
6279 ins_pipe(ialu_loadI);
6280 %}
6282 // Load Long.
// Load a 64-bit long from memory ('ld').
6283 instruct loadL(mRegL dst, memory mem) %{
6284 // predicate(!((LoadLNode*)n)->require_atomic_access());
6285 match(Set dst (LoadL mem));
6287 ins_cost(250);
6288 format %{ "ld $dst, $mem #@loadL" %}
6289 ins_encode(load_L_enc(dst, mem));
6290 ins_pipe( ialu_loadL );
6291 %}
6293 // Load Long - UNaligned
// Unaligned long load; currently emitted as a plain 'ld' at a higher cost.
6294 instruct loadL_unaligned(mRegL dst, memory mem) %{
6295 match(Set dst (LoadL_unaligned mem));
6297 // FIXME: Jin: Need more effective ldl/ldr
6298 ins_cost(450);
6299 format %{ "ld $dst, $mem #@loadL_unaligned\n\t" %}
6300 ins_encode(load_L_enc(dst, mem));
6301 ins_pipe( ialu_loadL );
6302 %}
6304 // Store Long
// Store a 64-bit long register to memory ('sd').
6305 instruct storeL_reg(memory mem, mRegL src) %{
6306 match(Set mem (StoreL mem src));
6308 ins_cost(200);
6309 format %{ "sd $mem, $src #@storeL_reg\n" %}
6310 ins_encode(store_L_reg_enc(mem, src));
6311 ins_pipe( ialu_storeL );
6312 %}
// Store long zero: use the hardwired zero register directly (cheaper than
// materializing the constant first, hence the lower cost).
6314 instruct storeL_immL0(memory mem, immL0 zero) %{
6315 match(Set mem (StoreL mem zero));
6317 ins_cost(180);
6318 format %{ "sd zero, $mem #@storeL_immL0" %}
6319 ins_encode(store_L_immL0_enc(mem, zero));
6320 ins_pipe( ialu_storeL );
6321 %}
// Store an arbitrary long immediate (materialized by the encoding).
6323 instruct storeL_imm(memory mem, immL src) %{
6324 match(Set mem (StoreL mem src));
6326 ins_cost(200);
6327 format %{ "sd $src, $mem #@storeL_imm" %}
6328 ins_encode(store_L_immL_enc(mem, src));
6329 ins_pipe( ialu_storeL );
6330 %}
6332 // Load Compressed Pointer
// Load a compressed (narrow) oop: 32-bit zero-extending load ('lwu').
6333 instruct loadN(mRegN dst, memory mem)
6334 %{
6335 match(Set dst (LoadN mem));
6337 ins_cost(125); // XXX
6338 format %{ "lwu $dst, $mem\t# compressed ptr @ loadN" %}
6339 ins_encode (load_N_enc(dst, mem));
6340 ins_pipe( ialu_loadI ); // XXX
6341 %}
// Fold DecodeN(LoadN) into a single 'lwu' when decoding is the identity
// (zero heap base and zero shift), so the narrow value IS the pointer.
6343 instruct loadN2P(mRegP dst, memory mem)
6344 %{
6345 match(Set dst (DecodeN (LoadN mem)));
6346 predicate(Universe::narrow_oop_base() == NULL && Universe::narrow_oop_shift() == 0);
6348 ins_cost(125); // XXX
6349 format %{ "lwu $dst, $mem\t# @ loadN2P" %}
6350 ins_encode (load_N_enc(dst, mem));
6351 ins_pipe( ialu_loadI ); // XXX
6352 %}
6354 // Load Pointer
// Load a full-width pointer ('ld').
6355 instruct loadP(mRegP dst, memory mem) %{
6356 match(Set dst (LoadP mem));
6358 ins_cost(125);
6359 format %{ "ld $dst, $mem #@loadP" %}
6360 ins_encode (load_P_enc(dst, mem));
6361 ins_pipe( ialu_loadI );
6362 %}
6364 // Load Klass Pointer
// Load a Klass pointer (full-width, same encoding as loadP).
6365 instruct loadKlass(mRegP dst, memory mem) %{
6366 match(Set dst (LoadKlass mem));
6368 ins_cost(125);
6369 format %{ "MOV $dst,$mem @ loadKlass" %}
6370 ins_encode (load_P_enc(dst, mem));
6371 ins_pipe( ialu_loadI );
6372 %}
6374 // Load narrow Klass Pointer
// Load a compressed Klass pointer ('lwu', zero-extending).
6375 instruct loadNKlass(mRegN dst, memory mem)
6376 %{
6377 match(Set dst (LoadNKlass mem));
6379 ins_cost(125); // XXX
6380 format %{ "lwu $dst, $mem\t# compressed klass ptr @ loadNKlass" %}
6381 ins_encode (load_N_enc(dst, mem));
6382 ins_pipe( ialu_loadI ); // XXX
6383 %}
// Fold DecodeNKlass(LoadNKlass) into one 'lwu' when klass decoding is the
// identity (zero base and zero shift).
6385 instruct loadN2PKlass(mRegP dst, memory mem)
6386 %{
6387 match(Set dst (DecodeNKlass (LoadNKlass mem)));
6388 predicate(Universe::narrow_klass_base() == NULL && Universe::narrow_klass_shift() == 0);
6390 ins_cost(125); // XXX
6391 format %{ "lwu $dst, $mem\t# compressed klass ptr @ loadN2PKlass" %}
6392 ins_encode (load_N_enc(dst, mem));
6393 ins_pipe( ialu_loadI ); // XXX
6394 %}
6396 // Load Constant
// Materialize an arbitrary 32-bit int constant into a register.
6397 instruct loadConI(mRegI dst, immI src) %{
6398 match(Set dst src);
6400 ins_cost(150);
6401 format %{ "mov $dst, $src #@loadConI" %}
6402 ins_encode %{
6403 Register dst = $dst$$Register;
6404 int value = $src$$constant;
6405 __ move(dst, value);
6406 %}
6407 ins_pipe( ialu_regI_regI );
6408 %}
// Materialize an arbitrary 64-bit long constant via the set64 macro.
6411 instruct loadConL_set64(mRegL dst, immL src) %{
6412 match(Set dst src);
6413 ins_cost(120);
6414 format %{ "li $dst, $src @ loadConL_set64" %}
6415 ins_encode %{
6416 __ set64($dst$$Register, $src$$constant);
6417 %}
6418 ins_pipe(ialu_regL_regL);
6419 %}
6421 /*
6422 // Load long value from constant table (predicated by immL_expensive).
6423 instruct loadConL_load(mRegL dst, immL_expensive src) %{
6424 match(Set dst src);
6425 ins_cost(150);
6426 format %{ "ld $dst, $constantoffset[$constanttablebase] # load long $src from table @ loadConL_ldx" %}
6427 ins_encode %{
6428 int con_offset = $constantoffset($src);
6430 if (Assembler::is_simm16(con_offset)) {
6431 __ ld($dst$$Register, $constanttablebase, con_offset);
6432 } else {
6433 __ set64(AT, con_offset);
6434 if (UseLoongsonISA) {
6435 __ gsldx($dst$$Register, $constanttablebase, AT, 0);
6436 } else {
6437 __ daddu(AT, $constanttablebase, AT);
6438 __ ld($dst$$Register, AT, 0);
6439 }
6440 }
6441 %}
6442 ins_pipe(ialu_loadI);
6443 %}
6444 */
// Materialize a long constant that fits in a signed 16-bit immediate with a
// single 'daddiu dst, zero, imm' (cheaper than the general set64 path).
6446 instruct loadConL16(mRegL dst, immL16 src) %{
6447 match(Set dst src);
6448 ins_cost(105);
6449 format %{ "mov $dst, $src #@loadConL16" %}
6450 ins_encode %{
6451 Register dst_reg = as_Register($dst$$reg);
6452 int value = $src$$constant;
6453 __ daddiu(dst_reg, R0, value);
6454 %}
6455 ins_pipe( ialu_regL_regL );
6456 %}
// Materialize long zero from the hardwired zero register (cheapest form).
6459 instruct loadConL0(mRegL dst, immL0 src) %{
6460 match(Set dst src);
6461 ins_cost(100);
6462 format %{ "mov $dst, zero #@loadConL0" %}
6463 ins_encode %{
6464 Register dst_reg = as_Register($dst$$reg);
6465 __ daddu(dst_reg, R0, R0);
6466 %}
6467 ins_pipe( ialu_regL_regL );
6468 %}
6470 // Load Range
// Load an array length (LoadRange); same encoding as a plain int load.
6471 instruct loadRange(mRegI dst, memory mem) %{
6472 match(Set dst (LoadRange mem));
6474 ins_cost(125);
6475 format %{ "MOV $dst,$mem @ loadRange" %}
6476 ins_encode(load_I_enc(dst, mem));
6477 ins_pipe( ialu_loadI );
6478 %}
// Store a full-width pointer register to memory ('sd').
6481 instruct storeP(memory mem, mRegP src ) %{
6482 match(Set mem (StoreP mem src));
6484 ins_cost(125);
6485 format %{ "sd $src, $mem #@storeP" %}
6486 ins_encode(store_P_reg_enc(mem, src));
6487 ins_pipe( ialu_storeI );
6488 %}
6490 // Store NULL Pointer, mark word, or other simple pointer constant.
6490 // Store NULL Pointer, mark word, or other simple pointer constant.
6491 instruct storeImmP0(memory mem, immP0 zero) %{
6492 match(Set mem (StoreP mem zero));
6494 ins_cost(125);
6495 format %{ "mov $mem, $zero #@storeImmP0" %}
6496 ins_encode(store_P_immP0_enc(mem));
6497 ins_pipe( ialu_storeI );
6498 %}
6500 // Store NULL Pointer, mark word, or other simple pointer constant.
// Store a small (31-bit) pointer constant to memory.
6501 instruct storeImmP(memory mem, immP31 src) %{
6502 match(Set mem (StoreP mem src));
6504 ins_cost(150);
6505 format %{ "mov $mem, $src #@storeImmP" %}
6506 ins_encode(store_P_immP_enc(mem, src));
6507 ins_pipe( ialu_storeI );
6508 %}
6510 // Store Byte Immediate
// Store an 8-bit immediate byte to memory.
6511 instruct storeImmB(memory mem, immI8 src) %{
6512 match(Set mem (StoreB mem src));
6514 ins_cost(150);
6515 format %{ "movb $mem, $src #@storeImmB" %}
6516 ins_encode(store_B_immI_enc(mem, src));
6517 ins_pipe( ialu_storeI );
6518 %}
6520 // Store Compressed Pointer
// Store a compressed oop register to memory ('sw').
6521 instruct storeN(memory mem, mRegN src)
6522 %{
6523 match(Set mem (StoreN mem src));
6525 ins_cost(125); // XXX
6526 format %{ "sw $mem, $src\t# compressed ptr @ storeN" %}
6527 ins_encode(store_N_reg_enc(mem, src));
6528 ins_pipe( ialu_storeI );
6529 %}
// Fold StoreN(EncodeP): when encoding is the identity (zero base/shift) the
// low 32 bits of the pointer can be stored directly.
6531 instruct storeP2N(memory mem, mRegP src)
6532 %{
6533 match(Set mem (StoreN mem (EncodeP src)));
6534 predicate(Universe::narrow_oop_base() == NULL && Universe::narrow_oop_shift() == 0);
6536 ins_cost(125); // XXX
6537 format %{ "sw $mem, $src\t# @ storeP2N" %}
6538 ins_encode(store_N_reg_enc(mem, src));
6539 ins_pipe( ialu_storeI );
6540 %}
// Store a compressed Klass pointer register to memory ('sw').
6542 instruct storeNKlass(memory mem, mRegN src)
6543 %{
6544 match(Set mem (StoreNKlass mem src));
6546 ins_cost(125); // XXX
6547 format %{ "sw $mem, $src\t# compressed klass ptr @ storeNKlass" %}
6548 ins_encode(store_N_reg_enc(mem, src));
6549 ins_pipe( ialu_storeI );
6550 %}
// Fold StoreNKlass(EncodePKlass) when klass encoding is the identity.
6552 instruct storeP2NKlass(memory mem, mRegP src)
6553 %{
6554 match(Set mem (StoreNKlass mem (EncodePKlass src)));
6555 predicate(Universe::narrow_klass_base() == NULL && Universe::narrow_klass_shift() == 0);
6557 ins_cost(125); // XXX
6558 format %{ "sw $mem, $src\t# @ storeP2NKlass" %}
6559 ins_encode(store_N_reg_enc(mem, src));
6560 ins_pipe( ialu_storeI );
6561 %}
// Store a compressed NULL (narrow zero) to memory.
6563 instruct storeImmN0(memory mem, immN0 zero)
6564 %{
6565 match(Set mem (StoreN mem zero));
6567 ins_cost(125); // XXX
6568 format %{ "storeN0 zero, $mem\t# compressed ptr" %}
6569 ins_encode(storeImmN0_enc(mem, zero));
6570 ins_pipe( ialu_storeI );
6571 %}
// Store a non-zero compressed oop constant to memory.
6573 instruct storeImmN(memory mem, immN src)
6574 %{
6575 match(Set mem (StoreN mem src));
6577 ins_cost(150);
6578 format %{ "storeImmN $mem, $src\t# compressed ptr @ storeImmN" %}
6579 ins_encode(storeImmN_enc(mem, src));
6580 ins_pipe( ialu_storeI );
6581 %}
// Store a compressed Klass-pointer constant to memory.
6583 instruct storeImmNKlass(memory mem, immNKlass src)
6584 %{
6585 match(Set mem (StoreNKlass mem src));
6587 ins_cost(150); // XXX
6588 format %{ "sw $mem, $src\t# compressed klass ptr @ storeImmNKlass" %}
6589 ins_encode(storeImmNKlass_enc(mem, src));
6590 ins_pipe( ialu_storeI );
6591 %}
6593 // Store Byte
// Store the low byte of an int register to memory ('sb').
6594 instruct storeB(memory mem, mRegI src) %{
6595 match(Set mem (StoreB mem src));
6597 ins_cost(125);
6598 format %{ "sb $src, $mem #@storeB" %}
6599 ins_encode(store_B_reg_enc(mem, src));
6600 ins_pipe( ialu_storeI );
6601 %}
// Fold StoreB(ConvL2I): 'sb' stores only the low byte, so the long source
// register can be stored directly without an explicit truncation.
6603 instruct storeB_convL2I(memory mem, mRegL src) %{
6604 match(Set mem (StoreB mem (ConvL2I src)));
6606 ins_cost(125);
6607 format %{ "sb $src, $mem #@storeB_convL2I" %}
6608 ins_encode(store_B_reg_enc(mem, src));
6609 ins_pipe( ialu_storeI );
6610 %}
6612 // Load Byte (8bit signed)
// Load a signed byte from memory ('lb').
6613 instruct loadB(mRegI dst, memory mem) %{
6614 match(Set dst (LoadB mem));
6616 ins_cost(125);
6617 format %{ "lb $dst, $mem #@loadB" %}
6618 ins_encode(load_B_enc(dst, mem));
6619 ins_pipe( ialu_loadI );
6620 %}
// Fold ConvI2L(LoadB): 'lb' sign-extends to the full register width.
6622 instruct loadB_convI2L(mRegL dst, memory mem) %{
6623 match(Set dst (ConvI2L (LoadB mem)));
6625 ins_cost(125);
6626 format %{ "lb $dst, $mem #@loadB_convI2L" %}
6627 ins_encode(load_B_enc(dst, mem));
6628 ins_pipe( ialu_loadI );
6629 %}
6631 // Load Byte (8bit UNsigned)
// Load an unsigned byte from memory ('lbu', zero-extending).
6632 instruct loadUB(mRegI dst, memory mem) %{
6633 match(Set dst (LoadUB mem));
6635 ins_cost(125);
6636 format %{ "lbu $dst, $mem #@loadUB" %}
6637 ins_encode(load_UB_enc(dst, mem));
6638 ins_pipe( ialu_loadI );
6639 %}
// Fold ConvI2L(LoadUB): 'lbu' zero-extends, which is the long value.
6641 instruct loadUB_convI2L(mRegL dst, memory mem) %{
6642 match(Set dst (ConvI2L (LoadUB mem)));
6644 ins_cost(125);
6645 format %{ "lbu $dst, $mem #@loadUB_convI2L" %}
6646 ins_encode(load_UB_enc(dst, mem));
6647 ins_pipe( ialu_loadI );
6648 %}
6650 // Load Short (16bit signed)
// Load a signed 16-bit short from memory ('lh').
6651 instruct loadS(mRegI dst, memory mem) %{
6652 match(Set dst (LoadS mem));
6654 ins_cost(125);
6655 format %{ "lh $dst, $mem #@loadS" %}
6656 ins_encode(load_S_enc(dst, mem));
6657 ins_pipe( ialu_loadI );
6658 %}
6660 // Load Short (16 bit signed) to Byte (8 bit signed)
// Fold (short << 24) >> 24 over a LoadS into a single signed byte load.
6661 instruct loadS2B(mRegI dst, memory mem, immI_24 twentyfour) %{
6662 match(Set dst (RShiftI (LShiftI (LoadS mem) twentyfour) twentyfour));
6664 ins_cost(125);
6665 format %{ "lb $dst, $mem\t# short -> byte #@loadS2B" %}
6666 ins_encode(load_B_enc(dst, mem));
6667 ins_pipe(ialu_loadI);
6668 %}
// Fold ConvI2L(LoadS): 'lh' sign-extends to the full register width.
6670 instruct loadS_convI2L(mRegL dst, memory mem) %{
6671 match(Set dst (ConvI2L (LoadS mem)));
6673 ins_cost(125);
6674 format %{ "lh $dst, $mem #@loadS_convI2L" %}
6675 ins_encode(load_S_enc(dst, mem));
6676 ins_pipe( ialu_loadI );
6677 %}
6679 // Store Integer Immediate
// Store a 32-bit int immediate to memory (materialized by the encoding).
6680 instruct storeImmI(memory mem, immI src) %{
6681 match(Set mem (StoreI mem src));
6683 ins_cost(150);
6684 format %{ "mov $mem, $src #@storeImmI" %}
6685 ins_encode(store_I_immI_enc(mem, src));
6686 ins_pipe( ialu_storeI );
6687 %}
6689 // Store Integer
// Store a 32-bit int register to memory ('sw').
6690 instruct storeI(memory mem, mRegI src) %{
6691 match(Set mem (StoreI mem src));
6693 ins_cost(125);
6694 format %{ "sw $mem, $src #@storeI" %}
6695 ins_encode(store_I_reg_enc(mem, src));
6696 ins_pipe( ialu_storeI );
6697 %}
// Fold StoreI(ConvL2I): 'sw' stores only the low 32 bits, so the long
// source can be stored directly.
6699 instruct storeI_convL2I(memory mem, mRegL src) %{
6700 match(Set mem (StoreI mem (ConvL2I src)));
6702 ins_cost(125);
6703 format %{ "sw $mem, $src #@storeI_convL2I" %}
6704 ins_encode(store_I_reg_enc(mem, src));
6705 ins_pipe( ialu_storeI );
6706 %}
6708 // Load Float
// Load a single-precision float from memory into an FP register.
6709 instruct loadF(regF dst, memory mem) %{
6710 match(Set dst (LoadF mem));
6712 ins_cost(150);
6713 format %{ "loadF $dst, $mem #@loadF" %}
6714 ins_encode(load_F_enc(dst, mem));
6715 ins_pipe( ialu_loadI );
6716 %}
// Materialize a pointer constant, emitting relocation info when the constant
// is a metadata or oop pointer so the GC/class unloading can patch it.
6718 instruct loadConP_general(mRegP dst, immP src) %{
6719 match(Set dst src);
6721 ins_cost(120);
6722 format %{ "li $dst, $src #@loadConP_general" %}
6724 ins_encode %{
6725 Register dst = $dst$$Register;
6726 long* value = (long*)$src$$constant;
6728 if($src->constant_reloc() == relocInfo::metadata_type){
// Metadata (Klass*): record relocation, then emit a patchable 48-bit load.
6729 int klass_index = __ oop_recorder()->find_index((Klass*)value);
6730 RelocationHolder rspec = metadata_Relocation::spec(klass_index);
6732 __ relocate(rspec);
6733 __ patchable_set48(dst, (long)value);
6734 }else if($src->constant_reloc() == relocInfo::oop_type){
// Oop: same patchable form with an oop relocation.
6735 int oop_index = __ oop_recorder()->find_index((jobject)value);
6736 RelocationHolder rspec = oop_Relocation::spec(oop_index);
6738 __ relocate(rspec);
6739 __ patchable_set48(dst, (long)value);
6740 } else if ($src->constant_reloc() == relocInfo::none) {
// Plain address: no relocation needed.
6741 __ set64(dst, (long)value);
6742 }
// NOTE(review): any other reloc type silently emits NO code here, leaving
// dst unchanged — presumably unreachable for immP, but confirm; an
// else-ShouldNotReachHere() would make the assumption explicit.
6743 %}
6745 ins_pipe( ialu_regI_regI );
6746 %}
6748 /*
6749 instruct loadConP_load(mRegP dst, immP_load src) %{
6750 match(Set dst src);
6752 ins_cost(100);
6753 format %{ "ld $dst, [$constanttablebase + $constantoffset] load from constant table: ptr=$src @ loadConP_load" %}
6755 ins_encode %{
6757 int con_offset = $constantoffset($src);
6759 if (Assembler::is_simm16(con_offset)) {
6760 __ ld($dst$$Register, $constanttablebase, con_offset);
6761 } else {
6762 __ set64(AT, con_offset);
6763 if (UseLoongsonISA) {
6764 __ gsldx($dst$$Register, $constanttablebase, AT, 0);
6765 } else {
6766 __ daddu(AT, $constanttablebase, AT);
6767 __ ld($dst$$Register, AT, 0);
6768 }
6769 }
6770 %}
6772 ins_pipe(ialu_loadI);
6773 %}
6774 */
// Materialize a non-oop pointer constant that is cheap to synthesize
// inline (no relocation, no constant-table load).
6776 instruct loadConP_no_oop_cheap(mRegP dst, immP_no_oop_cheap src) %{
6777 match(Set dst src);
6779 ins_cost(80);
6780 format %{ "li $dst, $src @ loadConP_no_oop_cheap" %}
6782 ins_encode %{
6783 __ set64($dst$$Register, $src$$constant);
6784 %}
6786 ins_pipe(ialu_regI_regI);
6787 %}
// Materialize the safepoint polling-page address.
6790 instruct loadConP_poll(mRegP dst, immP_poll src) %{
6791 match(Set dst src);
6793 ins_cost(50);
6794 format %{ "li $dst, $src #@loadConP_poll" %}
6796 ins_encode %{
6797 Register dst = $dst$$Register;
6798 intptr_t value = (intptr_t)$src$$constant;
6800 __ set64(dst, (jlong)value);
6801 %}
6803 ins_pipe( ialu_regI_regI );
6804 %}
// Materialize the NULL pointer from the hardwired zero register.
6806 instruct loadConP0(mRegP dst, immP0 src)
6807 %{
6808 match(Set dst src);
6810 ins_cost(50);
6811 format %{ "mov $dst, R0\t# ptr" %}
6812 ins_encode %{
6813 Register dst_reg = $dst$$Register;
6814 __ daddu(dst_reg, R0, R0);
6815 %}
6816 ins_pipe( ialu_regI_regI );
6817 %}
// Materialize a compressed NULL (narrow zero) from the zero register.
6819 instruct loadConN0(mRegN dst, immN0 src) %{
6820 match(Set dst src);
6821 format %{ "move $dst, R0\t# compressed NULL ptr" %}
6822 ins_encode %{
6823 __ move($dst$$Register, R0);
6824 %}
6825 ins_pipe( ialu_regI_regI );
6826 %}
// Materialize a non-zero compressed oop constant (records oop relocation).
6828 instruct loadConN(mRegN dst, immN src) %{
6829 match(Set dst src);
6831 ins_cost(125);
6832 format %{ "li $dst, $src\t# compressed ptr @ loadConN" %}
6833 ins_encode %{
6834 Register dst = $dst$$Register;
6835 __ set_narrow_oop(dst, (jobject)$src$$constant);
6836 %}
6837 ins_pipe( ialu_regI_regI ); // XXX
6838 %}
// Materialize a compressed Klass-pointer constant.
6840 instruct loadConNKlass(mRegN dst, immNKlass src) %{
6841 match(Set dst src);
6843 ins_cost(125);
6844 format %{ "li $dst, $src\t# compressed klass ptr @ loadConNKlass" %}
6845 ins_encode %{
6846 Register dst = $dst$$Register;
6847 __ set_narrow_klass(dst, (Klass*)$src$$constant);
6848 %}
6849 ins_pipe( ialu_regI_regI ); // XXX
6850 %}
6852 //FIXME
6853 // Tail Call; Jump from runtime stub to Java code.
6854 // Also known as an 'interprocedural jump'.
6855 // Target of jump will eventually return to caller.
6856 // TailJump below removes the return address.
// Tail call: jump (not call) from a runtime stub into Java code.
// The method oop is passed in S3; RA is pushed because the target
// (e.g. generate_forward_exception) expects it on the stack.
6857 instruct TailCalljmpInd(mRegP jump_target, mRegP method_oop) %{
6858 match(TailCall jump_target method_oop );
6859 ins_cost(300);
6860 format %{ "JMP $jump_target \t# @TailCalljmpInd" %}
6862 ins_encode %{
6863 Register target = $jump_target$$Register;
6864 Register oop = $method_oop$$Register;
6866 /* 2012/10/12 Jin: RA will be used in generate_forward_exception() */
6867 __ push(RA);
6869 __ move(S3, oop);
6870 __ jr(target);
// Fill the jr branch delay slot.
6871 __ nop();
6872 %}
6874 ins_pipe( pipe_jump );
6875 %}
6877 // Create exception oop: created by stack-crawling runtime code.
6878 // Created exception is now available to this handler, and is setup
6879 // just prior to jumping to this handler. No code emitted.
// The exception oop arrives in A0, placed there by the stack-crawling
// runtime just before jumping here — so no code needs to be emitted.
6880 instruct CreateException( a0_RegP ex_oop )
6881 %{
6882 match(Set ex_oop (CreateEx));
6884 // use the following format syntax
6885 format %{ "# exception oop is in A0; no code emitted @CreateException" %}
6886 ins_encode %{
6887 /* Jin: X86 leaves this function empty */
6888 __ block_comment("CreateException is empty in X86/MIPS");
6889 %}
6890 ins_pipe( empty );
6891 // ins_pipe( pipe_jump );
6892 %}
6895 /* 2012/9/14 Jin: The mechanism of exception handling is clear now.
6897 - Common try/catch:
6898 2012/9/14 Jin: [stubGenerator_mips.cpp] generate_forward_exception()
6899 |- V0, V1 are created
6900 |- T9 <= SharedRuntime::exception_handler_for_return_address
6901 `- jr T9
6902 `- the caller's exception_handler
6903 `- jr OptoRuntime::exception_blob
6904 `- here
6905 - Rethrow(e.g. 'unwind'):
6906 * The callee:
6907 |- an exception is triggered during execution
6908 `- exits the callee method through RethrowException node
6909 |- The callee pushes exception_oop(T0) and exception_pc(RA)
6910 `- The callee jumps to OptoRuntime::rethrow_stub()
6911 * In OptoRuntime::rethrow_stub:
6912 |- The VM calls _rethrow_Java to determine the return address in the caller method
6913 `- exits the stub with tailjmpInd
6914 |- pops exception_oop(V0) and exception_pc(V1)
6915 `- jumps to the return address(usually an exception_handler)
6916 * The caller:
6917 `- continues processing the exception_blob with V0/V1
6918 */
6920 /*
6921 Disassembling OptoRuntime::rethrow_stub()
6923 ; locals
6924 0x2d3bf320: addiu sp, sp, 0xfffffff8
6925 0x2d3bf324: sw ra, 0x4(sp)
6926 0x2d3bf328: sw fp, 0x0(sp)
6927 0x2d3bf32c: addu fp, sp, zero
6928 0x2d3bf330: addiu sp, sp, 0xfffffff0
6929 0x2d3bf334: sw ra, 0x8(sp)
6930 0x2d3bf338: sw t0, 0x4(sp)
6931 0x2d3bf33c: sw sp, 0x0(sp)
6933 ; get_thread(S2)
6934 0x2d3bf340: addu s2, sp, zero
6935 0x2d3bf344: srl s2, s2, 12
6936 0x2d3bf348: sll s2, s2, 2
6937 0x2d3bf34c: lui at, 0x2c85
6938 0x2d3bf350: addu at, at, s2
6939 0x2d3bf354: lw s2, 0xffffcc80(at)
6941 0x2d3bf358: lw s0, 0x0(sp)
6942 0x2d3bf35c: sw s0, 0x118(s2) // last_sp -> thread
6943 0x2d3bf360: sw s2, 0xc(sp)
6945 ; OptoRuntime::rethrow_C(oopDesc* exception, JavaThread* thread, address ret_pc)
6946 0x2d3bf364: lw a0, 0x4(sp)
6947 0x2d3bf368: lw a1, 0xc(sp)
6948 0x2d3bf36c: lw a2, 0x8(sp)
6949 ;; Java_To_Runtime
6950 0x2d3bf370: lui t9, 0x2c34
6951 0x2d3bf374: addiu t9, t9, 0xffff8a48
6952 0x2d3bf378: jalr t9
6953 0x2d3bf37c: nop
6955 0x2d3bf380: addu s3, v0, zero ; S3: SharedRuntime::raw_exception_handler_for_return_address()
6957 0x2d3bf384: lw s0, 0xc(sp)
6958 0x2d3bf388: sw zero, 0x118(s0)
6959 0x2d3bf38c: sw zero, 0x11c(s0)
6960 0x2d3bf390: lw s1, 0x144(s0) ; ex_oop: S1
6961 0x2d3bf394: addu s2, s0, zero
6962 0x2d3bf398: sw zero, 0x144(s2)
6963 0x2d3bf39c: lw s0, 0x4(s2)
6964 0x2d3bf3a0: addiu s4, zero, 0x0
6965 0x2d3bf3a4: bne s0, s4, 0x2d3bf3d4
6966 0x2d3bf3a8: nop
6967 0x2d3bf3ac: addiu sp, sp, 0x10
6968 0x2d3bf3b0: addiu sp, sp, 0x8
6969 0x2d3bf3b4: lw ra, 0xfffffffc(sp)
6970 0x2d3bf3b8: lw fp, 0xfffffff8(sp)
6971 0x2d3bf3bc: lui at, 0x2b48
6972 0x2d3bf3c0: lw at, 0x100(at)
6974 ; tailjmpInd: Restores exception_oop & exception_pc
6975 0x2d3bf3c4: addu v1, ra, zero
6976 0x2d3bf3c8: addu v0, s1, zero
6977 0x2d3bf3cc: jr s3
6978 0x2d3bf3d0: nop
6979 ; Exception:
6980 0x2d3bf3d4: lui s1, 0x2cc8 ; generate_forward_exception()
6981 0x2d3bf3d8: addiu s1, s1, 0x40
6982 0x2d3bf3dc: addiu s2, zero, 0x0
6983 0x2d3bf3e0: addiu sp, sp, 0x10
6984 0x2d3bf3e4: addiu sp, sp, 0x8
6985 0x2d3bf3e8: lw ra, 0xfffffffc(sp)
6986 0x2d3bf3ec: lw fp, 0xfffffff8(sp)
6987 0x2d3bf3f0: lui at, 0x2b48
6988 0x2d3bf3f4: lw at, 0x100(at)
6989 ; TailCalljmpInd
6990 __ push(RA); ; to be used in generate_forward_exception()
6991 0x2d3bf3f8: addu t7, s2, zero
6992 0x2d3bf3fc: jr s1
6993 0x2d3bf400: nop
6994 */
6995 // Rethrow exception:
6996 // The exception oop will come in the first argument position.
6997 // Then JUMP (not call) to the rethrow stub code.
// Rethrow: jump (not call) to OptoRuntime::rethrow_stub(), which locates
// the exception handler in the parent method. Emits a runtime-call
// relocation so the patchable jump target can be relocated.
6998 instruct RethrowException()
6999 %{
7000 match(Rethrow);
7002 // use the following format syntax
7003 format %{ "JMP rethrow_stub #@RethrowException" %}
7004 ins_encode %{
7005 __ block_comment("@ RethrowException");
7007 cbuf.set_insts_mark();
7008 cbuf.relocate(cbuf.insts_mark(), runtime_call_Relocation::spec());
7010 // call OptoRuntime::rethrow_stub to get the exception handler in parent method
7011 __ patchable_jump((address)OptoRuntime::rethrow_stub());
7012 %}
7013 ins_pipe( pipe_jump );
7014 %}
// Branch on pointer compared against NULL (immP0). Only eq/ne are reachable;
// the unsigned above/below cases are commented out because comparing a
// pointer against 0 only needs equality. Trailing nop fills the MIPS branch
// delay slot.
// NOTE(review): the '(&L)' null-check derives a reference from a possibly
// null label pointer first (file-wide idiom) — technically UB; confirm this
// matches the upstream convention before changing.
7016 instruct branchConP_zero(cmpOpU cmp, mRegP op1, immP0 zero, label labl) %{
7017 match(If cmp (CmpP op1 zero));
7018 effect(USE labl);
7020 ins_cost(180);
7021 format %{ "b$cmp $op1, R0, $labl #@branchConP_zero" %}
7023 ins_encode %{
7024 Register op1 = $op1$$Register;
7025 Register op2 = R0;
7026 Label &L = *($labl$$label);
7027 int flag = $cmp$$cmpcode;
7029 switch(flag)
7030 {
7031 case 0x01: //equal
7032 if (&L)
7033 __ beq(op1, op2, L);
7034 else
7035 __ beq(op1, op2, (int)0);
7036 break;
7037 case 0x02: //not_equal
7038 if (&L)
7039 __ bne(op1, op2, L);
7040 else
7041 __ bne(op1, op2, (int)0);
7042 break;
7043 /*
7044 case 0x03: //above
7045 __ sltu(AT, op2, op1);
7046 if(&L)
7047 __ bne(R0, AT, L);
7048 else
7049 __ bne(R0, AT, (int)0);
7050 break;
7051 case 0x04: //above_equal
7052 __ sltu(AT, op1, op2);
7053 if(&L)
7054 __ beq(AT, R0, L);
7055 else
7056 __ beq(AT, R0, (int)0);
7057 break;
7058 case 0x05: //below
7059 __ sltu(AT, op1, op2);
7060 if(&L)
7061 __ bne(R0, AT, L);
7062 else
7063 __ bne(R0, AT, (int)0);
7064 break;
7065 case 0x06: //below_equal
7066 __ sltu(AT, op2, op1);
7067 if(&L)
7068 __ beq(AT, R0, L);
7069 else
7070 __ beq(AT, R0, (int)0);
7071 break;
7072 */
7073 default:
7074 Unimplemented();
7075 }
// Delay slot of the emitted branch.
7076 __ nop();
7077 %}
7079 ins_pc_relative(1);
7080 ins_pipe( pipe_alu_branch );
7081 %}
// Branch on a narrow oop decoded to a pointer and compared against NULL.
// Only valid when the heap base is NULL and the shift is 0 (see predicate),
// so the compressed value can be compared directly without decoding.
// Only eq/ne are meaningful for a null check. Trailing nop = delay slot.
7083 instruct branchConN2P_zero(cmpOpU cmp, mRegN op1, immP0 zero, label labl) %{
7084 match(If cmp (CmpP (DecodeN op1) zero));
7085 predicate(Universe::narrow_oop_base() == NULL && Universe::narrow_oop_shift() == 0);
7086 effect(USE labl);
7088 ins_cost(180);
7089 format %{ "b$cmp $op1, R0, $labl #@branchConN2P_zero" %}
7091 ins_encode %{
7092 Register op1 = $op1$$Register;
7093 Register op2 = R0;
7094 Label &L = *($labl$$label);
7095 int flag = $cmp$$cmpcode;
7097 switch(flag)
7098 {
7099 case 0x01: //equal
7100 if (&L)
7101 __ beq(op1, op2, L);
7102 else
7103 __ beq(op1, op2, (int)0);
7104 break;
7105 case 0x02: //not_equal
7106 if (&L)
7107 __ bne(op1, op2, L);
7108 else
7109 __ bne(op1, op2, (int)0);
7110 break;
7111 default:
7112 Unimplemented();
7113 }
7114 __ nop();
7115 %}
7117 ins_pc_relative(1);
7118 ins_pipe( pipe_alu_branch );
7119 %}
// Branch on unsigned comparison of two pointer registers. For the relational
// cases, sltu materializes the condition in the AT scratch register, then a
// beq/bne against R0 performs the branch. Trailing nop = delay slot.
7122 instruct branchConP(cmpOpU cmp, mRegP op1, mRegP op2, label labl) %{
7123 match(If cmp (CmpP op1 op2));
7124 // predicate(can_branch_register(_kids[0]->_leaf, _kids[1]->_leaf));
7125 effect(USE labl);
7127 ins_cost(200);
7128 format %{ "b$cmp $op1, $op2, $labl #@branchConP" %}
7130 ins_encode %{
7131 Register op1 = $op1$$Register;
7132 Register op2 = $op2$$Register;
7133 Label &L = *($labl$$label);
7134 int flag = $cmp$$cmpcode;
7136 switch(flag)
7137 {
7138 case 0x01: //equal
7139 if (&L)
7140 __ beq(op1, op2, L);
7141 else
7142 __ beq(op1, op2, (int)0);
7143 break;
7144 case 0x02: //not_equal
7145 if (&L)
7146 __ bne(op1, op2, L);
7147 else
7148 __ bne(op1, op2, (int)0);
7149 break;
7150 case 0x03: //above
7151 __ sltu(AT, op2, op1);
7152 if(&L)
7153 __ bne(R0, AT, L);
7154 else
7155 __ bne(R0, AT, (int)0);
7156 break;
7157 case 0x04: //above_equal
7158 __ sltu(AT, op1, op2);
7159 if(&L)
7160 __ beq(AT, R0, L);
7161 else
7162 __ beq(AT, R0, (int)0);
7163 break;
7164 case 0x05: //below
7165 __ sltu(AT, op1, op2);
7166 if(&L)
7167 __ bne(R0, AT, L);
7168 else
7169 __ bne(R0, AT, (int)0);
7170 break;
7171 case 0x06: //below_equal
7172 __ sltu(AT, op2, op1);
7173 if(&L)
7174 __ beq(AT, R0, L);
7175 else
7176 __ beq(AT, R0, (int)0);
7177 break;
7178 default:
7179 Unimplemented();
7180 }
7181 __ nop();
7182 %}
7184 ins_pc_relative(1);
7185 ins_pipe( pipe_alu_branch );
7186 %}
// Branch on a compressed (narrow) oop compared against the narrow null
// (immN0). Compressed null encodes as 0, so a direct eq/ne against R0
// suffices; relational cases are not meaningful here. Trailing nop = delay slot.
7188 instruct cmpN_null_branch(cmpOp cmp, mRegN op1, immN0 null, label labl) %{
7189 match(If cmp (CmpN op1 null));
7190 effect(USE labl);
7192 ins_cost(180);
7193 format %{ "CMP $op1,0\t! compressed ptr\n\t"
7194 "BP$cmp $labl @ cmpN_null_branch" %}
7195 ins_encode %{
7196 Register op1 = $op1$$Register;
7197 Register op2 = R0;
7198 Label &L = *($labl$$label);
7199 int flag = $cmp$$cmpcode;
7201 switch(flag)
7202 {
7203 case 0x01: //equal
7204 if (&L)
7205 __ beq(op1, op2, L);
7206 else
7207 __ beq(op1, op2, (int)0);
7208 break;
7209 case 0x02: //not_equal
7210 if (&L)
7211 __ bne(op1, op2, L);
7212 else
7213 __ bne(op1, op2, (int)0);
7214 break;
7215 default:
7216 Unimplemented();
7217 }
7218 __ nop();
7219 %}
7220 //TODO: pipe_branchP or create pipe_branchN LEE
7221 ins_pc_relative(1);
7222 ins_pipe( pipe_alu_branch );
7223 %}
// Branch on unsigned comparison of two compressed-oop registers.
// Relational cases compute the condition into AT via sltu, then branch on
// AT against R0. Trailing nop = delay slot.
7225 instruct cmpN_reg_branch(cmpOp cmp, mRegN op1, mRegN op2, label labl) %{
7226 match(If cmp (CmpN op1 op2));
7227 effect(USE labl);
7229 ins_cost(180);
7230 format %{ "CMP $op1,$op2\t! compressed ptr\n\t"
7231 "BP$cmp $labl" %}
7232 ins_encode %{
7233 Register op1_reg = $op1$$Register;
7234 Register op2_reg = $op2$$Register;
7235 Label &L = *($labl$$label);
7236 int flag = $cmp$$cmpcode;
7238 switch(flag)
7239 {
7240 case 0x01: //equal
7241 if (&L)
7242 __ beq(op1_reg, op2_reg, L);
7243 else
7244 __ beq(op1_reg, op2_reg, (int)0);
7245 break;
7246 case 0x02: //not_equal
7247 if (&L)
7248 __ bne(op1_reg, op2_reg, L);
7249 else
7250 __ bne(op1_reg, op2_reg, (int)0);
7251 break;
7252 case 0x03: //above
7253 __ sltu(AT, op2_reg, op1_reg);
7254 if(&L)
7255 __ bne(R0, AT, L);
7256 else
7257 __ bne(R0, AT, (int)0);
7258 break;
7259 case 0x04: //above_equal
7260 __ sltu(AT, op1_reg, op2_reg);
7261 if(&L)
7262 __ beq(AT, R0, L);
7263 else
7264 __ beq(AT, R0, (int)0);
7265 break;
7266 case 0x05: //below
7267 __ sltu(AT, op1_reg, op2_reg);
7268 if(&L)
7269 __ bne(R0, AT, L);
7270 else
7271 __ bne(R0, AT, (int)0);
7272 break;
7273 case 0x06: //below_equal
7274 __ sltu(AT, op2_reg, op1_reg);
7275 if(&L)
7276 __ beq(AT, R0, L);
7277 else
7278 __ beq(AT, R0, (int)0);
7279 break;
7280 default:
7281 Unimplemented();
7282 }
7283 __ nop();
7284 %}
7285 ins_pc_relative(1);
7286 ins_pipe( pipe_alu_branch );
7287 %}
// Unsigned integer compare-and-branch, register vs register.
// sltu sets AT for the relational cases; trailing nop = delay slot.
7289 instruct branchConIU_reg_reg(cmpOpU cmp, mRegI src1, mRegI src2, label labl) %{
7290 match( If cmp (CmpU src1 src2) );
7291 effect(USE labl);
7292 format %{ "BR$cmp $src1, $src2, $labl #@branchConIU_reg_reg" %}
7294 ins_encode %{
7295 Register op1 = $src1$$Register;
7296 Register op2 = $src2$$Register;
7297 Label &L = *($labl$$label);
7298 int flag = $cmp$$cmpcode;
7300 switch(flag)
7301 {
7302 case 0x01: //equal
7303 if (&L)
7304 __ beq(op1, op2, L);
7305 else
7306 __ beq(op1, op2, (int)0);
7307 break;
7308 case 0x02: //not_equal
7309 if (&L)
7310 __ bne(op1, op2, L);
7311 else
7312 __ bne(op1, op2, (int)0);
7313 break;
7314 case 0x03: //above
7315 __ sltu(AT, op2, op1);
7316 if(&L)
7317 __ bne(AT, R0, L);
7318 else
7319 __ bne(AT, R0, (int)0);
7320 break;
7321 case 0x04: //above_equal
7322 __ sltu(AT, op1, op2);
7323 if(&L)
7324 __ beq(AT, R0, L);
7325 else
7326 __ beq(AT, R0, (int)0);
7327 break;
7328 case 0x05: //below
7329 __ sltu(AT, op1, op2);
7330 if(&L)
7331 __ bne(AT, R0, L);
7332 else
7333 __ bne(AT, R0, (int)0);
7334 break;
7335 case 0x06: //below_equal
7336 __ sltu(AT, op2, op1);
7337 if(&L)
7338 __ beq(AT, R0, L);
7339 else
7340 __ beq(AT, R0, (int)0);
7341 break;
7342 default:
7343 Unimplemented();
7344 }
7345 __ nop();
7346 %}
7348 ins_pc_relative(1);
7349 ins_pipe( pipe_alu_branch );
7350 %}
// Unsigned integer compare-and-branch, register vs arbitrary immediate.
// The immediate is first materialized into AT via move(); the relational
// cases then reuse AT as both operand and result of sltu (safe because the
// comparison consumes AT before it is overwritten). Trailing nop = delay slot.
7353 instruct branchConIU_reg_imm(cmpOpU cmp, mRegI src1, immI src2, label labl) %{
7354 match( If cmp (CmpU src1 src2) );
7355 effect(USE labl);
7356 format %{ "BR$cmp $src1, $src2, $labl #@branchConIU_reg_imm" %}
7358 ins_encode %{
7359 Register op1 = $src1$$Register;
7360 int val = $src2$$constant;
7361 Label &L = *($labl$$label);
7362 int flag = $cmp$$cmpcode;
7364 __ move(AT, val);
7365 switch(flag)
7366 {
7367 case 0x01: //equal
7368 if (&L)
7369 __ beq(op1, AT, L);
7370 else
7371 __ beq(op1, AT, (int)0);
7372 break;
7373 case 0x02: //not_equal
7374 if (&L)
7375 __ bne(op1, AT, L);
7376 else
7377 __ bne(op1, AT, (int)0);
7378 break;
7379 case 0x03: //above
7380 __ sltu(AT, AT, op1);
7381 if(&L)
7382 __ bne(R0, AT, L);
7383 else
7384 __ bne(R0, AT, (int)0);
7385 break;
7386 case 0x04: //above_equal
7387 __ sltu(AT, op1, AT);
7388 if(&L)
7389 __ beq(AT, R0, L);
7390 else
7391 __ beq(AT, R0, (int)0);
7392 break;
7393 case 0x05: //below
7394 __ sltu(AT, op1, AT);
7395 if(&L)
7396 __ bne(R0, AT, L);
7397 else
7398 __ bne(R0, AT, (int)0);
7399 break;
7400 case 0x06: //below_equal
7401 __ sltu(AT, AT, op1);
7402 if(&L)
7403 __ beq(AT, R0, L);
7404 else
7405 __ beq(AT, R0, (int)0);
7406 break;
7407 default:
7408 Unimplemented();
7409 }
7410 __ nop();
7411 %}
7413 ins_pc_relative(1);
7414 ins_pipe( pipe_alu_branch );
7415 %}
// Signed integer compare-and-branch, register vs register. Same shape as
// branchConIU_reg_reg but uses signed slt instead of sltu. Case comments
// (above/below) follow the cmpcode numbering; semantically they are the
// signed greater/less relations here. Trailing nop = delay slot.
7417 instruct branchConI_reg_reg(cmpOp cmp, mRegI src1, mRegI src2, label labl) %{
7418 match( If cmp (CmpI src1 src2) );
7419 effect(USE labl);
7420 format %{ "BR$cmp $src1, $src2, $labl #@branchConI_reg_reg" %}
7422 ins_encode %{
7423 Register op1 = $src1$$Register;
7424 Register op2 = $src2$$Register;
7425 Label &L = *($labl$$label);
7426 int flag = $cmp$$cmpcode;
7428 switch(flag)
7429 {
7430 case 0x01: //equal
7431 if (&L)
7432 __ beq(op1, op2, L);
7433 else
7434 __ beq(op1, op2, (int)0);
7435 break;
7436 case 0x02: //not_equal
7437 if (&L)
7438 __ bne(op1, op2, L);
7439 else
7440 __ bne(op1, op2, (int)0);
7441 break;
7442 case 0x03: //above
7443 __ slt(AT, op2, op1);
7444 if(&L)
7445 __ bne(R0, AT, L);
7446 else
7447 __ bne(R0, AT, (int)0);
7448 break;
7449 case 0x04: //above_equal
7450 __ slt(AT, op1, op2);
7451 if(&L)
7452 __ beq(AT, R0, L);
7453 else
7454 __ beq(AT, R0, (int)0);
7455 break;
7456 case 0x05: //below
7457 __ slt(AT, op1, op2);
7458 if(&L)
7459 __ bne(R0, AT, L);
7460 else
7461 __ bne(R0, AT, (int)0);
7462 break;
7463 case 0x06: //below_equal
7464 __ slt(AT, op2, op1);
7465 if(&L)
7466 __ beq(AT, R0, L);
7467 else
7468 __ beq(AT, R0, (int)0);
7469 break;
7470 default:
7471 Unimplemented();
7472 }
7473 __ nop();
7474 %}
7476 ins_pc_relative(1);
7477 ins_pipe( pipe_alu_branch );
7478 %}
// Signed integer compare-and-branch against zero. Uses the dedicated MIPS
// compare-with-zero branches (bgtz/bgez/bltz/blez) so no scratch register
// or immediate materialization is needed — hence the lower cost (170).
// Trailing nop = delay slot.
7480 instruct branchConI_reg_imm0(cmpOp cmp, mRegI src1, immI0 src2, label labl) %{
7481 match( If cmp (CmpI src1 src2) );
7482 effect(USE labl);
7483 ins_cost(170);
7484 format %{ "BR$cmp $src1, $src2, $labl #@branchConI_reg_imm0" %}
7486 ins_encode %{
7487 Register op1 = $src1$$Register;
7488 // int val = $src2$$constant;
7489 Label &L = *($labl$$label);
7490 int flag = $cmp$$cmpcode;
7492 //__ move(AT, val);
7493 switch(flag)
7494 {
7495 case 0x01: //equal
7496 if (&L)
7497 __ beq(op1, R0, L);
7498 else
7499 __ beq(op1, R0, (int)0);
7500 break;
7501 case 0x02: //not_equal
7502 if (&L)
7503 __ bne(op1, R0, L);
7504 else
7505 __ bne(op1, R0, (int)0);
7506 break;
7507 case 0x03: //greater
7508 if(&L)
7509 __ bgtz(op1, L);
7510 else
7511 __ bgtz(op1, (int)0);
7512 break;
7513 case 0x04: //greater_equal
7514 if(&L)
7515 __ bgez(op1, L);
7516 else
7517 __ bgez(op1, (int)0);
7518 break;
7519 case 0x05: //less
7520 if(&L)
7521 __ bltz(op1, L);
7522 else
7523 __ bltz(op1, (int)0);
7524 break;
7525 case 0x06: //less_equal
7526 if(&L)
7527 __ blez(op1, L);
7528 else
7529 __ blez(op1, (int)0);
7530 break;
7531 default:
7532 Unimplemented();
7533 }
7534 __ nop();
7535 %}
7537 ins_pc_relative(1);
7538 ins_pipe( pipe_alu_branch );
7539 %}
// Signed integer compare-and-branch, register vs arbitrary immediate.
// The immediate is materialized into AT, then compared with slt for the
// relational cases (AT is consumed before being overwritten).
// Trailing nop = delay slot.
7542 instruct branchConI_reg_imm(cmpOp cmp, mRegI src1, immI src2, label labl) %{
7543 match( If cmp (CmpI src1 src2) );
7544 effect(USE labl);
7545 ins_cost(200);
7546 format %{ "BR$cmp $src1, $src2, $labl #@branchConI_reg_imm" %}
7548 ins_encode %{
7549 Register op1 = $src1$$Register;
7550 int val = $src2$$constant;
7551 Label &L = *($labl$$label);
7552 int flag = $cmp$$cmpcode;
7554 __ move(AT, val);
7555 switch(flag)
7556 {
7557 case 0x01: //equal
7558 if (&L)
7559 __ beq(op1, AT, L);
7560 else
7561 __ beq(op1, AT, (int)0);
7562 break;
7563 case 0x02: //not_equal
7564 if (&L)
7565 __ bne(op1, AT, L);
7566 else
7567 __ bne(op1, AT, (int)0);
7568 break;
7569 case 0x03: //greater
7570 __ slt(AT, AT, op1);
7571 if(&L)
7572 __ bne(R0, AT, L);
7573 else
7574 __ bne(R0, AT, (int)0);
7575 break;
7576 case 0x04: //greater_equal
7577 __ slt(AT, op1, AT);
7578 if(&L)
7579 __ beq(AT, R0, L);
7580 else
7581 __ beq(AT, R0, (int)0);
7582 break;
7583 case 0x05: //less
7584 __ slt(AT, op1, AT);
7585 if(&L)
7586 __ bne(R0, AT, L);
7587 else
7588 __ bne(R0, AT, (int)0);
7589 break;
7590 case 0x06: //less_equal
7591 __ slt(AT, AT, op1);
7592 if(&L)
7593 __ beq(AT, R0, L);
7594 else
7595 __ beq(AT, R0, (int)0);
7596 break;
7597 default:
7598 Unimplemented();
7599 }
7600 __ nop();
7601 %}
7603 ins_pc_relative(1);
7604 ins_pipe( pipe_alu_branch );
7605 %}
// Unsigned integer compare-and-branch against zero. The constant-foldable
// cases are resolved at encode time: "above_equal 0" is always true
// (unconditional beq R0,R0), and "below 0" is always false.
// NOTE(review): case 0x05 uses a bare 'return;' to emit nothing for the
// never-taken branch, which also skips the trailing delay-slot nop — verify
// the matcher tolerates a zero-length branch here.
7607 instruct branchConIU_reg_imm0(cmpOpU cmp, mRegI src1, immI0 zero, label labl) %{
7608 match( If cmp (CmpU src1 zero) );
7609 effect(USE labl);
7610 format %{ "BR$cmp $src1, zero, $labl #@branchConIU_reg_imm0" %}
7612 ins_encode %{
7613 Register op1 = $src1$$Register;
7614 Label &L = *($labl$$label);
7615 int flag = $cmp$$cmpcode;
7617 switch(flag)
7618 {
7619 case 0x01: //equal
7620 if (&L)
7621 __ beq(op1, R0, L);
7622 else
7623 __ beq(op1, R0, (int)0);
7624 break;
7625 case 0x02: //not_equal
7626 if (&L)
7627 __ bne(op1, R0, L);
7628 else
7629 __ bne(op1, R0, (int)0);
7630 break;
7631 case 0x03: //above
7632 if(&L)
7633 __ bne(R0, op1, L);
7634 else
7635 __ bne(R0, op1, (int)0);
7636 break;
7637 case 0x04: //above_equal
7638 if(&L)
7639 __ beq(R0, R0, L);
7640 else
7641 __ beq(R0, R0, (int)0);
7642 break;
7643 case 0x05: //below
7644 return;
7645 break;
7646 case 0x06: //below_equal
7647 if(&L)
7648 __ beq(op1, R0, L);
7649 else
7650 __ beq(op1, R0, (int)0);
7651 break;
7652 default:
7653 Unimplemented();
7654 }
7655 __ nop();
7656 %}
7658 ins_pc_relative(1);
7659 ins_pipe( pipe_alu_branch );
7660 %}
// Unsigned integer compare-and-branch vs a 16-bit immediate. Where the
// immediate fits the sltiu encoding (above_equal/below), one instruction is
// saved; other cases materialize the value into AT first.
// Trailing nop = delay slot.
7663 instruct branchConIU_reg_immI16(cmpOpU cmp, mRegI src1, immI16 src2, label labl) %{
7664 match( If cmp (CmpU src1 src2) );
7665 effect(USE labl);
7666 ins_cost(180);
7667 format %{ "BR$cmp $src1, $src2, $labl #@branchConIU_reg_immI16" %}
7669 ins_encode %{
7670 Register op1 = $src1$$Register;
7671 int val = $src2$$constant;
7672 Label &L = *($labl$$label);
7673 int flag = $cmp$$cmpcode;
7675 switch(flag)
7676 {
7677 case 0x01: //equal
7678 __ move(AT, val);
7679 if (&L)
7680 __ beq(op1, AT, L);
7681 else
7682 __ beq(op1, AT, (int)0);
7683 break;
7684 case 0x02: //not_equal
7685 __ move(AT, val);
7686 if (&L)
7687 __ bne(op1, AT, L);
7688 else
7689 __ bne(op1, AT, (int)0);
7690 break;
7691 case 0x03: //above
7692 __ move(AT, val);
7693 __ sltu(AT, AT, op1);
7694 if(&L)
7695 __ bne(R0, AT, L);
7696 else
7697 __ bne(R0, AT, (int)0);
7698 break;
7699 case 0x04: //above_equal
7700 __ sltiu(AT, op1, val);
7701 if(&L)
7702 __ beq(AT, R0, L);
7703 else
7704 __ beq(AT, R0, (int)0);
7705 break;
7706 case 0x05: //below
7707 __ sltiu(AT, op1, val);
7708 if(&L)
7709 __ bne(R0, AT, L);
7710 else
7711 __ bne(R0, AT, (int)0);
7712 break;
7713 case 0x06: //below_equal
7714 __ move(AT, val);
7715 __ sltu(AT, AT, op1);
7716 if(&L)
7717 __ beq(AT, R0, L);
7718 else
7719 __ beq(AT, R0, (int)0);
7720 break;
7721 default:
7722 Unimplemented();
7723 }
7724 __ nop();
7725 %}
7727 ins_pc_relative(1);
7728 ins_pipe( pipe_alu_branch );
7729 %}
// Signed long compare-and-branch, register vs register. Unlike the int
// variants above, each case fills its own delay slot explicitly with
// __ delayed()->nop() instead of a shared trailing nop.
7732 instruct branchConL_regL_regL(cmpOp cmp, mRegL src1, mRegL src2, label labl) %{
7733 match( If cmp (CmpL src1 src2) );
7734 effect(USE labl);
7735 format %{ "BR$cmp $src1, $src2, $labl #@branchConL_regL_regL" %}
7736 ins_cost(250);
7738 ins_encode %{
7739 Register opr1_reg = as_Register($src1$$reg);
7740 Register opr2_reg = as_Register($src2$$reg);
7742 Label &target = *($labl$$label);
7743 int flag = $cmp$$cmpcode;
7745 switch(flag)
7746 {
7747 case 0x01: //equal
7748 if (&target)
7749 __ beq(opr1_reg, opr2_reg, target);
7750 else
7751 __ beq(opr1_reg, opr2_reg, (int)0);
7752 __ delayed()->nop();
7753 break;
7755 case 0x02: //not_equal
7756 if(&target)
7757 __ bne(opr1_reg, opr2_reg, target);
7758 else
7759 __ bne(opr1_reg, opr2_reg, (int)0);
7760 __ delayed()->nop();
7761 break;
7763 case 0x03: //greater
7764 __ slt(AT, opr2_reg, opr1_reg);
7765 if(&target)
7766 __ bne(AT, R0, target);
7767 else
7768 __ bne(AT, R0, (int)0);
7769 __ delayed()->nop();
7770 break;
7772 case 0x04: //greater_equal
7773 __ slt(AT, opr1_reg, opr2_reg);
7774 if(&target)
7775 __ beq(AT, R0, target);
7776 else
7777 __ beq(AT, R0, (int)0);
7778 __ delayed()->nop();
7780 break;
7782 case 0x05: //less
7783 __ slt(AT, opr1_reg, opr2_reg);
7784 if(&target)
7785 __ bne(AT, R0, target);
7786 else
7787 __ bne(AT, R0, (int)0);
7788 __ delayed()->nop();
7790 break;
7792 case 0x06: //less_equal
7793 __ slt(AT, opr2_reg, opr1_reg);
7795 if(&target)
7796 __ beq(AT, R0, target);
7797 else
7798 __ beq(AT, R0, (int)0);
7799 __ delayed()->nop();
7801 break;
7803 default:
7804 Unimplemented();
7805 }
7806 %}
7809 ins_pc_relative(1);
7810 ins_pipe( pipe_alu_branch );
7811 %}
// Signed long compare-and-branch vs an immediate whose negation fits a
// 16-bit daddiu. Computes AT = src1 - val once, then branches on AT's sign
// with the compare-with-zero branch forms. Trailing nop = delay slot.
7813 instruct branchConL_reg_immL16_sub(cmpOp cmp, mRegL src1, immL16_sub src2, label labl) %{
7814 match( If cmp (CmpL src1 src2) );
7815 effect(USE labl);
7816 ins_cost(180);
7817 format %{ "BR$cmp $src1, $src2, $labl #@branchConL_reg_immL16_sub" %}
7819 ins_encode %{
7820 Register op1 = $src1$$Register;
7821 int val = $src2$$constant;
7822 Label &L = *($labl$$label);
7823 int flag = $cmp$$cmpcode;
7825 __ daddiu(AT, op1, -1 * val);
7826 switch(flag)
7827 {
7828 case 0x01: //equal
7829 if (&L)
7830 __ beq(R0, AT, L);
7831 else
7832 __ beq(R0, AT, (int)0);
7833 break;
7834 case 0x02: //not_equal
7835 if (&L)
7836 __ bne(R0, AT, L);
7837 else
7838 __ bne(R0, AT, (int)0);
7839 break;
7840 case 0x03: //greater
7841 if(&L)
7842 __ bgtz(AT, L);
7843 else
7844 __ bgtz(AT, (int)0);
7845 break;
7846 case 0x04: //greater_equal
7847 if(&L)
7848 __ bgez(AT, L);
7849 else
7850 __ bgez(AT, (int)0);
7851 break;
7852 case 0x05: //less
7853 if(&L)
7854 __ bltz(AT, L);
7855 else
7856 __ bltz(AT, (int)0);
7857 break;
7858 case 0x06: //less_equal
7859 if(&L)
7860 __ blez(AT, L);
7861 else
7862 __ blez(AT, (int)0);
7863 break;
7864 default:
7865 Unimplemented();
7866 }
7867 __ nop();
7868 %}
7870 ins_pc_relative(1);
7871 ins_pipe( pipe_alu_branch );
7872 %}
// Signed int compare-and-branch vs an immediate whose negation fits a
// 16-bit addiu. Same subtract-then-branch-on-sign scheme as the long
// variant above, using the 32-bit addiu32. Trailing nop = delay slot.
7875 instruct branchConI_reg_imm16_sub(cmpOp cmp, mRegI src1, immI16_sub src2, label labl) %{
7876 match( If cmp (CmpI src1 src2) );
7877 effect(USE labl);
7878 ins_cost(180);
7879 format %{ "BR$cmp $src1, $src2, $labl #@branchConI_reg_imm16_sub" %}
7881 ins_encode %{
7882 Register op1 = $src1$$Register;
7883 int val = $src2$$constant;
7884 Label &L = *($labl$$label);
7885 int flag = $cmp$$cmpcode;
7887 __ addiu32(AT, op1, -1 * val);
7888 switch(flag)
7889 {
7890 case 0x01: //equal
7891 if (&L)
7892 __ beq(R0, AT, L);
7893 else
7894 __ beq(R0, AT, (int)0);
7895 break;
7896 case 0x02: //not_equal
7897 if (&L)
7898 __ bne(R0, AT, L);
7899 else
7900 __ bne(R0, AT, (int)0);
7901 break;
7902 case 0x03: //greater
7903 if(&L)
7904 __ bgtz(AT, L);
7905 else
7906 __ bgtz(AT, (int)0);
7907 break;
7908 case 0x04: //greater_equal
7909 if(&L)
7910 __ bgez(AT, L);
7911 else
7912 __ bgez(AT, (int)0);
7913 break;
7914 case 0x05: //less
7915 if(&L)
7916 __ bltz(AT, L);
7917 else
7918 __ bltz(AT, (int)0);
7919 break;
7920 case 0x06: //less_equal
7921 if(&L)
7922 __ blez(AT, L);
7923 else
7924 __ blez(AT, (int)0);
7925 break;
7926 default:
7927 Unimplemented();
7928 }
7929 __ nop();
7930 %}
7932 ins_pc_relative(1);
7933 ins_pipe( pipe_alu_branch );
7934 %}
// Signed long compare-and-branch against zero. Mostly uses the dedicated
// compare-with-zero branches; the "less" case goes through slt+bne instead
// of bltz (kept as-is). The single delayed()->nop() after the switch fills
// the delay slot of whichever branch was emitted.
7936 instruct branchConL_regL_immL0(cmpOp cmp, mRegL src1, immL0 zero, label labl) %{
7937 match( If cmp (CmpL src1 zero) );
7938 effect(USE labl);
7939 format %{ "BR$cmp $src1, zero, $labl #@branchConL_regL_immL0" %}
7940 ins_cost(150);
7942 ins_encode %{
7943 Register opr1_reg = as_Register($src1$$reg);
7944 Label &target = *($labl$$label);
7945 int flag = $cmp$$cmpcode;
7947 switch(flag)
7948 {
7949 case 0x01: //equal
7950 if (&target)
7951 __ beq(opr1_reg, R0, target);
7952 else
7953 __ beq(opr1_reg, R0, int(0));
7954 break;
7956 case 0x02: //not_equal
7957 if(&target)
7958 __ bne(opr1_reg, R0, target);
7959 else
7960 __ bne(opr1_reg, R0, (int)0);
7961 break;
7963 case 0x03: //greater
7964 if(&target)
7965 __ bgtz(opr1_reg, target);
7966 else
7967 __ bgtz(opr1_reg, (int)0);
7968 break;
7970 case 0x04: //greater_equal
7971 if(&target)
7972 __ bgez(opr1_reg, target);
7973 else
7974 __ bgez(opr1_reg, (int)0);
7975 break;
7977 case 0x05: //less
7978 __ slt(AT, opr1_reg, R0);
7979 if(&target)
7980 __ bne(AT, R0, target);
7981 else
7982 __ bne(AT, R0, (int)0);
7983 break;
7985 case 0x06: //less_equal
7986 if (&target)
7987 __ blez(opr1_reg, target);
7988 else
7989 __ blez(opr1_reg, int(0));
7990 break;
7992 default:
7993 Unimplemented();
7994 }
7995 __ delayed()->nop();
7996 %}
7999 ins_pc_relative(1);
8000 ins_pipe( pipe_alu_branch );
8001 %}
8004 //FIXME
// Single-precision float compare-and-branch. A FP compare (c_*_s) sets the
// FPU condition flag, then bc1t/bc1f branches on it. The unordered compare
// forms (c_ule_s / c_ult_s) combined with the inverted branch make NaN
// comparisons fall through for the relational cases, matching Java
// semantics. Trailing nop = delay slot.
8005 instruct branchConF_reg_reg(cmpOp cmp, regF src1, regF src2, label labl) %{
8006 match( If cmp (CmpF src1 src2) );
8007 effect(USE labl);
8008 format %{ "BR$cmp $src1, $src2, $labl #@branchConF_reg_reg" %}
8010 ins_encode %{
8011 FloatRegister reg_op1 = $src1$$FloatRegister;
8012 FloatRegister reg_op2 = $src2$$FloatRegister;
8013 Label &L = *($labl$$label);
8014 int flag = $cmp$$cmpcode;
8016 switch(flag)
8017 {
8018 case 0x01: //equal
8019 __ c_eq_s(reg_op1, reg_op2);
8020 if (&L)
8021 __ bc1t(L);
8022 else
8023 __ bc1t((int)0);
8024 break;
8025 case 0x02: //not_equal
8026 __ c_eq_s(reg_op1, reg_op2);
8027 if (&L)
8028 __ bc1f(L);
8029 else
8030 __ bc1f((int)0);
8031 break;
8032 case 0x03: //greater
8033 __ c_ule_s(reg_op1, reg_op2);
8034 if(&L)
8035 __ bc1f(L);
8036 else
8037 __ bc1f((int)0);
8038 break;
8039 case 0x04: //greater_equal
8040 __ c_ult_s(reg_op1, reg_op2);
8041 if(&L)
8042 __ bc1f(L);
8043 else
8044 __ bc1f((int)0);
8045 break;
8046 case 0x05: //less
8047 __ c_ult_s(reg_op1, reg_op2);
8048 if(&L)
8049 __ bc1t(L);
8050 else
8051 __ bc1t((int)0);
8052 break;
8053 case 0x06: //less_equal
8054 __ c_ule_s(reg_op1, reg_op2);
8055 if(&L)
8056 __ bc1t(L);
8057 else
8058 __ bc1t((int)0);
8059 break;
8060 default:
8061 Unimplemented();
8062 }
8063 __ nop();
8064 %}
8066 ins_pc_relative(1);
8067 ins_pipe(pipe_slow);
8068 %}
// Double-precision float compare-and-branch. Mirrors branchConF_reg_reg
// with the _d compare forms. not_equal deliberately uses c_eq_d + bc1f
// (not c_ueq_d) so that NaN != NaN branches as Java requires — see the
// in-line 2016/4/19 note. Trailing nop = delay slot.
8070 instruct branchConD_reg_reg(cmpOp cmp, regD src1, regD src2, label labl) %{
8071 match( If cmp (CmpD src1 src2) );
8072 effect(USE labl);
8073 format %{ "BR$cmp $src1, $src2, $labl #@branchConD_reg_reg" %}
8075 ins_encode %{
8076 FloatRegister reg_op1 = $src1$$FloatRegister;
8077 FloatRegister reg_op2 = $src2$$FloatRegister;
8078 Label &L = *($labl$$label);
8079 int flag = $cmp$$cmpcode;
8081 switch(flag)
8082 {
8083 case 0x01: //equal
8084 __ c_eq_d(reg_op1, reg_op2);
8085 if (&L)
8086 __ bc1t(L);
8087 else
8088 __ bc1t((int)0);
8089 break;
8090 case 0x02: //not_equal
8091 //2016/4/19 aoqi: c_ueq_d cannot distinguish NaN from equal. Double.isNaN(Double) is implemented by 'f != f', so the use of c_ueq_d causes bugs.
8092 __ c_eq_d(reg_op1, reg_op2);
8093 if (&L)
8094 __ bc1f(L);
8095 else
8096 __ bc1f((int)0);
8097 break;
8098 case 0x03: //greater
8099 __ c_ule_d(reg_op1, reg_op2);
8100 if(&L)
8101 __ bc1f(L);
8102 else
8103 __ bc1f((int)0);
8104 break;
8105 case 0x04: //greater_equal
8106 __ c_ult_d(reg_op1, reg_op2);
8107 if(&L)
8108 __ bc1f(L);
8109 else
8110 __ bc1f((int)0);
8111 break;
8112 case 0x05: //less
8113 __ c_ult_d(reg_op1, reg_op2);
8114 if(&L)
8115 __ bc1t(L);
8116 else
8117 __ bc1t((int)0);
8118 break;
8119 case 0x06: //less_equal
8120 __ c_ule_d(reg_op1, reg_op2);
8121 if(&L)
8122 __ bc1t(L);
8123 else
8124 __ bc1t((int)0);
8125 break;
8126 default:
8127 Unimplemented();
8128 }
8129 __ nop();
8130 %}
8132 ins_pc_relative(1);
8133 ins_pipe(pipe_slow);
8134 %}
8137 // Call Runtime Instruction
// Direct call into the VM runtime; encoding is shared via the
// Java_To_Runtime enc_class. Aligned to 16 bytes for patchability.
8138 instruct CallRuntimeDirect(method meth) %{
8139 match(CallRuntime );
8140 effect(USE meth);
8142 ins_cost(300);
8143 format %{ "CALL,runtime #@CallRuntimeDirect" %}
8144 ins_encode( Java_To_Runtime( meth ) );
8145 ins_pipe( pipe_slow );
8146 ins_alignment(16);
8147 %}
8151 //------------------------MemBar Instructions-------------------------------
8152 //Memory barrier flavors
// Acquire barrier: emits no code (size(0)) — acquire ordering is provided
// by the instruction sequence this follows on this target.
8154 instruct membar_acquire() %{
8155 match(MemBarAcquire);
8156 ins_cost(0);
8158 size(0);
8159 format %{ "MEMBAR-acquire (empty) @ membar_acquire" %}
8160 ins_encode();
8161 ins_pipe(empty);
8162 %}
// LoadFence: emits a full MIPS 'sync' to order memory accesses.
8164 instruct load_fence() %{
8165 match(LoadFence);
8166 ins_cost(400);
8168 format %{ "MEMBAR @ load_fence" %}
8169 ins_encode %{
8170 __ sync();
8171 %}
8172 ins_pipe(pipe_slow);
8173 %}
// Acquire-on-lock: empty encoding — the CAS in the preceding FastLock
// already provides the acquire semantics (see format string).
8175 instruct membar_acquire_lock()
8176 %{
8177 match(MemBarAcquireLock);
8178 ins_cost(0);
8180 size(0);
8181 format %{ "MEMBAR-acquire (acquire as part of CAS in prior FastLock so empty encoding) @ membar_acquire_lock" %}
8182 ins_encode();
8183 ins_pipe(empty);
8184 %}
// Release barrier: emits a full 'sync'. The in-line warning indicates this
// sync is required for correctness — do not remove.
8186 instruct membar_release() %{
8187 match(MemBarRelease);
8188 ins_cost(400);
8190 format %{ "MEMBAR-release @ membar_release" %}
8192 ins_encode %{
8193 // Attention: DO NOT DELETE THIS GUY!
8194 __ sync();
8195 %}
8197 ins_pipe(pipe_slow);
8198 %}
// StoreFence: emits a full MIPS 'sync'.
8200 instruct store_fence() %{
8201 match(StoreFence);
8202 ins_cost(400);
8204 format %{ "MEMBAR @ store_fence" %}
8206 ins_encode %{
8207 __ sync();
8208 %}
8210 ins_pipe(pipe_slow);
8211 %}
// Release-on-unlock: empty encoding — FastUnlock supplies the release
// semantics (see format string).
8213 instruct membar_release_lock()
8214 %{
8215 match(MemBarReleaseLock);
8216 ins_cost(0);
8218 size(0);
8219 format %{ "MEMBAR-release-lock (release in FastUnlock so empty) @ membar_release_lock" %}
8220 ins_encode();
8221 ins_pipe(empty);
8222 %}
// Volatile barrier: full 'sync', skipped entirely on uniprocessor systems
// (early return from the encode body emits nothing).
8225 instruct membar_volatile() %{
8226 match(MemBarVolatile);
8227 ins_cost(400);
8229 format %{ "MEMBAR-volatile" %}
8230 ins_encode %{
8231 if( !os::is_MP() ) return; // Not needed on single CPU
8232 __ sync();
8234 %}
8235 ins_pipe(pipe_slow);
8236 %}
// Volatile barrier elided when the matcher proves a following store-load
// barrier already covers it (post_store_load_barrier predicate); zero size,
// so it wins over membar_volatile when applicable.
8238 instruct unnecessary_membar_volatile() %{
8239 match(MemBarVolatile);
8240 predicate(Matcher::post_store_load_barrier(n));
8241 ins_cost(0);
8243 size(0);
8244 format %{ "MEMBAR-volatile (unnecessary so empty encoding) @ unnecessary_membar_volatile" %}
8245 ins_encode( );
8246 ins_pipe(empty);
8247 %}
// StoreStore barrier: empty encoding on this target.
8249 instruct membar_storestore() %{
8250 match(MemBarStoreStore);
8252 ins_cost(0);
8253 size(0);
8254 format %{ "MEMBAR-storestore (empty encoding) @ membar_storestore" %}
8255 ins_encode( );
8256 ins_pipe(empty);
8257 %}
8259 //----------Move Instructions--------------------------------------------------
// Reinterpret a long register as a pointer: plain register move, elided
// when source and destination already coincide.
8260 instruct castX2P(mRegP dst, mRegL src) %{
8261 match(Set dst (CastX2P src));
8262 format %{ "castX2P $dst, $src @ castX2P" %}
8263 ins_encode %{
8264 Register src = $src$$Register;
8265 Register dst = $dst$$Register;
8267 if(src != dst)
8268 __ move(dst, src);
8269 %}
8270 ins_cost(10);
8271 ins_pipe( ialu_regI_mov );
8272 %}
// Reinterpret a pointer register as a long: plain register move, elided
// when source and destination already coincide.
8274 instruct castP2X(mRegL dst, mRegP src ) %{
8275 match(Set dst (CastP2X src));
8277 format %{ "mov $dst, $src\t #@castP2X" %}
8278 ins_encode %{
8279 Register src = $src$$Register;
8280 Register dst = $dst$$Register;
8282 if(src != dst)
8283 __ move(dst, src);
8284 %}
8285 ins_pipe( ialu_regI_mov );
8286 %}
// Bit-move a float's raw bits into an int GPR (mfc1, no conversion).
8288 instruct MoveF2I_reg_reg(mRegI dst, regF src) %{
8289 match(Set dst (MoveF2I src));
8290 effect(DEF dst, USE src);
8291 ins_cost(85);
8292 format %{ "MoveF2I $dst, $src @ MoveF2I_reg_reg" %}
8293 ins_encode %{
8294 Register dst = as_Register($dst$$reg);
8295 FloatRegister src = as_FloatRegister($src$$reg);
8297 __ mfc1(dst, src);
8298 %}
8299 ins_pipe( pipe_slow );
8300 %}
// Bit-move an int GPR's raw bits into a float register (mtc1, no conversion).
8302 instruct MoveI2F_reg_reg(regF dst, mRegI src) %{
8303 match(Set dst (MoveI2F src));
8304 effect(DEF dst, USE src);
8305 ins_cost(85);
8306 format %{ "MoveI2F $dst, $src @ MoveI2F_reg_reg" %}
8307 ins_encode %{
8308 Register src = as_Register($src$$reg);
8309 FloatRegister dst = as_FloatRegister($dst$$reg);
8311 __ mtc1(src, dst);
8312 %}
8313 ins_pipe( pipe_slow );
8314 %}
// Bit-move a double's raw bits into a long GPR (dmfc1, 64-bit, no conversion).
8316 instruct MoveD2L_reg_reg(mRegL dst, regD src) %{
8317 match(Set dst (MoveD2L src));
8318 effect(DEF dst, USE src);
8319 ins_cost(85);
8320 format %{ "MoveD2L $dst, $src @ MoveD2L_reg_reg" %}
8321 ins_encode %{
8322 Register dst = as_Register($dst$$reg);
8323 FloatRegister src = as_FloatRegister($src$$reg);
8325 __ dmfc1(dst, src);
8326 %}
8327 ins_pipe( pipe_slow );
8328 %}
// Bit-move a long GPR's raw bits into a double register (dmtc1, 64-bit).
8330 instruct MoveL2D_reg_reg(regD dst, mRegL src) %{
8331 match(Set dst (MoveL2D src));
8332 effect(DEF dst, USE src);
8333 ins_cost(85);
8334 format %{ "MoveL2D $dst, $src @ MoveL2D_reg_reg" %}
8335 ins_encode %{
8336 FloatRegister dst = as_FloatRegister($dst$$reg);
8337 Register src = as_Register($src$$reg);
8339 __ dmtc1(src, dst);
8340 %}
8341 ins_pipe( pipe_slow );
8342 %}
8344 //----------Conditional Move---------------------------------------------------
8345 // Conditional move
// CMoveI guarded by a signed int compare: the condition is computed into AT
// (subu32 difference for eq/ne, slt for the relational cases) and the move
// is performed branch-free with movz (move if AT==0) / movn (move if AT!=0).
8346 instruct cmovI_cmpI_reg_reg(mRegI dst, mRegI src, mRegI tmp1, mRegI tmp2, cmpOp cop ) %{
8347 match(Set dst (CMoveI (Binary cop (CmpI tmp1 tmp2)) (Binary dst src)));
8348 ins_cost(80);
8349 format %{
8350 "CMP$cop $tmp1, $tmp2\t @cmovI_cmpI_reg_reg\n"
8351 "\tCMOV $dst,$src \t @cmovI_cmpI_reg_reg"
8352 %}
8354 ins_encode %{
8355 Register op1 = $tmp1$$Register;
8356 Register op2 = $tmp2$$Register;
8357 Register dst = $dst$$Register;
8358 Register src = $src$$Register;
8359 int flag = $cop$$cmpcode;
8361 switch(flag)
8362 {
8363 case 0x01: //equal
8364 __ subu32(AT, op1, op2);
8365 __ movz(dst, src, AT);
8366 break;
8368 case 0x02: //not_equal
8369 __ subu32(AT, op1, op2);
8370 __ movn(dst, src, AT);
8371 break;
8373 case 0x03: //great
8374 __ slt(AT, op2, op1);
8375 __ movn(dst, src, AT);
8376 break;
8378 case 0x04: //great_equal
8379 __ slt(AT, op1, op2);
8380 __ movz(dst, src, AT);
8381 break;
8383 case 0x05: //less
8384 __ slt(AT, op1, op2);
8385 __ movn(dst, src, AT);
8386 break;
8388 case 0x06: //less_equal
8389 __ slt(AT, op2, op1);
8390 __ movz(dst, src, AT);
8391 break;
8393 default:
8394 Unimplemented();
8395 }
8396 %}
8398 ins_pipe( pipe_slow );
8399 %}
// CMoveI guarded by an unsigned pointer compare: subu difference for eq/ne,
// sltu for the relational cases, then branch-free movz/movn on AT.
8401 instruct cmovI_cmpP_reg_reg(mRegI dst, mRegI src, mRegP tmp1, mRegP tmp2, cmpOpU cop ) %{
8402 match(Set dst (CMoveI (Binary cop (CmpP tmp1 tmp2)) (Binary dst src)));
8403 ins_cost(80);
8404 format %{
8405 "CMPU$cop $tmp1,$tmp2\t @cmovI_cmpP_reg_reg\n\t"
8406 "CMOV $dst,$src\t @cmovI_cmpP_reg_reg"
8407 %}
8408 ins_encode %{
8409 Register op1 = $tmp1$$Register;
8410 Register op2 = $tmp2$$Register;
8411 Register dst = $dst$$Register;
8412 Register src = $src$$Register;
8413 int flag = $cop$$cmpcode;
8415 switch(flag)
8416 {
8417 case 0x01: //equal
8418 __ subu(AT, op1, op2);
8419 __ movz(dst, src, AT);
8420 break;
8422 case 0x02: //not_equal
8423 __ subu(AT, op1, op2);
8424 __ movn(dst, src, AT);
8425 break;
8427 case 0x03: //above
8428 __ sltu(AT, op2, op1);
8429 __ movn(dst, src, AT);
8430 break;
8432 case 0x04: //above_equal
8433 __ sltu(AT, op1, op2);
8434 __ movz(dst, src, AT);
8435 break;
8437 case 0x05: //below
8438 __ sltu(AT, op1, op2);
8439 __ movn(dst, src, AT);
8440 break;
8442 case 0x06: //below_equal
8443 __ sltu(AT, op2, op1);
8444 __ movz(dst, src, AT);
8445 break;
8447 default:
8448 Unimplemented();
8449 }
8450 %}
8452 ins_pipe( pipe_slow );
8453 %}
// CMoveI guarded by an unsigned narrow-oop compare: 32-bit subu32 for
// eq/ne (narrow oops are 32-bit), sltu for the relational cases, then
// branch-free movz/movn on AT.
8455 instruct cmovI_cmpN_reg_reg(mRegI dst, mRegI src, mRegN tmp1, mRegN tmp2, cmpOpU cop ) %{
8456 match(Set dst (CMoveI (Binary cop (CmpN tmp1 tmp2)) (Binary dst src)));
8457 ins_cost(80);
8458 format %{
8459 "CMPU$cop $tmp1,$tmp2\t @cmovI_cmpN_reg_reg\n\t"
8460 "CMOV $dst,$src\t @cmovI_cmpN_reg_reg"
8461 %}
8462 ins_encode %{
8463 Register op1 = $tmp1$$Register;
8464 Register op2 = $tmp2$$Register;
8465 Register dst = $dst$$Register;
8466 Register src = $src$$Register;
8467 int flag = $cop$$cmpcode;
8469 switch(flag)
8470 {
8471 case 0x01: //equal
8472 __ subu32(AT, op1, op2);
8473 __ movz(dst, src, AT);
8474 break;
8476 case 0x02: //not_equal
8477 __ subu32(AT, op1, op2);
8478 __ movn(dst, src, AT);
8479 break;
8481 case 0x03: //above
8482 __ sltu(AT, op2, op1);
8483 __ movn(dst, src, AT);
8484 break;
8486 case 0x04: //above_equal
8487 __ sltu(AT, op1, op2);
8488 __ movz(dst, src, AT);
8489 break;
8491 case 0x05: //below
8492 __ sltu(AT, op1, op2);
8493 __ movn(dst, src, AT);
8494 break;
8496 case 0x06: //below_equal
8497 __ sltu(AT, op2, op1);
8498 __ movz(dst, src, AT);
8499 break;
8501 default:
8502 Unimplemented();
8503 }
8504 %}
8506 ins_pipe( pipe_slow );
8507 %}
// CMoveP guarded by an unsigned narrow-oop compare; same AT + movz/movn
// scheme as cmovI_cmpN_reg_reg, moving a pointer register instead.
8509 instruct cmovP_cmpN_reg_reg(mRegP dst, mRegP src, mRegN tmp1, mRegN tmp2, cmpOpU cop ) %{
8510 match(Set dst (CMoveP (Binary cop (CmpN tmp1 tmp2)) (Binary dst src)));
8511 ins_cost(80);
8512 format %{
8513 "CMPU$cop $tmp1,$tmp2\t @cmovP_cmpN_reg_reg\n\t"
8514 "CMOV $dst,$src\t @cmovP_cmpN_reg_reg"
8515 %}
8516 ins_encode %{
8517 Register op1 = $tmp1$$Register;
8518 Register op2 = $tmp2$$Register;
8519 Register dst = $dst$$Register;
8520 Register src = $src$$Register;
8521 int flag = $cop$$cmpcode;
8523 switch(flag)
8524 {
8525 case 0x01: //equal
8526 __ subu32(AT, op1, op2);
8527 __ movz(dst, src, AT);
8528 break;
8530 case 0x02: //not_equal
8531 __ subu32(AT, op1, op2);
8532 __ movn(dst, src, AT);
8533 break;
8535 case 0x03: //above
8536 __ sltu(AT, op2, op1);
8537 __ movn(dst, src, AT);
8538 break;
8540 case 0x04: //above_equal
8541 __ sltu(AT, op1, op2);
8542 __ movz(dst, src, AT);
8543 break;
8545 case 0x05: //below
8546 __ sltu(AT, op1, op2);
8547 __ movn(dst, src, AT);
8548 break;
8550 case 0x06: //below_equal
8551 __ sltu(AT, op2, op1);
8552 __ movz(dst, src, AT);
8553 break;
8555 default:
8556 Unimplemented();
8557 }
8558 %}
8560 ins_pipe( pipe_slow );
8561 %}
// CMoveN guarded by an unsigned compare of two full-width pointers:
// dst = (tmp1 <cop, unsigned> tmp2) ? src : dst.  Uses the 64-bit subu /
// sltu since the operands are uncompressed pointers; AT is clobbered.
instruct cmovN_cmpP_reg_reg(mRegN dst, mRegN src, mRegP tmp1, mRegP tmp2, cmpOpU cop ) %{
  match(Set dst (CMoveN (Binary cop (CmpP tmp1 tmp2)) (Binary dst src)));
  ins_cost(80);
  format %{
    "CMPU$cop $tmp1,$tmp2\t @cmovN_cmpP_reg_reg\n\t"
    "CMOV $dst,$src\t @cmovN_cmpP_reg_reg"
  %}
  ins_encode %{
    Register op1 = $tmp1$$Register;
    Register op2 = $tmp2$$Register;
    Register dst = $dst$$Register;
    Register src = $src$$Register;
    int flag = $cop$$cmpcode;

    switch(flag)
    {
      case 0x01: //equal
        __ subu(AT, op1, op2);
        __ movz(dst, src, AT);
        break;

      case 0x02: //not_equal
        __ subu(AT, op1, op2);
        __ movn(dst, src, AT);
        break;

      case 0x03: //above
        __ sltu(AT, op2, op1);
        __ movn(dst, src, AT);
        break;

      case 0x04: //above_equal
        __ sltu(AT, op1, op2);
        __ movz(dst, src, AT);
        break;

      case 0x05: //below
        __ sltu(AT, op1, op2);
        __ movn(dst, src, AT);
        break;

      case 0x06: //below_equal
        __ sltu(AT, op2, op1);
        __ movz(dst, src, AT);
        break;

      default:
        Unimplemented();
    }
  %}

  ins_pipe( pipe_slow );
%}
// CMoveP guarded by a double compare.  c_xxx_d sets the FPU condition flag;
// movt/movf move the GPR when the flag is true/false respectively.
// NOTE(review): for greater/greater_equal the inverted tests (c_ole/c_olt +
// movf) fire when the operands are unordered (NaN); this mirrors
// cmovI_cmpD_reg_reg / branchConD_reg_reg below — confirm it matches C2's
// expected NaN behaviour for CMove.
instruct cmovP_cmpD_reg_reg(mRegP dst, mRegP src, regD tmp1, regD tmp2, cmpOp cop ) %{
  match(Set dst (CMoveP (Binary cop (CmpD tmp1 tmp2)) (Binary dst src)));
  ins_cost(80);
  format %{
    "CMP$cop $tmp1, $tmp2\t @cmovP_cmpD_reg_reg\n"
    "\tCMOV $dst,$src \t @cmovP_cmpD_reg_reg"
  %}
  ins_encode %{
    FloatRegister reg_op1 = as_FloatRegister($tmp1$$reg);
    FloatRegister reg_op2 = as_FloatRegister($tmp2$$reg);
    Register dst = as_Register($dst$$reg);
    Register src = as_Register($src$$reg);

    int flag = $cop$$cmpcode;

    switch(flag)
    {
      case 0x01: //equal
        __ c_eq_d(reg_op1, reg_op2);
        __ movt(dst, src);
        break;
      case 0x02: //not_equal
        __ c_eq_d(reg_op1, reg_op2);
        __ movf(dst, src);
        break;
      case 0x03: //greater
        __ c_ole_d(reg_op1, reg_op2);
        __ movf(dst, src);
        break;
      case 0x04: //greater_equal
        __ c_olt_d(reg_op1, reg_op2);
        __ movf(dst, src);
        break;
      case 0x05: //less
        __ c_ult_d(reg_op1, reg_op2);
        __ movt(dst, src);
        break;
      case 0x06: //less_equal
        __ c_ule_d(reg_op1, reg_op2);
        __ movt(dst, src);
        break;
      default:
        Unimplemented();
    }
  %}

  ins_pipe( pipe_slow );
%}
// CMoveN guarded by an unsigned compare of two narrow-oop registers:
// dst = (tmp1 <cop, unsigned> tmp2) ? src : dst.  AT is clobbered.
instruct cmovN_cmpN_reg_reg(mRegN dst, mRegN src, mRegN tmp1, mRegN tmp2, cmpOpU cop ) %{
  match(Set dst (CMoveN (Binary cop (CmpN tmp1 tmp2)) (Binary dst src)));
  ins_cost(80);
  format %{
    "CMPU$cop $tmp1,$tmp2\t @cmovN_cmpN_reg_reg\n\t"
    "CMOV $dst,$src\t @cmovN_cmpN_reg_reg"
  %}
  ins_encode %{
    Register op1 = $tmp1$$Register;
    Register op2 = $tmp2$$Register;
    Register dst = $dst$$Register;
    Register src = $src$$Register;
    int flag = $cop$$cmpcode;

    switch(flag)
    {
      case 0x01: //equal
        __ subu32(AT, op1, op2);
        __ movz(dst, src, AT);
        break;

      case 0x02: //not_equal
        __ subu32(AT, op1, op2);
        __ movn(dst, src, AT);
        break;

      case 0x03: //above
        __ sltu(AT, op2, op1);
        __ movn(dst, src, AT);
        break;

      case 0x04: //above_equal
        __ sltu(AT, op1, op2);
        __ movz(dst, src, AT);
        break;

      case 0x05: //below
        __ sltu(AT, op1, op2);
        __ movn(dst, src, AT);
        break;

      case 0x06: //below_equal
        __ sltu(AT, op2, op1);
        __ movz(dst, src, AT);
        break;

      default:
        Unimplemented();
    }
  %}

  ins_pipe( pipe_slow );
%}
// CMoveI guarded by an unsigned int compare:
// dst = (tmp1 <cop, unsigned> tmp2) ? src : dst.  The ints are kept
// sign-extended in 64-bit registers: the 64-bit subu is zero iff they are
// equal, and sign-extension preserves 32-bit unsigned ordering under the
// 64-bit sltu.  AT is clobbered.
instruct cmovI_cmpU_reg_reg(mRegI dst, mRegI src, mRegI tmp1, mRegI tmp2, cmpOpU cop ) %{
  match(Set dst (CMoveI (Binary cop (CmpU tmp1 tmp2)) (Binary dst src)));
  ins_cost(80);
  format %{
    "CMPU$cop $tmp1,$tmp2\t @cmovI_cmpU_reg_reg\n\t"
    "CMOV $dst,$src\t @cmovI_cmpU_reg_reg"
  %}
  ins_encode %{
    Register op1 = $tmp1$$Register;
    Register op2 = $tmp2$$Register;
    Register dst = $dst$$Register;
    Register src = $src$$Register;
    int flag = $cop$$cmpcode;

    switch(flag)
    {
      case 0x01: //equal
        __ subu(AT, op1, op2);
        __ movz(dst, src, AT);
        break;

      case 0x02: //not_equal
        __ subu(AT, op1, op2);
        __ movn(dst, src, AT);
        break;

      case 0x03: //above
        __ sltu(AT, op2, op1);
        __ movn(dst, src, AT);
        break;

      case 0x04: //above_equal
        __ sltu(AT, op1, op2);
        __ movz(dst, src, AT);
        break;

      case 0x05: //below
        __ sltu(AT, op1, op2);
        __ movn(dst, src, AT);
        break;

      case 0x06: //below_equal
        __ sltu(AT, op2, op1);
        __ movz(dst, src, AT);
        break;

      default:
        Unimplemented();
    }
  %}

  ins_pipe( pipe_slow );
%}
// CMoveI guarded by a signed long compare:
// dst = (tmp1 <cop, signed> tmp2) ? src : dst.  The 64-bit subu is used
// only for the equality tests (zero iff equal, safe even on overflow);
// ordering uses slt.  AT is clobbered.
instruct cmovI_cmpL_reg_reg(mRegI dst, mRegI src, mRegL tmp1, mRegL tmp2, cmpOp cop ) %{
  match(Set dst (CMoveI (Binary cop (CmpL tmp1 tmp2)) (Binary dst src)));
  ins_cost(80);
  format %{
    "CMP$cop $tmp1, $tmp2\t @cmovI_cmpL_reg_reg\n"
    "\tCMOV $dst,$src \t @cmovI_cmpL_reg_reg"
  %}
  ins_encode %{
    Register opr1 = as_Register($tmp1$$reg);
    Register opr2 = as_Register($tmp2$$reg);
    Register dst = $dst$$Register;
    Register src = $src$$Register;
    int flag = $cop$$cmpcode;

    switch(flag)
    {
      case 0x01: //equal
        __ subu(AT, opr1, opr2);
        __ movz(dst, src, AT);
        break;

      case 0x02: //not_equal
        __ subu(AT, opr1, opr2);
        __ movn(dst, src, AT);
        break;

      case 0x03: //greater
        __ slt(AT, opr2, opr1);
        __ movn(dst, src, AT);
        break;

      case 0x04: //greater_equal
        __ slt(AT, opr1, opr2);
        __ movz(dst, src, AT);
        break;

      case 0x05: //less
        __ slt(AT, opr1, opr2);
        __ movn(dst, src, AT);
        break;

      case 0x06: //less_equal
        __ slt(AT, opr2, opr1);
        __ movz(dst, src, AT);
        break;

      default:
        Unimplemented();
    }
  %}

  ins_pipe( pipe_slow );
%}
// CMoveP guarded by a signed long compare:
// dst = (tmp1 <cop, signed> tmp2) ? src : dst.  subu only for equality
// (zero iff equal); ordering via slt.  AT is clobbered.
instruct cmovP_cmpL_reg_reg(mRegP dst, mRegP src, mRegL tmp1, mRegL tmp2, cmpOp cop ) %{
  match(Set dst (CMoveP (Binary cop (CmpL tmp1 tmp2)) (Binary dst src)));
  ins_cost(80);
  format %{
    "CMP$cop $tmp1, $tmp2\t @cmovP_cmpL_reg_reg\n"
    "\tCMOV $dst,$src \t @cmovP_cmpL_reg_reg"
  %}
  ins_encode %{
    Register opr1 = as_Register($tmp1$$reg);
    Register opr2 = as_Register($tmp2$$reg);
    Register dst = $dst$$Register;
    Register src = $src$$Register;
    int flag = $cop$$cmpcode;

    switch(flag)
    {
      case 0x01: //equal
        __ subu(AT, opr1, opr2);
        __ movz(dst, src, AT);
        break;

      case 0x02: //not_equal
        __ subu(AT, opr1, opr2);
        __ movn(dst, src, AT);
        break;

      case 0x03: //greater
        __ slt(AT, opr2, opr1);
        __ movn(dst, src, AT);
        break;

      case 0x04: //greater_equal
        __ slt(AT, opr1, opr2);
        __ movz(dst, src, AT);
        break;

      case 0x05: //less
        __ slt(AT, opr1, opr2);
        __ movn(dst, src, AT);
        break;

      case 0x06: //less_equal
        __ slt(AT, opr2, opr1);
        __ movz(dst, src, AT);
        break;

      default:
        Unimplemented();
    }
  %}

  ins_pipe( pipe_slow );
%}
// CMoveI guarded by a double compare.  c_xxx_d sets the FPU condition flag;
// movt/movf move on it.  See the NaN note at cmovP_cmpD_reg_reg-style blocks:
// unordered operands make the inverted (movf) cases fire.
instruct cmovI_cmpD_reg_reg(mRegI dst, mRegI src, regD tmp1, regD tmp2, cmpOp cop ) %{
  match(Set dst (CMoveI (Binary cop (CmpD tmp1 tmp2)) (Binary dst src)));
  ins_cost(80);
  format %{
    "CMP$cop $tmp1, $tmp2\t @cmovI_cmpD_reg_reg\n"
    "\tCMOV $dst,$src \t @cmovI_cmpD_reg_reg"
  %}
  ins_encode %{
    FloatRegister reg_op1 = as_FloatRegister($tmp1$$reg);
    FloatRegister reg_op2 = as_FloatRegister($tmp2$$reg);
    Register dst = as_Register($dst$$reg);
    Register src = as_Register($src$$reg);

    int flag = $cop$$cmpcode;

    switch(flag)
    {
      case 0x01: //equal
        __ c_eq_d(reg_op1, reg_op2);
        __ movt(dst, src);
        break;
      case 0x02: //not_equal
        // 2016/4/19 aoqi: See instruct branchConD_reg_reg. The change in
        // branchConD_reg_reg fixed a bug. It seems similar here, so I made
        // the same change.
        __ c_eq_d(reg_op1, reg_op2);
        __ movf(dst, src);
        break;
      case 0x03: //greater
        __ c_ole_d(reg_op1, reg_op2);
        __ movf(dst, src);
        break;
      case 0x04: //greater_equal
        __ c_olt_d(reg_op1, reg_op2);
        __ movf(dst, src);
        break;
      case 0x05: //less
        __ c_ult_d(reg_op1, reg_op2);
        __ movt(dst, src);
        break;
      case 0x06: //less_equal
        __ c_ule_d(reg_op1, reg_op2);
        __ movt(dst, src);
        break;
      default:
        Unimplemented();
    }
  %}

  ins_pipe( pipe_slow );
%}
// CMoveP guarded by an unsigned pointer compare:
// dst = (tmp1 <cop, unsigned> tmp2) ? src : dst.  64-bit subu/sltu on the
// full pointer values; AT is clobbered.
instruct cmovP_cmpP_reg_reg(mRegP dst, mRegP src, mRegP tmp1, mRegP tmp2, cmpOpU cop ) %{
  match(Set dst (CMoveP (Binary cop (CmpP tmp1 tmp2)) (Binary dst src)));
  ins_cost(80);
  format %{
    "CMPU$cop $tmp1,$tmp2\t @cmovP_cmpP_reg_reg\n\t"
    "CMOV $dst,$src\t @cmovP_cmpP_reg_reg"
  %}
  ins_encode %{
    Register op1 = $tmp1$$Register;
    Register op2 = $tmp2$$Register;
    Register dst = $dst$$Register;
    Register src = $src$$Register;
    int flag = $cop$$cmpcode;

    switch(flag)
    {
      case 0x01: //equal
        __ subu(AT, op1, op2);
        __ movz(dst, src, AT);
        break;

      case 0x02: //not_equal
        __ subu(AT, op1, op2);
        __ movn(dst, src, AT);
        break;

      case 0x03: //above
        __ sltu(AT, op2, op1);
        __ movn(dst, src, AT);
        break;

      case 0x04: //above_equal
        __ sltu(AT, op1, op2);
        __ movz(dst, src, AT);
        break;

      case 0x05: //below
        __ sltu(AT, op1, op2);
        __ movn(dst, src, AT);
        break;

      case 0x06: //below_equal
        __ sltu(AT, op2, op1);
        __ movz(dst, src, AT);
        break;

      default:
        Unimplemented();
    }
  %}

  ins_pipe( pipe_slow );
%}
// CMoveP guarded by a signed int compare (cmpOp + slt):
// dst = (tmp1 <cop, signed> tmp2) ? src : dst.  AT is clobbered.
// (Case comments corrected: the original said above/below, but slt is the
// signed compare, matching the signed cmpOp operand.)
instruct cmovP_cmpI_reg_reg(mRegP dst, mRegP src, mRegI tmp1, mRegI tmp2, cmpOp cop ) %{
  match(Set dst (CMoveP (Binary cop (CmpI tmp1 tmp2)) (Binary dst src)));
  ins_cost(80);
  format %{
    "CMP$cop $tmp1,$tmp2\t @cmovP_cmpI_reg_reg\n\t"
    "CMOV $dst,$src\t @cmovP_cmpI_reg_reg"
  %}
  ins_encode %{
    Register op1 = $tmp1$$Register;
    Register op2 = $tmp2$$Register;
    Register dst = $dst$$Register;
    Register src = $src$$Register;
    int flag = $cop$$cmpcode;

    switch(flag)
    {
      case 0x01: //equal
        __ subu32(AT, op1, op2);
        __ movz(dst, src, AT);
        break;

      case 0x02: //not_equal
        __ subu32(AT, op1, op2);
        __ movn(dst, src, AT);
        break;

      case 0x03: //greater
        __ slt(AT, op2, op1);
        __ movn(dst, src, AT);
        break;

      case 0x04: //greater_equal
        __ slt(AT, op1, op2);
        __ movz(dst, src, AT);
        break;

      case 0x05: //less
        __ slt(AT, op1, op2);
        __ movn(dst, src, AT);
        break;

      case 0x06: //less_equal
        __ slt(AT, op2, op1);
        __ movz(dst, src, AT);
        break;

      default:
        Unimplemented();
    }
  %}

  ins_pipe( pipe_slow );
%}
// CMoveN guarded by a signed int compare (cmpOp + slt):
// dst = (tmp1 <cop, signed> tmp2) ? src : dst.  AT is clobbered.
// (Case comments corrected from above/below to greater/less — slt is signed.)
instruct cmovN_cmpI_reg_reg(mRegN dst, mRegN src, mRegI tmp1, mRegI tmp2, cmpOp cop ) %{
  match(Set dst (CMoveN (Binary cop (CmpI tmp1 tmp2)) (Binary dst src)));
  ins_cost(80);
  format %{
    "CMP$cop $tmp1,$tmp2\t @cmovN_cmpI_reg_reg\n\t"
    "CMOV $dst,$src\t @cmovN_cmpI_reg_reg"
  %}
  ins_encode %{
    Register op1 = $tmp1$$Register;
    Register op2 = $tmp2$$Register;
    Register dst = $dst$$Register;
    Register src = $src$$Register;
    int flag = $cop$$cmpcode;

    switch(flag)
    {
      case 0x01: //equal
        __ subu32(AT, op1, op2);
        __ movz(dst, src, AT);
        break;

      case 0x02: //not_equal
        __ subu32(AT, op1, op2);
        __ movn(dst, src, AT);
        break;

      case 0x03: //greater
        __ slt(AT, op2, op1);
        __ movn(dst, src, AT);
        break;

      case 0x04: //greater_equal
        __ slt(AT, op1, op2);
        __ movz(dst, src, AT);
        break;

      case 0x05: //less
        __ slt(AT, op1, op2);
        __ movn(dst, src, AT);
        break;

      case 0x06: //less_equal
        __ slt(AT, op2, op1);
        __ movz(dst, src, AT);
        break;

      default:
        Unimplemented();
    }
  %}

  ins_pipe( pipe_slow );
%}
// CMoveL guarded by a signed int compare:
// dst = (tmp1 <cop, signed> tmp2) ? src : dst.  AT is clobbered.
instruct cmovL_cmpI_reg_reg(mRegL dst, mRegL src, mRegI tmp1, mRegI tmp2, cmpOp cop ) %{
  match(Set dst (CMoveL (Binary cop (CmpI tmp1 tmp2)) (Binary dst src)));
  ins_cost(80);
  format %{
    "CMP$cop $tmp1, $tmp2\t @cmovL_cmpI_reg_reg\n"
    "\tCMOV $dst,$src \t @cmovL_cmpI_reg_reg"
  %}

  ins_encode %{
    Register op1 = $tmp1$$Register;
    Register op2 = $tmp2$$Register;
    Register dst = as_Register($dst$$reg);
    Register src = as_Register($src$$reg);
    int flag = $cop$$cmpcode;

    switch(flag)
    {
      case 0x01: //equal
        __ subu32(AT, op1, op2);
        __ movz(dst, src, AT);
        break;

      case 0x02: //not_equal
        __ subu32(AT, op1, op2);
        __ movn(dst, src, AT);
        break;

      case 0x03: //greater
        __ slt(AT, op2, op1);
        __ movn(dst, src, AT);
        break;

      case 0x04: //greater_equal
        __ slt(AT, op1, op2);
        __ movz(dst, src, AT);
        break;

      case 0x05: //less
        __ slt(AT, op1, op2);
        __ movn(dst, src, AT);
        break;

      case 0x06: //less_equal
        __ slt(AT, op2, op1);
        __ movz(dst, src, AT);
        break;

      default:
        Unimplemented();
    }
  %}

  ins_pipe( pipe_slow );
%}
// CMoveL guarded by a signed long compare:
// dst = (tmp1 <cop, signed> tmp2) ? src : dst.  subu only for equality
// (zero iff equal, safe on overflow); ordering via slt.  AT is clobbered.
instruct cmovL_cmpL_reg_reg(mRegL dst, mRegL src, mRegL tmp1, mRegL tmp2, cmpOp cop ) %{
  match(Set dst (CMoveL (Binary cop (CmpL tmp1 tmp2)) (Binary dst src)));
  ins_cost(80);
  format %{
    "CMP$cop $tmp1, $tmp2\t @cmovL_cmpL_reg_reg\n"
    "\tCMOV $dst,$src \t @cmovL_cmpL_reg_reg"
  %}
  ins_encode %{
    Register opr1 = as_Register($tmp1$$reg);
    Register opr2 = as_Register($tmp2$$reg);
    Register dst = as_Register($dst$$reg);
    Register src = as_Register($src$$reg);
    int flag = $cop$$cmpcode;

    switch(flag)
    {
      case 0x01: //equal
        __ subu(AT, opr1, opr2);
        __ movz(dst, src, AT);
        break;

      case 0x02: //not_equal
        __ subu(AT, opr1, opr2);
        __ movn(dst, src, AT);
        break;

      case 0x03: //greater
        __ slt(AT, opr2, opr1);
        __ movn(dst, src, AT);
        break;

      case 0x04: //greater_equal
        __ slt(AT, opr1, opr2);
        __ movz(dst, src, AT);
        break;

      case 0x05: //less
        __ slt(AT, opr1, opr2);
        __ movn(dst, src, AT);
        break;

      case 0x06: //less_equal
        __ slt(AT, opr2, opr1);
        __ movz(dst, src, AT);
        break;

      default:
        Unimplemented();
    }
  %}

  ins_pipe( pipe_slow );
%}
// CMoveL guarded by an unsigned compare of two narrow-oop registers:
// dst = (tmp1 <cop, unsigned> tmp2) ? src : dst.  AT is clobbered.
instruct cmovL_cmpN_reg_reg(mRegL dst, mRegL src, mRegN tmp1, mRegN tmp2, cmpOpU cop ) %{
  match(Set dst (CMoveL (Binary cop (CmpN tmp1 tmp2)) (Binary dst src)));
  ins_cost(80);
  format %{
    "CMPU$cop $tmp1,$tmp2\t @cmovL_cmpN_reg_reg\n\t"
    "CMOV $dst,$src\t @cmovL_cmpN_reg_reg"
  %}
  ins_encode %{
    Register op1 = $tmp1$$Register;
    Register op2 = $tmp2$$Register;
    Register dst = $dst$$Register;
    Register src = $src$$Register;
    int flag = $cop$$cmpcode;

    switch(flag)
    {
      case 0x01: //equal
        __ subu32(AT, op1, op2);
        __ movz(dst, src, AT);
        break;

      case 0x02: //not_equal
        __ subu32(AT, op1, op2);
        __ movn(dst, src, AT);
        break;

      case 0x03: //above
        __ sltu(AT, op2, op1);
        __ movn(dst, src, AT);
        break;

      case 0x04: //above_equal
        __ sltu(AT, op1, op2);
        __ movz(dst, src, AT);
        break;

      case 0x05: //below
        __ sltu(AT, op1, op2);
        __ movn(dst, src, AT);
        break;

      case 0x06: //below_equal
        __ sltu(AT, op2, op1);
        __ movz(dst, src, AT);
        break;

      default:
        Unimplemented();
    }
  %}

  ins_pipe( pipe_slow );
%}
// CMoveL guarded by a double compare.  c_xxx_d sets the FPU condition flag;
// movt/movf move the GPR on it.  Unordered (NaN) operands make the inverted
// (movf) cases fire — same pattern as cmovI_cmpD_reg_reg.
instruct cmovL_cmpD_reg_reg(mRegL dst, mRegL src, regD tmp1, regD tmp2, cmpOp cop ) %{
  match(Set dst (CMoveL (Binary cop (CmpD tmp1 tmp2)) (Binary dst src)));
  ins_cost(80);
  format %{
    "CMP$cop $tmp1, $tmp2\t @cmovL_cmpD_reg_reg\n"
    "\tCMOV $dst,$src \t @cmovL_cmpD_reg_reg"
  %}
  ins_encode %{
    FloatRegister reg_op1 = as_FloatRegister($tmp1$$reg);
    FloatRegister reg_op2 = as_FloatRegister($tmp2$$reg);
    Register dst = as_Register($dst$$reg);
    Register src = as_Register($src$$reg);

    int flag = $cop$$cmpcode;

    switch(flag)
    {
      case 0x01: //equal
        __ c_eq_d(reg_op1, reg_op2);
        __ movt(dst, src);
        break;
      case 0x02: //not_equal
        __ c_eq_d(reg_op1, reg_op2);
        __ movf(dst, src);
        break;
      case 0x03: //greater
        __ c_ole_d(reg_op1, reg_op2);
        __ movf(dst, src);
        break;
      case 0x04: //greater_equal
        __ c_olt_d(reg_op1, reg_op2);
        __ movf(dst, src);
        break;
      case 0x05: //less
        __ c_ult_d(reg_op1, reg_op2);
        __ movt(dst, src);
        break;
      case 0x06: //less_equal
        __ c_ule_d(reg_op1, reg_op2);
        __ movt(dst, src);
        break;
      default:
        Unimplemented();
    }
  %}

  ins_pipe( pipe_slow );
%}
// CMoveD guarded by a double compare.  Uses the FP conditional moves
// movt_d/movf_d on the condition flag set by c_xxx_d.  Unordered (NaN)
// operands make the inverted (movf_d) cases fire — same pattern as the
// other CmpD cmov blocks.
instruct cmovD_cmpD_reg_reg(regD dst, regD src, regD tmp1, regD tmp2, cmpOp cop ) %{
  match(Set dst (CMoveD (Binary cop (CmpD tmp1 tmp2)) (Binary dst src)));
  ins_cost(200);
  format %{
    "CMP$cop $tmp1, $tmp2\t @cmovD_cmpD_reg_reg\n"
    "\tCMOV $dst,$src \t @cmovD_cmpD_reg_reg"
  %}
  ins_encode %{
    FloatRegister reg_op1 = as_FloatRegister($tmp1$$reg);
    FloatRegister reg_op2 = as_FloatRegister($tmp2$$reg);
    FloatRegister dst = as_FloatRegister($dst$$reg);
    FloatRegister src = as_FloatRegister($src$$reg);

    int flag = $cop$$cmpcode;

    switch(flag)
    {
      case 0x01: //equal
        __ c_eq_d(reg_op1, reg_op2);
        __ movt_d(dst, src);
        break;
      case 0x02: //not_equal
        __ c_eq_d(reg_op1, reg_op2);
        __ movf_d(dst, src);
        break;
      case 0x03: //greater
        __ c_ole_d(reg_op1, reg_op2);
        __ movf_d(dst, src);
        break;
      case 0x04: //greater_equal
        __ c_olt_d(reg_op1, reg_op2);
        __ movf_d(dst, src);
        break;
      case 0x05: //less
        __ c_ult_d(reg_op1, reg_op2);
        __ movt_d(dst, src);
        break;
      case 0x06: //less_equal
        __ c_ule_d(reg_op1, reg_op2);
        __ movt_d(dst, src);
        break;
      default:
        Unimplemented();
    }
  %}

  ins_pipe( pipe_slow );
%}
// CMoveF guarded by a signed int compare.  There is no GPR-conditional FP
// move, so this is done with a short branch around the mov_s: the branch is
// taken when the move must NOT happen.  AT is clobbered; each branch's delay
// slot is filled with a nop.
instruct cmovF_cmpI_reg_reg(regF dst, regF src, mRegI tmp1, mRegI tmp2, cmpOp cop ) %{
  match(Set dst (CMoveF (Binary cop (CmpI tmp1 tmp2)) (Binary dst src)));
  ins_cost(200);
  format %{
    "CMP$cop $tmp1, $tmp2\t @cmovF_cmpI_reg_reg\n"
    "\tCMOV $dst, $src \t @cmovF_cmpI_reg_reg"
  %}

  ins_encode %{
    Register op1 = $tmp1$$Register;
    Register op2 = $tmp2$$Register;
    FloatRegister dst = as_FloatRegister($dst$$reg);
    FloatRegister src = as_FloatRegister($src$$reg);
    int flag = $cop$$cmpcode;
    Label L;

    switch(flag)
    {
      case 0x01: //equal
        __ bne(op1, op2, L);
        __ nop();
        __ mov_s(dst, src);
        __ bind(L);
        break;
      case 0x02: //not_equal
        __ beq(op1, op2, L);
        __ nop();
        __ mov_s(dst, src);
        __ bind(L);
        break;
      case 0x03: //greater
        __ slt(AT, op2, op1);
        __ beq(AT, R0, L);
        __ nop();
        __ mov_s(dst, src);
        __ bind(L);
        break;
      case 0x04: //greater_equal
        __ slt(AT, op1, op2);
        __ bne(AT, R0, L);
        __ nop();
        __ mov_s(dst, src);
        __ bind(L);
        break;
      case 0x05: //less
        __ slt(AT, op1, op2);
        __ beq(AT, R0, L);
        __ nop();
        __ mov_s(dst, src);
        __ bind(L);
        break;
      case 0x06: //less_equal
        __ slt(AT, op2, op1);
        __ bne(AT, R0, L);
        __ nop();
        __ mov_s(dst, src);
        __ bind(L);
        break;
      default:
        Unimplemented();
    }
  %}

  ins_pipe( pipe_slow );
%}
// CMoveD guarded by a signed int compare.  Branch-around pattern (no
// GPR-conditional FP move): the branch is taken when the move must NOT
// happen.  AT is clobbered; delay slots are filled with nops.
instruct cmovD_cmpI_reg_reg(regD dst, regD src, mRegI tmp1, mRegI tmp2, cmpOp cop ) %{
  match(Set dst (CMoveD (Binary cop (CmpI tmp1 tmp2)) (Binary dst src)));
  ins_cost(200);
  format %{
    "CMP$cop $tmp1, $tmp2\t @cmovD_cmpI_reg_reg\n"
    "\tCMOV $dst, $src \t @cmovD_cmpI_reg_reg"
  %}

  ins_encode %{
    Register op1 = $tmp1$$Register;
    Register op2 = $tmp2$$Register;
    FloatRegister dst = as_FloatRegister($dst$$reg);
    FloatRegister src = as_FloatRegister($src$$reg);
    int flag = $cop$$cmpcode;
    Label L;

    switch(flag)
    {
      case 0x01: //equal
        __ bne(op1, op2, L);
        __ nop();
        __ mov_d(dst, src);
        __ bind(L);
        break;
      case 0x02: //not_equal
        __ beq(op1, op2, L);
        __ nop();
        __ mov_d(dst, src);
        __ bind(L);
        break;
      case 0x03: //greater
        __ slt(AT, op2, op1);
        __ beq(AT, R0, L);
        __ nop();
        __ mov_d(dst, src);
        __ bind(L);
        break;
      case 0x04: //greater_equal
        __ slt(AT, op1, op2);
        __ bne(AT, R0, L);
        __ nop();
        __ mov_d(dst, src);
        __ bind(L);
        break;
      case 0x05: //less
        __ slt(AT, op1, op2);
        __ beq(AT, R0, L);
        __ nop();
        __ mov_d(dst, src);
        __ bind(L);
        break;
      case 0x06: //less_equal
        __ slt(AT, op2, op1);
        __ bne(AT, R0, L);
        __ nop();
        __ mov_d(dst, src);
        __ bind(L);
        break;
      default:
        Unimplemented();
    }
  %}

  ins_pipe( pipe_slow );
%}
// CMoveD guarded by a pointer compare, using the branch-around pattern.
// NOTE(review): the ordering cases use the SIGNED slt on pointer values,
// while every other CmpP block in this file (cmovN_cmpP_reg_reg,
// cmovP_cmpP_reg_reg) uses the unsigned sltu — confirm whether C2 can ever
// emit an ordered CmpP here, or restrict/fix the ordering cases.
instruct cmovD_cmpP_reg_reg(regD dst, regD src, mRegP tmp1, mRegP tmp2, cmpOp cop ) %{
  match(Set dst (CMoveD (Binary cop (CmpP tmp1 tmp2)) (Binary dst src)));
  ins_cost(200);
  format %{
    "CMP$cop $tmp1, $tmp2\t @cmovD_cmpP_reg_reg\n"
    "\tCMOV $dst, $src \t @cmovD_cmpP_reg_reg"
  %}

  ins_encode %{
    Register op1 = $tmp1$$Register;
    Register op2 = $tmp2$$Register;
    FloatRegister dst = as_FloatRegister($dst$$reg);
    FloatRegister src = as_FloatRegister($src$$reg);
    int flag = $cop$$cmpcode;
    Label L;

    switch(flag)
    {
      case 0x01: //equal
        __ bne(op1, op2, L);
        __ nop();
        __ mov_d(dst, src);
        __ bind(L);
        break;
      case 0x02: //not_equal
        __ beq(op1, op2, L);
        __ nop();
        __ mov_d(dst, src);
        __ bind(L);
        break;
      case 0x03: //greater
        __ slt(AT, op2, op1);
        __ beq(AT, R0, L);
        __ nop();
        __ mov_d(dst, src);
        __ bind(L);
        break;
      case 0x04: //greater_equal
        __ slt(AT, op1, op2);
        __ bne(AT, R0, L);
        __ nop();
        __ mov_d(dst, src);
        __ bind(L);
        break;
      case 0x05: //less
        __ slt(AT, op1, op2);
        __ beq(AT, R0, L);
        __ nop();
        __ mov_d(dst, src);
        __ bind(L);
        break;
      case 0x06: //less_equal
        __ slt(AT, op2, op1);
        __ bne(AT, R0, L);
        __ nop();
        __ mov_d(dst, src);
        __ bind(L);
        break;
      default:
        Unimplemented();
    }
  %}

  ins_pipe( pipe_slow );
%}
//FIXME
// CMoveI guarded by a float compare.  c_xxx_s sets the FPU condition flag;
// movt/movf move the GPR on it.  Unordered (NaN) operands make the inverted
// (movf) cases fire — same pattern as the CmpD cmov blocks.
instruct cmovI_cmpF_reg_reg(mRegI dst, mRegI src, regF tmp1, regF tmp2, cmpOp cop ) %{
  match(Set dst (CMoveI (Binary cop (CmpF tmp1 tmp2)) (Binary dst src)));
  ins_cost(80);
  format %{
    "CMP$cop $tmp1, $tmp2\t @cmovI_cmpF_reg_reg\n"
    "\tCMOV $dst,$src \t @cmovI_cmpF_reg_reg"
  %}

  ins_encode %{
    FloatRegister reg_op1 = $tmp1$$FloatRegister;
    FloatRegister reg_op2 = $tmp2$$FloatRegister;
    Register dst = $dst$$Register;
    Register src = $src$$Register;
    int flag = $cop$$cmpcode;

    switch(flag)
    {
      case 0x01: //equal
        __ c_eq_s(reg_op1, reg_op2);
        __ movt(dst, src);
        break;
      case 0x02: //not_equal
        __ c_eq_s(reg_op1, reg_op2);
        __ movf(dst, src);
        break;
      case 0x03: //greater
        __ c_ole_s(reg_op1, reg_op2);
        __ movf(dst, src);
        break;
      case 0x04: //greater_equal
        __ c_olt_s(reg_op1, reg_op2);
        __ movf(dst, src);
        break;
      case 0x05: //less
        __ c_ult_s(reg_op1, reg_op2);
        __ movt(dst, src);
        break;
      case 0x06: //less_equal
        __ c_ule_s(reg_op1, reg_op2);
        __ movt(dst, src);
        break;
      default:
        Unimplemented();
    }
  %}
  ins_pipe( pipe_slow );
%}
// CMoveF guarded by a float compare, using the FP conditional moves
// movt_s/movf_s on the condition flag set by c_xxx_s.  Unordered (NaN)
// operands make the inverted (movf_s) cases fire.
instruct cmovF_cmpF_reg_reg(regF dst, regF src, regF tmp1, regF tmp2, cmpOp cop ) %{
  match(Set dst (CMoveF (Binary cop (CmpF tmp1 tmp2)) (Binary dst src)));
  ins_cost(200);
  format %{
    "CMP$cop $tmp1, $tmp2\t @cmovF_cmpF_reg_reg\n"
    "\tCMOV $dst,$src \t @cmovF_cmpF_reg_reg"
  %}

  ins_encode %{
    FloatRegister reg_op1 = $tmp1$$FloatRegister;
    FloatRegister reg_op2 = $tmp2$$FloatRegister;
    FloatRegister dst = $dst$$FloatRegister;
    FloatRegister src = $src$$FloatRegister;
    int flag = $cop$$cmpcode;

    switch(flag)
    {
      case 0x01: //equal
        __ c_eq_s(reg_op1, reg_op2);
        __ movt_s(dst, src);
        break;
      case 0x02: //not_equal
        __ c_eq_s(reg_op1, reg_op2);
        __ movf_s(dst, src);
        break;
      case 0x03: //greater
        __ c_ole_s(reg_op1, reg_op2);
        __ movf_s(dst, src);
        break;
      case 0x04: //greater_equal
        __ c_olt_s(reg_op1, reg_op2);
        __ movf_s(dst, src);
        break;
      case 0x05: //less
        __ c_ult_s(reg_op1, reg_op2);
        __ movt_s(dst, src);
        break;
      case 0x06: //less_equal
        __ c_ule_s(reg_op1, reg_op2);
        __ movt_s(dst, src);
        break;
      default:
        Unimplemented();
    }
  %}
  ins_pipe( pipe_slow );
%}
// Manifest a CmpL result in an integer register. Very painful.
// This is the test to avoid.
// Computes dst = sign of (src1 - src2) as -1, 0 or 1 (Long.compare).
instruct cmpL3_reg_reg(mRegI dst, mRegL src1, mRegL src2) %{
  match(Set dst (CmpL3 src1 src2));
  ins_cost(1000);
  format %{ "cmpL3 $dst, $src1, $src2 @ cmpL3_reg_reg" %}
  ins_encode %{
    Register opr1 = as_Register($src1$$reg);
    Register opr2 = as_Register($src2$$reg);
    Register dst  = as_Register($dst$$reg);

    // Branch-free, overflow-safe three-way compare:
    //   dst = (opr1 > opr2) - (opr1 < opr2)  ->  1, 0 or -1.
    // The previous implementation derived the sign from subu(opr1 - opr2),
    // which returns the wrong answer when the 64-bit subtraction overflows
    // (e.g. opr1 = min_jlong, opr2 = 1 gives a positive difference).
    __ slt(AT, opr1, opr2);    // AT  = (opr1 < opr2) ? 1 : 0, signed
    __ slt(dst, opr2, opr1);   // dst = (opr1 > opr2) ? 1 : 0
    __ subu(dst, dst, AT);     // 1, 0 or -1; AT is clobbered
  %}
  ins_pipe( pipe_slow );
%}
//
// less_result    = -1
// greater_result =  1
// equal_result   =  0
// nan_result     = -1
//
instruct cmpF3_reg_reg(mRegI dst, regF src1, regF src2) %{
  match(Set dst (CmpF3 src1 src2));
  ins_cost(1000);
  format %{ "cmpF3 $dst, $src1, $src2 @ cmpF3_reg_reg" %}
  ins_encode %{
    FloatRegister src1 = as_FloatRegister($src1$$reg);
    FloatRegister src2 = as_FloatRegister($src2$$reg);
    Register dst = as_Register($dst$$reg);

    Label Done;

    // c_ult is true for (src1 < src2) OR unordered, so NaN also yields -1
    // (set in the branch delay slot).
    __ c_ult_s(src1, src2);
    __ bc1t(Done);
    __ delayed()->daddiu(dst, R0, -1);

    // Neither less nor NaN: 1 if different, overwritten with 0 if equal.
    __ c_eq_s(src1, src2);
    __ move(dst, 1);
    __ movt(dst, R0);

    __ bind(Done);
  %}
  ins_pipe( pipe_slow );
%}
// Manifest a CmpD result (-1 / 0 / 1, NaN -> -1) in an integer register;
// double-precision twin of cmpF3_reg_reg above.
instruct cmpD3_reg_reg(mRegI dst, regD src1, regD src2) %{
  match(Set dst (CmpD3 src1 src2));
  ins_cost(1000);
  format %{ "cmpD3 $dst, $src1, $src2 @ cmpD3_reg_reg" %}
  ins_encode %{
    FloatRegister src1 = as_FloatRegister($src1$$reg);
    FloatRegister src2 = as_FloatRegister($src2$$reg);
    Register dst = as_Register($dst$$reg);

    Label Done;

    // c_ult is true for (src1 < src2) OR unordered, so NaN also yields -1
    // (set in the branch delay slot).
    __ c_ult_d(src1, src2);
    __ bc1t(Done);
    __ delayed()->daddiu(dst, R0, -1);

    // Neither less nor NaN: 1 if different, overwritten with 0 if equal.
    __ c_eq_d(src1, src2);
    __ move(dst, 1);
    __ movt(dst, R0);

    __ bind(Done);
  %}
  ins_pipe( pipe_slow );
%}
instruct clear_array(mRegL cnt, mRegP base, Universe dummy) %{
  match(Set dummy (ClearArray cnt base));
  format %{ "CLEAR_ARRAY base = $base, cnt = $cnt # Clear doublewords" %}
  ins_encode %{
    // cnt is the number of doublewords (HeapWords) to clear — each loop
    // iteration stores one doubleword (sd) and advances by wordSize —
    // and base points to the starting address of the array.
    Register base = $base$$Register;
    Register num  = $cnt$$Register;
    Label Loop, done;

    // Nothing to do for a zero count; delay slot copies base into AT,
    // which serves as the running store address.
    __ beq(num, R0, done);
    __ delayed()->daddu(AT, base, R0);

    __ move(T9, num); /* T9 = remaining doubleword count */

    __ bind(Loop);
    __ sd(R0, AT, 0);
    __ daddi(T9, T9, -1);
    __ bne(T9, R0, Loop);
    __ delayed()->daddi(AT, AT, wordSize);   // advance in the delay slot

    __ bind(done);
  %}
  ins_pipe( pipe_slow );
%}
// Compare two UTF-16 char sequences: result is the difference of the first
// mismatching chars, or (cnt1 - cnt2) if one is a prefix of the other.
// str1/str2 point at the first char; cnt1/cnt2 are the lengths. All four
// input registers and AT are clobbered.
instruct string_compare(a4_RegP str1, mA5RegI cnt1, a6_RegP str2, mA7RegI cnt2, no_Ax_mRegI result) %{
  match(Set result (StrComp (Binary str1 cnt1) (Binary str2 cnt2)));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2);

  format %{ "String Compare $str1[len: $cnt1], $str2[len: $cnt2] -> $result @ string_compare" %}
  ins_encode %{
    // Get the first character position in both strings
    // [8] char array, [12] offset, [16] count
    Register str1   = $str1$$Register;
    Register str2   = $str2$$Register;
    Register cnt1   = $cnt1$$Register;
    Register cnt2   = $cnt2$$Register;
    Register result = $result$$Register;

    Label L, Loop, haveResult, done;

    // compute the difference of lengths (in result); this is the answer
    // when the shorter string is a prefix of the longer one
    __ subu(result, cnt1, cnt2); // result holds the difference of two lengths

    // compute the shorter length (in cnt1)
    __ slt(AT, cnt2, cnt1);
    __ movn(cnt1, cnt2, AT);

    // Now the shorter length is in cnt1 and cnt2 can be used as a tmp register
    __ bind(Loop);                         // Loop begin
    __ beq(cnt1, R0, done);
    __ delayed()->lhu(AT, str1, 0);;       // load current char of str1 in the delay slot

    // compare current character
    __ lhu(cnt2, str2, 0);
    __ bne(AT, cnt2, haveResult);
    __ delayed()->addi(str1, str1, 2);     // advance str1 (2 bytes per char)
    __ addi(str2, str2, 2);
    __ b(Loop);
    __ delayed()->addi(cnt1, cnt1, -1);    // Loop end

    __ bind(haveResult);
    // first mismatch: result = char(str1) - char(str2)
    __ subu(result, AT, cnt2);

    __ bind(done);
  %}

  ins_pipe( pipe_slow );
%}
// intrinsic optimization
// StrEquals: result = 1 if the cnt chars at str1 and str2 are identical
// (or the two pointers are the same array), else 0.  str1/str2/cnt are
// clobbered; temp and AT are scratch.
instruct string_equals(a4_RegP str1, a5_RegP str2, mA6RegI cnt, mA7RegI temp, no_Ax_mRegI result) %{
  match(Set result (StrEquals (Binary str1 str2) cnt));
  effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt, KILL temp);

  format %{ "String Equal $str1, $str2, len:$cnt tmp:$temp -> $result @ string_equals" %}
  ins_encode %{
    // Get the first character position in both strings
    // [8] char array, [12] offset, [16] count
    Register str1   = $str1$$Register;
    Register str2   = $str2$$Register;
    Register cnt    = $cnt$$Register;
    Register tmp    = $temp$$Register;
    Register result = $result$$Register;

    Label Loop, done;

    // identical array pointers compare equal; the daddiu in the delay slot
    // presets result = 1
    __ beq(str1, str2, done); // same char[] ?
    __ daddiu(result, R0, 1);

    __ bind(Loop);            // Loop begin
    __ beq(cnt, R0, done);
    __ daddiu(result, R0, 1); // count == 0: all chars matched, result stays 1

    // compare current character; on mismatch the delay slot sets result = 0
    __ lhu(AT, str1, 0);;
    __ lhu(tmp, str2, 0);
    __ bne(AT, tmp, done);
    __ delayed()->daddi(result, R0, 0);
    __ addi(str1, str1, 2);   // advance both strings (2 bytes per char)
    __ addi(str2, str2, 2);
    __ b(Loop);
    __ delayed()->addi(cnt, cnt, -1); // Loop end

    __ bind(done);
  %}

  ins_pipe( pipe_slow );
%}
9846 //----------Arithmetic Instructions-------------------------------------------
9847 //----------Addition Instructions---------------------------------------------
// Integer add: dst = src1 + src2.
instruct addI_Reg_Reg(mRegI dst, mRegI src1, mRegI src2) %{
  match(Set dst (AddI src1 src2));

  format %{ "add $dst, $src1, $src2 #@addI_Reg_Reg" %}
  ins_encode %{
    // 32-bit addition; addu32 keeps the result sign-extended in the
    // 64-bit register as the port requires for ints.
    __ addu32($dst$$Register, $src1$$Register, $src2$$Register);
  %}
  ins_pipe( ialu_regI_regI );
%}
9861 instruct addI_Reg_imm(mRegI dst, mRegI src1, immI src2) %{
9862 match(Set dst (AddI src1 src2));
9864 format %{ "add $dst, $src1, $src2 #@addI_Reg_imm" %}
9865 ins_encode %{
9866 Register dst = $dst$$Register;
9867 Register src1 = $src1$$Register;
9868 int imm = $src2$$constant;
9870 if(Assembler::is_simm16(imm)) {
9871 __ addiu32(dst, src1, imm);
9872 } else {
9873 __ move(AT, imm);
9874 __ addu32(dst, src1, AT);
9875 }
9876 %}
9877 ins_pipe( ialu_regI_regI );
9878 %}
// Pointer addition (AddP): 64-bit adds; ConvI2L form assumes the int input is
// already sign-extended in its 64-bit register — TODO confirm port invariant.
9880 instruct addP_reg_reg(mRegP dst, mRegP src1, mRegL src2) %{
9881 match(Set dst (AddP src1 src2));
9883 format %{ "dadd $dst, $src1, $src2 #@addP_reg_reg" %}
9885 ins_encode %{
9886 Register dst = $dst$$Register;
9887 Register src1 = $src1$$Register;
9888 Register src2 = $src2$$Register;
9889 __ daddu(dst, src1, src2);
9890 %}
9892 ins_pipe( ialu_regI_regI );
9893 %}
9895 instruct addP_reg_reg_convI2L(mRegP dst, mRegP src1, mRegI src2) %{
9896 match(Set dst (AddP src1 (ConvI2L src2)));
9898 format %{ "dadd $dst, $src1, $src2 #@addP_reg_reg_convI2L" %}
9900 ins_encode %{
9901 Register dst = $dst$$Register;
9902 Register src1 = $src1$$Register;
9903 Register src2 = $src2$$Register;
9904 __ daddu(dst, src1, src2); // no explicit extension emitted
9905 %}
9907 ins_pipe( ialu_regI_regI );
9908 %}
9910 instruct addP_reg_imm(mRegP dst, mRegP src1, immL src2) %{
9911 match(Set dst (AddP src1 src2));
9913 format %{ "daddi $dst, $src1, $src2 #@addP_reg_imm" %}
9914 ins_encode %{
9915 Register src1 = $src1$$Register;
9916 long src2 = $src2$$constant;
9917 Register dst = $dst$$Register;
9919 if(Assembler::is_simm16(src2)) {
9920 __ daddiu(dst, src1, src2); // offset fits simm16
9921 } else {
9922 __ set64(AT, src2); // materialize 64-bit constant in AT
9923 __ daddu(dst, src1, AT);
9924 }
9925 %}
9926 ins_pipe( ialu_regI_imm16 );
9927 %}
9929 // Add Long Register with Register
// Long (64-bit) addition family. The ConvI2L variants emit a plain daddu with
// no extension — assumes int operands are kept sign-extended; TODO confirm.
9930 instruct addL_Reg_Reg(mRegL dst, mRegL src1, mRegL src2) %{
9931 match(Set dst (AddL src1 src2));
9932 ins_cost(200);
9933 format %{ "ADD $dst, $src1, $src2 #@addL_Reg_Reg\t" %}
9935 ins_encode %{
9936 Register dst_reg = as_Register($dst$$reg);
9937 Register src1_reg = as_Register($src1$$reg);
9938 Register src2_reg = as_Register($src2$$reg);
9940 __ daddu(dst_reg, src1_reg, src2_reg);
9941 %}
9943 ins_pipe( ialu_regL_regL );
9944 %}
9946 instruct addL_Reg_imm(mRegL dst, mRegL src1, immL16 src2)
9947 %{
9948 match(Set dst (AddL src1 src2));
9950 format %{ "ADD $dst, $src1, $src2 #@addL_Reg_imm " %}
9951 ins_encode %{
9952 Register dst_reg = as_Register($dst$$reg);
9953 Register src1_reg = as_Register($src1$$reg);
9954 int src2_imm = $src2$$constant;
9956 __ daddiu(dst_reg, src1_reg, src2_imm); // immL16 guarantees simm16 fit
9957 %}
9959 ins_pipe( ialu_regL_regL );
9960 %}
9962 instruct addL_RegI2L_imm(mRegL dst, mRegI src1, immL16 src2)
9963 %{
9964 match(Set dst (AddL (ConvI2L src1) src2));
9966 format %{ "ADD $dst, $src1, $src2 #@addL_RegI2L_imm " %}
9967 ins_encode %{
9968 Register dst_reg = as_Register($dst$$reg);
9969 Register src1_reg = as_Register($src1$$reg);
9970 int src2_imm = $src2$$constant;
9972 __ daddiu(dst_reg, src1_reg, src2_imm);
9973 %}
9975 ins_pipe( ialu_regL_regL );
9976 %}
9978 instruct addL_RegI2L_Reg(mRegL dst, mRegI src1, mRegL src2) %{
9979 match(Set dst (AddL (ConvI2L src1) src2));
9980 ins_cost(200);
9981 format %{ "ADD $dst, $src1, $src2 #@addL_RegI2L_Reg\t" %}
9983 ins_encode %{
9984 Register dst_reg = as_Register($dst$$reg);
9985 Register src1_reg = as_Register($src1$$reg);
9986 Register src2_reg = as_Register($src2$$reg);
9988 __ daddu(dst_reg, src1_reg, src2_reg);
9989 %}
9991 ins_pipe( ialu_regL_regL );
9992 %}
9994 instruct addL_RegI2L_RegI2L(mRegL dst, mRegI src1, mRegI src2) %{
9995 match(Set dst (AddL (ConvI2L src1) (ConvI2L src2)));
9996 ins_cost(200);
9997 format %{ "ADD $dst, $src1, $src2 #@addL_RegI2L_RegI2L\t" %}
9999 ins_encode %{
10000 Register dst_reg = as_Register($dst$$reg);
10001 Register src1_reg = as_Register($src1$$reg);
10002 Register src2_reg = as_Register($src2$$reg);
10004 __ daddu(dst_reg, src1_reg, src2_reg);
10005 %}
10007 ins_pipe( ialu_regL_regL );
10008 %}
10010 instruct addL_Reg_RegI2L(mRegL dst, mRegL src1, mRegI src2) %{
10011 match(Set dst (AddL src1 (ConvI2L src2)));
10012 ins_cost(200);
10013 format %{ "ADD $dst, $src1, $src2 #@addL_Reg_RegI2L\t" %}
10015 ins_encode %{
10016 Register dst_reg = as_Register($dst$$reg);
10017 Register src1_reg = as_Register($src1$$reg);
10018 Register src2_reg = as_Register($src2$$reg);
10020 __ daddu(dst_reg, src1_reg, src2_reg);
10021 %}
10023 ins_pipe( ialu_regL_regL );
10024 %}
10026 //----------Subtraction Instructions-------------------------------------------
10027 // Integer Subtraction Instructions
10028 instruct subI_Reg_Reg(mRegI dst, mRegI src1, mRegI src2) %{
10029 match(Set dst (SubI src1 src2));
10030 ins_cost(100);
10032 format %{ "sub $dst, $src1, $src2 #@subI_Reg_Reg" %}
10033 ins_encode %{
10034 Register dst = $dst$$Register;
10035 Register src1 = $src1$$Register;
10036 Register src2 = $src2$$Register;
10037 __ subu32(dst, src1, src2); // 32-bit subtract, no overflow trap
10038 %}
10039 ins_pipe( ialu_regI_regI );
10040 %}
10042 instruct subI_Reg_immI16_sub(mRegI dst, mRegI src1, immI16_sub src2) %{
10043 match(Set dst (SubI src1 src2));
10044 ins_cost(80);
10046 format %{ "sub $dst, $src1, $src2 #@subI_Reg_immI16_sub" %}
10047 ins_encode %{
10048 Register dst = $dst$$Register;
10049 Register src1 = $src1$$Register;
10050 __ addiu32(dst, src1, -1 * $src2$$constant); // x - c == x + (-c); immI16_sub presumably guarantees -c fits simm16
10051 %}
10052 ins_pipe( ialu_regI_regI );
10053 %}
10055 instruct negI_Reg(mRegI dst, immI0 zero, mRegI src) %{
10056 match(Set dst (SubI zero src));
10057 ins_cost(80);
10059 format %{ "neg $dst, $src #@negI_Reg" %}
10060 ins_encode %{
10061 Register dst = $dst$$Register;
10062 Register src = $src$$Register;
10063 __ subu32(dst, R0, src); // 0 - src
10064 %}
10065 ins_pipe( ialu_regI_regI );
10066 %}
10068 instruct negL_Reg(mRegL dst, immL0 zero, mRegL src) %{
10069 match(Set dst (SubL zero src));
10070 ins_cost(80);
10072 format %{ "neg $dst, $src #@negL_Reg" %}
10073 ins_encode %{
10074 Register dst = $dst$$Register;
10075 Register src = $src$$Register;
10076 __ subu(dst, R0, src); // 64-bit 0 - src
10077 %}
10078 ins_pipe( ialu_regI_regI );
10079 %}
10081 instruct subL_Reg_immL16_sub(mRegL dst, mRegL src1, immL16_sub src2) %{
10082 match(Set dst (SubL src1 src2));
10083 ins_cost(80);
10085 format %{ "sub $dst, $src1, $src2 #@subL_Reg_immL16_sub" %}
10086 ins_encode %{
10087 Register dst = $dst$$Register;
10088 Register src1 = $src1$$Register;
10089 __ daddiu(dst, src1, -1 * $src2$$constant); // same add-negated-imm trick, 64-bit
10090 %}
10091 ins_pipe( ialu_regI_regI );
10092 %}
10094 // Subtract Long Register with Register.
// ConvI2L operands again rely on int registers holding sign-extended values.
10095 instruct subL_Reg_Reg(mRegL dst, mRegL src1, mRegL src2) %{
10096 match(Set dst (SubL src1 src2));
10097 ins_cost(100);
10098 format %{ "SubL $dst, $src1, $src2 @ subL_Reg_Reg" %}
10099 ins_encode %{
10100 Register dst = as_Register($dst$$reg);
10101 Register src1 = as_Register($src1$$reg);
10102 Register src2 = as_Register($src2$$reg);
10104 __ subu(dst, src1, src2);
10105 %}
10106 ins_pipe( ialu_regL_regL );
10107 %}
10109 instruct subL_Reg_RegI2L(mRegL dst, mRegL src1, mRegI src2) %{
10110 match(Set dst (SubL src1 (ConvI2L src2)));
10111 ins_cost(100);
10112 format %{ "SubL $dst, $src1, $src2 @ subL_Reg_RegI2L" %}
10113 ins_encode %{
10114 Register dst = as_Register($dst$$reg);
10115 Register src1 = as_Register($src1$$reg);
10116 Register src2 = as_Register($src2$$reg);
10118 __ subu(dst, src1, src2);
10119 %}
10120 ins_pipe( ialu_regL_regL );
10121 %}
10123 instruct subL_RegI2L_Reg(mRegL dst, mRegI src1, mRegL src2) %{
10124 match(Set dst (SubL (ConvI2L src1) src2));
10125 ins_cost(200);
10126 format %{ "SubL $dst, $src1, $src2 @ subL_RegI2L_Reg" %}
10127 ins_encode %{
10128 Register dst = as_Register($dst$$reg);
10129 Register src1 = as_Register($src1$$reg);
10130 Register src2 = as_Register($src2$$reg);
10132 __ subu(dst, src1, src2);
10133 %}
10134 ins_pipe( ialu_regL_regL );
10135 %}
10137 instruct subL_RegI2L_RegI2L(mRegL dst, mRegI src1, mRegI src2) %{
10138 match(Set dst (SubL (ConvI2L src1) (ConvI2L src2)));
10139 ins_cost(200);
10140 format %{ "SubL $dst, $src1, $src2 @ subL_RegI2L_RegI2L" %}
10141 ins_encode %{
10142 Register dst = as_Register($dst$$reg);
10143 Register src1 = as_Register($src1$$reg);
10144 Register src2 = as_Register($src2$$reg);
10146 __ subu(dst, src1, src2);
10147 %}
10148 ins_pipe( ialu_regL_regL );
10149 %}
10151 // Integer MOD with Register
// 32-bit remainder via div + mfhi (HI holds the remainder). The Loongson
// gsmod path is deliberately disabled below (measured slower than div+mfhi).
// NOTE(review): no explicit zero-divisor trap here (divI_Reg_Reg emits teq) —
// confirm ModI division-by-zero is guarded elsewhere.
10152 instruct modI_Reg_Reg(mRegI dst, mRegI src1, mRegI src2) %{
10153 match(Set dst (ModI src1 src2));
10154 ins_cost(300);
10155 format %{ "modi $dst, $src1, $src2 @ modI_Reg_Reg" %}
10156 ins_encode %{
10157 Register dst = $dst$$Register;
10158 Register src1 = $src1$$Register;
10159 Register src2 = $src2$$Register;
10161 //if (UseLoongsonISA) {
10162 if (0) {
10163 // 2016.08.10
10164 // Experiments show that gsmod is slower that div+mfhi.
10165 // So I just disable it here.
10166 __ gsmod(dst, src1, src2);
10167 } else {
10168 __ div(src1, src2);
10169 __ mfhi(dst); // remainder lives in HI
10170 }
10171 %}
10173 //ins_pipe( ialu_mod );
10174 ins_pipe( ialu_regI_regI );
10175 %}
// Long (64-bit) remainder: Loongson gsdmod when available, else ddiv + mfhi.
10177 instruct modL_reg_reg(mRegL dst, mRegL src1, mRegL src2) %{
10178 match(Set dst (ModL src1 src2));
10179 format %{ "modL $dst, $src1, $src2 @modL_reg_reg" %}
10181 ins_encode %{
10182 Register dst = as_Register($dst$$reg);
10183 Register op1 = as_Register($src1$$reg);
10184 Register op2 = as_Register($src2$$reg);
10186 if (UseLoongsonISA) {
10187 __ gsdmod(dst, op1, op2); // single-instruction 64-bit mod
10188 } else {
10189 __ ddiv(op1, op2);
10190 __ mfhi(dst); // remainder in HI
10191 }
10192 %}
10193 ins_pipe( pipe_slow );
10194 %}
// 32-bit multiply, and a fused multiply-add via the HI/LO accumulator.
10196 instruct mulI_Reg_Reg(mRegI dst, mRegI src1, mRegI src2) %{
10197 match(Set dst (MulI src1 src2));
10199 ins_cost(300);
10200 format %{ "mul $dst, $src1, $src2 @ mulI_Reg_Reg" %}
10201 ins_encode %{
10202 Register src1 = $src1$$Register;
10203 Register src2 = $src2$$Register;
10204 Register dst = $dst$$Register;
10206 __ mul(dst, src1, src2); // low 32 bits of the product
10207 %}
10208 ins_pipe( ialu_mult );
10209 %}
10211 instruct maddI_Reg_Reg(mRegI dst, mRegI src1, mRegI src2, mRegI src3) %{
10212 match(Set dst (AddI (MulI src1 src2) src3));
10214 ins_cost(999);
10215 format %{ "madd $dst, $src1 * $src2 + $src3 #@maddI_Reg_Reg" %}
10216 ins_encode %{
10217 Register src1 = $src1$$Register;
10218 Register src2 = $src2$$Register;
10219 Register src3 = $src3$$Register;
10220 Register dst = $dst$$Register;
10222 __ mtlo(src3); // seed accumulator LO with the addend
10223 __ madd(src1, src2); // (HI,LO) += src1*src2; HI left stale, but only
10224 __ mflo(dst); // the low 32 bits (LO) matter for int results
10225 %}
10226 ins_pipe( ialu_mult );
10227 %}
// 32-bit signed division. MIPS div never traps on a zero divisor, so a
// conditional trap (teq, code 0x7) is emitted to raise the exception manually.
10229 instruct divI_Reg_Reg(mRegI dst, mRegI src1, mRegI src2) %{
10230 match(Set dst (DivI src1 src2));
10232 ins_cost(300);
10233 format %{ "div $dst, $src1, $src2 @ divI_Reg_Reg" %}
10234 ins_encode %{
10235 Register src1 = $src1$$Register;
10236 Register src2 = $src2$$Register;
10237 Register dst = $dst$$Register;
10239 /* 2012/4/21 Jin: In MIPS, div does not cause exception.
10240 We must trap an exception manually. */
10241 __ teq(R0, src2, 0x7); // trap if src2 == 0
10243 if (UseLoongsonISA) {
10244 __ gsdiv(dst, src1, src2);
10245 } else {
10246 __ div(src1, src2);
10248 __ nop(); // presumably a pipeline/hazard gap before mflo — confirm
10249 __ nop();
10250 __ mflo(dst); // quotient in LO
10251 }
10252 %}
10253 ins_pipe( ialu_mod );
10254 %}
// FP division: IEEE semantics, no manual trap needed (x/0 yields Inf/NaN).
10256 instruct divF_Reg_Reg(regF dst, regF src1, regF src2) %{
10257 match(Set dst (DivF src1 src2));
10259 ins_cost(300);
10260 format %{ "divF $dst, $src1, $src2 @ divF_Reg_Reg" %}
10261 ins_encode %{
10262 FloatRegister src1 = $src1$$FloatRegister;
10263 FloatRegister src2 = $src2$$FloatRegister;
10264 FloatRegister dst = $dst$$FloatRegister;
10266 /* Here do we need to trap an exception manually ? */
10267 __ div_s(dst, src1, src2);
10268 %}
10269 ins_pipe( pipe_slow );
10270 %}
10272 instruct divD_Reg_Reg(regD dst, regD src1, regD src2) %{
10273 match(Set dst (DivD src1 src2));
10275 ins_cost(300);
10276 format %{ "divD $dst, $src1, $src2 @ divD_Reg_Reg" %}
10277 ins_encode %{
10278 FloatRegister src1 = $src1$$FloatRegister;
10279 FloatRegister src2 = $src2$$FloatRegister;
10280 FloatRegister dst = $dst$$FloatRegister;
10282 /* Here do we need to trap an exception manually ? */
10283 __ div_d(dst, src1, src2);
10284 %}
10285 ins_pipe( pipe_slow );
10286 %}
// 64-bit multiply: Loongson gsdmult when available, else dmult + mflo.
10288 instruct mulL_reg_reg(mRegL dst, mRegL src1, mRegL src2) %{
10289 match(Set dst (MulL src1 src2));
10290 format %{ "mulL $dst, $src1, $src2 @mulL_reg_reg" %}
10291 ins_encode %{
10292 Register dst = as_Register($dst$$reg);
10293 Register op1 = as_Register($src1$$reg);
10294 Register op2 = as_Register($src2$$reg);
10296 if (UseLoongsonISA) {
10297 __ gsdmult(dst, op1, op2);
10298 } else {
10299 __ dmult(op1, op2);
10300 __ mflo(dst); // low 64 bits of the 128-bit product
10301 }
10302 %}
10303 ins_pipe( pipe_slow );
10304 %}
10306 instruct mulL_reg_regI2L(mRegL dst, mRegL src1, mRegI src2) %{
10307 match(Set dst (MulL src1 (ConvI2L src2)));
10308 format %{ "mulL $dst, $src1, $src2 @mulL_reg_regI2L" %}
10309 ins_encode %{
10310 Register dst = as_Register($dst$$reg);
10311 Register op1 = as_Register($src1$$reg);
10312 Register op2 = as_Register($src2$$reg);
10314 if (UseLoongsonISA) {
10315 __ gsdmult(dst, op1, op2);
10316 } else {
10317 __ dmult(op1, op2);
10318 __ mflo(dst);
10319 }
10320 %}
10321 ins_pipe( pipe_slow );
10322 %}
// 64-bit signed division.
// NOTE(review): unlike divI_Reg_Reg there is no teq zero-divisor trap here —
// confirm DivL's division-by-zero check is handled elsewhere for this port.
10324 instruct divL_reg_reg(mRegL dst, mRegL src1, mRegL src2) %{
10325 match(Set dst (DivL src1 src2));
10326 format %{ "divL $dst, $src1, $src2 @divL_reg_reg" %}
10328 ins_encode %{
10329 Register dst = as_Register($dst$$reg);
10330 Register op1 = as_Register($src1$$reg);
10331 Register op2 = as_Register($src2$$reg);
10333 if (UseLoongsonISA) {
10334 __ gsddiv(dst, op1, op2);
10335 } else {
10336 __ ddiv(op1, op2);
10337 __ mflo(dst); // quotient in LO
10338 }
10339 %}
10340 ins_pipe( pipe_slow );
10341 %}
// Single- and double-precision FP add/subtract.
10343 instruct addF_reg_reg(regF dst, regF src1, regF src2) %{
10344 match(Set dst (AddF src1 src2));
10345 format %{ "AddF $dst, $src1, $src2 @addF_reg_reg" %}
10346 ins_encode %{
10347 FloatRegister src1 = as_FloatRegister($src1$$reg);
10348 FloatRegister src2 = as_FloatRegister($src2$$reg);
10349 FloatRegister dst = as_FloatRegister($dst$$reg);
10351 __ add_s(dst, src1, src2);
10352 %}
10353 ins_pipe( fpu_regF_regF );
10354 %}
10356 instruct subF_reg_reg(regF dst, regF src1, regF src2) %{
10357 match(Set dst (SubF src1 src2));
10358 format %{ "SubF $dst, $src1, $src2 @subF_reg_reg" %}
10359 ins_encode %{
10360 FloatRegister src1 = as_FloatRegister($src1$$reg);
10361 FloatRegister src2 = as_FloatRegister($src2$$reg);
10362 FloatRegister dst = as_FloatRegister($dst$$reg);
10364 __ sub_s(dst, src1, src2);
10365 %}
10366 ins_pipe( fpu_regF_regF );
10367 %}
10368 instruct addD_reg_reg(regD dst, regD src1, regD src2) %{
10369 match(Set dst (AddD src1 src2));
10370 format %{ "AddD $dst, $src1, $src2 @addD_reg_reg" %}
10371 ins_encode %{
10372 FloatRegister src1 = as_FloatRegister($src1$$reg);
10373 FloatRegister src2 = as_FloatRegister($src2$$reg);
10374 FloatRegister dst = as_FloatRegister($dst$$reg);
10376 __ add_d(dst, src1, src2);
10377 %}
10378 ins_pipe( fpu_regF_regF );
10379 %}
10381 instruct subD_reg_reg(regD dst, regD src1, regD src2) %{
10382 match(Set dst (SubD src1 src2));
10383 format %{ "SubD $dst, $src1, $src2 @subD_reg_reg" %}
10384 ins_encode %{
10385 FloatRegister src1 = as_FloatRegister($src1$$reg);
10386 FloatRegister src2 = as_FloatRegister($src2$$reg);
10387 FloatRegister dst = as_FloatRegister($dst$$reg);
10389 __ sub_d(dst, src1, src2);
10390 %}
10391 ins_pipe( fpu_regF_regF );
10392 %}
// FP negation.
// NOTE(review): MIPS neg.s/neg.d are arithmetic and may not simply flip the
// sign bit of a NaN operand — confirm Java NegF/NegD NaN semantics are met.
10394 instruct negF_reg(regF dst, regF src) %{
10395 match(Set dst (NegF src));
10396 format %{ "negF $dst, $src @negF_reg" %}
10397 ins_encode %{
10398 FloatRegister src = as_FloatRegister($src$$reg);
10399 FloatRegister dst = as_FloatRegister($dst$$reg);
10401 __ neg_s(dst, src);
10402 %}
10403 ins_pipe( fpu_regF_regF );
10404 %}
10406 instruct negD_reg(regD dst, regD src) %{
10407 match(Set dst (NegD src));
10408 format %{ "negD $dst, $src @negD_reg" %}
10409 ins_encode %{
10410 FloatRegister src = as_FloatRegister($src$$reg);
10411 FloatRegister dst = as_FloatRegister($dst$$reg);
10413 __ neg_d(dst, src);
10414 %}
10415 ins_pipe( fpu_regF_regF );
10416 %}
// FP multiply and fused multiply-add. The madd forms carry a huge ins_cost so
// the matcher avoids them (fused rounding differs from separate mul+add, which
// matters for strict Java FP compatibility — see the comment below).
10419 instruct mulF_reg_reg(regF dst, regF src1, regF src2) %{
10420 match(Set dst (MulF src1 src2));
10421 format %{ "MULF $dst, $src1, $src2 @mulF_reg_reg" %}
10422 ins_encode %{
10423 FloatRegister src1 = $src1$$FloatRegister;
10424 FloatRegister src2 = $src2$$FloatRegister;
10425 FloatRegister dst = $dst$$FloatRegister;
10427 __ mul_s(dst, src1, src2);
10428 %}
10429 ins_pipe( fpu_regF_regF );
10430 %}
10432 instruct maddF_reg_reg(regF dst, regF src1, regF src2, regF src3) %{
10433 match(Set dst (AddF (MulF src1 src2) src3));
10434 // For compatibility reason (e.g. on the Loongson platform), disable this guy.
10435 ins_cost(44444);
10436 format %{ "maddF $dst, $src1, $src2, $src3 @maddF_reg_reg" %}
10437 ins_encode %{
10438 FloatRegister src1 = $src1$$FloatRegister;
10439 FloatRegister src2 = $src2$$FloatRegister;
10440 FloatRegister src3 = $src3$$FloatRegister;
10441 FloatRegister dst = $dst$$FloatRegister;
10443 __ madd_s(dst, src1, src2, src3);
10444 %}
10445 ins_pipe( fpu_regF_regF );
10446 %}
10448 // Mul two double precision floating piont number
10449 instruct mulD_reg_reg(regD dst, regD src1, regD src2) %{
10450 match(Set dst (MulD src1 src2));
10451 format %{ "MULD $dst, $src1, $src2 @mulD_reg_reg" %}
10452 ins_encode %{
10453 FloatRegister src1 = $src1$$FloatRegister;
10454 FloatRegister src2 = $src2$$FloatRegister;
10455 FloatRegister dst = $dst$$FloatRegister;
10457 __ mul_d(dst, src1, src2);
10458 %}
10459 ins_pipe( fpu_regF_regF );
10460 %}
10462 instruct maddD_reg_reg(regD dst, regD src1, regD src2, regD src3) %{
10463 match(Set dst (AddD (MulD src1 src2) src3));
10464 // For compatibility reason (e.g. on the Loongson platform), disable this guy.
10465 ins_cost(44444);
10466 format %{ "maddD $dst, $src1, $src2, $src3 @maddD_reg_reg" %}
10467 ins_encode %{
10468 FloatRegister src1 = $src1$$FloatRegister;
10469 FloatRegister src2 = $src2$$FloatRegister;
10470 FloatRegister src3 = $src3$$FloatRegister;
10471 FloatRegister dst = $dst$$FloatRegister;
10473 __ madd_d(dst, src1, src2, src3);
10474 %}
10475 ins_pipe( fpu_regF_regF );
10476 %}
// FP absolute value and square root intrinsics.
10478 instruct absF_reg(regF dst, regF src) %{
10479 match(Set dst (AbsF src));
10480 ins_cost(100);
10481 format %{ "absF $dst, $src @absF_reg" %}
10482 ins_encode %{
10483 FloatRegister src = as_FloatRegister($src$$reg);
10484 FloatRegister dst = as_FloatRegister($dst$$reg);
10486 __ abs_s(dst, src);
10487 %}
10488 ins_pipe( fpu_regF_regF );
10489 %}
10492 // intrinsics for math_native.
10493 // AbsD SqrtD CosD SinD TanD LogD Log10D
10495 instruct absD_reg(regD dst, regD src) %{
10496 match(Set dst (AbsD src));
10497 ins_cost(100);
10498 format %{ "absD $dst, $src @absD_reg" %}
10499 ins_encode %{
10500 FloatRegister src = as_FloatRegister($src$$reg);
10501 FloatRegister dst = as_FloatRegister($dst$$reg);
10503 __ abs_d(dst, src);
10504 %}
10505 ins_pipe( fpu_regF_regF );
10506 %}
10508 instruct sqrtD_reg(regD dst, regD src) %{
10509 match(Set dst (SqrtD src));
10510 ins_cost(100);
10511 format %{ "SqrtD $dst, $src @sqrtD_reg" %}
10512 ins_encode %{
10513 FloatRegister src = as_FloatRegister($src$$reg);
10514 FloatRegister dst = as_FloatRegister($dst$$reg);
10516 __ sqrt_d(dst, src);
10517 %}
10518 ins_pipe( fpu_regF_regF );
10519 %}
10521 instruct sqrtF_reg(regF dst, regF src) %{
// Matches the float-sqrt idiom (D2F(SqrtD(F2D(x)))) and collapses it to a
// single single-precision sqrt — presumably result-equivalent; confirm.
10522 match(Set dst (ConvD2F (SqrtD (ConvF2D src))));
10523 ins_cost(100);
10524 format %{ "SqrtF $dst, $src @sqrtF_reg" %}
10525 ins_encode %{
10526 FloatRegister src = as_FloatRegister($src$$reg);
10527 FloatRegister dst = as_FloatRegister($dst$$reg);
10529 __ sqrt_s(dst, src);
10530 %}
10531 ins_pipe( fpu_regF_regF );
10532 %}
10533 //----------------------------------Logical Instructions----------------------
10534 //__________________________________Integer Logical Instructions-------------
10536 //And Instuctions
10537 // And Register with Immediate
// General immediate form materializes the constant in AT; the 0..65535 form
// (lower cost) uses the zero-extending andi immediate directly.
10538 instruct andI_Reg_immI(mRegI dst, mRegI src1, immI src2) %{
10539 match(Set dst (AndI src1 src2));
10541 format %{ "and $dst, $src1, $src2 #@andI_Reg_immI" %}
10542 ins_encode %{
10543 Register dst = $dst$$Register;
10544 Register src = $src1$$Register;
10545 int val = $src2$$constant;
10547 __ move(AT, val);
10548 __ andr(dst, src, AT);
10549 %}
10550 ins_pipe( ialu_regI_regI );
10551 %}
10553 instruct andI_Reg_imm_0_65535(mRegI dst, mRegI src1, immI_0_65535 src2) %{
10554 match(Set dst (AndI src1 src2));
10555 ins_cost(60);
10557 format %{ "and $dst, $src1, $src2 #@andI_Reg_imm_0_65535" %}
10558 ins_encode %{
10559 Register dst = $dst$$Register;
10560 Register src = $src1$$Register;
10561 int val = $src2$$constant;
10563 __ andi(dst, src, val);
10564 %}
10565 ins_pipe( ialu_regI_regI );
10566 %}
// AND with a contiguous low-bit mask (2^k - 1): emitted as a bit-field
// extract (ext/dext) of k bits from position 0; is_int_mask/is_jlong_mask
// return the mask width k.
10568 instruct andI_Reg_immI_nonneg_mask(mRegI dst, mRegI src1, immI_nonneg_mask mask) %{
10569 match(Set dst (AndI src1 mask));
10570 ins_cost(60);
10572 format %{ "and $dst, $src1, $mask #@andI_Reg_immI_nonneg_mask" %}
10573 ins_encode %{
10574 Register dst = $dst$$Register;
10575 Register src = $src1$$Register;
10576 int size = Assembler::is_int_mask($mask$$constant);
10578 __ ext(dst, src, 0, size);
10579 %}
10580 ins_pipe( ialu_regI_regI );
10581 %}
10583 instruct andL_Reg_immL_nonneg_mask(mRegL dst, mRegL src1, immL_nonneg_mask mask) %{
10584 match(Set dst (AndL src1 mask));
10585 ins_cost(60);
10587 format %{ "and $dst, $src1, $mask #@andL_Reg_immL_nonneg_mask" %}
10588 ins_encode %{
10589 Register dst = $dst$$Register;
10590 Register src = $src1$$Register;
10591 int size = Assembler::is_jlong_mask($mask$$constant);
10593 __ dext(dst, src, 0, size);
10594 %}
10595 ins_pipe( ialu_regI_regI );
10596 %}
// XOR immediates and the xor-with-minus-one (bitwise NOT) special cases.
// x ^ -1 == ~x, emitted as Loongson gsorn (dst = R0 | ~src).
10598 instruct xorI_Reg_imm_0_65535(mRegI dst, mRegI src1, immI_0_65535 src2) %{
10599 match(Set dst (XorI src1 src2));
10600 ins_cost(60);
10602 format %{ "xori $dst, $src1, $src2 #@xorI_Reg_imm_0_65535" %}
10603 ins_encode %{
10604 Register dst = $dst$$Register;
10605 Register src = $src1$$Register;
10606 int val = $src2$$constant;
10608 __ xori(dst, src, val);
10609 %}
10610 ins_pipe( ialu_regI_regI );
10611 %}
10613 instruct xorI_Reg_immI_M1(mRegI dst, mRegI src1, immI_M1 M1) %{
10614 match(Set dst (XorI src1 M1));
10615 predicate(UseLoongsonISA && Use3A2000);
10616 ins_cost(60);
10618 format %{ "xor $dst, $src1, $M1 #@xorI_Reg_immI_M1" %}
10619 ins_encode %{
10620 Register dst = $dst$$Register;
10621 Register src = $src1$$Register;
10623 __ gsorn(dst, R0, src); // dst = ~src
10624 %}
10625 ins_pipe( ialu_regI_regI );
10626 %}
10628 instruct xorL2I_Reg_immI_M1(mRegI dst, mRegL src1, immI_M1 M1) %{
10629 match(Set dst (XorI (ConvL2I src1) M1));
10630 predicate(UseLoongsonISA && Use3A2000);
10631 ins_cost(60);
10633 format %{ "xor $dst, $src1, $M1 #@xorL2I_Reg_immI_M1" %}
10634 ins_encode %{
10635 Register dst = $dst$$Register;
10636 Register src = $src1$$Register;
10638 __ gsorn(dst, R0, src); // ConvL2I is free here; low bits are what matter
10639 %}
10640 ins_pipe( ialu_regI_regI );
10641 %}
10643 instruct xorL_Reg_imm_0_65535(mRegL dst, mRegL src1, immL_0_65535 src2) %{
10644 match(Set dst (XorL src1 src2));
10645 ins_cost(60);
10647 format %{ "xori $dst, $src1, $src2 #@xorL_Reg_imm_0_65535" %}
10648 ins_encode %{
10649 Register dst = $dst$$Register;
10650 Register src = $src1$$Register;
10651 int val = $src2$$constant;
10653 __ xori(dst, src, val);
10654 %}
10655 ins_pipe( ialu_regI_regI );
10656 %}
10658 /*
10659 instruct xorL_Reg_immL_M1(mRegL dst, mRegL src1, immL_M1 M1) %{
10660 match(Set dst (XorL src1 M1));
10661 predicate(UseLoongsonISA);
10662 ins_cost(60);
10664 format %{ "xor $dst, $src1, $M1 #@xorL_Reg_immL_M1" %}
10665 ins_encode %{
10666 Register dst = $dst$$Register;
10667 Register src = $src1$$Register;
10669 __ gsorn(dst, R0, src);
10670 %}
10671 ins_pipe( ialu_regI_regI );
10672 %}
10673 */
// Fold (0xFF & LoadB mem) into a single unsigned byte load (lbu via
// load_UB_enc); the AND with 255 becomes the zero extension of the load.
10675 instruct lbu_and_lmask(mRegI dst, memory mem, immI_255 mask) %{
10676 match(Set dst (AndI mask (LoadB mem)));
10677 ins_cost(60);
10679 format %{ "lbu $dst, $mem #@lbu_and_lmask" %}
10680 ins_encode(load_UB_enc(dst, mem));
10681 ins_pipe( ialu_loadI );
10682 %}
// Commuted form of lbu_and_lmask: (LoadB mem & 0xFF) as one lbu.
10684 instruct lbu_and_rmask(mRegI dst, memory mem, immI_255 mask) %{
10685 match(Set dst (AndI (LoadB mem) mask));
10686 ins_cost(60);
10688 format %{ "lbu $dst, $mem #@lbu_and_rmask" %}
10689 ins_encode(load_UB_enc(dst, mem));
10690 ins_pipe( ialu_loadI );
10691 %}
// 32-bit AND, register-register.
10693 instruct andI_Reg_Reg(mRegI dst, mRegI src1, mRegI src2) %{
10694 match(Set dst (AndI src1 src2));
10696 format %{ "and $dst, $src1, $src2 #@andI_Reg_Reg" %}
10697 ins_encode %{
10698 Register dst = $dst$$Register;
10699 Register src1 = $src1$$Register;
10700 Register src2 = $src2$$Register;
10701 __ andr(dst, src1, src2);
10702 %}
10703 ins_pipe( ialu_regI_regI );
10704 %}
// Loongson and-not / or-not fusions: (a op (b ^ -1)) == a op ~b, matched in
// both operand orders and emitted as a single gsandn/gsorn.
10706 instruct andnI_Reg_nReg(mRegI dst, mRegI src1, mRegI src2, immI_M1 M1) %{
10707 match(Set dst (AndI src1 (XorI src2 M1)));
10708 predicate(UseLoongsonISA && Use3A2000);
10710 format %{ "andn $dst, $src1, $src2 #@andnI_Reg_nReg" %}
10711 ins_encode %{
10712 Register dst = $dst$$Register;
10713 Register src1 = $src1$$Register;
10714 Register src2 = $src2$$Register;
10716 __ gsandn(dst, src1, src2); // dst = src1 & ~src2
10717 %}
10718 ins_pipe( ialu_regI_regI );
10719 %}
10721 instruct ornI_Reg_nReg(mRegI dst, mRegI src1, mRegI src2, immI_M1 M1) %{
10722 match(Set dst (OrI src1 (XorI src2 M1)));
10723 predicate(UseLoongsonISA && Use3A2000);
10725 format %{ "orn $dst, $src1, $src2 #@ornI_Reg_nReg" %}
10726 ins_encode %{
10727 Register dst = $dst$$Register;
10728 Register src1 = $src1$$Register;
10729 Register src2 = $src2$$Register;
10731 __ gsorn(dst, src1, src2); // dst = src1 | ~src2
10732 %}
10733 ins_pipe( ialu_regI_regI );
10734 %}
10736 instruct andnI_nReg_Reg(mRegI dst, mRegI src1, mRegI src2, immI_M1 M1) %{
10737 match(Set dst (AndI (XorI src1 M1) src2));
10738 predicate(UseLoongsonISA && Use3A2000);
10740 format %{ "andn $dst, $src2, $src1 #@andnI_nReg_Reg" %}
10741 ins_encode %{
10742 Register dst = $dst$$Register;
10743 Register src1 = $src1$$Register;
10744 Register src2 = $src2$$Register;
10746 __ gsandn(dst, src2, src1); // operands swapped: dst = src2 & ~src1
10747 %}
10748 ins_pipe( ialu_regI_regI );
10749 %}
10751 instruct ornI_nReg_Reg(mRegI dst, mRegI src1, mRegI src2, immI_M1 M1) %{
10752 match(Set dst (OrI (XorI src1 M1) src2));
10753 predicate(UseLoongsonISA && Use3A2000);
10755 format %{ "orn $dst, $src2, $src1 #@ornI_nReg_Reg" %}
10756 ins_encode %{
10757 Register dst = $dst$$Register;
10758 Register src1 = $src1$$Register;
10759 Register src2 = $src2$$Register;
10761 __ gsorn(dst, src2, src1); // dst = src2 | ~src1
10762 %}
10763 ins_pipe( ialu_regI_regI );
10764 %}
10766 // And Long Register with Register
// 64-bit AND forms; andi zero-extends its 16-bit immediate, so the 0..65535
// forms need no constant materialization.
10767 instruct andL_Reg_Reg(mRegL dst, mRegL src1, mRegL src2) %{
10768 match(Set dst (AndL src1 src2));
10769 format %{ "AND $dst, $src1, $src2 @ andL_Reg_Reg\n\t" %}
10770 ins_encode %{
10771 Register dst_reg = as_Register($dst$$reg);
10772 Register src1_reg = as_Register($src1$$reg);
10773 Register src2_reg = as_Register($src2$$reg);
10775 __ andr(dst_reg, src1_reg, src2_reg);
10776 %}
10777 ins_pipe( ialu_regL_regL );
10778 %}
10780 instruct andL_Reg_Reg_convI2L(mRegL dst, mRegL src1, mRegI src2) %{
10781 match(Set dst (AndL src1 (ConvI2L src2)));
10782 format %{ "AND $dst, $src1, $src2 @ andL_Reg_Reg_convI2L\n\t" %}
10783 ins_encode %{
10784 Register dst_reg = as_Register($dst$$reg);
10785 Register src1_reg = as_Register($src1$$reg);
10786 Register src2_reg = as_Register($src2$$reg);
10788 __ andr(dst_reg, src1_reg, src2_reg); // assumes src2 sign-extended in reg
10789 %}
10790 ins_pipe( ialu_regL_regL );
10791 %}
10793 instruct andL_Reg_imm_0_65535(mRegL dst, mRegL src1, immL_0_65535 src2) %{
10794 match(Set dst (AndL src1 src2));
10795 ins_cost(60);
10797 format %{ "and $dst, $src1, $src2 #@andL_Reg_imm_0_65535" %}
10798 ins_encode %{
10799 Register dst = $dst$$Register;
10800 Register src = $src1$$Register;
10801 long val = $src2$$constant;
10803 __ andi(dst, src, val);
10804 %}
10805 ins_pipe( ialu_regI_regI );
10806 %}
10808 instruct andL2I_Reg_imm_0_65535(mRegI dst, mRegL src1, immL_0_65535 src2) %{
10809 match(Set dst (ConvL2I (AndL src1 src2)));
10810 ins_cost(60);
10812 format %{ "and $dst, $src1, $src2 #@andL2I_Reg_imm_0_65535" %}
10813 ins_encode %{
10814 Register dst = $dst$$Register;
10815 Register src = $src1$$Register;
10816 long val = $src2$$constant;
10818 __ andi(dst, src, val); // result <= 0xFFFF, so L2I narrowing is free
10819 %}
10820 ins_pipe( ialu_regI_regI );
10821 %}
10823 /*
10824 instruct andnL_Reg_nReg(mRegL dst, mRegL src1, mRegL src2, immL_M1 M1) %{
10825 match(Set dst (AndL src1 (XorL src2 M1)));
10826 predicate(UseLoongsonISA);
10828 format %{ "andn $dst, $src1, $src2 #@andnL_Reg_nReg" %}
10829 ins_encode %{
10830 Register dst = $dst$$Register;
10831 Register src1 = $src1$$Register;
10832 Register src2 = $src2$$Register;
10834 __ gsandn(dst, src1, src2);
10835 %}
10836 ins_pipe( ialu_regI_regI );
10837 %}
10838 */
10840 /*
10841 instruct ornL_Reg_nReg(mRegL dst, mRegL src1, mRegL src2, immL_M1 M1) %{
10842 match(Set dst (OrL src1 (XorL src2 M1)));
10843 predicate(UseLoongsonISA);
10845 format %{ "orn $dst, $src1, $src2 #@ornL_Reg_nReg" %}
10846 ins_encode %{
10847 Register dst = $dst$$Register;
10848 Register src1 = $src1$$Register;
10849 Register src2 = $src2$$Register;
10851 __ gsorn(dst, src1, src2);
10852 %}
10853 ins_pipe( ialu_regI_regI );
10854 %}
10855 */
10857 /*
10858 instruct andnL_nReg_Reg(mRegL dst, mRegL src1, mRegL src2, immL_M1 M1) %{
10859 match(Set dst (AndL (XorL src1 M1) src2));
10860 predicate(UseLoongsonISA);
10862 format %{ "andn $dst, $src2, $src1 #@andnL_nReg_Reg" %}
10863 ins_encode %{
10864 Register dst = $dst$$Register;
10865 Register src1 = $src1$$Register;
10866 Register src2 = $src2$$Register;
10868 __ gsandn(dst, src2, src1);
10869 %}
10870 ins_pipe( ialu_regI_regI );
10871 %}
10872 */
10874 /*
10875 instruct ornL_nReg_Reg(mRegL dst, mRegL src1, mRegL src2, immL_M1 M1) %{
10876 match(Set dst (OrL (XorL src1 M1) src2));
10877 predicate(UseLoongsonISA);
10879 format %{ "orn $dst, $src2, $src1 #@ornL_nReg_Reg" %}
10880 ins_encode %{
10881 Register dst = $dst$$Register;
10882 Register src1 = $src1$$Register;
10883 Register src2 = $src2$$Register;
10885 __ gsorn(dst, src2, src1);
10886 %}
10887 ins_pipe( ialu_regI_regI );
10888 %}
10889 */
// AND with specific all-ones-except-a-bit-range constants, emitted as a
// dins(dst, R0, pos, size) that zeroes 'size' bits at 'pos':
//   -8   = ~0x7  -> clear bits 0..2;  -5 = ~0x4  -> clear bit 2
//   -7   = ~0x6  -> clear bits 1..2;  -4 = ~0x3  -> clear bits 0..1
//   -121 = ~0x78 -> clear bits 3..6
10891 instruct andL_Reg_immL_M8(mRegL dst, immL_M8 M8) %{
10892 match(Set dst (AndL dst M8));
10893 ins_cost(60);
10895 format %{ "and $dst, $dst, $M8 #@andL_Reg_immL_M8" %}
10896 ins_encode %{
10897 Register dst = $dst$$Register;
10899 __ dins(dst, R0, 0, 3);
10900 %}
10901 ins_pipe( ialu_regI_regI );
10902 %}
10904 instruct andL_Reg_immL_M5(mRegL dst, immL_M5 M5) %{
10905 match(Set dst (AndL dst M5));
10906 ins_cost(60);
10908 format %{ "and $dst, $dst, $M5 #@andL_Reg_immL_M5" %}
10909 ins_encode %{
10910 Register dst = $dst$$Register;
10912 __ dins(dst, R0, 2, 1);
10913 %}
10914 ins_pipe( ialu_regI_regI );
10915 %}
10917 instruct andL_Reg_immL_M7(mRegL dst, immL_M7 M7) %{
10918 match(Set dst (AndL dst M7));
10919 ins_cost(60);
10921 format %{ "and $dst, $dst, $M7 #@andL_Reg_immL_M7" %}
10922 ins_encode %{
10923 Register dst = $dst$$Register;
10925 __ dins(dst, R0, 1, 2);
10926 %}
10927 ins_pipe( ialu_regI_regI );
10928 %}
10930 instruct andL_Reg_immL_M4(mRegL dst, immL_M4 M4) %{
10931 match(Set dst (AndL dst M4));
10932 ins_cost(60);
10934 format %{ "and $dst, $dst, $M4 #@andL_Reg_immL_M4" %}
10935 ins_encode %{
10936 Register dst = $dst$$Register;
10938 __ dins(dst, R0, 0, 2);
10939 %}
10940 ins_pipe( ialu_regI_regI );
10941 %}
10943 instruct andL_Reg_immL_M121(mRegL dst, immL_M121 M121) %{
10944 match(Set dst (AndL dst M121));
10945 ins_cost(60);
10947 format %{ "and $dst, $dst, $M121 #@andL_Reg_immL_M121" %}
10948 ins_encode %{
10949 Register dst = $dst$$Register;
10951 __ dins(dst, R0, 3, 4);
10952 %}
10953 ins_pipe( ialu_regI_regI );
10954 %}
// Or Long Register with Register
// 64-bit bitwise OR: dst = src1 | src2.
instruct orL_Reg_Reg(mRegL dst, mRegL src1, mRegL src2) %{
  match(Set dst (OrL src1 src2));
  format %{ "OR $dst, $src1, $src2 @ orL_Reg_Reg\t" %}
  ins_encode %{
    Register dst_reg = $dst$$Register;
    Register src1_reg = $src1$$Register;
    Register src2_reg = $src2$$Register;

    __ orr(dst_reg, src1_reg, src2_reg);
  %}
  ins_pipe( ialu_regL_regL );
%}
// 64-bit OR where the first operand is a pointer reinterpreted as a long
// (CastP2X is a no-op at the machine level; the pointer bits are used directly).
instruct orL_Reg_P2XReg(mRegL dst, mRegP src1, mRegL src2) %{
  match(Set dst (OrL (CastP2X src1) src2));
  format %{ "OR $dst, $src1, $src2 @ orL_Reg_P2XReg\t" %}
  ins_encode %{
    Register dst_reg = $dst$$Register;
    Register src1_reg = $src1$$Register;
    Register src2_reg = $src2$$Register;

    __ orr(dst_reg, src1_reg, src2_reg);
  %}
  ins_pipe( ialu_regL_regL );
%}
// Xor Long Register with Register
// 64-bit bitwise XOR: dst = src1 ^ src2.
instruct xorL_Reg_Reg(mRegL dst, mRegL src1, mRegL src2) %{
  match(Set dst (XorL src1 src2));
  format %{ "XOR $dst, $src1, $src2 @ xorL_Reg_Reg\t" %}
  ins_encode %{
    Register dst_reg = as_Register($dst$$reg);
    Register src1_reg = as_Register($src1$$reg);
    Register src2_reg = as_Register($src2$$reg);

    __ xorr(dst_reg, src1_reg, src2_reg);
  %}
  ins_pipe( ialu_regL_regL );
%}
// Shift Left by 8-bit immediate
// 32-bit shift-left-logical by a constant; sll also sign-extends the
// 32-bit result into the 64-bit register per the MIPS64 convention.
instruct salI_Reg_imm(mRegI dst, mRegI src, immI8 shift) %{
  match(Set dst (LShiftI src shift));

  format %{ "SHL $dst, $src, $shift #@salI_Reg_imm" %}
  ins_encode %{
    Register src = $src$$Register;
    Register dst = $dst$$Register;
    int shamt = $shift$$constant;

    __ sll(dst, src, shamt);
  %}
  ins_pipe( ialu_regI_regI );
%}
// (ConvL2I src) << shift: the 32-bit sll both truncates the long to an int
// and performs the shift, so no separate narrowing instruction is needed.
instruct salL2I_Reg_imm(mRegI dst, mRegL src, immI8 shift) %{
  match(Set dst (LShiftI (ConvL2I src) shift));

  format %{ "SHL $dst, $src, $shift #@salL2I_Reg_imm" %}
  ins_encode %{
    Register src = $src$$Register;
    Register dst = $dst$$Register;
    int shamt = $shift$$constant;

    __ sll(dst, src, shamt);
  %}
  ins_pipe( ialu_regI_regI );
%}
// (src << 16) & 0xFFFF0000: the mask -65536 keeps exactly the bits the
// 16-bit shift produces, so a single sll-by-16 implements the whole pattern.
instruct salI_Reg_imm_and_M65536(mRegI dst, mRegI src, immI_16 shift, immI_M65536 mask) %{
  match(Set dst (AndI (LShiftI src shift) mask));

  format %{ "SHL $dst, $src, $shift #@salI_Reg_imm_and_M65536" %}
  ins_encode %{
    Register src = $src$$Register;
    Register dst = $dst$$Register;

    __ sll(dst, src, 16);
  %}
  ins_pipe( ialu_regI_regI );
%}
// ((int)(src & 7) << 16) >> 16: the shift pair is an i2s sign-extension, but
// since (src & 7) fits in 3 bits it is unaffected, so andi alone suffices.
instruct land7_2_s(mRegI dst, mRegL src, immL7 seven, immI_16 sixteen)
%{
  match(Set dst (RShiftI (LShiftI (ConvL2I (AndL src seven)) sixteen) sixteen));

  format %{ "andi $dst, $src, 7\t# @land7_2_s" %}
  ins_encode %{
    Register src = $src$$Register;
    Register dst = $dst$$Register;

    __ andi(dst, src, 7);
  %}
  ins_pipe(ialu_regI_regI);
%}
// ((src1 | imm) << 16) >> 16 collapsed to a single ori.
// NOTE(review): this assumes the surrounding i2s shift pair is redundant for
// the matched operand range (imm is constrained to 0..32767) — relies on the
// matcher only selecting this when the sign-extension cannot change the result.
instruct ori2s(mRegI dst, mRegI src1, immI_0_32767 src2, immI_16 sixteen)
%{
  match(Set dst (RShiftI (LShiftI (OrI src1 src2) sixteen) sixteen));

  format %{ "ori $dst, $src1, $src2\t# @ori2s" %}
  ins_encode %{
    Register src = $src1$$Register;
    int val = $src2$$constant;
    Register dst = $dst$$Register;

    __ ori(dst, src, val);
  %}
  ins_pipe(ialu_regI_regI);
%}
// Logical Shift Right by 16, followed by Arithmetic Shift Left by 16.
// This idiom is used by the compiler the i2s bytecode.
// Implemented with the single MIPS32r2 seh (sign-extend halfword) instruction.
instruct i2s(mRegI dst, mRegI src, immI_16 sixteen)
%{
  match(Set dst (RShiftI (LShiftI src sixteen) sixteen));

  format %{ "i2s $dst, $src\t# @i2s" %}
  ins_encode %{
    Register src = $src$$Register;
    Register dst = $dst$$Register;

    __ seh(dst, src);
  %}
  ins_pipe(ialu_regI_regI);
%}
// Logical Shift Right by 24, followed by Arithmetic Shift Left by 24.
// This idiom is used by the compiler for the i2b bytecode.
// Implemented with the single MIPS32r2 seb (sign-extend byte) instruction.
instruct i2b(mRegI dst, mRegI src, immI_24 twentyfour)
%{
  match(Set dst (RShiftI (LShiftI src twentyfour) twentyfour));

  format %{ "i2b $dst, $src\t# @i2b" %}
  ins_encode %{
    Register src = $src$$Register;
    Register dst = $dst$$Register;

    __ seb(dst, src);
  %}
  ins_pipe(ialu_regI_regI);
%}
// (ConvL2I src) << shift — same pattern and encoding as salL2I_Reg_imm above.
// NOTE(review): appears to duplicate salL2I_Reg_imm; presumably kept for
// historical reasons — confirm whether one of the two can be removed.
instruct salI_RegL2I_imm(mRegI dst, mRegL src, immI8 shift) %{
  match(Set dst (LShiftI (ConvL2I src) shift));

  format %{ "SHL $dst, $src, $shift #@salI_RegL2I_imm" %}
  ins_encode %{
    Register src = $src$$Register;
    Register dst = $dst$$Register;
    int shamt = $shift$$constant;

    __ sll(dst, src, shamt);
  %}
  ins_pipe( ialu_regI_regI );
%}
// Shift Left by 8-bit immediate
// 32-bit shift-left-logical by a register amount (sllv uses the low 5 bits
// of the shift register, matching Java's int shift semantics).
instruct salI_Reg_Reg(mRegI dst, mRegI src, mRegI shift) %{
  match(Set dst (LShiftI src shift));

  format %{ "SHL $dst, $src, $shift #@salI_Reg_Reg" %}
  ins_encode %{
    Register src = $src$$Register;
    Register dst = $dst$$Register;
    Register shamt = $shift$$Register;
    __ sllv(dst, src, shamt);
  %}
  ins_pipe( ialu_regI_regI );
%}
// Shift Left Long
// 64-bit shift-left by constant. dsll encodes only a 5-bit shift amount,
// so amounts >= 32 (after masking to 6 bits) use dsll32 with (sa - 32).
instruct salL_Reg_imm(mRegL dst, mRegL src, immI8 shift) %{
  //predicate(UseNewLongLShift);
  match(Set dst (LShiftL src shift));
  ins_cost(100);
  format %{ "salL $dst, $src, $shift @ salL_Reg_imm" %}
  ins_encode %{
    Register src_reg = as_Register($src$$reg);
    Register dst_reg = as_Register($dst$$reg);
    int shamt = $shift$$constant;

    // Shift fits the 5-bit dsll immediate field directly.
    if (__ is_simm(shamt, 5))
      __ dsll(dst_reg, src_reg, shamt);
    else
    {
      // Mask to 6 bits (Java long shifts use shamt & 63), then split
      // between dsll (sa < 32) and dsll32 (sa >= 32).
      int sa = Assembler::low(shamt, 6);
      if (sa < 32) {
        __ dsll(dst_reg, src_reg, sa);
      } else {
        __ dsll32(dst_reg, src_reg, sa - 32);
      }
    }
  %}
  ins_pipe( ialu_regL_regL );
%}
// (ConvI2L src) << shift by constant. The int source is already
// sign-extended in its 64-bit register, so dsll operates on it directly;
// shift amounts >= 32 use dsll32, as in salL_Reg_imm.
instruct salL_RegI2L_imm(mRegL dst, mRegI src, immI8 shift) %{
  //predicate(UseNewLongLShift);
  match(Set dst (LShiftL (ConvI2L src) shift));
  ins_cost(100);
  format %{ "salL $dst, $src, $shift @ salL_RegI2L_imm" %}
  ins_encode %{
    Register src_reg = as_Register($src$$reg);
    Register dst_reg = as_Register($dst$$reg);
    int shamt = $shift$$constant;

    // Small shifts fit dsll's 5-bit field; otherwise mask to 6 bits and
    // split between dsll and dsll32.
    if (__ is_simm(shamt, 5))
      __ dsll(dst_reg, src_reg, shamt);
    else
    {
      int sa = Assembler::low(shamt, 6);
      if (sa < 32) {
        __ dsll(dst_reg, src_reg, sa);
      } else {
        __ dsll32(dst_reg, src_reg, sa - 32);
      }
    }
  %}
  ins_pipe( ialu_regL_regL );
%}
// Shift Left Long
// 64-bit shift-left by a register amount (dsllv uses the low 6 bits of
// the shift register, matching Java's long shift semantics).
instruct salL_Reg_Reg(mRegL dst, mRegL src, mRegI shift) %{
  //predicate(UseNewLongLShift);
  match(Set dst (LShiftL src shift));
  ins_cost(100);
  format %{ "salL $dst, $src, $shift @ salL_Reg_Reg" %}
  ins_encode %{
    Register src_reg = as_Register($src$$reg);
    Register dst_reg = as_Register($dst$$reg);

    __ dsllv(dst_reg, src_reg, $shift$$Register);
  %}
  ins_pipe( ialu_regL_regL );
%}
// (ConvI2L src) << shift by constant.
// NOTE(review): matches the same ideal pattern as salL_RegI2L_imm above and
// emits the same code — presumably a duplicate; confirm which one the
// matcher actually selects.
instruct salL_convI2L_Reg_imm(mRegL dst, mRegI src, immI8 shift) %{
  match(Set dst (LShiftL (ConvI2L src) shift));
  ins_cost(100);
  format %{ "salL $dst, $src, $shift @ salL_convI2L_Reg_imm" %}
  ins_encode %{
    Register src_reg = as_Register($src$$reg);
    Register dst_reg = as_Register($dst$$reg);
    int shamt = $shift$$constant;

    // dsll takes a 5-bit amount; larger (6-bit masked) amounts split
    // between dsll and dsll32.
    if (__ is_simm(shamt, 5)) {
      __ dsll(dst_reg, src_reg, shamt);
    } else {
      int sa = Assembler::low(shamt, 6);
      if (sa < 32) {
        __ dsll(dst_reg, src_reg, sa);
      } else {
        __ dsll32(dst_reg, src_reg, sa - 32);
      }
    }
  %}
  ins_pipe( ialu_regL_regL );
%}
// Shift Right Long
// 64-bit arithmetic shift-right by constant. The amount is masked to 6 bits
// (Java semantics); dsra covers 0..31 and dsra32 covers 32..63.
instruct sarL_Reg_imm(mRegL dst, mRegL src, immI8 shift) %{
  match(Set dst (RShiftL src shift));
  ins_cost(100);
  format %{ "sarL $dst, $src, $shift @ sarL_Reg_imm" %}
  ins_encode %{
    Register src_reg = as_Register($src$$reg);
    Register dst_reg = as_Register($dst$$reg);
    // Mask to 6 bits up front, matching long-shift semantics.
    int shamt = ($shift$$constant & 0x3f);
    if (__ is_simm(shamt, 5))
      __ dsra(dst_reg, src_reg, shamt);
    else {
      int sa = Assembler::low(shamt, 6);
      if (sa < 32) {
        __ dsra(dst_reg, src_reg, sa);
      } else {
        __ dsra32(dst_reg, src_reg, sa - 32);
      }
    }
  %}
  ins_pipe( ialu_regL_regL );
%}
// (int)(src >> shift) for shift in [32, 63]: dsra32 shifts by (shamt - 32)
// and leaves a properly sign-extended 32-bit value, so the ConvL2I is free.
instruct sarL2I_Reg_immI_32_63(mRegI dst, mRegL src, immI_32_63 shift) %{
  match(Set dst (ConvL2I (RShiftL src shift)));
  ins_cost(100);
  format %{ "sarL $dst, $src, $shift @ sarL2I_Reg_immI_32_63" %}
  ins_encode %{
    Register src_reg = as_Register($src$$reg);
    Register dst_reg = as_Register($dst$$reg);
    int shamt = $shift$$constant;

    __ dsra32(dst_reg, src_reg, shamt - 32);
  %}
  ins_pipe( ialu_regL_regL );
%}
// Shift Right Long arithmetically
// 64-bit arithmetic shift-right by a register amount (dsrav uses the low
// 6 bits of the shift register).
instruct sarL_Reg_Reg(mRegL dst, mRegL src, mRegI shift) %{
  //predicate(UseNewLongLShift);
  match(Set dst (RShiftL src shift));
  ins_cost(100);
  format %{ "sarL $dst, $src, $shift @ sarL_Reg_Reg" %}
  ins_encode %{
    Register src_reg = as_Register($src$$reg);
    Register dst_reg = as_Register($dst$$reg);

    __ dsrav(dst_reg, src_reg, $shift$$Register);
  %}
  ins_pipe( ialu_regL_regL );
%}
// Shift Right Long logically
// 64-bit logical shift-right by a register amount (dsrlv uses the low
// 6 bits of the shift register).
instruct slrL_Reg_Reg(mRegL dst, mRegL src, mRegI shift) %{
  match(Set dst (URShiftL src shift));
  ins_cost(100);
  format %{ "slrL $dst, $src, $shift @ slrL_Reg_Reg" %}
  ins_encode %{
    Register src_reg = as_Register($src$$reg);
    Register dst_reg = as_Register($dst$$reg);

    __ dsrlv(dst_reg, src_reg, $shift$$Register);
  %}
  ins_pipe( ialu_regL_regL );
%}
// 64-bit logical shift-right by a constant in [0, 31]: fits dsrl's
// 5-bit immediate field directly.
instruct slrL_Reg_immI_0_31(mRegL dst, mRegL src, immI_0_31 shift) %{
  match(Set dst (URShiftL src shift));
  ins_cost(80);
  format %{ "slrL $dst, $src, $shift @ slrL_Reg_immI_0_31" %}
  ins_encode %{
    Register src_reg = as_Register($src$$reg);
    Register dst_reg = as_Register($dst$$reg);
    int shamt = $shift$$constant;

    __ dsrl(dst_reg, src_reg, shamt);
  %}
  ins_pipe( ialu_regL_regL );
%}
// ((int)(src >>> shift)) & 0x7fffffff implemented as a single bit-field
// extract: dext pulls 31 bits starting at bit `shift`, zero-extending.
instruct slrL_Reg_immI_0_31_and_max_int(mRegI dst, mRegL src, immI_0_31 shift, immI_MaxI max_int) %{
  match(Set dst (AndI (ConvL2I (URShiftL src shift)) max_int));
  ins_cost(80);
  format %{ "dext $dst, $src, $shift, 31 @ slrL_Reg_immI_0_31_and_max_int" %}
  ins_encode %{
    Register src_reg = as_Register($src$$reg);
    Register dst_reg = as_Register($dst$$reg);
    int shamt = $shift$$constant;

    // dext dst, src, pos=shamt, size=31: extract and zero-extend 31 bits.
    __ dext(dst_reg, src_reg, shamt, 31);
  %}
  ins_pipe( ialu_regL_regL );
%}
// (CastP2X src) >>> shift for shift in [0, 31]: the pointer bits are used
// directly (CastP2X is free), then dsrl shifts them.
instruct slrL_P2XReg_immI_0_31(mRegL dst, mRegP src, immI_0_31 shift) %{
  match(Set dst (URShiftL (CastP2X src) shift));
  ins_cost(80);
  format %{ "slrL $dst, $src, $shift @ slrL_P2XReg_immI_0_31" %}
  ins_encode %{
    Register src_reg = as_Register($src$$reg);
    Register dst_reg = as_Register($dst$$reg);
    int shamt = $shift$$constant;

    __ dsrl(dst_reg, src_reg, shamt);
  %}
  ins_pipe( ialu_regL_regL );
%}
// 64-bit logical shift-right by a constant in [32, 63]: dsrl32 encodes
// the amount as (shamt - 32).
instruct slrL_Reg_immI_32_63(mRegL dst, mRegL src, immI_32_63 shift) %{
  match(Set dst (URShiftL src shift));
  ins_cost(80);
  format %{ "slrL $dst, $src, $shift @ slrL_Reg_immI_32_63" %}
  ins_encode %{
    Register src_reg = as_Register($src$$reg);
    Register dst_reg = as_Register($dst$$reg);
    int shamt = $shift$$constant;

    __ dsrl32(dst_reg, src_reg, shamt - 32);
  %}
  ins_pipe( ialu_regL_regL );
%}
// (int)(src >>> shift) for shift strictly greater than 32.
// The predicate excludes shift == 32: for shift > 32 the dsrl32 result fits
// in 31 bits (top bit clear), so it is already a valid sign-extended int;
// at exactly 32 bit 31 could be set and would need explicit sign-extension.
instruct slrL_Reg_immI_convL2I(mRegI dst, mRegL src, immI_32_63 shift) %{
  match(Set dst (ConvL2I (URShiftL src shift)));
  predicate(n->in(1)->in(2)->get_int() > 32);
  ins_cost(80);
  format %{ "slrL $dst, $src, $shift @ slrL_Reg_immI_convL2I" %}
  ins_encode %{
    Register src_reg = as_Register($src$$reg);
    Register dst_reg = as_Register($dst$$reg);
    int shamt = $shift$$constant;

    __ dsrl32(dst_reg, src_reg, shamt - 32);
  %}
  ins_pipe( ialu_regL_regL );
%}
// (CastP2X src) >>> shift for shift in [32, 63]: pointer bits shifted with
// dsrl32 encoding (shamt - 32).
instruct slrL_P2XReg_immI_32_63(mRegL dst, mRegP src, immI_32_63 shift) %{
  match(Set dst (URShiftL (CastP2X src) shift));
  ins_cost(80);
  format %{ "slrL $dst, $src, $shift @ slrL_P2XReg_immI_32_63" %}
  ins_encode %{
    Register src_reg = as_Register($src$$reg);
    Register dst_reg = as_Register($dst$$reg);
    int shamt = $shift$$constant;

    __ dsrl32(dst_reg, src_reg, shamt - 32);
  %}
  ins_pipe( ialu_regL_regL );
%}
// Xor Instructions
// Xor Register with Register
// 32-bit XOR. The trailing sll-by-0 re-sign-extends the low 32 bits so the
// 64-bit register stays in canonical int form (required on MIPS64 when the
// inputs' upper halves might differ).
instruct xorI_Reg_Reg(mRegI dst, mRegI src1, mRegI src2) %{
  match(Set dst (XorI src1 src2));

  format %{ "XOR $dst, $src1, $src2 #@xorI_Reg_Reg" %}

  ins_encode %{
    Register dst = $dst$$Register;
    Register src1 = $src1$$Register;
    Register src2 = $src2$$Register;
    __ xorr(dst, src1, src2);
    __ sll(dst, dst, 0); /* long -> int */
  %}

  ins_pipe( ialu_regI_regI );
%}
// Or Instructions
// Or Register with Register
// 32-bit OR; no re-extension step here (unlike xorI_Reg_Reg) — ORing two
// canonically sign-extended ints yields a sign-extended result.
instruct orI_Reg_Reg(mRegI dst, mRegI src1, mRegI src2) %{
  match(Set dst (OrI src1 src2));

  format %{ "OR $dst, $src1, $src2 #@orI_Reg_Reg" %}
  ins_encode %{
    Register dst = $dst$$Register;
    Register src1 = $src1$$Register;
    Register src2 = $src2$$Register;
    __ orr(dst, src1, src2);
  %}

  ins_pipe( ialu_regI_regI );
%}
// (src >>> rshift) | ((src & 1) << lshift) where rshift + lshift == 32:
// rotating right by 1 moves bit 0 to bit 31, then an extra srl by
// (rshift - 1) aligns the remaining bits; skipped when rshift == 1.
instruct rotI_shr_logical_Reg(mRegI dst, mRegI src, immI_0_31 rshift, immI_0_31 lshift, immI_1 one) %{
  match(Set dst (OrI (URShiftI src rshift) (LShiftI (AndI src one) lshift)));
  predicate(32 == ((n->in(1)->in(2)->get_int() + n->in(2)->in(2)->get_int())));

  format %{ "rotr $dst, $src, 1 ...\n\t"
            "srl $dst, $dst, ($rshift-1) @ rotI_shr_logical_Reg" %}
  ins_encode %{
    Register dst = $dst$$Register;
    Register src = $src$$Register;
    int rshift = $rshift$$constant;

    __ rotr(dst, src, 1);
    // Only shift further when there is a remaining amount.
    if (rshift - 1) {
      __ srl(dst, dst, rshift - 1);
    }
  %}

  ins_pipe( ialu_regI_regI );
%}
// OrI with a pointer reinterpreted as an integer value.
// NOTE(review): operands are declared as mRegL/mRegP although the ideal op
// is OrI — presumably intentional for pointer-bit patterns; confirm against
// the matcher's operand typing rules.
instruct orI_Reg_castP2X(mRegL dst, mRegL src1, mRegP src2) %{
  match(Set dst (OrI src1 (CastP2X src2)));

  format %{ "OR $dst, $src1, $src2 #@orI_Reg_castP2X" %}
  ins_encode %{
    Register dst = $dst$$Register;
    Register src1 = $src1$$Register;
    Register src2 = $src2$$Register;
    __ orr(dst, src1, src2);
  %}

  ins_pipe( ialu_regI_regI );
%}
// Logical Shift Right by 8-bit immediate
// 32-bit logical shift-right by a constant.
instruct shr_logical_Reg_imm(mRegI dst, mRegI src, immI8 shift) %{
  match(Set dst (URShiftI src shift));
  // effect(KILL cr);

  format %{ "SRL $dst, $src, $shift #@shr_logical_Reg_imm" %}
  ins_encode %{
    Register src = $src$$Register;
    Register dst = $dst$$Register;
    int shift = $shift$$constant;

    __ srl(dst, src, shift);
  %}
  ins_pipe( ialu_regI_regI );
%}
// (src >>> shift) & mask where mask is a contiguous low-bit mask:
// folded into a single ext (extract bit field) instruction.
instruct shr_logical_Reg_imm_nonneg_mask(mRegI dst, mRegI src, immI_0_31 shift, immI_nonneg_mask mask) %{
  match(Set dst (AndI (URShiftI src shift) mask));

  format %{ "ext $dst, $src, $shift, one-bits($mask) #@shr_logical_Reg_imm_nonneg_mask" %}
  ins_encode %{
    Register src = $src$$Register;
    Register dst = $dst$$Register;
    int pos = $shift$$constant;
    // is_int_mask returns the width (number of one-bits) of the mask.
    int size = Assembler::is_int_mask($mask$$constant);

    __ ext(dst, src, pos, size);
  %}
  ins_pipe( ialu_regI_regI );
%}
// Rotate-left of an int: (dst << lshift) | (dst >>> rshift) with
// lshift + rshift == 0 mod 32 is a rotate-right by rshift.
instruct rolI_Reg_immI_0_31(mRegI dst, immI_0_31 lshift, immI_0_31 rshift)
%{
  predicate(0 == ((n->in(1)->in(2)->get_int() + n->in(2)->in(2)->get_int()) & 0x1f));
  match(Set dst (OrI (LShiftI dst lshift) (URShiftI dst rshift)));

  ins_cost(100);
  format %{ "rotr $dst, $dst, $rshift #@rolI_Reg_immI_0_31" %}
  ins_encode %{
    Register dst = $dst$$Register;
    int sa = $rshift$$constant;

    __ rotr(dst, dst, sa);
  %}
  ins_pipe( ialu_regI_regI );
%}
// Rotate-left of a long where the equivalent rotate-right amount is in
// [0, 31]: lshift + rshift == 0 mod 64, implemented with drotr.
instruct rolL_Reg_immI_0_31(mRegL dst, immI_32_63 lshift, immI_0_31 rshift)
%{
  predicate(0 == ((n->in(1)->in(2)->get_int() + n->in(2)->in(2)->get_int()) & 0x3f));
  match(Set dst (OrL (LShiftL dst lshift) (URShiftL dst rshift)));

  ins_cost(100);
  format %{ "rotr $dst, $dst, $rshift #@rolL_Reg_immI_0_31" %}
  ins_encode %{
    Register dst = $dst$$Register;
    int sa = $rshift$$constant;

    __ drotr(dst, dst, sa);
  %}
  ins_pipe( ialu_regI_regI );
%}
// Rotate-left of a long where the equivalent rotate-right amount is in
// [32, 63]: drotr32 encodes the amount as (rshift - 32).
instruct rolL_Reg_immI_32_63(mRegL dst, immI_0_31 lshift, immI_32_63 rshift)
%{
  predicate(0 == ((n->in(1)->in(2)->get_int() + n->in(2)->in(2)->get_int()) & 0x3f));
  match(Set dst (OrL (LShiftL dst lshift) (URShiftL dst rshift)));

  ins_cost(100);
  format %{ "rotr $dst, $dst, $rshift #@rolL_Reg_immI_32_63" %}
  ins_encode %{
    Register dst = $dst$$Register;
    int sa = $rshift$$constant;

    __ drotr32(dst, dst, sa - 32);
  %}
  ins_pipe( ialu_regI_regI );
%}
// Rotate-right of an int: (dst >>> rshift) | (dst << lshift) with
// rshift + lshift == 0 mod 32, implemented directly with rotr.
instruct rorI_Reg_immI_0_31(mRegI dst, immI_0_31 rshift, immI_0_31 lshift)
%{
  predicate(0 == ((n->in(1)->in(2)->get_int() + n->in(2)->in(2)->get_int()) & 0x1f));
  match(Set dst (OrI (URShiftI dst rshift) (LShiftI dst lshift)));

  ins_cost(100);
  format %{ "rotr $dst, $dst, $rshift #@rorI_Reg_immI_0_31" %}
  ins_encode %{
    Register dst = $dst$$Register;
    int sa = $rshift$$constant;

    __ rotr(dst, dst, sa);
  %}
  ins_pipe( ialu_regI_regI );
%}
// Rotate-right of a long by an amount in [0, 31]: rshift + lshift == 0
// mod 64, implemented with drotr.
instruct rorL_Reg_immI_0_31(mRegL dst, immI_0_31 rshift, immI_32_63 lshift)
%{
  predicate(0 == ((n->in(1)->in(2)->get_int() + n->in(2)->in(2)->get_int()) & 0x3f));
  match(Set dst (OrL (URShiftL dst rshift) (LShiftL dst lshift)));

  ins_cost(100);
  format %{ "rotr $dst, $dst, $rshift #@rorL_Reg_immI_0_31" %}
  ins_encode %{
    Register dst = $dst$$Register;
    int sa = $rshift$$constant;

    __ drotr(dst, dst, sa);
  %}
  ins_pipe( ialu_regI_regI );
%}
// Rotate-right of a long by an amount in [32, 63]: drotr32 encodes the
// amount as (rshift - 32).
instruct rorL_Reg_immI_32_63(mRegL dst, immI_32_63 rshift, immI_0_31 lshift)
%{
  predicate(0 == ((n->in(1)->in(2)->get_int() + n->in(2)->in(2)->get_int()) & 0x3f));
  match(Set dst (OrL (URShiftL dst rshift) (LShiftL dst lshift)));

  ins_cost(100);
  format %{ "rotr $dst, $dst, $rshift #@rorL_Reg_immI_32_63" %}
  ins_encode %{
    Register dst = $dst$$Register;
    int sa = $rshift$$constant;

    __ drotr32(dst, dst, sa - 32);
  %}
  ins_pipe( ialu_regI_regI );
%}
// Logical Shift Right
// 32-bit logical shift-right by a register amount (srlv uses the low
// 5 bits of the shift register).
instruct shr_logical_Reg_Reg(mRegI dst, mRegI src, mRegI shift) %{
  match(Set dst (URShiftI src shift));

  format %{ "SRL $dst, $src, $shift #@shr_logical_Reg_Reg" %}
  ins_encode %{
    Register src = $src$$Register;
    Register dst = $dst$$Register;
    Register shift = $shift$$Register;
    __ srlv(dst, src, shift);
  %}
  ins_pipe( ialu_regI_regI );
%}
// 32-bit arithmetic shift-right by a constant.
instruct shr_arith_Reg_imm(mRegI dst, mRegI src, immI8 shift) %{
  match(Set dst (RShiftI src shift));
  // effect(KILL cr);

  format %{ "SRA $dst, $src, $shift #@shr_arith_Reg_imm" %}
  ins_encode %{
    Register src = $src$$Register;
    Register dst = $dst$$Register;
    int shift = $shift$$constant;
    __ sra(dst, src, shift);
  %}
  ins_pipe( ialu_regI_regI );
%}
// 32-bit arithmetic shift-right by a register amount (srav uses the low
// 5 bits of the shift register).
instruct shr_arith_Reg_Reg(mRegI dst, mRegI src, mRegI shift) %{
  match(Set dst (RShiftI src shift));
  // effect(KILL cr);

  format %{ "SRA $dst, $src, $shift #@shr_arith_Reg_Reg" %}
  ins_encode %{
    Register src = $src$$Register;
    Register dst = $dst$$Register;
    Register shift = $shift$$Register;
    __ srav(dst, src, shift);
  %}
  ins_pipe( ialu_regI_regI );
%}
//----------Convert Int to Boolean---------------------------------------------
// dst = (src != 0) ? 1 : 0. When dst aliases src, src is first copied into
// AT so the movz condition register isn't clobbered by writing dst.
instruct convI2B(mRegI dst, mRegI src) %{
  match(Set dst (Conv2B src));

  ins_cost(100);
  format %{ "convI2B $dst, $src @ convI2B" %}
  ins_encode %{
    Register dst = as_Register($dst$$reg);
    Register src = as_Register($src$$reg);

    if (dst != src) {
      // dst = 1, then dst = 0 if src == 0.
      __ daddiu(dst, R0, 1);
      __ movz(dst, R0, src);
    } else {
      // Save src first: writing dst would destroy the value movz tests.
      __ move(AT, src);
      __ daddiu(dst, R0, 1);
      __ movz(dst, R0, AT);
    }
  %}

  ins_pipe( ialu_regL_regL );
%}
// int -> long: sll-by-0 sign-extends the low 32 bits. Skipped entirely when
// dst == src (the value is assumed already in canonical sign-extended form).
instruct convI2L_reg( mRegL dst, mRegI src) %{
  match(Set dst (ConvI2L src));

  ins_cost(100);
  format %{ "SLL $dst, $src @ convI2L_reg\t" %}
  ins_encode %{
    Register dst = as_Register($dst$$reg);
    Register src = as_Register($src$$reg);

    if(dst != src) __ sll(dst, src, 0);
  %}
  ins_pipe( ialu_regL_regL );
%}
// long -> int: truncate to the low 32 bits and sign-extend via sll-by-0.
instruct convL2I_reg( mRegI dst, mRegL src ) %{
  match(Set dst (ConvL2I src));

  format %{ "MOV $dst, $src @ convL2I_reg" %}
  ins_encode %{
    Register dst = as_Register($dst$$reg);
    Register src = as_Register($src$$reg);

    __ sll(dst, src, 0);
  %}

  ins_pipe( ialu_regI_regI );
%}
// (long)(int)src: the round trip is exactly a sign-extension of the low
// 32 bits, done with one sll-by-0.
instruct convL2I2L_reg( mRegL dst, mRegL src ) %{
  match(Set dst (ConvI2L (ConvL2I src)));

  format %{ "sll $dst, $src, 0 @ convL2I2L_reg" %}
  ins_encode %{
    Register dst = as_Register($dst$$reg);
    Register src = as_Register($src$$reg);

    __ sll(dst, src, 0);
  %}

  ins_pipe( ialu_regI_regI );
%}
// long -> double: move the 64-bit integer into the FPU register, then
// convert with cvt.d.l.
instruct convL2D_reg( regD dst, mRegL src ) %{
  match(Set dst (ConvL2D src));
  format %{ "convL2D $dst, $src @ convL2D_reg" %}
  ins_encode %{
    Register src = as_Register($src$$reg);
    FloatRegister dst = as_FloatRegister($dst$$reg);

    __ dmtc1(src, dst);
    __ cvt_d_l(dst, dst);
  %}

  ins_pipe( pipe_slow );
%}
// double -> long, fast path. trunc.l.d handles the common case; when the
// result equals max_long (the value the FPU produces on overflow/NaN) the
// slow sequence distinguishes overflow direction and NaN to produce the
// Java-specified results (max_long, min_long, or 0).
instruct convD2L_reg_fast( mRegL dst, regD src ) %{
  match(Set dst (ConvD2L src));
  ins_cost(150);
  format %{ "convD2L $dst, $src @ convD2L_reg_fast" %}
  ins_encode %{
    Register dst = as_Register($dst$$reg);
    FloatRegister src = as_FloatRegister($src$$reg);

    Label Done;

    __ trunc_l_d(F30, src);
    // max_long: 0x7fffffffffffffff
    // __ set64(AT, 0x7fffffffffffffff);
    // Build max_long in AT as (-1 >>> 1) — two instructions instead of set64.
    __ daddiu(AT, R0, -1);
    __ dsrl(AT, AT, 1);
    __ dmfc1(dst, F30);

    // Result != max_long: conversion was exact enough, done.
    __ bne(dst, AT, Done);
    __ delayed()->mtc1(R0, F30);

    // Ambiguous case: compare src against 0.0 to decide overflow direction.
    __ cvt_d_w(F30, F30);
    __ c_ult_d(src, F30);
    __ bc1f(Done);
    __ delayed()->daddiu(T9, R0, -1);

    __ c_un_d(src, src); //NaN?
    // dst = -1 - max_long == min_long for negative overflow.
    __ subu(dst, T9, AT);
    // NaN converts to 0 per the Java spec.
    __ movt(dst, R0);

    __ bind(Done);
  %}

  ins_pipe( pipe_slow );
%}
// double -> long, slow path: returns 0 for NaN, uses trunc.l.d when the
// FCSR invalid-operation flag (bit 16) stays clear, and otherwise calls
// SharedRuntime::d2l for the full Java-semantics conversion.
instruct convD2L_reg_slow( mRegL dst, regD src ) %{
  match(Set dst (ConvD2L src));
  ins_cost(250);
  format %{ "convD2L $dst, $src @ convD2L_reg_slow" %}
  ins_encode %{
    Register dst = as_Register($dst$$reg);
    FloatRegister src = as_FloatRegister($src$$reg);

    Label L;

    __ c_un_d(src, src); //NaN?
    __ bc1t(L);
    __ delayed();
    // NaN -> 0 (also the fall-through initial value).
    __ move(dst, R0);

    __ trunc_l_d(F30, src);
    // Read FCSR and test the invalid-operation cause/flag bit (0x10000).
    __ cfc1(AT, 31);
    __ li(T9, 0x10000);
    __ andr(AT, AT, T9);
    __ beq(AT, R0, L);
    __ delayed()->dmfc1(dst, F30);

    // Invalid operation raised: defer to the runtime helper.
    __ mov_d(F12, src);
    __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::d2l), 1);
    __ move(dst, V0);
    __ bind(L);
  %}

  ins_pipe( pipe_slow );
%}
// float -> int, fast path. trunc.w.s covers the common case; a result of
// max_int (0x7fffffff, what the FPU yields on overflow) triggers a sign
// check on the raw float bits to substitute min_int for negative overflow.
// NaN is mapped to 0 via movt after the c.un.s compare.
instruct convF2I_reg_fast( mRegI dst, regF src ) %{
  match(Set dst (ConvF2I src));
  ins_cost(150);
  format %{ "convf2i $dst, $src @ convF2I_reg_fast" %}
  ins_encode %{
    Register dreg = $dst$$Register;
    FloatRegister fval = $src$$FloatRegister;
    Label L;

    __ trunc_w_s(F30, fval);
    __ move(AT, 0x7fffffff);
    __ mfc1(dreg, F30);
    __ c_un_s(fval, fval); //NaN?
    // NaN -> 0.
    __ movt(dreg, R0);

    // Result != max_int: no overflow possible, done.
    __ bne(AT, dreg, L);
    __ delayed()->lui(T9, 0x8000);

    // Inspect the float's sign bit: negative overflow becomes min_int
    // (0x80000000, already in T9 from the lui in the delay slot).
    __ mfc1(AT, fval);
    __ andr(AT, AT, T9);

    __ movn(dreg, T9, AT);

    __ bind(L);

  %}

  ins_pipe( pipe_slow );
%}
// float -> int, slow path: NaN -> 0; use trunc.w.s when the FCSR
// invalid-operation bit stays clear; otherwise call SharedRuntime::f2i,
// preserving fval and (if needed) V0 across the leaf call.
instruct convF2I_reg_slow( mRegI dst, regF src ) %{
  match(Set dst (ConvF2I src));
  ins_cost(250);
  format %{ "convf2i $dst, $src @ convF2I_reg_slow" %}
  ins_encode %{
    Register dreg = $dst$$Register;
    FloatRegister fval = $src$$FloatRegister;
    Label L;

    __ c_un_s(fval, fval); //NaN?
    __ bc1t(L);
    __ delayed();
    __ move(dreg, R0);

    __ trunc_w_s(F30, fval);

    /* Call SharedRuntime:f2i() to do valid convention */
    // FCSR bit 0x10000 = invalid-operation; clear means trunc result is valid.
    __ cfc1(AT, 31);
    __ li(T9, 0x10000);
    __ andr(AT, AT, T9);
    __ beq(AT, R0, L);
    __ delayed()->mfc1(dreg, F30);

    __ mov_s(F12, fval);

    /* 2014/01/08 Fu : This bug was found when running ezDS's control-panel.
     * J 982 C2 javax.swing.text.BoxView.layoutMajorAxis(II[I[I)V (283 bytes) @ 0x000000555c46aa74
     *
     * An interger array index has been assigned to V0, and then changed from 1 to Integer.MAX_VALUE.
     * V0 is corrupted during call_VM_leaf(), and should be preserved.
     */
    __ push(fval);
    if(dreg != V0) {
      __ push(V0);
    }
    __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::f2i), 1);
    if(dreg != V0) {
      __ move(dreg, V0);
      __ pop(V0);
    }
    __ pop(fval);
    __ bind(L);
  %}

  ins_pipe( pipe_slow );
%}
// float -> long, fast path. Mirrors convF2I_reg_fast but 64-bit: a result
// of max_long (built as -1 >>> 1) triggers a sign-bit check on the raw
// float; negative overflow substitutes min_long (0x8000... built by
// shifting 0x80000000 left 32). NaN -> 0 via movt.
instruct convF2L_reg_fast( mRegL dst, regF src ) %{
  match(Set dst (ConvF2L src));
  ins_cost(150);
  format %{ "convf2l $dst, $src @ convF2L_reg_fast" %}
  ins_encode %{
    Register dreg = $dst$$Register;
    FloatRegister fval = $src$$FloatRegister;
    Label L;

    __ trunc_l_s(F30, fval);
    // AT = max_long (-1 >>> 1).
    __ daddiu(AT, R0, -1);
    __ dsrl(AT, AT, 1);
    __ dmfc1(dreg, F30);
    __ c_un_s(fval, fval); //NaN?
    __ movt(dreg, R0);

    __ bne(AT, dreg, L);
    __ delayed()->lui(T9, 0x8000);

    // Overflow: check the float's sign bit.
    __ mfc1(AT, fval);
    __ andr(AT, AT, T9);

    // T9 = 0x8000000000000000 (min_long) for the negative-overflow case.
    __ dsll32(T9, T9, 0);
    __ movn(dreg, T9, AT);

    __ bind(L);
  %}

  ins_pipe( pipe_slow );
%}
// float -> long, slow path: NaN -> 0; trunc.l.s when the FCSR
// invalid-operation bit stays clear; otherwise call SharedRuntime::f2l.
instruct convF2L_reg_slow( mRegL dst, regF src ) %{
  match(Set dst (ConvF2L src));
  ins_cost(250);
  format %{ "convf2l $dst, $src @ convF2L_reg_slow" %}
  ins_encode %{
    Register dst = as_Register($dst$$reg);
    FloatRegister fval = $src$$FloatRegister;
    Label L;

    __ c_un_s(fval, fval); //NaN?
    __ bc1t(L);
    __ delayed();
    __ move(dst, R0);

    __ trunc_l_s(F30, fval);
    // FCSR bit 0x10000 = invalid-operation flag.
    __ cfc1(AT, 31);
    __ li(T9, 0x10000);
    __ andr(AT, AT, T9);
    __ beq(AT, R0, L);
    __ delayed()->dmfc1(dst, F30);

    // Invalid: use the runtime helper for Java-correct saturation.
    __ mov_s(F12, fval);
    __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::f2l), 1);
    __ move(dst, V0);
    __ bind(L);
  %}

  ins_pipe( pipe_slow );
%}
// long -> float: move the 64-bit integer into the FPU register, then
// convert with cvt.s.l. (Label L is declared but unused.)
instruct convL2F_reg( regF dst, mRegL src ) %{
  match(Set dst (ConvL2F src));
  format %{ "convl2f $dst, $src @ convL2F_reg" %}
  ins_encode %{
    FloatRegister dst = $dst$$FloatRegister;
    Register src = as_Register($src$$reg);
    Label L;

    __ dmtc1(src, dst);
    __ cvt_s_l(dst, dst);
  %}

  ins_pipe( pipe_slow );
%}
// int -> float: move the 32-bit integer into the FPU register, then
// convert with cvt.s.w.
instruct convI2F_reg( regF dst, mRegI src ) %{
  match(Set dst (ConvI2F src));
  format %{ "convi2f $dst, $src @ convI2F_reg" %}
  ins_encode %{
    Register src = $src$$Register;
    FloatRegister dst = $dst$$FloatRegister;

    __ mtc1(src, dst);
    __ cvt_s_w(dst, dst);
  %}

  ins_pipe( fpu_regF_regF );
%}
// CmpLTMask against zero: dst = (p < 0) ? -1 : 0, which is exactly the
// sign bit broadcast — a single arithmetic shift right by 31.
instruct cmpLTMask_immI0( mRegI dst, mRegI p, immI0 zero ) %{
  match(Set dst (CmpLTMask p zero));
  ins_cost(100);

  format %{ "sra $dst, $p, 31 @ cmpLTMask_immI0" %}
  ins_encode %{
    Register src = $p$$Register;
    Register dst = $dst$$Register;

    __ sra(dst, src, 31);
  %}
  ins_pipe( pipe_slow );
%}
// CmpLTMask: dst = (p < q) ? -1 : 0. slt produces 0/1; negating it with
// subu(R0 - dst) turns 1 into the all-ones mask.
instruct cmpLTMask( mRegI dst, mRegI p, mRegI q ) %{
  match(Set dst (CmpLTMask p q));
  ins_cost(400);

  format %{ "cmpLTMask $dst, $p, $q @ cmpLTMask" %}
  ins_encode %{
    Register p = $p$$Register;
    Register q = $q$$Register;
    Register dst = $dst$$Register;

    __ slt(dst, p, q);
    __ subu(dst, R0, dst);
  %}
  ins_pipe( pipe_slow );
%}
// Pointer -> boolean: dst = (src != NULL) ? 1 : 0. Same movz sequence as
// convI2B, with the AT spill when dst aliases src.
instruct convP2B(mRegI dst, mRegP src) %{
  match(Set dst (Conv2B src));

  ins_cost(100);
  format %{ "convP2B $dst, $src @ convP2B" %}
  ins_encode %{
    Register dst = as_Register($dst$$reg);
    Register src = as_Register($src$$reg);

    if (dst != src) {
      // dst = 1, then dst = 0 if src == 0.
      __ daddiu(dst, R0, 1);
      __ movz(dst, R0, src);
    } else {
      // Preserve src in AT before overwriting dst (== src).
      __ move(AT, src);
      __ daddiu(dst, R0, 1);
      __ movz(dst, R0, AT);
    }
  %}

  ins_pipe( ialu_regL_regL );
%}
// int -> double: move the 32-bit integer into the FPU register, then
// convert with cvt.d.w.
instruct convI2D_reg_reg(regD dst, mRegI src) %{
  match(Set dst (ConvI2D src));
  format %{ "conI2D $dst, $src @convI2D_reg" %}
  ins_encode %{
    Register src = $src$$Register;
    FloatRegister dst = $dst$$FloatRegister;
    __ mtc1(src, dst);
    __ cvt_d_w(dst, dst);
  %}
  ins_pipe( fpu_regF_regF );
%}
// float -> double widening conversion (cvt.d.s).
instruct convF2D_reg_reg(regD dst, regF src) %{
  match(Set dst (ConvF2D src));
  format %{ "convF2D $dst, $src\t# @convF2D_reg_reg" %}
  ins_encode %{
    FloatRegister dst = $dst$$FloatRegister;
    FloatRegister src = $src$$FloatRegister;

    __ cvt_d_s(dst, src);
  %}
  ins_pipe( fpu_regF_regF );
%}
// double -> float narrowing conversion (cvt.s.d).
instruct convD2F_reg_reg(regF dst, regD src) %{
  match(Set dst (ConvD2F src));
  format %{ "convD2F $dst, $src\t# @convD2F_reg_reg" %}
  ins_encode %{
    FloatRegister dst = $dst$$FloatRegister;
    FloatRegister src = $src$$FloatRegister;

    __ cvt_s_d(dst, src);
  %}
  ins_pipe( fpu_regF_regF );
%}
// Convert a double to an int. If the double is a NAN, stuff a zero in instead.
// Fast path: trunc.w.d handles the common case; a result of max_int (the
// FPU's overflow value) triggers the slow sequence, which distinguishes
// positive overflow (max_int), negative overflow (min_int, computed as
// -1 - max_int) and NaN (0).
instruct convD2I_reg_reg_fast( mRegI dst, regD src ) %{
  match(Set dst (ConvD2I src));

  ins_cost(150);
  format %{ "convD2I $dst, $src\t# @ convD2I_reg_reg_fast" %}

  ins_encode %{
    FloatRegister src = $src$$FloatRegister;
    Register dst = $dst$$Register;

    Label Done;

    __ trunc_w_d(F30, src);
    // max_int: 2147483647
    __ move(AT, 0x7fffffff);
    __ mfc1(dst, F30);

    // Result != max_int: conversion valid, done.
    __ bne(dst, AT, Done);
    __ delayed()->mtc1(R0, F30);

    // Compare src against 0.0 to decide overflow direction.
    __ cvt_d_w(F30, F30);
    __ c_ult_d(src, F30);
    __ bc1f(Done);
    __ delayed()->addiu(T9, R0, -1);

    __ c_un_d(src, src); //NaN?
    // dst = -1 - max_int == min_int for negative overflow.
    __ subu32(dst, T9, AT);
    // NaN -> 0.
    __ movt(dst, R0);

    __ bind(Done);
  %}
  ins_pipe( pipe_slow );
%}
// double -> int, slow path: trunc.w.d when the FCSR invalid-operation bit
// (0x10000) stays clear; otherwise call SharedRuntime::d2i for full Java
// saturation/NaN semantics.
instruct convD2I_reg_reg_slow( mRegI dst, regD src ) %{
  match(Set dst (ConvD2I src));

  ins_cost(250);
  format %{ "convD2I $dst, $src\t# @ convD2I_reg_reg_slow" %}

  ins_encode %{
    FloatRegister src = $src$$FloatRegister;
    Register dst = $dst$$Register;
    Label L;

    __ trunc_w_d(F30, src);
    // Read FCSR and test the invalid-operation bit.
    __ cfc1(AT, 31);
    __ li(T9, 0x10000);
    __ andr(AT, AT, T9);
    __ beq(AT, R0, L);
    __ delayed()->mfc1(dst, F30);

    __ mov_d(F12, src);
    __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::d2i), 1);
    __ move(dst, V0);
    __ bind(L);

  %}
  ins_pipe( pipe_slow );
%}
// Convert oop pointer into compressed form
// General case (oop may be null) — delegates to MacroAssembler, which
// handles the null check as needed.
instruct encodeHeapOop(mRegN dst, mRegP src) %{
  predicate(n->bottom_type()->make_ptr()->ptr() != TypePtr::NotNull);
  match(Set dst (EncodeP src));
  format %{ "encode_heap_oop $dst,$src" %}
  ins_encode %{
    Register src = $src$$Register;
    Register dst = $dst$$Register;

    __ encode_heap_oop(dst, src);
  %}
  ins_pipe( ialu_regL_regL );
%}
// Compress an oop that the type system proves non-null — the null check
// in the general encode can be skipped.
instruct encodeHeapOop_not_null(mRegN dst, mRegP src) %{
  predicate(n->bottom_type()->make_ptr()->ptr() == TypePtr::NotNull);
  match(Set dst (EncodeP src));
  format %{ "encode_heap_oop_not_null $dst,$src @ encodeHeapOop_not_null" %}
  ins_encode %{
    __ encode_heap_oop_not_null($dst$$Register, $src$$Register);
  %}
  ins_pipe( ialu_regL_regL );
%}
// Expand a compressed oop that may be null (and is not a constant).
instruct decodeHeapOop(mRegP dst, mRegN src) %{
  predicate(n->bottom_type()->is_ptr()->ptr() != TypePtr::NotNull &&
            n->bottom_type()->is_ptr()->ptr() != TypePtr::Constant);
  match(Set dst (DecodeN src));
  format %{ "decode_heap_oop $dst,$src @ decodeHeapOop" %}
  ins_encode %{
    // Null-preserving decode; source and destination may be the same register.
    __ decode_heap_oop($dst$$Register, $src$$Register);
  %}
  ins_pipe( ialu_regL_regL );
%}
// Expand a compressed oop known to be non-null; picks the one- or
// two-register form of the macro depending on whether src and dst alias.
instruct decodeHeapOop_not_null(mRegP dst, mRegN src) %{
  predicate(n->bottom_type()->is_ptr()->ptr() == TypePtr::NotNull ||
            n->bottom_type()->is_ptr()->ptr() == TypePtr::Constant);
  match(Set dst (DecodeN src));
  format %{ "decode_heap_oop_not_null $dst,$src @ decodeHeapOop_not_null" %}
  ins_encode %{
    Register s = $src$$Register;
    Register d = $dst$$Register;
    if (s != d) {
      __ decode_heap_oop_not_null(d, s);
    } else {
      // In-place variant when the allocator assigned the same register.
      __ decode_heap_oop_not_null(d);
    }
  %}
  ins_pipe( ialu_regL_regL );
%}
// Compress a klass pointer (EncodePKlass), always non-null.
instruct encodeKlass_not_null(mRegN dst, mRegP src) %{
  match(Set dst (EncodePKlass src));
  // Fixed the format string: it previously read "encode_heap_oop_not_null",
  // which is the oop encode, not the klass encode actually emitted below.
  format %{ "encode_klass_not_null $dst,$src @ encodeKlass_not_null" %}
  ins_encode %{
    __ encode_klass_not_null($dst$$Register, $src$$Register);
  %}
  ins_pipe( ialu_regL_regL );
%}
// Expand a compressed klass pointer; picks the in-place form when the
// allocator assigned src and dst to the same register.
instruct decodeKlass_not_null(mRegP dst, mRegN src) %{
  match(Set dst (DecodeNKlass src));
  format %{ "decode_heap_klass_not_null $dst,$src" %}
  ins_encode %{
    Register s = $src$$Register;
    Register d = $dst$$Register;
    if (s != d) {
      __ decode_klass_not_null(d, s);
    } else {
      __ decode_klass_not_null(d);
    }
  %}
  ins_pipe( ialu_regL_regL );
%}
//FIXME
// Materialize the current JavaThread* into a GP register.
instruct tlsLoadP(mRegP dst) %{
  match(Set dst (ThreadLocal));

  ins_cost(0);
  format %{ " get_thread in $dst #@tlsLoadP" %}
  ins_encode %{
    Register dst = $dst$$Register;
#ifdef OPT_THREAD
    // Thread pointer is pinned in a dedicated register (TREG) in this build.
    __ move(dst, TREG);
#else
    // Otherwise recompute it via the macro assembler helper.
    __ get_thread(dst);
#endif
  %}

  ins_pipe( ialu_loadI );
%}
// CheckCastPP is a type-system-only node: no code is emitted.
instruct checkCastPP( mRegP dst ) %{
  match(Set dst (CheckCastPP dst));

  // Fixed typo in the format string ("chekCastPP" -> "checkCastPP").
  format %{ "#checkcastPP of $dst (empty encoding) #@checkCastPP" %}
  ins_encode( /*empty encoding*/ );
  ins_pipe( empty );
%}
// CastPP: compiler-only pointer type narrowing, emits nothing.
instruct castPP(mRegP dst)
%{
  match(Set dst (CastPP dst));

  size(0);
  format %{ "# castPP of $dst" %}
  ins_encode(/* empty encoding */);
  ins_pipe(empty);
%}

// CastII: compiler-only int type narrowing, emits nothing.
instruct castII( mRegI dst ) %{
  match(Set dst (CastII dst));
  format %{ "#castII of $dst empty encoding" %}
  ins_encode( /*empty encoding*/ );
  ins_cost(0);
  ins_pipe( empty );
%}
// Return Instruction
// Remove the return address & jump to it.
instruct Ret() %{
  match(Return);
  format %{ "RET #@Ret" %}

  ins_encode %{
    __ jr(RA);
    __ nop();  // branch delay slot
  %}

  ins_pipe( pipe_jump );
%}
12221 /*
12222 // For Loongson CPUs, jr seems too slow, so this rule shouldn't be imported.
12223 instruct jumpXtnd(mRegL switch_val) %{
12224 match(Jump switch_val);
12226 ins_cost(350);
12228 format %{ "load T9 <-- [$constanttablebase, $switch_val, $constantoffset] @ jumpXtnd\n\t"
12229 "jr T9\n\t"
12230 "nop" %}
12231 ins_encode %{
12232 Register table_base = $constanttablebase;
12233 int con_offset = $constantoffset;
12234 Register switch_reg = $switch_val$$Register;
12236 if (UseLoongsonISA) {
12237 if (Assembler::is_simm(con_offset, 8)) {
12238 __ gsldx(T9, table_base, switch_reg, con_offset);
12239 } else if (Assembler::is_simm16(con_offset)) {
12240 __ daddu(T9, table_base, switch_reg);
12241 __ ld(T9, T9, con_offset);
12242 } else {
12243 __ move(T9, con_offset);
12244 __ daddu(AT, table_base, switch_reg);
12245 __ gsldx(T9, AT, T9, 0);
12246 }
12247 } else {
12248 if (Assembler::is_simm16(con_offset)) {
12249 __ daddu(T9, table_base, switch_reg);
12250 __ ld(T9, T9, con_offset);
12251 } else {
12252 __ move(T9, con_offset);
12253 __ daddu(AT, table_base, switch_reg);
12254 __ daddu(AT, T9, AT);
12255 __ ld(T9, AT, 0);
12256 }
12257 }
12259 __ jr(T9);
12260 __ nop();
12262 %}
12263 ins_pipe(pipe_jump);
12264 %}
12265 */
// Jump Direct - Label defines a relative address from JMP
instruct jmpDir(label labl) %{
  match(Goto);
  effect(USE labl);

  ins_cost(300);
  format %{ "JMP $labl #@jmpDir" %}

  ins_encode %{
    Label &L = *($labl$$label);
    // A null label pointer means the target is not yet bound; emit a
    // zero-offset branch to be patched later (same idiom as the other
    // branch rules in this file).
    if(&L)
      __ b(L);
    else
      __ b(int(0));
    __ nop();  // branch delay slot
  %}

  ins_pipe( pipe_jump );
  ins_pc_relative(1);
%}
// Tail Jump; remove the return address; jump to target.
// TailCall above leaves the return address around.
// TailJump is used in only one place, the rethrow_Java stub (fancy_jump=2).
// ex_oop (Exception Oop) is needed in %o0 at the jump. As there would be a
// "restore" before this instruction (in Epilogue), we need to materialize it
// in %i0.
//FIXME
instruct tailjmpInd(mRegP jump_target,mRegP ex_oop) %{
  match( TailJump jump_target ex_oop );
  ins_cost(200);
  format %{ "Jmp $jump_target  ; ex_oop = $ex_oop #@tailjmpInd" %}
  ins_encode %{
    Register target = $jump_target$$Register;

    /* 2012/9/14 Jin: V0, V1 are indicated in:
     *      [stubGenerator_mips.cpp] generate_forward_exception()
     *      [runtime_mips.cpp] OptoRuntime::generate_exception_blob()
     */
    // Hand the exception oop and the (current) return address to the
    // exception handler in the registers it expects.
    Register oop  = $ex_oop$$Register;
    Register exception_oop = V0;
    Register exception_pc = V1;

    __ move(exception_pc, RA);
    __ move(exception_oop, oop);

    __ jr(target);
    __ nop();  // branch delay slot
  %}
  ins_pipe( pipe_jump );
%}
// ============================================================================
// Procedure Call/Return Instructions
// Call Java Static Instruction
// Note: If this code changes, the corresponding ret_addr_offset() and
//       compute_padding() functions will have to be adjusted.
instruct CallStaticJavaDirect(method meth) %{
  match(CallStaticJava);
  effect(USE meth);

  ins_cost(300);
  format %{ "CALL,static #@CallStaticJavaDirect " %}
  ins_encode( Java_Static_Call( meth ) );
  ins_pipe( pipe_slow );
  ins_pc_relative(1);
%}

// Call Java Dynamic Instruction
// Note: If this code changes, the corresponding ret_addr_offset() and
//       compute_padding() functions will have to be adjusted.
instruct CallDynamicJavaDirect(method meth) %{
  match(CallDynamicJava);
  effect(USE meth);

  ins_cost(300);
  format %{"MOV IC_Klass, (oop)-1\n\t"
           "CallDynamic @ CallDynamicJavaDirect" %}
  ins_encode( Java_Dynamic_Call( meth ) );
  ins_pipe( pipe_slow );
  ins_pc_relative(1);
%}

// Runtime leaf call taking no floating-point arguments; no safepoint.
instruct CallLeafNoFPDirect(method meth) %{
  match(CallLeafNoFP);
  effect(USE meth);

  ins_cost(300);
  format %{ "CALL_LEAF_NOFP,runtime " %}
  ins_encode(Java_To_Runtime(meth));
  ins_pipe( pipe_slow );
  ins_pc_relative(1);
  ins_alignment(16);
%}
// Prefetch instructions.

// Prefetch-for-read. Computes the effective address of $mem into AT and
// issues `pref` hint 0 (load).
instruct prefetchrNTA( memory mem ) %{
  match(PrefetchRead mem);
  ins_cost(125);

  format %{ "pref $mem\t# Prefetch into non-temporal cache for read @ prefetchrNTA" %}
  ins_encode %{
    int  base = $mem$$base;
    int  index = $mem$$index;
    int  scale = $mem$$scale;
    int  disp = $mem$$disp;

    // AT = base + (index << scale)
    if( index != 0 ) {
      if (scale == 0) {
        __ daddu(AT, as_Register(base), as_Register(index));
      } else {
        __ dsll(AT, as_Register(index), scale);
        __ daddu(AT, as_Register(base), AT);
      }
    } else {
      __ move(AT, as_Register(base));
    }
    // AT += disp.
    // BUGFIX: the original sequence recomputed from `base` (discarding the
    // index sum above) and, in the simm16 case, added disp twice
    // (daddiu(AT, base, disp); daddiu(AT, AT, disp)), so the prefetched
    // address was wrong whenever index != 0 or disp != 0.
    if( Assembler::is_simm16(disp) ) {
      __ daddiu(AT, AT, disp);
    } else {
      __ move(T9, disp);
      __ daddu(AT, AT, T9);
    }
    __ pref(0, AT, 0); //hint: 0:load
  %}
  ins_pipe(pipe_slow);
%}
// Prefetch-for-write. Computes the effective address of $mem into AT and
// issues `pref` hint 1 (store).
instruct prefetchwNTA( memory mem ) %{
  match(PrefetchWrite mem);
  ins_cost(125);
  format %{ "pref $mem\t# Prefetch to non-temporal cache for write @ prefetchwNTA" %}
  ins_encode %{
    int  base = $mem$$base;
    int  index = $mem$$index;
    int  scale = $mem$$scale;
    int  disp = $mem$$disp;

    // AT = base + (index << scale)
    if( index != 0 ) {
      if (scale == 0) {
        __ daddu(AT, as_Register(base), as_Register(index));
      } else {
        __ dsll(AT, as_Register(index), scale);
        __ daddu(AT, as_Register(base), AT);
      }
    } else {
      __ move(AT, as_Register(base));
    }
    // AT += disp.
    // BUGFIX: same defect as prefetchrNTA — the original recomputed from
    // `base` (dropping the index contribution) and added disp twice in the
    // simm16 branch, yielding base + 2*disp instead of base+index+disp.
    if( Assembler::is_simm16(disp) ) {
      __ daddiu(AT, AT, disp);
    } else {
      __ move(T9, disp);
      __ daddu(AT, AT, T9);
    }
    __ pref(1, AT, 0); //hint: 1:store
  %}
  ins_pipe(pipe_slow);
%}
// Prefetch instructions for allocation.

// Allocation prefetch implemented as a byte load into R0 (the architectural
// zero register), which touches the line without keeping the value.
// NOTE(review): this is a real load, not a pref hint — presumably safe here
// because allocation prefetches target freshly allocated TLAB memory.
instruct prefetchAllocNTA( memory mem ) %{
  match(PrefetchAllocation mem);
  ins_cost(125);
  format %{ "pref $mem\t# Prefetch allocation @ prefetchAllocNTA" %}
  ins_encode %{
     int  base = $mem$$base;
     int  index = $mem$$index;
     int  scale = $mem$$scale;
     int  disp = $mem$$disp;

     Register dst = R0;

     if( index != 0 ) {
        // base + (index << scale) + disp, with gslbx used where the
        // Loongson extension allows folding the add into the load.
        if( Assembler::is_simm16(disp) ) {
           if( UseLoongsonISA ) {
              if (scale == 0) {
                 __ gslbx(dst, as_Register(base), as_Register(index), disp);
              } else {
                 __ dsll(AT, as_Register(index), scale);
                 __ gslbx(dst, as_Register(base), AT, disp);
              }
           } else {
              if (scale == 0) {
                 __ addu(AT, as_Register(base), as_Register(index));
              } else {
                 __ dsll(AT, as_Register(index), scale);
                 __ addu(AT, as_Register(base), AT);
              }
              __ lb(dst, AT, disp);
           }
        } else {
           if (scale == 0) {
              __ addu(AT, as_Register(base), as_Register(index));
           } else {
              __ dsll(AT, as_Register(index), scale);
              __ addu(AT, as_Register(base), AT);
           }
           __ move(T9, disp);
           if( UseLoongsonISA ) {
              __ gslbx(dst, AT, T9, 0);
           } else {
              __ addu(AT, AT, T9);
              __ lb(dst, AT, 0);
           }
        }
     } else {
        // No index register: just base + disp.
        if( Assembler::is_simm16(disp) ) {
           __ lb(dst, as_Register(base), disp);
        } else {
           __ move(T9, disp);
           if( UseLoongsonISA ) {
              __ gslbx(dst, as_Register(base), T9, 0);
           } else {
              __ addu(AT, as_Register(base), T9);
              __ lb(dst, AT, 0);
           }
        }
     }
  %}
  ins_pipe(pipe_slow);
%}
// Call runtime without safepoint
instruct CallLeafDirect(method meth) %{
  match(CallLeaf);
  effect(USE meth);

  ins_cost(300);
  format %{ "CALL_LEAF,runtime #@CallLeafDirect " %}
  ins_encode(Java_To_Runtime(meth));
  ins_pipe( pipe_slow );
  ins_pc_relative(1);
  ins_alignment(16);
%}
// Load Char (16bit unsigned)
instruct loadUS(mRegI dst, memory mem) %{
  match(Set dst (LoadUS mem));

  ins_cost(125);
  format %{ "loadUS  $dst,$mem @ loadC" %}
  ins_encode(load_C_enc(dst, mem));
  ins_pipe( ialu_loadI );
%}

// Fused char-load + widen to long; the zero-extending char load already
// produces the correct 64-bit value, so the same encoding is reused.
instruct loadUS_convI2L(mRegL dst, memory mem) %{
  match(Set dst (ConvI2L (LoadUS mem)));

  ins_cost(125);
  format %{ "loadUS  $dst,$mem @ loadUS_convI2L" %}
  ins_encode(load_C_enc(dst, mem));
  ins_pipe( ialu_loadI );
%}

// Store Char (16bit unsigned)
instruct storeC(memory mem, mRegI src) %{
  match(Set mem (StoreC mem src));

  ins_cost(125);
  format %{ "storeC  $src, $mem @ storeC" %}
  ins_encode(store_C_reg_enc(mem, src));
  ins_pipe( ialu_loadI );
%}

// Store of a constant zero char; avoids tying up a source register.
instruct storeC0(memory mem, immI0 zero) %{
  match(Set mem (StoreC mem zero));

  ins_cost(125);
  format %{ "storeC  $zero, $mem @ storeC0" %}
  ins_encode(store_C0_enc(mem));
  ins_pipe( ialu_loadI );
%}
instruct loadConF0(regF dst, immF0 zero) %{
  match(Set dst zero);
  ins_cost(100);

  format %{ "mov  $dst, zero @ loadConF0\n"%}
  ins_encode %{
    // +0.0f and integer zero share a bit pattern, so moving R0 into the
    // FP register materializes the constant directly.
    __ mtc1(R0, $dst$$FloatRegister);
  %}
  ins_pipe( fpu_loadF );
%}
// Load a float constant from the constant table.
instruct loadConF(regF dst, immF src) %{
  match(Set dst src);
  ins_cost(125);

  format %{ "lwc1  $dst, $constantoffset[$constanttablebase] # load FLOAT $src from table @ loadConF" %}
  ins_encode %{
    int con_offset = $constantoffset($src);

    if (Assembler::is_simm16(con_offset)) {
      // Offset fits in the load's 16-bit displacement.
      __ lwc1($dst$$FloatRegister, $constanttablebase, con_offset);
    } else {
      __ set64(AT, con_offset);
      if (UseLoongsonISA) {
        // Loongson indexed FP load folds the add.
        __ gslwxc1($dst$$FloatRegister, $constanttablebase, AT, 0);
      } else {
        __ daddu(AT, $constanttablebase, AT);
        __ lwc1($dst$$FloatRegister, AT, 0);
      }
    }
  %}
  ins_pipe( fpu_loadF );
%}
instruct loadConD0(regD dst, immD0 zero) %{
  match(Set dst zero);
  ins_cost(100);

  format %{ "mov  $dst, zero @ loadConD0"%}
  ins_encode %{
    // +0.0 and a 64-bit integer zero share a bit pattern, so one
    // doubleword GPR->FPR move of R0 materializes the constant.
    __ dmtc1(R0, $dst$$FloatRegister);
  %}
  ins_pipe( fpu_loadF );
%}
// Load a double constant from the constant table.
instruct loadConD(regD dst, immD src) %{
  match(Set dst src);
  ins_cost(125);

  format %{ "ldc1  $dst, $constantoffset[$constanttablebase] # load DOUBLE $src from table @ loadConD" %}
  ins_encode %{
    int con_offset = $constantoffset($src);

    if (Assembler::is_simm16(con_offset)) {
      // Offset fits in the load's 16-bit displacement.
      __ ldc1($dst$$FloatRegister, $constanttablebase, con_offset);
    } else {
      __ set64(AT, con_offset);
      if (UseLoongsonISA) {
        // Loongson indexed FP load folds the add.
        __ gsldxc1($dst$$FloatRegister, $constanttablebase, AT, 0);
      } else {
        __ daddu(AT, $constanttablebase, AT);
        __ ldc1($dst$$FloatRegister, AT, 0);
      }
    }
  %}
  ins_pipe( fpu_loadF );
%}
// Store register Float value (it is faster than store from FPU register)
instruct storeF_reg( memory mem, regF src) %{
  match(Set mem (StoreF mem src));

  ins_cost(50);
  format %{ "store   $mem, $src\t# store float @ storeF_reg" %}
  ins_encode(store_F_reg_enc(mem, src));
  ins_pipe( fpu_storeF );
%}
// Store of float +0.0: since 0.0f has the same bit pattern as integer zero,
// a plain integer sw of R0 is used (no FP register needed). The branching
// below only selects an addressing sequence for base/index/scale/disp.
instruct storeF_imm0( memory mem, immF0 zero) %{
  match(Set mem (StoreF mem zero));

  ins_cost(40);
  format %{ "store   $mem, zero\t# store float @ storeF_imm0" %}
  ins_encode %{
    int      base = $mem$$base;
    int     index = $mem$$index;
    int     scale = $mem$$scale;
    int      disp = $mem$$disp;

    if( index != 0 ) {
      if ( UseLoongsonISA ) {
        // gsswx can fold base+index and an 8-bit displacement.
        if ( Assembler::is_simm(disp, 8) ) {
          if ( scale == 0 ) {
            __ gsswx(R0, as_Register(base), as_Register(index), disp);
          } else {
            __ dsll(T9, as_Register(index), scale);
            __ gsswx(R0, as_Register(base), T9, disp);
          }
        } else if ( Assembler::is_simm16(disp) ) {
          if ( scale == 0 ) {
            __ daddu(AT, as_Register(base), as_Register(index));
          } else {
            __ dsll(T9, as_Register(index), scale);
            __ daddu(AT, as_Register(base), T9);
          }
          __ sw(R0, AT, disp);
        } else {
          // Large displacement: materialize disp and fold the final add
          // into the indexed store.
          if ( scale == 0 ) {
            __ move(T9, disp);
            __ daddu(AT, as_Register(index), T9);
            __ gsswx(R0, as_Register(base), AT, 0);
          } else {
            __ dsll(T9, as_Register(index), scale);
            __ move(AT, disp);
            __ daddu(AT, AT, T9);
            __ gsswx(R0, as_Register(base), AT, 0);
          }
        }
      } else { //not use loongson isa
        if(scale != 0) {
          __ dsll(T9, as_Register(index), scale);
          __ daddu(AT, as_Register(base), T9);
        } else {
          __ daddu(AT, as_Register(base), as_Register(index));
        }
        if( Assembler::is_simm16(disp) ) {
          __ sw(R0, AT, disp);
        } else {
          __ move(T9, disp);
          __ daddu(AT, AT, T9);
          __ sw(R0, AT, 0);
        }
      }
    } else { //index is 0
      if ( UseLoongsonISA ) {
        if ( Assembler::is_simm16(disp) ) {
          __ sw(R0, as_Register(base), disp);
        } else {
          __ move(T9, disp);
          __ gsswx(R0, as_Register(base), T9, 0);
        }
      } else {
        if( Assembler::is_simm16(disp) ) {
          __ sw(R0, as_Register(base), disp);
        } else {
          __ move(T9, disp);
          __ daddu(AT, as_Register(base), T9);
          __ sw(R0, AT, 0);
        }
      }
    }
  %}
  ins_pipe( ialu_storeI );
%}
// Load Double
instruct loadD(regD dst, memory mem) %{
  match(Set dst (LoadD mem));

  ins_cost(150);
  format %{ "loadD   $dst, $mem #@loadD" %}
  ins_encode(load_D_enc(dst, mem));
  ins_pipe( ialu_loadI );
%}

// Load Double - UNaligned
instruct loadD_unaligned(regD dst, memory mem ) %{
  match(Set dst (LoadD_unaligned mem));
  ins_cost(250);
  // FIXME: Jin: Need more effective ldl/ldr
  // Reuses the aligned-load encoding; see FIXME above.
  format %{ "loadD_unaligned   $dst, $mem #@loadD_unaligned" %}
  ins_encode(load_D_enc(dst, mem));
  ins_pipe( ialu_loadI );
%}

// Store a double from an FP register to memory.
instruct storeD_reg( memory mem, regD src) %{
  match(Set mem (StoreD mem src));

  ins_cost(50);
  format %{ "store   $mem, $src\t# store float @ storeD_reg" %}
  ins_encode(store_D_reg_enc(mem, src));
  ins_pipe( fpu_storeF );
%}
// Store of double +0.0: F30 is loaded with 0.0 (mtc1 of R0 then int->double
// convert) and stored with sdc1. The branching below only selects an
// addressing sequence for base/index/scale/disp.
instruct storeD_imm0( memory mem, immD0 zero) %{
  match(Set mem (StoreD mem zero));

  ins_cost(40);
  format %{ "store   $mem, zero\t# store float @ storeD_imm0" %}
  ins_encode %{
    int      base = $mem$$base;
    int     index = $mem$$index;
    int     scale = $mem$$scale;
    int      disp = $mem$$disp;

    __ mtc1(R0, F30);
    __ cvt_d_w(F30, F30);

    if( index != 0 ) {
      if ( UseLoongsonISA ) {
        // gssdxc1 can fold base+index and an 8-bit displacement.
        if ( Assembler::is_simm(disp, 8) ) {
          if (scale == 0) {
            __ gssdxc1(F30, as_Register(base), as_Register(index), disp);
          } else {
            __ dsll(T9, as_Register(index), scale);
            __ gssdxc1(F30, as_Register(base), T9, disp);
          }
        } else if ( Assembler::is_simm16(disp) ) {
          if (scale == 0) {
            __ daddu(AT, as_Register(base), as_Register(index));
            __ sdc1(F30, AT, disp);
          } else {
            __ dsll(T9, as_Register(index), scale);
            __ daddu(AT, as_Register(base), T9);
            __ sdc1(F30, AT, disp);
          }
        } else {
          // Large displacement: materialize disp and fold the final add
          // into the indexed store.
          if (scale == 0) {
            __ move(T9, disp);
            __ daddu(AT, as_Register(index), T9);
            __ gssdxc1(F30, as_Register(base), AT, 0);
          } else {
            __ move(T9, disp);
            __ dsll(AT, as_Register(index), scale);
            __ daddu(AT, AT, T9);
            __ gssdxc1(F30, as_Register(base), AT, 0);
          }
        }
      } else { // not use loongson isa
        if(scale != 0) {
          __ dsll(T9, as_Register(index), scale);
          __ daddu(AT, as_Register(base), T9);
        } else {
          __ daddu(AT, as_Register(base), as_Register(index));
        }
        if( Assembler::is_simm16(disp) ) {
          __ sdc1(F30, AT, disp);
        } else {
          __ move(T9, disp);
          __ daddu(AT, AT, T9);
          __ sdc1(F30, AT, 0);
        }
      }
    } else {// index is 0
      if ( UseLoongsonISA ) {
        if ( Assembler::is_simm16(disp) ) {
          __ sdc1(F30, as_Register(base), disp);
        } else {
          __ move(T9, disp);
          __ gssdxc1(F30, as_Register(base), T9, 0);
        }
      } else {
        if( Assembler::is_simm16(disp) ) {
          __ sdc1(F30, as_Register(base), disp);
        } else {
          __ move(T9, disp);
          __ daddu(AT, as_Register(base), T9);
          __ sdc1(F30, AT, 0);
        }
      }
    }
  %}
  ins_pipe( ialu_storeI );
%}
// Stack-slot spill/fill rules. All of these address SP + disp directly and
// guarantee the displacement fits the 16-bit immediate of the load/store.
instruct loadSSI(mRegI dst, stackSlotI src)
%{
  match(Set dst src);

  ins_cost(125);
  format %{ "lw    $dst, $src\t# int stk @ loadSSI" %}
  ins_encode %{
    guarantee( Assembler::is_simm16($src$$disp), "disp too long (loadSSI) !");
    __ lw($dst$$Register, SP, $src$$disp);
  %}
  ins_pipe(ialu_loadI);
%}

instruct storeSSI(stackSlotI dst, mRegI src)
%{
  match(Set dst src);

  ins_cost(100);
  format %{ "sw    $dst, $src\t# int stk @ storeSSI" %}
  ins_encode %{
    guarantee( Assembler::is_simm16($dst$$disp), "disp too long (storeSSI) !");
    __ sw($src$$Register, SP, $dst$$disp);
  %}
  ins_pipe(ialu_storeI);
%}

instruct loadSSL(mRegL dst, stackSlotL src)
%{
  match(Set dst src);

  ins_cost(125);
  format %{ "ld    $dst, $src\t# long stk @ loadSSL" %}
  ins_encode %{
    guarantee( Assembler::is_simm16($src$$disp), "disp too long (loadSSL) !");
    __ ld($dst$$Register, SP, $src$$disp);
  %}
  ins_pipe(ialu_loadI);
%}

instruct storeSSL(stackSlotL dst, mRegL src)
%{
  match(Set dst src);

  ins_cost(100);
  format %{ "sd    $dst, $src\t# long stk @ storeSSL" %}
  ins_encode %{
    guarantee( Assembler::is_simm16($dst$$disp), "disp too long (storeSSL) !");
    __ sd($src$$Register, SP, $dst$$disp);
  %}
  ins_pipe(ialu_storeI);
%}

instruct loadSSP(mRegP dst, stackSlotP src)
%{
  match(Set dst src);

  ins_cost(125);
  format %{ "ld    $dst, $src\t# ptr stk @ loadSSP" %}
  ins_encode %{
    guarantee( Assembler::is_simm16($src$$disp), "disp too long (loadSSP) !");
    __ ld($dst$$Register, SP, $src$$disp);
  %}
  ins_pipe(ialu_loadI);
%}

instruct storeSSP(stackSlotP dst, mRegP src)
%{
  match(Set dst src);

  ins_cost(100);
  format %{ "sd    $dst, $src\t# ptr stk @ storeSSP" %}
  ins_encode %{
    guarantee( Assembler::is_simm16($dst$$disp), "disp too long (storeSSP) !");
    __ sd($src$$Register, SP, $dst$$disp);
  %}
  ins_pipe(ialu_storeI);
%}

instruct loadSSF(regF dst, stackSlotF src)
%{
  match(Set dst src);

  ins_cost(125);
  format %{ "lwc1   $dst, $src\t# float stk @ loadSSF" %}
  ins_encode %{
    guarantee( Assembler::is_simm16($src$$disp), "disp too long (loadSSF) !");
    __ lwc1($dst$$FloatRegister, SP, $src$$disp);
  %}
  ins_pipe(ialu_loadI);
%}

instruct storeSSF(stackSlotF dst, regF src)
%{
  match(Set dst src);

  ins_cost(100);
  format %{ "swc1   $dst, $src\t# float stk @ storeSSF" %}
  ins_encode %{
    guarantee( Assembler::is_simm16($dst$$disp), "disp too long (storeSSF) !");
    __ swc1($src$$FloatRegister, SP, $dst$$disp);
  %}
  ins_pipe(fpu_storeF);
%}

// Use the same format since predicate() can not be used here.
instruct loadSSD(regD dst, stackSlotD src)
%{
  match(Set dst src);

  ins_cost(125);
  format %{ "ldc1   $dst, $src\t# double stk @ loadSSD" %}
  ins_encode %{
    guarantee( Assembler::is_simm16($src$$disp), "disp too long (loadSSD) !");
    __ ldc1($dst$$FloatRegister, SP, $src$$disp);
  %}
  ins_pipe(ialu_loadI);
%}

instruct storeSSD(stackSlotD dst, regD src)
%{
  match(Set dst src);

  ins_cost(100);
  format %{ "sdc1   $dst, $src\t# double stk @ storeSSD" %}
  ins_encode %{
    guarantee( Assembler::is_simm16($dst$$disp), "disp too long (storeSSD) !");
    __ sdc1($src$$FloatRegister, SP, $dst$$disp);
  %}
  ins_pipe(fpu_storeF);
%}
// Fast-path monitor enter; result goes to the flags register (AT convention).
// The box (BasicLock slot) register is killed by the locking sequence.
instruct cmpFastLock( FlagsReg cr, mRegP object, s0_RegP box, mRegI tmp, mRegP scr) %{
  match( Set cr (FastLock object box) );
  effect( TEMP tmp, TEMP scr, USE_KILL box );
  ins_cost(300);
  format %{ "FASTLOCK $cr $object, $box, $tmp #@ cmpFastLock" %}
  ins_encode %{
    __ fast_lock($object$$Register, $box$$Register, $tmp$$Register, $scr$$Register);
  %}

  ins_pipe( pipe_slow );
  ins_pc_relative(1);
%}

// Fast-path monitor exit; mirror of cmpFastLock.
instruct cmpFastUnlock( FlagsReg cr, mRegP object, s0_RegP box, mRegP tmp ) %{
  match( Set cr (FastUnlock object box) );
  effect( TEMP tmp, USE_KILL box );
  ins_cost(300);
  format %{ "FASTUNLOCK $object, $box, $tmp #@cmpFastUnlock" %}
  ins_encode %{
    __ fast_unlock($object$$Register, $box$$Register, $tmp$$Register);
  %}

  ins_pipe( pipe_slow );
  ins_pc_relative(1);
%}
// Store CMS card-mark Immediate
instruct storeImmCM(memory mem, immI8 src) %{
  match(Set mem (StoreCM mem src));

  ins_cost(150);
  format %{ "MOV8   $mem,$src\t! CMS card-mark imm0" %}
// opcode(0xC6);
  // Byte store with ordering (the "_sync" encoding) as required for
  // card-table marking.
  ins_encode(store_B_immI_enc_sync(mem, src));
  ins_pipe( ialu_storeI );
%}
// Die now
instruct ShouldNotReachHere( )
%{
  match(Halt);
  ins_cost(300);

  // Use the following format syntax
  format %{ "ILLTRAP ;#@ShouldNotReachHere" %}
  ins_encode %{
    // Here we should emit illtrap !
    // Fixed typo in the stop message ("ShoudNotReachHere").
    __ stop("in ShouldNotReachHere");

  %}
  ins_pipe( pipe_jump );
%}
// Address computation (lea-style) rules: compute an effective address into a
// pointer register without touching memory.

// base + small displacement, valid only when compressed oops use no shift.
instruct leaP8Narrow(mRegP dst, indOffset8Narrow mem)
%{
  predicate(Universe::narrow_oop_shift() == 0);
  match(Set dst mem);

  ins_cost(110);
  format %{ "leaq    $dst, $mem\t# ptr off8narrow @ leaP8Narrow" %}
  ins_encode %{
    Register  dst  = $dst$$Register;
    Register  base = as_Register($mem$$base);
    int       disp = $mem$$disp;

    __ daddiu(dst, base, disp);
  %}
  ins_pipe( ialu_regI_imm16 );
%}

// base + (index << scale) + disp8.
instruct leaPPosIdxScaleOff8(mRegP dst, basePosIndexScaleOffset8 mem)
%{
  match(Set dst mem);

  ins_cost(110);
  format %{ "leaq    $dst, $mem\t# @ PosIdxScaleOff8" %}
  ins_encode %{
    Register  dst   = $dst$$Register;
    Register  base  = as_Register($mem$$base);
    Register  index = as_Register($mem$$index);
    int       scale = $mem$$scale;
    int       disp  = $mem$$disp;

    if (scale == 0) {
      __ daddu(AT, base, index);
      __ daddiu(dst, AT, disp);
    } else {
      __ dsll(AT, index, scale);
      __ daddu(AT, base, AT);
      __ daddiu(dst, AT, disp);
    }
 %}

  ins_pipe( ialu_regI_imm16 );
%}

// base + (index << scale), no displacement.
instruct leaPIdxScale(mRegP dst, indIndexScale mem)
%{
  match(Set dst mem);

  ins_cost(110);
  format %{ "leaq    $dst, $mem\t# @ leaPIdxScale" %}
  ins_encode %{
    Register  dst   = $dst$$Register;
    Register  base  = as_Register($mem$$base);
    Register  index = as_Register($mem$$index);
    int       scale = $mem$$scale;

    if (scale == 0) {
      __ daddu(dst, base, index);
    } else {
      __ dsll(AT, index, scale);
      __ daddu(dst, base, AT);
    }
 %}

  ins_pipe( ialu_regI_imm16 );
%}
// Jump Direct Conditional - Label defines a relative address from Jcc+1
// Counted-loop back-branch on a register/register int compare. A null label
// pointer means "not yet bound": a zero-offset branch is emitted for later
// patching (same idiom throughout this file).
// NOTE(review): the case names (above/below) are inherited from x86 cmpOp
// encodings, but the emitted slt is a signed compare — appropriate for CmpI.
instruct jmpLoopEnd(cmpOp cop, mRegI src1, mRegI src2, label labl) %{
  match(CountedLoopEnd cop (CmpI src1 src2));
  effect(USE labl);

  ins_cost(300);
  format %{ "J$cop $src1, $src2,  $labl\t# Loop end @ jmpLoopEnd" %}
  ins_encode %{
    Register op1 = $src1$$Register;
    Register op2 = $src2$$Register;
    Label     &L = *($labl$$label);
    int     flag = $cop$$cmpcode;

    switch(flag)
    {
      case 0x01: //equal
        if (&L)
          __ beq(op1, op2, L);
        else
          __ beq(op1, op2, (int)0);
        break;
      case 0x02: //not_equal
        if (&L)
          __ bne(op1, op2, L);
        else
          __ bne(op1, op2, (int)0);
        break;
      case 0x03: //above
        __ slt(AT, op2, op1);
        if(&L)
          __ bne(AT, R0, L);
        else
          __ bne(AT, R0, (int)0);
        break;
      case 0x04: //above_equal
        __ slt(AT, op1, op2);
        if(&L)
          __ beq(AT, R0, L);
        else
          __ beq(AT, R0, (int)0);
        break;
      case 0x05: //below
        __ slt(AT, op1, op2);
        if(&L)
          __ bne(AT, R0, L);
        else
          __ bne(AT, R0, (int)0);
        break;
      case 0x06: //below_equal
        __ slt(AT, op2, op1);
        if(&L)
          __ beq(AT, R0, L);
        else
          __ beq(AT, R0, (int)0);
        break;
      default:
        Unimplemented();
    }
    __ nop();  // branch delay slot
  %}
  ins_pipe( pipe_jump );
  ins_pc_relative(1);
%}
// Counted-loop back-branch against a 16-bit immediate: computes
// AT = src1 - src2 with one addiu and branches on AT's sign/zero,
// saving the slt of the register/register form.
instruct jmpLoopEnd_reg_imm16_sub(cmpOp cop, mRegI src1, immI16_sub src2, label labl) %{
  match(CountedLoopEnd cop (CmpI src1 src2));
  effect(USE labl);

  ins_cost(250);
  format %{ "J$cop $src1, $src2,  $labl\t# Loop end @ jmpLoopEnd_reg_imm16_sub" %}
  ins_encode %{
    Register op1 = $src1$$Register;
    int      op2 = $src2$$constant;
    Label     &L = *($labl$$label);
    int     flag = $cop$$cmpcode;

    __ addiu32(AT, op1, -1 * op2);

    switch(flag)
    {
      case 0x01: //equal
        if (&L)
          __ beq(AT, R0, L);
        else
          __ beq(AT, R0, (int)0);
        break;
      case 0x02: //not_equal
        if (&L)
          __ bne(AT, R0, L);
        else
          __ bne(AT, R0, (int)0);
        break;
      case 0x03: //above
        if(&L)
          __ bgtz(AT, L);
        else
          __ bgtz(AT, (int)0);
        break;
      case 0x04: //above_equal
        if(&L)
          __ bgez(AT, L);
        else
          __ bgez(AT,(int)0);
        break;
      case 0x05: //below
        if(&L)
          __ bltz(AT, L);
        else
          __ bltz(AT, (int)0);
        break;
      case 0x06: //below_equal
        if(&L)
          __ blez(AT, L);
        else
          __ blez(AT, (int)0);
        break;
      default:
        Unimplemented();
    }
    __ nop();  // branch delay slot
  %}
  ins_pipe( pipe_jump );
  ins_pc_relative(1);
%}
13197 /*
13198 // Jump Direct Conditional - Label defines a relative address from Jcc+1
13199 instruct jmpLoopEndU(cmpOpU cop, eFlagsRegU cmp, label labl) %{
13200 match(CountedLoopEnd cop cmp);
13201 effect(USE labl);
13203 ins_cost(300);
13204 format %{ "J$cop,u $labl\t# Loop end" %}
13205 size(6);
13206 opcode(0x0F, 0x80);
13207 ins_encode( Jcc( cop, labl) );
13208 ins_pipe( pipe_jump );
13209 ins_pc_relative(1);
13210 %}
13212 instruct jmpLoopEndUCF(cmpOpUCF cop, eFlagsRegUCF cmp, label labl) %{
13213 match(CountedLoopEnd cop cmp);
13214 effect(USE labl);
13216 ins_cost(200);
13217 format %{ "J$cop,u $labl\t# Loop end" %}
13218 opcode(0x0F, 0x80);
13219 ins_encode( Jcc( cop, labl) );
13220 ins_pipe( pipe_jump );
13221 ins_pc_relative(1);
13222 %}
13223 */
// This match pattern is created for StoreIConditional since I cannot match IfNode without a RegFlags! fujie 2012/07/17
// The flag producer (storeIConditional) leaves AT nonzero (0xFF) on success
// and zero on failure, so "equal" branches on AT != 0 and "not equal" on
// AT == 0 — the branches are intentionally inverted relative to their names.
instruct jmpCon_flags(cmpOp cop, FlagsReg cr, label labl) %{
  match(If cop cr);
  effect(USE labl);

  ins_cost(300);
  format %{ "J$cop $labl  #mips uses AT as eflag @jmpCon_flags" %}

  ins_encode %{
    Label &L =  *($labl$$label);
    switch($cop$$cmpcode)
    {
      case 0x01: //equal
        if (&L)
          __ bne(AT, R0, L);
        else
          __ bne(AT, R0, (int)0);
        break;
      case 0x02: //not equal
        if (&L)
          __ beq(AT, R0, L);
        else
          __ beq(AT, R0, (int)0);
        break;
      default:
        Unimplemented();
    }
    __ nop();  // branch delay slot
  %}

  ins_pipe( pipe_jump );
  ins_pc_relative(1);
%}
// ============================================================================
// The 2nd slow-half of a subtype check.  Scan the subklass's 2ndary superklass
// array for an instance of the superklass.  Set a hidden internal cache on a
// hit (cache is checked with exposed code in gen_subtype_check()).  Return
// NZ for a miss or zero for a hit.  The encoding ALSO sets flags.
instruct partialSubtypeCheck( mRegP result, no_T8_mRegP sub, no_T8_mRegP super, mT8RegI tmp ) %{
  match(Set result (PartialSubtypeCheck sub super));
  effect(KILL tmp);
  ins_cost(1100);  // slightly larger than the next version
  format %{ "partialSubtypeCheck result=$result, sub=$sub, super=$super, tmp=$tmp " %}

  ins_encode( enc_PartialSubtypeCheck(result, sub, super, tmp) );
  ins_pipe( pipe_slow );
%}
13276 // Conditional-store of an int value.
13277 // ZF flag is set on success, reset otherwise. Implemented with a CMPXCHG on Intel.
// Conditional store of an int: succeeds only if the memory word still equals
// oldval.  Implemented as an LL/SC retry loop; after the loop AT holds 0xFF on
// success and 0 on failure (cr result).
instruct storeIConditional( memory mem, mRegI oldval, mRegI newval, FlagsReg cr ) %{
  match(Set cr (StoreIConditional mem (Binary oldval newval)));
  // effect(KILL oldval);
  format %{ "CMPXCHG $newval, $mem, $oldval \t# @storeIConditional" %}

  ins_encode %{
    Register oldval = $oldval$$Register;
    Register newval = $newval$$Register;
    Address addr(as_Register($mem$$base), $mem$$disp);
    Label again, failure;

    // int base = $mem$$base;
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;

    // ll/sc take a 16-bit signed displacement only.
    guarantee(Assembler::is_simm16(disp), "");

    if( index != 0 ) {
      // Indexed addressing is not supported by this encoding.
      __ stop("in storeIConditional: index != 0");
    } else {
      __ bind(again);
      // 3A2000 and later have a coherent ll; older cores need an explicit sync.
      if(!Use3A2000) __ sync();
      __ ll(AT, addr);
      __ bne(AT, oldval, failure);
      __ delayed()->addu(AT, R0, R0);   // delay slot: AT = 0 (failure result)

      __ addu(AT, newval, R0);          // sc clobbers its source, so copy newval
      __ sc(AT, addr);
      __ beq(AT, R0, again);            // sc wrote 0 -> lost reservation, retry
      __ delayed()->addiu(AT, R0, 0xFF); // delay slot: AT = 0xFF (success result)
      __ bind(failure);
      __ sync();
    }
  %}

  ins_pipe( long_memory_op );
%}
13317 // Conditional-store of a long value.
13318 // ZF flag is set on success, reset otherwise. Implemented with a CMPXCHG.
// Conditional store of a long: cr is set iff the memory value still equals
// oldval and newval was stored.  Delegates to MacroAssembler::cmpxchg.
instruct storeLConditional(memory mem, t2RegL oldval, mRegL newval, FlagsReg cr )
%{
  match(Set cr (StoreLConditional mem (Binary oldval newval)));
  effect(KILL oldval);  // cmpxchg clobbers the expected-value register

  format %{ "cmpxchg $mem, $newval\t# If $oldval == $mem then store $newval into $mem" %}
  ins_encode%{
    Register oldval = $oldval$$Register;
    Register newval = $newval$$Register;
    Address addr((Register)$mem$$base, $mem$$disp);

    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;

    // lld/scd take a 16-bit signed displacement only.
    guarantee(Assembler::is_simm16(disp), "");

    if( index != 0 ) {
      // Fixed copy-paste bug: the diagnostic previously named storeIConditional.
      __ stop("in storeLConditional: index != 0");
    } else {
      __ cmpxchg(newval, addr, oldval);
    }
  %}
  ins_pipe( long_memory_op );
%}
// CAS of a 32-bit int at [mem_ptr]; res receives the success flag produced by
// cmpxchg32 (copied out of AT).
instruct compareAndSwapI( mRegI res, mRegP mem_ptr, mS2RegI oldval, mRegI newval) %{
  match(Set res (CompareAndSwapI mem_ptr (Binary oldval newval)));
  effect(KILL oldval);  // cmpxchg32 clobbers the expected-value register
  // Format updated to describe what is actually emitted (the old text showed a
  // MOV/BNE/MOV skeleton that this encode never generates); also dropped the
  // unused local "Label L".
  format %{ "CMPXCHG $newval, [$mem_ptr], $oldval @ compareAndSwapI\n\t"
            "MOV $res, AT @ compareAndSwapI" %}
  ins_encode %{
    Register newval = $newval$$Register;
    Register oldval = $oldval$$Register;
    Register res    = $res$$Register;
    Address  addr($mem_ptr$$Register, 0);

    // AT holds the CAS success flag after cmpxchg32.
    __ cmpxchg32(newval, addr, oldval);
    __ move(res, AT);
  %}
  ins_pipe( long_memory_op );
%}
13368 //FIXME:
//FIXME:
// CAS of a pointer (64-bit) at [mem_ptr]; res receives the success flag
// produced by cmpxchg (copied out of AT).
instruct compareAndSwapP( mRegI res, mRegP mem_ptr, s2_RegP oldval, mRegP newval) %{
  match(Set res (CompareAndSwapP mem_ptr (Binary oldval newval)));
  effect(KILL oldval);  // cmpxchg clobbers the expected-value register
  // Dropped the unused local "Label L" and the stale "L:" line from the format
  // (no label is ever emitted by this encode).
  format %{ "CMPXCHG $newval, [$mem_ptr], $oldval @ compareAndSwapP\n\t"
            "MOV $res, AT @ compareAndSwapP" %}
  ins_encode %{
    Register newval = $newval$$Register;
    Register oldval = $oldval$$Register;
    Register res    = $res$$Register;
    Address  addr($mem_ptr$$Register, 0);

    // AT holds the CAS success flag after cmpxchg.
    __ cmpxchg(newval, addr, oldval);
    __ move(res, AT);
  %}
  ins_pipe( long_memory_op );
%}
// CAS of a compressed oop (32-bit narrow pointer) at [mem_ptr]; res receives
// the success flag produced by cmpxchg32 (copied out of AT).
instruct compareAndSwapN( mRegI res, mRegP mem_ptr, t2_RegN oldval, mRegN newval) %{
  match(Set res (CompareAndSwapN mem_ptr (Binary oldval newval)));
  effect(KILL oldval);  // sign-extended in place below, then clobbered by cmpxchg32
  // Dropped the unused local "Label L" and the stale "L:" line from the format
  // (no label is ever emitted by this encode).
  format %{ "CMPXCHG $newval, [$mem_ptr], $oldval @ compareAndSwapN\n\t"
            "MOV $res, AT @ compareAndSwapN" %}
  ins_encode %{
    Register newval = $newval$$Register;
    Register oldval = $oldval$$Register;
    Register res    = $res$$Register;
    Address  addr($mem_ptr$$Register, 0);

    // 2013/7/19 Jin: cmpxchg32 is implemented with ll/sc, which sign-extends
    // the loaded word.  Sign-extend oldval the same way (sll by 0) so the
    // comparison is performed on identically-extended values.
    __ sll(oldval, oldval, 0);

    // AT holds the CAS success flag after cmpxchg32.
    __ cmpxchg32(newval, addr, oldval);
    __ move(res, AT);
  %}
  ins_pipe( long_memory_op );
%}
13412 //----------Max and Min--------------------------------------------------------
13413 // Min Instructions
13414 ////
13415 // *** Min and Max using the conditional move are slower than the
13416 // *** branch version on a Pentium III.
13417 // // Conditional move for min
13418 //instruct cmovI_reg_lt( eRegI op2, eRegI op1, eFlagsReg cr ) %{
13419 // effect( USE_DEF op2, USE op1, USE cr );
13420 // format %{ "CMOVlt $op2,$op1\t! min" %}
13421 // opcode(0x4C,0x0F);
13422 // ins_encode( OpcS, OpcP, RegReg( op2, op1 ) );
13423 // ins_pipe( pipe_cmov_reg );
13424 //%}
13425 //
13426 //// Min Register with Register (P6 version)
13427 //instruct minI_eReg_p6( eRegI op1, eRegI op2 ) %{
13428 // predicate(VM_Version::supports_cmov() );
13429 // match(Set op2 (MinI op1 op2));
13430 // ins_cost(200);
13431 // expand %{
13432 // eFlagsReg cr;
13433 // compI_eReg(cr,op1,op2);
13434 // cmovI_reg_lt(op2,op1,cr);
13435 // %}
13436 //%}
13438 // Min Register with Register (generic version)
// Min Register with Register (generic version): dst = min(dst, src), via a
// set-on-less-than followed by a conditional move (no branch).
instruct minI_Reg_Reg(mRegI dst, mRegI src) %{
  match(Set dst (MinI dst src));
  //effect(KILL flags);
  ins_cost(80);

  format %{ "MIN $dst, $src @minI_Reg_Reg" %}
  ins_encode %{
    Register dst = $dst$$Register;
    Register src = $src$$Register;

    // AT = (src < dst); if so, move src into dst.
    __ slt(AT, src, dst);
    __ movn(dst, src, AT);

  %}

  ins_pipe( pipe_slow );
%}
13457 // Max Register with Register
13458 // *** Min and Max using the conditional move are slower than the
13459 // *** branch version on a Pentium III.
13460 // // Conditional move for max
13461 //instruct cmovI_reg_gt( eRegI op2, eRegI op1, eFlagsReg cr ) %{
13462 // effect( USE_DEF op2, USE op1, USE cr );
13463 // format %{ "CMOVgt $op2,$op1\t! max" %}
13464 // opcode(0x4F,0x0F);
13465 // ins_encode( OpcS, OpcP, RegReg( op2, op1 ) );
13466 // ins_pipe( pipe_cmov_reg );
13467 //%}
13468 //
13469 // // Max Register with Register (P6 version)
13470 //instruct maxI_eReg_p6( eRegI op1, eRegI op2 ) %{
13471 // predicate(VM_Version::supports_cmov() );
13472 // match(Set op2 (MaxI op1 op2));
13473 // ins_cost(200);
13474 // expand %{
13475 // eFlagsReg cr;
13476 // compI_eReg(cr,op1,op2);
13477 // cmovI_reg_gt(op2,op1,cr);
13478 // %}
13479 //%}
13481 // Max Register with Register (generic version)
// Max Register with Register (generic version): dst = max(dst, src), via a
// set-on-less-than followed by a conditional move (no branch).
instruct maxI_Reg_Reg(mRegI dst, mRegI src) %{
  match(Set dst (MaxI dst src));
  ins_cost(80);

  format %{ "MAX $dst, $src @maxI_Reg_Reg" %}

  ins_encode %{
    Register dst = $dst$$Register;
    Register src = $src$$Register;

    // AT = (dst < src); if so, move src into dst.
    __ slt(AT, dst, src);
    __ movn(dst, src, AT);

  %}

  ins_pipe( pipe_slow );
%}

// Special case: dst = max(dst, 0) — clamp negative values to zero.
instruct maxI_Reg_zero(mRegI dst, immI0 zero) %{
  match(Set dst (MaxI dst zero));
  ins_cost(50);

  format %{ "MAX $dst, 0 @maxI_Reg_zero" %}

  ins_encode %{
    Register dst = $dst$$Register;

    // AT = (dst < 0); if so, replace dst with zero.
    __ slt(AT, dst, R0);
    __ movn(dst, R0, AT);

  %}

  ins_pipe( pipe_slow );
%}
// Zero out the upper 32 bits of a long: matches AndL with the 0xFFFFFFFF mask
// and implements it with a single dext (extract low 32 bits, zero-extended).
instruct zerox_long_reg_reg(mRegL dst, mRegL src, immL_32bits mask)
%{
  match(Set dst (AndL src mask));

  format %{ "movl $dst, $src\t# zero-extend long @ zerox_long_reg_reg" %}
  ins_encode %{
    Register dst = $dst$$Register;
    Register src = $src$$Register;

    __ dext(dst, src, 0, 32);
  %}
  ins_pipe(ialu_regI_regI);
%}

// Pack two ints into one long: src1 becomes the low 32 bits, src2 the high 32.
// Matches (zero-extended src1) OR (src2 << 32).
instruct combine_i2l(mRegL dst, mRegI src1, immL_32bits mask, mRegI src2, immI_32 shift32)
%{
  match(Set dst (OrL (AndL (ConvI2L src1) mask) (LShiftL (ConvI2L src2) shift32)));

  format %{ "combine_i2l $dst, $src2(H), $src1(L) @ combine_i2l" %}
  ins_encode %{
    Register dst = $dst$$Register;
    Register src1 = $src1$$Register;
    Register src2 = $src2$$Register;

    if (src1 == dst) {
      // Low word already in place: just insert src2 into the high half.
      __ dinsu(dst, src2, 32, 32);
    } else if (src2 == dst) {
      // Move dst's low word into the high half, then insert src1 low.
      __ dsll32(dst, dst, 0);
      __ dins(dst, src1, 0, 32);
    } else {
      // Distinct registers: build low half from src1, high half from src2.
      __ dext(dst, src1, 0, 32);
      __ dinsu(dst, src2, 32, 32);
    }
  %}
  ins_pipe(ialu_regI_regI);
%}

// Zero-extend convert int to long
instruct convI2L_reg_reg_zex(mRegL dst, mRegI src, immL_32bits mask)
%{
  match(Set dst (AndL (ConvI2L src) mask));

  format %{ "movl $dst, $src\t# i2l zero-extend @ convI2L_reg_reg_zex" %}
  ins_encode %{
    Register dst = $dst$$Register;
    Register src = $src$$Register;

    __ dext(dst, src, 0, 32);
  %}
  ins_pipe(ialu_regI_regI);
%}

// Same as above, but the int came from narrowing a long (ConvL2I then ConvI2L):
// the net effect is still "keep the low 32 bits, zero-extended".
instruct convL2I2L_reg_reg_zex(mRegL dst, mRegL src, immL_32bits mask)
%{
  match(Set dst (AndL (ConvI2L (ConvL2I src)) mask));

  format %{ "movl $dst, $src\t# i2l zero-extend @ convL2I2L_reg_reg_zex" %}
  ins_encode %{
    Register dst = $dst$$Register;
    Register src = $src$$Register;

    __ dext(dst, src, 0, 32);
  %}
  ins_pipe(ialu_regI_regI);
%}

// Match loading integer and casting it to unsigned int in long register.
// LoadI + ConvI2L + AndL 0xffffffff.
instruct loadUI2L_rmask(mRegL dst, memory mem, immL_32bits mask) %{
  match(Set dst (AndL (ConvI2L (LoadI mem)) mask));

  format %{ "lwu $dst, $mem \t// zero-extend to long @ loadUI2L_rmask" %}
  // load_N_enc performs the zero-extending 32-bit load directly.
  ins_encode (load_N_enc(dst, mem));
  ins_pipe(ialu_loadI);
%}

// Same pattern with the mask on the left-hand side of the AndL.
instruct loadUI2L_lmask(mRegL dst, memory mem, immL_32bits mask) %{
  match(Set dst (AndL mask (ConvI2L (LoadI mem))));

  format %{ "lwu $dst, $mem \t// zero-extend to long @ loadUI2L_lmask" %}
  ins_encode (load_N_enc(dst, mem));
  ins_pipe(ialu_loadI);
%}
13602 // ============================================================================
13603 // Safepoint Instruction
// Safepoint poll using a register that already holds the polling-page address.
// Currently disabled via predicate(false); the variant below is used instead.
instruct safePoint_poll_reg(mRegP poll) %{
  match(SafePoint poll);
  predicate(false);
  effect(USE poll);

  ins_cost(125);
  format %{ "Safepoint @ [$poll] : poll for GC @ safePoint_poll_reg" %}

  ins_encode %{
    Register poll_reg = $poll$$Register;

    __ block_comment("Safepoint:");
    // Mark the load for the poll relocator, then touch the polling page;
    // the VM makes the page unreadable to trap threads at safepoints.
    __ relocate(relocInfo::poll_type);
    __ lw(AT, poll_reg, 0);
  %}

  ins_pipe( ialu_storeI );
%}

// Safepoint poll: materialize the polling-page address in T9, then load from
// it.  The read faults when the VM protects the page, stopping this thread.
instruct safePoint_poll() %{
  match(SafePoint);

  ins_cost(105);
  format %{ "poll for GC @ safePoint_poll" %}

  ins_encode %{
    __ block_comment("Safepoint:");
    __ set64(T9, (long)os::get_polling_page());
    __ relocate(relocInfo::poll_type);
    __ lw(AT, T9, 0);
  %}

  ins_pipe( ialu_storeI );
%}
13639 //----------Arithmetic Conversion Instructions---------------------------------
// RoundFloat is a no-op on this target: FP registers already hold values at
// the correct precision, so no code is emitted (cost 0).
instruct roundFloat_nop(regF dst)
%{
  match(Set dst (RoundFloat dst));

  ins_cost(0);
  ins_encode();
  ins_pipe(empty);
%}

// RoundDouble is likewise a no-op: no code emitted.
instruct roundDouble_nop(regD dst)
%{
  match(Set dst (RoundDouble dst));

  ins_cost(0);
  ins_encode();
  ins_pipe(empty);
%}
13659 //---------- Zeros Count Instructions ------------------------------------------
13660 // CountLeadingZerosINode CountTrailingZerosINode
// Count leading zeros of an int with the hardware clz instruction.
instruct countLeadingZerosI(mRegI dst, mRegI src) %{
  predicate(UseCountLeadingZerosInstruction);
  match(Set dst (CountLeadingZerosI src));

  format %{ "clz $dst, $src\t# count leading zeros (int)" %}
  ins_encode %{
    __ clz($dst$$Register, $src$$Register);
  %}
  ins_pipe( ialu_regL_regL );
%}

// Count leading zeros of a long with the 64-bit dclz instruction.
instruct countLeadingZerosL(mRegI dst, mRegL src) %{
  predicate(UseCountLeadingZerosInstruction);
  match(Set dst (CountLeadingZerosL src));

  format %{ "dclz $dst, $src\t# count leading zeros (long)" %}
  ins_encode %{
    __ dclz($dst$$Register, $src$$Register);
  %}
  ins_pipe( ialu_regL_regL );
%}

// Count trailing zeros of an int.
instruct countTrailingZerosI(mRegI dst, mRegI src) %{
  predicate(UseCountTrailingZerosInstruction);
  match(Set dst (CountTrailingZerosI src));

  format %{ "ctz $dst, $src\t# count trailing zeros (int)" %}
  ins_encode %{
    // ctz and dctz are Loongson (gs) extension instructions.
    __ ctz($dst$$Register, $src$$Register);
  %}
  ins_pipe( ialu_regL_regL );
%}
// Count trailing zeros of a long with the Loongson (gs) dctz instruction.
instruct countTrailingZerosL(mRegI dst, mRegL src) %{
  predicate(UseCountTrailingZerosInstruction);
  match(Set dst (CountTrailingZerosL src));

  // Fixed format typo: the emitted instruction is dctz, not "dcto".
  format %{ "dctz $dst, $src\t# count trailing zeros (long)" %}
  ins_encode %{
    __ dctz($dst$$Register, $src$$Register);
  %}
  ins_pipe( ialu_regL_regL );
%}
13706 // ====================VECTOR INSTRUCTIONS=====================================
13708 // Load vectors (8 bytes long)
// Load an 8-byte vector from memory into a double FP register.
instruct loadV8(vecD dst, memory mem) %{
  predicate(n->as_LoadVector()->memory_size() == 8);
  match(Set dst (LoadVector mem));
  ins_cost(125);
  format %{ "load $dst, $mem\t! load vector (8 bytes)" %}
  // Reuses the double-load encoding: an 8-byte vector is one 64-bit FP load.
  ins_encode(load_D_enc(dst, mem));
  ins_pipe( fpu_loadF );
%}

// Store vectors (8 bytes long)
instruct storeV8(memory mem, vecD src) %{
  predicate(n->as_StoreVector()->memory_size() == 8);
  match(Set mem (StoreVector mem src));
  ins_cost(145);
  format %{ "store $mem, $src\t! store vector (8 bytes)" %}
  // Reuses the double-store encoding for the 64-bit vector payload.
  ins_encode(store_D_reg_enc(mem, src));
  ins_pipe( fpu_storeF );
%}
// Replicate a byte into all 8 lanes of a 64-bit vector, using the Loongson
// DSP replv_ob instruction (3A2000 and later only).
instruct Repl8B_DSP(vecD dst, mRegI src) %{
  predicate(n->as_Vector()->length() == 8 && Use3A2000);
  match(Set dst (ReplicateB src));
  ins_cost(100);
  format %{ "replv_ob AT, $src\n\t"
            "dmtc1 AT, $dst\t! replicate8B" %}
  ins_encode %{
    __ replv_ob(AT, $src$$Register);
    __ dmtc1(AT, $dst$$FloatRegister);
  %}
  ins_pipe( pipe_mtc1 );
%}

// Generic 8-byte replicate: widen the byte across AT with successive
// bit-field inserts (8->16->32->64 bits), then move to the FP register.
instruct Repl8B(vecD dst, mRegI src) %{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (ReplicateB src));
  ins_cost(140);
  format %{ "move AT, $src\n\t"
            "dins AT, AT, 8, 8\n\t"
            "dins AT, AT, 16, 16\n\t"
            "dinsu AT, AT, 32, 32\n\t"
            "dmtc1 AT, $dst\t! replicate8B" %}
  ins_encode %{
    __ move(AT, $src$$Register);
    __ dins(AT, AT, 8, 8);
    __ dins(AT, AT, 16, 16);
    __ dinsu(AT, AT, 32, 32);
    __ dmtc1(AT, $dst$$FloatRegister);
  %}
  ins_pipe( pipe_mtc1 );
%}

// Replicate a byte constant into 8 lanes via the DSP repl_ob immediate form
// (3A2000 and later only).
instruct Repl8B_imm_DSP(vecD dst, immI con) %{
  predicate(n->as_Vector()->length() == 8 && Use3A2000);
  match(Set dst (ReplicateB con));
  ins_cost(110);
  format %{ "repl_ob AT, [$con]\n\t"
            "dmtc1 AT, $dst,0x00\t! replicate8B($con)" %}
  ins_encode %{
    int val = $con$$constant;
    __ repl_ob(AT, val);
    __ dmtc1(AT, $dst$$FloatRegister);
  %}
  ins_pipe( pipe_mtc1 );
%}

// Generic 8-byte replicate of a constant: same doubling dins sequence as
// Repl8B, starting from the immediate.
instruct Repl8B_imm(vecD dst, immI con) %{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (ReplicateB con));
  ins_cost(150);
  format %{ "move AT, [$con]\n\t"
            "dins AT, AT, 8, 8\n\t"
            "dins AT, AT, 16, 16\n\t"
            "dinsu AT, AT, 32, 32\n\t"
            "dmtc1 AT, $dst,0x00\t! replicate8B($con)" %}
  ins_encode %{
    __ move(AT, $con$$constant);
    __ dins(AT, AT, 8, 8);
    __ dins(AT, AT, 16, 16);
    __ dinsu(AT, AT, 32, 32);
    __ dmtc1(AT, $dst$$FloatRegister);
  %}
  ins_pipe( pipe_mtc1 );
%}

// All-zero 8-byte vector: move R0 straight into the FP register.
instruct Repl8B_zero(vecD dst, immI0 zero) %{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (ReplicateB zero));
  ins_cost(90);
  format %{ "dmtc1 R0, $dst\t! replicate8B zero" %}
  ins_encode %{
    __ dmtc1(R0, $dst$$FloatRegister);
  %}
  ins_pipe( pipe_mtc1 );
%}

// All-ones 8-byte vector: nor(R0, R0) yields ~0 (-1) in AT.
instruct Repl8B_M1(vecD dst, immI_M1 M1) %{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (ReplicateB M1));
  ins_cost(80);
  format %{ "dmtc1 -1, $dst\t! replicate8B -1" %}
  ins_encode %{
    __ nor(AT, R0, R0);
    __ dmtc1(AT, $dst$$FloatRegister);
  %}
  ins_pipe( pipe_mtc1 );
%}
// Replicate a short into all 4 lanes of a 64-bit vector, using the Loongson
// DSP replv_qh instruction (3A2000 and later only).
instruct Repl4S_DSP(vecD dst, mRegI src) %{
  predicate(n->as_Vector()->length() == 4 && Use3A2000);
  match(Set dst (ReplicateS src));
  ins_cost(100);
  format %{ "replv_qh AT, $src\n\t"
            "dmtc1 AT, $dst\t! replicate4S" %}
  ins_encode %{
    __ replv_qh(AT, $src$$Register);
    __ dmtc1(AT, $dst$$FloatRegister);
  %}
  ins_pipe( pipe_mtc1 );
%}

// Generic 4-short replicate: widen the halfword across AT with bit-field
// inserts (16->32->64 bits), then move to the FP register.
instruct Repl4S(vecD dst, mRegI src) %{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (ReplicateS src));
  ins_cost(120);
  format %{ "move AT, $src \n\t"
            "dins AT, AT, 16, 16\n\t"
            "dinsu AT, AT, 32, 32\n\t"
            "dmtc1 AT, $dst\t! replicate4S" %}
  ins_encode %{
    __ move(AT, $src$$Register);
    __ dins(AT, AT, 16, 16);
    __ dinsu(AT, AT, 32, 32);
    __ dmtc1(AT, $dst$$FloatRegister);
  %}
  ins_pipe( pipe_mtc1 );
%}

// Replicate a short constant using the DSP repl_qh/replv_qh forms
// (3A2000 and later only).
instruct Repl4S_imm_DSP(vecD dst, immI con) %{
  predicate(n->as_Vector()->length() == 4 && Use3A2000);
  match(Set dst (ReplicateS con));
  ins_cost(100);
  format %{ "replv_qh AT, [$con]\n\t"
            "dmtc1 AT, $dst\t! replicate4S($con)" %}
  ins_encode %{
    int val = $con$$constant;
    if ( Assembler::is_simm(val, 10)) {
      //repl_qh supports 10 bits immediate
      __ repl_qh(AT, val);
    } else {
      // Constant too wide for the immediate form: load it, then replicate.
      __ li32(AT, val);
      __ replv_qh(AT, AT);
    }
    __ dmtc1(AT, $dst$$FloatRegister);
  %}
  ins_pipe( pipe_mtc1 );
%}

// Generic 4-short replicate of a constant: same dins sequence as Repl4S.
instruct Repl4S_imm(vecD dst, immI con) %{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (ReplicateS con));
  ins_cost(110);
  format %{ "move AT, [$con]\n\t"
            "dins AT, AT, 16, 16\n\t"
            "dinsu AT, AT, 32, 32\n\t"
            "dmtc1 AT, $dst\t! replicate4S($con)" %}
  ins_encode %{
    __ move(AT, $con$$constant);
    __ dins(AT, AT, 16, 16);
    __ dinsu(AT, AT, 32, 32);
    __ dmtc1(AT, $dst$$FloatRegister);
  %}
  ins_pipe( pipe_mtc1 );
%}

// All-zero 4-short vector: move R0 straight into the FP register.
instruct Repl4S_zero(vecD dst, immI0 zero) %{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (ReplicateS zero));
  format %{ "dmtc1 R0, $dst\t! replicate4S zero" %}
  ins_encode %{
    __ dmtc1(R0, $dst$$FloatRegister);
  %}
  ins_pipe( pipe_mtc1 );
%}

// All-ones 4-short vector: nor(R0, R0) yields ~0 (-1) in AT.
instruct Repl4S_M1(vecD dst, immI_M1 M1) %{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (ReplicateS M1));
  format %{ "dmtc1 -1, $dst\t! replicate4S -1" %}
  ins_encode %{
    __ nor(AT, R0, R0);
    __ dmtc1(AT, $dst$$FloatRegister);
  %}
  ins_pipe( pipe_mtc1 );
%}
13904 // Replicate integer (4 byte) scalar to be vector
// Replicate integer (4 byte) scalar to be vector: insert src's low word into
// both halves of AT (dins fills bits 0-31, dinsu fills bits 32-63), then move
// the 64-bit result to the FP register.
instruct Repl2I(vecD dst, mRegI src) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (ReplicateI src));
  format %{ "dins AT, $src, 0, 32\n\t"
            "dinsu AT, $src, 32, 32\n\t"
            "dmtc1 AT, $dst\t! replicate2I" %}
  ins_encode %{
    __ dins(AT, $src$$Register, 0, 32);
    __ dinsu(AT, $src$$Register, 32, 32);
    __ dmtc1(AT, $dst$$FloatRegister);
  %}
  ins_pipe( pipe_mtc1 );
%}
13919 // Replicate integer (4 byte) scalar immediate to be vector by loading from const table.
// Replicate an integer constant into both 32-bit halves of a 64-bit vector:
// li32 materializes the constant, dinsu copies the low word into the high word.
instruct Repl2I_imm(vecD dst, immI con, mA7RegI tmp) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (ReplicateI con));
  effect(KILL tmp);
  // Format corrected to match the emitted code (the old text had a stray
  // ", 32" after li32 and a dinsu with missing operands).
  format %{ "li32 AT, [$con]\n\t"
            "dinsu AT, AT, 32, 32\n\t"
            "dmtc1 AT, $dst\t! replicate2I($con)" %}
  ins_encode %{
    int val = $con$$constant;
    __ li32(AT, val);
    __ dinsu(AT, AT, 32, 32);
    __ dmtc1(AT, $dst$$FloatRegister);
  %}
  ins_pipe( pipe_mtc1 );
%}
13936 // Replicate integer (4 byte) scalar zero to be vector
// Replicate integer (4 byte) scalar zero to be vector: R0 provides the
// all-zero 64-bit pattern directly.
instruct Repl2I_zero(vecD dst, immI0 zero) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (ReplicateI zero));
  format %{ "dmtc1 R0, $dst\t! replicate2I zero" %}
  ins_encode %{
    __ dmtc1(R0, $dst$$FloatRegister);
  %}
  ins_pipe( pipe_mtc1 );
%}

// Replicate integer (4 byte) scalar -1 to be vector: nor(R0, R0) puts ~0
// (all ones) in AT.
instruct Repl2I_M1(vecD dst, immI_M1 M1) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (ReplicateI M1));
  format %{ "dmtc1 -1, $dst\t! replicate2I -1, use AT" %}
  ins_encode %{
    __ nor(AT, R0, R0);
    __ dmtc1(AT, $dst$$FloatRegister);
  %}
  ins_pipe( pipe_mtc1 );
%}

// Replicate float (4 byte) scalar to be vector: cvt.ps.s packs the same
// single into both halves of a paired-single register.
instruct Repl2F(vecD dst, regF src) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (ReplicateF src));
  format %{ "cvt.ps $dst, $src, $src\t! replicate2F" %}
  ins_encode %{
    __ cvt_ps_s($dst$$FloatRegister, $src$$FloatRegister, $src$$FloatRegister);
  %}
  ins_pipe( pipe_slow );
%}

// Replicate float (4 byte) scalar zero to be vector: +0.0f is all-zero bits,
// so moving R0 into the FP register suffices.
instruct Repl2F_zero(vecD dst, immF0 zero) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (ReplicateF zero));
  format %{ "dmtc1 R0, $dst\t! replicate2F zero" %}
  ins_encode %{
    __ dmtc1(R0, $dst$$FloatRegister);
  %}
  ins_pipe( pipe_mtc1 );
%}
13982 // ====================VECTOR ARITHMETIC=======================================
13984 // --------------------------------- ADD --------------------------------------
13986 // Floats vector add
13987 // kernel does not have emulation of PS instructions yet, so PS instructions is disabled.
// Packed-single add, two-operand form: dst += src (paired-single FP).
instruct vadd2F(vecD dst, vecD src) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (AddVF dst src));
  format %{ "add.ps $dst,$src\t! add packed2F" %}
  ins_encode %{
    __ add_ps($dst$$FloatRegister, $dst$$FloatRegister, $src$$FloatRegister);
  %}
  ins_pipe( pipe_slow );
%}

// Packed-single add, three-operand form: dst = src1 + src2.
instruct vadd2F3(vecD dst, vecD src1, vecD src2) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (AddVF src1 src2));
  format %{ "add.ps $dst,$src1,$src2\t! add packed2F" %}
  ins_encode %{
    __ add_ps($dst$$FloatRegister, $src1$$FloatRegister, $src2$$FloatRegister);
  %}
  ins_pipe( fpu_regF_regF );
%}

// --------------------------------- SUB --------------------------------------

// Packed-single subtract: dst -= src.
instruct vsub2F(vecD dst, vecD src) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (SubVF dst src));
  format %{ "sub.ps $dst,$src\t! sub packed2F" %}
  ins_encode %{
    __ sub_ps($dst$$FloatRegister, $dst$$FloatRegister, $src$$FloatRegister);
  %}
  ins_pipe( fpu_regF_regF );
%}

// --------------------------------- MUL --------------------------------------

// Packed-single multiply, two-operand form: dst *= src.
instruct vmul2F(vecD dst, vecD src) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (MulVF dst src));
  format %{ "mul.ps $dst, $src\t! mul packed2F" %}
  ins_encode %{
    __ mul_ps($dst$$FloatRegister, $dst$$FloatRegister, $src$$FloatRegister);
  %}
  ins_pipe( fpu_regF_regF );
%}

// Packed-single multiply, three-operand form: dst = src1 * src2.
instruct vmul2F3(vecD dst, vecD src1, vecD src2) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (MulVF src1 src2));
  format %{ "mul.ps $dst, $src1, $src2\t! mul packed2F" %}
  ins_encode %{
    __ mul_ps($dst$$FloatRegister, $src1$$FloatRegister, $src2$$FloatRegister);
  %}
  ins_pipe( fpu_regF_regF );
%}
14044 // --------------------------------- DIV --------------------------------------
14045 // MIPS do not have div.ps
14047 // --------------------------------- MADD --------------------------------------
14048 // Floats vector madd
14049 //instruct vmadd2F(vecD dst, vecD src1, vecD src2, vecD src3) %{
14050 // predicate(n->as_Vector()->length() == 2);
14051 // match(Set dst (AddVF (MulVF src1 src2) src3));
14052 // ins_cost(50);
14053 // format %{ "madd.ps $dst, $src3, $src1, $src2\t! madd packed2F" %}
14054 // ins_encode %{
14055 // __ madd_ps($dst$$FloatRegister, $src3$$FloatRegister, $src1$$FloatRegister, $src2$$FloatRegister);
14056 // %}
14057 // ins_pipe( fpu_regF_regF );
14058 //%}
14061 //----------PEEPHOLE RULES-----------------------------------------------------
14062 // These must follow all instruction definitions as they use the names
14063 // defined in the instructions definitions.
14064 //
14065 // peepmatch ( root_instr_name [preceeding_instruction]* );
14066 //
14067 // peepconstraint %{
14068 // (instruction_number.operand_name relational_op instruction_number.operand_name
14069 // [, ...] );
14070 // // instruction numbers are zero-based using left to right order in peepmatch
14071 //
14072 // peepreplace ( instr_name ( [instruction_number.operand_name]* ) );
14073 // // provide an instruction_number.operand_name for each operand that appears
14074 // // in the replacement instruction's match rule
14075 //
14076 // ---------VM FLAGS---------------------------------------------------------
14077 //
14078 // All peephole optimizations can be turned off using -XX:-OptoPeephole
14079 //
14080 // Each peephole rule is given an identifying number starting with zero and
14081 // increasing by one in the order seen by the parser. An individual peephole
14082 // can be enabled, and all others disabled, by using -XX:OptoPeepholeAt=#
14083 // on the command-line.
14084 //
14085 // ---------CURRENT LIMITATIONS----------------------------------------------
14086 //
14087 // Only match adjacent instructions in same basic block
14088 // Only equality constraints
14089 // Only constraints between operands, not (0.dest_reg == EAX_enc)
14090 // Only one replacement instruction
14091 //
14092 // ---------EXAMPLE----------------------------------------------------------
14093 //
14094 // // pertinent parts of existing instructions in architecture description
14095 // instruct movI(eRegI dst, eRegI src) %{
14096 // match(Set dst (CopyI src));
14097 // %}
14098 //
14099 // instruct incI_eReg(eRegI dst, immI1 src, eFlagsReg cr) %{
14100 // match(Set dst (AddI dst src));
14101 // effect(KILL cr);
14102 // %}
14103 //
14104 // // Change (inc mov) to lea
14105 // peephole %{
14106 // // increment preceeded by register-register move
14107 // peepmatch ( incI_eReg movI );
14108 // // require that the destination register of the increment
14109 // // match the destination register of the move
14110 // peepconstraint ( 0.dst == 1.dst );
14111 // // construct a replacement instruction that sets
14112 // // the destination to ( move's source register + one )
14113 // peepreplace ( leaI_eReg_immI( 0.dst 1.src 0.src ) );
14114 // %}
14115 //
14116 // Implementation no longer uses movX instructions since
14117 // machine-independent system no longer uses CopyX nodes.
14118 //
14119 // peephole %{
14120 // peepmatch ( incI_eReg movI );
14121 // peepconstraint ( 0.dst == 1.dst );
14122 // peepreplace ( leaI_eReg_immI( 0.dst 1.src 0.src ) );
14123 // %}
14124 //
14125 // peephole %{
14126 // peepmatch ( decI_eReg movI );
14127 // peepconstraint ( 0.dst == 1.dst );
14128 // peepreplace ( leaI_eReg_immI( 0.dst 1.src 0.src ) );
14129 // %}
14130 //
14131 // peephole %{
14132 // peepmatch ( addI_eReg_imm movI );
14133 // peepconstraint ( 0.dst == 1.dst );
14134 // peepreplace ( leaI_eReg_immI( 0.dst 1.src 0.src ) );
14135 // %}
14136 //
14137 // peephole %{
14138 // peepmatch ( addP_eReg_imm movP );
14139 // peepconstraint ( 0.dst == 1.dst );
14140 // peepreplace ( leaP_eReg_immI( 0.dst 1.src 0.src ) );
14141 // %}
14143 // // Change load of spilled value to only a spill
14144 // instruct storeI(memory mem, eRegI src) %{
14145 // match(Set mem (StoreI mem src));
14146 // %}
14147 //
14148 // instruct loadI(eRegI dst, memory mem) %{
14149 // match(Set dst (LoadI mem));
14150 // %}
14151 //
14152 //peephole %{
14153 // peepmatch ( loadI storeI );
14154 // peepconstraint ( 1.src == 0.dst, 1.mem == 0.mem );
14155 // peepreplace ( storeI( 1.mem 1.mem 1.src ) );
14156 //%}
14158 //----------SMARTSPILL RULES---------------------------------------------------
14159 // These must follow all instruction definitions as they use the names
14160 // defined in the instructions definitions.