// Mon, 13 Mar 2017 21:54:36 +0800
// load_klass(AT, receiver) --> load_klass(T9, receiver)
1 //
2 // Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved.
3 // Copyright (c) 2015, 2016, Loongson Technology. All rights reserved.
4 // DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5 //
6 // This code is free software; you can redistribute it and/or modify it
7 // under the terms of the GNU General Public License version 2 only, as
8 // published by the Free Software Foundation.
9 //
10 // This code is distributed in the hope that it will be useful, but WITHOUT
11 // ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 // FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
13 // version 2 for more details (a copy is included in the LICENSE file that
14 // accompanied this code).
15 //
16 // You should have received a copy of the GNU General Public License version
17 // 2 along with this work; if not, write to the Free Software Foundation,
18 // Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
19 //
20 // Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
21 // or visit www.oracle.com if you need additional information or have any
22 // questions.
23 //
24 //
26 // GodSon3 Architecture Description File
28 //----------REGISTER DEFINITION BLOCK------------------------------------------
29 // This information is used by the matcher and the register allocator to
30 // describe individual registers and classes of registers within the target
31 // archtecture.
33 // format:
34 // reg_def name (call convention, c-call convention, ideal type, encoding);
35 // call convention :
36 // NS = No-Save
37 // SOC = Save-On-Call
38 // SOE = Save-On-Entry
39 // AS = Always-Save
40 // ideal type :
41 // see opto/opcodes.hpp for more info
42 // reg_class name (reg, ...);
43 // alloc_class name (reg, ...);
44 register %{
46 // General Registers
47 // Integer Registers
// Each 64-bit general-purpose register is described as a pair of reg_defs:
// the base name for the low word and <name>_H for the high word, both
// carrying the same hardware encoding (column 4).
48 reg_def R0 ( NS, NS, Op_RegI, 0, VMRegImpl::Bad());
// AT is the assembler temporary; it is never allocated (NS/NS, no _H pair
// participates in any allocatable class below).
49 reg_def AT ( NS, NS, Op_RegI, 1, AT->as_VMReg());
50 reg_def AT_H ( NS, NS, Op_RegI, 1, AT->as_VMReg()->next());
51 reg_def V0 (SOC, SOC, Op_RegI, 2, V0->as_VMReg());
52 reg_def V0_H (SOC, SOC, Op_RegI, 2, V0->as_VMReg()->next());
53 reg_def V1 (SOC, SOC, Op_RegI, 3, V1->as_VMReg());
54 reg_def V1_H (SOC, SOC, Op_RegI, 3, V1->as_VMReg()->next());
55 reg_def A0 (SOC, SOC, Op_RegI, 4, A0->as_VMReg());
56 reg_def A0_H (SOC, SOC, Op_RegI, 4, A0->as_VMReg()->next());
57 reg_def A1 (SOC, SOC, Op_RegI, 5, A1->as_VMReg());
58 reg_def A1_H (SOC, SOC, Op_RegI, 5, A1->as_VMReg()->next());
59 reg_def A2 (SOC, SOC, Op_RegI, 6, A2->as_VMReg());
60 reg_def A2_H (SOC, SOC, Op_RegI, 6, A2->as_VMReg()->next());
61 reg_def A3 (SOC, SOC, Op_RegI, 7, A3->as_VMReg());
62 reg_def A3_H (SOC, SOC, Op_RegI, 7, A3->as_VMReg()->next());
63 reg_def A4 (SOC, SOC, Op_RegI, 8, A4->as_VMReg());
64 reg_def A4_H (SOC, SOC, Op_RegI, 8, A4->as_VMReg()->next());
65 reg_def A5 (SOC, SOC, Op_RegI, 9, A5->as_VMReg());
66 reg_def A5_H (SOC, SOC, Op_RegI, 9, A5->as_VMReg()->next());
67 reg_def A6 (SOC, SOC, Op_RegI, 10, A6->as_VMReg());
68 reg_def A6_H (SOC, SOC, Op_RegI, 10, A6->as_VMReg()->next());
69 reg_def A7 (SOC, SOC, Op_RegI, 11, A7->as_VMReg());
70 reg_def A7_H (SOC, SOC, Op_RegI, 11, A7->as_VMReg()->next());
71 reg_def T0 (SOC, SOC, Op_RegI, 12, T0->as_VMReg());
72 reg_def T0_H (SOC, SOC, Op_RegI, 12, T0->as_VMReg()->next());
73 reg_def T1 (SOC, SOC, Op_RegI, 13, T1->as_VMReg());
74 reg_def T1_H (SOC, SOC, Op_RegI, 13, T1->as_VMReg()->next());
75 reg_def T2 (SOC, SOC, Op_RegI, 14, T2->as_VMReg());
76 reg_def T2_H (SOC, SOC, Op_RegI, 14, T2->as_VMReg()->next());
77 reg_def T3 (SOC, SOC, Op_RegI, 15, T3->as_VMReg());
78 reg_def T3_H (SOC, SOC, Op_RegI, 15, T3->as_VMReg()->next());
// S0-S7 are callee-saved in the native ABI: save-on-call for C code,
// save-on-entry for compiled Java code (SOC, SOE).
79 reg_def S0 (SOC, SOE, Op_RegI, 16, S0->as_VMReg());
80 reg_def S0_H (SOC, SOE, Op_RegI, 16, S0->as_VMReg()->next());
81 reg_def S1 (SOC, SOE, Op_RegI, 17, S1->as_VMReg());
82 reg_def S1_H (SOC, SOE, Op_RegI, 17, S1->as_VMReg()->next());
83 reg_def S2 (SOC, SOE, Op_RegI, 18, S2->as_VMReg());
84 reg_def S2_H (SOC, SOE, Op_RegI, 18, S2->as_VMReg()->next());
85 reg_def S3 (SOC, SOE, Op_RegI, 19, S3->as_VMReg());
86 reg_def S3_H (SOC, SOE, Op_RegI, 19, S3->as_VMReg()->next());
87 reg_def S4 (SOC, SOE, Op_RegI, 20, S4->as_VMReg());
88 reg_def S4_H (SOC, SOE, Op_RegI, 20, S4->as_VMReg()->next());
89 reg_def S5 (SOC, SOE, Op_RegI, 21, S5->as_VMReg());
90 reg_def S5_H (SOC, SOE, Op_RegI, 21, S5->as_VMReg()->next());
91 reg_def S6 (SOC, SOE, Op_RegI, 22, S6->as_VMReg());
92 reg_def S6_H (SOC, SOE, Op_RegI, 22, S6->as_VMReg()->next());
93 reg_def S7 (SOC, SOE, Op_RegI, 23, S7->as_VMReg());
94 reg_def S7_H (SOC, SOE, Op_RegI, 23, S7->as_VMReg()->next());
95 reg_def T8 (SOC, SOC, Op_RegI, 24, T8->as_VMReg());
96 reg_def T8_H (SOC, SOC, Op_RegI, 24, T8->as_VMReg()->next());
97 reg_def T9 (SOC, SOC, Op_RegI, 25, T9->as_VMReg());
98 reg_def T9_H (SOC, SOC, Op_RegI, 25, T9->as_VMReg()->next());
100 // Special Registers
// Kernel-reserved, global-pointer, stack/frame pointers and return address
// are never allocatable (NS, NS).
101 reg_def K0 ( NS, NS, Op_RegI, 26, K0->as_VMReg());
102 reg_def K1 ( NS, NS, Op_RegI, 27, K1->as_VMReg());
103 reg_def GP ( NS, NS, Op_RegI, 28, GP->as_VMReg());
104 reg_def GP_H ( NS, NS, Op_RegI, 28, GP->as_VMReg()->next());
105 reg_def SP ( NS, NS, Op_RegI, 29, SP->as_VMReg());
106 reg_def SP_H ( NS, NS, Op_RegI, 29, SP->as_VMReg()->next());
107 reg_def FP ( NS, NS, Op_RegI, 30, FP->as_VMReg());
108 reg_def FP_H ( NS, NS, Op_RegI, 30, FP->as_VMReg()->next());
109 reg_def RA ( NS, NS, Op_RegI, 31, RA->as_VMReg());
110 reg_def RA_H ( NS, NS, Op_RegI, 31, RA->as_VMReg()->next());
112 // Floating registers.
// All 32 FP registers are caller-saved (SOC, SOC); like the GPRs, each
// 64-bit register is described as a low/high pair sharing one encoding.
113 reg_def F0 ( SOC, SOC, Op_RegF, 0, F0->as_VMReg());
114 reg_def F0_H ( SOC, SOC, Op_RegF, 0, F0->as_VMReg()->next());
115 reg_def F1 ( SOC, SOC, Op_RegF, 1, F1->as_VMReg());
116 reg_def F1_H ( SOC, SOC, Op_RegF, 1, F1->as_VMReg()->next());
117 reg_def F2 ( SOC, SOC, Op_RegF, 2, F2->as_VMReg());
118 reg_def F2_H ( SOC, SOC, Op_RegF, 2, F2->as_VMReg()->next());
119 reg_def F3 ( SOC, SOC, Op_RegF, 3, F3->as_VMReg());
120 reg_def F3_H ( SOC, SOC, Op_RegF, 3, F3->as_VMReg()->next());
121 reg_def F4 ( SOC, SOC, Op_RegF, 4, F4->as_VMReg());
122 reg_def F4_H ( SOC, SOC, Op_RegF, 4, F4->as_VMReg()->next());
123 reg_def F5 ( SOC, SOC, Op_RegF, 5, F5->as_VMReg());
124 reg_def F5_H ( SOC, SOC, Op_RegF, 5, F5->as_VMReg()->next());
125 reg_def F6 ( SOC, SOC, Op_RegF, 6, F6->as_VMReg());
126 reg_def F6_H ( SOC, SOC, Op_RegF, 6, F6->as_VMReg()->next());
127 reg_def F7 ( SOC, SOC, Op_RegF, 7, F7->as_VMReg());
128 reg_def F7_H ( SOC, SOC, Op_RegF, 7, F7->as_VMReg()->next());
129 reg_def F8 ( SOC, SOC, Op_RegF, 8, F8->as_VMReg());
130 reg_def F8_H ( SOC, SOC, Op_RegF, 8, F8->as_VMReg()->next());
131 reg_def F9 ( SOC, SOC, Op_RegF, 9, F9->as_VMReg());
132 reg_def F9_H ( SOC, SOC, Op_RegF, 9, F9->as_VMReg()->next());
133 reg_def F10 ( SOC, SOC, Op_RegF, 10, F10->as_VMReg());
134 reg_def F10_H ( SOC, SOC, Op_RegF, 10, F10->as_VMReg()->next());
135 reg_def F11 ( SOC, SOC, Op_RegF, 11, F11->as_VMReg());
136 reg_def F11_H ( SOC, SOC, Op_RegF, 11, F11->as_VMReg()->next());
137 reg_def F12 ( SOC, SOC, Op_RegF, 12, F12->as_VMReg());
138 reg_def F12_H ( SOC, SOC, Op_RegF, 12, F12->as_VMReg()->next());
139 reg_def F13 ( SOC, SOC, Op_RegF, 13, F13->as_VMReg());
140 reg_def F13_H ( SOC, SOC, Op_RegF, 13, F13->as_VMReg()->next());
141 reg_def F14 ( SOC, SOC, Op_RegF, 14, F14->as_VMReg());
142 reg_def F14_H ( SOC, SOC, Op_RegF, 14, F14->as_VMReg()->next());
143 reg_def F15 ( SOC, SOC, Op_RegF, 15, F15->as_VMReg());
144 reg_def F15_H ( SOC, SOC, Op_RegF, 15, F15->as_VMReg()->next());
145 reg_def F16 ( SOC, SOC, Op_RegF, 16, F16->as_VMReg());
146 reg_def F16_H ( SOC, SOC, Op_RegF, 16, F16->as_VMReg()->next());
147 reg_def F17 ( SOC, SOC, Op_RegF, 17, F17->as_VMReg());
148 reg_def F17_H ( SOC, SOC, Op_RegF, 17, F17->as_VMReg()->next());
149 reg_def F18 ( SOC, SOC, Op_RegF, 18, F18->as_VMReg());
150 reg_def F18_H ( SOC, SOC, Op_RegF, 18, F18->as_VMReg()->next());
151 reg_def F19 ( SOC, SOC, Op_RegF, 19, F19->as_VMReg());
152 reg_def F19_H ( SOC, SOC, Op_RegF, 19, F19->as_VMReg()->next());
153 reg_def F20 ( SOC, SOC, Op_RegF, 20, F20->as_VMReg());
154 reg_def F20_H ( SOC, SOC, Op_RegF, 20, F20->as_VMReg()->next());
155 reg_def F21 ( SOC, SOC, Op_RegF, 21, F21->as_VMReg());
156 reg_def F21_H ( SOC, SOC, Op_RegF, 21, F21->as_VMReg()->next());
157 reg_def F22 ( SOC, SOC, Op_RegF, 22, F22->as_VMReg());
158 reg_def F22_H ( SOC, SOC, Op_RegF, 22, F22->as_VMReg()->next());
159 reg_def F23 ( SOC, SOC, Op_RegF, 23, F23->as_VMReg());
160 reg_def F23_H ( SOC, SOC, Op_RegF, 23, F23->as_VMReg()->next());
161 reg_def F24 ( SOC, SOC, Op_RegF, 24, F24->as_VMReg());
162 reg_def F24_H ( SOC, SOC, Op_RegF, 24, F24->as_VMReg()->next());
163 reg_def F25 ( SOC, SOC, Op_RegF, 25, F25->as_VMReg());
164 reg_def F25_H ( SOC, SOC, Op_RegF, 25, F25->as_VMReg()->next());
165 reg_def F26 ( SOC, SOC, Op_RegF, 26, F26->as_VMReg());
166 reg_def F26_H ( SOC, SOC, Op_RegF, 26, F26->as_VMReg()->next());
167 reg_def F27 ( SOC, SOC, Op_RegF, 27, F27->as_VMReg());
168 reg_def F27_H ( SOC, SOC, Op_RegF, 27, F27->as_VMReg()->next());
169 reg_def F28 ( SOC, SOC, Op_RegF, 28, F28->as_VMReg());
170 reg_def F28_H ( SOC, SOC, Op_RegF, 28, F28->as_VMReg()->next());
171 reg_def F29 ( SOC, SOC, Op_RegF, 29, F29->as_VMReg());
172 reg_def F29_H ( SOC, SOC, Op_RegF, 29, F29->as_VMReg()->next());
173 reg_def F30 ( SOC, SOC, Op_RegF, 30, F30->as_VMReg());
174 reg_def F30_H ( SOC, SOC, Op_RegF, 30, F30->as_VMReg()->next());
175 reg_def F31 ( SOC, SOC, Op_RegF, 31, F31->as_VMReg());
176 reg_def F31_H ( SOC, SOC, Op_RegF, 31, F31->as_VMReg()->next());
179 // ----------------------------
180 // Special Registers
181 // Condition Codes Flag Registers
182 reg_def MIPS_FLAG (SOC, SOC, Op_RegFlags, 1, as_Register(1)->as_VMReg());
183 //S6 is used for get_thread(S6)
184 //S5 is used for heapbase of compressed oop
// Allocation order for the general-purpose register chunk.  The
// callee-saved S registers are listed first; GP/RA/SP/FP come last
// (they are defined NS above and are never allocatable anyway).
185 alloc_class chunk0(
186 S7, S7_H,
187 S0, S0_H,
188 S1, S1_H,
189 S2, S2_H,
190 S4, S4_H,
191 S5, S5_H,
192 S6, S6_H,
193 S3, S3_H,
194 T2, T2_H,
195 T3, T3_H,
196 T8, T8_H,
197 T9, T9_H,
198 T1, T1_H, // inline_cache_reg
199 V1, V1_H,
200 A7, A7_H,
201 A6, A6_H,
202 A5, A5_H,
203 A4, A4_H,
204 V0, V0_H,
205 A3, A3_H,
206 A2, A2_H,
207 A1, A1_H,
208 A0, A0_H,
209 T0, T0_H,
210 GP, GP_H, // fixed: the separating comma was missing here
211 RA, RA_H,
212 SP, SP_H, // stack_pointer
213 FP, FP_H // frame_pointer
214 );
// Allocation order for the floating-point register chunk.
// NOTE(review): F12-F19 (the FP argument registers, per can_be_java_arg
// below) are placed late in the order, presumably so the allocator
// prefers non-argument registers first — confirm intent before reordering.
216 alloc_class chunk1( F0, F0_H,
217 F1, F1_H,
218 F2, F2_H,
219 F3, F3_H,
220 F4, F4_H,
221 F5, F5_H,
222 F6, F6_H,
223 F7, F7_H,
224 F8, F8_H,
225 F9, F9_H,
226 F10, F10_H,
227 F11, F11_H,
228 F20, F20_H,
229 F21, F21_H,
230 F22, F22_H,
231 F23, F23_H,
232 F24, F24_H,
233 F25, F25_H,
234 F26, F26_H,
235 F27, F27_H,
236 F28, F28_H,
237 F19, F19_H,
238 F18, F18_H,
239 F17, F17_H,
240 F16, F16_H,
241 F15, F15_H,
242 F14, F14_H,
243 F13, F13_H,
244 F12, F12_H,
245 F29, F29_H,
246 F30, F30_H,
247 F31, F31_H);
// The flags pseudo-register lives in its own chunk.
249 alloc_class chunk2(MIPS_FLAG);
// Single-register classes used by operand definitions: one class per
// named register, plus a combined class per register family.
251 reg_class s_reg( S0, S1, S2, S3, S4, S5, S6, S7 );
252 reg_class s0_reg( S0 );
253 reg_class s1_reg( S1 );
254 reg_class s2_reg( S2 );
255 reg_class s3_reg( S3 );
256 reg_class s4_reg( S4 );
257 reg_class s5_reg( S5 );
258 reg_class s6_reg( S6 );
259 reg_class s7_reg( S7 );
261 reg_class t_reg( T0, T1, T2, T3, T8, T9 );
262 reg_class t0_reg( T0 );
263 reg_class t1_reg( T1 );
264 reg_class t2_reg( T2 );
265 reg_class t3_reg( T3 );
266 reg_class t8_reg( T8 );
267 reg_class t9_reg( T9 );
269 reg_class a_reg( A0, A1, A2, A3, A4, A5, A6, A7 );
270 reg_class a0_reg( A0 );
271 reg_class a1_reg( A1 );
272 reg_class a2_reg( A2 );
273 reg_class a3_reg( A3 );
274 reg_class a4_reg( A4 );
275 reg_class a5_reg( A5 );
276 reg_class a6_reg( A6 );
277 reg_class a7_reg( A7 );
279 reg_class v0_reg( V0 );
280 reg_class v1_reg( V1 );
282 reg_class sp_reg( SP, SP_H );
283 reg_class fp_reg( FP, FP_H );
285 reg_class mips_flags(MIPS_FLAG);
// Two-slot (64-bit) classes: each pairs a register with its _H half.
287 reg_class v0_long_reg( V0, V0_H );
288 reg_class v1_long_reg( V1, V1_H );
289 reg_class a0_long_reg( A0, A0_H );
290 reg_class a1_long_reg( A1, A1_H );
291 reg_class a2_long_reg( A2, A2_H );
292 reg_class a3_long_reg( A3, A3_H );
293 reg_class a4_long_reg( A4, A4_H );
294 reg_class a5_long_reg( A5, A5_H );
295 reg_class a6_long_reg( A6, A6_H );
296 reg_class a7_long_reg( A7, A7_H );
297 reg_class t0_long_reg( T0, T0_H );
298 reg_class t1_long_reg( T1, T1_H );
299 reg_class t2_long_reg( T2, T2_H );
300 reg_class t3_long_reg( T3, T3_H );
301 reg_class t8_long_reg( T8, T8_H );
302 reg_class t9_long_reg( T9, T9_H );
303 reg_class s0_long_reg( S0, S0_H );
304 reg_class s1_long_reg( S1, S1_H );
305 reg_class s2_long_reg( S2, S2_H );
306 reg_class s3_long_reg( S3, S3_H );
307 reg_class s4_long_reg( S4, S4_H );
308 reg_class s5_long_reg( S5, S5_H );
309 reg_class s6_long_reg( S6, S6_H );
310 reg_class s7_long_reg( S7, S7_H );
// General allocatable int registers.  S5/S6 are excluded (reserved for
// the compressed-oop heapbase and current thread, per the notes above);
// T9 is excluded — it is used as the call scratch register in the
// handlers below.
312 reg_class int_reg( S7, S0, S1, S2, S4, S3, T8, T2, T3, T1, V1, A7, A6, A5, A4, V0, A3, A2, A1, A0, T0 );
// Same as int_reg minus the argument registers A0-A7.
314 reg_class no_Ax_int_reg( S7, S0, S1, S2, S4, S3, T8, T2, T3, T1, V1, V0, T0 );
// Pointer (64-bit) register class: low/high pairs of the allocatable GPRs.
316 reg_class p_reg(
317 S7, S7_H,
318 S0, S0_H,
319 S1, S1_H,
320 S2, S2_H,
321 S4, S4_H,
322 S3, S3_H,
323 T8, T8_H,
324 T2, T2_H,
325 T3, T3_H,
326 T1, T1_H,
327 A7, A7_H,
328 A6, A6_H,
329 A5, A5_H,
330 A4, A4_H,
331 A3, A3_H,
332 A2, A2_H,
333 A1, A1_H,
334 A0, A0_H,
335 T0, T0_H
336 );
// p_reg without T8, for instructions that use T8 as scratch.
338 reg_class no_T8_p_reg(
339 S7, S7_H,
340 S0, S0_H,
341 S1, S1_H,
342 S2, S2_H,
343 S4, S4_H,
344 S3, S3_H,
345 T2, T2_H,
346 T3, T3_H,
347 T1, T1_H,
348 A7, A7_H,
349 A6, A6_H,
350 A5, A5_H,
351 A4, A4_H,
352 A3, A3_H,
353 A2, A2_H,
354 A1, A1_H,
355 A0, A0_H,
356 T0, T0_H
357 );
// Long (64-bit integer) register class; same membership as p_reg.
359 reg_class long_reg(
360 S7, S7_H,
361 S0, S0_H,
362 S1, S1_H,
363 S2, S2_H,
364 S4, S4_H,
365 S3, S3_H,
366 T8, T8_H,
367 T2, T2_H,
368 T3, T3_H,
369 T1, T1_H,
370 A7, A7_H,
371 A6, A6_H,
372 A5, A5_H,
373 A4, A4_H,
374 A3, A3_H,
375 A2, A2_H,
376 A1, A1_H,
377 A0, A0_H,
378 T0, T0_H
379 );
382 // Floating point registers.
383 // 2012/8/23 Fu: F30/F31 are used as temporary registers in D2I
384 // 2016/12/1 aoqi: F31 is no longer used as a temporary register in D2I
// Allocatable single-precision FP registers.  F30 stays reserved as the
// D2I scratch register and is therefore excluded, while F31 is included.
385 reg_class flt_reg( F0, F1, F2, F3, F4, F5, F6, F7, F8, F9, F10, F11, F12, F13, F14, F15, F16, F17, F18, F19, F20, F21, F22, F23, F24, F25, F26, F27, F28, F29, F31);
// Allocatable double-precision FP registers (low/high pairs).  F30 is
// excluded here, matching flt_reg above.
386 reg_class dbl_reg( F0, F0_H,
387 F1, F1_H,
388 F2, F2_H,
389 F3, F3_H,
390 F4, F4_H,
391 F5, F5_H,
392 F6, F6_H,
393 F7, F7_H,
394 F8, F8_H,
395 F9, F9_H,
396 F10, F10_H,
397 F11, F11_H,
398 F12, F12_H,
399 F13, F13_H,
400 F14, F14_H,
401 F15, F15_H,
402 F16, F16_H,
403 F17, F17_H,
404 F18, F18_H,
405 F19, F19_H,
406 F20, F20_H,
407 F21, F21_H,
408 F22, F22_H,
409 F23, F23_H,
410 F24, F24_H,
411 F25, F25_H,
412 F26, F26_H,
413 F27, F27_H,
414 F28, F28_H,
415 F29, F29_H,
416 F31, F31_H);
// First/second FP argument registers.
418 reg_class flt_arg0( F12 );
419 reg_class dbl_arg0( F12, F12_H );
420 reg_class dbl_arg1( F14, F14_H );
422 %}
424 //----------DEFINITION BLOCK---------------------------------------------------
425 // Define name --> value mappings to inform the ADLC of an integer valued name
426 // Current support includes integer values in the range [0, 0x7FFFFFFF]
427 // Format:
428 // int_def <name> ( <int_value>, <expression>);
429 // Generated Code in ad_<arch>.hpp
430 // #define <name> (<expression>)
431 // // value == <int_value>
432 // Generated code in ad_<arch>.cpp adlc_verification()
433 // assert( <name> == <int_value>, "Expect (<expression>) to equal <int_value>");
434 //
// Instruction-cost constants used by the instruction definitions below.
435 definitions %{
436 int_def DEFAULT_COST ( 100, 100);
437 int_def HUGE_COST (1000000, 1000000);
439 // Memory refs are twice as expensive as run-of-the-mill.
440 int_def MEMORY_REF_COST ( 200, DEFAULT_COST * 2);
442 // Branches are even more expensive.
443 int_def BRANCH_COST ( 300, DEFAULT_COST * 3);
444 // we use jr instruction to construct call, so more expensive
445 // by yjl 2/28/2006
446 int_def CALL_COST ( 500, DEFAULT_COST * 5);
447 /*
448 int_def EQUAL ( 1, 1 );
449 int_def NOT_EQUAL ( 2, 2 );
450 int_def GREATER ( 3, 3 );
451 int_def GREATER_EQUAL ( 4, 4 );
452 int_def LESS ( 5, 5 );
453 int_def LESS_EQUAL ( 6, 6 );
454 */
455 %}
459 //----------SOURCE BLOCK-------------------------------------------------------
460 // This is a block of C++ code which provides values, functions, and
461 // definitions necessary in the rest of the architecture description
463 source_hpp %{
464 // Header information of the source block.
465 // Method declarations/definitions which are used outside
466 // the ad-scope can conveniently be defined here.
467 //
468 // To keep related declarations/definitions/uses close together,
469 // we switch between source %{ }% and source_hpp %{ }% freely as needed.
class CallStubImpl {

  //--------------------------------------------------------------
  //---<  Used for optimization in Compile::shorten_branches  >---
  //--------------------------------------------------------------

 public:

  // This platform emits no call trampoline stubs, so a trampoline
  // contributes no relocation entries ...
  static uint reloc_call_trampoline() {
    return 0;
  }

  // ... and occupies no bytes in the stub section.
  static uint size_call_trampoline() {
    return 0;
  }
};
// Sizing and emission of the exception and deoptimization handler stubs.
// The emit_* bodies live in the source %{ %} block below.
489 class HandlerImpl {
491 public:
493 static int emit_exception_handler(CodeBuffer &cbuf);
494 static int emit_deopt_handler(CodeBuffer& cbuf);
496 static uint size_exception_handler() {
497 // NativeCall instruction size is the same as NativeJump.
498 // exception handler starts out as jump and can be patched to
499 // a call by deoptimization. (4932387)
500 // Note that this value is also credited (in output.cpp) to
501 // the size of the code section.
502 // return NativeJump::instruction_size;
// Rounded to 16 because emit_exception_handler aligns the stub to 16.
503 int size = NativeCall::instruction_size;
504 return round_to(size, 16);
505 }
507 #ifdef _LP64
508 static uint size_deopt_handler() {
509 int size = NativeCall::instruction_size;
510 return round_to(size, 16);
511 }
512 #else
513 static uint size_deopt_handler() {
514 // NativeCall instruction size is the same as NativeJump.
515 // exception handler starts out as jump and can be patched to
516 // a call by deoptimization. (4932387)
517 // Note that this value is also credited (in output.cpp) to
518 // the size of the code section.
519 return 5 + NativeJump::instruction_size; // pushl(); jmp;
520 }
521 #endif
522 };
524 %} // end source_hpp
526 source %{
528 #define NO_INDEX 0
529 #define RELOC_IMM64 Assembler::imm_operand
530 #define RELOC_DISP32 Assembler::disp32_operand
533 #define __ _masm.
536 // Emit exception handler code.
537 // Stuff framesize into a register and call a VM stub routine.
// Returns the offset of the handler within the stub section, or 0 when
// the stub buffer could not be expanded.
538 int HandlerImpl::emit_exception_handler(CodeBuffer& cbuf) {
539 /*
540 // Note that the code buffer's insts_mark is always relative to insts.
541 // That's why we must use the macroassembler to generate a handler.
542 MacroAssembler _masm(&cbuf);
543 address base = __ start_a_stub(size_exception_handler());
544 if (base == NULL) return 0; // CodeBuffer::expand failed
545 int offset = __ offset();
546 __ jump(RuntimeAddress(OptoRuntime::exception_blob()->entry_point()));
547 assert(__ offset() - offset <= (int) size_exception_handler(), "overflow");
548 __ end_a_stub();
549 return offset;
550 */
551 // Note that the code buffer's insts_mark is always relative to insts.
552 // That's why we must use the macroassembler to generate a handler.
553 MacroAssembler _masm(&cbuf);
554 address base =
555 __ start_a_stub(size_exception_handler());
556 if (base == NULL) return 0; // CodeBuffer::expand failed
557 int offset = __ offset();
559 __ block_comment("; emit_exception_handler");
561 /* 2012/9/25 FIXME Jin: According to X86, we should use direct jumpt.
562 * * However, this will trigger an assert after the 40th method:
563 * *
564 * * 39 b java.lang.Throwable::<init> (25 bytes)
565 * * --- ns java.lang.Throwable::fillInStackTrace
566 * * 40 !b java.net.URLClassLoader::findClass (29 bytes)
567 * * /vm/opto/runtime.cpp, 900 , assert(caller.is_compiled_frame(),"must be")
568 * * 40 made not entrant (2) java.net.URLClassLoader::findClass (29 bytes)
569 * *
570 * * If we change from JR to JALR, the assert will disappear, but WebClient will
571 * * fail after the 403th method with unknown reason.
572 * */
573 cbuf.set_insts_mark();
574 __ relocate(relocInfo::runtime_call_type);
// Materialize the 48-bit target with a patchable sequence, then jump
// through T9 (jr: plain jump, no link — see the FIXME note above).
576 __ patchable_set48(T9, (long)OptoRuntime::exception_blob()->entry_point());
577 __ jr(T9);
578 __ delayed()->nop();
579 __ align(16);
580 assert(__ offset() - offset <= (int) size_exception_handler(), "overflow");
581 __ end_a_stub();
582 return offset;
583 }
585 // Emit deopt handler code.
// Returns the offset of the handler within the stub section, or 0 when
// the stub buffer could not be expanded.
586 int HandlerImpl::emit_deopt_handler(CodeBuffer& cbuf) {
587 // Note that the code buffer's insts_mark is always relative to insts.
588 // That's why we must use the macroassembler to generate a handler.
589 MacroAssembler _masm(&cbuf);
590 address base =
591 __ start_a_stub(size_deopt_handler());
593 // FIXME
594 if (base == NULL) return 0; // CodeBuffer::expand failed
595 int offset = __ offset();
597 __ block_comment("; emit_deopt_handler");
599 cbuf.set_insts_mark();
600 __ relocate(relocInfo::runtime_call_type);
// Unlike the exception handler, this uses jalr so RA is linked with the
// return address before entering the deopt blob.
602 __ patchable_set48(T9, (long)SharedRuntime::deopt_blob()->unpack());
603 __ jalr(T9);
604 __ delayed()->nop();
605 __ align(16);
606 assert(__ offset() - offset <= (int) size_deopt_handler(), "overflow");
607 __ end_a_stub();
608 return offset;
609 }
612 const bool Matcher::match_rule_supported(int opcode) {
613 if (!has_match_rule(opcode))
614 return false;
616 switch (opcode) {
617 //Op_CountLeadingZerosI Op_CountLeadingZerosL can be deleted, all MIPS CPUs support clz & dclz.
618 case Op_CountLeadingZerosI:
619 case Op_CountLeadingZerosL:
620 if (!UseCountLeadingZerosInstruction)
621 return false;
622 break;
623 case Op_CountTrailingZerosI:
624 case Op_CountTrailingZerosL:
625 if (!UseCountTrailingZerosInstruction)
626 return false;
627 break;
628 }
630 return true; // Per default match rules are supported.
631 }
633 //FIXME
634 // emit call stub, compiled java to interpreter
// Emits the static-call stub into the stub section; the stub is later
// patched when the call site switches between compiled and interpreted
// targets.  Silently returns if the stub buffer cannot be expanded.
635 void emit_java_to_interp(CodeBuffer &cbuf ) {
636 // Stub is fixed up when the corresponding call is converted from calling
637 // compiled code to calling interpreted code.
638 // mov rbx,0
639 // jmp -1
641 address mark = cbuf.insts_mark(); // get mark within main instrs section
643 // Note that the code buffer's insts_mark is always relative to insts.
644 // That's why we must use the macroassembler to generate a stub.
645 MacroAssembler _masm(&cbuf);
647 address base =
648 __ start_a_stub(Compile::MAX_stubs_size);
649 if (base == NULL) return; // CodeBuffer::expand failed
650 // static stub relocation stores the instruction address of the call
652 __ relocate(static_stub_Relocation::spec(mark), 0);
654 /* 2012/10/29 Jin: Rmethod contains methodOop, it should be relocated for GC */
655 /*
656 int oop_index = __ oop_recorder()->allocate_index(NULL);
657 RelocationHolder rspec = oop_Relocation::spec(oop_index);
658 __ relocate(rspec);
659 */
661 // static stub relocation also tags the methodOop in the code-stream.
// S3 is loaded with a placeholder (0) method pointer ...
662 __ patchable_set48(S3, (long)0);
663 // This is recognized as unresolved by relocs/nativeInst/ic code
665 __ relocate(relocInfo::runtime_call_type);
667 cbuf.set_insts_mark();
// ... and AT with a placeholder (-1) branch target; both are patched later.
668 address call_pc = (address)-1;
669 __ patchable_set48(AT, (long)call_pc);
670 __ jr(AT);
671 __ nop();
672 __ align(16);
673 __ end_a_stub();
674 // Update current stubs pointer and restore code_end.
675 }
677 // size of call stub, compiled java to interpretor
678 uint size_java_to_interp() {
679 int size = 4 * 4 + NativeCall::instruction_size; // sizeof(li48) + NativeCall::instruction_size
680 return round_to(size, 16);
681 }
// Relocation entries needed for the java-to-interpreter call stub:
// covers the entries emitted in emit_java_to_interp plus those of the
// corresponding Java_Static_Call site.
uint reloc_java_to_interp() {
  const uint reloc_count = 16;
  return reloc_count;
}
688 bool Matcher::is_short_branch_offset(int rule, int br_size, int offset) {
689 if( Assembler::is_simm16(offset) ) return true;
690 else {
691 assert(false, "Not implemented yet !" );
692 Unimplemented();
693 }
694 }
697 // No additional cost for CMOVL.
698 const int Matcher::long_cmove_cost() { return 0; }
700 // No CMOVF/CMOVD with SSE2
// NOTE(review): the SSE2 comment is copied from the x86 port; on MIPS the
// effect is simply that float/double cmoves are limited by ConditionalMoveLimit.
701 const int Matcher::float_cmove_cost() { return ConditionalMoveLimit; }
703 // Does the CPU require late expand (see block.cpp for description of late expand)?
704 const bool Matcher::require_postalloc_expand = false;
706 // Should the Matcher clone shifts on addressing modes, expecting them
707 // to be subsumed into complex addressing expressions or compute them
708 // into registers? True for Intel but false for most RISCs
709 const bool Matcher::clone_shift_expressions = false;
711 // Do we need to mask the count passed to shift instructions or does
712 // the cpu only look at the lower 5/6 bits anyway?
713 const bool Matcher::need_masked_shift_count = false;
715 bool Matcher::narrow_oop_use_complex_address() {
716 NOT_LP64(ShouldNotCallThis());
717 assert(UseCompressedOops, "only for compressed oops code");
718 return false;
719 }
721 bool Matcher::narrow_klass_use_complex_address() {
722 NOT_LP64(ShouldNotCallThis());
723 assert(UseCompressedClassPointers, "only for compressed klass code");
724 return false;
725 }
727 // This is UltraSparc specific, true just means we have fast l2f conversion
728 const bool Matcher::convL2FSupported(void) {
729 return true;
730 }
732 // Max vector size in bytes. 0 if not supported.
// This port only supports 8-byte vectors (both asserts below enforce
// MaxVectorSize == 8).
733 const int Matcher::vector_width_in_bytes(BasicType bt) {
734 assert(MaxVectorSize == 8, "");
735 return 8;
736 }
738 // Vector ideal reg
739 const int Matcher::vector_ideal_reg(int size) {
740 assert(MaxVectorSize == 8, "");
741 switch(size) {
742 case 8: return Op_VecD;
743 }
744 ShouldNotReachHere();
745 return 0;
746 }
748 // Only lowest bits of xmm reg are used for vector shift count.
749 const int Matcher::vector_shift_count_ideal_reg(int size) {
750 fatal("vector shift is not supported");
751 return Node::NotAMachineReg;
752 }
754 // Limits on vector size (number of elements) loaded into vector.
755 const int Matcher::max_vector_size(const BasicType bt) {
756 assert(is_java_primitive(bt), "only primitive type vectors");
757 return vector_width_in_bytes(bt)/type2aelembytes(bt);
758 }
760 const int Matcher::min_vector_size(const BasicType bt) {
761 return max_vector_size(bt); // Same as max.
762 }
764 // MIPS supports misaligned vectors store/load? FIXME
765 const bool Matcher::misaligned_vectors_ok() {
766 return false;
767 //return !AlignVector; // can be changed by flag
768 }
// The div/mod projection masks are unused on this port: divmod nodes are
// not matched here, so reaching any of these is a bug.
770 // Register for DIVI projection of divmodI
771 RegMask Matcher::divI_proj_mask() {
772 ShouldNotReachHere();
773 return RegMask();
774 }
776 // Register for MODI projection of divmodI
777 RegMask Matcher::modI_proj_mask() {
778 ShouldNotReachHere();
779 return RegMask();
780 }
782 // Register for DIVL projection of divmodL
783 RegMask Matcher::divL_proj_mask() {
784 ShouldNotReachHere();
785 return RegMask();
786 }
788 int Matcher::regnum_to_fpu_offset(int regnum) {
789 return regnum - 32; // The FP registers are in the second chunk
790 }
793 const bool Matcher::isSimpleConstant64(jlong value) {
794 // Will one (StoreL ConL) be cheaper than two (StoreI ConI)?.
795 return true;
796 }
799 // Return whether or not this register is ever used as an argument. This
800 // function is used on startup to build the trampoline stubs in generateOptoStub.
801 // Registers not mentioned will be killed by the VM call in the trampoline, and
802 // arguments in those registers not be available to the callee.
// Integer args: T0 and A0-A7; FP args: F12-F19 (low and high halves).
803 bool Matcher::can_be_java_arg( int reg ) {
804 /* Refer to: [sharedRuntime_mips_64.cpp] SharedRuntime::java_calling_convention() */
805 if ( reg == T0_num || reg == T0_H_num
806 || reg == A0_num || reg == A0_H_num
807 || reg == A1_num || reg == A1_H_num
808 || reg == A2_num || reg == A2_H_num
809 || reg == A3_num || reg == A3_H_num
810 || reg == A4_num || reg == A4_H_num
811 || reg == A5_num || reg == A5_H_num
812 || reg == A6_num || reg == A6_H_num
813 || reg == A7_num || reg == A7_H_num )
814 return true;
816 if ( reg == F12_num || reg == F12_H_num
817 || reg == F13_num || reg == F13_H_num
818 || reg == F14_num || reg == F14_H_num
819 || reg == F15_num || reg == F15_H_num
820 || reg == F16_num || reg == F16_H_num
821 || reg == F17_num || reg == F17_H_num
822 || reg == F18_num || reg == F18_H_num
823 || reg == F19_num || reg == F19_H_num )
824 return true;
826 return false;
827 }
// Any register that can carry a Java argument may also be spilled to.
829 bool Matcher::is_spillable_arg( int reg ) {
830 return can_be_java_arg(reg);
831 }
// No assembler fast-path for long division by constant on this port.
833 bool Matcher::use_asm_for_ldiv_by_con( jlong divisor ) {
834 return false;
835 }
837 // Register for MODL projection of divmodL
// Unused on this port, like the other divmod projection masks above.
838 RegMask Matcher::modL_proj_mask() {
839 ShouldNotReachHere();
840 return RegMask();
841 }
843 const RegMask Matcher::method_handle_invoke_SP_save_mask() {
844 return FP_REG_mask();
845 }
847 // MIPS doesn't support AES intrinsics
848 const bool Matcher::pass_original_key_for_aes() {
849 return false;
850 }
852 // The address of the call instruction needs to be 16-byte aligned to
853 // ensure that it does not span a cache line so that it can be patched.
// Each compute_padding below returns the number of filler bytes needed
// before the node so the patchable call sequence (sketched in the
// instruction comments) starts on an alignment_required() boundary.
855 int CallStaticJavaDirectNode::compute_padding(int current_offset) const {
856 //lui
857 //ori
858 //dsll
859 //ori
861 //jalr
862 //nop
864 return round_to(current_offset, alignment_required()) - current_offset;
865 }
867 // The address of the call instruction needs to be 16-byte aligned to
868 // ensure that it does not span a cache line so that it can be patched.
869 int CallDynamicJavaDirectNode::compute_padding(int current_offset) const {
870 //loadIC <--- skip
872 //lui
873 //ori
874 //nop
875 //nop
877 //jalr
878 //nop
// The 4-instruction inline-cache load precedes the call sequence, so it
// is added to the offset before aligning.
880 current_offset += 4 * 4;
881 return round_to(current_offset, alignment_required()) - current_offset;
882 }
884 int CallLeafNoFPDirectNode::compute_padding(int current_offset) const {
885 //lui
886 //ori
887 //dsll
888 //ori
890 //jalr
891 //nop
893 return round_to(current_offset, alignment_required()) - current_offset;
894 }
896 int CallLeafDirectNode::compute_padding(int current_offset) const {
897 //lui
898 //ori
899 //dsll
900 //ori
902 //jalr
903 //nop
905 return round_to(current_offset, alignment_required()) - current_offset;
906 }
908 int CallRuntimeDirectNode::compute_padding(int current_offset) const {
909 //lui
910 //ori
911 //dsll
912 //ori
914 //jalr
915 //nop
917 return round_to(current_offset, alignment_required()) - current_offset;
918 }
920 // If CPU can load and store mis-aligned doubles directly then no fixup is
921 // needed. Else we split the double into 2 integer pieces and move it
922 // piece-by-piece. Only happens when passing doubles into C code as the
923 // Java calling convention forces doubles to be aligned.
924 const bool Matcher::misaligned_doubles_ok = false;
925 // Do floats take an entire double register or just half?
926 //const bool Matcher::float_in_double = true;
927 bool Matcher::float_in_double() { return false; }
928 // Threshold size for cleararray.
929 const int Matcher::init_array_short_size = 8 * BytesPerLong;
930 // Do ints take an entire long register or just half?
931 const bool Matcher::int_in_long = true;
932 // Is it better to copy float constants, or load them directly from memory?
933 // Intel can load a float constant from a direct address, requiring no
934 // extra registers. Most RISCs will have to materialize an address into a
935 // register first, so they would do better to copy the constant from stack.
936 const bool Matcher::rematerialize_float_constants = false;
937 // Advertise here if the CPU requires explicit rounding operations
938 // to implement the UseStrictFP mode.
939 const bool Matcher::strict_fp_requires_explicit_rounding = false;
940 // The ecx parameter to rep stos for the ClearArray node is in dwords.
941 const bool Matcher::init_array_count_is_in_bytes = false;
944 // Indicate if the safepoint node needs the polling page as an input.
945 // Since MIPS doesn't have absolute addressing, it needs.
// NOTE(review): the comment above says "it needs" but the function
// returns false — confirm which is intended.
946 bool SafePointNode::needs_polling_address_input() {
947 return false;
948 }
950 // !!!!! Special hack to get all type of calls to specify the byte offset
951 // from the start of the call to the point where the return address
952 // will point.
953 int MachCallStaticJavaNode::ret_addr_offset() {
954 //lui
955 //ori
956 //nop
957 //nop
958 //jalr
959 //nop
960 return 24;
961 }
963 int MachCallDynamicJavaNode::ret_addr_offset() {
964 //lui IC_Klass,
965 //ori IC_Klass,
966 //dsll IC_Klass
967 //ori IC_Klass
969 //lui T9
970 //ori T9
971 //nop
972 //nop
973 //jalr T9
974 //nop
975 return 4 * 4 + 4 * 6;
976 }
978 //=============================================================================
980 // Figure out which register class each belongs in: rc_int, rc_float, rc_stack
981 enum RC { rc_bad, rc_int, rc_float, rc_stack };
982 static enum RC rc_class( OptoReg::Name reg ) {
983 if( !OptoReg::is_valid(reg) ) return rc_bad;
984 if (OptoReg::is_stack(reg)) return rc_stack;
985 VMReg r = OptoReg::as_VMReg(reg);
986 if (r->is_Register()) return rc_int;
987 assert(r->is_FloatRegister(), "must be");
988 return rc_float;
989 }
// Emit (or size, or pretty-print) the code that moves a value between the
// locations the register allocator picked: stack slot, GPR, or FPR.
// Exactly one mode is active per call:
//   cbuf != NULL                    -> emit real instructions
//   cbuf == NULL && !do_size && st  -> debug listing (#ifndef PRODUCT)
//   do_size == true                 -> only accumulate the byte size
// A 64-bit move is recognized when first/second halves are an even-aligned
// adjacent pair on both sides; AT is used as scratch for mem-mem copies.
// Returns the size in bytes of the emitted sequence.
uint MachSpillCopyNode::implementation( CodeBuffer *cbuf, PhaseRegAlloc *ra_, bool do_size, outputStream* st ) const {
  // Get registers to move
  OptoReg::Name src_second = ra_->get_reg_second(in(1));
  OptoReg::Name src_first = ra_->get_reg_first(in(1));
  OptoReg::Name dst_second = ra_->get_reg_second(this );
  OptoReg::Name dst_first = ra_->get_reg_first(this );

  enum RC src_second_rc = rc_class(src_second);
  enum RC src_first_rc = rc_class(src_first);
  enum RC dst_second_rc = rc_class(dst_second);
  enum RC dst_first_rc = rc_class(dst_first);

  assert(OptoReg::is_valid(src_first) && OptoReg::is_valid(dst_first), "must move at least 1 register" );

  // Generate spill code!
  int size = 0;

  if( src_first == dst_first && src_second == dst_second )
    return 0;            // Self copy, no move

  if (src_first_rc == rc_stack) {
    // mem ->
    if (dst_first_rc == rc_stack) {
      // mem -> mem (through scratch register AT)
      assert(src_second != dst_first, "overlap");
      if ((src_first & 1) == 0 && src_first + 1 == src_second &&
          (dst_first & 1) == 0 && dst_first + 1 == dst_second) {
        // 64-bit
        int src_offset = ra_->reg2offset(src_first);
        int dst_offset = ra_->reg2offset(dst_first);
        if (cbuf) {
          MacroAssembler _masm(cbuf);
          __ ld(AT, Address(SP, src_offset));
          __ sd(AT, Address(SP, dst_offset));
#ifndef PRODUCT
        } else {
          if(!do_size){
            if (size != 0) st->print("\n\t");
            st->print("ld AT, [SP + #%d]\t# 64-bit mem-mem spill 1\n\t"
                      "sd AT, [SP + #%d]",
                      src_offset, dst_offset);
          }
#endif
        }
        size += 8;  // two 4-byte instructions
      } else {
        // 32-bit
        assert(!((src_first & 1) == 0 && src_first + 1 == src_second), "no transform");
        assert(!((dst_first & 1) == 0 && dst_first + 1 == dst_second), "no transform");
        // No pushl/popl, so:
        int src_offset = ra_->reg2offset(src_first);
        int dst_offset = ra_->reg2offset(dst_first);
        if (cbuf) {
          MacroAssembler _masm(cbuf);
          __ lw(AT, Address(SP, src_offset));
          __ sw(AT, Address(SP, dst_offset));
#ifndef PRODUCT
        } else {
          if(!do_size){
            if (size != 0) st->print("\n\t");
            st->print("lw AT, [SP + #%d] spill 2\n\t"
                      "sw AT, [SP + #%d]\n\t",
                      src_offset, dst_offset);
          }
#endif
        }
        size += 8;
      }
      return size;
    } else if (dst_first_rc == rc_int) {
      // mem -> gpr
      if ((src_first & 1) == 0 && src_first + 1 == src_second &&
          (dst_first & 1) == 0 && dst_first + 1 == dst_second) {
        // 64-bit
        int offset = ra_->reg2offset(src_first);
        if (cbuf) {
          MacroAssembler _masm(cbuf);
          __ ld(as_Register(Matcher::_regEncode[dst_first]), Address(SP, offset));
#ifndef PRODUCT
        } else {
          if(!do_size){
            if (size != 0) st->print("\n\t");
            st->print("ld %s, [SP + #%d]\t# spill 3",
                      Matcher::regName[dst_first],
                      offset);
          }
#endif
        }
        size += 4;
      } else {
        // 32-bit: lw sign-extends for ints, lwu zero-extends otherwise
        assert(!((src_first & 1) == 0 && src_first + 1 == src_second), "no transform");
        assert(!((dst_first & 1) == 0 && dst_first + 1 == dst_second), "no transform");
        int offset = ra_->reg2offset(src_first);
        if (cbuf) {
          MacroAssembler _masm(cbuf);
          if (this->ideal_reg() == Op_RegI)
            __ lw(as_Register(Matcher::_regEncode[dst_first]), Address(SP, offset));
          else
            __ lwu(as_Register(Matcher::_regEncode[dst_first]), Address(SP, offset));
#ifndef PRODUCT
        } else {
          if(!do_size){
            if (size != 0) st->print("\n\t");
            if (this->ideal_reg() == Op_RegI)
              st->print("lw %s, [SP + #%d]\t# spill 4",
                        Matcher::regName[dst_first],
                        offset);
            else
              st->print("lwu %s, [SP + #%d]\t# spill 5",
                        Matcher::regName[dst_first],
                        offset);
          }
#endif
        }
        size += 4;
      }
      return size;
    } else if (dst_first_rc == rc_float) {
      // mem-> xmm
      if ((src_first & 1) == 0 && src_first + 1 == src_second &&
          (dst_first & 1) == 0 && dst_first + 1 == dst_second) {
        // 64-bit
        int offset = ra_->reg2offset(src_first);
        if (cbuf) {
          MacroAssembler _masm(cbuf);
          __ ldc1( as_FloatRegister(Matcher::_regEncode[dst_first]), Address(SP, offset));
#ifndef PRODUCT
        } else {
          if(!do_size){
            if (size != 0) st->print("\n\t");
            st->print("ldc1 %s, [SP + #%d]\t# spill 6",
                      Matcher::regName[dst_first],
                      offset);
          }
#endif
        }
        size += 4;
      } else {
        // 32-bit
        assert(!((src_first & 1) == 0 && src_first + 1 == src_second), "no transform");
        assert(!((dst_first & 1) == 0 && dst_first + 1 == dst_second), "no transform");
        int offset = ra_->reg2offset(src_first);
        if (cbuf) {
          MacroAssembler _masm(cbuf);
          __ lwc1( as_FloatRegister(Matcher::_regEncode[dst_first]), Address(SP, offset));
#ifndef PRODUCT
        } else {
          if(!do_size){
            if (size != 0) st->print("\n\t");
            st->print("lwc1 %s, [SP + #%d]\t# spill 7",
                      Matcher::regName[dst_first],
                      offset);
          }
#endif
        }
        size += 4;
      }
      return size;
    }
  } else if (src_first_rc == rc_int) {
    // gpr ->
    if (dst_first_rc == rc_stack) {
      // gpr -> mem
      if ((src_first & 1) == 0 && src_first + 1 == src_second &&
          (dst_first & 1) == 0 && dst_first + 1 == dst_second) {
        // 64-bit
        int offset = ra_->reg2offset(dst_first);
        if (cbuf) {
          MacroAssembler _masm(cbuf);
          __ sd(as_Register(Matcher::_regEncode[src_first]), Address(SP, offset));
#ifndef PRODUCT
        } else {
          if(!do_size){
            if (size != 0) st->print("\n\t");
            st->print("sd %s, [SP + #%d] # spill 8",
                      Matcher::regName[src_first],
                      offset);
          }
#endif
        }
        size += 4;
      } else {
        // 32-bit
        assert(!((src_first & 1) == 0 && src_first + 1 == src_second), "no transform");
        assert(!((dst_first & 1) == 0 && dst_first + 1 == dst_second), "no transform");
        int offset = ra_->reg2offset(dst_first);
        if (cbuf) {
          MacroAssembler _masm(cbuf);
          __ sw(as_Register(Matcher::_regEncode[src_first]), Address(SP, offset));
#ifndef PRODUCT
        } else {
          if(!do_size){
            if (size != 0) st->print("\n\t");
            st->print("sw %s, [SP + #%d]\t# spill 9",
                      Matcher::regName[src_first], offset);
          }
#endif
        }
        size += 4;
      }
      return size;
    } else if (dst_first_rc == rc_int) {
      // gpr -> gpr
      if ((src_first & 1) == 0 && src_first + 1 == src_second &&
          (dst_first & 1) == 0 && dst_first + 1 == dst_second) {
        // 64-bit
        if (cbuf) {
          MacroAssembler _masm(cbuf);
          __ move(as_Register(Matcher::_regEncode[dst_first]),
                  as_Register(Matcher::_regEncode[src_first]));
#ifndef PRODUCT
        } else {
          if(!do_size){
            if (size != 0) st->print("\n\t");
            st->print("move(64bit) %s <-- %s\t# spill 10",
                      Matcher::regName[dst_first],
                      Matcher::regName[src_first]);
          }
#endif
        }
        size += 4;
        return size;
      } else {
        // 32-bit: move_u32 for ints, daddu-with-zero otherwise
        assert(!((src_first & 1) == 0 && src_first + 1 == src_second), "no transform");
        assert(!((dst_first & 1) == 0 && dst_first + 1 == dst_second), "no transform");
        if (cbuf) {
          MacroAssembler _masm(cbuf);
          if (this->ideal_reg() == Op_RegI)
            __ move_u32(as_Register(Matcher::_regEncode[dst_first]), as_Register(Matcher::_regEncode[src_first]));
          else
            __ daddu(as_Register(Matcher::_regEncode[dst_first]), as_Register(Matcher::_regEncode[src_first]), R0);

#ifndef PRODUCT
        } else {
          if(!do_size){
            if (size != 0) st->print("\n\t");
            st->print("move(32-bit) %s <-- %s\t# spill 11",
                      Matcher::regName[dst_first],
                      Matcher::regName[src_first]);
          }
#endif
        }
        size += 4;
        return size;
      }
    } else if (dst_first_rc == rc_float) {
      // gpr -> xmm
      if ((src_first & 1) == 0 && src_first + 1 == src_second &&
          (dst_first & 1) == 0 && dst_first + 1 == dst_second) {
        // 64-bit
        if (cbuf) {
          MacroAssembler _masm(cbuf);
          __ dmtc1(as_Register(Matcher::_regEncode[src_first]), as_FloatRegister(Matcher::_regEncode[dst_first]));
#ifndef PRODUCT
        } else {
          if(!do_size){
            if (size != 0) st->print("\n\t");
            st->print("dmtc1 %s, %s\t# spill 12",
                      Matcher::regName[dst_first],
                      Matcher::regName[src_first]);
          }
#endif
        }
        size += 4;
      } else {
        // 32-bit
        assert(!((src_first & 1) == 0 && src_first + 1 == src_second), "no transform");
        assert(!((dst_first & 1) == 0 && dst_first + 1 == dst_second), "no transform");
        if (cbuf) {
          MacroAssembler _masm(cbuf);
          __ mtc1( as_Register(Matcher::_regEncode[src_first]), as_FloatRegister(Matcher::_regEncode[dst_first]) );
#ifndef PRODUCT
        } else {
          if(!do_size){
            if (size != 0) st->print("\n\t");
            st->print("mtc1 %s, %s\t# spill 13",
                      Matcher::regName[dst_first],
                      Matcher::regName[src_first]);
          }
#endif
        }
        size += 4;
      }
      return size;
    }
  } else if (src_first_rc == rc_float) {
    // xmm ->
    if (dst_first_rc == rc_stack) {
      // xmm -> mem
      if ((src_first & 1) == 0 && src_first + 1 == src_second &&
          (dst_first & 1) == 0 && dst_first + 1 == dst_second) {
        // 64-bit
        int offset = ra_->reg2offset(dst_first);
        if (cbuf) {
          MacroAssembler _masm(cbuf);
          __ sdc1( as_FloatRegister(Matcher::_regEncode[src_first]), Address(SP, offset) );
#ifndef PRODUCT
        } else {
          if(!do_size){
            if (size != 0) st->print("\n\t");
            st->print("sdc1 %s, [SP + #%d]\t# spill 14",
                      Matcher::regName[src_first],
                      offset);
          }
#endif
        }
        size += 4;
      } else {
        // 32-bit
        assert(!((src_first & 1) == 0 && src_first + 1 == src_second), "no transform");
        assert(!((dst_first & 1) == 0 && dst_first + 1 == dst_second), "no transform");
        int offset = ra_->reg2offset(dst_first);
        if (cbuf) {
          MacroAssembler _masm(cbuf);
          __ swc1(as_FloatRegister(Matcher::_regEncode[src_first]), Address(SP, offset));
#ifndef PRODUCT
        } else {
          if(!do_size){
            if (size != 0) st->print("\n\t");
            st->print("swc1 %s, [SP + #%d]\t# spill 15",
                      Matcher::regName[src_first],
                      offset);
          }
#endif
        }
        size += 4;
      }
      return size;
    } else if (dst_first_rc == rc_int) {
      // xmm -> gpr
      if ((src_first & 1) == 0 && src_first + 1 == src_second &&
          (dst_first & 1) == 0 && dst_first + 1 == dst_second) {
        // 64-bit
        if (cbuf) {
          MacroAssembler _masm(cbuf);
          __ dmfc1( as_Register(Matcher::_regEncode[dst_first]), as_FloatRegister(Matcher::_regEncode[src_first]));
#ifndef PRODUCT
        } else {
          if(!do_size){
            if (size != 0) st->print("\n\t");
            st->print("dmfc1 %s, %s\t# spill 16",
                      Matcher::regName[dst_first],
                      Matcher::regName[src_first]);
          }
#endif
        }
        size += 4;
      } else {
        // 32-bit
        assert(!((src_first & 1) == 0 && src_first + 1 == src_second), "no transform");
        assert(!((dst_first & 1) == 0 && dst_first + 1 == dst_second), "no transform");
        if (cbuf) {
          MacroAssembler _masm(cbuf);
          __ mfc1( as_Register(Matcher::_regEncode[dst_first]), as_FloatRegister(Matcher::_regEncode[src_first]));
#ifndef PRODUCT
        } else {
          if(!do_size){
            if (size != 0) st->print("\n\t");
            st->print("mfc1 %s, %s\t# spill 17",
                      Matcher::regName[dst_first],
                      Matcher::regName[src_first]);
          }
#endif
        }
        size += 4;
      }
      return size;
    } else if (dst_first_rc == rc_float) {
      // xmm -> xmm
      if ((src_first & 1) == 0 && src_first + 1 == src_second &&
          (dst_first & 1) == 0 && dst_first + 1 == dst_second) {
        // 64-bit
        if (cbuf) {
          MacroAssembler _masm(cbuf);
          __ mov_d( as_FloatRegister(Matcher::_regEncode[dst_first]), as_FloatRegister(Matcher::_regEncode[src_first]));
#ifndef PRODUCT
        } else {
          if(!do_size){
            if (size != 0) st->print("\n\t");
            st->print("mov_d %s <-- %s\t# spill 18",
                      Matcher::regName[dst_first],
                      Matcher::regName[src_first]);
          }
#endif
        }
        size += 4;
      } else {
        // 32-bit
        assert(!((src_first & 1) == 0 && src_first + 1 == src_second), "no transform");
        assert(!((dst_first & 1) == 0 && dst_first + 1 == dst_second), "no transform");
        if (cbuf) {
          MacroAssembler _masm(cbuf);
          __ mov_s( as_FloatRegister(Matcher::_regEncode[dst_first]), as_FloatRegister(Matcher::_regEncode[src_first]));
#ifndef PRODUCT
        } else {
          if(!do_size){
            if (size != 0) st->print("\n\t");
            st->print("mov_s %s <-- %s\t# spill 19",
                      Matcher::regName[dst_first],
                      Matcher::regName[src_first]);
          }
#endif
        }
        size += 4;
      }
      return size;
    }
  }

  // Unreachable: every src/dst register-class combination is handled above.
  assert(0," foo ");
  Unimplemented();
  return size;

}
#ifndef PRODUCT
// Debug listing: run implementation() in print-only mode (no CodeBuffer).
void MachSpillCopyNode::format( PhaseRegAlloc *ra_, outputStream* st ) const {
  implementation( NULL, ra_, false, st );
}
#endif
// Emit the spill-copy instructions into the code buffer.
void MachSpillCopyNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
  implementation( &cbuf, ra_, false, NULL );
}
// Size in bytes of the spill copy, computed without emitting anything.
uint MachSpillCopyNode::size(PhaseRegAlloc *ra_) const {
  return implementation( NULL, ra_, true, NULL );
}
1422 //=============================================================================
1423 #
#ifndef PRODUCT
// Debug listing for the breakpoint node.
void MachBreakpointNode::format( PhaseRegAlloc *, outputStream* st ) const {
  st->print("INT3");
}
#endif
// Emit a debugger breakpoint (int3 pseudo on this port).
void MachBreakpointNode::emit(CodeBuffer &cbuf, PhaseRegAlloc* ra_) const {
  MacroAssembler _masm(&cbuf);
  __ int3();
}
// Size of the breakpoint node; computed generically from emitted code.
uint MachBreakpointNode::size(PhaseRegAlloc* ra_) const {
  return MachNode::size(ra_);
}
1441 //=============================================================================
1442 #ifndef PRODUCT
1443 void MachEpilogNode::format( PhaseRegAlloc *ra_, outputStream* st ) const {
1444 Compile *C = ra_->C;
1445 int framesize = C->frame_size_in_bytes();
1447 assert((framesize & (StackAlignmentInBytes-1)) == 0, "frame size not aligned");
1449 st->print("daddiu SP, SP, %d # Rlease stack @ MachEpilogNode",framesize);
1450 st->cr(); st->print("\t");
1451 if (UseLoongsonISA) {
1452 st->print("gslq RA, FP, SP, %d # Restore FP & RA @ MachEpilogNode", -wordSize*2);
1453 } else {
1454 st->print("ld RA, SP, %d # Restore RA @ MachEpilogNode", -wordSize);
1455 st->cr(); st->print("\t");
1456 st->print("ld FP, SP, %d # Restore FP @ MachEpilogNode", -wordSize*2);
1457 }
1459 if( do_polling() && C->is_method_compilation() ) {
1460 st->print("Poll Safepoint # MachEpilogNode");
1461 }
1462 }
1463 #endif
// Method epilogue: pop the frame, restore RA/FP, and (for method
// compilations that poll) read the safepoint polling page so a protected
// page faults at return.
void MachEpilogNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
  Compile *C = ra_->C;
  MacroAssembler _masm(&cbuf);
  int framesize = C->frame_size_in_bytes();

  assert((framesize & (StackAlignmentInBytes-1)) == 0, "frame size not aligned");

  // Release the frame first; RA/FP were saved just below the caller SP,
  // so they are reloaded at negative offsets from the restored SP.
  __ daddiu(SP, SP, framesize);

  if (UseLoongsonISA) {
    // Loongson gslq loads the RA/FP pair in one instruction.
    __ gslq(RA, FP, SP, -wordSize*2);
  } else {
    __ ld(RA, SP, -wordSize );
    __ ld(FP, SP, -wordSize*2 );
  }

  if( do_polling() && C->is_method_compilation() ) {
    // Touch the polling page; the relocation lets the VM find this poll.
    __ set64(AT, (long)os::get_polling_page());
    __ relocate(relocInfo::poll_return_type);
    __ lw(AT, AT, 0);
  }
}
// Epilogue size varies (Loongson vs plain, polling vs not), so compute it
// generically from the emitted code rather than hard-coding a constant.
uint MachEpilogNode::size(PhaseRegAlloc *ra_) const {
  return MachNode::size(ra_); // too many variables; just compute it the hard way
}
// Worst-case number of relocations the epilogue may emit.
// NOTE(review): returns 0 although emit() can issue a poll_return_type
// relocation -- the original comment "a large enough number" suggests the
// value is treated as an upper bound elsewhere; confirm before changing.
int MachEpilogNode::reloc() const {
  return 0; // a large enough number
}
// Use the generic pipeline description for the epilogue.
const Pipeline * MachEpilogNode::pipeline() const {
  return MachNode::pipeline_class();
}
// Offset of the safepoint poll within the epilogue (unused on this port).
int MachEpilogNode::safepoint_offset() const { return 0; }
1502 //=============================================================================
#ifndef PRODUCT
// Debug listing: the box-lock node computes reg = SP + offset.
void BoxLockNode::format( PhaseRegAlloc *ra_, outputStream* st ) const {
  int offset = ra_->reg2offset(in_RegMask(0).find_first_elem());
  int reg = ra_->get_reg_first(this);
  st->print("ADDI %s, SP, %d @BoxLockNode",Matcher::regName[reg],offset);
}
#endif
// One 4-byte addi instruction (see BoxLockNode::emit).
uint BoxLockNode::size(PhaseRegAlloc *ra_) const {
  return 4;
}
1517 void BoxLockNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
1518 MacroAssembler _masm(&cbuf);
1519 int offset = ra_->reg2offset(in_RegMask(0).find_first_elem());
1520 int reg = ra_->get_encode(this);
1522 __ addi(as_Register(reg), SP, offset);
1523 /*
1524 if( offset >= 128 ) {
1525 emit_opcode(cbuf, 0x8D); // LEA reg,[SP+offset]
1526 emit_rm(cbuf, 0x2, reg, 0x04);
1527 emit_rm(cbuf, 0x0, 0x04, SP_enc);
1528 emit_d32(cbuf, offset);
1529 }
1530 else {
1531 emit_opcode(cbuf, 0x8D); // LEA reg,[SP+offset]
1532 emit_rm(cbuf, 0x1, reg, 0x04);
1533 emit_rm(cbuf, 0x0, 0x04, SP_enc);
1534 emit_d8(cbuf, offset);
1535 }
1536 */
1537 }
1540 //static int sizeof_FFree_Float_Stack_All = -1;
// Byte offset from the start of the runtime-call sequence
// (lui/ori/dsll/ori/jalr/nop) to the return address.  The assert pins the
// expectation that NativeCall::instruction_size matches this 6-instruction
// (24-byte) sequence.
int MachCallRuntimeNode::ret_addr_offset() {
  assert(NativeCall::instruction_size == 24, "in MachCallRuntimeNode::ret_addr_offset()");
  return NativeCall::instruction_size;
}
1558 //=============================================================================
#ifndef PRODUCT
// Debug listing: _count nops, 4 bytes each.
void MachNopNode::format( PhaseRegAlloc *, outputStream* st ) const {
  st->print("NOP \t# %d bytes pad for loops and calls", 4 * _count);
}
#endif
1565 void MachNopNode::emit(CodeBuffer &cbuf, PhaseRegAlloc * ) const {
1566 MacroAssembler _masm(&cbuf);
1567 int i = 0;
1568 for(i = 0; i < _count; i++)
1569 __ nop();
1570 }
// Size in bytes: _count nops at 4 bytes each.
uint MachNopNode::size(PhaseRegAlloc *) const {
  return 4 * _count;
}
// Use the generic pipeline description for nops.
const Pipeline* MachNopNode::pipeline() const {
  return MachNode::pipeline_class();
}
1579 //=============================================================================
1581 //=============================================================================
1582 #ifndef PRODUCT
1583 void MachUEPNode::format( PhaseRegAlloc *ra_, outputStream* st ) const {
1584 st->print_cr("load_klass(AT, T0)");
1585 st->print_cr("\tbeq(AT, iCache, L)");
1586 st->print_cr("\tnop");
1587 st->print_cr("\tjmp(SharedRuntime::get_ic_miss_stub(), relocInfo::runtime_call_type)");
1588 st->print_cr("\tnop");
1589 st->print_cr("\tnop");
1590 st->print_cr(" L:");
1591 }
1592 #endif
// Unverified entry point: load the receiver's klass into T9 and compare it
// with the inline-cache klass; on mismatch, tail-jump to the IC-miss stub.
// T0 holds the receiver per this port's calling convention for UEP.
void MachUEPNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
  MacroAssembler _masm(&cbuf);
#ifdef ASSERT
  //uint code_size = cbuf.code_size();
#endif
  int ic_reg = Matcher::inline_cache_reg_encode();
  Label L;
  Register receiver = T0;
  Register iCache = as_Register(ic_reg);
  __ load_klass(T9, receiver);
  __ beq(T9, iCache, L);
  __ nop();  // branch delay slot

  __ relocate(relocInfo::runtime_call_type);
  __ patchable_set48(T9, (long)SharedRuntime::get_ic_miss_stub());
  __ jr(T9);
  __ nop();  // branch delay slot

  /* WARNING these NOPs are critical so that verified entry point is properly
   * 8 bytes aligned for patching by NativeJump::patch_verified_entry() */
  __ align(CodeEntryAlignment);
  __ bind(L);
}
// UEP size varies with alignment padding; compute it from emitted code.
uint MachUEPNode::size(PhaseRegAlloc *ra_) const {
  return MachNode::size(ra_);
}
1625 //=============================================================================
// The constant-table base may live in any pointer register.
const RegMask& MachConstantBaseNode::_out_RegMask = P_REG_mask();
// The table base is materialized as an absolute address (see
// MachConstantBaseNode::emit), so no extra offset is needed.
int Compile::ConstantTable::calculate_table_base_offset() const {
  return 0; // absolute addressing, no offset
}
// This port emits the constant base directly; no post-allocation expansion.
bool MachConstantBaseNode::requires_postalloc_expand() const { return false; }
// Must never be called: requires_postalloc_expand() returns false above.
void MachConstantBaseNode::postalloc_expand(GrowableArray <Node *> *nodes, PhaseRegAlloc *ra_) {
  ShouldNotReachHere();
}
// Materialize the constant-table base address into the allocated register
// using a patchable 48-bit immediate, with an internal_pc relocation so the
// address survives code movement.  Nothing is emitted when the constants
// section is empty.
void MachConstantBaseNode::emit(CodeBuffer& cbuf, PhaseRegAlloc* ra_) const {
  Compile* C = ra_->C;
  Compile::ConstantTable& constant_table = C->constant_table();
  MacroAssembler _masm(&cbuf);

  Register Rtoc = as_Register(ra_->get_encode(this));
  CodeSection* consts_section = __ code()->consts();
  int consts_size = consts_section->align_at_start(consts_section->size());
  assert(constant_table.size() == consts_size, "must be equal");

  if (consts_section->size()) {
    // Materialize the constant table base.
    address baseaddr = consts_section->start() + -(constant_table.table_base_offset());
    // RelocationHolder rspec = internal_word_Relocation::spec(baseaddr);
    __ relocate(relocInfo::internal_pc_type);
    __ patchable_set48(Rtoc, (long)baseaddr);
  }
}
// patchable_set48 emits a fixed 4-instruction (16-byte) sequence.
uint MachConstantBaseNode::size(PhaseRegAlloc* ra_) const {
  // patchable_set48 (4 insts)
  return 4 * 4;
}
#ifndef PRODUCT
// Debug listing for the constant-table base materialization.
void MachConstantBaseNode::format(PhaseRegAlloc* ra_, outputStream* st) const {
  Register r = as_Register(ra_->get_encode(this));
  st->print("patchable_set48 %s, &constanttable (constant table base) @ MachConstantBaseNode", r->name());
}
#endif
1670 //=============================================================================
#ifndef PRODUCT
// Debug-only pretty-printer mirroring MachPrologNode::emit below:
// optional stack bang, save RA/FP (fused gssq on Loongson), set up FP,
// then allocate the frame.
void MachPrologNode::format( PhaseRegAlloc *ra_, outputStream* st ) const {
  Compile* C = ra_->C;

  int framesize = C->frame_size_in_bytes();
  int bangsize = C->bang_size_in_bytes();
  assert((framesize & (StackAlignmentInBytes-1)) == 0, "frame size not aligned");

  // Calls to C2R adapters often do not accept exceptional returns.
  // We require that their callers must bang for them. But be careful, because
  // some VM calls (such as call site linkage) can use several kilobytes of
  // stack. But the stack safety zone should account for that.
  // See bugs 4446381, 4468289, 4497237.
  if (C->need_stack_bang(bangsize)) {
    st->print_cr("# stack bang"); st->print("\t");
  }
  if (UseLoongsonISA) {
    st->print("gssq RA, FP, %d(SP) @ MachPrologNode\n\t", -wordSize*2);
  } else {
    st->print("sd RA, %d(SP) @ MachPrologNode\n\t", -wordSize);
    st->print("sd FP, %d(SP) @ MachPrologNode\n\t", -wordSize*2);
  }
  st->print("daddiu FP, SP, -%d \n\t", wordSize*2);
  st->print("daddiu SP, SP, -%d \t",framesize);
}
#endif
// Method prologue: optional stack-overflow bang, save RA/FP below the
// incoming SP, establish FP, allocate the frame, and leave two nops of
// patch space for NativeJump::patch_verified_entry().
void MachPrologNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
  Compile* C = ra_->C;
  MacroAssembler _masm(&cbuf);

  int framesize = C->frame_size_in_bytes();
  int bangsize = C->bang_size_in_bytes();

  // __ verified_entry(framesize, C->need_stack_bang(bangsize)?bangsize:0, false);

  assert((framesize & (StackAlignmentInBytes-1)) == 0, "frame size not aligned");

  // NOTE(review): format() above and the commented-out verified_entry call
  // both use bangsize here, but this passes framesize to need_stack_bang
  // (leaving bangsize unused) -- confirm which is intended.
  if (C->need_stack_bang(framesize)) {
    __ generate_stack_overflow_check(framesize);
  }

  if (UseLoongsonISA) {
    // Loongson gssq stores the RA/FP pair in one instruction.
    __ gssq(RA, FP, SP, -wordSize*2);
  } else {
    __ sd(RA, SP, -wordSize);
    __ sd(FP, SP, -wordSize*2);
  }
  __ daddiu(FP, SP, -wordSize*2);
  __ daddiu(SP, SP, -framesize);
  __ nop(); /* 2013.10.22 Jin: Make enough room for patch_verified_entry() */
  __ nop();

  C->set_frame_complete(cbuf.insts_size());
  if (C->has_mach_constant_base_node()) {
    // NOTE: We set the table base offset here because users might be
    // emitted before MachConstantBaseNode.
    Compile::ConstantTable& constant_table = C->constant_table();
    constant_table.set_table_base_offset(constant_table.calculate_table_base_offset());
  }

}
// Prologue size varies (bang, Loongson pairing), so compute it generically.
uint MachPrologNode::size(PhaseRegAlloc *ra_) const {
  return MachNode::size(ra_); // too many variables; just compute it the hard way
}
// Worst-case number of relocations the prologue may emit.
int MachPrologNode::reloc() const {
  return 0; // a large enough number
}
1745 %}
1747 //----------ENCODING BLOCK-----------------------------------------------------
1748 // This block specifies the encoding classes used by the compiler to output
1749 // byte streams. Encoding classes generate functions which are called by
1750 // Machine Instruction Nodes in order to generate the bit encoding of the
1751 // instruction. Operands specify their base encoding interface with the
// interface keyword. Four interfaces are currently supported:
1753 // REG_INTER, CONST_INTER, MEMORY_INTER, & COND_INTER. REG_INTER causes an
1754 // operand to generate a function which returns its register number when
1755 // queried. CONST_INTER causes an operand to generate a function which
1756 // returns the value of the constant when queried. MEMORY_INTER causes an
1757 // operand to generate four functions which return the Base Register, the
1758 // Index Register, the Scale Value, and the Offset Value of the operand when
1759 // queried. COND_INTER causes an operand to generate six functions which
1760 // return the encoding code (ie - encoding bits for the instruction)
1761 // associated with each basic boolean condition for a conditional instruction.
1762 // Instructions specify two basic values for encoding. They use the
1763 // ins_encode keyword to specify their encoding class (which must be one of
1764 // the class names specified in the encoding block), and they use the
1765 // opcode keyword to specify, in order, their primary, secondary, and
1766 // tertiary opcode. Only the opcode sections which a particular instruction
1767 // needs for encoding need to be specified.
1768 encode %{
1769 /*
1770 Alias:
1771 1044 b java.io.ObjectInputStream::readHandle (130 bytes)
1772 118 B14: # B19 B15 <- B13 Freq: 0.899955
1773 118 add S1, S2, V0 #@addP_reg_reg
1774 11c lb S0, [S1 + #-8257524] #@loadB
1775 120 BReq S0, #3, B19 #@branchConI_reg_imm P=0.100000 C=-1.000000
1776 */
1777 //Load byte signed
  //Load byte signed
  // Emits a sign-extending byte load of [base + (index << scale) + disp]
  // into $dst.  AT and T9 are scratch registers.  On Loongson, gslbx fuses
  // the indexed address computation into the load; otherwise the address is
  // built with dsll/addu and a plain lb is used.
  enc_class load_B_enc (mRegI dst, memory mem) %{
    MacroAssembler _masm(&cbuf);
    int  dst = $dst$$reg;
    int  base = $mem$$base;
    int  index = $mem$$index;
    int  scale = $mem$$scale;
    int  disp = $mem$$disp;

    if( index != 0 ) {
      if( Assembler::is_simm16(disp) ) {
        if( UseLoongsonISA ) {
          if (scale == 0) {
            __ gslbx(as_Register(dst), as_Register(base), as_Register(index), disp);
          } else {
            __ dsll(AT, as_Register(index), scale);
            __ gslbx(as_Register(dst), as_Register(base), AT, disp);
          }
        } else {
          if (scale == 0) {
            __ addu(AT, as_Register(base), as_Register(index));
          } else {
            __ dsll(AT, as_Register(index), scale);
            __ addu(AT, as_Register(base), AT);
          }
          __ lb(as_Register(dst), AT, disp);
        }
      } else {
        // Displacement does not fit in 16 bits: materialize it in T9.
        if (scale == 0) {
          __ addu(AT, as_Register(base), as_Register(index));
        } else {
          __ dsll(AT, as_Register(index), scale);
          __ addu(AT, as_Register(base), AT);
        }
        __ move(T9, disp);
        if( UseLoongsonISA ) {
          __ gslbx(as_Register(dst), AT, T9, 0);
        } else {
          __ addu(AT, AT, T9);
          __ lb(as_Register(dst), AT, 0);
        }
      }
    } else {
      // No index register: base + disp only.
      if( Assembler::is_simm16(disp) ) {
        __ lb(as_Register(dst), as_Register(base), disp);
      } else {
        __ move(T9, disp);
        if( UseLoongsonISA ) {
          __ gslbx(as_Register(dst), as_Register(base), T9, 0);
        } else {
          __ addu(AT, as_Register(base), T9);
          __ lb(as_Register(dst), AT, 0);
        }
      }
    }
  %}
1834 //Load byte unsigned
  //Load byte unsigned
  // Emits a zero-extending byte load (lbu) of [base + (index << scale) + disp]
  // into $dst, using AT/T9 as scratch.  Unlike load_B_enc this variant does
  // not use the Loongson indexed-load forms.
  enc_class load_UB_enc (mRegI dst, memory mem) %{
    MacroAssembler _masm(&cbuf);
    int  dst = $dst$$reg;
    int  base = $mem$$base;
    int  index = $mem$$index;
    int  scale = $mem$$scale;
    int  disp = $mem$$disp;

    if( index != 0 ) {
      // AT = base + (index << scale)
      if (scale == 0) {
        __ daddu(AT, as_Register(base), as_Register(index));
      } else {
        __ dsll(AT, as_Register(index), scale);
        __ daddu(AT, as_Register(base), AT);
      }
      if( Assembler::is_simm16(disp) ) {
        __ lbu(as_Register(dst), AT, disp);
      } else {
        // Displacement does not fit in 16 bits: add it via T9.
        __ move(T9, disp);
        __ daddu(AT, AT, T9);
        __ lbu(as_Register(dst), AT, 0);
      }
    } else {
      if( Assembler::is_simm16(disp) ) {
        __ lbu(as_Register(dst), as_Register(base), disp);
      } else {
        __ move(T9, disp);
        __ daddu(AT, as_Register(base), T9);
        __ lbu(as_Register(dst), AT, 0);
      }
    }
  %}
  // Stores the byte in $src to [base + (index << scale) + disp].
  // AT/T9 are scratch.  The Loongson gssbx (store byte indexed) takes only
  // an 8-bit displacement, hence the is_simm(disp, 8) checks; otherwise the
  // address is built explicitly and a plain sb is used.
  enc_class store_B_reg_enc (memory mem, mRegI src) %{
    MacroAssembler _masm(&cbuf);
    int  src = $src$$reg;
    int  base = $mem$$base;
    int  index = $mem$$index;
    int  scale = $mem$$scale;
    int  disp = $mem$$disp;

    if( index != 0 ) {
      if (scale == 0) {
        // Unscaled index.
        if( Assembler::is_simm(disp, 8) ) {
          if (UseLoongsonISA) {
            __ gssbx(as_Register(src), as_Register(base), as_Register(index), disp);
          } else {
            __ addu(AT, as_Register(base), as_Register(index));
            __ sb(as_Register(src), AT, disp);
          }
        } else if( Assembler::is_simm16(disp) ) {
          __ addu(AT, as_Register(base), as_Register(index));
          __ sb(as_Register(src), AT, disp);
        } else {
          // Displacement too large even for sb: materialize it in T9.
          __ addu(AT, as_Register(base), as_Register(index));
          __ move(T9, disp);
          if (UseLoongsonISA) {
            __ gssbx(as_Register(src), AT, T9, 0);
          } else {
            __ addu(AT, AT, T9);
            __ sb(as_Register(src), AT, 0);
          }
        }
      } else {
        // Scaled index: AT = index << scale.
        __ dsll(AT, as_Register(index), scale);
        if( Assembler::is_simm(disp, 8) ) {
          if (UseLoongsonISA) {
            __ gssbx(as_Register(src), AT, as_Register(base), disp);
          } else {
            __ addu(AT, as_Register(base), AT);
            __ sb(as_Register(src), AT, disp);
          }
        } else if( Assembler::is_simm16(disp) ) {
          __ addu(AT, as_Register(base), AT);
          __ sb(as_Register(src), AT, disp);
        } else {
          __ addu(AT, as_Register(base), AT);
          __ move(T9, disp);
          if (UseLoongsonISA) {
            __ gssbx(as_Register(src), AT, T9, 0);
          } else {
            __ addu(AT, AT, T9);
            __ sb(as_Register(src), AT, 0);
          }
        }
      }
    } else {
      // No index register: base + disp only.
      if( Assembler::is_simm16(disp) ) {
        __ sb(as_Register(src), as_Register(base), disp);
      } else {
        __ move(T9, disp);
        if (UseLoongsonISA) {
          __ gssbx(as_Register(src), as_Register(base), T9, 0);
        } else {
          __ addu(AT, as_Register(base), T9);
          __ sb(as_Register(src), AT, 0);
        }
      }
    }
  %}
// Store Byte (8-bit) of an immediate constant into memory.
// Zero is stored directly from the hard-wired zero register R0; any other
// constant is first materialized into a scratch register (AT or T9).
// AT and T9 serve as address/value scratch registers throughout.
// Loongson gssbx stores at (base + index_reg + imm8), so small displacements
// (is_simm(disp, 8)) need no separate address addition; plain MIPS sb takes
// a 16-bit signed displacement (is_simm16).
enc_class store_B_immI_enc (memory mem, immI8 src) %{
  MacroAssembler _masm(&cbuf);
  int base = $mem$$base;
  int index = $mem$$index;
  int scale = $mem$$scale;
  int disp = $mem$$disp;
  int value = $src$$constant;

  if( index != 0 ) {
    if (!UseLoongsonISA) {
      // Plain MIPS: form base + (index << scale) in AT, then store.
      if (scale == 0) {
        __ daddu(AT, as_Register(base), as_Register(index));
      } else {
        __ dsll(AT, as_Register(index), scale);
        __ daddu(AT, as_Register(base), AT);
      }
      if( Assembler::is_simm16(disp) ) {
        if (value == 0) {
          __ sb(R0, AT, disp);
        } else {
          __ move(T9, value);
          __ sb(T9, AT, disp);
        }
      } else {
        // disp too large for sb's 16-bit field: add it in via T9 first.
        if (value == 0) {
          __ move(T9, disp);
          __ daddu(AT, AT, T9);
          __ sb(R0, AT, 0);
        } else {
          __ move(T9, disp);
          __ daddu(AT, AT, T9);
          __ move(T9, value);
          __ sb(T9, AT, 0);
        }
      }
    } else {

      if (scale == 0) {
        if( Assembler::is_simm(disp, 8) ) {
          // disp fits gssbx's 8-bit immediate: single indexed store.
          if (value == 0) {
            __ gssbx(R0, as_Register(base), as_Register(index), disp);
          } else {
            __ move(T9, value);
            __ gssbx(T9, as_Register(base), as_Register(index), disp);
          }
        } else if( Assembler::is_simm16(disp) ) {
          __ daddu(AT, as_Register(base), as_Register(index));
          if (value == 0) {
            __ sb(R0, AT, disp);
          } else {
            __ move(T9, value);
            __ sb(T9, AT, disp);
          }
        } else {
          if (value == 0) {
            __ daddu(AT, as_Register(base), as_Register(index));
            __ move(T9, disp);
            __ gssbx(R0, AT, T9, 0);
          } else {
            // AT = base + disp; store value at AT + index.
            __ move(AT, disp);
            __ move(T9, value);
            __ daddu(AT, as_Register(base), AT);
            __ gssbx(T9, AT, as_Register(index), 0);
          }
        }

      } else {

        if( Assembler::is_simm(disp, 8) ) {
          __ dsll(AT, as_Register(index), scale);
          if (value == 0) {
            __ gssbx(R0, as_Register(base), AT, disp);
          } else {
            __ move(T9, value);
            __ gssbx(T9, as_Register(base), AT, disp);
          }
        } else if( Assembler::is_simm16(disp) ) {
          __ dsll(AT, as_Register(index), scale);
          __ daddu(AT, as_Register(base), AT);
          if (value == 0) {
            __ sb(R0, AT, disp);
          } else {
            __ move(T9, value);
            __ sb(T9, AT, disp);
          }
        } else {
          __ dsll(AT, as_Register(index), scale);
          if (value == 0) {
            __ daddu(AT, as_Register(base), AT);
            __ move(T9, disp);
            __ gssbx(R0, AT, T9, 0);
          } else {
            // AT = (index << scale) + disp; store value at base + AT.
            __ move(T9, disp);
            __ daddu(AT, AT, T9);
            __ move(T9, value);
            __ gssbx(T9, as_Register(base), AT, 0);
          }
        }
      }
    }
  } else {
    // No index register: base + disp addressing only.
    if( Assembler::is_simm16(disp) ) {
      if (value == 0) {
        __ sb(R0, as_Register(base), disp);
      } else {
        __ move(AT, value);
        __ sb(AT, as_Register(base), disp);
      }
    } else {
      if (value == 0) {
        __ move(T9, disp);
        if (UseLoongsonISA) {
          __ gssbx(R0, as_Register(base), T9, 0);
        } else {
          __ daddu(AT, as_Register(base), T9);
          __ sb(R0, AT, 0);
        }
      } else {
        __ move(T9, disp);
        if (UseLoongsonISA) {
          __ move(AT, value);
          __ gssbx(AT, as_Register(base), T9, 0);
        } else {
          __ daddu(AT, as_Register(base), T9);
          __ move(T9, value);
          __ sb(T9, AT, 0);
        }
      }
    }
  }
%}
// Store Byte (8-bit) of an immediate constant into memory, followed by a
// full memory barrier (sync) — used for volatile/ordered byte stores.
// Address-formation strategy is identical to store_B_immI_enc above:
// zero comes from R0, other constants go through a scratch register;
// Loongson gssbx handles (base + index_reg + imm8) in one instruction.
enc_class store_B_immI_enc_sync (memory mem, immI8 src) %{
  MacroAssembler _masm(&cbuf);
  int base = $mem$$base;
  int index = $mem$$index;
  int scale = $mem$$scale;
  int disp = $mem$$disp;
  int value = $src$$constant;

  if( index != 0 ) {
    if ( UseLoongsonISA ) {
      if ( Assembler::is_simm(disp,8) ) {
        // disp fits gssbx's 8-bit immediate.
        if ( scale == 0 ) {
          if ( value == 0 ) {
            __ gssbx(R0, as_Register(base), as_Register(index), disp);
          } else {
            __ move(AT, value);
            __ gssbx(AT, as_Register(base), as_Register(index), disp);
          }
        } else {
          __ dsll(AT, as_Register(index), scale);
          if ( value == 0 ) {
            __ gssbx(R0, as_Register(base), AT, disp);
          } else {
            __ move(T9, value);
            __ gssbx(T9, as_Register(base), AT, disp);
          }
        }
      } else if ( Assembler::is_simm16(disp) ) {
        // disp fits sb's 16-bit field: form base + scaled index in AT.
        if ( scale == 0 ) {
          __ daddu(AT, as_Register(base), as_Register(index));
          if ( value == 0 ){
            __ sb(R0, AT, disp);
          } else {
            __ move(T9, value);
            __ sb(T9, AT, disp);
          }
        } else {
          __ dsll(AT, as_Register(index), scale);
          __ daddu(AT, as_Register(base), AT);
          if ( value == 0 ) {
            __ sb(R0, AT, disp);
          } else {
            __ move(T9, value);
            __ sb(T9, AT, disp);
          }
        }
      } else {
        // Large disp: fold disp into the (scaled) index, store indexed off base.
        if ( scale == 0 ) {
          __ move(AT, disp);
          __ daddu(AT, as_Register(index), AT);
          if ( value == 0 ) {
            __ gssbx(R0, as_Register(base), AT, 0);
          } else {
            __ move(T9, value);
            __ gssbx(T9, as_Register(base), AT, 0);
          }
        } else {
          __ dsll(AT, as_Register(index), scale);
          __ move(T9, disp);
          __ daddu(AT, AT, T9);
          if ( value == 0 ) {
            __ gssbx(R0, as_Register(base), AT, 0);
          } else {
            __ move(T9, value);
            __ gssbx(T9, as_Register(base), AT, 0);
          }
        }
      }
    } else { //not use loongson isa
      if (scale == 0) {
        __ daddu(AT, as_Register(base), as_Register(index));
      } else {
        __ dsll(AT, as_Register(index), scale);
        __ daddu(AT, as_Register(base), AT);
      }
      if( Assembler::is_simm16(disp) ) {
        if (value == 0) {
          __ sb(R0, AT, disp);
        } else {
          __ move(T9, value);
          __ sb(T9, AT, disp);
        }
      } else {
        if (value == 0) {
          __ move(T9, disp);
          __ daddu(AT, AT, T9);
          __ sb(R0, AT, 0);
        } else {
          __ move(T9, disp);
          __ daddu(AT, AT, T9);
          __ move(T9, value);
          __ sb(T9, AT, 0);
        }
      }
    }
  } else {
    // No index register.
    if ( UseLoongsonISA ){
      if ( Assembler::is_simm16(disp) ){
        if ( value == 0 ) {
          __ sb(R0, as_Register(base), disp);
        } else {
          __ move(AT, value);
          __ sb(AT, as_Register(base), disp);
        }
      } else {
        __ move(AT, disp);
        if ( value == 0 ) {
          __ gssbx(R0, as_Register(base), AT, 0);
        } else {
          __ move(T9, value);
          __ gssbx(T9, as_Register(base), AT, 0);
        }
      }
    } else {
      if( Assembler::is_simm16(disp) ) {
        if (value == 0) {
          __ sb(R0, as_Register(base), disp);
        } else {
          __ move(AT, value);
          __ sb(AT, as_Register(base), disp);
        }
      } else {
        if (value == 0) {
          __ move(T9, disp);
          __ daddu(AT, as_Register(base), T9);
          __ sb(R0, AT, 0);
        } else {
          __ move(T9, disp);
          __ daddu(AT, as_Register(base), T9);
          __ move(T9, value);
          __ sb(T9, AT, 0);
        }
      }
    }
  }

  // Full barrier after the store — this is the "_sync" variant's contract.
  __ sync();
%}
// Load Short (16bit signed)
// Sign-extending 16-bit load (lh / Loongson gslhx) from [base + (index <<
// scale) + disp].  gslhx folds base + index_reg + imm8 into one instruction;
// lh takes a 16-bit signed displacement.  AT and T9 are scratch.
enc_class load_S_enc (mRegI dst, memory mem) %{
  MacroAssembler _masm(&cbuf);
  int dst = $dst$$reg;
  int base = $mem$$base;
  int index = $mem$$index;
  int scale = $mem$$scale;
  int disp = $mem$$disp;

  if( index != 0 ) {
    if ( UseLoongsonISA ) {
      if ( Assembler::is_simm(disp, 8) ) {
        // disp fits gslhx's 8-bit immediate.
        if (scale == 0) {
          __ gslhx(as_Register(dst), as_Register(base), as_Register(index), disp);
        } else {
          __ dsll(AT, as_Register(index), scale);
          __ gslhx(as_Register(dst), as_Register(base), AT, disp);
        }
      } else if ( Assembler::is_simm16(disp) ) {
        if (scale == 0) {
          __ daddu(AT, as_Register(base), as_Register(index));
          __ lh(as_Register(dst), AT, disp);
        } else {
          __ dsll(AT, as_Register(index), scale);
          __ daddu(AT, as_Register(base), AT);
          __ lh(as_Register(dst), AT, disp);
        }
      } else {
        // Large disp: fold it into the (scaled) index, load indexed off base.
        if (scale == 0) {
          __ move(AT, disp);
          __ daddu(AT, as_Register(index), AT);
          __ gslhx(as_Register(dst), as_Register(base), AT, 0);
        } else {
          __ dsll(AT, as_Register(index), scale);
          __ move(T9, disp);
          __ daddu(AT, AT, T9);
          __ gslhx(as_Register(dst), as_Register(base), AT, 0);
        }
      }
    } else { // not use loongson isa
      if (scale == 0) {
        __ daddu(AT, as_Register(base), as_Register(index));
      } else {
        __ dsll(AT, as_Register(index), scale);
        __ daddu(AT, as_Register(base), AT);
      }
      if( Assembler::is_simm16(disp) ) {
        __ lh(as_Register(dst), AT, disp);
      } else {
        __ move(T9, disp);
        __ daddu(AT, AT, T9);
        __ lh(as_Register(dst), AT, 0);
      }
    }
  } else { // index is 0
    if ( UseLoongsonISA ) {
      if ( Assembler::is_simm16(disp) ) {
        __ lh(as_Register(dst), as_Register(base), disp);
      } else {
        __ move(T9, disp);
        __ gslhx(as_Register(dst), as_Register(base), T9, 0);
      }
    } else { //not use loongson isa
      if( Assembler::is_simm16(disp) ) {
        __ lh(as_Register(dst), as_Register(base), disp);
      } else {
        __ move(T9, disp);
        __ daddu(AT, as_Register(base), T9);
        __ lh(as_Register(dst), AT, 0);
      }
    }
  }
%}
2282 // Load Char (16bit unsigned)
2283 enc_class load_C_enc (mRegI dst, memory mem) %{
2284 MacroAssembler _masm(&cbuf);
2285 int dst = $dst$$reg;
2286 int base = $mem$$base;
2287 int index = $mem$$index;
2288 int scale = $mem$$scale;
2289 int disp = $mem$$disp;
2291 if( index != 0 ) {
2292 if (scale == 0) {
2293 __ daddu(AT, as_Register(base), as_Register(index));
2294 } else {
2295 __ dsll(AT, as_Register(index), scale);
2296 __ daddu(AT, as_Register(base), AT);
2297 }
2298 if( Assembler::is_simm16(disp) ) {
2299 __ lhu(as_Register(dst), AT, disp);
2300 } else {
2301 __ move(T9, disp);
2302 __ addu(AT, AT, T9);
2303 __ lhu(as_Register(dst), AT, 0);
2304 }
2305 } else {
2306 if( Assembler::is_simm16(disp) ) {
2307 __ lhu(as_Register(dst), as_Register(base), disp);
2308 } else {
2309 __ move(T9, disp);
2310 __ daddu(AT, as_Register(base), T9);
2311 __ lhu(as_Register(dst), AT, 0);
2312 }
2313 }
2314 %}
2316 // Store Char (16bit unsigned)
2317 enc_class store_C_reg_enc (memory mem, mRegI src) %{
2318 MacroAssembler _masm(&cbuf);
2319 int src = $src$$reg;
2320 int base = $mem$$base;
2321 int index = $mem$$index;
2322 int scale = $mem$$scale;
2323 int disp = $mem$$disp;
2325 if( index != 0 ) {
2326 if( Assembler::is_simm16(disp) ) {
2327 if( UseLoongsonISA && Assembler::is_simm(disp, 8) ) {
2328 if (scale == 0) {
2329 __ gsshx(as_Register(src), as_Register(base), as_Register(index), disp);
2330 } else {
2331 __ dsll(AT, as_Register(index), scale);
2332 __ gsshx(as_Register(src), as_Register(base), AT, disp);
2333 }
2334 } else {
2335 if (scale == 0) {
2336 __ addu(AT, as_Register(base), as_Register(index));
2337 } else {
2338 __ dsll(AT, as_Register(index), scale);
2339 __ addu(AT, as_Register(base), AT);
2340 }
2341 __ sh(as_Register(src), AT, disp);
2342 }
2343 } else {
2344 if (scale == 0) {
2345 __ addu(AT, as_Register(base), as_Register(index));
2346 } else {
2347 __ dsll(AT, as_Register(index), scale);
2348 __ addu(AT, as_Register(base), AT);
2349 }
2350 __ move(T9, disp);
2351 if( UseLoongsonISA ) {
2352 __ gsshx(as_Register(src), AT, T9, 0);
2353 } else {
2354 __ addu(AT, AT, T9);
2355 __ sh(as_Register(src), AT, 0);
2356 }
2357 }
2358 } else {
2359 if( Assembler::is_simm16(disp) ) {
2360 __ sh(as_Register(src), as_Register(base), disp);
2361 } else {
2362 __ move(T9, disp);
2363 if( UseLoongsonISA ) {
2364 __ gsshx(as_Register(src), as_Register(base), T9, 0);
2365 } else {
2366 __ addu(AT, as_Register(base), T9);
2367 __ sh(as_Register(src), AT, 0);
2368 }
2369 }
2370 }
2371 %}
2373 enc_class store_C0_enc (memory mem) %{
2374 MacroAssembler _masm(&cbuf);
2375 int base = $mem$$base;
2376 int index = $mem$$index;
2377 int scale = $mem$$scale;
2378 int disp = $mem$$disp;
2380 if( index != 0 ) {
2381 if( Assembler::is_simm16(disp) ) {
2382 if( UseLoongsonISA && Assembler::is_simm(disp, 8) ) {
2383 if (scale == 0) {
2384 __ gsshx(R0, as_Register(base), as_Register(index), disp);
2385 } else {
2386 __ dsll(AT, as_Register(index), scale);
2387 __ gsshx(R0, as_Register(base), AT, disp);
2388 }
2389 } else {
2390 if (scale == 0) {
2391 __ addu(AT, as_Register(base), as_Register(index));
2392 } else {
2393 __ dsll(AT, as_Register(index), scale);
2394 __ addu(AT, as_Register(base), AT);
2395 }
2396 __ sh(R0, AT, disp);
2397 }
2398 } else {
2399 if (scale == 0) {
2400 __ addu(AT, as_Register(base), as_Register(index));
2401 } else {
2402 __ dsll(AT, as_Register(index), scale);
2403 __ addu(AT, as_Register(base), AT);
2404 }
2405 __ move(T9, disp);
2406 if( UseLoongsonISA ) {
2407 __ gsshx(R0, AT, T9, 0);
2408 } else {
2409 __ addu(AT, AT, T9);
2410 __ sh(R0, AT, 0);
2411 }
2412 }
2413 } else {
2414 if( Assembler::is_simm16(disp) ) {
2415 __ sh(R0, as_Register(base), disp);
2416 } else {
2417 __ move(T9, disp);
2418 if( UseLoongsonISA ) {
2419 __ gsshx(R0, as_Register(base), T9, 0);
2420 } else {
2421 __ addu(AT, as_Register(base), T9);
2422 __ sh(R0, AT, 0);
2423 }
2424 }
2425 }
2426 %}
2428 enc_class load_I_enc (mRegI dst, memory mem) %{
2429 MacroAssembler _masm(&cbuf);
2430 int dst = $dst$$reg;
2431 int base = $mem$$base;
2432 int index = $mem$$index;
2433 int scale = $mem$$scale;
2434 int disp = $mem$$disp;
2436 if( index != 0 ) {
2437 if( Assembler::is_simm16(disp) ) {
2438 if( UseLoongsonISA && Assembler::is_simm(disp, 8) ) {
2439 if (scale == 0) {
2440 __ gslwx(as_Register(dst), as_Register(base), as_Register(index), disp);
2441 } else {
2442 __ dsll(AT, as_Register(index), scale);
2443 __ gslwx(as_Register(dst), as_Register(base), AT, disp);
2444 }
2445 } else {
2446 if (scale == 0) {
2447 __ addu(AT, as_Register(base), as_Register(index));
2448 } else {
2449 __ dsll(AT, as_Register(index), scale);
2450 __ addu(AT, as_Register(base), AT);
2451 }
2452 __ lw(as_Register(dst), AT, disp);
2453 }
2454 } else {
2455 if (scale == 0) {
2456 __ addu(AT, as_Register(base), as_Register(index));
2457 } else {
2458 __ dsll(AT, as_Register(index), scale);
2459 __ addu(AT, as_Register(base), AT);
2460 }
2461 __ move(T9, disp);
2462 if( UseLoongsonISA ) {
2463 __ gslwx(as_Register(dst), AT, T9, 0);
2464 } else {
2465 __ addu(AT, AT, T9);
2466 __ lw(as_Register(dst), AT, 0);
2467 }
2468 }
2469 } else {
2470 if( Assembler::is_simm16(disp) ) {
2471 __ lw(as_Register(dst), as_Register(base), disp);
2472 } else {
2473 __ move(T9, disp);
2474 if( UseLoongsonISA ) {
2475 __ gslwx(as_Register(dst), as_Register(base), T9, 0);
2476 } else {
2477 __ addu(AT, as_Register(base), T9);
2478 __ lw(as_Register(dst), AT, 0);
2479 }
2480 }
2481 }
2482 %}
2484 enc_class store_I_reg_enc (memory mem, mRegI src) %{
2485 MacroAssembler _masm(&cbuf);
2486 int src = $src$$reg;
2487 int base = $mem$$base;
2488 int index = $mem$$index;
2489 int scale = $mem$$scale;
2490 int disp = $mem$$disp;
2492 if( index != 0 ) {
2493 if( Assembler::is_simm16(disp) ) {
2494 if( UseLoongsonISA && Assembler::is_simm(disp, 8) ) {
2495 if (scale == 0) {
2496 __ gsswx(as_Register(src), as_Register(base), as_Register(index), disp);
2497 } else {
2498 __ dsll(AT, as_Register(index), scale);
2499 __ gsswx(as_Register(src), as_Register(base), AT, disp);
2500 }
2501 } else {
2502 if (scale == 0) {
2503 __ addu(AT, as_Register(base), as_Register(index));
2504 } else {
2505 __ dsll(AT, as_Register(index), scale);
2506 __ addu(AT, as_Register(base), AT);
2507 }
2508 __ sw(as_Register(src), AT, disp);
2509 }
2510 } else {
2511 if (scale == 0) {
2512 __ addu(AT, as_Register(base), as_Register(index));
2513 } else {
2514 __ dsll(AT, as_Register(index), scale);
2515 __ addu(AT, as_Register(base), AT);
2516 }
2517 __ move(T9, disp);
2518 if( UseLoongsonISA ) {
2519 __ gsswx(as_Register(src), AT, T9, 0);
2520 } else {
2521 __ addu(AT, AT, T9);
2522 __ sw(as_Register(src), AT, 0);
2523 }
2524 }
2525 } else {
2526 if( Assembler::is_simm16(disp) ) {
2527 __ sw(as_Register(src), as_Register(base), disp);
2528 } else {
2529 __ move(T9, disp);
2530 if( UseLoongsonISA ) {
2531 __ gsswx(as_Register(src), as_Register(base), T9, 0);
2532 } else {
2533 __ addu(AT, as_Register(base), T9);
2534 __ sw(as_Register(src), AT, 0);
2535 }
2536 }
2537 }
2538 %}
// Store Int (32-bit) of an immediate constant to memory.
// Zero is stored directly from R0; other constants are materialized in a
// scratch register (AT or T9).  Loongson gsswx stores at
// (base + index_reg + imm8); plain sw takes a 16-bit signed displacement.
enc_class store_I_immI_enc (memory mem, immI src) %{
  MacroAssembler _masm(&cbuf);
  int base = $mem$$base;
  int index = $mem$$index;
  int scale = $mem$$scale;
  int disp = $mem$$disp;
  int value = $src$$constant;

  if( index != 0 ) {
    if ( UseLoongsonISA ) {
      if ( Assembler::is_simm(disp, 8) ) {
        // disp fits gsswx's 8-bit immediate.
        if ( scale == 0 ) {
          if ( value == 0 ) {
            __ gsswx(R0, as_Register(base), as_Register(index), disp);
          } else {
            __ move(T9, value);
            __ gsswx(T9, as_Register(base), as_Register(index), disp);
          }
        } else {
          __ dsll(AT, as_Register(index), scale);
          if ( value == 0 ) {
            __ gsswx(R0, as_Register(base), AT, disp);
          } else {
            __ move(T9, value);
            __ gsswx(T9, as_Register(base), AT, disp);
          }
        }
      } else if ( Assembler::is_simm16(disp) ) {
        // disp fits sw's 16-bit field: form base + scaled index in AT.
        if ( scale == 0 ) {
          __ daddu(AT, as_Register(base), as_Register(index));
          if ( value == 0 ) {
            __ sw(R0, AT, disp);
          } else {
            __ move(T9, value);
            __ sw(T9, AT, disp);
          }
        } else {
          __ dsll(AT, as_Register(index), scale);
          __ daddu(AT, as_Register(base), AT);
          if ( value == 0 ) {
            __ sw(R0, AT, disp);
          } else {
            __ move(T9, value);
            __ sw(T9, AT, disp);
          }
        }
      } else {
        // Large disp: fold it into the (scaled) index, store indexed off base.
        if ( scale == 0 ) {
          __ move(T9, disp);
          __ daddu(AT, as_Register(index), T9);
          if ( value ==0 ) {
            __ gsswx(R0, as_Register(base), AT, 0);
          } else {
            __ move(T9, value);
            __ gsswx(T9, as_Register(base), AT, 0);
          }
        } else {
          __ dsll(AT, as_Register(index), scale);
          __ move(T9, disp);
          __ daddu(AT, AT, T9);
          if ( value == 0 ) {
            __ gsswx(R0, as_Register(base), AT, 0);
          } else {
            __ move(T9, value);
            __ gsswx(T9, as_Register(base), AT, 0);
          }
        }
      }
    } else { //not use loongson isa
      if (scale == 0) {
        __ daddu(AT, as_Register(base), as_Register(index));
      } else {
        __ dsll(AT, as_Register(index), scale);
        __ daddu(AT, as_Register(base), AT);
      }
      if( Assembler::is_simm16(disp) ) {
        if (value == 0) {
          __ sw(R0, AT, disp);
        } else {
          __ move(T9, value);
          __ sw(T9, AT, disp);
        }
      } else {
        if (value == 0) {
          __ move(T9, disp);
          __ daddu(AT, AT, T9);
          __ sw(R0, AT, 0);
        } else {
          __ move(T9, disp);
          __ daddu(AT, AT, T9);
          __ move(T9, value);
          __ sw(T9, AT, 0);
        }
      }
    }
  } else {
    // No index register.
    if ( UseLoongsonISA ) {
      if ( Assembler::is_simm16(disp) ) {
        if ( value == 0 ) {
          __ sw(R0, as_Register(base), disp);
        } else {
          __ move(AT, value);
          __ sw(AT, as_Register(base), disp);
        }
      } else {
        __ move(T9, disp);
        if ( value == 0 ) {
          __ gsswx(R0, as_Register(base), T9, 0);
        } else {
          __ move(AT, value);
          __ gsswx(AT, as_Register(base), T9, 0);
        }
      }
    } else {
      if( Assembler::is_simm16(disp) ) {
        if (value == 0) {
          __ sw(R0, as_Register(base), disp);
        } else {
          __ move(AT, value);
          __ sw(AT, as_Register(base), disp);
        }
      } else {
        if (value == 0) {
          __ move(T9, disp);
          __ daddu(AT, as_Register(base), T9);
          __ sw(R0, AT, 0);
        } else {
          __ move(T9, disp);
          __ daddu(AT, as_Register(base), T9);
          __ move(T9, value);
          __ sw(T9, AT, 0);
        }
      }
    }
  }
%}
// Load narrow (compressed) oop: zero-extending 32-bit load (lwu) from
// [base + (index << scale) + disp].  Compressed oops must not be
// sign-extended, hence lwu rather than lw.  set64 materializes the full
// 64-bit disp into T9.  A relocated displacement is not supported here.
enc_class load_N_enc (mRegN dst, memory mem) %{
  MacroAssembler _masm(&cbuf);
  int dst = $dst$$reg;
  int base = $mem$$base;
  int index = $mem$$index;
  int scale = $mem$$scale;
  int disp = $mem$$disp;
  relocInfo::relocType disp_reloc = $mem->disp_reloc();
  assert(disp_reloc == relocInfo::none, "cannot have disp");

  if( index != 0 ) {
    // AT = base + (index << scale)
    if (scale == 0) {
      __ daddu(AT, as_Register(base), as_Register(index));
    } else {
      __ dsll(AT, as_Register(index), scale);
      __ daddu(AT, as_Register(base), AT);
    }
    if( Assembler::is_simm16(disp) ) {
      __ lwu(as_Register(dst), AT, disp);
    } else {
      __ set64(T9, disp);
      __ daddu(AT, AT, T9);
      __ lwu(as_Register(dst), AT, 0);
    }
  } else {
    if( Assembler::is_simm16(disp) ) {
      __ lwu(as_Register(dst), as_Register(base), disp);
    } else {
      __ set64(T9, disp);
      __ daddu(AT, as_Register(base), T9);
      __ lwu(as_Register(dst), AT, 0);
    }
  }

%}
// Load Pointer (64-bit ld / Loongson gsldx) from
// [base + (index << scale) + disp].  A relocated displacement is not
// supported.  gsldx folds base + index_reg + imm8 into one instruction.
enc_class load_P_enc (mRegP dst, memory mem) %{
  MacroAssembler _masm(&cbuf);
  int dst = $dst$$reg;
  int base = $mem$$base;
  int index = $mem$$index;
  int scale = $mem$$scale;
  int disp = $mem$$disp;
  relocInfo::relocType disp_reloc = $mem->disp_reloc();
  assert(disp_reloc == relocInfo::none, "cannot have disp");

  if( index != 0 ) {
    if ( UseLoongsonISA ) {
      if ( Assembler::is_simm(disp, 8) ) {
        // disp fits gsldx's 8-bit immediate.
        if ( scale != 0 ) {
          __ dsll(AT, as_Register(index), scale);
          __ gsldx(as_Register(dst), as_Register(base), AT, disp);
        } else {
          __ gsldx(as_Register(dst), as_Register(base), as_Register(index), disp);
        }
      } else if ( Assembler::is_simm16(disp) ){
        if ( scale != 0 ) {
          __ dsll(AT, as_Register(index), scale);
          __ daddu(AT, AT, as_Register(base));
        } else {
          __ daddu(AT, as_Register(index), as_Register(base));
        }
        __ ld(as_Register(dst), AT, disp);
      } else {
        // Large disp: fold it into the (scaled) index, load indexed off base.
        if ( scale != 0 ) {
          __ dsll(AT, as_Register(index), scale);
          __ move(T9, disp);
          __ daddu(AT, AT, T9);
        } else {
          __ move(T9, disp);
          __ daddu(AT, as_Register(index), T9);
        }
        __ gsldx(as_Register(dst), as_Register(base), AT, 0);
      }
    } else { //not use loongson isa
      if (scale == 0) {
        __ daddu(AT, as_Register(base), as_Register(index));
      } else {
        __ dsll(AT, as_Register(index), scale);
        __ daddu(AT, as_Register(base), AT);
      }
      if( Assembler::is_simm16(disp) ) {
        __ ld(as_Register(dst), AT, disp);
      } else {
        __ set64(T9, disp);
        __ daddu(AT, AT, T9);
        __ ld(as_Register(dst), AT, 0);
      }
    }
  } else {
    // No index register.
    if ( UseLoongsonISA ) {
      if ( Assembler::is_simm16(disp) ){
        __ ld(as_Register(dst), as_Register(base), disp);
      } else {
        __ set64(T9, disp);
        __ gsldx(as_Register(dst), as_Register(base), T9, 0);
      }
    } else { //not use loongson isa
      if( Assembler::is_simm16(disp) ) {
        __ ld(as_Register(dst), as_Register(base), disp);
      } else {
        __ set64(T9, disp);
        __ daddu(AT, as_Register(base), T9);
        __ ld(as_Register(dst), AT, 0);
      }
    }
  }
  // if( disp_reloc != relocInfo::none) __ ld(as_Register(dst), as_Register(dst), 0);
%}
// Store Pointer (64-bit sd / Loongson gssdx) to
// [base + (index << scale) + disp].
enc_class store_P_reg_enc (memory mem, mRegP src) %{
  MacroAssembler _masm(&cbuf);
  int src = $src$$reg;
  int base = $mem$$base;
  int index = $mem$$index;
  int scale = $mem$$scale;
  int disp = $mem$$disp;

  if( index != 0 ) {
    if ( UseLoongsonISA ){
      if ( Assembler::is_simm(disp, 8) ) {
        // disp fits gssdx's 8-bit immediate: single indexed store.
        if ( scale == 0 ) {
          __ gssdx(as_Register(src), as_Register(base), as_Register(index), disp);
        } else {
          __ dsll(AT, as_Register(index), scale);
          __ gssdx(as_Register(src), as_Register(base), AT, disp);
        }
      } else if ( Assembler::is_simm16(disp) ) {
        if ( scale == 0 ) {
          __ daddu(AT, as_Register(base), as_Register(index));
        } else {
          __ dsll(AT, as_Register(index), scale);
          __ daddu(AT, as_Register(base), AT);
        }
        __ sd(as_Register(src), AT, disp);
      } else {
        // Large disp: fold it into the (scaled) index, store indexed off base.
        if ( scale == 0 ) {
          __ move(T9, disp);
          __ daddu(AT, as_Register(index), T9);
        } else {
          __ dsll(AT, as_Register(index), scale);
          __ move(T9, disp);
          __ daddu(AT, AT, T9);
        }
        __ gssdx(as_Register(src), as_Register(base), AT, 0);
      }
    } else { //not use loongson isa
      if (scale == 0) {
        __ daddu(AT, as_Register(base), as_Register(index));
      } else {
        __ dsll(AT, as_Register(index), scale);
        __ daddu(AT, as_Register(base), AT);
      }
      if( Assembler::is_simm16(disp) ) {
        __ sd(as_Register(src), AT, disp);
      } else {
        __ move(T9, disp);
        __ daddu(AT, AT, T9);
        __ sd(as_Register(src), AT, 0);
      }
    }
  } else {
    // No index register.
    if ( UseLoongsonISA ) {
      if ( Assembler::is_simm16(disp) ) {
        __ sd(as_Register(src), as_Register(base), disp);
      } else {
        __ move(T9, disp);
        __ gssdx(as_Register(src), as_Register(base), T9, 0);
      }
    } else {
      if( Assembler::is_simm16(disp) ) {
        __ sd(as_Register(src), as_Register(base), disp);
      } else {
        __ move(T9, disp);
        __ daddu(AT, as_Register(base), T9);
        __ sd(as_Register(src), AT, 0);
      }
    }
  }
%}
// Store narrow (compressed) oop: 32-bit store (sw / Loongson gsswx) to
// [base + (index << scale) + disp].  Mirrors store_P_reg_enc but with
// 32-bit stores since compressed oops occupy 4 bytes.
enc_class store_N_reg_enc (memory mem, mRegN src) %{
  MacroAssembler _masm(&cbuf);
  int src = $src$$reg;
  int base = $mem$$base;
  int index = $mem$$index;
  int scale = $mem$$scale;
  int disp = $mem$$disp;

  if( index != 0 ) {
    if ( UseLoongsonISA ){
      if ( Assembler::is_simm(disp, 8) ) {
        // disp fits gsswx's 8-bit immediate: single indexed store.
        if ( scale == 0 ) {
          __ gsswx(as_Register(src), as_Register(base), as_Register(index), disp);
        } else {
          __ dsll(AT, as_Register(index), scale);
          __ gsswx(as_Register(src), as_Register(base), AT, disp);
        }
      } else if ( Assembler::is_simm16(disp) ) {
        if ( scale == 0 ) {
          __ daddu(AT, as_Register(base), as_Register(index));
        } else {
          __ dsll(AT, as_Register(index), scale);
          __ daddu(AT, as_Register(base), AT);
        }
        __ sw(as_Register(src), AT, disp);
      } else {
        // Large disp: fold it into the (scaled) index, store indexed off base.
        if ( scale == 0 ) {
          __ move(T9, disp);
          __ daddu(AT, as_Register(index), T9);
        } else {
          __ dsll(AT, as_Register(index), scale);
          __ move(T9, disp);
          __ daddu(AT, AT, T9);
        }
        __ gsswx(as_Register(src), as_Register(base), AT, 0);
      }
    } else { //not use loongson isa
      if (scale == 0) {
        __ daddu(AT, as_Register(base), as_Register(index));
      } else {
        __ dsll(AT, as_Register(index), scale);
        __ daddu(AT, as_Register(base), AT);
      }
      if( Assembler::is_simm16(disp) ) {
        __ sw(as_Register(src), AT, disp);
      } else {
        __ move(T9, disp);
        __ daddu(AT, AT, T9);
        __ sw(as_Register(src), AT, 0);
      }
    }
  } else {
    // No index register.
    if ( UseLoongsonISA ) {
      if ( Assembler::is_simm16(disp) ) {
        __ sw(as_Register(src), as_Register(base), disp);
      } else {
        __ move(T9, disp);
        __ gsswx(as_Register(src), as_Register(base), T9, 0);
      }
    } else {
      if( Assembler::is_simm16(disp) ) {
        __ sw(as_Register(src), as_Register(base), disp);
      } else {
        __ move(T9, disp);
        __ daddu(AT, as_Register(base), T9);
        __ sw(as_Register(src), AT, 0);
      }
    }
  }
%}
// Store null pointer: 64-bit store of R0 (hard-wired zero) to
// [base + (index << scale) + disp].
enc_class store_P_immP0_enc (memory mem) %{
  MacroAssembler _masm(&cbuf);
  int base = $mem$$base;
  int index = $mem$$index;
  int scale = $mem$$scale;
  int disp = $mem$$disp;

  if( index != 0 ) {
    if (scale == 0) {
      if( Assembler::is_simm16(disp) ) {
        if (UseLoongsonISA && Assembler::is_simm(disp, 8)) {
          // disp fits gssdx's 8-bit immediate: single indexed store.
          __ gssdx(R0, as_Register(base), as_Register(index), disp);
        } else {
          __ daddu(AT, as_Register(base), as_Register(index));
          __ sd(R0, AT, disp);
        }
      } else {
        __ daddu(AT, as_Register(base), as_Register(index));
        __ move(T9, disp);
        if(UseLoongsonISA) {
          __ gssdx(R0, AT, T9, 0);
        } else {
          __ daddu(AT, AT, T9);
          __ sd(R0, AT, 0);
        }
      }
    } else {
      __ dsll(AT, as_Register(index), scale);
      if( Assembler::is_simm16(disp) ) {
        if (UseLoongsonISA && Assembler::is_simm(disp, 8)) {
          __ gssdx(R0, as_Register(base), AT, disp);
        } else {
          __ daddu(AT, as_Register(base), AT);
          __ sd(R0, AT, disp);
        }
      } else {
        __ daddu(AT, as_Register(base), AT);
        __ move(T9, disp);
        if (UseLoongsonISA) {
          __ gssdx(R0, AT, T9, 0);
        } else {
          __ daddu(AT, AT, T9);
          __ sd(R0, AT, 0);
        }
      }
    }
  } else {
    // No index register.
    if( Assembler::is_simm16(disp) ) {
      __ sd(R0, as_Register(base), disp);
    } else {
      __ move(T9, disp);
      if (UseLoongsonISA) {
        __ gssdx(R0, as_Register(base), T9, 0);
      } else {
        __ daddu(AT, as_Register(base), T9);
        __ sd(R0, AT, 0);
      }
    }
  }
%}
// Store narrow-oop null: 32-bit store of R0 to
// [base + (index << scale) + disp].  Plain MIPS path only — no Loongson
// indexed-store shortcut here, unlike the sibling enc_classes.
enc_class storeImmN0_enc(memory mem, ImmN0 src) %{
  MacroAssembler _masm(&cbuf);
  int base = $mem$$base;
  int index = $mem$$index;
  int scale = $mem$$scale;
  int disp = $mem$$disp;

  if(index!=0){
    // AT = base + (index << scale)
    if (scale == 0) {
      __ daddu(AT, as_Register(base), as_Register(index));
    } else {
      __ dsll(AT, as_Register(index), scale);
      __ daddu(AT, as_Register(base), AT);
    }

    if( Assembler::is_simm16(disp) ) {
      __ sw(R0, AT, disp);
    } else {
      __ move(T9, disp);
      __ daddu(AT, AT, T9);
      __ sw(R0, AT, 0);
    }
  }
  else {
    if( Assembler::is_simm16(disp) ) {
      __ sw(R0, as_Register(base), disp);
    } else {
      __ move(T9, disp);
      __ daddu(AT, as_Register(base), T9);
      __ sw(R0, AT, 0);
    }
  }
%}
// Load Long (64-bit ld) from [base + (index << scale) + disp].
enc_class load_L_enc (mRegL dst, memory mem) %{
  MacroAssembler _masm(&cbuf);
  int base = $mem$$base;
  int index = $mem$$index;
  int scale = $mem$$scale;
  int disp = $mem$$disp;
  Register dst_reg = as_Register($dst$$reg);

  /*********************2013/03/27**************************
   * Jin: $base may contain a null object.
   * Server JIT force the exception_offset to be the pos of
   * the first instruction.
   * I insert such a 'null_check' at the beginning.
   *******************************************************/
  // Probe [base + 0] first so that a null base faults on the block's very
  // first instruction, where the implicit-null-check machinery expects it.
  __ lw(AT, as_Register(base), 0);

  /*********************2012/10/04**************************
   * Error case found in SortTest
   * 337  b  java.util.Arrays::sort1 (401 bytes)
   * B73:
   * d34     lw  T4.lo, [T4 + #16]   #@loadL-lo
   *         lw  T4.hi, [T4 + #16]+4 #@loadL-hi
   *
   * The original instructions generated here are :
   *   __ lw(dst_lo, as_Register(base), disp);
   *   __ lw(dst_hi, as_Register(base), disp + 4);
   * NOTE(review): historical 32-bit split-load bug — dst could alias base,
   * clobbering it before the second lw; the single 64-bit ld avoids this.
   *******************************************************/

  if( index != 0 ) {
    // AT = base + (index << scale)
    if (scale == 0) {
      __ daddu(AT, as_Register(base), as_Register(index));
    } else {
      __ dsll(AT, as_Register(index), scale);
      __ daddu(AT, as_Register(base), AT);
    }
    if( Assembler::is_simm16(disp) ) {
      __ ld(dst_reg, AT, disp);
    } else {
      __ move(T9, disp);
      __ daddu(AT, AT, T9);
      __ ld(dst_reg, AT, 0);
    }
  } else {
    if( Assembler::is_simm16(disp) ) {
      __ move(AT, as_Register(base));
      __ ld(dst_reg, AT, disp);
    } else {
      __ move(T9, disp);
      __ daddu(AT, as_Register(base), T9);
      __ ld(dst_reg, AT, 0);
    }
  }
%}
// Store Long (64-bit sd) to [base + (index << scale) + disp].
enc_class store_L_reg_enc (memory mem, mRegL src) %{
  MacroAssembler _masm(&cbuf);
  int base = $mem$$base;
  int index = $mem$$index;
  int scale = $mem$$scale;
  int disp = $mem$$disp;
  Register src_reg = as_Register($src$$reg);

  if( index != 0 ) {
    // AT = base + (index << scale)
    if (scale == 0) {
      __ daddu(AT, as_Register(base), as_Register(index));
    } else {
      __ dsll(AT, as_Register(index), scale);
      __ daddu(AT, as_Register(base), AT);
    }
    if( Assembler::is_simm16(disp) ) {
      __ sd(src_reg, AT, disp);
    } else {
      __ move(T9, disp);
      __ daddu(AT, AT, T9);
      __ sd(src_reg, AT, 0);
    }
  } else {
    if( Assembler::is_simm16(disp) ) {
      __ move(AT, as_Register(base));
      __ sd(src_reg, AT, disp);
    } else {
      __ move(T9, disp);
      __ daddu(AT, as_Register(base), T9);
      __ sd(src_reg, AT, 0);
    }
  }
%}
3115 enc_class store_L_immL0_enc (memory mem, immL0 src) %{
3116 MacroAssembler _masm(&cbuf);
3117 int base = $mem$$base;
3118 int index = $mem$$index;
3119 int scale = $mem$$scale;
3120 int disp = $mem$$disp;
3122 if( index != 0 ) {
3123 if (scale == 0) {
3124 __ daddu(AT, as_Register(base), as_Register(index));
3125 } else {
3126 __ dsll(AT, as_Register(index), scale);
3127 __ daddu(AT, as_Register(base), AT);
3128 }
3129 if( Assembler::is_simm16(disp) ) {
3130 __ sd(R0, AT, disp);
3131 } else {
3132 __ move(T9, disp);
3133 __ addu(AT, AT, T9);
3134 __ sd(R0, AT, 0);
3135 }
3136 } else {
3137 if( Assembler::is_simm16(disp) ) {
3138 __ move(AT, as_Register(base));
3139 __ sd(R0, AT, disp);
3140 } else {
3141 __ move(T9, disp);
3142 __ addu(AT, as_Register(base), T9);
3143 __ sd(R0, AT, 0);
3144 }
3145 }
3146 %}
// Load Float (32-bit lwc1 / Loongson gslwxc1) into an FPU register from
// [base + (index << scale) + disp].
enc_class load_F_enc (regF dst, memory mem) %{
  MacroAssembler _masm(&cbuf);
  int base = $mem$$base;
  int index = $mem$$index;
  int scale = $mem$$scale;
  int disp = $mem$$disp;
  FloatRegister dst = $dst$$FloatRegister;

  if( index != 0 ) {
    if( Assembler::is_simm16(disp) ) {
      if( UseLoongsonISA && Assembler::is_simm(disp, 8) ) {
        // disp fits gslwxc1's 8-bit immediate: single indexed FP load.
        if (scale == 0) {
          __ gslwxc1(dst, as_Register(base), as_Register(index), disp);
        } else {
          __ dsll(AT, as_Register(index), scale);
          __ gslwxc1(dst, as_Register(base), AT, disp);
        }
      } else {
        if (scale == 0) {
          __ daddu(AT, as_Register(base), as_Register(index));
        } else {
          __ dsll(AT, as_Register(index), scale);
          __ daddu(AT, as_Register(base), AT);
        }
        __ lwc1(dst, AT, disp);
      }
    } else {
      if (scale == 0) {
        __ daddu(AT, as_Register(base), as_Register(index));
      } else {
        __ dsll(AT, as_Register(index), scale);
        __ daddu(AT, as_Register(base), AT);
      }
      __ move(T9, disp);
      if( UseLoongsonISA ) {
        __ gslwxc1(dst, AT, T9, 0);
      } else {
        __ daddu(AT, AT, T9);
        __ lwc1(dst, AT, 0);
      }
    }
  } else {
    // No index register.
    if( Assembler::is_simm16(disp) ) {
      __ lwc1(dst, as_Register(base), disp);
    } else {
      __ move(T9, disp);
      if( UseLoongsonISA ) {
        __ gslwxc1(dst, as_Register(base), T9, 0);
      } else {
        __ daddu(AT, as_Register(base), T9);
        __ lwc1(dst, AT, 0);
      }
    }
  }
%}
3204 enc_class store_F_reg_enc (memory mem, regF src) %{
// Store a single-precision float src to [base + (index << scale) + disp].
// When UseLoongsonISA is set, the fused gsswxc1 (store word from FPR,
// base + index + small disp) saves the separate address add.
3205 MacroAssembler _masm(&cbuf);
3206 int base = $mem$$base;
3207 int index = $mem$$index;
3208 int scale = $mem$$scale;
3209 int disp = $mem$$disp;
3210 FloatRegister src = $src$$FloatRegister;
3212 if( index != 0 ) {
3213 if( Assembler::is_simm16(disp) ) {
// gsswxc1 only encodes an 8-bit signed displacement, hence the extra check.
3214 if( UseLoongsonISA && Assembler::is_simm(disp, 8) ) {
3215 if (scale == 0) {
3216 __ gsswxc1(src, as_Register(base), as_Register(index), disp);
3217 } else {
3218 __ dsll(AT, as_Register(index), scale);
3219 __ gsswxc1(src, as_Register(base), AT, disp);
3220 }
3221 } else {
// Generic path: AT = base + (index << scale), then swc1 with simm16 disp.
3222 if (scale == 0) {
3223 __ daddu(AT, as_Register(base), as_Register(index));
3224 } else {
3225 __ dsll(AT, as_Register(index), scale);
3226 __ daddu(AT, as_Register(base), AT);
3227 }
3228 __ swc1(src, AT, disp);
3229 }
3230 } else {
// Large displacement: materialize disp in T9.
3231 if (scale == 0) {
3232 __ daddu(AT, as_Register(base), as_Register(index));
3233 } else {
3234 __ dsll(AT, as_Register(index), scale);
3235 __ daddu(AT, as_Register(base), AT);
3236 }
3237 __ move(T9, disp);
3238 if( UseLoongsonISA ) {
3239 __ gsswxc1(src, AT, T9, 0);
3240 } else {
3241 __ daddu(AT, AT, T9);
3242 __ swc1(src, AT, 0);
3243 }
3244 }
3245 } else {
// No index register: store to base + disp.
3246 if( Assembler::is_simm16(disp) ) {
3247 __ swc1(src, as_Register(base), disp);
3248 } else {
3249 __ move(T9, disp);
3250 if( UseLoongsonISA ) {
// Fixed: this is a STORE path but the original emitted gslwxc1 (a LOAD
// into the FPR), which clobbered src and stored nothing. Must be gsswxc1,
// matching every other Loongson branch in this enc_class.
3251 __ gsswxc1(src, as_Register(base), T9, 0);
3252 } else {
3253 __ daddu(AT, as_Register(base), T9);
3254 __ swc1(src, AT, 0);
3255 }
3256 }
3257 }
3258 %}
3260 enc_class load_D_enc (regD dst, memory mem) %{
// Load a double from [base + (index << scale) + disp] into dst_reg.
// When UseLoongsonISA is set, the fused gsldxc1 (load doubleword to FPR,
// base + index + small disp) saves the separate address add.
3261 MacroAssembler _masm(&cbuf);
3262 int base = $mem$$base;
3263 int index = $mem$$index;
3264 int scale = $mem$$scale;
3265 int disp = $mem$$disp;
3266 FloatRegister dst_reg = as_FloatRegister($dst$$reg);
3268 if( index != 0 ) {
3269 if( Assembler::is_simm16(disp) ) {
// gsldxc1 only encodes an 8-bit signed displacement, hence the extra check.
3270 if( UseLoongsonISA && Assembler::is_simm(disp, 8) ) {
3271 if (scale == 0) {
3272 __ gsldxc1(dst_reg, as_Register(base), as_Register(index), disp);
3273 } else {
3274 __ dsll(AT, as_Register(index), scale);
3275 __ gsldxc1(dst_reg, as_Register(base), AT, disp);
3276 }
3277 } else {
// Generic path: AT = base + (index << scale), then ldc1 with simm16 disp.
3278 if (scale == 0) {
3279 __ daddu(AT, as_Register(base), as_Register(index));
3280 } else {
3281 __ dsll(AT, as_Register(index), scale);
3282 __ daddu(AT, as_Register(base), AT);
3283 }
3284 __ ldc1(dst_reg, AT, disp);
3285 }
3286 } else {
// Large displacement: materialize disp in T9.
3287 if (scale == 0) {
3288 __ daddu(AT, as_Register(base), as_Register(index));
3289 } else {
3290 __ dsll(AT, as_Register(index), scale);
3291 __ daddu(AT, as_Register(base), AT);
3292 }
3293 __ move(T9, disp);
3294 if( UseLoongsonISA ) {
3295 __ gsldxc1(dst_reg, AT, T9, 0);
3296 } else {
// Fixed: use daddu for 64-bit address arithmetic. addu sign-extends a
// 32-bit sum and corrupts addresses >= 2^31; load_F_enc already uses
// daddu on this same fallback path.
3297 __ daddu(AT, AT, T9);
3298 __ ldc1(dst_reg, AT, 0);
3299 }
3300 }
3301 } else {
// No index register: load from base + disp.
3302 if( Assembler::is_simm16(disp) ) {
3303 __ ldc1(dst_reg, as_Register(base), disp);
3304 } else {
3305 __ move(T9, disp);
3306 if( UseLoongsonISA ) {
3307 __ gsldxc1(dst_reg, as_Register(base), T9, 0);
3308 } else {
// Fixed: daddu instead of addu (see note above).
3309 __ daddu(AT, as_Register(base), T9);
3310 __ ldc1(dst_reg, AT, 0);
3311 }
3312 }
3313 }
3314 %}
3316 enc_class store_D_reg_enc (memory mem, regD src) %{
// Store a double src_reg to [base + (index << scale) + disp].
// When UseLoongsonISA is set, the fused gssdxc1 (store doubleword from FPR,
// base + index + small disp) saves the separate address add.
3317 MacroAssembler _masm(&cbuf);
3318 int base = $mem$$base;
3319 int index = $mem$$index;
3320 int scale = $mem$$scale;
3321 int disp = $mem$$disp;
3322 FloatRegister src_reg = as_FloatRegister($src$$reg);
3324 if( index != 0 ) {
3325 if( Assembler::is_simm16(disp) ) {
// gssdxc1 only encodes an 8-bit signed displacement, hence the extra check.
3326 if( UseLoongsonISA && Assembler::is_simm(disp, 8) ) {
3327 if (scale == 0) {
3328 __ gssdxc1(src_reg, as_Register(base), as_Register(index), disp);
3329 } else {
3330 __ dsll(AT, as_Register(index), scale);
3331 __ gssdxc1(src_reg, as_Register(base), AT, disp);
3332 }
3333 } else {
// Generic path: AT = base + (index << scale), then sdc1 with simm16 disp.
3334 if (scale == 0) {
3335 __ daddu(AT, as_Register(base), as_Register(index));
3336 } else {
3337 __ dsll(AT, as_Register(index), scale);
3338 __ daddu(AT, as_Register(base), AT);
3339 }
3340 __ sdc1(src_reg, AT, disp);
3341 }
3342 } else {
// Large displacement: materialize disp in T9.
3343 if (scale == 0) {
3344 __ daddu(AT, as_Register(base), as_Register(index));
3345 } else {
3346 __ dsll(AT, as_Register(index), scale);
3347 __ daddu(AT, as_Register(base), AT);
3348 }
3349 __ move(T9, disp);
3350 if( UseLoongsonISA ) {
3351 __ gssdxc1(src_reg, AT, T9, 0);
3352 } else {
// Fixed: use daddu for 64-bit address arithmetic. addu sign-extends a
// 32-bit sum and corrupts addresses >= 2^31; store_F_reg_enc already
// uses daddu on this same fallback path.
3353 __ daddu(AT, AT, T9);
3354 __ sdc1(src_reg, AT, 0);
3355 }
3356 }
3357 } else {
// No index register: store to base + disp.
3358 if( Assembler::is_simm16(disp) ) {
3359 __ sdc1(src_reg, as_Register(base), disp);
3360 } else {
3361 __ move(T9, disp);
3362 if( UseLoongsonISA ) {
3363 __ gssdxc1(src_reg, as_Register(base), T9, 0);
3364 } else {
// Fixed: daddu instead of addu (see note above).
3365 __ daddu(AT, as_Register(base), T9);
3366 __ sdc1(src_reg, AT, 0);
3367 }
3368 }
3369 }
3370 %}
3372 enc_class Java_To_Runtime (method meth) %{ // CALL Java_To_Runtime, Java_To_Runtime_Leaf
// Emit a call from compiled Java code into the VM runtime.
3373 MacroAssembler _masm(&cbuf);
3374 // This is the instruction starting address for relocation info.
3375 __ block_comment("Java_To_Runtime");
3376 cbuf.set_insts_mark();
3377 __ relocate(relocInfo::runtime_call_type);
// Load the 48-bit callee address into T9 with a patchable sequence, then
// jump-and-link through T9. The trailing nop fills the branch delay slot.
3379 __ patchable_set48(T9, (long)$meth$$method);
3380 __ jalr(T9);
3381 __ nop();
3382 %}
3384 enc_class Java_Static_Call (method meth) %{ // JAVA STATIC CALL
3385 // CALL to fixup routine. Fixup routine uses ScopeDesc info to determine
3386 // who we intended to call.
3387 MacroAssembler _masm(&cbuf);
3388 cbuf.set_insts_mark();
// Select the relocation type: runtime stub when there is no Java target,
// opt-virtual for statically-bound virtual calls, plain static otherwise.
3390 if ( !_method ) {
3391 __ relocate(relocInfo::runtime_call_type);
3392 } else if(_optimized_virtual) {
3393 __ relocate(relocInfo::opt_virtual_call_type);
3394 } else {
3395 __ relocate(relocInfo::static_call_type);
3396 }
// Patchable 48-bit load of the callee address into T9, call through T9,
// and a nop for the branch delay slot.
3398 __ patchable_set48(T9, $meth$$method);
3399 __ jalr(T9);
3400 __ nop();
3401 if( _method ) { // Emit stub for static call
3402 emit_java_to_interp(cbuf);
3403 }
3404 %}
3407 /*
3408 * Emit an inline-cache dispatched (virtual/interface) Java call.
3409 * [Ref: LIR_Assembler::ic_call() ]
3410 */
3410 enc_class Java_Dynamic_Call (method meth) %{ // JAVA DYNAMIC CALL
3411 MacroAssembler _masm(&cbuf);
3412 __ block_comment("Java_Dynamic_Call");
// ic_call emits the inline-cache holder load plus the relocated call.
3413 __ ic_call((address)$meth$$method);
3414 %}
3417 enc_class Set_Flags_After_Fast_Lock_Unlock(FlagsReg cr) %{
// Convert the fast-lock/unlock result into a flags-register value:
// flags == 0 on success, all-ones on failure.
// NOTE(review): AT is presumably set by the preceding fast_lock/fast_unlock
// emission (nonzero on failure) — confirm against the instructs using this
// enc_class; this block only consumes AT.
3418 Register flags = $cr$$Register;
3419 Label L;
3421 MacroAssembler _masm(&cbuf);
// flags = 0 (addu with two zero operands is the canonical clear).
3423 __ addu(flags, R0, R0);
3424 __ beq(AT, R0, L);
3425 __ delayed()->nop();
// Failure path: mark flags with a distinctive non-zero pattern.
3426 __ move(flags, 0xFFFFFFFF);
3427 __ bind(L);
3428 %}
3430 enc_class enc_PartialSubtypeCheck(mRegP result, mRegP sub, mRegP super, mRegI tmp) %{
// Slow-path subtype check: result = 0 if 'sub' is a subtype of 'super',
// 1 otherwise. Scans the secondary supers array via
// check_klass_subtype_slow_path; T9 serves as an extra scratch register.
3431 Register result = $result$$Register;
3432 Register sub = $sub$$Register;
3433 Register super = $super$$Register;
3434 Register length = $tmp$$Register;
3435 Register tmp = T9;
3436 Label miss;
3438 /* 2012/9/28 Jin: result may alias sub, so result must not be written
3439 * before the check consumes sub. Observed in practice:
3440 * 47c partialSubtypeCheck result=S1, sub=S1, super=S3, length=S0
3441 * 4bc mov S2, NULL #@loadConP
3442 * 4c0 beq S1, S2, B21 #@branchConP P=0.999999 C=-1.000000
3443 */
3444 MacroAssembler _masm(&cbuf);
3445 Label done;
3446 __ check_klass_subtype_slow_path(sub, super, length, tmp,
3447 NULL, &miss,
3448 /*set_cond_codes:*/ true);
3449 /* 2013/7/22 Jin: result encoding (0 = hit) matches x86_64's use of RDI */
3450 __ move(result, 0);
3451 __ b(done);
3452 __ nop();
3454 __ bind(miss);
3455 __ move(result, 1);
3456 __ bind(done);
3457 %}
3459 %}
3462 //---------MIPS FRAME--------------------------------------------------------------
3463 // Definition of frame structure and management information.
3464 //
3465 // S T A C K L A Y O U T Allocators stack-slot number
3466 // | (to get allocators register number
3467 // G Owned by | | v add SharedInfo::stack0)
3468 // r CALLER | |
3469 // o | +--------+ pad to even-align allocators stack-slot
3470 // w V | pad0 | numbers; owned by CALLER
3471 // t -----------+--------+----> Matcher::_in_arg_limit, unaligned
3472 // h ^ | in | 5
3473 // | | args | 4 Holes in incoming args owned by SELF
3474 // | | old | | 3
3475 // | | SP-+--------+----> Matcher::_old_SP, even aligned
3476 // v | | ret | 3 return address
3477 // Owned by +--------+
3478 // Self | pad2 | 2 pad to align old SP
3479 // | +--------+ 1
3480 // | | locks | 0
3481 // | +--------+----> SharedInfo::stack0, even aligned
3482 // | | pad1 | 11 pad to align new SP
3483 // | +--------+
3484 // | | | 10
3485 // | | spills | 9 spills
3486 // V | | 8 (pad0 slot for callee)
3487 // -----------+--------+----> Matcher::_out_arg_limit, unaligned
3488 // ^ | out | 7
3489 // | | args | 6 Holes in outgoing args owned by CALLEE
3490 // Owned by new | |
3491 // Callee SP-+--------+----> Matcher::_new_SP, even aligned
3492 // | |
3493 //
3494 // Note 1: Only region 8-11 is determined by the allocator. Region 0-5 is
3495 // known from SELF's arguments and the Java calling convention.
3496 // Region 6-7 is determined per call site.
3497 // Note 2: If the calling convention leaves holes in the incoming argument
3498 // area, those holes are owned by SELF. Holes in the outgoing area
3499 // are owned by the CALLEE. Holes should not be necessary in the
3500 // incoming area, as the Java calling convention is completely under
3501 // the control of the AD file. Doubles can be sorted and packed to
3502 // avoid holes. Holes in the outgoing arguments may be necessary for
3503 // varargs C calling conventions.
3504 // Note 3: Region 0-3 is even aligned, with pad2 as needed. Region 3-5 is
3505 // even aligned with pad0 as needed.
3506 // Region 6 is even aligned. Region 6-7 is NOT even aligned;
3507 // region 6-11 is even aligned; it may be padded out more so that
3508 // the region from SP to FP meets the minimum stack alignment.
3509 // Note 4: For I2C adapters, the incoming FP may not meet the minimum stack
3510 // alignment. Region 11, pad1, may be dynamically extended so that
3511 // SP meets the minimum alignment.
3514 frame %{
// Frame layout and calling-convention glue for the MIPS64 port: stack growth
// direction, reserved slots, frame/return-address registers, and the hooks
// C2 uses to place Java and C arguments and return values.
3516 stack_direction(TOWARDS_LOW);
3518 // These two registers define part of the calling convention
3519 // between compiled code and the interpreter.
3520 // SEE StartI2CNode::calling_convention & StartC2INode::calling_convention & StartOSRNode::calling_convention
3521 // for more information. by yjl 3/16/2006
3523 inline_cache_reg(T1); // Inline Cache Register
3524 interpreter_method_oop_reg(S3); // Method Oop Register when calling interpreter
3525 /*
3526 inline_cache_reg(T1); // Inline Cache Register or methodOop for I2C
3527 interpreter_arg_ptr_reg(A0); // Argument pointer for I2C adapters
3528 */
3530 // Optional: name the operand used by cisc-spilling to access [stack_pointer + offset]
3531 cisc_spilling_operand_name(indOffset32);
3533 // Number of stack slots consumed by locking an object
3534 // generate Compile::sync_stack_slots
3535 #ifdef _LP64
3536 sync_stack_slots(2);
3537 #else
3538 sync_stack_slots(1);
3539 #endif
3541 frame_pointer(SP);
3543 // Interpreter stores its frame pointer in a register which is
3544 // stored to the stack by I2CAdaptors.
3545 // I2CAdaptors convert from interpreted java to compiled java.
3547 interpreter_frame_pointer(FP);
3549 // generate Matcher::stack_alignment
3550 stack_alignment(StackAlignmentInBytes); //wordSize = sizeof(char*);
3552 // Number of stack slots between incoming argument block and the start of
3553 // a new frame. The PROLOG must add this many slots to the stack. The
3554 // EPILOG must remove this many slots. Intel needs one slot for
3555 // return address.
3556 // generate Matcher::in_preserve_stack_slots
3557 //in_preserve_stack_slots(VerifyStackAtCalls + 2); //Now VerifyStackAtCalls is defined as false ! Leave one stack slot for ra and fp
3558 in_preserve_stack_slots(4); //Now VerifyStackAtCalls is defined as false ! Leave two stack slots for ra and fp
3560 // Number of outgoing stack slots killed above the out_preserve_stack_slots
3561 // for calls to C. Supports the var-args backing area for register parms.
3562 varargs_C_out_slots_killed(0);
3564 // The after-PROLOG location of the return address. Location of
3565 // return address specifies a type (REG or STACK) and a number
3566 // representing the register number (i.e. - use a register name) or
3567 // stack slot.
3568 // Ret Addr is on stack in slot 0 if no locks or verification or alignment.
3569 // Otherwise, it is above the locks and verification slot and alignment word
3570 //return_addr(STACK -1+ round_to(1+VerifyStackAtCalls+Compile::current()->sync()*Compile::current()->sync_stack_slots(),WordsPerLong));
3571 return_addr(REG RA);
3573 // Body of function which returns an integer array locating
3574 // arguments either in registers or in stack slots. Passed an array
3575 // of ideal registers called "sig" and a "length" count. Stack-slot
3576 // offsets are based on outgoing arguments, i.e. a CALLER setting up
3577 // arguments for a CALLEE. Incoming stack arguments are
3578 // automatically biased by the preserve_stack_slots field above.
3581 // will generated to Matcher::calling_convention(OptoRegPair *sig, uint length, bool is_outgoing)
3582 // StartNode::calling_convention call this. by yjl 3/16/2006
3583 calling_convention %{
3584 SharedRuntime::java_calling_convention(sig_bt, regs, length, false);
3585 %}
3590 // Body of function which returns an integer array locating
3591 // arguments either in registers or in stack slots. Passed an array
3592 // of ideal registers called "sig" and a "length" count. Stack-slot
3593 // offsets are based on outgoing arguments, i.e. a CALLER setting up
3594 // arguments for a CALLEE. Incoming stack arguments are
3595 // automatically biased by the preserve_stack_slots field above.
3598 // SEE CallRuntimeNode::calling_convention for more information. by yjl 3/16/2006
3599 c_calling_convention %{
3600 (void) SharedRuntime::c_calling_convention(sig_bt, regs, /*regs2=*/NULL, length);
3601 %}
3604 // Location of C & interpreter return values
3605 // register(s) contain(s) return value for Op_StartI2C and Op_StartOSR.
3606 // SEE Matcher::match. by yjl 3/16/2006
3607 c_return_value %{
3608 assert( ideal_reg >= Op_RegI && ideal_reg <= Op_RegL, "only return normal values" );
3609 /* -- , -- , Op_RegN, Op_RegI, Op_RegP, Op_RegF, Op_RegD, Op_RegL */
3610 static int lo[Op_RegL+1] = { 0, 0, V0_num, V0_num, V0_num, F0_num, F0_num, V0_num };
3611 static int hi[Op_RegL+1] = { 0, 0, OptoReg::Bad, OptoReg::Bad, V0_H_num, OptoReg::Bad, F0_H_num, V0_H_num };
3612 return OptoRegPair(hi[ideal_reg],lo[ideal_reg]);
3613 %}
3615 // Location of return values
3616 // register(s) contain(s) return value for Op_StartC2I and Op_Start.
3617 // SEE Matcher::match. by yjl 3/16/2006
3619 return_value %{
3620 assert( ideal_reg >= Op_RegI && ideal_reg <= Op_RegL, "only return normal values" );
3621 /* -- , -- , Op_RegN, Op_RegI, Op_RegP, Op_RegF, Op_RegD, Op_RegL */
3622 static int lo[Op_RegL+1] = { 0, 0, V0_num, V0_num, V0_num, F0_num, F0_num, V0_num };
3623 static int hi[Op_RegL+1] = { 0, 0, OptoReg::Bad, OptoReg::Bad, V0_H_num, OptoReg::Bad, F0_H_num, V0_H_num};
3624 return OptoRegPair(hi[ideal_reg],lo[ideal_reg]);
3625 %}
3627 %}
3629 //----------ATTRIBUTES---------------------------------------------------------
3630 //----------Operand Attributes-------------------------------------------------
3631 op_attrib op_cost(0); // Required cost attribute
3633 //----------Instruction Attributes---------------------------------------------
3634 ins_attrib ins_cost(100); // Required cost attribute
3635 ins_attrib ins_size(32); // Required size attribute (in bits)
3636 ins_attrib ins_pc_relative(0); // Required PC Relative flag
3637 ins_attrib ins_short_branch(0); // Required flag: is this instruction a
3638 // non-matching short branch variant of some
3639 // long branch?
3640 ins_attrib ins_alignment(4); // Required alignment attribute (must be a power of 2)
3641 // specifies the alignment that some part of the instruction (not
3642 // necessarily the start) requires. If > 1, a compute_padding()
3643 // function must be provided for the instruction
3645 //----------OPERANDS-----------------------------------------------------------
3646 // Operand definitions must precede instruction definitions for correct parsing
3647 // in the ADLC because operands constitute user defined types which are used in
3648 // instruction definitions.
3650 // Vectors
3651 operand vecD() %{
3652 constraint(ALLOC_IN_RC(dbl_reg));
3653 match(VecD);
3655 format %{ %}
3656 interface(REG_INTER);
3657 %}
3659 // Flags register, used as output of compare instructions
3660 operand FlagsReg() %{
3661 constraint(ALLOC_IN_RC(mips_flags));
3662 match(RegFlags);
3664 format %{ "EFLAGS" %}
3665 interface(REG_INTER);
3666 %}
3668 //----------Simple Operands----------------------------------------------------
3669 //TODO: Should we need to define some more special immediate number ?
3670 // Immediate Operands
3671 // Integer Immediate
3672 operand immI() %{
3673 match(ConI);
3674 //TODO: should not match immI8 here LEE
3675 match(immI8);
3677 op_cost(20);
3678 format %{ %}
3679 interface(CONST_INTER);
3680 %}
3682 // Long Immediate 8-bit
3683 operand immL8()
3684 %{
3685 predicate(-0x80L <= n->get_long() && n->get_long() < 0x80L);
3686 match(ConL);
3688 op_cost(5);
3689 format %{ %}
3690 interface(CONST_INTER);
3691 %}
3693 // Constant for test vs zero
3694 operand immI0() %{
3695 predicate(n->get_int() == 0);
3696 match(ConI);
3698 op_cost(0);
3699 format %{ %}
3700 interface(CONST_INTER);
3701 %}
3703 // Constant for increment
3704 operand immI1() %{
3705 predicate(n->get_int() == 1);
3706 match(ConI);
3708 op_cost(0);
3709 format %{ %}
3710 interface(CONST_INTER);
3711 %}
3713 // Constant for decrement
3714 operand immI_M1() %{
3715 predicate(n->get_int() == -1);
3716 match(ConI);
3718 op_cost(0);
3719 format %{ %}
3720 interface(CONST_INTER);
3721 %}
3723 operand immI_MaxI() %{
3724 predicate(n->get_int() == 2147483647);
3725 match(ConI);
3727 op_cost(0);
3728 format %{ %}
3729 interface(CONST_INTER);
3730 %}
3732 // Valid scale values for addressing modes
3733 operand immI2() %{
3734 predicate(0 <= n->get_int() && (n->get_int() <= 3));
3735 match(ConI);
3737 format %{ %}
3738 interface(CONST_INTER);
3739 %}
3741 operand immI8() %{
3742 predicate((-128 <= n->get_int()) && (n->get_int() <= 127));
3743 match(ConI);
3745 op_cost(5);
3746 format %{ %}
3747 interface(CONST_INTER);
3748 %}
3750 operand immI16() %{
3751 predicate((-32768 <= n->get_int()) && (n->get_int() <= 32767));
3752 match(ConI);
3754 op_cost(10);
3755 format %{ %}
3756 interface(CONST_INTER);
3757 %}
3759 // Constant for long shifts
3760 operand immI_32() %{
3761 predicate( n->get_int() == 32 );
3762 match(ConI);
3764 op_cost(0);
3765 format %{ %}
3766 interface(CONST_INTER);
3767 %}
3769 operand immI_63() %{
3770 predicate( n->get_int() == 63 );
3771 match(ConI);
3773 op_cost(0);
3774 format %{ %}
3775 interface(CONST_INTER);
3776 %}
3778 operand immI_0_31() %{
3779 predicate( n->get_int() >= 0 && n->get_int() <= 31 );
3780 match(ConI);
3782 op_cost(0);
3783 format %{ %}
3784 interface(CONST_INTER);
3785 %}
3787 // Operand for non-negative integer mask
3788 operand immI_nonneg_mask() %{
// Matches a non-negative int constant that is a contiguous low-bit mask,
// i.e. of the form (1 << k) - 1. NOTE(review): presumably
// Assembler::is_int_mask returns the mask width and -1 for non-masks —
// confirm against the Assembler declaration.
3789 predicate( (n->get_int() >= 0) && (Assembler::is_int_mask(n->get_int()) != -1) );
3790 match(ConI);
3792 op_cost(0);
3793 format %{ %}
3794 interface(CONST_INTER);
3795 %}
3797 operand immI_32_63() %{
3798 predicate( n->get_int() >= 32 && n->get_int() <= 63 );
3799 match(ConI);
3800 op_cost(0);
3802 format %{ %}
3803 interface(CONST_INTER);
3804 %}
3806 operand immI16_sub() %{
// 16-bit immediate for subtraction. The range is shifted by one relative to
// immI16 ([-32767, 32768] instead of [-32768, 32767]) so that the NEGATED
// value still fits a signed 16-bit field — presumably sub-by-immediate is
// emitted as an add of -imm; confirm against the subI instructs.
3807 predicate((-32767 <= n->get_int()) && (n->get_int() <= 32768));
3808 match(ConI);
3810 op_cost(10);
3811 format %{ %}
3812 interface(CONST_INTER);
3813 %}
3815 operand immI_0_32767() %{
3816 predicate( n->get_int() >= 0 && n->get_int() <= 32767 );
3817 match(ConI);
3818 op_cost(0);
3820 format %{ %}
3821 interface(CONST_INTER);
3822 %}
3824 operand immI_0_65535() %{
3825 predicate( n->get_int() >= 0 && n->get_int() <= 65535 );
3826 match(ConI);
3827 op_cost(0);
3829 format %{ %}
3830 interface(CONST_INTER);
3831 %}
3833 operand immI_1() %{
3834 predicate( n->get_int() == 1 );
3835 match(ConI);
3837 op_cost(0);
3838 format %{ %}
3839 interface(CONST_INTER);
3840 %}
3842 operand immI_2() %{
3843 predicate( n->get_int() == 2 );
3844 match(ConI);
3846 op_cost(0);
3847 format %{ %}
3848 interface(CONST_INTER);
3849 %}
3851 operand immI_3() %{
3852 predicate( n->get_int() == 3 );
3853 match(ConI);
3855 op_cost(0);
3856 format %{ %}
3857 interface(CONST_INTER);
3858 %}
3860 operand immI_7() %{
3861 predicate( n->get_int() == 7 );
3862 match(ConI);
3864 format %{ %}
3865 interface(CONST_INTER);
3866 %}
3868 // Immediates for special shifts (sign extend)
3870 // Constants for increment
3871 operand immI_16() %{
3872 predicate( n->get_int() == 16 );
3873 match(ConI);
3875 format %{ %}
3876 interface(CONST_INTER);
3877 %}
3879 operand immI_24() %{
3880 predicate( n->get_int() == 24 );
3881 match(ConI);
3883 format %{ %}
3884 interface(CONST_INTER);
3885 %}
3887 // Constant for byte-wide masking
3888 operand immI_255() %{
3889 predicate( n->get_int() == 255 );
3890 match(ConI);
3892 op_cost(0);
3893 format %{ %}
3894 interface(CONST_INTER);
3895 %}
3897 operand immI_65535() %{
3898 predicate( n->get_int() == 65535 );
3899 match(ConI);
3901 op_cost(5);
3902 format %{ %}
3903 interface(CONST_INTER);
3904 %}
3906 operand immI_65536() %{
3907 predicate( n->get_int() == 65536 );
3908 match(ConI);
3910 op_cost(5);
3911 format %{ %}
3912 interface(CONST_INTER);
3913 %}
3915 operand immI_M65536() %{
3916 predicate( n->get_int() == -65536 );
3917 match(ConI);
3919 op_cost(5);
3920 format %{ %}
3921 interface(CONST_INTER);
3922 %}
3924 // Pointer Immediate
3925 operand immP() %{
3926 match(ConP);
3928 op_cost(10);
3929 format %{ %}
3930 interface(CONST_INTER);
3931 %}
3933 // NULL Pointer Immediate
3934 operand immP0() %{
3935 predicate( n->get_ptr() == 0 );
3936 match(ConP);
3937 op_cost(0);
3939 format %{ %}
3940 interface(CONST_INTER);
3941 %}
3943 // Pointer Immediate: 64-bit
3944 operand immP_set() %{
3945 match(ConP);
3947 op_cost(5);
3948 // formats are generated automatically for constants and base registers
3949 format %{ %}
3950 interface(CONST_INTER);
3951 %}
3953 // Pointer Immediate: 64-bit
3954 operand immP_load() %{
3955 predicate(n->bottom_type()->isa_oop_ptr() || (MacroAssembler::insts_for_set64(n->get_ptr()) > 3));
3956 match(ConP);
3958 op_cost(5);
3959 // formats are generated automatically for constants and base registers
3960 format %{ %}
3961 interface(CONST_INTER);
3962 %}
3964 // Pointer Immediate: 64-bit
3965 operand immP_no_oop_cheap() %{
3966 predicate(!n->bottom_type()->isa_oop_ptr() && (MacroAssembler::insts_for_set64(n->get_ptr()) <= 3));
3967 match(ConP);
3969 op_cost(5);
3970 // formats are generated automatically for constants and base registers
3971 format %{ %}
3972 interface(CONST_INTER);
3973 %}
3975 // Pointer for polling page
3976 operand immP_poll() %{
3977 predicate(n->get_ptr() != 0 && n->get_ptr() == (intptr_t)os::get_polling_page());
3978 match(ConP);
3979 op_cost(5);
3981 format %{ %}
3982 interface(CONST_INTER);
3983 %}
3985 // Pointer Immediate
3986 operand immN() %{
3987 match(ConN);
3989 op_cost(10);
3990 format %{ %}
3991 interface(CONST_INTER);
3992 %}
3994 operand immNKlass() %{
3995 match(ConNKlass);
3997 op_cost(10);
3998 format %{ %}
3999 interface(CONST_INTER);
4000 %}
4002 // NULL Pointer Immediate
4003 operand immN0() %{
4004 predicate(n->get_narrowcon() == 0);
4005 match(ConN);
4007 op_cost(5);
4008 format %{ %}
4009 interface(CONST_INTER);
4010 %}
4012 // Long Immediate
4013 operand immL() %{
4014 match(ConL);
4016 op_cost(20);
4017 format %{ %}
4018 interface(CONST_INTER);
4019 %}
4021 // Long Immediate zero
4022 operand immL0() %{
4023 predicate( n->get_long() == 0L );
4024 match(ConL);
4025 op_cost(0);
4027 format %{ %}
4028 interface(CONST_INTER);
4029 %}
4031 operand immL7() %{
4032 predicate( n->get_long() == 7L );
4033 match(ConL);
4034 op_cost(0);
4036 format %{ %}
4037 interface(CONST_INTER);
4038 %}
4040 operand immL_M1() %{
4041 predicate( n->get_long() == -1L );
4042 match(ConL);
4043 op_cost(0);
4045 format %{ %}
4046 interface(CONST_INTER);
4047 %}
4049 // bit 0..2 zero
4050 operand immL_M8() %{
4051 predicate( n->get_long() == -8L );
4052 match(ConL);
4053 op_cost(0);
4055 format %{ %}
4056 interface(CONST_INTER);
4057 %}
4059 // bit 2 zero
4060 operand immL_M5() %{
4061 predicate( n->get_long() == -5L );
4062 match(ConL);
4063 op_cost(0);
4065 format %{ %}
4066 interface(CONST_INTER);
4067 %}
4069 // bit 1..2 zero
4070 operand immL_M7() %{
4071 predicate( n->get_long() == -7L );
4072 match(ConL);
4073 op_cost(0);
4075 format %{ %}
4076 interface(CONST_INTER);
4077 %}
4079 // bit 0..1 zero
4080 operand immL_M4() %{
4081 predicate( n->get_long() == -4L );
4082 match(ConL);
4083 op_cost(0);
4085 format %{ %}
4086 interface(CONST_INTER);
4087 %}
4089 // bit 3..6 zero
4090 operand immL_M121() %{
4091 predicate( n->get_long() == -121L );
4092 match(ConL);
4093 op_cost(0);
4095 format %{ %}
4096 interface(CONST_INTER);
4097 %}
4099 // Long immediate from 0 to 127.
4100 // Used for a shorter form of long mul by 10.
4101 operand immL_127() %{
4102 predicate((0 <= n->get_long()) && (n->get_long() <= 127));
4103 match(ConL);
4104 op_cost(0);
4106 format %{ %}
4107 interface(CONST_INTER);
4108 %}
4110 // Operand for non-negative long mask
4111 operand immL_nonneg_mask() %{
// Matches a non-negative long constant that is a contiguous low-bit mask,
// i.e. of the form (1L << k) - 1. NOTE(review): presumably
// Assembler::is_jlong_mask returns the mask width and -1 for non-masks —
// confirm against the Assembler declaration (mirrors immI_nonneg_mask).
4112 predicate( (n->get_long() >= 0) && (Assembler::is_jlong_mask(n->get_long()) != -1) );
4113 match(ConL);
4115 op_cost(0);
4116 format %{ %}
4117 interface(CONST_INTER);
4118 %}
4120 operand immL_0_65535() %{
4121 predicate( n->get_long() >= 0 && n->get_long() <= 65535 );
4122 match(ConL);
4123 op_cost(0);
4125 format %{ %}
4126 interface(CONST_INTER);
4127 %}
4129 // Long Immediate: cheap (materialize in <= 3 instructions)
4130 operand immL_cheap() %{
4131 predicate(MacroAssembler::insts_for_set64(n->get_long()) <= 3);
4132 match(ConL);
4133 op_cost(0);
4135 format %{ %}
4136 interface(CONST_INTER);
4137 %}
4139 // Long Immediate: expensive (materialize in > 3 instructions)
4140 operand immL_expensive() %{
4141 predicate(MacroAssembler::insts_for_set64(n->get_long()) > 3);
4142 match(ConL);
4143 op_cost(0);
4145 format %{ %}
4146 interface(CONST_INTER);
4147 %}
4149 operand immL16() %{
4150 predicate((-32768 <= n->get_long()) && (n->get_long() <= 32767));
4151 match(ConL);
4153 op_cost(10);
4154 format %{ %}
4155 interface(CONST_INTER);
4156 %}
4158 operand immL16_sub() %{
// 16-bit long immediate for subtraction. Range shifted by one relative to
// immL16 ([-32767, 32768]) so that the NEGATED value fits a signed 16-bit
// field — presumably sub-by-immediate is emitted as an add of -imm; confirm
// against the subL instructs (mirrors immI16_sub).
4159 predicate((-32767 <= n->get_long()) && (n->get_long() <= 32768));
4160 match(ConL);
4162 op_cost(10);
4163 format %{ %}
4164 interface(CONST_INTER);
4165 %}
4167 // Long Immediate: low 32-bit mask
4168 operand immL_32bits() %{
4169 predicate(n->get_long() == 0xFFFFFFFFL);
4170 match(ConL);
4171 op_cost(20);
4173 format %{ %}
4174 interface(CONST_INTER);
4175 %}
4177 // Long Immediate 32-bit signed
4178 operand immL32()
4179 %{
4180 predicate(n->get_long() == (int) (n->get_long()));
4181 match(ConL);
4183 op_cost(15);
4184 format %{ %}
4185 interface(CONST_INTER);
4186 %}
4189 //single-precision floating-point zero
4190 operand immF0() %{
4191 predicate(jint_cast(n->getf()) == 0);
4192 match(ConF);
4194 op_cost(5);
4195 format %{ %}
4196 interface(CONST_INTER);
4197 %}
4199 //single-precision floating-point immediate
4200 operand immF() %{
4201 match(ConF);
4203 op_cost(20);
4204 format %{ %}
4205 interface(CONST_INTER);
4206 %}
4208 //double-precision floating-point zero
4209 operand immD0() %{
4210 predicate(jlong_cast(n->getd()) == 0);
4211 match(ConD);
4213 op_cost(5);
4214 format %{ %}
4215 interface(CONST_INTER);
4216 %}
4218 //double-precision floating-point immediate
4219 operand immD() %{
4220 match(ConD);
4222 op_cost(20);
4223 format %{ %}
4224 interface(CONST_INTER);
4225 %}
4227 // Register Operands
4228 // Integer Register
4229 operand mRegI() %{
4230 constraint(ALLOC_IN_RC(int_reg));
4231 match(RegI);
4233 format %{ %}
4234 interface(REG_INTER);
4235 %}
4237 operand no_Ax_mRegI() %{
4238 constraint(ALLOC_IN_RC(no_Ax_int_reg));
4239 match(RegI);
4240 match(mRegI);
4242 format %{ %}
4243 interface(REG_INTER);
4244 %}
4246 operand mS0RegI() %{
4247 constraint(ALLOC_IN_RC(s0_reg));
4248 match(RegI);
4249 match(mRegI);
4251 format %{ "S0" %}
4252 interface(REG_INTER);
4253 %}
4255 operand mS1RegI() %{
4256 constraint(ALLOC_IN_RC(s1_reg));
4257 match(RegI);
4258 match(mRegI);
4260 format %{ "S1" %}
4261 interface(REG_INTER);
4262 %}
4264 operand mS2RegI() %{
4265 constraint(ALLOC_IN_RC(s2_reg));
4266 match(RegI);
4267 match(mRegI);
4269 format %{ "S2" %}
4270 interface(REG_INTER);
4271 %}
4273 operand mS3RegI() %{
4274 constraint(ALLOC_IN_RC(s3_reg));
4275 match(RegI);
4276 match(mRegI);
4278 format %{ "S3" %}
4279 interface(REG_INTER);
4280 %}
4282 operand mS4RegI() %{
4283 constraint(ALLOC_IN_RC(s4_reg));
4284 match(RegI);
4285 match(mRegI);
4287 format %{ "S4" %}
4288 interface(REG_INTER);
4289 %}
// ---- Fixed-register integer operands -------------------------------------
// Each operand below pins an integer (Op_RegI) value to exactly one physical
// MIPS register via a single-register allocation class (s5_reg, t0_reg, ...).
// Instruction patterns with hard register requirements (calling convention,
// implicit-register instructions) select these instead of the general mRegI.
// The extra match(mRegI) lets the matcher accept the generic int operand and
// then narrow it to the fixed register.
operand mS5RegI() %{
  constraint(ALLOC_IN_RC(s5_reg));
  match(RegI);
  match(mRegI);

  format %{ "S5" %}
  interface(REG_INTER);
%}

operand mS6RegI() %{
  constraint(ALLOC_IN_RC(s6_reg));
  match(RegI);
  match(mRegI);

  format %{ "S6" %}
  interface(REG_INTER);
%}

operand mS7RegI() %{
  constraint(ALLOC_IN_RC(s7_reg));
  match(RegI);
  match(mRegI);

  format %{ "S7" %}
  interface(REG_INTER);
%}

// Caller-saved temporaries T0..T3, T8, T9.
operand mT0RegI() %{
  constraint(ALLOC_IN_RC(t0_reg));
  match(RegI);
  match(mRegI);

  format %{ "T0" %}
  interface(REG_INTER);
%}

operand mT1RegI() %{
  constraint(ALLOC_IN_RC(t1_reg));
  match(RegI);
  match(mRegI);

  format %{ "T1" %}
  interface(REG_INTER);
%}

operand mT2RegI() %{
  constraint(ALLOC_IN_RC(t2_reg));
  match(RegI);
  match(mRegI);

  format %{ "T2" %}
  interface(REG_INTER);
%}

operand mT3RegI() %{
  constraint(ALLOC_IN_RC(t3_reg));
  match(RegI);
  match(mRegI);

  format %{ "T3" %}
  interface(REG_INTER);
%}

operand mT8RegI() %{
  constraint(ALLOC_IN_RC(t8_reg));
  match(RegI);
  match(mRegI);

  format %{ "T8" %}
  interface(REG_INTER);
%}

operand mT9RegI() %{
  constraint(ALLOC_IN_RC(t9_reg));
  match(RegI);
  match(mRegI);

  format %{ "T9" %}
  interface(REG_INTER);
%}

// Argument registers A0..A7 (MIPS N64 calling convention).
operand mA0RegI() %{
  constraint(ALLOC_IN_RC(a0_reg));
  match(RegI);
  match(mRegI);

  format %{ "A0" %}
  interface(REG_INTER);
%}

operand mA1RegI() %{
  constraint(ALLOC_IN_RC(a1_reg));
  match(RegI);
  match(mRegI);

  format %{ "A1" %}
  interface(REG_INTER);
%}

operand mA2RegI() %{
  constraint(ALLOC_IN_RC(a2_reg));
  match(RegI);
  match(mRegI);

  format %{ "A2" %}
  interface(REG_INTER);
%}

operand mA3RegI() %{
  constraint(ALLOC_IN_RC(a3_reg));
  match(RegI);
  match(mRegI);

  format %{ "A3" %}
  interface(REG_INTER);
%}

operand mA4RegI() %{
  constraint(ALLOC_IN_RC(a4_reg));
  match(RegI);
  match(mRegI);

  format %{ "A4" %}
  interface(REG_INTER);
%}

operand mA5RegI() %{
  constraint(ALLOC_IN_RC(a5_reg));
  match(RegI);
  match(mRegI);

  format %{ "A5" %}
  interface(REG_INTER);
%}

operand mA6RegI() %{
  constraint(ALLOC_IN_RC(a6_reg));
  match(RegI);
  match(mRegI);

  format %{ "A6" %}
  interface(REG_INTER);
%}

operand mA7RegI() %{
  constraint(ALLOC_IN_RC(a7_reg));
  match(RegI);
  match(mRegI);

  format %{ "A7" %}
  interface(REG_INTER);
%}

// Result registers V0/V1.
operand mV0RegI() %{
  constraint(ALLOC_IN_RC(v0_reg));
  match(RegI);
  match(mRegI);

  format %{ "V0" %}
  interface(REG_INTER);
%}

operand mV1RegI() %{
  constraint(ALLOC_IN_RC(v1_reg));
  match(RegI);
  match(mRegI);

  format %{ "V1" %}
  interface(REG_INTER);
%}
// ---- Narrow-oop (compressed pointer) register operands -------------------
// mRegN is the general operand for RegN (compressed oop/klass) values; it
// allocates out of the int register class since a narrow oop is 32 bits.
// The fixed-register variants below pin a RegN value to one physical
// register, mirroring the fixed integer operands above. The empty format
// lets the default register name be printed in disassembly.
operand mRegN() %{
  constraint(ALLOC_IN_RC(int_reg));
  match(RegN);

  format %{ %}
  interface(REG_INTER);
%}

operand t0_RegN() %{
  constraint(ALLOC_IN_RC(t0_reg));
  match(RegN);
  match(mRegN);

  format %{ %}
  interface(REG_INTER);
%}

operand t1_RegN() %{
  constraint(ALLOC_IN_RC(t1_reg));
  match(RegN);
  match(mRegN);

  format %{ %}
  interface(REG_INTER);
%}

operand t2_RegN() %{
  constraint(ALLOC_IN_RC(t2_reg));
  match(RegN);
  match(mRegN);

  format %{ %}
  interface(REG_INTER);
%}

operand t3_RegN() %{
  constraint(ALLOC_IN_RC(t3_reg));
  match(RegN);
  match(mRegN);

  format %{ %}
  interface(REG_INTER);
%}

operand t8_RegN() %{
  constraint(ALLOC_IN_RC(t8_reg));
  match(RegN);
  match(mRegN);

  format %{ %}
  interface(REG_INTER);
%}

operand t9_RegN() %{
  constraint(ALLOC_IN_RC(t9_reg));
  match(RegN);
  match(mRegN);

  format %{ %}
  interface(REG_INTER);
%}

operand a0_RegN() %{
  constraint(ALLOC_IN_RC(a0_reg));
  match(RegN);
  match(mRegN);

  format %{ %}
  interface(REG_INTER);
%}

operand a1_RegN() %{
  constraint(ALLOC_IN_RC(a1_reg));
  match(RegN);
  match(mRegN);

  format %{ %}
  interface(REG_INTER);
%}

operand a2_RegN() %{
  constraint(ALLOC_IN_RC(a2_reg));
  match(RegN);
  match(mRegN);

  format %{ %}
  interface(REG_INTER);
%}

operand a3_RegN() %{
  constraint(ALLOC_IN_RC(a3_reg));
  match(RegN);
  match(mRegN);

  format %{ %}
  interface(REG_INTER);
%}

operand a4_RegN() %{
  constraint(ALLOC_IN_RC(a4_reg));
  match(RegN);
  match(mRegN);

  format %{ %}
  interface(REG_INTER);
%}

operand a5_RegN() %{
  constraint(ALLOC_IN_RC(a5_reg));
  match(RegN);
  match(mRegN);

  format %{ %}
  interface(REG_INTER);
%}

operand a6_RegN() %{
  constraint(ALLOC_IN_RC(a6_reg));
  match(RegN);
  match(mRegN);

  format %{ %}
  interface(REG_INTER);
%}

operand a7_RegN() %{
  constraint(ALLOC_IN_RC(a7_reg));
  match(RegN);
  match(mRegN);

  format %{ %}
  interface(REG_INTER);
%}

operand s0_RegN() %{
  constraint(ALLOC_IN_RC(s0_reg));
  match(RegN);
  match(mRegN);

  format %{ %}
  interface(REG_INTER);
%}

operand s1_RegN() %{
  constraint(ALLOC_IN_RC(s1_reg));
  match(RegN);
  match(mRegN);

  format %{ %}
  interface(REG_INTER);
%}

operand s2_RegN() %{
  constraint(ALLOC_IN_RC(s2_reg));
  match(RegN);
  match(mRegN);

  format %{ %}
  interface(REG_INTER);
%}

operand s3_RegN() %{
  constraint(ALLOC_IN_RC(s3_reg));
  match(RegN);
  match(mRegN);

  format %{ %}
  interface(REG_INTER);
%}

operand s4_RegN() %{
  constraint(ALLOC_IN_RC(s4_reg));
  match(RegN);
  match(mRegN);

  format %{ %}
  interface(REG_INTER);
%}

operand s5_RegN() %{
  constraint(ALLOC_IN_RC(s5_reg));
  match(RegN);
  match(mRegN);

  format %{ %}
  interface(REG_INTER);
%}

operand s6_RegN() %{
  constraint(ALLOC_IN_RC(s6_reg));
  match(RegN);
  match(mRegN);

  format %{ %}
  interface(REG_INTER);
%}

operand s7_RegN() %{
  constraint(ALLOC_IN_RC(s7_reg));
  match(RegN);
  match(mRegN);

  format %{ %}
  interface(REG_INTER);
%}

operand v0_RegN() %{
  constraint(ALLOC_IN_RC(v0_reg));
  match(RegN);
  match(mRegN);

  format %{ %}
  interface(REG_INTER);
%}

operand v1_RegN() %{
  constraint(ALLOC_IN_RC(v1_reg));
  match(RegN);
  match(mRegN);

  format %{ %}
  interface(REG_INTER);
%}
// Pointer Register
// mRegP is the general pointer operand (full p_reg class). no_T8_mRegP is
// the same but excludes T8 — presumably because T8 is needed as a scratch
// register in some instruction encodings; TODO(review): confirm against the
// instruct definitions that use no_T8_mRegP.
operand mRegP() %{
  constraint(ALLOC_IN_RC(p_reg));
  match(RegP);

  format %{ %}
  interface(REG_INTER);
%}

operand no_T8_mRegP() %{
  constraint(ALLOC_IN_RC(no_T8_p_reg));
  match(RegP);
  match(mRegP);

  format %{ %}
  interface(REG_INTER);
%}

// Fixed-register pointer operands. Pointers are 64-bit, hence the *_long_reg
// allocation classes. All of them chain-match the generic pointer operands;
// note t8_RegP intentionally does NOT match no_T8_mRegP (T8 is excluded
// from that class).
operand s0_RegP()
%{
  constraint(ALLOC_IN_RC(s0_long_reg));
  match(RegP);
  match(mRegP);
  match(no_T8_mRegP);

  format %{ %}
  interface(REG_INTER);
%}

operand s1_RegP()
%{
  constraint(ALLOC_IN_RC(s1_long_reg));
  match(RegP);
  match(mRegP);
  match(no_T8_mRegP);

  format %{ %}
  interface(REG_INTER);
%}

operand s2_RegP()
%{
  constraint(ALLOC_IN_RC(s2_long_reg));
  match(RegP);
  match(mRegP);
  match(no_T8_mRegP);

  format %{ %}
  interface(REG_INTER);
%}

operand s3_RegP()
%{
  constraint(ALLOC_IN_RC(s3_long_reg));
  match(RegP);
  match(mRegP);
  match(no_T8_mRegP);

  format %{ %}
  interface(REG_INTER);
%}

operand s4_RegP()
%{
  constraint(ALLOC_IN_RC(s4_long_reg));
  match(RegP);
  match(mRegP);
  match(no_T8_mRegP);

  format %{ %}
  interface(REG_INTER);
%}

operand s5_RegP()
%{
  constraint(ALLOC_IN_RC(s5_long_reg));
  match(RegP);
  match(mRegP);
  match(no_T8_mRegP);

  format %{ %}
  interface(REG_INTER);
%}

operand s6_RegP()
%{
  constraint(ALLOC_IN_RC(s6_long_reg));
  match(RegP);
  match(mRegP);
  match(no_T8_mRegP);

  format %{ %}
  interface(REG_INTER);
%}

operand s7_RegP()
%{
  constraint(ALLOC_IN_RC(s7_long_reg));
  match(RegP);
  match(mRegP);
  match(no_T8_mRegP);

  format %{ %}
  interface(REG_INTER);
%}

operand t0_RegP()
%{
  constraint(ALLOC_IN_RC(t0_long_reg));
  match(RegP);
  match(mRegP);
  match(no_T8_mRegP);

  format %{ %}
  interface(REG_INTER);
%}

operand t1_RegP()
%{
  constraint(ALLOC_IN_RC(t1_long_reg));
  match(RegP);
  match(mRegP);
  match(no_T8_mRegP);

  format %{ %}
  interface(REG_INTER);
%}

operand t2_RegP()
%{
  constraint(ALLOC_IN_RC(t2_long_reg));
  match(RegP);
  match(mRegP);
  match(no_T8_mRegP);

  format %{ %}
  interface(REG_INTER);
%}

operand t3_RegP()
%{
  constraint(ALLOC_IN_RC(t3_long_reg));
  match(RegP);
  match(mRegP);
  match(no_T8_mRegP);

  format %{ %}
  interface(REG_INTER);
%}

// T8 is not a member of no_T8_p_reg, so no match(no_T8_mRegP) here.
operand t8_RegP()
%{
  constraint(ALLOC_IN_RC(t8_long_reg));
  match(RegP);
  match(mRegP);

  format %{ %}
  interface(REG_INTER);
%}

operand t9_RegP()
%{
  constraint(ALLOC_IN_RC(t9_long_reg));
  match(RegP);
  match(mRegP);
  match(no_T8_mRegP);

  format %{ %}
  interface(REG_INTER);
%}

operand a0_RegP()
%{
  constraint(ALLOC_IN_RC(a0_long_reg));
  match(RegP);
  match(mRegP);
  match(no_T8_mRegP);

  format %{ %}
  interface(REG_INTER);
%}

operand a1_RegP()
%{
  constraint(ALLOC_IN_RC(a1_long_reg));
  match(RegP);
  match(mRegP);
  match(no_T8_mRegP);

  format %{ %}
  interface(REG_INTER);
%}

operand a2_RegP()
%{
  constraint(ALLOC_IN_RC(a2_long_reg));
  match(RegP);
  match(mRegP);
  match(no_T8_mRegP);

  format %{ %}
  interface(REG_INTER);
%}

operand a3_RegP()
%{
  constraint(ALLOC_IN_RC(a3_long_reg));
  match(RegP);
  match(mRegP);
  match(no_T8_mRegP);

  format %{ %}
  interface(REG_INTER);
%}

operand a4_RegP()
%{
  constraint(ALLOC_IN_RC(a4_long_reg));
  match(RegP);
  match(mRegP);
  match(no_T8_mRegP);

  format %{ %}
  interface(REG_INTER);
%}

operand a5_RegP()
%{
  constraint(ALLOC_IN_RC(a5_long_reg));
  match(RegP);
  match(mRegP);
  match(no_T8_mRegP);

  format %{ %}
  interface(REG_INTER);
%}

operand a6_RegP()
%{
  constraint(ALLOC_IN_RC(a6_long_reg));
  match(RegP);
  match(mRegP);
  match(no_T8_mRegP);

  format %{ %}
  interface(REG_INTER);
%}

operand a7_RegP()
%{
  constraint(ALLOC_IN_RC(a7_long_reg));
  match(RegP);
  match(mRegP);
  match(no_T8_mRegP);

  format %{ %}
  interface(REG_INTER);
%}

operand v0_RegP()
%{
  constraint(ALLOC_IN_RC(v0_long_reg));
  match(RegP);
  match(mRegP);
  match(no_T8_mRegP);

  format %{ %}
  interface(REG_INTER);
%}

operand v1_RegP()
%{
  constraint(ALLOC_IN_RC(v1_long_reg));
  match(RegP);
  match(mRegP);
  match(no_T8_mRegP);

  format %{ %}
  interface(REG_INTER);
%}
4969 /*
4970 operand mSPRegP(mRegP reg) %{
4971 constraint(ALLOC_IN_RC(sp_reg));
4972 match(reg);
4974 format %{ "SP" %}
4975 interface(REG_INTER);
4976 %}
4978 operand mFPRegP(mRegP reg) %{
4979 constraint(ALLOC_IN_RC(fp_reg));
4980 match(reg);
4982 format %{ "FP" %}
4983 interface(REG_INTER);
4984 %}
4985 */
// ---- Long (64-bit integer) register operands -----------------------------
// mRegL is the general long operand; the fixed-register variants pin a long
// to one physical register. NOTE(review): only s0-s4 and s7 have fixed long
// operands (no s5RegL/s6RegL/t9RegL) — presumably the missing ones are never
// needed by an instruct rule; confirm before relying on them.
operand mRegL() %{
  constraint(ALLOC_IN_RC(long_reg));
  match(RegL);

  format %{ %}
  interface(REG_INTER);
%}

operand v0RegL() %{
  constraint(ALLOC_IN_RC(v0_long_reg));
  match(RegL);
  match(mRegL);

  format %{ %}
  interface(REG_INTER);
%}

operand v1RegL() %{
  constraint(ALLOC_IN_RC(v1_long_reg));
  match(RegL);
  match(mRegL);

  format %{ %}
  interface(REG_INTER);
%}

// NOTE(review): a0RegL is the only long operand with a non-empty format
// string ("A0"); harmless, but inconsistent with its siblings.
operand a0RegL() %{
  constraint(ALLOC_IN_RC(a0_long_reg));
  match(RegL);
  match(mRegL);

  format %{ "A0" %}
  interface(REG_INTER);
%}

operand a1RegL() %{
  constraint(ALLOC_IN_RC(a1_long_reg));
  match(RegL);
  match(mRegL);

  format %{ %}
  interface(REG_INTER);
%}

operand a2RegL() %{
  constraint(ALLOC_IN_RC(a2_long_reg));
  match(RegL);
  match(mRegL);

  format %{ %}
  interface(REG_INTER);
%}

operand a3RegL() %{
  constraint(ALLOC_IN_RC(a3_long_reg));
  match(RegL);
  match(mRegL);

  format %{ %}
  interface(REG_INTER);
%}

operand t0RegL() %{
  constraint(ALLOC_IN_RC(t0_long_reg));
  match(RegL);
  match(mRegL);

  format %{ %}
  interface(REG_INTER);
%}

operand t1RegL() %{
  constraint(ALLOC_IN_RC(t1_long_reg));
  match(RegL);
  match(mRegL);

  format %{ %}
  interface(REG_INTER);
%}

operand t2RegL() %{
  constraint(ALLOC_IN_RC(t2_long_reg));
  match(RegL);
  match(mRegL);

  format %{ %}
  interface(REG_INTER);
%}

operand t3RegL() %{
  constraint(ALLOC_IN_RC(t3_long_reg));
  match(RegL);
  match(mRegL);

  format %{ %}
  interface(REG_INTER);
%}

operand t8RegL() %{
  constraint(ALLOC_IN_RC(t8_long_reg));
  match(RegL);
  match(mRegL);

  format %{ %}
  interface(REG_INTER);
%}

operand a4RegL() %{
  constraint(ALLOC_IN_RC(a4_long_reg));
  match(RegL);
  match(mRegL);

  format %{ %}
  interface(REG_INTER);
%}

operand a5RegL() %{
  constraint(ALLOC_IN_RC(a5_long_reg));
  match(RegL);
  match(mRegL);

  format %{ %}
  interface(REG_INTER);
%}

operand a6RegL() %{
  constraint(ALLOC_IN_RC(a6_long_reg));
  match(RegL);
  match(mRegL);

  format %{ %}
  interface(REG_INTER);
%}

operand a7RegL() %{
  constraint(ALLOC_IN_RC(a7_long_reg));
  match(RegL);
  match(mRegL);

  format %{ %}
  interface(REG_INTER);
%}

operand s0RegL() %{
  constraint(ALLOC_IN_RC(s0_long_reg));
  match(RegL);
  match(mRegL);

  format %{ %}
  interface(REG_INTER);
%}

operand s1RegL() %{
  constraint(ALLOC_IN_RC(s1_long_reg));
  match(RegL);
  match(mRegL);

  format %{ %}
  interface(REG_INTER);
%}

operand s2RegL() %{
  constraint(ALLOC_IN_RC(s2_long_reg));
  match(RegL);
  match(mRegL);

  format %{ %}
  interface(REG_INTER);
%}

operand s3RegL() %{
  constraint(ALLOC_IN_RC(s3_long_reg));
  match(RegL);
  match(mRegL);

  format %{ %}
  interface(REG_INTER);
%}

operand s4RegL() %{
  constraint(ALLOC_IN_RC(s4_long_reg));
  match(RegL);
  match(mRegL);

  format %{ %}
  interface(REG_INTER);
%}

operand s7RegL() %{
  constraint(ALLOC_IN_RC(s7_long_reg));
  match(RegL);
  match(mRegL);

  format %{ %}
  interface(REG_INTER);
%}
// Floating register operands
// Single-precision float value allocated in the FP register class.
operand regF() %{
  constraint(ALLOC_IN_RC(flt_reg));
  match(RegF);

  format %{ %}
  interface(REG_INTER);
%}

//Double Precision Floating register operands
// Double-precision float value allocated in the double register class.
operand regD() %{
  constraint(ALLOC_IN_RC(dbl_reg));
  match(RegD);

  format %{ %}
  interface(REG_INTER);
%}
//----------Memory Operands----------------------------------------------------
// Each memory operand matches a pointer-arithmetic ideal subtree and exposes
// it to the encoder through MEMORY_INTER as (base, index, scale, disp).
// The *Narrow variants accept a compressed oop (DecodeN) base and the
// *NarrowKlass variants a compressed klass (DecodeNKlass) base; both are
// guarded by a predicate requiring a zero narrow shift so the compressed
// value can serve directly as the base address.

// Indirect Memory Operand
operand indirect(mRegP reg) %{
  constraint(ALLOC_IN_RC(p_reg));
  match(reg);

  format %{ "[$reg] @ indirect" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0x0); /* NO_INDEX */
    scale(0x0);
    disp(0x0);
  %}
%}

// Indirect Memory Plus Short Offset Operand
operand indOffset8(mRegP reg, immL8 off)
%{
  constraint(ALLOC_IN_RC(p_reg));
  match(AddP reg off);

  format %{ "[$reg + $off (8-bit)] @ indOffset8" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0x0); /* NO_INDEX */
    scale(0x0);
    disp($off);
  %}
%}

// Indirect Memory Times Scale Plus Index Register
operand indIndexScale(mRegP reg, mRegL lreg, immI2 scale)
%{
  constraint(ALLOC_IN_RC(p_reg));
  match(AddP reg (LShiftL lreg scale));

  op_cost(10);
  format %{"[$reg + $lreg << $scale] @ indIndexScale" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($lreg);
    scale($scale);
    disp(0x0);
  %}
%}

// [base + index + offset]
operand baseIndexOffset8(mRegP base, mRegL index, immL8 off)
%{
  constraint(ALLOC_IN_RC(p_reg));
  op_cost(5);
  match(AddP (AddP base index) off);

  format %{ "[$base + $index + $off (8-bit)] @ baseIndexOffset8" %}
  interface(MEMORY_INTER) %{
    base($base);
    index($index);
    scale(0x0);
    disp($off);
  %}
%}

// [base + index + offset] where the index is a sign-extended int.
operand baseIndexOffset8_convI2L(mRegP base, mRegI index, immL8 off)
%{
  constraint(ALLOC_IN_RC(p_reg));
  op_cost(5);
  match(AddP (AddP base (ConvI2L index)) off);

  format %{ "[$base + $index + $off (8-bit)] @ baseIndexOffset8_convI2L" %}
  interface(MEMORY_INTER) %{
    base($base);
    index($index);
    scale(0x0);
    disp($off);
  %}
%}

// Indirect Memory Times Scale Plus Index Register Plus Offset Operand
operand indIndexScaleOffset8(mRegP reg, immL8 off, mRegL lreg, immI2 scale)
%{
  constraint(ALLOC_IN_RC(p_reg));
  match(AddP (AddP reg (LShiftL lreg scale)) off);

  op_cost(10);
  format %{"[$reg + $off + $lreg << $scale] @ indIndexScaleOffset8" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($lreg);
    scale($scale);
    disp($off);
  %}
%}

// Same as above with an int index sign-extended to long.
operand indIndexScaleOffset8_convI2L(mRegP reg, immL8 off, mRegI ireg, immI2 scale)
%{
  constraint(ALLOC_IN_RC(p_reg));
  match(AddP (AddP reg (LShiftL (ConvI2L ireg) scale)) off);

  op_cost(10);
  format %{"[$reg + $off + $ireg << $scale] @ indIndexScaleOffset8_convI2L" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($ireg);
    scale($scale);
    disp($off);
  %}
%}

// [base + index<<scale + offset]
operand basePosIndexScaleOffset8(mRegP base, mRegI index, immL8 off, immI_0_31 scale)
%{
  constraint(ALLOC_IN_RC(p_reg));
  //predicate(n->in(2)->in(3)->in(1)->as_Type()->type()->is_long()->_lo >= 0);
  op_cost(10);
  match(AddP (AddP base (LShiftL (ConvI2L index) scale)) off);

  format %{ "[$base + $index << $scale + $off (8-bit)] @ basePosIndexScaleOffset8" %}
  interface(MEMORY_INTER) %{
    base($base);
    index($index);
    scale($scale);
    disp($off);
  %}
%}

// Indirect Memory Times Scale Plus Index Register Plus Offset Operand
// Base is a compressed oop; requires narrow_oop_shift == 0.
operand indIndexScaleOffsetNarrow(mRegN reg, immL8 off, mRegL lreg, immI2 scale)
%{
  predicate(Universe::narrow_oop_shift() == 0);
  constraint(ALLOC_IN_RC(p_reg));
  match(AddP (AddP (DecodeN reg) (LShiftL lreg scale)) off);

  op_cost(10);
  format %{"[$reg + $off + $lreg << $scale] @ indIndexScaleOffsetNarrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($lreg);
    scale($scale);
    disp($off);
  %}
%}

// [base + index<<scale + offset] for compressd Oops
operand indPosIndexI2LScaleOffset8Narrow(mRegN base, mRegI index, immL8 off, immI_0_31 scale)
%{
  constraint(ALLOC_IN_RC(p_reg));
  //predicate(Universe::narrow_oop_shift() == 0 && n->in(2)->in(3)->in(1)->as_Type()->type()->is_long()->_lo >= 0);
  predicate(Universe::narrow_oop_shift() == 0);
  op_cost(10);
  match(AddP (AddP (DecodeN base) (LShiftL (ConvI2L index) scale)) off);

  format %{ "[$base + $index << $scale + $off (8-bit)] @ indPosIndexI2LScaleOffset8Narrow" %}
  interface(MEMORY_INTER) %{
    base($base);
    index($index);
    scale($scale);
    disp($off);
  %}
%}

//FIXME: I think it's better to limit the immI to be 16-bit at most!
// Indirect Memory Plus Long Offset Operand
operand indOffset32(mRegP reg, immL32 off) %{
  constraint(ALLOC_IN_RC(p_reg));
  op_cost(20);
  match(AddP reg off);

  format %{ "[$reg + $off (32-bit)] @ indOffset32" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0x0); /* NO_INDEX */
    scale(0x0);
    disp($off);
  %}
%}

// Indirect Memory Plus Index Register
operand indIndex(mRegP addr, mRegL index) %{
  constraint(ALLOC_IN_RC(p_reg));
  match(AddP addr index);

  op_cost(20);
  format %{"[$addr + $index] @ indIndex" %}
  interface(MEMORY_INTER) %{
    base($addr);
    index($index);
    scale(0x0);
    disp(0x0);
  %}
%}

// Compressed-klass base, no offset; requires narrow_klass_shift == 0.
operand indirectNarrowKlass(mRegN reg)
%{
  predicate(Universe::narrow_klass_shift() == 0);
  constraint(ALLOC_IN_RC(p_reg));
  op_cost(10);
  match(DecodeNKlass reg);

  format %{ "[$reg] @ indirectNarrowKlass" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0x0);
    scale(0x0);
    disp(0x0);
  %}
%}

operand indOffset8NarrowKlass(mRegN reg, immL8 off)
%{
  predicate(Universe::narrow_klass_shift() == 0);
  constraint(ALLOC_IN_RC(p_reg));
  op_cost(10);
  match(AddP (DecodeNKlass reg) off);

  format %{ "[$reg + $off (8-bit)] @ indOffset8NarrowKlass" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0x0);
    scale(0x0);
    disp($off);
  %}
%}

operand indOffset32NarrowKlass(mRegN reg, immL32 off)
%{
  predicate(Universe::narrow_klass_shift() == 0);
  constraint(ALLOC_IN_RC(p_reg));
  op_cost(10);
  match(AddP (DecodeNKlass reg) off);

  format %{ "[$reg + $off (32-bit)] @ indOffset32NarrowKlass" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0x0);
    scale(0x0);
    disp($off);
  %}
%}

operand indIndexOffsetNarrowKlass(mRegN reg, mRegL lreg, immL32 off)
%{
  predicate(Universe::narrow_klass_shift() == 0);
  constraint(ALLOC_IN_RC(p_reg));
  match(AddP (AddP (DecodeNKlass reg) lreg) off);

  op_cost(10);
  format %{"[$reg + $off + $lreg] @ indIndexOffsetNarrowKlass" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($lreg);
    scale(0x0);
    disp($off);
  %}
%}

operand indIndexNarrowKlass(mRegN reg, mRegL lreg)
%{
  predicate(Universe::narrow_klass_shift() == 0);
  constraint(ALLOC_IN_RC(p_reg));
  match(AddP (DecodeNKlass reg) lreg);

  op_cost(10);
  format %{"[$reg + $lreg] @ indIndexNarrowKlass" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($lreg);
    scale(0x0);
    disp(0x0);
  %}
%}

// Indirect Memory Operand
// Compressed-oop base; requires narrow_oop_shift == 0.
operand indirectNarrow(mRegN reg)
%{
  predicate(Universe::narrow_oop_shift() == 0);
  constraint(ALLOC_IN_RC(p_reg));
  op_cost(10);
  match(DecodeN reg);

  format %{ "[$reg] @ indirectNarrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0x0);
    scale(0x0);
    disp(0x0);
  %}
%}

// Indirect Memory Plus Short Offset Operand
operand indOffset8Narrow(mRegN reg, immL8 off)
%{
  predicate(Universe::narrow_oop_shift() == 0);
  constraint(ALLOC_IN_RC(p_reg));
  op_cost(10);
  match(AddP (DecodeN reg) off);

  format %{ "[$reg + $off (8-bit)] @ indOffset8Narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0x0);
    scale(0x0);
    disp($off);
  %}
%}

// Indirect Memory Plus Index Register Plus Offset Operand
operand indIndexOffset8Narrow(mRegN reg, mRegL lreg, immL8 off)
%{
  predicate(Universe::narrow_oop_shift() == 0);
  constraint(ALLOC_IN_RC(p_reg));
  match(AddP (AddP (DecodeN reg) lreg) off);

  op_cost(10);
  format %{"[$reg + $off + $lreg] @ indIndexOffset8Narrow" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index($lreg);
    scale(0x0);
    disp($off);
  %}
%}
//----------Load Long Memory Operands------------------------------------------
// The load-long idiom will use it's address expression again after loading
// the first word of the long. If the load-long destination overlaps with
// registers used in the addressing expression, the 2nd half will be loaded
// from a clobbered address. Fix this by requiring that load-long use
// address registers that do not overlap with the load-long target.

// load-long support
// High op_cost discourages the allocator from picking this operand unless a
// load-long pattern requires it.
operand load_long_RegP() %{
  constraint(ALLOC_IN_RC(p_reg));
  match(RegP);
  match(mRegP);
  op_cost(100);
  format %{ %}
  interface(REG_INTER);
%}

// Indirect Memory Operand Long
operand load_long_indirect(load_long_RegP reg) %{
  constraint(ALLOC_IN_RC(p_reg));
  match(reg);

  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0x0);
    scale(0x0);
    disp(0x0);
  %}
%}

// Indirect Memory Plus Long Offset Operand
operand load_long_indOffset32(load_long_RegP reg, immL32 off) %{
  match(AddP reg off);

  format %{ "[$reg + $off]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0x0);
    scale(0x0);
    disp($off);
  %}
%}
//----------Conditional Branch Operands----------------------------------------
// Comparison Op - This is the operation of the comparison, and is limited to
// the following set of codes:
// L (<), LE (<=), G (>), GE (>=), E (==), NE (!=)
//
// Other attributes of the comparison, such as unsignedness, are specified
// by the comparison instruction that sets a condition code flags register.
// That result is represented by a flags operand whose subtype is appropriate
// to the unsignedness (etc.) of the comparison.
//
// Later, the instruction which matches both the Comparison Op (a Bool) and
// the flags (produced by the Cmp) specifies the coding of the comparison op
// by matching a specific subtype of Bool operand below, such as cmpOpU.

// Comparision Code
// Signed comparison condition; the encodings 0x01..0x08 are this port's own
// internal numbering consumed by the branch encodings.
operand cmpOp() %{
  match(Bool);

  format %{ "" %}
  interface(COND_INTER) %{
    equal(0x01);
    not_equal(0x02);
    greater(0x03);
    greater_equal(0x04);
    less(0x05);
    less_equal(0x06);
    overflow(0x7);
    no_overflow(0x8);
  %}
%}


// Comparision Code
// Comparison Code, unsigned compare. Used by FP also, with
// C2 (unordered) turned into GT or LT already. The other bits
// C0 and C3 are turned into Carry & Zero flags.
// Encodings are identical to cmpOp; the signed/unsigned distinction is made
// by the instruct rules that match this operand.
operand cmpOpU() %{
  match(Bool);

  format %{ "" %}
  interface(COND_INTER) %{
    equal(0x01);
    not_equal(0x02);
    greater(0x03);
    greater_equal(0x04);
    less(0x05);
    less_equal(0x06);
    overflow(0x7);
    no_overflow(0x8);
  %}
%}
5622 /*
5623 // Comparison Code, unsigned compare. Used by FP also, with
5624 // C2 (unordered) turned into GT or LT already. The other bits
5625 // C0 and C3 are turned into Carry & Zero flags.
5626 operand cmpOpU() %{
5627 match(Bool);
5629 format %{ "" %}
5630 interface(COND_INTER) %{
5631 equal(0x4);
5632 not_equal(0x5);
5633 less(0x2);
5634 greater_equal(0x3);
5635 less_equal(0x6);
5636 greater(0x7);
5637 %}
5638 %}
5639 */
5640 /*
5641 // Comparison Code for FP conditional move
5642 operand cmpOp_fcmov() %{
5643 match(Bool);
5645 format %{ "" %}
5646 interface(COND_INTER) %{
5647 equal (0x01);
5648 not_equal (0x02);
5649 greater (0x03);
5650 greater_equal(0x04);
5651 less (0x05);
5652 less_equal (0x06);
5653 %}
5654 %}
5656 // Comparision Code used in long compares
5657 operand cmpOp_commute() %{
5658 match(Bool);
5660 format %{ "" %}
5661 interface(COND_INTER) %{
5662 equal(0x4);
5663 not_equal(0x5);
5664 less(0xF);
5665 greater_equal(0xE);
5666 less_equal(0xD);
5667 greater(0xC);
5668 %}
5669 %}
5670 */
//----------Special Memory Operands--------------------------------------------
// Stack Slot Operand - This operand is used for loading and storing temporary
// values on the stack where a match requires a value to
// flow through memory.
// base(0x1d) is the encoding of SP; disp($reg) carries the slot's stack
// offset assigned by the allocator.
operand stackSlotP(sRegP reg) %{
  constraint(ALLOC_IN_RC(stack_slots));
  // No match rule because this operand is only generated in matching
  op_cost(50);
  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base(0x1d);  // SP
    index(0x0);  // No Index
    scale(0x0);  // No Scale
    disp($reg);  // Stack Offset
  %}
%}

operand stackSlotI(sRegI reg) %{
  constraint(ALLOC_IN_RC(stack_slots));
  // No match rule because this operand is only generated in matching
  op_cost(50);
  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base(0x1d);  // SP
    index(0x0);  // No Index
    scale(0x0);  // No Scale
    disp($reg);  // Stack Offset
  %}
%}

operand stackSlotF(sRegF reg) %{
  constraint(ALLOC_IN_RC(stack_slots));
  // No match rule because this operand is only generated in matching
  op_cost(50);
  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base(0x1d);  // SP
    index(0x0);  // No Index
    scale(0x0);  // No Scale
    disp($reg);  // Stack Offset
  %}
%}

operand stackSlotD(sRegD reg) %{
  constraint(ALLOC_IN_RC(stack_slots));
  // No match rule because this operand is only generated in matching
  op_cost(50);
  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base(0x1d);  // SP
    index(0x0);  // No Index
    scale(0x0);  // No Scale
    disp($reg);  // Stack Offset
  %}
%}

operand stackSlotL(sRegL reg) %{
  constraint(ALLOC_IN_RC(stack_slots));
  // No match rule because this operand is only generated in matching
  op_cost(50);
  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base(0x1d);  // SP
    index(0x0);  // No Index
    scale(0x0);  // No Scale
    disp($reg);  // Stack Offset
  %}
%}
//------------------------OPERAND CLASSES--------------------------------------
// "memory" is the umbrella operand class used by load/store instruct rules;
// it accepts any of the addressing-mode operands defined above.
//opclass memory( direct, indirect, indOffset16, indOffset32, indOffset32X, indIndexOffset );
opclass memory( indirect, indirectNarrow, indOffset8, indOffset32, indIndex, indIndexScale, load_long_indirect, load_long_indOffset32, baseIndexOffset8, baseIndexOffset8_convI2L, indIndexScaleOffset8, indIndexScaleOffset8_convI2L, basePosIndexScaleOffset8, indIndexScaleOffsetNarrow, indPosIndexI2LScaleOffset8Narrow, indOffset8Narrow, indIndexOffset8Narrow);
5747 //----------PIPELINE-----------------------------------------------------------
5748 // Rules which define the behavior of the target architectures pipeline.
5750 pipeline %{
5752 //----------ATTRIBUTES---------------------------------------------------------
  //----------ATTRIBUTES---------------------------------------------------------
  // Global pipeline attributes: fixed 4-byte instructions, branch delay
  // slots, one instruction per bundle, 16-byte fetch line.
  attributes %{
    fixed_size_instructions;         // Fixed size instructions
    branch_has_delay_slot;           // branch have delay slot in gs2
    max_instructions_per_bundle = 1; // 1 instruction per bundle
    max_bundles_per_cycle = 4;       // Up to 4 bundles per cycle
    bundle_unit_size = 4;
    instruction_unit_size = 4;       // An instruction is 4 bytes long
    instruction_fetch_unit_size = 16;// The processor fetches one line
    instruction_fetch_units = 1;     // of 16 bytes

    // List of nop instructions
    nops( MachNop );
  %}
  //----------RESOURCES----------------------------------------------------------
  // Resources are the functional units available to the machine:
  // 4 decoders, 2 integer ALUs, 2 FPUs, one memory port, one branch unit.
  resources(D1, D2, D3, D4, DECODE = D1 | D2 | D3| D4, ALU1, ALU2, ALU = ALU1 | ALU2, FPU1, FPU2, FPU = FPU1 | FPU2, MEM, BR);
  //----------PIPELINE DESCRIPTION-----------------------------------------------
  // Pipeline Description specifies the stages in the machine's pipeline

  // IF: fetch
  // ID: decode
  // RD: read
  // CA: caculate
  // WB: write back
  // CM: commit

  pipe_desc(IF, ID, RD, CA, WB, CM);
5785 //----------PIPELINE CLASSES---------------------------------------------------
5786 // Pipeline Classes describe the stages in which input and output are
5787 // referenced by the hardware pipeline.
// Latency convention: "dst : WB(write)+N" means the result is available N
// cycles after the nominal write-back stage (i.e. extra result latency N).
5789 //No.1 Integer ALU reg-reg operation : dst <-- reg1 op reg2
5790 pipe_class ialu_regI_regI(mRegI dst, mRegI src1, mRegI src2) %{
5791 single_instruction;
5792 src1 : RD(read);
5793 src2 : RD(read);
5794 dst : WB(write)+1;
5795 DECODE : ID;
5796 ALU : CA;
5797 %}
// Multiply: only the second integer ALU (ALU2) can execute it; 5-cycle
// extra result latency.
5799 //No.19 Integer mult operation : dst <-- reg1 mult reg2
5800 pipe_class ialu_mult(mRegI dst, mRegI src1, mRegI src2) %{
5801 src1 : RD(read);
5802 src2 : RD(read);
5803 dst : WB(write)+5;
5804 DECODE : ID;
5805 ALU2 : CA;
5806 %}
// 64-bit multiply: same unit as ialu_mult but with longer (10-cycle) latency.
5808 pipe_class mulL_reg_reg(mRegL dst, mRegL src1, mRegL src2) %{
5809 src1 : RD(read);
5810 src2 : RD(read);
5811 dst : WB(write)+10;
5812 DECODE : ID;
5813 ALU2 : CA;
5814 %}
5816 //No.19 Integer div operation : dst <-- reg1 div reg2
5817 pipe_class ialu_div(mRegI dst, mRegI src1, mRegI src2) %{
5818 src1 : RD(read);
5819 src2 : RD(read);
5820 dst : WB(write)+10;
5821 DECODE : ID;
5822 ALU2 : CA;
5823 %}
// Modulo is modeled as two machine instructions (div + mfhi).
5825 //No.19 Integer mod operation : dst <-- reg1 mod reg2
5826 pipe_class ialu_mod(mRegI dst, mRegI src1, mRegI src2) %{
5827 instruction_count(2);
5828 src1 : RD(read);
5829 src2 : RD(read);
5830 dst : WB(write)+10;
5831 DECODE : ID;
5832 ALU2 : CA;
5833 %}
5835 //No.15 Long ALU reg-reg operation : dst <-- reg1 op reg2
5836 pipe_class ialu_regL_regL(mRegL dst, mRegL src1, mRegL src2) %{
5837 instruction_count(2);
5838 src1 : RD(read);
5839 src2 : RD(read);
5840 dst : WB(write);
5841 DECODE : ID;
5842 ALU : CA;
5843 %}
5845 //No.18 Long ALU reg-imm16 operation : dst <-- reg1 op imm16
5846 pipe_class ialu_regL_imm16(mRegL dst, mRegL src) %{
5847 instruction_count(2);
5848 src : RD(read);
5849 dst : WB(write);
5850 DECODE : ID;
5851 ALU : CA;
5852 %}
5854 //no.16 load Long from memory :
5855 pipe_class ialu_loadL(mRegL dst, memory mem) %{
5856 instruction_count(2);
5857 mem : RD(read);
5858 dst : WB(write)+5;
5859 DECODE : ID;
5860 MEM : RD;
5861 %}
5863 //No.17 Store Long to Memory :
5864 pipe_class ialu_storeL(mRegL src, memory mem) %{
5865 instruction_count(2);
5866 mem : RD(read);
5867 src : RD(read);
5868 DECODE : ID;
5869 MEM : RD;
5870 %}
5872 //No.2 Integer ALU reg-imm16 operation : dst <-- reg1 op imm16
5873 pipe_class ialu_regI_imm16(mRegI dst, mRegI src) %{
5874 single_instruction;
5875 src : RD(read);
5876 dst : WB(write);
5877 DECODE : ID;
5878 ALU : CA;
5879 %}
5881 //No.3 Integer move operation : dst <-- reg
5882 pipe_class ialu_regI_mov(mRegI dst, mRegI src) %{
5883 src : RD(read);
5884 dst : WB(write);
5885 DECODE : ID;
5886 ALU : CA;
5887 %}
// Pseudo-class for nodes that emit no machine code at all.
5889 //No.4 No instructions : do nothing
5890 pipe_class empty( ) %{
5891 instruction_count(0);
5892 %}
5894 //No.5 UnConditional branch :
5895 pipe_class pipe_jump( label labl ) %{
5896 multiple_bundles;
5897 DECODE : ID;
5898 BR : RD;
5899 %}
5901 //No.6 ALU Conditional branch :
5902 pipe_class pipe_alu_branch(mRegI src1, mRegI src2, label labl ) %{
5903 multiple_bundles;
5904 src1 : RD(read);
5905 src2 : RD(read);
5906 DECODE : ID;
5907 BR : RD;
5908 %}
// Integer load: 3 extra cycles of load-to-use latency.
5910 //no.7 load integer from memory :
5911 pipe_class ialu_loadI(mRegI dst, memory mem) %{
5912 mem : RD(read);
5913 dst : WB(write)+3;
5914 DECODE : ID;
5915 MEM : RD;
5916 %}
5918 //No.8 Store Integer to Memory :
5919 pipe_class ialu_storeI(mRegI src, memory mem) %{
5920 mem : RD(read);
5921 src : RD(read);
5922 DECODE : ID;
5923 MEM : RD;
5924 %}
5927 //No.10 Floating FPU reg-reg operation : dst <-- reg1 op reg2
5928 pipe_class fpu_regF_regF(regF dst, regF src1, regF src2) %{
5929 src1 : RD(read);
5930 src2 : RD(read);
5931 dst : WB(write);
5932 DECODE : ID;
5933 FPU : CA;
5934 %}
// FP divide is restricted to the second FPU (FPU2).
5936 //No.22 Floating div operation : dst <-- reg1 div reg2
5937 pipe_class fpu_div(regF dst, regF src1, regF src2) %{
5938 src1 : RD(read);
5939 src2 : RD(read);
5940 dst : WB(write);
5941 DECODE : ID;
5942 FPU2 : CA;
5943 %}
// int -> double conversion, executed on FPU1.
5945 pipe_class fcvt_I2D(regD dst, mRegI src) %{
5946 src : RD(read);
5947 dst : WB(write);
5948 DECODE : ID;
5949 FPU1 : CA;
5950 %}
// double -> int conversion, executed on FPU1.
5952 pipe_class fcvt_D2I(mRegI dst, regD src) %{
5953 src : RD(read);
5954 dst : WB(write);
5955 DECODE : ID;
5956 FPU1 : CA;
5957 %}
// FPR -> GPR move (mfc1); modeled on the memory port.
5959 pipe_class pipe_mfc1(mRegI dst, regD src) %{
5960 src : RD(read);
5961 dst : WB(write);
5962 DECODE : ID;
5963 MEM : RD;
5964 %}
// GPR -> FPR move (mtc1); occupies the memory port for 5 cycles.
5966 pipe_class pipe_mtc1(regD dst, mRegI src) %{
5967 src : RD(read);
5968 dst : WB(write);
5969 DECODE : ID;
5970 MEM : RD(5);
5971 %}
// NOTE(review): sqrt is a unary operation, yet this class declares two
// source operands (src1, src2); src2 appears unused by matching rules --
// confirm against the instructs that reference fpu_sqrt.
5973 //No.23 Floating sqrt operation : dst <-- reg1 sqrt reg2
5974 pipe_class fpu_sqrt(regF dst, regF src1, regF src2) %{
5975 multiple_bundles;
5976 src1 : RD(read);
5977 src2 : RD(read);
5978 dst : WB(write);
5979 DECODE : ID;
5980 FPU2 : CA;
5981 %}
5983 //No.11 Load Floating from Memory :
5984 pipe_class fpu_loadF(regF dst, memory mem) %{
5985 instruction_count(1);
5986 mem : RD(read);
5987 dst : WB(write)+3;
5988 DECODE : ID;
5989 MEM : RD;
5990 %}
5992 //No.12 Store Floating to Memory :
5993 pipe_class fpu_storeF(regF src, memory mem) %{
5994 instruction_count(1);
5995 mem : RD(read);
5996 src : RD(read);
5997 DECODE : ID;
5998 MEM : RD;
5999 %}
6001 //No.13 FPU Conditional branch :
6002 pipe_class pipe_fpu_branch(regF src1, regF src2, label labl ) %{
6003 multiple_bundles;
6004 src1 : RD(read);
6005 src2 : RD(read);
6006 DECODE : ID;
6007 BR : RD;
6008 %}
6010 //No.14 Floating FPU reg operation : dst <-- op reg
6011 pipe_class fpu1_regF(regF dst, regF src) %{
6012 src : RD(read);
6013 dst : WB(write);
6014 DECODE : ID;
6015 FPU : CA;
6016 %}
// Catch-all for long multi-instruction memory sequences (e.g. atomics):
// serializes the pipeline with a fixed 30-cycle latency estimate.
6018 pipe_class long_memory_op() %{
6019 instruction_count(10); multiple_bundles; force_serialization;
6020 fixed_latency(30);
6021 %}
// Call that uses the branch unit; 200-cycle fixed latency estimate.
6023 pipe_class simple_call() %{
6024 instruction_count(10); multiple_bundles; force_serialization;
6025 fixed_latency(200);
6026 BR : RD;
6027 %}
6029 pipe_class call() %{
6030 instruction_count(10); multiple_bundles; force_serialization;
6031 fixed_latency(200);
6032 %}
// Conservative catch-all class for any multi-instruction expansion that has
// no more precise model.
6034 //FIXME:
6035 //No.9 Piple slow : for multi-instructions
6036 pipe_class pipe_slow( ) %{
6037 instruction_count(20);
6038 force_serialization;
6039 multiple_bundles;
6040 fixed_latency(50);
6041 %}
6043 %}
6047 //----------INSTRUCTIONS-------------------------------------------------------
6048 //
6049 // match -- States which machine-independent subtree may be replaced
6050 // by this instruction.
6051 // ins_cost -- The estimated cost of this instruction is used by instruction
6052 // selection to identify a minimum cost tree of machine
6053 // instructions that matches a tree of machine-independent
6054 // instructions.
6055 // format -- A string providing the disassembly for this instruction.
6056 // The value of an instruction's operand may be inserted
6057 // by referring to it with a '$' prefix.
6058 // opcode -- Three instruction opcodes may be provided. These are referred
6059 // to within an encode class as $primary, $secondary, and $tertiary
6060 // respectively. The primary opcode is commonly used to
6061 // indicate the type of machine instruction, while secondary
6062 // and tertiary are often used for prefix options or addressing
6063 // modes.
6064 // ins_encode -- A list of encode classes with parameters. The encode class
6065 // name must have been defined in an 'enc_class' specification
6066 // in the encode section of the architecture description.
6069 // Load Integer
6070 instruct loadI(mRegI dst, memory mem) %{
6071 match(Set dst (LoadI mem));
6073 ins_cost(125);
6074 format %{ "lw $dst, $mem #@loadI" %}
6075 ins_encode (load_I_enc(dst, mem));
6076 ins_pipe( ialu_loadI );
6077 %}
// Fused load+sign-extend: lw already sign-extends to 64 bits on MIPS64,
// so the ConvI2L comes for free.
6079 instruct loadI_convI2L(mRegL dst, memory mem) %{
6080 match(Set dst (ConvI2L (LoadI mem)));
6082 ins_cost(125);
6083 format %{ "lw $dst, $mem #@loadI_convI2L" %}
6084 ins_encode (load_I_enc(dst, mem));
6085 ins_pipe( ialu_loadI );
6086 %}
// The next four rules fold a narrowing (shift-pair or mask) of a LoadI into
// the corresponding narrower load instruction.
6088 // Load Integer (32 bit signed) to Byte (8 bit signed)
6089 instruct loadI2B(mRegI dst, memory mem, immI_24 twentyfour) %{
6090 match(Set dst (RShiftI (LShiftI (LoadI mem) twentyfour) twentyfour));
6092 ins_cost(125);
6093 format %{ "lb $dst, $mem\t# int -> byte #@loadI2B" %}
6094 ins_encode(load_B_enc(dst, mem));
6095 ins_pipe(ialu_loadI);
6096 %}
6098 // Load Integer (32 bit signed) to Unsigned Byte (8 bit UNsigned)
6099 instruct loadI2UB(mRegI dst, memory mem, immI_255 mask) %{
6100 match(Set dst (AndI (LoadI mem) mask));
6102 ins_cost(125);
6103 format %{ "lbu $dst, $mem\t# int -> ubyte #@loadI2UB" %}
6104 ins_encode(load_UB_enc(dst, mem));
6105 ins_pipe(ialu_loadI);
6106 %}
6108 // Load Integer (32 bit signed) to Short (16 bit signed)
6109 instruct loadI2S(mRegI dst, memory mem, immI_16 sixteen) %{
6110 match(Set dst (RShiftI (LShiftI (LoadI mem) sixteen) sixteen));
6112 ins_cost(125);
6113 format %{ "lh $dst, $mem\t# int -> short #@loadI2S" %}
6114 ins_encode(load_S_enc(dst, mem));
6115 ins_pipe(ialu_loadI);
6116 %}
6118 // Load Integer (32 bit signed) to Unsigned Short/Char (16 bit UNsigned)
6119 instruct loadI2US(mRegI dst, memory mem, immI_65535 mask) %{
6120 match(Set dst (AndI (LoadI mem) mask));
6122 ins_cost(125);
6123 format %{ "lhu $dst, $mem\t# int -> ushort/char #@loadI2US" %}
6124 ins_encode(load_C_enc(dst, mem));
6125 ins_pipe(ialu_loadI);
6126 %}
// 64-bit loads are naturally atomic on MIPS64, hence the commented-out
// require_atomic_access predicate.
6128 // Load Long.
6129 instruct loadL(mRegL dst, memory mem) %{
6130 // predicate(!((LoadLNode*)n)->require_atomic_access());
6131 match(Set dst (LoadL mem));
6133 ins_cost(250);
6134 format %{ "ld $dst, $mem #@loadL" %}
6135 ins_encode(load_L_enc(dst, mem));
6136 ins_pipe( ialu_loadL );
6137 %}
// Unaligned long load: currently emitted as a plain ld (see FIXME); the
// higher cost discourages the matcher from preferring it.
6139 // Load Long - UNaligned
6140 instruct loadL_unaligned(mRegL dst, memory mem) %{
6141 match(Set dst (LoadL_unaligned mem));
6143 // FIXME: Jin: Need more effective ldl/ldr
6144 ins_cost(450);
6145 format %{ "ld $dst, $mem #@loadL_unaligned\n\t" %}
6146 ins_encode(load_L_enc(dst, mem));
6147 ins_pipe( ialu_loadL );
6148 %}
6150 // Store Long
6151 instruct storeL_reg(memory mem, mRegL src) %{
6152 match(Set mem (StoreL mem src));
6154 ins_cost(200);
6155 format %{ "sd $mem, $src #@storeL_reg\n" %}
6156 ins_encode(store_L_reg_enc(mem, src));
6157 ins_pipe( ialu_storeL );
6158 %}
// Store of constant zero: cheaper because the zero register is used
// directly (no constant materialization).
6161 instruct storeL_immL0(memory mem, immL0 zero) %{
6162 match(Set mem (StoreL mem zero));
6164 ins_cost(180);
6165 format %{ "sd $mem, zero #@storeL_immL0" %}
6166 ins_encode(store_L_immL0_enc(mem, zero));
6167 ins_pipe( ialu_storeL );
6168 %}
6170 // Load Compressed Pointer
6171 instruct loadN(mRegN dst, memory mem)
6172 %{
6173 match(Set dst (LoadN mem));
6175 ins_cost(125); // XXX
6176 format %{ "lwu $dst, $mem\t# compressed ptr @ loadN" %}
6177 ins_encode (load_N_enc(dst, mem));
6178 ins_pipe( ialu_loadI ); // XXX
6179 %}
6181 // Load Pointer
6182 instruct loadP(mRegP dst, memory mem) %{
6183 match(Set dst (LoadP mem));
6185 ins_cost(125);
6186 format %{ "ld $dst, $mem #@loadP" %}
6187 ins_encode (load_P_enc(dst, mem));
6188 ins_pipe( ialu_loadI );
6189 %}
// Uncompressed klass pointer load: same encoding as loadP.
6191 // Load Klass Pointer
6192 instruct loadKlass(mRegP dst, memory mem) %{
6193 match(Set dst (LoadKlass mem));
6195 ins_cost(125);
6196 format %{ "MOV $dst,$mem @ loadKlass" %}
6197 ins_encode (load_P_enc(dst, mem));
6198 ins_pipe( ialu_loadI );
6199 %}
6201 // Load narrow Klass Pointer
6202 instruct loadNKlass(mRegN dst, memory mem)
6203 %{
6204 match(Set dst (LoadNKlass mem));
6206 ins_cost(125); // XXX
6207 format %{ "lwu $dst, $mem\t# compressed klass ptr @ loadNKlass" %}
6208 ins_encode (load_N_enc(dst, mem));
6209 ins_pipe( ialu_loadI ); // XXX
6210 %}
// Materialize a 32-bit integer constant into a register.
6212 // Load Constant
6213 instruct loadConI(mRegI dst, immI src) %{
6214 match(Set dst src);
6216 ins_cost(150);
6217 format %{ "mov $dst, $src #@loadConI" %}
6218 ins_encode %{
6219 Register dst = $dst$$Register;
6220 int value = $src$$constant;
6221 __ move(dst, value);
6222 %}
6223 ins_pipe( ialu_regI_regI );
6224 %}
// Materialize an arbitrary 64-bit constant (up to a multi-instruction
// set64 sequence).
6227 instruct loadConL_set64(mRegL dst, immL src) %{
6228 match(Set dst src);
6229 ins_cost(120);
6230 format %{ "li $dst, $src @ loadConL_set64" %}
6231 ins_encode %{
6232 __ set64($dst$$Register, $src$$constant);
6233 %}
6234 ins_pipe(ialu_regL_regL);
6235 %}
6237 /*
6238 // Load long value from constant table (predicated by immL_expensive).
6239 instruct loadConL_load(mRegL dst, immL_expensive src) %{
6240 match(Set dst src);
6241 ins_cost(150);
6242 format %{ "ld $dst, $constantoffset[$constanttablebase] # load long $src from table @ loadConL_ldx" %}
6243 ins_encode %{
6244 int con_offset = $constantoffset($src);
6246 if (Assembler::is_simm16(con_offset)) {
6247 __ ld($dst$$Register, $constanttablebase, con_offset);
6248 } else {
6249 __ set64(AT, con_offset);
6250 if (UseLoongsonISA) {
6251 __ gsldx($dst$$Register, $constanttablebase, AT, 0);
6252 } else {
6253 __ daddu(AT, $constanttablebase, AT);
6254 __ ld($dst$$Register, AT, 0);
6255 }
6256 }
6257 %}
6258 ins_pipe(ialu_loadI);
6259 %}
6260 */
// 16-bit signed constant fits directly in a single daddiu from R0.
6262 instruct loadConL16(mRegL dst, immL16 src) %{
6263 match(Set dst src);
6264 ins_cost(105);
6265 format %{ "mov $dst, $src #@loadConL16" %}
6266 ins_encode %{
6267 Register dst_reg = as_Register($dst$$reg);
6268 int value = $src$$constant;
6269 __ daddiu(dst_reg, R0, value);
6270 %}
6271 ins_pipe( ialu_regL_regL );
6272 %}
// Zero constant: cheapest form, just copy the hardwired zero register.
6275 instruct loadConL0(mRegL dst, immL0 src) %{
6276 match(Set dst src);
6277 ins_cost(100);
6278 format %{ "mov $dst, zero #@loadConL0" %}
6279 ins_encode %{
6280 Register dst_reg = as_Register($dst$$reg);
6281 __ daddu(dst_reg, R0, R0);
6282 %}
6283 ins_pipe( ialu_regL_regL );
6284 %}
// Array length load (LoadRange): plain 32-bit load.
6286 // Load Range
6287 instruct loadRange(mRegI dst, memory mem) %{
6288 match(Set dst (LoadRange mem));
6290 ins_cost(125);
6291 format %{ "MOV $dst,$mem @ loadRange" %}
6292 ins_encode(load_I_enc(dst, mem));
6293 ins_pipe( ialu_loadI );
6294 %}
// Store a 64-bit pointer.
6297 instruct storeP(memory mem, mRegP src ) %{
6298 match(Set mem (StoreP mem src));
6300 ins_cost(125);
6301 format %{ "sd $src, $mem #@storeP" %}
6302 ins_encode(store_P_reg_enc(mem, src));
6303 ins_pipe( ialu_storeI );
6304 %}
6306 // Store NULL Pointer, mark word, or other simple pointer constant.
6307 instruct storeImmP0(memory mem, immP0 zero) %{
6308 match(Set mem (StoreP mem zero));
6310 ins_cost(125);
6311 format %{ "mov $mem, $zero #@storeImmP0" %}
6312 ins_encode(store_P_immP0_enc(mem));
6313 ins_pipe( ialu_storeI );
6314 %}
6316 // Store Byte Immediate
6317 instruct storeImmB(memory mem, immI8 src) %{
6318 match(Set mem (StoreB mem src));
6320 ins_cost(150);
6321 format %{ "movb $mem, $src #@storeImmB" %}
6322 ins_encode(store_B_immI_enc(mem, src));
6323 ins_pipe( ialu_storeI );
6324 %}
6326 // Store Compressed Pointer
6327 instruct storeN(memory mem, mRegN src)
6328 %{
6329 match(Set mem (StoreN mem src));
6331 ins_cost(125); // XXX
6332 format %{ "sw $mem, $src\t# compressed ptr @ storeN" %}
6333 ins_encode(store_N_reg_enc(mem, src));
6334 ins_pipe( ialu_storeI );
6335 %}
6337 instruct storeNKlass(memory mem, mRegN src)
6338 %{
6339 match(Set mem (StoreNKlass mem src));
6341 ins_cost(125); // XXX
6342 format %{ "sw $mem, $src\t# compressed klass ptr @ storeNKlass" %}
6343 ins_encode(store_N_reg_enc(mem, src));
6344 ins_pipe( ialu_storeI );
6345 %}
// Store of compressed null: only legal when both narrow-oop and
// narrow-klass bases are NULL (encoded null is then the raw value 0).
// NOTE(review): the format string mentions R12 but the encoding stores the
// zero constant -- confirm against storeImmN0_enc.
6347 instruct storeImmN0(memory mem, immN0 zero)
6348 %{
6349 predicate(Universe::narrow_oop_base() == NULL && Universe::narrow_klass_base() == NULL);
6350 match(Set mem (StoreN mem zero));
6352 ins_cost(125); // XXX
6353 format %{ "storeN0 $mem, R12\t# compressed ptr" %}
6354 ins_encode(storeImmN0_enc(mem, zero));
6355 ins_pipe( ialu_storeI );
6356 %}
6358 // Store Byte
6359 instruct storeB(memory mem, mRegI src) %{
6360 match(Set mem (StoreB mem src));
6362 ins_cost(125);
6363 format %{ "sb $src, $mem #@storeB" %}
6364 ins_encode(store_B_reg_enc(mem, src));
6365 ins_pipe( ialu_storeI );
6366 %}
// Byte store of a truncated long: sb only uses the low 8 bits, so the
// ConvL2I can be folded away.
6368 instruct storeB_convL2I(memory mem, mRegL src) %{
6369 match(Set mem (StoreB mem (ConvL2I src)));
6371 ins_cost(125);
6372 format %{ "sb $src, $mem #@storeB_convL2I" %}
6373 ins_encode(store_B_reg_enc(mem, src));
6374 ins_pipe( ialu_storeI );
6375 %}
6377 // Load Byte (8bit signed)
6378 instruct loadB(mRegI dst, memory mem) %{
6379 match(Set dst (LoadB mem));
6381 ins_cost(125);
6382 format %{ "lb $dst, $mem #@loadB" %}
6383 ins_encode(load_B_enc(dst, mem));
6384 ins_pipe( ialu_loadI );
6385 %}
// lb sign-extends to 64 bits, so ConvI2L folds into the load.
6387 instruct loadB_convI2L(mRegL dst, memory mem) %{
6388 match(Set dst (ConvI2L (LoadB mem)));
6390 ins_cost(125);
6391 format %{ "lb $dst, $mem #@loadB_convI2L" %}
6392 ins_encode(load_B_enc(dst, mem));
6393 ins_pipe( ialu_loadI );
6394 %}
6396 // Load Byte (8bit UNsigned)
6397 instruct loadUB(mRegI dst, memory mem) %{
6398 match(Set dst (LoadUB mem));
6400 ins_cost(125);
6401 format %{ "lbu $dst, $mem #@loadUB" %}
6402 ins_encode(load_UB_enc(dst, mem));
6403 ins_pipe( ialu_loadI );
6404 %}
// lbu zero-extends, which is also correct for the widened long value.
6406 instruct loadUB_convI2L(mRegL dst, memory mem) %{
6407 match(Set dst (ConvI2L (LoadUB mem)));
6409 ins_cost(125);
6410 format %{ "lbu $dst, $mem #@loadUB_convI2L" %}
6411 ins_encode(load_UB_enc(dst, mem));
6412 ins_pipe( ialu_loadI );
6413 %}
6415 // Load Short (16bit signed)
6416 instruct loadS(mRegI dst, memory mem) %{
6417 match(Set dst (LoadS mem));
6419 ins_cost(125);
6420 format %{ "lh $dst, $mem #@loadS" %}
6421 ins_encode(load_S_enc(dst, mem));
6422 ins_pipe( ialu_loadI );
6423 %}
6425 // Load Short (16 bit signed) to Byte (8 bit signed)
6426 instruct loadS2B(mRegI dst, memory mem, immI_24 twentyfour) %{
6427 match(Set dst (RShiftI (LShiftI (LoadS mem) twentyfour) twentyfour));
6429 ins_cost(125);
6430 format %{ "lb $dst, $mem\t# short -> byte #@loadS2B" %}
6431 ins_encode(load_B_enc(dst, mem));
6432 ins_pipe(ialu_loadI);
6433 %}
6435 instruct loadS_convI2L(mRegL dst, memory mem) %{
6436 match(Set dst (ConvI2L (LoadS mem)));
6438 ins_cost(125);
6439 format %{ "lh $dst, $mem #@loadS_convI2L" %}
6440 ins_encode(load_S_enc(dst, mem));
6441 ins_pipe( ialu_loadI );
6442 %}
6444 // Store Integer Immediate
6445 instruct storeImmI(memory mem, immI src) %{
6446 match(Set mem (StoreI mem src));
6448 ins_cost(150);
6449 format %{ "mov $mem, $src #@storeImmI" %}
6450 ins_encode(store_I_immI_enc(mem, src));
6451 ins_pipe( ialu_storeI );
6452 %}
6454 // Store Integer
6455 instruct storeI(memory mem, mRegI src) %{
6456 match(Set mem (StoreI mem src));
6458 ins_cost(125);
6459 format %{ "sw $mem, $src #@storeI" %}
6460 ins_encode(store_I_reg_enc(mem, src));
6461 ins_pipe( ialu_storeI );
6462 %}
// sw stores only the low 32 bits, so the ConvL2I truncation is free.
6464 instruct storeI_convL2I(memory mem, mRegL src) %{
6465 match(Set mem (StoreI mem (ConvL2I src)));
6467 ins_cost(125);
6468 format %{ "sw $mem, $src #@storeI_convL2I" %}
6469 ins_encode(store_I_reg_enc(mem, src));
6470 ins_pipe( ialu_storeI );
6471 %}
6473 // Load Float
6474 instruct loadF(regF dst, memory mem) %{
6475 match(Set dst (LoadF mem));
6477 ins_cost(150);
6478 format %{ "loadF $dst, $mem #@loadF" %}
6479 ins_encode(load_F_enc(dst, mem));
6480 ins_pipe( ialu_loadI );
6481 %}
// Materialize a pointer constant, attaching the proper relocation record
// (metadata or oop) so the GC / class-redefinition machinery can patch it.
// NOTE(review): there is no final else -- a constant with any other reloc
// type would emit nothing; presumably such constants cannot reach here,
// but confirm against the matcher's immP operand definition.
6483 instruct loadConP_general(mRegP dst, immP src) %{
6484 match(Set dst src);
6486 ins_cost(120);
6487 format %{ "li $dst, $src #@loadConP_general" %}
6489 ins_encode %{
6490 Register dst = $dst$$Register;
6491 long* value = (long*)$src$$constant;
6493 if($src->constant_reloc() == relocInfo::metadata_type){
6494 int klass_index = __ oop_recorder()->find_index((Klass*)value);
6495 RelocationHolder rspec = metadata_Relocation::spec(klass_index);
6497 __ relocate(rspec);
6498 __ patchable_set48(dst, (long)value);
6499 }else if($src->constant_reloc() == relocInfo::oop_type){
6500 int oop_index = __ oop_recorder()->find_index((jobject)value);
6501 RelocationHolder rspec = oop_Relocation::spec(oop_index);
6503 __ relocate(rspec);
6504 __ patchable_set48(dst, (long)value);
6505 } else if ($src->constant_reloc() == relocInfo::none) {
6506 __ set64(dst, (long)value);
6507 }
6508 %}
6510 ins_pipe( ialu_regI_regI );
6511 %}
6513 /*
6514 instruct loadConP_load(mRegP dst, immP_load src) %{
6515 match(Set dst src);
6517 ins_cost(100);
6518 format %{ "ld $dst, [$constanttablebase + $constantoffset] load from constant table: ptr=$src @ loadConP_load" %}
6520 ins_encode %{
6522 int con_offset = $constantoffset($src);
6524 if (Assembler::is_simm16(con_offset)) {
6525 __ ld($dst$$Register, $constanttablebase, con_offset);
6526 } else {
6527 __ set64(AT, con_offset);
6528 if (UseLoongsonISA) {
6529 __ gsldx($dst$$Register, $constanttablebase, AT, 0);
6530 } else {
6531 __ daddu(AT, $constanttablebase, AT);
6532 __ ld($dst$$Register, AT, 0);
6533 }
6534 }
6535 %}
6537 ins_pipe(ialu_loadI);
6538 %}
6539 */
// Cheap non-oop pointer constant: no relocation needed, plain set64.
6541 instruct loadConP_no_oop_cheap(mRegP dst, immP_no_oop_cheap src) %{
6542 match(Set dst src);
6544 ins_cost(80);
6545 format %{ "li $dst, $src @ loadConP_no_oop_cheap" %}
6547 ins_encode %{
6548 __ set64($dst$$Register, $src$$constant);
6549 %}
6551 ins_pipe(ialu_regI_regI);
6552 %}
// Safepoint-polling page address: lowest cost so it wins the match.
6555 instruct loadConP_poll(mRegP dst, immP_poll src) %{
6556 match(Set dst src);
6558 ins_cost(50);
6559 format %{ "li $dst, $src #@loadConP_poll" %}
6561 ins_encode %{
6562 Register dst = $dst$$Register;
6563 intptr_t value = (intptr_t)$src$$constant;
6565 __ set64(dst, (jlong)value);
6566 %}
6568 ins_pipe( ialu_regI_regI );
6569 %}
// NULL pointer constant: copy the hardwired zero register.
6571 instruct loadConP0(mRegP dst, immP0 src)
6572 %{
6573 match(Set dst src);
6575 ins_cost(50);
6576 format %{ "mov $dst, R0\t# ptr" %}
6577 ins_encode %{
6578 Register dst_reg = $dst$$Register;
6579 __ daddu(dst_reg, R0, R0);
6580 %}
6581 ins_pipe( ialu_regI_regI );
6582 %}
// Compressed NULL: encoded null is raw 0 regardless of heap base.
6584 instruct loadConN0(mRegN dst, immN0 src) %{
6585 match(Set dst src);
6586 format %{ "move $dst, R0\t# compressed NULL ptr" %}
6587 ins_encode %{
6588 __ move($dst$$Register, R0);
6589 %}
6590 ins_pipe( ialu_regI_regI );
6591 %}
// Narrow-oop constant: set_narrow_oop records the oop relocation so the
// patched immediate stays in sync with the GC.
6593 instruct loadConN(mRegN dst, immN src) %{
6594 match(Set dst src);
6596 ins_cost(125);
6597 format %{ "li $dst, $src\t# compressed ptr @ loadConN" %}
6598 ins_encode %{
6599 Register dst = $dst$$Register;
6600 __ set_narrow_oop(dst, (jobject)$src$$constant);
6601 %}
6602 ins_pipe( ialu_regI_regI ); // XXX
6603 %}
6605 instruct loadConNKlass(mRegN dst, immNKlass src) %{
6606 match(Set dst src);
6608 ins_cost(125);
6609 format %{ "li $dst, $src\t# compressed klass ptr @ loadConNKlass" %}
6610 ins_encode %{
6611 Register dst = $dst$$Register;
6612 __ set_narrow_klass(dst, (Klass*)$src$$constant);
6613 %}
6614 ins_pipe( ialu_regI_regI ); // XXX
6615 %}
6617 //FIXME
6618 // Tail Call; Jump from runtime stub to Java code.
6619 // Also known as an 'interprocedural jump'.
6620 // Target of jump will eventually return to caller.
6621 // TailJump below removes the return address.
// The method oop is handed to the callee in S3; RA is pushed first because
// generate_forward_exception() expects it on the stack (see comment below).
6622 instruct TailCalljmpInd(mRegP jump_target, mRegP method_oop) %{
6623 match(TailCall jump_target method_oop );
6624 ins_cost(300);
6625 format %{ "JMP $jump_target \t# @TailCalljmpInd" %}
6627 ins_encode %{
6628 Register target = $jump_target$$Register;
6629 Register oop = $method_oop$$Register;
6631 /* 2012/10/12 Jin: RA will be used in generate_forward_exception() */
6632 __ push(RA);
6634 __ move(S3, oop);
6635 __ jr(target);
6636 __ nop();
6637 %}
6639 ins_pipe( pipe_jump );
6640 %}
6642 // Create exception oop: created by stack-crawling runtime code.
6643 // Created exception is now available to this handler, and is setup
6644 // just prior to jumping to this handler. No code emitted.
6645 instruct CreateException( a0_RegP ex_oop )
6646 %{
6647 match(Set ex_oop (CreateEx));
6649 // use the following format syntax
6650 format %{ "# exception oop is in A0; no code emitted @CreateException" %}
6651 ins_encode %{
6652 /* Jin: X86 leaves this function empty */
6653 __ block_comment("CreateException is empty in X86/MIPS");
6654 %}
6655 ins_pipe( empty );
6656 // ins_pipe( pipe_jump );
6657 %}
6660 /* 2012/9/14 Jin: The mechanism of exception handling is clear now.
6662 - Common try/catch:
6663 2012/9/14 Jin: [stubGenerator_mips.cpp] generate_forward_exception()
6664 |- V0, V1 are created
6665 |- T9 <= SharedRuntime::exception_handler_for_return_address
6666 `- jr T9
6667 `- the caller's exception_handler
6668 `- jr OptoRuntime::exception_blob
6669 `- here
6670 - Rethrow(e.g. 'unwind'):
6671 * The callee:
6672 |- an exception is triggered during execution
6673 `- exits the callee method through RethrowException node
6674 |- The callee pushes exception_oop(T0) and exception_pc(RA)
6675 `- The callee jumps to OptoRuntime::rethrow_stub()
6676 * In OptoRuntime::rethrow_stub:
6677 |- The VM calls _rethrow_Java to determine the return address in the caller method
6678 `- exits the stub with tailjmpInd
6679 |- pops exception_oop(V0) and exception_pc(V1)
6680 `- jumps to the return address(usually an exception_handler)
6681 * The caller:
6682 `- continues processing the exception_blob with V0/V1
6683 */
6685 /*
6686 Disassembling OptoRuntime::rethrow_stub()
6688 ; locals
6689 0x2d3bf320: addiu sp, sp, 0xfffffff8
6690 0x2d3bf324: sw ra, 0x4(sp)
6691 0x2d3bf328: sw fp, 0x0(sp)
6692 0x2d3bf32c: addu fp, sp, zero
6693 0x2d3bf330: addiu sp, sp, 0xfffffff0
6694 0x2d3bf334: sw ra, 0x8(sp)
6695 0x2d3bf338: sw t0, 0x4(sp)
6696 0x2d3bf33c: sw sp, 0x0(sp)
6698 ; get_thread(S2)
6699 0x2d3bf340: addu s2, sp, zero
6700 0x2d3bf344: srl s2, s2, 12
6701 0x2d3bf348: sll s2, s2, 2
6702 0x2d3bf34c: lui at, 0x2c85
6703 0x2d3bf350: addu at, at, s2
6704 0x2d3bf354: lw s2, 0xffffcc80(at)
6706 0x2d3bf358: lw s0, 0x0(sp)
6707 0x2d3bf35c: sw s0, 0x118(s2) // last_sp -> thread
6708 0x2d3bf360: sw s2, 0xc(sp)
6710 ; OptoRuntime::rethrow_C(oopDesc* exception, JavaThread* thread, address ret_pc)
6711 0x2d3bf364: lw a0, 0x4(sp)
6712 0x2d3bf368: lw a1, 0xc(sp)
6713 0x2d3bf36c: lw a2, 0x8(sp)
6714 ;; Java_To_Runtime
6715 0x2d3bf370: lui t9, 0x2c34
6716 0x2d3bf374: addiu t9, t9, 0xffff8a48
6717 0x2d3bf378: jalr t9
6718 0x2d3bf37c: nop
6720 0x2d3bf380: addu s3, v0, zero ; S3: SharedRuntime::raw_exception_handler_for_return_address()
6722 0x2d3bf384: lw s0, 0xc(sp)
6723 0x2d3bf388: sw zero, 0x118(s0)
6724 0x2d3bf38c: sw zero, 0x11c(s0)
6725 0x2d3bf390: lw s1, 0x144(s0) ; ex_oop: S1
6726 0x2d3bf394: addu s2, s0, zero
6727 0x2d3bf398: sw zero, 0x144(s2)
6728 0x2d3bf39c: lw s0, 0x4(s2)
6729 0x2d3bf3a0: addiu s4, zero, 0x0
6730 0x2d3bf3a4: bne s0, s4, 0x2d3bf3d4
6731 0x2d3bf3a8: nop
6732 0x2d3bf3ac: addiu sp, sp, 0x10
6733 0x2d3bf3b0: addiu sp, sp, 0x8
6734 0x2d3bf3b4: lw ra, 0xfffffffc(sp)
6735 0x2d3bf3b8: lw fp, 0xfffffff8(sp)
6736 0x2d3bf3bc: lui at, 0x2b48
6737 0x2d3bf3c0: lw at, 0x100(at)
6739 ; tailjmpInd: Restores exception_oop & exception_pc
6740 0x2d3bf3c4: addu v1, ra, zero
6741 0x2d3bf3c8: addu v0, s1, zero
6742 0x2d3bf3cc: jr s3
6743 0x2d3bf3d0: nop
6744 ; Exception:
6745 0x2d3bf3d4: lui s1, 0x2cc8 ; generate_forward_exception()
6746 0x2d3bf3d8: addiu s1, s1, 0x40
6747 0x2d3bf3dc: addiu s2, zero, 0x0
6748 0x2d3bf3e0: addiu sp, sp, 0x10
6749 0x2d3bf3e4: addiu sp, sp, 0x8
6750 0x2d3bf3e8: lw ra, 0xfffffffc(sp)
6751 0x2d3bf3ec: lw fp, 0xfffffff8(sp)
6752 0x2d3bf3f0: lui at, 0x2b48
6753 0x2d3bf3f4: lw at, 0x100(at)
6754 ; TailCalljmpInd
6755 __ push(RA); ; to be used in generate_forward_exception()
6756 0x2d3bf3f8: addu t7, s2, zero
6757 0x2d3bf3fc: jr s1
6758 0x2d3bf400: nop
6759 */
6760 // Rethrow exception:
6761 // The exception oop will come in the first argument position.
6762 // Then JUMP (not call) to the rethrow stub code.
// Tail-jumps (never calls) into OptoRuntime::rethrow_stub(); the stub
// address is loaded with a patchable 48-bit sequence via scratch T9 and
// marked with a runtime_call relocation.
6763 instruct RethrowException()
6764 %{
6765 match(Rethrow);
6767 // use the following format syntax
6768 format %{ "JMP rethrow_stub #@RethrowException" %}
6769 ins_encode %{
6770 __ block_comment("@ RethrowException");
6772 cbuf.set_insts_mark();
6773 cbuf.relocate(cbuf.insts_mark(), runtime_call_Relocation::spec());
6775 // call OptoRuntime::rethrow_stub to get the exception handler in parent method
6776 __ patchable_set48(T9, (jlong)OptoRuntime::rethrow_stub());
6777 __ jr(T9);
6778 __ nop();
6779 %}
6780 ins_pipe( pipe_jump );
6781 %}
// Pointer compare against NULL + branch, fused into a single beq/bne
// against the hardwired zero register.  Only eq/ne are reachable; the
// unsigned orderings are commented out (a NULL compare is never ordered).
// NOTE(review): `&L` is the address of a C++ reference and is always
// non-null, so the `else` arms are dead code (a compiler will warn
// "address of reference always true"); kept for symmetry with the rest
// of the port.  The trailing nop fills the MIPS branch-delay slot.
6783 instruct branchConP_zero(cmpOpU cmp, mRegP op1, immP0 zero, label labl) %{
6784 match(If cmp (CmpP op1 zero));
6785 effect(USE labl);
6787 ins_cost(180);
6788 format %{ "b$cmp $op1, R0, $labl #@branchConP_zero" %}
6790 ins_encode %{
6791 Register op1 = $op1$$Register;
6792 Register op2 = R0;
6793 Label &L = *($labl$$label);
6794 int flag = $cmp$$cmpcode;
6796 switch(flag)
6797 {
6798 case 0x01: //equal
6799 if (&L)
6800 __ beq(op1, op2, L);
6801 else
6802 __ beq(op1, op2, (int)0);
6803 break;
6804 case 0x02: //not_equal
6805 if (&L)
6806 __ bne(op1, op2, L);
6807 else
6808 __ bne(op1, op2, (int)0);
6809 break;
6810 /*
6811 case 0x03: //above
6812 __ sltu(AT, op2, op1);
6813 if(&L)
6814 __ bne(R0, AT, L);
6815 else
6816 __ bne(R0, AT, (int)0);
6817 break;
6818 case 0x04: //above_equal
6819 __ sltu(AT, op1, op2);
6820 if(&L)
6821 __ beq(AT, R0, L);
6822 else
6823 __ beq(AT, R0, (int)0);
6824 break;
6825 case 0x05: //below
6826 __ sltu(AT, op1, op2);
6827 if(&L)
6828 __ bne(R0, AT, L);
6829 else
6830 __ bne(R0, AT, (int)0);
6831 break;
6832 case 0x06: //below_equal
6833 __ sltu(AT, op2, op1);
6834 if(&L)
6835 __ beq(AT, R0, L);
6836 else
6837 __ beq(AT, R0, (int)0);
6838 break;
6839 */
6840 default:
6841 Unimplemented();
6842 }
6843 __ nop();
6844 %}
6846 ins_pc_relative(1);
6847 ins_pipe( pipe_alu_branch );
6848 %}
// Pointer compare + branch, register-register.  Unsigned orderings are
// synthesized with sltu into the assembler scratch register AT followed
// by a beq/bne against R0 (AT is clobbered).  NOTE(review): `&L` of a
// reference is always non-null, so the `else` arms are dead code; the
// trailing nop fills the branch-delay slot.
6851 instruct branchConP(cmpOpU cmp, mRegP op1, mRegP op2, label labl) %{
6852 match(If cmp (CmpP op1 op2));
6853 // predicate(can_branch_register(_kids[0]->_leaf, _kids[1]->_leaf));
6854 effect(USE labl);
6856 ins_cost(200);
6857 format %{ "b$cmp $op1, $op2, $labl #@branchConP" %}
6859 ins_encode %{
6860 Register op1 = $op1$$Register;
6861 Register op2 = $op2$$Register;
6862 Label &L = *($labl$$label);
6863 int flag = $cmp$$cmpcode;
6865 switch(flag)
6866 {
6867 case 0x01: //equal
6868 if (&L)
6869 __ beq(op1, op2, L);
6870 else
6871 __ beq(op1, op2, (int)0);
6872 break;
6873 case 0x02: //not_equal
6874 if (&L)
6875 __ bne(op1, op2, L);
6876 else
6877 __ bne(op1, op2, (int)0);
6878 break;
6879 case 0x03: //above
6880 __ sltu(AT, op2, op1);
6881 if(&L)
6882 __ bne(R0, AT, L);
6883 else
6884 __ bne(R0, AT, (int)0);
6885 break;
6886 case 0x04: //above_equal
6887 __ sltu(AT, op1, op2);
6888 if(&L)
6889 __ beq(AT, R0, L);
6890 else
6891 __ beq(AT, R0, (int)0);
6892 break;
6893 case 0x05: //below
6894 __ sltu(AT, op1, op2);
6895 if(&L)
6896 __ bne(R0, AT, L);
6897 else
6898 __ bne(R0, AT, (int)0);
6899 break;
6900 case 0x06: //below_equal
6901 __ sltu(AT, op2, op1);
6902 if(&L)
6903 __ beq(AT, R0, L);
6904 else
6905 __ beq(AT, R0, (int)0);
6906 break;
6907 default:
6908 Unimplemented();
6909 }
6910 __ nop();
6911 %}
6913 ins_pc_relative(1);
6914 ins_pipe( pipe_alu_branch );
6915 %}
// Compressed-pointer compare against NULL + branch: encoded null is the
// raw value 0, so a direct beq/bne against R0 suffices.  Only eq/ne are
// meaningful here.  NOTE(review): `&L` is always non-null (see sibling
// branch rules); the nop fills the branch-delay slot.
6917 instruct cmpN_null_branch(cmpOp cmp, mRegN op1, immN0 null, label labl) %{
6918 match(If cmp (CmpN op1 null));
6919 effect(USE labl);
6921 ins_cost(180);
6922 format %{ "CMP $op1,0\t! compressed ptr\n\t"
6923 "BP$cmp $labl @ cmpN_null_branch" %}
6924 ins_encode %{
6925 Register op1 = $op1$$Register;
6926 Register op2 = R0;
6927 Label &L = *($labl$$label);
6928 int flag = $cmp$$cmpcode;
6930 switch(flag)
6931 {
6932 case 0x01: //equal
6933 if (&L)
6934 __ beq(op1, op2, L);
6935 else
6936 __ beq(op1, op2, (int)0);
6937 break;
6938 case 0x02: //not_equal
6939 if (&L)
6940 __ bne(op1, op2, L);
6941 else
6942 __ bne(op1, op2, (int)0);
6943 break;
6944 default:
6945 Unimplemented();
6946 }
6947 __ nop();
6948 %}
6949 //TODO: pipe_branchP or create pipe_branchN LEE
6950 ins_pc_relative(1);
6951 ins_pipe( pipe_alu_branch );
6952 %}
// Compressed-pointer compare + branch, register-register.  Unsigned
// orderings go through sltu into scratch AT (clobbered) then beq/bne
// against R0.  NOTE(review): `&L` is always non-null; nop = delay slot.
6954 instruct cmpN_reg_branch(cmpOp cmp, mRegN op1, mRegN op2, label labl) %{
6955 match(If cmp (CmpN op1 op2));
6956 effect(USE labl);
6958 ins_cost(180);
6959 format %{ "CMP $op1,$op2\t! compressed ptr\n\t"
6960 "BP$cmp $labl" %}
6961 ins_encode %{
6962 Register op1_reg = $op1$$Register;
6963 Register op2_reg = $op2$$Register;
6964 Label &L = *($labl$$label);
6965 int flag = $cmp$$cmpcode;
6967 switch(flag)
6968 {
6969 case 0x01: //equal
6970 if (&L)
6971 __ beq(op1_reg, op2_reg, L);
6972 else
6973 __ beq(op1_reg, op2_reg, (int)0);
6974 break;
6975 case 0x02: //not_equal
6976 if (&L)
6977 __ bne(op1_reg, op2_reg, L);
6978 else
6979 __ bne(op1_reg, op2_reg, (int)0);
6980 break;
6981 case 0x03: //above
6982 __ sltu(AT, op2_reg, op1_reg);
6983 if(&L)
6984 __ bne(R0, AT, L);
6985 else
6986 __ bne(R0, AT, (int)0);
6987 break;
6988 case 0x04: //above_equal
6989 __ sltu(AT, op1_reg, op2_reg);
6990 if(&L)
6991 __ beq(AT, R0, L);
6992 else
6993 __ beq(AT, R0, (int)0);
6994 break;
6995 case 0x05: //below
6996 __ sltu(AT, op1_reg, op2_reg);
6997 if(&L)
6998 __ bne(R0, AT, L);
6999 else
7000 __ bne(R0, AT, (int)0);
7001 break;
7002 case 0x06: //below_equal
7003 __ sltu(AT, op2_reg, op1_reg);
7004 if(&L)
7005 __ beq(AT, R0, L);
7006 else
7007 __ beq(AT, R0, (int)0);
7008 break;
7009 default:
7010 Unimplemented();
7011 }
7012 __ nop();
7013 %}
7014 ins_pc_relative(1);
7015 ins_pipe( pipe_alu_branch );
7016 %}
// Unsigned integer compare + branch, register-register.  Same shape as
// branchConP: sltu into scratch AT (clobbered) for the ordered cases.
// NOTE(review): `&L` is always non-null; trailing nop = delay slot.
7018 instruct branchConIU_reg_reg(cmpOpU cmp, mRegI src1, mRegI src2, label labl) %{
7019 match( If cmp (CmpU src1 src2) );
7020 effect(USE labl);
7021 format %{ "BR$cmp $src1, $src2, $labl #@branchConIU_reg_reg" %}
7023 ins_encode %{
7024 Register op1 = $src1$$Register;
7025 Register op2 = $src2$$Register;
7026 Label &L = *($labl$$label);
7027 int flag = $cmp$$cmpcode;
7029 switch(flag)
7030 {
7031 case 0x01: //equal
7032 if (&L)
7033 __ beq(op1, op2, L);
7034 else
7035 __ beq(op1, op2, (int)0);
7036 break;
7037 case 0x02: //not_equal
7038 if (&L)
7039 __ bne(op1, op2, L);
7040 else
7041 __ bne(op1, op2, (int)0);
7042 break;
7043 case 0x03: //above
7044 __ sltu(AT, op2, op1);
7045 if(&L)
7046 __ bne(AT, R0, L);
7047 else
7048 __ bne(AT, R0, (int)0);
7049 break;
7050 case 0x04: //above_equal
7051 __ sltu(AT, op1, op2);
7052 if(&L)
7053 __ beq(AT, R0, L);
7054 else
7055 __ beq(AT, R0, (int)0);
7056 break;
7057 case 0x05: //below
7058 __ sltu(AT, op1, op2);
7059 if(&L)
7060 __ bne(AT, R0, L);
7061 else
7062 __ bne(AT, R0, (int)0);
7063 break;
7064 case 0x06: //below_equal
7065 __ sltu(AT, op2, op1);
7066 if(&L)
7067 __ beq(AT, R0, L);
7068 else
7069 __ beq(AT, R0, (int)0);
7070 break;
7071 default:
7072 Unimplemented();
7073 }
7074 __ nop();
7075 %}
7077 ins_pc_relative(1);
7078 ins_pipe( pipe_alu_branch );
7079 %}
// Branch on unsigned int compare against an arbitrary immediate:
// the immediate is first materialized into AT, then AT doubles as the
// condition register for the sltu forms (safe: sltu reads AT before
// overwriting it).  Trailing nop fills the branch delay slot.
// NOTE(review): `&L` is always non-null (address of a bound
// reference); the `else` arms look unreachable — confirm.
7082 instruct branchConIU_reg_imm(cmpOpU cmp, mRegI src1, immI src2, label labl) %{
7083 match( If cmp (CmpU src1 src2) );
7084 effect(USE labl);
7085 format %{ "BR$cmp $src1, $src2, $labl #@branchConIU_reg_imm" %}
7087 ins_encode %{
7088 Register op1 = $src1$$Register;
7089 int val = $src2$$constant;
7090 Label &L = *($labl$$label);
7091 int flag = $cmp$$cmpcode;
// Load the 32-bit immediate into the scratch register AT.
7093 __ move(AT, val);
7094 switch(flag)
7095 {
7096 case 0x01: //equal
7097 if (&L)
7098 __ beq(op1, AT, L);
7099 else
7100 __ beq(op1, AT, (int)0);
7101 break;
7102 case 0x02: //not_equal
7103 if (&L)
7104 __ bne(op1, AT, L);
7105 else
7106 __ bne(op1, AT, (int)0);
7107 break;
7108 case 0x03: //above
7109 __ sltu(AT, AT, op1);
7110 if(&L)
7111 __ bne(R0, AT, L);
7112 else
7113 __ bne(R0, AT, (int)0);
7114 break;
7115 case 0x04: //above_equal
7116 __ sltu(AT, op1, AT);
7117 if(&L)
7118 __ beq(AT, R0, L);
7119 else
7120 __ beq(AT, R0, (int)0);
7121 break;
7122 case 0x05: //below
7123 __ sltu(AT, op1, AT);
7124 if(&L)
7125 __ bne(R0, AT, L);
7126 else
7127 __ bne(R0, AT, (int)0);
7128 break;
7129 case 0x06: //below_equal
7130 __ sltu(AT, AT, op1);
7131 if(&L)
7132 __ beq(AT, R0, L);
7133 else
7134 __ beq(AT, R0, (int)0);
7135 break;
7136 default:
7137 Unimplemented();
7138 }
// Delay-slot filler for the branch emitted above.
7139 __ nop();
7140 %}
7142 ins_pc_relative(1);
7143 ins_pipe( pipe_alu_branch );
7144 %}
// Branch on SIGNED int compare (CmpI): slt, not sltu, is used, so the
// ordered cases are greater/greater_equal/less/less_equal.  The
// original case comments said "above"/"below" (unsigned terms); they
// are corrected below.  Trailing nop fills the branch delay slot.
// Clobbers AT.
// NOTE(review): `&L` is always non-null (address of a bound
// reference); the `else` arms look unreachable — confirm.
7146 instruct branchConI_reg_reg(cmpOp cmp, mRegI src1, mRegI src2, label labl) %{
7147 match( If cmp (CmpI src1 src2) );
7148 effect(USE labl);
7149 format %{ "BR$cmp $src1, $src2, $labl #@branchConI_reg_reg" %}
7151 ins_encode %{
7152 Register op1 = $src1$$Register;
7153 Register op2 = $src2$$Register;
7154 Label &L = *($labl$$label);
7155 int flag = $cmp$$cmpcode;
7157 switch(flag)
7158 {
7159 case 0x01: //equal
7160 if (&L)
7161 __ beq(op1, op2, L);
7162 else
7163 __ beq(op1, op2, (int)0);
7164 break;
7165 case 0x02: //not_equal
7166 if (&L)
7167 __ bne(op1, op2, L);
7168 else
7169 __ bne(op1, op2, (int)0);
7170 break;
7171 case 0x03: //greater (signed; slt)
7172 __ slt(AT, op2, op1);
7173 if(&L)
7174 __ bne(R0, AT, L);
7175 else
7176 __ bne(R0, AT, (int)0);
7177 break;
7178 case 0x04: //greater_equal (signed)
7179 __ slt(AT, op1, op2);
7180 if(&L)
7181 __ beq(AT, R0, L);
7182 else
7183 __ beq(AT, R0, (int)0);
7184 break;
7185 case 0x05: //less (signed)
7186 __ slt(AT, op1, op2);
7187 if(&L)
7188 __ bne(R0, AT, L);
7189 else
7190 __ bne(R0, AT, (int)0);
7191 break;
7192 case 0x06: //less_equal (signed)
7193 __ slt(AT, op2, op1);
7194 if(&L)
7195 __ beq(AT, R0, L);
7196 else
7197 __ beq(AT, R0, (int)0);
7198 break;
7199 default:
7200 Unimplemented();
7201 }
// Delay-slot filler for the branch emitted above.
7202 __ nop();
7203 %}
7205 ins_pc_relative(1);
7206 ins_pipe( pipe_alu_branch );
7207 %}
// Branch on signed int compared with constant zero.  Uses the MIPS
// compare-with-zero branches (beq/bne vs R0, bgtz/bgez/bltz/blez), so
// no scratch register is needed.  Cheaper (ins_cost 170) than the
// general immediate form.  Trailing nop fills the branch delay slot.
// NOTE(review): `&L` is always non-null (address of a bound
// reference); the `else` arms look unreachable — confirm.
7209 instruct branchConI_reg_imm0(cmpOp cmp, mRegI src1, immI0 src2, label labl) %{
7210 match( If cmp (CmpI src1 src2) );
7211 effect(USE labl);
7212 ins_cost(170);
7213 format %{ "BR$cmp $src1, $src2, $labl #@branchConI_reg_imm0" %}
7215 ins_encode %{
7216 Register op1 = $src1$$Register;
7217 // int val = $src2$$constant;
7218 Label &L = *($labl$$label);
7219 int flag = $cmp$$cmpcode;
7221 //__ move(AT, val);
7222 switch(flag)
7223 {
7224 case 0x01: //equal
7225 if (&L)
7226 __ beq(op1, R0, L);
7227 else
7228 __ beq(op1, R0, (int)0);
7229 break;
7230 case 0x02: //not_equal
7231 if (&L)
7232 __ bne(op1, R0, L);
7233 else
7234 __ bne(op1, R0, (int)0);
7235 break;
7236 case 0x03: //greater
7237 if(&L)
7238 __ bgtz(op1, L);
7239 else
7240 __ bgtz(op1, (int)0);
7241 break;
7242 case 0x04: //greater_equal
7243 if(&L)
7244 __ bgez(op1, L);
7245 else
7246 __ bgez(op1, (int)0);
7247 break;
7248 case 0x05: //less
7249 if(&L)
7250 __ bltz(op1, L);
7251 else
7252 __ bltz(op1, (int)0);
7253 break;
7254 case 0x06: //less_equal
7255 if(&L)
7256 __ blez(op1, L);
7257 else
7258 __ blez(op1, (int)0);
7259 break;
7260 default:
7261 Unimplemented();
7262 }
// Delay-slot filler for the branch emitted above.
7263 __ nop();
7264 %}
7266 ins_pc_relative(1);
7267 ins_pipe( pipe_alu_branch );
7268 %}
// Branch on signed int compared with an arbitrary immediate.  The
// immediate is materialized into AT, which then doubles as the slt
// condition register (slt reads AT before overwriting it).  Trailing
// nop fills the branch delay slot.  Clobbers AT.
// NOTE(review): `&L` is always non-null (address of a bound
// reference); the `else` arms look unreachable — confirm.
7271 instruct branchConI_reg_imm(cmpOp cmp, mRegI src1, immI src2, label labl) %{
7272 match( If cmp (CmpI src1 src2) );
7273 effect(USE labl);
7274 ins_cost(200);
7275 format %{ "BR$cmp $src1, $src2, $labl #@branchConI_reg_imm" %}
7277 ins_encode %{
7278 Register op1 = $src1$$Register;
7279 int val = $src2$$constant;
7280 Label &L = *($labl$$label);
7281 int flag = $cmp$$cmpcode;
// Load the 32-bit immediate into the scratch register AT.
7283 __ move(AT, val);
7284 switch(flag)
7285 {
7286 case 0x01: //equal
7287 if (&L)
7288 __ beq(op1, AT, L);
7289 else
7290 __ beq(op1, AT, (int)0);
7291 break;
7292 case 0x02: //not_equal
7293 if (&L)
7294 __ bne(op1, AT, L);
7295 else
7296 __ bne(op1, AT, (int)0);
7297 break;
7298 case 0x03: //greater
7299 __ slt(AT, AT, op1);
7300 if(&L)
7301 __ bne(R0, AT, L);
7302 else
7303 __ bne(R0, AT, (int)0);
7304 break;
7305 case 0x04: //greater_equal
7306 __ slt(AT, op1, AT);
7307 if(&L)
7308 __ beq(AT, R0, L);
7309 else
7310 __ beq(AT, R0, (int)0);
7311 break;
7312 case 0x05: //less
7313 __ slt(AT, op1, AT);
7314 if(&L)
7315 __ bne(R0, AT, L);
7316 else
7317 __ bne(R0, AT, (int)0);
7318 break;
7319 case 0x06: //less_equal
7320 __ slt(AT, AT, op1);
7321 if(&L)
7322 __ beq(AT, R0, L);
7323 else
7324 __ beq(AT, R0, (int)0);
7325 break;
7326 default:
7327 Unimplemented();
7328 }
// Delay-slot filler for the branch emitted above.
7329 __ nop();
7330 %}
7332 ins_pc_relative(1);
7333 ins_pipe( pipe_alu_branch );
7334 %}
// Branch on unsigned int compared with zero.  The unsigned-vs-zero
// cases collapse: "above 0" == "not equal 0"; "above_equal 0" is
// always true (unconditional beq R0,R0); "below 0" is never true, so
// case 0x05 emits nothing and returns early (no branch was emitted,
// hence no delay-slot nop is needed); "below_equal 0" == "equal 0".
// NOTE(review): `&L` is always non-null (address of a bound
// reference); the `else` arms look unreachable — confirm.
7336 instruct branchConIU_reg_imm0(cmpOpU cmp, mRegI src1, immI0 zero, label labl) %{
7337 match( If cmp (CmpU src1 zero) );
7338 effect(USE labl);
7339 format %{ "BR$cmp $src1, zero, $labl #@branchConIU_reg_imm0" %}
7341 ins_encode %{
7342 Register op1 = $src1$$Register;
7343 Label &L = *($labl$$label);
7344 int flag = $cmp$$cmpcode;
7346 switch(flag)
7347 {
7348 case 0x01: //equal
7349 if (&L)
7350 __ beq(op1, R0, L);
7351 else
7352 __ beq(op1, R0, (int)0);
7353 break;
7354 case 0x02: //not_equal
7355 if (&L)
7356 __ bne(op1, R0, L);
7357 else
7358 __ bne(op1, R0, (int)0);
7359 break;
7360 case 0x03: //above
7361 if(&L)
7362 __ bne(R0, op1, L);
7363 else
7364 __ bne(R0, op1, (int)0);
7365 break;
7366 case 0x04: //above_equal
7367 if(&L)
7368 __ beq(R0, R0, L);
7369 else
7370 __ beq(R0, R0, (int)0);
7371 break;
7372 case 0x05: //below
// Unsigned x < 0 is never true: emit no branch and skip the nop.
7373 return;
7374 break;
7375 case 0x06: //below_equal
7376 if(&L)
7377 __ beq(op1, R0, L);
7378 else
7379 __ beq(op1, R0, (int)0);
7380 break;
7381 default:
7382 Unimplemented();
7383 }
// Delay-slot filler for the branch emitted above.
7384 __ nop();
7385 %}
7387 ins_pc_relative(1);
7388 ins_pipe( pipe_alu_branch );
7389 %}
// Branch on unsigned int compared with a 16-bit immediate (immI16).
// above_equal/below use sltiu directly with the immediate; the other
// ordered cases need the operand order reversed, so the immediate is
// first moved into AT and sltu is used.  Trailing nop fills the
// branch delay slot.  Clobbers AT.
// NOTE(review): assumes immI16 guarantees `val` fits sltiu's 16-bit
// signed immediate field — confirm against the operand definition.
// NOTE(review): `&L` is always non-null (address of a bound
// reference); the `else` arms look unreachable — confirm.
7392 instruct branchConIU_reg_immI16(cmpOpU cmp, mRegI src1, immI16 src2, label labl) %{
7393 match( If cmp (CmpU src1 src2) );
7394 effect(USE labl);
7395 ins_cost(180);
7396 format %{ "BR$cmp $src1, $src2, $labl #@branchConIU_reg_immI16" %}
7398 ins_encode %{
7399 Register op1 = $src1$$Register;
7400 int val = $src2$$constant;
7401 Label &L = *($labl$$label);
7402 int flag = $cmp$$cmpcode;
7404 switch(flag)
7405 {
7406 case 0x01: //equal
7407 __ move(AT, val);
7408 if (&L)
7409 __ beq(op1, AT, L);
7410 else
7411 __ beq(op1, AT, (int)0);
7412 break;
7413 case 0x02: //not_equal
7414 __ move(AT, val);
7415 if (&L)
7416 __ bne(op1, AT, L);
7417 else
7418 __ bne(op1, AT, (int)0);
7419 break;
7420 case 0x03: //above
7421 __ move(AT, val);
7422 __ sltu(AT, AT, op1);
7423 if(&L)
7424 __ bne(R0, AT, L);
7425 else
7426 __ bne(R0, AT, (int)0);
7427 break;
7428 case 0x04: //above_equal
7429 __ sltiu(AT, op1, val);
7430 if(&L)
7431 __ beq(AT, R0, L);
7432 else
7433 __ beq(AT, R0, (int)0);
7434 break;
7435 case 0x05: //below
7436 __ sltiu(AT, op1, val);
7437 if(&L)
7438 __ bne(R0, AT, L);
7439 else
7440 __ bne(R0, AT, (int)0);
7441 break;
7442 case 0x06: //below_equal
7443 __ move(AT, val);
7444 __ sltu(AT, AT, op1);
7445 if(&L)
7446 __ beq(AT, R0, L);
7447 else
7448 __ beq(AT, R0, (int)0);
7449 break;
7450 default:
7451 Unimplemented();
7452 }
// Delay-slot filler for the branch emitted above.
7453 __ nop();
7454 %}
7456 ins_pc_relative(1);
7457 ins_pipe( pipe_alu_branch );
7458 %}
// Branch on signed long (CmpL) register-register compare.  Unlike the
// int variants, each case emits its own delay-slot filler via
// __ delayed()->nop() instead of one shared trailing nop.  slt is
// used for the ordered cases (signed compare).  Clobbers AT.
// NOTE(review): `&target` is always non-null (address of a bound
// reference); the `else` arms look unreachable — confirm.
7461 instruct branchConL_regL_regL(cmpOp cmp, mRegL src1, mRegL src2, label labl) %{
7462 match( If cmp (CmpL src1 src2) );
7463 effect(USE labl);
7464 format %{ "BR$cmp $src1, $src2, $labl #@branchConL_regL_regL" %}
7465 ins_cost(250);
7467 ins_encode %{
7468 Register opr1_reg = as_Register($src1$$reg);
7469 Register opr2_reg = as_Register($src2$$reg);
7471 Label &target = *($labl$$label);
7472 int flag = $cmp$$cmpcode;
7474 switch(flag)
7475 {
7476 case 0x01: //equal
7477 if (&target)
7478 __ beq(opr1_reg, opr2_reg, target);
7479 else
7480 __ beq(opr1_reg, opr2_reg, (int)0);
7481 __ delayed()->nop();
7482 break;
7484 case 0x02: //not_equal
7485 if(&target)
7486 __ bne(opr1_reg, opr2_reg, target);
7487 else
7488 __ bne(opr1_reg, opr2_reg, (int)0);
7489 __ delayed()->nop();
7490 break;
7492 case 0x03: //greater
7493 __ slt(AT, opr2_reg, opr1_reg);
7494 if(&target)
7495 __ bne(AT, R0, target);
7496 else
7497 __ bne(AT, R0, (int)0);
7498 __ delayed()->nop();
7499 break;
7501 case 0x04: //greater_equal
7502 __ slt(AT, opr1_reg, opr2_reg);
7503 if(&target)
7504 __ beq(AT, R0, target);
7505 else
7506 __ beq(AT, R0, (int)0);
7507 __ delayed()->nop();
7509 break;
7511 case 0x05: //less
7512 __ slt(AT, opr1_reg, opr2_reg);
7513 if(&target)
7514 __ bne(AT, R0, target);
7515 else
7516 __ bne(AT, R0, (int)0);
7517 __ delayed()->nop();
7519 break;
7521 case 0x06: //less_equal
7522 __ slt(AT, opr2_reg, opr1_reg);
7524 if(&target)
7525 __ beq(AT, R0, target);
7526 else
7527 __ beq(AT, R0, (int)0);
7528 __ delayed()->nop();
7530 break;
7532 default:
7533 Unimplemented();
7534 }
7535 %}
7538 ins_pc_relative(1);
7539 ins_pipe( pipe_alu_branch );
7540 %}
// Branch on signed long vs immediate, implemented by subtracting the
// immediate (daddiu with -val) into AT and then using the sign/zero
// of the difference with the compare-against-zero branches.  Clobbers
// AT; trailing nop fills the branch delay slot.
// NOTE(review): assumes immL16_sub guarantees -val fits daddiu's
// 16-bit signed immediate AND that the subtraction cannot overflow in
// a way that flips the sign — confirm against the operand definition.
// NOTE(review): `&L` is always non-null (address of a bound
// reference); the `else` arms look unreachable — confirm.
7542 instruct branchConL_reg_immL16_sub(cmpOp cmp, mRegL src1, immL16_sub src2, label labl) %{
7543 match( If cmp (CmpL src1 src2) );
7544 effect(USE labl);
7545 ins_cost(180);
7546 format %{ "BR$cmp $src1, $src2, $labl #@branchConL_reg_immL16_sub" %}
7548 ins_encode %{
7549 Register op1 = $src1$$Register;
7550 int val = $src2$$constant;
7551 Label &L = *($labl$$label);
7552 int flag = $cmp$$cmpcode;
// AT = op1 - val; all cases below test AT against zero.
7554 __ daddiu(AT, op1, -1 * val);
7555 switch(flag)
7556 {
7557 case 0x01: //equal
7558 if (&L)
7559 __ beq(R0, AT, L);
7560 else
7561 __ beq(R0, AT, (int)0);
7562 break;
7563 case 0x02: //not_equal
7564 if (&L)
7565 __ bne(R0, AT, L);
7566 else
7567 __ bne(R0, AT, (int)0);
7568 break;
7569 case 0x03: //greater
7570 if(&L)
7571 __ bgtz(AT, L);
7572 else
7573 __ bgtz(AT, (int)0);
7574 break;
7575 case 0x04: //greater_equal
7576 if(&L)
7577 __ bgez(AT, L);
7578 else
7579 __ bgez(AT, (int)0);
7580 break;
7581 case 0x05: //less
7582 if(&L)
7583 __ bltz(AT, L);
7584 else
7585 __ bltz(AT, (int)0);
7586 break;
7587 case 0x06: //less_equal
7588 if(&L)
7589 __ blez(AT, L);
7590 else
7591 __ blez(AT, (int)0);
7592 break;
7593 default:
7594 Unimplemented();
7595 }
// Delay-slot filler for the branch emitted above.
7596 __ nop();
7597 %}
7599 ins_pc_relative(1);
7600 ins_pipe( pipe_alu_branch );
7601 %}
// Branch on signed int vs immediate, by subtracting the immediate
// (addiu32 with -val) into AT and testing the sign/zero of the
// difference — the 32-bit analogue of branchConL_reg_immL16_sub.
// Clobbers AT; trailing nop fills the branch delay slot.
// NOTE(review): assumes immI16_sub guarantees -val fits the 16-bit
// immediate and the subtraction cannot overflow — confirm against the
// operand definition.
// NOTE(review): `&L` is always non-null (address of a bound
// reference); the `else` arms look unreachable — confirm.
7604 instruct branchConI_reg_imm16_sub(cmpOp cmp, mRegI src1, immI16_sub src2, label labl) %{
7605 match( If cmp (CmpI src1 src2) );
7606 effect(USE labl);
7607 ins_cost(180);
7608 format %{ "BR$cmp $src1, $src2, $labl #@branchConI_reg_imm16_sub" %}
7610 ins_encode %{
7611 Register op1 = $src1$$Register;
7612 int val = $src2$$constant;
7613 Label &L = *($labl$$label);
7614 int flag = $cmp$$cmpcode;
// AT = op1 - val (32-bit); all cases below test AT against zero.
7616 __ addiu32(AT, op1, -1 * val);
7617 switch(flag)
7618 {
7619 case 0x01: //equal
7620 if (&L)
7621 __ beq(R0, AT, L);
7622 else
7623 __ beq(R0, AT, (int)0);
7624 break;
7625 case 0x02: //not_equal
7626 if (&L)
7627 __ bne(R0, AT, L);
7628 else
7629 __ bne(R0, AT, (int)0);
7630 break;
7631 case 0x03: //greater
7632 if(&L)
7633 __ bgtz(AT, L);
7634 else
7635 __ bgtz(AT, (int)0);
7636 break;
7637 case 0x04: //greater_equal
7638 if(&L)
7639 __ bgez(AT, L);
7640 else
7641 __ bgez(AT, (int)0);
7642 break;
7643 case 0x05: //less
7644 if(&L)
7645 __ bltz(AT, L);
7646 else
7647 __ bltz(AT, (int)0);
7648 break;
7649 case 0x06: //less_equal
7650 if(&L)
7651 __ blez(AT, L);
7652 else
7653 __ blez(AT, (int)0);
7654 break;
7655 default:
7656 Unimplemented();
7657 }
// Delay-slot filler for the branch emitted above.
7658 __ nop();
7659 %}
7661 ins_pc_relative(1);
7662 ins_pipe( pipe_alu_branch );
7663 %}
// Branch on signed long compared with constant zero.  Uses the
// compare-with-zero branches directly where possible; the "less" case
// goes through slt+bne instead of bltz (clobbering AT) — behavior is
// equivalent.  A single __ delayed()->nop() after the switch fills
// the branch delay slot for whichever branch was emitted.
// NOTE(review): `&target` is always non-null (address of a bound
// reference); the `else` arms look unreachable — confirm.
7665 instruct branchConL_regL_immL0(cmpOp cmp, mRegL src1, immL0 zero, label labl) %{
7666 match( If cmp (CmpL src1 zero) );
7667 effect(USE labl);
7668 format %{ "BR$cmp $src1, zero, $labl #@branchConL_regL_immL0" %}
7669 ins_cost(150);
7671 ins_encode %{
7672 Register opr1_reg = as_Register($src1$$reg);
7673 Label &target = *($labl$$label);
7674 int flag = $cmp$$cmpcode;
7676 switch(flag)
7677 {
7678 case 0x01: //equal
7679 if (&target)
7680 __ beq(opr1_reg, R0, target);
7681 else
7682 __ beq(opr1_reg, R0, int(0));
7683 break;
7685 case 0x02: //not_equal
7686 if(&target)
7687 __ bne(opr1_reg, R0, target);
7688 else
7689 __ bne(opr1_reg, R0, (int)0);
7690 break;
7692 case 0x03: //greater
7693 if(&target)
7694 __ bgtz(opr1_reg, target);
7695 else
7696 __ bgtz(opr1_reg, (int)0);
7697 break;
7699 case 0x04: //greater_equal
7700 if(&target)
7701 __ bgez(opr1_reg, target);
7702 else
7703 __ bgez(opr1_reg, (int)0);
7704 break;
7706 case 0x05: //less
7707 __ slt(AT, opr1_reg, R0);
7708 if(&target)
7709 __ bne(AT, R0, target);
7710 else
7711 __ bne(AT, R0, (int)0);
7712 break;
7714 case 0x06: //less_equal
7715 if (&target)
7716 __ blez(opr1_reg, target);
7717 else
7718 __ blez(opr1_reg, int(0));
7719 break;
7721 default:
7722 Unimplemented();
7723 }
// Shared delay-slot filler for the branch emitted above.
7724 __ delayed()->nop();
7725 %}
7728 ins_pc_relative(1);
7729 ins_pipe( pipe_alu_branch );
7730 %}
7733 //FIXME
// Branch on single-precision float compare.  Sets the FPU condition
// flag with c.cond.s and branches with bc1t/bc1f.  NaN handling:
// greater/greater_equal use the UNORDERED compares (c_ule/c_ult)
// negated with bc1f, so an unordered (NaN) input falls through; eq
// uses c_eq (false on NaN) and ne uses c_eq negated (true on NaN).
// Trailing nop fills the FP-branch delay slot.
// NOTE(review): `&L` is always non-null (address of a bound
// reference); the `else` arms look unreachable — confirm.
7734 instruct branchConF_reg_reg(cmpOp cmp, regF src1, regF src2, label labl) %{
7735 match( If cmp (CmpF src1 src2) );
7736 effect(USE labl);
7737 format %{ "BR$cmp $src1, $src2, $labl #@branchConF_reg_reg" %}
7739 ins_encode %{
7740 FloatRegister reg_op1 = $src1$$FloatRegister;
7741 FloatRegister reg_op2 = $src2$$FloatRegister;
7742 Label &L = *($labl$$label);
7743 int flag = $cmp$$cmpcode;
7745 switch(flag)
7746 {
7747 case 0x01: //equal
7748 __ c_eq_s(reg_op1, reg_op2);
7749 if (&L)
7750 __ bc1t(L);
7751 else
7752 __ bc1t((int)0);
7753 break;
7754 case 0x02: //not_equal
7755 __ c_eq_s(reg_op1, reg_op2);
7756 if (&L)
7757 __ bc1f(L);
7758 else
7759 __ bc1f((int)0);
7760 break;
7761 case 0x03: //greater
7762 __ c_ule_s(reg_op1, reg_op2);
7763 if(&L)
7764 __ bc1f(L);
7765 else
7766 __ bc1f((int)0);
7767 break;
7768 case 0x04: //greater_equal
7769 __ c_ult_s(reg_op1, reg_op2);
7770 if(&L)
7771 __ bc1f(L);
7772 else
7773 __ bc1f((int)0);
7774 break;
7775 case 0x05: //less
7776 __ c_ult_s(reg_op1, reg_op2);
7777 if(&L)
7778 __ bc1t(L);
7779 else
7780 __ bc1t((int)0);
7781 break;
7782 case 0x06: //less_equal
7783 __ c_ule_s(reg_op1, reg_op2);
7784 if(&L)
7785 __ bc1t(L);
7786 else
7787 __ bc1t((int)0);
7788 break;
7789 default:
7790 Unimplemented();
7791 }
// Delay-slot filler for the FP branch emitted above.
7792 __ nop();
7793 %}
7795 ins_pc_relative(1);
7796 ins_pipe(pipe_slow);
7797 %}
// Branch on double-precision float compare — the c.cond.d analogue of
// branchConF_reg_reg.  Note the existing 2016/4/19 comment: ne must
// use c_eq_d negated (not c_ueq_d) so that NaN != NaN holds, which
// Double.isNaN relies on.  Trailing nop fills the FP-branch delay
// slot.
// NOTE(review): `&L` is always non-null (address of a bound
// reference); the `else` arms look unreachable — confirm.
7799 instruct branchConD_reg_reg(cmpOp cmp, regD src1, regD src2, label labl) %{
7800 match( If cmp (CmpD src1 src2) );
7801 effect(USE labl);
7802 format %{ "BR$cmp $src1, $src2, $labl #@branchConD_reg_reg" %}
7804 ins_encode %{
7805 FloatRegister reg_op1 = $src1$$FloatRegister;
7806 FloatRegister reg_op2 = $src2$$FloatRegister;
7807 Label &L = *($labl$$label);
7808 int flag = $cmp$$cmpcode;
7810 switch(flag)
7811 {
7812 case 0x01: //equal
7813 __ c_eq_d(reg_op1, reg_op2);
7814 if (&L)
7815 __ bc1t(L);
7816 else
7817 __ bc1t((int)0);
7818 break;
7819 case 0x02: //not_equal
7820 //2016/4/19 aoqi: c_ueq_d cannot distinguish NaN from equal. Double.isNaN(Double) is implemented by 'f != f', so the use of c_ueq_d causes bugs.
7821 __ c_eq_d(reg_op1, reg_op2);
7822 if (&L)
7823 __ bc1f(L);
7824 else
7825 __ bc1f((int)0);
7826 break;
7827 case 0x03: //greater
7828 __ c_ule_d(reg_op1, reg_op2);
7829 if(&L)
7830 __ bc1f(L);
7831 else
7832 __ bc1f((int)0);
7833 break;
7834 case 0x04: //greater_equal
7835 __ c_ult_d(reg_op1, reg_op2);
7836 if(&L)
7837 __ bc1f(L);
7838 else
7839 __ bc1f((int)0);
7840 break;
7841 case 0x05: //less
7842 __ c_ult_d(reg_op1, reg_op2);
7843 if(&L)
7844 __ bc1t(L);
7845 else
7846 __ bc1t((int)0);
7847 break;
7848 case 0x06: //less_equal
7849 __ c_ule_d(reg_op1, reg_op2);
7850 if(&L)
7851 __ bc1t(L);
7852 else
7853 __ bc1t((int)0);
7854 break;
7855 default:
7856 Unimplemented();
7857 }
// Delay-slot filler for the FP branch emitted above.
7858 __ nop();
7859 %}
7861 ins_pc_relative(1);
7862 ins_pipe(pipe_slow);
7863 %}
7866 // Call Runtime Instruction
// Direct call into the VM runtime; encoding is delegated to the
// shared Java_To_Runtime enc_class defined elsewhere in this file.
7867 instruct CallRuntimeDirect(method meth) %{
7868 match(CallRuntime );
7869 effect(USE meth);
7871 ins_cost(300);
7872 format %{ "CALL,runtime #@CallRuntimeDirect" %}
7873 ins_encode( Java_To_Runtime( meth ) );
7874 ins_pipe( pipe_slow );
7875 ins_alignment(16);
7876 %}
7880 //------------------------MemBar Instructions-------------------------------
7881 //Memory barrier flavors
// Acquire barrier: emits no code (size 0) — acquire semantics are
// provided by the preceding load/CAS on this target.
7883 instruct membar_acquire() %{
7884 match(MemBarAcquire);
7885 ins_cost(0);
7887 size(0);
7888 format %{ "MEMBAR-acquire (empty) @ membar_acquire" %}
7889 ins_encode();
7890 ins_pipe(empty);
7891 %}
// LoadFence: emits a full MIPS sync instruction.
7893 instruct load_fence() %{
7894 match(LoadFence);
7895 ins_cost(400);
7897 format %{ "MEMBAR @ load_fence" %}
7898 ins_encode %{
7899 __ sync();
7900 %}
7901 ins_pipe(pipe_slow);
7902 %}
// Acquire-lock barrier: empty — the acquire is part of the CAS in the
// preceding FastLock, as the format string notes.
7904 instruct membar_acquire_lock()
7905 %{
7906 match(MemBarAcquireLock);
7907 ins_cost(0);
7909 size(0);
7910 format %{ "MEMBAR-acquire (acquire as part of CAS in prior FastLock so empty encoding) @ membar_acquire_lock" %}
7911 ins_encode();
7912 ins_pipe(empty);
7913 %}
// Release barrier: emits no code (size 0) on this target.
7915 instruct membar_release() %{
7916 match(MemBarRelease);
7917 ins_cost(0);
7919 size(0);
7920 format %{ "MEMBAR-release (empty) @ membar_release" %}
7921 ins_encode();
7922 ins_pipe(empty);
7923 %}
// StoreFence: emits a full MIPS sync instruction.
7925 instruct store_fence() %{
7926 match(StoreFence);
7927 ins_cost(400);
7929 format %{ "MEMBAR @ store_fence" %}
7931 ins_encode %{
7932 __ sync();
7933 %}
7935 ins_pipe(pipe_slow);
7936 %}
// Release-lock barrier: empty — the release happens inside FastUnlock.
7938 instruct membar_release_lock()
7939 %{
7940 match(MemBarReleaseLock);
7941 ins_cost(0);
7943 size(0);
7944 format %{ "MEMBAR-release-lock (release in FastUnlock so empty) @ membar_release_lock" %}
7945 ins_encode();
7946 ins_pipe(empty);
7947 %}
// Volatile barrier: full sync, skipped entirely on uniprocessors.
7950 instruct membar_volatile() %{
7951 match(MemBarVolatile);
7952 ins_cost(400);
7954 format %{ "MEMBAR-volatile" %}
7955 ins_encode %{
7956 if( !os::is_MP() ) return; // Not needed on single CPU
7957 __ sync();
7959 %}
7960 ins_pipe(pipe_slow);
7961 %}
// Volatile barrier elided when the matcher proves a store-load barrier
// already follows (post_store_load_barrier predicate); emits nothing.
7963 instruct unnecessary_membar_volatile() %{
7964 match(MemBarVolatile);
7965 predicate(Matcher::post_store_load_barrier(n));
7966 ins_cost(0);
7968 size(0);
7969 format %{ "MEMBAR-volatile (unnecessary so empty encoding) @ unnecessary_membar_volatile" %}
7970 ins_encode( );
7971 ins_pipe(empty);
7972 %}
// StoreStore barrier: empty encoding on this target.
// NOTE(review): relies on the target's store ordering being strong
// enough that no sync is needed here — confirm for this CPU.
7974 instruct membar_storestore() %{
7975 match(MemBarStoreStore);
7977 ins_cost(0);
7978 size(0);
7979 format %{ "MEMBAR-storestore (empty encoding) @ membar_storestore" %}
7980 ins_encode( );
7981 ins_pipe(empty);
7982 %}
7984 //----------Move Instructions--------------------------------------------------
// CastX2P (long -> pointer reinterpretation): a plain register move,
// elided when source and destination already coincide.
7985 instruct castX2P(mRegP dst, mRegL src) %{
7986 match(Set dst (CastX2P src));
7987 format %{ "castX2P $dst, $src @ castX2P" %}
7988 ins_encode %{
7989 Register src = $src$$Register;
7990 Register dst = $dst$$Register;
7992 if(src != dst)
7993 __ move(dst, src);
7994 %}
7995 ins_cost(10);
7996 ins_pipe( ialu_regI_mov );
7997 %}
// CastP2X (pointer -> long reinterpretation): a plain register move,
// elided when source and destination already coincide.
7999 instruct castP2X(mRegL dst, mRegP src ) %{
8000 match(Set dst (CastP2X src));
8002 format %{ "mov $dst, $src\t #@castP2X" %}
8003 ins_encode %{
8004 Register src = $src$$Register;
8005 Register dst = $dst$$Register;
8007 if(src != dst)
8008 __ move(dst, src);
8009 %}
8010 ins_pipe( ialu_regI_mov );
8011 %}
// MoveF2I: raw 32-bit bit-copy from an FPU register to a GPR (mfc1),
// no conversion.
8013 instruct MoveF2I_reg_reg(mRegI dst, regF src) %{
8014 match(Set dst (MoveF2I src));
8015 effect(DEF dst, USE src);
8016 ins_cost(85);
8017 format %{ "MoveF2I $dst, $src @ MoveF2I_reg_reg" %}
8018 ins_encode %{
8019 Register dst = as_Register($dst$$reg);
8020 FloatRegister src = as_FloatRegister($src$$reg);
8022 __ mfc1(dst, src);
8023 %}
8024 ins_pipe( pipe_slow );
8025 %}
// MoveI2F: raw 32-bit bit-copy from a GPR to an FPU register (mtc1),
// no conversion.
8027 instruct MoveI2F_reg_reg(regF dst, mRegI src) %{
8028 match(Set dst (MoveI2F src));
8029 effect(DEF dst, USE src);
8030 ins_cost(85);
8031 format %{ "MoveI2F $dst, $src @ MoveI2F_reg_reg" %}
8032 ins_encode %{
8033 Register src = as_Register($src$$reg);
8034 FloatRegister dst = as_FloatRegister($dst$$reg);
8036 __ mtc1(src, dst);
8037 %}
8038 ins_pipe( pipe_slow );
8039 %}
// MoveD2L: raw 64-bit bit-copy from an FPU register to a GPR (dmfc1),
// no conversion.
8041 instruct MoveD2L_reg_reg(mRegL dst, regD src) %{
8042 match(Set dst (MoveD2L src));
8043 effect(DEF dst, USE src);
8044 ins_cost(85);
8045 format %{ "MoveD2L $dst, $src @ MoveD2L_reg_reg" %}
8046 ins_encode %{
8047 Register dst = as_Register($dst$$reg);
8048 FloatRegister src = as_FloatRegister($src$$reg);
8050 __ dmfc1(dst, src);
8051 %}
8052 ins_pipe( pipe_slow );
8053 %}
// MoveL2D: raw 64-bit bit-copy from a GPR to an FPU register (dmtc1),
// no conversion.
8055 instruct MoveL2D_reg_reg(regD dst, mRegL src) %{
8056 match(Set dst (MoveL2D src));
8057 effect(DEF dst, USE src);
8058 ins_cost(85);
8059 format %{ "MoveL2D $dst, $src @ MoveL2D_reg_reg" %}
8060 ins_encode %{
8061 FloatRegister dst = as_FloatRegister($dst$$reg);
8062 Register src = as_Register($src$$reg);
8064 __ dmtc1(src, dst);
8065 %}
8066 ins_pipe( pipe_slow );
8067 %}
8069 //----------Conditional Move---------------------------------------------------
8070 // Conditional move
// Conditional move of int on a signed int compare: materialize the
// condition in AT (subu32 difference for eq/ne, slt for the ordered
// cases), then use movz (move if AT==0) / movn (move if AT!=0).
// Clobbers AT.  Case comments 0x03-0x06 say great/less — signed slt
// is used, consistent with cmpOp.
8071 instruct cmovI_cmpI_reg_reg(mRegI dst, mRegI src, mRegI tmp1, mRegI tmp2, cmpOp cop ) %{
8072 match(Set dst (CMoveI (Binary cop (CmpI tmp1 tmp2)) (Binary dst src)));
8073 ins_cost(80);
8074 format %{
8075 "CMP$cop $tmp1, $tmp2\t @cmovI_cmpI_reg_reg\n"
8076 "\tCMOV $dst,$src \t @cmovI_cmpI_reg_reg"
8077 %}
8079 ins_encode %{
8080 Register op1 = $tmp1$$Register;
8081 Register op2 = $tmp2$$Register;
8082 Register dst = $dst$$Register;
8083 Register src = $src$$Register;
8084 int flag = $cop$$cmpcode;
8086 switch(flag)
8087 {
8088 case 0x01: //equal
8089 __ subu32(AT, op1, op2);
8090 __ movz(dst, src, AT);
8091 break;
8093 case 0x02: //not_equal
8094 __ subu32(AT, op1, op2);
8095 __ movn(dst, src, AT);
8096 break;
8098 case 0x03: //great
8099 __ slt(AT, op2, op1);
8100 __ movn(dst, src, AT);
8101 break;
8103 case 0x04: //great_equal
8104 __ slt(AT, op1, op2);
8105 __ movz(dst, src, AT);
8106 break;
8108 case 0x05: //less
8109 __ slt(AT, op1, op2);
8110 __ movn(dst, src, AT);
8111 break;
8113 case 0x06: //less_equal
8114 __ slt(AT, op2, op1);
8115 __ movz(dst, src, AT);
8116 break;
8118 default:
8119 Unimplemented();
8120 }
8121 %}
8123 ins_pipe( pipe_slow );
8124 %}
// Conditional move of int on an unsigned pointer compare: condition
// goes into AT (subu difference for eq/ne, sltu for above/below
// cases), then movz/movn.  Clobbers AT.
// NOTE(review): eq/ne use `subu` here while the int variant uses
// `subu32` — presumably this port's MacroAssembler maps subu to the
// 64-bit (dsubu) form for full-width pointer compares; confirm
// against macroAssembler_mips.
8126 instruct cmovI_cmpP_reg_reg(mRegI dst, mRegI src, mRegP tmp1, mRegP tmp2, cmpOpU cop ) %{
8127 match(Set dst (CMoveI (Binary cop (CmpP tmp1 tmp2)) (Binary dst src)));
8128 ins_cost(80);
8129 format %{
8130 "CMPU$cop $tmp1,$tmp2\t @cmovI_cmpP_reg_reg\n\t"
8131 "CMOV $dst,$src\t @cmovI_cmpP_reg_reg"
8132 %}
8133 ins_encode %{
8134 Register op1 = $tmp1$$Register;
8135 Register op2 = $tmp2$$Register;
8136 Register dst = $dst$$Register;
8137 Register src = $src$$Register;
8138 int flag = $cop$$cmpcode;
8140 switch(flag)
8141 {
8142 case 0x01: //equal
8143 __ subu(AT, op1, op2);
8144 __ movz(dst, src, AT);
8145 break;
8147 case 0x02: //not_equal
8148 __ subu(AT, op1, op2);
8149 __ movn(dst, src, AT);
8150 break;
8152 case 0x03: //above
8153 __ sltu(AT, op2, op1);
8154 __ movn(dst, src, AT);
8155 break;
8157 case 0x04: //above_equal
8158 __ sltu(AT, op1, op2);
8159 __ movz(dst, src, AT);
8160 break;
8162 case 0x05: //below
8163 __ sltu(AT, op1, op2);
8164 __ movn(dst, src, AT);
8165 break;
8167 case 0x06: //below_equal
8168 __ sltu(AT, op2, op1);
8169 __ movz(dst, src, AT);
8170 break;
8172 default:
8173 Unimplemented();
8174 }
8175 %}
8177 ins_pipe( pipe_slow );
8178 %}
// Conditional move of int on an unsigned compressed-oop (narrow)
// compare: 32-bit subu32 difference for eq/ne, sltu for above/below,
// then movz/movn.  Clobbers AT.
8180 instruct cmovI_cmpN_reg_reg(mRegI dst, mRegI src, mRegN tmp1, mRegN tmp2, cmpOpU cop ) %{
8181 match(Set dst (CMoveI (Binary cop (CmpN tmp1 tmp2)) (Binary dst src)));
8182 ins_cost(80);
8183 format %{
8184 "CMPU$cop $tmp1,$tmp2\t @cmovI_cmpN_reg_reg\n\t"
8185 "CMOV $dst,$src\t @cmovI_cmpN_reg_reg"
8186 %}
8187 ins_encode %{
8188 Register op1 = $tmp1$$Register;
8189 Register op2 = $tmp2$$Register;
8190 Register dst = $dst$$Register;
8191 Register src = $src$$Register;
8192 int flag = $cop$$cmpcode;
8194 switch(flag)
8195 {
8196 case 0x01: //equal
8197 __ subu32(AT, op1, op2);
8198 __ movz(dst, src, AT);
8199 break;
8201 case 0x02: //not_equal
8202 __ subu32(AT, op1, op2);
8203 __ movn(dst, src, AT);
8204 break;
8206 case 0x03: //above
8207 __ sltu(AT, op2, op1);
8208 __ movn(dst, src, AT);
8209 break;
8211 case 0x04: //above_equal
8212 __ sltu(AT, op1, op2);
8213 __ movz(dst, src, AT);
8214 break;
8216 case 0x05: //below
8217 __ sltu(AT, op1, op2);
8218 __ movn(dst, src, AT);
8219 break;
8221 case 0x06: //below_equal
8222 __ sltu(AT, op2, op1);
8223 __ movz(dst, src, AT);
8224 break;
8226 default:
8227 Unimplemented();
8228 }
8229 %}
8231 ins_pipe( pipe_slow );
8232 %}
// Conditional move of pointer on an unsigned narrow-oop compare —
// same condition sequence as cmovI_cmpN_reg_reg (subu32/sltu into AT,
// then movz/movn).  Clobbers AT.
8234 instruct cmovP_cmpN_reg_reg(mRegP dst, mRegP src, mRegN tmp1, mRegN tmp2, cmpOpU cop ) %{
8235 match(Set dst (CMoveP (Binary cop (CmpN tmp1 tmp2)) (Binary dst src)));
8236 ins_cost(80);
8237 format %{
8238 "CMPU$cop $tmp1,$tmp2\t @cmovP_cmpN_reg_reg\n\t"
8239 "CMOV $dst,$src\t @cmovP_cmpN_reg_reg"
8240 %}
8241 ins_encode %{
8242 Register op1 = $tmp1$$Register;
8243 Register op2 = $tmp2$$Register;
8244 Register dst = $dst$$Register;
8245 Register src = $src$$Register;
8246 int flag = $cop$$cmpcode;
8248 switch(flag)
8249 {
8250 case 0x01: //equal
8251 __ subu32(AT, op1, op2);
8252 __ movz(dst, src, AT);
8253 break;
8255 case 0x02: //not_equal
8256 __ subu32(AT, op1, op2);
8257 __ movn(dst, src, AT);
8258 break;
8260 case 0x03: //above
8261 __ sltu(AT, op2, op1);
8262 __ movn(dst, src, AT);
8263 break;
8265 case 0x04: //above_equal
8266 __ sltu(AT, op1, op2);
8267 __ movz(dst, src, AT);
8268 break;
8270 case 0x05: //below
8271 __ sltu(AT, op1, op2);
8272 __ movn(dst, src, AT);
8273 break;
8275 case 0x06: //below_equal
8276 __ sltu(AT, op2, op1);
8277 __ movz(dst, src, AT);
8278 break;
8280 default:
8281 Unimplemented();
8282 }
8283 %}
8285 ins_pipe( pipe_slow );
8286 %}
// Conditional move of narrow oop on an unsigned full-width pointer
// compare: subu difference for eq/ne, sltu for above/below, then
// movz/movn.  Clobbers AT.
8288 instruct cmovN_cmpP_reg_reg(mRegN dst, mRegN src, mRegP tmp1, mRegP tmp2, cmpOpU cop ) %{
8289 match(Set dst (CMoveN (Binary cop (CmpP tmp1 tmp2)) (Binary dst src)));
8290 ins_cost(80);
8291 format %{
8292 "CMPU$cop $tmp1,$tmp2\t @cmovN_cmpP_reg_reg\n\t"
8293 "CMOV $dst,$src\t @cmovN_cmpP_reg_reg"
8294 %}
8295 ins_encode %{
8296 Register op1 = $tmp1$$Register;
8297 Register op2 = $tmp2$$Register;
8298 Register dst = $dst$$Register;
8299 Register src = $src$$Register;
8300 int flag = $cop$$cmpcode;
8302 switch(flag)
8303 {
8304 case 0x01: //equal
8305 __ subu(AT, op1, op2);
8306 __ movz(dst, src, AT);
8307 break;
8309 case 0x02: //not_equal
8310 __ subu(AT, op1, op2);
8311 __ movn(dst, src, AT);
8312 break;
8314 case 0x03: //above
8315 __ sltu(AT, op2, op1);
8316 __ movn(dst, src, AT);
8317 break;
8319 case 0x04: //above_equal
8320 __ sltu(AT, op1, op2);
8321 __ movz(dst, src, AT);
8322 break;
8324 case 0x05: //below
8325 __ sltu(AT, op1, op2);
8326 __ movn(dst, src, AT);
8327 break;
8329 case 0x06: //below_equal
8330 __ sltu(AT, op2, op1);
8331 __ movz(dst, src, AT);
8332 break;
8334 default:
8335 Unimplemented();
8336 }
8337 %}
8339 ins_pipe( pipe_slow );
8340 %}
// Conditional move of pointer on a double compare: sets the FPU
// condition flag with c.cond.d and moves with movt (flag true) /
// movf (flag false).
// NOTE(review): the cases mix ordered (c_ole/c_olt) and unordered
// (c_ult/c_ule) compares, so NaN inputs behave differently across
// greater/greater_equal vs less/less_equal — verify this matches the
// intended CMove NaN semantics before touching.
8342 instruct cmovP_cmpD_reg_reg(mRegP dst, mRegP src, regD tmp1, regD tmp2, cmpOp cop ) %{
8343 match(Set dst (CMoveP (Binary cop (CmpD tmp1 tmp2)) (Binary dst src)));
8344 ins_cost(80);
8345 format %{
8346 "CMP$cop $tmp1, $tmp2\t @cmovP_cmpD_reg_reg\n"
8347 "\tCMOV $dst,$src \t @cmovP_cmpD_reg_reg"
8348 %}
8349 ins_encode %{
8350 FloatRegister reg_op1 = as_FloatRegister($tmp1$$reg);
8351 FloatRegister reg_op2 = as_FloatRegister($tmp2$$reg);
8352 Register dst = as_Register($dst$$reg);
8353 Register src = as_Register($src$$reg);
8355 int flag = $cop$$cmpcode;
8357 switch(flag)
8358 {
8359 case 0x01: //equal
8360 __ c_eq_d(reg_op1, reg_op2);
8361 __ movt(dst, src);
8362 break;
8363 case 0x02: //not_equal
8364 __ c_eq_d(reg_op1, reg_op2);
8365 __ movf(dst, src);
8366 break;
8367 case 0x03: //greater
8368 __ c_ole_d(reg_op1, reg_op2);
8369 __ movf(dst, src);
8370 break;
8371 case 0x04: //greater_equal
8372 __ c_olt_d(reg_op1, reg_op2);
8373 __ movf(dst, src);
8374 break;
8375 case 0x05: //less
8376 __ c_ult_d(reg_op1, reg_op2);
8377 __ movt(dst, src);
8378 break;
8379 case 0x06: //less_equal
8380 __ c_ule_d(reg_op1, reg_op2);
8381 __ movt(dst, src);
8382 break;
8383 default:
8384 Unimplemented();
8385 }
8386 %}
8388 ins_pipe( pipe_slow );
8389 %}
// Conditional move of narrow oop on an unsigned narrow-oop compare:
// subu32/sltu into AT, then movz/movn.  Clobbers AT.
8392 instruct cmovN_cmpN_reg_reg(mRegN dst, mRegN src, mRegN tmp1, mRegN tmp2, cmpOpU cop ) %{
8393 match(Set dst (CMoveN (Binary cop (CmpN tmp1 tmp2)) (Binary dst src)));
8394 ins_cost(80);
8395 format %{
8396 "CMPU$cop $tmp1,$tmp2\t @cmovN_cmpN_reg_reg\n\t"
8397 "CMOV $dst,$src\t @cmovN_cmpN_reg_reg"
8398 %}
8399 ins_encode %{
8400 Register op1 = $tmp1$$Register;
8401 Register op2 = $tmp2$$Register;
8402 Register dst = $dst$$Register;
8403 Register src = $src$$Register;
8404 int flag = $cop$$cmpcode;
8406 switch(flag)
8407 {
8408 case 0x01: //equal
8409 __ subu32(AT, op1, op2);
8410 __ movz(dst, src, AT);
8411 break;
8413 case 0x02: //not_equal
8414 __ subu32(AT, op1, op2);
8415 __ movn(dst, src, AT);
8416 break;
8418 case 0x03: //above
8419 __ sltu(AT, op2, op1);
8420 __ movn(dst, src, AT);
8421 break;
8423 case 0x04: //above_equal
8424 __ sltu(AT, op1, op2);
8425 __ movz(dst, src, AT);
8426 break;
8428 case 0x05: //below
8429 __ sltu(AT, op1, op2);
8430 __ movn(dst, src, AT);
8431 break;
8433 case 0x06: //below_equal
8434 __ sltu(AT, op2, op1);
8435 __ movz(dst, src, AT);
8436 break;
8438 default:
8439 Unimplemented();
8440 }
8441 %}
8443 ins_pipe( pipe_slow );
8444 %}
// Conditional move of int on an unsigned int compare (CmpU): subu
// difference for eq/ne, sltu for above/below, then movz/movn.
// Clobbers AT.
8447 instruct cmovI_cmpU_reg_reg(mRegI dst, mRegI src, mRegI tmp1, mRegI tmp2, cmpOpU cop ) %{
8448 match(Set dst (CMoveI (Binary cop (CmpU tmp1 tmp2)) (Binary dst src)));
8449 ins_cost(80);
8450 format %{
8451 "CMPU$cop $tmp1,$tmp2\t @cmovI_cmpU_reg_reg\n\t"
8452 "CMOV $dst,$src\t @cmovI_cmpU_reg_reg"
8453 %}
8454 ins_encode %{
8455 Register op1 = $tmp1$$Register;
8456 Register op2 = $tmp2$$Register;
8457 Register dst = $dst$$Register;
8458 Register src = $src$$Register;
8459 int flag = $cop$$cmpcode;
8461 switch(flag)
8462 {
8463 case 0x01: //equal
8464 __ subu(AT, op1, op2);
8465 __ movz(dst, src, AT);
8466 break;
8468 case 0x02: //not_equal
8469 __ subu(AT, op1, op2);
8470 __ movn(dst, src, AT);
8471 break;
8473 case 0x03: //above
8474 __ sltu(AT, op2, op1);
8475 __ movn(dst, src, AT);
8476 break;
8478 case 0x04: //above_equal
8479 __ sltu(AT, op1, op2);
8480 __ movz(dst, src, AT);
8481 break;
8483 case 0x05: //below
8484 __ sltu(AT, op1, op2);
8485 __ movn(dst, src, AT);
8486 break;
8488 case 0x06: //below_equal
8489 __ sltu(AT, op2, op1);
8490 __ movz(dst, src, AT);
8491 break;
8493 default:
8494 Unimplemented();
8495 }
8496 %}
8498 ins_pipe( pipe_slow );
8499 %}
// Conditional move of int on a signed long compare: subu difference
// for eq/ne, slt for the ordered cases, then movz/movn.  Clobbers AT.
// NOTE(review): eq/ne use `subu` on 64-bit long operands — presumably
// this port's MacroAssembler maps subu to the 64-bit (dsubu) form;
// confirm against macroAssembler_mips.
8501 instruct cmovI_cmpL_reg_reg(mRegI dst, mRegI src, mRegL tmp1, mRegL tmp2, cmpOp cop ) %{
8502 match(Set dst (CMoveI (Binary cop (CmpL tmp1 tmp2)) (Binary dst src)));
8503 ins_cost(80);
8504 format %{
8505 "CMP$cop $tmp1, $tmp2\t @cmovI_cmpL_reg_reg\n"
8506 "\tCMOV $dst,$src \t @cmovI_cmpL_reg_reg"
8507 %}
8508 ins_encode %{
8509 Register opr1 = as_Register($tmp1$$reg);
8510 Register opr2 = as_Register($tmp2$$reg);
8511 Register dst = $dst$$Register;
8512 Register src = $src$$Register;
8513 int flag = $cop$$cmpcode;
8515 switch(flag)
8516 {
8517 case 0x01: //equal
8518 __ subu(AT, opr1, opr2);
8519 __ movz(dst, src, AT);
8520 break;
8522 case 0x02: //not_equal
8523 __ subu(AT, opr1, opr2);
8524 __ movn(dst, src, AT);
8525 break;
8527 case 0x03: //greater
8528 __ slt(AT, opr2, opr1);
8529 __ movn(dst, src, AT);
8530 break;
8532 case 0x04: //greater_equal
8533 __ slt(AT, opr1, opr2);
8534 __ movz(dst, src, AT);
8535 break;
8537 case 0x05: //less
8538 __ slt(AT, opr1, opr2);
8539 __ movn(dst, src, AT);
8540 break;
8542 case 0x06: //less_equal
8543 __ slt(AT, opr2, opr1);
8544 __ movz(dst, src, AT);
8545 break;
8547 default:
8548 Unimplemented();
8549 }
8550 %}
8552 ins_pipe( pipe_slow );
8553 %}
// Branchless conditional move (pointer result, signed long compare): dst = src iff (tmp1 <cop> tmp2).
8555 instruct cmovP_cmpL_reg_reg(mRegP dst, mRegP src, mRegL tmp1, mRegL tmp2, cmpOp cop ) %{
8556 match(Set dst (CMoveP (Binary cop (CmpL tmp1 tmp2)) (Binary dst src)));
8557 ins_cost(80);
8558 format %{
8559 "CMP$cop $tmp1, $tmp2\t @cmovP_cmpL_reg_reg\n"
8560 "\tCMOV $dst,$src \t @cmovP_cmpL_reg_reg"
8561 %}
8562 ins_encode %{
8563 Register opr1 = as_Register($tmp1$$reg);
8564 Register opr2 = as_Register($tmp2$$reg);
8565 Register dst = $dst$$Register;
8566 Register src = $src$$Register;
8567 int flag = $cop$$cmpcode;
8569 switch(flag)
8570 {
8571 case 0x01: //equal
8572 __ subu(AT, opr1, opr2);
8573 __ movz(dst, src, AT);
8574 break;
8576 case 0x02: //not_equal
8577 __ subu(AT, opr1, opr2);
8578 __ movn(dst, src, AT);
8579 break;
8581 case 0x03: //greater
8582 __ slt(AT, opr2, opr1);
8583 __ movn(dst, src, AT);
8584 break;
8586 case 0x04: //greater_equal
8587 __ slt(AT, opr1, opr2);
8588 __ movz(dst, src, AT);
8589 break;
8591 case 0x05: //less
8592 __ slt(AT, opr1, opr2);
8593 __ movn(dst, src, AT);
8594 break;
8596 case 0x06: //less_equal
8597 __ slt(AT, opr2, opr1);
8598 __ movz(dst, src, AT);
8599 break;
8601 default:
8602 Unimplemented();
8603 }
8604 %}
8606 ins_pipe( pipe_slow );
8607 %}
// Branchless conditional move (int result, double compare): dst = src iff (tmp1 <cop> tmp2).
// FP compares set the FCC0 condition bit; movt/movf select on FCC0 true/false.
// NOTE(review): the c_ole/c_olt/c_ult/c_ule choices encode the intended NaN
// behavior for each branch sense - confirm against branchConD_reg_reg before changing.
8609 instruct cmovI_cmpD_reg_reg(mRegI dst, mRegI src, regD tmp1, regD tmp2, cmpOp cop ) %{
8610 match(Set dst (CMoveI (Binary cop (CmpD tmp1 tmp2)) (Binary dst src)));
8611 ins_cost(80);
8612 format %{
8613 "CMP$cop $tmp1, $tmp2\t @cmovI_cmpD_reg_reg\n"
8614 "\tCMOV $dst,$src \t @cmovI_cmpD_reg_reg"
8615 %}
8616 ins_encode %{
8617 FloatRegister reg_op1 = as_FloatRegister($tmp1$$reg);
8618 FloatRegister reg_op2 = as_FloatRegister($tmp2$$reg);
8619 Register dst = as_Register($dst$$reg);
8620 Register src = as_Register($src$$reg);
8622 int flag = $cop$$cmpcode;
8624 switch(flag)
8625 {
8626 case 0x01: //equal
8627 __ c_eq_d(reg_op1, reg_op2);
8628 __ movt(dst, src);
8629 break;
8630 case 0x02: //not_equal
8631 //2016/4/19 aoqi: See instruct branchConD_reg_reg. The change in branchConD_reg_reg fixed a bug. It seems similar here, so I made the same change.
8632 __ c_eq_d(reg_op1, reg_op2);
8633 __ movf(dst, src);
8634 break;
8635 case 0x03: //greater
8636 __ c_ole_d(reg_op1, reg_op2);
8637 __ movf(dst, src);
8638 break;
8639 case 0x04: //greater_equal
8640 __ c_olt_d(reg_op1, reg_op2);
8641 __ movf(dst, src);
8642 break;
8643 case 0x05: //less
8644 __ c_ult_d(reg_op1, reg_op2);
8645 __ movt(dst, src);
8646 break;
8647 case 0x06: //less_equal
8648 __ c_ule_d(reg_op1, reg_op2);
8649 __ movt(dst, src);
8650 break;
8651 default:
8652 Unimplemented();
8653 }
8654 %}
8656 ins_pipe( pipe_slow );
8657 %}
// Branchless conditional move (pointer result, unsigned pointer compare): dst = src iff (tmp1 <cop> tmp2).
8660 instruct cmovP_cmpP_reg_reg(mRegP dst, mRegP src, mRegP tmp1, mRegP tmp2, cmpOpU cop ) %{
8661 match(Set dst (CMoveP (Binary cop (CmpP tmp1 tmp2)) (Binary dst src)));
8662 ins_cost(80);
8663 format %{
8664 "CMPU$cop $tmp1,$tmp2\t @cmovP_cmpP_reg_reg\n\t"
8665 "CMOV $dst,$src\t @cmovP_cmpP_reg_reg"
8666 %}
8667 ins_encode %{
8668 Register op1 = $tmp1$$Register;
8669 Register op2 = $tmp2$$Register;
8670 Register dst = $dst$$Register;
8671 Register src = $src$$Register;
8672 int flag = $cop$$cmpcode;
8674 switch(flag)
8675 {
8676 case 0x01: //equal
8677 __ subu(AT, op1, op2);
8678 __ movz(dst, src, AT);
8679 break;
8681 case 0x02: //not_equal
8682 __ subu(AT, op1, op2);
8683 __ movn(dst, src, AT);
8684 break;
8686 case 0x03: //above
8687 __ sltu(AT, op2, op1);
8688 __ movn(dst, src, AT);
8689 break;
8691 case 0x04: //above_equal
8692 __ sltu(AT, op1, op2);
8693 __ movz(dst, src, AT);
8694 break;
8696 case 0x05: //below
8697 __ sltu(AT, op1, op2);
8698 __ movn(dst, src, AT);
8699 break;
8701 case 0x06: //below_equal
8702 __ sltu(AT, op2, op1);
8703 __ movz(dst, src, AT);
8704 break;
8706 default:
8707 Unimplemented();
8708 }
8709 %}
8711 ins_pipe( pipe_slow );
8712 %}
// Branchless conditional move (pointer result, signed int compare): dst = src iff (tmp1 <cop> tmp2).
// The cop operand is a signed cmpOp and the code uses slt; the original case labels
// ("above"/"below") used unsigned terminology - they are really greater/less.
8714 instruct cmovP_cmpI_reg_reg(mRegP dst, mRegP src, mRegI tmp1, mRegI tmp2, cmpOp cop ) %{
8715 match(Set dst (CMoveP (Binary cop (CmpI tmp1 tmp2)) (Binary dst src)));
8716 ins_cost(80);
8717 format %{
8718 "CMP$cop $tmp1,$tmp2\t @cmovP_cmpI_reg_reg\n\t"
8719 "CMOV $dst,$src\t @cmovP_cmpI_reg_reg"
8720 %}
8721 ins_encode %{
8722 Register op1 = $tmp1$$Register;
8723 Register op2 = $tmp2$$Register;
8724 Register dst = $dst$$Register;
8725 Register src = $src$$Register;
8726 int flag = $cop$$cmpcode;
8728 switch(flag)
8729 {
8730 case 0x01: //equal
8731 __ subu32(AT, op1, op2);
8732 __ movz(dst, src, AT);
8733 break;
8735 case 0x02: //not_equal
8736 __ subu32(AT, op1, op2);
8737 __ movn(dst, src, AT);
8738 break;
8740 case 0x03: //greater (signed; was mislabeled "above")
8741 __ slt(AT, op2, op1);
8742 __ movn(dst, src, AT);
8743 break;
8745 case 0x04: //greater_equal (signed; was mislabeled "above_equal")
8746 __ slt(AT, op1, op2);
8747 __ movz(dst, src, AT);
8748 break;
8750 case 0x05: //less (signed; was mislabeled "below")
8751 __ slt(AT, op1, op2);
8752 __ movn(dst, src, AT);
8753 break;
8755 case 0x06: //less_equal (signed; was mislabeled "below_equal")
8756 __ slt(AT, op2, op1);
8757 __ movz(dst, src, AT);
8758 break;
8760 default:
8761 Unimplemented();
8762 }
8763 %}
8765 ins_pipe( pipe_slow );
8766 %}
// Branchless conditional move (narrow-oop result, signed int compare): dst = src iff (tmp1 <cop> tmp2).
// Signed compare (slt); the original "above"/"below" case labels were misleading.
8768 instruct cmovN_cmpI_reg_reg(mRegN dst, mRegN src, mRegI tmp1, mRegI tmp2, cmpOp cop ) %{
8769 match(Set dst (CMoveN (Binary cop (CmpI tmp1 tmp2)) (Binary dst src)));
8770 ins_cost(80);
8771 format %{
8772 "CMP$cop $tmp1,$tmp2\t @cmovN_cmpI_reg_reg\n\t"
8773 "CMOV $dst,$src\t @cmovN_cmpI_reg_reg"
8774 %}
8775 ins_encode %{
8776 Register op1 = $tmp1$$Register;
8777 Register op2 = $tmp2$$Register;
8778 Register dst = $dst$$Register;
8779 Register src = $src$$Register;
8780 int flag = $cop$$cmpcode;
8782 switch(flag)
8783 {
8784 case 0x01: //equal
8785 __ subu32(AT, op1, op2);
8786 __ movz(dst, src, AT);
8787 break;
8789 case 0x02: //not_equal
8790 __ subu32(AT, op1, op2);
8791 __ movn(dst, src, AT);
8792 break;
8794 case 0x03: //greater (signed; was mislabeled "above")
8795 __ slt(AT, op2, op1);
8796 __ movn(dst, src, AT);
8797 break;
8799 case 0x04: //greater_equal (signed; was mislabeled "above_equal")
8800 __ slt(AT, op1, op2);
8801 __ movz(dst, src, AT);
8802 break;
8804 case 0x05: //less (signed; was mislabeled "below")
8805 __ slt(AT, op1, op2);
8806 __ movn(dst, src, AT);
8807 break;
8809 case 0x06: //less_equal (signed; was mislabeled "below_equal")
8810 __ slt(AT, op2, op1);
8811 __ movz(dst, src, AT);
8812 break;
8814 default:
8815 Unimplemented();
8816 }
8817 %}
8819 ins_pipe( pipe_slow );
8820 %}
// Branchless conditional move (long result, signed int compare): dst = src iff (tmp1 <cop> tmp2).
8823 instruct cmovL_cmpI_reg_reg(mRegL dst, mRegL src, mRegI tmp1, mRegI tmp2, cmpOp cop ) %{
8824 match(Set dst (CMoveL (Binary cop (CmpI tmp1 tmp2)) (Binary dst src)));
8825 ins_cost(80);
8826 format %{
8827 "CMP$cop $tmp1, $tmp2\t @cmovL_cmpI_reg_reg\n"
8828 "\tCMOV $dst,$src \t @cmovL_cmpI_reg_reg"
8829 %}
8831 ins_encode %{
8832 Register op1 = $tmp1$$Register;
8833 Register op2 = $tmp2$$Register;
8834 Register dst = as_Register($dst$$reg);
8835 Register src = as_Register($src$$reg);
8836 int flag = $cop$$cmpcode;
8838 switch(flag)
8839 {
8840 case 0x01: //equal
8841 __ subu32(AT, op1, op2);
8842 __ movz(dst, src, AT);
8843 break;
8845 case 0x02: //not_equal
8846 __ subu32(AT, op1, op2);
8847 __ movn(dst, src, AT);
8848 break;
8850 case 0x03: //greater
8851 __ slt(AT, op2, op1);
8852 __ movn(dst, src, AT);
8853 break;
8855 case 0x04: //greater_equal
8856 __ slt(AT, op1, op2);
8857 __ movz(dst, src, AT);
8858 break;
8860 case 0x05: //less
8861 __ slt(AT, op1, op2);
8862 __ movn(dst, src, AT);
8863 break;
8865 case 0x06: //less_equal
8866 __ slt(AT, op2, op1);
8867 __ movz(dst, src, AT);
8868 break;
8870 default:
8871 Unimplemented();
8872 }
8873 %}
8875 ins_pipe( pipe_slow );
8876 %}
// Branchless conditional move (long result, signed long compare): dst = src iff (tmp1 <cop> tmp2).
8878 instruct cmovL_cmpL_reg_reg(mRegL dst, mRegL src, mRegL tmp1, mRegL tmp2, cmpOp cop ) %{
8879 match(Set dst (CMoveL (Binary cop (CmpL tmp1 tmp2)) (Binary dst src)));
8880 ins_cost(80);
8881 format %{
8882 "CMP$cop $tmp1, $tmp2\t @cmovL_cmpL_reg_reg\n"
8883 "\tCMOV $dst,$src \t @cmovL_cmpL_reg_reg"
8884 %}
8885 ins_encode %{
8886 Register opr1 = as_Register($tmp1$$reg);
8887 Register opr2 = as_Register($tmp2$$reg);
8888 Register dst = as_Register($dst$$reg);
8889 Register src = as_Register($src$$reg);
8890 int flag = $cop$$cmpcode;
8892 switch(flag)
8893 {
8894 case 0x01: //equal
8895 __ subu(AT, opr1, opr2);
8896 __ movz(dst, src, AT);
8897 break;
8899 case 0x02: //not_equal
8900 __ subu(AT, opr1, opr2);
8901 __ movn(dst, src, AT);
8902 break;
8904 case 0x03: //greater
8905 __ slt(AT, opr2, opr1);
8906 __ movn(dst, src, AT);
8907 break;
8909 case 0x04: //greater_equal
8910 __ slt(AT, opr1, opr2);
8911 __ movz(dst, src, AT);
8912 break;
8914 case 0x05: //less
8915 __ slt(AT, opr1, opr2);
8916 __ movn(dst, src, AT);
8917 break;
8919 case 0x06: //less_equal
8920 __ slt(AT, opr2, opr1);
8921 __ movz(dst, src, AT);
8922 break;
8924 default:
8925 Unimplemented();
8926 }
8927 %}
8929 ins_pipe( pipe_slow );
8930 %}
// Branchless conditional move (long result, unsigned narrow-oop compare): dst = src iff (tmp1 <cop> tmp2).
// Narrow oops are 32-bit values, hence subu32 for eq/ne; ordering uses sltu (unsigned).
8932 instruct cmovL_cmpN_reg_reg(mRegL dst, mRegL src, mRegN tmp1, mRegN tmp2, cmpOpU cop ) %{
8933 match(Set dst (CMoveL (Binary cop (CmpN tmp1 tmp2)) (Binary dst src)));
8934 ins_cost(80);
8935 format %{
8936 "CMPU$cop $tmp1,$tmp2\t @cmovL_cmpN_reg_reg\n\t"
8937 "CMOV $dst,$src\t @cmovL_cmpN_reg_reg"
8938 %}
8939 ins_encode %{
8940 Register op1 = $tmp1$$Register;
8941 Register op2 = $tmp2$$Register;
8942 Register dst = $dst$$Register;
8943 Register src = $src$$Register;
8944 int flag = $cop$$cmpcode;
8946 switch(flag)
8947 {
8948 case 0x01: //equal
8949 __ subu32(AT, op1, op2);
8950 __ movz(dst, src, AT);
8951 break;
8953 case 0x02: //not_equal
8954 __ subu32(AT, op1, op2);
8955 __ movn(dst, src, AT);
8956 break;
8958 case 0x03: //above
8959 __ sltu(AT, op2, op1);
8960 __ movn(dst, src, AT);
8961 break;
8963 case 0x04: //above_equal
8964 __ sltu(AT, op1, op2);
8965 __ movz(dst, src, AT);
8966 break;
8968 case 0x05: //below
8969 __ sltu(AT, op1, op2);
8970 __ movn(dst, src, AT);
8971 break;
8973 case 0x06: //below_equal
8974 __ sltu(AT, op2, op1);
8975 __ movz(dst, src, AT);
8976 break;
8978 default:
8979 Unimplemented();
8980 }
8981 %}
8983 ins_pipe( pipe_slow );
8984 %}
// Branchless conditional move (long result, double compare): dst = src iff (tmp1 <cop> tmp2).
// FP compare sets FCC0; movt/movf select on the condition bit.
8987 instruct cmovL_cmpD_reg_reg(mRegL dst, mRegL src, regD tmp1, regD tmp2, cmpOp cop ) %{
8988 match(Set dst (CMoveL (Binary cop (CmpD tmp1 tmp2)) (Binary dst src)));
8989 ins_cost(80);
8990 format %{
8991 "CMP$cop $tmp1, $tmp2\t @cmovL_cmpD_reg_reg\n"
8992 "\tCMOV $dst,$src \t @cmovL_cmpD_reg_reg"
8993 %}
8994 ins_encode %{
8995 FloatRegister reg_op1 = as_FloatRegister($tmp1$$reg);
8996 FloatRegister reg_op2 = as_FloatRegister($tmp2$$reg);
8997 Register dst = as_Register($dst$$reg);
8998 Register src = as_Register($src$$reg);
9000 int flag = $cop$$cmpcode;
9002 switch(flag)
9003 {
9004 case 0x01: //equal
9005 __ c_eq_d(reg_op1, reg_op2);
9006 __ movt(dst, src);
9007 break;
9008 case 0x02: //not_equal
9009 __ c_eq_d(reg_op1, reg_op2);
9010 __ movf(dst, src);
9011 break;
9012 case 0x03: //greater
9013 __ c_ole_d(reg_op1, reg_op2);
9014 __ movf(dst, src);
9015 break;
9016 case 0x04: //greater_equal
9017 __ c_olt_d(reg_op1, reg_op2);
9018 __ movf(dst, src);
9019 break;
9020 case 0x05: //less
9021 __ c_ult_d(reg_op1, reg_op2);
9022 __ movt(dst, src);
9023 break;
9024 case 0x06: //less_equal
9025 __ c_ule_d(reg_op1, reg_op2);
9026 __ movt(dst, src);
9027 break;
9028 default:
9029 Unimplemented();
9030 }
9031 %}
9033 ins_pipe( pipe_slow );
9034 %}
// Conditional move (double result, double compare) via a short forward branch:
// FPU has no register-conditional mov_d on FCC here, so branch around the move.
// Each case binds the shared label L; only one case ever executes per emit.
9036 instruct cmovD_cmpD_reg_reg(regD dst, regD src, regD tmp1, regD tmp2, cmpOp cop ) %{
9037 match(Set dst (CMoveD (Binary cop (CmpD tmp1 tmp2)) (Binary dst src)));
9038 ins_cost(200);
9039 format %{
9040 "CMP$cop $tmp1, $tmp2\t @cmovD_cmpD_reg_reg\n"
9041 "\tCMOV $dst,$src \t @cmovD_cmpD_reg_reg"
9042 %}
9043 ins_encode %{
9044 FloatRegister reg_op1 = as_FloatRegister($tmp1$$reg);
9045 FloatRegister reg_op2 = as_FloatRegister($tmp2$$reg);
9046 FloatRegister dst = as_FloatRegister($dst$$reg);
9047 FloatRegister src = as_FloatRegister($src$$reg);
9049 int flag = $cop$$cmpcode;
9051 Label L;
9053 switch(flag)
9054 {
9055 case 0x01: //equal
9056 __ c_eq_d(reg_op1, reg_op2);
9057 __ bc1f(L);
9058 __ nop();
9059 __ mov_d(dst, src);
9060 __ bind(L);
9061 break;
9062 case 0x02: //not_equal
9063 //2016/4/19 aoqi: See instruct branchConD_reg_reg. The change in branchConD_reg_reg fixed a bug. It seems similar here, so I made the same change.
9064 __ c_eq_d(reg_op1, reg_op2);
9065 __ bc1t(L);
9066 __ nop();
9067 __ mov_d(dst, src);
9068 __ bind(L);
9069 break;
9070 case 0x03: //greater
9071 __ c_ole_d(reg_op1, reg_op2);
9072 __ bc1t(L);
9073 __ nop();
9074 __ mov_d(dst, src);
9075 __ bind(L);
9076 break;
9077 case 0x04: //greater_equal
9078 __ c_olt_d(reg_op1, reg_op2);
9079 __ bc1t(L);
9080 __ nop();
9081 __ mov_d(dst, src);
9082 __ bind(L);
9083 break;
9084 case 0x05: //less
9085 __ c_ult_d(reg_op1, reg_op2);
9086 __ bc1f(L);
9087 __ nop();
9088 __ mov_d(dst, src);
9089 __ bind(L);
9090 break;
9091 case 0x06: //less_equal
9092 __ c_ule_d(reg_op1, reg_op2);
9093 __ bc1f(L);
9094 __ nop();
9095 __ mov_d(dst, src);
9096 __ bind(L);
9097 break;
9098 default:
9099 Unimplemented();
9100 }
9101 %}
9103 ins_pipe( pipe_slow );
9104 %}
// Conditional move (float result, signed int compare): branch around mov_s when
// the condition does NOT hold (branches test the inverted condition).
9106 instruct cmovF_cmpI_reg_reg(regF dst, regF src, mRegI tmp1, mRegI tmp2, cmpOp cop ) %{
9107 match(Set dst (CMoveF (Binary cop (CmpI tmp1 tmp2)) (Binary dst src)));
9108 ins_cost(200);
9109 format %{
9110 "CMP$cop $tmp1, $tmp2\t @cmovF_cmpI_reg_reg\n"
9111 "\tCMOV $dst, $src \t @cmovF_cmpI_reg_reg"
9112 %}
9114 ins_encode %{
9115 Register op1 = $tmp1$$Register;
9116 Register op2 = $tmp2$$Register;
9117 FloatRegister dst = as_FloatRegister($dst$$reg);
9118 FloatRegister src = as_FloatRegister($src$$reg);
9119 int flag = $cop$$cmpcode;
9120 Label L;
9122 switch(flag)
9123 {
9124 case 0x01: //equal
9125 __ bne(op1, op2, L);
9126 __ nop();
9127 __ mov_s(dst, src);
9128 __ bind(L);
9129 break;
9130 case 0x02: //not_equal
9131 __ beq(op1, op2, L);
9132 __ nop();
9133 __ mov_s(dst, src);
9134 __ bind(L);
9135 break;
9136 case 0x03: //greater
9137 __ slt(AT, op2, op1);
9138 __ beq(AT, R0, L);
9139 __ nop();
9140 __ mov_s(dst, src);
9141 __ bind(L);
9142 break;
9143 case 0x04: //greater_equal
9144 __ slt(AT, op1, op2);
9145 __ bne(AT, R0, L);
9146 __ nop();
9147 __ mov_s(dst, src);
9148 __ bind(L);
9149 break;
9150 case 0x05: //less
9151 __ slt(AT, op1, op2);
9152 __ beq(AT, R0, L);
9153 __ nop();
9154 __ mov_s(dst, src);
9155 __ bind(L);
9156 break;
9157 case 0x06: //less_equal
9158 __ slt(AT, op2, op1);
9159 __ bne(AT, R0, L);
9160 __ nop();
9161 __ mov_s(dst, src);
9162 __ bind(L);
9163 break;
9164 default:
9165 Unimplemented();
9166 }
9167 %}
9169 ins_pipe( pipe_slow );
9170 %}
// Conditional move (double result, signed int compare): branch around mov_d when
// the condition does NOT hold.
9172 instruct cmovD_cmpI_reg_reg(regD dst, regD src, mRegI tmp1, mRegI tmp2, cmpOp cop ) %{
9173 match(Set dst (CMoveD (Binary cop (CmpI tmp1 tmp2)) (Binary dst src)));
9174 ins_cost(200);
9175 format %{
9176 "CMP$cop $tmp1, $tmp2\t @cmovD_cmpI_reg_reg\n"
9177 "\tCMOV $dst, $src \t @cmovD_cmpI_reg_reg"
9178 %}
9180 ins_encode %{
9181 Register op1 = $tmp1$$Register;
9182 Register op2 = $tmp2$$Register;
9183 FloatRegister dst = as_FloatRegister($dst$$reg);
9184 FloatRegister src = as_FloatRegister($src$$reg);
9185 int flag = $cop$$cmpcode;
9186 Label L;
9188 switch(flag)
9189 {
9190 case 0x01: //equal
9191 __ bne(op1, op2, L);
9192 __ nop();
9193 __ mov_d(dst, src);
9194 __ bind(L);
9195 break;
9196 case 0x02: //not_equal
9197 __ beq(op1, op2, L);
9198 __ nop();
9199 __ mov_d(dst, src);
9200 __ bind(L);
9201 break;
9202 case 0x03: //greater
9203 __ slt(AT, op2, op1);
9204 __ beq(AT, R0, L);
9205 __ nop();
9206 __ mov_d(dst, src);
9207 __ bind(L);
9208 break;
9209 case 0x04: //greater_equal
9210 __ slt(AT, op1, op2);
9211 __ bne(AT, R0, L);
9212 __ nop();
9213 __ mov_d(dst, src);
9214 __ bind(L);
9215 break;
9216 case 0x05: //less
9217 __ slt(AT, op1, op2);
9218 __ beq(AT, R0, L);
9219 __ nop();
9220 __ mov_d(dst, src);
9221 __ bind(L);
9222 break;
9223 case 0x06: //less_equal
9224 __ slt(AT, op2, op1);
9225 __ bne(AT, R0, L);
9226 __ nop();
9227 __ mov_d(dst, src);
9228 __ bind(L);
9229 break;
9230 default:
9231 Unimplemented();
9232 }
9233 %}
9235 ins_pipe( pipe_slow );
9236 %}
// Conditional move (double result, pointer compare): branch around mov_d when the
// condition does NOT hold.
// NOTE(review): ordered pointer cases use signed slt, unlike cmovP_cmpP_reg_reg
// which uses sltu - presumably only eq/ne reach this match in practice; confirm.
9238 instruct cmovD_cmpP_reg_reg(regD dst, regD src, mRegP tmp1, mRegP tmp2, cmpOp cop ) %{
9239 match(Set dst (CMoveD (Binary cop (CmpP tmp1 tmp2)) (Binary dst src)));
9240 ins_cost(200);
9241 format %{
9242 "CMP$cop $tmp1, $tmp2\t @cmovD_cmpP_reg_reg\n"
9243 "\tCMOV $dst, $src \t @cmovD_cmpP_reg_reg"
9244 %}
9246 ins_encode %{
9247 Register op1 = $tmp1$$Register;
9248 Register op2 = $tmp2$$Register;
9249 FloatRegister dst = as_FloatRegister($dst$$reg);
9250 FloatRegister src = as_FloatRegister($src$$reg);
9251 int flag = $cop$$cmpcode;
9252 Label L;
9254 switch(flag)
9255 {
9256 case 0x01: //equal
9257 __ bne(op1, op2, L);
9258 __ nop();
9259 __ mov_d(dst, src);
9260 __ bind(L);
9261 break;
9262 case 0x02: //not_equal
9263 __ beq(op1, op2, L);
9264 __ nop();
9265 __ mov_d(dst, src);
9266 __ bind(L);
9267 break;
9268 case 0x03: //greater
9269 __ slt(AT, op2, op1);
9270 __ beq(AT, R0, L);
9271 __ nop();
9272 __ mov_d(dst, src);
9273 __ bind(L);
9274 break;
9275 case 0x04: //greater_equal
9276 __ slt(AT, op1, op2);
9277 __ bne(AT, R0, L);
9278 __ nop();
9279 __ mov_d(dst, src);
9280 __ bind(L);
9281 break;
9282 case 0x05: //less
9283 __ slt(AT, op1, op2);
9284 __ beq(AT, R0, L);
9285 __ nop();
9286 __ mov_d(dst, src);
9287 __ bind(L);
9288 break;
9289 case 0x06: //less_equal
9290 __ slt(AT, op2, op1);
9291 __ bne(AT, R0, L);
9292 __ nop();
9293 __ mov_d(dst, src);
9294 __ bind(L);
9295 break;
9296 default:
9297 Unimplemented();
9298 }
9299 %}
9301 ins_pipe( pipe_slow );
9302 %}
9304 //FIXME
// Branchless conditional move (int result, float compare): dst = src iff (tmp1 <cop> tmp2).
// Single-precision analogue of cmovI_cmpD_reg_reg; FCC0 drives movt/movf.
9305 instruct cmovI_cmpF_reg_reg(mRegI dst, mRegI src, regF tmp1, regF tmp2, cmpOp cop ) %{
9306 match(Set dst (CMoveI (Binary cop (CmpF tmp1 tmp2)) (Binary dst src)));
9307 ins_cost(80);
9308 format %{
9309 "CMP$cop $tmp1, $tmp2\t @cmovI_cmpF_reg_reg\n"
9310 "\tCMOV $dst,$src \t @cmovI_cmpF_reg_reg"
9311 %}
9313 ins_encode %{
9314 FloatRegister reg_op1 = $tmp1$$FloatRegister;
9315 FloatRegister reg_op2 = $tmp2$$FloatRegister;
9316 Register dst = $dst$$Register;
9317 Register src = $src$$Register;
9318 int flag = $cop$$cmpcode;
9320 switch(flag)
9321 {
9322 case 0x01: //equal
9323 __ c_eq_s(reg_op1, reg_op2);
9324 __ movt(dst, src);
9325 break;
9326 case 0x02: //not_equal
9327 __ c_eq_s(reg_op1, reg_op2);
9328 __ movf(dst, src);
9329 break;
9330 case 0x03: //greater
9331 __ c_ole_s(reg_op1, reg_op2);
9332 __ movf(dst, src);
9333 break;
9334 case 0x04: //greater_equal
9335 __ c_olt_s(reg_op1, reg_op2);
9336 __ movf(dst, src);
9337 break;
9338 case 0x05: //less
9339 __ c_ult_s(reg_op1, reg_op2);
9340 __ movt(dst, src);
9341 break;
9342 case 0x06: //less_equal
9343 __ c_ule_s(reg_op1, reg_op2);
9344 __ movt(dst, src);
9345 break;
9346 default:
9347 Unimplemented();
9348 }
9349 %}
9350 ins_pipe( pipe_slow );
9351 %}
// Conditional move (float result, float compare): FP compare sets FCC0, then
// bc1f/bc1t branches around the mov_s when the condition does not hold.
9353 instruct cmovF_cmpF_reg_reg(regF dst, regF src, regF tmp1, regF tmp2, cmpOp cop ) %{
9354 match(Set dst (CMoveF (Binary cop (CmpF tmp1 tmp2)) (Binary dst src)));
9355 ins_cost(200);
9356 format %{
9357 "CMP$cop $tmp1, $tmp2\t @cmovF_cmpF_reg_reg\n"
9358 "\tCMOV $dst,$src \t @cmovF_cmpF_reg_reg"
9359 %}
9361 ins_encode %{
9362 FloatRegister reg_op1 = $tmp1$$FloatRegister;
9363 FloatRegister reg_op2 = $tmp2$$FloatRegister;
9364 FloatRegister dst = $dst$$FloatRegister;
9365 FloatRegister src = $src$$FloatRegister;
9366 Label L;
9367 int flag = $cop$$cmpcode;
9369 switch(flag)
9370 {
9371 case 0x01: //equal
9372 __ c_eq_s(reg_op1, reg_op2);
9373 __ bc1f(L);
9374 __ nop();
9375 __ mov_s(dst, src);
9376 __ bind(L);
9377 break;
9378 case 0x02: //not_equal
9379 __ c_eq_s(reg_op1, reg_op2);
9380 __ bc1t(L);
9381 __ nop();
9382 __ mov_s(dst, src);
9383 __ bind(L);
9384 break;
9385 case 0x03: //greater
9386 __ c_ole_s(reg_op1, reg_op2);
9387 __ bc1t(L);
9388 __ nop();
9389 __ mov_s(dst, src);
9390 __ bind(L);
9391 break;
9392 case 0x04: //greater_equal
9393 __ c_olt_s(reg_op1, reg_op2);
9394 __ bc1t(L);
9395 __ nop();
9396 __ mov_s(dst, src);
9397 __ bind(L);
9398 break;
9399 case 0x05: //less
9400 __ c_ult_s(reg_op1, reg_op2);
9401 __ bc1f(L);
9402 __ nop();
9403 __ mov_s(dst, src);
9404 __ bind(L);
9405 break;
9406 case 0x06: //less_equal
9407 __ c_ule_s(reg_op1, reg_op2);
9408 __ bc1f(L);
9409 __ nop();
9410 __ mov_s(dst, src);
9411 __ bind(L);
9412 break;
9413 default:
9414 Unimplemented();
9415 }
9416 %}
9417 ins_pipe( pipe_slow );
9418 %}
9420 // Manifest a CmpL result in an integer register. Very painful.
9421 // This is the test to avoid.
// Materialize a three-way long compare (lcmp) in an int register:
// dst = -1 if src1 < src2, 0 if equal, 1 if src1 > src2.
9422 instruct cmpL3_reg_reg(mRegI dst, mRegL src1, mRegL src2) %{
9423 match(Set dst (CmpL3 src1 src2));
9424 ins_cost(1000);
9425 format %{ "cmpL3 $dst, $src1, $src2 @ cmpL3_reg_reg" %}
9426 ins_encode %{
9427 Register opr1 = as_Register($src1$$reg);
9428 Register opr2 = as_Register($src2$$reg);
9429 Register dst = as_Register($dst$$reg);
9431 Label Done;
// Decide "less" with slt rather than by the sign of (opr1 - opr2): the old
// subu+bltz sequence gave the wrong answer when the subtraction overflowed
// 64 bits (e.g. LONG_MAX vs -1). This matches the slt-based ordering used by
// the cmov instructs above.
__ slt(AT, opr1, opr2);
__ bne(AT, R0, Done);
__ delayed()->daddiu(dst, R0, -1); // dst = -1 in the delay slot (only kept when branch taken)
// Not less: distinguish equal from greater. Subtraction is safe for an
// equality test - it is zero iff the operands are equal.
__ subu(AT, opr1, opr2);
__ move(dst, 1);
__ movz(dst, R0, AT); // dst = 0 when equal, else stays 1
9440 __ bind(Done);
9441 %}
9442 ins_pipe( pipe_slow );
9443 %}
9445 //
9446 // less_result = -1
9447 // greater_result = 1
9448 // equal_result = 0
9449 // nan_result = -1
9450 //
// Materialize a three-way float compare (fcmpl semantics) in an int register:
// dst = -1 if src1 < src2 or unordered (NaN), 0 if equal, 1 if src1 > src2.
9451 instruct cmpF3_reg_reg(mRegI dst, regF src1, regF src2) %{
9452 match(Set dst (CmpF3 src1 src2));
9453 ins_cost(1000);
9454 format %{ "cmpF3 $dst, $src1, $src2 @ cmpF3_reg_reg" %}
9455 ins_encode %{
9456 FloatRegister src1 = as_FloatRegister($src1$$reg);
9457 FloatRegister src2 = as_FloatRegister($src2$$reg);
9458 Register dst = as_Register($dst$$reg);
9460 Label Done;
// c_ult is true for "less than" AND for unordered, giving NaN the -1 result.
9462 __ c_ult_s(src1, src2);
9463 __ bc1t(Done);
9464 __ delayed()->daddiu(dst, R0, -1);
// Not less/NaN: dst = 1, overwritten with 0 if the operands compare equal.
9466 __ c_eq_s(src1, src2);
9467 __ move(dst, 1);
9468 __ movt(dst, R0);
9470 __ bind(Done);
9471 %}
9472 ins_pipe( pipe_slow );
9473 %}
// Materialize a three-way double compare (dcmpl semantics) in an int register:
// dst = -1 if src1 < src2 or unordered (NaN), 0 if equal, 1 if src1 > src2.
9475 instruct cmpD3_reg_reg(mRegI dst, regD src1, regD src2) %{
9476 match(Set dst (CmpD3 src1 src2));
9477 ins_cost(1000);
9478 format %{ "cmpD3 $dst, $src1, $src2 @ cmpD3_reg_reg" %}
9479 ins_encode %{
9480 FloatRegister src1 = as_FloatRegister($src1$$reg);
9481 FloatRegister src2 = as_FloatRegister($src2$$reg);
9482 Register dst = as_Register($dst$$reg);
9484 Label Done;
// c_ult is true for "less than" AND for unordered, giving NaN the -1 result.
9486 __ c_ult_d(src1, src2);
9487 __ bc1t(Done);
9488 __ delayed()->daddiu(dst, R0, -1);
// Not less/NaN: dst = 1, overwritten with 0 if the operands compare equal.
9490 __ c_eq_d(src1, src2);
9491 __ move(dst, 1);
9492 __ movt(dst, R0);
9494 __ bind(Done);
9495 %}
9496 ins_pipe( pipe_slow );
9497 %}
// Zero-fill a memory region: stores $cnt doublewords of zero starting at $base.
// NOTE(review): the two comments below disagree - the first says $cnt is bytes,
// the 2012 note (and the X86 port it cites) says doublewords; the loop itself
// clears $cnt * wordSize bytes. Confirm the ClearArray count unit for this port.
9499 instruct clear_array(mRegL cnt, mRegP base, Universe dummy) %{
9500 match(Set dummy (ClearArray cnt base));
9501 format %{ "CLEAR_ARRAY base = $base, cnt = $cnt # Clear doublewords" %}
9502 ins_encode %{
9503 //Assume cnt is the number of bytes in an array to be cleared,
9504 //and base points to the starting address of the array.
9505 Register base = $base$$Register;
9506 Register num = $cnt$$Register;
9507 Label Loop, done;
9509 /* 2012/9/21 Jin: according to X86, $cnt is calculated by doublewords(8 bytes) */
9510 __ move(T9, num); /* T9 = words */
9511 __ beq(T9, R0, done);
9512 __ nop();
9513 __ move(AT, base);
9515 __ bind(Loop);
9516 __ sd(R0, Address(AT, 0));
9517 __ daddi(AT, AT, wordSize);
9518 __ daddi(T9, T9, -1);
9519 __ bne(T9, R0, Loop);
9520 __ delayed()->nop();
9521 __ bind(done);
9522 %}
9523 ins_pipe( pipe_slow );
9524 %}
// String.compareTo intrinsic: lexicographic UTF-16 comparison of str1[0..cnt1)
// and str2[0..cnt2). Result < 0 / == 0 / > 0 like Java compareTo: the char
// difference at the first mismatch, or the length difference if one string is
// a prefix of the other. All four argument registers are clobbered (USE_KILL).
9526 instruct string_compare(a4_RegP str1, mA5RegI cnt1, a6_RegP str2, mA7RegI cnt2, no_Ax_mRegI result) %{
9527 match(Set result (StrComp (Binary str1 cnt1) (Binary str2 cnt2)));
9528 effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2);
9530 format %{ "String Compare $str1[len: $cnt1], $str2[len: $cnt2] -> $result @ string_compare" %}
9531 ins_encode %{
9532 // Get the first character position in both strings
9533 // [8] char array, [12] offset, [16] count
9534 Register str1 = $str1$$Register;
9535 Register str2 = $str2$$Register;
9536 Register cnt1 = $cnt1$$Register;
9537 Register cnt2 = $cnt2$$Register;
9538 Register result = $result$$Register;
9540 Label L, Loop, haveResult, done;
9542 // compute the and difference of lengths (in result)
9543 __ subu(result, cnt1, cnt2); // result holds the difference of two lengths
9545 // compute the shorter length (in cnt1)
9546 __ slt(AT, cnt2, cnt1);
9547 __ movn(cnt1, cnt2, AT);
9549 // Now the shorter length is in cnt1 and cnt2 can be used as a tmp register
9550 __ bind(Loop); // Loop begin
// When the shorter string is exhausted, fall out with the length difference;
// the lhu in the delay slot harmlessly pre-loads the next char of str1.
9551 __ beq(cnt1, R0, done);
9552 __ delayed()->lhu(AT, str1, 0);;
9554 // compare current character
9555 __ lhu(cnt2, str2, 0);
9556 __ bne(AT, cnt2, haveResult);
9557 __ delayed()->addi(str1, str1, 2);
9558 __ addi(str2, str2, 2);
9559 __ b(Loop);
9560 __ delayed()->addi(cnt1, cnt1, -1); // Loop end
9562 __ bind(haveResult);
// Mismatch: result is the difference of the differing chars (AT - cnt2).
9563 __ subu(result, AT, cnt2);
9565 __ bind(done);
9566 %}
9568 ins_pipe( pipe_slow );
9569 %}
9571 // intrinsic optimization
// String.equals intrinsic: result = 1 if the cnt UTF-16 chars at str1 and str2
// match (or the two pointers are identical), else 0. str1/str2/cnt are clobbered.
9572 instruct string_equals(a4_RegP str1, a5_RegP str2, mA6RegI cnt, mA7RegI temp, no_Ax_mRegI result) %{
9573 match(Set result (StrEquals (Binary str1 str2) cnt));
9574 effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt, KILL temp);
9576 format %{ "String Equal $str1, $str2, len:$cnt tmp:$temp -> $result @ string_equals" %}
9577 ins_encode %{
9578 // Get the first character position in both strings
9579 // [8] char array, [12] offset, [16] count
9580 Register str1 = $str1$$Register;
9581 Register str2 = $str2$$Register;
9582 Register cnt = $cnt$$Register;
9583 Register tmp = $temp$$Register;
9584 Register result = $result$$Register;
9586 Label Loop, done;
// Identical array pointers are trivially equal; the daddiu in the branch
// delay slot sets result = 1 for that path (and is harmless otherwise).
9589 __ beq(str1, str2, done); // same char[] ?
9590 __ daddiu(result, R0, 1);
9592 __ bind(Loop); // Loop begin
9593 __ beq(cnt, R0, done);
9594 __ daddiu(result, R0, 1); // count == 0
9596 // compare current character
9597 __ lhu(AT, str1, 0);;
9598 __ lhu(tmp, str2, 0);
// First mismatch: exit with result = 0 (set in the delay slot).
9599 __ bne(AT, tmp, done);
9600 __ delayed()->daddi(result, R0, 0);
9601 __ addi(str1, str1, 2);
9602 __ addi(str2, str2, 2);
9603 __ b(Loop);
9604 __ delayed()->addi(cnt, cnt, -1); // Loop end
9606 __ bind(done);
9607 %}
9609 ins_pipe( pipe_slow );
9610 %}
9612 //----------Arithmetic Instructions-------------------------------------------
9613 //----------Addition Instructions---------------------------------------------
// Integer add, register + register. addu32 is the 32-bit add that keeps the
// result sign-extended per the MIPS64 int convention.
9614 instruct addI_Reg_Reg(mRegI dst, mRegI src1, mRegI src2) %{
9615 match(Set dst (AddI src1 src2));
9617 format %{ "add $dst, $src1, $src2 #@addI_Reg_Reg" %}
9618 ins_encode %{
9619 Register dst = $dst$$Register;
9620 Register src1 = $src1$$Register;
9621 Register src2 = $src2$$Register;
9622 __ addu32(dst, src1, src2);
9623 %}
9624 ins_pipe( ialu_regI_regI );
9625 %}
// Integer add, register + immediate. Uses a single addiu32 when the constant
// fits in a signed 16-bit immediate; otherwise materializes it in AT first.
9627 instruct addI_Reg_imm(mRegI dst, mRegI src1, immI src2) %{
9628 match(Set dst (AddI src1 src2));
9630 format %{ "add $dst, $src1, $src2 #@addI_Reg_imm" %}
9631 ins_encode %{
9632 Register dst = $dst$$Register;
9633 Register src1 = $src1$$Register;
9634 int imm = $src2$$constant;
9636 if(Assembler::is_simm16(imm)) {
9637 __ addiu32(dst, src1, imm);
9638 } else {
9639 __ move(AT, imm);
9640 __ addu32(dst, src1, AT);
9641 }
9642 %}
9643 ins_pipe( ialu_regI_regI );
9644 %}
// Pointer add, pointer + long offset (64-bit daddu).
9646 instruct addP_reg_reg(mRegP dst, mRegP src1, mRegL src2) %{
9647 match(Set dst (AddP src1 src2));
9649 format %{ "dadd $dst, $src1, $src2 #@addP_reg_reg" %}
9651 ins_encode %{
9652 Register dst = $dst$$Register;
9653 Register src1 = $src1$$Register;
9654 Register src2 = $src2$$Register;
9655 __ daddu(dst, src1, src2);
9656 %}
9658 ins_pipe( ialu_regI_regI );
9659 %}
// Pointer add, pointer + widened int offset. No explicit sign-extension is
// emitted: this relies on the MIPS64 convention that int values are kept
// sign-extended in 64-bit registers, making ConvI2L a no-op here.
9661 instruct addP_reg_reg_convI2L(mRegP dst, mRegP src1, mRegI src2) %{
9662 match(Set dst (AddP src1 (ConvI2L src2)));
9664 format %{ "dadd $dst, $src1, $src2 #@addP_reg_reg_convI2L" %}
9666 ins_encode %{
9667 Register dst = $dst$$Register;
9668 Register src1 = $src1$$Register;
9669 Register src2 = $src2$$Register;
9670 __ daddu(dst, src1, src2);
9671 %}
9673 ins_pipe( ialu_regI_regI );
9674 %}
// Pointer add, pointer + long immediate. Single daddiu for 16-bit immediates;
// larger constants are loaded into AT with set64 first.
9676 instruct addP_reg_imm(mRegP dst, mRegP src1, immL src2) %{
9677 match(Set dst (AddP src1 src2));
9679 format %{ "daddi $dst, $src1, $src2 #@addP_reg_imm" %}
9680 ins_encode %{
9681 Register src1 = $src1$$Register;
9682 long src2 = $src2$$constant;
9683 Register dst = $dst$$Register;
9685 if(Assembler::is_simm16(src2)) {
9686 __ daddiu(dst, src1, src2);
9687 } else {
9688 __ set64(AT, src2);
9689 __ daddu(dst, src1, AT);
9690 }
9691 %}
9692 ins_pipe( ialu_regI_imm16 );
9693 %}
9695 // Add Long Register with Register
// Long add, register + register (64-bit daddu).
9696 instruct addL_Reg_Reg(mRegL dst, mRegL src1, mRegL src2) %{
9697 match(Set dst (AddL src1 src2));
9698 ins_cost(200);
9699 format %{ "ADD $dst, $src1, $src2 #@addL_Reg_Reg\t" %}
9701 ins_encode %{
9702 Register dst_reg = as_Register($dst$$reg);
9703 Register src1_reg = as_Register($src1$$reg);
9704 Register src2_reg = as_Register($src2$$reg);
9706 __ daddu(dst_reg, src1_reg, src2_reg);
9707 %}
9709 ins_pipe( ialu_regL_regL );
9710 %}
// Long add, register + 16-bit immediate (immL16 guarantees daddiu range).
9712 instruct addL_Reg_imm(mRegL dst, mRegL src1, immL16 src2)
9713 %{
9714 match(Set dst (AddL src1 src2));
9716 format %{ "ADD $dst, $src1, $src2 #@addL_Reg_imm " %}
9717 ins_encode %{
9718 Register dst_reg = as_Register($dst$$reg);
9719 Register src1_reg = as_Register($src1$$reg);
9720 int src2_imm = $src2$$constant;
9722 __ daddiu(dst_reg, src1_reg, src2_imm);
9723 %}
9725 ins_pipe( ialu_regL_regL );
9726 %}
// Long add, widened int + 16-bit immediate. ConvI2L needs no code: ints are
// kept sign-extended in 64-bit registers by convention on this port.
9728 instruct addL_RegI2L_imm(mRegL dst, mRegI src1, immL16 src2)
9729 %{
9730 match(Set dst (AddL (ConvI2L src1) src2));
9732 format %{ "ADD $dst, $src1, $src2 #@addL_RegI2L_imm " %}
9733 ins_encode %{
9734 Register dst_reg = as_Register($dst$$reg);
9735 Register src1_reg = as_Register($src1$$reg);
9736 int src2_imm = $src2$$constant;
9738 __ daddiu(dst_reg, src1_reg, src2_imm);
9739 %}
9741 ins_pipe( ialu_regL_regL );
9742 %}
// Long add, widened int + long register (ConvI2L is a no-op; see addL_RegI2L_imm).
9744 instruct addL_RegI2L_Reg(mRegL dst, mRegI src1, mRegL src2) %{
9745 match(Set dst (AddL (ConvI2L src1) src2));
9746 ins_cost(200);
9747 format %{ "ADD $dst, $src1, $src2 #@addL_RegI2L_Reg\t" %}
9749 ins_encode %{
9750 Register dst_reg = as_Register($dst$$reg);
9751 Register src1_reg = as_Register($src1$$reg);
9752 Register src2_reg = as_Register($src2$$reg);
9754 __ daddu(dst_reg, src1_reg, src2_reg);
9755 %}
9757 ins_pipe( ialu_regL_regL );
9758 %}
// Long add of two widened ints (both ConvI2L are no-ops; see addL_RegI2L_imm).
9760 instruct addL_RegI2L_RegI2L(mRegL dst, mRegI src1, mRegI src2) %{
9761 match(Set dst (AddL (ConvI2L src1) (ConvI2L src2)));
9762 ins_cost(200);
9763 format %{ "ADD $dst, $src1, $src2 #@addL_RegI2L_RegI2L\t" %}
9765 ins_encode %{
9766 Register dst_reg = as_Register($dst$$reg);
9767 Register src1_reg = as_Register($src1$$reg);
9768 Register src2_reg = as_Register($src2$$reg);
9770 __ daddu(dst_reg, src1_reg, src2_reg);
9771 %}
9773 ins_pipe( ialu_regL_regL );
9774 %}
// Long add, long register + widened int (ConvI2L is a no-op; see addL_RegI2L_imm).
9776 instruct addL_Reg_RegI2L(mRegL dst, mRegL src1, mRegI src2) %{
9777 match(Set dst (AddL src1 (ConvI2L src2)));
9778 ins_cost(200);
9779 format %{ "ADD $dst, $src1, $src2 #@addL_Reg_RegI2L\t" %}
9781 ins_encode %{
9782 Register dst_reg = as_Register($dst$$reg);
9783 Register src1_reg = as_Register($src1$$reg);
9784 Register src2_reg = as_Register($src2$$reg);
9786 __ daddu(dst_reg, src1_reg, src2_reg);
9787 %}
9789 ins_pipe( ialu_regL_regL );
9790 %}
9792 //----------Subtraction Instructions-------------------------------------------
9793 // Integer Subtraction Instructions
// Integer subtract, register - register (32-bit, result kept sign-extended).
9794 instruct subI_Reg_Reg(mRegI dst, mRegI src1, mRegI src2) %{
9795 match(Set dst (SubI src1 src2));
9796 ins_cost(100);
9798 format %{ "sub $dst, $src1, $src2 #@subI_Reg_Reg" %}
9799 ins_encode %{
9800 Register dst = $dst$$Register;
9801 Register src1 = $src1$$Register;
9802 Register src2 = $src2$$Register;
9803 __ subu32(dst, src1, src2);
9804 %}
9805 ins_pipe( ialu_regI_regI );
9806 %}
// Integer subtract of an immediate, encoded as addiu32 of the negated constant
// (immI16_sub constrains the constant so -constant fits the 16-bit immediate).
9808 instruct subI_Reg_immI16_sub(mRegI dst, mRegI src1, immI16_sub src2) %{
9809 match(Set dst (SubI src1 src2));
9810 ins_cost(80);
9812 format %{ "sub $dst, $src1, $src2 #@subI_Reg_immI16_sub" %}
9813 ins_encode %{
9814 Register dst = $dst$$Register;
9815 Register src1 = $src1$$Register;
9816 __ addiu32(dst, src1, -1 * $src2$$constant);
9817 %}
9818 ins_pipe( ialu_regI_regI );
9819 %}
// Integer negate: dst = 0 - src (matches SubI with a zero left operand).
9821 instruct negI_Reg(mRegI dst, immI0 zero, mRegI src) %{
9822 match(Set dst (SubI zero src));
9823 ins_cost(80);
9825 format %{ "neg $dst, $src #@negI_Reg" %}
9826 ins_encode %{
9827 Register dst = $dst$$Register;
9828 Register src = $src$$Register;
9829 __ subu32(dst, R0, src);
9830 %}
9831 ins_pipe( ialu_regI_regI );
9832 %}
9834 instruct negL_Reg(mRegL dst, immL0 zero, mRegL src) %{
9835 match(Set dst (SubL zero src));
9836 ins_cost(80);
9838 format %{ "neg $dst, $src #@negL_Reg" %}
9839 ins_encode %{
9840 Register dst = $dst$$Register;
9841 Register src = $src$$Register;
9842 __ subu(dst, R0, src);
9843 %}
9844 ins_pipe( ialu_regI_regI );
9845 %}
9847 instruct subL_Reg_immL16_sub(mRegL dst, mRegL src1, immL16_sub src2) %{
9848 match(Set dst (SubL src1 src2));
9849 ins_cost(80);
9851 format %{ "sub $dst, $src1, $src2 #@subL_Reg_immL16_sub" %}
9852 ins_encode %{
9853 Register dst = $dst$$Register;
9854 Register src1 = $src1$$Register;
9855 __ daddiu(dst, src1, -1 * $src2$$constant);
9856 %}
9857 ins_pipe( ialu_regI_regI );
9858 %}
9860 // Subtract Long Register with Register.
9861 instruct subL_Reg_Reg(mRegL dst, mRegL src1, mRegL src2) %{
9862 match(Set dst (SubL src1 src2));
9863 ins_cost(100);
9864 format %{ "SubL $dst, $src1, $src2 @ subL_Reg_Reg" %}
9865 ins_encode %{
9866 Register dst = as_Register($dst$$reg);
9867 Register src1 = as_Register($src1$$reg);
9868 Register src2 = as_Register($src2$$reg);
9870 __ subu(dst, src1, src2);
9871 %}
9872 ins_pipe( ialu_regL_regL );
9873 %}
9875 instruct subL_Reg_RegI2L(mRegL dst, mRegL src1, mRegI src2) %{
9876 match(Set dst (SubL src1 (ConvI2L src2)));
9877 ins_cost(100);
9878 format %{ "SubL $dst, $src1, $src2 @ subL_Reg_RegI2L" %}
9879 ins_encode %{
9880 Register dst = as_Register($dst$$reg);
9881 Register src1 = as_Register($src1$$reg);
9882 Register src2 = as_Register($src2$$reg);
9884 __ subu(dst, src1, src2);
9885 %}
9886 ins_pipe( ialu_regL_regL );
9887 %}
9889 instruct subL_RegI2L_Reg(mRegL dst, mRegI src1, mRegL src2) %{
9890 match(Set dst (SubL (ConvI2L src1) src2));
9891 ins_cost(200);
9892 format %{ "SubL $dst, $src1, $src2 @ subL_RegI2L_Reg" %}
9893 ins_encode %{
9894 Register dst = as_Register($dst$$reg);
9895 Register src1 = as_Register($src1$$reg);
9896 Register src2 = as_Register($src2$$reg);
9898 __ subu(dst, src1, src2);
9899 %}
9900 ins_pipe( ialu_regL_regL );
9901 %}
9903 instruct subL_RegI2L_RegI2L(mRegL dst, mRegI src1, mRegI src2) %{
9904 match(Set dst (SubL (ConvI2L src1) (ConvI2L src2)));
9905 ins_cost(200);
9906 format %{ "SubL $dst, $src1, $src2 @ subL_RegI2L_RegI2L" %}
9907 ins_encode %{
9908 Register dst = as_Register($dst$$reg);
9909 Register src1 = as_Register($src1$$reg);
9910 Register src2 = as_Register($src2$$reg);
9912 __ subu(dst, src1, src2);
9913 %}
9914 ins_pipe( ialu_regL_regL );
9915 %}
9917 // Integer MOD with Register
// div leaves quotient in LO and remainder in HI; mfhi fetches the remainder.
// NOTE(review): unlike divI_Reg_Reg below, there is no teq zero-divisor
// trap here — confirm ModI nodes are guaranteed a preceding zero check.
9918 instruct modI_Reg_Reg(mRegI dst, mRegI src1, mRegI src2) %{
9919 match(Set dst (ModI src1 src2));
9920 ins_cost(300);
9921 format %{ "modi $dst, $src1, $src2 @ modI_Reg_Reg" %}
9922 ins_encode %{
9923 Register dst = $dst$$Register;
9924 Register src1 = $src1$$Register;
9925 Register src2 = $src2$$Register;
9927 //if (UseLoongsonISA) {
// gsmod intentionally disabled: measured slower than div+mfhi (see note).
9928 if (0) {
9929 // 2016.08.10
9930 // Experiments show that gsmod is slower that div+mfhi.
9931 // So I just disable it here.
9932 __ gsmod(dst, src1, src2);
9933 } else {
9934 __ div(src1, src2);
9935 __ mfhi(dst);
9936 }
9937 %}
9939 //ins_pipe( ialu_mod );
9940 ins_pipe( ialu_regI_regI );
9941 %}
// Long remainder: Loongson single-instruction gsdmod when available,
// otherwise ddiv and read the remainder from HI.
9943 instruct modL_reg_reg(mRegL dst, mRegL src1, mRegL src2) %{
9944 match(Set dst (ModL src1 src2));
9945 format %{ "modL $dst, $src1, $src2 @modL_reg_reg" %}
9947 ins_encode %{
9948 Register dst = as_Register($dst$$reg);
9949 Register op1 = as_Register($src1$$reg);
9950 Register op2 = as_Register($src2$$reg);
9952 if (UseLoongsonISA) {
9953 __ gsdmod(dst, op1, op2);
9954 } else {
9955 __ ddiv(op1, op2);
9956 __ mfhi(dst);
9957 }
9958 %}
9959 ins_pipe( pipe_slow );
9960 %}
// 32-bit multiply, three-operand mul form.
9962 instruct mulI_Reg_Reg(mRegI dst, mRegI src1, mRegI src2) %{
9963 match(Set dst (MulI src1 src2));
9965 ins_cost(300);
9966 format %{ "mul $dst, $src1, $src2 @ mulI_Reg_Reg" %}
9967 ins_encode %{
9968 Register src1 = $src1$$Register;
9969 Register src2 = $src2$$Register;
9970 Register dst = $dst$$Register;
9972 __ mul(dst, src1, src2);
9973 %}
9974 ins_pipe( ialu_mult );
9975 %}
// Fused multiply-add: seed LO with src3, madd accumulates src1*src2 into
// HI/LO, then read the low word. Note madd clobbers HI/LO.
9977 instruct maddI_Reg_Reg(mRegI dst, mRegI src1, mRegI src2, mRegI src3) %{
9978 match(Set dst (AddI (MulI src1 src2) src3));
9980 ins_cost(999);
9981 format %{ "madd $dst, $src1 * $src2 + $src3 #@maddI_Reg_Reg" %}
9982 ins_encode %{
9983 Register src1 = $src1$$Register;
9984 Register src2 = $src2$$Register;
9985 Register src3 = $src3$$Register;
9986 Register dst = $dst$$Register;
9988 __ mtlo(src3);
9989 __ madd(src1, src2);
9990 __ mflo(dst);
9991 %}
9992 ins_pipe( ialu_mult );
9993 %}
// 32-bit divide. teq(R0, src2, 0x7) raises a trap when src2 == 0 so the
// JVM can turn it into ArithmeticException (MIPS div itself never traps).
9995 instruct divI_Reg_Reg(mRegI dst, mRegI src1, mRegI src2) %{
9996 match(Set dst (DivI src1 src2));
9998 ins_cost(300);
9999 format %{ "div $dst, $src1, $src2 @ divI_Reg_Reg" %}
10000 ins_encode %{
10001 Register src1 = $src1$$Register;
10002 Register src2 = $src2$$Register;
10003 Register dst = $dst$$Register;
10005 /* 2012/4/21 Jin: In MIPS, div does not cause exception.
10006 We must trap an exception manually. */
10007 __ teq(R0, src2, 0x7);
10009 if (UseLoongsonISA) {
10010 __ gsdiv(dst, src1, src2);
10011 } else {
10012 __ div(src1, src2);
// nops cover the HI/LO read-after-divide hazard on older pipelines —
// TODO(review): confirm whether they are still required on targets built for.
10014 __ nop();
10015 __ nop();
10016 __ mflo(dst);
10017 }
10018 %}
10019 ins_pipe( ialu_mod );
10020 %}
// Single-precision FP divide. FP divide-by-zero produces IEEE inf/NaN,
// which matches Java semantics, so no manual trap is needed here.
10022 instruct divF_Reg_Reg(regF dst, regF src1, regF src2) %{
10023 match(Set dst (DivF src1 src2));
10025 ins_cost(300);
10026 format %{ "divF $dst, $src1, $src2 @ divF_Reg_Reg" %}
10027 ins_encode %{
10028 FloatRegister src1 = $src1$$FloatRegister;
10029 FloatRegister src2 = $src2$$FloatRegister;
10030 FloatRegister dst = $dst$$FloatRegister;
10032 /* Here do we need to trap an exception manually ? */
10033 __ div_s(dst, src1, src2);
10034 %}
10035 ins_pipe( pipe_slow );
10036 %}
// Double-precision FP divide; same IEEE semantics note as divF.
10038 instruct divD_Reg_Reg(regD dst, regD src1, regD src2) %{
10039 match(Set dst (DivD src1 src2));
10041 ins_cost(300);
10042 format %{ "divD $dst, $src1, $src2 @ divD_Reg_Reg" %}
10043 ins_encode %{
10044 FloatRegister src1 = $src1$$FloatRegister;
10045 FloatRegister src2 = $src2$$FloatRegister;
10046 FloatRegister dst = $dst$$FloatRegister;
10048 /* Here do we need to trap an exception manually ? */
10049 __ div_d(dst, src1, src2);
10050 %}
10051 ins_pipe( pipe_slow );
10052 %}
// 64-bit multiply: Loongson three-operand gsdmult, or classic
// dmult (result in HI/LO) + mflo for the low 64 bits.
10054 instruct mulL_reg_reg(mRegL dst, mRegL src1, mRegL src2) %{
10055 match(Set dst (MulL src1 src2));
10056 format %{ "mulL $dst, $src1, $src2 @mulL_reg_reg" %}
10057 ins_encode %{
10058 Register dst = as_Register($dst$$reg);
10059 Register op1 = as_Register($src1$$reg);
10060 Register op2 = as_Register($src2$$reg);
10062 if (UseLoongsonISA) {
10063 __ gsdmult(dst, op1, op2);
10064 } else {
10065 __ dmult(op1, op2);
10066 __ mflo(dst);
10067 }
10068 %}
10069 ins_pipe( pipe_slow );
10070 %}
// Long * (int widened to long): identical code; relies on the int input
// already being sign-extended in its 64-bit register.
10072 instruct mulL_reg_regI2L(mRegL dst, mRegL src1, mRegI src2) %{
10073 match(Set dst (MulL src1 (ConvI2L src2)));
10074 format %{ "mulL $dst, $src1, $src2 @mulL_reg_regI2L" %}
10075 ins_encode %{
10076 Register dst = as_Register($dst$$reg);
10077 Register op1 = as_Register($src1$$reg);
10078 Register op2 = as_Register($src2$$reg);
10080 if (UseLoongsonISA) {
10081 __ gsdmult(dst, op1, op2);
10082 } else {
10083 __ dmult(op1, op2);
10084 __ mflo(dst);
10085 }
10086 %}
10087 ins_pipe( pipe_slow );
10088 %}
// 64-bit divide.
// NOTE(review): divI_Reg_Reg emits a teq zero-divisor trap but this rule
// does not — confirm DivL is always guarded by an explicit zero check.
10090 instruct divL_reg_reg(mRegL dst, mRegL src1, mRegL src2) %{
10091 match(Set dst (DivL src1 src2));
10092 format %{ "divL $dst, $src1, $src2 @divL_reg_reg" %}
10094 ins_encode %{
10095 Register dst = as_Register($dst$$reg);
10096 Register op1 = as_Register($src1$$reg);
10097 Register op2 = as_Register($src2$$reg);
10099 if (UseLoongsonISA) {
10100 __ gsddiv(dst, op1, op2);
10101 } else {
10102 __ ddiv(op1, op2);
10103 __ mflo(dst);
10104 }
10105 %}
10106 ins_pipe( pipe_slow );
10107 %}
// Straightforward single/double FP add, subtract and negate; each maps
// 1:1 onto the corresponding MIPS FPU instruction (_s = single, _d = double).
10109 instruct addF_reg_reg(regF dst, regF src1, regF src2) %{
10110 match(Set dst (AddF src1 src2));
10111 format %{ "AddF $dst, $src1, $src2 @addF_reg_reg" %}
10112 ins_encode %{
10113 FloatRegister src1 = as_FloatRegister($src1$$reg);
10114 FloatRegister src2 = as_FloatRegister($src2$$reg);
10115 FloatRegister dst = as_FloatRegister($dst$$reg);
10117 __ add_s(dst, src1, src2);
10118 %}
10119 ins_pipe( fpu_regF_regF );
10120 %}
10122 instruct subF_reg_reg(regF dst, regF src1, regF src2) %{
10123 match(Set dst (SubF src1 src2));
10124 format %{ "SubF $dst, $src1, $src2 @subF_reg_reg" %}
10125 ins_encode %{
10126 FloatRegister src1 = as_FloatRegister($src1$$reg);
10127 FloatRegister src2 = as_FloatRegister($src2$$reg);
10128 FloatRegister dst = as_FloatRegister($dst$$reg);
10130 __ sub_s(dst, src1, src2);
10131 %}
10132 ins_pipe( fpu_regF_regF );
10133 %}
10134 instruct addD_reg_reg(regD dst, regD src1, regD src2) %{
10135 match(Set dst (AddD src1 src2));
10136 format %{ "AddD $dst, $src1, $src2 @addD_reg_reg" %}
10137 ins_encode %{
10138 FloatRegister src1 = as_FloatRegister($src1$$reg);
10139 FloatRegister src2 = as_FloatRegister($src2$$reg);
10140 FloatRegister dst = as_FloatRegister($dst$$reg);
10142 __ add_d(dst, src1, src2);
10143 %}
10144 ins_pipe( fpu_regF_regF );
10145 %}
10147 instruct subD_reg_reg(regD dst, regD src1, regD src2) %{
10148 match(Set dst (SubD src1 src2));
10149 format %{ "SubD $dst, $src1, $src2 @subD_reg_reg" %}
10150 ins_encode %{
10151 FloatRegister src1 = as_FloatRegister($src1$$reg);
10152 FloatRegister src2 = as_FloatRegister($src2$$reg);
10153 FloatRegister dst = as_FloatRegister($dst$$reg);
10155 __ sub_d(dst, src1, src2);
10156 %}
10157 ins_pipe( fpu_regF_regF );
10158 %}
10160 instruct negF_reg(regF dst, regF src) %{
10161 match(Set dst (NegF src));
10162 format %{ "negF $dst, $src @negF_reg" %}
10163 ins_encode %{
10164 FloatRegister src = as_FloatRegister($src$$reg);
10165 FloatRegister dst = as_FloatRegister($dst$$reg);
10167 __ neg_s(dst, src);
10168 %}
10169 ins_pipe( fpu_regF_regF );
10170 %}
10172 instruct negD_reg(regD dst, regD src) %{
10173 match(Set dst (NegD src));
10174 format %{ "negD $dst, $src @negD_reg" %}
10175 ins_encode %{
10176 FloatRegister src = as_FloatRegister($src$$reg);
10177 FloatRegister dst = as_FloatRegister($dst$$reg);
10179 __ neg_d(dst, src);
10180 %}
10181 ins_pipe( fpu_regF_regF );
10182 %}
10185 instruct mulF_reg_reg(regF dst, regF src1, regF src2) %{
10186 match(Set dst (MulF src1 src2));
10187 format %{ "MULF $dst, $src1, $src2 @mulF_reg_reg" %}
10188 ins_encode %{
10189 FloatRegister src1 = $src1$$FloatRegister;
10190 FloatRegister src2 = $src2$$FloatRegister;
10191 FloatRegister dst = $dst$$FloatRegister;
10193 __ mul_s(dst, src1, src2);
10194 %}
10195 ins_pipe( fpu_regF_regF );
10196 %}
// Fused single-precision multiply-add. The huge ins_cost effectively
// disables selection of this rule (see the compatibility comment) —
// presumably because fused rounding differs from Java's separate
// mul-then-add semantics; TODO(review) confirm.
10198 instruct maddF_reg_reg(regF dst, regF src1, regF src2, regF src3) %{
10199 match(Set dst (AddF (MulF src1 src2) src3));
10200 // For compatibility reason (e.g. on the Loongson platform), disable this guy.
10201 ins_cost(44444);
10202 format %{ "maddF $dst, $src1, $src2, $src3 @maddF_reg_reg" %}
10203 ins_encode %{
10204 FloatRegister src1 = $src1$$FloatRegister;
10205 FloatRegister src2 = $src2$$FloatRegister;
10206 FloatRegister src3 = $src3$$FloatRegister;
10207 FloatRegister dst = $dst$$FloatRegister;
10209 __ madd_s(dst, src1, src2, src3);
10210 %}
10211 ins_pipe( fpu_regF_regF );
10212 %}
10214 // Mul two double precision floating piont number
10215 instruct mulD_reg_reg(regD dst, regD src1, regD src2) %{
10216 match(Set dst (MulD src1 src2));
10217 format %{ "MULD $dst, $src1, $src2 @mulD_reg_reg" %}
10218 ins_encode %{
10219 FloatRegister src1 = $src1$$FloatRegister;
10220 FloatRegister src2 = $src2$$FloatRegister;
10221 FloatRegister dst = $dst$$FloatRegister;
10223 __ mul_d(dst, src1, src2);
10224 %}
10225 ins_pipe( fpu_regF_regF );
10226 %}
// Double-precision fused multiply-add; disabled via ins_cost like maddF.
10228 instruct maddD_reg_reg(regD dst, regD src1, regD src2, regD src3) %{
10229 match(Set dst (AddD (MulD src1 src2) src3));
10230 // For compatibility reason (e.g. on the Loongson platform), disable this guy.
10231 ins_cost(44444);
10232 format %{ "maddD $dst, $src1, $src2, $src3 @maddD_reg_reg" %}
10233 ins_encode %{
10234 FloatRegister src1 = $src1$$FloatRegister;
10235 FloatRegister src2 = $src2$$FloatRegister;
10236 FloatRegister src3 = $src3$$FloatRegister;
10237 FloatRegister dst = $dst$$FloatRegister;
10239 __ madd_d(dst, src1, src2, src3);
10240 %}
10241 ins_pipe( fpu_regF_regF );
10242 %}
10244 instruct absF_reg(regF dst, regF src) %{
10245 match(Set dst (AbsF src));
10246 ins_cost(100);
10247 format %{ "absF $dst, $src @absF_reg" %}
10248 ins_encode %{
10249 FloatRegister src = as_FloatRegister($src$$reg);
10250 FloatRegister dst = as_FloatRegister($dst$$reg);
10252 __ abs_s(dst, src);
10253 %}
10254 ins_pipe( fpu_regF_regF );
10255 %}
10258 // intrinsics for math_native.
10259 // AbsD SqrtD CosD SinD TanD LogD Log10D
10261 instruct absD_reg(regD dst, regD src) %{
10262 match(Set dst (AbsD src));
10263 ins_cost(100);
10264 format %{ "absD $dst, $src @absD_reg" %}
10265 ins_encode %{
10266 FloatRegister src = as_FloatRegister($src$$reg);
10267 FloatRegister dst = as_FloatRegister($dst$$reg);
10269 __ abs_d(dst, src);
10270 %}
10271 ins_pipe( fpu_regF_regF );
10272 %}
10274 instruct sqrtD_reg(regD dst, regD src) %{
10275 match(Set dst (SqrtD src));
10276 ins_cost(100);
10277 format %{ "SqrtD $dst, $src @sqrtD_reg" %}
10278 ins_encode %{
10279 FloatRegister src = as_FloatRegister($src$$reg);
10280 FloatRegister dst = as_FloatRegister($dst$$reg);
10282 __ sqrt_d(dst, src);
10283 %}
10284 ins_pipe( fpu_regF_regF );
10285 %}
// Float sqrt is expressed in the IR as F->D widen, double sqrt, narrow;
// this rule collapses the whole pattern into a single sqrt_s.
10287 instruct sqrtF_reg(regF dst, regF src) %{
10288 match(Set dst (ConvD2F (SqrtD (ConvF2D src))));
10289 ins_cost(100);
10290 format %{ "SqrtF $dst, $src @sqrtF_reg" %}
10291 ins_encode %{
10292 FloatRegister src = as_FloatRegister($src$$reg);
10293 FloatRegister dst = as_FloatRegister($dst$$reg);
10295 __ sqrt_s(dst, src);
10296 %}
10297 ins_pipe( fpu_regF_regF );
10298 %}
10299 //----------------------------------Logical Instructions----------------------
10300 //__________________________________Integer Logical Instructions-------------
10302 //And Instuctions
10303 // And Register with Immediate
// General 32-bit immediate: materialize it in the scratch register AT,
// then a register-register and. (andi below handles the 16-bit case.)
10304 instruct andI_Reg_immI(mRegI dst, mRegI src1, immI src2) %{
10305 match(Set dst (AndI src1 src2));
10307 format %{ "and $dst, $src1, $src2 #@andI_Reg_immI" %}
10308 ins_encode %{
10309 Register dst = $dst$$Register;
10310 Register src = $src1$$Register;
10311 int val = $src2$$constant;
10313 __ move(AT, val);
10314 __ andr(dst, src, AT);
10315 %}
10316 ins_pipe( ialu_regI_regI );
10317 %}
// Immediate fits in andi's zero-extended 16-bit field: single instruction.
10319 instruct andI_Reg_imm_0_65535(mRegI dst, mRegI src1, immI_0_65535 src2) %{
10320 match(Set dst (AndI src1 src2));
10321 ins_cost(60);
10323 format %{ "and $dst, $src1, $src2 #@andI_Reg_imm_0_65535" %}
10324 ins_encode %{
10325 Register dst = $dst$$Register;
10326 Register src = $src1$$Register;
10327 int val = $src2$$constant;
10329 __ andi(dst, src, val);
10330 %}
10331 ins_pipe( ialu_regI_regI );
10332 %}
// Mask of the form (1<<size)-1: use the ext bit-field extract; is_int_mask
// returns the width of the contiguous low-bit mask.
10334 instruct andI_Reg_immI_nonneg_mask(mRegI dst, mRegI src1, immI_nonneg_mask mask) %{
10335 match(Set dst (AndI src1 mask));
10336 ins_cost(60);
10338 format %{ "and $dst, $src1, $mask #@andI_Reg_immI_nonneg_mask" %}
10339 ins_encode %{
10340 Register dst = $dst$$Register;
10341 Register src = $src1$$Register;
10342 int size = Assembler::is_int_mask($mask$$constant);
10344 __ ext(dst, src, 0, size);
10345 %}
10346 ins_pipe( ialu_regI_regI );
10347 %}
// 64-bit analogue of the rule above, using dext.
10349 instruct andL_Reg_immL_nonneg_mask(mRegL dst, mRegL src1, immL_nonneg_mask mask) %{
10350 match(Set dst (AndL src1 mask));
10351 ins_cost(60);
10353 format %{ "and $dst, $src1, $mask #@andL_Reg_immL_nonneg_mask" %}
10354 ins_encode %{
10355 Register dst = $dst$$Register;
10356 Register src = $src1$$Register;
10357 int size = Assembler::is_jlong_mask($mask$$constant);
10359 __ dext(dst, src, 0, size);
10360 %}
10361 ins_pipe( ialu_regI_regI );
10362 %}
// XOR with a zero-extended 16-bit immediate: single xori.
10364 instruct xorI_Reg_imm_0_65535(mRegI dst, mRegI src1, immI_0_65535 src2) %{
10365 match(Set dst (XorI src1 src2));
10366 ins_cost(60);
10368 format %{ "xori $dst, $src1, $src2 #@xorI_Reg_imm_0_65535" %}
10369 ins_encode %{
10370 Register dst = $dst$$Register;
10371 Register src = $src1$$Register;
10372 int val = $src2$$constant;
10374 __ xori(dst, src, val);
10375 %}
10376 ins_pipe( ialu_regI_regI );
10377 %}
// x ^ -1 == ~x, emitted as Loongson orn: gsorn(dst, R0, src) = R0 | ~src.
10379 instruct xorI_Reg_immI_M1(mRegI dst, mRegI src1, immI_M1 M1) %{
10380 match(Set dst (XorI src1 M1));
10381 predicate(UseLoongsonISA && Use3A2000);
10382 ins_cost(60);
10384 format %{ "xor $dst, $src1, $M1 #@xorI_Reg_immI_M1" %}
10385 ins_encode %{
10386 Register dst = $dst$$Register;
10387 Register src = $src1$$Register;
10389 __ gsorn(dst, R0, src);
10390 %}
10391 ins_pipe( ialu_regI_regI );
10392 %}
// Same NOT idiom when the int input is a truncated long; no explicit
// narrowing is emitted — the 32-bit consumer reads only the low word.
10394 instruct xorL2I_Reg_immI_M1(mRegI dst, mRegL src1, immI_M1 M1) %{
10395 match(Set dst (XorI (ConvL2I src1) M1));
10396 predicate(UseLoongsonISA && Use3A2000);
10397 ins_cost(60);
10399 format %{ "xor $dst, $src1, $M1 #@xorL2I_Reg_immI_M1" %}
10400 ins_encode %{
10401 Register dst = $dst$$Register;
10402 Register src = $src1$$Register;
10404 __ gsorn(dst, R0, src);
10405 %}
10406 ins_pipe( ialu_regI_regI );
10407 %}
// Long XOR with a zero-extended 16-bit immediate: single xori.
10409 instruct xorL_Reg_imm_0_65535(mRegL dst, mRegL src1, immL_0_65535 src2) %{
10410 match(Set dst (XorL src1 src2));
10411 ins_cost(60);
10413 format %{ "xori $dst, $src1, $src2 #@xorL_Reg_imm_0_65535" %}
10414 ins_encode %{
10415 Register dst = $dst$$Register;
10416 Register src = $src1$$Register;
10417 int val = $src2$$constant;
10419 __ xori(dst, src, val);
10420 %}
10421 ins_pipe( ialu_regI_regI );
10422 %}
10424 /*
10425 instruct xorL_Reg_immL_M1(mRegL dst, mRegL src1, immL_M1 M1) %{
10426 match(Set dst (XorL src1 M1));
10427 predicate(UseLoongsonISA);
10428 ins_cost(60);
10430 format %{ "xor $dst, $src1, $M1 #@xorL_Reg_immL_M1" %}
10431 ins_encode %{
10432 Register dst = $dst$$Register;
10433 Register src = $src1$$Register;
10435 __ gsorn(dst, R0, src);
10436 %}
10437 ins_pipe( ialu_regI_regI );
10438 %}
10439 */
// (0xFF & LoadB mem): load the byte zero-extended in one instruction.
// load_UB_enc emits an unsigned *byte* load, so the format mnemonic is
// "lbu" — the previous "lhu" (load halfword unsigned) was wrong and made
// -XX:+PrintOptoAssembly output misleading.
10441 instruct lbu_and_lmask(mRegI dst, memory mem, immI_255 mask) %{
10442 match(Set dst (AndI mask (LoadB mem)));
10443 ins_cost(60);
10445 format %{ "lbu $dst, $mem #@lbu_and_lmask" %}
10446 ins_encode(load_UB_enc(dst, mem));
10447 ins_pipe( ialu_loadI );
10448 %}
// (LoadB mem & 0xFF): mirror of lbu_and_lmask with the operands swapped.
// Same fix: the encoding is an unsigned byte load, so the format string
// must say "lbu", not "lhu".
10450 instruct lbu_and_rmask(mRegI dst, memory mem, immI_255 mask) %{
10451 match(Set dst (AndI (LoadB mem) mask));
10452 ins_cost(60);
10454 format %{ "lbu $dst, $mem #@lbu_and_rmask" %}
10455 ins_encode(load_UB_enc(dst, mem));
10456 ins_pipe( ialu_loadI );
10457 %}
10459 instruct andI_Reg_Reg(mRegI dst, mRegI src1, mRegI src2) %{
10460 match(Set dst (AndI src1 src2));
10462 format %{ "and $dst, $src1, $src2 #@andI_Reg_Reg" %}
10463 ins_encode %{
10464 Register dst = $dst$$Register;
10465 Register src1 = $src1$$Register;
10466 Register src2 = $src2$$Register;
10467 __ andr(dst, src1, src2);
10468 %}
10469 ins_pipe( ialu_regI_regI );
10470 %}
// src1 & (src2 ^ -1) == src1 & ~src2: single Loongson and-not.
10472 instruct andnI_Reg_nReg(mRegI dst, mRegI src1, mRegI src2, immI_M1 M1) %{
10473 match(Set dst (AndI src1 (XorI src2 M1)));
10474 predicate(UseLoongsonISA && Use3A2000);
10476 format %{ "andn $dst, $src1, $src2 #@andnI_Reg_nReg" %}
10477 ins_encode %{
10478 Register dst = $dst$$Register;
10479 Register src1 = $src1$$Register;
10480 Register src2 = $src2$$Register;
10482 __ gsandn(dst, src1, src2);
10483 %}
10484 ins_pipe( ialu_regI_regI );
10485 %}
// src1 | ~src2: single Loongson or-not.
10487 instruct ornI_Reg_nReg(mRegI dst, mRegI src1, mRegI src2, immI_M1 M1) %{
10488 match(Set dst (OrI src1 (XorI src2 M1)));
10489 predicate(UseLoongsonISA && Use3A2000);
10491 format %{ "orn $dst, $src1, $src2 #@ornI_Reg_nReg" %}
10492 ins_encode %{
10493 Register dst = $dst$$Register;
10494 Register src1 = $src1$$Register;
10495 Register src2 = $src2$$Register;
10497 __ gsorn(dst, src1, src2);
10498 %}
10499 ins_pipe( ialu_regI_regI );
10500 %}
// ~src1 & src2: same and-not with the operand roles swapped (note the
// emitted operand order gsandn(dst, src2, src1)).
10502 instruct andnI_nReg_Reg(mRegI dst, mRegI src1, mRegI src2, immI_M1 M1) %{
10503 match(Set dst (AndI (XorI src1 M1) src2));
10504 predicate(UseLoongsonISA && Use3A2000);
10506 format %{ "andn $dst, $src2, $src1 #@andnI_nReg_Reg" %}
10507 ins_encode %{
10508 Register dst = $dst$$Register;
10509 Register src1 = $src1$$Register;
10510 Register src2 = $src2$$Register;
10512 __ gsandn(dst, src2, src1);
10513 %}
10514 ins_pipe( ialu_regI_regI );
10515 %}
// ~src1 | src2: or-not with swapped operands.
10517 instruct ornI_nReg_Reg(mRegI dst, mRegI src1, mRegI src2, immI_M1 M1) %{
10518 match(Set dst (OrI (XorI src1 M1) src2));
10519 predicate(UseLoongsonISA && Use3A2000);
10521 format %{ "orn $dst, $src2, $src1 #@ornI_nReg_Reg" %}
10522 ins_encode %{
10523 Register dst = $dst$$Register;
10524 Register src1 = $src1$$Register;
10525 Register src2 = $src2$$Register;
10527 __ gsorn(dst, src2, src1);
10528 %}
10529 ins_pipe( ialu_regI_regI );
10530 %}
10532 // And Long Register with Register
10533 instruct andL_Reg_Reg(mRegL dst, mRegL src1, mRegL src2) %{
10534 match(Set dst (AndL src1 src2));
10535 format %{ "AND $dst, $src1, $src2 @ andL_Reg_Reg\n\t" %}
10536 ins_encode %{
10537 Register dst_reg = as_Register($dst$$reg);
10538 Register src1_reg = as_Register($src1$$reg);
10539 Register src2_reg = as_Register($src2$$reg);
10541 __ andr(dst_reg, src1_reg, src2_reg);
10542 %}
10543 ins_pipe( ialu_regL_regL );
10544 %}
// Long & (int widened to long): plain andr, relying on the int input
// being kept sign-extended in its 64-bit register.
10546 instruct andL_Reg_Reg_convI2L(mRegL dst, mRegL src1, mRegI src2) %{
10547 match(Set dst (AndL src1 (ConvI2L src2)));
10548 format %{ "AND $dst, $src1, $src2 @ andL_Reg_Reg_convI2L\n\t" %}
10549 ins_encode %{
10550 Register dst_reg = as_Register($dst$$reg);
10551 Register src1_reg = as_Register($src1$$reg);
10552 Register src2_reg = as_Register($src2$$reg);
10554 __ andr(dst_reg, src1_reg, src2_reg);
10555 %}
10556 ins_pipe( ialu_regL_regL );
10557 %}
// Long AND with an immediate in [0, 65535]: andi zero-extends the 16-bit
// immediate, which is exactly the required mask.
10559 instruct andL_Reg_imm_0_65535(mRegL dst, mRegL src1, immL_0_65535 src2) %{
10560 match(Set dst (AndL src1 src2));
10561 ins_cost(60);
10563 format %{ "and $dst, $src1, $src2 #@andL_Reg_imm_0_65535" %}
10564 ins_encode %{
10565 Register dst = $dst$$Register;
10566 Register src = $src1$$Register;
10567 long val = $src2$$constant;
10569 __ andi(dst, src, val);
10570 %}
10571 ins_pipe( ialu_regI_regI );
10572 %}
// (long & smallMask) truncated to int: the mask already clears the high
// bits, so the same single andi serves for the narrowed result.
10574 instruct andL2I_Reg_imm_0_65535(mRegI dst, mRegL src1, immL_0_65535 src2) %{
10575 match(Set dst (ConvL2I (AndL src1 src2)));
10576 ins_cost(60);
10578 format %{ "and $dst, $src1, $src2 #@andL2I_Reg_imm_0_65535" %}
10579 ins_encode %{
10580 Register dst = $dst$$Register;
10581 Register src = $src1$$Register;
10582 long val = $src2$$constant;
10584 __ andi(dst, src, val);
10585 %}
10586 ins_pipe( ialu_regI_regI );
10587 %}
10589 /*
10590 instruct andnL_Reg_nReg(mRegL dst, mRegL src1, mRegL src2, immL_M1 M1) %{
10591 match(Set dst (AndL src1 (XorL src2 M1)));
10592 predicate(UseLoongsonISA);
10594 format %{ "andn $dst, $src1, $src2 #@andnL_Reg_nReg" %}
10595 ins_encode %{
10596 Register dst = $dst$$Register;
10597 Register src1 = $src1$$Register;
10598 Register src2 = $src2$$Register;
10600 __ gsandn(dst, src1, src2);
10601 %}
10602 ins_pipe( ialu_regI_regI );
10603 %}
10604 */
10606 /*
10607 instruct ornL_Reg_nReg(mRegL dst, mRegL src1, mRegL src2, immL_M1 M1) %{
10608 match(Set dst (OrL src1 (XorL src2 M1)));
10609 predicate(UseLoongsonISA);
10611 format %{ "orn $dst, $src1, $src2 #@ornL_Reg_nReg" %}
10612 ins_encode %{
10613 Register dst = $dst$$Register;
10614 Register src1 = $src1$$Register;
10615 Register src2 = $src2$$Register;
10617 __ gsorn(dst, src1, src2);
10618 %}
10619 ins_pipe( ialu_regI_regI );
10620 %}
10621 */
10623 /*
10624 instruct andnL_nReg_Reg(mRegL dst, mRegL src1, mRegL src2, immL_M1 M1) %{
10625 match(Set dst (AndL (XorL src1 M1) src2));
10626 predicate(UseLoongsonISA);
10628 format %{ "andn $dst, $src2, $src1 #@andnL_nReg_Reg" %}
10629 ins_encode %{
10630 Register dst = $dst$$Register;
10631 Register src1 = $src1$$Register;
10632 Register src2 = $src2$$Register;
10634 __ gsandn(dst, src2, src1);
10635 %}
10636 ins_pipe( ialu_regI_regI );
10637 %}
10638 */
10640 /*
10641 instruct ornL_nReg_Reg(mRegL dst, mRegL src1, mRegL src2, immL_M1 M1) %{
10642 match(Set dst (OrL (XorL src1 M1) src2));
10643 predicate(UseLoongsonISA);
10645 format %{ "orn $dst, $src2, $src1 #@ornL_nReg_Reg" %}
10646 ins_encode %{
10647 Register dst = $dst$$Register;
10648 Register src1 = $src1$$Register;
10649 Register src2 = $src2$$Register;
10651 __ gsorn(dst, src2, src1);
10652 %}
10653 ins_pipe( ialu_regI_regI );
10654 %}
10655 */
// The five rules below implement in-place AND with specific negative
// constants by zeroing a bit-field: dins(dst, R0, pos, size) inserts
// `size` zero bits at `pos`.
// -8 = ~0b111: clear bits [2:0].
10657 instruct andL_Reg_immL_M8(mRegL dst, immL_M8 M8) %{
10658 match(Set dst (AndL dst M8));
10659 ins_cost(60);
10661 format %{ "and $dst, $dst, $M8 #@andL_Reg_immL_M8" %}
10662 ins_encode %{
10663 Register dst = $dst$$Register;
10665 __ dins(dst, R0, 0, 3);
10666 %}
10667 ins_pipe( ialu_regI_regI );
10668 %}
// -5 = ~0b100: clear bit 2 only.
10670 instruct andL_Reg_immL_M5(mRegL dst, immL_M5 M5) %{
10671 match(Set dst (AndL dst M5));
10672 ins_cost(60);
10674 format %{ "and $dst, $dst, $M5 #@andL_Reg_immL_M5" %}
10675 ins_encode %{
10676 Register dst = $dst$$Register;
10678 __ dins(dst, R0, 2, 1);
10679 %}
10680 ins_pipe( ialu_regI_regI );
10681 %}
// -7 = ~0b110: clear bits [2:1].
10683 instruct andL_Reg_immL_M7(mRegL dst, immL_M7 M7) %{
10684 match(Set dst (AndL dst M7));
10685 ins_cost(60);
10687 format %{ "and $dst, $dst, $M7 #@andL_Reg_immL_M7" %}
10688 ins_encode %{
10689 Register dst = $dst$$Register;
10691 __ dins(dst, R0, 1, 2);
10692 %}
10693 ins_pipe( ialu_regI_regI );
10694 %}
// -4 = ~0b11: clear bits [1:0].
10696 instruct andL_Reg_immL_M4(mRegL dst, immL_M4 M4) %{
10697 match(Set dst (AndL dst M4));
10698 ins_cost(60);
10700 format %{ "and $dst, $dst, $M4 #@andL_Reg_immL_M4" %}
10701 ins_encode %{
10702 Register dst = $dst$$Register;
10704 __ dins(dst, R0, 0, 2);
10705 %}
10706 ins_pipe( ialu_regI_regI );
10707 %}
// -121 = ~0b1111000: clear bits [6:3].
10709 instruct andL_Reg_immL_M121(mRegL dst, immL_M121 M121) %{
10710 match(Set dst (AndL dst M121));
10711 ins_cost(60);
10713 format %{ "and $dst, $dst, $M121 #@andL_Reg_immL_M121" %}
10714 ins_encode %{
10715 Register dst = $dst$$Register;
10717 __ dins(dst, R0, 3, 4);
10718 %}
10719 ins_pipe( ialu_regI_regI );
10720 %}
10722 // Or Long Register with Register
10723 instruct orL_Reg_Reg(mRegL dst, mRegL src1, mRegL src2) %{
10724 match(Set dst (OrL src1 src2));
10725 format %{ "OR $dst, $src1, $src2 @ orL_Reg_Reg\t" %}
10726 ins_encode %{
10727 Register dst_reg = $dst$$Register;
10728 Register src1_reg = $src1$$Register;
10729 Register src2_reg = $src2$$Register;
10731 __ orr(dst_reg, src1_reg, src2_reg);
10732 %}
10733 ins_pipe( ialu_regL_regL );
10734 %}
// OR where the first input is a pointer reinterpreted as a long
// (CastP2X); the cast is free — the raw bits are used directly.
10736 instruct orL_Reg_P2XReg(mRegL dst, mRegP src1, mRegL src2) %{
10737 match(Set dst (OrL (CastP2X src1) src2));
10738 format %{ "OR $dst, $src1, $src2 @ orL_Reg_P2XReg\t" %}
10739 ins_encode %{
10740 Register dst_reg = $dst$$Register;
10741 Register src1_reg = $src1$$Register;
10742 Register src2_reg = $src2$$Register;
10744 __ orr(dst_reg, src1_reg, src2_reg);
10745 %}
10746 ins_pipe( ialu_regL_regL );
10747 %}
10749 // Xor Long Register with Register
10750 instruct xorL_Reg_Reg(mRegL dst, mRegL src1, mRegL src2) %{
10751 match(Set dst (XorL src1 src2));
10752 format %{ "XOR $dst, $src1, $src2 @ xorL_Reg_Reg\t" %}
10753 ins_encode %{
10754 Register dst_reg = as_Register($dst$$reg);
10755 Register src1_reg = as_Register($src1$$reg);
10756 Register src2_reg = as_Register($src2$$reg);
10758 __ xorr(dst_reg, src1_reg, src2_reg);
10759 %}
10760 ins_pipe( ialu_regL_regL );
10761 %}
10763 // Shift Left by 8-bit immediate
// 32-bit shift left; MIPS sll uses only the low 5 bits of the shift
// amount, which matches Java's (shift & 31) semantics.
10764 instruct salI_Reg_imm(mRegI dst, mRegI src, immI8 shift) %{
10765 match(Set dst (LShiftI src shift));
10767 format %{ "SHL $dst, $src, $shift #@salI_Reg_imm" %}
10768 ins_encode %{
10769 Register src = $src$$Register;
10770 Register dst = $dst$$Register;
10771 int shamt = $shift$$constant;
10773 __ sll(dst, src, shamt);
10774 %}
10775 ins_pipe( ialu_regI_regI );
10776 %}
// Same pattern with a ConvL2I input; the truncation is implicit in the
// 32-bit sll. NOTE(review): this rule duplicates salI_RegL2I_imm further
// below — one of the two is redundant.
10778 instruct salL2I_Reg_imm(mRegI dst, mRegL src, immI8 shift) %{
10779 match(Set dst (LShiftI (ConvL2I src) shift));
10781 format %{ "SHL $dst, $src, $shift #@salL2I_Reg_imm" %}
10782 ins_encode %{
10783 Register src = $src$$Register;
10784 Register dst = $dst$$Register;
10785 int shamt = $shift$$constant;
10787 __ sll(dst, src, shamt);
10788 %}
10789 ins_pipe( ialu_regI_regI );
10790 %}
// (src << 16) & 0xFFFF0000: the AND is redundant after the shift, so a
// bare sll-by-16 suffices.
10792 instruct salI_Reg_imm_and_M65536(mRegI dst, mRegI src, immI_16 shift, immI_M65536 mask) %{
10793 match(Set dst (AndI (LShiftI src shift) mask));
10795 format %{ "SHL $dst, $src, $shift #@salI_Reg_imm_and_M65536" %}
10796 ins_encode %{
10797 Register src = $src$$Register;
10798 Register dst = $dst$$Register;
10800 __ sll(dst, src, 16);
10801 %}
10802 ins_pipe( ialu_regI_regI );
10803 %}
// ((int)(src & 7) << 16) >> 16: the masked value (0..7) already fits a
// short, so the sign-extension round-trip collapses to andi 7.
10805 instruct land7_2_s(mRegI dst, mRegL src, immL7 seven, immI_16 sixteen)
10806 %{
10807 match(Set dst (RShiftI (LShiftI (ConvL2I (AndL src seven)) sixteen) sixteen))
;
10809 format %{ "andi $dst, $src, 7\t# @land7_2_s" %}
10810 ins_encode %{
10811 Register src = $src$$Register;
10812 Register dst = $dst$$Register;
10814 __ andi(dst, src, 7);
10815 %}
10816 ins_pipe(ialu_regI_regI);
10817 %}
// ((src1 | imm) << 16) >> 16 folded to a plain ori. NOTE(review): this is
// only equivalent when the OR result is already a valid short; presumably
// immI_0_32767 plus the matched context guarantees that — confirm.
10819 instruct ori2s(mRegI dst, mRegI src1, immI_0_32767 src2, immI_16 sixteen)
10820 %{
10821 match(Set dst (RShiftI (LShiftI (OrI src1 src2) sixteen) sixteen));
10823 format %{ "ori $dst, $src1, $src2\t# @ori2s" %}
10824 ins_encode %{
10825 Register src = $src1$$Register;
10826 int val = $src2$$constant;
10827 Register dst = $dst$$Register;
10829 __ ori(dst, src, val);
10830 %}
10831 ins_pipe(ialu_regI_regI);
10832 %}
10834 // Logical Shift Right by 16, followed by Arithmetic Shift Left by 16.
10835 // This idiom is used by the compiler the i2s bytecode.
// seh = sign-extend halfword, a single-instruction i2s.
10836 instruct i2s(mRegI dst, mRegI src, immI_16 sixteen)
10837 %{
10838 match(Set dst (RShiftI (LShiftI src sixteen) sixteen));
10840 format %{ "i2s $dst, $src\t# @i2s" %}
10841 ins_encode %{
10842 Register src = $src$$Register;
10843 Register dst = $dst$$Register;
10845 __ seh(dst, src);
10846 %}
10847 ins_pipe(ialu_regI_regI);
10848 %}
10850 // Logical Shift Right by 24, followed by Arithmetic Shift Left by 24.
10851 // This idiom is used by the compiler for the i2b bytecode.
// seb = sign-extend byte, a single-instruction i2b.
10852 instruct i2b(mRegI dst, mRegI src, immI_24 twentyfour)
10853 %{
10854 match(Set dst (RShiftI (LShiftI src twentyfour) twentyfour));
10856 format %{ "i2b $dst, $src\t# @i2b" %}
10857 ins_encode %{
10858 Register src = $src$$Register;
10859 Register dst = $dst$$Register;
10861 __ seb(dst, src);
10862 %}
10863 ins_pipe(ialu_regI_regI);
10864 %}
// NOTE(review): duplicate of salL2I_Reg_imm above — both match
// (LShiftI (ConvL2I src) shift) with identical encoding; one is redundant.
10867 instruct salI_RegL2I_imm(mRegI dst, mRegL src, immI8 shift) %{
10868 match(Set dst (LShiftI (ConvL2I src) shift));
10870 format %{ "SHL $dst, $src, $shift #@salI_RegL2I_imm" %}
10871 ins_encode %{
10872 Register src = $src$$Register;
10873 Register dst = $dst$$Register;
10874 int shamt = $shift$$constant;
10876 __ sll(dst, src, shamt);
10877 %}
10878 ins_pipe( ialu_regI_regI );
10879 %}
10881 // Shift Left by 8-bit immediate
// Variable 32-bit shift: sllv takes the amount from a register
// (low 5 bits only, matching Java semantics).
10882 instruct salI_Reg_Reg(mRegI dst, mRegI src, mRegI shift) %{
10883 match(Set dst (LShiftI src shift));
10885 format %{ "SHL $dst, $src, $shift #@salI_Reg_Reg" %}
10886 ins_encode %{
10887 Register src = $src$$Register;
10888 Register dst = $dst$$Register;
10889 Register shamt = $shift$$Register;
10890 __ sllv(dst, src, shamt);
10891 %}
10892 ins_pipe( ialu_regI_regI );
10893 %}
10896 // Shift Left Long
10897 instruct salL_Reg_imm(mRegL dst, mRegL src, immI8 shift) %{
10898 //predicate(UseNewLongLShift);
10899 match(Set dst (LShiftL src shift));
10900 ins_cost(100);
10901 format %{ "salL $dst, $src, $shift @ salL_Reg_imm" %}
10902 ins_encode %{
10903 Register src_reg = as_Register($src$$reg);
10904 Register dst_reg = as_Register($dst$$reg);
10905 int shamt = $shift$$constant;
10907 if (__ is_simm(shamt, 5))
10908 __ dsll(dst_reg, src_reg, shamt);
10909 else
10910 {
10911 int sa = Assembler::low(shamt, 6);
10912 if (sa < 32) {
10913 __ dsll(dst_reg, src_reg, sa);
10914 } else {
10915 __ dsll32(dst_reg, src_reg, sa - 32);
10916 }
10917 }
10918 %}
10919 ins_pipe( ialu_regL_regL );
10920 %}
// Long shift-left of a widened int by an immediate. The ConvI2L is implicit:
// the int value is already sign-extended in its 64-bit register, so only the
// shift is emitted. Same dsll/dsll32 split as salL_Reg_imm.
// NOTE(review): this rule appears identical to salL_convI2L_Reg_imm below
// (same match and cost) — presumably one is redundant; confirm before removing.
10922 instruct salL_RegI2L_imm(mRegL dst, mRegI src, immI8 shift) %{
10923 //predicate(UseNewLongLShift);
10924 match(Set dst (LShiftL (ConvI2L src) shift));
10925 ins_cost(100);
10926 format %{ "salL $dst, $src, $shift @ salL_RegI2L_imm" %}
10927 ins_encode %{
10928 Register src_reg = as_Register($src$$reg);
10929 Register dst_reg = as_Register($dst$$reg);
10930 int shamt = $shift$$constant;
10932 if (__ is_simm(shamt, 5))
10933 __ dsll(dst_reg, src_reg, shamt);
10934 else
10935 {
10936 int sa = Assembler::low(shamt, 6);
10937 if (sa < 32) {
10938 __ dsll(dst_reg, src_reg, sa);
10939 } else {
10940 __ dsll32(dst_reg, src_reg, sa - 32);
10941 }
10942 }
10943 %}
10944 ins_pipe( ialu_regL_regL );
10945 %}
10947 // Shift Left Long
// Variable long shift: dsllv uses the low 6 bits of the shift register.
10948 instruct salL_Reg_Reg(mRegL dst, mRegL src, mRegI shift) %{
10949 //predicate(UseNewLongLShift);
10950 match(Set dst (LShiftL src shift));
10951 ins_cost(100);
10952 format %{ "salL $dst, $src, $shift @ salL_Reg_Reg" %}
10953 ins_encode %{
10954 Register src_reg = as_Register($src$$reg);
10955 Register dst_reg = as_Register($dst$$reg);
10957 __ dsllv(dst_reg, src_reg, $shift$$Register);
10958 %}
10959 ins_pipe( ialu_regL_regL );
10960 %}
// Long shift-left of a widened int by an immediate (dsll for 0..31,
// dsll32 for 32..63 after reducing the count mod 64).
// NOTE(review): duplicates salL_RegI2L_imm above (same match and cost) —
// confirm whether both rules are intended.
10962 instruct salL_convI2L_Reg_imm(mRegL dst, mRegI src, immI8 shift) %{
10963 match(Set dst (LShiftL (ConvI2L src) shift));
10964 ins_cost(100);
10965 format %{ "salL $dst, $src, $shift @ salL_convI2L_Reg_imm" %}
10966 ins_encode %{
10967 Register src_reg = as_Register($src$$reg);
10968 Register dst_reg = as_Register($dst$$reg);
10969 int shamt = $shift$$constant;
10971 if (__ is_simm(shamt, 5)) {
10972 __ dsll(dst_reg, src_reg, shamt);
10973 } else {
10974 int sa = Assembler::low(shamt, 6);
10975 if (sa < 32) {
10976 __ dsll(dst_reg, src_reg, sa);
10977 } else {
10978 __ dsll32(dst_reg, src_reg, sa - 32);
10979 }
10980 }
10981 %}
10982 ins_pipe( ialu_regL_regL );
10983 %}
10985 // Shift Right Long
// Arithmetic long right shift by immediate; count is masked to 0..63,
// dsra handles 0..31 and dsra32 handles 32..63.
10986 instruct sarL_Reg_imm(mRegL dst, mRegL src, immI8 shift) %{
10987 match(Set dst (RShiftL src shift));
10988 ins_cost(100);
10989 format %{ "sarL $dst, $src, $shift @ sarL_Reg_imm" %}
10990 ins_encode %{
10991 Register src_reg = as_Register($src$$reg);
10992 Register dst_reg = as_Register($dst$$reg);
10993 int shamt = ($shift$$constant & 0x3f);
10994 if (__ is_simm(shamt, 5))
10995 __ dsra(dst_reg, src_reg, shamt);
10996 else {
10997 int sa = Assembler::low(shamt, 6);
10998 if (sa < 32) {
10999 __ dsra(dst_reg, src_reg, sa);
11000 } else {
11001 __ dsra32(dst_reg, src_reg, sa - 32);
11002 }
11003 }
11004 %}
11005 ins_pipe( ialu_regL_regL );
11006 %}
// (int)(long >> s) for s in [32,63]: a single dsra32 both shifts and leaves a
// properly sign-extended 32-bit value, so the ConvL2I is free.
11008 instruct sarL2I_Reg_immI_32_63(mRegI dst, mRegL src, immI_32_63 shift) %{
11009 match(Set dst (ConvL2I (RShiftL src shift)));
11010 ins_cost(100);
11011 format %{ "sarL $dst, $src, $shift @ sarL2I_Reg_immI_32_63" %}
11012 ins_encode %{
11013 Register src_reg = as_Register($src$$reg);
11014 Register dst_reg = as_Register($dst$$reg);
11015 int shamt = $shift$$constant;
11017 __ dsra32(dst_reg, src_reg, shamt - 32);
11018 %}
11019 ins_pipe( ialu_regL_regL );
11020 %}
11022 // Shift Right Long arithmetically
// Variable arithmetic shift: dsrav uses the low 6 bits of the shift register.
11023 instruct sarL_Reg_Reg(mRegL dst, mRegL src, mRegI shift) %{
11024 //predicate(UseNewLongLShift);
11025 match(Set dst (RShiftL src shift));
11026 ins_cost(100);
11027 format %{ "sarL $dst, $src, $shift @ sarL_Reg_Reg" %}
11028 ins_encode %{
11029 Register src_reg = as_Register($src$$reg);
11030 Register dst_reg = as_Register($dst$$reg);
11032 __ dsrav(dst_reg, src_reg, $shift$$Register);
11033 %}
11034 ins_pipe( ialu_regL_regL );
11035 %}
11037 // Shift Right Long logically
// Variable logical shift: dsrlv uses the low 6 bits of the shift register.
11038 instruct slrL_Reg_Reg(mRegL dst, mRegL src, mRegI shift) %{
11039 match(Set dst (URShiftL src shift));
11040 ins_cost(100);
11041 format %{ "slrL $dst, $src, $shift @ slrL_Reg_Reg" %}
11042 ins_encode %{
11043 Register src_reg = as_Register($src$$reg);
11044 Register dst_reg = as_Register($dst$$reg);
11046 __ dsrlv(dst_reg, src_reg, $shift$$Register);
11047 %}
11048 ins_pipe( ialu_regL_regL );
11049 %}
// Logical long right shift by an immediate already known to be 0..31,
// so a single dsrl suffices (cheaper cost than the generic rule).
11051 instruct slrL_Reg_immI_0_31(mRegL dst, mRegL src, immI_0_31 shift) %{
11052 match(Set dst (URShiftL src shift));
11053 ins_cost(80);
11054 format %{ "slrL $dst, $src, $shift @ slrL_Reg_immI_0_31" %}
11055 ins_encode %{
11056 Register src_reg = as_Register($src$$reg);
11057 Register dst_reg = as_Register($dst$$reg);
11058 int shamt = $shift$$constant;
11060 __ dsrl(dst_reg, src_reg, shamt);
11061 %}
11062 ins_pipe( ialu_regL_regL );
11063 %}
// ((int)(long >>> s)) & 0x7fffffff collapses to one dext (extract 31 bits
// starting at bit s) on MIPS64r2.
11065 instruct slrL_Reg_immI_0_31_and_max_int(mRegI dst, mRegL src, immI_0_31 shift, immI_MaxI max_int) %{
11066 match(Set dst (AndI (ConvL2I (URShiftL src shift)) max_int));
11067 ins_cost(80);
11068 format %{ "dext $dst, $src, $shift, 31 @ slrL_Reg_immI_0_31_and_max_int" %}
11069 ins_encode %{
11070 Register src_reg = as_Register($src$$reg);
11071 Register dst_reg = as_Register($dst$$reg);
11072 int shamt = $shift$$constant;
11074 __ dext(dst_reg, src_reg, shamt, 31);
11075 %}
11076 ins_pipe( ialu_regL_regL );
11077 %}
// Logical right shift of a pointer reinterpreted as a long (CastP2X is a
// no-op at the register level); shift amount known to be 0..31.
11079 instruct slrL_P2XReg_immI_0_31(mRegL dst, mRegP src, immI_0_31 shift) %{
11080 match(Set dst (URShiftL (CastP2X src) shift));
11081 ins_cost(80);
11082 format %{ "slrL $dst, $src, $shift @ slrL_P2XReg_immI_0_31" %}
11083 ins_encode %{
11084 Register src_reg = as_Register($src$$reg);
11085 Register dst_reg = as_Register($dst$$reg);
11086 int shamt = $shift$$constant;
11088 __ dsrl(dst_reg, src_reg, shamt);
11089 %}
11090 ins_pipe( ialu_regL_regL );
11091 %}
// Logical long right shift by an immediate in 32..63: dsrl32 shifts by
// (shamt - 32) + 32 in one instruction.
11093 instruct slrL_Reg_immI_32_63(mRegL dst, mRegL src, immI_32_63 shift) %{
11094 match(Set dst (URShiftL src shift));
11095 ins_cost(80);
11096 format %{ "slrL $dst, $src, $shift @ slrL_Reg_immI_32_63" %}
11097 ins_encode %{
11098 Register src_reg = as_Register($src$$reg);
11099 Register dst_reg = as_Register($dst$$reg);
11100 int shamt = $shift$$constant;
11102 __ dsrl32(dst_reg, src_reg, shamt - 32);
11103 %}
11104 ins_pipe( ialu_regL_regL );
11105 %}
// (int)(long >>> s) for s > 32: after shifting by more than 32 the result
// fits in 31 bits, so it is already a valid sign-extended int — no extra
// truncation needed. The predicate restricts this to s in (32,63].
11107 instruct slrL_Reg_immI_convL2I(mRegI dst, mRegL src, immI_32_63 shift) %{
11108 match(Set dst (ConvL2I (URShiftL src shift)));
11109 predicate(n->in(1)->in(2)->get_int() > 32);
11110 ins_cost(80);
11111 format %{ "slrL $dst, $src, $shift @ slrL_Reg_immI_convL2I" %}
11112 ins_encode %{
11113 Register src_reg = as_Register($src$$reg);
11114 Register dst_reg = as_Register($dst$$reg);
11115 int shamt = $shift$$constant;
11117 __ dsrl32(dst_reg, src_reg, shamt - 32);
11118 %}
11119 ins_pipe( ialu_regL_regL );
11120 %}
// Logical right shift of a pointer-as-long by an immediate in 32..63.
11122 instruct slrL_P2XReg_immI_32_63(mRegL dst, mRegP src, immI_32_63 shift) %{
11123 match(Set dst (URShiftL (CastP2X src) shift));
11124 ins_cost(80);
11125 format %{ "slrL $dst, $src, $shift @ slrL_P2XReg_immI_32_63" %}
11126 ins_encode %{
11127 Register src_reg = as_Register($src$$reg);
11128 Register dst_reg = as_Register($dst$$reg);
11129 int shamt = $shift$$constant;
11131 __ dsrl32(dst_reg, src_reg, shamt - 32);
11132 %}
11133 ins_pipe( ialu_regL_regL );
11134 %}
11136 // Xor Instructions
11137 // Xor Register with Register
// The trailing sll-by-0 re-sign-extends the 32-bit result into the 64-bit
// register, keeping the canonical int representation.
11138 instruct xorI_Reg_Reg(mRegI dst, mRegI src1, mRegI src2) %{
11139 match(Set dst (XorI src1 src2));
11141 format %{ "XOR $dst, $src1, $src2 #@xorI_Reg_Reg" %}
11143 ins_encode %{
11144 Register dst = $dst$$Register;
11145 Register src1 = $src1$$Register;
11146 Register src2 = $src2$$Register;
11147 __ xorr(dst, src1, src2);
11148 __ sll(dst, dst, 0); /* long -> int */
11149 %}
11151 ins_pipe( ialu_regI_regI );
11152 %}
11154 // Or Instructions
11155 // Or Register with Register
// No re-sign-extension needed: OR of two canonical ints stays canonical.
11156 instruct orI_Reg_Reg(mRegI dst, mRegI src1, mRegI src2) %{
11157 match(Set dst (OrI src1 src2));
11159 format %{ "OR $dst, $src1, $src2 #@orI_Reg_Reg" %}
11160 ins_encode %{
11161 Register dst = $dst$$Register;
11162 Register src1 = $src1$$Register;
11163 Register src2 = $src2$$Register;
11164 __ orr(dst, src1, src2);
11165 %}
11167 ins_pipe( ialu_regI_regI );
11168 %}
// Matches (src >>> r) | ((src & 1) << l) where r + l == 32 (the predicate):
// the low bit is rotated to the top, then the remaining (r-1) shift is
// applied. The srl is skipped when r == 1.
11170 instruct rotI_shr_logical_Reg(mRegI dst, mRegI src, immI_0_31 rshift, immI_0_31 lshift, immI_1 one) %{
11171 match(Set dst (OrI (URShiftI src rshift) (LShiftI (AndI src one) lshift)));
11172 predicate(32 == ((n->in(1)->in(2)->get_int() + n->in(2)->in(2)->get_int())));
11174 format %{ "rotr $dst, $src, 1 ...\n\t"
11175 "srl $dst, $dst, ($rshift-1) @ rotI_shr_logical_Reg" %}
11176 ins_encode %{
11177 Register dst = $dst$$Register;
11178 Register src = $src$$Register;
11179 int rshift = $rshift$$constant;
11181 __ rotr(dst, src, 1);
11182 if (rshift - 1) {
11183 __ srl(dst, dst, rshift - 1);
11184 }
11185 %}
11187 ins_pipe( ialu_regI_regI );
11188 %}
// OR of a long with a pointer-as-long (CastP2X is register-level no-op).
11190 instruct orI_Reg_castP2X(mRegL dst, mRegL src1, mRegP src2) %{
11191 match(Set dst (OrI src1 (CastP2X src2)));
11193 format %{ "OR $dst, $src1, $src2 #@orI_Reg_castP2X" %}
11194 ins_encode %{
11195 Register dst = $dst$$Register;
11196 Register src1 = $src1$$Register;
11197 Register src2 = $src2$$Register;
11198 __ orr(dst, src1, src2);
11199 %}
11201 ins_pipe( ialu_regI_regI );
11202 %}
11204 // Logical Shift Right by 8-bit immediate
11205 instruct shr_logical_Reg_imm(mRegI dst, mRegI src, immI8 shift) %{
11206 match(Set dst (URShiftI src shift));
11207 // effect(KILL cr);
11209 format %{ "SRL $dst, $src, $shift #@shr_logical_Reg_imm" %}
11210 ins_encode %{
11211 Register src = $src$$Register;
11212 Register dst = $dst$$Register;
11213 int shift = $shift$$constant;
11215 __ srl(dst, src, shift);
11216 %}
11217 ins_pipe( ialu_regI_regI );
11218 %}
// (src >>> shift) & mask where mask is a contiguous low-bit mask: fused into
// one ext (bit-field extract). is_int_mask returns the mask's bit width.
11220 instruct shr_logical_Reg_imm_nonneg_mask(mRegI dst, mRegI src, immI_0_31 shift, immI_nonneg_mask mask) %{
11221 match(Set dst (AndI (URShiftI src shift) mask));
11223 format %{ "ext $dst, $src, $shift, one-bits($mask) #@shr_logical_Reg_imm_nonneg_mask" %}
11224 ins_encode %{
11225 Register src = $src$$Register;
11226 Register dst = $dst$$Register;
11227 int pos = $shift$$constant;
11228 int size = Assembler::is_int_mask($mask$$constant);
11230 __ ext(dst, src, pos, size);
11231 %}
11232 ins_pipe( ialu_regI_regI );
11233 %}
// Rotate-left int: (dst << l) | (dst >>> r) with (l + r) % 32 == 0 is a
// rotate-right by r. dst is both input and output.
11235 instruct rolI_Reg_immI_0_31(mRegI dst, immI_0_31 lshift, immI_0_31 rshift)
11236 %{
11237 predicate(0 == ((n->in(1)->in(2)->get_int() + n->in(2)->in(2)->get_int()) & 0x1f));
11238 match(Set dst (OrI (LShiftI dst lshift) (URShiftI dst rshift)));
11240 ins_cost(100);
11241 format %{ "rotr $dst, $dst, $rshift #@rolI_Reg_immI_0_31" %}
11242 ins_encode %{
11243 Register dst = $dst$$Register;
11244 int sa = $rshift$$constant;
11246 __ rotr(dst, dst, sa);
11247 %}
11248 ins_pipe( ialu_regI_regI );
11249 %}
// Rotate-left long expressed as rotate-right by rshift (0..31) via drotr;
// predicate requires l + r to be a multiple of 64.
11251 instruct rolL_Reg_immI_0_31(mRegL dst, immI_32_63 lshift, immI_0_31 rshift)
11252 %{
11253 predicate(0 == ((n->in(1)->in(2)->get_int() + n->in(2)->in(2)->get_int()) & 0x3f));
11254 match(Set dst (OrL (LShiftL dst lshift) (URShiftL dst rshift)));
11256 ins_cost(100);
11257 format %{ "rotr $dst, $dst, $rshift #@rolL_Reg_immI_0_31" %}
11258 ins_encode %{
11259 Register dst = $dst$$Register;
11260 int sa = $rshift$$constant;
11262 __ drotr(dst, dst, sa);
11263 %}
11264 ins_pipe( ialu_regI_regI );
11265 %}
// Rotate-left long where the equivalent rotate-right amount is 32..63,
// using drotr32 (rotates by rshift - 32 + 32).
11267 instruct rolL_Reg_immI_32_63(mRegL dst, immI_0_31 lshift, immI_32_63 rshift)
11268 %{
11269 predicate(0 == ((n->in(1)->in(2)->get_int() + n->in(2)->in(2)->get_int()) & 0x3f));
11270 match(Set dst (OrL (LShiftL dst lshift) (URShiftL dst rshift)));
11272 ins_cost(100);
11273 format %{ "rotr $dst, $dst, $rshift #@rolL_Reg_immI_32_63" %}
11274 ins_encode %{
11275 Register dst = $dst$$Register;
11276 int sa = $rshift$$constant;
11278 __ drotr32(dst, dst, sa - 32);
11279 %}
11280 ins_pipe( ialu_regI_regI );
11281 %}
// Rotate-right int: same rotr emission as rolI, but matches the OrI with the
// URShiftI operand first (the canonical rotate-right shape).
11283 instruct rorI_Reg_immI_0_31(mRegI dst, immI_0_31 rshift, immI_0_31 lshift)
11284 %{
11285 predicate(0 == ((n->in(1)->in(2)->get_int() + n->in(2)->in(2)->get_int()) & 0x1f));
11286 match(Set dst (OrI (URShiftI dst rshift) (LShiftI dst lshift)));
11288 ins_cost(100);
11289 format %{ "rotr $dst, $dst, $rshift #@rorI_Reg_immI_0_31" %}
11290 ins_encode %{
11291 Register dst = $dst$$Register;
11292 int sa = $rshift$$constant;
11294 __ rotr(dst, dst, sa);
11295 %}
11296 ins_pipe( ialu_regI_regI );
11297 %}
// Rotate-right long by 0..31 via drotr (URShiftL operand first).
11299 instruct rorL_Reg_immI_0_31(mRegL dst, immI_0_31 rshift, immI_32_63 lshift)
11300 %{
11301 predicate(0 == ((n->in(1)->in(2)->get_int() + n->in(2)->in(2)->get_int()) & 0x3f));
11302 match(Set dst (OrL (URShiftL dst rshift) (LShiftL dst lshift)));
11304 ins_cost(100);
11305 format %{ "rotr $dst, $dst, $rshift #@rorL_Reg_immI_0_31" %}
11306 ins_encode %{
11307 Register dst = $dst$$Register;
11308 int sa = $rshift$$constant;
11310 __ drotr(dst, dst, sa);
11311 %}
11312 ins_pipe( ialu_regI_regI );
11313 %}
// Rotate-right long by 32..63 via drotr32.
11315 instruct rorL_Reg_immI_32_63(mRegL dst, immI_32_63 rshift, immI_0_31 lshift)
11316 %{
11317 predicate(0 == ((n->in(1)->in(2)->get_int() + n->in(2)->in(2)->get_int()) & 0x3f));
11318 match(Set dst (OrL (URShiftL dst rshift) (LShiftL dst lshift)));
11320 ins_cost(100);
11321 format %{ "rotr $dst, $dst, $rshift #@rorL_Reg_immI_32_63" %}
11322 ins_encode %{
11323 Register dst = $dst$$Register;
11324 int sa = $rshift$$constant;
11326 __ drotr32(dst, dst, sa - 32);
11327 %}
11328 ins_pipe( ialu_regI_regI );
11329 %}
11331 // Logical Shift Right
// Variable int logical shift: srlv uses the low 5 bits of the shift register.
11332 instruct shr_logical_Reg_Reg(mRegI dst, mRegI src, mRegI shift) %{
11333 match(Set dst (URShiftI src shift));
11335 format %{ "SRL $dst, $src, $shift #@shr_logical_Reg_Reg" %}
11336 ins_encode %{
11337 Register src = $src$$Register;
11338 Register dst = $dst$$Register;
11339 Register shift = $shift$$Register;
11340 __ srlv(dst, src, shift);
11341 %}
11342 ins_pipe( ialu_regI_regI );
11343 %}
// Arithmetic int right shift by immediate.
11346 instruct shr_arith_Reg_imm(mRegI dst, mRegI src, immI8 shift) %{
11347 match(Set dst (RShiftI src shift));
11348 // effect(KILL cr);
11350 format %{ "SRA $dst, $src, $shift #@shr_arith_Reg_imm" %}
11351 ins_encode %{
11352 Register src = $src$$Register;
11353 Register dst = $dst$$Register;
11354 int shift = $shift$$constant;
11355 __ sra(dst, src, shift);
11356 %}
11357 ins_pipe( ialu_regI_regI );
11358 %}
// Arithmetic int right shift by register amount (srav, low 5 bits).
11360 instruct shr_arith_Reg_Reg(mRegI dst, mRegI src, mRegI shift) %{
11361 match(Set dst (RShiftI src shift));
11362 // effect(KILL cr);
11364 format %{ "SRA $dst, $src, $shift #@shr_arith_Reg_Reg" %}
11365 ins_encode %{
11366 Register src = $src$$Register;
11367 Register dst = $dst$$Register;
11368 Register shift = $shift$$Register;
11369 __ srav(dst, src, shift);
11370 %}
11371 ins_pipe( ialu_regI_regI );
11372 %}
11374 //----------Convert Int to Boolean---------------------------------------------
// dst = (src != 0) ? 1 : 0. When dst aliases src, src is first saved in AT
// because the daddiu would clobber it before movz can test it.
11376 instruct convI2B(mRegI dst, mRegI src) %{
11377 match(Set dst (Conv2B src));
11379 ins_cost(100);
11380 format %{ "convI2B $dst, $src @ convI2B" %}
11381 ins_encode %{
11382 Register dst = as_Register($dst$$reg);
11383 Register src = as_Register($src$$reg);
11385 if (dst != src) {
11386 __ daddiu(dst, R0, 1);
11387 __ movz(dst, R0, src);
11388 } else {
11389 __ move(AT, src);
11390 __ daddiu(dst, R0, 1);
11391 __ movz(dst, R0, AT);
11392 }
11393 %}
11395 ins_pipe( ialu_regL_regL );
11396 %}
// int -> long: sll-by-0 sign-extends; a no-op when dst == src because the
// int is already kept sign-extended in its 64-bit register.
11398 instruct convI2L_reg( mRegL dst, mRegI src) %{
11399 match(Set dst (ConvI2L src));
11401 ins_cost(100);
11402 format %{ "SLL $dst, $src @ convI2L_reg\t" %}
11403 ins_encode %{
11404 Register dst = as_Register($dst$$reg);
11405 Register src = as_Register($src$$reg);
11407 if(dst != src) __ sll(dst, src, 0);
11408 %}
11409 ins_pipe( ialu_regL_regL );
11410 %}
// long -> int: sll-by-0 truncates to 32 bits and sign-extends the result.
11413 instruct convL2I_reg( mRegI dst, mRegL src ) %{
11414 match(Set dst (ConvL2I src));
11416 format %{ "MOV $dst, $src @ convL2I_reg" %}
11417 ins_encode %{
11418 Register dst = as_Register($dst$$reg);
11419 Register src = as_Register($src$$reg);
11421 __ sll(dst, src, 0);
11422 %}
11424 ins_pipe( ialu_regI_regI );
11425 %}
// (long)(int)long round trip: a single sll-by-0 performs the truncate +
// sign-extend in one instruction.
11427 instruct convL2I2L_reg( mRegL dst, mRegL src ) %{
11428 match(Set dst (ConvI2L (ConvL2I src)));
11430 format %{ "sll $dst, $src, 0 @ convL2I2L_reg" %}
11431 ins_encode %{
11432 Register dst = as_Register($dst$$reg);
11433 Register src = as_Register($src$$reg);
11435 __ sll(dst, src, 0);
11436 %}
11438 ins_pipe( ialu_regI_regI );
11439 %}
// long -> double: move the 64-bit GPR into an FPR, then convert.
11441 instruct convL2D_reg( regD dst, mRegL src ) %{
11442 match(Set dst (ConvL2D src));
11443 format %{ "convL2D $dst, $src @ convL2D_reg" %}
11444 ins_encode %{
11445 Register src = as_Register($src$$reg);
11446 FloatRegister dst = as_FloatRegister($dst$$reg);
11448 __ dmtc1(src, dst);
11449 __ cvt_d_l(dst, dst);
11450 %}
11452 ins_pipe( pipe_slow );
11453 %}
// double -> long, fast path (no runtime call). trunc_l_d produces max_long on
// overflow/NaN; if the result equals max_long (built in AT as -1 >>> 1) the
// slow fix-up runs: for a negative source, dst = -1 - max_long == min_long;
// for NaN (c_un_d sets the condition), dst is forced to 0 by movt.
11455 instruct convD2L_reg_fast( mRegL dst, regD src ) %{
11456 match(Set dst (ConvD2L src));
11457 ins_cost(150);
11458 format %{ "convD2L $dst, $src @ convD2L_reg_fast" %}
11459 ins_encode %{
11460 Register dst = as_Register($dst$$reg);
11461 FloatRegister src = as_FloatRegister($src$$reg);
11463 Label Done;
11465 __ trunc_l_d(F30, src);
11466 // max_long: 0x7fffffffffffffff
11467 // __ set64(AT, 0x7fffffffffffffff);
11468 __ daddiu(AT, R0, -1);
11469 __ dsrl(AT, AT, 1);
11470 __ dmfc1(dst, F30);
11472 __ bne(dst, AT, Done);
11473 __ delayed()->mtc1(R0, F30);
11475 __ cvt_d_w(F30, F30);
11476 __ c_ult_d(src, F30);
11477 __ bc1f(Done);
11478 __ delayed()->daddiu(T9, R0, -1);
11480 __ c_un_d(src, src); //NaN?
11481 __ subu(dst, T9, AT);
11482 __ movt(dst, R0);
11484 __ bind(Done);
11485 %}
11487 ins_pipe( pipe_slow );
11488 %}
// double -> long, slow path: returns 0 for NaN up front, then truncates and
// checks the FCSR invalid-operation cause bit (0x10000); if set, falls back
// to SharedRuntime::d2l for the exact Java-specified result.
11490 instruct convD2L_reg_slow( mRegL dst, regD src ) %{
11491 match(Set dst (ConvD2L src));
11492 ins_cost(250);
11493 format %{ "convD2L $dst, $src @ convD2L_reg_slow" %}
11494 ins_encode %{
11495 Register dst = as_Register($dst$$reg);
11496 FloatRegister src = as_FloatRegister($src$$reg);
11498 Label L;
11500 __ c_un_d(src, src); //NaN?
11501 __ bc1t(L);
11502 __ delayed();
11503 __ move(dst, R0);
11505 __ trunc_l_d(F30, src);
11506 __ cfc1(AT, 31);
11507 __ li(T9, 0x10000);
11508 __ andr(AT, AT, T9);
11509 __ beq(AT, R0, L);
11510 __ delayed()->dmfc1(dst, F30);
11512 __ mov_d(F12, src);
11513 __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::d2l), 1);
11514 __ move(dst, V0);
11515 __ bind(L);
11516 %}
11518 ins_pipe( pipe_slow );
11519 %}
// float -> int, fast path: truncate, then zero the result if the source is
// NaN (c_un_s sets the FP condition, movt conditionally clears dst).
11521 instruct convF2I_reg_fast( mRegI dst, regF src ) %{
11522 match(Set dst (ConvF2I src));
11523 ins_cost(150);
11524 format %{ "convf2i $dst, $src @ convF2I_reg_fast" %}
11525 ins_encode %{
11526 Register dreg = $dst$$Register;
11527 FloatRegister fval = $src$$FloatRegister;
11529 __ trunc_w_s(F30, fval);
11530 __ mfc1(dreg, F30);
11531 __ c_un_s(fval, fval); //NaN?
11532 __ movt(dreg, R0);
11533 %}
11535 ins_pipe( pipe_slow );
11536 %}
// float -> int, slow path: NaN -> 0, else truncate and test the FCSR
// invalid-operation cause bit; on overflow call SharedRuntime::f2i.
// V0 is saved/restored around the leaf call when it is not the destination
// (see the 2014/01/08 note: call_VM_leaf clobbers V0).
11538 instruct convF2I_reg_slow( mRegI dst, regF src ) %{
11539 match(Set dst (ConvF2I src));
11540 ins_cost(250);
11541 format %{ "convf2i $dst, $src @ convF2I_reg_slow" %}
11542 ins_encode %{
11543 Register dreg = $dst$$Register;
11544 FloatRegister fval = $src$$FloatRegister;
11545 Label L;
11547 __ c_un_s(fval, fval); //NaN?
11548 __ bc1t(L);
11549 __ delayed();
11550 __ move(dreg, R0);
11552 __ trunc_w_s(F30, fval);
11554 /* Call SharedRuntime:f2i() to do valid convention */
11555 __ cfc1(AT, 31);
11556 __ li(T9, 0x10000);
11557 __ andr(AT, AT, T9);
11558 __ beq(AT, R0, L);
11559 __ delayed()->mfc1(dreg, F30);
11561 __ mov_s(F12, fval);
11563 /* 2014/01/08 Fu : This bug was found when running ezDS's control-panel.
11564 * J 982 C2 javax.swing.text.BoxView.layoutMajorAxis(II[I[I)V (283 bytes) @ 0x000000555c46aa74
11565 *
11566 * An interger array index has been assigned to V0, and then changed from 1 to Integer.MAX_VALUE.
11567 * V0 is corrupted during call_VM_leaf(), and should be preserved.
11568 */
11569 if(dreg != V0) {
11570 __ push(V0);
11571 }
11572 __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::f2i), 1);
11573 if(dreg != V0) {
11574 __ move(dreg, V0);
11575 __ pop(V0);
11576 }
11577 __ bind(L);
11578 %}
11580 ins_pipe( pipe_slow );
11581 %}
// float -> long, fast path: truncate to 64-bit, zero the result on NaN.
11583 instruct convF2L_reg_fast( mRegL dst, regF src ) %{
11584 match(Set dst (ConvF2L src));
11585 ins_cost(150);
11586 format %{ "convf2l $dst, $src @ convF2L_reg_fast" %}
11587 ins_encode %{
11588 Register dreg = $dst$$Register;
11589 FloatRegister fval = $src$$FloatRegister;
11591 __ trunc_l_s(F30, fval);
11592 __ dmfc1(dreg, F30);
11593 __ c_un_s(fval, fval); //NaN?
11594 __ movt(dreg, R0);
11595 %}
11597 ins_pipe( pipe_slow );
11598 %}
// float -> long, slow path: NaN -> 0; on FCSR invalid-operation (overflow)
// call SharedRuntime::f2l and take the result from V0.
11600 instruct convF2L_reg_slow( mRegL dst, regF src ) %{
11601 match(Set dst (ConvF2L src));
11602 ins_cost(250);
11603 format %{ "convf2l $dst, $src @ convF2L_reg_slow" %}
11604 ins_encode %{
11605 Register dst = as_Register($dst$$reg);
11606 FloatRegister fval = $src$$FloatRegister;
11607 Label L;
11609 __ c_un_s(fval, fval); //NaN?
11610 __ bc1t(L);
11611 __ delayed();
11612 __ move(dst, R0);
11614 __ trunc_l_s(F30, fval);
11615 __ cfc1(AT, 31);
11616 __ li(T9, 0x10000);
11617 __ andr(AT, AT, T9);
11618 __ beq(AT, R0, L);
11619 __ delayed()->dmfc1(dst, F30);
11621 __ mov_s(F12, fval);
11622 __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::f2l), 1);
11623 __ move(dst, V0);
11624 __ bind(L);
11625 %}
11627 ins_pipe( pipe_slow );
11628 %}
// long -> float: move the 64-bit GPR into an FPR, then convert.
11630 instruct convL2F_reg( regF dst, mRegL src ) %{
11631 match(Set dst (ConvL2F src));
11632 format %{ "convl2f $dst, $src @ convL2F_reg" %}
11633 ins_encode %{
11634 FloatRegister dst = $dst$$FloatRegister;
11635 Register src = as_Register($src$$reg);
11638 __ dmtc1(src, dst);
11639 __ cvt_s_l(dst, dst);
11640 %}
11642 ins_pipe( pipe_slow );
11643 %}
// int -> float: move the 32-bit GPR into an FPR, then convert word to single.
11645 instruct convI2F_reg( regF dst, mRegI src ) %{
11646 match(Set dst (ConvI2F src));
11647 format %{ "convi2f $dst, $src @ convI2F_reg" %}
11648 ins_encode %{
11649 Register src = $src$$Register;
11650 FloatRegister dst = $dst$$FloatRegister;
11652 __ mtc1(src, dst);
11653 __ cvt_s_w(dst, dst);
11654 %}
11656 ins_pipe( fpu_regF_regF );
11657 %}
// CmpLTMask against 0: dst = (p < 0) ? -1 : 0, which is just the sign bit
// smeared across the word by an arithmetic shift of 31.
11659 instruct cmpLTMask_immI0( mRegI dst, mRegI p, immI0 zero ) %{
11660 match(Set dst (CmpLTMask p zero));
11661 ins_cost(100);
11663 format %{ "sra $dst, $p, 31 @ cmpLTMask_immI0" %}
11664 ins_encode %{
11665 Register src = $p$$Register;
11666 Register dst = $dst$$Register;
11668 __ sra(dst, src, 31);
11669 %}
11670 ins_pipe( pipe_slow );
11671 %}
// dst = (p < q) ? -1 : 0: slt yields 0/1, then 0 - dst turns 1 into -1.
11674 instruct cmpLTMask( mRegI dst, mRegI p, mRegI q ) %{
11675 match(Set dst (CmpLTMask p q));
11676 ins_cost(400);
11678 format %{ "cmpLTMask $dst, $p, $q @ cmpLTMask" %}
11679 ins_encode %{
11680 Register p = $p$$Register;
11681 Register q = $q$$Register;
11682 Register dst = $dst$$Register;
11684 __ slt(dst, p, q);
11685 __ subu(dst, R0, dst);
11686 %}
11687 ins_pipe( pipe_slow );
11688 %}
// Pointer -> boolean: dst = (src != NULL) ? 1 : 0. Same AT-save trick as
// convI2B when dst aliases src.
11690 instruct convP2B(mRegI dst, mRegP src) %{
11691 match(Set dst (Conv2B src));
11693 ins_cost(100);
11694 format %{ "convP2B $dst, $src @ convP2B" %}
11695 ins_encode %{
11696 Register dst = as_Register($dst$$reg);
11697 Register src = as_Register($src$$reg);
11699 if (dst != src) {
11700 __ daddiu(dst, R0, 1);
11701 __ movz(dst, R0, src);
11702 } else {
11703 __ move(AT, src);
11704 __ daddiu(dst, R0, 1);
11705 __ movz(dst, R0, AT);
11706 }
11707 %}
11709 ins_pipe( ialu_regL_regL );
11710 %}
// int -> double: move GPR to FPR, convert word to double (always exact).
11713 instruct convI2D_reg_reg(regD dst, mRegI src) %{
11714 match(Set dst (ConvI2D src));
11715 format %{ "conI2D $dst, $src @convI2D_reg" %}
11716 ins_encode %{
11717 Register src = $src$$Register;
11718 FloatRegister dst = $dst$$FloatRegister;
11719 __ mtc1(src, dst);
11720 __ cvt_d_w(dst, dst);
11721 %}
11722 ins_pipe( fpu_regF_regF );
11723 %}
// float -> double widening (always exact).
11725 instruct convF2D_reg_reg(regD dst, regF src) %{
11726 match(Set dst (ConvF2D src));
11727 format %{ "convF2D $dst, $src\t# @convF2D_reg_reg" %}
11728 ins_encode %{
11729 FloatRegister dst = $dst$$FloatRegister;
11730 FloatRegister src = $src$$FloatRegister;
11732 __ cvt_d_s(dst, src);
11733 %}
11734 ins_pipe( fpu_regF_regF );
11735 %}
// double -> float narrowing (rounds per current FP rounding mode).
11737 instruct convD2F_reg_reg(regF dst, regD src) %{
11738 match(Set dst (ConvD2F src));
11739 format %{ "convD2F $dst, $src\t# @convD2F_reg_reg" %}
11740 ins_encode %{
11741 FloatRegister dst = $dst$$FloatRegister;
11742 FloatRegister src = $src$$FloatRegister;
11744 __ cvt_s_d(dst, src);
11745 %}
11746 ins_pipe( fpu_regF_regF );
11747 %}
11749 // Convert a double to an int. If the double is a NAN, stuff a zero in instead.
// Fast path: trunc_w_d yields max_int on overflow/NaN. If dst == max_int the
// fix-up runs: for a negative source dst = -1 - max_int == min_int (subu32);
// for NaN, movt forces dst to 0.
11750 instruct convD2I_reg_reg_fast( mRegI dst, regD src ) %{
11751 match(Set dst (ConvD2I src));
11753 ins_cost(150);
11754 format %{ "convD2I $dst, $src\t# @ convD2I_reg_reg_fast" %}
11756 ins_encode %{
11757 FloatRegister src = $src$$FloatRegister;
11758 Register dst = $dst$$Register;
11760 Label Done;
11762 __ trunc_w_d(F30, src);
11763 // max_int: 2147483647
11764 __ move(AT, 0x7fffffff);
11765 __ mfc1(dst, F30);
11767 __ bne(dst, AT, Done);
11768 __ delayed()->mtc1(R0, F30);
11770 __ cvt_d_w(F30, F30);
11771 __ c_ult_d(src, F30);
11772 __ bc1f(Done);
11773 __ delayed()->addiu(T9, R0, -1);
11775 __ c_un_d(src, src); //NaN?
11776 __ subu32(dst, T9, AT);
11777 __ movt(dst, R0);
11779 __ bind(Done);
11780 %}
11781 ins_pipe( pipe_slow );
11782 %}
// double -> int, slow path: truncate, test the FCSR invalid-operation cause
// bit, and on overflow/NaN call SharedRuntime::d2i for the exact result.
11784 instruct convD2I_reg_reg_slow( mRegI dst, regD src ) %{
11785 match(Set dst (ConvD2I src));
11787 ins_cost(250);
11788 format %{ "convD2I $dst, $src\t# @ convD2I_reg_reg_slow" %}
11790 ins_encode %{
11791 FloatRegister src = $src$$FloatRegister;
11792 Register dst = $dst$$Register;
11793 Label L;
11795 __ trunc_w_d(F30, src);
11796 __ cfc1(AT, 31);
11797 __ li(T9, 0x10000);
11798 __ andr(AT, AT, T9);
11799 __ beq(AT, R0, L);
11800 __ delayed()->mfc1(dst, F30);
11802 __ mov_d(F12, src);
11803 __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::d2i), 1);
11804 __ move(dst, V0);
11805 __ bind(L);
11807 %}
11808 ins_pipe( pipe_slow );
11809 %}
11811 // Convert oop pointer into compressed form
// May-be-null variant (predicate excludes NotNull); encode_heap_oop handles
// the NULL case. Works in place, so src is copied to dst first if needed.
11812 instruct encodeHeapOop(mRegN dst, mRegP src) %{
11813 predicate(n->bottom_type()->make_ptr()->ptr() != TypePtr::NotNull);
11814 match(Set dst (EncodeP src));
11815 format %{ "encode_heap_oop $dst,$src" %}
11816 ins_encode %{
11817 Register src = $src$$Register;
11818 Register dst = $dst$$Register;
11819 if (src != dst) {
11820 __ move(dst, src);
11821 }
11822 __ encode_heap_oop(dst);
11823 %}
11824 ins_pipe( ialu_regL_regL );
11825 %}
// Known-non-null variant: skips the NULL check inside the encode helper.
11827 instruct encodeHeapOop_not_null(mRegN dst, mRegP src) %{
11828 predicate(n->bottom_type()->make_ptr()->ptr() == TypePtr::NotNull);
11829 match(Set dst (EncodeP src));
11830 format %{ "encode_heap_oop_not_null $dst,$src @ encodeHeapOop_not_null" %}
11831 ins_encode %{
11832 __ encode_heap_oop_not_null($dst$$Register, $src$$Register);
11833 %}
11834 ins_pipe( ialu_regL_regL );
11835 %}
// Decompress a narrow oop that may be null; decode works in place, so the
// value is moved into dst first when the registers differ.
11837 instruct decodeHeapOop(mRegP dst, mRegN src) %{
11838 predicate(n->bottom_type()->is_ptr()->ptr() != TypePtr::NotNull &&
11839 n->bottom_type()->is_ptr()->ptr() != TypePtr::Constant);
11840 match(Set dst (DecodeN src));
11841 format %{ "decode_heap_oop $dst,$src @ decodeHeapOop" %}
11842 ins_encode %{
11843 Register s = $src$$Register;
11844 Register d = $dst$$Register;
11845 if (s != d) {
11846 __ move(d, s);
11847 }
11848 __ decode_heap_oop(d);
11849 %}
11850 ins_pipe( ialu_regL_regL );
11851 %}
// Decompress a narrow oop known to be non-null; uses the two-register helper
// when src and dst differ, the in-place form otherwise.
11853 instruct decodeHeapOop_not_null(mRegP dst, mRegN src) %{
11854 predicate(n->bottom_type()->is_ptr()->ptr() == TypePtr::NotNull ||
11855 n->bottom_type()->is_ptr()->ptr() == TypePtr::Constant);
11856 match(Set dst (DecodeN src));
11857 format %{ "decode_heap_oop_not_null $dst,$src @ decodeHeapOop_not_null" %}
11858 ins_encode %{
11859 Register s = $src$$Register;
11860 Register d = $dst$$Register;
11861 if (s != d) {
11862 __ decode_heap_oop_not_null(d, s);
11863 } else {
11864 __ decode_heap_oop_not_null(d);
11865 }
11866 %}
11867 ins_pipe( ialu_regL_regL );
11868 %}
// Compress a klass pointer (always non-null).
11870 instruct encodeKlass_not_null(mRegN dst, mRegP src) %{
11871 match(Set dst (EncodePKlass src));
// Fixed debug format text: this rule encodes a klass, not a heap oop
// (the old text was copy-pasted from encodeHeapOop_not_null).
11872 format %{ "encode_klass_not_null $dst,$src @ encodeKlass_not_null" %}
11873 ins_encode %{
11874 __ encode_klass_not_null($dst$$Register, $src$$Register);
11875 %}
11876 ins_pipe( ialu_regL_regL );
11877 %}
// Decompress a narrow klass pointer; two-register helper when src != dst,
// in-place form otherwise.
11879 instruct decodeKlass_not_null(mRegP dst, mRegN src) %{
11880 match(Set dst (DecodeNKlass src));
11881 format %{ "decode_heap_klass_not_null $dst,$src" %}
11882 ins_encode %{
11883 Register s = $src$$Register;
11884 Register d = $dst$$Register;
11885 if (s != d) {
11886 __ decode_klass_not_null(d, s);
11887 } else {
11888 __ decode_klass_not_null(d);
11889 }
11890 %}
11891 ins_pipe( ialu_regL_regL );
11892 %}
11894 //FIXME
// Load the current JavaThread* — either cached in the TREG register
// (OPT_THREAD builds) or recomputed via get_thread().
11895 instruct tlsLoadP(mRegP dst) %{
11896 match(Set dst (ThreadLocal));
11898 ins_cost(0);
11899 format %{ " get_thread in $dst #@tlsLoadP" %}
11900 ins_encode %{
11901 Register dst = $dst$$Register;
11902 #ifdef OPT_THREAD
11903 __ move(dst, TREG);
11904 #else
11905 __ get_thread(dst);
11906 #endif
11907 %}
11909 ins_pipe( ialu_loadI );
11910 %}
// CheckCastPP is a type-system-only node: no code is emitted.
11913 instruct checkCastPP( mRegP dst ) %{
11914 match(Set dst (CheckCastPP dst));
11916 format %{ "#checkcastPP of $dst (empty encoding) #@chekCastPP" %}
11917 ins_encode( /*empty encoding*/ );
11918 ins_pipe( empty );
11919 %}
// CastPP is a compiler-only pointer cast: zero-size, no code emitted.
11921 instruct castPP(mRegP dst)
11922 %{
11923 match(Set dst (CastPP dst));
11925 size(0);
11926 format %{ "# castPP of $dst" %}
11927 ins_encode(/* empty encoding */);
11928 ins_pipe(empty);
11929 %}
// CastII is a compiler-only int range cast: no code emitted.
11931 instruct castII( mRegI dst ) %{
11932 match(Set dst (CastII dst));
11933 format %{ "#castII of $dst empty encoding" %}
11934 ins_encode( /*empty encoding*/ );
11935 ins_cost(0);
11936 ins_pipe( empty );
11937 %}
11939 // Return Instruction
11940 // Remove the return address & jump to it.
// jr RA plus the mandatory branch-delay-slot nop.
11941 instruct Ret() %{
11942 match(Return);
11943 format %{ "RET #@Ret" %}
11945 ins_encode %{
11946 __ jr(RA);
11947 __ nop();
11948 %}
11950 ins_pipe( pipe_jump );
11951 %}
11953 /*
11954 // For Loongson CPUs, jr seems too slow, so this rule shouldn't be imported.
11955 instruct jumpXtnd(mRegL switch_val) %{
11956 match(Jump switch_val);
11958 ins_cost(350);
11960 format %{ "load T9 <-- [$constanttablebase, $switch_val, $constantoffset] @ jumpXtnd\n\t"
11961 "jr T9\n\t"
11962 "nop" %}
11963 ins_encode %{
11964 Register table_base = $constanttablebase;
11965 int con_offset = $constantoffset;
11966 Register switch_reg = $switch_val$$Register;
11968 if (UseLoongsonISA) {
11969 if (Assembler::is_simm(con_offset, 8)) {
11970 __ gsldx(T9, table_base, switch_reg, con_offset);
11971 } else if (Assembler::is_simm16(con_offset)) {
11972 __ daddu(T9, table_base, switch_reg);
11973 __ ld(T9, T9, con_offset);
11974 } else {
11975 __ move(T9, con_offset);
11976 __ daddu(AT, table_base, switch_reg);
11977 __ gsldx(T9, AT, T9, 0);
11978 }
11979 } else {
11980 if (Assembler::is_simm16(con_offset)) {
11981 __ daddu(T9, table_base, switch_reg);
11982 __ ld(T9, T9, con_offset);
11983 } else {
11984 __ move(T9, con_offset);
11985 __ daddu(AT, table_base, switch_reg);
11986 __ daddu(AT, T9, AT);
11987 __ ld(T9, AT, 0);
11988 }
11989 }
11991 __ jr(T9);
11992 __ nop();
11994 %}
11995 ins_pipe(pipe_jump);
11996 %}
11997 */
11999 // Jump Direct - Label defines a relative address from JMP
// Unconditional branch to a label. The `if(&L)` test presumably distinguishes
// a bound label from a NULL label pointer (branch target patched later) —
// NOTE(review): dereferencing then re-taking the address is unusual; confirm
// $labl$$label can be NULL here. The nop fills the branch delay slot.
12000 instruct jmpDir(label labl) %{
12001 match(Goto);
12002 effect(USE labl);
12004 ins_cost(300);
12005 format %{ "JMP $labl #@jmpDir" %}
12007 ins_encode %{
12008 Label &L = *($labl$$label);
12009 if(&L)
12010 __ b(L);
12011 else
12012 __ b(int(0));
12013 __ nop();
12014 %}
12016 ins_pipe( pipe_jump );
12017 ins_pc_relative(1);
12018 %}
12022 // Tail Jump; remove the return address; jump to target.
12023 // TailCall above leaves the return address around.
12024 // TailJump is used in only one place, the rethrow_Java stub (fancy_jump=2).
12025 // ex_oop (Exception Oop) is needed in %o0 at the jump. As there would be a
12026 // "restore" before this instruction (in Epilogue), we need to materialize it
12027 // in %i0.
12028 //FIXME
// Moves the exception oop to V0 and the throwing PC (RA) to V1 — the
// registers the forward-exception/exception-blob stubs expect — then jumps.
12029 instruct tailjmpInd(mRegP jump_target,mRegP ex_oop) %{
12030 match( TailJump jump_target ex_oop );
12031 ins_cost(200);
12032 format %{ "Jmp $jump_target ; ex_oop = $ex_oop #@tailjmpInd" %}
12033 ins_encode %{
12034 Register target = $jump_target$$Register;
12036 /* 2012/9/14 Jin: V0, V1 are indicated in:
12037 * [stubGenerator_mips.cpp] generate_forward_exception()
12038 * [runtime_mips.cpp] OptoRuntime::generate_exception_blob()
12039 */
12040 Register oop = $ex_oop$$Register;
12041 Register exception_oop = V0;
12042 Register exception_pc = V1;
12044 __ move(exception_pc, RA);
12045 __ move(exception_oop, oop);
12047 __ jr(target);
12048 __ nop();
12049 %}
12050 ins_pipe( pipe_jump );
12051 %}
12053 // ============================================================================
12054 // Procedure Call/Return Instructions
12055 // Call Java Static Instruction
12056 // Note: If this code changes, the corresponding ret_addr_offset() and
12057 // compute_padding() functions will have to be adjusted.
// Encoding is delegated to the shared Java_Static_Call encode class;
// 16-byte alignment keeps the call site patchable.
12058 instruct CallStaticJavaDirect(method meth) %{
12059 match(CallStaticJava);
12060 effect(USE meth);
12062 ins_cost(300);
12063 format %{ "CALL,static #@CallStaticJavaDirect " %}
12064 ins_encode( Java_Static_Call( meth ) );
12065 ins_pipe( pipe_slow );
12066 ins_pc_relative(1);
12067 ins_alignment(16);
12068 %}
// Call Java Dynamic Instruction
// Note: If this code changes, the corresponding ret_addr_offset() and
//       compute_padding() functions will have to be adjusted.
instruct CallDynamicJavaDirect(method meth) %{
  match(CallDynamicJava);
  effect(USE meth);

  ins_cost(300);
  format %{"MOV IC_Klass, (oop)-1\n\t"
           "CallDynamic @ CallDynamicJavaDirect" %}
  // Encoding loads the inline-cache sentinel then emits the patchable call.
  ins_encode( Java_Dynamic_Call( meth ) );
  ins_pipe( pipe_slow );
  ins_pc_relative(1);
  ins_alignment(16);
%}
// Call runtime leaf routine that neither uses FP nor polls for safepoints.
instruct CallLeafNoFPDirect(method meth) %{
  match(CallLeafNoFP);
  effect(USE meth);

  ins_cost(300);
  format %{ "CALL_LEAF_NOFP,runtime " %}
  ins_encode(Java_To_Runtime(meth));
  ins_pipe( pipe_slow );
  ins_pc_relative(1);
  ins_alignment(16);
%}
12098 // Prefetch instructions.
12100 instruct prefetchrNTA( memory mem ) %{
12101 match(PrefetchRead mem);
12102 ins_cost(125);
12104 format %{ "pref $mem\t# Prefetch into non-temporal cache for read @ prefetchrNTA" %}
12105 ins_encode %{
12106 int base = $mem$$base;
12107 int index = $mem$$index;
12108 int scale = $mem$$scale;
12109 int disp = $mem$$disp;
12111 if( index != 0 ) {
12112 if (scale == 0) {
12113 __ daddu(AT, as_Register(base), as_Register(index));
12114 } else {
12115 __ dsll(AT, as_Register(index), scale);
12116 __ daddu(AT, as_Register(base), AT);
12117 }
12118 } else {
12119 __ move(AT, as_Register(base));
12120 }
12121 if( Assembler::is_simm16(disp) ) {
12122 __ daddiu(AT, as_Register(base), disp);
12123 __ daddiu(AT, AT, disp);
12124 } else {
12125 __ move(T9, disp);
12126 __ daddu(AT, as_Register(base), T9);
12127 }
12128 __ pref(0, AT, 0); //hint: 0:load
12129 %}
12130 ins_pipe(pipe_slow);
12131 %}
12133 instruct prefetchwNTA( memory mem ) %{
12134 match(PrefetchWrite mem);
12135 ins_cost(125);
12136 format %{ "pref $mem\t# Prefetch to non-temporal cache for write @ prefetchwNTA" %}
12137 ins_encode %{
12138 int base = $mem$$base;
12139 int index = $mem$$index;
12140 int scale = $mem$$scale;
12141 int disp = $mem$$disp;
12143 if( index != 0 ) {
12144 if (scale == 0) {
12145 __ daddu(AT, as_Register(base), as_Register(index));
12146 } else {
12147 __ dsll(AT, as_Register(index), scale);
12148 __ daddu(AT, as_Register(base), AT);
12149 }
12150 } else {
12151 __ move(AT, as_Register(base));
12152 }
12153 if( Assembler::is_simm16(disp) ) {
12154 __ daddiu(AT, as_Register(base), disp);
12155 __ daddiu(AT, AT, disp);
12156 } else {
12157 __ move(T9, disp);
12158 __ daddu(AT, as_Register(base), T9);
12159 }
12160 __ pref(1, AT, 0); //hint: 1:store
12161 %}
12162 ins_pipe(pipe_slow);
12163 %}
// Prefetch instructions for allocation.

// Prefetch-for-allocation is emulated with a byte load into R0 (the result
// is discarded; the access simply pulls the line into the cache).
// Uses the fused gslbx (base+index+disp load) when UseLoongsonISA is set.
instruct prefetchAllocNTA( memory mem ) %{
  match(PrefetchAllocation mem);
  ins_cost(125);
  format %{ "pref $mem\t# Prefetch allocation @ prefetchAllocNTA" %}
  ins_encode %{
    int  base = $mem$$base;
    int  index = $mem$$index;
    int  scale = $mem$$scale;
    int  disp = $mem$$disp;

    // Loading into R0 discards the value but still touches memory.
    Register dst = R0;

    if( index != 0 ) {
      if( Assembler::is_simm16(disp) ) {
        if( UseLoongsonISA ) {
          if (scale == 0) {
            __ gslbx(dst, as_Register(base), as_Register(index), disp);
          } else {
            __ dsll(AT, as_Register(index), scale);
            __ gslbx(dst, as_Register(base), AT, disp);
          }
        } else {
          if (scale == 0) {
            __ addu(AT, as_Register(base), as_Register(index));
          } else {
            __ dsll(AT, as_Register(index), scale);
            __ addu(AT, as_Register(base), AT);
          }
          __ lb(dst, AT, disp);
        }
      } else {
        // Displacement does not fit in 16 bits: materialize it in T9.
        if (scale == 0) {
          __ addu(AT, as_Register(base), as_Register(index));
        } else {
          __ dsll(AT, as_Register(index), scale);
          __ addu(AT, as_Register(base), AT);
        }
        __ move(T9, disp);
        if( UseLoongsonISA ) {
          __ gslbx(dst, AT, T9, 0);
        } else {
          __ addu(AT, AT, T9);
          __ lb(dst, AT, 0);
        }
      }
    } else {
      // No index register: address is base + disp.
      if( Assembler::is_simm16(disp) ) {
        __ lb(dst, as_Register(base), disp);
      } else {
        __ move(T9, disp);
        if( UseLoongsonISA ) {
          __ gslbx(dst, as_Register(base), T9, 0);
        } else {
          __ addu(AT, as_Register(base), T9);
          __ lb(dst, AT, 0);
        }
      }
    }
  %}
  ins_pipe(pipe_slow);
%}
// Call runtime without safepoint
instruct CallLeafDirect(method meth) %{
  match(CallLeaf);
  effect(USE meth);

  ins_cost(300);
  format %{ "CALL_LEAF,runtime #@CallLeafDirect " %}
  ins_encode(Java_To_Runtime(meth));
  ins_pipe( pipe_slow );
  ins_pc_relative(1);
  ins_alignment(16);
%}
12243 // Load Char (16bit unsigned)
12244 instruct loadUS(mRegI dst, memory mem) %{
12245 match(Set dst (LoadUS mem));
12247 ins_cost(125);
12248 format %{ "loadUS $dst,$mem @ loadC" %}
12249 ins_encode(load_C_enc(dst, mem));
12250 ins_pipe( ialu_loadI );
12251 %}
// Load Char (16bit unsigned) and widen to long. Zero-extension of the
// unsigned load makes the extra ConvI2L step a no-op in the encoding.
instruct loadUS_convI2L(mRegL dst, memory mem) %{
  match(Set dst (ConvI2L (LoadUS mem)));

  ins_cost(125);
  format %{ "loadUS $dst,$mem @ loadUS_convI2L" %}
  ins_encode(load_C_enc(dst, mem));
  ins_pipe( ialu_loadI );
%}
// Store Char (16bit unsigned)
instruct storeC(memory mem, mRegI src) %{
  match(Set mem (StoreC mem src));

  ins_cost(125);
  format %{ "storeC  $src, $mem @ storeC" %}
  ins_encode(store_C_reg_enc(mem, src));
  ins_pipe( ialu_loadI );
%}
// Store zero char: special-cased so the encoding can use R0 as the source.
instruct storeC0(memory mem, immI0 zero) %{
  match(Set mem (StoreC mem zero));

  ins_cost(125);
  format %{ "storeC  $zero, $mem @ storeC0" %}
  ins_encode(store_C0_enc(mem));
  ins_pipe( ialu_loadI );
%}
// Load float constant +0.0 by moving integer zero into the FP register.
instruct loadConF0(regF dst, immF0 zero) %{
  match(Set dst zero);
  ins_cost(100);

  format %{ "mov $dst, zero @ loadConF0\n"%}
  ins_encode %{
    FloatRegister dst = $dst$$FloatRegister;

    // All-zero bits is the IEEE-754 single-precision +0.0.
    __ mtc1(R0, dst);
  %}
  ins_pipe( fpu_loadF );
%}
// Load float constant from the constant table.
instruct loadConF(regF dst, immF src) %{
  match(Set dst src);
  ins_cost(125);

  format %{ "lwc1 $dst, $constantoffset[$constanttablebase] # load FLOAT $src from table @ loadConF" %}
  ins_encode %{
    int con_offset = $constantoffset($src);

    if (Assembler::is_simm16(con_offset)) {
      __ lwc1($dst$$FloatRegister, $constanttablebase, con_offset);
    } else {
      // Offset too large for the 16-bit immediate: materialize it in AT.
      __ set64(AT, con_offset);
      if (UseLoongsonISA) {
        // Fused indexed FP load avoids the explicit address add.
        __ gslwxc1($dst$$FloatRegister, $constanttablebase, AT, 0);
      } else {
        __ daddu(AT, $constanttablebase, AT);
        __ lwc1($dst$$FloatRegister, AT, 0);
      }
    }
  %}
  ins_pipe( fpu_loadF );
%}
// Load double constant +0.0 by moving 64-bit integer zero into the FP register.
instruct loadConD0(regD dst, immD0 zero) %{
  match(Set dst zero);
  ins_cost(100);

  format %{ "mov $dst, zero @ loadConD0"%}
  ins_encode %{
    FloatRegister dst = as_FloatRegister($dst$$reg);

    // All-zero bits is the IEEE-754 double-precision +0.0.
    __ dmtc1(R0, dst);
  %}
  ins_pipe( fpu_loadF );
%}
// Load double constant from the constant table.
instruct loadConD(regD dst, immD src) %{
  match(Set dst src);
  ins_cost(125);

  format %{ "ldc1 $dst, $constantoffset[$constanttablebase] # load DOUBLE $src from table @ loadConD" %}
  ins_encode %{
    int con_offset = $constantoffset($src);

    if (Assembler::is_simm16(con_offset)) {
      __ ldc1($dst$$FloatRegister, $constanttablebase, con_offset);
    } else {
      // Offset too large for the 16-bit immediate: materialize it in AT.
      __ set64(AT, con_offset);
      if (UseLoongsonISA) {
        // Fused indexed FP load avoids the explicit address add.
        __ gsldxc1($dst$$FloatRegister, $constanttablebase, AT, 0);
      } else {
        __ daddu(AT, $constanttablebase, AT);
        __ ldc1($dst$$FloatRegister, AT, 0);
      }
    }
  %}
  ins_pipe( fpu_loadF );
%}
// Store register Float value (it is faster than store from FPU register)
instruct storeF_reg( memory mem, regF src) %{
  match(Set mem (StoreF mem src));

  ins_cost(50);
  format %{ "store   $mem, $src\t# store float @ storeF_reg" %}
  ins_encode(store_F_reg_enc(mem, src));
  ins_pipe( fpu_storeF );
%}
// Store float +0.0: the bit pattern is all zeros, so an integer sw of R0
// is used instead of going through an FP register.
instruct storeF_imm0( memory mem, immF0 zero) %{
  match(Set mem (StoreF mem zero));

  ins_cost(40);
  format %{ "store   $mem, zero\t# store float @ storeF_imm0" %}
  ins_encode %{
    int  base = $mem$$base;
    int  index = $mem$$index;
    int  scale = $mem$$scale;
    int  disp = $mem$$disp;

    if( index != 0 ) {
      if ( UseLoongsonISA ) {
        // gsswx takes base+index plus only an 8-bit displacement, hence the
        // three-way split on the size of disp.
        if ( Assembler::is_simm(disp, 8) ) {
          if ( scale == 0 ) {
            __ gsswx(R0, as_Register(base), as_Register(index), disp);
          } else {
            __ dsll(T9, as_Register(index), scale);
            __ gsswx(R0, as_Register(base), T9, disp);
          }
        } else if ( Assembler::is_simm16(disp) ) {
          if ( scale == 0 ) {
            __ daddu(AT, as_Register(base), as_Register(index));
          } else {
            __ dsll(T9, as_Register(index), scale);
            __ daddu(AT, as_Register(base), T9);
          }
          __ sw(R0, AT, disp);
        } else {
          // Large disp: fold it into the index side, keep base for gsswx.
          if ( scale == 0 ) {
            __ move(T9, disp);
            __ daddu(AT, as_Register(index), T9);
            __ gsswx(R0, as_Register(base), AT, 0);
          } else {
            __ dsll(T9, as_Register(index), scale);
            __ move(AT, disp);
            __ daddu(AT, AT, T9);
            __ gsswx(R0, as_Register(base), AT, 0);
          }
        }
      } else { //not use loongson isa
        if(scale != 0) {
          __ dsll(T9, as_Register(index), scale);
          __ daddu(AT, as_Register(base), T9);
        } else {
          __ daddu(AT, as_Register(base), as_Register(index));
        }
        if( Assembler::is_simm16(disp) ) {
          __ sw(R0, AT, disp);
        } else {
          __ move(T9, disp);
          __ daddu(AT, AT, T9);
          __ sw(R0, AT, 0);
        }
      }
    } else { //index is 0
      if ( UseLoongsonISA ) {
        if ( Assembler::is_simm16(disp) ) {
          __ sw(R0, as_Register(base), disp);
        } else {
          __ move(T9, disp);
          __ gsswx(R0, as_Register(base), T9, 0);
        }
      } else {
        if( Assembler::is_simm16(disp) ) {
          __ sw(R0, as_Register(base), disp);
        } else {
          __ move(T9, disp);
          __ daddu(AT, as_Register(base), T9);
          __ sw(R0, AT, 0);
        }
      }
    }
  %}
  ins_pipe( ialu_storeI );
%}
// Load Double
instruct loadD(regD dst, memory mem) %{
  match(Set dst (LoadD mem));

  ins_cost(150);
  format %{ "loadD   $dst, $mem #@loadD" %}
  ins_encode(load_D_enc(dst, mem));
  ins_pipe( ialu_loadI );
%}
// Load Double - UNaligned
instruct loadD_unaligned(regD dst, memory mem ) %{
  match(Set dst (LoadD_unaligned mem));
  ins_cost(250);
  // FIXME: Jin: Need more effective ldl/ldr
  // Currently reuses the aligned encoding; an unaligned-capable ldl/ldr
  // sequence would be cheaper when the address is actually misaligned.
  format %{ "loadD_unaligned  $dst, $mem #@loadD_unaligned" %}
  ins_encode(load_D_enc(dst, mem));
  ins_pipe( ialu_loadI );
%}
// Store Double from an FP register.
instruct storeD_reg( memory mem, regD src) %{
  match(Set mem (StoreD mem src));

  ins_cost(50);
  format %{ "store   $mem, $src\t# store float @ storeD_reg" %}
  ins_encode(store_D_reg_enc(mem, src));
  ins_pipe( fpu_storeF );
%}
// Store double +0.0. A zero double is first materialized in scratch FP
// register F30, then stored with sdc1/gssdxc1 using the full addressing
// decision tree (Loongson fused stores vs. plain MIPS).
instruct storeD_imm0( memory mem, immD0 zero) %{
  match(Set mem (StoreD mem zero));

  ins_cost(40);
  format %{ "store   $mem, zero\t# store float @ storeD_imm0" %}
  ins_encode %{
    int  base = $mem$$base;
    int  index = $mem$$index;
    int  scale = $mem$$scale;
    int  disp = $mem$$disp;

    // F30 := (double) 0  (mtc1 of R0 then int->double convert).
    __ mtc1(R0, F30);
    __ cvt_d_w(F30, F30);

    if( index != 0 ) {
      if ( UseLoongsonISA ) {
        // gssdxc1 takes base+index plus only an 8-bit displacement.
        if ( Assembler::is_simm(disp, 8) ) {
          if (scale == 0) {
            __ gssdxc1(F30, as_Register(base), as_Register(index), disp);
          } else {
            __ dsll(T9, as_Register(index), scale);
            __ gssdxc1(F30, as_Register(base), T9, disp);
          }
        } else if ( Assembler::is_simm16(disp) ) {
          if (scale == 0) {
            __ daddu(AT, as_Register(base), as_Register(index));
            __ sdc1(F30, AT, disp);
          } else {
            __ dsll(T9, as_Register(index), scale);
            __ daddu(AT, as_Register(base), T9);
            __ sdc1(F30, AT, disp);
          }
        } else {
          // Large disp: fold it into the index side, keep base for gssdxc1.
          if (scale == 0) {
            __ move(T9, disp);
            __ daddu(AT, as_Register(index), T9);
            __ gssdxc1(F30, as_Register(base), AT, 0);
          } else {
            __ move(T9, disp);
            __ dsll(AT, as_Register(index), scale);
            __ daddu(AT, AT, T9);
            __ gssdxc1(F30, as_Register(base), AT, 0);
          }
        }
      } else { // not use loongson isa
        if(scale != 0) {
          __ dsll(T9, as_Register(index), scale);
          __ daddu(AT, as_Register(base), T9);
        } else {
          __ daddu(AT, as_Register(base), as_Register(index));
        }
        if( Assembler::is_simm16(disp) ) {
          __ sdc1(F30, AT, disp);
        } else {
          __ move(T9, disp);
          __ daddu(AT, AT, T9);
          __ sdc1(F30, AT, 0);
        }
      }
    } else {// index is 0
      if ( UseLoongsonISA ) {
        if ( Assembler::is_simm16(disp) ) {
          __ sdc1(F30, as_Register(base), disp);
        } else {
          __ move(T9, disp);
          __ gssdxc1(F30, as_Register(base), T9, 0);
        }
      } else {
        if( Assembler::is_simm16(disp) ) {
          __ sdc1(F30, as_Register(base), disp);
        } else {
          __ move(T9, disp);
          __ daddu(AT, as_Register(base), T9);
          __ sdc1(F30, AT, 0);
        }
      }
    }
  %}
  ins_pipe( ialu_storeI );
%}
12553 instruct loadSSI(mRegI dst, stackSlotI src)
12554 %{
12555 match(Set dst src);
12557 ins_cost(125);
12558 format %{ "lw $dst, $src\t# int stk @ loadSSI" %}
12559 ins_encode %{
12560 guarantee( Assembler::is_simm16($src$$disp), "disp too long (loadSSI) !");
12561 __ lw($dst$$Register, SP, $src$$disp);
12562 %}
12563 ins_pipe(ialu_loadI);
12564 %}
12566 instruct storeSSI(stackSlotI dst, mRegI src)
12567 %{
12568 match(Set dst src);
12570 ins_cost(100);
12571 format %{ "sw $dst, $src\t# int stk @ storeSSI" %}
12572 ins_encode %{
12573 guarantee( Assembler::is_simm16($dst$$disp), "disp too long (storeSSI) !");
12574 __ sw($src$$Register, SP, $dst$$disp);
12575 %}
12576 ins_pipe(ialu_storeI);
12577 %}
12579 instruct loadSSL(mRegL dst, stackSlotL src)
12580 %{
12581 match(Set dst src);
12583 ins_cost(125);
12584 format %{ "ld $dst, $src\t# long stk @ loadSSL" %}
12585 ins_encode %{
12586 guarantee( Assembler::is_simm16($src$$disp), "disp too long (loadSSL) !");
12587 __ ld($dst$$Register, SP, $src$$disp);
12588 %}
12589 ins_pipe(ialu_loadI);
12590 %}
12592 instruct storeSSL(stackSlotL dst, mRegL src)
12593 %{
12594 match(Set dst src);
12596 ins_cost(100);
12597 format %{ "sd $dst, $src\t# long stk @ storeSSL" %}
12598 ins_encode %{
12599 guarantee( Assembler::is_simm16($dst$$disp), "disp too long (storeSSL) !");
12600 __ sd($src$$Register, SP, $dst$$disp);
12601 %}
12602 ins_pipe(ialu_storeI);
12603 %}
12605 instruct loadSSP(mRegP dst, stackSlotP src)
12606 %{
12607 match(Set dst src);
12609 ins_cost(125);
12610 format %{ "ld $dst, $src\t# ptr stk @ loadSSP" %}
12611 ins_encode %{
12612 guarantee( Assembler::is_simm16($src$$disp), "disp too long (loadSSP) !");
12613 __ ld($dst$$Register, SP, $src$$disp);
12614 %}
12615 ins_pipe(ialu_loadI);
12616 %}
12618 instruct storeSSP(stackSlotP dst, mRegP src)
12619 %{
12620 match(Set dst src);
12622 ins_cost(100);
12623 format %{ "sd $dst, $src\t# ptr stk @ storeSSP" %}
12624 ins_encode %{
12625 guarantee( Assembler::is_simm16($dst$$disp), "disp too long (storeSSP) !");
12626 __ sd($src$$Register, SP, $dst$$disp);
12627 %}
12628 ins_pipe(ialu_storeI);
12629 %}
12631 instruct loadSSF(regF dst, stackSlotF src)
12632 %{
12633 match(Set dst src);
12635 ins_cost(125);
12636 format %{ "lwc1 $dst, $src\t# float stk @ loadSSF" %}
12637 ins_encode %{
12638 guarantee( Assembler::is_simm16($src$$disp), "disp too long (loadSSF) !");
12639 __ lwc1($dst$$FloatRegister, SP, $src$$disp);
12640 %}
12641 ins_pipe(ialu_loadI);
12642 %}
12644 instruct storeSSF(stackSlotF dst, regF src)
12645 %{
12646 match(Set dst src);
12648 ins_cost(100);
12649 format %{ "swc1 $dst, $src\t# float stk @ storeSSF" %}
12650 ins_encode %{
12651 guarantee( Assembler::is_simm16($dst$$disp), "disp too long (storeSSF) !");
12652 __ swc1($src$$FloatRegister, SP, $dst$$disp);
12653 %}
12654 ins_pipe(fpu_storeF);
12655 %}
12657 // Use the same format since predicate() can not be used here.
12658 instruct loadSSD(regD dst, stackSlotD src)
12659 %{
12660 match(Set dst src);
12662 ins_cost(125);
12663 format %{ "ldc1 $dst, $src\t# double stk @ loadSSD" %}
12664 ins_encode %{
12665 guarantee( Assembler::is_simm16($src$$disp), "disp too long (loadSSD) !");
12666 __ ldc1($dst$$FloatRegister, SP, $src$$disp);
12667 %}
12668 ins_pipe(ialu_loadI);
12669 %}
12671 instruct storeSSD(stackSlotD dst, regD src)
12672 %{
12673 match(Set dst src);
12675 ins_cost(100);
12676 format %{ "sdc1 $dst, $src\t# double stk @ storeSSD" %}
12677 ins_encode %{
12678 guarantee( Assembler::is_simm16($dst$$disp), "disp too long (storeSSD) !");
12679 __ sdc1($src$$FloatRegister, SP, $dst$$disp);
12680 %}
12681 ins_pipe(fpu_storeF);
12682 %}
// Fast-path object monitor lock; slow path is handled by the runtime.
// Result is reported via the flags register (AT on this port).
instruct cmpFastLock( FlagsReg cr, mRegP object, s0_RegP box, mRegI tmp, mRegP scr) %{
  match( Set cr (FastLock object box) );
  effect( TEMP tmp, TEMP scr, USE_KILL box );
  ins_cost(300);
  format %{ "FASTLOCK $cr $object, $box, $tmp #@ cmpFastLock" %}
  ins_encode %{
    __ fast_lock($object$$Register, $box$$Register, $tmp$$Register, $scr$$Register);
  %}

  ins_pipe( pipe_slow );
  ins_pc_relative(1);
%}
// Fast-path object monitor unlock; counterpart of cmpFastLock.
instruct cmpFastUnlock( FlagsReg cr, mRegP object, s0_RegP box, mRegP tmp ) %{
  match( Set cr (FastUnlock object box) );
  effect( TEMP tmp, USE_KILL box );
  ins_cost(300);
  format %{ "FASTUNLOCK $object, $box, $tmp #@cmpFastUnlock" %}
  ins_encode %{
    __ fast_unlock($object$$Register, $box$$Register, $tmp$$Register);
  %}

  ins_pipe( pipe_slow );
  ins_pc_relative(1);
%}
// Store CMS card-mark Immediate
// Byte store with a memory barrier in the encoding (see store_B_immI_enc_sync).
instruct storeImmCM(memory mem, immI8 src) %{
  match(Set mem (StoreCM mem src));

  ins_cost(150);
  format %{ "MOV8   $mem,$src\t! CMS card-mark imm0" %}
// opcode(0xC6);
  ins_encode(store_B_immI_enc_sync(mem, src));
  ins_pipe( ialu_storeI );
%}
12721 // Die now
12722 instruct ShouldNotReachHere( )
12723 %{
12724 match(Halt);
12725 ins_cost(300);
12727 // Use the following format syntax
12728 format %{ "ILLTRAP ;#@ShouldNotReachHere" %}
12729 ins_encode %{
12730 // Here we should emit illtrap !
12732 __ stop("in ShoudNotReachHere");
12734 %}
12735 ins_pipe( pipe_jump );
12736 %}
// Address computation (lea) for a narrow-oop base with an 8-bit offset;
// only valid when compressed oops use no shift.
instruct leaP8Narrow(mRegP dst, indOffset8Narrow mem)
%{
  predicate(Universe::narrow_oop_shift() == 0);
  match(Set dst mem);

  ins_cost(110);
  format %{ "leaq    $dst, $mem\t# ptr off8narrow @ leaP8Narrow" %}
  ins_encode %{
    Register  dst  = $dst$$Register;
    Register  base = as_Register($mem$$base);
    int       disp = $mem$$disp;

    // dst = base + disp (disp fits in the 16-bit daddiu immediate).
    __ daddiu(dst, base, disp);
  %}
  ins_pipe( ialu_regI_imm16 );
%}
// Address computation: dst = base + (index << scale) + disp.
instruct leaPPosIdxScaleOff8(mRegP dst, basePosIndexScaleOffset8 mem)
%{
  match(Set dst mem);

  ins_cost(110);
  format %{ "leaq    $dst, $mem\t# @ PosIdxScaleOff8" %}
  ins_encode %{
    Register  dst   = $dst$$Register;
    Register  base  = as_Register($mem$$base);
    Register  index = as_Register($mem$$index);
    int       scale = $mem$$scale;
    int       disp  = $mem$$disp;

    if (scale == 0) {
      __ daddu(AT, base, index);
      __ daddiu(dst, AT, disp);
    } else {
      __ dsll(AT, index, scale);
      __ daddu(AT, base, AT);
      __ daddiu(dst, AT, disp);
    }
  %}

  ins_pipe( ialu_regI_imm16 );
%}
// Address computation: dst = base + (index << scale), no displacement.
instruct leaPIdxScale(mRegP dst, indIndexScale mem)
%{
  match(Set dst mem);

  ins_cost(110);
  format %{ "leaq    $dst, $mem\t# @ leaPIdxScale" %}
  ins_encode %{
    Register  dst   = $dst$$Register;
    Register  base  = as_Register($mem$$base);
    Register  index = as_Register($mem$$index);
    int       scale = $mem$$scale;

    if (scale == 0) {
      __ daddu(dst, base, index);
    } else {
      __ dsll(AT, index, scale);
      __ daddu(dst, base, AT);
    }
  %}

  ins_pipe( ialu_regI_imm16 );
%}
// Jump Direct Conditional - Label defines a relative address from Jcc+1
// Counted-loop back branch on an int compare. The cmpcode selects the
// condition; for the ordered conditions an slt sets AT, then AT is
// branch-tested against R0. (Despite the x86-style above/below names,
// slt is a signed compare, matching CmpI.)
instruct  jmpLoopEnd(cmpOp cop, mRegI src1, mRegI src2, label labl) %{
  match(CountedLoopEnd cop (CmpI src1 src2));
  effect(USE labl);

  ins_cost(300);
  format %{ "J$cop  $src1, $src2,  $labl\t# Loop end @ jmpLoopEnd" %}
  ins_encode %{
    Register op1 = $src1$$Register;
    Register op2 = $src2$$Register;
    Label    &L = *($labl$$label);
    int     flag = $cop$$cmpcode;

    // A NULL label means the branch target is not bound yet; emit the
    // branch with a 0 offset placeholder in that case.
    switch(flag)
    {
      case 0x01: //equal
        if (&L)
          __ beq(op1, op2, L);
        else
          __ beq(op1, op2, (int)0);
        break;
      case 0x02: //not_equal
        if (&L)
          __ bne(op1, op2, L);
        else
          __ bne(op1, op2, (int)0);
        break;
      case 0x03: //above
        __ slt(AT, op2, op1);
        if(&L)
          __ bne(AT, R0, L);
        else
          __ bne(AT, R0, (int)0);
        break;
      case 0x04: //above_equal
        __ slt(AT, op1, op2);
        if(&L)
          __ beq(AT, R0, L);
        else
          __ beq(AT, R0, (int)0);
        break;
      case 0x05: //below
        __ slt(AT, op1, op2);
        if(&L)
          __ bne(AT, R0, L);
        else
          __ bne(AT, R0, (int)0);
        break;
      case 0x06: //below_equal
        __ slt(AT, op2, op1);
        if(&L)
          __ beq(AT, R0, L);
        else
          __ beq(AT, R0, (int)0);
        break;
      default:
        Unimplemented();
    }
    // Fill the branch delay slot.
    __ nop();
  %}
  ins_pipe( pipe_jump );
  ins_pc_relative(1);
%}
// Counted-loop back branch against a small immediate: AT = src1 - src2 is
// computed once with addiu32, then the sign/zero of AT drives the branch.
instruct  jmpLoopEnd_reg_imm16_sub(cmpOp cop, mRegI src1, immI16_sub src2, label labl) %{
  match(CountedLoopEnd cop (CmpI src1 src2));
  effect(USE labl);

  ins_cost(250);
  format %{ "J$cop  $src1, $src2,  $labl\t# Loop end @ jmpLoopEnd_reg_imm16_sub" %}
  ins_encode %{
    Register op1 = $src1$$Register;
    int      op2 = $src2$$constant;
    Label     &L = *($labl$$label);
    int     flag = $cop$$cmpcode;

    // AT := op1 - op2 (32-bit); immI16_sub guarantees -op2 fits in 16 bits.
    __ addiu32(AT, op1, -1 * op2);

    // NULL label => target not bound yet; emit a 0-offset placeholder.
    switch(flag)
    {
      case 0x01: //equal
        if (&L)
          __ beq(AT, R0, L);
        else
          __ beq(AT, R0, (int)0);
        break;
      case 0x02: //not_equal
        if (&L)
          __ bne(AT, R0, L);
        else
          __ bne(AT, R0, (int)0);
        break;
      case 0x03: //above
        if(&L)
          __ bgtz(AT, L);
        else
          __ bgtz(AT, (int)0);
        break;
      case 0x04: //above_equal
        if(&L)
          __ bgez(AT, L);
        else
          __ bgez(AT,(int)0);
        break;
      case 0x05: //below
        if(&L)
          __ bltz(AT, L);
        else
          __ bltz(AT, (int)0);
        break;
      case 0x06: //below_equal
        if(&L)
          __ blez(AT, L);
        else
          __ blez(AT, (int)0);
        break;
      default:
        Unimplemented();
    }
    // Fill the branch delay slot.
    __ nop();
  %}
  ins_pipe( pipe_jump );
  ins_pc_relative(1);
%}
12931 /*
12932 // Jump Direct Conditional - Label defines a relative address from Jcc+1
12933 instruct jmpLoopEndU(cmpOpU cop, eFlagsRegU cmp, label labl) %{
12934 match(CountedLoopEnd cop cmp);
12935 effect(USE labl);
12937 ins_cost(300);
12938 format %{ "J$cop,u $labl\t# Loop end" %}
12939 size(6);
12940 opcode(0x0F, 0x80);
12941 ins_encode( Jcc( cop, labl) );
12942 ins_pipe( pipe_jump );
12943 ins_pc_relative(1);
12944 %}
12946 instruct jmpLoopEndUCF(cmpOpUCF cop, eFlagsRegUCF cmp, label labl) %{
12947 match(CountedLoopEnd cop cmp);
12948 effect(USE labl);
12950 ins_cost(200);
12951 format %{ "J$cop,u $labl\t# Loop end" %}
12952 opcode(0x0F, 0x80);
12953 ins_encode( Jcc( cop, labl) );
12954 ins_pipe( pipe_jump );
12955 ins_pc_relative(1);
12956 %}
12957 */
// This match pattern is created for StoreIConditional since I cannot match IfNode without a RegFlags! fujie 2012/07/17
// Branch on the flags register. On this port AT carries the flag value and
// the sense is inverted: "equal" tests AT != 0 because the flag producers
// (e.g. storeIConditional) leave a nonzero AT on success.
instruct  jmpCon_flags(cmpOp cop, FlagsReg cr, label labl) %{
  match(If cop cr);
  effect(USE labl);

  ins_cost(300);
  format %{ "J$cop $labl #mips uses AT as eflag @jmpCon_flags" %}

  ins_encode %{
    Label &L = *($labl$$label);
    switch($cop$$cmpcode)
    {
      case 0x01: //equal
        if (&L)
          __ bne(AT, R0, L);
        else
          __ bne(AT, R0, (int)0);
        break;
      case 0x02: //not equal
        if (&L)
          __ beq(AT, R0, L);
        else
          __ beq(AT, R0, (int)0);
        break;
      default:
        Unimplemented();
    }
    // Fill the branch delay slot.
    __ nop();
  %}

  ins_pipe( pipe_jump );
  ins_pc_relative(1);
%}
// ============================================================================
// The 2nd slow-half of a subtype check.  Scan the subklass's 2ndary superklass
// array for an instance of the superklass.  Set a hidden internal cache on a
// hit (cache is checked with exposed code in gen_subtype_check()).  Return
// NZ for a miss or zero for a hit.  The encoding ALSO sets flags.
instruct partialSubtypeCheck( mRegP result, no_T8_mRegP sub, no_T8_mRegP super, mT8RegI tmp ) %{
  match(Set result (PartialSubtypeCheck sub super));
  effect(KILL tmp);
  ins_cost(1100);  // slightly larger than the next version
  format %{ "partialSubtypeCheck result=$result, sub=$sub, super=$super, tmp=$tmp " %}

  // tmp is pinned to T8 (mT8RegI); sub/super must avoid T8 (no_T8_mRegP).
  ins_encode( enc_PartialSubtypeCheck(result, sub, super, tmp) );
  ins_pipe( pipe_slow );
%}
// Conditional-store of an int value.
// ZF flag is set on success, reset otherwise. Implemented with a CMPXCHG on Intel.
// On MIPS this is an LL/SC retry loop; AT is left 0 on failure and 0xFF on
// success, which jmpCon_flags above then branch-tests.
instruct storeIConditional( memory mem, mRegI oldval, mRegI newval, FlagsReg cr ) %{
  match(Set cr (StoreIConditional mem (Binary oldval newval)));
//  effect(KILL oldval);
  format %{ "CMPXCHG $newval, $mem, $oldval \t# @storeIConditional" %}

  ins_encode %{
    Register oldval = $oldval$$Register;
    Register newval = $newval$$Register;
    Address  addr(as_Register($mem$$base), $mem$$disp);
    Label    again, failure;

//    int      base = $mem$$base;
    int      index = $mem$$index;
    int      scale = $mem$$scale;
    int      disp = $mem$$disp;

    // ll/sc take only a 16-bit displacement; larger offsets are not expected.
    guarantee(Assembler::is_simm16(disp), "");

    if( index != 0 ) {
      __ stop("in storeIConditional: index != 0");
    } else {
      __ bind(again);
      if(UseSyncLevel <= 1000) __ sync();
      __ ll(AT, addr);
      __ bne(AT, oldval, failure);
      // Delay slot: AT := 0, the failure flag value.
      __ delayed()->addu(AT, R0, R0);

      __ addu(AT, newval, R0);
      __ sc(AT, addr);
      // sc wrote 0 into AT if the store was lost; retry in that case.
      __ beq(AT, R0, again);
      // Delay slot: AT := 0xFF, the success flag value.
      __ delayed()->addiu(AT, R0, 0xFF);
      __ bind(failure);
      __ sync();
    }
  %}

  ins_pipe( long_memory_op );
%}
13051 // Conditional-store of a long value.
13052 // ZF flag is set on success, reset otherwise. Implemented with a CMPXCHG.
13053 instruct storeLConditional(memory mem, t2RegL oldval, mRegL newval, FlagsReg cr )
13054 %{
13055 match(Set cr (StoreLConditional mem (Binary oldval newval)));
13056 effect(KILL oldval);
13058 format %{ "cmpxchg $mem, $newval\t# If $oldval == $mem then store $newval into $mem" %}
13059 ins_encode%{
13060 Register oldval = $oldval$$Register;
13061 Register newval = $newval$$Register;
13062 Address addr((Register)$mem$$base, $mem$$disp);
13064 int index = $mem$$index;
13065 int scale = $mem$$scale;
13066 int disp = $mem$$disp;
13068 guarantee(Assembler::is_simm16(disp), "");
13070 if( index != 0 ) {
13071 __ stop("in storeIConditional: index != 0");
13072 } else {
13073 __ cmpxchg(newval, addr, oldval);
13074 }
13075 %}
13076 ins_pipe( long_memory_op );
13077 %}
// CAS of a 32-bit int; cmpxchg32 leaves the success flag in AT, which is
// copied into the result register.
instruct compareAndSwapI( mRegI res, mRegP mem_ptr, mS2RegI oldval, mRegI newval) %{
  match(Set res (CompareAndSwapI mem_ptr (Binary oldval newval)));
  effect(KILL oldval);
//  match(CompareAndSwapI mem_ptr (Binary oldval newval));
  format %{ "CMPXCHG $newval, [$mem_ptr], $oldval @ compareAndSwapI\n\t"
            "MOV $res, 1 @ compareAndSwapI\n\t"
            "BNE AT, R0 @ compareAndSwapI\n\t"
            "MOV $res, 0 @ compareAndSwapI\n"
            "L:" %}
  ins_encode %{
    Register newval = $newval$$Register;
    Register oldval = $oldval$$Register;
    Register res    = $res$$Register;
    Address  addr($mem_ptr$$Register, 0);
    Label L;

    __ cmpxchg32(newval, addr, oldval);
    __ move(res, AT);
  %}
  ins_pipe( long_memory_op );
%}
//FIXME:
// CAS of a 64-bit pointer; cmpxchg leaves the success flag in AT, which is
// copied into the result register.
instruct compareAndSwapP( mRegI res,  mRegP mem_ptr, s2_RegP oldval, mRegP newval) %{
  match(Set res (CompareAndSwapP mem_ptr (Binary oldval newval)));
  effect(KILL oldval);
  format %{ "CMPXCHG $newval, [$mem_ptr], $oldval @ compareAndSwapP\n\t"
            "MOV $res, AT @ compareAndSwapP\n\t"
            "L:" %}
  ins_encode %{
    Register newval = $newval$$Register;
    Register oldval = $oldval$$Register;
    Register res    = $res$$Register;
    Address  addr($mem_ptr$$Register, 0);
    Label L;

    __ cmpxchg(newval, addr, oldval);
    __ move(res, AT);
  %}
  ins_pipe( long_memory_op );
%}
// CAS of a 32-bit compressed oop (narrow oop).
instruct compareAndSwapN( mRegI res, mRegP mem_ptr, t2_RegN oldval, mRegN newval) %{
  match(Set res (CompareAndSwapN mem_ptr (Binary oldval newval)));
  effect(KILL oldval);
  format %{ "CMPXCHG $newval, [$mem_ptr], $oldval @ compareAndSwapN\n\t"
            "MOV $res, AT @ compareAndSwapN\n\t"
            "L:" %}
  ins_encode %{
    Register newval = $newval$$Register;
    Register oldval = $oldval$$Register;
    Register res    = $res$$Register;
    Address  addr($mem_ptr$$Register, 0);
    Label L;

    /* 2013/7/19 Jin: cmpxchg32 is implemented with ll/sc, which will do sign extension.
     *      Thus, we should extend oldval's sign for correct comparison.
     */
    // sll with shift 0 sign-extends the low 32 bits on MIPS64.
    __ sll(oldval, oldval, 0);

    __ cmpxchg32(newval, addr, oldval);
    __ move(res, AT);
  %}
  ins_pipe( long_memory_op );
%}
13146 //----------Max and Min--------------------------------------------------------
13147 // Min Instructions
13148 ////
13149 // *** Min and Max using the conditional move are slower than the
13150 // *** branch version on a Pentium III.
13151 // // Conditional move for min
13152 //instruct cmovI_reg_lt( eRegI op2, eRegI op1, eFlagsReg cr ) %{
13153 // effect( USE_DEF op2, USE op1, USE cr );
13154 // format %{ "CMOVlt $op2,$op1\t! min" %}
13155 // opcode(0x4C,0x0F);
13156 // ins_encode( OpcS, OpcP, RegReg( op2, op1 ) );
13157 // ins_pipe( pipe_cmov_reg );
13158 //%}
13159 //
13160 //// Min Register with Register (P6 version)
13161 //instruct minI_eReg_p6( eRegI op1, eRegI op2 ) %{
13162 // predicate(VM_Version::supports_cmov() );
13163 // match(Set op2 (MinI op1 op2));
13164 // ins_cost(200);
13165 // expand %{
13166 // eFlagsReg cr;
13167 // compI_eReg(cr,op1,op2);
13168 // cmovI_reg_lt(op2,op1,cr);
13169 // %}
13170 //%}
// Min Register with Register (generic version)
// dst = min(dst, src): conditional move keyed off a signed compare.
instruct minI_Reg_Reg(mRegI dst, mRegI src) %{
  match(Set dst (MinI dst src));
  //effect(KILL flags);
  ins_cost(80);

  format %{ "MIN    $dst, $src @minI_Reg_Reg" %}
  ins_encode %{
    Register dst   = $dst$$Register;
    Register src   = $src$$Register;

    // AT = (src < dst); if so, dst := src.
    __ slt(AT, src, dst);
    __ movn(dst, src, AT);

  %}

  ins_pipe( pipe_slow );
%}
13191 // Max Register with Register
13192 // *** Min and Max using the conditional move are slower than the
13193 // *** branch version on a Pentium III.
13194 // // Conditional move for max
13195 //instruct cmovI_reg_gt( eRegI op2, eRegI op1, eFlagsReg cr ) %{
13196 // effect( USE_DEF op2, USE op1, USE cr );
13197 // format %{ "CMOVgt $op2,$op1\t! max" %}
13198 // opcode(0x4F,0x0F);
13199 // ins_encode( OpcS, OpcP, RegReg( op2, op1 ) );
13200 // ins_pipe( pipe_cmov_reg );
13201 //%}
13202 //
13203 // // Max Register with Register (P6 version)
13204 //instruct maxI_eReg_p6( eRegI op1, eRegI op2 ) %{
13205 // predicate(VM_Version::supports_cmov() );
13206 // match(Set op2 (MaxI op1 op2));
13207 // ins_cost(200);
13208 // expand %{
13209 // eFlagsReg cr;
13210 // compI_eReg(cr,op1,op2);
13211 // cmovI_reg_gt(op2,op1,cr);
13212 // %}
13213 //%}
// Max Register with Register (generic version)
// dst = max(dst, src), computed branchlessly with slt + movn.
instruct maxI_Reg_Reg(mRegI dst, mRegI src) %{
  match(Set dst (MaxI dst src));
  ins_cost(80);

  format %{ "MAX $dst, $src @maxI_Reg_Reg" %}

  ins_encode %{
    Register rd = $dst$$Register;
    Register rs = $src$$Register;

    // AT = (rd < rs); when set, rd is replaced by the larger value rs.
    __ slt(AT, rd, rs);
    __ movn(rd, rs, AT);
  %}

  ins_pipe( pipe_slow );
%}
// dst = max(dst, 0): clamp negative values up to zero via conditional move.
instruct maxI_Reg_zero(mRegI dst, immI0 zero) %{
  match(Set dst (MaxI dst zero));
  ins_cost(50);

  format %{ "MAX $dst, 0 @maxI_Reg_zero" %}

  ins_encode %{
    Register rd = $dst$$Register;

    // AT = (rd < 0); when set, rd is overwritten with R0 (zero).
    __ slt(AT, rd, R0);
    __ movn(rd, R0, AT);
  %}

  ins_pipe( pipe_slow );
%}
// Zero-extend the low 32 bits of a long: matches AndL with 0xFFFFFFFF.
instruct zerox_long_reg_reg(mRegL dst, mRegL src, immL_32bits mask)
%{
  match(Set dst (AndL src mask));

  format %{ "movl $dst, $src\t# zero-extend long @ zerox_long_reg_reg" %}
  ins_encode %{
    // dext copies bits [0,32) of src into dst and clears the upper half.
    __ dext($dst$$Register, $src$$Register, 0, 32);
  %}
  ins_pipe(ialu_regI_regI);
%}
// Combine two ints into one long: dst = (src2 << 32) | zero_extend(src1).
// The three cases below handle register aliasing between dst and the sources;
// the statement order in each branch matters, so keep it intact.
instruct combine_i2l(mRegL dst, mRegI src1, immL_32bits mask, mRegI src2, immI_32 shift32)
%{
  match(Set dst (OrL (AndL (ConvI2L src1) mask) (LShiftL (ConvI2L src2) shift32)));

  format %{ "combine_i2l $dst, $src2(H), $src1(L) @ combine_i2l" %}
  ins_encode %{
    Register dst = $dst$$Register;
    Register src1 = $src1$$Register;
    Register src2 = $src2$$Register;

    if (src1 == dst) {
      // Low word already in place; insert src2 into the high word.
      __ dinsu(dst, src2, 32, 32);
    } else if (src2 == dst) {
      // High word aliases dst: shift it up first, then insert the low word.
      __ dsll32(dst, dst, 0);
      __ dins(dst, src1, 0, 32);
    } else {
      // No aliasing: build low word, then insert the high word.
      __ dext(dst, src1, 0, 32);
      __ dinsu(dst, src2, 32, 32);
    }
  %}
  ins_pipe(ialu_regI_regI);
%}
// Zero-extend convert int to long
// Matches ConvI2L whose result is masked with 0xFFFFFFFF.
instruct convI2L_reg_reg_zex(mRegL dst, mRegI src, immL_32bits mask)
%{
  match(Set dst (AndL (ConvI2L src) mask));

  format %{ "movl $dst, $src\t# i2l zero-extend @ convI2L_reg_reg_zex" %}
  ins_encode %{
    // A single dext performs the copy-and-zero-extend.
    __ dext($dst$$Register, $src$$Register, 0, 32);
  %}
  ins_pipe(ialu_regI_regI);
%}
// long -> int -> long round trip under a 0xFFFFFFFF mask collapses to a
// single 32-bit zero-extension of the source long.
instruct convL2I2L_reg_reg_zex(mRegL dst, mRegL src, immL_32bits mask)
%{
  match(Set dst (AndL (ConvI2L (ConvL2I src)) mask));

  format %{ "movl $dst, $src\t# i2l zero-extend @ convL2I2L_reg_reg_zex" %}
  ins_encode %{
    // Keep bits [0,32) of src, clear the rest.
    __ dext($dst$$Register, $src$$Register, 0, 32);
  %}
  ins_pipe(ialu_regI_regI);
%}
// Match loading integer and casting it to unsigned int in long register.
// LoadI + ConvI2L + AndL 0xffffffff.
instruct loadUI2L_rmask(mRegL dst, memory mem, immL_32bits mask) %{
  match(Set dst (AndL (ConvI2L (LoadI mem)) mask));

  format %{ "lwu $dst, $mem \t// zero-extend to long @ loadUI2L_rmask" %}
  // Reuses the 32-bit zero-extending load encoding (per the lwu format above).
  ins_encode (load_N_enc(dst, mem));
  ins_pipe(ialu_loadI);
%}
// Same as loadUI2L_rmask, but with the mask as the left AndL operand.
instruct loadUI2L_lmask(mRegL dst, memory mem, immL_32bits mask) %{
  match(Set dst (AndL mask (ConvI2L (LoadI mem))));

  format %{ "lwu $dst, $mem \t// zero-extend to long @ loadUI2L_lmask" %}
  ins_encode (load_N_enc(dst, mem));
  ins_pipe(ialu_loadI);
%}
// ============================================================================
// Safepoint Instruction
// Safepoint poll through a register that already holds the polling-page
// address. predicate(false) means this form is never selected; the
// register-free variant below is used instead.
instruct safePoint_poll_reg(mRegP poll) %{
  match(SafePoint poll);
  predicate(false);
  effect(USE poll);

  ins_cost(125);
  format %{ "Safepoint @ [$poll] : poll for GC @ safePoint_poll_reg" %}

  ins_encode %{
    Register poll_reg = $poll$$Register;

    __ block_comment("Safepoint:");
    // Tag the load with poll_type relocation so the VM recognizes it as
    // a safepoint poll.
    __ relocate(relocInfo::poll_type);
    __ lw(AT, poll_reg, 0);
  %}

  ins_pipe( ialu_storeI );
%}
// Safepoint poll: load from the VM polling page. The page address is
// materialized in T9; AT receives the (discarded) loaded value.
instruct safePoint_poll() %{
  match(SafePoint);

  ins_cost(105);
  format %{ "poll for GC @ safePoint_poll" %}

  ins_encode %{
    __ block_comment("Safepoint:");
    __ set64(T9, (long)os::get_polling_page());
    // poll_type relocation marks this load as a safepoint poll.
    __ relocate(relocInfo::poll_type);
    __ lw(AT, T9, 0);
  %}

  ins_pipe( ialu_storeI );
%}
13373 //----------Arithmetic Conversion Instructions---------------------------------
// RoundFloat needs no code on this port: empty encoding, zero cost.
instruct roundFloat_nop(regF dst)
%{
  match(Set dst (RoundFloat dst));

  ins_cost(0);
  ins_encode();
  ins_pipe(empty);
%}
// RoundDouble needs no code on this port: empty encoding, zero cost.
instruct roundDouble_nop(regD dst)
%{
  match(Set dst (RoundDouble dst));

  ins_cost(0);
  ins_encode();
  ins_pipe(empty);
%}
//---------- Zeros Count Instructions ------------------------------------------
// CountLeadingZerosINode CountTrailingZerosINode
// Count leading zeros of a 32-bit int using the hardware clz instruction.
instruct countLeadingZerosI(mRegI dst, mRegI src) %{
  predicate(UseCountLeadingZerosInstruction);
  match(Set dst (CountLeadingZerosI src));

  format %{ "clz $dst, $src\t# count leading zeros (int)" %}
  ins_encode %{
    __ clz($dst$$Register, $src$$Register);
  %}
  ins_pipe( ialu_regL_regL );
%}
// Count leading zeros of a 64-bit long using the hardware dclz instruction.
instruct countLeadingZerosL(mRegI dst, mRegL src) %{
  predicate(UseCountLeadingZerosInstruction);
  match(Set dst (CountLeadingZerosL src));

  format %{ "dclz $dst, $src\t# count leading zeros (long)" %}
  ins_encode %{
    __ dclz($dst$$Register, $src$$Register);
  %}
  ins_pipe( ialu_regL_regL );
%}
// Count trailing zeros of a 32-bit int.
instruct countTrailingZerosI(mRegI dst, mRegI src) %{
  predicate(UseCountTrailingZerosInstruction);
  match(Set dst (CountTrailingZerosI src));

  format %{ "ctz $dst, $src\t# count trailing zeros (int)" %}
  ins_encode %{
    // ctz and dctz are gs (Loongson extension) instructions.
    __ ctz($dst$$Register, $src$$Register);
  %}
  ins_pipe( ialu_regL_regL );
%}
// Count trailing zeros of a 64-bit long.
instruct countTrailingZerosL(mRegI dst, mRegL src) %{
  predicate(UseCountTrailingZerosInstruction);
  match(Set dst (CountTrailingZerosL src));

  // Fix: the emitted instruction is dctz; the format previously said "dcto",
  // which made the printed disassembly misleading.
  format %{ "dctz $dst, $src\t# count trailing zeros (long)" %}
  ins_encode %{
    // dctz is a gs (Loongson extension) instruction.
    __ dctz($dst$$Register, $src$$Register);
  %}
  ins_pipe( ialu_regL_regL );
%}
// ====================VECTOR INSTRUCTIONS=====================================

// Load vectors (8 bytes long)
// An 8-byte vector is held in a double FP register, so the double-load
// encoding is reused.
instruct loadV8(vecD dst, memory mem) %{
  predicate(n->as_LoadVector()->memory_size() == 8);
  match(Set dst (LoadVector mem));
  ins_cost(125);
  format %{ "load $dst, $mem\t! load vector (8 bytes)" %}
  ins_encode(load_D_enc(dst, mem));
  ins_pipe( fpu_loadF );
%}
// Store vectors (8 bytes long)
// Mirror of loadV8: reuse the double-store encoding.
instruct storeV8(memory mem, vecD src) %{
  predicate(n->as_StoreVector()->memory_size() == 8);
  match(Set mem (StoreVector mem src));
  ins_cost(145);
  format %{ "store $mem, $src\t! store vector (8 bytes)" %}
  ins_encode(store_D_reg_enc(mem, src));
  ins_pipe( fpu_storeF );
%}
// Replicate a byte scalar to all 8 vector lanes using the Loongson DSP
// replv_ob instruction (only selected when Use3A2000 is set); cheaper
// than the generic dins sequence below.
instruct Repl8B_DSP(vecD dst, mRegI src) %{
  predicate(n->as_Vector()->length() == 8 && Use3A2000);
  match(Set dst (ReplicateB src));
  ins_cost(100);
  format %{ "replv_ob AT, $src\n\t"
            "dmtc1 AT, $dst\t! replicate8B" %}
  ins_encode %{
    __ replv_ob(AT, $src$$Register);
    __ dmtc1(AT, $dst$$FloatRegister);
  %}
  ins_pipe( pipe_mtc1 );
%}
// Generic byte replication: successively double the byte pattern inside AT
// (8 -> 16 -> 32 -> 64 bits), then move the result into the FP register.
instruct Repl8B(vecD dst, mRegI src) %{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (ReplicateB src));
  ins_cost(140);
  format %{ "move AT, $src\n\t"
            "dins AT, AT, 8, 8\n\t"
            "dins AT, AT, 16, 16\n\t"
            "dinsu AT, AT, 32, 32\n\t"
            "dmtc1 AT, $dst\t! replicate8B" %}
  ins_encode %{
    __ move(AT, $src$$Register);
    __ dins(AT, AT, 8, 8);
    __ dins(AT, AT, 16, 16);
    __ dinsu(AT, AT, 32, 32);
    __ dmtc1(AT, $dst$$FloatRegister);
  %}
  ins_pipe( pipe_mtc1 );
%}
// Replicate an immediate byte to all 8 lanes with the Loongson DSP repl_ob
// instruction (Use3A2000 builds only).
instruct Repl8B_imm_DSP(vecD dst, immI con) %{
  predicate(n->as_Vector()->length() == 8 && Use3A2000);
  match(Set dst (ReplicateB con));
  ins_cost(110);
  format %{ "repl_ob AT, [$con]\n\t"
            "dmtc1 AT, $dst,0x00\t! replicate8B($con)" %}
  ins_encode %{
    int byte_val = $con$$constant;

    // Broadcast the immediate into every byte lane of AT, then move the
    // 64-bit pattern to the FP destination.
    __ repl_ob(AT, byte_val);
    __ dmtc1(AT, $dst$$FloatRegister);
  %}
  ins_pipe( pipe_mtc1 );
%}
// Generic immediate-byte replication: same doubling sequence as Repl8B,
// starting from the immediate constant.
instruct Repl8B_imm(vecD dst, immI con) %{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (ReplicateB con));
  ins_cost(150);
  format %{ "move AT, [$con]\n\t"
            "dins AT, AT, 8, 8\n\t"
            "dins AT, AT, 16, 16\n\t"
            "dinsu AT, AT, 32, 32\n\t"
            "dmtc1 AT, $dst,0x00\t! replicate8B($con)" %}
  ins_encode %{
    __ move(AT, $con$$constant);
    __ dins(AT, AT, 8, 8);
    __ dins(AT, AT, 16, 16);
    __ dinsu(AT, AT, 32, 32);
    __ dmtc1(AT, $dst$$FloatRegister);
  %}
  ins_pipe( pipe_mtc1 );
%}
// All-zero byte vector: a single move of R0 into the FP register suffices.
instruct Repl8B_zero(vecD dst, immI0 zero) %{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (ReplicateB zero));
  ins_cost(90);
  format %{ "dmtc1 R0, $dst\t! replicate8B zero" %}
  ins_encode %{
    __ dmtc1(R0, $dst$$FloatRegister);
  %}
  ins_pipe( pipe_mtc1 );
%}
// All-ones byte vector: nor(AT, R0, R0) produces 0xFFFFFFFFFFFFFFFF.
instruct Repl8B_M1(vecD dst, immI_M1 M1) %{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (ReplicateB M1));
  ins_cost(80);
  format %{ "dmtc1 -1, $dst\t! replicate8B -1" %}
  ins_encode %{
    __ nor(AT, R0, R0);
    __ dmtc1(AT, $dst$$FloatRegister);
  %}
  ins_pipe( pipe_mtc1 );
%}
// Replicate a short scalar to all 4 lanes using the Loongson DSP replv_qh
// instruction (Use3A2000 builds only).
instruct Repl4S_DSP(vecD dst, mRegI src) %{
  predicate(n->as_Vector()->length() == 4 && Use3A2000);
  match(Set dst (ReplicateS src));
  ins_cost(100);
  format %{ "replv_qh AT, $src\n\t"
            "dmtc1 AT, $dst\t! replicate4S" %}
  ins_encode %{
    __ replv_qh(AT, $src$$Register);
    __ dmtc1(AT, $dst$$FloatRegister);
  %}
  ins_pipe( pipe_mtc1 );
%}
// Generic short replication: double the 16-bit pattern to 32 then 64 bits
// inside AT, then move to the FP register.
instruct Repl4S(vecD dst, mRegI src) %{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (ReplicateS src));
  ins_cost(120);
  format %{ "move AT, $src \n\t"
            "dins AT, AT, 16, 16\n\t"
            "dinsu AT, AT, 32, 32\n\t"
            "dmtc1 AT, $dst\t! replicate4S" %}
  ins_encode %{
    __ move(AT, $src$$Register);
    __ dins(AT, AT, 16, 16);
    __ dinsu(AT, AT, 32, 32);
    __ dmtc1(AT, $dst$$FloatRegister);
  %}
  ins_pipe( pipe_mtc1 );
%}
// Replicate an immediate short to all 4 lanes with DSP instructions
// (Use3A2000 builds only). Small immediates use repl_qh directly; larger
// ones are materialized in AT first and broadcast with replv_qh.
instruct Repl4S_imm_DSP(vecD dst, immI con) %{
  predicate(n->as_Vector()->length() == 4 && Use3A2000);
  match(Set dst (ReplicateS con));
  ins_cost(100);
  format %{ "replv_qh AT, [$con]\n\t"
            "dmtc1 AT, $dst\t! replicate4S($con)" %}
  ins_encode %{
    int val = $con$$constant;
    if ( Assembler::is_simm(val, 10)) {
      //repl_qh supports 10 bits immediate
      __ repl_qh(AT, val);
    } else {
      __ li32(AT, val);
      __ replv_qh(AT, AT);
    }
    __ dmtc1(AT, $dst$$FloatRegister);
  %}
  ins_pipe( pipe_mtc1 );
%}
// Generic immediate-short replication: same doubling sequence as Repl4S,
// starting from the immediate constant.
instruct Repl4S_imm(vecD dst, immI con) %{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (ReplicateS con));
  ins_cost(110);
  format %{ "move AT, [$con]\n\t"
            "dins AT, AT, 16, 16\n\t"
            "dinsu AT, AT, 32, 32\n\t"
            "dmtc1 AT, $dst\t! replicate4S($con)" %}
  ins_encode %{
    __ move(AT, $con$$constant);
    __ dins(AT, AT, 16, 16);
    __ dinsu(AT, AT, 32, 32);
    __ dmtc1(AT, $dst$$FloatRegister);
  %}
  ins_pipe( pipe_mtc1 );
%}
// All-zero short vector: move R0 straight into the FP register.
instruct Repl4S_zero(vecD dst, immI0 zero) %{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (ReplicateS zero));
  format %{ "dmtc1 R0, $dst\t! replicate4S zero" %}
  ins_encode %{
    __ dmtc1(R0, $dst$$FloatRegister);
  %}
  ins_pipe( pipe_mtc1 );
%}
// All-ones short vector: nor(AT, R0, R0) produces 0xFFFFFFFFFFFFFFFF.
instruct Repl4S_M1(vecD dst, immI_M1 M1) %{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (ReplicateS M1));
  format %{ "dmtc1 -1, $dst\t! replicate4S -1" %}
  ins_encode %{
    __ nor(AT, R0, R0);
    __ dmtc1(AT, $dst$$FloatRegister);
  %}
  ins_pipe( pipe_mtc1 );
%}
// Replicate integer (4 byte) scalar to be vector
// Insert src into both the low and high words of AT, then move the 64-bit
// pattern to the FP register.
instruct Repl2I(vecD dst, mRegI src) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (ReplicateI src));
  format %{ "dins AT, $src, 0, 32\n\t"
            "dinsu AT, $src, 32, 32\n\t"
            "dmtc1 AT, $dst\t! replicate2I" %}
  ins_encode %{
    __ dins(AT, $src$$Register, 0, 32);
    __ dinsu(AT, $src$$Register, 32, 32);
    __ dmtc1(AT, $dst$$FloatRegister);
  %}
  ins_pipe( pipe_mtc1 );
%}
// Replicate an integer (4 byte) immediate into both vector lanes.
instruct Repl2I_imm(vecD dst, immI con, mA7RegI tmp) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (ReplicateI con));
  effect(KILL tmp);
  // Fix: format now matches the emitted code — the old format had a stray
  // ", 32" on li32 and omitted dinsu's bit-position/size operands.
  format %{ "li32 AT, [$con]\n\t"
            "dinsu AT, AT, 32, 32\n\t"
            "dmtc1 AT, $dst\t! replicate2I($con)" %}
  ins_encode %{
    // Materialize the 32-bit immediate in AT, duplicate it into the upper
    // word, then move the 64-bit pattern to the FP register.
    // NOTE(review): tmp is declared KILLed but only AT is used here —
    // confirm whether the tmp operand is still required.
    int val = $con$$constant;
    __ li32(AT, val);
    __ dinsu(AT, AT, 32, 32);
    __ dmtc1(AT, $dst$$FloatRegister);
  %}
  ins_pipe( pipe_mtc1 );
%}
// Replicate integer (4 byte) scalar zero to be vector
// Move R0 straight into the FP register.
instruct Repl2I_zero(vecD dst, immI0 zero) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (ReplicateI zero));
  format %{ "dmtc1 R0, $dst\t! replicate2I zero" %}
  ins_encode %{
    __ dmtc1(R0, $dst$$FloatRegister);
  %}
  ins_pipe( pipe_mtc1 );
%}
// Replicate integer (4 byte) scalar -1 to be vector
// nor(AT, R0, R0) yields all-ones; clobbers AT.
instruct Repl2I_M1(vecD dst, immI_M1 M1) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (ReplicateI M1));
  format %{ "dmtc1 -1, $dst\t! replicate2I -1, use AT" %}
  ins_encode %{
    __ nor(AT, R0, R0);
    __ dmtc1(AT, $dst$$FloatRegister);
  %}
  ins_pipe( pipe_mtc1 );
%}
// Replicate float (4 byte) scalar to be vector
// cvt.ps.s packs the single into both halves of a paired-single register.
instruct Repl2F(vecD dst, regF src) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (ReplicateF src));
  format %{ "cvt.ps $dst, $src, $src\t! replicate2F" %}
  ins_encode %{
    __ cvt_ps_s($dst$$FloatRegister, $src$$FloatRegister, $src$$FloatRegister);
  %}
  ins_pipe( pipe_slow );
%}
// Replicate float (4 byte) scalar zero to be vector
// +0.0f in both lanes is the all-zero bit pattern, so moving R0 suffices.
instruct Repl2F_zero(vecD dst, immF0 zero) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (ReplicateF zero));
  format %{ "dmtc1 R0, $dst\t! replicate2F zero" %}
  ins_encode %{
    __ dmtc1(R0, $dst$$FloatRegister);
  %}
  ins_pipe( pipe_mtc1 );
%}
13716 // ====================VECTOR ARITHMETIC=======================================
13718 // --------------------------------- ADD --------------------------------------
// Floats vector add
// Two-operand form: dst += src using paired-single add.
instruct vadd2F(vecD dst, vecD src) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (AddVF dst src));
  format %{ "add.ps $dst,$src\t! add packed2F" %}
  ins_encode %{
    __ add_ps($dst$$FloatRegister, $dst$$FloatRegister, $src$$FloatRegister);
  %}
  ins_pipe( pipe_slow );
%}
// Three-operand paired-single add: dst = src1 + src2.
instruct vadd2F3(vecD dst, vecD src1, vecD src2) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (AddVF src1 src2));
  format %{ "add.ps $dst,$src1,$src2\t! add packed2F" %}
  ins_encode %{
    __ add_ps($dst$$FloatRegister, $src1$$FloatRegister, $src2$$FloatRegister);
  %}
  ins_pipe( fpu_regF_regF );
%}
13741 // --------------------------------- SUB --------------------------------------
// Floats vector sub
// dst -= src using paired-single subtract.
instruct vsub2F(vecD dst, vecD src) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (SubVF dst src));
  format %{ "sub.ps $dst,$src\t! sub packed2F" %}
  ins_encode %{
    __ sub_ps($dst$$FloatRegister, $dst$$FloatRegister, $src$$FloatRegister);
  %}
  ins_pipe( fpu_regF_regF );
%}
13754 // --------------------------------- MUL --------------------------------------
// Floats vector mul
// dst *= src using paired-single multiply.
instruct vmul2F(vecD dst, vecD src) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (MulVF dst src));
  format %{ "mul.ps $dst, $src\t! mul packed2F" %}
  ins_encode %{
    __ mul_ps($dst$$FloatRegister, $dst$$FloatRegister, $src$$FloatRegister);
  %}
  ins_pipe( fpu_regF_regF );
%}
// Three-operand paired-single multiply: dst = src1 * src2.
instruct vmul2F3(vecD dst, vecD src1, vecD src2) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (MulVF src1 src2));
  format %{ "mul.ps $dst, $src1, $src2\t! mul packed2F" %}
  ins_encode %{
    __ mul_ps($dst$$FloatRegister, $src1$$FloatRegister, $src2$$FloatRegister);
  %}
  ins_pipe( fpu_regF_regF );
%}
13777 // --------------------------------- DIV --------------------------------------
13778 // MIPS do not have div.ps
13781 //----------PEEPHOLE RULES-----------------------------------------------------
13782 // These must follow all instruction definitions as they use the names
13783 // defined in the instructions definitions.
13784 //
13785 // peepmatch ( root_instr_name [preceeding_instruction]* );
13786 //
13787 // peepconstraint %{
13788 // (instruction_number.operand_name relational_op instruction_number.operand_name
13789 // [, ...] );
13790 // // instruction numbers are zero-based using left to right order in peepmatch
13791 //
13792 // peepreplace ( instr_name ( [instruction_number.operand_name]* ) );
13793 // // provide an instruction_number.operand_name for each operand that appears
13794 // // in the replacement instruction's match rule
13795 //
13796 // ---------VM FLAGS---------------------------------------------------------
13797 //
13798 // All peephole optimizations can be turned off using -XX:-OptoPeephole
13799 //
13800 // Each peephole rule is given an identifying number starting with zero and
13801 // increasing by one in the order seen by the parser. An individual peephole
13802 // can be enabled, and all others disabled, by using -XX:OptoPeepholeAt=#
13803 // on the command-line.
13804 //
13805 // ---------CURRENT LIMITATIONS----------------------------------------------
13806 //
13807 // Only match adjacent instructions in same basic block
13808 // Only equality constraints
13809 // Only constraints between operands, not (0.dest_reg == EAX_enc)
13810 // Only one replacement instruction
13811 //
13812 // ---------EXAMPLE----------------------------------------------------------
13813 //
13814 // // pertinent parts of existing instructions in architecture description
13815 // instruct movI(eRegI dst, eRegI src) %{
13816 // match(Set dst (CopyI src));
13817 // %}
13818 //
13819 // instruct incI_eReg(eRegI dst, immI1 src, eFlagsReg cr) %{
13820 // match(Set dst (AddI dst src));
13821 // effect(KILL cr);
13822 // %}
13823 //
13824 // // Change (inc mov) to lea
13825 // peephole %{
// // increment preceded by register-register move
13827 // peepmatch ( incI_eReg movI );
13828 // // require that the destination register of the increment
13829 // // match the destination register of the move
13830 // peepconstraint ( 0.dst == 1.dst );
13831 // // construct a replacement instruction that sets
13832 // // the destination to ( move's source register + one )
13833 // peepreplace ( leaI_eReg_immI( 0.dst 1.src 0.src ) );
13834 // %}
13835 //
13836 // Implementation no longer uses movX instructions since
13837 // machine-independent system no longer uses CopyX nodes.
13838 //
13839 // peephole %{
13840 // peepmatch ( incI_eReg movI );
13841 // peepconstraint ( 0.dst == 1.dst );
13842 // peepreplace ( leaI_eReg_immI( 0.dst 1.src 0.src ) );
13843 // %}
13844 //
13845 // peephole %{
13846 // peepmatch ( decI_eReg movI );
13847 // peepconstraint ( 0.dst == 1.dst );
13848 // peepreplace ( leaI_eReg_immI( 0.dst 1.src 0.src ) );
13849 // %}
13850 //
13851 // peephole %{
13852 // peepmatch ( addI_eReg_imm movI );
13853 // peepconstraint ( 0.dst == 1.dst );
13854 // peepreplace ( leaI_eReg_immI( 0.dst 1.src 0.src ) );
13855 // %}
13856 //
13857 // peephole %{
13858 // peepmatch ( addP_eReg_imm movP );
13859 // peepconstraint ( 0.dst == 1.dst );
13860 // peepreplace ( leaP_eReg_immI( 0.dst 1.src 0.src ) );
13861 // %}
13863 // // Change load of spilled value to only a spill
13864 // instruct storeI(memory mem, eRegI src) %{
13865 // match(Set mem (StoreI mem src));
13866 // %}
13867 //
13868 // instruct loadI(eRegI dst, memory mem) %{
13869 // match(Set dst (LoadI mem));
13870 // %}
13871 //
13872 //peephole %{
13873 // peepmatch ( loadI storeI );
13874 // peepconstraint ( 1.src == 0.dst, 1.mem == 0.mem );
13875 // peepreplace ( storeI( 1.mem 1.mem 1.src ) );
13876 //%}
13878 //----------SMARTSPILL RULES---------------------------------------------------
13879 // These must follow all instruction definitions as they use the names
13880 // defined in the instructions definitions.