Wed, 05 Apr 2017 12:41:12 +0800
#5226 Disabled PS instructions
The kernel does not have emulation of PS instructions yet, so PS instructions are disabled.
Fixed a JVM crash caused by hotspot/test/compiler/6340864/TestFloatVect.java.
1 //
2 // Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved.
3 // Copyright (c) 2015, 2016, Loongson Technology. All rights reserved.
4 // DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5 //
6 // This code is free software; you can redistribute it and/or modify it
7 // under the terms of the GNU General Public License version 2 only, as
8 // published by the Free Software Foundation.
9 //
10 // This code is distributed in the hope that it will be useful, but WITHOUT
11 // ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 // FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
13 // version 2 for more details (a copy is included in the LICENSE file that
14 // accompanied this code).
15 //
16 // You should have received a copy of the GNU General Public License version
17 // 2 along with this work; if not, write to the Free Software Foundation,
18 // Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
19 //
20 // Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
21 // or visit www.oracle.com if you need additional information or have any
22 // questions.
23 //
24 //
26 // GodSon3 Architecture Description File
28 //----------REGISTER DEFINITION BLOCK------------------------------------------
29 // This information is used by the matcher and the register allocator to
30 // describe individual registers and classes of registers within the target
31 // architecture.
33 // format:
34 // reg_def name (call convention, c-call convention, ideal type, encoding);
35 // call convention :
36 // NS = No-Save
37 // SOC = Save-On-Call
38 // SOE = Save-On-Entry
39 // AS = Always-Save
40 // ideal type :
41 // see opto/opcodes.hpp for more info
42 // reg_class name (reg, ...);
43 // alloc_class name (reg, ...);
44 register %{
46 // General Registers
47 // Integer Registers
48 reg_def R0 ( NS, NS, Op_RegI, 0, VMRegImpl::Bad());
49 reg_def AT ( NS, NS, Op_RegI, 1, AT->as_VMReg());
50 reg_def AT_H ( NS, NS, Op_RegI, 1, AT->as_VMReg()->next());
51 reg_def V0 (SOC, SOC, Op_RegI, 2, V0->as_VMReg());
52 reg_def V0_H (SOC, SOC, Op_RegI, 2, V0->as_VMReg()->next());
53 reg_def V1 (SOC, SOC, Op_RegI, 3, V1->as_VMReg());
54 reg_def V1_H (SOC, SOC, Op_RegI, 3, V1->as_VMReg()->next());
55 reg_def A0 (SOC, SOC, Op_RegI, 4, A0->as_VMReg());
56 reg_def A0_H (SOC, SOC, Op_RegI, 4, A0->as_VMReg()->next());
57 reg_def A1 (SOC, SOC, Op_RegI, 5, A1->as_VMReg());
58 reg_def A1_H (SOC, SOC, Op_RegI, 5, A1->as_VMReg()->next());
59 reg_def A2 (SOC, SOC, Op_RegI, 6, A2->as_VMReg());
60 reg_def A2_H (SOC, SOC, Op_RegI, 6, A2->as_VMReg()->next());
61 reg_def A3 (SOC, SOC, Op_RegI, 7, A3->as_VMReg());
62 reg_def A3_H (SOC, SOC, Op_RegI, 7, A3->as_VMReg()->next());
63 reg_def A4 (SOC, SOC, Op_RegI, 8, A4->as_VMReg());
64 reg_def A4_H (SOC, SOC, Op_RegI, 8, A4->as_VMReg()->next());
65 reg_def A5 (SOC, SOC, Op_RegI, 9, A5->as_VMReg());
66 reg_def A5_H (SOC, SOC, Op_RegI, 9, A5->as_VMReg()->next());
67 reg_def A6 (SOC, SOC, Op_RegI, 10, A6->as_VMReg());
68 reg_def A6_H (SOC, SOC, Op_RegI, 10, A6->as_VMReg()->next());
69 reg_def A7 (SOC, SOC, Op_RegI, 11, A7->as_VMReg());
70 reg_def A7_H (SOC, SOC, Op_RegI, 11, A7->as_VMReg()->next());
71 reg_def T0 (SOC, SOC, Op_RegI, 12, T0->as_VMReg());
72 reg_def T0_H (SOC, SOC, Op_RegI, 12, T0->as_VMReg()->next());
73 reg_def T1 (SOC, SOC, Op_RegI, 13, T1->as_VMReg());
74 reg_def T1_H (SOC, SOC, Op_RegI, 13, T1->as_VMReg()->next());
75 reg_def T2 (SOC, SOC, Op_RegI, 14, T2->as_VMReg());
76 reg_def T2_H (SOC, SOC, Op_RegI, 14, T2->as_VMReg()->next());
77 reg_def T3 (SOC, SOC, Op_RegI, 15, T3->as_VMReg());
78 reg_def T3_H (SOC, SOC, Op_RegI, 15, T3->as_VMReg()->next());
79 reg_def S0 (SOC, SOE, Op_RegI, 16, S0->as_VMReg());
80 reg_def S0_H (SOC, SOE, Op_RegI, 16, S0->as_VMReg()->next());
81 reg_def S1 (SOC, SOE, Op_RegI, 17, S1->as_VMReg());
82 reg_def S1_H (SOC, SOE, Op_RegI, 17, S1->as_VMReg()->next());
83 reg_def S2 (SOC, SOE, Op_RegI, 18, S2->as_VMReg());
84 reg_def S2_H (SOC, SOE, Op_RegI, 18, S2->as_VMReg()->next());
85 reg_def S3 (SOC, SOE, Op_RegI, 19, S3->as_VMReg());
86 reg_def S3_H (SOC, SOE, Op_RegI, 19, S3->as_VMReg()->next());
87 reg_def S4 (SOC, SOE, Op_RegI, 20, S4->as_VMReg());
88 reg_def S4_H (SOC, SOE, Op_RegI, 20, S4->as_VMReg()->next());
89 reg_def S5 (SOC, SOE, Op_RegI, 21, S5->as_VMReg());
90 reg_def S5_H (SOC, SOE, Op_RegI, 21, S5->as_VMReg()->next());
91 reg_def S6 (SOC, SOE, Op_RegI, 22, S6->as_VMReg());
92 reg_def S6_H (SOC, SOE, Op_RegI, 22, S6->as_VMReg()->next());
93 reg_def S7 (SOC, SOE, Op_RegI, 23, S7->as_VMReg());
94 reg_def S7_H (SOC, SOE, Op_RegI, 23, S7->as_VMReg()->next());
95 reg_def T8 (SOC, SOC, Op_RegI, 24, T8->as_VMReg());
96 reg_def T8_H (SOC, SOC, Op_RegI, 24, T8->as_VMReg()->next());
97 reg_def T9 (SOC, SOC, Op_RegI, 25, T9->as_VMReg());
98 reg_def T9_H (SOC, SOC, Op_RegI, 25, T9->as_VMReg()->next());
100 // Special Registers
101 reg_def K0 ( NS, NS, Op_RegI, 26, K0->as_VMReg());
102 reg_def K1 ( NS, NS, Op_RegI, 27, K1->as_VMReg());
103 reg_def GP ( NS, NS, Op_RegI, 28, GP->as_VMReg());
104 reg_def GP_H ( NS, NS, Op_RegI, 28, GP->as_VMReg()->next());
105 reg_def SP ( NS, NS, Op_RegI, 29, SP->as_VMReg());
106 reg_def SP_H ( NS, NS, Op_RegI, 29, SP->as_VMReg()->next());
107 reg_def FP ( NS, NS, Op_RegI, 30, FP->as_VMReg());
108 reg_def FP_H ( NS, NS, Op_RegI, 30, FP->as_VMReg()->next());
109 reg_def RA ( NS, NS, Op_RegI, 31, RA->as_VMReg());
110 reg_def RA_H ( NS, NS, Op_RegI, 31, RA->as_VMReg()->next());
112 // Floating registers.
113 reg_def F0 ( SOC, SOC, Op_RegF, 0, F0->as_VMReg());
114 reg_def F0_H ( SOC, SOC, Op_RegF, 0, F0->as_VMReg()->next());
115 reg_def F1 ( SOC, SOC, Op_RegF, 1, F1->as_VMReg());
116 reg_def F1_H ( SOC, SOC, Op_RegF, 1, F1->as_VMReg()->next());
117 reg_def F2 ( SOC, SOC, Op_RegF, 2, F2->as_VMReg());
118 reg_def F2_H ( SOC, SOC, Op_RegF, 2, F2->as_VMReg()->next());
119 reg_def F3 ( SOC, SOC, Op_RegF, 3, F3->as_VMReg());
120 reg_def F3_H ( SOC, SOC, Op_RegF, 3, F3->as_VMReg()->next());
121 reg_def F4 ( SOC, SOC, Op_RegF, 4, F4->as_VMReg());
122 reg_def F4_H ( SOC, SOC, Op_RegF, 4, F4->as_VMReg()->next());
123 reg_def F5 ( SOC, SOC, Op_RegF, 5, F5->as_VMReg());
124 reg_def F5_H ( SOC, SOC, Op_RegF, 5, F5->as_VMReg()->next());
125 reg_def F6 ( SOC, SOC, Op_RegF, 6, F6->as_VMReg());
126 reg_def F6_H ( SOC, SOC, Op_RegF, 6, F6->as_VMReg()->next());
127 reg_def F7 ( SOC, SOC, Op_RegF, 7, F7->as_VMReg());
128 reg_def F7_H ( SOC, SOC, Op_RegF, 7, F7->as_VMReg()->next());
129 reg_def F8 ( SOC, SOC, Op_RegF, 8, F8->as_VMReg());
130 reg_def F8_H ( SOC, SOC, Op_RegF, 8, F8->as_VMReg()->next());
131 reg_def F9 ( SOC, SOC, Op_RegF, 9, F9->as_VMReg());
132 reg_def F9_H ( SOC, SOC, Op_RegF, 9, F9->as_VMReg()->next());
133 reg_def F10 ( SOC, SOC, Op_RegF, 10, F10->as_VMReg());
134 reg_def F10_H ( SOC, SOC, Op_RegF, 10, F10->as_VMReg()->next());
135 reg_def F11 ( SOC, SOC, Op_RegF, 11, F11->as_VMReg());
136 reg_def F11_H ( SOC, SOC, Op_RegF, 11, F11->as_VMReg()->next());
137 reg_def F12 ( SOC, SOC, Op_RegF, 12, F12->as_VMReg());
138 reg_def F12_H ( SOC, SOC, Op_RegF, 12, F12->as_VMReg()->next());
139 reg_def F13 ( SOC, SOC, Op_RegF, 13, F13->as_VMReg());
140 reg_def F13_H ( SOC, SOC, Op_RegF, 13, F13->as_VMReg()->next());
141 reg_def F14 ( SOC, SOC, Op_RegF, 14, F14->as_VMReg());
142 reg_def F14_H ( SOC, SOC, Op_RegF, 14, F14->as_VMReg()->next());
143 reg_def F15 ( SOC, SOC, Op_RegF, 15, F15->as_VMReg());
144 reg_def F15_H ( SOC, SOC, Op_RegF, 15, F15->as_VMReg()->next());
145 reg_def F16 ( SOC, SOC, Op_RegF, 16, F16->as_VMReg());
146 reg_def F16_H ( SOC, SOC, Op_RegF, 16, F16->as_VMReg()->next());
147 reg_def F17 ( SOC, SOC, Op_RegF, 17, F17->as_VMReg());
148 reg_def F17_H ( SOC, SOC, Op_RegF, 17, F17->as_VMReg()->next());
149 reg_def F18 ( SOC, SOC, Op_RegF, 18, F18->as_VMReg());
150 reg_def F18_H ( SOC, SOC, Op_RegF, 18, F18->as_VMReg()->next());
151 reg_def F19 ( SOC, SOC, Op_RegF, 19, F19->as_VMReg());
152 reg_def F19_H ( SOC, SOC, Op_RegF, 19, F19->as_VMReg()->next());
153 reg_def F20 ( SOC, SOC, Op_RegF, 20, F20->as_VMReg());
154 reg_def F20_H ( SOC, SOC, Op_RegF, 20, F20->as_VMReg()->next());
155 reg_def F21 ( SOC, SOC, Op_RegF, 21, F21->as_VMReg());
156 reg_def F21_H ( SOC, SOC, Op_RegF, 21, F21->as_VMReg()->next());
157 reg_def F22 ( SOC, SOC, Op_RegF, 22, F22->as_VMReg());
158 reg_def F22_H ( SOC, SOC, Op_RegF, 22, F22->as_VMReg()->next());
159 reg_def F23 ( SOC, SOC, Op_RegF, 23, F23->as_VMReg());
160 reg_def F23_H ( SOC, SOC, Op_RegF, 23, F23->as_VMReg()->next());
161 reg_def F24 ( SOC, SOC, Op_RegF, 24, F24->as_VMReg());
162 reg_def F24_H ( SOC, SOC, Op_RegF, 24, F24->as_VMReg()->next());
163 reg_def F25 ( SOC, SOC, Op_RegF, 25, F25->as_VMReg());
164 reg_def F25_H ( SOC, SOC, Op_RegF, 25, F25->as_VMReg()->next());
165 reg_def F26 ( SOC, SOC, Op_RegF, 26, F26->as_VMReg());
166 reg_def F26_H ( SOC, SOC, Op_RegF, 26, F26->as_VMReg()->next());
167 reg_def F27 ( SOC, SOC, Op_RegF, 27, F27->as_VMReg());
168 reg_def F27_H ( SOC, SOC, Op_RegF, 27, F27->as_VMReg()->next());
169 reg_def F28 ( SOC, SOC, Op_RegF, 28, F28->as_VMReg());
170 reg_def F28_H ( SOC, SOC, Op_RegF, 28, F28->as_VMReg()->next());
171 reg_def F29 ( SOC, SOC, Op_RegF, 29, F29->as_VMReg());
172 reg_def F29_H ( SOC, SOC, Op_RegF, 29, F29->as_VMReg()->next());
173 reg_def F30 ( SOC, SOC, Op_RegF, 30, F30->as_VMReg());
174 reg_def F30_H ( SOC, SOC, Op_RegF, 30, F30->as_VMReg()->next());
175 reg_def F31 ( SOC, SOC, Op_RegF, 31, F31->as_VMReg());
176 reg_def F31_H ( SOC, SOC, Op_RegF, 31, F31->as_VMReg()->next());
179 // ----------------------------
180 // Special Registers
181 // Condition Codes Flag Registers
182 reg_def MIPS_FLAG (SOC, SOC, Op_RegFlags, 1, as_Register(1)->as_VMReg());
183 //S6 is used for get_thread(S6)
184 //S5 is uesd for heapbase of compressed oop
// Allocation order for the general-purpose registers. Callee-saved (S*)
// registers come first, then caller-saved temporaries and argument
// registers; SP/FP/RA appear last so the allocator prefers freely
// usable registers.
185 alloc_class chunk0(
186 S7, S7_H,
187 S0, S0_H,
188 S1, S1_H,
189 S2, S2_H,
190 S4, S4_H,
191 S5, S5_H,
192 S6, S6_H,
193 S3, S3_H,
194 T2, T2_H,
195 T3, T3_H,
196 T8, T8_H,
197 T9, T9_H,
198 T1, T1_H, // inline_cache_reg
199 V1, V1_H,
200 A7, A7_H,
201 A6, A6_H,
202 A5, A5_H,
203 A4, A4_H,
204 V0, V0_H,
205 A3, A3_H,
206 A2, A2_H,
207 A1, A1_H,
208 A0, A0_H,
209 T0, T0_H,
210 GP, GP_H, // fix: the list separator comma was missing here (ADLC parse error)
211 RA, RA_H,
212 SP, SP_H, // stack_pointer
213 FP, FP_H // frame_pointer
214 );
216 alloc_class chunk1( F0, F0_H,
217 F1, F1_H,
218 F2, F2_H,
219 F3, F3_H,
220 F4, F4_H,
221 F5, F5_H,
222 F6, F6_H,
223 F7, F7_H,
224 F8, F8_H,
225 F9, F9_H,
226 F10, F10_H,
227 F11, F11_H,
228 F20, F20_H,
229 F21, F21_H,
230 F22, F22_H,
231 F23, F23_H,
232 F24, F24_H,
233 F25, F25_H,
234 F26, F26_H,
235 F27, F27_H,
236 F28, F28_H,
237 F19, F19_H,
238 F18, F18_H,
239 F17, F17_H,
240 F16, F16_H,
241 F15, F15_H,
242 F14, F14_H,
243 F13, F13_H,
244 F12, F12_H,
245 F29, F29_H,
246 F30, F30_H,
247 F31, F31_H);
249 alloc_class chunk2(MIPS_FLAG);
251 reg_class s_reg( S0, S1, S2, S3, S4, S5, S6, S7 );
252 reg_class s0_reg( S0 );
253 reg_class s1_reg( S1 );
254 reg_class s2_reg( S2 );
255 reg_class s3_reg( S3 );
256 reg_class s4_reg( S4 );
257 reg_class s5_reg( S5 );
258 reg_class s6_reg( S6 );
259 reg_class s7_reg( S7 );
261 reg_class t_reg( T0, T1, T2, T3, T8, T9 );
262 reg_class t0_reg( T0 );
263 reg_class t1_reg( T1 );
264 reg_class t2_reg( T2 );
265 reg_class t3_reg( T3 );
266 reg_class t8_reg( T8 );
267 reg_class t9_reg( T9 );
269 reg_class a_reg( A0, A1, A2, A3, A4, A5, A6, A7 );
270 reg_class a0_reg( A0 );
271 reg_class a1_reg( A1 );
272 reg_class a2_reg( A2 );
273 reg_class a3_reg( A3 );
274 reg_class a4_reg( A4 );
275 reg_class a5_reg( A5 );
276 reg_class a6_reg( A6 );
277 reg_class a7_reg( A7 );
279 reg_class v0_reg( V0 );
280 reg_class v1_reg( V1 );
282 reg_class sp_reg( SP, SP_H );
283 reg_class fp_reg( FP, FP_H );
285 reg_class mips_flags(MIPS_FLAG);
287 reg_class v0_long_reg( V0, V0_H );
288 reg_class v1_long_reg( V1, V1_H );
289 reg_class a0_long_reg( A0, A0_H );
290 reg_class a1_long_reg( A1, A1_H );
291 reg_class a2_long_reg( A2, A2_H );
292 reg_class a3_long_reg( A3, A3_H );
293 reg_class a4_long_reg( A4, A4_H );
294 reg_class a5_long_reg( A5, A5_H );
295 reg_class a6_long_reg( A6, A6_H );
296 reg_class a7_long_reg( A7, A7_H );
297 reg_class t0_long_reg( T0, T0_H );
298 reg_class t1_long_reg( T1, T1_H );
299 reg_class t2_long_reg( T2, T2_H );
300 reg_class t3_long_reg( T3, T3_H );
301 reg_class t8_long_reg( T8, T8_H );
302 reg_class t9_long_reg( T9, T9_H );
303 reg_class s0_long_reg( S0, S0_H );
304 reg_class s1_long_reg( S1, S1_H );
305 reg_class s2_long_reg( S2, S2_H );
306 reg_class s3_long_reg( S3, S3_H );
307 reg_class s4_long_reg( S4, S4_H );
308 reg_class s5_long_reg( S5, S5_H );
309 reg_class s6_long_reg( S6, S6_H );
310 reg_class s7_long_reg( S7, S7_H );
312 reg_class int_reg( S7, S0, S1, S2, S4, S3, T8, T2, T3, T1, V1, A7, A6, A5, A4, V0, A3, A2, A1, A0, T0 );
314 reg_class no_Ax_int_reg( S7, S0, S1, S2, S4, S3, T8, T2, T3, T1, V1, V0, T0 );
316 reg_class p_reg(
317 S7, S7_H,
318 S0, S0_H,
319 S1, S1_H,
320 S2, S2_H,
321 S4, S4_H,
322 S3, S3_H,
323 T8, T8_H,
324 T2, T2_H,
325 T3, T3_H,
326 T1, T1_H,
327 A7, A7_H,
328 A6, A6_H,
329 A5, A5_H,
330 A4, A4_H,
331 A3, A3_H,
332 A2, A2_H,
333 A1, A1_H,
334 A0, A0_H,
335 T0, T0_H
336 );
338 reg_class no_T8_p_reg(
339 S7, S7_H,
340 S0, S0_H,
341 S1, S1_H,
342 S2, S2_H,
343 S4, S4_H,
344 S3, S3_H,
345 T2, T2_H,
346 T3, T3_H,
347 T1, T1_H,
348 A7, A7_H,
349 A6, A6_H,
350 A5, A5_H,
351 A4, A4_H,
352 A3, A3_H,
353 A2, A2_H,
354 A1, A1_H,
355 A0, A0_H,
356 T0, T0_H
357 );
359 reg_class long_reg(
360 S7, S7_H,
361 S0, S0_H,
362 S1, S1_H,
363 S2, S2_H,
364 S4, S4_H,
365 S3, S3_H,
366 T8, T8_H,
367 T2, T2_H,
368 T3, T3_H,
369 T1, T1_H,
370 A7, A7_H,
371 A6, A6_H,
372 A5, A5_H,
373 A4, A4_H,
374 A3, A3_H,
375 A2, A2_H,
376 A1, A1_H,
377 A0, A0_H,
378 T0, T0_H
379 );
382 // Floating point registers.
383 // 2012/8/23 Fu: F30/F31 are used as temporary registers in D2I
384 // 2016/12/1 aoqi: F31 are not used as temporary registers in D2I
// Single-precision FP allocation class. F30 is deliberately excluded (used
// as a scratch register in D2I, see notes above); F31 remains available.
// Fix: the comma between F17 and F18 was missing (ADLC parse error).
385 reg_class flt_reg( F0, F1, F2, F3, F4, F5, F6, F7, F8, F9, F10, F11, F12, F13, F14, F15, F16, F17, F18, F19, F20, F21, F22, F23, F24, F25, F26, F27, F28, F29, F31);
386 reg_class dbl_reg( F0, F0_H,
387 F1, F1_H,
388 F2, F2_H,
389 F3, F3_H,
390 F4, F4_H,
391 F5, F5_H,
392 F6, F6_H,
393 F7, F7_H,
394 F8, F8_H,
395 F9, F9_H,
396 F10, F10_H,
397 F11, F11_H,
398 F12, F12_H,
399 F13, F13_H,
400 F14, F14_H,
401 F15, F15_H,
402 F16, F16_H,
403 F17, F17_H,
404 F18, F18_H,
405 F19, F19_H,
406 F20, F20_H,
407 F21, F21_H,
408 F22, F22_H,
409 F23, F23_H,
410 F24, F24_H,
411 F25, F25_H,
412 F26, F26_H,
413 F27, F27_H,
414 F28, F28_H,
415 F29, F29_H,
416 F31, F31_H);
418 reg_class flt_arg0( F12 );
419 reg_class dbl_arg0( F12, F12_H );
420 reg_class dbl_arg1( F14, F14_H );
422 %}
424 //----------DEFINITION BLOCK---------------------------------------------------
425 // Define name --> value mappings to inform the ADLC of an integer valued name
426 // Current support includes integer values in the range [0, 0x7FFFFFFF]
427 // Format:
428 // int_def <name> ( <int_value>, <expression>);
429 // Generated Code in ad_<arch>.hpp
430 // #define <name> (<expression>)
431 // // value == <int_value>
432 // Generated code in ad_<arch>.cpp adlc_verification()
433 // assert( <name> == <int_value>, "Expect (<expression>) to equal <int_value>");
434 //
435 definitions %{
436 int_def DEFAULT_COST ( 100, 100);
437 int_def HUGE_COST (1000000, 1000000);
439 // Memory refs are twice as expensive as run-of-the-mill.
440 int_def MEMORY_REF_COST ( 200, DEFAULT_COST * 2);
442 // Branches are even more expensive.
443 int_def BRANCH_COST ( 300, DEFAULT_COST * 3);
444 // we use jr instruction to construct call, so more expensive
445 // by yjl 2/28/2006
446 int_def CALL_COST ( 500, DEFAULT_COST * 5);
447 /*
448 int_def EQUAL ( 1, 1 );
449 int_def NOT_EQUAL ( 2, 2 );
450 int_def GREATER ( 3, 3 );
451 int_def GREATER_EQUAL ( 4, 4 );
452 int_def LESS ( 5, 5 );
453 int_def LESS_EQUAL ( 6, 6 );
454 */
455 %}
459 //----------SOURCE BLOCK-------------------------------------------------------
460 // This is a block of C++ code which provides values, functions, and
461 // definitions necessary in the rest of the architecture description
463 source_hpp %{
464 // Header information of the source block.
465 // Method declarations/definitions which are used outside
466 // the ad-scope can conveniently be defined here.
467 //
468 // To keep related declarations/definitions/uses close together,
469 // we switch between source %{ }% and source_hpp %{ }% freely as needed.
// Bookkeeping queries for call trampoline stubs, consulted by
// Compile::shorten_branches. This MIPS port never emits call
// trampolines, so both queries report zero.
471 class CallStubImpl {
473 //--------------------------------------------------------------
474 //---< Used for optimization in Compile::shorten_branches >---
475 //--------------------------------------------------------------
477 public:
478 // Size of call trampoline stub.
479 static uint size_call_trampoline() {
480 return 0; // no call trampolines on this platform
481 }
483 // number of relocations needed by a call trampoline stub
484 static uint reloc_call_trampoline() {
485 return 0; // no call trampolines on this platform
486 }
487 };
// Emits and sizes the exception and deopt handler stubs for compiled
// methods. The emit_* bodies live in the source %{ %} block below; the
// size_* queries must be conservative upper bounds on the emitted code.
489 class HandlerImpl {
491 public:
493 static int emit_exception_handler(CodeBuffer &cbuf);
494 static int emit_deopt_handler(CodeBuffer& cbuf);
496 static uint size_exception_handler() {
497 // NativeCall instruction size is the same as NativeJump.
498 // exception handler starts out as jump and can be patched to
499 // a call by deoptimization. (4932387)
500 // Note that this value is also credited (in output.cpp) to
501 // the size of the code section.
502 // return NativeJump::instruction_size;
503 int size = NativeCall::instruction_size;
504 return round_to(size, 16);
505 }
507 #ifdef _LP64
508 static uint size_deopt_handler() {
// 64-bit: the deopt handler is a single patchable call, 16-byte aligned.
509 int size = NativeCall::instruction_size;
510 return round_to(size, 16);
511 }
512 #else
513 static uint size_deopt_handler() {
514 // NativeCall instruction size is the same as NativeJump.
515 // exception handler starts out as jump and can be patched to
516 // a call by deoptimization. (4932387)
517 // Note that this value is also credited (in output.cpp) to
518 // the size of the code section.
519 return 5 + NativeJump::instruction_size; // pushl(); jmp;
520 }
521 #endif
522 };
524 %} // end source_hpp
526 source %{
528 #define NO_INDEX 0
529 #define RELOC_IMM64 Assembler::imm_operand
530 #define RELOC_DISP32 Assembler::disp32_operand
533 #define __ _masm.
536 // Emit exception handler code.
537 // Stuff framesize into a register and call a VM stub routine.
// Returns the offset of the handler within the stubs section, or 0 if the
// code buffer could not be expanded.
538 int HandlerImpl::emit_exception_handler(CodeBuffer& cbuf) {
539 // Note that the code buffer's insts_mark is always relative to insts.
540 // That's why we must use the macroassembler to generate a handler.
541 MacroAssembler _masm(&cbuf);
542 address base =
543 __ start_a_stub(size_exception_handler());
544 if (base == NULL) return 0; // CodeBuffer::expand failed
545 int offset = __ offset();
547 __ block_comment("; emit_exception_handler");
// Record the call site and jump (patchably) to the shared exception blob.
549 cbuf.set_insts_mark();
550 __ relocate(relocInfo::runtime_call_type);
551 __ patchable_jump((address)OptoRuntime::exception_blob()->entry_point());
552 __ align(16);
// Guard against emitting more code than size_exception_handler() promised.
553 assert(__ offset() - offset <= (int) size_exception_handler(), "overflow");
554 __ end_a_stub();
555 return offset;
556 }
558 // Emit deopt handler code.
// Emits a patchable call to the deopt blob's unpack entry. Returns the
// offset of the handler within the stubs section, or 0 on buffer overflow.
559 int HandlerImpl::emit_deopt_handler(CodeBuffer& cbuf) {
560 // Note that the code buffer's insts_mark is always relative to insts.
561 // That's why we must use the macroassembler to generate a handler.
562 MacroAssembler _masm(&cbuf);
563 address base =
564 __ start_a_stub(size_deopt_handler());
566 // FIXME
567 if (base == NULL) return 0; // CodeBuffer::expand failed
568 int offset = __ offset();
570 __ block_comment("; emit_deopt_handler");
// Record the call site, then call (not jump): deopt needs the return pc.
572 cbuf.set_insts_mark();
573 __ relocate(relocInfo::runtime_call_type);
574 __ patchable_call(SharedRuntime::deopt_blob()->unpack());
575 __ align(16);
// Guard against emitting more code than size_deopt_handler() promised.
576 assert(__ offset() - offset <= (int) size_deopt_handler(), "overflow");
577 __ end_a_stub();
578 return offset;
579 }
582 const bool Matcher::match_rule_supported(int opcode) {
583 if (!has_match_rule(opcode))
584 return false;
586 switch (opcode) {
587 //Op_CountLeadingZerosI Op_CountLeadingZerosL can be deleted, all MIPS CPUs support clz & dclz.
588 case Op_CountLeadingZerosI:
589 case Op_CountLeadingZerosL:
590 if (!UseCountLeadingZerosInstruction)
591 return false;
592 break;
593 case Op_CountTrailingZerosI:
594 case Op_CountTrailingZerosL:
595 if (!UseCountTrailingZerosInstruction)
596 return false;
597 break;
598 }
600 return true; // Per default match rules are supported.
601 }
603 //FIXME
604 // emit call stub, compiled java to interpreter
// Emits the static call stub: materialize the (initially null) methodOop
// into S3, then a patchable jump whose target is fixed up when the call is
// converted between compiled and interpreted code.
605 void emit_java_to_interp(CodeBuffer &cbuf ) {
606 // Stub is fixed up when the corresponding call is converted from calling
607 // compiled code to calling interpreted code.
608 // mov rbx,0
609 // jmp -1
611 address mark = cbuf.insts_mark(); // get mark within main instrs section
613 // Note that the code buffer's insts_mark is always relative to insts.
614 // That's why we must use the macroassembler to generate a stub.
615 MacroAssembler _masm(&cbuf);
617 address base =
618 __ start_a_stub(Compile::MAX_stubs_size);
619 if (base == NULL) return; // CodeBuffer::expand failed
620 // static stub relocation stores the instruction address of the call
622 __ relocate(static_stub_Relocation::spec(mark), 0);
624 // static stub relocation also tags the methodOop in the code-stream.
625 __ patchable_set48(S3, (long)0);
626 // This is recognized as unresolved by relocs/nativeInst/ic code
628 __ relocate(relocInfo::runtime_call_type);
630 cbuf.set_insts_mark();
// -1 is a placeholder target; the jump is patched when the stub is resolved.
631 address call_pc = (address)-1;
632 __ patchable_jump(call_pc);
633 __ align(16);
634 __ end_a_stub();
635 // Update current stubs pointer and restore code_end.
636 }
638 // size of call stub, compiled java to interpretor
639 uint size_java_to_interp() {
640 int size = 4 * 4 + NativeCall::instruction_size; // sizeof(li48) + NativeCall::instruction_size
641 return round_to(size, 16);
642 }
644 // relocation entries for call stub, compiled java to interpreter
// Conservative count of relocation records the stub machinery may need.
// NOTE(review): 16 covers both emit_java_to_interp and Java_Static_Call per
// the comment below — confirm against the actual relocations emitted there.
645 uint reloc_java_to_interp() {
646 return 16; // in emit_java_to_interp + in Java_Static_Call
647 }
649 bool Matcher::is_short_branch_offset(int rule, int br_size, int offset) {
650 if( Assembler::is_simm16(offset) ) return true;
651 else {
652 assert(false, "Not implemented yet !" );
653 Unimplemented();
654 }
655 }
658 // No additional cost for CMOVL.
659 const int Matcher::long_cmove_cost() { return 0; }
661 // No CMOVF/CMOVD with SSE2
662 const int Matcher::float_cmove_cost() { return ConditionalMoveLimit; }
664 // Does the CPU require late expand (see block.cpp for description of late expand)?
665 const bool Matcher::require_postalloc_expand = false;
667 // Should the Matcher clone shifts on addressing modes, expecting them
668 // to be subsumed into complex addressing expressions or compute them
669 // into registers? True for Intel but false for most RISCs
670 const bool Matcher::clone_shift_expressions = false;
672 // Do we need to mask the count passed to shift instructions or does
673 // the cpu only look at the lower 5/6 bits anyway?
674 const bool Matcher::need_masked_shift_count = false;
676 bool Matcher::narrow_oop_use_complex_address() {
677 NOT_LP64(ShouldNotCallThis());
678 assert(UseCompressedOops, "only for compressed oops code");
679 return false;
680 }
682 bool Matcher::narrow_klass_use_complex_address() {
683 NOT_LP64(ShouldNotCallThis());
684 assert(UseCompressedClassPointers, "only for compressed klass code");
685 return false;
686 }
688 // This is UltraSparc specific, true just means we have fast l2f conversion
689 const bool Matcher::convL2FSupported(void) {
690 return true;
691 }
693 // Max vector size in bytes. 0 if not supported.
694 const int Matcher::vector_width_in_bytes(BasicType bt) {
695 assert(MaxVectorSize == 8, "");
696 return 8;
697 }
699 // Vector ideal reg
700 const int Matcher::vector_ideal_reg(int size) {
701 assert(MaxVectorSize == 8, "");
702 switch(size) {
703 case 8: return Op_VecD;
704 }
705 ShouldNotReachHere();
706 return 0;
707 }
709 // Only lowest bits of xmm reg are used for vector shift count.
710 const int Matcher::vector_shift_count_ideal_reg(int size) {
711 fatal("vector shift is not supported");
712 return Node::NotAMachineReg;
713 }
715 // Limits on vector size (number of elements) loaded into vector.
716 const int Matcher::max_vector_size(const BasicType bt) {
717 assert(is_java_primitive(bt), "only primitive type vectors");
718 return vector_width_in_bytes(bt)/type2aelembytes(bt);
719 }
721 const int Matcher::min_vector_size(const BasicType bt) {
722 return max_vector_size(bt); // Same as max.
723 }
725 // MIPS supports misaligned vectors store/load? FIXME
726 const bool Matcher::misaligned_vectors_ok() {
727 return false;
728 //return !AlignVector; // can be changed by flag
729 }
731 // Register for DIVI projection of divmodI
732 RegMask Matcher::divI_proj_mask() {
733 ShouldNotReachHere();
734 return RegMask();
735 }
737 // Register for MODI projection of divmodI
738 RegMask Matcher::modI_proj_mask() {
739 ShouldNotReachHere();
740 return RegMask();
741 }
743 // Register for DIVL projection of divmodL
744 RegMask Matcher::divL_proj_mask() {
745 ShouldNotReachHere();
746 return RegMask();
747 }
749 int Matcher::regnum_to_fpu_offset(int regnum) {
750 return regnum - 32; // The FP registers are in the second chunk
751 }
754 const bool Matcher::isSimpleConstant64(jlong value) {
755 // Will one (StoreL ConL) be cheaper than two (StoreI ConI)?.
756 return true;
757 }
760 // Return whether or not this register is ever used as an argument. This
761 // function is used on startup to build the trampoline stubs in generateOptoStub.
762 // Registers not mentioned will be killed by the VM call in the trampoline, and
763 // arguments in those registers not be available to the callee.
764 bool Matcher::can_be_java_arg( int reg ) {
765 /* Refer to: [sharedRuntime_mips_64.cpp] SharedRuntime::java_calling_convention() */
766 if ( reg == T0_num || reg == T0_H_num
767 || reg == A0_num || reg == A0_H_num
768 || reg == A1_num || reg == A1_H_num
769 || reg == A2_num || reg == A2_H_num
770 || reg == A3_num || reg == A3_H_num
771 || reg == A4_num || reg == A4_H_num
772 || reg == A5_num || reg == A5_H_num
773 || reg == A6_num || reg == A6_H_num
774 || reg == A7_num || reg == A7_H_num )
775 return true;
777 if ( reg == F12_num || reg == F12_H_num
778 || reg == F13_num || reg == F13_H_num
779 || reg == F14_num || reg == F14_H_num
780 || reg == F15_num || reg == F15_H_num
781 || reg == F16_num || reg == F16_H_num
782 || reg == F17_num || reg == F17_H_num
783 || reg == F18_num || reg == F18_H_num
784 || reg == F19_num || reg == F19_H_num )
785 return true;
787 return false;
788 }
790 bool Matcher::is_spillable_arg( int reg ) {
791 return can_be_java_arg(reg);
792 }
794 bool Matcher::use_asm_for_ldiv_by_con( jlong divisor ) {
795 return false;
796 }
798 // Register for MODL projection of divmodL
799 RegMask Matcher::modL_proj_mask() {
800 ShouldNotReachHere();
801 return RegMask();
802 }
804 const RegMask Matcher::method_handle_invoke_SP_save_mask() {
805 return FP_REG_mask();
806 }
808 // MIPS doesn't support AES intrinsics
809 const bool Matcher::pass_original_key_for_aes() {
810 return false;
811 }
813 int CallLeafNoFPDirectNode::compute_padding(int current_offset) const {
814 //lui
815 //ori
816 //dsll
817 //ori
819 //jalr
820 //nop
822 return round_to(current_offset, alignment_required()) - current_offset;
823 }
825 int CallLeafDirectNode::compute_padding(int current_offset) const {
826 //lui
827 //ori
828 //dsll
829 //ori
831 //jalr
832 //nop
834 return round_to(current_offset, alignment_required()) - current_offset;
835 }
837 int CallRuntimeDirectNode::compute_padding(int current_offset) const {
838 //lui
839 //ori
840 //dsll
841 //ori
843 //jalr
844 //nop
846 return round_to(current_offset, alignment_required()) - current_offset;
847 }
849 // If CPU can load and store mis-aligned doubles directly then no fixup is
850 // needed. Else we split the double into 2 integer pieces and move it
851 // piece-by-piece. Only happens when passing doubles into C code as the
852 // Java calling convention forces doubles to be aligned.
853 const bool Matcher::misaligned_doubles_ok = false;
854 // Do floats take an entire double register or just half?
855 //const bool Matcher::float_in_double = true;
856 bool Matcher::float_in_double() { return false; }
857 // Threshold size for cleararray.
858 const int Matcher::init_array_short_size = 8 * BytesPerLong;
859 // Do ints take an entire long register or just half?
860 const bool Matcher::int_in_long = true;
861 // Is it better to copy float constants, or load them directly from memory?
862 // Intel can load a float constant from a direct address, requiring no
863 // extra registers. Most RISCs will have to materialize an address into a
864 // register first, so they would do better to copy the constant from stack.
865 const bool Matcher::rematerialize_float_constants = false;
866 // Advertise here if the CPU requires explicit rounding operations
867 // to implement the UseStrictFP mode.
868 const bool Matcher::strict_fp_requires_explicit_rounding = false;
869 // The ecx parameter to rep stos for the ClearArray node is in dwords.
870 const bool Matcher::init_array_count_is_in_bytes = false;
873 // Indicate if the safepoint node needs the polling page as an input.
874 // Since MIPS doesn't have absolute addressing, it needs.
// NOTE(review): the comment above claims the input is needed, yet the
// function returns false — confirm which is intended before relying on
// either statement.
875 bool SafePointNode::needs_polling_address_input() {
876 return false;
877 }
879 // !!!!! Special hack to get all type of calls to specify the byte offset
880 // from the start of the call to the point where the return address
881 // will point.
882 int MachCallStaticJavaNode::ret_addr_offset() {
883 //lui
884 //ori
885 //nop
886 //nop
887 //jalr
888 //nop
// 6 instructions (including the delay-slot nop) * 4 bytes each = 24.
889 return 24;
890 }
892 int MachCallDynamicJavaNode::ret_addr_offset() {
893 //lui IC_Klass,
894 //ori IC_Klass,
895 //dsll IC_Klass
896 //ori IC_Klass
898 //lui T9
899 //ori T9
900 //nop
901 //nop
902 //jalr T9
903 //nop
904 return 4 * 4 + 4 * 6;
905 }
907 //=============================================================================
909 // Figure out which register class each belongs in: rc_int, rc_float, rc_stack
910 enum RC { rc_bad, rc_int, rc_float, rc_stack };
911 static enum RC rc_class( OptoReg::Name reg ) {
912 if( !OptoReg::is_valid(reg) ) return rc_bad;
913 if (OptoReg::is_stack(reg)) return rc_stack;
914 VMReg r = OptoReg::as_VMReg(reg);
915 if (r->is_Register()) return rc_int;
916 assert(r->is_FloatRegister(), "must be");
917 return rc_float;
918 }
// Workhorse for a register-allocator spill copy: a move between any
// combination of {stack slot, GPR, FPR}, in 32-bit or 64-bit width.
//
// One body serves three callers:
//   cbuf != NULL            -> emit real instructions (emit())
//   cbuf == NULL, st given  -> print the assembly listing (format())
//   do_size == true         -> only accumulate and return the byte size
//
// A pair (first, second) with first even and second == first+1 is treated
// as one 64-bit value; anything else is a 32-bit move.  AT (and T9 where
// needed) are used as scratch.  Returns the size in bytes of the emitted
// sequence.
uint MachSpillCopyNode::implementation( CodeBuffer *cbuf, PhaseRegAlloc *ra_, bool do_size, outputStream* st ) const {
  // Get registers to move
  OptoReg::Name src_second = ra_->get_reg_second(in(1));
  OptoReg::Name src_first = ra_->get_reg_first(in(1));
  OptoReg::Name dst_second = ra_->get_reg_second(this );
  OptoReg::Name dst_first = ra_->get_reg_first(this );

  enum RC src_second_rc = rc_class(src_second);
  enum RC src_first_rc = rc_class(src_first);
  enum RC dst_second_rc = rc_class(dst_second);
  enum RC dst_first_rc = rc_class(dst_first);

  assert(OptoReg::is_valid(src_first) && OptoReg::is_valid(dst_first), "must move at least 1 register" );

  // Generate spill code!
  int size = 0;

  if( src_first == dst_first && src_second == dst_second )
    return 0;            // Self copy, no move

  if (src_first_rc == rc_stack) {
    // mem ->
    if (dst_first_rc == rc_stack) {
      // mem -> mem: bounce through AT since MIPS has no mem-to-mem move.
      assert(src_second != dst_first, "overlap");
      if ((src_first & 1) == 0 && src_first + 1 == src_second &&
          (dst_first & 1) == 0 && dst_first + 1 == dst_second) {
        // 64-bit
        int src_offset = ra_->reg2offset(src_first);
        int dst_offset = ra_->reg2offset(dst_first);
        if (cbuf) {
          MacroAssembler _masm(cbuf);
          __ ld(AT, Address(SP, src_offset));
          __ sd(AT, Address(SP, dst_offset));
#ifndef PRODUCT
        } else {
          if(!do_size){
            if (size != 0) st->print("\n\t");
            st->print("ld AT, [SP + #%d]\t# 64-bit mem-mem spill 1\n\t"
                      "sd AT, [SP + #%d]",
                      src_offset, dst_offset);
          }
#endif
        }
        size += 8;   // two 4-byte instructions
      } else {
        // 32-bit
        assert(!((src_first & 1) == 0 && src_first + 1 == src_second), "no transform");
        assert(!((dst_first & 1) == 0 && dst_first + 1 == dst_second), "no transform");
        // No pushl/popl, so:
        int src_offset = ra_->reg2offset(src_first);
        int dst_offset = ra_->reg2offset(dst_first);
        if (cbuf) {
          MacroAssembler _masm(cbuf);
          __ lw(AT, Address(SP, src_offset));
          __ sw(AT, Address(SP, dst_offset));
#ifndef PRODUCT
        } else {
          if(!do_size){
            if (size != 0) st->print("\n\t");
            st->print("lw AT, [SP + #%d] spill 2\n\t"
                      "sw AT, [SP + #%d]\n\t",
                      src_offset, dst_offset);
          }
#endif
        }
        size += 8;
      }
      return size;
    } else if (dst_first_rc == rc_int) {
      // mem -> gpr
      if ((src_first & 1) == 0 && src_first + 1 == src_second &&
          (dst_first & 1) == 0 && dst_first + 1 == dst_second) {
        // 64-bit
        int offset = ra_->reg2offset(src_first);
        if (cbuf) {
          MacroAssembler _masm(cbuf);
          __ ld(as_Register(Matcher::_regEncode[dst_first]), Address(SP, offset));
#ifndef PRODUCT
        } else {
          if(!do_size){
            if (size != 0) st->print("\n\t");
            st->print("ld %s, [SP + #%d]\t# spill 3",
                      Matcher::regName[dst_first],
                      offset);
          }
#endif
        }
        size += 4;
      } else {
        // 32-bit: lw sign-extends for ints, lwu zero-extends for the rest
        // (e.g. narrow oops / non-RegI 32-bit values).
        assert(!((src_first & 1) == 0 && src_first + 1 == src_second), "no transform");
        assert(!((dst_first & 1) == 0 && dst_first + 1 == dst_second), "no transform");
        int offset = ra_->reg2offset(src_first);
        if (cbuf) {
          MacroAssembler _masm(cbuf);
          if (this->ideal_reg() == Op_RegI)
            __ lw(as_Register(Matcher::_regEncode[dst_first]), Address(SP, offset));
          else
            __ lwu(as_Register(Matcher::_regEncode[dst_first]), Address(SP, offset));
#ifndef PRODUCT
        } else {
          if(!do_size){
            if (size != 0) st->print("\n\t");
            if (this->ideal_reg() == Op_RegI)
              st->print("lw %s, [SP + #%d]\t# spill 4",
                        Matcher::regName[dst_first],
                        offset);
            else
              st->print("lwu %s, [SP + #%d]\t# spill 5",
                        Matcher::regName[dst_first],
                        offset);
          }
#endif
        }
        size += 4;
      }
      return size;
    } else if (dst_first_rc == rc_float) {
      // mem-> xmm  (FPR on this port; "xmm" naming inherited from x86.ad)
      if ((src_first & 1) == 0 && src_first + 1 == src_second &&
          (dst_first & 1) == 0 && dst_first + 1 == dst_second) {
        // 64-bit
        int offset = ra_->reg2offset(src_first);
        if (cbuf) {
          MacroAssembler _masm(cbuf);
          __ ldc1( as_FloatRegister(Matcher::_regEncode[dst_first]), Address(SP, offset));
#ifndef PRODUCT
        } else {
          if(!do_size){
            if (size != 0) st->print("\n\t");
            st->print("ldc1 %s, [SP + #%d]\t# spill 6",
                      Matcher::regName[dst_first],
                      offset);
          }
#endif
        }
        size += 4;
      } else {
        // 32-bit
        assert(!((src_first & 1) == 0 && src_first + 1 == src_second), "no transform");
        assert(!((dst_first & 1) == 0 && dst_first + 1 == dst_second), "no transform");
        int offset = ra_->reg2offset(src_first);
        if (cbuf) {
          MacroAssembler _masm(cbuf);
          __ lwc1( as_FloatRegister(Matcher::_regEncode[dst_first]), Address(SP, offset));
#ifndef PRODUCT
        } else {
          if(!do_size){
            if (size != 0) st->print("\n\t");
            st->print("lwc1 %s, [SP + #%d]\t# spill 7",
                      Matcher::regName[dst_first],
                      offset);
          }
#endif
        }
        size += 4;
      }
      return size;
    }
  } else if (src_first_rc == rc_int) {
    // gpr ->
    if (dst_first_rc == rc_stack) {
      // gpr -> mem
      if ((src_first & 1) == 0 && src_first + 1 == src_second &&
          (dst_first & 1) == 0 && dst_first + 1 == dst_second) {
        // 64-bit
        int offset = ra_->reg2offset(dst_first);
        if (cbuf) {
          MacroAssembler _masm(cbuf);
          __ sd(as_Register(Matcher::_regEncode[src_first]), Address(SP, offset));
#ifndef PRODUCT
        } else {
          if(!do_size){
            if (size != 0) st->print("\n\t");
            st->print("sd %s, [SP + #%d] # spill 8",
                      Matcher::regName[src_first],
                      offset);
          }
#endif
        }
        size += 4;
      } else {
        // 32-bit
        assert(!((src_first & 1) == 0 && src_first + 1 == src_second), "no transform");
        assert(!((dst_first & 1) == 0 && dst_first + 1 == dst_second), "no transform");
        int offset = ra_->reg2offset(dst_first);
        if (cbuf) {
          MacroAssembler _masm(cbuf);
          __ sw(as_Register(Matcher::_regEncode[src_first]), Address(SP, offset));
#ifndef PRODUCT
        } else {
          if(!do_size){
            if (size != 0) st->print("\n\t");
            st->print("sw %s, [SP + #%d]\t# spill 9",
                      Matcher::regName[src_first], offset);
          }
#endif
        }
        size += 4;
      }
      return size;
    } else if (dst_first_rc == rc_int) {
      // gpr -> gpr
      if ((src_first & 1) == 0 && src_first + 1 == src_second &&
          (dst_first & 1) == 0 && dst_first + 1 == dst_second) {
        // 64-bit
        if (cbuf) {
          MacroAssembler _masm(cbuf);
          __ move(as_Register(Matcher::_regEncode[dst_first]),
                  as_Register(Matcher::_regEncode[src_first]));
#ifndef PRODUCT
        } else {
          if(!do_size){
            if (size != 0) st->print("\n\t");
            st->print("move(64bit) %s <-- %s\t# spill 10",
                      Matcher::regName[dst_first],
                      Matcher::regName[src_first]);
          }
#endif
        }
        size += 4;
        return size;
      } else {
        // 32-bit: move_u32 for ints, full 64-bit copy (daddu rd, rs, R0)
        // for other single-slot values.
        assert(!((src_first & 1) == 0 && src_first + 1 == src_second), "no transform");
        assert(!((dst_first & 1) == 0 && dst_first + 1 == dst_second), "no transform");
        if (cbuf) {
          MacroAssembler _masm(cbuf);
          if (this->ideal_reg() == Op_RegI)
            __ move_u32(as_Register(Matcher::_regEncode[dst_first]), as_Register(Matcher::_regEncode[src_first]));
          else
            __ daddu(as_Register(Matcher::_regEncode[dst_first]), as_Register(Matcher::_regEncode[src_first]), R0);

#ifndef PRODUCT
        } else {
          if(!do_size){
            if (size != 0) st->print("\n\t");
            st->print("move(32-bit) %s <-- %s\t# spill 11",
                      Matcher::regName[dst_first],
                      Matcher::regName[src_first]);
          }
#endif
        }
        size += 4;
        return size;
      }
    } else if (dst_first_rc == rc_float) {
      // gpr -> xmm
      if ((src_first & 1) == 0 && src_first + 1 == src_second &&
          (dst_first & 1) == 0 && dst_first + 1 == dst_second) {
        // 64-bit
        if (cbuf) {
          MacroAssembler _masm(cbuf);
          __ dmtc1(as_Register(Matcher::_regEncode[src_first]), as_FloatRegister(Matcher::_regEncode[dst_first]));
#ifndef PRODUCT
        } else {
          if(!do_size){
            if (size != 0) st->print("\n\t");
            // NOTE(review): the listing prints dst before src while the
            // emitted dmtc1 takes (gpr src, fpr dst) — operand order in the
            // debug text may read reversed; confirm desired listing format.
            st->print("dmtc1 %s, %s\t# spill 12",
                      Matcher::regName[dst_first],
                      Matcher::regName[src_first]);
          }
#endif
        }
        size += 4;
      } else {
        // 32-bit
        assert(!((src_first & 1) == 0 && src_first + 1 == src_second), "no transform");
        assert(!((dst_first & 1) == 0 && dst_first + 1 == dst_second), "no transform");
        if (cbuf) {
          MacroAssembler _masm(cbuf);
          __ mtc1( as_Register(Matcher::_regEncode[src_first]), as_FloatRegister(Matcher::_regEncode[dst_first]) );
#ifndef PRODUCT
        } else {
          if(!do_size){
            if (size != 0) st->print("\n\t");
            st->print("mtc1 %s, %s\t# spill 13",
                      Matcher::regName[dst_first],
                      Matcher::regName[src_first]);
          }
#endif
        }
        size += 4;
      }
      return size;
    }
  } else if (src_first_rc == rc_float) {
    // xmm ->
    if (dst_first_rc == rc_stack) {
      // xmm -> mem
      if ((src_first & 1) == 0 && src_first + 1 == src_second &&
          (dst_first & 1) == 0 && dst_first + 1 == dst_second) {
        // 64-bit
        int offset = ra_->reg2offset(dst_first);
        if (cbuf) {
          MacroAssembler _masm(cbuf);
          __ sdc1( as_FloatRegister(Matcher::_regEncode[src_first]), Address(SP, offset) );
#ifndef PRODUCT
        } else {
          if(!do_size){
            if (size != 0) st->print("\n\t");
            st->print("sdc1 %s, [SP + #%d]\t# spill 14",
                      Matcher::regName[src_first],
                      offset);
          }
#endif
        }
        size += 4;
      } else {
        // 32-bit
        assert(!((src_first & 1) == 0 && src_first + 1 == src_second), "no transform");
        assert(!((dst_first & 1) == 0 && dst_first + 1 == dst_second), "no transform");
        int offset = ra_->reg2offset(dst_first);
        if (cbuf) {
          MacroAssembler _masm(cbuf);
          __ swc1(as_FloatRegister(Matcher::_regEncode[src_first]), Address(SP, offset));
#ifndef PRODUCT
        } else {
          if(!do_size){
            if (size != 0) st->print("\n\t");
            st->print("swc1 %s, [SP + #%d]\t# spill 15",
                      Matcher::regName[src_first],
                      offset);
          }
#endif
        }
        size += 4;
      }
      return size;
    } else if (dst_first_rc == rc_int) {
      // xmm -> gpr
      if ((src_first & 1) == 0 && src_first + 1 == src_second &&
          (dst_first & 1) == 0 && dst_first + 1 == dst_second) {
        // 64-bit
        if (cbuf) {
          MacroAssembler _masm(cbuf);
          __ dmfc1( as_Register(Matcher::_regEncode[dst_first]), as_FloatRegister(Matcher::_regEncode[src_first]));
#ifndef PRODUCT
        } else {
          if(!do_size){
            if (size != 0) st->print("\n\t");
            st->print("dmfc1 %s, %s\t# spill 16",
                      Matcher::regName[dst_first],
                      Matcher::regName[src_first]);
          }
#endif
        }
        size += 4;
      } else {
        // 32-bit
        assert(!((src_first & 1) == 0 && src_first + 1 == src_second), "no transform");
        assert(!((dst_first & 1) == 0 && dst_first + 1 == dst_second), "no transform");
        if (cbuf) {
          MacroAssembler _masm(cbuf);
          __ mfc1( as_Register(Matcher::_regEncode[dst_first]), as_FloatRegister(Matcher::_regEncode[src_first]));
#ifndef PRODUCT
        } else {
          if(!do_size){
            if (size != 0) st->print("\n\t");
            st->print("mfc1 %s, %s\t# spill 17",
                      Matcher::regName[dst_first],
                      Matcher::regName[src_first]);
          }
#endif
        }
        size += 4;
      }
      return size;
    } else if (dst_first_rc == rc_float) {
      // xmm -> xmm
      if ((src_first & 1) == 0 && src_first + 1 == src_second &&
          (dst_first & 1) == 0 && dst_first + 1 == dst_second) {
        // 64-bit
        if (cbuf) {
          MacroAssembler _masm(cbuf);
          __ mov_d( as_FloatRegister(Matcher::_regEncode[dst_first]), as_FloatRegister(Matcher::_regEncode[src_first]));
#ifndef PRODUCT
        } else {
          if(!do_size){
            if (size != 0) st->print("\n\t");
            st->print("mov_d %s <-- %s\t# spill 18",
                      Matcher::regName[dst_first],
                      Matcher::regName[src_first]);
          }
#endif
        }
        size += 4;
      } else {
        // 32-bit
        assert(!((src_first & 1) == 0 && src_first + 1 == src_second), "no transform");
        assert(!((dst_first & 1) == 0 && dst_first + 1 == dst_second), "no transform");
        if (cbuf) {
          MacroAssembler _masm(cbuf);
          __ mov_s( as_FloatRegister(Matcher::_regEncode[dst_first]), as_FloatRegister(Matcher::_regEncode[src_first]));
#ifndef PRODUCT
        } else {
          if(!do_size){
            if (size != 0) st->print("\n\t");
            st->print("mov_s %s <-- %s\t# spill 19",
                      Matcher::regName[dst_first],
                      Matcher::regName[src_first]);
          }
#endif
        }
        size += 4;
      }
      return size;
    }
  }

  // Every valid src/dst class combination returns above; falling through
  // here means an unsupported spill was requested.
  assert(0," foo ");
  Unimplemented();
  return size;

}
#ifndef PRODUCT
// Debug-only listing: run implementation() in "describe" mode (no buffer).
void MachSpillCopyNode::format( PhaseRegAlloc *ra_, outputStream* st ) const {
  implementation( NULL, ra_, false, st );
}
#endif
// Emit the spill copy into the code buffer.
void MachSpillCopyNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
  implementation( &cbuf, ra_, false, NULL );
}
// Size in bytes of the spill copy, computed by a dry run of implementation().
uint MachSpillCopyNode::size(PhaseRegAlloc *ra_) const {
  return implementation( NULL, ra_, true, NULL );
}
1351 //=============================================================================
1352 #
#ifndef PRODUCT
// Debug-only listing for a breakpoint node ("INT3" naming inherited from x86).
void MachBreakpointNode::format( PhaseRegAlloc *, outputStream* st ) const {
  st->print("INT3");
}
#endif
// Emit a breakpoint trap.
void MachBreakpointNode::emit(CodeBuffer &cbuf, PhaseRegAlloc* ra_) const {
  MacroAssembler _masm(&cbuf);
  __ int3();
}
// Delegate to the generic node-size computation.
uint MachBreakpointNode::size(PhaseRegAlloc* ra_) const {
  return MachNode::size(ra_);
}
1370 //=============================================================================
1371 #ifndef PRODUCT
1372 void MachEpilogNode::format( PhaseRegAlloc *ra_, outputStream* st ) const {
1373 Compile *C = ra_->C;
1374 int framesize = C->frame_size_in_bytes();
1376 assert((framesize & (StackAlignmentInBytes-1)) == 0, "frame size not aligned");
1378 st->print("daddiu SP, SP, %d # Rlease stack @ MachEpilogNode",framesize);
1379 st->cr(); st->print("\t");
1380 if (UseLoongsonISA) {
1381 st->print("gslq RA, FP, SP, %d # Restore FP & RA @ MachEpilogNode", -wordSize*2);
1382 } else {
1383 st->print("ld RA, SP, %d # Restore RA @ MachEpilogNode", -wordSize);
1384 st->cr(); st->print("\t");
1385 st->print("ld FP, SP, %d # Restore FP @ MachEpilogNode", -wordSize*2);
1386 }
1388 if( do_polling() && C->is_method_compilation() ) {
1389 st->print("Poll Safepoint # MachEpilogNode");
1390 }
1391 }
1392 #endif
// Emit the method epilogue.  The frame is released first; RA and FP are then
// reloaded from just above the new SP (negative offsets), matching the slots
// written by MachPrologNode::emit before it allocated the frame.
void MachEpilogNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
  Compile *C = ra_->C;
  MacroAssembler _masm(&cbuf);
  int framesize = C->frame_size_in_bytes();

  assert((framesize & (StackAlignmentInBytes-1)) == 0, "frame size not aligned");

  // Release the frame.
  __ daddiu(SP, SP, framesize);

  if (UseLoongsonISA) {
    // One 128-bit load restores both RA and FP.
    __ gslq(RA, FP, SP, -wordSize*2);
  } else {
    __ ld(RA, SP, -wordSize );
    __ ld(FP, SP, -wordSize*2 );
  }

  if( do_polling() && C->is_method_compilation() ) {
    // Return safepoint poll: load from the polling page, which traps when a
    // safepoint is pending; the load is tagged with poll_return_type.
    __ set64(AT, (long)os::get_polling_page());
    __ relocate(relocInfo::poll_return_type);
    __ lw(AT, AT, 0);
  }
}
// Epilogue size varies (Loongson ISA, polling); use the generic computation.
uint MachEpilogNode::size(PhaseRegAlloc *ra_) const {
  return MachNode::size(ra_); // too many variables; just compute it the hard way
}
// Upper bound on relocation entries this node may emit.
int MachEpilogNode::reloc() const {
  return 0; // a large enough number
}
// Use the default pipeline description for scheduling.
const Pipeline * MachEpilogNode::pipeline() const {
  return MachNode::pipeline_class();
}
// Offset of the safepoint poll within this epilogue node; 0 here.
int MachEpilogNode::safepoint_offset() const { return 0; }
1431 //=============================================================================
1433 #ifndef PRODUCT
1434 void BoxLockNode::format( PhaseRegAlloc *ra_, outputStream* st ) const {
1435 int offset = ra_->reg2offset(in_RegMask(0).find_first_elem());
1436 int reg = ra_->get_reg_first(this);
1437 st->print("ADDI %s, SP, %d @BoxLockNode",Matcher::regName[reg],offset);
1438 }
1439 #endif
// A BoxLock emits exactly one 4-byte add instruction (see emit below).
uint BoxLockNode::size(PhaseRegAlloc *ra_) const {
  return 4;
}
1446 void BoxLockNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
1447 MacroAssembler _masm(&cbuf);
1448 int offset = ra_->reg2offset(in_RegMask(0).find_first_elem());
1449 int reg = ra_->get_encode(this);
1451 __ addi(as_Register(reg), SP, offset);
1452 /*
1453 if( offset >= 128 ) {
1454 emit_opcode(cbuf, 0x8D); // LEA reg,[SP+offset]
1455 emit_rm(cbuf, 0x2, reg, 0x04);
1456 emit_rm(cbuf, 0x0, 0x04, SP_enc);
1457 emit_d32(cbuf, offset);
1458 }
1459 else {
1460 emit_opcode(cbuf, 0x8D); // LEA reg,[SP+offset]
1461 emit_rm(cbuf, 0x1, reg, 0x04);
1462 emit_rm(cbuf, 0x0, 0x04, SP_enc);
1463 emit_d8(cbuf, offset);
1464 }
1465 */
1466 }
1469 //static int sizeof_FFree_Float_Stack_All = -1;
1471 int MachCallRuntimeNode::ret_addr_offset() {
1472 //lui
1473 //ori
1474 //dsll
1475 //ori
1476 //jalr
1477 //nop
1478 assert(NativeCall::instruction_size == 24, "in MachCallRuntimeNode::ret_addr_offset()");
1479 return NativeCall::instruction_size;
1480 // return 16;
1481 }
1487 //=============================================================================
#ifndef PRODUCT
// Debug-only listing for padding nops (_count instructions, 4 bytes each).
void MachNopNode::format( PhaseRegAlloc *, outputStream* st ) const {
  st->print("NOP \t# %d bytes pad for loops and calls", 4 * _count);
}
#endif
1494 void MachNopNode::emit(CodeBuffer &cbuf, PhaseRegAlloc * ) const {
1495 MacroAssembler _masm(&cbuf);
1496 int i = 0;
1497 for(i = 0; i < _count; i++)
1498 __ nop();
1499 }
// Padding size: _count fixed-width 4-byte instructions.
uint MachNopNode::size(PhaseRegAlloc *) const {
  return 4 * _count;
}
// Use the default pipeline description for scheduling.
const Pipeline* MachNopNode::pipeline() const {
  return MachNode::pipeline_class();
}
1508 //=============================================================================
1510 //=============================================================================
#ifndef PRODUCT
// Debug-only listing of the Unverified Entry Point (mirrors emit below).
void MachUEPNode::format( PhaseRegAlloc *ra_, outputStream* st ) const {
  st->print_cr("load_klass(T9, T0)");
  st->print_cr("\tbeq(T9, iCache, L)");
  st->print_cr("\tnop");
  st->print_cr("\tjmp(SharedRuntime::get_ic_miss_stub(), relocInfo::runtime_call_type)");
  st->print_cr("\tnop");
  st->print_cr("\tnop");
  st->print_cr(" L:");
}
#endif
// Unverified Entry Point: verify that the receiver's klass (loaded from T0)
// matches the inline-cache register; on mismatch, jump to the IC-miss stub.
// On match, execution falls through to the (aligned) verified entry point.
void MachUEPNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
  MacroAssembler _masm(&cbuf);
#ifdef ASSERT
  //uint code_size = cbuf.code_size();
#endif
  int ic_reg = Matcher::inline_cache_reg_encode();
  Label L;
  Register receiver = T0;
  Register iCache = as_Register(ic_reg);
  __ load_klass(T9, receiver);
  __ beq(T9, iCache, L);
  __ nop();   // branch delay slot

  __ relocate(relocInfo::runtime_call_type);
  __ patchable_jump((address)SharedRuntime::get_ic_miss_stub());

  /* WARNING these NOPs are critical so that verified entry point is properly
   * 8 bytes aligned for patching by NativeJump::patch_verified_entry() */
  __ align(CodeEntryAlignment);
  __ bind(L);
}
// Size varies with alignment padding; use the generic computation.
uint MachUEPNode::size(PhaseRegAlloc *ra_) const {
  return MachNode::size(ra_);
}
1552 //=============================================================================
// Register class for the constant-table base pointer.
const RegMask& MachConstantBaseNode::_out_RegMask = P_REG_mask();
// The table base is materialized as an absolute address (see
// MachConstantBaseNode::emit), so no relative offset is needed.
int Compile::ConstantTable::calculate_table_base_offset() const {
  return 0; // absolute addressing, no offset
}
// This port emits the constant base directly; no post-allocation expansion.
bool MachConstantBaseNode::requires_postalloc_expand() const { return false; }
// Never called since requires_postalloc_expand() returns false.
void MachConstantBaseNode::postalloc_expand(GrowableArray <Node *> *nodes, PhaseRegAlloc *ra_) {
  ShouldNotReachHere();
}
// Materialize the constant-table base address into this node's output
// register with a patchable 48-bit immediate load (4 instructions; must
// stay in sync with MachConstantBaseNode::size below).
void MachConstantBaseNode::emit(CodeBuffer& cbuf, PhaseRegAlloc* ra_) const {
  Compile* C = ra_->C;
  Compile::ConstantTable& constant_table = C->constant_table();
  MacroAssembler _masm(&cbuf);

  Register Rtoc = as_Register(ra_->get_encode(this));
  CodeSection* consts_section = __ code()->consts();
  int consts_size = consts_section->align_at_start(consts_section->size());
  assert(constant_table.size() == consts_size, "must be equal");

  if (consts_section->size()) {
    // Materialize the constant table base.
    address baseaddr = consts_section->start() + -(constant_table.table_base_offset());
    // RelocationHolder rspec = internal_word_Relocation::spec(baseaddr);
    __ relocate(relocInfo::internal_pc_type);
    __ patchable_set48(Rtoc, (long)baseaddr);
  }
}
// Fixed size: patchable_set48 always emits 4 instructions of 4 bytes.
uint MachConstantBaseNode::size(PhaseRegAlloc* ra_) const {
  // patchable_set48 (4 insts)
  return 4 * 4;
}
#ifndef PRODUCT
// Debug-only listing for the constant-table base load.
void MachConstantBaseNode::format(PhaseRegAlloc* ra_, outputStream* st) const {
  Register r = as_Register(ra_->get_encode(this));
  st->print("patchable_set48 %s, &constanttable (constant table base) @ MachConstantBaseNode", r->name());
}
#endif
1597 //=============================================================================
#ifndef PRODUCT
// Debug-only listing of the method prologue: optional stack bang, save
// RA/FP above the new frame, establish FP, allocate the frame.  Note the
// bang check uses bang_size_in_bytes(), not just the frame size.
void MachPrologNode::format( PhaseRegAlloc *ra_, outputStream* st ) const {
  Compile* C = ra_->C;

  int framesize = C->frame_size_in_bytes();
  int bangsize = C->bang_size_in_bytes();
  assert((framesize & (StackAlignmentInBytes-1)) == 0, "frame size not aligned");

  // Calls to C2R adapters often do not accept exceptional returns.
  // We require that their callers must bang for them. But be careful, because
  // some VM calls (such as call site linkage) can use several kilobytes of
  // stack. But the stack safety zone should account for that.
  // See bugs 4446381, 4468289, 4497237.
  if (C->need_stack_bang(bangsize)) {
    st->print_cr("# stack bang"); st->print("\t");
  }
  if (UseLoongsonISA) {
    st->print("gssq RA, FP, %d(SP) @ MachPrologNode\n\t", -wordSize*2);
  } else {
    st->print("sd RA, %d(SP) @ MachPrologNode\n\t", -wordSize);
    st->print("sd FP, %d(SP) @ MachPrologNode\n\t", -wordSize*2);
  }
  st->print("daddiu FP, SP, -%d \n\t", wordSize*2);
  st->print("daddiu SP, SP, -%d \t",framesize);
}
#endif
1626 void MachPrologNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
1627 Compile* C = ra_->C;
1628 MacroAssembler _masm(&cbuf);
1630 int framesize = C->frame_size_in_bytes();
1631 int bangsize = C->bang_size_in_bytes();
1633 // __ verified_entry(framesize, C->need_stack_bang(bangsize)?bangsize:0, false);
1635 assert((framesize & (StackAlignmentInBytes-1)) == 0, "frame size not aligned");
1637 if (C->need_stack_bang(framesize)) {
1638 __ generate_stack_overflow_check(framesize);
1639 }
1641 if (UseLoongsonISA) {
1642 __ gssq(RA, FP, SP, -wordSize*2);
1643 } else {
1644 __ sd(RA, SP, -wordSize);
1645 __ sd(FP, SP, -wordSize*2);
1646 }
1647 __ daddiu(FP, SP, -wordSize*2);
1648 __ daddiu(SP, SP, -framesize);
1649 __ nop(); /* 2013.10.22 Jin: Make enough room for patch_verified_entry() */
1650 __ nop();
1652 C->set_frame_complete(cbuf.insts_size());
1653 if (C->has_mach_constant_base_node()) {
1654 // NOTE: We set the table base offset here because users might be
1655 // emitted before MachConstantBaseNode.
1656 Compile::ConstantTable& constant_table = C->constant_table();
1657 constant_table.set_table_base_offset(constant_table.calculate_table_base_offset());
1658 }
1660 }
// Prologue size varies (bang, Loongson ISA); use the generic computation.
uint MachPrologNode::size(PhaseRegAlloc *ra_) const {
  return MachNode::size(ra_); // too many variables; just compute it the hard way
}
// Upper bound on relocation entries this node may emit.
int MachPrologNode::reloc() const {
  return 0; // a large enough number
}
1672 %}
1674 //----------ENCODING BLOCK-----------------------------------------------------
1675 // This block specifies the encoding classes used by the compiler to output
1676 // byte streams. Encoding classes generate functions which are called by
1677 // Machine Instruction Nodes in order to generate the bit encoding of the
1678 // instruction. Operands specify their base encoding interface with the
1679 // interface keyword. There are currently supported four interfaces,
1680 // REG_INTER, CONST_INTER, MEMORY_INTER, & COND_INTER. REG_INTER causes an
1681 // operand to generate a function which returns its register number when
1682 // queried. CONST_INTER causes an operand to generate a function which
1683 // returns the value of the constant when queried. MEMORY_INTER causes an
1684 // operand to generate four functions which return the Base Register, the
1685 // Index Register, the Scale Value, and the Offset Value of the operand when
1686 // queried. COND_INTER causes an operand to generate six functions which
1687 // return the encoding code (ie - encoding bits for the instruction)
1688 // associated with each basic boolean condition for a conditional instruction.
1689 // Instructions specify two basic values for encoding. They use the
1690 // ins_encode keyword to specify their encoding class (which must be one of
1691 // the class names specified in the encoding block), and they use the
1692 // opcode keyword to specify, in order, their primary, secondary, and
1693 // tertiary opcode. Only the opcode sections which a particular instruction
1694 // needs for encoding need to be specified.
1695 encode %{
  // Load byte signed: emit a sign-extending byte load from [base + index<<scale
  // + disp].  Uses Loongson gslbx (indexed load) when available, otherwise
  // computes the address into AT (with T9 as a second scratch for large
  // displacements) and uses plain lb.
  // NOTE(review): this encoder uses addu (32-bit add) for address arithmetic
  // where load_UB_enc uses daddu — presumably safe only while addresses stay
  // in the sign-extended 32-bit range; confirm against load_UB_enc.
  enc_class load_B_enc (mRegI dst, memory mem) %{
    MacroAssembler _masm(&cbuf);
    int dst = $dst$$reg;
    int base = $mem$$base;
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;

    if( index != 0 ) {
      if( Assembler::is_simm16(disp) ) {
        if( UseLoongsonISA ) {
          if (scale == 0) {
            // gslbx folds base + index + disp into one instruction.
            __ gslbx(as_Register(dst), as_Register(base), as_Register(index), disp);
          } else {
            __ dsll(AT, as_Register(index), scale);
            __ gslbx(as_Register(dst), as_Register(base), AT, disp);
          }
        } else {
          if (scale == 0) {
            __ addu(AT, as_Register(base), as_Register(index));
          } else {
            __ dsll(AT, as_Register(index), scale);
            __ addu(AT, as_Register(base), AT);
          }
          __ lb(as_Register(dst), AT, disp);
        }
      } else {
        // Displacement does not fit in 16 bits: materialize it in T9.
        if (scale == 0) {
          __ addu(AT, as_Register(base), as_Register(index));
        } else {
          __ dsll(AT, as_Register(index), scale);
          __ addu(AT, as_Register(base), AT);
        }
        __ move(T9, disp);
        if( UseLoongsonISA ) {
          __ gslbx(as_Register(dst), AT, T9, 0);
        } else {
          __ addu(AT, AT, T9);
          __ lb(as_Register(dst), AT, 0);
        }
      }
    } else {
      // No index register: base + disp addressing.
      if( Assembler::is_simm16(disp) ) {
        __ lb(as_Register(dst), as_Register(base), disp);
      } else {
        __ move(T9, disp);
        if( UseLoongsonISA ) {
          __ gslbx(as_Register(dst), as_Register(base), T9, 0);
        } else {
          __ addu(AT, as_Register(base), T9);
          __ lb(as_Register(dst), AT, 0);
        }
      }
    }
  %}
  // Load byte unsigned: zero-extending byte load from [base + index<<scale
  // + disp].  Address is formed in AT (T9 as scratch for a wide disp); no
  // Loongson indexed-load variant is used here.
  enc_class load_UB_enc (mRegI dst, memory mem) %{
    MacroAssembler _masm(&cbuf);
    int dst = $dst$$reg;
    int base = $mem$$base;
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;

    if( index != 0 ) {
      // AT = base + (index << scale)
      if (scale == 0) {
        __ daddu(AT, as_Register(base), as_Register(index));
      } else {
        __ dsll(AT, as_Register(index), scale);
        __ daddu(AT, as_Register(base), AT);
      }
      if( Assembler::is_simm16(disp) ) {
        __ lbu(as_Register(dst), AT, disp);
      } else {
        // Wide displacement: add it in via T9 first.
        __ move(T9, disp);
        __ daddu(AT, AT, T9);
        __ lbu(as_Register(dst), AT, 0);
      }
    } else {
      if( Assembler::is_simm16(disp) ) {
        __ lbu(as_Register(dst), as_Register(base), disp);
      } else {
        __ move(T9, disp);
        __ daddu(AT, as_Register(base), T9);
        __ lbu(as_Register(dst), AT, 0);
      }
    }
  %}
  // Store byte from register to [base + index<<scale + disp].  Uses the
  // Loongson gssbx indexed store (8-bit immediate) when it applies, else
  // forms the address in AT (T9 for a wide disp) and uses sb.
  // NOTE(review): address arithmetic here uses addu (32-bit) rather than
  // daddu as in load_UB_enc / store_B_immI_enc — confirm this is safe for
  // the address ranges involved.
  enc_class store_B_reg_enc (memory mem, mRegI src) %{
    MacroAssembler _masm(&cbuf);
    int src = $src$$reg;
    int base = $mem$$base;
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;

    if( index != 0 ) {
      if (scale == 0) {
        if( Assembler::is_simm(disp, 8) ) {
          // gssbx only takes an 8-bit signed displacement.
          if (UseLoongsonISA) {
            __ gssbx(as_Register(src), as_Register(base), as_Register(index), disp);
          } else {
            __ addu(AT, as_Register(base), as_Register(index));
            __ sb(as_Register(src), AT, disp);
          }
        } else if( Assembler::is_simm16(disp) ) {
          __ addu(AT, as_Register(base), as_Register(index));
          __ sb(as_Register(src), AT, disp);
        } else {
          // Wide displacement: materialize in T9.
          __ addu(AT, as_Register(base), as_Register(index));
          __ move(T9, disp);
          if (UseLoongsonISA) {
            __ gssbx(as_Register(src), AT, T9, 0);
          } else {
            __ addu(AT, AT, T9);
            __ sb(as_Register(src), AT, 0);
          }
        }
      } else {
        // Scaled index: AT = index << scale first.
        __ dsll(AT, as_Register(index), scale);
        if( Assembler::is_simm(disp, 8) ) {
          if (UseLoongsonISA) {
            __ gssbx(as_Register(src), AT, as_Register(base), disp);
          } else {
            __ addu(AT, as_Register(base), AT);
            __ sb(as_Register(src), AT, disp);
          }
        } else if( Assembler::is_simm16(disp) ) {
          __ addu(AT, as_Register(base), AT);
          __ sb(as_Register(src), AT, disp);
        } else {
          __ addu(AT, as_Register(base), AT);
          __ move(T9, disp);
          if (UseLoongsonISA) {
            __ gssbx(as_Register(src), AT, T9, 0);
          } else {
            __ addu(AT, AT, T9);
            __ sb(as_Register(src), AT, 0);
          }
        }
      }
    } else {
      // No index register: base + disp addressing.
      if( Assembler::is_simm16(disp) ) {
        __ sb(as_Register(src), as_Register(base), disp);
      } else {
        __ move(T9, disp);
        if (UseLoongsonISA) {
          __ gssbx(as_Register(src), as_Register(base), T9, 0);
        } else {
          __ addu(AT, as_Register(base), T9);
          __ sb(as_Register(src), AT, 0);
        }
      }
    }
  %}
  // Store an 8-bit immediate byte to [base + index<<scale + disp].  A zero
  // value is stored directly from R0; otherwise the constant is materialized
  // in a scratch register (T9 or AT) first.  The Loongson path prefers the
  // gssbx indexed store where its 8-bit displacement allows.
  enc_class store_B_immI_enc (memory mem, immI8 src) %{
    MacroAssembler _masm(&cbuf);
    int base = $mem$$base;
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;
    int value = $src$$constant;

    if( index != 0 ) {
      if (!UseLoongsonISA) {
        // Generic MIPS: AT = base + (index << scale), then sb.
        if (scale == 0) {
          __ daddu(AT, as_Register(base), as_Register(index));
        } else {
          __ dsll(AT, as_Register(index), scale);
          __ daddu(AT, as_Register(base), AT);
        }
        if( Assembler::is_simm16(disp) ) {
          if (value == 0) {
            __ sb(R0, AT, disp);
          } else {
            __ move(T9, value);
            __ sb(T9, AT, disp);
          }
        } else {
          // Wide displacement: fold into AT via T9 before storing.
          if (value == 0) {
            __ move(T9, disp);
            __ daddu(AT, AT, T9);
            __ sb(R0, AT, 0);
          } else {
            __ move(T9, disp);
            __ daddu(AT, AT, T9);
            __ move(T9, value);
            __ sb(T9, AT, 0);
          }
        }
      } else {
        // Loongson: use gssbx (8-bit disp) when possible.
        if (scale == 0) {
          if( Assembler::is_simm(disp, 8) ) {
            if (value == 0) {
              __ gssbx(R0, as_Register(base), as_Register(index), disp);
            } else {
              __ move(T9, value);
              __ gssbx(T9, as_Register(base), as_Register(index), disp);
            }
          } else if( Assembler::is_simm16(disp) ) {
            __ daddu(AT, as_Register(base), as_Register(index));
            if (value == 0) {
              __ sb(R0, AT, disp);
            } else {
              __ move(T9, value);
              __ sb(T9, AT, disp);
            }
          } else {
            // Wide displacement.
            if (value == 0) {
              __ daddu(AT, as_Register(base), as_Register(index));
              __ move(T9, disp);
              __ gssbx(R0, AT, T9, 0);
            } else {
              __ move(AT, disp);
              __ move(T9, value);
              __ daddu(AT, as_Register(base), AT);
              __ gssbx(T9, AT, as_Register(index), 0);
            }
          }

        } else {
          // Scaled index.
          if( Assembler::is_simm(disp, 8) ) {
            __ dsll(AT, as_Register(index), scale);
            if (value == 0) {
              __ gssbx(R0, as_Register(base), AT, disp);
            } else {
              __ move(T9, value);
              __ gssbx(T9, as_Register(base), AT, disp);
            }
          } else if( Assembler::is_simm16(disp) ) {
            __ dsll(AT, as_Register(index), scale);
            __ daddu(AT, as_Register(base), AT);
            if (value == 0) {
              __ sb(R0, AT, disp);
            } else {
              __ move(T9, value);
              __ sb(T9, AT, disp);
            }
          } else {
            __ dsll(AT, as_Register(index), scale);
            if (value == 0) {
              __ daddu(AT, as_Register(base), AT);
              __ move(T9, disp);
              __ gssbx(R0, AT, T9, 0);
            } else {
              __ move(T9, disp);
              __ daddu(AT, AT, T9);
              __ move(T9, value);
              __ gssbx(T9, as_Register(base), AT, 0);
            }
          }
        }
      }
    } else {
      // No index register: base + disp addressing.
      if( Assembler::is_simm16(disp) ) {
        if (value == 0) {
          __ sb(R0, as_Register(base), disp);
        } else {
          __ move(AT, value);
          __ sb(AT, as_Register(base), disp);
        }
      } else {
        if (value == 0) {
          __ move(T9, disp);
          if (UseLoongsonISA) {
            __ gssbx(R0, as_Register(base), T9, 0);
          } else {
            __ daddu(AT, as_Register(base), T9);
            __ sb(R0, AT, 0);
          }
        } else {
          __ move(T9, disp);
          if (UseLoongsonISA) {
            __ move(AT, value);
            __ gssbx(AT, as_Register(base), T9, 0);
          } else {
            __ daddu(AT, as_Register(base), T9);
            __ move(T9, value);
            __ sb(T9, AT, 0);
          }
        }
      }
    }
  %}
// Store an immediate byte to memory, followed by a sync() memory barrier
// (volatile byte store). Effective address = base + (index << scale) + disp.
// AT and T9 are the scratch registers; R0 is stored directly when the
// immediate is zero. When UseLoongsonISA is set, the Loongson EXT indexed
// store-byte (gssbx) folds the index add when disp fits in simm8, and can
// take a register offset when disp does not fit at all.
1989 enc_class store_B_immI_enc_sync (memory mem, immI8 src) %{
1990 MacroAssembler _masm(&cbuf);
1991 int base = $mem$$base;
1992 int index = $mem$$index;
1993 int scale = $mem$$scale;
1994 int disp = $mem$$disp;
1995 int value = $src$$constant;
1997 if( index != 0 ) {
1998 if ( UseLoongsonISA ) {
// disp fits gssbx's 8-bit offset field: indexed store, no explicit add.
1999 if ( Assembler::is_simm(disp,8) ) {
2000 if ( scale == 0 ) {
2001 if ( value == 0 ) {
2002 __ gssbx(R0, as_Register(base), as_Register(index), disp);
2003 } else {
2004 __ move(AT, value);
2005 __ gssbx(AT, as_Register(base), as_Register(index), disp);
2006 }
2007 } else {
2008 __ dsll(AT, as_Register(index), scale);
2009 if ( value == 0 ) {
2010 __ gssbx(R0, as_Register(base), AT, disp);
2011 } else {
2012 __ move(T9, value);
2013 __ gssbx(T9, as_Register(base), AT, disp);
2014 }
2015 }
// disp fits sb's 16-bit offset: form base+index in AT, then plain sb.
2016 } else if ( Assembler::is_simm16(disp) ) {
2017 if ( scale == 0 ) {
2018 __ daddu(AT, as_Register(base), as_Register(index));
2019 if ( value == 0 ){
2020 __ sb(R0, AT, disp);
2021 } else {
2022 __ move(T9, value);
2023 __ sb(T9, AT, disp);
2024 }
2025 } else {
2026 __ dsll(AT, as_Register(index), scale);
2027 __ daddu(AT, as_Register(base), AT);
2028 if ( value == 0 ) {
2029 __ sb(R0, AT, disp);
2030 } else {
2031 __ move(T9, value);
2032 __ sb(T9, AT, disp);
2033 }
2034 }
// Large disp: materialize disp, fold it into the index side, and use
// gssbx with a zero immediate offset.
2035 } else {
2036 if ( scale == 0 ) {
2037 __ move(AT, disp);
2038 __ daddu(AT, as_Register(index), AT);
2039 if ( value == 0 ) {
2040 __ gssbx(R0, as_Register(base), AT, 0);
2041 } else {
2042 __ move(T9, value);
2043 __ gssbx(T9, as_Register(base), AT, 0);
2044 }
2045 } else {
2046 __ dsll(AT, as_Register(index), scale);
2047 __ move(T9, disp);
2048 __ daddu(AT, AT, T9);
2049 if ( value == 0 ) {
2050 __ gssbx(R0, as_Register(base), AT, 0);
2051 } else {
// T9 is reused here: it held disp above, now holds the value.
2052 __ move(T9, value);
2053 __ gssbx(T9, as_Register(base), AT, 0);
2054 }
2055 }
2056 }
2057 } else { //not use loongson isa
// Plain MIPS path: compute base + (index << scale) into AT first.
2058 if (scale == 0) {
2059 __ daddu(AT, as_Register(base), as_Register(index));
2060 } else {
2061 __ dsll(AT, as_Register(index), scale);
2062 __ daddu(AT, as_Register(base), AT);
2063 }
2064 if( Assembler::is_simm16(disp) ) {
2065 if (value == 0) {
2066 __ sb(R0, AT, disp);
2067 } else {
2068 __ move(T9, value);
2069 __ sb(T9, AT, disp);
2070 }
2071 } else {
2072 if (value == 0) {
2073 __ move(T9, disp);
2074 __ daddu(AT, AT, T9);
2075 __ sb(R0, AT, 0);
2076 } else {
2077 __ move(T9, disp);
2078 __ daddu(AT, AT, T9);
2079 __ move(T9, value);
2080 __ sb(T9, AT, 0);
2081 }
2082 }
2083 }
2084 } else {
// No index register: address is base + disp only.
2085 if ( UseLoongsonISA ){
2086 if ( Assembler::is_simm16(disp) ){
2087 if ( value == 0 ) {
2088 __ sb(R0, as_Register(base), disp);
2089 } else {
2090 __ move(AT, value);
2091 __ sb(AT, as_Register(base), disp);
2092 }
2093 } else {
2094 __ move(AT, disp);
2095 if ( value == 0 ) {
2096 __ gssbx(R0, as_Register(base), AT, 0);
2097 } else {
2098 __ move(T9, value);
2099 __ gssbx(T9, as_Register(base), AT, 0);
2100 }
2101 }
2102 } else {
2103 if( Assembler::is_simm16(disp) ) {
2104 if (value == 0) {
2105 __ sb(R0, as_Register(base), disp);
2106 } else {
2107 __ move(AT, value);
2108 __ sb(AT, as_Register(base), disp);
2109 }
2110 } else {
2111 if (value == 0) {
2112 __ move(T9, disp);
2113 __ daddu(AT, as_Register(base), T9);
2114 __ sb(R0, AT, 0);
2115 } else {
2116 __ move(T9, disp);
2117 __ daddu(AT, as_Register(base), T9);
2118 __ move(T9, value);
2119 __ sb(T9, AT, 0);
2120 }
2121 }
2122 }
2123 }
// Full memory barrier after the store — this is the _sync variant.
2125 __ sync();
2126 %}
2128 // Load Short (16bit signed)
// Load a sign-extended 16-bit short from base + (index << scale) + disp
// into dst. Uses lh, or Loongson indexed gslhx when the offset allows.
// AT and T9 are scratch.
2129 enc_class load_S_enc (mRegI dst, memory mem) %{
2130 MacroAssembler _masm(&cbuf);
2131 int dst = $dst$$reg;
2132 int base = $mem$$base;
2133 int index = $mem$$index;
2134 int scale = $mem$$scale;
2135 int disp = $mem$$disp;
2137 if( index != 0 ) {
2138 if ( UseLoongsonISA ) {
// disp fits gslhx's simm8 offset: indexed load, no explicit add.
2139 if ( Assembler::is_simm(disp, 8) ) {
2140 if (scale == 0) {
2141 __ gslhx(as_Register(dst), as_Register(base), as_Register(index), disp);
2142 } else {
2143 __ dsll(AT, as_Register(index), scale);
2144 __ gslhx(as_Register(dst), as_Register(base), AT, disp);
2145 }
// disp fits lh's simm16 offset: form the sum in AT, then lh.
2146 } else if ( Assembler::is_simm16(disp) ) {
2147 if (scale == 0) {
2148 __ daddu(AT, as_Register(base), as_Register(index));
2149 __ lh(as_Register(dst), AT, disp);
2150 } else {
2151 __ dsll(AT, as_Register(index), scale);
2152 __ daddu(AT, as_Register(base), AT);
2153 __ lh(as_Register(dst), AT, disp);
2154 }
// Large disp: fold disp into the index side and use gslhx with offset 0.
2155 } else {
2156 if (scale == 0) {
2157 __ move(AT, disp);
2158 __ daddu(AT, as_Register(index), AT);
2159 __ gslhx(as_Register(dst), as_Register(base), AT, 0);
2160 } else {
2161 __ dsll(AT, as_Register(index), scale);
2162 __ move(T9, disp);
2163 __ daddu(AT, AT, T9);
2164 __ gslhx(as_Register(dst), as_Register(base), AT, 0);
2165 }
2166 }
2167 } else { // not use loongson isa
2168 if (scale == 0) {
2169 __ daddu(AT, as_Register(base), as_Register(index));
2170 } else {
2171 __ dsll(AT, as_Register(index), scale);
2172 __ daddu(AT, as_Register(base), AT);
2173 }
2174 if( Assembler::is_simm16(disp) ) {
2175 __ lh(as_Register(dst), AT, disp);
2176 } else {
2177 __ move(T9, disp);
2178 __ daddu(AT, AT, T9);
2179 __ lh(as_Register(dst), AT, 0);
2180 }
2181 }
2182 } else { // index is 0
2183 if ( UseLoongsonISA ) {
2184 if ( Assembler::is_simm16(disp) ) {
2185 __ lh(as_Register(dst), as_Register(base), disp);
2186 } else {
2187 __ move(T9, disp);
2188 __ gslhx(as_Register(dst), as_Register(base), T9, 0);
2189 }
2190 } else { //not use loongson isa
2191 if( Assembler::is_simm16(disp) ) {
2192 __ lh(as_Register(dst), as_Register(base), disp);
2193 } else {
2194 __ move(T9, disp);
2195 __ daddu(AT, as_Register(base), T9);
2196 __ lh(as_Register(dst), AT, 0);
2197 }
2198 }
2199 }
2200 %}
2202 // Load Char (16bit unsigned)
// Load a zero-extended 16-bit char from base + (index << scale) + disp
// into dst using lhu. Unlike load_S_enc, this encoder has no Loongson
// indexed-load fast path. AT and T9 are scratch.
2203 enc_class load_C_enc (mRegI dst, memory mem) %{
2204 MacroAssembler _masm(&cbuf);
2205 int dst = $dst$$reg;
2206 int base = $mem$$base;
2207 int index = $mem$$index;
2208 int scale = $mem$$scale;
2209 int disp = $mem$$disp;
2211 if( index != 0 ) {
2212 if (scale == 0) {
2213 __ daddu(AT, as_Register(base), as_Register(index));
2214 } else {
2215 __ dsll(AT, as_Register(index), scale);
2216 __ daddu(AT, as_Register(base), AT);
2217 }
2218 if( Assembler::is_simm16(disp) ) {
2219 __ lhu(as_Register(dst), AT, disp);
2220 } else {
2221 __ move(T9, disp);
// NOTE(review): 32-bit addu used to form a 64-bit address here, while
// sibling encoders use daddu — confirm addresses stay within the
// sign-extended 32-bit range, else this should be daddu.
2222 __ addu(AT, AT, T9);
2223 __ lhu(as_Register(dst), AT, 0);
2224 }
2225 } else {
2226 if( Assembler::is_simm16(disp) ) {
2227 __ lhu(as_Register(dst), as_Register(base), disp);
2228 } else {
2229 __ move(T9, disp);
2230 __ daddu(AT, as_Register(base), T9);
2231 __ lhu(as_Register(dst), AT, 0);
2232 }
2233 }
2234 %}
2236 // Store Char (16bit unsigned)
// Store the low 16 bits of src to base + (index << scale) + disp using sh,
// or Loongson indexed gsshx when available and the offset allows.
// AT and T9 are scratch.
// NOTE(review): this encoder uses 32-bit addu for address arithmetic where
// most siblings use daddu — confirm addresses stay within the sign-extended
// 32-bit range.
2237 enc_class store_C_reg_enc (memory mem, mRegI src) %{
2238 MacroAssembler _masm(&cbuf);
2239 int src = $src$$reg;
2240 int base = $mem$$base;
2241 int index = $mem$$index;
2242 int scale = $mem$$scale;
2243 int disp = $mem$$disp;
2245 if( index != 0 ) {
2246 if( Assembler::is_simm16(disp) ) {
// disp fits gsshx's simm8 field: indexed store, no explicit add.
2247 if( UseLoongsonISA && Assembler::is_simm(disp, 8) ) {
2248 if (scale == 0) {
2249 __ gsshx(as_Register(src), as_Register(base), as_Register(index), disp);
2250 } else {
2251 __ dsll(AT, as_Register(index), scale);
2252 __ gsshx(as_Register(src), as_Register(base), AT, disp);
2253 }
2254 } else {
2255 if (scale == 0) {
2256 __ addu(AT, as_Register(base), as_Register(index));
2257 } else {
2258 __ dsll(AT, as_Register(index), scale);
2259 __ addu(AT, as_Register(base), AT);
2260 }
2261 __ sh(as_Register(src), AT, disp);
2262 }
2263 } else {
// Large disp: compute base+index*scale in AT, materialize disp in T9.
2264 if (scale == 0) {
2265 __ addu(AT, as_Register(base), as_Register(index));
2266 } else {
2267 __ dsll(AT, as_Register(index), scale);
2268 __ addu(AT, as_Register(base), AT);
2269 }
2270 __ move(T9, disp);
2271 if( UseLoongsonISA ) {
2272 __ gsshx(as_Register(src), AT, T9, 0);
2273 } else {
2274 __ addu(AT, AT, T9);
2275 __ sh(as_Register(src), AT, 0);
2276 }
2277 }
2278 } else {
// No index register: address is base + disp only.
2279 if( Assembler::is_simm16(disp) ) {
2280 __ sh(as_Register(src), as_Register(base), disp);
2281 } else {
2282 __ move(T9, disp);
2283 if( UseLoongsonISA ) {
2284 __ gsshx(as_Register(src), as_Register(base), T9, 0);
2285 } else {
2286 __ addu(AT, as_Register(base), T9);
2287 __ sh(as_Register(src), AT, 0);
2288 }
2289 }
2290 }
2291 %}
// Store a zero halfword (16 bits) to base + (index << scale) + disp.
// Same structure as store_C_reg_enc but always stores R0, so no value
// materialization is needed. AT and T9 are scratch.
// NOTE(review): uses 32-bit addu for address arithmetic where most siblings
// use daddu — confirm addresses stay within the sign-extended 32-bit range.
2293 enc_class store_C0_enc (memory mem) %{
2294 MacroAssembler _masm(&cbuf);
2295 int base = $mem$$base;
2296 int index = $mem$$index;
2297 int scale = $mem$$scale;
2298 int disp = $mem$$disp;
2300 if( index != 0 ) {
2301 if( Assembler::is_simm16(disp) ) {
// disp fits gsshx's simm8 field: indexed store, no explicit add.
2302 if( UseLoongsonISA && Assembler::is_simm(disp, 8) ) {
2303 if (scale == 0) {
2304 __ gsshx(R0, as_Register(base), as_Register(index), disp);
2305 } else {
2306 __ dsll(AT, as_Register(index), scale);
2307 __ gsshx(R0, as_Register(base), AT, disp);
2308 }
2309 } else {
2310 if (scale == 0) {
2311 __ addu(AT, as_Register(base), as_Register(index));
2312 } else {
2313 __ dsll(AT, as_Register(index), scale);
2314 __ addu(AT, as_Register(base), AT);
2315 }
2316 __ sh(R0, AT, disp);
2317 }
2318 } else {
2319 if (scale == 0) {
2320 __ addu(AT, as_Register(base), as_Register(index));
2321 } else {
2322 __ dsll(AT, as_Register(index), scale);
2323 __ addu(AT, as_Register(base), AT);
2324 }
2325 __ move(T9, disp);
2326 if( UseLoongsonISA ) {
2327 __ gsshx(R0, AT, T9, 0);
2328 } else {
2329 __ addu(AT, AT, T9);
2330 __ sh(R0, AT, 0);
2331 }
2332 }
2333 } else {
// No index register: address is base + disp only.
2334 if( Assembler::is_simm16(disp) ) {
2335 __ sh(R0, as_Register(base), disp);
2336 } else {
2337 __ move(T9, disp);
2338 if( UseLoongsonISA ) {
2339 __ gsshx(R0, as_Register(base), T9, 0);
2340 } else {
2341 __ addu(AT, as_Register(base), T9);
2342 __ sh(R0, AT, 0);
2343 }
2344 }
2345 }
2346 %}
// Load a sign-extended 32-bit int from base + (index << scale) + disp into
// dst using lw, or Loongson indexed gslwx when available and the offset
// allows. AT and T9 are scratch.
// NOTE(review): uses 32-bit addu for address arithmetic where most siblings
// use daddu — confirm addresses stay within the sign-extended 32-bit range.
2348 enc_class load_I_enc (mRegI dst, memory mem) %{
2349 MacroAssembler _masm(&cbuf);
2350 int dst = $dst$$reg;
2351 int base = $mem$$base;
2352 int index = $mem$$index;
2353 int scale = $mem$$scale;
2354 int disp = $mem$$disp;
2356 if( index != 0 ) {
2357 if( Assembler::is_simm16(disp) ) {
// disp fits gslwx's simm8 field: indexed load, no explicit add.
2358 if( UseLoongsonISA && Assembler::is_simm(disp, 8) ) {
2359 if (scale == 0) {
2360 __ gslwx(as_Register(dst), as_Register(base), as_Register(index), disp);
2361 } else {
2362 __ dsll(AT, as_Register(index), scale);
2363 __ gslwx(as_Register(dst), as_Register(base), AT, disp);
2364 }
2365 } else {
2366 if (scale == 0) {
2367 __ addu(AT, as_Register(base), as_Register(index));
2368 } else {
2369 __ dsll(AT, as_Register(index), scale);
2370 __ addu(AT, as_Register(base), AT);
2371 }
2372 __ lw(as_Register(dst), AT, disp);
2373 }
2374 } else {
2375 if (scale == 0) {
2376 __ addu(AT, as_Register(base), as_Register(index));
2377 } else {
2378 __ dsll(AT, as_Register(index), scale);
2379 __ addu(AT, as_Register(base), AT);
2380 }
2381 __ move(T9, disp);
2382 if( UseLoongsonISA ) {
2383 __ gslwx(as_Register(dst), AT, T9, 0);
2384 } else {
2385 __ addu(AT, AT, T9);
2386 __ lw(as_Register(dst), AT, 0);
2387 }
2388 }
2389 } else {
// No index register: address is base + disp only.
2390 if( Assembler::is_simm16(disp) ) {
2391 __ lw(as_Register(dst), as_Register(base), disp);
2392 } else {
2393 __ move(T9, disp);
2394 if( UseLoongsonISA ) {
2395 __ gslwx(as_Register(dst), as_Register(base), T9, 0);
2396 } else {
2397 __ addu(AT, as_Register(base), T9);
2398 __ lw(as_Register(dst), AT, 0);
2399 }
2400 }
2401 }
2402 %}
// Store the low 32 bits of src to base + (index << scale) + disp using sw,
// or Loongson indexed gsswx when available and the offset allows.
// AT and T9 are scratch.
// NOTE(review): uses 32-bit addu for address arithmetic where most siblings
// use daddu — confirm addresses stay within the sign-extended 32-bit range.
2404 enc_class store_I_reg_enc (memory mem, mRegI src) %{
2405 MacroAssembler _masm(&cbuf);
2406 int src = $src$$reg;
2407 int base = $mem$$base;
2408 int index = $mem$$index;
2409 int scale = $mem$$scale;
2410 int disp = $mem$$disp;
2412 if( index != 0 ) {
2413 if( Assembler::is_simm16(disp) ) {
// disp fits gsswx's simm8 field: indexed store, no explicit add.
2414 if( UseLoongsonISA && Assembler::is_simm(disp, 8) ) {
2415 if (scale == 0) {
2416 __ gsswx(as_Register(src), as_Register(base), as_Register(index), disp);
2417 } else {
2418 __ dsll(AT, as_Register(index), scale);
2419 __ gsswx(as_Register(src), as_Register(base), AT, disp);
2420 }
2421 } else {
2422 if (scale == 0) {
2423 __ addu(AT, as_Register(base), as_Register(index));
2424 } else {
2425 __ dsll(AT, as_Register(index), scale);
2426 __ addu(AT, as_Register(base), AT);
2427 }
2428 __ sw(as_Register(src), AT, disp);
2429 }
2430 } else {
2431 if (scale == 0) {
2432 __ addu(AT, as_Register(base), as_Register(index));
2433 } else {
2434 __ dsll(AT, as_Register(index), scale);
2435 __ addu(AT, as_Register(base), AT);
2436 }
2437 __ move(T9, disp);
2438 if( UseLoongsonISA ) {
2439 __ gsswx(as_Register(src), AT, T9, 0);
2440 } else {
2441 __ addu(AT, AT, T9);
2442 __ sw(as_Register(src), AT, 0);
2443 }
2444 }
2445 } else {
// No index register: address is base + disp only.
2446 if( Assembler::is_simm16(disp) ) {
2447 __ sw(as_Register(src), as_Register(base), disp);
2448 } else {
2449 __ move(T9, disp);
2450 if( UseLoongsonISA ) {
2451 __ gsswx(as_Register(src), as_Register(base), T9, 0);
2452 } else {
2453 __ addu(AT, as_Register(base), T9);
2454 __ sw(as_Register(src), AT, 0);
2455 }
2456 }
2457 }
2458 %}
// Store an immediate 32-bit int to base + (index << scale) + disp.
// R0 is stored directly when the immediate is zero; otherwise the value is
// materialized into T9 (or AT in the no-index path). Loongson gsswx folds
// the index add when disp fits in simm8, or takes a register offset when
// disp does not fit at all. AT and T9 are scratch.
2460 enc_class store_I_immI_enc (memory mem, immI src) %{
2461 MacroAssembler _masm(&cbuf);
2462 int base = $mem$$base;
2463 int index = $mem$$index;
2464 int scale = $mem$$scale;
2465 int disp = $mem$$disp;
2466 int value = $src$$constant;
2468 if( index != 0 ) {
2469 if ( UseLoongsonISA ) {
// disp fits gsswx's simm8 field: indexed store, no explicit add.
2470 if ( Assembler::is_simm(disp, 8) ) {
2471 if ( scale == 0 ) {
2472 if ( value == 0 ) {
2473 __ gsswx(R0, as_Register(base), as_Register(index), disp);
2474 } else {
2475 __ move(T9, value);
2476 __ gsswx(T9, as_Register(base), as_Register(index), disp);
2477 }
2478 } else {
2479 __ dsll(AT, as_Register(index), scale);
2480 if ( value == 0 ) {
2481 __ gsswx(R0, as_Register(base), AT, disp);
2482 } else {
2483 __ move(T9, value);
2484 __ gsswx(T9, as_Register(base), AT, disp);
2485 }
2486 }
// disp fits sw's simm16 field: form the sum in AT, then plain sw.
2487 } else if ( Assembler::is_simm16(disp) ) {
2488 if ( scale == 0 ) {
2489 __ daddu(AT, as_Register(base), as_Register(index));
2490 if ( value == 0 ) {
2491 __ sw(R0, AT, disp);
2492 } else {
2493 __ move(T9, value);
2494 __ sw(T9, AT, disp);
2495 }
2496 } else {
2497 __ dsll(AT, as_Register(index), scale);
2498 __ daddu(AT, as_Register(base), AT);
2499 if ( value == 0 ) {
2500 __ sw(R0, AT, disp);
2501 } else {
2502 __ move(T9, value);
2503 __ sw(T9, AT, disp);
2504 }
2505 }
// Large disp: fold disp into the index side, gsswx with offset 0.
2506 } else {
2507 if ( scale == 0 ) {
2508 __ move(T9, disp);
2509 __ daddu(AT, as_Register(index), T9);
2510 if ( value ==0 ) {
2511 __ gsswx(R0, as_Register(base), AT, 0);
2512 } else {
// T9 is reused: it held disp above, now holds the value.
2513 __ move(T9, value);
2514 __ gsswx(T9, as_Register(base), AT, 0);
2515 }
2516 } else {
2517 __ dsll(AT, as_Register(index), scale);
2518 __ move(T9, disp);
2519 __ daddu(AT, AT, T9);
2520 if ( value == 0 ) {
2521 __ gsswx(R0, as_Register(base), AT, 0);
2522 } else {
2523 __ move(T9, value);
2524 __ gsswx(T9, as_Register(base), AT, 0);
2525 }
2526 }
2527 }
2528 } else { //not use loongson isa
// Plain MIPS path: compute base + (index << scale) into AT first.
2529 if (scale == 0) {
2530 __ daddu(AT, as_Register(base), as_Register(index));
2531 } else {
2532 __ dsll(AT, as_Register(index), scale);
2533 __ daddu(AT, as_Register(base), AT);
2534 }
2535 if( Assembler::is_simm16(disp) ) {
2536 if (value == 0) {
2537 __ sw(R0, AT, disp);
2538 } else {
2539 __ move(T9, value);
2540 __ sw(T9, AT, disp);
2541 }
2542 } else {
2543 if (value == 0) {
2544 __ move(T9, disp);
2545 __ daddu(AT, AT, T9);
2546 __ sw(R0, AT, 0);
2547 } else {
2548 __ move(T9, disp);
2549 __ daddu(AT, AT, T9);
2550 __ move(T9, value);
2551 __ sw(T9, AT, 0);
2552 }
2553 }
2554 }
2555 } else {
// No index register: address is base + disp only.
2556 if ( UseLoongsonISA ) {
2557 if ( Assembler::is_simm16(disp) ) {
2558 if ( value == 0 ) {
2559 __ sw(R0, as_Register(base), disp);
2560 } else {
2561 __ move(AT, value);
2562 __ sw(AT, as_Register(base), disp);
2563 }
2564 } else {
2565 __ move(T9, disp);
2566 if ( value == 0 ) {
2567 __ gsswx(R0, as_Register(base), T9, 0);
2568 } else {
2569 __ move(AT, value);
2570 __ gsswx(AT, as_Register(base), T9, 0);
2571 }
2572 }
2573 } else {
2574 if( Assembler::is_simm16(disp) ) {
2575 if (value == 0) {
2576 __ sw(R0, as_Register(base), disp);
2577 } else {
2578 __ move(AT, value);
2579 __ sw(AT, as_Register(base), disp);
2580 }
2581 } else {
2582 if (value == 0) {
2583 __ move(T9, disp);
2584 __ daddu(AT, as_Register(base), T9);
2585 __ sw(R0, AT, 0);
2586 } else {
2587 __ move(T9, disp);
2588 __ daddu(AT, as_Register(base), T9);
2589 __ move(T9, value);
2590 __ sw(T9, AT, 0);
2591 }
2592 }
2593 }
2594 }
2595 %}
// Load a 32-bit compressed (narrow) oop from base + (index << scale) + disp
// into dst, zero-extended via lwu. Asserts that the displacement carries no
// relocation. Uses set64 (full 64-bit materialization) for large disps.
// AT and T9 are scratch.
2597 enc_class load_N_enc (mRegN dst, memory mem) %{
2598 MacroAssembler _masm(&cbuf);
2599 int dst = $dst$$reg;
2600 int base = $mem$$base;
2601 int index = $mem$$index;
2602 int scale = $mem$$scale;
2603 int disp = $mem$$disp;
2604 relocInfo::relocType disp_reloc = $mem->disp_reloc();
2605 assert(disp_reloc == relocInfo::none, "cannot have disp");
2607 if( index != 0 ) {
2608 if (scale == 0) {
2609 __ daddu(AT, as_Register(base), as_Register(index));
2610 } else {
2611 __ dsll(AT, as_Register(index), scale);
2612 __ daddu(AT, as_Register(base), AT);
2613 }
2614 if( Assembler::is_simm16(disp) ) {
2615 __ lwu(as_Register(dst), AT, disp);
2616 } else {
2617 __ set64(T9, disp);
2618 __ daddu(AT, AT, T9);
2619 __ lwu(as_Register(dst), AT, 0);
2620 }
2621 } else {
2622 if( Assembler::is_simm16(disp) ) {
2623 __ lwu(as_Register(dst), as_Register(base), disp);
2624 } else {
2625 __ set64(T9, disp);
2626 __ daddu(AT, as_Register(base), T9);
2627 __ lwu(as_Register(dst), AT, 0);
2628 }
2629 }
2631 %}
// Load a 64-bit pointer from base + (index << scale) + disp into dst using
// ld, or Loongson indexed gsldx when available and the offset allows.
// Asserts that the displacement carries no relocation. AT and T9 are
// scratch; large disps use set64 on the non-Loongson path.
2634 enc_class load_P_enc (mRegP dst, memory mem) %{
2635 MacroAssembler _masm(&cbuf);
2636 int dst = $dst$$reg;
2637 int base = $mem$$base;
2638 int index = $mem$$index;
2639 int scale = $mem$$scale;
2640 int disp = $mem$$disp;
2641 relocInfo::relocType disp_reloc = $mem->disp_reloc();
2642 assert(disp_reloc == relocInfo::none, "cannot have disp");
2644 if( index != 0 ) {
2645 if ( UseLoongsonISA ) {
// disp fits gsldx's simm8 field: indexed load, no explicit add.
2646 if ( Assembler::is_simm(disp, 8) ) {
2647 if ( scale != 0 ) {
2648 __ dsll(AT, as_Register(index), scale);
2649 __ gsldx(as_Register(dst), as_Register(base), AT, disp);
2650 } else {
2651 __ gsldx(as_Register(dst), as_Register(base), as_Register(index), disp);
2652 }
// disp fits ld's simm16 field: form the sum in AT, then ld.
2653 } else if ( Assembler::is_simm16(disp) ){
2654 if ( scale != 0 ) {
2655 __ dsll(AT, as_Register(index), scale);
2656 __ daddu(AT, AT, as_Register(base));
2657 } else {
2658 __ daddu(AT, as_Register(index), as_Register(base));
2659 }
2660 __ ld(as_Register(dst), AT, disp);
// Large disp: fold disp into the index side, gsldx with offset 0.
2661 } else {
2662 if ( scale != 0 ) {
2663 __ dsll(AT, as_Register(index), scale);
2664 __ move(T9, disp);
2665 __ daddu(AT, AT, T9);
2666 } else {
2667 __ move(T9, disp);
2668 __ daddu(AT, as_Register(index), T9);
2669 }
2670 __ gsldx(as_Register(dst), as_Register(base), AT, 0);
2671 }
2672 } else { //not use loongson isa
2673 if (scale == 0) {
2674 __ daddu(AT, as_Register(base), as_Register(index));
2675 } else {
2676 __ dsll(AT, as_Register(index), scale);
2677 __ daddu(AT, as_Register(base), AT);
2678 }
2679 if( Assembler::is_simm16(disp) ) {
2680 __ ld(as_Register(dst), AT, disp);
2681 } else {
2682 __ set64(T9, disp);
2683 __ daddu(AT, AT, T9);
2684 __ ld(as_Register(dst), AT, 0);
2685 }
2686 }
2687 } else {
// No index register: address is base + disp only.
2688 if ( UseLoongsonISA ) {
2689 if ( Assembler::is_simm16(disp) ){
2690 __ ld(as_Register(dst), as_Register(base), disp);
2691 } else {
2692 __ set64(T9, disp);
2693 __ gsldx(as_Register(dst), as_Register(base), T9, 0);
2694 }
2695 } else { //not use loongson isa
2696 if( Assembler::is_simm16(disp) ) {
2697 __ ld(as_Register(dst), as_Register(base), disp);
2698 } else {
2699 __ set64(T9, disp);
2700 __ daddu(AT, as_Register(base), T9);
2701 __ ld(as_Register(dst), AT, 0);
2702 }
2703 }
2704 }
2705 // if( disp_reloc != relocInfo::none) __ ld(as_Register(dst), as_Register(dst), 0);
2706 %}
// Store a 64-bit pointer register src to base + (index << scale) + disp
// using sd, or Loongson indexed gssdx when available and the offset allows.
// AT and T9 are scratch.
2708 enc_class store_P_reg_enc (memory mem, mRegP src) %{
2709 MacroAssembler _masm(&cbuf);
2710 int src = $src$$reg;
2711 int base = $mem$$base;
2712 int index = $mem$$index;
2713 int scale = $mem$$scale;
2714 int disp = $mem$$disp;
2716 if( index != 0 ) {
2717 if ( UseLoongsonISA ){
// disp fits gssdx's simm8 field: indexed store, no explicit add.
2718 if ( Assembler::is_simm(disp, 8) ) {
2719 if ( scale == 0 ) {
2720 __ gssdx(as_Register(src), as_Register(base), as_Register(index), disp);
2721 } else {
2722 __ dsll(AT, as_Register(index), scale);
2723 __ gssdx(as_Register(src), as_Register(base), AT, disp);
2724 }
// disp fits sd's simm16 field: form the sum in AT, then sd.
2725 } else if ( Assembler::is_simm16(disp) ) {
2726 if ( scale == 0 ) {
2727 __ daddu(AT, as_Register(base), as_Register(index));
2728 } else {
2729 __ dsll(AT, as_Register(index), scale);
2730 __ daddu(AT, as_Register(base), AT);
2731 }
2732 __ sd(as_Register(src), AT, disp);
// Large disp: fold disp into the index side, gssdx with offset 0.
2733 } else {
2734 if ( scale == 0 ) {
2735 __ move(T9, disp);
2736 __ daddu(AT, as_Register(index), T9);
2737 } else {
2738 __ dsll(AT, as_Register(index), scale);
2739 __ move(T9, disp);
2740 __ daddu(AT, AT, T9);
2741 }
2742 __ gssdx(as_Register(src), as_Register(base), AT, 0);
2743 }
2744 } else { //not use loongson isa
2745 if (scale == 0) {
2746 __ daddu(AT, as_Register(base), as_Register(index));
2747 } else {
2748 __ dsll(AT, as_Register(index), scale);
2749 __ daddu(AT, as_Register(base), AT);
2750 }
2751 if( Assembler::is_simm16(disp) ) {
2752 __ sd(as_Register(src), AT, disp);
2753 } else {
2754 __ move(T9, disp);
2755 __ daddu(AT, AT, T9);
2756 __ sd(as_Register(src), AT, 0);
2757 }
2758 }
2759 } else {
// No index register: address is base + disp only.
2760 if ( UseLoongsonISA ) {
2761 if ( Assembler::is_simm16(disp) ) {
2762 __ sd(as_Register(src), as_Register(base), disp);
2763 } else {
2764 __ move(T9, disp);
2765 __ gssdx(as_Register(src), as_Register(base), T9, 0);
2766 }
2767 } else {
2768 if( Assembler::is_simm16(disp) ) {
2769 __ sd(as_Register(src), as_Register(base), disp);
2770 } else {
2771 __ move(T9, disp);
2772 __ daddu(AT, as_Register(base), T9);
2773 __ sd(as_Register(src), AT, 0);
2774 }
2775 }
2776 }
2777 %}
// Store a 32-bit compressed (narrow) oop register src to
// base + (index << scale) + disp using sw, or Loongson indexed gsswx when
// available and the offset allows. Mirrors store_P_reg_enc but with 32-bit
// stores. AT and T9 are scratch.
2779 enc_class store_N_reg_enc (memory mem, mRegN src) %{
2780 MacroAssembler _masm(&cbuf);
2781 int src = $src$$reg;
2782 int base = $mem$$base;
2783 int index = $mem$$index;
2784 int scale = $mem$$scale;
2785 int disp = $mem$$disp;
2787 if( index != 0 ) {
2788 if ( UseLoongsonISA ){
// disp fits gsswx's simm8 field: indexed store, no explicit add.
2789 if ( Assembler::is_simm(disp, 8) ) {
2790 if ( scale == 0 ) {
2791 __ gsswx(as_Register(src), as_Register(base), as_Register(index), disp);
2792 } else {
2793 __ dsll(AT, as_Register(index), scale);
2794 __ gsswx(as_Register(src), as_Register(base), AT, disp);
2795 }
// disp fits sw's simm16 field: form the sum in AT, then sw.
2796 } else if ( Assembler::is_simm16(disp) ) {
2797 if ( scale == 0 ) {
2798 __ daddu(AT, as_Register(base), as_Register(index));
2799 } else {
2800 __ dsll(AT, as_Register(index), scale);
2801 __ daddu(AT, as_Register(base), AT);
2802 }
2803 __ sw(as_Register(src), AT, disp);
// Large disp: fold disp into the index side, gsswx with offset 0.
2804 } else {
2805 if ( scale == 0 ) {
2806 __ move(T9, disp);
2807 __ daddu(AT, as_Register(index), T9);
2808 } else {
2809 __ dsll(AT, as_Register(index), scale);
2810 __ move(T9, disp);
2811 __ daddu(AT, AT, T9);
2812 }
2813 __ gsswx(as_Register(src), as_Register(base), AT, 0);
2814 }
2815 } else { //not use loongson isa
2816 if (scale == 0) {
2817 __ daddu(AT, as_Register(base), as_Register(index));
2818 } else {
2819 __ dsll(AT, as_Register(index), scale);
2820 __ daddu(AT, as_Register(base), AT);
2821 }
2822 if( Assembler::is_simm16(disp) ) {
2823 __ sw(as_Register(src), AT, disp);
2824 } else {
2825 __ move(T9, disp);
2826 __ daddu(AT, AT, T9);
2827 __ sw(as_Register(src), AT, 0);
2828 }
2829 }
2830 } else {
// No index register: address is base + disp only.
2831 if ( UseLoongsonISA ) {
2832 if ( Assembler::is_simm16(disp) ) {
2833 __ sw(as_Register(src), as_Register(base), disp);
2834 } else {
2835 __ move(T9, disp);
2836 __ gsswx(as_Register(src), as_Register(base), T9, 0);
2837 }
2838 } else {
2839 if( Assembler::is_simm16(disp) ) {
2840 __ sw(as_Register(src), as_Register(base), disp);
2841 } else {
2842 __ move(T9, disp);
2843 __ daddu(AT, as_Register(base), T9);
2844 __ sw(as_Register(src), AT, 0);
2845 }
2846 }
2847 }
2848 %}
// Store a null pointer (64-bit zero, R0) to base + (index << scale) + disp
// using sd or Loongson indexed gssdx. Note this block branches on scale
// first and on disp second, unlike most sibling encoders. AT and T9 are
// scratch.
2850 enc_class store_P_immP0_enc (memory mem) %{
2851 MacroAssembler _masm(&cbuf);
2852 int base = $mem$$base;
2853 int index = $mem$$index;
2854 int scale = $mem$$scale;
2855 int disp = $mem$$disp;
2857 if( index != 0 ) {
2858 if (scale == 0) {
2859 if( Assembler::is_simm16(disp) ) {
// simm8 disp: gssdx folds the index add entirely.
2860 if (UseLoongsonISA && Assembler::is_simm(disp, 8)) {
2861 __ gssdx(R0, as_Register(base), as_Register(index), disp);
2862 } else {
2863 __ daddu(AT, as_Register(base), as_Register(index));
2864 __ sd(R0, AT, disp);
2865 }
2866 } else {
2867 __ daddu(AT, as_Register(base), as_Register(index));
2868 __ move(T9, disp);
2869 if(UseLoongsonISA) {
2870 __ gssdx(R0, AT, T9, 0);
2871 } else {
2872 __ daddu(AT, AT, T9);
2873 __ sd(R0, AT, 0);
2874 }
2875 }
2876 } else {
// Scaled index: shift first, then the same disp-size dispatch.
2877 __ dsll(AT, as_Register(index), scale);
2878 if( Assembler::is_simm16(disp) ) {
2879 if (UseLoongsonISA && Assembler::is_simm(disp, 8)) {
2880 __ gssdx(R0, as_Register(base), AT, disp);
2881 } else {
2882 __ daddu(AT, as_Register(base), AT);
2883 __ sd(R0, AT, disp);
2884 }
2885 } else {
2886 __ daddu(AT, as_Register(base), AT);
2887 __ move(T9, disp);
2888 if (UseLoongsonISA) {
2889 __ gssdx(R0, AT, T9, 0);
2890 } else {
2891 __ daddu(AT, AT, T9);
2892 __ sd(R0, AT, 0);
2893 }
2894 }
2895 }
2896 } else {
// No index register: address is base + disp only.
2897 if( Assembler::is_simm16(disp) ) {
2898 __ sd(R0, as_Register(base), disp);
2899 } else {
2900 __ move(T9, disp);
2901 if (UseLoongsonISA) {
2902 __ gssdx(R0, as_Register(base), T9, 0);
2903 } else {
2904 __ daddu(AT, as_Register(base), T9);
2905 __ sd(R0, AT, 0);
2906 }
2907 }
2908 }
2909 %}
// Store an immediate pointer constant (immP31: fits in 31 bits) to
// base + (index << scale) + disp with sd. R0 is stored when the constant is
// zero; otherwise it is materialized into T9 (or AT in the no-index path).
// No Loongson fast path in this encoder. AT and T9 are scratch.
2911 enc_class store_P_immP_enc (memory mem, immP31 src) %{
2912 MacroAssembler _masm(&cbuf);
2913 int base = $mem$$base;
2914 int index = $mem$$index;
2915 int scale = $mem$$scale;
2916 int disp = $mem$$disp;
2917 long value = $src$$constant;
2919 if( index != 0 ) {
// Form base + (index << scale) in AT.
2920 if (scale == 0) {
2921 __ daddu(AT, as_Register(base), as_Register(index));
2922 } else {
2923 __ dsll(AT, as_Register(index), scale);
2924 __ daddu(AT, as_Register(base), AT);
2925 }
2926 if( Assembler::is_simm16(disp) ) {
2927 if (value == 0) {
2928 __ sd(R0, AT, disp);
2929 } else {
2930 __ move(T9, value);
2931 __ sd(T9, AT, disp);
2932 }
2933 } else {
2934 if (value == 0) {
2935 __ move(T9, disp);
2936 __ daddu(AT, AT, T9);
2937 __ sd(R0, AT, 0);
2938 } else {
// T9 holds disp for the add, then is reloaded with the value.
2939 __ move(T9, disp);
2940 __ daddu(AT, AT, T9);
2941 __ move(T9, value);
2942 __ sd(T9, AT, 0);
2943 }
2944 }
2945 } else {
// No index register: address is base + disp only.
2946 if( Assembler::is_simm16(disp) ) {
2947 if (value == 0) {
2948 __ sd(R0, as_Register(base), disp);
2949 } else {
2950 __ move(AT, value);
2951 __ sd(AT, as_Register(base), disp);
2952 }
2953 } else {
2954 if (value == 0) {
2955 __ move(T9, disp);
2956 __ daddu(AT, as_Register(base), T9);
2957 __ sd(R0, AT, 0);
2958 } else {
2959 __ move(T9, disp);
2960 __ daddu(AT, as_Register(base), T9);
2961 __ move(T9, value);
2962 __ sd(T9, AT, 0);
2963 }
2964 }
2965 }
2966 %}
// Store a zero narrow (compressed) oop — a 32-bit zero via sw of R0 — to
// base + (index << scale) + disp. No Loongson fast path in this encoder.
// AT and T9 are scratch.
2968 enc_class storeImmN0_enc(memory mem, ImmN0 src) %{
2969 MacroAssembler _masm(&cbuf);
2970 int base = $mem$$base;
2971 int index = $mem$$index;
2972 int scale = $mem$$scale;
2973 int disp = $mem$$disp;
2975 if(index!=0){
// Form base + (index << scale) in AT.
2976 if (scale == 0) {
2977 __ daddu(AT, as_Register(base), as_Register(index));
2978 } else {
2979 __ dsll(AT, as_Register(index), scale);
2980 __ daddu(AT, as_Register(base), AT);
2981 }
2983 if( Assembler::is_simm16(disp) ) {
2984 __ sw(R0, AT, disp);
2985 } else {
2986 __ move(T9, disp);
2987 __ daddu(AT, AT, T9);
2988 __ sw(R0, AT, 0);
2989 }
2990 }
2991 else {
// No index register: address is base + disp only.
2992 if( Assembler::is_simm16(disp) ) {
2993 __ sw(R0, as_Register(base), disp);
2994 } else {
2995 __ move(T9, disp);
2996 __ daddu(AT, as_Register(base), T9);
2997 __ sw(R0, AT, 0);
2998 }
2999 }
3000 %}
// Store an immediate narrow (compressed) oop constant to
// base + (index << scale) + disp. A NULL constant is stored as a 32-bit
// zero and returns early (that path requires a simm16 disp). Otherwise the
// oop is registered with the OopRecorder and emitted with a narrow-oop
// relocation plus patchable_set48 so the constant can be patched later.
// AT and T9 are scratch.
3002 enc_class storeImmN_enc (memory mem, immN src) %{
3003 MacroAssembler _masm(&cbuf);
3004 int base = $mem$$base;
3005 int index = $mem$$index;
3006 int scale = $mem$$scale;
3007 int disp = $mem$$disp;
3008 long * value = (long *)$src$$constant;
3010 if (value == NULL) {
// Null narrow oop: plain 32-bit zero store; large disps not supported here.
3011 guarantee(Assembler::is_simm16(disp), "FIXME: disp is not simm16!");
3012 if (index == 0) {
3013 __ sw(R0, as_Register(base), disp);
3014 } else {
3015 if (scale == 0) {
3016 __ daddu(AT, as_Register(base), as_Register(index));
3017 } else {
3018 __ dsll(AT, as_Register(index), scale);
3019 __ daddu(AT, as_Register(base), AT);
3020 }
3021 __ sw(R0, AT, disp);
3022 }
3024 return;
3025 }
3027 int oop_index = __ oop_recorder()->find_index((jobject)value);
3028 RelocationHolder rspec = oop_Relocation::spec(oop_index);
3030 guarantee(scale == 0, "FIXME: scale is not zero !");
3031 guarantee(value != 0, "FIXME: value is zero !");
3033 if (index != 0) {
3034 if (scale == 0) {
3035 __ daddu(AT, as_Register(base), as_Register(index));
3036 } else {
3037 __ dsll(AT, as_Register(index), scale);
3038 __ daddu(AT, as_Register(base), AT);
3039 }
3040 if( Assembler::is_simm16(disp) ) {
3041 if(rspec.type() != relocInfo::none) {
// Relocatable: emit patchable 48-bit load of the oop index placeholder.
3042 __ relocate(rspec, Assembler::narrow_oop_operand);
3043 __ patchable_set48(T9, oop_index);
3044 } else {
// NOTE(review): the no-reloc path stores the raw oop_index, not the
// narrow-oop value — presumably never taken (oop reloc always set);
// confirm against other ports.
3045 __ set64(T9, oop_index);
3046 }
3047 __ sw(T9, AT, disp);
3048 } else {
3049 __ move(T9, disp);
// NOTE(review): 32-bit addu used to form a 64-bit address here while the
// rest of this encoder uses daddu — confirm intentional.
3050 __ addu(AT, AT, T9);
3052 if(rspec.type() != relocInfo::none) {
3053 __ relocate(rspec, Assembler::narrow_oop_operand);
3054 __ patchable_set48(T9, oop_index);
3055 } else {
3056 __ set64(T9, oop_index);
3057 }
3058 __ sw(T9, AT, 0);
3059 }
3060 }
3061 else {
// No index register: address is base + disp only.
3062 if( Assembler::is_simm16(disp) ) {
3063 if($src->constant_reloc() != relocInfo::none) {
3064 __ relocate(rspec, Assembler::narrow_oop_operand);
3065 __ patchable_set48(T9, oop_index);
3066 } else {
3067 __ set64(T9, oop_index);
3068 }
3069 __ sw(T9, as_Register(base), disp);
3070 } else {
3071 __ move(T9, disp);
3072 __ daddu(AT, as_Register(base), T9);
3074 if($src->constant_reloc() != relocInfo::none){
3075 __ relocate(rspec, Assembler::narrow_oop_operand);
3076 __ patchable_set48(T9, oop_index);
3077 } else {
3078 __ set64(T9, oop_index);
3079 }
3080 __ sw(T9, AT, 0);
3081 }
3082 }
3083 %}
// Store an immediate compressed Klass pointer to
// base + (index << scale) + disp. The Klass is registered with the
// OopRecorder, its compressed encoding (narrowp) is computed, and the store
// is emitted with a metadata relocation plus patchable_set48 when
// relocatable. Requires UseCompressedOops. AT and T9 are scratch.
3085 enc_class storeImmNKlass_enc (memory mem, immNKlass src) %{
3086 MacroAssembler _masm(&cbuf);
3088 assert (UseCompressedOops, "should only be used for compressed headers");
3089 assert (__ oop_recorder() != NULL, "this assembler needs an OopRecorder");
3091 int base = $mem$$base;
3092 int index = $mem$$index;
3093 int scale = $mem$$scale;
3094 int disp = $mem$$disp;
3095 long value = $src$$constant;
3097 int klass_index = __ oop_recorder()->find_index((Klass*)value);
3098 RelocationHolder rspec = metadata_Relocation::spec(klass_index);
// Compressed (narrow) klass value actually written to memory.
3099 long narrowp = Klass::encode_klass((Klass*)value);
3101 if(index!=0){
3102 if (scale == 0) {
3103 __ daddu(AT, as_Register(base), as_Register(index));
3104 } else {
3105 __ dsll(AT, as_Register(index), scale);
3106 __ daddu(AT, as_Register(base), AT);
3107 }
3109 if( Assembler::is_simm16(disp) ) {
3110 if(rspec.type() != relocInfo::none){
// Relocatable: patchable 48-bit materialization of the narrow klass.
3111 __ relocate(rspec, Assembler::narrow_oop_operand);
3112 __ patchable_set48(T9, narrowp);
3113 } else {
3114 __ set64(T9, narrowp);
3115 }
3116 __ sw(T9, AT, disp);
3117 } else {
3118 __ move(T9, disp);
3119 __ daddu(AT, AT, T9);
3121 if(rspec.type() != relocInfo::none){
3122 __ relocate(rspec, Assembler::narrow_oop_operand);
3123 __ patchable_set48(T9, narrowp);
3124 } else {
3125 __ set64(T9, narrowp);
3126 }
3128 __ sw(T9, AT, 0);
3129 }
3130 } else {
// No index register: address is base + disp only.
3131 if( Assembler::is_simm16(disp) ) {
3132 if(rspec.type() != relocInfo::none){
3133 __ relocate(rspec, Assembler::narrow_oop_operand);
3134 __ patchable_set48(T9, narrowp);
3135 }
3136 else {
3137 __ set64(T9, narrowp);
3138 }
3139 __ sw(T9, as_Register(base), disp);
3140 } else {
3141 __ move(T9, disp);
3142 __ daddu(AT, as_Register(base), T9);
3144 if(rspec.type() != relocInfo::none){
3145 __ relocate(rspec, Assembler::narrow_oop_operand);
3146 __ patchable_set48(T9, narrowp);
3147 } else {
3148 __ set64(T9, narrowp);
3149 }
3150 __ sw(T9, AT, 0);
3151 }
3152 }
3153 %}
// Load a 64-bit long from base + (index << scale) + disp into dst.
// Emits a leading lb from [base+0] purely to trigger an implicit null
// check on the base register before the address arithmetic. AT and T9 are
// scratch.
3155 enc_class load_L_enc (mRegL dst, memory mem) %{
3156 MacroAssembler _masm(&cbuf);
3157 int base = $mem$$base;
3158 int index = $mem$$index;
3159 int scale = $mem$$scale;
3160 int disp = $mem$$disp;
3161 Register dst_reg = as_Register($dst$$reg);
3163 // For implicit null check
3164 __ lb(AT, as_Register(base), 0);
3166 if( index != 0 ) {
3167 if (scale == 0) {
3168 __ daddu(AT, as_Register(base), as_Register(index));
3169 } else {
3170 __ dsll(AT, as_Register(index), scale);
3171 __ daddu(AT, as_Register(base), AT);
3172 }
3173 if( Assembler::is_simm16(disp) ) {
3174 __ ld(dst_reg, AT, disp);
3175 } else {
3176 __ move(T9, disp);
3177 __ daddu(AT, AT, T9);
3178 __ ld(dst_reg, AT, 0);
3179 }
3180 } else {
// No index register: address is base + disp only.
3181 if( Assembler::is_simm16(disp) ) {
3182 __ ld(dst_reg, as_Register(base), disp);
3183 } else {
3184 __ move(T9, disp);
3185 __ daddu(AT, as_Register(base), T9);
3186 __ ld(dst_reg, AT, 0);
3187 }
3188 }
3189 %}
// Store a 64-bit long register to memory.
// Effective address = base [+ (index << scale)] [+ disp]; AT and T9 are scratch.
enc_class store_L_reg_enc (memory mem, mRegL src) %{
  MacroAssembler _masm(&cbuf);
  int base = $mem$$base;
  int index = $mem$$index;
  int scale = $mem$$scale;
  int disp = $mem$$disp;
  Register src_reg = as_Register($src$$reg);

  if( index != 0 ) {
    // AT = base + (index << scale)
    if (scale == 0) {
      __ daddu(AT, as_Register(base), as_Register(index));
    } else {
      __ dsll(AT, as_Register(index), scale);
      __ daddu(AT, as_Register(base), AT);
    }
    if( Assembler::is_simm16(disp) ) {
      __ sd(src_reg, AT, disp);
    } else {
      // Displacement does not fit sd's signed 16-bit immediate: add it via T9.
      __ move(T9, disp);
      __ daddu(AT, AT, T9);
      __ sd(src_reg, AT, 0);
    }
  } else {
    if( Assembler::is_simm16(disp) ) {
      __ sd(src_reg, as_Register(base), disp);
    } else {
      __ move(T9, disp);
      __ daddu(AT, as_Register(base), T9);
      __ sd(src_reg, AT, 0);
    }
  }
%}
// Store constant long zero to memory: R0 is the hard-wired zero register,
// so no scratch is needed for the value itself.
// Effective address = base [+ (index << scale)] [+ disp]; AT and T9 are scratch.
enc_class store_L_immL0_enc (memory mem, immL0 src) %{
  MacroAssembler _masm(&cbuf);
  int base = $mem$$base;
  int index = $mem$$index;
  int scale = $mem$$scale;
  int disp = $mem$$disp;

  if( index != 0 ) {
    // For implicit null check: touch the base register's memory first so a
    // NULL base faults here, before any address arithmetic.
    __ lb(AT, as_Register(base), 0);

    // AT = base + (index << scale)
    if (scale == 0) {
      __ daddu(AT, as_Register(base), as_Register(index));
    } else {
      __ dsll(AT, as_Register(index), scale);
      __ daddu(AT, as_Register(base), AT);
    }
    if( Assembler::is_simm16(disp) ) {
      __ sd(R0, AT, disp);
    } else {
      // Displacement does not fit sd's signed 16-bit immediate: add it via T9.
      // Use daddu, not addu: this is 64-bit address arithmetic, and addu
      // sign-extends the low 32 bits, corrupting addresses at or above 2G.
      // (All sibling enc_classes use daddu for this computation.)
      __ move(T9, disp);
      __ daddu(AT, AT, T9);
      __ sd(R0, AT, 0);
    }
  } else {
    if( Assembler::is_simm16(disp) ) {
      __ sd(R0, as_Register(base), disp);
    } else {
      // 64-bit address arithmetic: daddu, not addu (see note above).
      __ move(T9, disp);
      __ daddu(AT, as_Register(base), T9);
      __ sd(R0, AT, 0);
    }
  }
%}
// Store a 64-bit immediate to memory. The constant is materialized into T9
// with set64; AT holds the effective address when one must be computed.
enc_class store_L_immL_enc (memory mem, immL src) %{
  MacroAssembler _masm(&cbuf);
  int base = $mem$$base;
  int index = $mem$$index;
  int scale = $mem$$scale;
  int disp = $mem$$disp;
  long imm = $src$$constant;

  if( index != 0 ) {
    // AT = base + (index << scale)
    if (scale == 0) {
      __ daddu(AT, as_Register(base), as_Register(index));
    } else {
      __ dsll(AT, as_Register(index), scale);
      __ daddu(AT, as_Register(base), AT);
    }
    if( Assembler::is_simm16(disp) ) {
      __ set64(T9, imm);
      __ sd(T9, AT, disp);
    } else {
      // Displacement does not fit sd's signed 16-bit immediate: add it via T9.
      // Use daddu, not addu: this is 64-bit address arithmetic, and addu
      // sign-extends the low 32 bits, corrupting addresses at or above 2G.
      __ move(T9, disp);
      __ daddu(AT, AT, T9);
      __ set64(T9, imm);
      __ sd(T9, AT, 0);
    }
  } else {
    if( Assembler::is_simm16(disp) ) {
      // Store directly off the base register; copying base into AT first
      // (as the old code did) was a wasted instruction.
      __ set64(T9, imm);
      __ sd(T9, as_Register(base), disp);
    } else {
      // 64-bit address arithmetic: daddu, not addu (see note above).
      __ move(T9, disp);
      __ daddu(AT, as_Register(base), T9);
      __ set64(T9, imm);
      __ sd(T9, AT, 0);
    }
  }
%}
// Load a 32-bit float from memory into an FPU register.
// Prefers the Loongson fused indexed load (gslwxc1) when available; the
// gs* indexed forms only take an 8-bit signed displacement, hence the
// extra is_simm(disp, 8) check. AT and T9 are scratch.
enc_class load_F_enc (regF dst, memory mem) %{
  MacroAssembler _masm(&cbuf);
  int base = $mem$$base;
  int index = $mem$$index;
  int scale = $mem$$scale;
  int disp = $mem$$disp;
  FloatRegister dst = $dst$$FloatRegister;

  if( index != 0 ) {
    if( Assembler::is_simm16(disp) ) {
      if( UseLoongsonISA && Assembler::is_simm(disp, 8) ) {
        // One-instruction indexed load: dst = [base + index(<<scale) + disp]
        if (scale == 0) {
          __ gslwxc1(dst, as_Register(base), as_Register(index), disp);
        } else {
          __ dsll(AT, as_Register(index), scale);
          __ gslwxc1(dst, as_Register(base), AT, disp);
        }
      } else {
        // AT = base + (index << scale), then plain lwc1.
        if (scale == 0) {
          __ daddu(AT, as_Register(base), as_Register(index));
        } else {
          __ dsll(AT, as_Register(index), scale);
          __ daddu(AT, as_Register(base), AT);
        }
        __ lwc1(dst, AT, disp);
      }
    } else {
      // Large displacement: fold it in via T9.
      if (scale == 0) {
        __ daddu(AT, as_Register(base), as_Register(index));
      } else {
        __ dsll(AT, as_Register(index), scale);
        __ daddu(AT, as_Register(base), AT);
      }
      __ move(T9, disp);
      if( UseLoongsonISA ) {
        __ gslwxc1(dst, AT, T9, 0);
      } else {
        __ daddu(AT, AT, T9);
        __ lwc1(dst, AT, 0);
      }
    }
  } else {
    if( Assembler::is_simm16(disp) ) {
      __ lwc1(dst, as_Register(base), disp);
    } else {
      __ move(T9, disp);
      if( UseLoongsonISA ) {
        __ gslwxc1(dst, as_Register(base), T9, 0);
      } else {
        __ daddu(AT, as_Register(base), T9);
        __ lwc1(dst, AT, 0);
      }
    }
  }
%}
// Store a 32-bit float FPU register to memory.
// Prefers the Loongson fused indexed store (gsswxc1) when available; the
// gs* indexed forms only take an 8-bit signed displacement, hence the
// extra is_simm(disp, 8) check. AT and T9 are scratch.
enc_class store_F_reg_enc (memory mem, regF src) %{
  MacroAssembler _masm(&cbuf);
  int base = $mem$$base;
  int index = $mem$$index;
  int scale = $mem$$scale;
  int disp = $mem$$disp;
  FloatRegister src = $src$$FloatRegister;

  if( index != 0 ) {
    if( Assembler::is_simm16(disp) ) {
      if( UseLoongsonISA && Assembler::is_simm(disp, 8) ) {
        // One-instruction indexed store: [base + index(<<scale) + disp] = src
        if (scale == 0) {
          __ gsswxc1(src, as_Register(base), as_Register(index), disp);
        } else {
          __ dsll(AT, as_Register(index), scale);
          __ gsswxc1(src, as_Register(base), AT, disp);
        }
      } else {
        // AT = base + (index << scale), then plain swc1.
        if (scale == 0) {
          __ daddu(AT, as_Register(base), as_Register(index));
        } else {
          __ dsll(AT, as_Register(index), scale);
          __ daddu(AT, as_Register(base), AT);
        }
        __ swc1(src, AT, disp);
      }
    } else {
      // Large displacement: fold it in via T9.
      if (scale == 0) {
        __ daddu(AT, as_Register(base), as_Register(index));
      } else {
        __ dsll(AT, as_Register(index), scale);
        __ daddu(AT, as_Register(base), AT);
      }
      __ move(T9, disp);
      if( UseLoongsonISA ) {
        __ gsswxc1(src, AT, T9, 0);
      } else {
        __ daddu(AT, AT, T9);
        __ swc1(src, AT, 0);
      }
    }
  } else {
    if( Assembler::is_simm16(disp) ) {
      __ swc1(src, as_Register(base), disp);
    } else {
      __ move(T9, disp);
      if( UseLoongsonISA ) {
        // BUG FIX: this path previously emitted gslwxc1 (an indexed LOAD),
        // which overwrote src instead of storing it. A store must use the
        // indexed store gsswxc1, matching the index != 0 paths above.
        __ gsswxc1(src, as_Register(base), T9, 0);
      } else {
        __ daddu(AT, as_Register(base), T9);
        __ swc1(src, AT, 0);
      }
    }
  }
%}
// Load a 64-bit double from memory into an FPU register.
// Prefers the Loongson fused indexed load (gsldxc1) when available; the
// gs* indexed forms only take an 8-bit signed displacement, hence the
// extra is_simm(disp, 8) check. AT and T9 are scratch.
enc_class load_D_enc (regD dst, memory mem) %{
  MacroAssembler _masm(&cbuf);
  int base = $mem$$base;
  int index = $mem$$index;
  int scale = $mem$$scale;
  int disp = $mem$$disp;
  FloatRegister dst_reg = as_FloatRegister($dst$$reg);

  if( index != 0 ) {
    if( Assembler::is_simm16(disp) ) {
      if( UseLoongsonISA && Assembler::is_simm(disp, 8) ) {
        // One-instruction indexed load: dst = [base + index(<<scale) + disp]
        if (scale == 0) {
          __ gsldxc1(dst_reg, as_Register(base), as_Register(index), disp);
        } else {
          __ dsll(AT, as_Register(index), scale);
          __ gsldxc1(dst_reg, as_Register(base), AT, disp);
        }
      } else {
        // AT = base + (index << scale), then plain ldc1.
        if (scale == 0) {
          __ daddu(AT, as_Register(base), as_Register(index));
        } else {
          __ dsll(AT, as_Register(index), scale);
          __ daddu(AT, as_Register(base), AT);
        }
        __ ldc1(dst_reg, AT, disp);
      }
    } else {
      // Large displacement: fold it in via T9.
      if (scale == 0) {
        __ daddu(AT, as_Register(base), as_Register(index));
      } else {
        __ dsll(AT, as_Register(index), scale);
        __ daddu(AT, as_Register(base), AT);
      }
      __ move(T9, disp);
      if( UseLoongsonISA ) {
        __ gsldxc1(dst_reg, AT, T9, 0);
      } else {
        // Use daddu, not addu: 64-bit address arithmetic. addu sign-extends
        // the low 32 bits and corrupts addresses at or above 2G; load_F_enc
        // already uses daddu on this same path.
        __ daddu(AT, AT, T9);
        __ ldc1(dst_reg, AT, 0);
      }
    }
  } else {
    if( Assembler::is_simm16(disp) ) {
      __ ldc1(dst_reg, as_Register(base), disp);
    } else {
      __ move(T9, disp);
      if( UseLoongsonISA ) {
        __ gsldxc1(dst_reg, as_Register(base), T9, 0);
      } else {
        // 64-bit address arithmetic: daddu, not addu (see note above).
        __ daddu(AT, as_Register(base), T9);
        __ ldc1(dst_reg, AT, 0);
      }
    }
  }
%}
// Store a 64-bit double FPU register to memory.
// Prefers the Loongson fused indexed store (gssdxc1) when available; the
// gs* indexed forms only take an 8-bit signed displacement, hence the
// extra is_simm(disp, 8) check. AT and T9 are scratch.
enc_class store_D_reg_enc (memory mem, regD src) %{
  MacroAssembler _masm(&cbuf);
  int base = $mem$$base;
  int index = $mem$$index;
  int scale = $mem$$scale;
  int disp = $mem$$disp;
  FloatRegister src_reg = as_FloatRegister($src$$reg);

  if( index != 0 ) {
    if( Assembler::is_simm16(disp) ) {
      if( UseLoongsonISA && Assembler::is_simm(disp, 8) ) {
        // One-instruction indexed store: [base + index(<<scale) + disp] = src
        if (scale == 0) {
          __ gssdxc1(src_reg, as_Register(base), as_Register(index), disp);
        } else {
          __ dsll(AT, as_Register(index), scale);
          __ gssdxc1(src_reg, as_Register(base), AT, disp);
        }
      } else {
        // AT = base + (index << scale), then plain sdc1.
        if (scale == 0) {
          __ daddu(AT, as_Register(base), as_Register(index));
        } else {
          __ dsll(AT, as_Register(index), scale);
          __ daddu(AT, as_Register(base), AT);
        }
        __ sdc1(src_reg, AT, disp);
      }
    } else {
      // Large displacement: fold it in via T9.
      if (scale == 0) {
        __ daddu(AT, as_Register(base), as_Register(index));
      } else {
        __ dsll(AT, as_Register(index), scale);
        __ daddu(AT, as_Register(base), AT);
      }
      __ move(T9, disp);
      if( UseLoongsonISA ) {
        __ gssdxc1(src_reg, AT, T9, 0);
      } else {
        // Use daddu, not addu: 64-bit address arithmetic. addu sign-extends
        // the low 32 bits and corrupts addresses at or above 2G; the other
        // FP access enc_classes use daddu on this same path.
        __ daddu(AT, AT, T9);
        __ sdc1(src_reg, AT, 0);
      }
    }
  } else {
    if( Assembler::is_simm16(disp) ) {
      __ sdc1(src_reg, as_Register(base), disp);
    } else {
      __ move(T9, disp);
      if( UseLoongsonISA ) {
        __ gssdxc1(src_reg, as_Register(base), T9, 0);
      } else {
        // 64-bit address arithmetic: daddu, not addu (see note above).
        __ daddu(AT, as_Register(base), T9);
        __ sdc1(src_reg, AT, 0);
      }
    }
  }
%}
// Call from compiled code into the runtime (used by both Java_To_Runtime
// and Java_To_Runtime_Leaf call nodes). Emits a patchable call with a
// runtime-call relocation anchored at the marked instruction address.
enc_class Java_To_Runtime (method meth) %{ // CALL Java_To_Runtime, Java_To_Runtime_Leaf
  MacroAssembler _masm(&cbuf);
  // Mark the instruction starting address for the relocation info.
  __ block_comment("Java_To_Runtime");
  cbuf.set_insts_mark();
  __ relocate(relocInfo::runtime_call_type);
  __ patchable_call((address)$meth$$method);
%}
// Java static call site. The relocation type records how the call may be
// patched later: runtime stub when no method is attached, optimized virtual
// when the compiler devirtualized the call, plain static otherwise.
enc_class Java_Static_Call (method meth) %{ // JAVA STATIC CALL
  // CALL to fixup routine. Fixup routine uses ScopeDesc info to determine
  // who we intended to call.
  MacroAssembler _masm(&cbuf);
  cbuf.set_insts_mark();

  if ( !_method ) {
    // A call to a runtime wrapper, not a real Java method.
    __ relocate(relocInfo::runtime_call_type);
  } else if(_optimized_virtual) {
    __ relocate(relocInfo::opt_virtual_call_type);
  } else {
    __ relocate(relocInfo::static_call_type);
  }

  __ patchable_call((address)($meth$$method));
  if( _method ) { // Emit stub for static call
    emit_java_to_interp(cbuf);
  }
%}
/*
 * Java dynamic (inline-cache) call site.
 * [Ref: LIR_Assembler::ic_call() ]
 */
enc_class Java_Dynamic_Call (method meth) %{ // JAVA DYNAMIC CALL
  MacroAssembler _masm(&cbuf);
  __ block_comment("Java_Dynamic_Call");
  // ic_call emits the inline-cache dispatch sequence and its relocation.
  __ ic_call((address)$meth$$method);
%}
// Convert the fast lock/unlock outcome into a flags value:
// AT is tested here, so it is presumably set by the preceding fast
// lock/unlock sequence (0 meaning success) -- confirm against the matching
// instruct rules. flags := 0 when AT == 0, else 0xFFFFFFFF.
enc_class Set_Flags_After_Fast_Lock_Unlock(FlagsReg cr) %{
  Register flags = $cr$$Register;
  Label L;

  MacroAssembler _masm(&cbuf);

  __ addu(flags, R0, R0);      // flags = 0
  __ beq(AT, R0, L);           // AT == 0: keep flags = 0
  __ delayed()->nop();         // branch delay slot
  __ move(flags, 0xFFFFFFFF);  // AT != 0: flags = all ones
  __ bind(L);
%}
// Slow-path partial subtype check: result := 0 when 'sub' is a subtype of
// 'super', 1 on a miss. Clobbers the 'tmp' operand register and T9.
enc_class enc_PartialSubtypeCheck(mRegP result, mRegP sub, mRegP super, mRegI tmp) %{
  Register result = $result$$Register;
  Register sub = $sub$$Register;
  Register super = $super$$Register;
  Register length = $tmp$$Register;
  Register tmp = T9;
  Label miss;

  /* 2012/9/28 Jin: result may be the same as sub
   *    47c   B40: #  B21 B41 <- B20  Freq: 0.155379
   *    47c     partialSubtypeCheck result=S1, sub=S1, super=S3, length=S0
   *    4bc     mov   S2, NULL #@loadConP
   *    4c0     beq   S1, S2, B21 #@branchConP  P=0.999999 C=-1.000000
   */
  MacroAssembler _masm(&cbuf);
  Label done;
  // Falls through on hit, branches to 'miss' otherwise.
  __ check_klass_subtype_slow_path(sub, super, length, tmp,
                                   NULL, &miss,
                                   /*set_cond_codes:*/ true);
  /* 2013/7/22 Jin: Refer to X86_64's RDI */
  // Hit: result = 0 (mirrors the x86_64 convention of a zero RDI on success).
  __ move(result, 0);
  __ b(done);
  __ nop();    // branch delay slot

  __ bind(miss);
  __ move(result, 1);
  __ bind(done);
%}
3604 %}
3607 //---------MIPS FRAME--------------------------------------------------------------
3608 // Definition of frame structure and management information.
3609 //
3610 // S T A C K L A Y O U T Allocators stack-slot number
3611 // | (to get allocators register number
3612 // G Owned by | | v add SharedInfo::stack0)
3613 // r CALLER | |
3614 // o | +--------+ pad to even-align allocators stack-slot
3615 // w V | pad0 | numbers; owned by CALLER
3616 // t -----------+--------+----> Matcher::_in_arg_limit, unaligned
3617 // h ^ | in | 5
3618 // | | args | 4 Holes in incoming args owned by SELF
3619 // | | old | | 3
3620 // | | SP-+--------+----> Matcher::_old_SP, even aligned
3621 // v | | ret | 3 return address
3622 // Owned by +--------+
3623 // Self | pad2 | 2 pad to align old SP
3624 // | +--------+ 1
3625 // | | locks | 0
3626 // | +--------+----> SharedInfo::stack0, even aligned
3627 // | | pad1 | 11 pad to align new SP
3628 // | +--------+
3629 // | | | 10
3630 // | | spills | 9 spills
3631 // V | | 8 (pad0 slot for callee)
3632 // -----------+--------+----> Matcher::_out_arg_limit, unaligned
3633 // ^ | out | 7
3634 // | | args | 6 Holes in outgoing args owned by CALLEE
3635 // Owned by new | |
3636 // Callee SP-+--------+----> Matcher::_new_SP, even aligned
3637 // | |
3638 //
3639 // Note 1: Only region 8-11 is determined by the allocator. Region 0-5 is
3640 // known from SELF's arguments and the Java calling convention.
3641 // Region 6-7 is determined per call site.
3642 // Note 2: If the calling convention leaves holes in the incoming argument
3643 // area, those holes are owned by SELF. Holes in the outgoing area
// are owned by the CALLEE. Holes should not be necessary in the
3645 // incoming area, as the Java calling convention is completely under
3646 // the control of the AD file. Doubles can be sorted and packed to
// avoid holes. Holes in the outgoing arguments may be necessary for
3648 // varargs C calling conventions.
3649 // Note 3: Region 0-3 is even aligned, with pad2 as needed. Region 3-5 is
3650 // even aligned with pad0 as needed.
3651 // Region 6 is even aligned. Region 6-7 is NOT even aligned;
3652 // region 6-11 is even aligned; it may be padded out more so that
3653 // the region from SP to FP meets the minimum stack alignment.
3654 // Note 4: For I2C adapters, the incoming FP may not meet the minimum stack
3655 // alignment. Region 11, pad1, may be dynamically extended so that
3656 // SP meets the minimum alignment.
frame %{
  // Stack grows toward lower addresses on MIPS.
  stack_direction(TOWARDS_LOW);

  // These two registers define part of the calling convention
  // between compiled code and the interpreter.
  // SEE StartI2CNode::calling_convention & StartC2INode::calling_convention & StartOSRNode::calling_convention
  // for more information. by yjl 3/16/2006

  inline_cache_reg(T1); // Inline Cache Register
  interpreter_method_oop_reg(S3); // Method Oop Register when calling interpreter
  /*
  inline_cache_reg(T1); // Inline Cache Register or methodOop for I2C
  interpreter_arg_ptr_reg(A0); // Argument pointer for I2C adapters
  */

  // Optional: name the operand used by cisc-spilling to access [stack_pointer + offset]
  cisc_spilling_operand_name(indOffset32);

  // Number of stack slots consumed by locking an object.
  // Generates Compile::sync_stack_slots.
  #ifdef _LP64
  sync_stack_slots(2);
  #else
  sync_stack_slots(1);
  #endif

  frame_pointer(SP);

  // Interpreter stores its frame pointer in a register which is
  // stored to the stack by I2CAdaptors.
  // I2CAdaptors convert from interpreted java to compiled java.

  interpreter_frame_pointer(FP);

  // Generates Matcher::stack_alignment.
  stack_alignment(StackAlignmentInBytes); //wordSize = sizeof(char*);

  // Number of stack slots between incoming argument block and the start of
  // a new frame. The PROLOG must add this many slots to the stack. The
  // EPILOG must remove this many slots. Intel needs one slot for
  // return address.
  // Generates Matcher::in_preserve_stack_slots.
  //in_preserve_stack_slots(VerifyStackAtCalls + 2); //Now VerifyStackAtCalls is defined as false ! Leave one stack slot for ra and fp
  in_preserve_stack_slots(4); //Now VerifyStackAtCalls is defined as false ! Leave two stack slots for ra and fp

  // Number of outgoing stack slots killed above the out_preserve_stack_slots
  // for calls to C. Supports the var-args backing area for register parms.
  varargs_C_out_slots_killed(0);

  // The after-PROLOG location of the return address. Location of
  // return address specifies a type (REG or STACK) and a number
  // representing the register number (i.e. - use a register name) or
  // stack slot.
  // Ret Addr is on stack in slot 0 if no locks or verification or alignment.
  // Otherwise, it is above the locks and verification slot and alignment word
  //return_addr(STACK -1+ round_to(1+VerifyStackAtCalls+Compile::current()->sync()*Compile::current()->sync_stack_slots(),WordsPerLong));
  return_addr(REG RA);

  // Body of function which returns an integer array locating
  // arguments either in registers or in stack slots. Passed an array
  // of ideal registers called "sig" and a "length" count. Stack-slot
  // offsets are based on outgoing arguments, i.e. a CALLER setting up
  // arguments for a CALLEE. Incoming stack arguments are
  // automatically biased by the preserve_stack_slots field above.

  // Generated as Matcher::calling_convention(OptoRegPair *sig, uint length, bool is_outgoing);
  // StartNode::calling_convention calls this. by yjl 3/16/2006
  calling_convention %{
    SharedRuntime::java_calling_convention(sig_bt, regs, length, false);
  %}

  // Same as above, but for calls into C code.
  // SEE CallRuntimeNode::calling_convention for more information. by yjl 3/16/2006
  c_calling_convention %{
    (void) SharedRuntime::c_calling_convention(sig_bt, regs, /*regs2=*/NULL, length);
  %}

  // Location of C & interpreter return values.
  // register(s) contain(s) return value for Op_StartI2C and Op_StartOSR.
  // SEE Matcher::match. by yjl 3/16/2006
  c_return_value %{
    assert( ideal_reg >= Op_RegI && ideal_reg <= Op_RegL, "only return normal values" );
    /* -- , -- , Op_RegN, Op_RegI, Op_RegP, Op_RegF, Op_RegD, Op_RegL */
    static int lo[Op_RegL+1] = { 0, 0, V0_num, V0_num, V0_num, F0_num, F0_num, V0_num };
    static int hi[Op_RegL+1] = { 0, 0, OptoReg::Bad, OptoReg::Bad, V0_H_num, OptoReg::Bad, F0_H_num, V0_H_num };
    return OptoRegPair(hi[ideal_reg],lo[ideal_reg]);
  %}

  // Location of compiled Java return values.
  // register(s) contain(s) return value for Op_StartC2I and Op_Start.
  // SEE Matcher::match. by yjl 3/16/2006

  return_value %{
    assert( ideal_reg >= Op_RegI && ideal_reg <= Op_RegL, "only return normal values" );
    /* -- , -- , Op_RegN, Op_RegI, Op_RegP, Op_RegF, Op_RegD, Op_RegL */
    static int lo[Op_RegL+1] = { 0, 0, V0_num, V0_num, V0_num, F0_num, F0_num, V0_num };
    static int hi[Op_RegL+1] = { 0, 0, OptoReg::Bad, OptoReg::Bad, V0_H_num, OptoReg::Bad, F0_H_num, V0_H_num};
    return OptoRegPair(hi[ideal_reg],lo[ideal_reg]);
  %}

%}
//----------ATTRIBUTES---------------------------------------------------------
//----------Operand Attributes-------------------------------------------------
op_attrib op_cost(0); // Required cost attribute

//----------Instruction Attributes---------------------------------------------
ins_attrib ins_cost(100); // Required cost attribute
ins_attrib ins_size(32); // Required size attribute (in bits)
ins_attrib ins_pc_relative(0); // Required PC Relative flag
ins_attrib ins_short_branch(0); // Required flag: is this instruction a
                                // non-matching short branch variant of some
                                // long branch?
ins_attrib ins_alignment(4); // Required alignment attribute (must be a power of 2)
                             // specifies the alignment that some part of the instruction (not
                             // necessarily the start) requires. If > 1, a compute_padding()
                             // function must be provided for the instruction
//----------OPERANDS-----------------------------------------------------------
// Operand definitions must precede instruction definitions for correct parsing
// in the ADLC because operands constitute user defined types which are used in
// instruction definitions.

// Vectors: 64-bit vector, allocated in the double-precision FP register class.
operand vecD() %{
  constraint(ALLOC_IN_RC(dbl_reg));
  match(VecD);

  format %{ %}
  interface(REG_INTER);
%}

// Flags register, used as output of compare instructions
operand FlagsReg() %{
  constraint(ALLOC_IN_RC(mips_flags));
  match(RegFlags);

  format %{ "EFLAGS" %}
  interface(REG_INTER);
%}
//----------Simple Operands----------------------------------------------------
//TODO: Should we need to define some more special immediate number ?
// Immediate Operands
// Integer Immediate: any 32-bit int constant
operand immI() %{
  match(ConI);
  //TODO: should not match immI8 here LEE
  match(immI8);

  op_cost(20);
  format %{ %}
  interface(CONST_INTER);
%}

// Long Immediate 8-bit (fits a signed byte)
operand immL8()
%{
  predicate(-0x80L <= n->get_long() && n->get_long() < 0x80L);
  match(ConL);

  op_cost(5);
  format %{ %}
  interface(CONST_INTER);
%}

// Constant for test vs zero
operand immI0() %{
  predicate(n->get_int() == 0);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Constant for increment
operand immI1() %{
  predicate(n->get_int() == 1);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Constant for decrement
operand immI_M1() %{
  predicate(n->get_int() == -1);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Largest signed 32-bit int (Integer.MAX_VALUE)
operand immI_MaxI() %{
  predicate(n->get_int() == 2147483647);
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
// Valid scale values for addressing modes (shift amounts 0..3)
operand immI2() %{
  predicate(0 <= n->get_int() && (n->get_int() <= 3));
  match(ConI);

  format %{ %}
  interface(CONST_INTER);
%}

// Signed 8-bit int immediate
operand immI8() %{
  predicate((-128 <= n->get_int()) && (n->get_int() <= 127));
  match(ConI);

  op_cost(5);
  format %{ %}
  interface(CONST_INTER);
%}

// Signed 16-bit int immediate (fits MIPS I-type simm16 field)
operand immI16() %{
  predicate((-32768 <= n->get_int()) && (n->get_int() <= 32767));
  match(ConI);

  op_cost(10);
  format %{ %}
  interface(CONST_INTER);
%}

// Constant for long shifts
operand immI_32() %{
  predicate( n->get_int() == 32 );
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Maximum 64-bit shift distance
operand immI_63() %{
  predicate( n->get_int() == 63 );
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Valid 32-bit shift distance
operand immI_0_31() %{
  predicate( n->get_int() >= 0 && n->get_int() <= 31 );
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Operand for non-negative integer mask (contiguous low-bit mask)
operand immI_nonneg_mask() %{
  predicate( (n->get_int() >= 0) && (Assembler::is_int_mask(n->get_int()) != -1) );
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Upper half of the 64-bit shift range
operand immI_32_63() %{
  predicate( n->get_int() >= 32 && n->get_int() <= 63 );
  match(ConI);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}

// Int immediate whose negation fits simm16: lets a subtract be
// implemented as an add of the negated constant.
operand immI16_sub() %{
  predicate((-32767 <= n->get_int()) && (n->get_int() <= 32768));
  match(ConI);

  op_cost(10);
  format %{ %}
  interface(CONST_INTER);
%}

// Non-negative int fitting a signed 16-bit immediate
operand immI_0_32767() %{
  predicate( n->get_int() >= 0 && n->get_int() <= 32767 );
  match(ConI);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}

// Non-negative int fitting an unsigned 16-bit immediate
operand immI_0_65535() %{
  predicate( n->get_int() >= 0 && n->get_int() <= 65535 );
  match(ConI);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}
// Int constant 1
operand immI_1() %{
  predicate( n->get_int() == 1 );
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Int constant 2
operand immI_2() %{
  predicate( n->get_int() == 2 );
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Int constant 3
operand immI_3() %{
  predicate( n->get_int() == 3 );
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Int constant 7
operand immI_7() %{
  predicate( n->get_int() == 7 );
  match(ConI);

  format %{ %}
  interface(CONST_INTER);
%}

// Immediates for special shifts (sign extend)

// Shift distance for short/char <-> int conversions
operand immI_16() %{
  predicate( n->get_int() == 16 );
  match(ConI);

  format %{ %}
  interface(CONST_INTER);
%}

// Shift distance for byte <-> int conversions
operand immI_24() %{
  predicate( n->get_int() == 24 );
  match(ConI);

  format %{ %}
  interface(CONST_INTER);
%}

// Constant for byte-wide masking
operand immI_255() %{
  predicate( n->get_int() == 255 );
  match(ConI);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Constant for char-wide (16-bit) masking
operand immI_65535() %{
  predicate( n->get_int() == 65535 );
  match(ConI);

  op_cost(5);
  format %{ %}
  interface(CONST_INTER);
%}

// Int constant 0x10000
operand immI_65536() %{
  predicate( n->get_int() == 65536 );
  match(ConI);

  op_cost(5);
  format %{ %}
  interface(CONST_INTER);
%}

// Int constant -0x10000 (upper-halfword mask)
operand immI_M65536() %{
  predicate( n->get_int() == -65536 );
  match(ConI);

  op_cost(5);
  format %{ %}
  interface(CONST_INTER);
%}
// Pointer Immediate: any pointer constant
operand immP() %{
  match(ConP);

  op_cost(10);
  format %{ %}
  interface(CONST_INTER);
%}

// Non-relocatable pointer that fits in the low 31 bits
operand immP31()
%{
  predicate(n->as_Type()->type()->reloc() == relocInfo::none
            && (n->get_ptr() >> 31) == 0);
  match(ConP);

  op_cost(5);
  format %{ %}
  interface(CONST_INTER);
%}

// NULL Pointer Immediate
operand immP0() %{
  predicate( n->get_ptr() == 0 );
  match(ConP);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}

// Pointer Immediate: 64-bit, materialized with a set sequence
operand immP_set() %{
  match(ConP);

  op_cost(5);
  // formats are generated automatically for constants and base registers
  format %{ %}
  interface(CONST_INTER);
%}

// Pointer Immediate: 64-bit oop, or an expensive (>3 instruction) constant
// better loaded from the constant table
operand immP_load() %{
  predicate(n->bottom_type()->isa_oop_ptr() || (MacroAssembler::insts_for_set64(n->get_ptr()) > 3));
  match(ConP);

  op_cost(5);
  // formats are generated automatically for constants and base registers
  format %{ %}
  interface(CONST_INTER);
%}

// Pointer Immediate: 64-bit non-oop constant cheap to materialize inline
operand immP_no_oop_cheap() %{
  predicate(!n->bottom_type()->isa_oop_ptr() && (MacroAssembler::insts_for_set64(n->get_ptr()) <= 3));
  match(ConP);

  op_cost(5);
  // formats are generated automatically for constants and base registers
  format %{ %}
  interface(CONST_INTER);
%}

// Pointer for polling page
operand immP_poll() %{
  predicate(n->get_ptr() != 0 && n->get_ptr() == (intptr_t)os::get_polling_page());
  match(ConP);
  op_cost(5);

  format %{ %}
  interface(CONST_INTER);
%}

// Narrow (compressed) oop Pointer Immediate
operand immN() %{
  match(ConN);

  op_cost(10);
  format %{ %}
  interface(CONST_INTER);
%}

// Narrow (compressed) klass Pointer Immediate
operand immNKlass() %{
  match(ConNKlass);

  op_cost(10);
  format %{ %}
  interface(CONST_INTER);
%}

// NULL narrow Pointer Immediate
operand immN0() %{
  predicate(n->get_narrowcon() == 0);
  match(ConN);

  op_cost(5);
  format %{ %}
  interface(CONST_INTER);
%}
// Long Immediate: any 64-bit constant
operand immL() %{
  match(ConL);

  op_cost(20);
  format %{ %}
  interface(CONST_INTER);
%}

// Long Immediate zero
operand immL0() %{
  predicate( n->get_long() == 0L );
  match(ConL);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}

// Long constant 7 (bits 0..2 mask)
operand immL7() %{
  predicate( n->get_long() == 7L );
  match(ConL);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}

// Long constant -1 (all bits set)
operand immL_M1() %{
  predicate( n->get_long() == -1L );
  match(ConL);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}

// bit 0..2 zero
operand immL_M8() %{
  predicate( n->get_long() == -8L );
  match(ConL);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}

// bit 2 zero
operand immL_M5() %{
  predicate( n->get_long() == -5L );
  match(ConL);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}

// bit 1..2 zero
operand immL_M7() %{
  predicate( n->get_long() == -7L );
  match(ConL);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}

// bit 0..1 zero
operand immL_M4() %{
  predicate( n->get_long() == -4L );
  match(ConL);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}

// bit 3..6 zero
operand immL_M121() %{
  predicate( n->get_long() == -121L );
  match(ConL);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}

// Long immediate from 0 to 127.
// Used for a shorter form of long mul by 10.
operand immL_127() %{
  predicate((0 <= n->get_long()) && (n->get_long() <= 127));
  match(ConL);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}

// Operand for non-negative long mask (contiguous low-bit mask)
operand immL_nonneg_mask() %{
  predicate( (n->get_long() >= 0) && (Assembler::is_jlong_mask(n->get_long()) != -1) );
  match(ConL);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Non-negative long fitting an unsigned 16-bit immediate
operand immL_0_65535() %{
  predicate( n->get_long() >= 0 && n->get_long() <= 65535 );
  match(ConL);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}
// Long Immediate: cheap (materialize in <= 3 instructions)
operand immL_cheap() %{
  predicate(MacroAssembler::insts_for_set64(n->get_long()) <= 3);
  match(ConL);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}

// Long Immediate: expensive (materialize in > 3 instructions)
operand immL_expensive() %{
  predicate(MacroAssembler::insts_for_set64(n->get_long()) > 3);
  match(ConL);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}

// Signed 16-bit long immediate (fits MIPS simm16 field)
operand immL16() %{
  predicate((-32768 <= n->get_long()) && (n->get_long() <= 32767));
  match(ConL);

  op_cost(10);
  format %{ %}
  interface(CONST_INTER);
%}

// Long immediate whose negation fits simm16: lets a subtract be
// implemented as an add of the negated constant.
operand immL16_sub() %{
  predicate((-32767 <= n->get_long()) && (n->get_long() <= 32768));
  match(ConL);

  op_cost(10);
  format %{ %}
  interface(CONST_INTER);
%}

// Long Immediate: low 32-bit mask
operand immL_32bits() %{
  predicate(n->get_long() == 0xFFFFFFFFL);
  match(ConL);
  op_cost(20);

  format %{ %}
  interface(CONST_INTER);
%}

// Long Immediate 32-bit signed (value survives an int round-trip)
operand immL32()
%{
  predicate(n->get_long() == (int) (n->get_long()));
  match(ConL);

  op_cost(15);
  format %{ %}
  interface(CONST_INTER);
%}
4345 //single-precision floating-point zero
4346 operand immF0() %{
4347 predicate(jint_cast(n->getf()) == 0);
4348 match(ConF);
4350 op_cost(5);
4351 format %{ %}
4352 interface(CONST_INTER);
4353 %}
4355 //single-precision floating-point immediate
4356 operand immF() %{
4357 match(ConF);
4359 op_cost(20);
4360 format %{ %}
4361 interface(CONST_INTER);
4362 %}
4364 //double-precision floating-point zero
// Bit-pattern compare: matches +0.0 only; -0.0 (sign bit set) does NOT match.
4365 operand immD0() %{
4366 predicate(jlong_cast(n->getd()) == 0);
4367 match(ConD);
4369 op_cost(5);
4370 format %{ %}
4371 interface(CONST_INTER);
4372 %}
4374 //double-precision floating-point immediate
// No predicate: matches any double constant.
4375 operand immD() %{
4376 match(ConD);
4378 op_cost(20);
4379 format %{ %}
4380 interface(CONST_INTER);
4381 %}
4383 // Register Operands
4384 // Integer Register
// General int-register operand: allocator may pick any register in int_reg.
4385 operand mRegI() %{
4386 constraint(ALLOC_IN_RC(int_reg));
4387 match(RegI);
4389 format %{ %}
4390 interface(REG_INTER);
4391 %}
// Int register restricted to the no_Ax_int_reg class (presumably excludes the
// V0/V1 result registers -- confirm against the register class definition above).
4393 operand no_Ax_mRegI() %{
4394 constraint(ALLOC_IN_RC(no_Ax_int_reg));
4395 match(RegI);
4396 match(mRegI);
4398 format %{ %}
4399 interface(REG_INTER);
4400 %}
// Fixed-register int operands: each pins allocation to a single saved register
// (S0..S7) via its one-register class, so match rules can name a specific register.
// The extra match(mRegI) lets these appear wherever a generic mRegI is expected.
4402 operand mS0RegI() %{
4403 constraint(ALLOC_IN_RC(s0_reg));
4404 match(RegI);
4405 match(mRegI);
4407 format %{ "S0" %}
4408 interface(REG_INTER);
4409 %}
4411 operand mS1RegI() %{
4412 constraint(ALLOC_IN_RC(s1_reg));
4413 match(RegI);
4414 match(mRegI);
4416 format %{ "S1" %}
4417 interface(REG_INTER);
4418 %}
4420 operand mS2RegI() %{
4421 constraint(ALLOC_IN_RC(s2_reg));
4422 match(RegI);
4423 match(mRegI);
4425 format %{ "S2" %}
4426 interface(REG_INTER);
4427 %}
4429 operand mS3RegI() %{
4430 constraint(ALLOC_IN_RC(s3_reg));
4431 match(RegI);
4432 match(mRegI);
4434 format %{ "S3" %}
4435 interface(REG_INTER);
4436 %}
4438 operand mS4RegI() %{
4439 constraint(ALLOC_IN_RC(s4_reg));
4440 match(RegI);
4441 match(mRegI);
4443 format %{ "S4" %}
4444 interface(REG_INTER);
4445 %}
4447 operand mS5RegI() %{
4448 constraint(ALLOC_IN_RC(s5_reg));
4449 match(RegI);
4450 match(mRegI);
4452 format %{ "S5" %}
4453 interface(REG_INTER);
4454 %}
4456 operand mS6RegI() %{
4457 constraint(ALLOC_IN_RC(s6_reg));
4458 match(RegI);
4459 match(mRegI);
4461 format %{ "S6" %}
4462 interface(REG_INTER);
4463 %}
4465 operand mS7RegI() %{
4466 constraint(ALLOC_IN_RC(s7_reg));
4467 match(RegI);
4468 match(mRegI);
4470 format %{ "S7" %}
4471 interface(REG_INTER);
4472 %}
// Fixed-register int operands for the temporary registers T0..T3, T8, T9.
4475 operand mT0RegI() %{
4476 constraint(ALLOC_IN_RC(t0_reg));
4477 match(RegI);
4478 match(mRegI);
4480 format %{ "T0" %}
4481 interface(REG_INTER);
4482 %}
4484 operand mT1RegI() %{
4485 constraint(ALLOC_IN_RC(t1_reg));
4486 match(RegI);
4487 match(mRegI);
4489 format %{ "T1" %}
4490 interface(REG_INTER);
4491 %}
4493 operand mT2RegI() %{
4494 constraint(ALLOC_IN_RC(t2_reg));
4495 match(RegI);
4496 match(mRegI);
4498 format %{ "T2" %}
4499 interface(REG_INTER);
4500 %}
4502 operand mT3RegI() %{
4503 constraint(ALLOC_IN_RC(t3_reg));
4504 match(RegI);
4505 match(mRegI);
4507 format %{ "T3" %}
4508 interface(REG_INTER);
4509 %}
4511 operand mT8RegI() %{
4512 constraint(ALLOC_IN_RC(t8_reg));
4513 match(RegI);
4514 match(mRegI);
4516 format %{ "T8" %}
4517 interface(REG_INTER);
4518 %}
4520 operand mT9RegI() %{
4521 constraint(ALLOC_IN_RC(t9_reg));
4522 match(RegI);
4523 match(mRegI);
4525 format %{ "T9" %}
4526 interface(REG_INTER);
4527 %}
// Fixed-register int operands for the argument registers A0..A7.
4529 operand mA0RegI() %{
4530 constraint(ALLOC_IN_RC(a0_reg));
4531 match(RegI);
4532 match(mRegI);
4534 format %{ "A0" %}
4535 interface(REG_INTER);
4536 %}
4538 operand mA1RegI() %{
4539 constraint(ALLOC_IN_RC(a1_reg));
4540 match(RegI);
4541 match(mRegI);
4543 format %{ "A1" %}
4544 interface(REG_INTER);
4545 %}
4547 operand mA2RegI() %{
4548 constraint(ALLOC_IN_RC(a2_reg));
4549 match(RegI);
4550 match(mRegI);
4552 format %{ "A2" %}
4553 interface(REG_INTER);
4554 %}
4556 operand mA3RegI() %{
4557 constraint(ALLOC_IN_RC(a3_reg));
4558 match(RegI);
4559 match(mRegI);
4561 format %{ "A3" %}
4562 interface(REG_INTER);
4563 %}
4565 operand mA4RegI() %{
4566 constraint(ALLOC_IN_RC(a4_reg));
4567 match(RegI);
4568 match(mRegI);
4570 format %{ "A4" %}
4571 interface(REG_INTER);
4572 %}
4574 operand mA5RegI() %{
4575 constraint(ALLOC_IN_RC(a5_reg));
4576 match(RegI);
4577 match(mRegI);
4579 format %{ "A5" %}
4580 interface(REG_INTER);
4581 %}
4583 operand mA6RegI() %{
4584 constraint(ALLOC_IN_RC(a6_reg));
4585 match(RegI);
4586 match(mRegI);
4588 format %{ "A6" %}
4589 interface(REG_INTER);
4590 %}
4592 operand mA7RegI() %{
4593 constraint(ALLOC_IN_RC(a7_reg));
4594 match(RegI);
4595 match(mRegI);
4597 format %{ "A7" %}
4598 interface(REG_INTER);
4599 %}
// Fixed-register int operands for the return-value registers V0 and V1.
4601 operand mV0RegI() %{
4602 constraint(ALLOC_IN_RC(v0_reg));
4603 match(RegI);
4604 match(mRegI);
4606 format %{ "V0" %}
4607 interface(REG_INTER);
4608 %}
4610 operand mV1RegI() %{
4611 constraint(ALLOC_IN_RC(v1_reg));
4612 match(RegI);
4613 match(mRegI);
4615 format %{ "V1" %}
4616 interface(REG_INTER);
4617 %}
// Narrow (compressed) oop register operand; shares the int_reg class since a
// compressed oop occupies a 32-bit register.
4619 operand mRegN() %{
4620 constraint(ALLOC_IN_RC(int_reg));
4621 match(RegN);
4623 format %{ %}
4624 interface(REG_INTER);
4625 %}
// Fixed-register narrow-oop operands for the temporary registers T0..T3, T8, T9.
4627 operand t0_RegN() %{
4628 constraint(ALLOC_IN_RC(t0_reg));
4629 match(RegN);
4630 match(mRegN);
4632 format %{ %}
4633 interface(REG_INTER);
4634 %}
4636 operand t1_RegN() %{
4637 constraint(ALLOC_IN_RC(t1_reg));
4638 match(RegN);
4639 match(mRegN);
4641 format %{ %}
4642 interface(REG_INTER);
4643 %}
4645 operand t2_RegN() %{
4646 constraint(ALLOC_IN_RC(t2_reg));
4647 match(RegN);
4648 match(mRegN);
4650 format %{ %}
4651 interface(REG_INTER);
4652 %}
4654 operand t3_RegN() %{
4655 constraint(ALLOC_IN_RC(t3_reg));
4656 match(RegN);
4657 match(mRegN);
4659 format %{ %}
4660 interface(REG_INTER);
4661 %}
4663 operand t8_RegN() %{
4664 constraint(ALLOC_IN_RC(t8_reg));
4665 match(RegN);
4666 match(mRegN);
4668 format %{ %}
4669 interface(REG_INTER);
4670 %}
4672 operand t9_RegN() %{
4673 constraint(ALLOC_IN_RC(t9_reg));
4674 match(RegN);
4675 match(mRegN);
4677 format %{ %}
4678 interface(REG_INTER);
4679 %}
// Fixed-register narrow-oop operands for the argument registers A0..A7.
4681 operand a0_RegN() %{
4682 constraint(ALLOC_IN_RC(a0_reg));
4683 match(RegN);
4684 match(mRegN);
4686 format %{ %}
4687 interface(REG_INTER);
4688 %}
4690 operand a1_RegN() %{
4691 constraint(ALLOC_IN_RC(a1_reg));
4692 match(RegN);
4693 match(mRegN);
4695 format %{ %}
4696 interface(REG_INTER);
4697 %}
4699 operand a2_RegN() %{
4700 constraint(ALLOC_IN_RC(a2_reg));
4701 match(RegN);
4702 match(mRegN);
4704 format %{ %}
4705 interface(REG_INTER);
4706 %}
4708 operand a3_RegN() %{
4709 constraint(ALLOC_IN_RC(a3_reg));
4710 match(RegN);
4711 match(mRegN);
4713 format %{ %}
4714 interface(REG_INTER);
4715 %}
4717 operand a4_RegN() %{
4718 constraint(ALLOC_IN_RC(a4_reg));
4719 match(RegN);
4720 match(mRegN);
4722 format %{ %}
4723 interface(REG_INTER);
4724 %}
4726 operand a5_RegN() %{
4727 constraint(ALLOC_IN_RC(a5_reg));
4728 match(RegN);
4729 match(mRegN);
4731 format %{ %}
4732 interface(REG_INTER);
4733 %}
4735 operand a6_RegN() %{
4736 constraint(ALLOC_IN_RC(a6_reg));
4737 match(RegN);
4738 match(mRegN);
4740 format %{ %}
4741 interface(REG_INTER);
4742 %}
4744 operand a7_RegN() %{
4745 constraint(ALLOC_IN_RC(a7_reg));
4746 match(RegN);
4747 match(mRegN);
4749 format %{ %}
4750 interface(REG_INTER);
4751 %}
// Fixed-register narrow-oop operands for the saved registers S0..S7.
4753 operand s0_RegN() %{
4754 constraint(ALLOC_IN_RC(s0_reg));
4755 match(RegN);
4756 match(mRegN);
4758 format %{ %}
4759 interface(REG_INTER);
4760 %}
4762 operand s1_RegN() %{
4763 constraint(ALLOC_IN_RC(s1_reg));
4764 match(RegN);
4765 match(mRegN);
4767 format %{ %}
4768 interface(REG_INTER);
4769 %}
4771 operand s2_RegN() %{
4772 constraint(ALLOC_IN_RC(s2_reg));
4773 match(RegN);
4774 match(mRegN);
4776 format %{ %}
4777 interface(REG_INTER);
4778 %}
4780 operand s3_RegN() %{
4781 constraint(ALLOC_IN_RC(s3_reg));
4782 match(RegN);
4783 match(mRegN);
4785 format %{ %}
4786 interface(REG_INTER);
4787 %}
4789 operand s4_RegN() %{
4790 constraint(ALLOC_IN_RC(s4_reg));
4791 match(RegN);
4792 match(mRegN);
4794 format %{ %}
4795 interface(REG_INTER);
4796 %}
4798 operand s5_RegN() %{
4799 constraint(ALLOC_IN_RC(s5_reg));
4800 match(RegN);
4801 match(mRegN);
4803 format %{ %}
4804 interface(REG_INTER);
4805 %}
4807 operand s6_RegN() %{
4808 constraint(ALLOC_IN_RC(s6_reg));
4809 match(RegN);
4810 match(mRegN);
4812 format %{ %}
4813 interface(REG_INTER);
4814 %}
4816 operand s7_RegN() %{
4817 constraint(ALLOC_IN_RC(s7_reg));
4818 match(RegN);
4819 match(mRegN);
4821 format %{ %}
4822 interface(REG_INTER);
4823 %}
// Fixed-register narrow-oop operands for the return-value registers V0 and V1.
4825 operand v0_RegN() %{
4826 constraint(ALLOC_IN_RC(v0_reg));
4827 match(RegN);
4828 match(mRegN);
4830 format %{ %}
4831 interface(REG_INTER);
4832 %}
4834 operand v1_RegN() %{
4835 constraint(ALLOC_IN_RC(v1_reg));
4836 match(RegN);
4837 match(mRegN);
4839 format %{ %}
4840 interface(REG_INTER);
4841 %}
4843 // Pointer Register
// General pointer-register operand: allocator may pick any register in p_reg.
4844 operand mRegP() %{
4845 constraint(ALLOC_IN_RC(p_reg));
4846 match(RegP);
4848 format %{ %}
4849 interface(REG_INTER);
4850 %}
// Pointer register excluding T8 (no_T8_p_reg class); used where T8 is reserved.
4852 operand no_T8_mRegP() %{
4853 constraint(ALLOC_IN_RC(no_T8_p_reg));
4854 match(RegP);
4855 match(mRegP);
4857 format %{ %}
4858 interface(REG_INTER);
4859 %}
// Fixed-register pointer operands for the saved registers S0..S7. Each also
// matches no_T8_mRegP, which is valid since none of S0..S7 is T8.
4861 operand s0_RegP()
4862 %{
4863 constraint(ALLOC_IN_RC(s0_long_reg));
4864 match(RegP);
4865 match(mRegP);
4866 match(no_T8_mRegP);
4868 format %{ %}
4869 interface(REG_INTER);
4870 %}
4872 operand s1_RegP()
4873 %{
4874 constraint(ALLOC_IN_RC(s1_long_reg));
4875 match(RegP);
4876 match(mRegP);
4877 match(no_T8_mRegP);
4879 format %{ %}
4880 interface(REG_INTER);
4881 %}
4883 operand s2_RegP()
4884 %{
4885 constraint(ALLOC_IN_RC(s2_long_reg));
4886 match(RegP);
4887 match(mRegP);
4888 match(no_T8_mRegP);
4890 format %{ %}
4891 interface(REG_INTER);
4892 %}
4894 operand s3_RegP()
4895 %{
4896 constraint(ALLOC_IN_RC(s3_long_reg));
4897 match(RegP);
4898 match(mRegP);
4899 match(no_T8_mRegP);
4901 format %{ %}
4902 interface(REG_INTER);
4903 %}
4905 operand s4_RegP()
4906 %{
4907 constraint(ALLOC_IN_RC(s4_long_reg));
4908 match(RegP);
4909 match(mRegP);
4910 match(no_T8_mRegP);
4912 format %{ %}
4913 interface(REG_INTER);
4914 %}
4916 operand s5_RegP()
4917 %{
4918 constraint(ALLOC_IN_RC(s5_long_reg));
4919 match(RegP);
4920 match(mRegP);
4921 match(no_T8_mRegP);
4923 format %{ %}
4924 interface(REG_INTER);
4925 %}
4927 operand s6_RegP()
4928 %{
4929 constraint(ALLOC_IN_RC(s6_long_reg));
4930 match(RegP);
4931 match(mRegP);
4932 match(no_T8_mRegP);
4934 format %{ %}
4935 interface(REG_INTER);
4936 %}
4938 operand s7_RegP()
4939 %{
4940 constraint(ALLOC_IN_RC(s7_long_reg));
4941 match(RegP);
4942 match(mRegP);
4943 match(no_T8_mRegP);
4945 format %{ %}
4946 interface(REG_INTER);
4947 %}
// Fixed-register pointer operands for the temporary registers T0..T3, T8, T9.
4949 operand t0_RegP()
4950 %{
4951 constraint(ALLOC_IN_RC(t0_long_reg));
4952 match(RegP);
4953 match(mRegP);
4954 match(no_T8_mRegP);
4956 format %{ %}
4957 interface(REG_INTER);
4958 %}
4960 operand t1_RegP()
4961 %{
4962 constraint(ALLOC_IN_RC(t1_long_reg));
4963 match(RegP);
4964 match(mRegP);
4965 match(no_T8_mRegP);
4967 format %{ %}
4968 interface(REG_INTER);
4969 %}
4971 operand t2_RegP()
4972 %{
4973 constraint(ALLOC_IN_RC(t2_long_reg));
4974 match(RegP);
4975 match(mRegP);
4976 match(no_T8_mRegP);
4978 format %{ %}
4979 interface(REG_INTER);
4980 %}
4982 operand t3_RegP()
4983 %{
4984 constraint(ALLOC_IN_RC(t3_long_reg));
4985 match(RegP);
4986 match(mRegP);
4987 match(no_T8_mRegP);
4989 format %{ %}
4990 interface(REG_INTER);
4991 %}
// t8_RegP deliberately has NO match(no_T8_mRegP): T8 is excluded from that class.
4993 operand t8_RegP()
4994 %{
4995 constraint(ALLOC_IN_RC(t8_long_reg));
4996 match(RegP);
4997 match(mRegP);
4999 format %{ %}
5000 interface(REG_INTER);
5001 %}
5003 operand t9_RegP()
5004 %{
5005 constraint(ALLOC_IN_RC(t9_long_reg));
5006 match(RegP);
5007 match(mRegP);
5008 match(no_T8_mRegP);
5010 format %{ %}
5011 interface(REG_INTER);
5012 %}
// Fixed-register pointer operands for the argument registers A0..A7.
5014 operand a0_RegP()
5015 %{
5016 constraint(ALLOC_IN_RC(a0_long_reg));
5017 match(RegP);
5018 match(mRegP);
5019 match(no_T8_mRegP);
5021 format %{ %}
5022 interface(REG_INTER);
5023 %}
5025 operand a1_RegP()
5026 %{
5027 constraint(ALLOC_IN_RC(a1_long_reg));
5028 match(RegP);
5029 match(mRegP);
5030 match(no_T8_mRegP);
5032 format %{ %}
5033 interface(REG_INTER);
5034 %}
5036 operand a2_RegP()
5037 %{
5038 constraint(ALLOC_IN_RC(a2_long_reg));
5039 match(RegP);
5040 match(mRegP);
5041 match(no_T8_mRegP);
5043 format %{ %}
5044 interface(REG_INTER);
5045 %}
5047 operand a3_RegP()
5048 %{
5049 constraint(ALLOC_IN_RC(a3_long_reg));
5050 match(RegP);
5051 match(mRegP);
5052 match(no_T8_mRegP);
5054 format %{ %}
5055 interface(REG_INTER);
5056 %}
5058 operand a4_RegP()
5059 %{
5060 constraint(ALLOC_IN_RC(a4_long_reg));
5061 match(RegP);
5062 match(mRegP);
5063 match(no_T8_mRegP);
5065 format %{ %}
5066 interface(REG_INTER);
5067 %}
5070 operand a5_RegP()
5071 %{
5072 constraint(ALLOC_IN_RC(a5_long_reg));
5073 match(RegP);
5074 match(mRegP);
5075 match(no_T8_mRegP);
5077 format %{ %}
5078 interface(REG_INTER);
5079 %}
5081 operand a6_RegP()
5082 %{
5083 constraint(ALLOC_IN_RC(a6_long_reg));
5084 match(RegP);
5085 match(mRegP);
5086 match(no_T8_mRegP);
5088 format %{ %}
5089 interface(REG_INTER);
5090 %}
5092 operand a7_RegP()
5093 %{
5094 constraint(ALLOC_IN_RC(a7_long_reg));
5095 match(RegP);
5096 match(mRegP);
5097 match(no_T8_mRegP);
5099 format %{ %}
5100 interface(REG_INTER);
5101 %}
// Fixed-register pointer operands for the return-value registers V0 and V1.
5103 operand v0_RegP()
5104 %{
5105 constraint(ALLOC_IN_RC(v0_long_reg));
5106 match(RegP);
5107 match(mRegP);
5108 match(no_T8_mRegP);
5110 format %{ %}
5111 interface(REG_INTER);
5112 %}
5114 operand v1_RegP()
5115 %{
5116 constraint(ALLOC_IN_RC(v1_long_reg));
5117 match(RegP);
5118 match(mRegP);
5119 match(no_T8_mRegP);
5121 format %{ %}
5122 interface(REG_INTER);
5123 %}
// SP/FP operands below are intentionally disabled (kept for reference only).
5125 /*
5126 operand mSPRegP(mRegP reg) %{
5127 constraint(ALLOC_IN_RC(sp_reg));
5128 match(reg);
5130 format %{ "SP" %}
5131 interface(REG_INTER);
5132 %}
5134 operand mFPRegP(mRegP reg) %{
5135 constraint(ALLOC_IN_RC(fp_reg));
5136 match(reg);
5138 format %{ "FP" %}
5139 interface(REG_INTER);
5140 %}
5141 */
// General long-register operand: allocator may pick any register in long_reg.
5143 operand mRegL() %{
5144 constraint(ALLOC_IN_RC(long_reg));
5145 match(RegL);
5147 format %{ %}
5148 interface(REG_INTER);
5149 %}
// Fixed-register long operands, one per physical register.
// NOTE(review): only a0RegL carries a format string ("A0"); the rest print the
// default -- cosmetic inconsistency in debug output only.
5151 operand v0RegL() %{
5152 constraint(ALLOC_IN_RC(v0_long_reg));
5153 match(RegL);
5154 match(mRegL);
5156 format %{ %}
5157 interface(REG_INTER);
5158 %}
5160 operand v1RegL() %{
5161 constraint(ALLOC_IN_RC(v1_long_reg));
5162 match(RegL);
5163 match(mRegL);
5165 format %{ %}
5166 interface(REG_INTER);
5167 %}
5169 operand a0RegL() %{
5170 constraint(ALLOC_IN_RC(a0_long_reg));
5171 match(RegL);
5172 match(mRegL);
5174 format %{ "A0" %}
5175 interface(REG_INTER);
5176 %}
5178 operand a1RegL() %{
5179 constraint(ALLOC_IN_RC(a1_long_reg));
5180 match(RegL);
5181 match(mRegL);
5183 format %{ %}
5184 interface(REG_INTER);
5185 %}
5187 operand a2RegL() %{
5188 constraint(ALLOC_IN_RC(a2_long_reg));
5189 match(RegL);
5190 match(mRegL);
5192 format %{ %}
5193 interface(REG_INTER);
5194 %}
5196 operand a3RegL() %{
5197 constraint(ALLOC_IN_RC(a3_long_reg));
5198 match(RegL);
5199 match(mRegL);
5201 format %{ %}
5202 interface(REG_INTER);
5203 %}
5205 operand t0RegL() %{
5206 constraint(ALLOC_IN_RC(t0_long_reg));
5207 match(RegL);
5208 match(mRegL);
5210 format %{ %}
5211 interface(REG_INTER);
5212 %}
5214 operand t1RegL() %{
5215 constraint(ALLOC_IN_RC(t1_long_reg));
5216 match(RegL);
5217 match(mRegL);
5219 format %{ %}
5220 interface(REG_INTER);
5221 %}
5223 operand t2RegL() %{
5224 constraint(ALLOC_IN_RC(t2_long_reg));
5225 match(RegL);
5226 match(mRegL);
5228 format %{ %}
5229 interface(REG_INTER);
5230 %}
5232 operand t3RegL() %{
5233 constraint(ALLOC_IN_RC(t3_long_reg));
5234 match(RegL);
5235 match(mRegL);
5237 format %{ %}
5238 interface(REG_INTER);
5239 %}
5241 operand t8RegL() %{
5242 constraint(ALLOC_IN_RC(t8_long_reg));
5243 match(RegL);
5244 match(mRegL);
5246 format %{ %}
5247 interface(REG_INTER);
5248 %}
5250 operand a4RegL() %{
5251 constraint(ALLOC_IN_RC(a4_long_reg));
5252 match(RegL);
5253 match(mRegL);
5255 format %{ %}
5256 interface(REG_INTER);
5257 %}
5259 operand a5RegL() %{
5260 constraint(ALLOC_IN_RC(a5_long_reg));
5261 match(RegL);
5262 match(mRegL);
5264 format %{ %}
5265 interface(REG_INTER);
5266 %}
5268 operand a6RegL() %{
5269 constraint(ALLOC_IN_RC(a6_long_reg));
5270 match(RegL);
5271 match(mRegL);
5273 format %{ %}
5274 interface(REG_INTER);
5275 %}
5277 operand a7RegL() %{
5278 constraint(ALLOC_IN_RC(a7_long_reg));
5279 match(RegL);
5280 match(mRegL);
5282 format %{ %}
5283 interface(REG_INTER);
5284 %}
5286 operand s0RegL() %{
5287 constraint(ALLOC_IN_RC(s0_long_reg));
5288 match(RegL);
5289 match(mRegL);
5291 format %{ %}
5292 interface(REG_INTER);
5293 %}
5295 operand s1RegL() %{
5296 constraint(ALLOC_IN_RC(s1_long_reg));
5297 match(RegL);
5298 match(mRegL);
5300 format %{ %}
5301 interface(REG_INTER);
5302 %}
5304 operand s2RegL() %{
5305 constraint(ALLOC_IN_RC(s2_long_reg));
5306 match(RegL);
5307 match(mRegL);
5309 format %{ %}
5310 interface(REG_INTER);
5311 %}
5313 operand s3RegL() %{
5314 constraint(ALLOC_IN_RC(s3_long_reg));
5315 match(RegL);
5316 match(mRegL);
5318 format %{ %}
5319 interface(REG_INTER);
5320 %}
5322 operand s4RegL() %{
5323 constraint(ALLOC_IN_RC(s4_long_reg));
5324 match(RegL);
5325 match(mRegL);
5327 format %{ %}
5328 interface(REG_INTER);
5329 %}
// NOTE(review): s5RegL and s6RegL are not defined -- presumably those registers
// are reserved elsewhere; confirm against the register definitions.
5331 operand s7RegL() %{
5332 constraint(ALLOC_IN_RC(s7_long_reg));
5333 match(RegL);
5334 match(mRegL);
5336 format %{ %}
5337 interface(REG_INTER);
5338 %}
5340 // Floating register operands
// Single-precision float register; any register in flt_reg.
5341 operand regF() %{
5342 constraint(ALLOC_IN_RC(flt_reg));
5343 match(RegF);
5345 format %{ %}
5346 interface(REG_INTER);
5347 %}
5349 //Double Precision Floating register operands
// Double-precision float register; any register in dbl_reg.
5350 operand regD() %{
5351 constraint(ALLOC_IN_RC(dbl_reg));
5352 match(RegD);
5354 format %{ %}
5355 interface(REG_INTER);
5356 %}
5358 //----------Memory Operands----------------------------------------------------
5359 // Indirect Memory Operand
// Address form [reg]: base register only, no index, no displacement.
5360 operand indirect(mRegP reg) %{
5361 constraint(ALLOC_IN_RC(p_reg));
5362 match(reg);
5364 format %{ "[$reg] @ indirect" %}
5365 interface(MEMORY_INTER) %{
5366 base($reg);
5367 index(0x0); /* NO_INDEX */
5368 scale(0x0);
5369 disp(0x0);
5370 %}
5371 %}
5373 // Indirect Memory Plus Short Offset Operand
// Address form [reg + off] with a small (immL8) displacement.
5374 operand indOffset8(mRegP reg, immL8 off)
5375 %{
5376 constraint(ALLOC_IN_RC(p_reg));
5377 match(AddP reg off);
5379 op_cost(10);
5380 format %{ "[$reg + $off (8-bit)] @ indOffset8" %}
5381 interface(MEMORY_INTER) %{
5382 base($reg);
5383 index(0x0); /* NO_INDEX */
5384 scale(0x0);
5385 disp($off);
5386 %}
5387 %}
5389 // Indirect Memory Times Scale Plus Index Register
// Address form [reg + (lreg << scale)], scale limited to 0..3 by immI2.
5390 operand indIndexScale(mRegP reg, mRegL lreg, immI2 scale)
5391 %{
5392 constraint(ALLOC_IN_RC(p_reg));
5393 match(AddP reg (LShiftL lreg scale));
5395 op_cost(10);
5396 format %{"[$reg + $lreg << $scale] @ indIndexScale" %}
5397 interface(MEMORY_INTER) %{
5398 base($reg);
5399 index($lreg);
5400 scale($scale);
5401 disp(0x0);
5402 %}
5403 %}
5406 // [base + index + offset]
// Unscaled base + long index + small displacement.
5407 operand baseIndexOffset8(mRegP base, mRegL index, immL8 off)
5408 %{
5409 constraint(ALLOC_IN_RC(p_reg));
5410 op_cost(5);
5411 match(AddP (AddP base index) off);
5413 format %{ "[$base + $index + $off (8-bit)] @ baseIndexOffset8" %}
5414 interface(MEMORY_INTER) %{
5415 base($base);
5416 index($index);
5417 scale(0x0);
5418 disp($off);
5419 %}
5420 %}
5422 // [base + index + offset]
// Same shape as baseIndexOffset8, but the index is an int widened via ConvI2L.
5423 operand baseIndexOffset8_convI2L(mRegP base, mRegI index, immL8 off)
5424 %{
5425 constraint(ALLOC_IN_RC(p_reg));
5426 op_cost(5);
5427 match(AddP (AddP base (ConvI2L index)) off);
5429 format %{ "[$base + $index + $off (8-bit)] @ baseIndexOffset8_convI2L" %}
5430 interface(MEMORY_INTER) %{
5431 base($base);
5432 index($index);
5433 scale(0x0);
5434 disp($off);
5435 %}
5436 %}
5438 // Indirect Memory Times Scale Plus Index Register Plus Offset Operand
// Address form [reg + off + (lreg << scale)].
5439 operand indIndexScaleOffset8(mRegP reg, immL8 off, mRegL lreg, immI2 scale)
5440 %{
5441 constraint(ALLOC_IN_RC(p_reg));
5442 match(AddP (AddP reg (LShiftL lreg scale)) off);
5444 op_cost(10);
5445 format %{"[$reg + $off + $lreg << $scale] @ indIndexScaleOffset8" %}
5446 interface(MEMORY_INTER) %{
5447 base($reg);
5448 index($lreg);
5449 scale($scale);
5450 disp($off);
5451 %}
5452 %}
// As indIndexScaleOffset8, but with an int index widened via ConvI2L.
5454 operand indIndexScaleOffset8_convI2L(mRegP reg, immL8 off, mRegI ireg, immI2 scale)
5455 %{
5456 constraint(ALLOC_IN_RC(p_reg));
5457 match(AddP (AddP reg (LShiftL (ConvI2L ireg) scale)) off);
5459 op_cost(10);
5460 format %{"[$reg + $off + $ireg << $scale] @ indIndexScaleOffset8_convI2L" %}
5461 interface(MEMORY_INTER) %{
5462 base($reg);
5463 index($ireg);
5464 scale($scale);
5465 disp($off);
5466 %}
5467 %}
5469 // [base + index<<scale + offset]
// Scale may be any of 0..31 (immI_0_31), unlike the immI2-limited variants above.
5470 operand basePosIndexScaleOffset8(mRegP base, mRegI index, immL8 off, immI_0_31 scale)
5471 %{
5472 constraint(ALLOC_IN_RC(p_reg));
5473 //predicate(n->in(2)->in(3)->in(1)->as_Type()->type()->is_long()->_lo >= 0);
5474 op_cost(10);
5475 match(AddP (AddP base (LShiftL (ConvI2L index) scale)) off);
5477 format %{ "[$base + $index << $scale + $off (8-bit)] @ basePosIndexScaleOffset8" %}
5478 interface(MEMORY_INTER) %{
5479 base($base);
5480 index($index);
5481 scale($scale);
5482 disp($off);
5483 %}
5484 %}
5486 // Indirect Memory Times Scale Plus Index Register Plus Offset Operand
// Base is a narrow oop decoded via DecodeN; valid only when narrow oops need
// no shift, so the compressed value is the address itself.
5487 operand indIndexScaleOffsetNarrow(mRegN reg, immL8 off, mRegL lreg, immI2 scale)
5488 %{
5489 predicate(Universe::narrow_oop_shift() == 0);
5490 constraint(ALLOC_IN_RC(p_reg));
5491 match(AddP (AddP (DecodeN reg) (LShiftL lreg scale)) off);
5493 op_cost(10);
5494 format %{"[$reg + $off + $lreg << $scale] @ indIndexScaleOffsetNarrow" %}
5495 interface(MEMORY_INTER) %{
5496 base($reg);
5497 index($lreg);
5498 scale($scale);
5499 disp($off);
5500 %}
5501 %}
5503 // [base + index<<scale + offset] for compressed Oops
// Narrow-oop base (DecodeN) with an int index widened via ConvI2L; only valid
// when narrow_oop_shift() == 0.
5504 operand indPosIndexI2LScaleOffset8Narrow(mRegN base, mRegI index, immL8 off, immI_0_31 scale)
5505 %{
5506 constraint(ALLOC_IN_RC(p_reg));
5507 //predicate(Universe::narrow_oop_shift() == 0 && n->in(2)->in(3)->in(1)->as_Type()->type()->is_long()->_lo >= 0);
5508 predicate(Universe::narrow_oop_shift() == 0);
5509 op_cost(10);
5510 match(AddP (AddP (DecodeN base) (LShiftL (ConvI2L index) scale)) off);
5512 format %{ "[$base + $index << $scale + $off (8-bit)] @ indPosIndexI2LScaleOffset8Narrow" %}
5513 interface(MEMORY_INTER) %{
5514 base($base);
5515 index($index);
5516 scale($scale);
5517 disp($off);
5518 %}
5519 %}
5521 //FIXME: I think it's better to limit the immI to be 16-bit at most!
5522 // Indirect Memory Plus Long Offset Operand
// Address form [reg + off] with a signed 32-bit displacement (immL32).
5523 operand indOffset32(mRegP reg, immL32 off) %{
5524 constraint(ALLOC_IN_RC(p_reg));
5525 op_cost(20);
5526 match(AddP reg off);
5528 format %{ "[$reg + $off (32-bit)] @ indOffset32" %}
5529 interface(MEMORY_INTER) %{
5530 base($reg);
5531 index(0x0); /* NO_INDEX */
5532 scale(0x0);
5533 disp($off);
5534 %}
5535 %}
5537 // Indirect Memory Plus Index Register
// Address form [addr + index]: unscaled register index, no displacement.
5538 operand indIndex(mRegP addr, mRegL index) %{
5539 constraint(ALLOC_IN_RC(p_reg));
5540 match(AddP addr index);
5542 op_cost(20);
5543 format %{"[$addr + $index] @ indIndex" %}
5544 interface(MEMORY_INTER) %{
5545 base($addr);
5546 index($index);
5547 scale(0x0);
5548 disp(0x0);
5549 %}
5550 %}
// Indirect through a narrow (compressed) klass pointer; valid only when klass
// pointers need no shift to decode.
5552 operand indirectNarrowKlass(mRegN reg)
5553 %{
5554 predicate(Universe::narrow_klass_shift() == 0);
5555 constraint(ALLOC_IN_RC(p_reg));
5556 op_cost(10);
5557 match(DecodeNKlass reg);
5559 format %{ "[$reg] @ indirectNarrowKlass" %}
5560 interface(MEMORY_INTER) %{
5561 base($reg);
5562 index(0x0);
5563 scale(0x0);
5564 disp(0x0);
5565 %}
5566 %}
// Narrow-klass base plus small (immL8) displacement; shift must be zero.
5568 operand indOffset8NarrowKlass(mRegN reg, immL8 off)
5569 %{
5570 predicate(Universe::narrow_klass_shift() == 0);
5571 constraint(ALLOC_IN_RC(p_reg));
5572 op_cost(10);
5573 match(AddP (DecodeNKlass reg) off);
5575 format %{ "[$reg + $off (8-bit)] @ indOffset8NarrowKlass" %}
5576 interface(MEMORY_INTER) %{
5577 base($reg);
5578 index(0x0);
5579 scale(0x0);
5580 disp($off);
5581 %}
5582 %}
// Narrow-klass base plus signed 32-bit displacement; shift must be zero.
5584 operand indOffset32NarrowKlass(mRegN reg, immL32 off)
5585 %{
5586 predicate(Universe::narrow_klass_shift() == 0);
5587 constraint(ALLOC_IN_RC(p_reg));
5588 op_cost(10);
5589 match(AddP (DecodeNKlass reg) off);
5591 format %{ "[$reg + $off (32-bit)] @ indOffset32NarrowKlass" %}
5592 interface(MEMORY_INTER) %{
5593 base($reg);
5594 index(0x0);
5595 scale(0x0);
5596 disp($off);
5597 %}
5598 %}
// Narrow-klass base plus unscaled long index plus 32-bit displacement.
5600 operand indIndexOffsetNarrowKlass(mRegN reg, mRegL lreg, immL32 off)
5601 %{
5602 predicate(Universe::narrow_klass_shift() == 0);
5603 constraint(ALLOC_IN_RC(p_reg));
5604 match(AddP (AddP (DecodeNKlass reg) lreg) off);
5606 op_cost(10);
5607 format %{"[$reg + $off + $lreg] @ indIndexOffsetNarrowKlass" %}
5608 interface(MEMORY_INTER) %{
5609 base($reg);
5610 index($lreg);
5611 scale(0x0);
5612 disp($off);
5613 %}
5614 %}
// Narrow-klass base plus unscaled long index, no displacement.
5616 operand indIndexNarrowKlass(mRegN reg, mRegL lreg)
5617 %{
5618 predicate(Universe::narrow_klass_shift() == 0);
5619 constraint(ALLOC_IN_RC(p_reg));
5620 match(AddP (DecodeNKlass reg) lreg);
5622 op_cost(10);
5623 format %{"[$reg + $lreg] @ indIndexNarrowKlass" %}
5624 interface(MEMORY_INTER) %{
5625 base($reg);
5626 index($lreg);
5627 scale(0x0);
5628 disp(0x0);
5629 %}
5630 %}
5632 // Indirect Memory Operand
// Indirect through a narrow (compressed) oop; valid only when oops decode
// without a shift.
5633 operand indirectNarrow(mRegN reg)
5634 %{
5635 predicate(Universe::narrow_oop_shift() == 0);
5636 constraint(ALLOC_IN_RC(p_reg));
5637 op_cost(10);
5638 match(DecodeN reg);
5640 format %{ "[$reg] @ indirectNarrow" %}
5641 interface(MEMORY_INTER) %{
5642 base($reg);
5643 index(0x0);
5644 scale(0x0);
5645 disp(0x0);
5646 %}
5647 %}
5649 // Indirect Memory Plus Short Offset Operand
// Narrow-oop base plus small (immL8) displacement; oop shift must be zero.
5650 operand indOffset8Narrow(mRegN reg, immL8 off)
5651 %{
5652 predicate(Universe::narrow_oop_shift() == 0);
5653 constraint(ALLOC_IN_RC(p_reg));
5654 op_cost(10);
5655 match(AddP (DecodeN reg) off);
5657 format %{ "[$reg + $off (8-bit)] @ indOffset8Narrow" %}
5658 interface(MEMORY_INTER) %{
5659 base($reg);
5660 index(0x0);
5661 scale(0x0);
5662 disp($off);
5663 %}
5664 %}
5666 // Indirect Memory Plus Index Register Plus Offset Operand
// Narrow-oop base plus unscaled long index plus small displacement.
5667 operand indIndexOffset8Narrow(mRegN reg, mRegL lreg, immL8 off)
5668 %{
5669 predicate(Universe::narrow_oop_shift() == 0);
5670 constraint(ALLOC_IN_RC(p_reg));
5671 match(AddP (AddP (DecodeN reg) lreg) off);
5673 op_cost(10);
5674 format %{"[$reg + $off + $lreg] @ indIndexOffset8Narrow" %}
5675 interface(MEMORY_INTER) %{
5676 base($reg);
5677 index($lreg);
5678 scale(0x0);
5679 disp($off);
5680 %}
5681 %}
5683 //----------Load Long Memory Operands------------------------------------------
5684 // The load-long idiom will use its address expression again after loading
5685 // the first word of the long. If the load-long destination overlaps with
5686 // registers used in the addressing expression, the 2nd half will be loaded
5687 // from a clobbered address. Fix this by requiring that load-long use
5688 // address registers that do not overlap with the load-long target.
5690 // load-long support
// High op_cost makes this operand unattractive to the matcher except where a
// load-long address rule explicitly asks for it.
5691 operand load_long_RegP() %{
5692 constraint(ALLOC_IN_RC(p_reg));
5693 match(RegP);
5694 match(mRegP);
5695 op_cost(100);
5696 format %{ %}
5697 interface(REG_INTER);
5698 %}
5700 // Indirect Memory Operand Long
// [reg] with the load-long register restriction described above.
5701 operand load_long_indirect(load_long_RegP reg) %{
5702 constraint(ALLOC_IN_RC(p_reg));
5703 match(reg);
5705 format %{ "[$reg]" %}
5706 interface(MEMORY_INTER) %{
5707 base($reg);
5708 index(0x0);
5709 scale(0x0);
5710 disp(0x0);
5711 %}
5712 %}
5714 // Indirect Memory Plus Long Offset Operand
// [reg + off] with the load-long register restriction described above.
5715 operand load_long_indOffset32(load_long_RegP reg, immL32 off) %{
5716 match(AddP reg off);
5718 format %{ "[$reg + $off]" %}
5719 interface(MEMORY_INTER) %{
5720 base($reg);
5721 index(0x0);
5722 scale(0x0);
5723 disp($off);
5724 %}
5725 %}
5727 //----------Conditional Branch Operands----------------------------------------
5728 // Comparison Op - This is the operation of the comparison, and is limited to
5729 // the following set of codes:
5730 // L (<), LE (<=), G (>), GE (>=), E (==), NE (!=)
5731 //
5732 // Other attributes of the comparison, such as unsignedness, are specified
5733 // by the comparison instruction that sets a condition code flags register.
5734 // That result is represented by a flags operand whose subtype is appropriate
5735 // to the unsignedness (etc.) of the comparison.
5736 //
5737 // Later, the instruction which matches both the Comparison Op (a Bool) and
5738 // the flags (produced by the Cmp) specifies the coding of the comparison op
5739 // by matching a specific subtype of Bool operand below, such as cmpOpU.
5741 // Comparison Code (signed). Encodings 0x01..0x08 are port-local condition
5742 // codes consumed by the instruction encodings, not hardware opcode bits.
5742 operand cmpOp() %{
5743 match(Bool);
5745 format %{ "" %}
5746 interface(COND_INTER) %{
5747 equal(0x01);
5748 not_equal(0x02);
5749 greater(0x03);
5750 greater_equal(0x04);
5751 less(0x05);
5752 less_equal(0x06);
5753 overflow(0x7);
5754 no_overflow(0x8);
5755 %}
5756 %}
5759 // Comparison Code, unsigned compare.
5760 // NOTE(review): the C0/C2/C3 remarks below look like x86 FPU-flag legacy text;
5761 // they do not describe this port's encoding -- the codes here are identical
5762 // to the signed cmpOp above.
5763 operand cmpOpU() %{
5764 match(Bool);
5766 format %{ "" %}
5767 interface(COND_INTER) %{
5768 equal(0x01);
5769 not_equal(0x02);
5770 greater(0x03);
5771 greater_equal(0x04);
5772 less(0x05);
5773 less_equal(0x06);
5774 overflow(0x7);
5775 no_overflow(0x8);
5776 %}
5777 %}
5779 /*
5780 // Comparison Code, unsigned compare. Used by FP also, with
5781 // C2 (unordered) turned into GT or LT already. The other bits
5782 // C0 and C3 are turned into Carry & Zero flags.
5783 operand cmpOpU() %{
5784 match(Bool);
5786 format %{ "" %}
5787 interface(COND_INTER) %{
5788 equal(0x4);
5789 not_equal(0x5);
5790 less(0x2);
5791 greater_equal(0x3);
5792 less_equal(0x6);
5793 greater(0x7);
5794 %}
5795 %}
5796 */
5797 /*
5798 // Comparison Code for FP conditional move
5799 operand cmpOp_fcmov() %{
5800 match(Bool);
5802 format %{ "" %}
5803 interface(COND_INTER) %{
5804 equal (0x01);
5805 not_equal (0x02);
5806 greater (0x03);
5807 greater_equal(0x04);
5808 less (0x05);
5809 less_equal (0x06);
5810 %}
5811 %}
5813 // Comparison Code used in long compares
5814 operand cmpOp_commute() %{
5815 match(Bool);
5817 format %{ "" %}
5818 interface(COND_INTER) %{
5819 equal(0x4);
5820 not_equal(0x5);
5821 less(0xF);
5822 greater_equal(0xE);
5823 less_equal(0xD);
5824 greater(0xC);
5825 %}
5826 %}
5827 */
5829 //----------Special Memory Operands--------------------------------------------
5830 // Stack Slot Operand - This operand is used for loading and storing temporary
5831 // values on the stack where a match requires a value to
5832 // flow through memory.
// Pointer-sized stack slot: SP-relative access, disp holds the slot offset.
5833 operand stackSlotP(sRegP reg) %{
5834 constraint(ALLOC_IN_RC(stack_slots));
5835 // No match rule because this operand is only generated in matching
5836 op_cost(50);
5837 format %{ "[$reg]" %}
5838 interface(MEMORY_INTER) %{
5839 base(0x1d); // SP
5840 index(0x0); // No Index
5841 scale(0x0); // No Scale
5842 disp($reg); // Stack Offset
5843 %}
5844 %}
// Int stack slot: SP-relative, same layout as stackSlotP.
5846 operand stackSlotI(sRegI reg) %{
5847 constraint(ALLOC_IN_RC(stack_slots));
5848 // No match rule because this operand is only generated in matching
5849 op_cost(50);
5850 format %{ "[$reg]" %}
5851 interface(MEMORY_INTER) %{
5852 base(0x1d); // SP
5853 index(0x0); // No Index
5854 scale(0x0); // No Scale
5855 disp($reg); // Stack Offset
5856 %}
5857 %}
// Float stack slot: SP-relative, same layout as stackSlotP.
5859 operand stackSlotF(sRegF reg) %{
5860 constraint(ALLOC_IN_RC(stack_slots));
5861 // No match rule because this operand is only generated in matching
5862 op_cost(50);
5863 format %{ "[$reg]" %}
5864 interface(MEMORY_INTER) %{
5865 base(0x1d); // SP
5866 index(0x0); // No Index
5867 scale(0x0); // No Scale
5868 disp($reg); // Stack Offset
5869 %}
5870 %}
// Double stack slot: SP-relative, same layout as stackSlotP.
5872 operand stackSlotD(sRegD reg) %{
5873 constraint(ALLOC_IN_RC(stack_slots));
5874 // No match rule because this operand is only generated in matching
5875 op_cost(50);
5876 format %{ "[$reg]" %}
5877 interface(MEMORY_INTER) %{
5878 base(0x1d); // SP
5879 index(0x0); // No Index
5880 scale(0x0); // No Scale
5881 disp($reg); // Stack Offset
5882 %}
5883 %}
// Long stack slot: SP-relative, same layout as stackSlotP.
5885 operand stackSlotL(sRegL reg) %{
5886 constraint(ALLOC_IN_RC(stack_slots));
5887 // No match rule because this operand is only generated in matching
5888 op_cost(50);
5889 format %{ "[$reg]" %}
5890 interface(MEMORY_INTER) %{
5891 base(0x1d); // SP
5892 index(0x0); // No Index
5893 scale(0x0); // No Scale
5894 disp($reg); // Stack Offset
5895 %}
5896 %}
5899 //------------------------OPERAND CLASSES--------------------------------------
5900 //opclass memory( direct, indirect, indOffset16, indOffset32, indOffset32X, indIndexOffset );
// 'memory' groups every addressing-mode operand above so a single ins can
// accept any of them; an operand must be listed here to participate.
5901 opclass memory( indirect, indirectNarrow, indOffset8, indOffset32, indIndex, indIndexScale, load_long_indirect, load_long_indOffset32, baseIndexOffset8, baseIndexOffset8_convI2L, indIndexScaleOffset8, indIndexScaleOffset8_convI2L, basePosIndexScaleOffset8, indIndexScaleOffsetNarrow, indPosIndexI2LScaleOffset8Narrow, indOffset8Narrow, indIndexOffset8Narrow);
5904 //----------PIPELINE-----------------------------------------------------------
5905 // Rules which define the behavior of the target architectures pipeline.
// Scheduling model used by C2's instruction scheduler; latencies are
// approximate costs, not cycle-exact hardware timings.
5907 pipeline %{
5909 //----------ATTRIBUTES---------------------------------------------------------
5910 attributes %{
5911 fixed_size_instructions; // Fixed size instructions
5912 branch_has_delay_slot; // branches have a delay slot on gs2
5913 max_instructions_per_bundle = 1; // 1 instruction per bundle
5914 max_bundles_per_cycle = 4; // Up to 4 bundles per cycle
5915 bundle_unit_size=4;
5916 instruction_unit_size = 4; // An instruction is 4 bytes long
5917 instruction_fetch_unit_size = 16; // The processor fetches one line
5918 instruction_fetch_units = 1; // of 16 bytes
5920 // List of nop instructions
5921 nops( MachNop );
5922 %}
5924 //----------RESOURCES----------------------------------------------------------
5925 // Resources are the functional units available to the machine
// 4 decoders, 2 integer ALUs, 2 FPUs, one memory port, one branch unit.
5927 resources(D1, D2, D3, D4, DECODE = D1 | D2 | D3| D4, ALU1, ALU2, ALU = ALU1 | ALU2, FPU1, FPU2, FPU = FPU1 | FPU2, MEM, BR);
5929 //----------PIPELINE DESCRIPTION-----------------------------------------------
5930 // Pipeline Description specifies the stages in the machine's pipeline
5932 // IF: fetch
5933 // ID: decode
5934 // RD: read
5935 // CA: calculate
5936 // WB: write back
5937 // CM: commit
5939 pipe_desc(IF, ID, RD, CA, WB, CM);
5942 //----------PIPELINE CLASSES---------------------------------------------------
5943 // Pipeline Classes describe the stages in which input and output are
5944 // referenced by the hardware pipeline.
5946 //No.1 Integer ALU reg-reg operation : dst <-- reg1 op reg2
5947 pipe_class ialu_regI_regI(mRegI dst, mRegI src1, mRegI src2) %{
5948 single_instruction;
5949 src1 : RD(read);
5950 src2 : RD(read);
5951 dst : WB(write)+1;
5952 DECODE : ID;
5953 ALU : CA;
5954 %}
5956 //No.19 Integer mult operation : dst <-- reg1 mult reg2
5957 pipe_class ialu_mult(mRegI dst, mRegI src1, mRegI src2) %{
5958 src1 : RD(read);
5959 src2 : RD(read);
5960 dst : WB(write)+5;
5961 DECODE : ID;
5962 ALU2 : CA;
5963 %}
// Long multiply: higher result latency than 32-bit multiply.
5965 pipe_class mulL_reg_reg(mRegL dst, mRegL src1, mRegL src2) %{
5966 src1 : RD(read);
5967 src2 : RD(read);
5968 dst : WB(write)+10;
5969 DECODE : ID;
5970 ALU2 : CA;
5971 %}
5973 //No.19 Integer div operation : dst <-- reg1 div reg2
5974 pipe_class ialu_div(mRegI dst, mRegI src1, mRegI src2) %{
5975 src1 : RD(read);
5976 src2 : RD(read);
5977 dst : WB(write)+10;
5978 DECODE : ID;
5979 ALU2 : CA;
5980 %}
5982 //No.19 Integer mod operation : dst <-- reg1 mod reg2
5983 pipe_class ialu_mod(mRegI dst, mRegI src1, mRegI src2) %{
5984 instruction_count(2);
5985 src1 : RD(read);
5986 src2 : RD(read);
5987 dst : WB(write)+10;
5988 DECODE : ID;
5989 ALU2 : CA;
5990 %}
5992 //No.15 Long ALU reg-reg operation : dst <-- reg1 op reg2
5993 pipe_class ialu_regL_regL(mRegL dst, mRegL src1, mRegL src2) %{
5994 instruction_count(2);
5995 src1 : RD(read);
5996 src2 : RD(read);
5997 dst : WB(write);
5998 DECODE : ID;
5999 ALU : CA;
6000 %}
6002 //No.18 Long ALU reg-imm16 operation : dst <-- reg1 op imm16
6003 pipe_class ialu_regL_imm16(mRegL dst, mRegL src) %{
6004 instruction_count(2);
6005 src : RD(read);
6006 dst : WB(write);
6007 DECODE : ID;
6008 ALU : CA;
6009 %}
6011 //no.16 load Long from memory :
6012 pipe_class ialu_loadL(mRegL dst, memory mem) %{
6013 instruction_count(2);
6014 mem : RD(read);
6015 dst : WB(write)+5;
6016 DECODE : ID;
6017 MEM : RD;
6018 %}
6020 //No.17 Store Long to Memory :
6021 pipe_class ialu_storeL(mRegL src, memory mem) %{
6022 instruction_count(2);
6023 mem : RD(read);
6024 src : RD(read);
6025 DECODE : ID;
6026 MEM : RD;
6027 %}
6029 //No.2 Integer ALU reg-imm16 operation : dst <-- reg1 op imm16
6030 pipe_class ialu_regI_imm16(mRegI dst, mRegI src) %{
6031 single_instruction;
6032 src : RD(read);
6033 dst : WB(write);
6034 DECODE : ID;
6035 ALU : CA;
6036 %}
6038 //No.3 Integer move operation : dst <-- reg
6039 pipe_class ialu_regI_mov(mRegI dst, mRegI src) %{
6040 src : RD(read);
6041 dst : WB(write);
6042 DECODE : ID;
6043 ALU : CA;
6044 %}
6046 //No.4 No instructions : do nothing
6047 pipe_class empty( ) %{
6048 instruction_count(0);
6049 %}
6051 //No.5 UnConditional branch :
6052 pipe_class pipe_jump( label labl ) %{
6053 multiple_bundles;
6054 DECODE : ID;
6055 BR : RD;
6056 %}
6058 //No.6 ALU Conditional branch :
6059 pipe_class pipe_alu_branch(mRegI src1, mRegI src2, label labl ) %{
6060 multiple_bundles;
6061 src1 : RD(read);
6062 src2 : RD(read);
6063 DECODE : ID;
6064 BR : RD;
6065 %}
6067 //no.7 load integer from memory :
6068 pipe_class ialu_loadI(mRegI dst, memory mem) %{
6069 mem : RD(read);
6070 dst : WB(write)+3;
6071 DECODE : ID;
6072 MEM : RD;
6073 %}
6075 //No.8 Store Integer to Memory :
6076 pipe_class ialu_storeI(mRegI src, memory mem) %{
6077 mem : RD(read);
6078 src : RD(read);
6079 DECODE : ID;
6080 MEM : RD;
6081 %}
6084 //No.10 Floating FPU reg-reg operation : dst <-- reg1 op reg2
6085 pipe_class fpu_regF_regF(regF dst, regF src1, regF src2) %{
6086 src1 : RD(read);
6087 src2 : RD(read);
6088 dst : WB(write);
6089 DECODE : ID;
6090 FPU : CA;
6091 %}
6093 //No.22 Floating div operation : dst <-- reg1 div reg2
6094 pipe_class fpu_div(regF dst, regF src1, regF src2) %{
6095 src1 : RD(read);
6096 src2 : RD(read);
6097 dst : WB(write);
6098 DECODE : ID;
6099 FPU2 : CA;
6100 %}
// int -> double conversion (runs on FPU1).
6102 pipe_class fcvt_I2D(regD dst, mRegI src) %{
6103 src : RD(read);
6104 dst : WB(write);
6105 DECODE : ID;
6106 FPU1 : CA;
6107 %}
// double -> int conversion (runs on FPU1).
6109 pipe_class fcvt_D2I(mRegI dst, regD src) %{
6110 src : RD(read);
6111 dst : WB(write);
6112 DECODE : ID;
6113 FPU1 : CA;
6114 %}
// FPR -> GPR move (mfc1); modeled on the memory port.
6116 pipe_class pipe_mfc1(mRegI dst, regD src) %{
6117 src : RD(read);
6118 dst : WB(write);
6119 DECODE : ID;
6120 MEM : RD;
6121 %}
// GPR -> FPR move (mtc1); modeled on the memory port with extra latency.
6123 pipe_class pipe_mtc1(regD dst, mRegI src) %{
6124 src : RD(read);
6125 dst : WB(write);
6126 DECODE : ID;
6127 MEM : RD(5);
6128 %}
6130 //No.23 Floating sqrt operation : dst <-- reg1 sqrt reg2
6131 pipe_class fpu_sqrt(regF dst, regF src1, regF src2) %{
6132 multiple_bundles;
6133 src1 : RD(read);
6134 src2 : RD(read);
6135 dst : WB(write);
6136 DECODE : ID;
6137 FPU2 : CA;
6138 %}
6140 //No.11 Load Floating from Memory :
6141 pipe_class fpu_loadF(regF dst, memory mem) %{
6142 instruction_count(1);
6143 mem : RD(read);
6144 dst : WB(write)+3;
6145 DECODE : ID;
6146 MEM : RD;
6147 %}
6149 //No.12 Store Floating to Memory :
6150 pipe_class fpu_storeF(regF src, memory mem) %{
6151 instruction_count(1);
6152 mem : RD(read);
6153 src : RD(read);
6154 DECODE : ID;
6155 MEM : RD;
6156 %}
6158 //No.13 FPU Conditional branch :
6159 pipe_class pipe_fpu_branch(regF src1, regF src2, label labl ) %{
6160 multiple_bundles;
6161 src1 : RD(read);
6162 src2 : RD(read);
6163 DECODE : ID;
6164 BR : RD;
6165 %}
6167 //No.14 Floating FPU reg operation : dst <-- op reg
6168 pipe_class fpu1_regF(regF dst, regF src) %{
6169 src : RD(read);
6170 dst : WB(write);
6171 DECODE : ID;
6172 FPU : CA;
6173 %}
// Multi-instruction memory sequences (e.g. atomics): serialize the pipeline.
6175 pipe_class long_memory_op() %{
6176 instruction_count(10); multiple_bundles; force_serialization;
6177 fixed_latency(30);
6178 %}
// Direct call: serializing, high fixed latency, uses the branch unit.
6180 pipe_class simple_call() %{
6181 instruction_count(10); multiple_bundles; force_serialization;
6182 fixed_latency(200);
6183 BR : RD;
6184 %}
// Generic call without an explicit branch-unit reservation.
6186 pipe_class call() %{
6187 instruction_count(10); multiple_bundles; force_serialization;
6188 fixed_latency(200);
6189 %}
6191 //FIXME:
6192 //No.9 Piple slow : for multi-instructions
6193 pipe_class pipe_slow( ) %{
6194 instruction_count(20);
6195 force_serialization;
6196 multiple_bundles;
6197 fixed_latency(50);
6198 %}
6200 %}
6204 //----------INSTRUCTIONS-------------------------------------------------------
6205 //
6206 // match -- States which machine-independent subtree may be replaced
6207 // by this instruction.
6208 // ins_cost -- The estimated cost of this instruction is used by instruction
6209 // selection to identify a minimum cost tree of machine
6210 // instructions that matches a tree of machine-independent
6211 // instructions.
6212 // format -- A string providing the disassembly for this instruction.
6213 // The value of an instruction's operand may be inserted
6214 // by referring to it with a '$' prefix.
6215 // opcode -- Three instruction opcodes may be provided. These are referred
6216 // to within an encode class as $primary, $secondary, and $tertiary
6217 // respectively. The primary opcode is commonly used to
6218 // indicate the type of machine instruction, while secondary
6219 // and tertiary are often used for prefix options or addressing
6220 // modes.
6221 // ins_encode -- A list of encode classes with parameters. The encode class
6222 // name must have been defined in an 'enc_class' specification
6223 // in the encode section of the architecture description.
6226 // Load Integer
// 32-bit signed load; the narrowing variants below fold a shift/mask pair
// into the corresponding smaller-width load instruction.
6227 instruct loadI(mRegI dst, memory mem) %{
6228 match(Set dst (LoadI mem));
6230 ins_cost(125);
6231 format %{ "lw $dst, $mem #@loadI" %}
6232 ins_encode (load_I_enc(dst, mem));
6233 ins_pipe( ialu_loadI );
6234 %}
// Load int and sign-extend to long in one lw (MIPS lw sign-extends).
6236 instruct loadI_convI2L(mRegL dst, memory mem) %{
6237 match(Set dst (ConvI2L (LoadI mem)));
6239 ins_cost(125);
6240 format %{ "lw $dst, $mem #@loadI_convI2L" %}
6241 ins_encode (load_I_enc(dst, mem));
6242 ins_pipe( ialu_loadI );
6243 %}
6245 // Load Integer (32 bit signed) to Byte (8 bit signed)
6246 instruct loadI2B(mRegI dst, memory mem, immI_24 twentyfour) %{
6247 match(Set dst (RShiftI (LShiftI (LoadI mem) twentyfour) twentyfour));
6249 ins_cost(125);
6250 format %{ "lb $dst, $mem\t# int -> byte #@loadI2B" %}
6251 ins_encode(load_B_enc(dst, mem));
6252 ins_pipe(ialu_loadI);
6253 %}
6255 // Load Integer (32 bit signed) to Unsigned Byte (8 bit UNsigned)
6256 instruct loadI2UB(mRegI dst, memory mem, immI_255 mask) %{
6257 match(Set dst (AndI (LoadI mem) mask));
6259 ins_cost(125);
6260 format %{ "lbu $dst, $mem\t# int -> ubyte #@loadI2UB" %}
6261 ins_encode(load_UB_enc(dst, mem));
6262 ins_pipe(ialu_loadI);
6263 %}
6265 // Load Integer (32 bit signed) to Short (16 bit signed)
6266 instruct loadI2S(mRegI dst, memory mem, immI_16 sixteen) %{
6267 match(Set dst (RShiftI (LShiftI (LoadI mem) sixteen) sixteen));
6269 ins_cost(125);
6270 format %{ "lh $dst, $mem\t# int -> short #@loadI2S" %}
6271 ins_encode(load_S_enc(dst, mem));
6272 ins_pipe(ialu_loadI);
6273 %}
6275 // Load Integer (32 bit signed) to Unsigned Short/Char (16 bit UNsigned)
6276 instruct loadI2US(mRegI dst, memory mem, immI_65535 mask) %{
6277 match(Set dst (AndI (LoadI mem) mask));
6279 ins_cost(125);
6280 format %{ "lhu $dst, $mem\t# int -> ushort/char #@loadI2US" %}
6281 ins_encode(load_C_enc(dst, mem));
6282 ins_pipe(ialu_loadI);
6283 %}
6285 // Load Long.
6286 instruct loadL(mRegL dst, memory mem) %{
6287 // predicate(!((LoadLNode*)n)->require_atomic_access());
6288 match(Set dst (LoadL mem));
6290 ins_cost(250);
6291 format %{ "ld $dst, $mem #@loadL" %}
6292 ins_encode(load_L_enc(dst, mem));
6293 ins_pipe( ialu_loadL );
6294 %}
6296 // Load Long - UNaligned
// NOTE(review): same encoding as loadL, just costed higher; the comment
// below suggests a dedicated ldl/ldr sequence was intended at some point.
6297 instruct loadL_unaligned(mRegL dst, memory mem) %{
6298 match(Set dst (LoadL_unaligned mem));
6300 // FIXME: Jin: Need more effective ldl/ldr
6301 ins_cost(450);
6302 format %{ "ld $dst, $mem #@loadL_unaligned\n\t" %}
6303 ins_encode(load_L_enc(dst, mem));
6304 ins_pipe( ialu_loadL );
6305 %}
6307 // Store Long
6308 instruct storeL_reg(memory mem, mRegL src) %{
6309 match(Set mem (StoreL mem src));
6311 ins_cost(200);
6312 format %{ "sd $mem, $src #@storeL_reg\n" %}
6313 ins_encode(store_L_reg_enc(mem, src));
6314 ins_pipe( ialu_storeL );
6315 %}
// Store long zero: cheaper because the zero register is stored directly.
6317 instruct storeL_immL0(memory mem, immL0 zero) %{
6318 match(Set mem (StoreL mem zero));
6320 ins_cost(180);
6321 format %{ "sd zero, $mem #@storeL_immL0" %}
6322 ins_encode(store_L_immL0_enc(mem, zero));
6323 ins_pipe( ialu_storeL );
6324 %}
// Store arbitrary long immediate (materialized into a scratch register).
6326 instruct storeL_imm(memory mem, immL src) %{
6327 match(Set mem (StoreL mem src));
6329 ins_cost(200);
6330 format %{ "sd $src, $mem #@storeL_imm" %}
6331 ins_encode(store_L_immL_enc(mem, src));
6332 ins_pipe( ialu_storeL );
6333 %}
6335 // Load Compressed Pointer
6336 instruct loadN(mRegN dst, memory mem)
6337 %{
6338 match(Set dst (LoadN mem));
6340 ins_cost(125); // XXX
6341 format %{ "lwu $dst, $mem\t# compressed ptr @ loadN" %}
6342 ins_encode (load_N_enc(dst, mem));
6343 ins_pipe( ialu_loadI ); // XXX
6344 %}
// Load-and-decode in one instruction when compressed oops are unshifted
// and unbased (the lwu zero-extension IS the decode in that mode).
6346 instruct loadN2P(mRegP dst, memory mem)
6347 %{
6348 match(Set dst (DecodeN (LoadN mem)));
6349 predicate(Universe::narrow_oop_base() == NULL && Universe::narrow_oop_shift() == 0);
6351 ins_cost(125); // XXX
6352 format %{ "lwu $dst, $mem\t# @ loadN2P" %}
6353 ins_encode (load_N_enc(dst, mem));
6354 ins_pipe( ialu_loadI ); // XXX
6355 %}
6357 // Load Pointer
6358 instruct loadP(mRegP dst, memory mem) %{
6359 match(Set dst (LoadP mem));
6361 ins_cost(125);
6362 format %{ "ld $dst, $mem #@loadP" %}
6363 ins_encode (load_P_enc(dst, mem));
6364 ins_pipe( ialu_loadI );
6365 %}
6367 // Load Klass Pointer
// Full-width klass load (uncompressed case); same encoding as loadP.
6368 instruct loadKlass(mRegP dst, memory mem) %{
6369 match(Set dst (LoadKlass mem));
6371 ins_cost(125);
6372 format %{ "MOV $dst,$mem @ loadKlass" %}
6373 ins_encode (load_P_enc(dst, mem));
6374 ins_pipe( ialu_loadI );
6375 %}
6377 // Load narrow Klass Pointer
6378 instruct loadNKlass(mRegN dst, memory mem)
6379 %{
6380 match(Set dst (LoadNKlass mem));
6382 ins_cost(125); // XXX
6383 format %{ "lwu $dst, $mem\t# compressed klass ptr @ loadNKlass" %}
6384 ins_encode (load_N_enc(dst, mem));
6385 ins_pipe( ialu_loadI ); // XXX
6386 %}
// Fused load+decode for unshifted, unbased compressed klass pointers
// (mirrors loadN2P above).
6388 instruct loadN2PKlass(mRegP dst, memory mem)
6389 %{
6390 match(Set dst (DecodeNKlass (LoadNKlass mem)));
6391 predicate(Universe::narrow_klass_base() == NULL && Universe::narrow_klass_shift() == 0);
6393 ins_cost(125); // XXX
6394 format %{ "lwu $dst, $mem\t# compressed klass ptr @ loadN2PKlass" %}
6395 ins_encode (load_N_enc(dst, mem));
6396 ins_pipe( ialu_loadI ); // XXX
6397 %}
6399 // Load Constant
// Materialize a 32-bit int constant into a register.
6400 instruct loadConI(mRegI dst, immI src) %{
6401 match(Set dst src);
6403 ins_cost(150);
6404 format %{ "mov $dst, $src #@loadConI" %}
6405 ins_encode %{
6406 Register dst = $dst$$Register;
6407 int value = $src$$constant;
6408 __ move(dst, value);
6409 %}
6410 ins_pipe( ialu_regI_regI );
6411 %}
// Materialize an arbitrary 64-bit constant via the macro-assembler's
// multi-instruction set64 sequence.
6414 instruct loadConL_set64(mRegL dst, immL src) %{
6415 match(Set dst src);
6416 ins_cost(120);
6417 format %{ "li $dst, $src @ loadConL_set64" %}
6418 ins_encode %{
6419 __ set64($dst$$Register, $src$$constant);
6420 %}
6421 ins_pipe(ialu_regL_regL);
6422 %}
6424 /*
6425 // Load long value from constant table (predicated by immL_expensive).
6426 instruct loadConL_load(mRegL dst, immL_expensive src) %{
6427 match(Set dst src);
6428 ins_cost(150);
6429 format %{ "ld $dst, $constantoffset[$constanttablebase] # load long $src from table @ loadConL_ldx" %}
6430 ins_encode %{
6431 int con_offset = $constantoffset($src);
6433 if (Assembler::is_simm16(con_offset)) {
6434 __ ld($dst$$Register, $constanttablebase, con_offset);
6435 } else {
6436 __ set64(AT, con_offset);
6437 if (UseLoongsonISA) {
6438 __ gsldx($dst$$Register, $constanttablebase, AT, 0);
6439 } else {
6440 __ daddu(AT, $constanttablebase, AT);
6441 __ ld($dst$$Register, AT, 0);
6442 }
6443 }
6444 %}
6445 ins_pipe(ialu_loadI);
6446 %}
6447 */
// 16-bit long constant: a single daddiu from the zero register; cheaper
// than the general set64 path above.
6449 instruct loadConL16(mRegL dst, immL16 src) %{
6450 match(Set dst src);
6451 ins_cost(105);
6452 format %{ "mov $dst, $src #@loadConL16" %}
6453 ins_encode %{
6454 Register dst_reg = as_Register($dst$$reg);
6455 int value = $src$$constant;
6456 __ daddiu(dst_reg, R0, value);
6457 %}
6458 ins_pipe( ialu_regL_regL );
6459 %}
// Long zero: daddu dst, R0, R0 — cheapest form, no immediate needed.
6462 instruct loadConL0(mRegL dst, immL0 src) %{
6463 match(Set dst src);
6464 ins_cost(100);
6465 format %{ "mov $dst, zero #@loadConL0" %}
6466 ins_encode %{
6467 Register dst_reg = as_Register($dst$$reg);
6468 __ daddu(dst_reg, R0, R0);
6469 %}
6470 ins_pipe( ialu_regL_regL );
6471 %}
6473 // Load Range
// Array-length load: same encoding as a plain int load.
6474 instruct loadRange(mRegI dst, memory mem) %{
6475 match(Set dst (LoadRange mem));
6477 ins_cost(125);
6478 format %{ "MOV $dst,$mem @ loadRange" %}
6479 ins_encode(load_I_enc(dst, mem));
6480 ins_pipe( ialu_loadI );
6481 %}
// Store a full-width pointer register to memory.
6484 instruct storeP(memory mem, mRegP src ) %{
6485 match(Set mem (StoreP mem src));
6487 ins_cost(125);
6488 format %{ "sd $src, $mem #@storeP" %}
6489 ins_encode(store_P_reg_enc(mem, src));
6490 ins_pipe( ialu_storeI );
6491 %}
6493 // Store NULL Pointer, mark word, or other simple pointer constant.
6494 instruct storeImmP0(memory mem, immP0 zero) %{
6495 match(Set mem (StoreP mem zero));
6497 ins_cost(125);
6498 format %{ "mov $mem, $zero #@storeImmP0" %}
6499 ins_encode(store_P_immP0_enc(mem));
6500 ins_pipe( ialu_storeI );
6501 %}
6503 // Store NULL Pointer, mark word, or other simple pointer constant.
// Non-zero 31-bit pointer immediate (costs more: must be materialized).
6504 instruct storeImmP(memory mem, immP31 src) %{
6505 match(Set mem (StoreP mem src));
6507 ins_cost(150);
6508 format %{ "mov $mem, $src #@storeImmP" %}
6509 ins_encode(store_P_immP_enc(mem, src));
6510 ins_pipe( ialu_storeI );
6511 %}
6513 // Store Byte Immediate
6514 instruct storeImmB(memory mem, immI8 src) %{
6515 match(Set mem (StoreB mem src));
6517 ins_cost(150);
6518 format %{ "movb $mem, $src #@storeImmB" %}
6519 ins_encode(store_B_immI_enc(mem, src));
6520 ins_pipe( ialu_storeI );
6521 %}
6523 // Store Compressed Pointer
6524 instruct storeN(memory mem, mRegN src)
6525 %{
6526 match(Set mem (StoreN mem src));
6528 ins_cost(125); // XXX
6529 format %{ "sw $mem, $src\t# compressed ptr @ storeN" %}
6530 ins_encode(store_N_reg_enc(mem, src));
6531 ins_pipe( ialu_storeI );
6532 %}
// Fused encode+store when compressed oops are unshifted and unbased:
// the low 32 bits of the raw pointer ARE the narrow oop in that mode.
6534 instruct storeP2N(memory mem, mRegP src)
6535 %{
6536 match(Set mem (StoreN mem (EncodeP src)));
6537 predicate(Universe::narrow_oop_base() == NULL && Universe::narrow_oop_shift() == 0);
6539 ins_cost(125); // XXX
6540 format %{ "sw $mem, $src\t# @ storeP2N" %}
6541 ins_encode(store_N_reg_enc(mem, src));
6542 ins_pipe( ialu_storeI );
6543 %}
// Compressed klass pointer store.
6545 instruct storeNKlass(memory mem, mRegN src)
6546 %{
6547 match(Set mem (StoreNKlass mem src));
6549 ins_cost(125); // XXX
6550 format %{ "sw $mem, $src\t# compressed klass ptr @ storeNKlass" %}
6551 ins_encode(store_N_reg_enc(mem, src));
6552 ins_pipe( ialu_storeI );
6553 %}
// Fused encode+store for unshifted, unbased compressed klass pointers.
6555 instruct storeP2NKlass(memory mem, mRegP src)
6556 %{
6557 match(Set mem (StoreNKlass mem (EncodePKlass src)));
6558 predicate(Universe::narrow_klass_base() == NULL && Universe::narrow_klass_shift() == 0);
6560 ins_cost(125); // XXX
6561 format %{ "sw $mem, $src\t# @ storeP2NKlass" %}
6562 ins_encode(store_N_reg_enc(mem, src));
6563 ins_pipe( ialu_storeI );
6564 %}
// Store narrow-oop NULL (zero) to memory.
6566 instruct storeImmN0(memory mem, immN0 zero)
6567 %{
6568 match(Set mem (StoreN mem zero));
6570 ins_cost(125); // XXX
6571 format %{ "storeN0 zero, $mem\t# compressed ptr" %}
6572 ins_encode(storeImmN0_enc(mem, zero));
6573 ins_pipe( ialu_storeI );
6574 %}
// Store a non-null narrow-oop immediate.
6576 instruct storeImmN(memory mem, immN src)
6577 %{
6578 match(Set mem (StoreN mem src));
6580 ins_cost(150);
6581 format %{ "storeImmN $mem, $src\t# compressed ptr @ storeImmN" %}
6582 ins_encode(storeImmN_enc(mem, src));
6583 ins_pipe( ialu_storeI );
6584 %}
// Store a narrow-klass immediate.
6586 instruct storeImmNKlass(memory mem, immNKlass src)
6587 %{
6588 match(Set mem (StoreNKlass mem src));
6590 ins_cost(150); // XXX
6591 format %{ "sw $mem, $src\t# compressed klass ptr @ storeImmNKlass" %}
6592 ins_encode(storeImmNKlass_enc(mem, src));
6593 ins_pipe( ialu_storeI );
6594 %}
6596 // Store Byte
6597 instruct storeB(memory mem, mRegI src) %{
6598 match(Set mem (StoreB mem src));
6600 ins_cost(125);
6601 format %{ "sb $src, $mem #@storeB" %}
6602 ins_encode(store_B_reg_enc(mem, src));
6603 ins_pipe( ialu_storeI );
6604 %}
// Store byte from a long source: the ConvL2I is free since sb only
// writes the low 8 bits anyway.
6606 instruct storeB_convL2I(memory mem, mRegL src) %{
6607 match(Set mem (StoreB mem (ConvL2I src)));
6609 ins_cost(125);
6610 format %{ "sb $src, $mem #@storeB_convL2I" %}
6611 ins_encode(store_B_reg_enc(mem, src));
6612 ins_pipe( ialu_storeI );
6613 %}
6615 // Load Byte (8bit signed)
6616 instruct loadB(mRegI dst, memory mem) %{
6617 match(Set dst (LoadB mem));
6619 ins_cost(125);
6620 format %{ "lb $dst, $mem #@loadB" %}
6621 ins_encode(load_B_enc(dst, mem));
6622 ins_pipe( ialu_loadI );
6623 %}
// Load byte with sign-extension to long folded in (lb sign-extends).
6625 instruct loadB_convI2L(mRegL dst, memory mem) %{
6626 match(Set dst (ConvI2L (LoadB mem)));
6628 ins_cost(125);
6629 format %{ "lb $dst, $mem #@loadB_convI2L" %}
6630 ins_encode(load_B_enc(dst, mem));
6631 ins_pipe( ialu_loadI );
6632 %}
6634 // Load Byte (8bit UNsigned)
6635 instruct loadUB(mRegI dst, memory mem) %{
6636 match(Set dst (LoadUB mem));
6638 ins_cost(125);
6639 format %{ "lbu $dst, $mem #@loadUB" %}
6640 ins_encode(load_UB_enc(dst, mem));
6641 ins_pipe( ialu_loadI );
6642 %}
// Unsigned byte load with zero-extension to long folded in.
6644 instruct loadUB_convI2L(mRegL dst, memory mem) %{
6645 match(Set dst (ConvI2L (LoadUB mem)));
6647 ins_cost(125);
6648 format %{ "lbu $dst, $mem #@loadUB_convI2L" %}
6649 ins_encode(load_UB_enc(dst, mem));
6650 ins_pipe( ialu_loadI );
6651 %}
6653 // Load Short (16bit signed)
6654 instruct loadS(mRegI dst, memory mem) %{
6655 match(Set dst (LoadS mem));
6657 ins_cost(125);
6658 format %{ "lh $dst, $mem #@loadS" %}
6659 ins_encode(load_S_enc(dst, mem));
6660 ins_pipe( ialu_loadI );
6661 %}
6663 // Load Short (16 bit signed) to Byte (8 bit signed)
// Folds the <<24 >>24 narrowing of a short load into a single lb.
6664 instruct loadS2B(mRegI dst, memory mem, immI_24 twentyfour) %{
6665 match(Set dst (RShiftI (LShiftI (LoadS mem) twentyfour) twentyfour));
6667 ins_cost(125);
6668 format %{ "lb $dst, $mem\t# short -> byte #@loadS2B" %}
6669 ins_encode(load_B_enc(dst, mem));
6670 ins_pipe(ialu_loadI);
6671 %}
// Short load with sign-extension to long folded in (lh sign-extends).
6673 instruct loadS_convI2L(mRegL dst, memory mem) %{
6674 match(Set dst (ConvI2L (LoadS mem)));
6676 ins_cost(125);
6677 format %{ "lh $dst, $mem #@loadS_convI2L" %}
6678 ins_encode(load_S_enc(dst, mem));
6679 ins_pipe( ialu_loadI );
6680 %}
6682 // Store Integer Immediate
6683 instruct storeImmI(memory mem, immI src) %{
6684 match(Set mem (StoreI mem src));
6686 ins_cost(150);
6687 format %{ "mov $mem, $src #@storeImmI" %}
6688 ins_encode(store_I_immI_enc(mem, src));
6689 ins_pipe( ialu_storeI );
6690 %}
6692 // Store Integer
6693 instruct storeI(memory mem, mRegI src) %{
6694 match(Set mem (StoreI mem src));
6696 ins_cost(125);
6697 format %{ "sw $mem, $src #@storeI" %}
6698 ins_encode(store_I_reg_enc(mem, src));
6699 ins_pipe( ialu_storeI );
6700 %}
// Store int from a long source: ConvL2I is free, sw writes low 32 bits.
6702 instruct storeI_convL2I(memory mem, mRegL src) %{
6703 match(Set mem (StoreI mem (ConvL2I src)));
6705 ins_cost(125);
6706 format %{ "sw $mem, $src #@storeI_convL2I" %}
6707 ins_encode(store_I_reg_enc(mem, src));
6708 ins_pipe( ialu_storeI );
6709 %}
6711 // Load Float
// NOTE(review): scheduled on ialu_loadI rather than fpu_loadF — possibly
// intentional (same MEM port), but worth confirming against the pipeline model.
6712 instruct loadF(regF dst, memory mem) %{
6713 match(Set dst (LoadF mem));
6715 ins_cost(150);
6716 format %{ "loadF $dst, $mem #@loadF" %}
6717 ins_encode(load_F_enc(dst, mem));
6718 ins_pipe( ialu_loadI );
6719 %}
// Materialize a pointer constant, emitting relocation info when the
// constant is an oop or metadata so the GC / class redefinition can
// patch it.  Patchable (fixed-length) sequences are used for relocatable
// values; plain set64 otherwise.
6721 instruct loadConP_general(mRegP dst, immP src) %{
6722 match(Set dst src);
6724 ins_cost(120);
6725 format %{ "li $dst, $src #@loadConP_general" %}
6727 ins_encode %{
6728 Register dst = $dst$$Register;
6729 long* value = (long*)$src$$constant;
6731 if($src->constant_reloc() == relocInfo::metadata_type){
6732 int klass_index = __ oop_recorder()->find_index((Klass*)value);
6733 RelocationHolder rspec = metadata_Relocation::spec(klass_index);
6735 __ relocate(rspec);
6736 __ patchable_set48(dst, (long)value);
6737 }else if($src->constant_reloc() == relocInfo::oop_type){
6738 int oop_index = __ oop_recorder()->find_index((jobject)value);
6739 RelocationHolder rspec = oop_Relocation::spec(oop_index);
6741 __ relocate(rspec);
6742 __ patchable_set48(dst, (long)value);
6743 } else if ($src->constant_reloc() == relocInfo::none) {
6744 __ set64(dst, (long)value);
6745 }
6746 %}
6748 ins_pipe( ialu_regI_regI );
6749 %}
6751 /*
6752 instruct loadConP_load(mRegP dst, immP_load src) %{
6753 match(Set dst src);
6755 ins_cost(100);
6756 format %{ "ld $dst, [$constanttablebase + $constantoffset] load from constant table: ptr=$src @ loadConP_load" %}
6758 ins_encode %{
6760 int con_offset = $constantoffset($src);
6762 if (Assembler::is_simm16(con_offset)) {
6763 __ ld($dst$$Register, $constanttablebase, con_offset);
6764 } else {
6765 __ set64(AT, con_offset);
6766 if (UseLoongsonISA) {
6767 __ gsldx($dst$$Register, $constanttablebase, AT, 0);
6768 } else {
6769 __ daddu(AT, $constanttablebase, AT);
6770 __ ld($dst$$Register, AT, 0);
6771 }
6772 }
6773 %}
6775 ins_pipe(ialu_loadI);
6776 %}
6777 */
// Non-oop pointer constant that is cheap to materialize inline (no
// relocation needed) — plain set64.
6779 instruct loadConP_no_oop_cheap(mRegP dst, immP_no_oop_cheap src) %{
6780 match(Set dst src);
6782 ins_cost(80);
6783 format %{ "li $dst, $src @ loadConP_no_oop_cheap" %}
6785 ins_encode %{
6786 __ set64($dst$$Register, $src$$constant);
6787 %}
6789 ins_pipe(ialu_regI_regI);
6790 %}
// Safepoint polling-page address constant.
6793 instruct loadConP_poll(mRegP dst, immP_poll src) %{
6794 match(Set dst src);
6796 ins_cost(50);
6797 format %{ "li $dst, $src #@loadConP_poll" %}
6799 ins_encode %{
6800 Register dst = $dst$$Register;
6801 intptr_t value = (intptr_t)$src$$constant;
6803 __ set64(dst, (jlong)value);
6804 %}
6806 ins_pipe( ialu_regI_regI );
6807 %}
// NULL pointer constant: copy the hardwired zero register.
6809 instruct loadConP0(mRegP dst, immP0 src)
6810 %{
6811 match(Set dst src);
6813 ins_cost(50);
6814 format %{ "mov $dst, R0\t# ptr" %}
6815 ins_encode %{
6816 Register dst_reg = $dst$$Register;
6817 __ daddu(dst_reg, R0, R0);
6818 %}
6819 ins_pipe( ialu_regI_regI );
6820 %}
// Narrow-oop NULL constant.
6822 instruct loadConN0(mRegN dst, immN0 src) %{
6823 match(Set dst src);
6824 format %{ "move $dst, R0\t# compressed NULL ptr" %}
6825 ins_encode %{
6826 __ move($dst$$Register, R0);
6827 %}
6828 ins_pipe( ialu_regI_regI );
6829 %}
// Materialize a narrow-oop constant; set_narrow_oop records the oop
// relocation so the value can be patched.
6831 instruct loadConN(mRegN dst, immN src) %{
6832 match(Set dst src);
6834 ins_cost(125);
6835 format %{ "li $dst, $src\t# compressed ptr @ loadConN" %}
6836 ins_encode %{
6837 Register dst = $dst$$Register;
6838 __ set_narrow_oop(dst, (jobject)$src$$constant);
6839 %}
6840 ins_pipe( ialu_regI_regI ); // XXX
6841 %}
// Materialize a narrow-klass constant (metadata relocation recorded).
6843 instruct loadConNKlass(mRegN dst, immNKlass src) %{
6844 match(Set dst src);
6846 ins_cost(125);
6847 format %{ "li $dst, $src\t# compressed klass ptr @ loadConNKlass" %}
6848 ins_encode %{
6849 Register dst = $dst$$Register;
6850 __ set_narrow_klass(dst, (Klass*)$src$$constant);
6851 %}
6852 ins_pipe( ialu_regI_regI ); // XXX
6853 %}
6855 //FIXME
6856 // Tail Call; Jump from runtime stub to Java code.
6857 // Also known as an 'interprocedural jump'.
6858 // Target of jump will eventually return to caller.
6859 // TailJump below removes the return address.
// Pushes RA (needed later by generate_forward_exception), stashes the
// method oop in S3, then jumps indirectly through the target register.
// The trailing nop fills the jr delay slot.
6860 instruct TailCalljmpInd(mRegP jump_target, mRegP method_oop) %{
6861 match(TailCall jump_target method_oop );
6862 ins_cost(300);
6863 format %{ "JMP $jump_target \t# @TailCalljmpInd" %}
6865 ins_encode %{
6866 Register target = $jump_target$$Register;
6867 Register oop = $method_oop$$Register;
6869 /* 2012/10/12 Jin: RA will be used in generate_forward_exception() */
6870 __ push(RA);
6872 __ move(S3, oop);
6873 __ jr(target);
6874 __ nop();
6875 %}
6877 ins_pipe( pipe_jump );
6878 %}
6880 // Create exception oop: created by stack-crawling runtime code.
6881 // Created exception is now available to this handler, and is setup
6882 // just prior to jumping to this handler. No code emitted.
// Only a block comment is emitted; the runtime has already placed the
// exception oop in A0 before control reaches this handler.
6883 instruct CreateException( a0_RegP ex_oop )
6884 %{
6885 match(Set ex_oop (CreateEx));
6887 // use the following format syntax
6888 format %{ "# exception oop is in A0; no code emitted @CreateException" %}
6889 ins_encode %{
6890 /* Jin: X86 leaves this function empty */
6891 __ block_comment("CreateException is empty in X86/MIPS");
6892 %}
6893 ins_pipe( empty );
6894 // ins_pipe( pipe_jump );
6895 %}
6898 /* 2012/9/14 Jin: The mechanism of exception handling is clear now.
6900 - Common try/catch:
6901 2012/9/14 Jin: [stubGenerator_mips.cpp] generate_forward_exception()
6902 |- V0, V1 are created
6903 |- T9 <= SharedRuntime::exception_handler_for_return_address
6904 `- jr T9
6905 `- the caller's exception_handler
6906 `- jr OptoRuntime::exception_blob
6907 `- here
6908 - Rethrow(e.g. 'unwind'):
6909 * The callee:
6910 |- an exception is triggered during execution
6911 `- exits the callee method through RethrowException node
6912 |- The callee pushes exception_oop(T0) and exception_pc(RA)
6913 `- The callee jumps to OptoRuntime::rethrow_stub()
6914 * In OptoRuntime::rethrow_stub:
6915 |- The VM calls _rethrow_Java to determine the return address in the caller method
6916 `- exits the stub with tailjmpInd
6917 |- pops exception_oop(V0) and exception_pc(V1)
6918 `- jumps to the return address(usually an exception_handler)
6919 * The caller:
6920 `- continues processing the exception_blob with V0/V1
6921 */
6923 /*
6924 Disassembling OptoRuntime::rethrow_stub()
6926 ; locals
6927 0x2d3bf320: addiu sp, sp, 0xfffffff8
6928 0x2d3bf324: sw ra, 0x4(sp)
6929 0x2d3bf328: sw fp, 0x0(sp)
6930 0x2d3bf32c: addu fp, sp, zero
6931 0x2d3bf330: addiu sp, sp, 0xfffffff0
6932 0x2d3bf334: sw ra, 0x8(sp)
6933 0x2d3bf338: sw t0, 0x4(sp)
6934 0x2d3bf33c: sw sp, 0x0(sp)
6936 ; get_thread(S2)
6937 0x2d3bf340: addu s2, sp, zero
6938 0x2d3bf344: srl s2, s2, 12
6939 0x2d3bf348: sll s2, s2, 2
6940 0x2d3bf34c: lui at, 0x2c85
6941 0x2d3bf350: addu at, at, s2
6942 0x2d3bf354: lw s2, 0xffffcc80(at)
6944 0x2d3bf358: lw s0, 0x0(sp)
5945 0x2d3bf35c: sw s0, 0x118(s2) // last_sp -> thread
6946 0x2d3bf360: sw s2, 0xc(sp)
6948 ; OptoRuntime::rethrow_C(oopDesc* exception, JavaThread* thread, address ret_pc)
6949 0x2d3bf364: lw a0, 0x4(sp)
6950 0x2d3bf368: lw a1, 0xc(sp)
6951 0x2d3bf36c: lw a2, 0x8(sp)
6952 ;; Java_To_Runtime
6953 0x2d3bf370: lui t9, 0x2c34
6954 0x2d3bf374: addiu t9, t9, 0xffff8a48
6955 0x2d3bf378: jalr t9
6956 0x2d3bf37c: nop
6958 0x2d3bf380: addu s3, v0, zero ; S3: SharedRuntime::raw_exception_handler_for_return_address()
6960 0x2d3bf384: lw s0, 0xc(sp)
6961 0x2d3bf388: sw zero, 0x118(s0)
6962 0x2d3bf38c: sw zero, 0x11c(s0)
6963 0x2d3bf390: lw s1, 0x144(s0) ; ex_oop: S1
6964 0x2d3bf394: addu s2, s0, zero
6965 0x2d3bf398: sw zero, 0x144(s2)
6966 0x2d3bf39c: lw s0, 0x4(s2)
6967 0x2d3bf3a0: addiu s4, zero, 0x0
6968 0x2d3bf3a4: bne s0, s4, 0x2d3bf3d4
6969 0x2d3bf3a8: nop
6970 0x2d3bf3ac: addiu sp, sp, 0x10
6971 0x2d3bf3b0: addiu sp, sp, 0x8
6972 0x2d3bf3b4: lw ra, 0xfffffffc(sp)
6973 0x2d3bf3b8: lw fp, 0xfffffff8(sp)
6974 0x2d3bf3bc: lui at, 0x2b48
6975 0x2d3bf3c0: lw at, 0x100(at)
6977 ; tailjmpInd: Restores exception_oop & exception_pc
6978 0x2d3bf3c4: addu v1, ra, zero
6979 0x2d3bf3c8: addu v0, s1, zero
6980 0x2d3bf3cc: jr s3
6981 0x2d3bf3d0: nop
6982 ; Exception:
6983 0x2d3bf3d4: lui s1, 0x2cc8 ; generate_forward_exception()
6984 0x2d3bf3d8: addiu s1, s1, 0x40
6985 0x2d3bf3dc: addiu s2, zero, 0x0
6986 0x2d3bf3e0: addiu sp, sp, 0x10
6987 0x2d3bf3e4: addiu sp, sp, 0x8
6988 0x2d3bf3e8: lw ra, 0xfffffffc(sp)
6989 0x2d3bf3ec: lw fp, 0xfffffff8(sp)
6990 0x2d3bf3f0: lui at, 0x2b48
6991 0x2d3bf3f4: lw at, 0x100(at)
6992 ; TailCalljmpInd
6993 __ push(RA); ; to be used in generate_forward_exception()
6994 0x2d3bf3f8: addu t7, s2, zero
6995 0x2d3bf3fc: jr s1
6996 0x2d3bf400: nop
6997 */
6998 // Rethrow exception:
6999 // The exception oop will come in the first argument position.
7000 // Then JUMP (not call) to the rethrow stub code.
// Emits a relocated, patchable jump so the stub address can be fixed up;
// the runtime_call relocation is recorded at the jump's insts mark.
7001 instruct RethrowException()
7002 %{
7003 match(Rethrow);
7005 // use the following format syntax
7006 format %{ "JMP rethrow_stub #@RethrowException" %}
7007 ins_encode %{
7008 __ block_comment("@ RethrowException");
7010 cbuf.set_insts_mark();
7011 cbuf.relocate(cbuf.insts_mark(), runtime_call_Relocation::spec());
7013 // call OptoRuntime::rethrow_stub to get the exception handler in parent method
7014 __ patchable_jump((address)OptoRuntime::rethrow_stub());
7015 %}
7016 ins_pipe( pipe_jump );
7017 %}
// Conditional branch: pointer compared against NULL (EQ/NE only; the
// unsigned relations are commented out below and unused for this match).
// Emits beq/bne against R0 plus the mandatory MIPS branch-delay-slot nop.
instruct branchConP_zero(cmpOpU cmp, mRegP op1, immP0 zero, label labl) %{
  match(If cmp (CmpP op1 zero));
  effect(USE labl);

  ins_cost(180);
  format %{ "b$cmp $op1, R0, $labl #@branchConP_zero" %}

  ins_encode %{
    Register op1 = $op1$$Register;
    Register op2 = R0;
    // Test the label POINTER before dereferencing it.  The previous code
    // bound `Label &L = *($labl$$label);` and then tested `if (&L)`:
    // the address of a bound reference is always non-null, so the
    // "no label yet" arm was unreachable, and the dereference itself is
    // undefined behavior when $labl$$label is NULL.
    Label* L = $labl$$label;
    int flag = $cmp$$cmpcode;

    switch(flag)
    {
      case 0x01: //equal
        if (L)
          __ beq(op1, op2, *L);
        else
          __ beq(op1, op2, (int)0);
        break;
      case 0x02: //not_equal
        if (L)
          __ bne(op1, op2, *L);
        else
          __ bne(op1, op2, (int)0);
        break;
/*
      case 0x03: //above
        __ sltu(AT, op2, op1);
        if (L)
          __ bne(R0, AT, *L);
        else
          __ bne(R0, AT, (int)0);
        break;
      case 0x04: //above_equal
        __ sltu(AT, op1, op2);
        if (L)
          __ beq(AT, R0, *L);
        else
          __ beq(AT, R0, (int)0);
        break;
      case 0x05: //below
        __ sltu(AT, op1, op2);
        if (L)
          __ bne(R0, AT, *L);
        else
          __ bne(R0, AT, (int)0);
        break;
      case 0x06: //below_equal
        __ sltu(AT, op2, op1);
        if (L)
          __ beq(AT, R0, *L);
        else
          __ beq(AT, R0, (int)0);
        break;
*/
      default:
        Unimplemented();
    }
    __ nop(); // branch delay slot
  %}

  ins_pc_relative(1);
  ins_pipe( pipe_alu_branch );
%}
// Branch on a compressed oop (narrow pointer) compared against NULL,
// without emitting the DecodeN. Valid only when the heap base is NULL and
// the shift is 0 (see predicate), since then the narrow value is zero
// exactly when the decoded pointer would be. Only eq/ne are supported.
instruct branchConN2P_zero(cmpOpU cmp, mRegN op1, immP0 zero, label labl) %{
  match(If cmp (CmpP (DecodeN op1) zero));
  predicate(Universe::narrow_oop_base() == NULL && Universe::narrow_oop_shift() == 0);
  effect(USE labl);

  ins_cost(180);
  format %{ "b$cmp $op1, R0, $labl #@branchConN2P_zero" %}

  ins_encode %{
    Register op1 = $op1$$Register;
    Register op2 = R0;
    Label &L = *($labl$$label);
    int flag = $cmp$$cmpcode;

    switch(flag)
    {
      case 0x01: //equal
        if (&L)
          __ beq(op1, op2, L);
        else
          __ beq(op1, op2, (int)0);
        break;
      case 0x02: //not_equal
        if (&L)
          __ bne(op1, op2, L);
        else
          __ bne(op1, op2, (int)0);
        break;
      default:
        Unimplemented();
    }
    __ nop();  // branch delay slot
  %}

  ins_pc_relative(1);
  ins_pipe( pipe_alu_branch );
%}
// Branch on unsigned pointer compare between two registers.
// above/below variants materialize the condition into AT via sltu and
// branch on AT. The trailing nop fills the branch delay slot.
instruct branchConP(cmpOpU cmp, mRegP op1, mRegP op2, label labl) %{
  match(If cmp (CmpP op1 op2));
// predicate(can_branch_register(_kids[0]->_leaf, _kids[1]->_leaf));
  effect(USE labl);

  ins_cost(200);
  format %{ "b$cmp $op1, $op2, $labl #@branchConP" %}

  ins_encode %{
    Register op1 = $op1$$Register;
    Register op2 = $op2$$Register;
    Label &L = *($labl$$label);
    int flag = $cmp$$cmpcode;

    switch(flag)
    {
      case 0x01: //equal
        if (&L)
          __ beq(op1, op2, L);
        else
          __ beq(op1, op2, (int)0);
        break;
      case 0x02: //not_equal
        if (&L)
          __ bne(op1, op2, L);
        else
          __ bne(op1, op2, (int)0);
        break;
      case 0x03: //above
        // AT = (op2 <u op1), branch if set
        __ sltu(AT, op2, op1);
        if(&L)
          __ bne(R0, AT, L);
        else
          __ bne(R0, AT, (int)0);
        break;
      case 0x04: //above_equal
        // AT = (op1 <u op2), branch if clear
        __ sltu(AT, op1, op2);
        if(&L)
          __ beq(AT, R0, L);
        else
          __ beq(AT, R0, (int)0);
        break;
      case 0x05: //below
        __ sltu(AT, op1, op2);
        if(&L)
          __ bne(R0, AT, L);
        else
          __ bne(R0, AT, (int)0);
        break;
      case 0x06: //below_equal
        __ sltu(AT, op2, op1);
        if(&L)
          __ beq(AT, R0, L);
        else
          __ beq(AT, R0, (int)0);
        break;
      default:
        Unimplemented();
    }
    __ nop();  // branch delay slot
  %}

  ins_pc_relative(1);
  ins_pipe( pipe_alu_branch );
%}
// Branch on compressed oop compared against the narrow-NULL constant.
// A narrow oop is NULL exactly when its 32-bit encoding is zero, so a
// direct register-vs-R0 compare suffices; only eq/ne are supported.
instruct cmpN_null_branch(cmpOp cmp, mRegN op1, immN0 null, label labl) %{
  match(If cmp (CmpN op1 null));
  effect(USE labl);

  ins_cost(180);
  format %{ "CMP $op1,0\t! compressed ptr\n\t"
            "BP$cmp $labl @ cmpN_null_branch" %}
  ins_encode %{
    Register op1 = $op1$$Register;
    Register op2 = R0;
    Label &L = *($labl$$label);
    int flag = $cmp$$cmpcode;

    switch(flag)
    {
      case 0x01: //equal
        if (&L)
          __ beq(op1, op2, L);
        else
          __ beq(op1, op2, (int)0);
        break;
      case 0x02: //not_equal
        if (&L)
          __ bne(op1, op2, L);
        else
          __ bne(op1, op2, (int)0);
        break;
      default:
        Unimplemented();
    }
    __ nop();  // branch delay slot
  %}
//TODO: pipe_branchP or create pipe_branchN LEE
  ins_pc_relative(1);
  ins_pipe( pipe_alu_branch );
%}
// Branch on compare of two compressed oops held in registers.
// Compressed oops are compared as unsigned 32-bit encodings (sltu) for
// the ordered cases. The trailing nop fills the branch delay slot.
instruct cmpN_reg_branch(cmpOp cmp, mRegN op1, mRegN op2, label labl) %{
  match(If cmp (CmpN op1 op2));
  effect(USE labl);

  ins_cost(180);
  format %{ "CMP $op1,$op2\t! compressed ptr\n\t"
            "BP$cmp $labl" %}
  ins_encode %{
    Register op1_reg = $op1$$Register;
    Register op2_reg = $op2$$Register;
    Label &L = *($labl$$label);
    int flag = $cmp$$cmpcode;

    switch(flag)
    {
      case 0x01: //equal
        if (&L)
          __ beq(op1_reg, op2_reg, L);
        else
          __ beq(op1_reg, op2_reg, (int)0);
        break;
      case 0x02: //not_equal
        if (&L)
          __ bne(op1_reg, op2_reg, L);
        else
          __ bne(op1_reg, op2_reg, (int)0);
        break;
      case 0x03: //above
        __ sltu(AT, op2_reg, op1_reg);
        if(&L)
          __ bne(R0, AT, L);
        else
          __ bne(R0, AT, (int)0);
        break;
      case 0x04: //above_equal
        __ sltu(AT, op1_reg, op2_reg);
        if(&L)
          __ beq(AT, R0, L);
        else
          __ beq(AT, R0, (int)0);
        break;
      case 0x05: //below
        __ sltu(AT, op1_reg, op2_reg);
        if(&L)
          __ bne(R0, AT, L);
        else
          __ bne(R0, AT, (int)0);
        break;
      case 0x06: //below_equal
        __ sltu(AT, op2_reg, op1_reg);
        if(&L)
          __ beq(AT, R0, L);
        else
          __ beq(AT, R0, (int)0);
        break;
      default:
        Unimplemented();
    }
    __ nop();  // branch delay slot
  %}
  ins_pc_relative(1);
  ins_pipe( pipe_alu_branch );
%}
// Branch on unsigned 32-bit integer compare between two registers.
// Ordered cases materialize the condition into AT via sltu, then branch
// on AT against R0. The trailing nop fills the branch delay slot.
instruct branchConIU_reg_reg(cmpOpU cmp, mRegI src1, mRegI src2, label labl) %{
  match( If cmp (CmpU src1 src2) );
  effect(USE labl);
  format %{ "BR$cmp $src1, $src2, $labl #@branchConIU_reg_reg" %}

  ins_encode %{
    Register op1 = $src1$$Register;
    Register op2 = $src2$$Register;
    Label &L = *($labl$$label);
    int flag = $cmp$$cmpcode;

    switch(flag)
    {
      case 0x01: //equal
        if (&L)
          __ beq(op1, op2, L);
        else
          __ beq(op1, op2, (int)0);
        break;
      case 0x02: //not_equal
        if (&L)
          __ bne(op1, op2, L);
        else
          __ bne(op1, op2, (int)0);
        break;
      case 0x03: //above
        __ sltu(AT, op2, op1);
        if(&L)
          __ bne(AT, R0, L);
        else
          __ bne(AT, R0, (int)0);
        break;
      case 0x04: //above_equal
        __ sltu(AT, op1, op2);
        if(&L)
          __ beq(AT, R0, L);
        else
          __ beq(AT, R0, (int)0);
        break;
      case 0x05: //below
        __ sltu(AT, op1, op2);
        if(&L)
          __ bne(AT, R0, L);
        else
          __ bne(AT, R0, (int)0);
        break;
      case 0x06: //below_equal
        __ sltu(AT, op2, op1);
        if(&L)
          __ beq(AT, R0, L);
        else
          __ beq(AT, R0, (int)0);
        break;
      default:
        Unimplemented();
    }
    __ nop();  // branch delay slot
  %}

  ins_pc_relative(1);
  ins_pipe( pipe_alu_branch );
%}
// Branch on unsigned 32-bit compare of a register against an arbitrary
// immediate. The immediate is first materialized into AT; the ordered
// cases then overwrite AT with the sltu result before branching.
instruct branchConIU_reg_imm(cmpOpU cmp, mRegI src1, immI src2, label labl) %{
  match( If cmp (CmpU src1 src2) );
  effect(USE labl);
  format %{ "BR$cmp $src1, $src2, $labl #@branchConIU_reg_imm" %}

  ins_encode %{
    Register op1 = $src1$$Register;
    int val = $src2$$constant;
    Label &L = *($labl$$label);
    int flag = $cmp$$cmpcode;

    __ move(AT, val);  // AT = immediate operand
    switch(flag)
    {
      case 0x01: //equal
        if (&L)
          __ beq(op1, AT, L);
        else
          __ beq(op1, AT, (int)0);
        break;
      case 0x02: //not_equal
        if (&L)
          __ bne(op1, AT, L);
        else
          __ bne(op1, AT, (int)0);
        break;
      case 0x03: //above
        // AT = (val <u op1); AT is both source and destination here
        __ sltu(AT, AT, op1);
        if(&L)
          __ bne(R0, AT, L);
        else
          __ bne(R0, AT, (int)0);
        break;
      case 0x04: //above_equal
        __ sltu(AT, op1, AT);
        if(&L)
          __ beq(AT, R0, L);
        else
          __ beq(AT, R0, (int)0);
        break;
      case 0x05: //below
        __ sltu(AT, op1, AT);
        if(&L)
          __ bne(R0, AT, L);
        else
          __ bne(R0, AT, (int)0);
        break;
      case 0x06: //below_equal
        __ sltu(AT, AT, op1);
        if(&L)
          __ beq(AT, R0, L);
        else
          __ beq(AT, R0, (int)0);
        break;
      default:
        Unimplemented();
    }
    __ nop();  // branch delay slot
  %}

  ins_pc_relative(1);
  ins_pipe( pipe_alu_branch );
%}
// Branch on signed 32-bit integer compare between two registers.
// Ordered cases use signed slt into AT, then branch on AT against R0.
// (The original case comments said above/below — unsigned terminology —
// but this is the signed cmpOp/slt variant; comments corrected.)
instruct branchConI_reg_reg(cmpOp cmp, mRegI src1, mRegI src2, label labl) %{
  match( If cmp (CmpI src1 src2) );
  effect(USE labl);
  format %{ "BR$cmp $src1, $src2, $labl #@branchConI_reg_reg" %}

  ins_encode %{
    Register op1 = $src1$$Register;
    Register op2 = $src2$$Register;
    Label &L = *($labl$$label);
    int flag = $cmp$$cmpcode;

    switch(flag)
    {
      case 0x01: //equal
        if (&L)
          __ beq(op1, op2, L);
        else
          __ beq(op1, op2, (int)0);
        break;
      case 0x02: //not_equal
        if (&L)
          __ bne(op1, op2, L);
        else
          __ bne(op1, op2, (int)0);
        break;
      case 0x03: //greater (signed)
        __ slt(AT, op2, op1);
        if(&L)
          __ bne(R0, AT, L);
        else
          __ bne(R0, AT, (int)0);
        break;
      case 0x04: //greater_equal (signed)
        __ slt(AT, op1, op2);
        if(&L)
          __ beq(AT, R0, L);
        else
          __ beq(AT, R0, (int)0);
        break;
      case 0x05: //less (signed)
        __ slt(AT, op1, op2);
        if(&L)
          __ bne(R0, AT, L);
        else
          __ bne(R0, AT, (int)0);
        break;
      case 0x06: //less_equal (signed)
        __ slt(AT, op2, op1);
        if(&L)
          __ beq(AT, R0, L);
        else
          __ beq(AT, R0, (int)0);
        break;
      default:
        Unimplemented();
    }
    __ nop();  // branch delay slot
  %}

  ins_pc_relative(1);
  ins_pipe( pipe_alu_branch );
%}
// Branch on signed 32-bit compare of a register against the constant 0.
// Cheaper than the general immediate form (ins_cost 170 vs 200) because
// MIPS has dedicated compare-with-zero branches (bgtz/bgez/bltz/blez),
// so no immediate materialization or slt is needed.
instruct branchConI_reg_imm0(cmpOp cmp, mRegI src1, immI0 src2, label labl) %{
  match( If cmp (CmpI src1 src2) );
  effect(USE labl);
  ins_cost(170);
  format %{ "BR$cmp $src1, $src2, $labl #@branchConI_reg_imm0" %}

  ins_encode %{
    Register op1 = $src1$$Register;
    // int val = $src2$$constant;
    Label &L = *($labl$$label);
    int flag = $cmp$$cmpcode;

    //__ move(AT, val);
    switch(flag)
    {
      case 0x01: //equal
        if (&L)
          __ beq(op1, R0, L);
        else
          __ beq(op1, R0, (int)0);
        break;
      case 0x02: //not_equal
        if (&L)
          __ bne(op1, R0, L);
        else
          __ bne(op1, R0, (int)0);
        break;
      case 0x03: //greater
        if(&L)
          __ bgtz(op1, L);
        else
          __ bgtz(op1, (int)0);
        break;
      case 0x04: //greater_equal
        if(&L)
          __ bgez(op1, L);
        else
          __ bgez(op1, (int)0);
        break;
      case 0x05: //less
        if(&L)
          __ bltz(op1, L);
        else
          __ bltz(op1, (int)0);
        break;
      case 0x06: //less_equal
        if(&L)
          __ blez(op1, L);
        else
          __ blez(op1, (int)0);
        break;
      default:
        Unimplemented();
    }
    __ nop();  // branch delay slot
  %}

  ins_pc_relative(1);
  ins_pipe( pipe_alu_branch );
%}
// Branch on signed 32-bit compare of a register against an arbitrary
// immediate. The immediate is materialized into AT up front; ordered
// cases then overwrite AT with the slt result before branching.
instruct branchConI_reg_imm(cmpOp cmp, mRegI src1, immI src2, label labl) %{
  match( If cmp (CmpI src1 src2) );
  effect(USE labl);
  ins_cost(200);
  format %{ "BR$cmp $src1, $src2, $labl #@branchConI_reg_imm" %}

  ins_encode %{
    Register op1 = $src1$$Register;
    int val = $src2$$constant;
    Label &L = *($labl$$label);
    int flag = $cmp$$cmpcode;

    __ move(AT, val);  // AT = immediate operand
    switch(flag)
    {
      case 0x01: //equal
        if (&L)
          __ beq(op1, AT, L);
        else
          __ beq(op1, AT, (int)0);
        break;
      case 0x02: //not_equal
        if (&L)
          __ bne(op1, AT, L);
        else
          __ bne(op1, AT, (int)0);
        break;
      case 0x03: //greater
        // AT = (val < op1) signed; AT doubles as source and destination
        __ slt(AT, AT, op1);
        if(&L)
          __ bne(R0, AT, L);
        else
          __ bne(R0, AT, (int)0);
        break;
      case 0x04: //greater_equal
        __ slt(AT, op1, AT);
        if(&L)
          __ beq(AT, R0, L);
        else
          __ beq(AT, R0, (int)0);
        break;
      case 0x05: //less
        __ slt(AT, op1, AT);
        if(&L)
          __ bne(R0, AT, L);
        else
          __ bne(R0, AT, (int)0);
        break;
      case 0x06: //less_equal
        __ slt(AT, AT, op1);
        if(&L)
          __ beq(AT, R0, L);
        else
          __ beq(AT, R0, (int)0);
        break;
      default:
        Unimplemented();
    }
    __ nop();  // branch delay slot
  %}

  ins_pc_relative(1);
  ins_pipe( pipe_alu_branch );
%}
// Branch on unsigned 32-bit compare of a register against 0.
// Unsigned compare with zero degenerates:
//   above (x >u 0)        <=> x != 0
//   above_equal (x >=u 0) is always true  -> unconditional beq(R0, R0)
//   below (x <u 0)        is always false -> emit nothing (early return,
//                            so no branch and no delay-slot nop needed)
//   below_equal (x <=u 0) <=> x == 0
instruct branchConIU_reg_imm0(cmpOpU cmp, mRegI src1, immI0 zero, label labl) %{
  match( If cmp (CmpU src1 zero) );
  effect(USE labl);
  format %{ "BR$cmp $src1, zero, $labl #@branchConIU_reg_imm0" %}

  ins_encode %{
    Register op1 = $src1$$Register;
    Label &L = *($labl$$label);
    int flag = $cmp$$cmpcode;

    switch(flag)
    {
      case 0x01: //equal
        if (&L)
          __ beq(op1, R0, L);
        else
          __ beq(op1, R0, (int)0);
        break;
      case 0x02: //not_equal
        if (&L)
          __ bne(op1, R0, L);
        else
          __ bne(op1, R0, (int)0);
        break;
      case 0x03: //above
        if(&L)
          __ bne(R0, op1, L);
        else
          __ bne(R0, op1, (int)0);
        break;
      case 0x04: //above_equal
        // always taken
        if(&L)
          __ beq(R0, R0, L);
        else
          __ beq(R0, R0, (int)0);
        break;
      case 0x05: //below
        // never taken: emit no instructions at all
        return;
        break;
      case 0x06: //below_equal
        if(&L)
          __ beq(op1, R0, L);
        else
          __ beq(op1, R0, (int)0);
        break;
      default:
        Unimplemented();
    }
    __ nop();  // branch delay slot
  %}

  ins_pc_relative(1);
  ins_pipe( pipe_alu_branch );
%}
// Branch on unsigned 32-bit compare of a register against a 16-bit
// immediate. Cheaper than the general form (ins_cost 180): above_equal
// and below use sltiu directly with the immediate; the other ordered
// cases still need the immediate moved into AT first because sltiu
// cannot express "imm < reg".
instruct branchConIU_reg_immI16(cmpOpU cmp, mRegI src1, immI16 src2, label labl) %{
  match( If cmp (CmpU src1 src2) );
  effect(USE labl);
  ins_cost(180);
  format %{ "BR$cmp $src1, $src2, $labl #@branchConIU_reg_immI16" %}

  ins_encode %{
    Register op1 = $src1$$Register;
    int val = $src2$$constant;
    Label &L = *($labl$$label);
    int flag = $cmp$$cmpcode;

    switch(flag)
    {
      case 0x01: //equal
        __ move(AT, val);
        if (&L)
          __ beq(op1, AT, L);
        else
          __ beq(op1, AT, (int)0);
        break;
      case 0x02: //not_equal
        __ move(AT, val);
        if (&L)
          __ bne(op1, AT, L);
        else
          __ bne(op1, AT, (int)0);
        break;
      case 0x03: //above
        __ move(AT, val);
        __ sltu(AT, AT, op1);
        if(&L)
          __ bne(R0, AT, L);
        else
          __ bne(R0, AT, (int)0);
        break;
      case 0x04: //above_equal
        // sltiu takes the 16-bit immediate directly
        __ sltiu(AT, op1, val);
        if(&L)
          __ beq(AT, R0, L);
        else
          __ beq(AT, R0, (int)0);
        break;
      case 0x05: //below
        __ sltiu(AT, op1, val);
        if(&L)
          __ bne(R0, AT, L);
        else
          __ bne(R0, AT, (int)0);
        break;
      case 0x06: //below_equal
        __ move(AT, val);
        __ sltu(AT, AT, op1);
        if(&L)
          __ beq(AT, R0, L);
        else
          __ beq(AT, R0, (int)0);
        break;
      default:
        Unimplemented();
    }
    __ nop();  // branch delay slot
  %}

  ins_pc_relative(1);
  ins_pipe( pipe_alu_branch );
%}
// Branch on signed 64-bit (long) compare between two registers.
// Unlike the 32-bit variants above, each case fills its own delay slot
// with __ delayed()->nop() instead of a shared trailing nop.
instruct branchConL_regL_regL(cmpOp cmp, mRegL src1, mRegL src2, label labl) %{
  match( If cmp (CmpL src1 src2) );
  effect(USE labl);
  format %{ "BR$cmp $src1, $src2, $labl #@branchConL_regL_regL" %}
  ins_cost(250);

  ins_encode %{
    Register opr1_reg = as_Register($src1$$reg);
    Register opr2_reg = as_Register($src2$$reg);

    Label &target = *($labl$$label);
    int flag = $cmp$$cmpcode;

    switch(flag)
    {
      case 0x01: //equal
        if (&target)
          __ beq(opr1_reg, opr2_reg, target);
        else
          __ beq(opr1_reg, opr2_reg, (int)0);
        __ delayed()->nop();
        break;

      case 0x02: //not_equal
        if(&target)
          __ bne(opr1_reg, opr2_reg, target);
        else
          __ bne(opr1_reg, opr2_reg, (int)0);
        __ delayed()->nop();
        break;

      case 0x03: //greater
        __ slt(AT, opr2_reg, opr1_reg);
        if(&target)
          __ bne(AT, R0, target);
        else
          __ bne(AT, R0, (int)0);
        __ delayed()->nop();
        break;

      case 0x04: //greater_equal
        __ slt(AT, opr1_reg, opr2_reg);
        if(&target)
          __ beq(AT, R0, target);
        else
          __ beq(AT, R0, (int)0);
        __ delayed()->nop();

        break;

      case 0x05: //less
        __ slt(AT, opr1_reg, opr2_reg);
        if(&target)
          __ bne(AT, R0, target);
        else
          __ bne(AT, R0, (int)0);
        __ delayed()->nop();

        break;

      case 0x06: //less_equal
        __ slt(AT, opr2_reg, opr1_reg);

        if(&target)
          __ beq(AT, R0, target);
        else
          __ beq(AT, R0, (int)0);
        __ delayed()->nop();

        break;

      default:
        Unimplemented();
    }
  %}

  ins_pc_relative(1);
  ins_pipe( pipe_alu_branch );
%}
// Branch on signed 64-bit compare of a register against an immediate
// whose negation fits in 16 bits (immL16_sub). Computes AT = op1 - val
// with a single daddiu, then branches on the sign/zero of AT using the
// compare-with-zero branch forms.
// NOTE(review): correctness relies on op1 - val not overflowing 64 bits;
// presumably the immL16_sub operand range keeps this safe — confirm the
// operand definition earlier in this file.
instruct branchConL_reg_immL16_sub(cmpOp cmp, mRegL src1, immL16_sub src2, label labl) %{
  match( If cmp (CmpL src1 src2) );
  effect(USE labl);
  ins_cost(180);
  format %{ "BR$cmp $src1, $src2, $labl #@branchConL_reg_immL16_sub" %}

  ins_encode %{
    Register op1 = $src1$$Register;
    int val = $src2$$constant;
    Label &L = *($labl$$label);
    int flag = $cmp$$cmpcode;

    __ daddiu(AT, op1, -1 * val);  // AT = op1 - val (64-bit)
    switch(flag)
    {
      case 0x01: //equal
        if (&L)
          __ beq(R0, AT, L);
        else
          __ beq(R0, AT, (int)0);
        break;
      case 0x02: //not_equal
        if (&L)
          __ bne(R0, AT, L);
        else
          __ bne(R0, AT, (int)0);
        break;
      case 0x03: //greater
        if(&L)
          __ bgtz(AT, L);
        else
          __ bgtz(AT, (int)0);
        break;
      case 0x04: //greater_equal
        if(&L)
          __ bgez(AT, L);
        else
          __ bgez(AT, (int)0);
        break;
      case 0x05: //less
        if(&L)
          __ bltz(AT, L);
        else
          __ bltz(AT, (int)0);
        break;
      case 0x06: //less_equal
        if(&L)
          __ blez(AT, L);
        else
          __ blez(AT, (int)0);
        break;
      default:
        Unimplemented();
    }
    __ nop();  // branch delay slot
  %}

  ins_pc_relative(1);
  ins_pipe( pipe_alu_branch );
%}
// 32-bit analogue of branchConL_reg_immL16_sub: computes AT = op1 - val
// with addiu32 (32-bit add) and branches on the sign/zero of AT.
// NOTE(review): as with the long form, correctness presumes the
// immI16_sub operand range rules out 32-bit overflow — confirm.
instruct branchConI_reg_imm16_sub(cmpOp cmp, mRegI src1, immI16_sub src2, label labl) %{
  match( If cmp (CmpI src1 src2) );
  effect(USE labl);
  ins_cost(180);
  format %{ "BR$cmp $src1, $src2, $labl #@branchConI_reg_imm16_sub" %}

  ins_encode %{
    Register op1 = $src1$$Register;
    int val = $src2$$constant;
    Label &L = *($labl$$label);
    int flag = $cmp$$cmpcode;

    __ addiu32(AT, op1, -1 * val);  // AT = op1 - val (32-bit)
    switch(flag)
    {
      case 0x01: //equal
        if (&L)
          __ beq(R0, AT, L);
        else
          __ beq(R0, AT, (int)0);
        break;
      case 0x02: //not_equal
        if (&L)
          __ bne(R0, AT, L);
        else
          __ bne(R0, AT, (int)0);
        break;
      case 0x03: //greater
        if(&L)
          __ bgtz(AT, L);
        else
          __ bgtz(AT, (int)0);
        break;
      case 0x04: //greater_equal
        if(&L)
          __ bgez(AT, L);
        else
          __ bgez(AT, (int)0);
        break;
      case 0x05: //less
        if(&L)
          __ bltz(AT, L);
        else
          __ bltz(AT, (int)0);
        break;
      case 0x06: //less_equal
        if(&L)
          __ blez(AT, L);
        else
          __ blez(AT, (int)0);
        break;
      default:
        Unimplemented();
    }
    __ nop();  // branch delay slot
  %}

  ins_pc_relative(1);
  ins_pipe( pipe_alu_branch );
%}
// Branch on signed 64-bit compare of a register against the constant 0.
// Uses the dedicated compare-with-zero branches where possible; the
// "less" case goes through slt + bne instead of bltz (kept as-is).
// The shared __ delayed()->nop() after the switch fills the delay slot.
instruct branchConL_regL_immL0(cmpOp cmp, mRegL src1, immL0 zero, label labl) %{
  match( If cmp (CmpL src1 zero) );
  effect(USE labl);
  format %{ "BR$cmp $src1, zero, $labl #@branchConL_regL_immL0" %}
  ins_cost(150);

  ins_encode %{
    Register opr1_reg = as_Register($src1$$reg);
    Label &target = *($labl$$label);
    int flag = $cmp$$cmpcode;

    switch(flag)
    {
      case 0x01: //equal
        if (&target)
          __ beq(opr1_reg, R0, target);
        else
          __ beq(opr1_reg, R0, int(0));
        break;

      case 0x02: //not_equal
        if(&target)
          __ bne(opr1_reg, R0, target);
        else
          __ bne(opr1_reg, R0, (int)0);
        break;

      case 0x03: //greater
        if(&target)
          __ bgtz(opr1_reg, target);
        else
          __ bgtz(opr1_reg, (int)0);
        break;

      case 0x04: //greater_equal
        if(&target)
          __ bgez(opr1_reg, target);
        else
          __ bgez(opr1_reg, (int)0);
        break;

      case 0x05: //less
        __ slt(AT, opr1_reg, R0);
        if(&target)
          __ bne(AT, R0, target);
        else
          __ bne(AT, R0, (int)0);
        break;

      case 0x06: //less_equal
        if (&target)
          __ blez(opr1_reg, target);
        else
          __ blez(opr1_reg, int(0));
        break;

      default:
        Unimplemented();
    }
    __ delayed()->nop();
  %}

  ins_pc_relative(1);
  ins_pipe( pipe_alu_branch );
%}
//FIXME
// Branch on single-precision float compare.
// Conditions are evaluated with FPU compare instructions that set the
// FP condition flag, then bc1t/bc1f branches on that flag:
//   eq:  c_eq_s  + bc1t      ne:  c_eq_s  + bc1f
//   gt:  c_ule_s + bc1f      ge:  c_ult_s + bc1f
//   lt:  c_ult_s + bc1t      le:  c_ule_s + bc1t
// The unordered (c_u*) predicates make gt/ge/lt/le all evaluate false
// when either operand is NaN, matching Java float semantics.
instruct branchConF_reg_reg(cmpOp cmp, regF src1, regF src2, label labl) %{
  match( If cmp (CmpF src1 src2) );
  effect(USE labl);
  format %{ "BR$cmp $src1, $src2, $labl #@branchConF_reg_reg" %}

  ins_encode %{
    FloatRegister reg_op1 = $src1$$FloatRegister;
    FloatRegister reg_op2 = $src2$$FloatRegister;
    Label &L = *($labl$$label);
    int flag = $cmp$$cmpcode;

    switch(flag)
    {
      case 0x01: //equal
        __ c_eq_s(reg_op1, reg_op2);
        if (&L)
          __ bc1t(L);
        else
          __ bc1t((int)0);
        break;
      case 0x02: //not_equal
        __ c_eq_s(reg_op1, reg_op2);
        if (&L)
          __ bc1f(L);
        else
          __ bc1f((int)0);
        break;
      case 0x03: //greater
        __ c_ule_s(reg_op1, reg_op2);
        if(&L)
          __ bc1f(L);
        else
          __ bc1f((int)0);
        break;
      case 0x04: //greater_equal
        __ c_ult_s(reg_op1, reg_op2);
        if(&L)
          __ bc1f(L);
        else
          __ bc1f((int)0);
        break;
      case 0x05: //less
        __ c_ult_s(reg_op1, reg_op2);
        if(&L)
          __ bc1t(L);
        else
          __ bc1t((int)0);
        break;
      case 0x06: //less_equal
        __ c_ule_s(reg_op1, reg_op2);
        if(&L)
          __ bc1t(L);
        else
          __ bc1t((int)0);
        break;
      default:
        Unimplemented();
    }
    __ nop();  // branch delay slot
  %}

  ins_pc_relative(1);
  ins_pipe(pipe_slow);
%}
// Branch on double-precision float compare; same condition scheme as
// branchConF_reg_reg but with the _d compare forms. The not_equal case
// deliberately uses the ordered c_eq_d (see 2016/4/19 note below) so
// that NaN != NaN holds, as Java's Double.isNaN relies on 'f != f'.
instruct branchConD_reg_reg(cmpOp cmp, regD src1, regD src2, label labl) %{
  match( If cmp (CmpD src1 src2) );
  effect(USE labl);
  format %{ "BR$cmp $src1, $src2, $labl #@branchConD_reg_reg" %}

  ins_encode %{
    FloatRegister reg_op1 = $src1$$FloatRegister;
    FloatRegister reg_op2 = $src2$$FloatRegister;
    Label &L = *($labl$$label);
    int flag = $cmp$$cmpcode;

    switch(flag)
    {
      case 0x01: //equal
        __ c_eq_d(reg_op1, reg_op2);
        if (&L)
          __ bc1t(L);
        else
          __ bc1t((int)0);
        break;
      case 0x02: //not_equal
        //2016/4/19 aoqi: c_ueq_d cannot distinguish NaN from equal. Double.isNaN(Double) is implemented by 'f != f', so the use of c_ueq_d causes bugs.
        __ c_eq_d(reg_op1, reg_op2);
        if (&L)
          __ bc1f(L);
        else
          __ bc1f((int)0);
        break;
      case 0x03: //greater
        __ c_ule_d(reg_op1, reg_op2);
        if(&L)
          __ bc1f(L);
        else
          __ bc1f((int)0);
        break;
      case 0x04: //greater_equal
        __ c_ult_d(reg_op1, reg_op2);
        if(&L)
          __ bc1f(L);
        else
          __ bc1f((int)0);
        break;
      case 0x05: //less
        __ c_ult_d(reg_op1, reg_op2);
        if(&L)
          __ bc1t(L);
        else
          __ bc1t((int)0);
        break;
      case 0x06: //less_equal
        __ c_ule_d(reg_op1, reg_op2);
        if(&L)
          __ bc1t(L);
        else
          __ bc1t((int)0);
        break;
      default:
        Unimplemented();
    }
    __ nop();  // branch delay slot
  %}

  ins_pc_relative(1);
  ins_pipe(pipe_slow);
%}
// Call Runtime Instruction
// Direct call into the VM runtime; the actual call sequence is produced
// by the shared Java_To_Runtime encoding class (defined elsewhere in
// this file). Aligned to 16 bytes for patching safety.
instruct CallRuntimeDirect(method meth) %{
  match(CallRuntime );
  effect(USE meth);

  ins_cost(300);
  format %{ "CALL,runtime #@CallRuntimeDirect" %}
  ins_encode( Java_To_Runtime( meth ) );
  ins_pipe( pipe_slow );
  ins_alignment(16);
%}
//------------------------MemBar Instructions-------------------------------
//Memory barrier flavors
//
// Acquire/release barriers are empty encodings: they exist only to keep
// the ideal-graph ordering, relying on the surrounding ld/sc or locking
// sequences. Only LoadFence/StoreFence/MemBarVolatile emit a real
// hardware `sync`.

// MemBarAcquire: no instruction needed, ordering is structural only.
instruct membar_acquire() %{
  match(MemBarAcquire);
  ins_cost(0);

  size(0);
  format %{ "MEMBAR-acquire (empty) @ membar_acquire" %}
  ins_encode();
  ins_pipe(empty);
%}

// LoadFence: full hardware barrier via sync.
instruct load_fence() %{
  match(LoadFence);
  ins_cost(400);

  format %{ "MEMBAR @ load_fence" %}
  ins_encode %{
    __ sync();
  %}
  ins_pipe(pipe_slow);
%}

// MemBarAcquireLock: the CAS in the preceding FastLock already provides
// acquire semantics, so nothing is emitted.
instruct membar_acquire_lock()
%{
  match(MemBarAcquireLock);
  ins_cost(0);

  size(0);
  format %{ "MEMBAR-acquire (acquire as part of CAS in prior FastLock so empty encoding) @ membar_acquire_lock" %}
  ins_encode();
  ins_pipe(empty);
%}

// MemBarRelease: empty encoding.
instruct membar_release() %{
  match(MemBarRelease);
  ins_cost(0);

  size(0);
  format %{ "MEMBAR-release (empty) @ membar_release" %}
  ins_encode();
  ins_pipe(empty);
%}

// StoreFence: full hardware barrier via sync.
instruct store_fence() %{
  match(StoreFence);
  ins_cost(400);

  format %{ "MEMBAR @ store_fence" %}

  ins_encode %{
    __ sync();
  %}

  ins_pipe(pipe_slow);
%}

// MemBarReleaseLock: release is part of the FastUnlock sequence.
instruct membar_release_lock()
%{
  match(MemBarReleaseLock);
  ins_cost(0);

  size(0);
  format %{ "MEMBAR-release-lock (release in FastUnlock so empty) @ membar_release_lock" %}
  ins_encode();
  ins_pipe(empty);
%}

// MemBarVolatile: real sync, skipped entirely on uniprocessor systems.
instruct membar_volatile() %{
  match(MemBarVolatile);
  ins_cost(400);

  format %{ "MEMBAR-volatile" %}
  ins_encode %{
    if( !os::is_MP() ) return; // Not needed on single CPU
    __ sync();

  %}
  ins_pipe(pipe_slow);
%}

// MemBarVolatile made redundant by a following store-load barrier
// (Matcher::post_store_load_barrier) — matched away to nothing.
instruct unnecessary_membar_volatile() %{
  match(MemBarVolatile);
  predicate(Matcher::post_store_load_barrier(n));
  ins_cost(0);

  size(0);
  format %{ "MEMBAR-volatile (unnecessary so empty encoding) @ unnecessary_membar_volatile" %}
  ins_encode( );
  ins_pipe(empty);
%}

// MemBarStoreStore: empty encoding on this port.
instruct membar_storestore() %{
  match(MemBarStoreStore);

  ins_cost(0);
  size(0);
  format %{ "MEMBAR-storestore (empty encoding) @ membar_storestore" %}
  ins_encode( );
  ins_pipe(empty);
%}
//----------Move Instructions--------------------------------------------------

// CastX2P: reinterpret a long as a pointer — a register move at most,
// elided entirely when src and dst are allocated to the same register.
instruct castX2P(mRegP dst, mRegL src) %{
  match(Set dst (CastX2P src));
  format %{ "castX2P $dst, $src @ castX2P" %}
  ins_encode %{
    Register src = $src$$Register;
    Register dst = $dst$$Register;

    if(src != dst)
      __ move(dst, src);
  %}
  ins_cost(10);
  ins_pipe( ialu_regI_mov );
%}

// CastP2X: reinterpret a pointer as a long — mirror of castX2P.
instruct castP2X(mRegL dst, mRegP src ) %{
  match(Set dst (CastP2X src));

  format %{ "mov $dst, $src\t #@castP2X" %}
  ins_encode %{
    Register src = $src$$Register;
    Register dst = $dst$$Register;

    if(src != dst)
      __ move(dst, src);
  %}
  ins_pipe( ialu_regI_mov );
%}

// MoveF2I: raw 32-bit bit copy from FPU register to GPR (mfc1),
// no conversion.
instruct MoveF2I_reg_reg(mRegI dst, regF src) %{
  match(Set dst (MoveF2I src));
  effect(DEF dst, USE src);
  ins_cost(85);
  format %{ "MoveF2I $dst, $src @ MoveF2I_reg_reg" %}
  ins_encode %{
    Register dst = as_Register($dst$$reg);
    FloatRegister src = as_FloatRegister($src$$reg);

    __ mfc1(dst, src);
  %}
  ins_pipe( pipe_slow );
%}

// MoveI2F: raw 32-bit bit copy from GPR to FPU register (mtc1).
instruct MoveI2F_reg_reg(regF dst, mRegI src) %{
  match(Set dst (MoveI2F src));
  effect(DEF dst, USE src);
  ins_cost(85);
  format %{ "MoveI2F $dst, $src @ MoveI2F_reg_reg" %}
  ins_encode %{
    Register src = as_Register($src$$reg);
    FloatRegister dst = as_FloatRegister($dst$$reg);

    __ mtc1(src, dst);
  %}
  ins_pipe( pipe_slow );
%}

// MoveD2L: raw 64-bit bit copy from FPU register to GPR (dmfc1).
instruct MoveD2L_reg_reg(mRegL dst, regD src) %{
  match(Set dst (MoveD2L src));
  effect(DEF dst, USE src);
  ins_cost(85);
  format %{ "MoveD2L $dst, $src @ MoveD2L_reg_reg" %}
  ins_encode %{
    Register dst = as_Register($dst$$reg);
    FloatRegister src = as_FloatRegister($src$$reg);

    __ dmfc1(dst, src);
  %}
  ins_pipe( pipe_slow );
%}

// MoveL2D: raw 64-bit bit copy from GPR to FPU register (dmtc1).
instruct MoveL2D_reg_reg(regD dst, mRegL src) %{
  match(Set dst (MoveL2D src));
  effect(DEF dst, USE src);
  ins_cost(85);
  format %{ "MoveL2D $dst, $src @ MoveL2D_reg_reg" %}
  ins_encode %{
    FloatRegister dst = as_FloatRegister($dst$$reg);
    Register src = as_Register($src$$reg);

    __ dmtc1(src, dst);
  %}
  ins_pipe( pipe_slow );
%}
//----------Conditional Move---------------------------------------------------
// Conditional move
// CMoveI under a signed 32-bit compare. Materializes the condition into
// AT (subu32 difference for eq/ne, slt for ordered cases), then uses the
// MIPS conditional moves movz (move if AT == 0) / movn (move if AT != 0)
// to copy src into dst; dst keeps its old value otherwise.
instruct cmovI_cmpI_reg_reg(mRegI dst, mRegI src, mRegI tmp1, mRegI tmp2, cmpOp cop ) %{
  match(Set dst (CMoveI (Binary cop (CmpI tmp1 tmp2)) (Binary dst src)));
  ins_cost(80);
  format %{
            "CMP$cop $tmp1, $tmp2\t @cmovI_cmpI_reg_reg\n"
            "\tCMOV $dst,$src \t @cmovI_cmpI_reg_reg"
         %}

  ins_encode %{
    Register op1 = $tmp1$$Register;
    Register op2 = $tmp2$$Register;
    Register dst = $dst$$Register;
    Register src = $src$$Register;
    int flag = $cop$$cmpcode;

    switch(flag)
    {
      case 0x01: //equal
        __ subu32(AT, op1, op2);
        __ movz(dst, src, AT);
        break;

      case 0x02: //not_equal
        __ subu32(AT, op1, op2);
        __ movn(dst, src, AT);
        break;

      case 0x03: //greater
        __ slt(AT, op2, op1);
        __ movn(dst, src, AT);
        break;

      case 0x04: //greater_equal
        __ slt(AT, op1, op2);
        __ movz(dst, src, AT);
        break;

      case 0x05: //less
        __ slt(AT, op1, op2);
        __ movn(dst, src, AT);
        break;

      case 0x06: //less_equal
        __ slt(AT, op2, op1);
        __ movz(dst, src, AT);
        break;

      default:
        Unimplemented();
    }
  %}

  ins_pipe( pipe_slow );
%}
// CMoveI under an unsigned pointer compare. Uses subu (the 64-bit
// subtract in this port's macro assembler) for eq/ne on full-width
// pointers and sltu for the unsigned ordered cases, then movz/movn.
instruct cmovI_cmpP_reg_reg(mRegI dst, mRegI src, mRegP tmp1, mRegP tmp2, cmpOpU cop ) %{
  match(Set dst (CMoveI (Binary cop (CmpP tmp1 tmp2)) (Binary dst src)));
  ins_cost(80);
  format %{
            "CMPU$cop $tmp1,$tmp2\t @cmovI_cmpP_reg_reg\n\t"
            "CMOV $dst,$src\t @cmovI_cmpP_reg_reg"
         %}
  ins_encode %{
    Register op1 = $tmp1$$Register;
    Register op2 = $tmp2$$Register;
    Register dst = $dst$$Register;
    Register src = $src$$Register;
    int flag = $cop$$cmpcode;

    switch(flag)
    {
      case 0x01: //equal
        __ subu(AT, op1, op2);
        __ movz(dst, src, AT);
        break;

      case 0x02: //not_equal
        __ subu(AT, op1, op2);
        __ movn(dst, src, AT);
        break;

      case 0x03: //above
        __ sltu(AT, op2, op1);
        __ movn(dst, src, AT);
        break;

      case 0x04: //above_equal
        __ sltu(AT, op1, op2);
        __ movz(dst, src, AT);
        break;

      case 0x05: //below
        __ sltu(AT, op1, op2);
        __ movn(dst, src, AT);
        break;

      case 0x06: //below_equal
        __ sltu(AT, op2, op1);
        __ movz(dst, src, AT);
        break;

      default:
        Unimplemented();
    }
  %}

  ins_pipe( pipe_slow );
%}
// CMoveI under an unsigned compressed-oop compare. Narrow oops are
// 32-bit encodings, so eq/ne use the 32-bit subu32 difference; ordered
// cases use sltu; then movz/movn select src into dst.
instruct cmovI_cmpN_reg_reg(mRegI dst, mRegI src, mRegN tmp1, mRegN tmp2, cmpOpU cop ) %{
  match(Set dst (CMoveI (Binary cop (CmpN tmp1 tmp2)) (Binary dst src)));
  ins_cost(80);
  format %{
            "CMPU$cop $tmp1,$tmp2\t @cmovI_cmpN_reg_reg\n\t"
            "CMOV $dst,$src\t @cmovI_cmpN_reg_reg"
         %}
  ins_encode %{
    Register op1 = $tmp1$$Register;
    Register op2 = $tmp2$$Register;
    Register dst = $dst$$Register;
    Register src = $src$$Register;
    int flag = $cop$$cmpcode;

    switch(flag)
    {
      case 0x01: //equal
        __ subu32(AT, op1, op2);
        __ movz(dst, src, AT);
        break;

      case 0x02: //not_equal
        __ subu32(AT, op1, op2);
        __ movn(dst, src, AT);
        break;

      case 0x03: //above
        __ sltu(AT, op2, op1);
        __ movn(dst, src, AT);
        break;

      case 0x04: //above_equal
        __ sltu(AT, op1, op2);
        __ movz(dst, src, AT);
        break;

      case 0x05: //below
        __ sltu(AT, op1, op2);
        __ movn(dst, src, AT);
        break;

      case 0x06: //below_equal
        __ sltu(AT, op2, op1);
        __ movz(dst, src, AT);
        break;

      default:
        Unimplemented();
    }
  %}

  ins_pipe( pipe_slow );
%}
// CMoveP under an unsigned compressed-oop compare: same condition
// scheme as cmovI_cmpN_reg_reg (subu32 for eq/ne on 32-bit narrow
// encodings, sltu for ordered), but selecting between pointer registers.
instruct cmovP_cmpN_reg_reg(mRegP dst, mRegP src, mRegN tmp1, mRegN tmp2, cmpOpU cop ) %{
  match(Set dst (CMoveP (Binary cop (CmpN tmp1 tmp2)) (Binary dst src)));
  ins_cost(80);
  format %{
            "CMPU$cop $tmp1,$tmp2\t @cmovP_cmpN_reg_reg\n\t"
            "CMOV $dst,$src\t @cmovP_cmpN_reg_reg"
         %}
  ins_encode %{
    Register op1 = $tmp1$$Register;
    Register op2 = $tmp2$$Register;
    Register dst = $dst$$Register;
    Register src = $src$$Register;
    int flag = $cop$$cmpcode;

    switch(flag)
    {
      case 0x01: //equal
        __ subu32(AT, op1, op2);
        __ movz(dst, src, AT);
        break;

      case 0x02: //not_equal
        __ subu32(AT, op1, op2);
        __ movn(dst, src, AT);
        break;

      case 0x03: //above
        __ sltu(AT, op2, op1);
        __ movn(dst, src, AT);
        break;

      case 0x04: //above_equal
        __ sltu(AT, op1, op2);
        __ movz(dst, src, AT);
        break;

      case 0x05: //below
        __ sltu(AT, op1, op2);
        __ movn(dst, src, AT);
        break;

      case 0x06: //below_equal
        __ sltu(AT, op2, op1);
        __ movz(dst, src, AT);
        break;

      default:
        Unimplemented();
    }
  %}

  ins_pipe( pipe_slow );
%}
// Conditional move of a narrow oop: dst = src when the unsigned condition $cop
// holds for the two (full-width) pointer operands.  Uses the 64-bit subu/sltu
// forms since pointers are 64-bit.  AT is clobbered as scratch.
8562 instruct cmovN_cmpP_reg_reg(mRegN dst, mRegN src, mRegP tmp1, mRegP tmp2, cmpOpU cop ) %{
8563 match(Set dst (CMoveN (Binary cop (CmpP tmp1 tmp2)) (Binary dst src)));
8564 ins_cost(80);
8565 format %{
8566 "CMPU$cop $tmp1,$tmp2\t @cmovN_cmpP_reg_reg\n\t"
8567 "CMOV $dst,$src\t @cmovN_cmpP_reg_reg"
8568 %}
8569 ins_encode %{
8570 Register op1 = $tmp1$$Register;
8571 Register op2 = $tmp2$$Register;
8572 Register dst = $dst$$Register;
8573 Register src = $src$$Register;
8574 int flag = $cop$$cmpcode;
8576 switch(flag)
8577 {
8578 case 0x01: //equal
8579 __ subu(AT, op1, op2);
8580 __ movz(dst, src, AT);
8581 break;
8583 case 0x02: //not_equal
8584 __ subu(AT, op1, op2);
8585 __ movn(dst, src, AT);
8586 break;
8588 case 0x03: //above
8589 __ sltu(AT, op2, op1);
8590 __ movn(dst, src, AT);
8591 break;
8593 case 0x04: //above_equal
8594 __ sltu(AT, op1, op2);
8595 __ movz(dst, src, AT);
8596 break;
8598 case 0x05: //below
8599 __ sltu(AT, op1, op2);
8600 __ movn(dst, src, AT);
8601 break;
8603 case 0x06: //below_equal
8604 __ sltu(AT, op2, op1);
8605 __ movz(dst, src, AT);
8606 break;
8608 default:
8609 Unimplemented();
8610 }
8611 %}
8613 ins_pipe( pipe_slow );
8614 %}
// Conditional move of a pointer keyed on a double compare.  The c.cond.d
// instructions set the FP condition bit; movt moves when it is set, movf when
// clear.  The ordered/unordered variants are chosen so that a NaN operand
// never satisfies greater/greater_equal (c_ole/c_olt false on NaN) but does
// satisfy not_equal, less and less_equal via the unordered forms.
8616 instruct cmovP_cmpD_reg_reg(mRegP dst, mRegP src, regD tmp1, regD tmp2, cmpOp cop ) %{
8617 match(Set dst (CMoveP (Binary cop (CmpD tmp1 tmp2)) (Binary dst src)));
8618 ins_cost(80);
8619 format %{
8620 "CMP$cop $tmp1, $tmp2\t @cmovP_cmpD_reg_reg\n"
8621 "\tCMOV $dst,$src \t @cmovP_cmpD_reg_reg"
8622 %}
8623 ins_encode %{
8624 FloatRegister reg_op1 = as_FloatRegister($tmp1$$reg);
8625 FloatRegister reg_op2 = as_FloatRegister($tmp2$$reg);
8626 Register dst = as_Register($dst$$reg);
8627 Register src = as_Register($src$$reg);
8629 int flag = $cop$$cmpcode;
8631 switch(flag)
8632 {
8633 case 0x01: //equal
8634 __ c_eq_d(reg_op1, reg_op2);
8635 __ movt(dst, src);
8636 break;
8637 case 0x02: //not_equal
8638 __ c_eq_d(reg_op1, reg_op2);
8639 __ movf(dst, src);
8640 break;
8641 case 0x03: //greater
8642 __ c_ole_d(reg_op1, reg_op2);
8643 __ movf(dst, src);
8644 break;
8645 case 0x04: //greater_equal
8646 __ c_olt_d(reg_op1, reg_op2);
8647 __ movf(dst, src);
8648 break;
8649 case 0x05: //less
8650 __ c_ult_d(reg_op1, reg_op2);
8651 __ movt(dst, src);
8652 break;
8653 case 0x06: //less_equal
8654 __ c_ule_d(reg_op1, reg_op2);
8655 __ movt(dst, src);
8656 break;
8657 default:
8658 Unimplemented();
8659 }
8660 %}
8662 ins_pipe( pipe_slow );
8663 %}
// Conditional move of a narrow oop keyed on an unsigned compare of two narrow
// oops.  AT is clobbered as scratch.
8666 instruct cmovN_cmpN_reg_reg(mRegN dst, mRegN src, mRegN tmp1, mRegN tmp2, cmpOpU cop ) %{
8667 match(Set dst (CMoveN (Binary cop (CmpN tmp1 tmp2)) (Binary dst src)));
8668 ins_cost(80);
8669 format %{
8670 "CMPU$cop $tmp1,$tmp2\t @cmovN_cmpN_reg_reg\n\t"
8671 "CMOV $dst,$src\t @cmovN_cmpN_reg_reg"
8672 %}
8673 ins_encode %{
8674 Register op1 = $tmp1$$Register;
8675 Register op2 = $tmp2$$Register;
8676 Register dst = $dst$$Register;
8677 Register src = $src$$Register;
8678 int flag = $cop$$cmpcode;
8680 switch(flag)
8681 {
8682 case 0x01: //equal
8683 __ subu32(AT, op1, op2);
8684 __ movz(dst, src, AT);
8685 break;
8687 case 0x02: //not_equal
8688 __ subu32(AT, op1, op2);
8689 __ movn(dst, src, AT);
8690 break;
8692 case 0x03: //above
8693 __ sltu(AT, op2, op1);
8694 __ movn(dst, src, AT);
8695 break;
8697 case 0x04: //above_equal
8698 __ sltu(AT, op1, op2);
8699 __ movz(dst, src, AT);
8700 break;
8702 case 0x05: //below
8703 __ sltu(AT, op1, op2);
8704 __ movn(dst, src, AT);
8705 break;
8707 case 0x06: //below_equal
8708 __ sltu(AT, op2, op1);
8709 __ movz(dst, src, AT);
8710 break;
8712 default:
8713 Unimplemented();
8714 }
8715 %}
8717 ins_pipe( pipe_slow );
8718 %}
// Conditional move of an int keyed on an unsigned 32-bit compare (CmpU).
// NOTE(review): equal/not_equal use the 64-bit subu and ordering uses the
// 64-bit sltu; this is correct provided both inputs are sign-extended 32-bit
// values (standard for ints on this port) — the 64-bit difference is zero iff
// the 32-bit values are equal, and sign extension preserves unsigned order.
8721 instruct cmovI_cmpU_reg_reg(mRegI dst, mRegI src, mRegI tmp1, mRegI tmp2, cmpOpU cop ) %{
8722 match(Set dst (CMoveI (Binary cop (CmpU tmp1 tmp2)) (Binary dst src)));
8723 ins_cost(80);
8724 format %{
8725 "CMPU$cop $tmp1,$tmp2\t @cmovI_cmpU_reg_reg\n\t"
8726 "CMOV $dst,$src\t @cmovI_cmpU_reg_reg"
8727 %}
8728 ins_encode %{
8729 Register op1 = $tmp1$$Register;
8730 Register op2 = $tmp2$$Register;
8731 Register dst = $dst$$Register;
8732 Register src = $src$$Register;
8733 int flag = $cop$$cmpcode;
8735 switch(flag)
8736 {
8737 case 0x01: //equal
8738 __ subu(AT, op1, op2);
8739 __ movz(dst, src, AT);
8740 break;
8742 case 0x02: //not_equal
8743 __ subu(AT, op1, op2);
8744 __ movn(dst, src, AT);
8745 break;
8747 case 0x03: //above
8748 __ sltu(AT, op2, op1);
8749 __ movn(dst, src, AT);
8750 break;
8752 case 0x04: //above_equal
8753 __ sltu(AT, op1, op2);
8754 __ movz(dst, src, AT);
8755 break;
8757 case 0x05: //below
8758 __ sltu(AT, op1, op2);
8759 __ movn(dst, src, AT);
8760 break;
8762 case 0x06: //below_equal
8763 __ sltu(AT, op2, op1);
8764 __ movz(dst, src, AT);
8765 break;
8767 default:
8768 Unimplemented();
8769 }
8770 %}
8772 ins_pipe( pipe_slow );
8773 %}
// Conditional move of an int keyed on a signed 64-bit long compare.
// AT is clobbered as scratch.
8775 instruct cmovI_cmpL_reg_reg(mRegI dst, mRegI src, mRegL tmp1, mRegL tmp2, cmpOp cop ) %{
8776 match(Set dst (CMoveI (Binary cop (CmpL tmp1 tmp2)) (Binary dst src)));
8777 ins_cost(80);
8778 format %{
8779 "CMP$cop $tmp1, $tmp2\t @cmovI_cmpL_reg_reg\n"
8780 "\tCMOV $dst,$src \t @cmovI_cmpL_reg_reg"
8781 %}
8782 ins_encode %{
8783 Register opr1 = as_Register($tmp1$$reg);
8784 Register opr2 = as_Register($tmp2$$reg);
8785 Register dst = $dst$$Register;
8786 Register src = $src$$Register;
8787 int flag = $cop$$cmpcode;
8789 switch(flag)
8790 {
8791 case 0x01: //equal
8792 __ subu(AT, opr1, opr2);
8793 __ movz(dst, src, AT);
8794 break;
8796 case 0x02: //not_equal
8797 __ subu(AT, opr1, opr2);
8798 __ movn(dst, src, AT);
8799 break;
8801 case 0x03: //greater
8802 __ slt(AT, opr2, opr1);
8803 __ movn(dst, src, AT);
8804 break;
8806 case 0x04: //greater_equal
8807 __ slt(AT, opr1, opr2);
8808 __ movz(dst, src, AT);
8809 break;
8811 case 0x05: //less
8812 __ slt(AT, opr1, opr2);
8813 __ movn(dst, src, AT);
8814 break;
8816 case 0x06: //less_equal
8817 __ slt(AT, opr2, opr1);
8818 __ movz(dst, src, AT);
8819 break;
8821 default:
8822 Unimplemented();
8823 }
8824 %}
8826 ins_pipe( pipe_slow );
8827 %}
// Conditional move of a pointer keyed on a signed 64-bit long compare.
// AT is clobbered as scratch.
8829 instruct cmovP_cmpL_reg_reg(mRegP dst, mRegP src, mRegL tmp1, mRegL tmp2, cmpOp cop ) %{
8830 match(Set dst (CMoveP (Binary cop (CmpL tmp1 tmp2)) (Binary dst src)));
8831 ins_cost(80);
8832 format %{
8833 "CMP$cop $tmp1, $tmp2\t @cmovP_cmpL_reg_reg\n"
8834 "\tCMOV $dst,$src \t @cmovP_cmpL_reg_reg"
8835 %}
8836 ins_encode %{
8837 Register opr1 = as_Register($tmp1$$reg);
8838 Register opr2 = as_Register($tmp2$$reg);
8839 Register dst = $dst$$Register;
8840 Register src = $src$$Register;
8841 int flag = $cop$$cmpcode;
8843 switch(flag)
8844 {
8845 case 0x01: //equal
8846 __ subu(AT, opr1, opr2);
8847 __ movz(dst, src, AT);
8848 break;
8850 case 0x02: //not_equal
8851 __ subu(AT, opr1, opr2);
8852 __ movn(dst, src, AT);
8853 break;
8855 case 0x03: //greater
8856 __ slt(AT, opr2, opr1);
8857 __ movn(dst, src, AT);
8858 break;
8860 case 0x04: //greater_equal
8861 __ slt(AT, opr1, opr2);
8862 __ movz(dst, src, AT);
8863 break;
8865 case 0x05: //less
8866 __ slt(AT, opr1, opr2);
8867 __ movn(dst, src, AT);
8868 break;
8870 case 0x06: //less_equal
8871 __ slt(AT, opr2, opr1);
8872 __ movz(dst, src, AT);
8873 break;
8875 default:
8876 Unimplemented();
8877 }
8878 %}
8880 ins_pipe( pipe_slow );
8881 %}
// Conditional move of an int keyed on a double compare via the FP condition
// bit (movt/movf).  NaN behavior: not_equal/less/less_equal select the move
// when the operands are unordered; greater/greater_equal do not.
8883 instruct cmovI_cmpD_reg_reg(mRegI dst, mRegI src, regD tmp1, regD tmp2, cmpOp cop ) %{
8884 match(Set dst (CMoveI (Binary cop (CmpD tmp1 tmp2)) (Binary dst src)));
8885 ins_cost(80);
8886 format %{
8887 "CMP$cop $tmp1, $tmp2\t @cmovI_cmpD_reg_reg\n"
8888 "\tCMOV $dst,$src \t @cmovI_cmpD_reg_reg"
8889 %}
8890 ins_encode %{
8891 FloatRegister reg_op1 = as_FloatRegister($tmp1$$reg);
8892 FloatRegister reg_op2 = as_FloatRegister($tmp2$$reg);
8893 Register dst = as_Register($dst$$reg);
8894 Register src = as_Register($src$$reg);
8896 int flag = $cop$$cmpcode;
8898 switch(flag)
8899 {
8900 case 0x01: //equal
8901 __ c_eq_d(reg_op1, reg_op2);
8902 __ movt(dst, src);
8903 break;
8904 case 0x02: //not_equal
8905 // 2016/4/19 aoqi: See instruct branchConD_reg_reg. The change in branchConD_reg_reg fixed a bug. It seems similar here, so I made the same change.
8906 __ c_eq_d(reg_op1, reg_op2);
8907 __ movf(dst, src);
8908 break;
8909 case 0x03: //greater
8910 __ c_ole_d(reg_op1, reg_op2);
8911 __ movf(dst, src);
8912 break;
8913 case 0x04: //greater_equal
8914 __ c_olt_d(reg_op1, reg_op2);
8915 __ movf(dst, src);
8916 break;
8917 case 0x05: //less
8918 __ c_ult_d(reg_op1, reg_op2);
8919 __ movt(dst, src);
8920 break;
8921 case 0x06: //less_equal
8922 __ c_ule_d(reg_op1, reg_op2);
8923 __ movt(dst, src);
8924 break;
8925 default:
8926 Unimplemented();
8927 }
8928 %}
8930 ins_pipe( pipe_slow );
8931 %}
// Conditional move of a pointer keyed on an unsigned 64-bit pointer compare.
// AT is clobbered as scratch.
8934 instruct cmovP_cmpP_reg_reg(mRegP dst, mRegP src, mRegP tmp1, mRegP tmp2, cmpOpU cop ) %{
8935 match(Set dst (CMoveP (Binary cop (CmpP tmp1 tmp2)) (Binary dst src)));
8936 ins_cost(80);
8937 format %{
8938 "CMPU$cop $tmp1,$tmp2\t @cmovP_cmpP_reg_reg\n\t"
8939 "CMOV $dst,$src\t @cmovP_cmpP_reg_reg"
8940 %}
8941 ins_encode %{
8942 Register op1 = $tmp1$$Register;
8943 Register op2 = $tmp2$$Register;
8944 Register dst = $dst$$Register;
8945 Register src = $src$$Register;
8946 int flag = $cop$$cmpcode;
8948 switch(flag)
8949 {
8950 case 0x01: //equal
8951 __ subu(AT, op1, op2);
8952 __ movz(dst, src, AT);
8953 break;
8955 case 0x02: //not_equal
8956 __ subu(AT, op1, op2);
8957 __ movn(dst, src, AT);
8958 break;
8960 case 0x03: //above
8961 __ sltu(AT, op2, op1);
8962 __ movn(dst, src, AT);
8963 break;
8965 case 0x04: //above_equal
8966 __ sltu(AT, op1, op2);
8967 __ movz(dst, src, AT);
8968 break;
8970 case 0x05: //below
8971 __ sltu(AT, op1, op2);
8972 __ movn(dst, src, AT);
8973 break;
8975 case 0x06: //below_equal
8976 __ sltu(AT, op2, op1);
8977 __ movz(dst, src, AT);
8978 break;
8980 default:
8981 Unimplemented();
8982 }
8983 %}
8985 ins_pipe( pipe_slow );
8986 %}
// Conditional move of a pointer keyed on a SIGNED int compare (cmpOp/slt).
// The original case labels said "above/below", which are unsigned terms; they
// are corrected below to match the signed slt actually emitted.
8988 instruct cmovP_cmpI_reg_reg(mRegP dst, mRegP src, mRegI tmp1, mRegI tmp2, cmpOp cop ) %{
8989 match(Set dst (CMoveP (Binary cop (CmpI tmp1 tmp2)) (Binary dst src)));
8990 ins_cost(80);
8991 format %{
8992 "CMP$cop $tmp1,$tmp2\t @cmovP_cmpI_reg_reg\n\t"
8993 "CMOV $dst,$src\t @cmovP_cmpI_reg_reg"
8994 %}
8995 ins_encode %{
8996 Register op1 = $tmp1$$Register;
8997 Register op2 = $tmp2$$Register;
8998 Register dst = $dst$$Register;
8999 Register src = $src$$Register;
9000 int flag = $cop$$cmpcode;
9002 switch(flag)
9003 {
9004 case 0x01: //equal
9005 __ subu32(AT, op1, op2);
9006 __ movz(dst, src, AT);
9007 break;
9009 case 0x02: //not_equal
9010 __ subu32(AT, op1, op2);
9011 __ movn(dst, src, AT);
9012 break;
9014 case 0x03: //greater (signed)
9015 __ slt(AT, op2, op1);
9016 __ movn(dst, src, AT);
9017 break;
9019 case 0x04: //greater_equal (signed)
9020 __ slt(AT, op1, op2);
9021 __ movz(dst, src, AT);
9022 break;
9024 case 0x05: //less (signed)
9025 __ slt(AT, op1, op2);
9026 __ movn(dst, src, AT);
9027 break;
9029 case 0x06: //less_equal (signed)
9030 __ slt(AT, op2, op1);
9031 __ movz(dst, src, AT);
9032 break;
9034 default:
9035 Unimplemented();
9036 }
9037 %}
9039 ins_pipe( pipe_slow );
9040 %}
// Conditional move of a narrow oop keyed on a SIGNED int compare (cmpOp/slt).
// Case labels corrected from the unsigned "above/below" wording to the signed
// terms matching the slt instructions actually emitted.
9042 instruct cmovN_cmpI_reg_reg(mRegN dst, mRegN src, mRegI tmp1, mRegI tmp2, cmpOp cop ) %{
9043 match(Set dst (CMoveN (Binary cop (CmpI tmp1 tmp2)) (Binary dst src)));
9044 ins_cost(80);
9045 format %{
9046 "CMP$cop $tmp1,$tmp2\t @cmovN_cmpI_reg_reg\n\t"
9047 "CMOV $dst,$src\t @cmovN_cmpI_reg_reg"
9048 %}
9049 ins_encode %{
9050 Register op1 = $tmp1$$Register;
9051 Register op2 = $tmp2$$Register;
9052 Register dst = $dst$$Register;
9053 Register src = $src$$Register;
9054 int flag = $cop$$cmpcode;
9056 switch(flag)
9057 {
9058 case 0x01: //equal
9059 __ subu32(AT, op1, op2);
9060 __ movz(dst, src, AT);
9061 break;
9063 case 0x02: //not_equal
9064 __ subu32(AT, op1, op2);
9065 __ movn(dst, src, AT);
9066 break;
9068 case 0x03: //greater (signed)
9069 __ slt(AT, op2, op1);
9070 __ movn(dst, src, AT);
9071 break;
9073 case 0x04: //greater_equal (signed)
9074 __ slt(AT, op1, op2);
9075 __ movz(dst, src, AT);
9076 break;
9078 case 0x05: //less (signed)
9079 __ slt(AT, op1, op2);
9080 __ movn(dst, src, AT);
9081 break;
9083 case 0x06: //less_equal (signed)
9084 __ slt(AT, op2, op1);
9085 __ movz(dst, src, AT);
9086 break;
9088 default:
9089 Unimplemented();
9090 }
9091 %}
9093 ins_pipe( pipe_slow );
9094 %}
// Conditional move of a long keyed on a signed int compare.  AT is clobbered.
9097 instruct cmovL_cmpI_reg_reg(mRegL dst, mRegL src, mRegI tmp1, mRegI tmp2, cmpOp cop ) %{
9098 match(Set dst (CMoveL (Binary cop (CmpI tmp1 tmp2)) (Binary dst src)));
9099 ins_cost(80);
9100 format %{
9101 "CMP$cop $tmp1, $tmp2\t @cmovL_cmpI_reg_reg\n"
9102 "\tCMOV $dst,$src \t @cmovL_cmpI_reg_reg"
9103 %}
9105 ins_encode %{
9106 Register op1 = $tmp1$$Register;
9107 Register op2 = $tmp2$$Register;
9108 Register dst = as_Register($dst$$reg);
9109 Register src = as_Register($src$$reg);
9110 int flag = $cop$$cmpcode;
9112 switch(flag)
9113 {
9114 case 0x01: //equal
9115 __ subu32(AT, op1, op2);
9116 __ movz(dst, src, AT);
9117 break;
9119 case 0x02: //not_equal
9120 __ subu32(AT, op1, op2);
9121 __ movn(dst, src, AT);
9122 break;
9124 case 0x03: //greater
9125 __ slt(AT, op2, op1);
9126 __ movn(dst, src, AT);
9127 break;
9129 case 0x04: //greater_equal
9130 __ slt(AT, op1, op2);
9131 __ movz(dst, src, AT);
9132 break;
9134 case 0x05: //less
9135 __ slt(AT, op1, op2);
9136 __ movn(dst, src, AT);
9137 break;
9139 case 0x06: //less_equal
9140 __ slt(AT, op2, op1);
9141 __ movz(dst, src, AT);
9142 break;
9144 default:
9145 Unimplemented();
9146 }
9147 %}
9149 ins_pipe( pipe_slow );
9150 %}
// Conditional move of a long keyed on a signed 64-bit long compare.
// AT is clobbered as scratch.
9152 instruct cmovL_cmpL_reg_reg(mRegL dst, mRegL src, mRegL tmp1, mRegL tmp2, cmpOp cop ) %{
9153 match(Set dst (CMoveL (Binary cop (CmpL tmp1 tmp2)) (Binary dst src)));
9154 ins_cost(80);
9155 format %{
9156 "CMP$cop $tmp1, $tmp2\t @cmovL_cmpL_reg_reg\n"
9157 "\tCMOV $dst,$src \t @cmovL_cmpL_reg_reg"
9158 %}
9159 ins_encode %{
9160 Register opr1 = as_Register($tmp1$$reg);
9161 Register opr2 = as_Register($tmp2$$reg);
9162 Register dst = as_Register($dst$$reg);
9163 Register src = as_Register($src$$reg);
9164 int flag = $cop$$cmpcode;
9166 switch(flag)
9167 {
9168 case 0x01: //equal
9169 __ subu(AT, opr1, opr2);
9170 __ movz(dst, src, AT);
9171 break;
9173 case 0x02: //not_equal
9174 __ subu(AT, opr1, opr2);
9175 __ movn(dst, src, AT);
9176 break;
9178 case 0x03: //greater
9179 __ slt(AT, opr2, opr1);
9180 __ movn(dst, src, AT);
9181 break;
9183 case 0x04: //greater_equal
9184 __ slt(AT, opr1, opr2);
9185 __ movz(dst, src, AT);
9186 break;
9188 case 0x05: //less
9189 __ slt(AT, opr1, opr2);
9190 __ movn(dst, src, AT);
9191 break;
9193 case 0x06: //less_equal
9194 __ slt(AT, opr2, opr1);
9195 __ movz(dst, src, AT);
9196 break;
9198 default:
9199 Unimplemented();
9200 }
9201 %}
9203 ins_pipe( pipe_slow );
9204 %}
// Conditional move of a long keyed on an unsigned narrow-oop compare.
// AT is clobbered as scratch.
9206 instruct cmovL_cmpN_reg_reg(mRegL dst, mRegL src, mRegN tmp1, mRegN tmp2, cmpOpU cop ) %{
9207 match(Set dst (CMoveL (Binary cop (CmpN tmp1 tmp2)) (Binary dst src)));
9208 ins_cost(80);
9209 format %{
9210 "CMPU$cop $tmp1,$tmp2\t @cmovL_cmpN_reg_reg\n\t"
9211 "CMOV $dst,$src\t @cmovL_cmpN_reg_reg"
9212 %}
9213 ins_encode %{
9214 Register op1 = $tmp1$$Register;
9215 Register op2 = $tmp2$$Register;
9216 Register dst = $dst$$Register;
9217 Register src = $src$$Register;
9218 int flag = $cop$$cmpcode;
9220 switch(flag)
9221 {
9222 case 0x01: //equal
9223 __ subu32(AT, op1, op2);
9224 __ movz(dst, src, AT);
9225 break;
9227 case 0x02: //not_equal
9228 __ subu32(AT, op1, op2);
9229 __ movn(dst, src, AT);
9230 break;
9232 case 0x03: //above
9233 __ sltu(AT, op2, op1);
9234 __ movn(dst, src, AT);
9235 break;
9237 case 0x04: //above_equal
9238 __ sltu(AT, op1, op2);
9239 __ movz(dst, src, AT);
9240 break;
9242 case 0x05: //below
9243 __ sltu(AT, op1, op2);
9244 __ movn(dst, src, AT);
9245 break;
9247 case 0x06: //below_equal
9248 __ sltu(AT, op2, op1);
9249 __ movz(dst, src, AT);
9250 break;
9252 default:
9253 Unimplemented();
9254 }
9255 %}
9257 ins_pipe( pipe_slow );
9258 %}
// Conditional move of a long keyed on a double compare via the FP condition
// bit.  Unordered (NaN) operands satisfy not_equal/less/less_equal only.
9261 instruct cmovL_cmpD_reg_reg(mRegL dst, mRegL src, regD tmp1, regD tmp2, cmpOp cop ) %{
9262 match(Set dst (CMoveL (Binary cop (CmpD tmp1 tmp2)) (Binary dst src)));
9263 ins_cost(80);
9264 format %{
9265 "CMP$cop $tmp1, $tmp2\t @cmovL_cmpD_reg_reg\n"
9266 "\tCMOV $dst,$src \t @cmovL_cmpD_reg_reg"
9267 %}
9268 ins_encode %{
9269 FloatRegister reg_op1 = as_FloatRegister($tmp1$$reg);
9270 FloatRegister reg_op2 = as_FloatRegister($tmp2$$reg);
9271 Register dst = as_Register($dst$$reg);
9272 Register src = as_Register($src$$reg);
9274 int flag = $cop$$cmpcode;
9276 switch(flag)
9277 {
9278 case 0x01: //equal
9279 __ c_eq_d(reg_op1, reg_op2);
9280 __ movt(dst, src);
9281 break;
9282 case 0x02: //not_equal
9283 __ c_eq_d(reg_op1, reg_op2);
9284 __ movf(dst, src);
9285 break;
9286 case 0x03: //greater
9287 __ c_ole_d(reg_op1, reg_op2);
9288 __ movf(dst, src);
9289 break;
9290 case 0x04: //greater_equal
9291 __ c_olt_d(reg_op1, reg_op2);
9292 __ movf(dst, src);
9293 break;
9294 case 0x05: //less
9295 __ c_ult_d(reg_op1, reg_op2);
9296 __ movt(dst, src);
9297 break;
9298 case 0x06: //less_equal
9299 __ c_ule_d(reg_op1, reg_op2);
9300 __ movt(dst, src);
9301 break;
9302 default:
9303 Unimplemented();
9304 }
9305 %}
9307 ins_pipe( pipe_slow );
9308 %}
// Conditional move of a double keyed on a double compare.  There is no FP
// conditional-move used here; instead each case branches around the mov_d
// when the condition does NOT hold (bc1f/bc1t with a nop in the delay slot).
9310 instruct cmovD_cmpD_reg_reg(regD dst, regD src, regD tmp1, regD tmp2, cmpOp cop ) %{
9311 match(Set dst (CMoveD (Binary cop (CmpD tmp1 tmp2)) (Binary dst src)));
9312 ins_cost(200);
9313 format %{
9314 "CMP$cop $tmp1, $tmp2\t @cmovD_cmpD_reg_reg\n"
9315 "\tCMOV $dst,$src \t @cmovD_cmpD_reg_reg"
9316 %}
9317 ins_encode %{
9318 FloatRegister reg_op1 = as_FloatRegister($tmp1$$reg);
9319 FloatRegister reg_op2 = as_FloatRegister($tmp2$$reg);
9320 FloatRegister dst = as_FloatRegister($dst$$reg);
9321 FloatRegister src = as_FloatRegister($src$$reg);
9323 int flag = $cop$$cmpcode;
9325 Label L;
9327 switch(flag)
9328 {
9329 case 0x01: //equal
9330 __ c_eq_d(reg_op1, reg_op2);
9331 __ bc1f(L);
9332 __ nop();
9333 __ mov_d(dst, src);
9334 __ bind(L);
9335 break;
9336 case 0x02: //not_equal
9337 // 2016/4/19 aoqi: See instruct branchConD_reg_reg. The change in branchConD_reg_reg fixed a bug. It seems similar here, so I made the same change.
9338 __ c_eq_d(reg_op1, reg_op2);
9339 __ bc1t(L);
9340 __ nop();
9341 __ mov_d(dst, src);
9342 __ bind(L);
9343 break;
9344 case 0x03: //greater
9345 __ c_ole_d(reg_op1, reg_op2);
9346 __ bc1t(L);
9347 __ nop();
9348 __ mov_d(dst, src);
9349 __ bind(L);
9350 break;
9351 case 0x04: //greater_equal
9352 __ c_olt_d(reg_op1, reg_op2);
9353 __ bc1t(L);
9354 __ nop();
9355 __ mov_d(dst, src);
9356 __ bind(L);
9357 break;
9358 case 0x05: //less
9359 __ c_ult_d(reg_op1, reg_op2);
9360 __ bc1f(L);
9361 __ nop();
9362 __ mov_d(dst, src);
9363 __ bind(L);
9364 break;
9365 case 0x06: //less_equal
9366 __ c_ule_d(reg_op1, reg_op2);
9367 __ bc1f(L);
9368 __ nop();
9369 __ mov_d(dst, src);
9370 __ bind(L);
9371 break;
9372 default:
9373 Unimplemented();
9374 }
9375 %}
9377 ins_pipe( pipe_slow );
9378 %}
// Conditional move of a float keyed on a signed int compare: branch around
// the mov_s when the condition does not hold.  "great" in the original case
// labels corrected to "greater".  AT is clobbered as scratch.
9380 instruct cmovF_cmpI_reg_reg(regF dst, regF src, mRegI tmp1, mRegI tmp2, cmpOp cop ) %{
9381 match(Set dst (CMoveF (Binary cop (CmpI tmp1 tmp2)) (Binary dst src)));
9382 ins_cost(200);
9383 format %{
9384 "CMP$cop $tmp1, $tmp2\t @cmovF_cmpI_reg_reg\n"
9385 "\tCMOV $dst, $src \t @cmovF_cmpI_reg_reg"
9386 %}
9388 ins_encode %{
9389 Register op1 = $tmp1$$Register;
9390 Register op2 = $tmp2$$Register;
9391 FloatRegister dst = as_FloatRegister($dst$$reg);
9392 FloatRegister src = as_FloatRegister($src$$reg);
9393 int flag = $cop$$cmpcode;
9394 Label L;
9396 switch(flag)
9397 {
9398 case 0x01: //equal
9399 __ bne(op1, op2, L);
9400 __ nop();
9401 __ mov_s(dst, src);
9402 __ bind(L);
9403 break;
9404 case 0x02: //not_equal
9405 __ beq(op1, op2, L);
9406 __ nop();
9407 __ mov_s(dst, src);
9408 __ bind(L);
9409 break;
9410 case 0x03: //greater
9411 __ slt(AT, op2, op1);
9412 __ beq(AT, R0, L);
9413 __ nop();
9414 __ mov_s(dst, src);
9415 __ bind(L);
9416 break;
9417 case 0x04: //greater_equal
9418 __ slt(AT, op1, op2);
9419 __ bne(AT, R0, L);
9420 __ nop();
9421 __ mov_s(dst, src);
9422 __ bind(L);
9423 break;
9424 case 0x05: //less
9425 __ slt(AT, op1, op2);
9426 __ beq(AT, R0, L);
9427 __ nop();
9428 __ mov_s(dst, src);
9429 __ bind(L);
9430 break;
9431 case 0x06: //less_equal
9432 __ slt(AT, op2, op1);
9433 __ bne(AT, R0, L);
9434 __ nop();
9435 __ mov_s(dst, src);
9436 __ bind(L);
9437 break;
9438 default:
9439 Unimplemented();
9440 }
9441 %}
9443 ins_pipe( pipe_slow );
9444 %}
// Conditional move of a double keyed on a signed int compare: branch around
// the mov_d when the condition does not hold.  "great" labels corrected.
// AT is clobbered as scratch.
9446 instruct cmovD_cmpI_reg_reg(regD dst, regD src, mRegI tmp1, mRegI tmp2, cmpOp cop ) %{
9447 match(Set dst (CMoveD (Binary cop (CmpI tmp1 tmp2)) (Binary dst src)));
9448 ins_cost(200);
9449 format %{
9450 "CMP$cop $tmp1, $tmp2\t @cmovD_cmpI_reg_reg\n"
9451 "\tCMOV $dst, $src \t @cmovD_cmpI_reg_reg"
9452 %}
9454 ins_encode %{
9455 Register op1 = $tmp1$$Register;
9456 Register op2 = $tmp2$$Register;
9457 FloatRegister dst = as_FloatRegister($dst$$reg);
9458 FloatRegister src = as_FloatRegister($src$$reg);
9459 int flag = $cop$$cmpcode;
9460 Label L;
9462 switch(flag)
9463 {
9464 case 0x01: //equal
9465 __ bne(op1, op2, L);
9466 __ nop();
9467 __ mov_d(dst, src);
9468 __ bind(L);
9469 break;
9470 case 0x02: //not_equal
9471 __ beq(op1, op2, L);
9472 __ nop();
9473 __ mov_d(dst, src);
9474 __ bind(L);
9475 break;
9476 case 0x03: //greater
9477 __ slt(AT, op2, op1);
9478 __ beq(AT, R0, L);
9479 __ nop();
9480 __ mov_d(dst, src);
9481 __ bind(L);
9482 break;
9483 case 0x04: //greater_equal
9484 __ slt(AT, op1, op2);
9485 __ bne(AT, R0, L);
9486 __ nop();
9487 __ mov_d(dst, src);
9488 __ bind(L);
9489 break;
9490 case 0x05: //less
9491 __ slt(AT, op1, op2);
9492 __ beq(AT, R0, L);
9493 __ nop();
9494 __ mov_d(dst, src);
9495 __ bind(L);
9496 break;
9497 case 0x06: //less_equal
9498 __ slt(AT, op2, op1);
9499 __ bne(AT, R0, L);
9500 __ nop();
9501 __ mov_d(dst, src);
9502 __ bind(L);
9503 break;
9504 default:
9505 Unimplemented();
9506 }
9507 %}
9509 ins_pipe( pipe_slow );
9510 %}
// Conditional move of a double keyed on a pointer compare.
// NOTE(review): the ordering cases use the SIGNED slt on pointer operands; in
// practice C2 appears to generate only eq/ne for CmpP, so the ordered cases
// may be unreachable — confirm before relying on them.  AT is clobbered.
9512 instruct cmovD_cmpP_reg_reg(regD dst, regD src, mRegP tmp1, mRegP tmp2, cmpOp cop ) %{
9513 match(Set dst (CMoveD (Binary cop (CmpP tmp1 tmp2)) (Binary dst src)));
9514 ins_cost(200);
9515 format %{
9516 "CMP$cop $tmp1, $tmp2\t @cmovD_cmpP_reg_reg\n"
9517 "\tCMOV $dst, $src \t @cmovD_cmpP_reg_reg"
9518 %}
9520 ins_encode %{
9521 Register op1 = $tmp1$$Register;
9522 Register op2 = $tmp2$$Register;
9523 FloatRegister dst = as_FloatRegister($dst$$reg);
9524 FloatRegister src = as_FloatRegister($src$$reg);
9525 int flag = $cop$$cmpcode;
9526 Label L;
9528 switch(flag)
9529 {
9530 case 0x01: //equal
9531 __ bne(op1, op2, L);
9532 __ nop();
9533 __ mov_d(dst, src);
9534 __ bind(L);
9535 break;
9536 case 0x02: //not_equal
9537 __ beq(op1, op2, L);
9538 __ nop();
9539 __ mov_d(dst, src);
9540 __ bind(L);
9541 break;
9542 case 0x03: //greater (signed slt on pointers — see note above)
9543 __ slt(AT, op2, op1);
9544 __ beq(AT, R0, L);
9545 __ nop();
9546 __ mov_d(dst, src);
9547 __ bind(L);
9548 break;
9549 case 0x04: //greater_equal (signed)
9550 __ slt(AT, op1, op2);
9551 __ bne(AT, R0, L);
9552 __ nop();
9553 __ mov_d(dst, src);
9554 __ bind(L);
9555 break;
9556 case 0x05: //less (signed)
9557 __ slt(AT, op1, op2);
9558 __ beq(AT, R0, L);
9559 __ nop();
9560 __ mov_d(dst, src);
9561 __ bind(L);
9562 break;
9563 case 0x06: //less_equal (signed)
9564 __ slt(AT, op2, op1);
9565 __ bne(AT, R0, L);
9566 __ nop();
9567 __ mov_d(dst, src);
9568 __ bind(L);
9569 break;
9570 default:
9571 Unimplemented();
9572 }
9573 %}
9575 ins_pipe( pipe_slow );
9576 %}
9578 //FIXME
// Conditional move of an int keyed on a float compare via the FP condition
// bit (movt/movf).  Unordered (NaN) operands satisfy not_equal/less/
// less_equal only, mirroring cmovI_cmpD_reg_reg.
9579 instruct cmovI_cmpF_reg_reg(mRegI dst, mRegI src, regF tmp1, regF tmp2, cmpOp cop ) %{
9580 match(Set dst (CMoveI (Binary cop (CmpF tmp1 tmp2)) (Binary dst src)));
9581 ins_cost(80);
9582 format %{
9583 "CMP$cop $tmp1, $tmp2\t @cmovI_cmpF_reg_reg\n"
9584 "\tCMOV $dst,$src \t @cmovI_cmpF_reg_reg"
9585 %}
9587 ins_encode %{
9588 FloatRegister reg_op1 = $tmp1$$FloatRegister;
9589 FloatRegister reg_op2 = $tmp2$$FloatRegister;
9590 Register dst = $dst$$Register;
9591 Register src = $src$$Register;
9592 int flag = $cop$$cmpcode;
9594 switch(flag)
9595 {
9596 case 0x01: //equal
9597 __ c_eq_s(reg_op1, reg_op2);
9598 __ movt(dst, src);
9599 break;
9600 case 0x02: //not_equal
9601 __ c_eq_s(reg_op1, reg_op2);
9602 __ movf(dst, src);
9603 break;
9604 case 0x03: //greater
9605 __ c_ole_s(reg_op1, reg_op2);
9606 __ movf(dst, src);
9607 break;
9608 case 0x04: //greater_equal
9609 __ c_olt_s(reg_op1, reg_op2);
9610 __ movf(dst, src);
9611 break;
9612 case 0x05: //less
9613 __ c_ult_s(reg_op1, reg_op2);
9614 __ movt(dst, src);
9615 break;
9616 case 0x06: //less_equal
9617 __ c_ule_s(reg_op1, reg_op2);
9618 __ movt(dst, src);
9619 break;
9620 default:
9621 Unimplemented();
9622 }
9623 %}
9624 ins_pipe( pipe_slow );
9625 %}
// Conditional move of a float keyed on a float compare: branch around the
// mov_s when the condition does not hold (bc1f/bc1t on the FP condition bit
// with a nop in the delay slot).
9627 instruct cmovF_cmpF_reg_reg(regF dst, regF src, regF tmp1, regF tmp2, cmpOp cop ) %{
9628 match(Set dst (CMoveF (Binary cop (CmpF tmp1 tmp2)) (Binary dst src)));
9629 ins_cost(200);
9630 format %{
9631 "CMP$cop $tmp1, $tmp2\t @cmovF_cmpF_reg_reg\n"
9632 "\tCMOV $dst,$src \t @cmovF_cmpF_reg_reg"
9633 %}
9635 ins_encode %{
9636 FloatRegister reg_op1 = $tmp1$$FloatRegister;
9637 FloatRegister reg_op2 = $tmp2$$FloatRegister;
9638 FloatRegister dst = $dst$$FloatRegister;
9639 FloatRegister src = $src$$FloatRegister;
9640 Label L;
9641 int flag = $cop$$cmpcode;
9643 switch(flag)
9644 {
9645 case 0x01: //equal
9646 __ c_eq_s(reg_op1, reg_op2);
9647 __ bc1f(L);
9648 __ nop();
9649 __ mov_s(dst, src);
9650 __ bind(L);
9651 break;
9652 case 0x02: //not_equal
9653 __ c_eq_s(reg_op1, reg_op2);
9654 __ bc1t(L);
9655 __ nop();
9656 __ mov_s(dst, src);
9657 __ bind(L);
9658 break;
9659 case 0x03: //greater
9660 __ c_ole_s(reg_op1, reg_op2);
9661 __ bc1t(L);
9662 __ nop();
9663 __ mov_s(dst, src);
9664 __ bind(L);
9665 break;
9666 case 0x04: //greater_equal
9667 __ c_olt_s(reg_op1, reg_op2);
9668 __ bc1t(L);
9669 __ nop();
9670 __ mov_s(dst, src);
9671 __ bind(L);
9672 break;
9673 case 0x05: //less
9674 __ c_ult_s(reg_op1, reg_op2);
9675 __ bc1f(L);
9676 __ nop();
9677 __ mov_s(dst, src);
9678 __ bind(L);
9679 break;
9680 case 0x06: //less_equal
9681 __ c_ule_s(reg_op1, reg_op2);
9682 __ bc1f(L);
9683 __ nop();
9684 __ mov_s(dst, src);
9685 __ bind(L);
9686 break;
9687 default:
9688 Unimplemented();
9689 }
9690 %}
9691 ins_pipe( pipe_slow );
9692 %}
9694 // Manifest a CmpL result in an integer register. Very painful.
9695 // This is the test to avoid.
9696 instruct cmpL3_reg_reg(mRegI dst, mRegL src1, mRegL src2) %{
9697 match(Set dst (CmpL3 src1 src2));
9698 ins_cost(1000);
9699 format %{ "cmpL3 $dst, $src1, $src2 @ cmpL3_reg_reg" %}
9700 ins_encode %{
9701 Register opr1 = as_Register($src1$$reg);
9702 Register opr2 = as_Register($src2$$reg);
9703 Register dst = as_Register($dst$$reg);
9705 Label Done;
9707 __ subu(AT, opr1, opr2);
9708 __ bltz(AT, Done);
9709 __ delayed()->daddiu(dst, R0, -1);
9711 __ move(dst, 1);
9712 __ movz(dst, R0, AT);
9714 __ bind(Done);
9715 %}
9716 ins_pipe( pipe_slow );
9717 %}
9719 //
9720 // less_rsult = -1
9721 // greater_result = 1
9722 // equal_result = 0
9723 // nan_result = -1
9724 //
// Manifest a three-way float compare: dst = -1 (less or unordered/NaN),
// 0 (equal), 1 (greater).  c_ult_s is true when src1 < src2 OR the operands
// are unordered, which yields the documented nan_result of -1.
9725 instruct cmpF3_reg_reg(mRegI dst, regF src1, regF src2) %{
9726 match(Set dst (CmpF3 src1 src2));
9727 ins_cost(1000);
9728 format %{ "cmpF3 $dst, $src1, $src2 @ cmpF3_reg_reg" %}
9729 ins_encode %{
9730 FloatRegister src1 = as_FloatRegister($src1$$reg);
9731 FloatRegister src2 = as_FloatRegister($src2$$reg);
9732 Register dst = as_Register($dst$$reg);
9734 Label Done;
// dst = -1 is set in the branch delay slot (executed either way); if the
// branch is taken (less/unordered) it is the final answer.
9736 __ c_ult_s(src1, src2);
9737 __ bc1t(Done);
9738 __ delayed()->daddiu(dst, R0, -1);
// Otherwise start from 1 (greater) and clear to 0 when equal.
9740 __ c_eq_s(src1, src2);
9741 __ move(dst, 1);
9742 __ movt(dst, R0);
9744 __ bind(Done);
9745 %}
9746 ins_pipe( pipe_slow );
9747 %}
// Manifest a three-way double compare: dst = -1 (less or unordered/NaN),
// 0 (equal), 1 (greater).  Same scheme as cmpF3_reg_reg with the double
// variants of the FP compares.
9749 instruct cmpD3_reg_reg(mRegI dst, regD src1, regD src2) %{
9750 match(Set dst (CmpD3 src1 src2));
9751 ins_cost(1000);
9752 format %{ "cmpD3 $dst, $src1, $src2 @ cmpD3_reg_reg" %}
9753 ins_encode %{
9754 FloatRegister src1 = as_FloatRegister($src1$$reg);
9755 FloatRegister src2 = as_FloatRegister($src2$$reg);
9756 Register dst = as_Register($dst$$reg);
9758 Label Done;
// dst = -1 set in the delay slot; final answer when less/unordered.
9760 __ c_ult_d(src1, src2);
9761 __ bc1t(Done);
9762 __ delayed()->daddiu(dst, R0, -1);
// Otherwise 1 (greater), cleared to 0 when equal.
9764 __ c_eq_d(src1, src2);
9765 __ move(dst, 1);
9766 __ movt(dst, R0);
9768 __ bind(Done);
9769 %}
9770 ins_pipe( pipe_slow );
9771 %}
// Zero-fill an array region.  AT and T9 are clobbered as scratch registers.
9773 instruct clear_array(mRegL cnt, mRegP base, Universe dummy) %{
9774 match(Set dummy (ClearArray cnt base));
9775 format %{ "CLEAR_ARRAY base = $base, cnt = $cnt # Clear doublewords" %}
9776 ins_encode %{
9777 // cnt is the number of DOUBLEWORDS to clear (the loop below stores R0
9778 // cnt times, advancing wordSize bytes each iteration); base points to
// the starting address of the region.  (The previous comment said "bytes",
// which contradicted the code.)
9779 Register base = $base$$Register;
9780 Register num = $cnt$$Register;
9781 Label Loop, done;
// Nothing to do for a zero count; AT = running store address (delay slot).
9783 __ beq(num, R0, done);
9784 __ delayed()->daddu(AT, base, R0);
9786 __ move(T9, num); /* T9 = remaining doubleword count */
9788 __ bind(Loop);
9789 __ sd(R0, AT, 0);
9790 __ daddi(T9, T9, -1);
9791 __ bne(T9, R0, Loop);
9792 __ delayed()->daddi(AT, AT, wordSize);
9794 __ bind(done);
9795 %}
9796 ins_pipe( pipe_slow );
9797 %}
// Intrinsic for String.compareTo: walks the two UTF-16 char sequences up to
// the shorter length; on the first mismatch the result is the char
// difference, otherwise the length difference.  All four inputs are killed;
// AT is clobbered; cnt2 is reused as a temp once the minimum length is known.
9799 instruct string_compare(a4_RegP str1, mA5RegI cnt1, a6_RegP str2, mA7RegI cnt2, no_Ax_mRegI result) %{
9800 match(Set result (StrComp (Binary str1 cnt1) (Binary str2 cnt2)));
9801 effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2);
9803 format %{ "String Compare $str1[len: $cnt1], $str2[len: $cnt2] -> $result @ string_compare" %}
9804 ins_encode %{
9805 // Get the first character position in both strings
9806 // [8] char array, [12] offset, [16] count
9807 Register str1 = $str1$$Register;
9808 Register str2 = $str2$$Register;
9809 Register cnt1 = $cnt1$$Register;
9810 Register cnt2 = $cnt2$$Register;
9811 Register result = $result$$Register;
9813 Label L, Loop, haveResult, done;
9815 // compute the difference of lengths (in result)
9816 __ subu(result, cnt1, cnt2); // result holds the difference of two lengths
9818 // compute the shorter length (in cnt1)
9819 __ slt(AT, cnt2, cnt1);
9820 __ movn(cnt1, cnt2, AT);
9822 // Now the shorter length is in cnt1 and cnt2 can be used as a tmp register
9823 __ bind(Loop); // Loop begin
// When the shorter length is exhausted, result already holds the length
// difference.  The delay slot preloads the next char of str1 into AT.
9824 __ beq(cnt1, R0, done);
9825 __ delayed()->lhu(AT, str1, 0);;
9827 // compare current character
9828 __ lhu(cnt2, str2, 0);
9829 __ bne(AT, cnt2, haveResult);
9830 __ delayed()->addi(str1, str1, 2);
9831 __ addi(str2, str2, 2);
9832 __ b(Loop);
9833 __ delayed()->addi(cnt1, cnt1, -1); // Loop end
// Mismatch: result = char1 - char2.
9835 __ bind(haveResult);
9836 __ subu(result, AT, cnt2);
9838 __ bind(done);
9839 %}
9841 ins_pipe( pipe_slow );
9842 %}
9844 // intrinsic optimization
// Intrinsic for String equality over cnt UTF-16 chars: result = 1 when the
// two char sequences are identical (or are the same array), 0 otherwise.
// Inputs str1/str2/cnt are killed, temp is clobbered, AT is used as scratch.
// Note the delay-slot discipline: result is (re)set to 1 right after each
// forward branch to done, and forced to 0 in the delay slot of the mismatch
// branch, so result is correct along every exit path.
9845 instruct string_equals(a4_RegP str1, a5_RegP str2, mA6RegI cnt, mA7RegI temp, no_Ax_mRegI result) %{
9846 match(Set result (StrEquals (Binary str1 str2) cnt));
9847 effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt, KILL temp);
9849 format %{ "String Equal $str1, $str2, len:$cnt tmp:$temp -> $result @ string_equals" %}
9850 ins_encode %{
9851 // Get the first character position in both strings
9852 // [8] char array, [12] offset, [16] count
9853 Register str1 = $str1$$Register;
9854 Register str2 = $str2$$Register;
9855 Register cnt = $cnt$$Register;
9856 Register tmp = $temp$$Register;
9857 Register result = $result$$Register;
9859 Label Loop, done;
9862 __ beq(str1, str2, done); // same char[] ?
9863 __ daddiu(result, R0, 1);
9865 __ bind(Loop); // Loop begin
9866 __ beq(cnt, R0, done);
9867 __ daddiu(result, R0, 1); // count == 0
9869 // compare current character
9870 __ lhu(AT, str1, 0);;
9871 __ lhu(tmp, str2, 0);
9872 __ bne(AT, tmp, done);
9873 __ delayed()->daddi(result, R0, 0);
9874 __ addi(str1, str1, 2);
9875 __ addi(str2, str2, 2);
9876 __ b(Loop);
9877 __ delayed()->addi(cnt, cnt, -1); // Loop end
9879 __ bind(done);
9880 %}
9882 ins_pipe( pipe_slow );
9883 %}
9885 //----------Arithmetic Instructions-------------------------------------------
9886 //----------Addition Instructions---------------------------------------------
9887 instruct addI_Reg_Reg(mRegI dst, mRegI src1, mRegI src2) %{
9888 match(Set dst (AddI src1 src2));
9890 format %{ "add $dst, $src1, $src2 #@addI_Reg_Reg" %}
9891 ins_encode %{
9892 Register dst = $dst$$Register;
9893 Register src1 = $src1$$Register;
9894 Register src2 = $src2$$Register;
9895 __ addu32(dst, src1, src2);
9896 %}
9897 ins_pipe( ialu_regI_regI );
9898 %}
9900 instruct addI_Reg_imm(mRegI dst, mRegI src1, immI src2) %{
9901 match(Set dst (AddI src1 src2));
9903 format %{ "add $dst, $src1, $src2 #@addI_Reg_imm" %}
9904 ins_encode %{
9905 Register dst = $dst$$Register;
9906 Register src1 = $src1$$Register;
9907 int imm = $src2$$constant;
9909 if(Assembler::is_simm16(imm)) {
9910 __ addiu32(dst, src1, imm);
9911 } else {
9912 __ move(AT, imm);
9913 __ addu32(dst, src1, AT);
9914 }
9915 %}
9916 ins_pipe( ialu_regI_regI );
9917 %}
9919 instruct addP_reg_reg(mRegP dst, mRegP src1, mRegL src2) %{
9920 match(Set dst (AddP src1 src2));
9922 format %{ "dadd $dst, $src1, $src2 #@addP_reg_reg" %}
9924 ins_encode %{
9925 Register dst = $dst$$Register;
9926 Register src1 = $src1$$Register;
9927 Register src2 = $src2$$Register;
9928 __ daddu(dst, src1, src2);
9929 %}
9931 ins_pipe( ialu_regI_regI );
9932 %}
9934 instruct addP_reg_reg_convI2L(mRegP dst, mRegP src1, mRegI src2) %{
9935 match(Set dst (AddP src1 (ConvI2L src2)));
9937 format %{ "dadd $dst, $src1, $src2 #@addP_reg_reg_convI2L" %}
9939 ins_encode %{
9940 Register dst = $dst$$Register;
9941 Register src1 = $src1$$Register;
9942 Register src2 = $src2$$Register;
9943 __ daddu(dst, src1, src2);
9944 %}
9946 ins_pipe( ialu_regI_regI );
9947 %}
9949 instruct addP_reg_imm(mRegP dst, mRegP src1, immL src2) %{
9950 match(Set dst (AddP src1 src2));
9952 format %{ "daddi $dst, $src1, $src2 #@addP_reg_imm" %}
9953 ins_encode %{
9954 Register src1 = $src1$$Register;
9955 long src2 = $src2$$constant;
9956 Register dst = $dst$$Register;
9958 if(Assembler::is_simm16(src2)) {
9959 __ daddiu(dst, src1, src2);
9960 } else {
9961 __ set64(AT, src2);
9962 __ daddu(dst, src1, AT);
9963 }
9964 %}
9965 ins_pipe( ialu_regI_imm16 );
9966 %}
9968 // Add Long Register with Register
9969 instruct addL_Reg_Reg(mRegL dst, mRegL src1, mRegL src2) %{
9970 match(Set dst (AddL src1 src2));
9971 ins_cost(200);
9972 format %{ "ADD $dst, $src1, $src2 #@addL_Reg_Reg\t" %}
9974 ins_encode %{
9975 Register dst_reg = as_Register($dst$$reg);
9976 Register src1_reg = as_Register($src1$$reg);
9977 Register src2_reg = as_Register($src2$$reg);
9979 __ daddu(dst_reg, src1_reg, src2_reg);
9980 %}
9982 ins_pipe( ialu_regL_regL );
9983 %}
9985 instruct addL_Reg_imm(mRegL dst, mRegL src1, immL16 src2)
9986 %{
9987 match(Set dst (AddL src1 src2));
9989 format %{ "ADD $dst, $src1, $src2 #@addL_Reg_imm " %}
9990 ins_encode %{
9991 Register dst_reg = as_Register($dst$$reg);
9992 Register src1_reg = as_Register($src1$$reg);
9993 int src2_imm = $src2$$constant;
9995 __ daddiu(dst_reg, src1_reg, src2_imm);
9996 %}
9998 ins_pipe( ialu_regL_regL );
9999 %}
10001 instruct addL_RegI2L_imm(mRegL dst, mRegI src1, immL16 src2)
10002 %{
10003 match(Set dst (AddL (ConvI2L src1) src2));
10005 format %{ "ADD $dst, $src1, $src2 #@addL_RegI2L_imm " %}
10006 ins_encode %{
10007 Register dst_reg = as_Register($dst$$reg);
10008 Register src1_reg = as_Register($src1$$reg);
10009 int src2_imm = $src2$$constant;
10011 __ daddiu(dst_reg, src1_reg, src2_imm);
10012 %}
10014 ins_pipe( ialu_regL_regL );
10015 %}
10017 instruct addL_RegI2L_Reg(mRegL dst, mRegI src1, mRegL src2) %{
10018 match(Set dst (AddL (ConvI2L src1) src2));
10019 ins_cost(200);
10020 format %{ "ADD $dst, $src1, $src2 #@addL_RegI2L_Reg\t" %}
10022 ins_encode %{
10023 Register dst_reg = as_Register($dst$$reg);
10024 Register src1_reg = as_Register($src1$$reg);
10025 Register src2_reg = as_Register($src2$$reg);
10027 __ daddu(dst_reg, src1_reg, src2_reg);
10028 %}
10030 ins_pipe( ialu_regL_regL );
10031 %}
10033 instruct addL_RegI2L_RegI2L(mRegL dst, mRegI src1, mRegI src2) %{
10034 match(Set dst (AddL (ConvI2L src1) (ConvI2L src2)));
10035 ins_cost(200);
10036 format %{ "ADD $dst, $src1, $src2 #@addL_RegI2L_RegI2L\t" %}
10038 ins_encode %{
10039 Register dst_reg = as_Register($dst$$reg);
10040 Register src1_reg = as_Register($src1$$reg);
10041 Register src2_reg = as_Register($src2$$reg);
10043 __ daddu(dst_reg, src1_reg, src2_reg);
10044 %}
10046 ins_pipe( ialu_regL_regL );
10047 %}
10049 instruct addL_Reg_RegI2L(mRegL dst, mRegL src1, mRegI src2) %{
10050 match(Set dst (AddL src1 (ConvI2L src2)));
10051 ins_cost(200);
10052 format %{ "ADD $dst, $src1, $src2 #@addL_Reg_RegI2L\t" %}
10054 ins_encode %{
10055 Register dst_reg = as_Register($dst$$reg);
10056 Register src1_reg = as_Register($src1$$reg);
10057 Register src2_reg = as_Register($src2$$reg);
10059 __ daddu(dst_reg, src1_reg, src2_reg);
10060 %}
10062 ins_pipe( ialu_regL_regL );
10063 %}
10065 //----------Subtraction Instructions-------------------------------------------
10066 // Integer Subtraction Instructions
10067 instruct subI_Reg_Reg(mRegI dst, mRegI src1, mRegI src2) %{
10068 match(Set dst (SubI src1 src2));
10069 ins_cost(100);
10071 format %{ "sub $dst, $src1, $src2 #@subI_Reg_Reg" %}
10072 ins_encode %{
10073 Register dst = $dst$$Register;
10074 Register src1 = $src1$$Register;
10075 Register src2 = $src2$$Register;
10076 __ subu32(dst, src1, src2);
10077 %}
10078 ins_pipe( ialu_regI_regI );
10079 %}
10081 instruct subI_Reg_immI16_sub(mRegI dst, mRegI src1, immI16_sub src2) %{
10082 match(Set dst (SubI src1 src2));
10083 ins_cost(80);
10085 format %{ "sub $dst, $src1, $src2 #@subI_Reg_immI16_sub" %}
10086 ins_encode %{
10087 Register dst = $dst$$Register;
10088 Register src1 = $src1$$Register;
10089 __ addiu32(dst, src1, -1 * $src2$$constant);
10090 %}
10091 ins_pipe( ialu_regI_regI );
10092 %}
10094 instruct negI_Reg(mRegI dst, immI0 zero, mRegI src) %{
10095 match(Set dst (SubI zero src));
10096 ins_cost(80);
10098 format %{ "neg $dst, $src #@negI_Reg" %}
10099 ins_encode %{
10100 Register dst = $dst$$Register;
10101 Register src = $src$$Register;
10102 __ subu32(dst, R0, src);
10103 %}
10104 ins_pipe( ialu_regI_regI );
10105 %}
10107 instruct negL_Reg(mRegL dst, immL0 zero, mRegL src) %{
10108 match(Set dst (SubL zero src));
10109 ins_cost(80);
10111 format %{ "neg $dst, $src #@negL_Reg" %}
10112 ins_encode %{
10113 Register dst = $dst$$Register;
10114 Register src = $src$$Register;
10115 __ subu(dst, R0, src);
10116 %}
10117 ins_pipe( ialu_regI_regI );
10118 %}
10120 instruct subL_Reg_immL16_sub(mRegL dst, mRegL src1, immL16_sub src2) %{
10121 match(Set dst (SubL src1 src2));
10122 ins_cost(80);
10124 format %{ "sub $dst, $src1, $src2 #@subL_Reg_immL16_sub" %}
10125 ins_encode %{
10126 Register dst = $dst$$Register;
10127 Register src1 = $src1$$Register;
10128 __ daddiu(dst, src1, -1 * $src2$$constant);
10129 %}
10130 ins_pipe( ialu_regI_regI );
10131 %}
10133 // Subtract Long Register with Register.
10134 instruct subL_Reg_Reg(mRegL dst, mRegL src1, mRegL src2) %{
10135 match(Set dst (SubL src1 src2));
10136 ins_cost(100);
10137 format %{ "SubL $dst, $src1, $src2 @ subL_Reg_Reg" %}
10138 ins_encode %{
10139 Register dst = as_Register($dst$$reg);
10140 Register src1 = as_Register($src1$$reg);
10141 Register src2 = as_Register($src2$$reg);
10143 __ subu(dst, src1, src2);
10144 %}
10145 ins_pipe( ialu_regL_regL );
10146 %}
10148 instruct subL_Reg_RegI2L(mRegL dst, mRegL src1, mRegI src2) %{
10149 match(Set dst (SubL src1 (ConvI2L src2)));
10150 ins_cost(100);
10151 format %{ "SubL $dst, $src1, $src2 @ subL_Reg_RegI2L" %}
10152 ins_encode %{
10153 Register dst = as_Register($dst$$reg);
10154 Register src1 = as_Register($src1$$reg);
10155 Register src2 = as_Register($src2$$reg);
10157 __ subu(dst, src1, src2);
10158 %}
10159 ins_pipe( ialu_regL_regL );
10160 %}
10162 instruct subL_RegI2L_Reg(mRegL dst, mRegI src1, mRegL src2) %{
10163 match(Set dst (SubL (ConvI2L src1) src2));
10164 ins_cost(200);
10165 format %{ "SubL $dst, $src1, $src2 @ subL_RegI2L_Reg" %}
10166 ins_encode %{
10167 Register dst = as_Register($dst$$reg);
10168 Register src1 = as_Register($src1$$reg);
10169 Register src2 = as_Register($src2$$reg);
10171 __ subu(dst, src1, src2);
10172 %}
10173 ins_pipe( ialu_regL_regL );
10174 %}
10176 instruct subL_RegI2L_RegI2L(mRegL dst, mRegI src1, mRegI src2) %{
10177 match(Set dst (SubL (ConvI2L src1) (ConvI2L src2)));
10178 ins_cost(200);
10179 format %{ "SubL $dst, $src1, $src2 @ subL_RegI2L_RegI2L" %}
10180 ins_encode %{
10181 Register dst = as_Register($dst$$reg);
10182 Register src1 = as_Register($src1$$reg);
10183 Register src2 = as_Register($src2$$reg);
10185 __ subu(dst, src1, src2);
10186 %}
10187 ins_pipe( ialu_regL_regL );
10188 %}
10190 // Integer MOD with Register
instruct modI_Reg_Reg(mRegI dst, mRegI src1, mRegI src2) %{
  match(Set dst (ModI src1 src2));
  ins_cost(300);
  format %{ "modi $dst, $src1, $src2 @ modI_Reg_Reg" %}
  ins_encode %{
    // 32-bit signed remainder: dst = src1 % src2.
    // NOTE(review): unlike divI_Reg_Reg, no teq zero-divisor trap is emitted
    // here -- presumably the divide-by-zero check happens elsewhere; confirm.
    Register dst = $dst$$Register;
    Register src1 = $src1$$Register;
    Register src2 = $src2$$Register;

    //if (UseLoongsonISA) {
    if (0) {
      // 2016.08.10
      // Experiments show that gsmod is slower that div+mfhi.
      // So I just disable it here.
      __ gsmod(dst, src1, src2);
    } else {
      // div leaves the quotient in LO and the remainder in HI;
      // mfhi fetches the remainder.
      __ div(src1, src2);
      __ mfhi(dst);
    }
  %}

  //ins_pipe( ialu_mod );
  ins_pipe( ialu_regI_regI );
%}
instruct modL_reg_reg(mRegL dst, mRegL src1, mRegL src2) %{
  match(Set dst (ModL src1 src2));
  format %{ "modL $dst, $src1, $src2 @modL_reg_reg" %}

  ins_encode %{
    // 64-bit signed remainder: dst = src1 % src2.
    Register dst = as_Register($dst$$reg);
    Register op1 = as_Register($src1$$reg);
    Register op2 = as_Register($src2$$reg);

    if (UseLoongsonISA) {
      // Loongson extension: three-operand modulo, no HI/LO access needed.
      __ gsdmod(dst, op1, op2);
    } else {
      // ddiv leaves the quotient in LO and the remainder in HI.
      __ ddiv(op1, op2);
      __ mfhi(dst);
    }
  %}
  ins_pipe( pipe_slow );
%}
10235 instruct mulI_Reg_Reg(mRegI dst, mRegI src1, mRegI src2) %{
10236 match(Set dst (MulI src1 src2));
10238 ins_cost(300);
10239 format %{ "mul $dst, $src1, $src2 @ mulI_Reg_Reg" %}
10240 ins_encode %{
10241 Register src1 = $src1$$Register;
10242 Register src2 = $src2$$Register;
10243 Register dst = $dst$$Register;
10245 __ mul(dst, src1, src2);
10246 %}
10247 ins_pipe( ialu_mult );
10248 %}
instruct maddI_Reg_Reg(mRegI dst, mRegI src1, mRegI src2, mRegI src3) %{
  match(Set dst (AddI (MulI src1 src2) src3));

  ins_cost(999);
  format %{ "madd $dst, $src1 * $src2 + $src3 #@maddI_Reg_Reg" %}
  ins_encode %{
    // dst = src1 * src2 + src3, using the multiply-accumulate unit:
    // seed LO with the addend, accumulate the product, read back LO.
    // Only the low 32 bits are consumed, so HI is never seeded.
    Register src1 = $src1$$Register;
    Register src2 = $src2$$Register;
    Register src3 = $src3$$Register;
    Register dst = $dst$$Register;

    __ mtlo(src3);
    __ madd(src1, src2);
    __ mflo(dst);
  %}
  ins_pipe( ialu_mult );
%}
instruct divI_Reg_Reg(mRegI dst, mRegI src1, mRegI src2) %{
  match(Set dst (DivI src1 src2));

  ins_cost(300);
  format %{ "div $dst, $src1, $src2 @ divI_Reg_Reg" %}
  ins_encode %{
    // 32-bit signed division: dst = src1 / src2.
    Register src1 = $src1$$Register;
    Register src2 = $src2$$Register;
    Register dst = $dst$$Register;

    /* 2012/4/21 Jin: In MIPS, div does not cause exception.
       We must trap an exception manually. */
    // teq traps (code 0x7) when src2 == R0, i.e. division by zero.
    __ teq(R0, src2, 0x7);

    if (UseLoongsonISA) {
      // Loongson extension: three-operand divide, result written directly.
      __ gsdiv(dst, src1, src2);
    } else {
      __ div(src1, src2);

      // NOTE(review): the two nops presumably cover the div-to-mflo hazard
      // on older pipelines -- confirm before removing.
      __ nop();
      __ nop();
      __ mflo(dst);   // quotient is in LO
    }
  %}
  ins_pipe( ialu_mod );
%}
10295 instruct divF_Reg_Reg(regF dst, regF src1, regF src2) %{
10296 match(Set dst (DivF src1 src2));
10298 ins_cost(300);
10299 format %{ "divF $dst, $src1, $src2 @ divF_Reg_Reg" %}
10300 ins_encode %{
10301 FloatRegister src1 = $src1$$FloatRegister;
10302 FloatRegister src2 = $src2$$FloatRegister;
10303 FloatRegister dst = $dst$$FloatRegister;
10305 /* Here do we need to trap an exception manually ? */
10306 __ div_s(dst, src1, src2);
10307 %}
10308 ins_pipe( pipe_slow );
10309 %}
10311 instruct divD_Reg_Reg(regD dst, regD src1, regD src2) %{
10312 match(Set dst (DivD src1 src2));
10314 ins_cost(300);
10315 format %{ "divD $dst, $src1, $src2 @ divD_Reg_Reg" %}
10316 ins_encode %{
10317 FloatRegister src1 = $src1$$FloatRegister;
10318 FloatRegister src2 = $src2$$FloatRegister;
10319 FloatRegister dst = $dst$$FloatRegister;
10321 /* Here do we need to trap an exception manually ? */
10322 __ div_d(dst, src1, src2);
10323 %}
10324 ins_pipe( pipe_slow );
10325 %}
10327 instruct mulL_reg_reg(mRegL dst, mRegL src1, mRegL src2) %{
10328 match(Set dst (MulL src1 src2));
10329 format %{ "mulL $dst, $src1, $src2 @mulL_reg_reg" %}
10330 ins_encode %{
10331 Register dst = as_Register($dst$$reg);
10332 Register op1 = as_Register($src1$$reg);
10333 Register op2 = as_Register($src2$$reg);
10335 if (UseLoongsonISA) {
10336 __ gsdmult(dst, op1, op2);
10337 } else {
10338 __ dmult(op1, op2);
10339 __ mflo(dst);
10340 }
10341 %}
10342 ins_pipe( pipe_slow );
10343 %}
10345 instruct mulL_reg_regI2L(mRegL dst, mRegL src1, mRegI src2) %{
10346 match(Set dst (MulL src1 (ConvI2L src2)));
10347 format %{ "mulL $dst, $src1, $src2 @mulL_reg_regI2L" %}
10348 ins_encode %{
10349 Register dst = as_Register($dst$$reg);
10350 Register op1 = as_Register($src1$$reg);
10351 Register op2 = as_Register($src2$$reg);
10353 if (UseLoongsonISA) {
10354 __ gsdmult(dst, op1, op2);
10355 } else {
10356 __ dmult(op1, op2);
10357 __ mflo(dst);
10358 }
10359 %}
10360 ins_pipe( pipe_slow );
10361 %}
instruct divL_reg_reg(mRegL dst, mRegL src1, mRegL src2) %{
  match(Set dst (DivL src1 src2));
  format %{ "divL $dst, $src1, $src2 @divL_reg_reg" %}

  ins_encode %{
    // 64-bit signed division: dst = src1 / src2.
    // NOTE(review): unlike divI_Reg_Reg there is no teq zero-divisor trap
    // here -- confirm the long division-by-zero check is emitted elsewhere.
    Register dst = as_Register($dst$$reg);
    Register op1 = as_Register($src1$$reg);
    Register op2 = as_Register($src2$$reg);

    if (UseLoongsonISA) {
      // Loongson extension: three-operand divide.
      __ gsddiv(dst, op1, op2);
    } else {
      // ddiv leaves the quotient in LO.
      __ ddiv(op1, op2);
      __ mflo(dst);
    }
  %}
  ins_pipe( pipe_slow );
%}
10382 instruct addF_reg_reg(regF dst, regF src1, regF src2) %{
10383 match(Set dst (AddF src1 src2));
10384 format %{ "AddF $dst, $src1, $src2 @addF_reg_reg" %}
10385 ins_encode %{
10386 FloatRegister src1 = as_FloatRegister($src1$$reg);
10387 FloatRegister src2 = as_FloatRegister($src2$$reg);
10388 FloatRegister dst = as_FloatRegister($dst$$reg);
10390 __ add_s(dst, src1, src2);
10391 %}
10392 ins_pipe( fpu_regF_regF );
10393 %}
10395 instruct subF_reg_reg(regF dst, regF src1, regF src2) %{
10396 match(Set dst (SubF src1 src2));
10397 format %{ "SubF $dst, $src1, $src2 @subF_reg_reg" %}
10398 ins_encode %{
10399 FloatRegister src1 = as_FloatRegister($src1$$reg);
10400 FloatRegister src2 = as_FloatRegister($src2$$reg);
10401 FloatRegister dst = as_FloatRegister($dst$$reg);
10403 __ sub_s(dst, src1, src2);
10404 %}
10405 ins_pipe( fpu_regF_regF );
10406 %}
10407 instruct addD_reg_reg(regD dst, regD src1, regD src2) %{
10408 match(Set dst (AddD src1 src2));
10409 format %{ "AddD $dst, $src1, $src2 @addD_reg_reg" %}
10410 ins_encode %{
10411 FloatRegister src1 = as_FloatRegister($src1$$reg);
10412 FloatRegister src2 = as_FloatRegister($src2$$reg);
10413 FloatRegister dst = as_FloatRegister($dst$$reg);
10415 __ add_d(dst, src1, src2);
10416 %}
10417 ins_pipe( fpu_regF_regF );
10418 %}
10420 instruct subD_reg_reg(regD dst, regD src1, regD src2) %{
10421 match(Set dst (SubD src1 src2));
10422 format %{ "SubD $dst, $src1, $src2 @subD_reg_reg" %}
10423 ins_encode %{
10424 FloatRegister src1 = as_FloatRegister($src1$$reg);
10425 FloatRegister src2 = as_FloatRegister($src2$$reg);
10426 FloatRegister dst = as_FloatRegister($dst$$reg);
10428 __ sub_d(dst, src1, src2);
10429 %}
10430 ins_pipe( fpu_regF_regF );
10431 %}
10433 instruct negF_reg(regF dst, regF src) %{
10434 match(Set dst (NegF src));
10435 format %{ "negF $dst, $src @negF_reg" %}
10436 ins_encode %{
10437 FloatRegister src = as_FloatRegister($src$$reg);
10438 FloatRegister dst = as_FloatRegister($dst$$reg);
10440 __ neg_s(dst, src);
10441 %}
10442 ins_pipe( fpu_regF_regF );
10443 %}
10445 instruct negD_reg(regD dst, regD src) %{
10446 match(Set dst (NegD src));
10447 format %{ "negD $dst, $src @negD_reg" %}
10448 ins_encode %{
10449 FloatRegister src = as_FloatRegister($src$$reg);
10450 FloatRegister dst = as_FloatRegister($dst$$reg);
10452 __ neg_d(dst, src);
10453 %}
10454 ins_pipe( fpu_regF_regF );
10455 %}
10458 instruct mulF_reg_reg(regF dst, regF src1, regF src2) %{
10459 match(Set dst (MulF src1 src2));
10460 format %{ "MULF $dst, $src1, $src2 @mulF_reg_reg" %}
10461 ins_encode %{
10462 FloatRegister src1 = $src1$$FloatRegister;
10463 FloatRegister src2 = $src2$$FloatRegister;
10464 FloatRegister dst = $dst$$FloatRegister;
10466 __ mul_s(dst, src1, src2);
10467 %}
10468 ins_pipe( fpu_regF_regF );
10469 %}
instruct maddF_reg_reg(regF dst, regF src1, regF src2, regF src3) %{
  match(Set dst (AddF (MulF src1 src2) src3));
  // For compatibility reason (e.g. on the Loongson platform), disable this guy.
  // (The huge cost keeps the matcher from selecting this fused form --
  // presumably because madd.s rounds differently from separate mul+add;
  // confirm before re-enabling.)
  ins_cost(44444);
  format %{ "maddF $dst, $src1, $src2, $src3 @maddF_reg_reg" %}
  ins_encode %{
    // dst = src1 * src2 + src3 in one fused instruction.
    FloatRegister src1 = $src1$$FloatRegister;
    FloatRegister src2 = $src2$$FloatRegister;
    FloatRegister src3 = $src3$$FloatRegister;
    FloatRegister dst = $dst$$FloatRegister;

    __ madd_s(dst, src1, src2, src3);
  %}
  ins_pipe( fpu_regF_regF );
%}
// Multiply two double-precision floating point numbers
10488 instruct mulD_reg_reg(regD dst, regD src1, regD src2) %{
10489 match(Set dst (MulD src1 src2));
10490 format %{ "MULD $dst, $src1, $src2 @mulD_reg_reg" %}
10491 ins_encode %{
10492 FloatRegister src1 = $src1$$FloatRegister;
10493 FloatRegister src2 = $src2$$FloatRegister;
10494 FloatRegister dst = $dst$$FloatRegister;
10496 __ mul_d(dst, src1, src2);
10497 %}
10498 ins_pipe( fpu_regF_regF );
10499 %}
instruct maddD_reg_reg(regD dst, regD src1, regD src2, regD src3) %{
  match(Set dst (AddD (MulD src1 src2) src3));
  // For compatibility reason (e.g. on the Loongson platform), disable this guy.
  // (The huge cost keeps the matcher from selecting this fused form --
  // presumably because madd.d rounds differently from separate mul+add;
  // confirm before re-enabling.)
  ins_cost(44444);
  format %{ "maddD $dst, $src1, $src2, $src3 @maddD_reg_reg" %}
  ins_encode %{
    // dst = src1 * src2 + src3 in one fused instruction.
    FloatRegister src1 = $src1$$FloatRegister;
    FloatRegister src2 = $src2$$FloatRegister;
    FloatRegister src3 = $src3$$FloatRegister;
    FloatRegister dst = $dst$$FloatRegister;

    __ madd_d(dst, src1, src2, src3);
  %}
  ins_pipe( fpu_regF_regF );
%}
10517 instruct absF_reg(regF dst, regF src) %{
10518 match(Set dst (AbsF src));
10519 ins_cost(100);
10520 format %{ "absF $dst, $src @absF_reg" %}
10521 ins_encode %{
10522 FloatRegister src = as_FloatRegister($src$$reg);
10523 FloatRegister dst = as_FloatRegister($dst$$reg);
10525 __ abs_s(dst, src);
10526 %}
10527 ins_pipe( fpu_regF_regF );
10528 %}
10531 // intrinsics for math_native.
10532 // AbsD SqrtD CosD SinD TanD LogD Log10D
10534 instruct absD_reg(regD dst, regD src) %{
10535 match(Set dst (AbsD src));
10536 ins_cost(100);
10537 format %{ "absD $dst, $src @absD_reg" %}
10538 ins_encode %{
10539 FloatRegister src = as_FloatRegister($src$$reg);
10540 FloatRegister dst = as_FloatRegister($dst$$reg);
10542 __ abs_d(dst, src);
10543 %}
10544 ins_pipe( fpu_regF_regF );
10545 %}
10547 instruct sqrtD_reg(regD dst, regD src) %{
10548 match(Set dst (SqrtD src));
10549 ins_cost(100);
10550 format %{ "SqrtD $dst, $src @sqrtD_reg" %}
10551 ins_encode %{
10552 FloatRegister src = as_FloatRegister($src$$reg);
10553 FloatRegister dst = as_FloatRegister($dst$$reg);
10555 __ sqrt_d(dst, src);
10556 %}
10557 ins_pipe( fpu_regF_regF );
10558 %}
10560 instruct sqrtF_reg(regF dst, regF src) %{
10561 match(Set dst (ConvD2F (SqrtD (ConvF2D src))));
10562 ins_cost(100);
10563 format %{ "SqrtF $dst, $src @sqrtF_reg" %}
10564 ins_encode %{
10565 FloatRegister src = as_FloatRegister($src$$reg);
10566 FloatRegister dst = as_FloatRegister($dst$$reg);
10568 __ sqrt_s(dst, src);
10569 %}
10570 ins_pipe( fpu_regF_regF );
10571 %}
10572 //----------------------------------Logical Instructions----------------------
10573 //__________________________________Integer Logical Instructions-------------
// And Instructions
10576 // And Register with Immediate
10577 instruct andI_Reg_immI(mRegI dst, mRegI src1, immI src2) %{
10578 match(Set dst (AndI src1 src2));
10580 format %{ "and $dst, $src1, $src2 #@andI_Reg_immI" %}
10581 ins_encode %{
10582 Register dst = $dst$$Register;
10583 Register src = $src1$$Register;
10584 int val = $src2$$constant;
10586 __ move(AT, val);
10587 __ andr(dst, src, AT);
10588 %}
10589 ins_pipe( ialu_regI_regI );
10590 %}
10592 instruct andI_Reg_imm_0_65535(mRegI dst, mRegI src1, immI_0_65535 src2) %{
10593 match(Set dst (AndI src1 src2));
10594 ins_cost(60);
10596 format %{ "and $dst, $src1, $src2 #@andI_Reg_imm_0_65535" %}
10597 ins_encode %{
10598 Register dst = $dst$$Register;
10599 Register src = $src1$$Register;
10600 int val = $src2$$constant;
10602 __ andi(dst, src, val);
10603 %}
10604 ins_pipe( ialu_regI_regI );
10605 %}
instruct andI_Reg_immI_nonneg_mask(mRegI dst, mRegI src1, immI_nonneg_mask mask) %{
  match(Set dst (AndI src1 mask));
  ins_cost(60);

  format %{ "and $dst, $src1, $mask #@andI_Reg_immI_nonneg_mask" %}
  ins_encode %{
    Register dst = $dst$$Register;
    Register src = $src1$$Register;
    // The operand guarantees a contiguous low-bit mask; is_int_mask()
    // returns its width, so the AND is a bit-field extract of the low
    // `size` bits starting at bit 0.
    int size = Assembler::is_int_mask($mask$$constant);

    __ ext(dst, src, 0, size);
  %}
  ins_pipe( ialu_regI_regI );
%}
instruct andL_Reg_immL_nonneg_mask(mRegL dst, mRegL src1, immL_nonneg_mask mask) %{
  match(Set dst (AndL src1 mask));
  ins_cost(60);

  format %{ "and $dst, $src1, $mask #@andL_Reg_immL_nonneg_mask" %}
  ins_encode %{
    Register dst = $dst$$Register;
    Register src = $src1$$Register;
    // 64-bit analogue of andI_Reg_immI_nonneg_mask: the operand guarantees
    // a contiguous low-bit mask, and dext extracts that many low bits.
    int size = Assembler::is_jlong_mask($mask$$constant);

    __ dext(dst, src, 0, size);
  %}
  ins_pipe( ialu_regI_regI );
%}
10637 instruct xorI_Reg_imm_0_65535(mRegI dst, mRegI src1, immI_0_65535 src2) %{
10638 match(Set dst (XorI src1 src2));
10639 ins_cost(60);
10641 format %{ "xori $dst, $src1, $src2 #@xorI_Reg_imm_0_65535" %}
10642 ins_encode %{
10643 Register dst = $dst$$Register;
10644 Register src = $src1$$Register;
10645 int val = $src2$$constant;
10647 __ xori(dst, src, val);
10648 %}
10649 ins_pipe( ialu_regI_regI );
10650 %}
10652 instruct xorI_Reg_immI_M1(mRegI dst, mRegI src1, immI_M1 M1) %{
10653 match(Set dst (XorI src1 M1));
10654 predicate(UseLoongsonISA && Use3A2000);
10655 ins_cost(60);
10657 format %{ "xor $dst, $src1, $M1 #@xorI_Reg_immI_M1" %}
10658 ins_encode %{
10659 Register dst = $dst$$Register;
10660 Register src = $src1$$Register;
10662 __ gsorn(dst, R0, src);
10663 %}
10664 ins_pipe( ialu_regI_regI );
10665 %}
10667 instruct xorL2I_Reg_immI_M1(mRegI dst, mRegL src1, immI_M1 M1) %{
10668 match(Set dst (XorI (ConvL2I src1) M1));
10669 predicate(UseLoongsonISA && Use3A2000);
10670 ins_cost(60);
10672 format %{ "xor $dst, $src1, $M1 #@xorL2I_Reg_immI_M1" %}
10673 ins_encode %{
10674 Register dst = $dst$$Register;
10675 Register src = $src1$$Register;
10677 __ gsorn(dst, R0, src);
10678 %}
10679 ins_pipe( ialu_regI_regI );
10680 %}
10682 instruct xorL_Reg_imm_0_65535(mRegL dst, mRegL src1, immL_0_65535 src2) %{
10683 match(Set dst (XorL src1 src2));
10684 ins_cost(60);
10686 format %{ "xori $dst, $src1, $src2 #@xorL_Reg_imm_0_65535" %}
10687 ins_encode %{
10688 Register dst = $dst$$Register;
10689 Register src = $src1$$Register;
10690 int val = $src2$$constant;
10692 __ xori(dst, src, val);
10693 %}
10694 ins_pipe( ialu_regI_regI );
10695 %}
10697 /*
10698 instruct xorL_Reg_immL_M1(mRegL dst, mRegL src1, immL_M1 M1) %{
10699 match(Set dst (XorL src1 M1));
10700 predicate(UseLoongsonISA);
10701 ins_cost(60);
10703 format %{ "xor $dst, $src1, $M1 #@xorL_Reg_immL_M1" %}
10704 ins_encode %{
10705 Register dst = $dst$$Register;
10706 Register src = $src1$$Register;
10708 __ gsorn(dst, R0, src);
10709 %}
10710 ins_pipe( ialu_regI_regI );
10711 %}
10712 */
10714 instruct lbu_and_lmask(mRegI dst, memory mem, immI_255 mask) %{
10715 match(Set dst (AndI mask (LoadB mem)));
10716 ins_cost(60);
10718 format %{ "lhu $dst, $mem #@lbu_and_lmask" %}
10719 ins_encode(load_UB_enc(dst, mem));
10720 ins_pipe( ialu_loadI );
10721 %}
10723 instruct lbu_and_rmask(mRegI dst, memory mem, immI_255 mask) %{
10724 match(Set dst (AndI (LoadB mem) mask));
10725 ins_cost(60);
10727 format %{ "lhu $dst, $mem #@lbu_and_rmask" %}
10728 ins_encode(load_UB_enc(dst, mem));
10729 ins_pipe( ialu_loadI );
10730 %}
10732 instruct andI_Reg_Reg(mRegI dst, mRegI src1, mRegI src2) %{
10733 match(Set dst (AndI src1 src2));
10735 format %{ "and $dst, $src1, $src2 #@andI_Reg_Reg" %}
10736 ins_encode %{
10737 Register dst = $dst$$Register;
10738 Register src1 = $src1$$Register;
10739 Register src2 = $src2$$Register;
10740 __ andr(dst, src1, src2);
10741 %}
10742 ins_pipe( ialu_regI_regI );
10743 %}
10745 instruct andnI_Reg_nReg(mRegI dst, mRegI src1, mRegI src2, immI_M1 M1) %{
10746 match(Set dst (AndI src1 (XorI src2 M1)));
10747 predicate(UseLoongsonISA && Use3A2000);
10749 format %{ "andn $dst, $src1, $src2 #@andnI_Reg_nReg" %}
10750 ins_encode %{
10751 Register dst = $dst$$Register;
10752 Register src1 = $src1$$Register;
10753 Register src2 = $src2$$Register;
10755 __ gsandn(dst, src1, src2);
10756 %}
10757 ins_pipe( ialu_regI_regI );
10758 %}
10760 instruct ornI_Reg_nReg(mRegI dst, mRegI src1, mRegI src2, immI_M1 M1) %{
10761 match(Set dst (OrI src1 (XorI src2 M1)));
10762 predicate(UseLoongsonISA && Use3A2000);
10764 format %{ "orn $dst, $src1, $src2 #@ornI_Reg_nReg" %}
10765 ins_encode %{
10766 Register dst = $dst$$Register;
10767 Register src1 = $src1$$Register;
10768 Register src2 = $src2$$Register;
10770 __ gsorn(dst, src1, src2);
10771 %}
10772 ins_pipe( ialu_regI_regI );
10773 %}
10775 instruct andnI_nReg_Reg(mRegI dst, mRegI src1, mRegI src2, immI_M1 M1) %{
10776 match(Set dst (AndI (XorI src1 M1) src2));
10777 predicate(UseLoongsonISA && Use3A2000);
10779 format %{ "andn $dst, $src2, $src1 #@andnI_nReg_Reg" %}
10780 ins_encode %{
10781 Register dst = $dst$$Register;
10782 Register src1 = $src1$$Register;
10783 Register src2 = $src2$$Register;
10785 __ gsandn(dst, src2, src1);
10786 %}
10787 ins_pipe( ialu_regI_regI );
10788 %}
10790 instruct ornI_nReg_Reg(mRegI dst, mRegI src1, mRegI src2, immI_M1 M1) %{
10791 match(Set dst (OrI (XorI src1 M1) src2));
10792 predicate(UseLoongsonISA && Use3A2000);
10794 format %{ "orn $dst, $src2, $src1 #@ornI_nReg_Reg" %}
10795 ins_encode %{
10796 Register dst = $dst$$Register;
10797 Register src1 = $src1$$Register;
10798 Register src2 = $src2$$Register;
10800 __ gsorn(dst, src2, src1);
10801 %}
10802 ins_pipe( ialu_regI_regI );
10803 %}
10805 // And Long Register with Register
10806 instruct andL_Reg_Reg(mRegL dst, mRegL src1, mRegL src2) %{
10807 match(Set dst (AndL src1 src2));
10808 format %{ "AND $dst, $src1, $src2 @ andL_Reg_Reg\n\t" %}
10809 ins_encode %{
10810 Register dst_reg = as_Register($dst$$reg);
10811 Register src1_reg = as_Register($src1$$reg);
10812 Register src2_reg = as_Register($src2$$reg);
10814 __ andr(dst_reg, src1_reg, src2_reg);
10815 %}
10816 ins_pipe( ialu_regL_regL );
10817 %}
10819 instruct andL_Reg_Reg_convI2L(mRegL dst, mRegL src1, mRegI src2) %{
10820 match(Set dst (AndL src1 (ConvI2L src2)));
10821 format %{ "AND $dst, $src1, $src2 @ andL_Reg_Reg_convI2L\n\t" %}
10822 ins_encode %{
10823 Register dst_reg = as_Register($dst$$reg);
10824 Register src1_reg = as_Register($src1$$reg);
10825 Register src2_reg = as_Register($src2$$reg);
10827 __ andr(dst_reg, src1_reg, src2_reg);
10828 %}
10829 ins_pipe( ialu_regL_regL );
10830 %}
10832 instruct andL_Reg_imm_0_65535(mRegL dst, mRegL src1, immL_0_65535 src2) %{
10833 match(Set dst (AndL src1 src2));
10834 ins_cost(60);
10836 format %{ "and $dst, $src1, $src2 #@andL_Reg_imm_0_65535" %}
10837 ins_encode %{
10838 Register dst = $dst$$Register;
10839 Register src = $src1$$Register;
10840 long val = $src2$$constant;
10842 __ andi(dst, src, val);
10843 %}
10844 ins_pipe( ialu_regI_regI );
10845 %}
10847 instruct andL2I_Reg_imm_0_65535(mRegI dst, mRegL src1, immL_0_65535 src2) %{
10848 match(Set dst (ConvL2I (AndL src1 src2)));
10849 ins_cost(60);
10851 format %{ "and $dst, $src1, $src2 #@andL2I_Reg_imm_0_65535" %}
10852 ins_encode %{
10853 Register dst = $dst$$Register;
10854 Register src = $src1$$Register;
10855 long val = $src2$$constant;
10857 __ andi(dst, src, val);
10858 %}
10859 ins_pipe( ialu_regI_regI );
10860 %}
10862 /*
10863 instruct andnL_Reg_nReg(mRegL dst, mRegL src1, mRegL src2, immL_M1 M1) %{
10864 match(Set dst (AndL src1 (XorL src2 M1)));
10865 predicate(UseLoongsonISA);
10867 format %{ "andn $dst, $src1, $src2 #@andnL_Reg_nReg" %}
10868 ins_encode %{
10869 Register dst = $dst$$Register;
10870 Register src1 = $src1$$Register;
10871 Register src2 = $src2$$Register;
10873 __ gsandn(dst, src1, src2);
10874 %}
10875 ins_pipe( ialu_regI_regI );
10876 %}
10877 */
10879 /*
10880 instruct ornL_Reg_nReg(mRegL dst, mRegL src1, mRegL src2, immL_M1 M1) %{
10881 match(Set dst (OrL src1 (XorL src2 M1)));
10882 predicate(UseLoongsonISA);
10884 format %{ "orn $dst, $src1, $src2 #@ornL_Reg_nReg" %}
10885 ins_encode %{
10886 Register dst = $dst$$Register;
10887 Register src1 = $src1$$Register;
10888 Register src2 = $src2$$Register;
10890 __ gsorn(dst, src1, src2);
10891 %}
10892 ins_pipe( ialu_regI_regI );
10893 %}
10894 */
10896 /*
10897 instruct andnL_nReg_Reg(mRegL dst, mRegL src1, mRegL src2, immL_M1 M1) %{
10898 match(Set dst (AndL (XorL src1 M1) src2));
10899 predicate(UseLoongsonISA);
10901 format %{ "andn $dst, $src2, $src1 #@andnL_nReg_Reg" %}
10902 ins_encode %{
10903 Register dst = $dst$$Register;
10904 Register src1 = $src1$$Register;
10905 Register src2 = $src2$$Register;
10907 __ gsandn(dst, src2, src1);
10908 %}
10909 ins_pipe( ialu_regI_regI );
10910 %}
10911 */
10913 /*
10914 instruct ornL_nReg_Reg(mRegL dst, mRegL src1, mRegL src2, immL_M1 M1) %{
10915 match(Set dst (OrL (XorL src1 M1) src2));
10916 predicate(UseLoongsonISA);
10918 format %{ "orn $dst, $src2, $src1 #@ornL_nReg_Reg" %}
10919 ins_encode %{
10920 Register dst = $dst$$Register;
10921 Register src1 = $src1$$Register;
10922 Register src2 = $src2$$Register;
10924 __ gsorn(dst, src2, src1);
10925 %}
10926 ins_pipe( ialu_regI_regI );
10927 %}
10928 */
instruct andL_Reg_immL_M8(mRegL dst, immL_M8 M8) %{
  match(Set dst (AndL dst M8));
  ins_cost(60);

  format %{ "and $dst, $dst, $M8 #@andL_Reg_immL_M8" %}
  ins_encode %{
    Register dst = $dst$$Register;

    // dst &= -8 (…11111000): clear the low 3 bits by inserting 3 zero
    // bits from R0 at position 0.
    __ dins(dst, R0, 0, 3);
  %}
  ins_pipe( ialu_regI_regI );
%}
instruct andL_Reg_immL_M5(mRegL dst, immL_M5 M5) %{
  match(Set dst (AndL dst M5));
  ins_cost(60);

  format %{ "and $dst, $dst, $M5 #@andL_Reg_immL_M5" %}
  ins_encode %{
    Register dst = $dst$$Register;

    // dst &= -5 (…11111011): clear bit 2 by inserting 1 zero bit from R0
    // at position 2.
    __ dins(dst, R0, 2, 1);
  %}
  ins_pipe( ialu_regI_regI );
%}
instruct andL_Reg_immL_M7(mRegL dst, immL_M7 M7) %{
  match(Set dst (AndL dst M7));
  ins_cost(60);

  format %{ "and $dst, $dst, $M7 #@andL_Reg_immL_M7" %}
  ins_encode %{
    Register dst = $dst$$Register;

    // dst &= -7 (…11111001): clear bits 1..2 by inserting 2 zero bits from
    // R0 at position 1.
    __ dins(dst, R0, 1, 2);
  %}
  ins_pipe( ialu_regI_regI );
%}
10969 instruct andL_Reg_immL_M4(mRegL dst, immL_M4 M4) %{
10970 match(Set dst (AndL dst M4));
10971 ins_cost(60);
10973 format %{ "and $dst, $dst, $M4 #@andL_Reg_immL_M4" %}
10974 ins_encode %{
10975 Register dst = $dst$$Register;
10977 __ dins(dst, R0, 0, 2);
10978 %}
10979 ins_pipe( ialu_regI_regI );
10980 %}
10982 instruct andL_Reg_immL_M121(mRegL dst, immL_M121 M121) %{
10983 match(Set dst (AndL dst M121));
10984 ins_cost(60);
10986 format %{ "and $dst, $dst, $M121 #@andL_Reg_immL_M121" %}
10987 ins_encode %{
10988 Register dst = $dst$$Register;
10990 __ dins(dst, R0, 3, 4);
10991 %}
10992 ins_pipe( ialu_regI_regI );
10993 %}
10995 // Bitwise OR of two long registers: dst = src1 | src2.
10996 instruct orL_Reg_Reg(mRegL dst, mRegL src1, mRegL src2) %{
10997 match(Set dst (OrL src1 src2));
10998 format %{ "OR $dst, $src1, $src2 @ orL_Reg_Reg\t" %}
10999 ins_encode %{
11000 // Single OR instruction; operand accessors used directly.
11004 __ orr($dst$$Register, $src1$$Register, $src2$$Register);
11005 %}
11006 ins_pipe( ialu_regL_regL );
11007 %}
// OR the raw bits of a pointer (CastP2X) with a long register; the cast is
// a no-op at machine level, so this is a plain OR.
11009 instruct orL_Reg_P2XReg(mRegL dst, mRegP src1, mRegL src2) %{
11010 match(Set dst (OrL (CastP2X src1) src2));
11011 format %{ "OR $dst, $src1, $src2 @ orL_Reg_P2XReg\t" %}
11012 ins_encode %{
11013 Register dst_reg = $dst$$Register;
11014 Register src1_reg = $src1$$Register;
11015 Register src2_reg = $src2$$Register;
11017 __ orr(dst_reg, src1_reg, src2_reg);
11018 %}
11019 ins_pipe( ialu_regL_regL );
11020 %}
11022 // Bitwise XOR of two long registers: dst = src1 ^ src2.
11023 instruct xorL_Reg_Reg(mRegL dst, mRegL src1, mRegL src2) %{
11024 match(Set dst (XorL src1 src2));
11025 format %{ "XOR $dst, $src1, $src2 @ xorL_Reg_Reg\t" %}
11026 ins_encode %{
11027 Register d = $dst$$Register;
11028 Register s1 = $src1$$Register;
11029 Register s2 = $src2$$Register;
11031 __ xorr(d, s1, s2);
11032 %}
11033 ins_pipe( ialu_regL_regL );
11034 %}
11036 // Shift Left by 8-bit immediate
// Int shift-left by constant: sll sign-extends its 32-bit result.
11037 instruct salI_Reg_imm(mRegI dst, mRegI src, immI8 shift) %{
11038 match(Set dst (LShiftI src shift));
11040 format %{ "SHL $dst, $src, $shift #@salI_Reg_imm" %}
11041 ins_encode %{
11042 Register src = $src$$Register;
11043 Register dst = $dst$$Register;
11044 int shamt = $shift$$constant;
11046 __ sll(dst, src, shamt);
11047 %}
11048 ins_pipe( ialu_regI_regI );
11049 %}
// Int shift-left of (long -> int): sll only reads the low 32 bits of src and
// sign-extends, so the ConvL2I truncation is folded into the shift itself.
11051 instruct salL2I_Reg_imm(mRegI dst, mRegL src, immI8 shift) %{
11052 match(Set dst (LShiftI (ConvL2I src) shift));
11054 format %{ "SHL $dst, $src, $shift #@salL2I_Reg_imm" %}
11055 ins_encode %{
11056 Register src = $src$$Register;
11057 Register dst = $dst$$Register;
11058 int shamt = $shift$$constant;
11060 __ sll(dst, src, shamt);
11061 %}
11062 ins_pipe( ialu_regI_regI );
11063 %}
// (src << 16) & 0xFFFF0000: the shift already zeroes the low 16 bits,
// so the AND with the mask is redundant and only the sll is emitted.
11065 instruct salI_Reg_imm_and_M65536(mRegI dst, mRegI src, immI_16 shift, immI_M65536 mask) %{
11066 match(Set dst (AndI (LShiftI src shift) mask));
11068 format %{ "SHL $dst, $src, $shift #@salI_Reg_imm_and_M65536" %}
11069 ins_encode %{
11070 Register src = $src$$Register;
11071 Register dst = $dst$$Register;
11073 __ sll(dst, src, 16);
11074 %}
11075 ins_pipe( ialu_regI_regI );
11076 %}
// ((int)(src & 7) << 16) >> 16: the value is at most 7, so the short
// sign-extension pair is a no-op and a single andi suffices.
11078 instruct land7_2_s(mRegI dst, mRegL src, immL7 seven, immI_16 sixteen)
11079 %{
11080 match(Set dst (RShiftI (LShiftI (ConvL2I (AndL src seven)) sixteen) sixteen));
11082 format %{ "andi $dst, $src, 7\t# @land7_2_s" %}
11083 ins_encode %{
11084 Register src = $src$$Register;
11085 Register dst = $dst$$Register;
11087 __ andi(dst, src, 7);
11088 %}
11089 ins_pipe(ialu_regI_regI);
11090 %}
// ((src1 | imm) << 16) >> 16 with 0 <= imm <= 32767: folds the
// shift-left/shift-right (short sign-extension) pair into a single ori.
11092 instruct ori2s(mRegI dst, mRegI src1, immI_0_32767 src2, immI_16 sixteen)
11093 %{
11094 match(Set dst (RShiftI (LShiftI (OrI src1 src2) sixteen) sixteen));
11096 format %{ "ori $dst, $src1, $src2\t# @ori2s" %}
11097 ins_encode %{
11098 Register src = $src1$$Register;
11099 int val = $src2$$constant;
11100 Register dst = $dst$$Register;
11102 __ ori(dst, src, val);
11103 %}
11104 ins_pipe(ialu_regI_regI);
11105 %}
11107 // Shift Left by 16, followed by Arithmetic Shift Right by 16.
11108 // This idiom is used by the compiler for the i2s bytecode; it is matched
// here to a single sign-extend-halfword (seh) instruction.
11109 instruct i2s(mRegI dst, mRegI src, immI_16 sixteen)
11110 %{
11111 match(Set dst (RShiftI (LShiftI src sixteen) sixteen));
11113 format %{ "i2s $dst, $src\t# @i2s" %}
11114 ins_encode %{
11115 Register src = $src$$Register;
11116 Register dst = $dst$$Register;
11118 __ seh(dst, src);
11119 %}
11120 ins_pipe(ialu_regI_regI);
11121 %}
11123 // Shift Left by 24, followed by Arithmetic Shift Right by 24.
11124 // This idiom is used by the compiler for the i2b bytecode; it is matched
// here to a single sign-extend-byte (seb) instruction.
11125 instruct i2b(mRegI dst, mRegI src, immI_24 twentyfour)
11126 %{
11127 match(Set dst (RShiftI (LShiftI src twentyfour) twentyfour));
11129 format %{ "i2b $dst, $src\t# @i2b" %}
11130 ins_encode %{
11131 Register src = $src$$Register;
11132 Register dst = $dst$$Register;
11134 __ seb(dst, src);
11135 %}
11136 ins_pipe(ialu_regI_regI);
11137 %}
// Int shift-left by constant of a (long -> int) value; like salL2I_Reg_imm,
// sll reads only the low 32 bits so the truncation is implicit.
11140 instruct salI_RegL2I_imm(mRegI dst, mRegL src, immI8 shift) %{
11141 match(Set dst (LShiftI (ConvL2I src) shift));
11143 format %{ "SHL $dst, $src, $shift #@salI_RegL2I_imm" %}
11144 ins_encode %{
11145 Register src = $src$$Register;
11146 Register dst = $dst$$Register;
11147 int shamt = $shift$$constant;
11149 __ sll(dst, src, shamt);
11150 %}
11151 ins_pipe( ialu_regI_regI );
11152 %}
11154 // Shift Left by 8-bit immediate
// Int shift-left by a register amount; sllv uses the low 5 bits of shamt.
11155 instruct salI_Reg_Reg(mRegI dst, mRegI src, mRegI shift) %{
11156 match(Set dst (LShiftI src shift));
11158 format %{ "SHL $dst, $src, $shift #@salI_Reg_Reg" %}
11159 ins_encode %{
11160 Register src = $src$$Register;
11161 Register dst = $dst$$Register;
11162 Register shamt = $shift$$Register;
11163 __ sllv(dst, src, shamt);
11164 %}
11165 ins_pipe( ialu_regI_regI );
11166 %}
11169 // Shift Left Long
// Long shift-left by constant. dsll encodes a 5-bit amount; for amounts
// >= 32 the dsll32 form (amount - 32) is required, so the count is reduced
// modulo 64 and split across the two encodings.
11170 instruct salL_Reg_imm(mRegL dst, mRegL src, immI8 shift) %{
11171 //predicate(UseNewLongLShift);
11172 match(Set dst (LShiftL src shift));
11173 ins_cost(100);
11174 format %{ "salL $dst, $src, $shift @ salL_Reg_imm" %}
11175 ins_encode %{
11176 Register src_reg = as_Register($src$$reg);
11177 Register dst_reg = as_Register($dst$$reg);
11178 int shamt = $shift$$constant;
11180 if (__ is_simm(shamt, 5))
11181 __ dsll(dst_reg, src_reg, shamt);
11182 else
11183 {
11184 int sa = Assembler::low(shamt, 6); // shift amount mod 64
11185 if (sa < 32) {
11186 __ dsll(dst_reg, src_reg, sa);
11187 } else {
11188 __ dsll32(dst_reg, src_reg, sa - 32);
11189 }
11190 }
11191 %}
11192 ins_pipe( ialu_regL_regL );
11193 %}
// Long shift-left by constant of an (int -> long) value. The widening
// conversion needs no extra code: the int is expected in sign-extended form.
11195 instruct salL_RegI2L_imm(mRegL dst, mRegI src, immI8 shift) %{
11196 //predicate(UseNewLongLShift);
11197 match(Set dst (LShiftL (ConvI2L src) shift));
11198 ins_cost(100);
11199 format %{ "salL $dst, $src, $shift @ salL_RegI2L_imm" %}
11200 ins_encode %{
11201 Register src_reg = as_Register($src$$reg);
11202 Register dst_reg = as_Register($dst$$reg);
11203 int shamt = $shift$$constant;
11205 if (__ is_simm(shamt, 5))
11206 __ dsll(dst_reg, src_reg, shamt);
11207 else
11208 {
11209 int sa = Assembler::low(shamt, 6); // shift amount mod 64
11210 if (sa < 32) {
11211 __ dsll(dst_reg, src_reg, sa);
11212 } else {
11213 __ dsll32(dst_reg, src_reg, sa - 32);
11214 }
11215 }
11216 %}
11217 ins_pipe( ialu_regL_regL );
11218 %}
11220 // Shift Left Long
// Long shift-left by a register amount; dsllv uses the low 6 bits of shift.
11221 instruct salL_Reg_Reg(mRegL dst, mRegL src, mRegI shift) %{
11222 //predicate(UseNewLongLShift);
11223 match(Set dst (LShiftL src shift));
11224 ins_cost(100);
11225 format %{ "salL $dst, $src, $shift @ salL_Reg_Reg" %}
11226 ins_encode %{
11227 Register src_reg = as_Register($src$$reg);
11228 Register dst_reg = as_Register($dst$$reg);
11230 __ dsllv(dst_reg, src_reg, $shift$$Register);
11231 %}
11232 ins_pipe( ialu_regL_regL );
11233 %}
// Long shift-left by constant of an (int -> long) value; same dsll/dsll32
// split as salL_Reg_imm. NOTE(review): appears to duplicate the match rule
// of salL_RegI2L_imm above with identical encoding.
11235 instruct salL_convI2L_Reg_imm(mRegL dst, mRegI src, immI8 shift) %{
11236 match(Set dst (LShiftL (ConvI2L src) shift));
11237 ins_cost(100);
11238 format %{ "salL $dst, $src, $shift @ salL_convI2L_Reg_imm" %}
11239 ins_encode %{
11240 Register src_reg = as_Register($src$$reg);
11241 Register dst_reg = as_Register($dst$$reg);
11242 int shamt = $shift$$constant;
11244 if (__ is_simm(shamt, 5)) {
11245 __ dsll(dst_reg, src_reg, shamt);
11246 } else {
11247 int sa = Assembler::low(shamt, 6); // shift amount mod 64
11248 if (sa < 32) {
11249 __ dsll(dst_reg, src_reg, sa);
11250 } else {
11251 __ dsll32(dst_reg, src_reg, sa - 32);
11252 }
11253 }
11254 %}
11255 ins_pipe( ialu_regL_regL );
11256 %}
11258 // Shift Right Long
// Long arithmetic shift-right by constant; count reduced mod 64 and split
// across dsra (amount < 32) and dsra32 (amount >= 32).
11259 instruct sarL_Reg_imm(mRegL dst, mRegL src, immI8 shift) %{
11260 match(Set dst (RShiftL src shift));
11261 ins_cost(100);
11262 format %{ "sarL $dst, $src, $shift @ sarL_Reg_imm" %}
11263 ins_encode %{
11264 Register src_reg = as_Register($src$$reg);
11265 Register dst_reg = as_Register($dst$$reg);
11266 int shamt = ($shift$$constant & 0x3f);
11267 if (__ is_simm(shamt, 5))
11268 __ dsra(dst_reg, src_reg, shamt);
11269 else {
11270 int sa = Assembler::low(shamt, 6);
11271 if (sa < 32) {
11272 __ dsra(dst_reg, src_reg, sa);
11273 } else {
11274 __ dsra32(dst_reg, src_reg, sa - 32);
11275 }
11276 }
11277 %}
11278 ins_pipe( ialu_regL_regL );
11279 %}
// (int)(src >> shift) with 32 <= shift <= 63: dsra32 leaves a correctly
// sign-extended 32-bit value, so the ConvL2I is free.
11281 instruct sarL2I_Reg_immI_32_63(mRegI dst, mRegL src, immI_32_63 shift) %{
11282 match(Set dst (ConvL2I (RShiftL src shift)));
11283 ins_cost(100);
11284 format %{ "sarL $dst, $src, $shift @ sarL2I_Reg_immI_32_63" %}
11285 ins_encode %{
11286 Register src_reg = as_Register($src$$reg);
11287 Register dst_reg = as_Register($dst$$reg);
11288 int shamt = $shift$$constant;
11290 __ dsra32(dst_reg, src_reg, shamt - 32);
11291 %}
11292 ins_pipe( ialu_regL_regL );
11293 %}
11295 // Shift Right Long arithmetically
// Long arithmetic shift-right by a register amount (low 6 bits used).
11296 instruct sarL_Reg_Reg(mRegL dst, mRegL src, mRegI shift) %{
11297 //predicate(UseNewLongLShift);
11298 match(Set dst (RShiftL src shift));
11299 ins_cost(100);
11300 format %{ "sarL $dst, $src, $shift @ sarL_Reg_Reg" %}
11301 ins_encode %{
11302 Register src_reg = as_Register($src$$reg);
11303 Register dst_reg = as_Register($dst$$reg);
11305 __ dsrav(dst_reg, src_reg, $shift$$Register);
11306 %}
11307 ins_pipe( ialu_regL_regL );
11308 %}
11310 // Shift Right Long logically
// Long logical shift-right by a register amount (low 6 bits used).
11311 instruct slrL_Reg_Reg(mRegL dst, mRegL src, mRegI shift) %{
11312 match(Set dst (URShiftL src shift));
11313 ins_cost(100);
11314 format %{ "slrL $dst, $src, $shift @ slrL_Reg_Reg" %}
11315 ins_encode %{
11316 Register src_reg = as_Register($src$$reg);
11317 Register dst_reg = as_Register($dst$$reg);
11319 __ dsrlv(dst_reg, src_reg, $shift$$Register);
11320 %}
11321 ins_pipe( ialu_regL_regL );
11322 %}
// Long logical shift-right by a constant in [0,31]: single dsrl.
11324 instruct slrL_Reg_immI_0_31(mRegL dst, mRegL src, immI_0_31 shift) %{
11325 match(Set dst (URShiftL src shift));
11326 ins_cost(80);
11327 format %{ "slrL $dst, $src, $shift @ slrL_Reg_immI_0_31" %}
11328 ins_encode %{
11329 Register src_reg = as_Register($src$$reg);
11330 Register dst_reg = as_Register($dst$$reg);
11331 int shamt = $shift$$constant;
11333 __ dsrl(dst_reg, src_reg, shamt);
11334 %}
11335 ins_pipe( ialu_regL_regL );
11336 %}
// (int)(src >>> shift) & 0x7fffffff: a 31-bit field extract starting at
// `shift` — dext pulls exactly the bits the shift+mask would keep.
11338 instruct slrL_Reg_immI_0_31_and_max_int(mRegI dst, mRegL src, immI_0_31 shift, immI_MaxI max_int) %{
11339 match(Set dst (AndI (ConvL2I (URShiftL src shift)) max_int));
11340 ins_cost(80);
11341 format %{ "dext $dst, $src, $shift, 31 @ slrL_Reg_immI_0_31_and_max_int" %}
11342 ins_encode %{
11343 Register src_reg = as_Register($src$$reg);
11344 Register dst_reg = as_Register($dst$$reg);
11345 int shamt = $shift$$constant;
11347 __ dext(dst_reg, src_reg, shamt, 31);
11348 %}
11349 ins_pipe( ialu_regL_regL );
11350 %}
// Pointer bits (CastP2X) logically shifted right by a constant in [0,31].
11352 instruct slrL_P2XReg_immI_0_31(mRegL dst, mRegP src, immI_0_31 shift) %{
11353 match(Set dst (URShiftL (CastP2X src) shift));
11354 ins_cost(80);
11355 format %{ "slrL $dst, $src, $shift @ slrL_P2XReg_immI_0_31" %}
11356 ins_encode %{
11357 Register src_reg = as_Register($src$$reg);
11358 Register dst_reg = as_Register($dst$$reg);
11359 int shamt = $shift$$constant;
11361 __ dsrl(dst_reg, src_reg, shamt);
11362 %}
11363 ins_pipe( ialu_regL_regL );
11364 %}
// Long logical shift-right by a constant in [32,63]: dsrl32 form.
11366 instruct slrL_Reg_immI_32_63(mRegL dst, mRegL src, immI_32_63 shift) %{
11367 match(Set dst (URShiftL src shift));
11368 ins_cost(80);
11369 format %{ "slrL $dst, $src, $shift @ slrL_Reg_immI_32_63" %}
11370 ins_encode %{
11371 Register src_reg = as_Register($src$$reg);
11372 Register dst_reg = as_Register($dst$$reg);
11373 int shamt = $shift$$constant;
11375 __ dsrl32(dst_reg, src_reg, shamt - 32);
11376 %}
11377 ins_pipe( ialu_regL_regL );
11378 %}
// (int)(src >>> shift) for shift > 32: the result fits in 31 bits, so the
// dsrl32 output is already a valid sign-extended int and ConvL2I is free.
// The predicate excludes shift == 32, where bit 31 could be set and the
// narrowing would need an explicit sign-extension.
11380 instruct slrL_Reg_immI_convL2I(mRegI dst, mRegL src, immI_32_63 shift) %{
11381 match(Set dst (ConvL2I (URShiftL src shift)))
;
11382 predicate(n->in(1)->in(2)->get_int() > 32);
11383 ins_cost(80);
11384 format %{ "slrL $dst, $src, $shift @ slrL_Reg_immI_convL2I" %}
11385 ins_encode %{
11386 Register src_reg = as_Register($src$$reg);
11387 Register dst_reg = as_Register($dst$$reg);
11388 int shamt = $shift$$constant;
11390 __ dsrl32(dst_reg, src_reg, shamt - 32);
11391 %}
11392 ins_pipe( ialu_regL_regL );
11393 %}
// Pointer bits (CastP2X) logically shifted right by a constant in [32,63].
11395 instruct slrL_P2XReg_immI_32_63(mRegL dst, mRegP src, immI_32_63 shift) %{
11396 match(Set dst (URShiftL (CastP2X src) shift));
11397 ins_cost(80);
11398 format %{ "slrL $dst, $src, $shift @ slrL_P2XReg_immI_32_63" %}
11399 ins_encode %{
11400 Register src_reg = as_Register($src$$reg);
11401 Register dst_reg = as_Register($dst$$reg);
11402 int shamt = $shift$$constant;
11404 __ dsrl32(dst_reg, src_reg, shamt - 32);
11405 %}
11406 ins_pipe( ialu_regL_regL );
11407 %}
11409 // Xor Instructions
11410 // Xor Register with Register
// Int XOR; the trailing sll-by-0 canonicalizes the result back to a
// sign-extended 32-bit value in the 64-bit register.
11411 instruct xorI_Reg_Reg(mRegI dst, mRegI src1, mRegI src2) %{
11412 match(Set dst (XorI src1 src2));
11414 format %{ "XOR $dst, $src1, $src2 #@xorI_Reg_Reg" %}
11416 ins_encode %{
11417 Register dst = $dst$$Register;
11418 Register src1 = $src1$$Register;
11419 Register src2 = $src2$$Register;
11420 __ xorr(dst, src1, src2);
11421 __ sll(dst, dst, 0); /* long -> int */
11422 %}
11424 ins_pipe( ialu_regI_regI );
11425 %}
11427 // Or Instructions
11428 // Bitwise OR of two int registers: dst = src1 | src2.
11429 instruct orI_Reg_Reg(mRegI dst, mRegI src1, mRegI src2) %{
11430 match(Set dst (OrI src1 src2));
11432 format %{ "OR $dst, $src1, $src2 #@orI_Reg_Reg" %}
11433 ins_encode %{
11434 // Single OR; operand accessors used directly, no temporaries.
11437 __ orr($dst$$Register, $src1$$Register, $src2$$Register);
11438 %}
11440 ins_pipe( ialu_regI_regI );
11441 %}
// (src >>> rshift) | ((src & 1) << lshift) with rshift + lshift == 32
// (enforced by the predicate): rotate right by 1 moves bit 0 to bit 31,
// then a further srl by (rshift - 1) lands both pieces in place.
11443 instruct rotI_shr_logical_Reg(mRegI dst, mRegI src, immI_0_31 rshift, immI_0_31 lshift, immI_1 one) %{
11444 match(Set dst (OrI (URShiftI src rshift) (LShiftI (AndI src one) lshift)));
11445 predicate(32 == ((n->in(1)->in(2)->get_int() + n->in(2)->in(2)->get_int())));
11447 format %{ "rotr $dst, $src, 1 ...\n\t"
11448 "srl $dst, $dst, ($rshift-1) @ rotI_shr_logical_Reg" %}
11449 ins_encode %{
11450 Register dst = $dst$$Register;
11451 Register src = $src$$Register;
11452 int rshift = $rshift$$constant;
11454 __ rotr(dst, src, 1);
11455 if (rshift - 1) { // rshift == 1 needs no second shift
11456 __ srl(dst, dst, rshift - 1);
11457 }
11458 %}
11460 ins_pipe( ialu_regI_regI );
11461 %}
// OR with the raw bits of a pointer (CastP2X) as the second operand.
// NOTE(review): dst/src1 operands are declared mRegL while the rule is OrI —
// looks intentional for pointer-width math, but worth confirming.
11463 instruct orI_Reg_castP2X(mRegL dst, mRegL src1, mRegP src2) %{
11464 match(Set dst (OrI src1 (CastP2X src2)));
11466 format %{ "OR $dst, $src1, $src2 #@orI_Reg_castP2X" %}
11467 ins_encode %{
11468 Register dst = $dst$$Register;
11469 Register src1 = $src1$$Register;
11470 Register src2 = $src2$$Register;
11471 __ orr(dst, src1, src2);
11472 %}
11474 ins_pipe( ialu_regI_regI );
11475 %}
11477 // Logical Shift Right by 8-bit immediate
11478 instruct shr_logical_Reg_imm(mRegI dst, mRegI src, immI8 shift) %{
11479 match(Set dst (URShiftI src shift));
11480 // effect(KILL cr);
11482 format %{ "SRL $dst, $src, $shift #@shr_logical_Reg_imm" %}
11483 ins_encode %{
11484 Register src = $src$$Register;
11485 Register dst = $dst$$Register;
11486 int shift = $shift$$constant;
11488 __ srl(dst, src, shift);
11489 %}
11490 ins_pipe( ialu_regI_regI );
11491 %}
// (src >>> shift) & mask where mask is a contiguous low-bit mask: matched
// to a single bit-field extract. is_int_mask() yields the mask's width.
11493 instruct shr_logical_Reg_imm_nonneg_mask(mRegI dst, mRegI src, immI_0_31 shift, immI_nonneg_mask mask) %{
11494 match(Set dst (AndI (URShiftI src shift) mask));
11496 format %{ "ext $dst, $src, $shift, one-bits($mask) #@shr_logical_Reg_imm_nonneg_mask" %}
11497 ins_encode %{
11498 Register src = $src$$Register;
11499 Register dst = $dst$$Register;
11500 int pos = $shift$$constant;
11501 int size = Assembler::is_int_mask($mask$$constant); // number of one-bits
11503 __ ext(dst, src, pos, size);
11504 %}
11505 ins_pipe( ialu_regI_regI );
11506 %}
// Int rotate-left: (dst << lshift) | (dst >>> rshift) with
// lshift + rshift == 0 (mod 32); emitted as rotate-right by rshift.
11508 instruct rolI_Reg_immI_0_31(mRegI dst, immI_0_31 lshift, immI_0_31 rshift)
11509 %{
11510 predicate(0 == ((n->in(1)->in(2)->get_int() + n->in(2)->in(2)->get_int()) & 0x1f));
11511 match(Set dst (OrI (LShiftI dst lshift) (URShiftI dst rshift)));
11513 ins_cost(100);
11514 format %{ "rotr $dst, $dst, $rshift #@rolI_Reg_immI_0_31" %}
11515 ins_encode %{
11516 Register dst = $dst$$Register;
11517 int sa = $rshift$$constant;
11519 __ rotr(dst, dst, sa);
11520 %}
11521 ins_pipe( ialu_regI_regI );
11522 %}
// Long rotate-left, right-amount in [0,31]: drotr by rshift.
11524 instruct rolL_Reg_immI_0_31(mRegL dst, immI_32_63 lshift, immI_0_31 rshift)
11525 %{
11526 predicate(0 == ((n->in(1)->in(2)->get_int() + n->in(2)->in(2)->get_int()) & 0x3f));
11527 match(Set dst (OrL (LShiftL dst lshift) (URShiftL dst rshift)));
11529 ins_cost(100);
11530 format %{ "rotr $dst, $dst, $rshift #@rolL_Reg_immI_0_31" %}
11531 ins_encode %{
11532 Register dst = $dst$$Register;
11533 int sa = $rshift$$constant;
11535 __ drotr(dst, dst, sa);
11536 %}
11537 ins_pipe( ialu_regI_regI );
11538 %}
// Long rotate-left, right-amount in [32,63]: drotr32 with (rshift - 32).
11540 instruct rolL_Reg_immI_32_63(mRegL dst, immI_0_31 lshift, immI_32_63 rshift)
11541 %{
11542 predicate(0 == ((n->in(1)->in(2)->get_int() + n->in(2)->in(2)->get_int()) & 0x3f));
11543 match(Set dst (OrL (LShiftL dst lshift) (URShiftL dst rshift)));
11545 ins_cost(100);
11546 format %{ "rotr $dst, $dst, $rshift #@rolL_Reg_immI_32_63" %}
11547 ins_encode %{
11548 Register dst = $dst$$Register;
11549 int sa = $rshift$$constant;
11551 __ drotr32(dst, dst, sa - 32);
11552 %}
11553 ins_pipe( ialu_regI_regI );
11554 %}
// Int rotate-right: same rotr encoding, operands of the OrI swapped.
11556 instruct rorI_Reg_immI_0_31(mRegI dst, immI_0_31 rshift, immI_0_31 lshift)
11557 %{
11558 predicate(0 == ((n->in(1)->in(2)->get_int() + n->in(2)->in(2)->get_int()) & 0x1f));
11559 match(Set dst (OrI (URShiftI dst rshift) (LShiftI dst lshift)));
11561 ins_cost(100);
11562 format %{ "rotr $dst, $dst, $rshift #@rorI_Reg_immI_0_31" %}
11563 ins_encode %{
11564 Register dst = $dst$$Register;
11565 int sa = $rshift$$constant;
11567 __ rotr(dst, dst, sa);
11568 %}
11569 ins_pipe( ialu_regI_regI );
11570 %}
// Long rotate-right, amount in [0,31].
11572 instruct rorL_Reg_immI_0_31(mRegL dst, immI_0_31 rshift, immI_32_63 lshift)
11573 %{
11574 predicate(0 == ((n->in(1)->in(2)->get_int() + n->in(2)->in(2)->get_int()) & 0x3f));
11575 match(Set dst (OrL (URShiftL dst rshift) (LShiftL dst lshift)));
11577 ins_cost(100);
11578 format %{ "rotr $dst, $dst, $rshift #@rorL_Reg_immI_0_31" %}
11579 ins_encode %{
11580 Register dst = $dst$$Register;
11581 int sa = $rshift$$constant;
11583 __ drotr(dst, dst, sa);
11584 %}
11585 ins_pipe( ialu_regI_regI );
11586 %}
// Long rotate-right, amount in [32,63].
11588 instruct rorL_Reg_immI_32_63(mRegL dst, immI_32_63 rshift, immI_0_31 lshift)
11589 %{
11590 predicate(0 == ((n->in(1)->in(2)->get_int() + n->in(2)->in(2)->get_int()) & 0x3f));
11591 match(Set dst (OrL (URShiftL dst rshift) (LShiftL dst lshift)));
11593 ins_cost(100);
11594 format %{ "rotr $dst, $dst, $rshift #@rorL_Reg_immI_32_63" %}
11595 ins_encode %{
11596 Register dst = $dst$$Register;
11597 int sa = $rshift$$constant;
11599 __ drotr32(dst, dst, sa - 32);
11600 %}
11601 ins_pipe( ialu_regI_regI );
11602 %}
11604 // Logical Shift Right
// Int logical shift-right by a register amount (srlv uses low 5 bits).
11605 instruct shr_logical_Reg_Reg(mRegI dst, mRegI src, mRegI shift) %{
11606 match(Set dst (URShiftI src shift));
11608 format %{ "SRL $dst, $src, $shift #@shr_logical_Reg_Reg" %}
11609 ins_encode %{
11610 Register src = $src$$Register;
11611 Register dst = $dst$$Register;
11612 Register shift = $shift$$Register;
11613 __ srlv(dst, src, shift);
11614 %}
11615 ins_pipe( ialu_regI_regI );
11616 %}
// Int arithmetic shift-right by constant.
11619 instruct shr_arith_Reg_imm(mRegI dst, mRegI src, immI8 shift) %{
11620 match(Set dst (RShiftI src shift));
11621 // effect(KILL cr);
11623 format %{ "SRA $dst, $src, $shift #@shr_arith_Reg_imm" %}
11624 ins_encode %{
11625 Register src = $src$$Register;
11626 Register dst = $dst$$Register;
11627 int shift = $shift$$constant;
11628 __ sra(dst, src, shift);
11629 %}
11630 ins_pipe( ialu_regI_regI );
11631 %}
// Int arithmetic shift-right by a register amount (srav uses low 5 bits).
11633 instruct shr_arith_Reg_Reg(mRegI dst, mRegI src, mRegI shift) %{
11634 match(Set dst (RShiftI src shift));
11635 // effect(KILL cr);
11637 format %{ "SRA $dst, $src, $shift #@shr_arith_Reg_Reg" %}
11638 ins_encode %{
11639 Register src = $src$$Register;
11640 Register dst = $dst$$Register;
11641 Register shift = $shift$$Register;
11642 __ srav(dst, src, shift);
11643 %}
11644 ins_pipe( ialu_regI_regI );
11645 %}
11647 //----------Convert Int to Boolean---------------------------------------------
// dst = (src != 0) ? 1 : 0. Set dst to 1, then conditionally zero it with
// movz when src == 0. When dst and src are the same register, src is first
// saved in AT so that writing dst does not destroy the value being tested.
11649 instruct convI2B(mRegI dst, mRegI src) %{
11650 match(Set dst (Conv2B src));
11652 ins_cost(100);
11653 format %{ "convI2B $dst, $src @ convI2B" %}
11654 ins_encode %{
11655 Register dst = as_Register($dst$$reg);
11656 Register src = as_Register($src$$reg);
11658 if (dst != src) {
11659 __ daddiu(dst, R0, 1);
11660 __ movz(dst, R0, src); // dst = 0 if src == 0
11661 } else {
11662 __ move(AT, src); // preserve the tested value
11663 __ daddiu(dst, R0, 1);
11664 __ movz(dst, R0, AT);
11665 }
11666 %}
11668 ins_pipe( ialu_regL_regL );
11669 %}
// int -> long: sll by 0 sign-extends the low 32 bits into the full
// register. Skipped when dst == src — presumably ints are already kept
// sign-extended in registers on this port; verify against the callers.
11671 instruct convI2L_reg( mRegL dst, mRegI src) %{
11672 match(Set dst (ConvI2L src));
11674 ins_cost(100);
11675 format %{ "SLL $dst, $src @ convI2L_reg\t" %}
11676 ins_encode %{
11677 Register dst = as_Register($dst$$reg);
11678 Register src = as_Register($src$$reg);
11680 if(dst != src) __ sll(dst, src, 0);
11681 %}
11682 ins_pipe( ialu_regL_regL );
11683 %}
// long -> int: sll by 0 truncates to 32 bits and sign-extends the result.
11686 instruct convL2I_reg( mRegI dst, mRegL src ) %{
11687 match(Set dst (ConvL2I src));
11689 format %{ "MOV $dst, $src @ convL2I_reg" %}
11690 ins_encode %{
11691 Register dst = as_Register($dst$$reg);
11692 Register src = as_Register($src$$reg);
11694 __ sll(dst, src, 0);
11695 %}
11697 ins_pipe( ialu_regI_regI );
11698 %}
// long -> int -> long round trip: a single sll-by-0 performs both the
// truncation and the re-widening sign extension.
11700 instruct convL2I2L_reg( mRegL dst, mRegL src ) %{
11701 match(Set dst (ConvI2L (ConvL2I src)));
11703 format %{ "sll $dst, $src, 0 @ convL2I2L_reg" %}
11704 ins_encode %{
11705 Register dst = as_Register($dst$$reg);
11706 Register src = as_Register($src$$reg);
11708 __ sll(dst, src, 0);
11709 %}
11711 ins_pipe( ialu_regI_regI );
11712 %}
// long -> double: move the 64-bit integer into the FPU register, then
// convert (cvt.d.l).
11714 instruct convL2D_reg( regD dst, mRegL src ) %{
11715 match(Set dst (ConvL2D src));
11716 format %{ "convL2D $dst, $src @ convL2D_reg" %}
11717 ins_encode %{
11718 Register src = as_Register($src$$reg);
11719 FloatRegister dst = as_FloatRegister($dst$$reg);
11721 __ dmtc1(src, dst);
11722 __ cvt_d_l(dst, dst);
11723 %}
11725 ins_pipe( pipe_slow );
11726 %}
// double -> long, fast path. trunc.l.d produces max_long on overflow/NaN;
// if the result is not max_long it is taken as-is. Otherwise the sign of
// src decides between max_long and min_long, and NaN maps to 0.
// The instruction order (branches + delay slots) is load-bearing.
11728 instruct convD2L_reg_fast( mRegL dst, regD src ) %{
11729 match(Set dst (ConvD2L src));
11730 ins_cost(150);
11731 format %{ "convD2L $dst, $src @ convD2L_reg_fast" %}
11732 ins_encode %{
11733 Register dst = as_Register($dst$$reg);
11734 FloatRegister src = as_FloatRegister($src$$reg);
11736 Label Done;
11738 __ trunc_l_d(F30, src);
11739 // max_long: 0x7fffffffffffffff
11740 // __ set64(AT, 0x7fffffffffffffff);
11741 __ daddiu(AT, R0, -1);
11742 __ dsrl(AT, AT, 1); // AT = max_long without a 64-bit literal load
11743 __ dmfc1(dst, F30);
11745 __ bne(dst, AT, Done); // in range: truncated value is correct
11746 __ delayed()->mtc1(R0, F30);
11748 __ cvt_d_w(F30, F30); // F30 = 0.0
11749 __ c_ult_d(src, F30); // src < 0.0 (or unordered)?
11750 __ bc1f(Done); // non-negative overflow: keep max_long
11751 __ delayed()->daddiu(T9, R0, -1);
11753 __ c_un_d(src, src); //NaN?
11754 __ subu(dst, T9, AT); // dst = -1 - max_long = min_long
11755 __ movt(dst, R0); // NaN -> 0
11757 __ bind(Done);
11758 %}
11760 ins_pipe( pipe_slow );
11761 %}
// double -> long, slow path. NaN yields 0; otherwise trunc.l.d is used
// unless FCSR flags the conversion as invalid (out of range), in which
// case the result comes from the SharedRuntime::d2l runtime call.
// NOTE(review): unlike convF2I_reg_slow below, V0 is not saved/restored
// around call_VM_leaf here — confirm the allocator treats V0 as killed.
11763 instruct convD2L_reg_slow( mRegL dst, regD src ) %{
11764 match(Set dst (ConvD2L src));
11765 ins_cost(250);
11766 format %{ "convD2L $dst, $src @ convD2L_reg_slow" %}
11767 ins_encode %{
11768 Register dst = as_Register($dst$$reg);
11769 FloatRegister src = as_FloatRegister($src$$reg);
11771 Label L;
11773 __ c_un_d(src, src); //NaN?
11774 __ bc1t(L);
11775 __ delayed();
11776 __ move(dst, R0); // NaN -> 0 (in the delay slot)
11778 __ trunc_l_d(F30, src);
11779 __ cfc1(AT, 31); // read FCSR
11780 __ li(T9, 0x10000); // cause bit: invalid operation (presumably; verify)
11781 __ andr(AT, AT, T9);
11782 __ beq(AT, R0, L); // conversion valid: use truncated value
11783 __ delayed()->dmfc1(dst, F30);
11785 __ mov_d(F12, src); // F12 = first float argument
11786 __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::d2l), 1);
11787 __ move(dst, V0);
11788 __ bind(L);
11789 %}
11791 ins_pipe( pipe_slow );
11792 %}
// float -> int, fast path: truncate, then force the result to 0 when the
// source is NaN (c.un sets the FP condition; movt clears dst on true).
11794 instruct convF2I_reg_fast( mRegI dst, regF src ) %{
11795 match(Set dst (ConvF2I src));
11796 ins_cost(150);
11797 format %{ "convf2i $dst, $src @ convF2I_reg_fast" %}
11798 ins_encode %{
11799 Register dreg = $dst$$Register;
11800 FloatRegister fval = $src$$FloatRegister;
11802 __ trunc_w_s(F30, fval);
11803 __ mfc1(dreg, F30);
11804 __ c_un_s(fval, fval); //NaN?
11805 __ movt(dreg, R0);
11806 %}
11808 ins_pipe( pipe_slow );
11809 %}
// float -> int, slow path. NaN yields 0; an FCSR-flagged invalid
// conversion falls back to SharedRuntime::f2i. V0 is explicitly preserved
// around the leaf call (see the 2014/01/08 bug note below).
11811 instruct convF2I_reg_slow( mRegI dst, regF src ) %{
11812 match(Set dst (ConvF2I src));
11813 ins_cost(250);
11814 format %{ "convf2i $dst, $src @ convF2I_reg_slow" %}
11815 ins_encode %{
11816 Register dreg = $dst$$Register;
11817 FloatRegister fval = $src$$FloatRegister;
11818 Label L;
11820 __ c_un_s(fval, fval); //NaN?
11821 __ bc1t(L);
11822 __ delayed();
11823 __ move(dreg, R0); // NaN -> 0 (in the delay slot)
11825 __ trunc_w_s(F30, fval);
11827 /* Call SharedRuntime:f2i() to do valid convention */
11828 __ cfc1(AT, 31); // read FCSR
11829 __ li(T9, 0x10000); // cause bit: invalid operation (presumably; verify)
11830 __ andr(AT, AT, T9);
11831 __ beq(AT, R0, L); // conversion valid: use truncated value
11832 __ delayed()->mfc1(dreg, F30);
11834 __ mov_s(F12, fval); // F12 = first float argument
11836 /* 2014/01/08 Fu : This bug was found when running ezDS's control-panel.
11837 * J 982 C2 javax.swing.text.BoxView.layoutMajorAxis(II[I[I)V (283 bytes) @ 0x000000555c46aa74
11838 *
11839 * An interger array index has been assigned to V0, and then changed from 1 to Integer.MAX_VALUE.
11840 * V0 is corrupted during call_VM_leaf(), and should be preserved.
11841 */
11842 if(dreg != V0) {
11843 __ push(V0);
11844 }
11845 __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::f2i), 1);
11846 if(dreg != V0) {
11847 __ move(dreg, V0);
11848 __ pop(V0);
11849 }
11850 __ bind(L);
11851 %}
11853 ins_pipe( pipe_slow );
11854 %}
// float -> long, fast path: truncate to 64-bit, then zero the result when
// the source is NaN (c.un + movt).
11856 instruct convF2L_reg_fast( mRegL dst, regF src ) %{
11857 match(Set dst (ConvF2L src));
11858 ins_cost(150);
11859 format %{ "convf2l $dst, $src @ convF2L_reg_fast" %}
11860 ins_encode %{
11861 Register dreg = $dst$$Register;
11862 FloatRegister fval = $src$$FloatRegister;
11864 __ trunc_l_s(F30, fval);
11865 __ dmfc1(dreg, F30);
11866 __ c_un_s(fval, fval); //NaN?
11867 __ movt(dreg, R0);
11868 %}
11870 ins_pipe( pipe_slow );
11871 %}
// float -> long, slow path. NaN yields 0; an FCSR-flagged invalid
// conversion falls back to SharedRuntime::f2l.
// NOTE(review): V0 is not saved/restored around call_VM_leaf here, unlike
// convF2I_reg_slow above — confirm the allocator treats V0 as killed.
11873 instruct convF2L_reg_slow( mRegL dst, regF src ) %{
11874 match(Set dst (ConvF2L src));
11875 ins_cost(250);
11876 format %{ "convf2l $dst, $src @ convF2L_reg_slow" %}
11877 ins_encode %{
11878 Register dst = as_Register($dst$$reg);
11879 FloatRegister fval = $src$$FloatRegister;
11880 Label L;
11882 __ c_un_s(fval, fval); //NaN?
11883 __ bc1t(L);
11884 __ delayed();
11885 __ move(dst, R0); // NaN -> 0 (in the delay slot)
11887 __ trunc_l_s(F30, fval);
11888 __ cfc1(AT, 31); // read FCSR
11889 __ li(T9, 0x10000); // cause bit: invalid operation (presumably; verify)
11890 __ andr(AT, AT, T9);
11891 __ beq(AT, R0, L); // conversion valid: use truncated value
11892 __ delayed()->dmfc1(dst, F30);
11894 __ mov_s(F12, fval); // F12 = first float argument
11895 __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::f2l), 1);
11896 __ move(dst, V0);
11897 __ bind(L);
11898 %}
11900 ins_pipe( pipe_slow );
11901 %}
// long -> float: move the 64-bit integer into the FPU register, then
// convert (cvt.s.l). The unused `Label L;` that the original declared has
// been removed — no branch is ever emitted in this encoding.
11903 instruct convL2F_reg( regF dst, mRegL src ) %{
11904 match(Set dst (ConvL2F src));
11905 format %{ "convl2f $dst, $src @ convL2F_reg" %}
11906 ins_encode %{
11907 FloatRegister dst = $dst$$FloatRegister;
11908 Register src = as_Register($src$$reg);
11911 __ dmtc1(src, dst);
11912 __ cvt_s_l(dst, dst);
11913 %}
11915 ins_pipe( pipe_slow );
11916 %}
// int -> float: move the 32-bit integer into the FPU register, then
// convert (cvt.s.w).
11918 instruct convI2F_reg( regF dst, mRegI src ) %{
11919 match(Set dst (ConvI2F src));
11920 format %{ "convi2f $dst, $src @ convI2F_reg" %}
11921 ins_encode %{
11922 Register src = $src$$Register;
11923 FloatRegister dst = $dst$$FloatRegister;
11925 __ mtc1(src, dst);
11926 __ cvt_s_w(dst, dst);
11927 %}
11929 ins_pipe( fpu_regF_regF );
11930 %}
// CmpLTMask against zero: dst = (p < 0) ? -1 : 0. An arithmetic shift by
// 31 broadcasts the sign bit across the whole register.
11932 instruct cmpLTMask_immI0( mRegI dst, mRegI p, immI0 zero ) %{
11933 match(Set dst (CmpLTMask p zero));
11934 ins_cost(100);
11936 format %{ "sra $dst, $p, 31 @ cmpLTMask_immI0" %}
11937 ins_encode %{
11941 __ sra($dst$$Register, $p$$Register, 31);
11942 %}
11943 ins_pipe( pipe_slow );
11944 %}
// CmpLTMask: dst = (p < q) ? -1 : 0. slt yields 0/1; negating turns the 1
// into an all-ones mask.
11947 instruct cmpLTMask( mRegI dst, mRegI p, mRegI q ) %{
11948 match(Set dst (CmpLTMask p q));
11949 ins_cost(400);
11951 format %{ "cmpLTMask $dst, $p, $q @ cmpLTMask" %}
11952 ins_encode %{
11953 Register p = $p$$Register;
11954 Register q = $q$$Register;
11955 Register dst = $dst$$Register;
11957 __ slt(dst, p, q);
11958 __ subu(dst, R0, dst);
11959 %}
11960 ins_pipe( pipe_slow );
11961 %}
// Pointer -> boolean: dst = (src != NULL) ? 1 : 0; same movz idiom as
// convI2B, with AT used when dst aliases src.
11963 instruct convP2B(mRegI dst, mRegP src) %{
11964 match(Set dst (Conv2B src));
11966 ins_cost(100);
11967 format %{ "convP2B $dst, $src @ convP2B" %}
11968 ins_encode %{
11969 Register dst = as_Register($dst$$reg);
11970 Register src = as_Register($src$$reg);
11972 if (dst != src) {
11973 __ daddiu(dst, R0, 1);
11974 __ movz(dst, R0, src); // dst = 0 if src == 0
11975 } else {
11976 __ move(AT, src); // preserve the tested value
11977 __ daddiu(dst, R0, 1);
11978 __ movz(dst, R0, AT);
11979 }
11980 %}
11982 ins_pipe( ialu_regL_regL );
11983 %}
// int -> double: move the 32-bit integer into the FPU register, then
// convert (cvt.d.w).
11986 instruct convI2D_reg_reg(regD dst, mRegI src) %{
11987 match(Set dst (ConvI2D src));
11988 format %{ "conI2D $dst, $src @convI2D_reg" %}
11989 ins_encode %{
11990 Register src = $src$$Register;
11991 FloatRegister dst = $dst$$FloatRegister;
11992 __ mtc1(src, dst);
11993 __ cvt_d_w(dst, dst);
11994 %}
11995 ins_pipe( fpu_regF_regF );
11996 %}
// float -> double: single cvt.d.s.
11998 instruct convF2D_reg_reg(regD dst, regF src) %{
11999 match(Set dst (ConvF2D src));
12000 format %{ "convF2D $dst, $src\t# @convF2D_reg_reg" %}
12001 ins_encode %{
12002 FloatRegister dst = $dst$$FloatRegister;
12003 FloatRegister src = $src$$FloatRegister;
12005 __ cvt_d_s(dst, src);
12006 %}
12007 ins_pipe( fpu_regF_regF );
12008 %}
// double -> float: single cvt.s.d.
12010 instruct convD2F_reg_reg(regF dst, regD src) %{
12011 match(Set dst (ConvD2F src));
12012 format %{ "convD2F $dst, $src\t# @convD2F_reg_reg" %}
12013 ins_encode %{
12014 FloatRegister dst = $dst$$FloatRegister;
12015 FloatRegister src = $src$$FloatRegister;
12017 __ cvt_s_d(dst, src);
12018 %}
12019 ins_pipe( fpu_regF_regF );
12020 %}
12022 // Convert a double to an int. If the double is a NAN, stuff a zero in instead.
// Fast path: trunc.w.d yields max_int on overflow/NaN; if the result is not
// max_int it is used directly. Otherwise the sign of src chooses between
// max_int and min_int, and NaN maps to 0. Branch/delay-slot order matters.
12023 instruct convD2I_reg_reg_fast( mRegI dst, regD src ) %{
12024 match(Set dst (ConvD2I src));
12026 ins_cost(150);
12027 format %{ "convD2I $dst, $src\t# @ convD2I_reg_reg_fast" %}
12029 ins_encode %{
12030 FloatRegister src = $src$$FloatRegister;
12031 Register dst = $dst$$Register;
12033 Label Done;
12035 __ trunc_w_d(F30, src);
12036 // max_int: 2147483647
12037 __ move(AT, 0x7fffffff);
12038 __ mfc1(dst, F30);
12040 __ bne(dst, AT, Done); // in range: truncated value is correct
12041 __ delayed()->mtc1(R0, F30);
12043 __ cvt_d_w(F30, F30); // F30 = 0.0
12044 __ c_ult_d(src, F30); // src < 0.0 (or unordered)?
12045 __ bc1f(Done); // non-negative overflow: keep max_int
12046 __ delayed()->addiu(T9, R0, -1);
12048 __ c_un_d(src, src); //NaN?
12049 __ subu32(dst, T9, AT); // dst = -1 - max_int = min_int
12050 __ movt(dst, R0); // NaN -> 0
12052 __ bind(Done);
12053 %}
12054 ins_pipe( pipe_slow );
12055 %}
// double -> int, slow path: trunc.w.d, falling back to SharedRuntime::d2i
// when FCSR flags the conversion as invalid.
// NOTE(review): no explicit NaN pre-check here (unlike the F2I slow path)
// and V0 is not preserved around call_VM_leaf — confirm both are safe.
12057 instruct convD2I_reg_reg_slow( mRegI dst, regD src ) %{
12058 match(Set dst (ConvD2I src));
12060 ins_cost(250);
12061 format %{ "convD2I $dst, $src\t# @ convD2I_reg_reg_slow" %}
12063 ins_encode %{
12064 FloatRegister src = $src$$FloatRegister;
12065 Register dst = $dst$$Register;
12066 Label L;
12068 __ trunc_w_d(F30, src);
12069 __ cfc1(AT, 31); // read FCSR
12070 __ li(T9, 0x10000); // cause bit: invalid operation (presumably; verify)
12071 __ andr(AT, AT, T9);
12072 __ beq(AT, R0, L); // conversion valid: use truncated value
12073 __ delayed()->mfc1(dst, F30);
12075 __ mov_d(F12, src); // F12 = first float argument
12076 __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::d2i), 1);
12077 __ move(dst, V0);
12078 __ bind(L);
12080 %}
12081 ins_pipe( pipe_slow );
12082 %}
12084 // Convert oop pointer into compressed form
// General case: the encode helper must handle a possibly-NULL oop.
12085 instruct encodeHeapOop(mRegN dst, mRegP src) %{
12086 predicate(n->bottom_type()->make_ptr()->ptr() != TypePtr::NotNull);
12087 match(Set dst (EncodeP src));
12088 format %{ "encode_heap_oop $dst,$src" %}
12089 ins_encode %{
12090 Register src = $src$$Register;
12091 Register dst = $dst$$Register;
12093 __ encode_heap_oop(dst, src);
12094 %}
12095 ins_pipe( ialu_regL_regL );
12096 %}
// Known-non-NULL oop: the cheaper not-null encode path can be used.
12098 instruct encodeHeapOop_not_null(mRegN dst, mRegP src) %{
12099 predicate(n->bottom_type()->make_ptr()->ptr() == TypePtr::NotNull);
12100 match(Set dst (EncodeP src));
12101 format %{ "encode_heap_oop_not_null $dst,$src @ encodeHeapOop_not_null" %}
12102 ins_encode %{
12103 __ encode_heap_oop_not_null($dst$$Register, $src$$Register);
12104 %}
12105 ins_pipe( ialu_regL_regL );
12106 %}
12108 instruct decodeHeapOop(mRegP dst, mRegN src) %{
12109 predicate(n->bottom_type()->is_ptr()->ptr() != TypePtr::NotNull &&
12110 n->bottom_type()->is_ptr()->ptr() != TypePtr::Constant);
12111 match(Set dst (DecodeN src));
12112 format %{ "decode_heap_oop $dst,$src @ decodeHeapOop" %}
12113 ins_encode %{
12114 Register s = $src$$Register;
12115 Register d = $dst$$Register;
12117 __ decode_heap_oop(d, s);
12118 %}
12119 ins_pipe( ialu_regL_regL );
12120 %}
12122 instruct decodeHeapOop_not_null(mRegP dst, mRegN src) %{
12123 predicate(n->bottom_type()->is_ptr()->ptr() == TypePtr::NotNull ||
12124 n->bottom_type()->is_ptr()->ptr() == TypePtr::Constant);
12125 match(Set dst (DecodeN src));
12126 format %{ "decode_heap_oop_not_null $dst,$src @ decodeHeapOop_not_null" %}
12127 ins_encode %{
12128 Register s = $src$$Register;
12129 Register d = $dst$$Register;
12130 if (s != d) {
12131 __ decode_heap_oop_not_null(d, s);
12132 } else {
12133 __ decode_heap_oop_not_null(d);
12134 }
12135 %}
12136 ins_pipe( ialu_regL_regL );
12137 %}
// Encode a (non-null) klass pointer into compressed form.
instruct encodeKlass_not_null(mRegN dst, mRegP src) %{
  match(Set dst (EncodePKlass src));
  // Format fixed: it previously claimed "encode_heap_oop_not_null", which
  // made -XX:+PrintOptoAssembly output misleading for klass encodes.
  format %{ "encode_klass_not_null $dst,$src @ encodeKlass_not_null" %}
  ins_encode %{
    __ encode_klass_not_null($dst$$Register, $src$$Register);
  %}
  ins_pipe( ialu_regL_regL );
%}
// Decode a compressed klass pointer (always non-null); uses the in-place
// single-register form when src and dst coincide.
instruct decodeKlass_not_null(mRegP dst, mRegN src) %{
  match(Set dst (DecodeNKlass src));
  format %{ "decode_heap_klass_not_null $dst,$src" %}
  ins_encode %{
    Register s = $src$$Register;
    Register d = $dst$$Register;
    if (s != d) {
      __ decode_klass_not_null(d, s);
    } else {
      __ decode_klass_not_null(d);
    }
  %}
  ins_pipe( ialu_regL_regL );
%}
//FIXME
// Load the current JavaThread* into dst.  With OPT_THREAD the thread is
// cached in the dedicated TREG register; otherwise it is fetched via
// get_thread.
instruct tlsLoadP(mRegP dst) %{
  match(Set dst (ThreadLocal));

  ins_cost(0);
  format %{ " get_thread in $dst #@tlsLoadP" %}
  ins_encode %{
    Register dst = $dst$$Register;
#ifdef OPT_THREAD
    __ move(dst, TREG);
#else
    __ get_thread(dst);
#endif
  %}

  ins_pipe( ialu_loadI );
%}
// CheckCastPP only narrows the compile-time type; dst already holds the
// value, so no machine code is emitted.
instruct checkCastPP( mRegP dst ) %{
  match(Set dst (CheckCastPP dst));

  // Typo fixed: "chekCastPP" -> "checkCastPP" so the opto-assembly tag
  // matches the instruct name and is greppable.
  format %{ "#checkcastPP of $dst (empty encoding) #@checkCastPP" %}
  ins_encode( /*empty encoding*/ );
  ins_pipe( empty );
%}
// CastPP: compile-time-only pointer cast; zero-size, empty encoding.
instruct castPP(mRegP dst)
%{
  match(Set dst (CastPP dst));

  size(0);
  format %{ "# castPP of $dst" %}
  ins_encode(/* empty encoding */);
  ins_pipe(empty);
%}

// CastII: compile-time-only int cast; empty encoding, zero cost.
instruct castII( mRegI dst ) %{
  match(Set dst (CastII dst));
  format %{ "#castII of $dst empty encoding" %}
  ins_encode( /*empty encoding*/ );
  ins_cost(0);
  ins_pipe( empty );
%}
// Return Instruction
// Remove the return address & jump to it.
instruct Ret() %{
  match(Return);
  format %{ "RET #@Ret" %}

  ins_encode %{
    // Jump through the return address; the nop fills the branch-delay slot.
    __ jr(RA);
    __ nop();
  %}

  ins_pipe( pipe_jump );
%}
12222 /*
12223 // For Loongson CPUs, jr seems too slow, so this rule shouldn't be imported.
12224 instruct jumpXtnd(mRegL switch_val) %{
12225 match(Jump switch_val);
12227 ins_cost(350);
12229 format %{ "load T9 <-- [$constanttablebase, $switch_val, $constantoffset] @ jumpXtnd\n\t"
12230 "jr T9\n\t"
12231 "nop" %}
12232 ins_encode %{
12233 Register table_base = $constanttablebase;
12234 int con_offset = $constantoffset;
12235 Register switch_reg = $switch_val$$Register;
12237 if (UseLoongsonISA) {
12238 if (Assembler::is_simm(con_offset, 8)) {
12239 __ gsldx(T9, table_base, switch_reg, con_offset);
12240 } else if (Assembler::is_simm16(con_offset)) {
12241 __ daddu(T9, table_base, switch_reg);
12242 __ ld(T9, T9, con_offset);
12243 } else {
12244 __ move(T9, con_offset);
12245 __ daddu(AT, table_base, switch_reg);
12246 __ gsldx(T9, AT, T9, 0);
12247 }
12248 } else {
12249 if (Assembler::is_simm16(con_offset)) {
12250 __ daddu(T9, table_base, switch_reg);
12251 __ ld(T9, T9, con_offset);
12252 } else {
12253 __ move(T9, con_offset);
12254 __ daddu(AT, table_base, switch_reg);
12255 __ daddu(AT, T9, AT);
12256 __ ld(T9, AT, 0);
12257 }
12258 }
12260 __ jr(T9);
12261 __ nop();
12263 %}
12264 ins_pipe(pipe_jump);
12265 %}
12266 */
// Jump Direct - Label defines a relative address from JMP
instruct jmpDir(label labl) %{
  match(Goto);
  effect(USE labl);

  ins_cost(300);
  format %{ "JMP $labl #@jmpDir" %}

  ins_encode %{
    Label &L = *($labl$$label);
    // A null label means the target is not yet bound: emit a branch with
    // offset 0 to be patched later.
    if(&L)
      __ b(L);
    else
      __ b(int(0));
    __ nop();   // branch-delay slot
  %}

  ins_pipe( pipe_jump );
  ins_pc_relative(1);
%}
// Tail Jump; remove the return address; jump to target.
// TailCall above leaves the return address around.
// TailJump is used in only one place, the rethrow_Java stub (fancy_jump=2).
// ex_oop (Exception Oop) is needed in %o0 at the jump. As there would be a
// "restore" before this instruction (in Epilogue), we need to materialize it
// in %i0.
//FIXME
instruct tailjmpInd(mRegP jump_target,mRegP ex_oop) %{
  match( TailJump jump_target ex_oop );
  ins_cost(200);
  format %{ "Jmp $jump_target  ; ex_oop = $ex_oop #@tailjmpInd" %}
  ins_encode %{
    Register target = $jump_target$$Register;

    /* 2012/9/14 Jin: V0, V1 are indicated in:
     *      [stubGenerator_mips.cpp] generate_forward_exception()
     *      [runtime_mips.cpp] OptoRuntime::generate_exception_blob()
     */
    Register oop  = $ex_oop$$Register;
    Register exception_oop = V0;
    Register exception_pc = V1;

    // Hand off the pending exception in the V0/V1 convention expected by
    // the exception stubs, then jump to the target.
    __ move(exception_pc, RA);
    __ move(exception_oop, oop);

    __ jr(target);
    __ nop();   // branch-delay slot
  %}
  ins_pipe( pipe_jump );
%}
// ============================================================================
// Procedure Call/Return Instructions
// Call Java Static Instruction
// Note: If this code changes, the corresponding ret_addr_offset() and
//       compute_padding() functions will have to be adjusted.
instruct CallStaticJavaDirect(method meth) %{
  match(CallStaticJava);
  effect(USE meth);

  ins_cost(300);
  format %{ "CALL,static #@CallStaticJavaDirect " %}
  ins_encode( Java_Static_Call( meth ) );
  ins_pipe( pipe_slow );
  ins_pc_relative(1);
%}

// Call Java Dynamic Instruction
// Note: If this code changes, the corresponding ret_addr_offset() and
//       compute_padding() functions will have to be adjusted.
instruct CallDynamicJavaDirect(method meth) %{
  match(CallDynamicJava);
  effect(USE meth);

  ins_cost(300);
  format %{"MOV IC_Klass, (oop)-1\n\t"
           "CallDynamic @ CallDynamicJavaDirect" %}
  ins_encode( Java_Dynamic_Call( meth ) );
  ins_pipe( pipe_slow );
  ins_pc_relative(1);
%}

// Leaf call with no float-point arguments/results; aligned for patching.
instruct CallLeafNoFPDirect(method meth) %{
  match(CallLeafNoFP);
  effect(USE meth);

  ins_cost(300);
  format %{ "CALL_LEAF_NOFP,runtime " %}
  ins_encode(Java_To_Runtime(meth));
  ins_pipe( pipe_slow );
  ins_pc_relative(1);
  ins_alignment(16);
%}
// Prefetch instructions.

// Prefetch for read: compute the effective address base + (index << scale)
// + disp into AT and issue a load-hint prefetch.
instruct prefetchrNTA( memory mem ) %{
  match(PrefetchRead mem);
  ins_cost(125);

  format %{ "pref $mem\t# Prefetch into non-temporal cache for read @ prefetchrNTA" %}
  ins_encode %{
    int  base = $mem$$base;
    int  index = $mem$$index;
    int  scale = $mem$$scale;
    int  disp = $mem$$disp;

    // AT = base + (index << scale)
    if( index != 0 ) {
      if (scale == 0) {
        __ daddu(AT, as_Register(base), as_Register(index));
      } else {
        __ dsll(AT, as_Register(index), scale);
        __ daddu(AT, as_Register(base), AT);
      }
    } else {
      __ move(AT, as_Register(base));
    }
    // Add the displacement to the base+index sum already in AT.
    // Bug fix: this used to re-read the base register (discarding the index
    // contribution) and, on the simm16 path, added disp twice -- so the
    // prefetch targeted the wrong address.
    if( Assembler::is_simm16(disp) ) {
      __ daddiu(AT, AT, disp);
    } else {
      __ move(T9, disp);
      __ daddu(AT, AT, T9);
    }
    __ pref(0, AT, 0); //hint: 0:load
  %}
  ins_pipe(pipe_slow);
%}
// Prefetch for write: same address computation as prefetchrNTA, but with
// the store hint.
instruct prefetchwNTA( memory mem ) %{
  match(PrefetchWrite mem);
  ins_cost(125);
  format %{ "pref $mem\t# Prefetch to non-temporal cache for write @ prefetchwNTA" %}
  ins_encode %{
    int  base = $mem$$base;
    int  index = $mem$$index;
    int  scale = $mem$$scale;
    int  disp = $mem$$disp;

    // AT = base + (index << scale)
    if( index != 0 ) {
      if (scale == 0) {
        __ daddu(AT, as_Register(base), as_Register(index));
      } else {
        __ dsll(AT, as_Register(index), scale);
        __ daddu(AT, as_Register(base), AT);
      }
    } else {
      __ move(AT, as_Register(base));
    }
    // Add the displacement to the base+index sum already in AT.
    // Bug fix: this used to re-read the base register (discarding the index
    // contribution) and, on the simm16 path, added disp twice -- so the
    // prefetch targeted the wrong address.
    if( Assembler::is_simm16(disp) ) {
      __ daddiu(AT, AT, disp);
    } else {
      __ move(T9, disp);
      __ daddu(AT, AT, T9);
    }
    __ pref(1, AT, 0); //hint: 1:store
  %}
  ins_pipe(pipe_slow);
%}
// Prefetch instructions for allocation.

// Allocation prefetch implemented as a byte load into R0 (a discarded
// load touches the line), using gslbx where the Loongson ISA is available.
instruct prefetchAllocNTA( memory mem ) %{
  match(PrefetchAllocation mem);
  ins_cost(125);
  format %{ "pref $mem\t# Prefetch allocation @ prefetchAllocNTA" %}
  ins_encode %{
    int  base = $mem$$base;
    int  index = $mem$$index;
    int  scale = $mem$$scale;
    int  disp = $mem$$disp;

    // Loads into R0 are architecturally discarded; only the cache fill
    // side effect remains.
    Register dst = R0;

    if( index != 0 ) {
      if( Assembler::is_simm16(disp) ) {
        if( UseLoongsonISA ) {
          if (scale == 0) {
            __ gslbx(dst, as_Register(base), as_Register(index), disp);
          } else {
            __ dsll(AT, as_Register(index), scale);
            __ gslbx(dst, as_Register(base), AT, disp);
          }
        } else {
          if (scale == 0) {
            __ addu(AT, as_Register(base), as_Register(index));
          } else {
            __ dsll(AT, as_Register(index), scale);
            __ addu(AT, as_Register(base), AT);
          }
          __ lb(dst, AT, disp);
        }
      } else {
        if (scale == 0) {
          __ addu(AT, as_Register(base), as_Register(index));
        } else {
          __ dsll(AT, as_Register(index), scale);
          __ addu(AT, as_Register(base), AT);
        }
        __ move(T9, disp);
        if( UseLoongsonISA ) {
          __ gslbx(dst, AT, T9, 0);
        } else {
          __ addu(AT, AT, T9);
          __ lb(dst, AT, 0);
        }
      }
    } else {
      // No index register: address is base + disp.
      if( Assembler::is_simm16(disp) ) {
        __ lb(dst, as_Register(base), disp);
      } else {
        __ move(T9, disp);
        if( UseLoongsonISA ) {
          __ gslbx(dst, as_Register(base), T9, 0);
        } else {
          __ addu(AT, as_Register(base), T9);
          __ lb(dst, AT, 0);
        }
      }
    }
  %}
  ins_pipe(pipe_slow);
%}
// Call runtime without safepoint
instruct CallLeafDirect(method meth) %{
  match(CallLeaf);
  effect(USE meth);

  ins_cost(300);
  format %{ "CALL_LEAF,runtime #@CallLeafDirect " %}
  ins_encode(Java_To_Runtime(meth));
  ins_pipe( pipe_slow );
  ins_pc_relative(1);
  ins_alignment(16);
%}
// Load Char (16bit unsigned)
instruct loadUS(mRegI dst, memory mem) %{
  match(Set dst (LoadUS mem));

  ins_cost(125);
  format %{ "loadUS  $dst,$mem @ loadC" %}
  ins_encode(load_C_enc(dst, mem));
  ins_pipe( ialu_loadI );
%}

// Load Char and widen to long; the zero-extending char load already
// produces the correct 64-bit value, so the same encoding is reused.
instruct loadUS_convI2L(mRegL dst, memory mem) %{
  match(Set dst (ConvI2L (LoadUS mem)));

  ins_cost(125);
  format %{ "loadUS  $dst,$mem @ loadUS_convI2L" %}
  ins_encode(load_C_enc(dst, mem));
  ins_pipe( ialu_loadI );
%}

// Store Char (16bit unsigned)
instruct storeC(memory mem, mRegI src) %{
  match(Set mem (StoreC mem src));

  ins_cost(125);
  format %{ "storeC  $src, $mem @ storeC" %}
  ins_encode(store_C_reg_enc(mem, src));
  ins_pipe( ialu_loadI );
%}

// Store a zero char without occupying a source register.
instruct storeC0(memory mem, immI0 zero) %{
  match(Set mem (StoreC mem zero));

  ins_cost(125);
  format %{ "storeC  $zero, $mem @ storeC0" %}
  ins_encode(store_C0_enc(mem));
  ins_pipe( ialu_loadI );
%}
// Load float 0.0 by moving integer zero into the FPR (no constant-table
// access needed).
instruct loadConF0(regF dst, immF0 zero) %{
  match(Set dst zero);
  ins_cost(100);

  format %{ "mov $dst, zero @ loadConF0\n"%}
  ins_encode %{
    FloatRegister dst = $dst$$FloatRegister;

    __ mtc1(R0, dst);
  %}
  ins_pipe( fpu_loadF );
%}

// Load a float constant from the constant table; uses gslwxc1 on Loongson
// when the offset does not fit in 16 bits.
instruct loadConF(regF dst, immF src) %{
  match(Set dst src);
  ins_cost(125);

  format %{ "lwc1 $dst, $constantoffset[$constanttablebase] # load FLOAT $src from table @ loadConF" %}
  ins_encode %{
    int con_offset = $constantoffset($src);

    if (Assembler::is_simm16(con_offset)) {
      __ lwc1($dst$$FloatRegister, $constanttablebase, con_offset);
    } else {
      __ set64(AT, con_offset);
      if (UseLoongsonISA) {
        __ gslwxc1($dst$$FloatRegister, $constanttablebase, AT, 0);
      } else {
        __ daddu(AT, $constanttablebase, AT);
        __ lwc1($dst$$FloatRegister, AT, 0);
      }
    }
  %}
  ins_pipe( fpu_loadF );
%}

// Load double 0.0 by moving 64-bit integer zero into the FPR.
instruct loadConD0(regD dst, immD0 zero) %{
  match(Set dst zero);
  ins_cost(100);

  format %{ "mov $dst, zero @ loadConD0"%}
  ins_encode %{
    FloatRegister dst = as_FloatRegister($dst$$reg);

    __ dmtc1(R0, dst);
  %}
  ins_pipe( fpu_loadF );
%}

// Load a double constant from the constant table; mirrors loadConF with
// 64-bit loads.
instruct loadConD(regD dst, immD src) %{
  match(Set dst src);
  ins_cost(125);

  format %{ "ldc1 $dst, $constantoffset[$constanttablebase] # load DOUBLE $src from table @ loadConD" %}
  ins_encode %{
    int con_offset = $constantoffset($src);

    if (Assembler::is_simm16(con_offset)) {
      __ ldc1($dst$$FloatRegister, $constanttablebase, con_offset);
    } else {
      __ set64(AT, con_offset);
      if (UseLoongsonISA) {
        __ gsldxc1($dst$$FloatRegister, $constanttablebase, AT, 0);
      } else {
        __ daddu(AT, $constanttablebase, AT);
        __ ldc1($dst$$FloatRegister, AT, 0);
      }
    }
  %}
  ins_pipe( fpu_loadF );
%}
// Store register Float value (it is faster than store from FPU register)
instruct storeF_reg( memory mem, regF src) %{
  match(Set mem (StoreF mem src));

  ins_cost(50);
  format %{ "store   $mem, $src\t# store float @ storeF_reg" %}
  ins_encode(store_F_reg_enc(mem, src));
  ins_pipe( fpu_storeF );
%}
// Store float 0.0 directly from R0 (integer zero has the same bit pattern
// as +0.0f), avoiding an FPU register entirely.  The branches below select
// an addressing sequence for every combination of Loongson-extension
// availability, index register, scale, and displacement size.
instruct storeF_imm0( memory mem, immF0 zero) %{
  match(Set mem (StoreF mem zero));

  ins_cost(40);
  format %{ "store   $mem, zero\t# store float @ storeF_imm0" %}
  ins_encode %{
    int      base = $mem$$base;
    int     index = $mem$$index;
    int     scale = $mem$$scale;
    int      disp = $mem$$disp;

    if( index != 0 ) {
      if ( UseLoongsonISA ) {
        // gsswx takes only an 8-bit immediate displacement.
        if ( Assembler::is_simm(disp, 8) ) {
          if ( scale == 0 ) {
            __ gsswx(R0, as_Register(base), as_Register(index), disp);
          } else {
            __ dsll(T9, as_Register(index), scale);
            __ gsswx(R0, as_Register(base), T9, disp);
          }
        } else if ( Assembler::is_simm16(disp) ) {
          if ( scale == 0 ) {
            __ daddu(AT, as_Register(base), as_Register(index));
          } else {
            __ dsll(T9, as_Register(index), scale);
            __ daddu(AT, as_Register(base), T9);
          }
          __ sw(R0, AT, disp);
        } else {
          // Displacement too large for an immediate: fold it into the
          // index side and use the indexed store.
          if ( scale == 0 ) {
            __ move(T9, disp);
            __ daddu(AT, as_Register(index), T9);
            __ gsswx(R0, as_Register(base), AT, 0);
          } else {
            __ dsll(T9, as_Register(index), scale);
            __ move(AT, disp);
            __ daddu(AT, AT, T9);
            __ gsswx(R0, as_Register(base), AT, 0);
          }
        }
      } else { //not use loongson isa
        if(scale != 0) {
          __ dsll(T9, as_Register(index), scale);
          __ daddu(AT, as_Register(base), T9);
        } else {
          __ daddu(AT, as_Register(base), as_Register(index));
        }
        if( Assembler::is_simm16(disp) ) {
          __ sw(R0, AT, disp);
        } else {
          __ move(T9, disp);
          __ daddu(AT, AT, T9);
          __ sw(R0, AT, 0);
        }
      }
    } else { //index is 0
      if ( UseLoongsonISA ) {
        if ( Assembler::is_simm16(disp) ) {
          __ sw(R0, as_Register(base), disp);
        } else {
          __ move(T9, disp);
          __ gsswx(R0, as_Register(base), T9, 0);
        }
      } else {
        if( Assembler::is_simm16(disp) ) {
          __ sw(R0, as_Register(base), disp);
        } else {
          __ move(T9, disp);
          __ daddu(AT, as_Register(base), T9);
          __ sw(R0, AT, 0);
        }
      }
    }
  %}
  ins_pipe( ialu_storeI );
%}
// Load Double
instruct loadD(regD dst, memory mem) %{
  match(Set dst (LoadD mem));

  ins_cost(150);
  format %{ "loadD   $dst, $mem #@loadD" %}
  ins_encode(load_D_enc(dst, mem));
  ins_pipe( ialu_loadI );
%}

// Load Double - UNaligned
instruct loadD_unaligned(regD dst, memory mem ) %{
  match(Set dst (LoadD_unaligned mem));
  ins_cost(250);
  // FIXME: Jin: Need more effective ldl/ldr
  format %{ "loadD_unaligned   $dst, $mem #@loadD_unaligned" %}
  ins_encode(load_D_enc(dst, mem));
  ins_pipe( ialu_loadI );
%}

// Store a double from an FPU register to memory.
instruct storeD_reg( memory mem, regD src) %{
  match(Set mem (StoreD mem src));

  ins_cost(50);
  format %{ "store   $mem, $src\t# store float @ storeD_reg" %}
  ins_encode(store_D_reg_enc(mem, src));
  ins_pipe( fpu_storeF );
%}
// Store double 0.0.  F30 is used as scratch: integer 0 is moved in and
// converted, then stored with the addressing sequence matching the
// Loongson-extension / index / displacement combination.
// NOTE(review): mtc1 + cvt.d.w of 0 yields +0.0 in F30; a single
// dmtc1(R0, F30) would appear equivalent (see loadConD0) -- confirm before
// simplifying.
instruct storeD_imm0( memory mem, immD0 zero) %{
  match(Set mem (StoreD mem zero));

  ins_cost(40);
  format %{ "store   $mem, zero\t# store float @ storeD_imm0" %}
  ins_encode %{
    int      base = $mem$$base;
    int     index = $mem$$index;
    int     scale = $mem$$scale;
    int      disp = $mem$$disp;

    __ mtc1(R0, F30);
    __ cvt_d_w(F30, F30);

    if( index != 0 ) {
      if ( UseLoongsonISA ) {
        // gssdxc1 takes only an 8-bit immediate displacement.
        if ( Assembler::is_simm(disp, 8) ) {
          if (scale == 0) {
            __ gssdxc1(F30, as_Register(base), as_Register(index), disp);
          } else {
            __ dsll(T9, as_Register(index), scale);
            __ gssdxc1(F30, as_Register(base), T9, disp);
          }
        } else if ( Assembler::is_simm16(disp) ) {
          if (scale == 0) {
            __ daddu(AT, as_Register(base), as_Register(index));
            __ sdc1(F30, AT, disp);
          } else {
            __ dsll(T9, as_Register(index), scale);
            __ daddu(AT, as_Register(base), T9);
            __ sdc1(F30, AT, disp);
          }
        } else {
          // Large displacement: fold it into the index operand.
          if (scale == 0) {
            __ move(T9, disp);
            __ daddu(AT, as_Register(index), T9);
            __ gssdxc1(F30, as_Register(base), AT, 0);
          } else {
            __ move(T9, disp);
            __ dsll(AT, as_Register(index), scale);
            __ daddu(AT, AT, T9);
            __ gssdxc1(F30, as_Register(base), AT, 0);
          }
        }
      } else { // not use loongson isa
        if(scale != 0) {
          __ dsll(T9, as_Register(index), scale);
          __ daddu(AT, as_Register(base), T9);
        } else {
          __ daddu(AT, as_Register(base), as_Register(index));
        }
        if( Assembler::is_simm16(disp) ) {
          __ sdc1(F30, AT, disp);
        } else {
          __ move(T9, disp);
          __ daddu(AT, AT, T9);
          __ sdc1(F30, AT, 0);
        }
      }
    } else {// index is 0
      if ( UseLoongsonISA ) {
        if ( Assembler::is_simm16(disp) ) {
          __ sdc1(F30, as_Register(base), disp);
        } else {
          __ move(T9, disp);
          __ gssdxc1(F30, as_Register(base), T9, 0);
        }
      } else {
        if( Assembler::is_simm16(disp) ) {
          __ sdc1(F30, as_Register(base), disp);
        } else {
          __ move(T9, disp);
          __ daddu(AT, as_Register(base), T9);
          __ sdc1(F30, AT, 0);
        }
      }
    }
  %}
  ins_pipe( ialu_storeI );
%}
// Spill/stack-slot moves.  All stack slots are SP-relative; every variant
// guards that the displacement fits the 16-bit immediate of the MIPS
// load/store form.

// Load int from stack slot.
instruct loadSSI(mRegI dst, stackSlotI src)
%{
  match(Set dst src);

  ins_cost(125);
  format %{ "lw    $dst, $src\t# int stk @ loadSSI" %}
  ins_encode %{
    guarantee( Assembler::is_simm16($src$$disp), "disp too long (loadSSI) !");
    __ lw($dst$$Register, SP, $src$$disp);
  %}
  ins_pipe(ialu_loadI);
%}

// Store int to stack slot.
instruct storeSSI(stackSlotI dst, mRegI src)
%{
  match(Set dst src);

  ins_cost(100);
  format %{ "sw    $dst, $src\t# int stk @ storeSSI" %}
  ins_encode %{
    guarantee( Assembler::is_simm16($dst$$disp), "disp too long (storeSSI) !");
    __ sw($src$$Register, SP, $dst$$disp);
  %}
  ins_pipe(ialu_storeI);
%}

// Load long from stack slot.
instruct loadSSL(mRegL dst, stackSlotL src)
%{
  match(Set dst src);

  ins_cost(125);
  format %{ "ld    $dst, $src\t# long stk @ loadSSL" %}
  ins_encode %{
    guarantee( Assembler::is_simm16($src$$disp), "disp too long (loadSSL) !");
    __ ld($dst$$Register, SP, $src$$disp);
  %}
  ins_pipe(ialu_loadI);
%}

// Store long to stack slot.
instruct storeSSL(stackSlotL dst, mRegL src)
%{
  match(Set dst src);

  ins_cost(100);
  format %{ "sd    $dst, $src\t# long stk @ storeSSL" %}
  ins_encode %{
    guarantee( Assembler::is_simm16($dst$$disp), "disp too long (storeSSL) !");
    __ sd($src$$Register, SP, $dst$$disp);
  %}
  ins_pipe(ialu_storeI);
%}

// Load pointer from stack slot.
instruct loadSSP(mRegP dst, stackSlotP src)
%{
  match(Set dst src);

  ins_cost(125);
  format %{ "ld    $dst, $src\t# ptr stk @ loadSSP" %}
  ins_encode %{
    guarantee( Assembler::is_simm16($src$$disp), "disp too long (loadSSP) !");
    __ ld($dst$$Register, SP, $src$$disp);
  %}
  ins_pipe(ialu_loadI);
%}

// Store pointer to stack slot.
instruct storeSSP(stackSlotP dst, mRegP src)
%{
  match(Set dst src);

  ins_cost(100);
  format %{ "sd    $dst, $src\t# ptr stk @ storeSSP" %}
  ins_encode %{
    guarantee( Assembler::is_simm16($dst$$disp), "disp too long (storeSSP) !");
    __ sd($src$$Register, SP, $dst$$disp);
  %}
  ins_pipe(ialu_storeI);
%}

// Load float from stack slot.
instruct loadSSF(regF dst, stackSlotF src)
%{
  match(Set dst src);

  ins_cost(125);
  format %{ "lwc1   $dst, $src\t# float stk @ loadSSF" %}
  ins_encode %{
    guarantee( Assembler::is_simm16($src$$disp), "disp too long (loadSSF) !");
    __ lwc1($dst$$FloatRegister, SP, $src$$disp);
  %}
  ins_pipe(ialu_loadI);
%}

// Store float to stack slot.
instruct storeSSF(stackSlotF dst, regF src)
%{
  match(Set dst src);

  ins_cost(100);
  format %{ "swc1   $dst, $src\t# float stk @ storeSSF" %}
  ins_encode %{
    guarantee( Assembler::is_simm16($dst$$disp), "disp too long (storeSSF) !");
    __ swc1($src$$FloatRegister, SP, $dst$$disp);
  %}
  ins_pipe(fpu_storeF);
%}

// Use the same format since predicate() can not be used here.
instruct loadSSD(regD dst, stackSlotD src)
%{
  match(Set dst src);

  ins_cost(125);
  format %{ "ldc1   $dst, $src\t# double stk @ loadSSD" %}
  ins_encode %{
    guarantee( Assembler::is_simm16($src$$disp), "disp too long (loadSSD) !");
    __ ldc1($dst$$FloatRegister, SP, $src$$disp);
  %}
  ins_pipe(ialu_loadI);
%}

// Store double to stack slot.
instruct storeSSD(stackSlotD dst, regD src)
%{
  match(Set dst src);

  ins_cost(100);
  format %{ "sdc1   $dst, $src\t# double stk @ storeSSD" %}
  ins_encode %{
    guarantee( Assembler::is_simm16($dst$$disp), "disp too long (storeSSD) !");
    __ sdc1($src$$FloatRegister, SP, $dst$$disp);
  %}
  ins_pipe(fpu_storeF);
%}
// Fast-path monitor enter; the heavy lifting is in MacroAssembler::fast_lock.
// box is USE_KILL: the lock-record slot register is consumed.
instruct cmpFastLock( FlagsReg cr, mRegP object, s0_RegP box, mRegI tmp, mRegP scr) %{
  match( Set cr (FastLock object box) );
  effect( TEMP tmp, TEMP scr, USE_KILL box );
  ins_cost(300);
  format %{ "FASTLOCK $cr $object, $box, $tmp #@ cmpFastLock" %}
  ins_encode %{
    __ fast_lock($object$$Register, $box$$Register, $tmp$$Register, $scr$$Register);
  %}

  ins_pipe( pipe_slow );
  ins_pc_relative(1);
%}

// Fast-path monitor exit via MacroAssembler::fast_unlock.
instruct cmpFastUnlock( FlagsReg cr, mRegP object, s0_RegP box, mRegP tmp ) %{
  match( Set cr (FastUnlock object box) );
  effect( TEMP tmp, USE_KILL box );
  ins_cost(300);
  format %{ "FASTUNLOCK $object, $box, $tmp #@cmpFastUnlock" %}
  ins_encode %{
    __ fast_unlock($object$$Register, $box$$Register, $tmp$$Register);
  %}

  ins_pipe( pipe_slow );
  ins_pc_relative(1);
%}
// Store CMS card-mark Immediate
instruct storeImmCM(memory mem, immI8 src) %{
  match(Set mem (StoreCM mem src));

  ins_cost(150);
  format %{ "MOV8   $mem,$src\t! CMS card-mark imm0" %}
// opcode(0xC6);
  ins_encode(store_B_immI_enc_sync(mem, src));
  ins_pipe( ialu_storeI );
%}
// Die now
// Emitted on paths the compiler has proven unreachable; stops the VM with
// a diagnostic message if control ever arrives here.
instruct ShouldNotReachHere( )
%{
  match(Halt);
  ins_cost(300);

  // Use the following format syntax
  format %{ "ILLTRAP ;#@ShouldNotReachHere" %}
  ins_encode %{
    // Here we should emit illtrap !
    // Typo fixed in the diagnostic: "ShoudNotReachHere" -> "ShouldNotReachHere"
    // so crash logs are greppable against this instruct.
    __ stop("in ShouldNotReachHere");

  %}
  ins_pipe( pipe_jump );
%}
// Address computation (lea) for a narrow-oop base + 8-bit offset; valid
// only when compressed oops use no shift, so the address math is plain.
instruct leaP8Narrow(mRegP dst, indOffset8Narrow mem)
%{
  predicate(Universe::narrow_oop_shift() == 0);
  match(Set dst mem);

  ins_cost(110);
  format %{ "leaq    $dst, $mem\t# ptr off8narrow @ leaP8Narrow" %}
  ins_encode %{
    Register  dst  = $dst$$Register;
    Register  base = as_Register($mem$$base);
    int       disp = $mem$$disp;

    __ daddiu(dst, base, disp);
  %}
  ins_pipe( ialu_regI_imm16 );
%}

// lea for base + (index << scale) + offset8.
instruct leaPPosIdxScaleOff8(mRegP dst, basePosIndexScaleOffset8 mem)
%{
  match(Set dst mem);

  ins_cost(110);
  format %{ "leaq    $dst, $mem\t# @ PosIdxScaleOff8" %}
  ins_encode %{
    Register  dst   = $dst$$Register;
    Register  base  = as_Register($mem$$base);
    Register  index = as_Register($mem$$index);
    int       scale = $mem$$scale;
    int       disp  = $mem$$disp;

    if (scale == 0) {
      __ daddu(AT, base, index);
      __ daddiu(dst, AT, disp);
    } else {
      __ dsll(AT, index, scale);
      __ daddu(AT, base, AT);
      __ daddiu(dst, AT, disp);
    }
  %}

  ins_pipe( ialu_regI_imm16 );
%}

// lea for base + (index << scale), no displacement.
instruct leaPIdxScale(mRegP dst, indIndexScale mem)
%{
  match(Set dst mem);

  ins_cost(110);
  format %{ "leaq    $dst, $mem\t# @ leaPIdxScale" %}
  ins_encode %{
    Register  dst   = $dst$$Register;
    Register  base  = as_Register($mem$$base);
    Register  index = as_Register($mem$$index);
    int       scale = $mem$$scale;

    if (scale == 0) {
      __ daddu(dst, base, index);
    } else {
      __ dsll(AT, index, scale);
      __ daddu(dst, base, AT);
    }
  %}

  ins_pipe( ialu_regI_imm16 );
%}
// Jump Direct Conditional - Label defines a relative address from Jcc+1
// Counted-loop back-branch on a register/register int compare.  Unsigned
// comparisons are synthesized with slt + branch-on-AT; a null label means
// the target is unbound and a zero-offset branch is emitted for patching.
instruct jmpLoopEnd(cmpOp cop, mRegI src1, mRegI src2, label labl) %{
  match(CountedLoopEnd cop (CmpI src1 src2));
  effect(USE labl);

  ins_cost(300);
  format %{ "J$cop $src1, $src2,  $labl\t# Loop end @ jmpLoopEnd" %}
  ins_encode %{
    Register op1 = $src1$$Register;
    Register op2 = $src2$$Register;
    Label    &L  = *($labl$$label);
    int     flag = $cop$$cmpcode;

    switch(flag)
    {
      case 0x01: //equal
        if (&L)
          __ beq(op1, op2, L);
        else
          __ beq(op1, op2, (int)0);
        break;
      case 0x02: //not_equal
        if (&L)
          __ bne(op1, op2, L);
        else
          __ bne(op1, op2, (int)0);
        break;
      case 0x03: //above
        // op1 > op2  <=>  slt(op2, op1) != 0
        __ slt(AT, op2, op1);
        if(&L)
          __ bne(AT, R0, L);
        else
          __ bne(AT, R0, (int)0);
        break;
      case 0x04: //above_equal
        // op1 >= op2  <=>  slt(op1, op2) == 0
        __ slt(AT, op1, op2);
        if(&L)
          __ beq(AT, R0, L);
        else
          __ beq(AT, R0, (int)0);
        break;
      case 0x05: //below
        __ slt(AT, op1, op2);
        if(&L)
          __ bne(AT, R0, L);
        else
          __ bne(AT, R0, (int)0);
        break;
      case 0x06: //below_equal
        __ slt(AT, op2, op1);
        if(&L)
          __ beq(AT, R0, L);
        else
          __ beq(AT, R0, (int)0);
        break;
      default:
        Unimplemented();
    }
    __ nop();   // branch-delay slot
  %}
  ins_pipe( pipe_jump );
  ins_pc_relative(1);
%}
// Counted-loop back-branch against a 16-bit immediate: compute
// AT = src1 - src2 once, then branch on AT's sign/zero, saving the
// register-register compare sequence of jmpLoopEnd.
instruct jmpLoopEnd_reg_imm16_sub(cmpOp cop, mRegI src1, immI16_sub src2, label labl) %{
  match(CountedLoopEnd cop (CmpI src1 src2));
  effect(USE labl);

  ins_cost(250);
  format %{ "J$cop $src1, $src2,  $labl\t# Loop end @ jmpLoopEnd_reg_imm16_sub" %}
  ins_encode %{
    Register op1 = $src1$$Register;
    int      op2 = $src2$$constant;
    Label    &L  = *($labl$$label);
    int     flag = $cop$$cmpcode;

    // AT = op1 - op2 (32-bit); the branch below tests AT against zero.
    __ addiu32(AT, op1, -1 * op2);

    switch(flag)
    {
      case 0x01: //equal
        if (&L)
          __ beq(AT, R0, L);
        else
          __ beq(AT, R0, (int)0);
        break;
      case 0x02: //not_equal
        if (&L)
          __ bne(AT, R0, L);
        else
          __ bne(AT, R0, (int)0);
        break;
      case 0x03: //above
        if(&L)
          __ bgtz(AT, L);
        else
          __ bgtz(AT, (int)0);
        break;
      case 0x04: //above_equal
        if(&L)
          __ bgez(AT, L);
        else
          __ bgez(AT,(int)0);
        break;
      case 0x05: //below
        if(&L)
          __ bltz(AT, L);
        else
          __ bltz(AT, (int)0);
        break;
      case 0x06: //below_equal
        if(&L)
          __ blez(AT, L);
        else
          __ blez(AT, (int)0);
        break;
      default:
        Unimplemented();
    }
    __ nop();   // branch-delay slot
  %}
  ins_pipe( pipe_jump );
  ins_pc_relative(1);
%}
13198 /*
13199 // Jump Direct Conditional - Label defines a relative address from Jcc+1
13200 instruct jmpLoopEndU(cmpOpU cop, eFlagsRegU cmp, label labl) %{
13201 match(CountedLoopEnd cop cmp);
13202 effect(USE labl);
13204 ins_cost(300);
13205 format %{ "J$cop,u $labl\t# Loop end" %}
13206 size(6);
13207 opcode(0x0F, 0x80);
13208 ins_encode( Jcc( cop, labl) );
13209 ins_pipe( pipe_jump );
13210 ins_pc_relative(1);
13211 %}
13213 instruct jmpLoopEndUCF(cmpOpUCF cop, eFlagsRegUCF cmp, label labl) %{
13214 match(CountedLoopEnd cop cmp);
13215 effect(USE labl);
13217 ins_cost(200);
13218 format %{ "J$cop,u $labl\t# Loop end" %}
13219 opcode(0x0F, 0x80);
13220 ins_encode( Jcc( cop, labl) );
13221 ins_pipe( pipe_jump );
13222 ins_pc_relative(1);
13223 %}
13224 */
// This match pattern is created for StoreIConditional since I cannot match IfNode without a RegFlags! fujie 2012/07/17
// Branch on the AT pseudo-flag.  The producer (see storeIConditional) leaves
// AT non-zero on success and zero on failure, so "equal" branches on
// AT != 0 and "not equal" on AT == 0 -- the inversion is intentional.
instruct jmpCon_flags(cmpOp cop, FlagsReg cr, label labl) %{
  match(If cop cr);
  effect(USE labl);

  ins_cost(300);
  format %{ "J$cop $labl #mips uses AT as eflag @jmpCon_flags" %}

  ins_encode %{
    Label &L = *($labl$$label);
    switch($cop$$cmpcode)
    {
      case 0x01: //equal
        if (&L)
          __ bne(AT, R0, L);
        else
          __ bne(AT, R0, (int)0);
        break;
      case 0x02: //not equal
        if (&L)
          __ beq(AT, R0, L);
        else
          __ beq(AT, R0, (int)0);
        break;
      default:
        Unimplemented();
    }
    __ nop();   // branch-delay slot
  %}

  ins_pipe( pipe_jump );
  ins_pc_relative(1);
%}
// ============================================================================
// The 2nd slow-half of a subtype check. Scan the subklass's 2ndary superklass
// array for an instance of the superklass. Set a hidden internal cache on a
// hit (cache is checked with exposed code in gen_subtype_check()). Return
// NZ for a miss or zero for a hit. The encoding ALSO sets flags.
instruct partialSubtypeCheck( mRegP result, no_T8_mRegP sub, no_T8_mRegP super, mT8RegI tmp ) %{
  match(Set result (PartialSubtypeCheck sub super));
  effect(KILL tmp);
  ins_cost(1100);  // slightly larger than the next version
  format %{ "partialSubtypeCheck result=$result, sub=$sub, super=$super, tmp=$tmp " %}

  ins_encode( enc_PartialSubtypeCheck(result, sub, super, tmp) );
  ins_pipe( pipe_slow );
%}
// Conditional-store of an int value.
// ZF flag is set on success, reset otherwise. Implemented with a CMPXCHG on Intel.
// Here: hand-rolled LL/SC loop; AT acts as the flags result (0xFF on success,
// 0 on failure), consumed by jmpCon_flags.
instruct storeIConditional( memory mem, mRegI oldval, mRegI newval, FlagsReg cr ) %{
  match(Set cr (StoreIConditional mem (Binary oldval newval)));
//  effect(KILL oldval);
  format %{ "CMPXCHG $newval, $mem, $oldval \t# @storeIConditional" %}

  ins_encode %{
    Register oldval = $oldval$$Register;
    Register newval = $newval$$Register;
    Address addr(as_Register($mem$$base), $mem$$disp);
    Label again, failure;

//    int base = $mem$$base;
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;

    // ll/sc use a 16-bit signed offset; larger displacements are not handled.
    guarantee(Assembler::is_simm16(disp), "");

    if( index != 0 ) {
      __ stop("in storeIConditional: index != 0");   // indexed form not supported
    } else {
      __ bind(again);
      // Older cores need a memory barrier before ll; 3A2000 presumably does not.
      if(!Use3A2000) __ sync();
      __ ll(AT, addr);
      __ bne(AT, oldval, failure);
      __ delayed()->addu(AT, R0, R0);      // delay slot: AT = 0 (failure result)

      __ addu(AT, newval, R0);             // sc destroys its source, so copy first
      __ sc(AT, addr);                     // AT = 1 if store succeeded, else 0
      __ beq(AT, R0, again);               // lost reservation -> retry
      __ delayed()->addiu(AT, R0, 0xFF);   // delay slot: AT = 0xFF (success result)
      __ bind(failure);
      __ sync();
    }
  %}

  ins_pipe( long_memory_op );
%}
// Conditional-store of a long value.
// ZF flag is set on success, reset otherwise. Implemented with a CMPXCHG.
// oldval is constrained to T2 (t2RegL) and is clobbered by cmpxchg.
instruct storeLConditional(memory mem, t2RegL oldval, mRegL newval, FlagsReg cr )
%{
  match(Set cr (StoreLConditional mem (Binary oldval newval)));
  effect(KILL oldval);

  format %{ "cmpxchg $mem, $newval\t# If $oldval == $mem then store $newval into $mem" %}
  ins_encode%{
    Register oldval = $oldval$$Register;
    Register newval = $newval$$Register;
    Address addr((Register)$mem$$base, $mem$$disp);

    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;

    // lld/scd use a 16-bit signed offset; larger displacements are not handled.
    guarantee(Assembler::is_simm16(disp), "");

    if( index != 0 ) {
      // Fixed: message previously said "storeIConditional" (copy-paste error).
      __ stop("in storeLConditional: index != 0");
    } else {
      __ cmpxchg(newval, addr, oldval);   // 64-bit compare-and-exchange; result in AT
    }
  %}
  ins_pipe( long_memory_op );
%}
// 32-bit CAS: res = (cmpxchg32(mem, oldval, newval) succeeded) ? 1 : 0.
// oldval is constrained to S2 (mS2RegI) and clobbered by cmpxchg32.
instruct compareAndSwapI( mRegI res, mRegP mem_ptr, mS2RegI oldval, mRegI newval) %{
  match(Set res (CompareAndSwapI mem_ptr (Binary oldval newval)));
  effect(KILL oldval);
  // Fixed: format previously described a MOV-1/BNE/MOV-0 sequence that the
  // encoding does not emit; it actually emits cmpxchg32 + move(res, AT).
  format %{ "CMPXCHG32 $newval, [$mem_ptr], $oldval @ compareAndSwapI\n\t"
            "MOV $res, AT @ compareAndSwapI" %}
  ins_encode %{
    Register newval = $newval$$Register;
    Register oldval = $oldval$$Register;
    Register res    = $res$$Register;
    Address  addr($mem_ptr$$Register, 0);

    __ cmpxchg32(newval, addr, oldval);  // AT = 1 on success, 0 on failure
    __ move(res, AT);                    // (removed unused local Label)
  %}
  ins_pipe( long_memory_op );
%}
//FIXME:
// Pointer-width (64-bit) CAS: res = (cmpxchg(mem, oldval, newval) succeeded) ? 1 : 0.
// oldval is constrained to S2 (s2_RegP) and clobbered by cmpxchg.
instruct compareAndSwapP( mRegI res, mRegP mem_ptr, s2_RegP oldval, mRegP newval) %{
  match(Set res (CompareAndSwapP mem_ptr (Binary oldval newval)));
  effect(KILL oldval);
  format %{ "CMPXCHG $newval, [$mem_ptr], $oldval @ compareAndSwapP\n\t"
            "MOV $res, AT @ compareAndSwapP\n\t"
            "L:" %}
  ins_encode %{
    Register newval = $newval$$Register;
    Register oldval = $oldval$$Register;
    Register res = $res$$Register;
    Address addr($mem_ptr$$Register, 0);
    Label L;   // unused; kept for parity with sibling encodings

    __ cmpxchg(newval, addr, oldval);   // 64-bit CAS; AT = success flag
    __ move(res, AT);
  %}
  ins_pipe( long_memory_op );
%}
// Narrow-oop (32-bit) CAS: res = (cmpxchg32(mem, oldval, newval) succeeded) ? 1 : 0.
// oldval is constrained to T2 (t2_RegN) and clobbered.
instruct compareAndSwapN( mRegI res, mRegP mem_ptr, t2_RegN oldval, mRegN newval) %{
  match(Set res (CompareAndSwapN mem_ptr (Binary oldval newval)));
  effect(KILL oldval);
  format %{ "CMPXCHG $newval, [$mem_ptr], $oldval @ compareAndSwapN\n\t"
            "MOV $res, AT @ compareAndSwapN\n\t"
            "L:" %}
  ins_encode %{
    Register newval = $newval$$Register;
    Register oldval = $oldval$$Register;
    Register res = $res$$Register;
    Address addr($mem_ptr$$Register, 0);
    Label L;   // unused; kept for parity with sibling encodings

    /* 2013/7/19 Jin: cmpxchg32 is implemented with ll/sc, which does sign
     * extension.  Thus we must sign-extend oldval for a correct comparison. */
    __ sll(oldval, oldval, 0);   // sll reg, reg, 0 == sign-extend low 32 bits

    __ cmpxchg32(newval, addr, oldval);
    __ move(res, AT);
  %}
  ins_pipe( long_memory_op );
%}
13413 //----------Max and Min--------------------------------------------------------
13414 // Min Instructions
13415 ////
13416 // *** Min and Max using the conditional move are slower than the
13417 // *** branch version on a Pentium III.
13418 // // Conditional move for min
13419 //instruct cmovI_reg_lt( eRegI op2, eRegI op1, eFlagsReg cr ) %{
13420 // effect( USE_DEF op2, USE op1, USE cr );
13421 // format %{ "CMOVlt $op2,$op1\t! min" %}
13422 // opcode(0x4C,0x0F);
13423 // ins_encode( OpcS, OpcP, RegReg( op2, op1 ) );
13424 // ins_pipe( pipe_cmov_reg );
13425 //%}
13426 //
13427 //// Min Register with Register (P6 version)
13428 //instruct minI_eReg_p6( eRegI op1, eRegI op2 ) %{
13429 // predicate(VM_Version::supports_cmov() );
13430 // match(Set op2 (MinI op1 op2));
13431 // ins_cost(200);
13432 // expand %{
13433 // eFlagsReg cr;
13434 // compI_eReg(cr,op1,op2);
13435 // cmovI_reg_lt(op2,op1,cr);
13436 // %}
13437 //%}
// Min Register with Register (generic version)
// dst = min(dst, src), branch-free via slt + movn.
instruct minI_Reg_Reg(mRegI dst, mRegI src) %{
  match(Set dst (MinI dst src));
  //effect(KILL flags);
  ins_cost(80);

  format %{ "MIN    $dst, $src @minI_Reg_Reg" %}
  ins_encode %{
    Register dst = $dst$$Register;
    Register src = $src$$Register;

    __ slt(AT, src, dst);    // AT = (src < dst)
    __ movn(dst, src, AT);   // if AT != 0: dst = src  => dst = min

  %}

  ins_pipe( pipe_slow );
%}
13458 // Max Register with Register
13459 // *** Min and Max using the conditional move are slower than the
13460 // *** branch version on a Pentium III.
13461 // // Conditional move for max
13462 //instruct cmovI_reg_gt( eRegI op2, eRegI op1, eFlagsReg cr ) %{
13463 // effect( USE_DEF op2, USE op1, USE cr );
13464 // format %{ "CMOVgt $op2,$op1\t! max" %}
13465 // opcode(0x4F,0x0F);
13466 // ins_encode( OpcS, OpcP, RegReg( op2, op1 ) );
13467 // ins_pipe( pipe_cmov_reg );
13468 //%}
13469 //
13470 // // Max Register with Register (P6 version)
13471 //instruct maxI_eReg_p6( eRegI op1, eRegI op2 ) %{
13472 // predicate(VM_Version::supports_cmov() );
13473 // match(Set op2 (MaxI op1 op2));
13474 // ins_cost(200);
13475 // expand %{
13476 // eFlagsReg cr;
13477 // compI_eReg(cr,op1,op2);
13478 // cmovI_reg_gt(op2,op1,cr);
13479 // %}
13480 //%}
// Max Register with Register (generic version)
// dst = max(dst, src), branch-free via slt + movn.
instruct maxI_Reg_Reg(mRegI dst, mRegI src) %{
  match(Set dst (MaxI dst src));
  ins_cost(80);

  format %{ "MAX    $dst, $src @maxI_Reg_Reg" %}

  ins_encode %{
    Register dst = $dst$$Register;
    Register src = $src$$Register;

    __ slt(AT, dst, src);    // AT = (dst < src)
    __ movn(dst, src, AT);   // if AT != 0: dst = src  => dst = max

  %}

  ins_pipe( pipe_slow );
%}
// dst = max(dst, 0): clamp negative values to zero.
instruct maxI_Reg_zero(mRegI dst, immI0 zero) %{
  match(Set dst (MaxI dst zero));
  ins_cost(50);

  format %{ "MAX    $dst, 0 @maxI_Reg_zero" %}

  ins_encode %{
    Register dst = $dst$$Register;

    __ slt(AT, dst, R0);    // AT = (dst < 0)
    __ movn(dst, R0, AT);   // if negative: dst = 0

  %}

  ins_pipe( pipe_slow );
%}
// dst = src & 0xFFFFFFFF: zero-extend the low 32 bits of a long in one dext.
instruct zerox_long_reg_reg(mRegL dst, mRegL src, immL_32bits mask)
%{
  match(Set dst (AndL src mask));

  format %{ "movl    $dst, $src\t# zero-extend long @ zerox_long_reg_reg" %}
  ins_encode %{
    Register dst = $dst$$Register;
    Register src = $src$$Register;

    __ dext(dst, src, 0, 32);   // extract bits [31:0], zero the rest
  %}
  ins_pipe(ialu_regI_regI);
%}
// Pack two ints into a long: dst = (src2 << 32) | zext(src1).
// The three cases avoid clobbering an input that aliases dst.
instruct combine_i2l(mRegL dst, mRegI src1, immL_32bits mask, mRegI src2, immI_32 shift32)
%{
  match(Set dst (OrL (AndL (ConvI2L src1) mask) (LShiftL (ConvI2L src2) shift32)));

  format %{ "combine_i2l    $dst, $src2(H), $src1(L) @ combine_i2l" %}
  ins_encode %{
    Register dst  = $dst$$Register;
    Register src1 = $src1$$Register;
    Register src2 = $src2$$Register;

    if (src1 == dst) {
      // Low half already in place; insert src2 into the high half.
      __ dinsu(dst, src2, 32, 32);
    } else if (src2 == dst) {
      // Move src2 (== dst) to the high half first, then insert the low half.
      __ dsll32(dst, dst, 0);
      __ dins(dst, src1, 0, 32);
    } else {
      // No aliasing: build low half, then high half.
      __ dext(dst, src1, 0, 32);
      __ dinsu(dst, src2, 32, 32);
    }
  %}
  ins_pipe(ialu_regI_regI);
%}
// Zero-extend convert int to long
// Matches ConvI2L followed by an AndL with 0xFFFFFFFF; single dext suffices.
instruct convI2L_reg_reg_zex(mRegL dst, mRegI src, immL_32bits mask)
%{
  match(Set dst (AndL (ConvI2L src) mask));

  format %{ "movl    $dst, $src\t# i2l zero-extend @ convI2L_reg_reg_zex" %}
  ins_encode %{
    Register dst = $dst$$Register;
    Register src = $src$$Register;

    __ dext(dst, src, 0, 32);
  %}
  ins_pipe(ialu_regI_regI);
%}
// long -> int -> long round trip masked to 32 bits: just a zero-extension.
instruct convL2I2L_reg_reg_zex(mRegL dst, mRegL src, immL_32bits mask)
%{
  match(Set dst (AndL (ConvI2L (ConvL2I src)) mask));

  format %{ "movl    $dst, $src\t# i2l zero-extend @ convL2I2L_reg_reg_zex" %}
  ins_encode %{
    Register dst = $dst$$Register;
    Register src = $src$$Register;

    __ dext(dst, src, 0, 32);
  %}
  ins_pipe(ialu_regI_regI);
%}
// Match loading integer and casting it to unsigned int in long register.
// LoadI + ConvI2L + AndL 0xffffffff.
// Uses load_N_enc (lwu: zero-extending 32-bit load), shared with narrow-oop loads.
instruct loadUI2L_rmask(mRegL dst, memory mem, immL_32bits mask) %{
  match(Set dst (AndL (ConvI2L (LoadI mem)) mask));

  format %{ "lwu     $dst, $mem \t// zero-extend to long @ loadUI2L_rmask" %}
  ins_encode (load_N_enc(dst, mem));
  ins_pipe(ialu_loadI);
%}
// Same as loadUI2L_rmask but with the mask as the left AndL operand.
instruct loadUI2L_lmask(mRegL dst, memory mem, immL_32bits mask) %{
  match(Set dst (AndL mask (ConvI2L (LoadI mem))));

  format %{ "lwu     $dst, $mem \t// zero-extend to long @ loadUI2L_lmask" %}
  ins_encode (load_N_enc(dst, mem));
  ins_pipe(ialu_loadI);
%}
// ============================================================================
// Safepoint Instruction
// Poll by loading from the polling page whose address is already in a register.
// Currently disabled via predicate(false); the no-operand form below is used.
instruct safePoint_poll_reg(mRegP poll) %{
  match(SafePoint poll);
  predicate(false);
  effect(USE poll);

  ins_cost(125);
  format %{ "Safepoint @ [$poll] : poll for GC @ safePoint_poll_reg" %}

  ins_encode %{
    Register poll_reg = $poll$$Register;

    __ block_comment("Safepoint:");
    __ relocate(relocInfo::poll_type);   // let the VM find/patch this poll
    __ lw(AT, poll_reg, 0);              // faults when the page is protected
  %}

  ins_pipe( ialu_storeI );
%}
// Safepoint poll: materialize the polling-page address and load from it.
// NOTE(review): clobbers T9 and AT without declaring them in an effect() —
// presumably both are scratch registers in this port; verify.
instruct safePoint_poll() %{
  match(SafePoint);

  ins_cost(105);
  format %{ "poll for GC @ safePoint_poll" %}

  ins_encode %{
    __ block_comment("Safepoint:");
    __ set64(T9, (long)os::get_polling_page());
    __ relocate(relocInfo::poll_type);   // let the VM find/patch this poll
    __ lw(AT, T9, 0);                    // faults when the page is protected
  %}

  ins_pipe( ialu_storeI );
%}
//----------Arithmetic Conversion Instructions---------------------------------

// RoundFloat is a no-op: MIPS float registers already hold correctly rounded values.
instruct roundFloat_nop(regF dst)
%{
  match(Set dst (RoundFloat dst));

  ins_cost(0);
  ins_encode();
  ins_pipe(empty);
%}
// RoundDouble is likewise a no-op on MIPS.
instruct roundDouble_nop(regD dst)
%{
  match(Set dst (RoundDouble dst));

  ins_cost(0);
  ins_encode();
  ins_pipe(empty);
%}
//---------- Zeros Count Instructions ------------------------------------------
// CountLeadingZerosINode CountTrailingZerosINode

// 32-bit leading-zero count via the native clz instruction.
instruct countLeadingZerosI(mRegI dst, mRegI src) %{
  predicate(UseCountLeadingZerosInstruction);
  match(Set dst (CountLeadingZerosI src));

  format %{ "clz    $dst, $src\t# count leading zeros (int)" %}
  ins_encode %{
    __ clz($dst$$Register, $src$$Register);
  %}
  ins_pipe( ialu_regL_regL );
%}
// 64-bit leading-zero count via dclz.
instruct countLeadingZerosL(mRegI dst, mRegL src) %{
  predicate(UseCountLeadingZerosInstruction);
  match(Set dst (CountLeadingZerosL src));

  format %{ "dclz   $dst, $src\t# count leading zeros (long)" %}
  ins_encode %{
    __ dclz($dst$$Register, $src$$Register);
  %}
  ins_pipe( ialu_regL_regL );
%}
// 32-bit trailing-zero count.
instruct countTrailingZerosI(mRegI dst, mRegI src) %{
  predicate(UseCountTrailingZerosInstruction);
  match(Set dst (CountTrailingZerosI src));

  format %{ "ctz    $dst, $src\t# count trailing zeros (int)" %}
  ins_encode %{
    // ctz and dctz are Loongson (gs) extension instructions.
    __ ctz($dst$$Register, $src$$Register);
  %}
  ins_pipe( ialu_regL_regL );
%}
// 64-bit trailing-zero count (dctz is a Loongson extension instruction).
instruct countTrailingZerosL(mRegI dst, mRegL src) %{
  predicate(UseCountTrailingZerosInstruction);
  match(Set dst (CountTrailingZerosL src));

  // Fixed: format previously said "dcto", which is not the emitted mnemonic.
  format %{ "dctz   $dst, $src\t# count trailing zeros (long)" %}
  ins_encode %{
    __ dctz($dst$$Register, $src$$Register);
  %}
  ins_pipe( ialu_regL_regL );
%}
// ====================VECTOR INSTRUCTIONS=====================================

// Load vectors (8 bytes long)
// An 8-byte vector lives in a double FP register; reuse the double-load encoding.
instruct loadV8(vecD dst, memory mem) %{
  predicate(n->as_LoadVector()->memory_size() == 8);
  match(Set dst (LoadVector mem));
  ins_cost(125);
  format %{ "load    $dst, $mem\t! load vector (8 bytes)" %}
  ins_encode(load_D_enc(dst, mem));
  ins_pipe( fpu_loadF );
%}
// Store vectors (8 bytes long)
// Reuses the double-store encoding (vector is held in a double FP register).
instruct storeV8(memory mem, vecD src) %{
  predicate(n->as_StoreVector()->memory_size() == 8);
  match(Set mem (StoreVector mem src));
  ins_cost(145);
  format %{ "store   $mem, $src\t! store vector (8 bytes)" %}
  ins_encode(store_D_reg_enc(mem, src));
  ins_pipe( fpu_storeF );
%}
// Broadcast a byte to all 8 lanes using the Loongson DSP replv.ob (3A2000+ only).
instruct Repl8B_DSP(vecD dst, mRegI src) %{
  predicate(n->as_Vector()->length() == 8 && Use3A2000);
  match(Set dst (ReplicateB src));
  ins_cost(100);
  format %{ "replv_ob    AT, $src\n\t"
            "dmtc1 AT, $dst\t! replicate8B" %}
  ins_encode %{
    __ replv_ob(AT, $src$$Register);
    __ dmtc1(AT, $dst$$FloatRegister);   // move the packed result into the FP reg
  %}
  ins_pipe( pipe_mtc1 );
%}
// Generic byte broadcast: double the replicated width with each dins
// (8->16, 16->32, 32->64 bits), then move to the FP register.
instruct Repl8B(vecD dst, mRegI src) %{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (ReplicateB src));
  ins_cost(140);
  format %{ "move       AT,  $src\n\t"
            "dins  AT, AT, 8,  8\n\t"
            "dins  AT, AT, 16, 16\n\t"
            "dinsu AT, AT, 32, 32\n\t"
            "dmtc1 AT, $dst\t! replicate8B" %}
  ins_encode %{
    __ move(AT, $src$$Register);
    __ dins(AT, AT, 8, 8);       // bits [7:0]  -> [15:8]
    __ dins(AT, AT, 16, 16);     // bits [15:0] -> [31:16]
    __ dinsu(AT, AT, 32, 32);    // bits [31:0] -> [63:32]
    __ dmtc1(AT, $dst$$FloatRegister);
  %}
  ins_pipe( pipe_mtc1 );
%}
// Broadcast an immediate byte via the DSP repl.ob instruction (3A2000+ only).
// NOTE(review): unlike Repl4S_imm_DSP there is no range check on the immediate
// before repl_ob — presumably repl.ob's immediate field covers all byte
// constants ReplicateB can produce; confirm against the Loongson DSP manual.
instruct Repl8B_imm_DSP(vecD dst, immI con) %{
  predicate(n->as_Vector()->length() == 8 && Use3A2000);
  match(Set dst (ReplicateB con));
  ins_cost(110);
  format %{ "repl_ob    AT, [$con]\n\t"
            "dmtc1 AT, $dst,0x00\t! replicate8B($con)" %}
  ins_encode %{
    int      val = $con$$constant;
    __ repl_ob(AT, val);
    __ dmtc1(AT, $dst$$FloatRegister);
  %}
  ins_pipe( pipe_mtc1 );
%}
// Generic immediate byte broadcast: same widening-dins scheme as Repl8B.
instruct Repl8B_imm(vecD dst, immI con) %{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (ReplicateB con));
  ins_cost(150);
  format %{ "move       AT,  [$con]\n\t"
            "dins  AT, AT, 8,  8\n\t"
            "dins  AT, AT, 16, 16\n\t"
            "dinsu AT, AT, 32, 32\n\t"
            "dmtc1 AT, $dst,0x00\t! replicate8B($con)" %}
  ins_encode %{
    __ move(AT, $con$$constant);
    __ dins(AT, AT, 8, 8);       // widen 8 -> 16 bits
    __ dins(AT, AT, 16, 16);     // widen 16 -> 32 bits
    __ dinsu(AT, AT, 32, 32);    // widen 32 -> 64 bits
    __ dmtc1(AT, $dst$$FloatRegister);
  %}
  ins_pipe( pipe_mtc1 );
%}
// All-zero byte vector: a single dmtc1 of R0.
instruct Repl8B_zero(vecD dst, immI0 zero) %{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (ReplicateB zero));
  ins_cost(90);
  format %{ "dmtc1    R0, $dst\t! replicate8B zero" %}
  ins_encode %{
    __ dmtc1(R0, $dst$$FloatRegister);
  %}
  ins_pipe( pipe_mtc1 );
%}
// All-ones byte vector: nor(AT, R0, R0) yields 0xFFFFFFFFFFFFFFFF in one op.
instruct Repl8B_M1(vecD dst, immI_M1 M1) %{
  predicate(n->as_Vector()->length() == 8);
  match(Set dst (ReplicateB M1));
  ins_cost(80);
  format %{ "dmtc1    -1, $dst\t! replicate8B -1" %}
  ins_encode %{
    __ nor(AT, R0, R0);   // AT = ~0
    __ dmtc1(AT, $dst$$FloatRegister);
  %}
  ins_pipe( pipe_mtc1 );
%}
// Broadcast a short to 4 lanes via the DSP replv.qh (3A2000+ only).
instruct Repl4S_DSP(vecD dst, mRegI src) %{
  predicate(n->as_Vector()->length() == 4 && Use3A2000);
  match(Set dst (ReplicateS src));
  ins_cost(100);
  format %{ "replv_qh    AT, $src\n\t"
            "dmtc1 AT, $dst\t! replicate4S" %}
  ins_encode %{
    __ replv_qh(AT, $src$$Register);
    __ dmtc1(AT, $dst$$FloatRegister);
  %}
  ins_pipe( pipe_mtc1 );
%}
// Generic short broadcast: widen 16 -> 32 -> 64 bits with dins/dinsu.
instruct Repl4S(vecD dst, mRegI src) %{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (ReplicateS src));
  ins_cost(120);
  format %{ "move       AT,  $src  \n\t"
            "dins  AT, AT, 16, 16\n\t"
            "dinsu AT, AT, 32, 32\n\t"
            "dmtc1 AT, $dst\t! replicate4S" %}
  ins_encode %{
    __ move(AT, $src$$Register);
    __ dins(AT, AT, 16, 16);    // bits [15:0] -> [31:16]
    __ dinsu(AT, AT, 32, 32);   // bits [31:0] -> [63:32]
    __ dmtc1(AT, $dst$$FloatRegister);
  %}
  ins_pipe( pipe_mtc1 );
%}
// Broadcast an immediate short via DSP (3A2000+ only).  repl.qh only encodes a
// 10-bit signed immediate; larger constants are materialized then replv'd.
instruct Repl4S_imm_DSP(vecD dst, immI con) %{
  predicate(n->as_Vector()->length() == 4 && Use3A2000);
  match(Set dst (ReplicateS con));
  ins_cost(100);
  format %{ "replv_qh    AT, [$con]\n\t"
            "dmtc1 AT, $dst\t! replicate4S($con)" %}
  ins_encode %{
    int      val = $con$$constant;
    if ( Assembler::is_simm(val, 10)) {
      //repl_qh supports 10 bits immediate
      __ repl_qh(AT, val);
    } else {
      __ li32(AT, val);        // load full constant into AT
      __ replv_qh(AT, AT);     // then broadcast from the register
    }
    __ dmtc1(AT, $dst$$FloatRegister);
  %}
  ins_pipe( pipe_mtc1 );
%}
// Generic immediate short broadcast: widen 16 -> 32 -> 64 bits.
instruct Repl4S_imm(vecD dst, immI con) %{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (ReplicateS con));
  ins_cost(110);
  format %{ "move       AT,  [$con]\n\t"
            "dins  AT, AT, 16, 16\n\t"
            "dinsu AT, AT, 32, 32\n\t"
            "dmtc1 AT, $dst\t! replicate4S($con)" %}
  ins_encode %{
    __ move(AT, $con$$constant);
    __ dins(AT, AT, 16, 16);
    __ dinsu(AT, AT, 32, 32);
    __ dmtc1(AT, $dst$$FloatRegister);
  %}
  ins_pipe( pipe_mtc1 );
%}
// All-zero short vector.
instruct Repl4S_zero(vecD dst, immI0 zero) %{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (ReplicateS zero));
  format %{ "dmtc1    R0, $dst\t! replicate4S zero" %}
  ins_encode %{
    __ dmtc1(R0, $dst$$FloatRegister);
  %}
  ins_pipe( pipe_mtc1 );
%}
// All-ones short vector via nor(AT, R0, R0).
instruct Repl4S_M1(vecD dst, immI_M1 M1) %{
  predicate(n->as_Vector()->length() == 4);
  match(Set dst (ReplicateS M1));
  format %{ "dmtc1    -1, $dst\t! replicate4S -1" %}
  ins_encode %{
    __ nor(AT, R0, R0);   // AT = ~0
    __ dmtc1(AT, $dst$$FloatRegister);
  %}
  ins_pipe( pipe_mtc1 );
%}
// Replicate integer (4 byte) scalar to be vector
// dst = {src, src}: insert src into both 32-bit halves of AT, then dmtc1.
instruct Repl2I(vecD dst, mRegI src) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (ReplicateI src));
  format %{ "dins    AT, $src, 0, 32\n\t"
            "dinsu   AT, $src, 32, 32\n\t"
            "dmtc1   AT, $dst\t! replicate2I" %}
  ins_encode %{
    __ dins(AT, $src$$Register, 0, 32);     // low 32 bits
    __ dinsu(AT, $src$$Register, 32, 32);   // high 32 bits
    __ dmtc1(AT, $dst$$FloatRegister);
  %}
  ins_pipe( pipe_mtc1 );
%}
// Replicate an integer (4-byte) immediate into both halves of a vector:
// materialize the constant, copy the low 32 bits into the high 32, dmtc1.
instruct Repl2I_imm(vecD dst, immI con, mA7RegI tmp) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (ReplicateI con));
  effect(KILL tmp);
  // Fixed: format previously read "li32 AT, [$con], 32" / "dinsu AT, AT",
  // which did not match the emitted instructions (missing dinsu operands,
  // spurious ", 32" on li32).
  format %{ "li32    AT, [$con]\n\t"
            "dinsu   AT, AT, 32, 32\n\t"
            "dmtc1   AT, $dst\t! replicate2I($con)" %}
  ins_encode %{
    int      val = $con$$constant;
    __ li32(AT, val);             // AT[31:0] = con
    __ dinsu(AT, AT, 32, 32);     // AT[63:32] = AT[31:0]
    __ dmtc1(AT, $dst$$FloatRegister);
  %}
  ins_pipe( pipe_mtc1 );
%}
// Replicate integer (4 byte) scalar zero to be vector
instruct Repl2I_zero(vecD dst, immI0 zero) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (ReplicateI zero));
  format %{ "dmtc1    R0, $dst\t! replicate2I zero" %}
  ins_encode %{
    __ dmtc1(R0, $dst$$FloatRegister);
  %}
  ins_pipe( pipe_mtc1 );
%}
// Replicate integer (4 byte) scalar -1 to be vector
instruct Repl2I_M1(vecD dst, immI_M1 M1) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (ReplicateI M1));
  format %{ "dmtc1    -1, $dst\t! replicate2I -1, use AT" %}
  ins_encode %{
    __ nor(AT, R0, R0);   // AT = ~0
    __ dmtc1(AT, $dst$$FloatRegister);
  %}
  ins_pipe( pipe_mtc1 );
%}
// Replicate float (4 byte) scalar to be vector
// NOTE(review): cvt.ps.s produces a paired-single (PS) value.  The vadd2F /
// vsub2F / vmul2F rules below were guarded with !UseLoongsonISA because the
// kernel cannot emulate PS instructions, but this rule has no such guard —
// confirm whether cvt.ps is affected, and whether disabling it would leave
// ReplicateF without any matching rule.
instruct Repl2F(vecD dst, regF src) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (ReplicateF src));
  format %{ "cvt.ps $dst, $src, $src\t! replicate2F" %}
  ins_encode %{
    __ cvt_ps_s($dst$$FloatRegister, $src$$FloatRegister, $src$$FloatRegister);
  %}
  ins_pipe( pipe_slow );
%}
// Replicate float (4 byte) scalar zero to be vector
instruct Repl2F_zero(vecD dst, immF0 zero) %{
  predicate(n->as_Vector()->length() == 2);
  match(Set dst (ReplicateF zero));
  format %{ "dmtc1    R0, $dst\t! replicate2F zero" %}
  ins_encode %{
    __ dmtc1(R0, $dst$$FloatRegister);
  %}
  ins_pipe( pipe_mtc1 );
%}
13983 // ====================VECTOR ARITHMETIC=======================================
13985 // --------------------------------- ADD --------------------------------------
// Floats vector add
// Kernel does not have emulation of PS (paired-single) instructions yet, so
// all PS-based rules are disabled on Loongson via !UseLoongsonISA.
instruct vadd2F(vecD dst, vecD src) %{
  predicate(n->as_Vector()->length() == 2 && !UseLoongsonISA);
  match(Set dst (AddVF dst src));
  format %{ "add.ps   $dst,$src\t! add packed2F" %}
  ins_encode %{
    __ add_ps($dst$$FloatRegister, $dst$$FloatRegister, $src$$FloatRegister);
  %}
  ins_pipe( pipe_slow );
%}
// Three-operand form of the packed-float add (dst may differ from both inputs).
// NOTE(review): uses fpu_regF_regF while the two-operand vadd2F uses
// pipe_slow — presumably one of the two pipe classes is the intended one.
instruct vadd2F3(vecD dst, vecD src1, vecD src2) %{
  predicate(n->as_Vector()->length() == 2 && !UseLoongsonISA);
  match(Set dst (AddVF src1 src2));
  format %{ "add.ps   $dst,$src1,$src2\t! add packed2F" %}
  ins_encode %{
    __ add_ps($dst$$FloatRegister, $src1$$FloatRegister, $src2$$FloatRegister);
  %}
  ins_pipe( fpu_regF_regF );
%}
14009 // --------------------------------- SUB --------------------------------------
// Floats vector sub (PS instruction; disabled on Loongson, see vadd2F).
instruct vsub2F(vecD dst, vecD src) %{
  predicate(n->as_Vector()->length() == 2 && !UseLoongsonISA);
  match(Set dst (SubVF dst src));
  format %{ "sub.ps   $dst,$src\t! sub packed2F" %}
  ins_encode %{
    __ sub_ps($dst$$FloatRegister, $dst$$FloatRegister, $src$$FloatRegister);
  %}
  ins_pipe( fpu_regF_regF );
%}
14022 // --------------------------------- MUL --------------------------------------
// Floats vector mul (PS instruction; disabled on Loongson, see vadd2F).
instruct vmul2F(vecD dst, vecD src) %{
  predicate(n->as_Vector()->length() == 2 && !UseLoongsonISA);
  match(Set dst (MulVF dst src));
  format %{ "mul.ps   $dst, $src\t! mul packed2F" %}
  ins_encode %{
    __ mul_ps($dst$$FloatRegister, $dst$$FloatRegister, $src$$FloatRegister);
  %}
  ins_pipe( fpu_regF_regF );
%}
// Three-operand packed-float mul (PS instruction; disabled on Loongson).
instruct vmul2F3(vecD dst, vecD src1, vecD src2) %{
  predicate(n->as_Vector()->length() == 2 && !UseLoongsonISA);
  match(Set dst (MulVF src1 src2));
  format %{ "mul.ps   $dst, $src1, $src2\t! mul packed2F" %}
  ins_encode %{
    __ mul_ps($dst$$FloatRegister, $src1$$FloatRegister, $src2$$FloatRegister);
  %}
  ins_pipe( fpu_regF_regF );
%}
14045 // --------------------------------- DIV --------------------------------------
14046 // MIPS do not have div.ps
14049 //----------PEEPHOLE RULES-----------------------------------------------------
14050 // These must follow all instruction definitions as they use the names
14051 // defined in the instructions definitions.
14052 //
// peepmatch ( root_instr_name [preceding_instruction]* );
14054 //
14055 // peepconstraint %{
14056 // (instruction_number.operand_name relational_op instruction_number.operand_name
14057 // [, ...] );
14058 // // instruction numbers are zero-based using left to right order in peepmatch
14059 //
14060 // peepreplace ( instr_name ( [instruction_number.operand_name]* ) );
14061 // // provide an instruction_number.operand_name for each operand that appears
14062 // // in the replacement instruction's match rule
14063 //
14064 // ---------VM FLAGS---------------------------------------------------------
14065 //
14066 // All peephole optimizations can be turned off using -XX:-OptoPeephole
14067 //
14068 // Each peephole rule is given an identifying number starting with zero and
14069 // increasing by one in the order seen by the parser. An individual peephole
14070 // can be enabled, and all others disabled, by using -XX:OptoPeepholeAt=#
14071 // on the command-line.
14072 //
14073 // ---------CURRENT LIMITATIONS----------------------------------------------
14074 //
14075 // Only match adjacent instructions in same basic block
14076 // Only equality constraints
14077 // Only constraints between operands, not (0.dest_reg == EAX_enc)
14078 // Only one replacement instruction
14079 //
14080 // ---------EXAMPLE----------------------------------------------------------
14081 //
14082 // // pertinent parts of existing instructions in architecture description
14083 // instruct movI(eRegI dst, eRegI src) %{
14084 // match(Set dst (CopyI src));
14085 // %}
14086 //
14087 // instruct incI_eReg(eRegI dst, immI1 src, eFlagsReg cr) %{
14088 // match(Set dst (AddI dst src));
14089 // effect(KILL cr);
14090 // %}
14091 //
14092 // // Change (inc mov) to lea
14093 // peephole %{
//   // increment preceded by register-register move
14095 // peepmatch ( incI_eReg movI );
14096 // // require that the destination register of the increment
14097 // // match the destination register of the move
14098 // peepconstraint ( 0.dst == 1.dst );
14099 // // construct a replacement instruction that sets
14100 // // the destination to ( move's source register + one )
14101 // peepreplace ( leaI_eReg_immI( 0.dst 1.src 0.src ) );
14102 // %}
14103 //
14104 // Implementation no longer uses movX instructions since
14105 // machine-independent system no longer uses CopyX nodes.
14106 //
14107 // peephole %{
14108 // peepmatch ( incI_eReg movI );
14109 // peepconstraint ( 0.dst == 1.dst );
14110 // peepreplace ( leaI_eReg_immI( 0.dst 1.src 0.src ) );
14111 // %}
14112 //
14113 // peephole %{
14114 // peepmatch ( decI_eReg movI );
14115 // peepconstraint ( 0.dst == 1.dst );
14116 // peepreplace ( leaI_eReg_immI( 0.dst 1.src 0.src ) );
14117 // %}
14118 //
14119 // peephole %{
14120 // peepmatch ( addI_eReg_imm movI );
14121 // peepconstraint ( 0.dst == 1.dst );
14122 // peepreplace ( leaI_eReg_immI( 0.dst 1.src 0.src ) );
14123 // %}
14124 //
14125 // peephole %{
14126 // peepmatch ( addP_eReg_imm movP );
14127 // peepconstraint ( 0.dst == 1.dst );
14128 // peepreplace ( leaP_eReg_immI( 0.dst 1.src 0.src ) );
14129 // %}
14131 // // Change load of spilled value to only a spill
14132 // instruct storeI(memory mem, eRegI src) %{
14133 // match(Set mem (StoreI mem src));
14134 // %}
14135 //
14136 // instruct loadI(eRegI dst, memory mem) %{
14137 // match(Set dst (LoadI mem));
14138 // %}
14139 //
14140 //peephole %{
14141 // peepmatch ( loadI storeI );
14142 // peepconstraint ( 1.src == 0.dst, 1.mem == 0.mem );
14143 // peepreplace ( storeI( 1.mem 1.mem 1.src ) );
14144 //%}
14146 //----------SMARTSPILL RULES---------------------------------------------------
14147 // These must follow all instruction definitions as they use the names
14148 // defined in the instructions definitions.