Tue, 26 Jul 2016 17:06:17 +0800
Add multiply word to GPR instruction (mul) in MIPS assembler.
1 //
2 // Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved.
3 // Copyright (c) 2015, 2016, Loongson Technology. All rights reserved.
4 // DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5 //
6 // This code is free software; you can redistribute it and/or modify it
7 // under the terms of the GNU General Public License version 2 only, as
8 // published by the Free Software Foundation.
9 //
10 // This code is distributed in the hope that it will be useful, but WITHOUT
11 // ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 // FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
13 // version 2 for more details (a copy is included in the LICENSE file that
14 // accompanied this code).
15 //
16 // You should have received a copy of the GNU General Public License version
17 // 2 along with this work; if not, write to the Free Software Foundation,
18 // Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
19 //
20 // Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
21 // or visit www.oracle.com if you need additional information or have any
22 // questions.
23 //
24 //
26 // GodSon3 Architecture Description File
28 //----------REGISTER DEFINITION BLOCK------------------------------------------
29 // This information is used by the matcher and the register allocator to
30 // describe individual registers and classes of registers within the target
31 // architecture.
33 // format:
34 // reg_def name (call convention, c-call convention, ideal type, encoding);
35 // call convention :
36 // NS = No-Save
37 // SOC = Save-On-Call
38 // SOE = Save-On-Entry
39 // AS = Always-Save
40 // ideal type :
41 // see opto/opcodes.hpp for more info
42 // reg_class name (reg, ...);
43 // alloc_class name (reg, ...);
44 register %{
46 // General Registers
47 // Integer Registers
48 reg_def R0 ( NS, NS, Op_RegI, 0, VMRegImpl::Bad());
49 reg_def AT ( NS, NS, Op_RegI, 1, AT->as_VMReg());
50 reg_def AT_H ( NS, NS, Op_RegI, 1, AT->as_VMReg()->next());
51 reg_def V0 (SOC, SOC, Op_RegI, 2, V0->as_VMReg());
52 reg_def V0_H (SOC, SOC, Op_RegI, 2, V0->as_VMReg()->next());
53 reg_def V1 (SOC, SOC, Op_RegI, 3, V1->as_VMReg());
54 reg_def V1_H (SOC, SOC, Op_RegI, 3, V1->as_VMReg()->next());
55 reg_def A0 (SOC, SOC, Op_RegI, 4, A0->as_VMReg());
56 reg_def A0_H (SOC, SOC, Op_RegI, 4, A0->as_VMReg()->next());
57 reg_def A1 (SOC, SOC, Op_RegI, 5, A1->as_VMReg());
58 reg_def A1_H (SOC, SOC, Op_RegI, 5, A1->as_VMReg()->next());
59 reg_def A2 (SOC, SOC, Op_RegI, 6, A2->as_VMReg());
60 reg_def A2_H (SOC, SOC, Op_RegI, 6, A2->as_VMReg()->next());
61 reg_def A3 (SOC, SOC, Op_RegI, 7, A3->as_VMReg());
62 reg_def A3_H (SOC, SOC, Op_RegI, 7, A3->as_VMReg()->next());
63 reg_def A4 (SOC, SOC, Op_RegI, 8, A4->as_VMReg());
64 reg_def A4_H (SOC, SOC, Op_RegI, 8, A4->as_VMReg()->next());
65 reg_def A5 (SOC, SOC, Op_RegI, 9, A5->as_VMReg());
66 reg_def A5_H (SOC, SOC, Op_RegI, 9, A5->as_VMReg()->next());
67 reg_def A6 (SOC, SOC, Op_RegI, 10, A6->as_VMReg());
68 reg_def A6_H (SOC, SOC, Op_RegI, 10, A6->as_VMReg()->next());
69 reg_def A7 (SOC, SOC, Op_RegI, 11, A7->as_VMReg());
70 reg_def A7_H (SOC, SOC, Op_RegI, 11, A7->as_VMReg()->next());
71 reg_def T0 (SOC, SOC, Op_RegI, 12, T0->as_VMReg());
72 reg_def T0_H (SOC, SOC, Op_RegI, 12, T0->as_VMReg()->next());
73 reg_def T1 (SOC, SOC, Op_RegI, 13, T1->as_VMReg());
74 reg_def T1_H (SOC, SOC, Op_RegI, 13, T1->as_VMReg()->next());
75 reg_def T2 (SOC, SOC, Op_RegI, 14, T2->as_VMReg());
76 reg_def T2_H (SOC, SOC, Op_RegI, 14, T2->as_VMReg()->next());
77 reg_def T3 (SOC, SOC, Op_RegI, 15, T3->as_VMReg());
78 reg_def T3_H (SOC, SOC, Op_RegI, 15, T3->as_VMReg()->next());
79 reg_def S0 (SOC, SOE, Op_RegI, 16, S0->as_VMReg());
80 reg_def S0_H (SOC, SOE, Op_RegI, 16, S0->as_VMReg()->next());
81 reg_def S1 (SOC, SOE, Op_RegI, 17, S1->as_VMReg());
82 reg_def S1_H (SOC, SOE, Op_RegI, 17, S1->as_VMReg()->next());
83 reg_def S2 (SOC, SOE, Op_RegI, 18, S2->as_VMReg());
84 reg_def S2_H (SOC, SOE, Op_RegI, 18, S2->as_VMReg()->next());
85 reg_def S3 (SOC, SOE, Op_RegI, 19, S3->as_VMReg());
86 reg_def S3_H (SOC, SOE, Op_RegI, 19, S3->as_VMReg()->next());
87 reg_def S4 (SOC, SOE, Op_RegI, 20, S4->as_VMReg());
88 reg_def S4_H (SOC, SOE, Op_RegI, 20, S4->as_VMReg()->next());
89 reg_def S5 (SOC, SOE, Op_RegI, 21, S5->as_VMReg());
90 reg_def S6 (SOC, SOE, Op_RegI, 22, S6->as_VMReg());
91 reg_def S7 (SOC, SOE, Op_RegI, 23, S7->as_VMReg());
92 reg_def T8 (SOC, SOC, Op_RegI, 24, T8->as_VMReg());
93 reg_def T9 (SOC, SOC, Op_RegI, 25, T9->as_VMReg());
95 // Special Registers
96 reg_def K0 ( NS, NS, Op_RegI, 26, K0->as_VMReg());
97 reg_def K1 ( NS, NS, Op_RegI, 27, K1->as_VMReg());
98 reg_def GP ( NS, NS, Op_RegI, 28, GP->as_VMReg());
99 reg_def GP_H ( NS, NS, Op_RegI, 28, GP->as_VMReg()->next());
100 reg_def SP ( NS, NS, Op_RegI, 29, SP->as_VMReg());
101 reg_def SP_H ( NS, NS, Op_RegI, 29, SP->as_VMReg()->next());
102 reg_def FP ( NS, NS, Op_RegI, 30, FP->as_VMReg());
103 reg_def FP_H ( NS, NS, Op_RegI, 30, FP->as_VMReg()->next());
104 reg_def RA ( NS, NS, Op_RegI, 31, RA->as_VMReg());
105 reg_def RA_H ( NS, NS, Op_RegI, 31, RA->as_VMReg()->next());
107 // Floating registers.
108 reg_def F0 ( SOC, SOC, Op_RegF, 0, F0->as_VMReg());
109 reg_def F0_H ( SOC, SOC, Op_RegF, 0, F0->as_VMReg()->next());
110 reg_def F1 ( SOC, SOC, Op_RegF, 1, F1->as_VMReg());
111 reg_def F1_H ( SOC, SOC, Op_RegF, 1, F1->as_VMReg()->next());
112 reg_def F2 ( SOC, SOC, Op_RegF, 2, F2->as_VMReg());
113 reg_def F2_H ( SOC, SOC, Op_RegF, 2, F2->as_VMReg()->next());
114 reg_def F3 ( SOC, SOC, Op_RegF, 3, F3->as_VMReg());
115 reg_def F3_H ( SOC, SOC, Op_RegF, 3, F3->as_VMReg()->next());
116 reg_def F4 ( SOC, SOC, Op_RegF, 4, F4->as_VMReg());
117 reg_def F4_H ( SOC, SOC, Op_RegF, 4, F4->as_VMReg()->next());
118 reg_def F5 ( SOC, SOC, Op_RegF, 5, F5->as_VMReg());
119 reg_def F5_H ( SOC, SOC, Op_RegF, 5, F5->as_VMReg()->next());
120 reg_def F6 ( SOC, SOC, Op_RegF, 6, F6->as_VMReg());
121 reg_def F6_H ( SOC, SOC, Op_RegF, 6, F6->as_VMReg()->next());
122 reg_def F7 ( SOC, SOC, Op_RegF, 7, F7->as_VMReg());
123 reg_def F7_H ( SOC, SOC, Op_RegF, 7, F7->as_VMReg()->next());
124 reg_def F8 ( SOC, SOC, Op_RegF, 8, F8->as_VMReg());
125 reg_def F8_H ( SOC, SOC, Op_RegF, 8, F8->as_VMReg()->next());
126 reg_def F9 ( SOC, SOC, Op_RegF, 9, F9->as_VMReg());
127 reg_def F9_H ( SOC, SOC, Op_RegF, 9, F9->as_VMReg()->next());
128 reg_def F10 ( SOC, SOC, Op_RegF, 10, F10->as_VMReg());
129 reg_def F10_H ( SOC, SOC, Op_RegF, 10, F10->as_VMReg()->next());
130 reg_def F11 ( SOC, SOC, Op_RegF, 11, F11->as_VMReg());
131 reg_def F11_H ( SOC, SOC, Op_RegF, 11, F11->as_VMReg()->next());
132 reg_def F12 ( SOC, SOC, Op_RegF, 12, F12->as_VMReg());
133 reg_def F12_H ( SOC, SOC, Op_RegF, 12, F12->as_VMReg()->next());
134 reg_def F13 ( SOC, SOC, Op_RegF, 13, F13->as_VMReg());
135 reg_def F13_H ( SOC, SOC, Op_RegF, 13, F13->as_VMReg()->next());
136 reg_def F14 ( SOC, SOC, Op_RegF, 14, F14->as_VMReg());
137 reg_def F14_H ( SOC, SOC, Op_RegF, 14, F14->as_VMReg()->next());
138 reg_def F15 ( SOC, SOC, Op_RegF, 15, F15->as_VMReg());
139 reg_def F15_H ( SOC, SOC, Op_RegF, 15, F15->as_VMReg()->next());
140 reg_def F16 ( SOC, SOC, Op_RegF, 16, F16->as_VMReg());
141 reg_def F16_H ( SOC, SOC, Op_RegF, 16, F16->as_VMReg()->next());
142 reg_def F17 ( SOC, SOC, Op_RegF, 17, F17->as_VMReg());
143 reg_def F17_H ( SOC, SOC, Op_RegF, 17, F17->as_VMReg()->next());
144 reg_def F18 ( SOC, SOC, Op_RegF, 18, F18->as_VMReg());
145 reg_def F18_H ( SOC, SOC, Op_RegF, 18, F18->as_VMReg()->next());
146 reg_def F19 ( SOC, SOC, Op_RegF, 19, F19->as_VMReg());
147 reg_def F19_H ( SOC, SOC, Op_RegF, 19, F19->as_VMReg()->next());
148 reg_def F20 ( SOC, SOC, Op_RegF, 20, F20->as_VMReg());
149 reg_def F20_H ( SOC, SOC, Op_RegF, 20, F20->as_VMReg()->next());
150 reg_def F21 ( SOC, SOC, Op_RegF, 21, F21->as_VMReg());
151 reg_def F21_H ( SOC, SOC, Op_RegF, 21, F21->as_VMReg()->next());
152 reg_def F22 ( SOC, SOC, Op_RegF, 22, F22->as_VMReg());
153 reg_def F22_H ( SOC, SOC, Op_RegF, 22, F22->as_VMReg()->next());
154 reg_def F23 ( SOC, SOC, Op_RegF, 23, F23->as_VMReg());
155 reg_def F23_H ( SOC, SOC, Op_RegF, 23, F23->as_VMReg()->next());
156 reg_def F24 ( SOC, SOC, Op_RegF, 24, F24->as_VMReg());
157 reg_def F24_H ( SOC, SOC, Op_RegF, 24, F24->as_VMReg()->next());
158 reg_def F25 ( SOC, SOC, Op_RegF, 25, F25->as_VMReg());
159 reg_def F25_H ( SOC, SOC, Op_RegF, 25, F25->as_VMReg()->next());
160 reg_def F26 ( SOC, SOC, Op_RegF, 26, F26->as_VMReg());
161 reg_def F26_H ( SOC, SOC, Op_RegF, 26, F26->as_VMReg()->next());
162 reg_def F27 ( SOC, SOC, Op_RegF, 27, F27->as_VMReg());
163 reg_def F27_H ( SOC, SOC, Op_RegF, 27, F27->as_VMReg()->next());
164 reg_def F28 ( SOC, SOC, Op_RegF, 28, F28->as_VMReg());
165 reg_def F28_H ( SOC, SOC, Op_RegF, 28, F28->as_VMReg()->next());
166 reg_def F29 ( SOC, SOC, Op_RegF, 29, F29->as_VMReg());
167 reg_def F29_H ( SOC, SOC, Op_RegF, 29, F29->as_VMReg()->next());
168 reg_def F30 ( SOC, SOC, Op_RegF, 30, F30->as_VMReg());
169 reg_def F30_H ( SOC, SOC, Op_RegF, 30, F30->as_VMReg()->next());
170 reg_def F31 ( SOC, SOC, Op_RegF, 31, F31->as_VMReg());
171 reg_def F31_H ( SOC, SOC, Op_RegF, 31, F31->as_VMReg()->next());
174 // ----------------------------
175 // Special Registers
176 // Condition Codes Flag Registers
177 reg_def MIPS_FLAG (SOC, SOC, Op_RegFlags, 1, as_Register(1)->as_VMReg());
178 //S6 is used for get_thread(S6)
179 //S5 is used for the heap base of compressed oops
180 alloc_class chunk0( S0, S0_H,
181 S1, S1_H,
182 S2, S2_H,
183 S3, S3_H,
184 S4, S4_H,
185 T0, T0_H,
186 T1, T1_H,
187 T2, T2_H,
188 T3, T3_H,
189 V0, V0_H,
190 V1, V1_H,
191 A0, A0_H,
192 A1, A1_H,
193 A2, A2_H,
194 A3, A3_H,
195 A4, A4_H,
196 A5, A5_H,
197 A6, A6_H,
198 A7, A7_H,
199 RA, RA_H,
200 SP, SP_H,
201 FP, FP_H,
202 GP, GP_H );
204 alloc_class chunk1( F0, F0_H,
205 F1, F1_H,
206 F2, F2_H,
207 F3, F3_H,
208 F4, F4_H,
209 F5, F5_H,
210 F6, F6_H,
211 F7, F7_H,
212 F8, F8_H,
213 F9, F9_H,
214 F10, F10_H,
215 F11, F11_H,
216 F12, F12_H,
217 F13, F13_H,
218 F14, F14_H,
219 F15, F15_H,
220 F16, F16_H,
221 F17, F17_H,
222 F18, F18_H,
223 F19, F19_H,
224 F20, F20_H,
225 F21, F21_H,
226 F22, F22_H,
227 F23, F23_H,
228 F24, F24_H,
229 F25, F25_H,
230 F26, F26_H,
231 F27, F27_H,
232 F28, F28_H,
233 F29, F29_H,
234 F30, F30_H,
235 F31, F31_H);
237 alloc_class chunk2(MIPS_FLAG);
239 // Class for all registers
240 reg_class any_reg( T0, T0_H,
241 T1, T1_H,
242 T2, T2_H,
243 T3, T3_H,
244 A4, A4_H,
245 A5, A5_H,
246 A6, A6_H,
247 A7, A7_H,
248 S0, S0_H,
249 S1, S1_H,
250 S2, S2_H,
251 S3, S3_H,
252 S4, S4_H,
253 V0, V0_H,
254 V1, V1_H
255 );
257 // Class for general registers
258 reg_class g_reg( T0, T0_H,
259 T1, T1_H,
260 T2, T2_H,
261 T3, T3_H,
262 A4, A4_H,
263 A5, A5_H,
264 A6, A6_H,
265 A7, A7_H,
266 S0, S0_H,
267 S1, S1_H,
268 S2, S2_H,
269 S3, S3_H,
270 S4, S4_H,
271 V0, V0_H,
272 V1, V1_H,
273 A0, A0_H,
274 A1, A1_H,
275 A2, A2_H,
276 A3, A3_H );
278 reg_class s_reg( S0, S1, S2, S3, S4 );
279 reg_class s0_reg( S0 );
280 reg_class s1_reg( S1 );
281 reg_class s2_reg( S2 );
282 reg_class s3_reg( S3 );
283 reg_class s4_reg( S4 );
285 reg_class t_reg( T0, T1, T2, T3);
286 reg_class t0_reg( T0 );
287 reg_class t1_reg( T1 );
288 reg_class t2_reg( T2 );
289 reg_class t3_reg( T3 );
291 reg_class a0_reg( A0 );
292 reg_class a1_reg( A1 );
293 reg_class a2_reg( A2 );
294 reg_class a3_reg( A3 );
295 reg_class a4_reg( A4 );
296 reg_class a5_reg( A5 );
297 reg_class a6_reg( A6 );
298 reg_class a7_reg( A7 );
300 reg_class mips_flags(MIPS_FLAG);
302 // Class of registers that can appear in an address with no offset.
303 // EBP and ESP require an extra instruction byte for zero offset.
304 // Used in fast-unlock
305 //reg_class p_reg(EDX, EDI, ESI, EBX);
306 reg_class p_reg( T0, T0_H,
307 T1, T1_H,
308 T2, T2_H,
309 T3, T3_H,
311 A0, A0_H,
312 A1, A1_H,
313 A2, A2_H,
314 A3, A3_H,
315 A4, A4_H,
316 A5, A5_H,
317 A6, A6_H,
318 A7, A7_H,
320 S0, S0_H,
321 S1, S1_H,
322 S2, S2_H,
323 S3, S3_H,
324 S4, S4_H);
// NOTE(review): A5 is absent from this class although it appears in
// long_reg and p_reg below -- confirm the omission is intentional.
325 reg_class int_reg( T0, T1, T2, T3, S0, S1, S2, S3, S4, V0, V1, A0, A1, A2, A3, A4, A6, A7 );
326 reg_class sp_reg( SP, SP_H );
327 reg_class fp_reg( FP, FP_H );
330 reg_class long_reg(
331 T0, T0_H,
332 T1, T1_H,
333 T2, T2_H,
334 T3, T3_H,
336 A0, A0_H,
337 A1, A1_H,
338 A2, A2_H,
339 A3, A3_H,
340 A4, A4_H,
341 A5, A5_H,
342 A6, A6_H,
343 A7, A7_H,
345 S0, S0_H,
346 S1, S1_H,
347 S2, S2_H,
348 S3, S3_H,
349 S4, S4_H);
351 reg_class v0_long_reg( V0, V0_H );
352 reg_class v1_long_reg( V1, V1_H );
353 reg_class a0_long_reg( A0, A0_H );
354 reg_class a1_long_reg( A1, A1_H );
355 reg_class a2_long_reg( A2, A2_H );
356 reg_class a3_long_reg( A3, A3_H );
357 reg_class a4_long_reg( A4, A4_H );
358 reg_class a5_long_reg( A5, A5_H );
359 reg_class a6_long_reg( A6, A6_H );
360 reg_class a7_long_reg( A7, A7_H );
361 reg_class t0_long_reg( T0, T0_H );
362 reg_class t1_long_reg( T1, T1_H );
363 reg_class t2_long_reg( T2, T2_H );
364 reg_class t3_long_reg( T3, T3_H );
365 reg_class s0_long_reg( S0, S0_H );
366 reg_class s1_long_reg( S1, S1_H );
367 reg_class s2_long_reg( S2, S2_H );
368 reg_class s3_long_reg( S3, S3_H );
369 reg_class s4_long_reg( S4, S4_H );
371 // Floating point registers.
372 // 2012/8/23 Fu: F30/F31 are used as temporary registers in D2I
// Single-precision float register class (F29-F31 excluded; F30/F31 are
// D2I temporaries per the note above this class).
// Fix: the original list was missing the comma between F17 and F18.
373 reg_class flt_reg( F0, F1, F2, F3, F4, F5, F6, F7, F8, F9, F10, F11, F12, F13, F14, F15, F16, F17, F18, F19, F20, F21, F22, F23, F24, F25, F26, F27, F28);
374 reg_class dbl_reg( F0, F0_H,
375 F1, F1_H,
376 F2, F2_H,
377 F3, F3_H,
378 F4, F4_H,
379 F5, F5_H,
380 F6, F6_H,
381 F7, F7_H,
382 F8, F8_H,
383 F9, F9_H,
384 F10, F10_H,
385 F11, F11_H,
386 F12, F12_H,
387 F13, F13_H,
388 F14, F14_H,
389 F15, F15_H,
390 F16, F16_H,
391 F17, F17_H,
392 F18, F18_H,
393 F19, F19_H,
394 F20, F20_H,
395 F21, F21_H,
396 F22, F22_H,
397 F23, F23_H,
398 F24, F24_H,
399 F25, F25_H,
400 F26, F26_H,
401 F27, F27_H,
402 F28, F28_H,
403 F29, F29_H);
405 reg_class flt_arg0( F12 );
406 reg_class dbl_arg0( F12, F12_H );
407 reg_class dbl_arg1( F14, F14_H );
409 %}
411 //----------DEFINITION BLOCK---------------------------------------------------
412 // Define name --> value mappings to inform the ADLC of an integer valued name
413 // Current support includes integer values in the range [0, 0x7FFFFFFF]
414 // Format:
415 // int_def <name> ( <int_value>, <expression>);
416 // Generated Code in ad_<arch>.hpp
417 // #define <name> (<expression>)
418 // // value == <int_value>
419 // Generated code in ad_<arch>.cpp adlc_verification()
420 // assert( <name> == <int_value>, "Expect (<expression>) to equal <int_value>");
421 //
422 definitions %{
423 int_def DEFAULT_COST ( 100, 100);
424 int_def HUGE_COST (1000000, 1000000);
426 // Memory refs are twice as expensive as run-of-the-mill.
427 int_def MEMORY_REF_COST ( 200, DEFAULT_COST * 2);
429 // Branches are even more expensive.
430 int_def BRANCH_COST ( 300, DEFAULT_COST * 3);
431 // we use jr instruction to construct call, so more expensive
432 // by yjl 2/28/2006
433 int_def CALL_COST ( 500, DEFAULT_COST * 5);
434 /*
435 int_def EQUAL ( 1, 1 );
436 int_def NOT_EQUAL ( 2, 2 );
437 int_def GREATER ( 3, 3 );
438 int_def GREATER_EQUAL ( 4, 4 );
439 int_def LESS ( 5, 5 );
440 int_def LESS_EQUAL ( 6, 6 );
441 */
442 %}
446 //----------SOURCE BLOCK-------------------------------------------------------
447 // This is a block of C++ code which provides values, functions, and
448 // definitions necessary in the rest of the architecture description
450 source_hpp %{
451 // Header information of the source block.
452 // Method declarations/definitions which are used outside
453 // the ad-scope can conveniently be defined here.
454 //
455 // To keep related declarations/definitions/uses close together,
456 // we switch between source %{ }% and source_hpp %{ }% freely as needed.
// Trampoline-stub queries used by Compile::shorten_branches.
// This platform emits no call trampolines, so both queries report zero.
458 class CallStubImpl {
460 //--------------------------------------------------------------
461 //---< Used for optimization in Compile::shorten_branches >---
462 //--------------------------------------------------------------
464 public:
465 // Size of call trampoline stub.
466 static uint size_call_trampoline() {
467 return 0; // no call trampolines on this platform
468 }
470 // number of relocations needed by a call trampoline stub
471 static uint reloc_call_trampoline() {
472 return 0; // no call trampolines on this platform
473 }
474 };
// Sizing and emission hooks for the exception and deopt handler stubs.
// The emit_* bodies live in the source %{ %} block below; the size_*
// helpers must be kept in sync with what those bodies actually emit.
476 class HandlerImpl {
478 public:
480 static int emit_exception_handler(CodeBuffer &cbuf);
481 static int emit_deopt_handler(CodeBuffer& cbuf);
483 static uint size_exception_handler() {
484 // NativeCall instruction size is the same as NativeJump.
485 // exception handler starts out as jump and can be patched to
486 // a call by deoptimization. (4932387)
487 // Note that this value is also credited (in output.cpp) to
488 // the size of the code section.
489 // return NativeJump::instruction_size;
490 int size = NativeCall::instruction_size;
491 return round_to(size, 16); // emit_exception_handler aligns to 16, match it here
492 }
494 #ifdef _LP64
495 static uint size_deopt_handler() {
496 int size = NativeCall::instruction_size;
497 return round_to(size, 16); // emit_deopt_handler aligns to 16, match it here
498 }
499 #else
500 static uint size_deopt_handler() {
501 // NativeCall instruction size is the same as NativeJump.
502 // exception handler starts out as jump and can be patched to
503 // a call by deoptimization. (4932387)
504 // Note that this value is also credited (in output.cpp) to
505 // the size of the code section.
506 return 5 + NativeJump::instruction_size; // pushl(); jmp;
507 }
508 #endif
509 };
511 %} // end source_hpp
513 source %{
515 #define NO_INDEX 0
516 #define RELOC_IMM64 Assembler::imm_operand
517 #define RELOC_DISP32 Assembler::disp32_operand
520 #define __ _masm.
523 // Emit exception handler code.
524 // Stuff framesize into a register and call a VM stub routine.
// Returns the offset of the handler within the stub section; 0 means the
// code buffer could not be expanded.
525 int HandlerImpl::emit_exception_handler(CodeBuffer& cbuf) {
526 /*
527 // Note that the code buffer's insts_mark is always relative to insts.
528 // That's why we must use the macroassembler to generate a handler.
529 MacroAssembler _masm(&cbuf);
530 address base = __ start_a_stub(size_exception_handler());
531 if (base == NULL) return 0; // CodeBuffer::expand failed
532 int offset = __ offset();
533 __ jump(RuntimeAddress(OptoRuntime::exception_blob()->entry_point()));
534 assert(__ offset() - offset <= (int) size_exception_handler(), "overflow");
535 __ end_a_stub();
536 return offset;
537 */
538 // Note that the code buffer's insts_mark is always relative to insts.
539 // That's why we must use the macroassembler to generate a handler.
540 MacroAssembler _masm(&cbuf);
541 address base =
542 __ start_a_stub(size_exception_handler());
543 if (base == NULL) return 0; // CodeBuffer::expand failed
544 int offset = __ offset();
546 __ block_comment("; emit_exception_handler");
548 /* 2012/9/25 FIXME Jin: According to X86, we should use a direct jump.
549 * * However, this will trigger an assert after the 40th method:
550 * *
551 * * 39 b java.lang.Throwable::<init> (25 bytes)
552 * * --- ns java.lang.Throwable::fillInStackTrace
553 * * 40 !b java.net.URLClassLoader::findClass (29 bytes)
554 * * /vm/opto/runtime.cpp, 900 , assert(caller.is_compiled_frame(),"must be")
555 * * 40 made not entrant (2) java.net.URLClassLoader::findClass (29 bytes)
556 * *
557 * * If we change from JR to JALR, the assert will disappear, but WebClient will
558 * * fail after the 403th method with unknown reason.
559 * */
// Materialize the exception blob entry point in T9 and jump there with
// jr (no link -- see FIXME above for why jalr is not used).
560 __ li48(T9, (long)OptoRuntime::exception_blob()->entry_point());
561 __ jr(T9);
562 __ delayed()->nop();
// Pad to 16 bytes so the size matches size_exception_handler().
563 __ align(16);
564 assert(__ offset() - offset <= (int) size_exception_handler(), "overflow");
565 __ end_a_stub();
566 return offset;
567 }
569 // Emit deopt handler code.
// Emits a relocated call (jalr, i.e. with link) to the deopt blob's
// unpack entry.  Returns the offset of the handler within the stub
// section; 0 means the code buffer could not be expanded.
570 int HandlerImpl::emit_deopt_handler(CodeBuffer& cbuf) {
571 /*
572 // Note that the code buffer's insts_mark is always relative to insts.
573 // That's why we must use the macroassembler to generate a handler.
574 MacroAssembler _masm(&cbuf);
575 address base = __ start_a_stub(size_deopt_handler());
576 if (base == NULL) return 0; // CodeBuffer::expand failed
577 int offset = __ offset();
579 #ifdef _LP64
580 address the_pc = (address) __ pc();
581 Label next;
582 // push a "the_pc" on the stack without destroying any registers
583 // as they all may be live.
585 // push address of "next"
586 __ call(next, relocInfo::none); // reloc none is fine since it is a disp32
587 __ bind(next);
588 // adjust it so it matches "the_pc"
589 __ subptr(Address(rsp, 0), __ offset() - offset);
590 #else
591 InternalAddress here(__ pc());
592 __ pushptr(here.addr());
593 #endif
595 __ jump(RuntimeAddress(SharedRuntime::deopt_blob()->unpack()));
596 assert(__ offset() - offset <= (int) size_deopt_handler(), "overflow");
597 __ end_a_stub();
598 return offset;
599 */
600 // Note that the code buffer's insts_mark is always relative to insts.
601 // That's why we must use the macroassembler to generate a handler.
602 MacroAssembler _masm(&cbuf);
603 address base =
604 __ start_a_stub(size_deopt_handler());
606 // FIXME
607 if (base == NULL) return 0; // CodeBuffer::expand failed
608 int offset = __ offset();
610 __ block_comment("; emit_deopt_handler");
// Mark the call site and tag it as a runtime call for relocation.
612 cbuf.set_insts_mark();
613 __ relocate(relocInfo::runtime_call_type);
// jalr (unlike jr in the exception handler) links the return address --
// presumably required by the unpack blob; confirm against deopt_blob().
615 __ li48(T9, (long)SharedRuntime::deopt_blob()->unpack());
616 __ jalr(T9);
617 __ delayed()->nop();
// Pad to 16 bytes so the size matches size_deopt_handler().
618 __ align(16);
619 assert(__ offset() - offset <= (int) size_deopt_handler(), "overflow");
620 __ end_a_stub();
621 return offset;
622 }
625 const bool Matcher::match_rule_supported(int opcode) {
626 if (!has_match_rule(opcode))
627 return false;
628 /*
629 switch (opcode) {
630 case Op_PopCountI:
631 case Op_PopCountL:
632 if (!UsePopCountInstruction)
633 return false;
634 break;
635 case Op_MulVI:
636 if ((UseSSE < 4) && (UseAVX < 1)) // only with SSE4_1 or AVX
637 return false;
638 break;
639 case Op_CompareAndSwapL:
640 #ifdef _LP64
641 case Op_CompareAndSwapP:
642 #endif
643 if (!VM_Version::supports_cx8())
644 return false;
645 break;
646 }
647 */
648 return true; // Per default match rules are supported.
649 }
651 //FIXME
652 // emit call stub, compiled java to interpreter
// Emits the static call stub: a placeholder method oop load plus a
// placeholder jump, both patched when the call site is resolved.
653 void emit_java_to_interp(CodeBuffer &cbuf ) {
654 // Stub is fixed up when the corresponding call is converted from calling
655 // compiled code to calling interpreted code.
656 // mov rbx,0
657 // jmp -1
659 address mark = cbuf.insts_mark(); // get mark within main instrs section
661 // Note that the code buffer's insts_mark is always relative to insts.
662 // That's why we must use the macroassembler to generate a stub.
663 MacroAssembler _masm(&cbuf);
665 address base =
666 __ start_a_stub(Compile::MAX_stubs_size);
667 if (base == NULL) return; // CodeBuffer::expand failed
668 // static stub relocation stores the instruction address of the call
670 __ relocate(static_stub_Relocation::spec(mark), 0);
672 /* 2012/10/29 Jin: Rmethod contains methodOop, it should be relocated for GC */
673 /*
674 int oop_index = __ oop_recorder()->allocate_index(NULL);
675 RelocationHolder rspec = oop_Relocation::spec(oop_index);
676 __ relocate(rspec);
677 */
679 // static stub relocation also tags the methodOop in the code-stream.
// Placeholder 0 -- patched with the method oop at resolution time.
680 __ li48(S3, (long)0);
681 // This is recognized as unresolved by relocs/nativeInst/ic code
683 __ relocate(relocInfo::runtime_call_type);
685 cbuf.set_insts_mark();
// Placeholder -1 -- patched with the interpreter entry at resolution time.
686 address call_pc = (address)-1;
687 __ li48(AT, (long)call_pc);
688 __ jr(AT);
689 __ nop();
// Pad to 16 bytes; size_java_to_interp() below must agree with this layout.
690 __ align(16);
691 __ end_a_stub();
692 // Update current stubs pointer and restore code_end.
693 }
695 // size of call stub, compiled java to interpretor
696 uint size_java_to_interp() {
697 int size = 4 * 4 + NativeCall::instruction_size; // sizeof(li48) + NativeCall::instruction_size
698 return round_to(size, 16);
699 }
701 // relocation entries for call stub, compiled java to interpreter
702 uint reloc_java_to_interp() {
703 return 16; // in emit_java_to_interp + in Java_Static_Call
704 }
706 bool Matcher::is_short_branch_offset(int rule, int br_size, int offset) {
707 if( Assembler::is_simm16(offset) ) return true;
708 else
709 {
710 assert(false, "Not implemented yet !" );
711 Unimplemented();
712 }
713 }
716 // No additional cost for CMOVL.
717 const int Matcher::long_cmove_cost() { return 0; }
719 // No CMOVF/CMOVD with SSE2
720 const int Matcher::float_cmove_cost() { return ConditionalMoveLimit; }
722 // Does the CPU require late expand (see block.cpp for description of late expand)?
723 const bool Matcher::require_postalloc_expand = false;
725 // Do we need to mask the count passed to shift instructions or does
726 // the cpu only look at the lower 5/6 bits anyway?
727 const bool Matcher::need_masked_shift_count = false;
729 bool Matcher::narrow_oop_use_complex_address() {
730 assert(UseCompressedOops, "only for compressed oops code");
731 return (LogMinObjAlignmentInBytes <= 3);
732 }
734 bool Matcher::narrow_klass_use_complex_address() {
735 assert(UseCompressedClassPointers, "only for compressed klass code");
736 return (LogKlassAlignmentInBytes <= 3);
737 }
739 // This is UltraSparc specific, true just means we have fast l2f conversion
740 const bool Matcher::convL2FSupported(void) {
741 return true;
742 }
746 // Max vector size in bytes. 0 if not supported.
747 const int Matcher::vector_width_in_bytes(BasicType bt) {
748 // return UseSSE >= 2 ? 8 : 0;
749 return 0;
750 }
752 // Register for MODI projection of divmodI
753 RegMask Matcher::modI_proj_mask() {
754 return P_REG_mask();
755 }
757 // Register for DIVL projection of divmodL
758 RegMask Matcher::divL_proj_mask() {
759 ShouldNotReachHere();
760 return RegMask();
761 }
763 int Matcher::regnum_to_fpu_offset(int regnum) {
764 return regnum - 32; // The FP registers are in the second chunk
765 }
768 const bool Matcher::isSimpleConstant64(jlong value) {
769 // Will one (StoreL ConL) be cheaper than two (StoreI ConI)?.
770 return false;
771 }
773 // Register for DIVI projection of divmodI
774 RegMask Matcher::divI_proj_mask() {
775 return P_REG_mask();
776 }
778 // Limits on vector size (number of elements) loaded into vector.
779 const int Matcher::max_vector_size(const BasicType bt) {
780 return vector_width_in_bytes(bt)/type2aelembytes(bt);
781 }
782 const int Matcher::min_vector_size(const BasicType bt) {
783 int max_size = max_vector_size(bt);
784 // Min size which can be loaded into vector is 4 bytes.
785 int size = (type2aelembytes(bt) == 1) ? 4 : 2;
786 return MIN2(size,max_size);
787 }
789 // Vector ideal reg
790 const int Matcher::vector_ideal_reg(int size) {
791 return 0;
792 }
794 // Only lowest bits of xmm reg are used for vector shift count.
795 const int Matcher::vector_shift_count_ideal_reg(int size) {
796 return Op_VecS;
797 }
799 // x86 supports misaligned vectors store/load.
800 const bool Matcher::misaligned_vectors_ok() {
801 return !AlignVector; // can be changed by flag
802 }
804 // Return whether or not this register is ever used as an argument. This
805 // function is used on startup to build the trampoline stubs in generateOptoStub.
806 // Registers not mentioned will be killed by the VM call in the trampoline, and
807 // arguments in those registers not be available to the callee.
808 bool Matcher::can_be_java_arg( int reg ) {
809 /* Refer to: [sharedRuntime_mips_64.cpp] SharedRuntime::java_calling_convention() */
// Integer arguments: T0 and A0..A7 (both 32-bit halves of each pair).
810 if ( reg == T0_num || reg == T0_H_num
811 || reg == A0_num || reg == A0_H_num
812 || reg == A1_num || reg == A1_H_num
813 || reg == A2_num || reg == A2_H_num
814 || reg == A3_num || reg == A3_H_num
815 || reg == A4_num || reg == A4_H_num
816 || reg == A5_num || reg == A5_H_num
817 || reg == A6_num || reg == A6_H_num
818 || reg == A7_num || reg == A7_H_num )
819 return true;
// Floating-point arguments: F12 through F19 inclusive.
821 if (reg >= F12_num && reg <= F19_num)
822 return true;
824 return false;
825 }
827 bool Matcher::is_spillable_arg( int reg ) {
828 return can_be_java_arg(reg);
829 }
831 //TODO: in MIPS i donot know LEE
832 bool Matcher::use_asm_for_ldiv_by_con( jlong divisor ) {
833 // In 64 bit mode a code which use multiply when
834 // devisor is constant is faster than hardware
835 // DIV instruction (it uses MulHiL).
836 return false;
837 }
839 // Register for MODL projection of divmodL
840 RegMask Matcher::modL_proj_mask() {
841 ShouldNotReachHere();
842 return RegMask();
843 }
845 const RegMask Matcher::method_handle_invoke_SP_save_mask() {
846 return FP_REG_mask();
847 }
849 // x86 AES instructions are compatible with SunJCE expanded
850 // keys, hence we do not need to pass the original key to stubs
851 const bool Matcher::pass_original_key_for_aes() {
852 return false;
853 }
855 // The address of the call instruction needs to be 16-byte aligned to
856 // ensure that it does not span a cache line so that it can be patched.
858 int CallStaticJavaDirectNode::compute_padding(int current_offset) const {
859 //lui
860 //ori
861 //dsll
862 //ori
864 //jalr
865 //nop
867 return round_to(current_offset, alignment_required()) - current_offset;
868 }
870 // The address of the call instruction needs to be 16-byte aligned to
871 // ensure that it does not span a cache line so that it can be patched.
872 int CallDynamicJavaDirectNode::compute_padding(int current_offset) const {
873 //li64 <--- skip
875 //lui
876 //ori
877 //dsll
878 //ori
880 //jalr
881 //nop
883 current_offset += 4 * 6; // skip li64
884 return round_to(current_offset, alignment_required()) - current_offset;
885 }
887 int CallLeafNoFPDirectNode::compute_padding(int current_offset) const {
888 //lui
889 //ori
890 //dsll
891 //ori
893 //jalr
894 //nop
896 return round_to(current_offset, alignment_required()) - current_offset;
897 }
899 int CallLeafDirectNode::compute_padding(int current_offset) const {
900 //lui
901 //ori
902 //dsll
903 //ori
905 //jalr
906 //nop
908 return round_to(current_offset, alignment_required()) - current_offset;
909 }
911 int CallRuntimeDirectNode::compute_padding(int current_offset) const {
912 //lui
913 //ori
914 //dsll
915 //ori
917 //jalr
918 //nop
920 return round_to(current_offset, alignment_required()) - current_offset;
921 }
923 // If CPU can load and store mis-aligned doubles directly then no fixup is
924 // needed. Else we split the double into 2 integer pieces and move it
925 // piece-by-piece. Only happens when passing doubles into C code as the
926 // Java calling convention forces doubles to be aligned.
927 const bool Matcher::misaligned_doubles_ok = false;
928 // Do floats take an entire double register or just half?
929 //const bool Matcher::float_in_double = true;
930 bool Matcher::float_in_double() { return false; }
931 // Do ints take an entire long register or just half?
932 const bool Matcher::int_in_long = false;
933 // Threshold size for cleararray.
934 const int Matcher::init_array_short_size = 8 * BytesPerLong;
935 // Is it better to copy float constants, or load them directly from memory?
936 // Intel can load a float constant from a direct address, requiring no
937 // extra registers. Most RISCs will have to materialize an address into a
938 // register first, so they would do better to copy the constant from stack.
939 const bool Matcher::rematerialize_float_constants = false;
940 // Advertise here if the CPU requires explicit rounding operations
941 // to implement the UseStrictFP mode.
942 const bool Matcher::strict_fp_requires_explicit_rounding = false;
943 // The ecx parameter to rep stos for the ClearArray node is in dwords.
944 const bool Matcher::init_array_count_is_in_bytes = false;
945 // Should the Matcher clone shifts on addressing modes, expecting them to
946 // be subsumed into complex addressing expressions or compute them into
947 // registers? True for Intel but false for most RISCs
948 const bool Matcher::clone_shift_expressions = false;
952 // Indicate if the safepoint node needs the polling page as an input.
953 // Since x86 does have absolute addressing, it doesn't.
954 bool SafePointNode::needs_polling_address_input() {
955 return false;
956 }
958 // !!!!! Special hack to get all type of calls to specify the byte offset
959 // from the start of the call to the point where the return address
960 // will point.
// Byte offset from the start of the call to the point the return address
// will point at: the full 6-instruction call sequence below (24 bytes).
int MachCallStaticJavaNode::ret_addr_offset() {
  assert(NativeCall::instruction_size == 24, "in MachCallStaticJavaNode::ret_addr_offset");
  // Call sequence (6 x 4 bytes == NativeCall::instruction_size):
  //lui
  //ori
  //dsll
  //ori
  //jalr
  //nop
  return NativeCall::instruction_size;
}
// Return-address offset for a dynamic (inline-cache) call: the IC-klass
// load followed by the 6-instruction call sequence.
// Must be kept in sync with Java_Dynamic_Call.
int MachCallDynamicJavaNode::ret_addr_offset() {
  // return NativeCall::instruction_size;
  assert(NativeCall::instruction_size == 24, "in MachCallDynamicJavaNode::ret_addr_offset");
  // IC klass load:
  //lui IC_Klass,
  //ori IC_Klass,
  //dsll IC_Klass
  //ori IC_Klass
  // Call sequence:
  //lui T9
  //ori T9
  //dsll T9
  //ori T9
  //jalr T9
  //nop
  // NOTE(review): the instruction list above shows a 4-instruction IC load
  // (16 bytes) but the code accounts 6 * 4 bytes for it — confirm the
  // actual emitted IC-load length against Java_Dynamic_Call.
  return 6 * 4 + NativeCall::instruction_size;
}
993 /*
994 // EMIT_OPCODE()
995 void emit_opcode(CodeBuffer &cbuf, int code) {
996 *(cbuf.code_end()) = (unsigned char)code;
997 cbuf.set_code_end(cbuf.code_end() + 1);
998 }
999 */
// Emit a 32-bit datum with an accompanying relocation entry.
// The relocation is recorded against the current instruction mark first,
// then the payload itself is emitted.
void emit_d32_reloc(CodeBuffer &cbuf, int d32, relocInfo::relocType reloc,
                    int format) {
  cbuf.relocate(cbuf.insts_mark(), reloc, format);
  cbuf.insts()->emit_int32(d32);
}
1007 //=============================================================================
1009 // Figure out which register class each belongs in: rc_int, rc_float, rc_stack
1010 enum RC { rc_bad, rc_int, rc_float, rc_stack };
1011 static enum RC rc_class( OptoReg::Name reg ) {
1012 if( !OptoReg::is_valid(reg) ) return rc_bad;
1013 if (OptoReg::is_stack(reg)) return rc_stack;
1014 VMReg r = OptoReg::as_VMReg(reg);
1015 if (r->is_Register()) return rc_int;
1016 assert(r->is_FloatRegister(), "must be");
1017 return rc_float;
1018 }
// Emit (cbuf != NULL), size (do_size) or pretty-print (st) the move
// sequence for one spill copy.  Source and destination are each classified
// as stack slot, GPR or FPR; an aligned even/odd register pair is treated
// as one 64-bit value, otherwise a 32-bit move is used.  Returns the code
// size in bytes.  The printing arms are compiled out in PRODUCT builds.
uint MachSpillCopyNode::implementation( CodeBuffer *cbuf, PhaseRegAlloc *ra_, bool do_size, outputStream* st ) const {
  // Get registers to move
  OptoReg::Name src_second = ra_->get_reg_second(in(1));
  OptoReg::Name src_first = ra_->get_reg_first(in(1));
  OptoReg::Name dst_second = ra_->get_reg_second(this );
  OptoReg::Name dst_first = ra_->get_reg_first(this );

  enum RC src_second_rc = rc_class(src_second);
  enum RC src_first_rc = rc_class(src_first);
  enum RC dst_second_rc = rc_class(dst_second);
  enum RC dst_first_rc = rc_class(dst_first);

  assert(OptoReg::is_valid(src_first) && OptoReg::is_valid(dst_first), "must move at least 1 register" );

  // Generate spill code!
  int size = 0;

  if( src_first == dst_first && src_second == dst_second )
    return 0; // Self copy, no move

  if (src_first_rc == rc_stack) {
    // mem ->
    if (dst_first_rc == rc_stack) {
      // mem -> mem
      assert(src_second != dst_first, "overlap");
      if ((src_first & 1) == 0 && src_first + 1 == src_second &&
          (dst_first & 1) == 0 && dst_first + 1 == dst_second) {
        // 64-bit
        int src_offset = ra_->reg2offset(src_first);
        int dst_offset = ra_->reg2offset(dst_first);
        if (cbuf) {
          MacroAssembler _masm(cbuf);
          __ ld(AT, Address(SP, src_offset));
          __ sd(AT, Address(SP, dst_offset));
#ifndef PRODUCT
        } else {
          if(!do_size){
            if (size != 0) st->print("\n\t");
            st->print("ld AT, [SP + #%d]\t# 64-bit mem-mem spill 1\n\t"
                      "sd AT, [SP + #%d]",
                      src_offset, dst_offset);
          }
#endif
        }
        size += 8; // two 4-byte instructions (ld + sd)
      } else {
        // 32-bit
        assert(!((src_first & 1) == 0 && src_first + 1 == src_second), "no transform");
        assert(!((dst_first & 1) == 0 && dst_first + 1 == dst_second), "no transform");
        // No pushl/popl, so:
        int src_offset = ra_->reg2offset(src_first);
        int dst_offset = ra_->reg2offset(dst_first);
        if (cbuf) {
          MacroAssembler _masm(cbuf);
          __ lw(AT, Address(SP, src_offset));
          __ sw(AT, Address(SP, dst_offset));
#ifndef PRODUCT
        } else {
          if(!do_size){
            if (size != 0) st->print("\n\t");
            st->print("lw AT, [SP + #%d] spill 2\n\t"
                      "sw AT, [SP + #%d]\n\t",
                      src_offset, dst_offset);
          }
#endif
        }
        size += 8; // two 4-byte instructions (lw + sw)
      }
      return size;
    } else if (dst_first_rc == rc_int) {
      // mem -> gpr
      if ((src_first & 1) == 0 && src_first + 1 == src_second &&
          (dst_first & 1) == 0 && dst_first + 1 == dst_second) {
        // 64-bit
        int offset = ra_->reg2offset(src_first);
        if (cbuf) {
          MacroAssembler _masm(cbuf);
          __ ld(as_Register(Matcher::_regEncode[dst_first]), Address(SP, offset));
#ifndef PRODUCT
        } else {
          if(!do_size){
            if (size != 0) st->print("\n\t");
            st->print("ld %s, [SP + #%d]\t# spill 3",
                      Matcher::regName[dst_first],
                      offset);
          }
#endif
        }
        size += 4;
      } else {
        // 32-bit
        assert(!((src_first & 1) == 0 && src_first + 1 == src_second), "no transform");
        assert(!((dst_first & 1) == 0 && dst_first + 1 == dst_second), "no transform");
        int offset = ra_->reg2offset(src_first);
        if (cbuf) {
          MacroAssembler _masm(cbuf);
          // Op_RegI slots are sign-extended (lw); other 32-bit slots
          // (e.g. narrow values) are zero-extended (lwu).
          if (this->ideal_reg() == Op_RegI)
            __ lw(as_Register(Matcher::_regEncode[dst_first]), Address(SP, offset));
          else
            __ lwu(as_Register(Matcher::_regEncode[dst_first]), Address(SP, offset));
#ifndef PRODUCT
        } else {
          if(!do_size){
            if (size != 0) st->print("\n\t");
            if (this->ideal_reg() == Op_RegI)
              st->print("lw %s, [SP + #%d]\t# spill 4",
                        Matcher::regName[dst_first],
                        offset);
            else
              st->print("lwu %s, [SP + #%d]\t# spill 5",
                        Matcher::regName[dst_first],
                        offset);
          }
#endif
        }
        size += 4;
      }
      return size;
    } else if (dst_first_rc == rc_float) {
      // mem-> xmm
      if ((src_first & 1) == 0 && src_first + 1 == src_second &&
          (dst_first & 1) == 0 && dst_first + 1 == dst_second) {
        // 64-bit
        int offset = ra_->reg2offset(src_first);
        if (cbuf) {
          MacroAssembler _masm(cbuf);
          __ ldc1( as_FloatRegister(Matcher::_regEncode[dst_first]), Address(SP, offset));
#ifndef PRODUCT
        } else {
          if(!do_size){
            if (size != 0) st->print("\n\t");
            st->print("ldc1 %s, [SP + #%d]\t# spill 6",
                      Matcher::regName[dst_first],
                      offset);
          }
#endif
        }
        size += 4;
      } else {
        // 32-bit
        assert(!((src_first & 1) == 0 && src_first + 1 == src_second), "no transform");
        assert(!((dst_first & 1) == 0 && dst_first + 1 == dst_second), "no transform");
        int offset = ra_->reg2offset(src_first);
        if (cbuf) {
          MacroAssembler _masm(cbuf);
          __ lwc1( as_FloatRegister(Matcher::_regEncode[dst_first]), Address(SP, offset));
#ifndef PRODUCT
        } else {
          if(!do_size){
            if (size != 0) st->print("\n\t");
            st->print("lwc1 %s, [SP + #%d]\t# spill 7",
                      Matcher::regName[dst_first],
                      offset);
          }
#endif
        }
        size += 4;
      }
      return size;
    }
  } else if (src_first_rc == rc_int) {
    // gpr ->
    if (dst_first_rc == rc_stack) {
      // gpr -> mem
      if ((src_first & 1) == 0 && src_first + 1 == src_second &&
          (dst_first & 1) == 0 && dst_first + 1 == dst_second) {
        // 64-bit
        int offset = ra_->reg2offset(dst_first);
        if (cbuf) {
          MacroAssembler _masm(cbuf);
          __ sd(as_Register(Matcher::_regEncode[src_first]), Address(SP, offset));
#ifndef PRODUCT
        } else {
          if(!do_size){
            if (size != 0) st->print("\n\t");
            st->print("sd %s, [SP + #%d] # spill 8",
                      Matcher::regName[src_first],
                      offset);
          }
#endif
        }
        size += 4;
      } else {
        // 32-bit
        assert(!((src_first & 1) == 0 && src_first + 1 == src_second), "no transform");
        assert(!((dst_first & 1) == 0 && dst_first + 1 == dst_second), "no transform");
        int offset = ra_->reg2offset(dst_first);
        if (cbuf) {
          MacroAssembler _masm(cbuf);
          __ sw(as_Register(Matcher::_regEncode[src_first]), Address(SP, offset));
#ifndef PRODUCT
        } else {
          if(!do_size){
            if (size != 0) st->print("\n\t");
            st->print("sw [SP + #%d], %s\t# spill 9",
                      offset,
                      Matcher::regName[src_first]);
          }
#endif
        }
        size += 4;
      }
      return size;
    } else if (dst_first_rc == rc_int) {
      // gpr -> gpr
      if ((src_first & 1) == 0 && src_first + 1 == src_second &&
          (dst_first & 1) == 0 && dst_first + 1 == dst_second) {
        // 64-bit
        if (cbuf) {
          MacroAssembler _masm(cbuf);
          __ move(as_Register(Matcher::_regEncode[dst_first]),
                  as_Register(Matcher::_regEncode[src_first]));
#ifndef PRODUCT
        } else {
          if(!do_size){
            if (size != 0) st->print("\n\t");
            st->print("move(64bit) %s, %s\t# spill 10",
                      Matcher::regName[dst_first],
                      Matcher::regName[src_first]);
          }
#endif
        }
        size += 4;
        return size;
      } else {
        // 32-bit
        assert(!((src_first & 1) == 0 && src_first + 1 == src_second), "no transform");
        assert(!((dst_first & 1) == 0 && dst_first + 1 == dst_second), "no transform");
        if (cbuf) {
          MacroAssembler _masm(cbuf);
          // Op_RegI copies only the low 32 bits; other kinds are copied
          // with a full 64-bit add-with-zero.
          if (this->ideal_reg() == Op_RegI)
            __ move_u32(as_Register(Matcher::_regEncode[dst_first]), as_Register(Matcher::_regEncode[src_first]));
          else
            __ daddu(as_Register(Matcher::_regEncode[dst_first]), as_Register(Matcher::_regEncode[src_first]), R0);
#ifndef PRODUCT
        } else {
          if(!do_size){
            if (size != 0) st->print("\n\t");
            st->print("move(32-bit) %s, %s\t# spill 11",
                      Matcher::regName[dst_first],
                      Matcher::regName[src_first]);
          }
#endif
        }
        size += 4;
        return size;
      }
    } else if (dst_first_rc == rc_float) {
      // gpr -> xmm
      if ((src_first & 1) == 0 && src_first + 1 == src_second &&
          (dst_first & 1) == 0 && dst_first + 1 == dst_second) {
        // 64-bit
        if (cbuf) {
          MacroAssembler _masm(cbuf);
          __ dmtc1(as_Register(Matcher::_regEncode[src_first]), as_FloatRegister(Matcher::_regEncode[dst_first]));
#ifndef PRODUCT
        } else {
          if(!do_size){
            if (size != 0) st->print("\n\t");
            // NOTE(review): the emitted instruction is dmtc1 <gpr>, <fpr>
            // (src first) but this print lists dst before src — verify the
            // intended operand order of the debug output.
            st->print("dmtc1 %s, %s\t# spill 12",
                      Matcher::regName[dst_first],
                      Matcher::regName[src_first]);
          }
#endif
        }
        size += 4;
      } else {
        // 32-bit
        assert(!((src_first & 1) == 0 && src_first + 1 == src_second), "no transform");
        assert(!((dst_first & 1) == 0 && dst_first + 1 == dst_second), "no transform");
        if (cbuf) {
          MacroAssembler _masm(cbuf);
          __ mtc1( as_Register(Matcher::_regEncode[src_first]), as_FloatRegister(Matcher::_regEncode[dst_first]) );
#ifndef PRODUCT
        } else {
          if(!do_size){
            if (size != 0) st->print("\n\t");
            // NOTE(review): same operand-order question as spill 12 above.
            st->print("mtc1 %s, %s\t# spill 13",
                      Matcher::regName[dst_first],
                      Matcher::regName[src_first]);
          }
#endif
        }
        size += 4;
      }
      return size;
    }
  } else if (src_first_rc == rc_float) {
    // xmm ->
    if (dst_first_rc == rc_stack) {
      // xmm -> mem
      if ((src_first & 1) == 0 && src_first + 1 == src_second &&
          (dst_first & 1) == 0 && dst_first + 1 == dst_second) {
        // 64-bit
        int offset = ra_->reg2offset(dst_first);
        if (cbuf) {
          MacroAssembler _masm(cbuf);
          __ sdc1( as_FloatRegister(Matcher::_regEncode[src_first]), Address(SP, offset) );
#ifndef PRODUCT
        } else {
          if(!do_size){
            if (size != 0) st->print("\n\t");
            st->print("sdc1 %s, [SP + #%d]\t# spill 14",
                      Matcher::regName[src_first],
                      offset);
          }
#endif
        }
        size += 4;
      } else {
        // 32-bit
        assert(!((src_first & 1) == 0 && src_first + 1 == src_second), "no transform");
        assert(!((dst_first & 1) == 0 && dst_first + 1 == dst_second), "no transform");
        int offset = ra_->reg2offset(dst_first);
        if (cbuf) {
          MacroAssembler _masm(cbuf);
          __ swc1(as_FloatRegister(Matcher::_regEncode[src_first]), Address(SP, offset));
#ifndef PRODUCT
        } else {
          if(!do_size){
            if (size != 0) st->print("\n\t");
            st->print("swc1 %s, [SP + #%d]\t# spill 15",
                      Matcher::regName[src_first],
                      offset);
          }
#endif
        }
        size += 4;
      }
      return size;
    } else if (dst_first_rc == rc_int) {
      // xmm -> gpr
      if ((src_first & 1) == 0 && src_first + 1 == src_second &&
          (dst_first & 1) == 0 && dst_first + 1 == dst_second) {
        // 64-bit
        if (cbuf) {
          MacroAssembler _masm(cbuf);
          __ dmfc1( as_Register(Matcher::_regEncode[dst_first]), as_FloatRegister(Matcher::_regEncode[src_first]));
#ifndef PRODUCT
        } else {
          if(!do_size){
            if (size != 0) st->print("\n\t");
            st->print("dmfc1 %s, %s\t# spill 16",
                      Matcher::regName[dst_first],
                      Matcher::regName[src_first]);
          }
#endif
        }
        size += 4;
      } else {
        // 32-bit
        assert(!((src_first & 1) == 0 && src_first + 1 == src_second), "no transform");
        assert(!((dst_first & 1) == 0 && dst_first + 1 == dst_second), "no transform");
        if (cbuf) {
          MacroAssembler _masm(cbuf);
          __ mfc1( as_Register(Matcher::_regEncode[dst_first]), as_FloatRegister(Matcher::_regEncode[src_first]));
#ifndef PRODUCT
        } else {
          if(!do_size){
            if (size != 0) st->print("\n\t");
            st->print("mfc1 %s, %s\t# spill 17",
                      Matcher::regName[dst_first],
                      Matcher::regName[src_first]);
          }
#endif
        }
        size += 4;
      }
      return size;
    } else if (dst_first_rc == rc_float) {
      // xmm -> xmm
      if ((src_first & 1) == 0 && src_first + 1 == src_second &&
          (dst_first & 1) == 0 && dst_first + 1 == dst_second) {
        // 64-bit
        if (cbuf) {
          MacroAssembler _masm(cbuf);
          __ mov_d( as_FloatRegister(Matcher::_regEncode[dst_first]), as_FloatRegister(Matcher::_regEncode[src_first]));
#ifndef PRODUCT
        } else {
          if(!do_size){
            if (size != 0) st->print("\n\t");
            st->print("mov_d %s, %s\t# spill 18",
                      Matcher::regName[dst_first],
                      Matcher::regName[src_first]);
          }
#endif
        }
        size += 4;
      } else {
        // 32-bit
        assert(!((src_first & 1) == 0 && src_first + 1 == src_second), "no transform");
        assert(!((dst_first & 1) == 0 && dst_first + 1 == dst_second), "no transform");
        if (cbuf) {
          MacroAssembler _masm(cbuf);
          __ mov_s( as_FloatRegister(Matcher::_regEncode[dst_first]), as_FloatRegister(Matcher::_regEncode[src_first]));
#ifndef PRODUCT
        } else {
          if(!do_size){
            if (size != 0) st->print("\n\t");
            st->print("mov_s %s, %s\t# spill 19",
                      Matcher::regName[dst_first],
                      Matcher::regName[src_first]);
          }
#endif
        }
        size += 4;
      }
      return size;
    }
  }

  // Unhandled src/dst register-class combination: should never be reached.
  assert(0," foo ");
  Unimplemented();
  return size;

}
#ifndef PRODUCT
// Pretty-print only: run implementation() without a CodeBuffer.
void MachSpillCopyNode::format( PhaseRegAlloc *ra_, outputStream* st ) const {
  implementation( NULL, ra_, false, st );
}
#endif
// Emit the spill-copy code into the code buffer.
void MachSpillCopyNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
  implementation( &cbuf, ra_, false, NULL );
}
// Size in bytes of the spill copy, computed without emitting anything.
uint MachSpillCopyNode::size(PhaseRegAlloc *ra_) const {
  return implementation( NULL, ra_, true, NULL );
}
1452 //=============================================================================
1453 #
#ifndef PRODUCT
// Debug listing for the breakpoint node.
void MachBreakpointNode::format( PhaseRegAlloc *, outputStream* st ) const {
  st->print("INT3");
}
#endif
// Emit a breakpoint (the MacroAssembler's int3() helper).
void MachBreakpointNode::emit(CodeBuffer &cbuf, PhaseRegAlloc* ra_) const {
  MacroAssembler _masm(&cbuf);
  __ int3();
}
// Size is computed generically from the emitted code.
uint MachBreakpointNode::size(PhaseRegAlloc* ra_) const {
  return MachNode::size(ra_);
}
1471 //=============================================================================
1472 #ifndef PRODUCT
1473 void MachEpilogNode::format( PhaseRegAlloc *ra_, outputStream* st ) const {
1474 Compile *C = ra_->C;
1475 int framesize = C->frame_size_in_bytes();
1477 assert((framesize & (StackAlignmentInBytes-1)) == 0, "frame size not aligned");
1479 st->print("daddiu SP, SP, %d # Rlease stack @ MachEpilogNode",framesize);
1480 st->cr(); st->print("\t");
1481 st->print("ld RA, SP, %d # Restore RA @ MachEpilogNode", -8);
1482 st->cr(); st->print("\t");
1483 st->print("ld FP, SP, %d # Restore FP @ MachEpilogNode", -16);
1485 if( do_polling() && C->is_method_compilation() ) {
1486 st->print("Poll Safepoint # MachEpilogNode");
1487 }
1488 }
1489 #endif
// Emit the method epilog: pop the frame, restore RA and FP from just above
// the new SP, then (for method compilations only) touch the polling page
// for the return safepoint.  The epilog in a RuntimeStub must not contain
// a safepoint, hence the is_method_compilation() guard.
void MachEpilogNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
  Compile *C = ra_->C;
  MacroAssembler _masm(&cbuf);
  int framesize = C->frame_size_in_bytes();

  assert((framesize & (StackAlignmentInBytes-1)) == 0, "frame size not aligned");

  __ daddiu(SP, SP, framesize);
  __ ld(RA, SP, -wordSize );
  __ ld(FP, SP, -wordSize*2 );

  if( do_polling() && C->is_method_compilation() ) {
#ifndef OPT_SAFEPOINT
    // Load the full polling-page address, then the poll itself; the
    // relocation must be recorded immediately before the polling load.
    __ li48(AT, (long)os::get_polling_page());
    __ relocate(relocInfo::poll_return_type);
    __ lw(AT, AT, 0);
#else
    // Split-address variant: lui the high half, poll via the low half.
    __ lui(AT, Assembler::split_high((intptr_t)os::get_polling_page()));
    __ relocate(relocInfo::poll_return_type);
    __ lw(AT, AT, Assembler::split_low((intptr_t)os::get_polling_page()));
#endif
  }
}
// Too many variables to size statically; compute it the hard way.
uint MachEpilogNode::size(PhaseRegAlloc *ra_) const {
  return MachNode::size(ra_);
}
// Upper bound on relocation entries this node may emit.
int MachEpilogNode::reloc() const {
  return 0; // a large enough number
}
// Use the generic pipeline description for the epilog.
const Pipeline * MachEpilogNode::pipeline() const {
  return MachNode::pipeline_class();
}
// Offset of the safepoint poll within the epilog code.
int MachEpilogNode::safepoint_offset() const { return 0; }
1530 //=============================================================================
#ifndef PRODUCT
// Debug listing: the box-lock address is SP plus the slot offset.
void BoxLockNode::format( PhaseRegAlloc *ra_, outputStream* st ) const {
  int offset = ra_->reg2offset(in_RegMask(0).find_first_elem());
  int reg = ra_->get_reg_first(this);
  st->print("ADDI %s, SP, %d @BoxLockNode",Matcher::regName[reg],offset);
}
#endif
// One 4-byte instruction (the add emitted by BoxLockNode::emit).
uint BoxLockNode::size(PhaseRegAlloc *ra_) const {
  return 4;
}
// Materialize the address of the lock box (SP + stack-slot offset) into
// the allocated register.
void BoxLockNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
  MacroAssembler _masm(&cbuf);
  int offset = ra_->reg2offset(in_RegMask(0).find_first_elem());
  int reg = ra_->get_encode(this);

  // NOTE(review): addi produces a sign-extended 32-bit result; for a
  // 64-bit stack pointer daddiu would be the usual choice — verify that
  // addi is intentional here.
  __ addi(as_Register(reg), SP, offset);
  /*
  if( offset >= 128 ) {
    emit_opcode(cbuf, 0x8D); // LEA reg,[SP+offset]
    emit_rm(cbuf, 0x2, reg, 0x04);
    emit_rm(cbuf, 0x0, 0x04, SP_enc);
    emit_d32(cbuf, offset);
  }
  else {
    emit_opcode(cbuf, 0x8D); // LEA reg,[SP+offset]
    emit_rm(cbuf, 0x1, reg, 0x04);
    emit_rm(cbuf, 0x0, 0x04, SP_enc);
    emit_d8(cbuf, offset);
  }
  */
}
1568 //static int sizeof_FFree_Float_Stack_All = -1;
// Return-address offset for a runtime call: the same 6-instruction call
// sequence as the Java calls above (24 bytes).
int MachCallRuntimeNode::ret_addr_offset() {
  //lui
  //ori
  //dsll
  //ori
  //jalr
  //nop
  assert(NativeCall::instruction_size == 24, "in MachCallRuntimeNode::ret_addr_offset()");
  return NativeCall::instruction_size;
}
1586 //=============================================================================
#ifndef PRODUCT
// Debug listing: _count nops, 4 bytes each.
void MachNopNode::format( PhaseRegAlloc *, outputStream* st ) const {
  st->print("NOP \t# %d bytes pad for loops and calls", 4 * _count);
}
#endif
1593 void MachNopNode::emit(CodeBuffer &cbuf, PhaseRegAlloc * ) const {
1594 MacroAssembler _masm(&cbuf);
1595 int i = 0;
1596 for(i = 0; i < _count; i++)
1597 __ nop();
1598 }
// 4 bytes per emitted nop.
uint MachNopNode::size(PhaseRegAlloc *) const {
  return 4 * _count;
}
// Use the generic pipeline description for nops.
const Pipeline* MachNopNode::pipeline() const {
  return MachNode::pipeline_class();
}
1607 //=============================================================================
1609 //=============================================================================
#ifndef PRODUCT
// Debug listing of the unverified entry point (inline-cache check).
void MachUEPNode::format( PhaseRegAlloc *ra_, outputStream* st ) const {
  st->print_cr("load_klass(AT, T0)");
  st->print_cr("\tbeq(AT, iCache, L)");
  st->print_cr("\tnop");
  st->print_cr("\tjmp(SharedRuntime::get_ic_miss_stub(), relocInfo::runtime_call_type)");
  st->print_cr("\tnop");
  st->print_cr("\tnop");
  st->print_cr(" L:");
}
#endif
// Unverified entry point: compare the receiver's klass (in T0) against the
// inline-cache klass; on mismatch, tail-jump to the IC-miss stub.
void MachUEPNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
  MacroAssembler _masm(&cbuf);
#ifdef ASSERT
  //uint code_size = cbuf.code_size();
#endif
  int ic_reg = Matcher::inline_cache_reg_encode();
  Label L;
  Register receiver = T0;
  Register iCache = as_Register(ic_reg);
  __ load_klass(AT, receiver);
  __ beq(AT, iCache, L);
  __ nop();   // branch delay slot

  // Klass mismatch: jump to the shared IC-miss stub.
  __ relocate(relocInfo::runtime_call_type);
  __ li48(T9, (long)SharedRuntime::get_ic_miss_stub());
  __ jr(T9);
  __ nop();

  /* WARNING these NOPs are critical so that verified entry point is properly
     8 bytes aligned for patching by NativeJump::patch_verified_entry() */
  __ align(CodeEntryAlignment);
  __ bind(L);
}
// Size is computed generically from the emitted code.
uint MachUEPNode::size(PhaseRegAlloc *ra_) const {
  return MachNode::size(ra_);
}
1653 //=============================================================================
// The constant base node produces no register on this port.
const RegMask& MachConstantBaseNode::_out_RegMask = RegMask::Empty;
// Constants are reached via absolute addressing, so no base offset.
int Compile::ConstantTable::calculate_table_base_offset() const {
  return 0; // absolute addressing, no offset
}
// No post-register-allocation expansion needed on this port.
bool MachConstantBaseNode::requires_postalloc_expand() const { return false; }
// Never called: requires_postalloc_expand() returns false above.
void MachConstantBaseNode::postalloc_expand(GrowableArray <Node *> *nodes, PhaseRegAlloc *ra_) {
  ShouldNotReachHere();
}
// The constant base emits no code (absolute addressing).
void MachConstantBaseNode::emit(CodeBuffer& cbuf, PhaseRegAlloc* ra_) const {
  // Empty encoding
}
// Empty encoding: zero bytes.
uint MachConstantBaseNode::size(PhaseRegAlloc* ra_) const {
  return 0;
}
#ifndef PRODUCT
// Debug listing for the (empty) constant base node.
void MachConstantBaseNode::format(PhaseRegAlloc* ra_, outputStream* st) const {
  st->print("# MachConstantBaseNode (empty encoding)");
}
#endif
1681 //=============================================================================
#ifndef PRODUCT
// Debug listing of the method prolog: optional stack bang, save RA/FP,
// establish FP, allocate the frame.
void MachPrologNode::format( PhaseRegAlloc *ra_, outputStream* st ) const {
  Compile* C = ra_->C;

  int framesize = C->frame_size_in_bytes();
  int bangsize = C->bang_size_in_bytes();
  assert((framesize & (StackAlignmentInBytes-1)) == 0, "frame size not aligned");

  // Calls to C2R adapters often do not accept exceptional returns.
  // We require that their callers must bang for them. But be careful, because
  // some VM calls (such as call site linkage) can use several kilobytes of
  // stack. But the stack safety zone should account for that.
  // See bugs 4446381, 4468289, 4497237.
  if (C->need_stack_bang(bangsize)) {
    st->print_cr("# stack bang"); st->print("\t");
  }
  st->print("sd RA, (SP)-8 @ MachPrologNode\n\t");
  st->print("sd FP, (SP)-16 \n\t");
  st->print("daddiu FP, SP, -16 \n\t");
  st->print("daddiu SP, SP, -%d \t",framesize);
}
#endif
1706 void MachPrologNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
1707 Compile* C = ra_->C;
1708 MacroAssembler _masm(&cbuf);
1710 int framesize = C->frame_size_in_bytes();
1711 int bangsize = C->bang_size_in_bytes();
1713 // __ verified_entry(framesize, C->need_stack_bang(bangsize)?bangsize:0, false);
1715 assert((framesize & (StackAlignmentInBytes-1)) == 0, "frame size not aligned");
1717 if (C->need_stack_bang(framesize)) {
1718 __ generate_stack_overflow_check(framesize);
1719 }
1721 __ sd(RA, SP, -wordSize);
1722 __ sd(FP, SP, -wordSize*2);
1723 __ daddiu(FP, SP, -wordSize*2);
1724 __ daddiu(SP, SP, -framesize);
1725 __ nop(); /* 2013.10.22 Jin: Make enough room for patch_verified_entry() */
1726 __ nop();
1728 C->set_frame_complete(cbuf.insts_size());
1729 if (C->has_mach_constant_base_node()) {
1730 // NOTE: We set the table base offset here because users might be
1731 // emitted before MachConstantBaseNode.
1732 Compile::ConstantTable& constant_table = C->constant_table();
1733 constant_table.set_table_base_offset(constant_table.calculate_table_base_offset());
1734 }
1736 }
// Too many variables to size statically; compute it the hard way.
uint MachPrologNode::size(PhaseRegAlloc *ra_) const {
  return MachNode::size(ra_);
}
// Upper bound on relocation entries this node may emit.
int MachPrologNode::reloc() const {
  return 0; // a large enough number
}
1748 %}
1750 //----------ENCODING BLOCK-----------------------------------------------------
1751 // This block specifies the encoding classes used by the compiler to output
1752 // byte streams. Encoding classes generate functions which are called by
1753 // Machine Instruction Nodes in order to generate the bit encoding of the
1754 // instruction. Operands specify their base encoding interface with the
// interface keyword. There are currently four supported interfaces:
1756 // REG_INTER, CONST_INTER, MEMORY_INTER, & COND_INTER. REG_INTER causes an
1757 // operand to generate a function which returns its register number when
1758 // queried. CONST_INTER causes an operand to generate a function which
1759 // returns the value of the constant when queried. MEMORY_INTER causes an
1760 // operand to generate four functions which return the Base Register, the
1761 // Index Register, the Scale Value, and the Offset Value of the operand when
1762 // queried. COND_INTER causes an operand to generate six functions which
1763 // return the encoding code (ie - encoding bits for the instruction)
1764 // associated with each basic boolean condition for a conditional instruction.
1765 // Instructions specify two basic values for encoding. They use the
1766 // ins_encode keyword to specify their encoding class (which must be one of
1767 // the class names specified in the encoding block), and they use the
1768 // opcode keyword to specify, in order, their primary, secondary, and
1769 // tertiary opcode. Only the opcode sections which a particular instruction
1770 // needs for encoding need to be specified.
1771 encode %{
1772 /*
1773 Alias:
1774 1044 b java.io.ObjectInputStream::readHandle (130 bytes)
1775 118 B14: # B19 B15 <- B13 Freq: 0.899955
1776 118 add S1, S2, V0 #@addP_reg_reg
1777 11c lb S0, [S1 + #-8257524] #@loadB
1778 120 BReq S0, #3, B19 #@branchConI_reg_imm P=0.100000 C=-1.000000
1779 */
1780 //Load byte signed
1781 enc_class load_B_enc (mRegI dst, memory mem) %{
1782 MacroAssembler _masm(&cbuf);
1783 int dst = $dst$$reg;
1784 int base = $mem$$base;
1785 int index = $mem$$index;
1786 int scale = $mem$$scale;
1787 int disp = $mem$$disp;
1789 guarantee(scale == 0, "scale is not zero !");
1791 if( index != 0 ) {
1792 __ addu(AT, as_Register(base), as_Register(index));
1793 if( Assembler::is_simm16(disp) ) {
1794 __ lb(as_Register(dst), AT, disp);
1795 } else {
1796 __ move(T9, disp);
1797 __ addu(AT, AT, T9);
1798 __ lb(as_Register(dst), AT, 0);
1799 }
1800 } else {
1801 if( Assembler::is_simm16(disp) ) {
1802 __ lb(as_Register(dst), as_Register(base), disp);
1803 } else {
1804 __ move(T9, disp);
1805 __ addu(AT, as_Register(base), T9);
1806 __ lb(as_Register(dst), AT, 0);
1807 }
1808 }
1809 %}
1811 //Load byte unsigned
  // Load byte, zero-extended.  Address arithmetic is 64-bit (daddu);
  // the displacement is folded into the load when it fits in simm16,
  // otherwise it is materialized into T9 first.
  enc_class load_UB_enc (mRegI dst, memory mem) %{
    MacroAssembler _masm(&cbuf);
    int  dst = $dst$$reg;
    int  base = $mem$$base;
    int  index = $mem$$index;
    int  scale = $mem$$scale;
    int  disp = $mem$$disp;

    guarantee(scale == 0, "scale is not zero !");

    if( index != 0 ) {
      __ daddu(AT, as_Register(base), as_Register(index));
      if( Assembler::is_simm16(disp) ) {
        __ lbu(as_Register(dst), AT, disp);
      } else {
        __ move(T9, disp);
        __ daddu(AT, AT, T9);
        __ lbu(as_Register(dst), AT, 0);
      }
    } else {
      if( Assembler::is_simm16(disp) ) {
        __ lbu(as_Register(dst), as_Register(base), disp);
      } else {
        __ move(T9, disp);
        __ daddu(AT, as_Register(base), T9);
        __ lbu(as_Register(dst), AT, 0);
      }
    }
  %}
1842 enc_class store_B_reg_enc (memory mem, mRegI src) %{
1843 MacroAssembler _masm(&cbuf);
1844 int src = $src$$reg;
1845 int base = $mem$$base;
1846 int index = $mem$$index;
1847 int scale = $mem$$scale;
1848 int disp = $mem$$disp;
1850 guarantee(scale == 0, "scale is not zero !");
1852 if( index != 0 ) {
1853 __ addu(AT, as_Register(base), as_Register(index));
1854 if( Assembler::is_simm16(disp) ) {
1855 __ sb(as_Register(src), AT, disp);
1856 } else {
1857 __ move(T9, disp);
1858 __ addu(AT, AT, T9);
1859 __ sb(as_Register(src), AT, 0);
1860 }
1861 } else {
1862 if( Assembler::is_simm16(disp) ) {
1863 __ sb(as_Register(src), as_Register(base), disp);
1864 } else {
1865 __ move(T9, disp);
1866 __ addu(AT, as_Register(base), T9);
1867 __ sb(as_Register(src), AT, 0);
1868 }
1869 }
1870 %}
  // Store an immediate byte.  A zero value is stored straight from R0;
  // any other value is first materialized into a scratch register.
  // NOTE(review): a sync() is issued before every sb — presumably this
  // encoding is matched for ordered/volatile byte stores; confirm against
  // the matcher rules that select it.
  enc_class store_B_immI_enc (memory mem, immI8 src) %{
    MacroAssembler _masm(&cbuf);
    int  base = $mem$$base;
    int  index = $mem$$index;
    int  scale = $mem$$scale;
    int  disp = $mem$$disp;
    int value = $src$$constant;

    guarantee(scale == 0, "scale is not zero !");

    if( index != 0 ) {
      __ daddu(AT, as_Register(base), as_Register(index));
      if( Assembler::is_simm16(disp) ) {
        if (value == 0) {
          __ sync();
          __ sb(R0, AT, disp);
        } else {
          __ move(T9, value);
          __ sync();
          __ sb(T9, AT, disp);
        }
      } else {
        if (value == 0) {
          __ move(T9, disp);
          __ daddu(AT, AT, T9);
          __ sync();
          __ sb(R0, AT, 0);
        } else {
          __ move(T9, disp);
          __ daddu(AT, AT, T9);
          __ move(T9, value);
          __ sync();
          __ sb(T9, AT, 0);
        }
      }
    } else {
      if( Assembler::is_simm16(disp) ) {
        if (value == 0) {
          __ sync();
          __ sb(R0, as_Register(base), disp);
        } else {
          __ move(AT, value);
          __ sync();
          __ sb(AT, as_Register(base), disp);
        }
      } else {
        if (value == 0) {
          __ move(T9, disp);
          __ daddu(AT, as_Register(base), T9);
          __ sync();
          __ sb(R0, AT, 0);
        } else {
          __ move(T9, disp);
          __ daddu(AT, as_Register(base), T9);
          __ move(T9, value);
          __ sync();
          __ sb(T9, AT, 0);
        }
      }
    }
  %}
1934 // Load Short (16bit signed)
1935 enc_class load_S_enc (mRegI dst, memory mem) %{
1936 MacroAssembler _masm(&cbuf);
1937 int dst = $dst$$reg;
1938 int base = $mem$$base;
1939 int index = $mem$$index;
1940 int scale = $mem$$scale;
1941 int disp = $mem$$disp;
1943 guarantee(scale == 0, "scale is not zero !");
1945 if( index != 0 ) {
1946 __ addu(AT, as_Register(base), as_Register(index));
1947 if( Assembler::is_simm16(disp) ) {
1948 __ lh(as_Register(dst), AT, disp);
1949 } else {
1950 __ move(T9, disp);
1951 __ addu(AT, AT, T9);
1952 __ lh(as_Register(dst), AT, 0);
1953 }
1954 } else {
1955 if( Assembler::is_simm16(disp) ) {
1956 __ lh(as_Register(dst), as_Register(base), disp);
1957 } else {
1958 __ move(T9, disp);
1959 __ addu(AT, as_Register(base), T9);
1960 __ lh(as_Register(dst), AT, 0);
1961 }
1962 }
1963 %}
1965 // Load Char (16bit unsigned)
1966 enc_class load_C_enc (mRegI dst, memory mem) %{
1967 MacroAssembler _masm(&cbuf);
1968 int dst = $dst$$reg;
1969 int base = $mem$$base;
1970 int index = $mem$$index;
1971 int scale = $mem$$scale;
1972 int disp = $mem$$disp;
1974 guarantee(scale == 0, "scale is not zero !");
1976 if( index != 0 ) {
1977 __ daddu(AT, as_Register(base), as_Register(index));
1978 if( Assembler::is_simm16(disp) ) {
1979 __ lhu(as_Register(dst), AT, disp);
1980 } else {
1981 __ move(T9, disp);
1982 __ addu(AT, AT, T9);
1983 __ lhu(as_Register(dst), AT, 0);
1984 }
1985 } else {
1986 if( Assembler::is_simm16(disp) ) {
1987 __ lhu(as_Register(dst), as_Register(base), disp);
1988 } else {
1989 __ move(T9, disp);
1990 __ daddu(AT, as_Register(base), T9);
1991 __ lhu(as_Register(dst), AT, 0);
1992 }
1993 }
1994 %}
1996 // Store Char (16bit unsigned)
1997 enc_class store_C_reg_enc (memory mem, mRegI src) %{
1998 MacroAssembler _masm(&cbuf);
1999 int src = $src$$reg;
2000 int base = $mem$$base;
2001 int index = $mem$$index;
2002 int scale = $mem$$scale;
2003 int disp = $mem$$disp;
2005 guarantee(scale == 0, "scale is not zero !");
2007 if( index != 0 ) {
2008 __ addu(AT, as_Register(base), as_Register(index));
2009 if( Assembler::is_simm16(disp) ) {
2010 __ sh(as_Register(src), AT, disp);
2011 } else {
2012 __ move(T9, disp);
2013 __ addu(AT, AT, T9);
2014 __ sh(as_Register(src), AT, 0);
2015 }
2016 } else {
2017 if( Assembler::is_simm16(disp) ) {
2018 __ sh(as_Register(src), as_Register(base), disp);
2019 } else {
2020 __ move(T9, disp);
2021 __ addu(AT, as_Register(base), T9);
2022 __ sh(as_Register(src), AT, 0);
2023 }
2024 }
2025 %}
2027 enc_class load_I_enc (mRegI dst, memory mem) %{
2028 MacroAssembler _masm(&cbuf);
2029 int dst = $dst$$reg;
2030 int base = $mem$$base;
2031 int index = $mem$$index;
2032 int scale = $mem$$scale;
2033 int disp = $mem$$disp;
2035 guarantee(scale == 0, "scale is not zero !");
2037 if( index != 0 ) {
2038 __ addu(AT, as_Register(base), as_Register(index));
2039 if( Assembler::is_simm16(disp) ) {
2040 __ lw(as_Register(dst), AT, disp);
2041 } else {
2042 __ move(T9, disp);
2043 __ addu(AT, AT, T9);
2044 __ lw(as_Register(dst), AT, 0);
2045 }
2046 } else {
2047 if( Assembler::is_simm16(disp) ) {
2048 __ lw(as_Register(dst), as_Register(base), disp);
2049 } else {
2050 __ move(T9, disp);
2051 __ addu(AT, as_Register(base), T9);
2052 __ lw(as_Register(dst), AT, 0);
2053 }
2054 }
2055 %}
2057 enc_class store_I_reg_enc (memory mem, mRegI src) %{
2058 MacroAssembler _masm(&cbuf);
2059 int src = $src$$reg;
2060 int base = $mem$$base;
2061 int index = $mem$$index;
2062 int scale = $mem$$scale;
2063 int disp = $mem$$disp;
2065 guarantee(scale == 0, "scale is not zero !");
2067 if( index != 0 ) {
2068 __ addu(AT, as_Register(base), as_Register(index));
2069 if( Assembler::is_simm16(disp) ) {
2070 __ sw(as_Register(src), AT, disp);
2071 } else {
2072 __ move(T9, disp);
2073 __ addu(AT, AT, T9);
2074 __ sw(as_Register(src), AT, 0);
2075 }
2076 } else {
2077 if( Assembler::is_simm16(disp) ) {
2078 __ sw(as_Register(src), as_Register(base), disp);
2079 } else {
2080 __ move(T9, disp);
2081 __ addu(AT, as_Register(base), T9);
2082 __ sw(as_Register(src), AT, 0);
2083 }
2084 }
2085 %}
2087 enc_class store_I_immI_enc (memory mem, immI src) %{
2088 MacroAssembler _masm(&cbuf);
2089 int base = $mem$$base;
2090 int index = $mem$$index;
2091 int scale = $mem$$scale;
2092 int disp = $mem$$disp;
2093 int value = $src$$constant;
2095 guarantee(scale == 0, "scale is not zero !");
2097 if( index != 0 ) {
2098 __ daddu(AT, as_Register(base), as_Register(index));
2099 if( Assembler::is_simm16(disp) ) {
2100 if (value == 0) {
2101 __ sw(R0, AT, disp);
2102 } else {
2103 __ move(T9, value);
2104 __ sw(T9, AT, disp);
2105 }
2106 } else {
2107 if (value == 0) {
2108 __ move(T9, disp);
2109 __ addu(AT, AT, T9);
2110 __ sw(R0, AT, 0);
2111 } else {
2112 __ move(T9, disp);
2113 __ addu(AT, AT, T9);
2114 __ move(T9, value);
2115 __ sw(T9, AT, 0);
2116 }
2117 }
2118 } else {
2119 if( Assembler::is_simm16(disp) ) {
2120 if (value == 0) {
2121 __ sw(R0, as_Register(base), disp);
2122 } else {
2123 __ move(AT, value);
2124 __ sw(AT, as_Register(base), disp);
2125 }
2126 } else {
2127 if (value == 0) {
2128 __ move(T9, disp);
2129 __ addu(AT, as_Register(base), T9);
2130 __ sw(R0, AT, 0);
2131 } else {
2132 __ move(T9, disp);
2133 __ addu(AT, as_Register(base), T9);
2134 __ move(T9, value);
2135 __ sw(T9, AT, 0);
2136 }
2137 }
2138 }
2139 %}
  enc_class load_N_enc (mRegN dst, memory mem) %{
    // Load a compressed (narrow) oop: zero-extending 32-bit load (lwu)
    // from base + index + disp. AT and T9 are scratch registers.
    MacroAssembler _masm(&cbuf);
    int dst = $dst$$reg;
    int base = $mem$$base;
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;
    relocInfo::relocType disp_reloc = $mem->disp_reloc();
    assert(disp_reloc == relocInfo::none, "cannot have disp");

    if( index != 0 ) {
      // Effective address is built with 64-bit adds (daddu).
      __ daddu(AT, as_Register(base), as_Register(index));
      if( Assembler::is_simm16(disp) ) {
        __ lwu(as_Register(dst), AT, disp);
      } else {
        __ li(T9, disp);
        __ daddu(AT, AT, T9);
        __ lwu(as_Register(dst), AT, 0);
      }
    } else {
      if( Assembler::is_simm16(disp) ) {
        __ lwu(as_Register(dst), as_Register(base), disp);
      } else {
        __ li(T9, disp);
        __ daddu(AT, as_Register(base), T9);
        __ lwu(as_Register(dst), AT, 0);
      }
    }

  %}
  enc_class load_P_enc (mRegP dst, memory mem) %{
    // Load a full-width pointer (64-bit ld) from base + index + disp.
    // AT and T9 are scratch; displacement relocations are not supported.
    MacroAssembler _masm(&cbuf);
    int dst = $dst$$reg;
    int base = $mem$$base;
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;
    relocInfo::relocType disp_reloc = $mem->disp_reloc();
    assert(disp_reloc == relocInfo::none, "cannot have disp");

    if( index != 0 ) {
      __ daddu(AT, as_Register(base), as_Register(index));
      if( Assembler::is_simm16(disp) ) {
        __ ld(as_Register(dst), AT, disp);
      } else {
        __ li(T9, disp);
        __ daddu(AT, AT, T9);
        __ ld(as_Register(dst), AT, 0);
      }
    } else {
      if( Assembler::is_simm16(disp) ) {
        __ ld(as_Register(dst), as_Register(base), disp);
      } else {
        __ li(T9, disp);
        __ daddu(AT, as_Register(base), T9);
        __ ld(as_Register(dst), AT, 0);
      }
    }
//  if( disp_reloc != relocInfo::none) __ ld(as_Register(dst), as_Register(dst), 0);
  %}
  enc_class store_P_reg_enc (memory mem, mRegP src) %{
    // Store a full-width pointer (64-bit sd) to base + index + disp.
    // scale must be 0; AT and T9 are scratch registers.
    MacroAssembler _masm(&cbuf);
    int src = $src$$reg;
    int base = $mem$$base;
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;

    guarantee(scale == 0, "scale is not zero !");

    if( index != 0 ) {
      __ daddu(AT, as_Register(base), as_Register(index));
      if( Assembler::is_simm16(disp) ) {
        __ sd(as_Register(src), AT, disp);
      } else {
        __ move(T9, disp);
        __ daddu(AT, AT, T9);
        __ sd(as_Register(src), AT, 0);
      }
    } else {
      if( Assembler::is_simm16(disp) ) {
        __ sd(as_Register(src), as_Register(base), disp);
      } else {
        __ move(T9, disp);
        __ daddu(AT, as_Register(base), T9);
        __ sd(as_Register(src), AT, 0);
      }
    }
  %}
2232 enc_class store_N_reg_enc (memory mem, mRegN src) %{
2233 MacroAssembler _masm(&cbuf);
2234 int src = $src$$reg;
2235 int base = $mem$$base;
2236 int index = $mem$$index;
2237 int scale = $mem$$scale;
2238 int disp = $mem$$disp;
2240 guarantee(scale == 0, "scale is not zero !");
2242 if( index != 0 ) {
2243 __ addu(AT, as_Register(base), as_Register(index));
2244 if( Assembler::is_simm16(disp) ) {
2245 __ sw(as_Register(src), AT, disp);
2246 } else {
2247 __ move(T9, disp);
2248 __ addu(AT, AT, T9);
2249 __ sw(as_Register(src), AT, 0);
2250 }
2251 } else {
2252 if( Assembler::is_simm16(disp) ) {
2253 __ sw(as_Register(src), as_Register(base), disp);
2254 } else {
2255 __ move(T9, disp);
2256 __ addu(AT, as_Register(base), T9);
2257 __ sw(as_Register(src), AT, 0);
2258 }
2259 }
2260 %}
  enc_class store_P_immP_enc (memory mem, immP31 src) %{
    // Store a pointer-sized immediate (immP31): *(long*)(addr) = value.
    // value == 0 stores R0 directly; otherwise the constant is first
    // materialized into a scratch register. AT and T9 are scratch.
    MacroAssembler _masm(&cbuf);
    int base = $mem$$base;
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;
    long value = $src$$constant;

    guarantee(scale == 0, "scale is not zero !");

    if( index != 0 ) {
      __ daddu(AT, as_Register(base), as_Register(index));
      if( Assembler::is_simm16(disp) ) {
        if (value == 0) {
          __ sd(R0, AT, disp);
        } else {
          __ move(T9, value);
          __ sd(T9, AT, disp);
        }
      } else {
        if (value == 0) {
          __ move(T9, disp);
          __ daddu(AT, AT, T9);
          __ sd(R0, AT, 0);
        } else {
          __ move(T9, disp);
          __ daddu(AT, AT, T9);
          __ move(T9, value);
          __ sd(T9, AT, 0);
        }
      }
    } else {
      if( Assembler::is_simm16(disp) ) {
        if (value == 0) {
          __ sd(R0, as_Register(base), disp);
        } else {
          // AT is free here (no index), so use it and keep T9 untouched.
          __ move(AT, value);
          __ sd(AT, as_Register(base), disp);
        }
      } else {
        if (value == 0) {
          __ move(T9, disp);
          __ daddu(AT, as_Register(base), T9);
          __ sd(R0, AT, 0);
        } else {
          __ move(T9, disp);
          __ daddu(AT, as_Register(base), T9);
          __ move(T9, value);
          __ sd(T9, AT, 0);
        }
      }
    }
  %}
2316 /*
2317 * 1d4 storeImmN [S0 + #16 (8-bit)], narrowoop: spec/benchmarks/_213_javac/Identifier:exact *
2318 * # compressed ptr ! Field: spec/benchmarks/_213_javac/Identifier.value
2319 * 0x00000055648065d4: daddu at, s0, zero
2320 * 0x00000055648065d8: lui t9, 0x0 ; {oop(a 'spec/benchmarks/_213_javac/Identifier')}
2321 * 0x00000055648065dc: ori t9, t9, 0xfffff610
2322 * 0x00000055648065e0: dsll t9, t9, 16
2323 * 0x00000055648065e4: ori t9, t9, 0xffffc628
2324 * 0x00000055648065e8: sw t9, 0x10(at)
2325 */
2326 enc_class storeImmN_enc (memory mem, immN src) %{
2327 MacroAssembler _masm(&cbuf);
2328 int base = $mem$$base;
2329 int index = $mem$$index;
2330 int scale = $mem$$scale;
2331 int disp = $mem$$disp;
2332 long * value = (long *)$src$$constant;
2334 if (value == NULL) {
2335 guarantee(Assembler::is_simm16(disp), "FIXME: disp is not simm16!");
2336 if (index == 0) {
2337 __ sw(R0, as_Register(base), disp);
2338 } else {
2339 __ daddu(AT, as_Register(base), as_Register(index));
2340 __ sw(R0, AT, disp);
2341 }
2343 return;
2344 }
2346 int oop_index = __ oop_recorder()->find_index((jobject)value);
2347 RelocationHolder rspec = oop_Relocation::spec(oop_index);
2349 guarantee(scale == 0, "FIXME: scale is not zero !");
2350 guarantee(value != 0, "FIXME: value is zero !");
2352 if (index != 0) {
2353 __ daddu(AT, as_Register(base), as_Register(index));
2354 if( Assembler::is_simm16(disp) ) {
2355 if(rspec.type() != relocInfo::none) {
2356 __ relocate(rspec, Assembler::narrow_oop_operand);
2357 __ li48(T9, oop_index);
2358 } else {
2359 __ move(T9, oop_index);
2360 }
2361 __ sw(T9, AT, disp);
2362 } else {
2363 __ move(T9, disp);
2364 __ addu(AT, AT, T9);
2366 if(rspec.type() != relocInfo::none) {
2367 __ relocate(rspec, Assembler::narrow_oop_operand);
2368 __ li48(T9, oop_index);
2369 } else {
2370 __ move(T9, oop_index);
2371 }
2372 __ sw(T9, AT, 0);
2373 }
2374 }
2375 else {
2376 if( Assembler::is_simm16(disp) ) {
2377 if($src->constant_reloc() != relocInfo::none) {
2378 __ relocate(rspec, Assembler::narrow_oop_operand);
2379 __ li48(T9, oop_index);
2380 }
2381 else {
2382 __ li48(T9, oop_index);
2383 }
2384 __ sw(T9, as_Register(base), disp);
2385 } else {
2386 __ move(T9, disp);
2387 __ daddu(AT, as_Register(base), T9);
2389 if($src->constant_reloc() != relocInfo::none){
2390 __ relocate(rspec, Assembler::narrow_oop_operand);
2391 __ li48(T9, oop_index);
2392 } else {
2393 __ li48(T9, oop_index);
2394 }
2395 __ sw(T9, AT, 0);
2396 }
2397 }
2398 %}
  enc_class storeImmNKlass_enc (memory mem, immNKlass src) %{
    // Store a compressed-klass immediate: the narrow klass value is
    // materialized with li48 (fixed-length, patchable) under a metadata
    // relocation, then stored as a 32-bit word.
    MacroAssembler _masm(&cbuf);

    assert (UseCompressedOops, "should only be used for compressed headers");
    assert (__ oop_recorder() != NULL, "this assembler needs an OopRecorder");

    int base = $mem$$base;
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;
    long value = $src$$constant;

    guarantee(scale == 0, "scale is not zero !");

    int klass_index = __ oop_recorder()->find_index((Klass*)value);
    RelocationHolder rspec = metadata_Relocation::spec(klass_index);
    long narrowp = Klass::encode_klass((Klass*)value);

    // NOTE(review): Assembler::narrow_oop_operand is used with a metadata
    // relocation here — confirm this matches the format expected when the
    // li48 sequence is patched.
    if(index!=0){
      __ daddu(AT, as_Register(base), as_Register(index));
      if( Assembler::is_simm16(disp) ) {
        if(rspec.type() != relocInfo::none){
          __ relocate(rspec, Assembler::narrow_oop_operand);
          __ li48(T9, narrowp);
        }
        else {
          __ li48(T9, narrowp);
        }
        __ sw(T9, AT, disp);
      } else {
        __ move(T9, disp);
        __ daddu(AT, AT, T9);

        if(rspec.type() != relocInfo::none){
          __ relocate(rspec, Assembler::narrow_oop_operand);
          __ li48(T9, narrowp);
        }
        else {
          __ li48(T9, narrowp);
        }

        __ sw(T9, AT, 0);
      }
    }
    else {
      if( Assembler::is_simm16(disp) ) {
        if(rspec.type() != relocInfo::none){
          __ relocate(rspec, Assembler::narrow_oop_operand);
          __ li48(T9, narrowp);
        }
        else {
          __ li48(T9, narrowp);
        }
        __ sw(T9, as_Register(base), disp);
      } else {
        __ move(T9, disp);
        __ daddu(AT, as_Register(base), T9);

        if(rspec.type() != relocInfo::none){
          __ relocate(rspec, Assembler::narrow_oop_operand);
          __ li48(T9, narrowp);
        }
        else {
          __ li48(T9, narrowp);
        }
        __ sw(T9, AT, 0);
      }
    }
  %}
  enc_class storeImmN0_enc(memory mem, ImmN0 src) %{
    // Store the low 32 bits of S5_heapbase to base + index + disp.
    // NOTE(review): presumably this encodes a narrow-oop "null" in a
    // heap-based compressed-oops scheme (null decodes to heapbase) —
    // confirm against the port's oop compression mode.
    MacroAssembler _masm(&cbuf);
    int base = $mem$$base;
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;

    guarantee(scale == 0, "scale is not zero !");

    if(index!=0){
      __ daddu(AT, as_Register(base), as_Register(index));
      if( Assembler::is_simm16(disp) ) {
        __ sw(S5_heapbase, AT, disp);
      } else {
        __ move(T9, disp);
        __ daddu(AT, AT, T9);
        __ sw(S5_heapbase, AT, 0);
      }
    }
    else {
      if( Assembler::is_simm16(disp) ) {
        __ sw(S5_heapbase, as_Register(base), disp);
      } else {
        __ move(T9, disp);
        __ daddu(AT, as_Register(base), T9);
        __ sw(S5_heapbase, AT, 0);
      }
    }
  %}
  enc_class load_L_enc (mRegL dst, memory mem) %{
    // Load a 64-bit long from base + index + disp with an explicit
    // leading null check (a faulting lw at offset 0 of base).
    MacroAssembler _masm(&cbuf);
    int base = $mem$$base;
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;
    Register dst_reg = as_Register($dst$$reg);

    guarantee(scale == 0, "scale is not zero !");

    /*********************2013/03/27**************************
     * Jin: $base may contain a null object.
     * Server JIT force the exception_offset to be the pos of
     * the first instruction.
     * I insert such a 'null_check' at the beginning.
     *******************************************************/
    // This lw exists only to fault (and raise the implicit null check)
    // at the very first instruction of the encoding; AT is clobbered.
    __ lw(AT, as_Register(base), 0);

    /*********************2012/10/04**************************
     * Error case found in SortTest
     * 337 b java.util.Arrays::sort1 (401 bytes)
     * B73:
     * d34 lw T4.lo, [T4 + #16] #@loadL-lo
     * lw T4.hi, [T4 + #16]+4 #@loadL-hi
     *
     * The original instructions generated here are :
     * __ lw(dst_lo, as_Register(base), disp);
     * __ lw(dst_hi, as_Register(base), disp + 4);
     *******************************************************/
    if( index != 0 ) {
      __ daddu(AT, as_Register(base), as_Register(index));
      if( Assembler::is_simm16(disp) ) {
        __ ld(dst_reg, AT, disp);
      } else {
        __ move(T9, disp);
        __ daddu(AT, AT, T9);
        __ ld(dst_reg, AT, 0);
      }
    } else {
      if( Assembler::is_simm16(disp) ) {
        // NOTE(review): the move into AT looks redundant (ld could use
        // base directly) — kept as-is; confirm before simplifying.
        __ move(AT, as_Register(base));
        __ ld(dst_reg, AT, disp);
      } else {
        __ move(T9, disp);
        __ daddu(AT, as_Register(base), T9);
        __ ld(dst_reg, AT, 0);
      }
    }
  %}
  enc_class store_L_reg_enc (memory mem, mRegL src) %{
    // Store a 64-bit long register (sd) to base + index + disp.
    // scale must be 0; AT and T9 are scratch registers.
    MacroAssembler _masm(&cbuf);
    int base = $mem$$base;
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;
    Register src_reg = as_Register($src$$reg);

    guarantee(scale == 0, "scale is not zero !");

    if( index != 0 ) {
      __ daddu(AT, as_Register(base), as_Register(index));
      if( Assembler::is_simm16(disp) ) {
        __ sd(src_reg, AT, disp);
      } else {
        __ move(T9, disp);
        __ daddu(AT, AT, T9);
        __ sd(src_reg, AT, 0);
      }
    } else {
      if( Assembler::is_simm16(disp) ) {
        // NOTE(review): move into AT looks redundant (sd could use base
        // directly) — kept as-is; confirm before simplifying.
        __ move(AT, as_Register(base));
        __ sd(src_reg, AT, disp);
      } else {
        __ move(T9, disp);
        __ daddu(AT, as_Register(base), T9);
        __ sd(src_reg, AT, 0);
      }
    }
  %}
2583 enc_class store_L_immL0_enc (memory mem, immL0 src) %{
2584 MacroAssembler _masm(&cbuf);
2585 int base = $mem$$base;
2586 int index = $mem$$index;
2587 int scale = $mem$$scale;
2588 int disp = $mem$$disp;
2590 guarantee(scale == 0, "scale is not zero !");
2592 if( index != 0 ) {
2593 __ daddu(AT, as_Register(base), as_Register(index));
2594 if( Assembler::is_simm16(disp) ) {
2595 __ sd(R0, AT, disp);
2596 } else {
2597 __ move(T9, disp);
2598 __ addu(AT, AT, T9);
2599 __ sd(R0, AT, 0);
2600 }
2601 } else {
2602 if( Assembler::is_simm16(disp) ) {
2603 __ move(AT, as_Register(base));
2604 __ sd(R0, AT, disp);
2605 } else {
2606 __ move(T9, disp);
2607 __ addu(AT, as_Register(base), T9);
2608 __ sd(R0, AT, 0);
2609 }
2610 }
2611 %}
2613 enc_class store_L_immL_enc (memory mem, immL src) %{
2614 MacroAssembler _masm(&cbuf);
2615 int base = $mem$$base;
2616 int index = $mem$$index;
2617 int scale = $mem$$scale;
2618 int disp = $mem$$disp;
2619 long imm = $src$$constant;
2621 guarantee(scale == 0, "scale is not zero !");
2623 if( index != 0 ) {
2624 __ daddu(AT, as_Register(base), as_Register(index));
2625 if( Assembler::is_simm16(disp) ) {
2626 __ li(T9, imm);
2627 __ sd(T9, AT, disp);
2628 } else {
2629 __ move(T9, disp);
2630 __ addu(AT, AT, T9);
2631 __ li(T9, imm);
2632 __ sd(T9, AT, 0);
2633 }
2634 } else {
2635 if( Assembler::is_simm16(disp) ) {
2636 __ move(AT, as_Register(base));
2637 __ li(T9, imm);
2638 __ sd(T9, AT, disp);
2639 } else {
2640 __ move(T9, disp);
2641 __ addu(AT, as_Register(base), T9);
2642 __ li(T9, imm);
2643 __ sd(T9, AT, 0);
2644 }
2645 }
2646 %}
  enc_class load_F_enc (regF dst, memory mem) %{
    // Load a single-precision float (lwc1) from base + index + disp.
    // scale must be 0; AT and T9 are scratch registers.
    MacroAssembler _masm(&cbuf);
    int base = $mem$$base;
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;
    FloatRegister dst = $dst$$FloatRegister;

    guarantee(scale == 0, "scale is not zero !");

    if( index != 0 ) {
      __ daddu(AT, as_Register(base), as_Register(index));
      if( Assembler::is_simm16(disp) ) {
        __ lwc1(dst, AT, disp);
      } else {
        __ move(T9, disp);
        __ daddu(AT, AT, T9);
        __ lwc1(dst, AT, 0);
      }
    } else {
      if( Assembler::is_simm16(disp) ) {
        __ lwc1(dst, as_Register(base), disp);
      } else {
        __ move(T9, disp);
        __ daddu(AT, as_Register(base), T9);
        __ lwc1(dst, AT, 0);
      }
    }
  %}
  enc_class store_F_reg_enc (memory mem, regF src) %{
    // Store a single-precision float (swc1) to base + index + disp.
    // scale must be 0; AT and T9 are scratch registers.
    MacroAssembler _masm(&cbuf);
    int base = $mem$$base;
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;
    FloatRegister src = $src$$FloatRegister;

    guarantee(scale == 0, "scale is not zero !");

    if( index != 0 ) {
      __ daddu(AT, as_Register(base), as_Register(index));
      if( Assembler::is_simm16(disp) ) {
        __ swc1(src, AT, disp);
      } else {
        __ move(T9, disp);
        __ daddu(AT, AT, T9);
        __ swc1(src, AT, 0);
      }
    } else {
      if( Assembler::is_simm16(disp) ) {
        __ swc1(src, as_Register(base), disp);
      } else {
        __ move(T9, disp);
        __ daddu(AT, as_Register(base), T9);
        __ swc1(src, AT, 0);
      }
    }
  %}
2708 enc_class load_D_enc (regD dst, memory mem) %{
2709 MacroAssembler _masm(&cbuf);
2710 int base = $mem$$base;
2711 int index = $mem$$index;
2712 int scale = $mem$$scale;
2713 int disp = $mem$$disp;
2714 FloatRegister dst_reg = as_FloatRegister($dst$$reg);
2716 guarantee(scale == 0, "scale is not zero !");
2718 if( index != 0 ) {
2719 __ daddu(AT, as_Register(base), as_Register(index));
2720 if( Assembler::is_simm16(disp) ) {
2721 __ ldc1(dst_reg, AT, disp);
2722 } else {
2723 __ move(T9, disp);
2724 __ addu(AT, AT, T9);
2725 __ ldc1(dst_reg, AT, 0);
2726 }
2727 } else {
2728 if( Assembler::is_simm16(disp) ) {
2729 __ ldc1(dst_reg, as_Register(base), disp);
2730 } else {
2731 __ move(T9, disp);
2732 __ addu(AT, as_Register(base), T9);
2733 __ ldc1(dst_reg, AT, 0);
2734 }
2735 }
2736 %}
2738 enc_class store_D_reg_enc (memory mem, regD src) %{
2739 MacroAssembler _masm(&cbuf);
2740 int base = $mem$$base;
2741 int index = $mem$$index;
2742 int scale = $mem$$scale;
2743 int disp = $mem$$disp;
2744 FloatRegister src_reg = as_FloatRegister($src$$reg);
2746 guarantee(scale == 0, "scale is not zero !");
2748 if( index != 0 ) {
2749 __ daddu(AT, as_Register(base), as_Register(index));
2750 if( Assembler::is_simm16(disp) ) {
2751 __ sdc1(src_reg, AT, disp);
2752 } else {
2753 __ move(T9, disp);
2754 __ addu(AT, AT, T9);
2755 __ sdc1(src_reg, AT, 0);
2756 }
2757 } else {
2758 if( Assembler::is_simm16(disp) ) {
2759 __ sdc1(src_reg, as_Register(base), disp);
2760 } else {
2761 __ move(T9, disp);
2762 __ addu(AT, as_Register(base), T9);
2763 __ sdc1(src_reg, AT, 0);
2764 }
2765 }
2766 %}
  enc_class Java_To_Runtime (method meth) %{ // CALL Java_To_Runtime, Java_To_Runtime_Leaf
    // Emit a call into the VM runtime: mark the call site for relocation,
    // materialize the target address with the fixed-length li48 sequence
    // (so the site stays patchable), then jalr + delay-slot nop.
    MacroAssembler _masm(&cbuf);
    // This is the instruction starting address for relocation info.
    __ block_comment("Java_To_Runtime");
    cbuf.set_insts_mark();
    __ relocate(relocInfo::runtime_call_type);

    __ li48(T9, (long)$meth$$method);
    __ jalr(T9);
    __ nop();
  %}
  enc_class Java_Static_Call (method meth) %{ // JAVA STATIC CALL
    // CALL to fixup routine. Fixup routine uses ScopeDesc info to determine
    // who we intended to call.
    // The relocation type depends on what we know statically: runtime call
    // (no _method), optimized virtual, or plain static call.
    MacroAssembler _masm(&cbuf);
    cbuf.set_insts_mark();

    if ( !_method ) {
      __ relocate(relocInfo::runtime_call_type);
      //emit_d32_reloc(cbuf, ($meth$$method - (int)(cbuf.code_end()) - 4),
      // runtime_call_Relocation::spec(), RELOC_IMM32 );
    } else if(_optimized_virtual) {
      __ relocate(relocInfo::opt_virtual_call_type);
      //emit_d32_reloc(cbuf, ($meth$$method - (int)(cbuf.code_end()) - 4),
      // opt_virtual_call_Relocation::spec(), RELOC_IMM32 );
    } else {
      __ relocate(relocInfo::static_call_type);
      //emit_d32_reloc(cbuf, ($meth$$method - (int)(cbuf.code_end()) - 4),
      // static_call_Relocation::spec(), RELOC_IMM32 );
    }

    // NOTE(review): Java_To_Runtime uses li48 (fixed-length, patchable)
    // while this uses li — confirm li emits a fixed-size sequence here,
    // otherwise patching the call target at this relocated site may break.
    __ li(T9, $meth$$method);
    __ jalr(T9);
    __ nop();
    if( _method ) { // Emit stub for static call
      emit_java_to_interp(cbuf);
    }
  %}
2809 /*
2810 * [Ref: LIR_Assembler::ic_call() ]
2811 */
2812 enc_class Java_Dynamic_Call (method meth) %{ // JAVA DYNAMIC CALL
2813 MacroAssembler _masm(&cbuf);
2814 __ block_comment("Java_Dynamic_Call");
2815 __ ic_call((address)$meth$$method);
2816 %}
  enc_class call_epilog %{
    // Intentionally emits nothing on MIPS; the x86 VerifyStackAtCalls
    // checking code below is kept (commented out) for reference only.
    /*
    if( VerifyStackAtCalls ) {
      // Check that stack depth is unchanged: find majik cookie on stack
      int framesize = ra_->reg2offset_unchecked(OptoReg::add(ra_->_matcher._old_SP,-3*VMRegImpl::slots_per_word));
      if(framesize >= 128) {
        emit_opcode(cbuf, 0x81); // cmp [esp+0],0xbadb1ood
        emit_d8(cbuf,0xBC);
        emit_d8(cbuf,0x24);
        emit_d32(cbuf,framesize); // Find majik cookie from ESP
        emit_d32(cbuf, 0xbadb100d);
      }
      else {
        emit_opcode(cbuf, 0x81); // cmp [esp+0],0xbadb1ood
        emit_d8(cbuf,0x7C);
        emit_d8(cbuf,0x24);
        emit_d8(cbuf,framesize); // Find majik cookie from ESP
        emit_d32(cbuf, 0xbadb100d);
      }
      // jmp EQ around INT3
      // QQQ TODO
      const int jump_around = 5; // size of call to breakpoint, 1 for CC
      emit_opcode(cbuf,0x74);
      emit_d8(cbuf, jump_around);
      // QQQ temporary
      emit_break(cbuf);
      // Die if stack mismatch
      // emit_opcode(cbuf,0xCC);
    }
    */
  %}
  enc_class Set_Flags_After_Fast_Lock_Unlock(FlagsReg cr) %{
    // Convert a status left in AT into a flags value:
    //   flags = 0           if AT == 0
    //   flags = 0xFFFFFFFF  otherwise
    // NOTE(review): assumes the preceding fast lock/unlock sequence left
    // its result in AT — confirm at the instruct definitions that use this.
    Register flags = $cr$$Register;
    Label L;

    MacroAssembler _masm(&cbuf);

    __ addu(flags, R0, R0);          // flags = 0
    __ beq(AT, R0, L);
    __ delayed()->nop();
    __ move(flags, 0xFFFFFFFF);      // failure: all-ones sentinel
    __ bind(L);
  %}
  enc_class enc_PartialSubtypeCheck(mRegP result, mRegP sub, mRegP super) %{
    // Slow-path subtype check: result = 0 when sub is a subtype of super,
    // 1 on a miss. T8/T9 are clobbered as temporaries.
    Register result = $result$$Register;
    Register sub = $sub$$Register;
    Register super = $super$$Register;
    Register length = T8;
    Register tmp = T9;
    Label miss;

    /* 2012/9/28 Jin: result may be the same as sub
     * 47c B40: # B21 B41 <- B20 Freq: 0.155379
     * 47c partialSubtypeCheck result=S1, sub=S1, super=S3, length=S0
     * 4bc mov S2, NULL #@loadConP
     * 4c0 beq S1, S2, B21 #@branchConP P=0.999999 C=-1.000000
     */
    MacroAssembler _masm(&cbuf);
    Label done;
    __ check_klass_subtype_slow_path(sub, super, length, tmp,
                                     NULL, &miss,
                                     /*set_cond_codes:*/ true);
    /* 2013/7/22 Jin: Refer to X86_64's RDI */
    // Hit: result = 0 (matches x86_64 convention of RDI == 0 on success).
    __ move(result, 0);
    __ b(done);
    __ nop();

    __ bind(miss);
    __ move(result, 1);
    __ bind(done);
  %}
2894 %}
2897 //---------MIPS FRAME--------------------------------------------------------------
2898 // Definition of frame structure and management information.
2899 //
2900 // S T A C K L A Y O U T Allocators stack-slot number
2901 // | (to get allocators register number
2902 // G Owned by | | v add SharedInfo::stack0)
2903 // r CALLER | |
2904 // o | +--------+ pad to even-align allocators stack-slot
2905 // w V | pad0 | numbers; owned by CALLER
2906 // t -----------+--------+----> Matcher::_in_arg_limit, unaligned
2907 // h ^ | in | 5
2908 // | | args | 4 Holes in incoming args owned by SELF
2909 // | | old | | 3
2910 // | | SP-+--------+----> Matcher::_old_SP, even aligned
2911 // v | | ret | 3 return address
2912 // Owned by +--------+
2913 // Self | pad2 | 2 pad to align old SP
2914 // | +--------+ 1
2915 // | | locks | 0
2916 // | +--------+----> SharedInfo::stack0, even aligned
2917 // | | pad1 | 11 pad to align new SP
2918 // | +--------+
2919 // | | | 10
2920 // | | spills | 9 spills
2921 // V | | 8 (pad0 slot for callee)
2922 // -----------+--------+----> Matcher::_out_arg_limit, unaligned
2923 // ^ | out | 7
2924 // | | args | 6 Holes in outgoing args owned by CALLEE
2925 // Owned by new | |
2926 // Callee SP-+--------+----> Matcher::_new_SP, even aligned
2927 // | |
2928 //
2929 // Note 1: Only region 8-11 is determined by the allocator. Region 0-5 is
2930 // known from SELF's arguments and the Java calling convention.
2931 // Region 6-7 is determined per call site.
2932 // Note 2: If the calling convention leaves holes in the incoming argument
2933 // area, those holes are owned by SELF. Holes in the outgoing area
2934 // are owned by the CALLEE. Holes should not be nessecary in the
2935 // incoming area, as the Java calling convention is completely under
2936 // the control of the AD file. Doubles can be sorted and packed to
2937 // avoid holes. Holes in the outgoing arguments may be nessecary for
2938 // varargs C calling conventions.
2939 // Note 3: Region 0-3 is even aligned, with pad2 as needed. Region 3-5 is
2940 // even aligned with pad0 as needed.
2941 // Region 6 is even aligned. Region 6-7 is NOT even aligned;
2942 // region 6-11 is even aligned; it may be padded out more so that
2943 // the region from SP to FP meets the minimum stack alignment.
2944 // Note 4: For I2C adapters, the incoming FP may not meet the minimum stack
2945 // alignment. Region 11, pad1, may be dynamically extended so that
2946 // SP meets the minimum alignment.
// Frame definition: stack direction, reserved registers, calling
// conventions, and return-value locations for the MIPS64 C2 port.
frame %{

  stack_direction(TOWARDS_LOW);

  // These two registers define part of the calling convention
  // between compiled code and the interpreter.
  // SEE StartI2CNode::calling_convention & StartC2INode::calling_convention & StartOSRNode::calling_convention
  // for more information. by yjl 3/16/2006

  inline_cache_reg(T1);                // Inline Cache Register
  interpreter_method_oop_reg(S3);      // Method Oop Register when calling interpreter
  /*
  inline_cache_reg(T1);                // Inline Cache Register or methodOop for I2C
  interpreter_arg_ptr_reg(A0);         // Argument pointer for I2C adapters
  */

  // Optional: name the operand used by cisc-spilling to access [stack_pointer + offset]
  cisc_spilling_operand_name(indOffset32);

  // Number of stack slots consumed by locking an object
  // generate Compile::sync_stack_slots
#ifdef _LP64
  sync_stack_slots(2);
#else
  sync_stack_slots(1);
#endif

  frame_pointer(SP);

  // Interpreter stores its frame pointer in a register which is
  // stored to the stack by I2CAdaptors.
  // I2CAdaptors convert from interpreted java to compiled java.

  interpreter_frame_pointer(FP);

  // generate Matcher::stack_alignment
  stack_alignment(StackAlignmentInBytes); //wordSize = sizeof(char*);

  // Number of stack slots between incoming argument block and the start of
  // a new frame. The PROLOG must add this many slots to the stack. The
  // EPILOG must remove this many slots. Intel needs one slot for
  // return address.
  // generate Matcher::in_preserve_stack_slots
  //in_preserve_stack_slots(VerifyStackAtCalls + 2); //Now VerifyStackAtCalls is defined as false ! Leave one stack slot for ra and fp
  in_preserve_stack_slots(4); //Now VerifyStackAtCalls is defined as false ! Leave two stack slots for ra and fp

  // Number of outgoing stack slots killed above the out_preserve_stack_slots
  // for calls to C. Supports the var-args backing area for register parms.
  varargs_C_out_slots_killed(0);

  // The after-PROLOG location of the return address. Location of
  // return address specifies a type (REG or STACK) and a number
  // representing the register number (i.e. - use a register name) or
  // stack slot.
  // Ret Addr is on stack in slot 0 if no locks or verification or alignment.
  // Otherwise, it is above the locks and verification slot and alignment word
  //return_addr(STACK -1+ round_to(1+VerifyStackAtCalls+Compile::current()->sync()*Compile::current()->sync_stack_slots(),WordsPerLong));
  return_addr(REG RA);

  // Body of function which returns an integer array locating
  // arguments either in registers or in stack slots. Passed an array
  // of ideal registers called "sig" and a "length" count. Stack-slot
  // offsets are based on outgoing arguments, i.e. a CALLER setting up
  // arguments for a CALLEE. Incoming stack arguments are
  // automatically biased by the preserve_stack_slots field above.

  // will generated to Matcher::calling_convention(OptoRegPair *sig, uint length, bool is_outgoing)
  // StartNode::calling_convention call this. by yjl 3/16/2006
  calling_convention %{
    SharedRuntime::java_calling_convention(sig_bt, regs, length, false);
  %}

  // Body of function which returns an integer array locating
  // arguments either in registers or in stack slots. Passed an array
  // of ideal registers called "sig" and a "length" count. Stack-slot
  // offsets are based on outgoing arguments, i.e. a CALLER setting up
  // arguments for a CALLEE. Incoming stack arguments are
  // automatically biased by the preserve_stack_slots field above.

  // SEE CallRuntimeNode::calling_convention for more information. by yjl 3/16/2006
  c_calling_convention %{
    (void) SharedRuntime::c_calling_convention(sig_bt, regs, /*regs2=*/NULL, length);
  %}

  // Location of C & interpreter return values
  // register(s) contain(s) return value for Op_StartI2C and Op_StartOSR.
  // SEE Matcher::match. by yjl 3/16/2006
  c_return_value %{
    assert( ideal_reg >= Op_RegI && ideal_reg <= Op_RegL, "only return normal values" );
    /* -- , -- , Op_RegN, Op_RegI, Op_RegP, Op_RegF, Op_RegD, Op_RegL */
    static int lo[Op_RegL+1] = { 0, 0, V0_num, V0_num, V0_num, F0_num, F0_num, V0_num };
    static int hi[Op_RegL+1] = { 0, 0, OptoReg::Bad, OptoReg::Bad, V0_H_num, OptoReg::Bad, F0_H_num, V0_H_num };
    return OptoRegPair(hi[ideal_reg],lo[ideal_reg]);
  %}

  // Location of return values
  // register(s) contain(s) return value for Op_StartC2I and Op_Start.
  // SEE Matcher::match. by yjl 3/16/2006

  return_value %{
    assert( ideal_reg >= Op_RegI && ideal_reg <= Op_RegL, "only return normal values" );
    /* -- , -- , Op_RegN, Op_RegI, Op_RegP, Op_RegF, Op_RegD, Op_RegL */
    static int lo[Op_RegL+1] = { 0, 0, V0_num, V0_num, V0_num, F0_num, F0_num, V0_num };
    static int hi[Op_RegL+1] = { 0, 0, OptoReg::Bad, OptoReg::Bad, V0_H_num, OptoReg::Bad, F0_H_num, V0_H_num};
    return OptoRegPair(hi[ideal_reg],lo[ideal_reg]);
  %}

%}
3064 //----------ATTRIBUTES---------------------------------------------------------
3065 //----------Operand Attributes-------------------------------------------------
3066 op_attrib op_cost(0); // Required cost attribute
3068 //----------Instruction Attributes---------------------------------------------
3069 ins_attrib ins_cost(100); // Required cost attribute
3070 ins_attrib ins_size(32); // Required size attribute (in bits)
3071 ins_attrib ins_pc_relative(0); // Required PC Relative flag
3072 ins_attrib ins_short_branch(0); // Required flag: is this instruction a
3073 // non-matching short branch variant of some
3074 // long branch?
3075 ins_attrib ins_alignment(4); // Required alignment attribute (must be a power of 2)
3076 // specifies the alignment that some part of the instruction (not
3077 // necessarily the start) requires. If > 1, a compute_padding()
3078 // function must be provided for the instruction
3080 //----------OPERANDS-----------------------------------------------------------
3081 // Operand definitions must precede instruction definitions for correct parsing
3082 // in the ADLC because operands constitute user defined types which are used in
3083 // instruction definitions.
3086 // Flags register, used as output of compare instructions
3087 operand FlagsReg() %{
3088 constraint(ALLOC_IN_RC(mips_flags));
3089 match(RegFlags);
3091 format %{ "EFLAGS" %}
3092 interface(REG_INTER);
3093 %}
3095 //----------Simple Operands----------------------------------------------------
3096 //TODO: Should we need to define some more special immediate number ?
3097 // Immediate Operands
3098 // Integer Immediate
3099 operand immI() %{
3100 match(ConI);
3101 //TODO: should not match immI8 here LEE
3102 match(immI8);
3104 op_cost(20);
3105 format %{ %}
3106 interface(CONST_INTER);
3107 %}
3109 // Long Immediate 8-bit
3110 operand immL8()
3111 %{
3112 predicate(-0x80L <= n->get_long() && n->get_long() < 0x80L);
3113 match(ConL);
3115 op_cost(5);
3116 format %{ %}
3117 interface(CONST_INTER);
3118 %}
3120 // Constant for test vs zero
3121 operand immI0() %{
3122 predicate(n->get_int() == 0);
3123 match(ConI);
3125 op_cost(0);
3126 format %{ %}
3127 interface(CONST_INTER);
3128 %}
3130 // Constant for increment
3131 operand immI1() %{
3132 predicate(n->get_int() == 1);
3133 match(ConI);
3135 op_cost(0);
3136 format %{ %}
3137 interface(CONST_INTER);
3138 %}
3140 // Constant for decrement
3141 operand immI_M1() %{
3142 predicate(n->get_int() == -1);
3143 match(ConI);
3145 op_cost(0);
3146 format %{ %}
3147 interface(CONST_INTER);
3148 %}
3150 // Valid scale values for addressing modes
3151 operand immI2() %{
3152 predicate(0 <= n->get_int() && (n->get_int() <= 3));
3153 match(ConI);
3155 format %{ %}
3156 interface(CONST_INTER);
3157 %}
3159 operand immI8() %{
3160 predicate((-128 <= n->get_int()) && (n->get_int() <= 127));
3161 match(ConI);
3163 op_cost(5);
3164 format %{ %}
3165 interface(CONST_INTER);
3166 %}
3168 operand immI16() %{
3169 predicate((-32768 <= n->get_int()) && (n->get_int() <= 32767));
3170 match(ConI);
3172 op_cost(10);
3173 format %{ %}
3174 interface(CONST_INTER);
3175 %}
3177 // Constant for long shifts
3178 operand immI_32() %{
3179 predicate( n->get_int() == 32 );
3180 match(ConI);
3182 op_cost(0);
3183 format %{ %}
3184 interface(CONST_INTER);
3185 %}
3187 operand immI_1_31() %{
3188 predicate( n->get_int() >= 1 && n->get_int() <= 31 );
3189 match(ConI);
3191 op_cost(0);
3192 format %{ %}
3193 interface(CONST_INTER);
3194 %}
3196 operand immI_32_63() %{
3197 predicate( n->get_int() >= 32 && n->get_int() <= 63 );
3198 match(ConI);
3199 op_cost(0);
3201 format %{ %}
3202 interface(CONST_INTER);
3203 %}
3205 operand immI16_sub() %{
3206 predicate((-32767 <= n->get_int()) && (n->get_int() <= 32767));
3207 match(ConI);
3209 op_cost(10);
3210 format %{ %}
3211 interface(CONST_INTER);
3212 %}
3214 operand immI_0_65535() %{
3215 predicate( n->get_int() >= 0 && n->get_int() <= 65535 );
3216 match(ConI);
3217 op_cost(0);
3219 format %{ %}
3220 interface(CONST_INTER);
3221 %}
3223 operand immI_1() %{
3224 predicate( n->get_int() == 1 );
3225 match(ConI);
3227 op_cost(0);
3228 format %{ %}
3229 interface(CONST_INTER);
3230 %}
3232 operand immI_2() %{
3233 predicate( n->get_int() == 2 );
3234 match(ConI);
3236 op_cost(0);
3237 format %{ %}
3238 interface(CONST_INTER);
3239 %}
3241 operand immI_3() %{
3242 predicate( n->get_int() == 3 );
3243 match(ConI);
3245 op_cost(0);
3246 format %{ %}
3247 interface(CONST_INTER);
3248 %}
3250 // Immediates for special shifts (sign extend)
3252 // Constants for increment
3253 operand immI_16() %{
3254 predicate( n->get_int() == 16 );
3255 match(ConI);
3257 format %{ %}
3258 interface(CONST_INTER);
3259 %}
3261 operand immI_24() %{
3262 predicate( n->get_int() == 24 );
3263 match(ConI);
3265 format %{ %}
3266 interface(CONST_INTER);
3267 %}
3269 // Constant for byte-wide masking
3270 operand immI_255() %{
3271 predicate( n->get_int() == 255 );
3272 match(ConI);
3274 format %{ %}
3275 interface(CONST_INTER);
3276 %}
3278 // Pointer Immediate
3279 operand immP() %{
3280 match(ConP);
3282 op_cost(10);
3283 format %{ %}
3284 interface(CONST_INTER);
3285 %}
3287 operand immP31()
3288 %{
3289 predicate(n->as_Type()->type()->reloc() == relocInfo::none
3290 && (n->get_ptr() >> 31) == 0);
3291 match(ConP);
3293 op_cost(5);
3294 format %{ %}
3295 interface(CONST_INTER);
3296 %}
3298 // NULL Pointer Immediate
3299 operand immP0() %{
3300 predicate( n->get_ptr() == 0 );
3301 match(ConP);
3302 op_cost(0);
3304 format %{ %}
3305 interface(CONST_INTER);
3306 %}
3308 // Pointer Immediate
3309 operand immN() %{
3310 match(ConN);
3312 op_cost(10);
3313 format %{ %}
3314 interface(CONST_INTER);
3315 %}
3317 operand immNKlass() %{
3318 match(ConNKlass);
3320 op_cost(10);
3321 format %{ %}
3322 interface(CONST_INTER);
3323 %}
3325 // NULL Pointer Immediate
3326 operand immN0() %{
3327 predicate(n->get_narrowcon() == 0);
3328 match(ConN);
3330 op_cost(5);
3331 format %{ %}
3332 interface(CONST_INTER);
3333 %}
3335 // Long Immediate
3336 operand immL() %{
3337 match(ConL);
3339 op_cost(20);
3340 format %{ %}
3341 interface(CONST_INTER);
3342 %}
3344 // Long Immediate zero
3345 operand immL0() %{
3346 predicate( n->get_long() == 0L );
3347 match(ConL);
3348 op_cost(0);
3350 format %{ %}
3351 interface(CONST_INTER);
3352 %}
3354 // Long Immediate zero
3355 operand immL_M1() %{
3356 predicate( n->get_long() == -1L );
3357 match(ConL);
3358 op_cost(0);
3360 format %{ %}
3361 interface(CONST_INTER);
3362 %}
3364 // Long immediate from 0 to 127.
3365 // Used for a shorter form of long mul by 10.
3366 operand immL_127() %{
3367 predicate((0 <= n->get_long()) && (n->get_long() <= 127));
3368 match(ConL);
3369 op_cost(0);
3371 format %{ %}
3372 interface(CONST_INTER);
3373 %}
3376 // Long Immediate: low 32-bit mask
3377 operand immL_32bits() %{
3378 predicate(n->get_long() == 0xFFFFFFFFL);
3379 match(ConL);
3380 op_cost(20);
3382 format %{ %}
3383 interface(CONST_INTER);
3384 %}
3386 // Long Immediate 32-bit signed
3387 operand immL32()
3388 %{
3389 predicate(n->get_long() == (int) (n->get_long()));
3390 match(ConL);
3392 op_cost(15);
3393 format %{ %}
3394 interface(CONST_INTER);
3395 %}
3398 //single-precision floating-point zero
3399 operand immF0() %{
3400 predicate(jint_cast(n->getf()) == 0);
3401 match(ConF);
3403 op_cost(5);
3404 format %{ %}
3405 interface(CONST_INTER);
3406 %}
3408 //single-precision floating-point immediate
3409 operand immF() %{
3410 match(ConF);
3412 op_cost(20);
3413 format %{ %}
3414 interface(CONST_INTER);
3415 %}
3417 //double-precision floating-point zero
3418 operand immD0() %{
3419 predicate(jlong_cast(n->getd()) == 0);
3420 match(ConD);
3422 op_cost(5);
3423 format %{ %}
3424 interface(CONST_INTER);
3425 %}
3427 //double-precision floating-point immediate
3428 operand immD() %{
3429 match(ConD);
3431 op_cost(20);
3432 format %{ %}
3433 interface(CONST_INTER);
3434 %}
3436 // Register Operands
3437 // Integer Register
3438 operand mRegI() %{
3439 constraint(ALLOC_IN_RC(int_reg));
3440 match(RegI);
3441 match(s_RegI);
3442 match(t_RegI);
3444 format %{ %}
3445 interface(REG_INTER);
3446 %}
3449 // Subset of Integer Registers
3450 operand s_RegI(mRegI reg) %{
3451 constraint(ALLOC_IN_RC(s_reg));
3452 match(reg);
3453 match(mS0RegI);
3454 match(mS1RegI);
3455 match(mS2RegI);
3456 match(mS3RegI);
3457 match(mS4RegI);
3459 format %{ %}
3460 interface(REG_INTER);
3461 %}
3463 operand mS0RegI(s_RegI reg) %{
3464 constraint(ALLOC_IN_RC(s0_reg));
3465 match(reg);
3466 match(mRegI);
3468 format %{ "S0" %}
3469 interface(REG_INTER);
3470 %}
3472 operand mS1RegI(s_RegI reg) %{
3473 constraint(ALLOC_IN_RC(s1_reg));
3474 match(reg);
3475 match(mRegI);
3477 format %{ "S1" %}
3478 interface(REG_INTER);
3479 %}
3481 operand mS2RegI(s_RegI reg) %{
3482   constraint(ALLOC_IN_RC(s2_reg));
3483   match(reg);
3484   match(mRegI);
// Fixed format string: this operand is allocated in s2_reg, so it must
// print as "S2" (it previously printed "S0", a copy-paste from mS0RegI;
// compare mS1RegI/"S1", mS3RegI/"S3", mS4RegI/"S4").
3486   format %{ "S2" %}
3487   interface(REG_INTER);
3488 %}
3490 operand mS3RegI(s_RegI reg) %{
3491 constraint(ALLOC_IN_RC(s3_reg));
3492 match(reg);
3493 match(mRegI);
3495 format %{ "S3" %}
3496 interface(REG_INTER);
3497 %}
3499 operand mS4RegI(s_RegI reg) %{
3500 constraint(ALLOC_IN_RC(s4_reg));
3501 match(reg);
3502 match(mRegI);
3504 format %{ "S4" %}
3505 interface(REG_INTER);
3506 %}
3508 // Subset of Integer Registers
3509 operand t_RegI(mRegI reg) %{
3510 constraint(ALLOC_IN_RC(t_reg));
3511 match(reg);
3512 match(mT0RegI);
3513 match(mT1RegI);
3514 match(mT2RegI);
3515 match(mT3RegI);
3517 format %{ %}
3518 interface(REG_INTER);
3519 %}
3521 operand mT0RegI(t_RegI reg) %{
3522 constraint(ALLOC_IN_RC(t0_reg));
3523 match(reg);
3524 match(mRegI);
3526 format %{ "T0" %}
3527 interface(REG_INTER);
3528 %}
3530 operand mT1RegI(t_RegI reg) %{
3531 constraint(ALLOC_IN_RC(t1_reg));
3532 match(reg);
3533 match(mRegI);
3535 format %{ "T1" %}
3536 interface(REG_INTER);
3537 %}
3539 operand mT2RegI(t_RegI reg) %{
3540 constraint(ALLOC_IN_RC(t2_reg));
3541 match(reg);
3542 match(mRegI);
3544 format %{ "T2" %}
3545 interface(REG_INTER);
3546 %}
3548 operand mT3RegI(t_RegI reg) %{
3549 constraint(ALLOC_IN_RC(t3_reg));
3550 match(reg);
3551 match(mRegI);
3553 format %{ "T3" %}
3554 interface(REG_INTER);
3555 %}
3557 operand mRegN() %{
3558 constraint(ALLOC_IN_RC(int_reg));
3559 match(RegN);
3561 match(t0_RegN);
3562 match(t1_RegN);
3563 match(t2_RegN);
3564 match(t3_RegN);
3566 match(a3_RegN);
3567 format %{ %}
3568 interface(REG_INTER);
3569 %}
3571 operand t0_RegN() %{
3572 constraint(ALLOC_IN_RC(t0_reg));
3573 match(RegN);
3574 match(mRegN);
3576 format %{ %}
3577 interface(REG_INTER);
3578 %}
3580 operand t1_RegN() %{
3581 constraint(ALLOC_IN_RC(t1_reg));
3582 match(RegN);
3583 match(mRegN);
3585 format %{ %}
3586 interface(REG_INTER);
3587 %}
3589 operand t2_RegN() %{
3590 constraint(ALLOC_IN_RC(t2_reg));
3591 match(RegN);
3592 match(mRegN);
3594 format %{ %}
3595 interface(REG_INTER);
3596 %}
3598 operand t3_RegN() %{
3599 constraint(ALLOC_IN_RC(t3_reg));
3600 match(RegN);
3601 match(mRegN);
3603 format %{ %}
3604 interface(REG_INTER);
3605 %}
3607 operand a3_RegN() %{
3608 constraint(ALLOC_IN_RC(a3_reg));
3609 match(RegN);
3610 match(mRegN);
3612 format %{ %}
3613 interface(REG_INTER);
3614 %}
3615 // Pointer Register
3616 operand mRegP() %{
3617 constraint(ALLOC_IN_RC(p_reg));
3618 match(RegP);
3620 match(t0_RegP);
3621 match(t1_RegP);
3622 match(t2_RegP);
3623 match(t3_RegP);
3624 match(a3_RegP);
3625 match(a4_RegP);
3626 match(a5_RegP);
3627 match(a6_RegP);
3628 match(a7_RegP);
3630 match(a0_RegP);
3632 match(s0_RegP);
3633 match(s1_RegP);
3634 match(s2_RegP);
3635 match(s3_RegP);
3636 match(s4_RegP);
3637 //match(s5_RegP);
3638 // match(s7_RegP);
3640 //match(mSPRegP);
3641 //match(mFPRegP);
3643 format %{ %}
3644 interface(REG_INTER);
3645 %}
3647 operand a0_RegP()
3648 %{
3649 constraint(ALLOC_IN_RC(a0_long_reg));
3650 match(RegP);
3651 match(mRegP);
3653 format %{ %}
3654 interface(REG_INTER);
3655 %}
3657 operand s0_RegP()
3658 %{
3659 constraint(ALLOC_IN_RC(s0_long_reg));
3660 match(RegP);
3661 match(mRegP);
3663 format %{ %}
3664 interface(REG_INTER);
3665 %}
3667 operand s1_RegP()
3668 %{
3669 constraint(ALLOC_IN_RC(s1_long_reg));
3670 match(RegP);
3671 match(mRegP);
3673 format %{ %}
3674 interface(REG_INTER);
3675 %}
3677 operand s2_RegP()
3678 %{
3679 constraint(ALLOC_IN_RC(s2_long_reg));
3680 match(RegP);
3681 match(mRegP);
3683 format %{ %}
3684 interface(REG_INTER);
3685 %}
3687 operand s3_RegP()
3688 %{
3689 constraint(ALLOC_IN_RC(s3_long_reg));
3690 match(RegP);
3691 match(mRegP);
3693 format %{ %}
3694 interface(REG_INTER);
3695 %}
3697 operand s4_RegP()
3698 %{
3699 constraint(ALLOC_IN_RC(s4_long_reg));
3700 match(RegP);
3701 match(mRegP);
3703 format %{ %}
3704 interface(REG_INTER);
3705 %}
3707 operand t0_RegP()
3708 %{
3709 constraint(ALLOC_IN_RC(t0_long_reg));
3710 match(RegP);
3711 match(mRegP);
3713 format %{ %}
3714 interface(REG_INTER);
3715 %}
3717 operand t1_RegP()
3718 %{
3719 constraint(ALLOC_IN_RC(t1_long_reg));
3720 match(RegP);
3721 match(mRegP);
3723 format %{ %}
3724 interface(REG_INTER);
3725 %}
3727 operand t2_RegP()
3728 %{
3729 constraint(ALLOC_IN_RC(t2_long_reg));
3730 match(RegP);
3731 match(mRegP);
3733 format %{ %}
3734 interface(REG_INTER);
3735 %}
3737 operand t3_RegP()
3738 %{
3739 constraint(ALLOC_IN_RC(t3_long_reg));
3740 match(RegP);
3741 match(mRegP);
3743 format %{ %}
3744 interface(REG_INTER);
3745 %}
3747 operand a3_RegP()
3748 %{
3749 constraint(ALLOC_IN_RC(a3_long_reg));
3750 match(RegP);
3751 match(mRegP);
3753 format %{ %}
3754 interface(REG_INTER);
3755 %}
3757 operand a4_RegP()
3758 %{
3759 constraint(ALLOC_IN_RC(a4_long_reg));
3760 match(RegP);
3761 match(mRegP);
3763 format %{ %}
3764 interface(REG_INTER);
3765 %}
3768 operand a5_RegP()
3769 %{
3770 constraint(ALLOC_IN_RC(a5_long_reg));
3771 match(RegP);
3772 match(mRegP);
3774 format %{ %}
3775 interface(REG_INTER);
3776 %}
3778 operand a6_RegP()
3779 %{
3780 constraint(ALLOC_IN_RC(a6_long_reg));
3781 match(RegP);
3782 match(mRegP);
3784 format %{ %}
3785 interface(REG_INTER);
3786 %}
3788 operand a7_RegP()
3789 %{
3790 constraint(ALLOC_IN_RC(a7_long_reg));
3791 match(RegP);
3792 match(mRegP);
3794 format %{ %}
3795 interface(REG_INTER);
3796 %}
3798 /*
3799 operand mSPRegP(mRegP reg) %{
3800 constraint(ALLOC_IN_RC(sp_reg));
3801 match(reg);
3803 format %{ "SP" %}
3804 interface(REG_INTER);
3805 %}
3807 operand mFPRegP(mRegP reg) %{
3808 constraint(ALLOC_IN_RC(fp_reg));
3809 match(reg);
3811 format %{ "FP" %}
3812 interface(REG_INTER);
3813 %}
3814 */
3816 operand mRegL() %{
3817 constraint(ALLOC_IN_RC(long_reg));
3818 match(RegL);
3819 match(v0RegL);
3820 match(v1RegL);
3821 match(a0RegL);
3822 match(a1RegL);
3823 match(a2RegL);
3824 match(a3RegL);
3825 match(t0RegL);
3826 match(t1RegL);
3827 match(t2RegL);
3828 match(t3RegL);
3829 match(a4RegL);
3830 match(a5RegL);
3831 match(a6RegL);
3832 match(a7RegL);
3834 format %{ %}
3835 interface(REG_INTER);
3836 %}
3838 operand v0RegL() %{
3839 constraint(ALLOC_IN_RC(v0_long_reg));
3840 match(RegL);
3841 match(mRegL);
3843 format %{ %}
3844 interface(REG_INTER);
3845 %}
3847 operand v1RegL() %{
3848 constraint(ALLOC_IN_RC(v1_long_reg));
3849 match(RegL);
3850 match(mRegL);
3852 format %{ %}
3853 interface(REG_INTER);
3854 %}
3856 operand a0RegL() %{
3857 constraint(ALLOC_IN_RC(a0_long_reg));
3858 match(RegL);
3859 match(mRegL);
3861 format %{ "A0" %}
3862 interface(REG_INTER);
3863 %}
3865 operand a1RegL() %{
3866 constraint(ALLOC_IN_RC(a1_long_reg));
3867 match(RegL);
3868 match(mRegL);
3870 format %{ %}
3871 interface(REG_INTER);
3872 %}
3874 operand a2RegL() %{
3875 constraint(ALLOC_IN_RC(a2_long_reg));
3876 match(RegL);
3877 match(mRegL);
3879 format %{ %}
3880 interface(REG_INTER);
3881 %}
3883 operand a3RegL() %{
3884 constraint(ALLOC_IN_RC(a3_long_reg));
3885 match(RegL);
3886 match(mRegL);
3888 format %{ %}
3889 interface(REG_INTER);
3890 %}
3892 operand t0RegL() %{
3893 constraint(ALLOC_IN_RC(t0_long_reg));
3894 match(RegL);
3895 match(mRegL);
3897 format %{ %}
3898 interface(REG_INTER);
3899 %}
3901 operand t1RegL() %{
3902 constraint(ALLOC_IN_RC(t1_long_reg));
3903 match(RegL);
3904 match(mRegL);
3906 format %{ %}
3907 interface(REG_INTER);
3908 %}
3910 operand t2RegL() %{
3911 constraint(ALLOC_IN_RC(t2_long_reg));
3912 match(RegL);
3913 match(mRegL);
3915 format %{ %}
3916 interface(REG_INTER);
3917 %}
3919 operand t3RegL() %{
3920 constraint(ALLOC_IN_RC(t3_long_reg));
3921 match(RegL);
3922 match(mRegL);
3924 format %{ %}
3925 interface(REG_INTER);
3926 %}
3928 operand a4RegL() %{
3929 constraint(ALLOC_IN_RC(a4_long_reg));
3930 match(RegL);
3931 match(mRegL);
3933 format %{ %}
3934 interface(REG_INTER);
3935 %}
3937 operand a5RegL() %{
3938 constraint(ALLOC_IN_RC(a5_long_reg));
3939 match(RegL);
3940 match(mRegL);
3942 format %{ %}
3943 interface(REG_INTER);
3944 %}
3946 operand a6RegL() %{
3947 constraint(ALLOC_IN_RC(a6_long_reg));
3948 match(RegL);
3949 match(mRegL);
3951 format %{ %}
3952 interface(REG_INTER);
3953 %}
3955 operand a7RegL() %{
3956 constraint(ALLOC_IN_RC(a7_long_reg));
3957 match(RegL);
3958 match(mRegL);
3960 format %{ %}
3961 interface(REG_INTER);
3962 %}
3964 // Floating register operands
3965 operand regF() %{
3966 constraint(ALLOC_IN_RC(flt_reg));
3967 match(RegF);
3969 format %{ %}
3970 interface(REG_INTER);
3971 %}
3973 //Double Precision Floating register operands
3974 operand regD() %{
3975 constraint(ALLOC_IN_RC(dbl_reg));
3976 match(RegD);
3978 format %{ %}
3979 interface(REG_INTER);
3980 %}
3982 //----------Memory Operands----------------------------------------------------
3983 // Indirect Memory Operand
3984 operand indirect(mRegP reg) %{
3985 constraint(ALLOC_IN_RC(p_reg));
3986 op_cost(10);
3987 match(reg);
3989 format %{ "[$reg] @ indirect" %}
3990 interface(MEMORY_INTER) %{
3991 base($reg);
3992 index(0x0); /* NO_INDEX */
3993 scale(0x0);
3994 disp(0x0);
3995 %}
3996 %}
3998 // Indirect Memory Plus Short Offset Operand
3999 operand indOffset8(mRegP reg, immL8 off)
4000 %{
4001 constraint(ALLOC_IN_RC(p_reg));
4002 op_cost(10);
4003 match(AddP reg off);
4005 format %{ "[$reg + $off (8-bit)] @ indOffset8" %}
4006 interface(MEMORY_INTER) %{
4007 base($reg);
4008 index(0x0); /* NO_INDEX */
4009 scale(0x0);
4010 disp($off);
4011 %}
4012 %}
4014 //FIXME: I think it's better to limit the immI to be 16-bit at most!
4015 // Indirect Memory Plus Long Offset Operand
4016 operand indOffset32(mRegP reg, immL32 off) %{
4017 constraint(ALLOC_IN_RC(p_reg));
4018 op_cost(20);
4019 match(AddP reg off);
4021 format %{ "[$reg + $off (32-bit)] @ indOffset32" %}
4022 interface(MEMORY_INTER) %{
4023 base($reg);
4024 index(0x0); /* NO_INDEX */
4025 scale(0x0);
4026 disp($off);
4027 %}
4028 %}
4030 /*
2012/8/15 Jin: indOffset32X will cause a very large offset (more than 16-bit).
4033 MIPS:
4034 lui t5, 0x3218 ; {oop('java/lang/System')}
4035 addiu t5, t5, 0x1ac8
4036 lw t5, 0x15c(t5)
4038 X86:
4039 move 0x15c, ebx ; {oop('java/lang/System')}
4040 move 0x3c247320(ebx), ebx
4041 */
4042 /*
4043 // Indirect Memory Plus Long Offset Operand
4044 operand indOffset32X(mRegI reg, immP off) %{
4045 match(AddP off reg);
4047 format %{ "indOffset32X [$reg + $off]" %}
4048 interface(MEMORY_INTER) %{
4049 base($reg);
4050 // index(0x4);
4051 index(0x0);
4052 scale(0x0);
4053 disp($off);
4054 %}
4055 %}
4056 */
4058 // Indirect Memory Plus Index Register
4059 operand indIndex(mRegP addr, mRegL index) %{
4060 constraint(ALLOC_IN_RC(p_reg));
4061 match(AddP addr index);
4063 op_cost(10);
4064 format %{"[$addr + $index] @ indIndex" %}
4065 interface(MEMORY_INTER) %{
4066 base($addr);
4067 index($index);
4068 scale(0x0);
4069 disp(0x0);
4070 %}
4071 %}
4073 operand indirectNarrowKlass(mRegN reg)
4074 %{
4075 predicate(Universe::narrow_klass_shift() == 0);
4076 constraint(ALLOC_IN_RC(p_reg));
4077 op_cost(10);
4078 match(DecodeNKlass reg);
4080 format %{ "[$reg] @ indirectNarrowKlass" %}
4081 interface(MEMORY_INTER) %{
4082 base($reg);
4083 index(0x4);
4084 scale(0x0);
4085 disp(0x0);
4086 %}
4087 %}
4089 operand indOffset8NarrowKlass(mRegN reg, immL8 off)
4090 %{
4091 predicate(Universe::narrow_klass_shift() == 0);
4092 constraint(ALLOC_IN_RC(p_reg));
4093 op_cost(10);
4094 match(AddP (DecodeNKlass reg) off);
4096 format %{ "[$reg + $off (8-bit)] @ indOffset8NarrowKlass" %}
4097 interface(MEMORY_INTER) %{
4098 base($reg);
4099 index(0x4);
4100 scale(0x0);
4101 disp($off);
4102 %}
4103 %}
4105 operand indOffset32NarrowKlass(mRegN reg, immL32 off)
4106 %{
4107 predicate(Universe::narrow_klass_shift() == 0);
4108 constraint(ALLOC_IN_RC(p_reg));
4109 op_cost(10);
4110 match(AddP (DecodeNKlass reg) off);
4112 format %{ "[$reg + $off (32-bit)] @ indOffset32NarrowKlass" %}
4113 interface(MEMORY_INTER) %{
4114 base($reg);
4115 index(0x4);
4116 scale(0x0);
4117 disp($off);
4118 %}
4119 %}
4121 operand indIndexOffsetNarrowKlass(mRegN reg, mRegL lreg, immL32 off)
4122 %{
4123 predicate(Universe::narrow_klass_shift() == 0);
4124 constraint(ALLOC_IN_RC(p_reg));
4125 match(AddP (AddP (DecodeNKlass reg) lreg) off);
4127 op_cost(10);
4128 format %{"[$reg + $off + $lreg] @ indIndexOffsetNarrowKlass" %}
4129 interface(MEMORY_INTER) %{
4130 base($reg);
4131 index($lreg);
4132 scale(0x0);
4133 disp($off);
4134 %}
4135 %}
4137 operand indIndexNarrowKlass(mRegN reg, mRegL lreg)
4138 %{
4139 predicate(Universe::narrow_klass_shift() == 0);
4140 constraint(ALLOC_IN_RC(p_reg));
4141 match(AddP (DecodeNKlass reg) lreg);
4143 op_cost(10);
4144 format %{"[$reg + $lreg] @ indIndexNarrowKlass" %}
4145 interface(MEMORY_INTER) %{
4146 base($reg);
4147 index($lreg);
4148 scale(0x0);
4149 disp(0x0);
4150 %}
4151 %}
4153 // Indirect Memory Operand
4154 operand indirectNarrow(mRegN reg)
4155 %{
4156 predicate(Universe::narrow_oop_shift() == 0);
4157 constraint(ALLOC_IN_RC(p_reg));
4158 op_cost(10);
4159 match(DecodeN reg);
4161 format %{ "[$reg] @ indirectNarrow" %}
4162 interface(MEMORY_INTER) %{
4163 base($reg);
4164 index(0x0);
4165 scale(0x0);
4166 disp(0x0);
4167 %}
4168 %}
4170 //----------Load Long Memory Operands------------------------------------------
4171 // The load-long idiom will use its address expression again after loading
4172 // the first word of the long. If the load-long destination overlaps with
4173 // registers used in the addressing expression, the 2nd half will be loaded
4174 // from a clobbered address. Fix this by requiring that load-long use
4175 // address registers that do not overlap with the load-long target.
4177 // load-long support
4178 operand load_long_RegP() %{
4179 constraint(ALLOC_IN_RC(p_reg));
4180 match(RegP);
4181 match(mRegP);
4182 op_cost(100);
4183 format %{ %}
4184 interface(REG_INTER);
4185 %}
4187 // Indirect Memory Operand Long
4188 operand load_long_indirect(load_long_RegP reg) %{
4189 constraint(ALLOC_IN_RC(p_reg));
4190 match(reg);
4192 format %{ "[$reg]" %}
4193 interface(MEMORY_INTER) %{
4194 base($reg);
4195 index(0x0); /* FIXME: In X86, index==0 means none indirect register. While In MIPS, we indicates 0 for the same meaning */
4196 scale(0x0);
4197 disp(0x0);
4198 %}
4199 %}
4201 // Indirect Memory Plus Long Offset Operand
4202 operand load_long_indOffset32(load_long_RegP reg, immL32 off) %{
4203 match(AddP reg off);
4205 format %{ "[$reg + $off]" %}
4206 interface(MEMORY_INTER) %{
4207 base($reg);
4208 index(0x0);
4209 scale(0x0);
4210 disp($off);
4211 %}
4212 %}
4214 //----------Conditional Branch Operands----------------------------------------
4215 // Comparison Op - This is the operation of the comparison, and is limited to
4216 // the following set of codes:
4217 // L (<), LE (<=), G (>), GE (>=), E (==), NE (!=)
4218 //
4219 // Other attributes of the comparison, such as unsignedness, are specified
4220 // by the comparison instruction that sets a condition code flags register.
4221 // That result is represented by a flags operand whose subtype is appropriate
4222 // to the unsignedness (etc.) of the comparison.
4223 //
4224 // Later, the instruction which matches both the Comparison Op (a Bool) and
4225 // the flags (produced by the Cmp) specifies the coding of the comparison op
4226 // by matching a specific subtype of Bool operand below, such as cmpOpU.
4228 // Comparison Code
4229 operand cmpOp() %{
4230 match(Bool);
4232 format %{ "" %}
4233 interface(COND_INTER) %{
4234 equal(0x01);
4235 not_equal(0x02);
4236 greater(0x03);
4237 greater_equal(0x04);
4238 less(0x05);
4239 less_equal(0x06);
4240 overflow(0x7);
4241 no_overflow(0x8);
4242 %}
4243 %}
4246 // Comparison Code
4247 // Comparison Code, unsigned compare. Used by FP also, with
4248 // C2 (unordered) turned into GT or LT already. The other bits
4249 // C0 and C3 are turned into Carry & Zero flags.
4250 operand cmpOpU() %{
4251 match(Bool);
4253 format %{ "" %}
4254 interface(COND_INTER) %{
4255 equal(0x01);
4256 not_equal(0x02);
4257 greater(0x03);
4258 greater_equal(0x04);
4259 less(0x05);
4260 less_equal(0x06);
4261 overflow(0x7);
4262 no_overflow(0x8);
4263 %}
4264 %}
4266 /*
4267 // Comparison Code, unsigned compare. Used by FP also, with
4268 // C2 (unordered) turned into GT or LT already. The other bits
4269 // C0 and C3 are turned into Carry & Zero flags.
4270 operand cmpOpU() %{
4271 match(Bool);
4273 format %{ "" %}
4274 interface(COND_INTER) %{
4275 equal(0x4);
4276 not_equal(0x5);
4277 less(0x2);
4278 greater_equal(0x3);
4279 less_equal(0x6);
4280 greater(0x7);
4281 %}
4282 %}
4283 */
4284 /*
4285 // Comparison Code for FP conditional move
4286 operand cmpOp_fcmov() %{
4287 match(Bool);
4289 format %{ "" %}
4290 interface(COND_INTER) %{
4291 equal (0x01);
4292 not_equal (0x02);
4293 greater (0x03);
4294 greater_equal(0x04);
4295 less (0x05);
4296 less_equal (0x06);
4297 %}
4298 %}
4300 // Comparision Code used in long compares
4301 operand cmpOp_commute() %{
4302 match(Bool);
4304 format %{ "" %}
4305 interface(COND_INTER) %{
4306 equal(0x4);
4307 not_equal(0x5);
4308 less(0xF);
4309 greater_equal(0xE);
4310 less_equal(0xD);
4311 greater(0xC);
4312 %}
4313 %}
4314 */
4316 /*
4317 //----------Special Memory Operands--------------------------------------------
4318 // Stack Slot Operand - This operand is used for loading and storing temporary
4319 // values on the stack where a match requires a value to
4320 // flow through memory.
4321 operand stackSlotP(sRegP reg) %{
4322 constraint(ALLOC_IN_RC(stack_slots));
4323 // No match rule because this operand is only generated in matching
4324 op_cost(50);
4325 format %{ "[$reg]" %}
4326 interface(MEMORY_INTER) %{
4327 base(0x1d); // SP
4328 index(0x0); // No Index
4329 scale(0x0); // No Scale
4330 disp($reg); // Stack Offset
4331 %}
4332 %}
4334 operand stackSlotI(sRegI reg) %{
4335 constraint(ALLOC_IN_RC(stack_slots));
4336 // No match rule because this operand is only generated in matching
4337 op_cost(50);
4338 format %{ "[$reg]" %}
4339 interface(MEMORY_INTER) %{
4340 base(0x1d); // SP
4341 index(0x0); // No Index
4342 scale(0x0); // No Scale
4343 disp($reg); // Stack Offset
4344 %}
4345 %}
4347 operand stackSlotF(sRegF reg) %{
4348 constraint(ALLOC_IN_RC(stack_slots));
4349 // No match rule because this operand is only generated in matching
4350 op_cost(50);
4351 format %{ "[$reg]" %}
4352 interface(MEMORY_INTER) %{
4353 base(0x1d); // SP
4354 index(0x0); // No Index
4355 scale(0x0); // No Scale
4356 disp($reg); // Stack Offset
4357 %}
4358 %}
4360 operand stackSlotD(sRegD reg) %{
4361 constraint(ALLOC_IN_RC(stack_slots));
4362 // No match rule because this operand is only generated in matching
4363 op_cost(50);
4364 format %{ "[$reg]" %}
4365 interface(MEMORY_INTER) %{
4366 base(0x1d); // SP
4367 index(0x0); // No Index
4368 scale(0x0); // No Scale
4369 disp($reg); // Stack Offset
4370 %}
4371 %}
4373 operand stackSlotL(sRegL reg) %{
4374 constraint(ALLOC_IN_RC(stack_slots));
4375 // No match rule because this operand is only generated in matching
4376 op_cost(50);
4377 format %{ "[$reg]" %}
4378 interface(MEMORY_INTER) %{
4379 base(0x1d); // SP
4380 index(0x0); // No Index
4381 scale(0x0); // No Scale
4382 disp($reg); // Stack Offset
4383 %}
4384 %}
4385 */
4388 //------------------------OPERAND CLASSES--------------------------------------
4389 //opclass memory( direct, indirect, indOffset16, indOffset32, indOffset32X, indIndexOffset );
4390 opclass memory( indirect, indirectNarrow, indOffset8, indOffset32, indIndex, load_long_indirect, load_long_indOffset32 );
4393 //----------PIPELINE-----------------------------------------------------------
4394 // Rules which define the behavior of the target architectures pipeline.
4396 pipeline %{
4398 //----------ATTRIBUTES---------------------------------------------------------
3399 attributes %{
3400   fixed_size_instructions;        // Fixed size instructions
3401   branch_has_delay_slot;	  // branches have a delay slot on this core (gs2)
3402   max_instructions_per_bundle = 4;   // Up to 4 instructions per bundle (comment previously said 5, contradicting the value)
3403   instruction_unit_size = 4;         // An instruction is 4 bytes long
3404   instruction_fetch_unit_size = 32;  // The processor fetches one line
3405   instruction_fetch_units = 1;       // of 32 bytes
3407   // List of nop instructions
3408   nops( MachNop );
3409 %}
4411 //----------RESOURCES----------------------------------------------------------
4412 // Resources are the functional units available to the machine
4414 // godson2c pipeline
4415 // 4 decoders, a "bundle" is the limit 4 instructions decoded per cycle
4416 // 1 load/store ops per cycle, 1 branch, 2 FPU,
4417 // 2 ALU op, only ALU0 handles mul/div instructions.
4418 resources( D0, D1, D2, D3, DECODE = D0 | D1 | D2 | D3,
4419 MEM, BR, FPU0, FPU1, FPU = FPU0 | FPU1,
4420 ALU0, ALU1, ALU = ALU0 | ALU1 );
4422 //----------PIPELINE DESCRIPTION-----------------------------------------------
4423 // Pipeline Description specifies the stages in the machine's pipeline
4425 // godson 2c pipeline
4426 // i dont know the detail of the godson 2c pipeline, leave it blank now.
4427 // by yjl 2/21/2006
4428 pipe_desc(S0, S1, S2, S3, S4, S5, S6);
4430 //----------PIPELINE CLASSES---------------------------------------------------
4431 // Pipeline Classes describe the stages in which input and output are
4432 // referenced by the hardware pipeline.
4434 // COMPILE SKIPPED
4435 // Then: _reg
4436 // Then: _reg if there is a 2nd register
4437 // Then: _long if it's a pair of instructions implementing a long
4438 // Then: _fat if it requires the big decoder
4439 // Or: _mem if it requires the big decoder and a memory unit.
4441 // Integer Load from Memory
4442 pipe_class ialu_reg_mem(memory mem, mRegI src) %{
4443 single_instruction;
4444 mem : S3(read);
4445 src : S5(read);
4446 D0 : S0; // big decoder only
4447 ALU : S4; // any alu
4448 MEM : S3;
4449 %}
4451 // Integer ALU reg operation
4452 pipe_class ialu_reg(mRegI dst) %{
4453 single_instruction;
4454 dst : S4(write);
4455 dst : S3(read);
4456 DECODE : S0; // any decoder
4457 ALU : S3; // any alu
4458 %}
// Long ALU reg-mem operation
pipe_class ialu_reg_long_mem(mRegL dst, memory mem) %{
  instruction_count(2);
  dst : S5(write);
  mem : S3(read);
  D0 : S0(2); // big decoder only; twice
  ALU : S4(2); // any 2 alus
  MEM : S3(2); // both mems
%}

// Integer ALU reg-reg operation
pipe_class ialu_reg_reg(mRegI dst, mRegI src1, mRegI src2) %{
  single_instruction;
  dst : S4(write);
  src1 : S3(read);
  src2 : S3(read);
  DECODE : S0; // any decoder
  ALU : S3; // any alu
%}

// Integer ALU reg-imm operation
// (src2 is an immediate, so it needs no register read stage)
pipe_class ialu_reg_imm(mRegI dst, mRegI src1, immI src2) %{
  single_instruction;
  dst : S4(write);
  src1 : S3(read);
  DECODE : S0; // any decoder
  ALU : S3; // any alu
%}

// Integer ALU0 reg-reg operation
pipe_class ialu_reg_reg_alu0(mRegI dst, mRegI src1, mRegI src2) %{
  single_instruction;
  dst : S4(write);
  src1 : S3(read);
  src2 : S3(read);
  D0 : S0; // Big decoder only
  ALU0 : S3; // only alu0
%}

// Integer ALU reg-mem operation
pipe_class ialu_reg_reg_mem(mRegI dst,mRegI src1, memory src2) %{
  single_instruction;
  dst : S4(write);
  src1 : S3(read);
  src2 : S3(read);
  D0 : S0; // big decoder only
  ALU : S4; // any alu
  MEM : S3;
%}
// Float reg-mem operation
pipe_class fpu_reg_mem(regD dst, memory mem) %{
  instruction_count(2);
  dst : S5(write);
  mem : S3(read);
  D0 : S0; // big decoder only
  DECODE : S1; // any decoder for FPU POP
  FPU : S4;
  MEM : S3; // any mem
%}

// Float reg-reg operation
pipe_class fpu_reg_reg(regD dst, regD src1, regD src2) %{
  instruction_count(2);
  dst : S4(write);
  src1 : S3(read);
  src2 : S3(read);
  DECODE : S0(2); // any 2 decoders
  FPU : S3;
%}

// Long ALU reg operation using big decoder
pipe_class ialu_reg_long_fat(mRegL dst) %{
  instruction_count(2);
  dst : S4(write);
  dst : S3(read);
  D0 : S0(2); // big decoder only; twice
  ALU : S3(2); // any 2 alus
%}
// UnConditional branch
pipe_class pipe_jmp( label labl ) %{
  single_instruction;
  BR : S3;
%}

// Integer ALU reg operation using big decoder
pipe_class ialu_reg_fat(mRegI dst) %{
  single_instruction;
  dst : S4(write);
  dst : S3(read);
  D0 : S0; // big decoder only
  ALU : S3; // any alu
%}

// Conditional branch on two pointer registers
pipe_class pipe_branchP( cmpOp cmp, mRegP op1, mRegP op2, label labl ) %{
  single_instruction;
  op1 : S1(read);
  op2 : S1(read);
  BR : S3;
%}

// Generic big/slow expanded idiom
pipe_class pipe_slow( ) %{
  instruction_count(10); multiple_bundles; force_serialization;
  fixed_latency(100);
  D0 : S0(2);
  MEM : S3(2);
%}

// Integer register stored to memory
pipe_class ialu_mem_reg(memory mem, mRegI src) %{
  single_instruction;
  mem : S3(read);
  src : S5(read);
  D0 : S0; // big decoder only
  ALU : S4; // any alu
  MEM : S3;
%}

// Integer ALU operation with no source operands (e.g. constant load)
pipe_class ialu_none(mRegI dst) %{
  single_instruction;
  dst : S5(write);
  ALU : S4;
%}
// Integer Store to Memory (immediate source)
pipe_class ialu_mem_imm(memory mem) %{
  single_instruction;
  mem : S3(read);
  D0 : S0; // big decoder only
  ALU : S4; // any alu
  MEM : S3;
%}

// Float load constant
pipe_class fpu_reg_con(regD dst) %{
  instruction_count(2);
  dst : S5(write);
  D0 : S0; // big decoder only for the load
  DECODE : S1; // any decoder for FPU POP
  FPU : S4;
  MEM : S3; // any mem
%}

// Float mem-reg operation
pipe_class fpu_mem_reg(memory mem, regD src) %{
  instruction_count(2);
  src : S5(read);
  mem : S3(read);
  DECODE : S0; // any decoder for FPU PUSH
  D0 : S1; // big decoder only
  FPU : S4;
  MEM : S3; // any mem
%}

// Conditional branch on a flags register
pipe_class pipe_jcc( cmpOp cmp, FlagsReg cr, label labl ) %{
  single_instruction;
  cr : S1(read);
  BR : S3;
%}

// Allocation idiom (compare-and-exchange style sequence)
pipe_class pipe_cmpxchg( mRegP dst, mRegP heap_ptr ) %{
  instruction_count(1); force_serialization;
  fixed_latency(6);
  heap_ptr : S3(read);
  DECODE : S0(3);
  D0 : S2;
  MEM : S3;
  ALU : S3(2);
  dst : S5(write);
  BR : S5;
%}

// The real do-nothing guy
pipe_class empty( ) %{
  instruction_count(0);
%}
// Long Store to Memory
pipe_class ialu_mem_long_reg(memory mem, mRegL src) %{
  instruction_count(2);
  mem : S3(read);
  src : S5(read);
  D0 : S0(2); // big decoder only; twice
  ALU : S4(2); // any 2 alus
  MEM : S3(2); // Both mems
%}

// Long ALU reg-reg operation
pipe_class ialu_reg_reg_long(mRegL dst, mRegL src) %{
  instruction_count(2);
  dst : S4(write);
  src : S3(read);
  DECODE : S0(2); // any 2 decoders
  ALU : S3(2); // both alus
%}

// Long ALU reg operation
pipe_class ialu_reg_long(mRegL dst, mRegL src) %{
  instruction_count(2);
  dst : S4(write);
  src : S3(read);
  DECODE : S0(2); // any 2 decoders
  ALU : S3(2); // both alus
%}

// Conditional move reg-reg
pipe_class pipe_cmov_reg( mRegI dst, mRegI src ) %{
  single_instruction;
  dst : S4(write);
  src : S3(read);
  DECODE : S0; // any decoder
%}

// Conditional move reg-reg long
pipe_class pipe_cmov_reg_long(mRegL dst, mRegL src) %{
  single_instruction;
  dst : S4(write);
  src : S3(read);
  DECODE : S0(2); // any 2 decoders
%}
4694 %}
4698 //----------INSTRUCTIONS-------------------------------------------------------
4699 //
4700 // match -- States which machine-independent subtree may be replaced
4701 // by this instruction.
4702 // ins_cost -- The estimated cost of this instruction is used by instruction
4703 // selection to identify a minimum cost tree of machine
4704 // instructions that matches a tree of machine-independent
4705 // instructions.
4706 // format -- A string providing the disassembly for this instruction.
4707 // The value of an instruction's operand may be inserted
4708 // by referring to it with a '$' prefix.
4709 // opcode -- Three instruction opcodes may be provided. These are referred
4710 // to within an encode class as $primary, $secondary, and $tertiary
4711 // respectively. The primary opcode is commonly used to
4712 // indicate the type of machine instruction, while secondary
4713 // and tertiary are often used for prefix options or addressing
4714 // modes.
4715 // ins_encode -- A list of encode classes with parameters. The encode class
4716 // name must have been defined in an 'enc_class' specification
4717 // in the encode section of the architecture description.
// Load Integer (32-bit, sign-extended by lw)
instruct loadI(mRegI dst, memory mem) %{
  match(Set dst (LoadI mem));

  ins_cost(125);
  format %{ "lw $dst, $mem #@loadI" %}
  ins_encode (load_I_enc(dst, mem));
  ins_pipe( ialu_reg_mem );
%}

// Load Long.
instruct loadL(mRegL dst, memory mem) %{
  // NOTE(review): a single 64-bit ld is naturally atomic here, so the
  // atomic-access predicate is commented out -- confirm that is intentional.
  // predicate(!((LoadLNode*)n)->require_atomic_access());
  match(Set dst (LoadL mem));

  ins_cost(250);
  format %{ "ld $dst, $mem #@loadL" %}
  ins_encode(load_L_enc(dst, mem));
  ins_pipe( ialu_reg_long_mem );
%}

// Load Long - UNaligned
instruct loadL_unaligned(mRegL dst, memory mem) %{
  match(Set dst (LoadL_unaligned mem));

  // FIXME: Jin: Need more effective ldl/ldr
  ins_cost(450);
  format %{ "ld $dst, $mem #@loadL_unaligned\n\t" %}
  ins_encode(load_L_enc(dst, mem));
  ins_pipe( ialu_reg_long_mem );
%}

// Store Long (non-atomic case)
instruct storeL_reg(memory mem, mRegL src) %{
  predicate(!((StoreLNode*)n)->require_atomic_access());
  match(Set mem (StoreL mem src));

  ins_cost(200);
  format %{ "sd $mem, $src #@storeL_reg\n" %}
  ins_encode(store_L_reg_enc(mem, src));
  ins_pipe( ialu_mem_long_reg );
%}
// Volatile (atomic) Store Long.
// On this 64-bit MIPS port a single sd is naturally atomic, so no 32-bit
// split is required.  The old comment/format describing a two-word
// "sw lo / sw hi" sequence (inherited from the 32-bit x86 scheme) was
// stale: the encoder has always emitted one sd.
instruct storeL_reg_atomic(memory mem, mRegL src) %{
  predicate(((StoreLNode*)n)->require_atomic_access());
  match(Set mem (StoreL mem src));

  ins_cost(200);
  format %{ "sd $mem, $src #@storeL_reg_atomic" %}
  ins_encode %{
    Register src = as_Register($src$$reg);

    int base  = $mem$$base;
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp  = $mem$$disp;

    if( scale != 0 ) Unimplemented();
    if( index != 0 ) {
      // reg+reg addressing: form base+index in AT once (the old code
      // duplicated this addu in both arms of the simm16 test).
      __ addu(AT, as_Register(base), as_Register(index));
      if( Assembler::is_simm16(disp) ) {
        __ sd(src, AT, disp);
      } else {
        // Displacement does not fit in 16 bits: materialize it in T9.
        __ move(T9, disp);
        __ addu(AT, AT, T9);
        __ sd(src, AT, 0);
      }
    } else {
      if( Assembler::is_simm16(disp) ) {
        // Store directly off the base register; no need to copy it into AT.
        __ sd(src, as_Register(base), disp);
      } else {
        __ move(T9, disp);
        __ addu(AT, as_Register(base), T9);
        __ sd(src, AT, 0);
      }
    }
  %}
  ins_pipe( ialu_mem_long_reg );
%}
// Store Long zero (R0 as source)
instruct storeL_immL0(memory mem, immL0 zero) %{
  match(Set mem (StoreL mem zero));

  ins_cost(180);
  format %{ "sd $mem, zero #@storeL_immL0" %}
  ins_encode(store_L_immL0_enc(mem, zero));
  ins_pipe( ialu_mem_long_reg );
%}

// Store Long immediate
instruct storeL_imm(memory mem, immL src) %{
  match(Set mem (StoreL mem src));

  ins_cost(200);
  // Fixed: the format string used to claim "sw"; this is a 64-bit long
  // store (cf. storeL_immL0 above), so show "sd" in the disassembly.
  format %{ "sd $mem, $src #@storeL_imm" %}
  ins_encode(store_L_immL_enc(mem, src));
  ins_pipe( ialu_mem_long_reg );
%}
// Load Compressed Pointer (32-bit zero-extended narrow oop)
instruct loadN(mRegN dst, memory mem)
%{
   match(Set dst (LoadN mem));

   ins_cost(125); // XXX
   format %{ "lwu $dst, $mem\t# compressed ptr @ loadN" %}
   //TODO: Address should be implemented
   /*
   ins_encode %{
     __ lwu($dst$$Register, $mem$$Address);
   %}
   */
   ins_encode (load_N_enc(dst, mem));
   ins_pipe(ialu_reg_mem); // XXX
%}

// Load Pointer (full 64-bit)
instruct loadP(mRegP dst, memory mem) %{
  match(Set dst (LoadP mem));

  ins_cost(125);
  format %{ "ld $dst, $mem #@loadP" %}
  ins_encode (load_P_enc(dst, mem));
  ins_pipe( ialu_reg_mem );
%}

// Load Klass Pointer (same encoding as loadP)
instruct loadKlass(mRegP dst, memory mem) %{
  match(Set dst (LoadKlass mem));

  ins_cost(125);
  format %{ "MOV $dst,$mem @ loadKlass" %}
  ins_encode (load_P_enc(dst, mem));
  ins_pipe( ialu_reg_mem );
%}

// Load narrow Klass Pointer
instruct loadNKlass(mRegN dst, memory mem)
%{
  match(Set dst (LoadNKlass mem));

  ins_cost(125); // XXX
  format %{ "lwu $dst, $mem\t# compressed klass ptr" %}
  ins_encode (load_N_enc(dst, mem));
  /*
  ins_encode %{
    __ lwu($dst$$Register, $mem$$Address);
  %}
  */
  ins_pipe(ialu_reg_mem); // XXX
%}

// Load Constant: 32-bit integer immediate
instruct loadConI(mRegI dst, immI src) %{
  match(Set dst src);

  format %{ "mov $dst, $src #@loadConI" %}
  ins_encode %{
    Register dst = $dst$$Register;
    int value = $src$$constant;
    __ move(dst, value);
  %}
  ins_pipe( ialu_reg_fat );
%}

// Load Constant: 64-bit long immediate
instruct loadConL(mRegL dst, immL src) %{
  match(Set dst src);
  // effect(ILL cr);
  ins_cost(200);
  format %{ "li $dst, $src #@loadConL\t"
  %}
  ins_encode %{
    Register dst_reg = as_Register($dst$$reg);
    __ li(dst_reg, (long)$src$$constant);
  %}
  ins_pipe( ialu_reg_long_fat );
%}

// Load Range (array length slot)
instruct loadRange(mRegI dst, memory mem) %{
  match(Set dst (LoadRange mem));

  ins_cost(125);
  format %{ "MOV $dst,$mem @ loadRange" %}
  ins_encode(load_I_enc(dst, mem));
  ins_pipe( ialu_reg_mem );
%}

// Store Pointer
instruct storeP(memory mem, mRegP src ) %{
  match(Set mem (StoreP mem src));

  ins_cost(125);
  format %{ "sd $src, $mem #@storeP" %}
  ins_encode(store_P_reg_enc(mem, src));
  ins_pipe( ialu_mem_reg );
%}
4931 /*
4932 [Ref: loadConP]
4934 Error:
4935 0x2d4b6d40: lui t9, 0x4f <--- handle
4936 0x2d4b6d44: addiu t9, t9, 0xffff808c
4937 0x2d4b6d48: sw t9, 0x4(s2)
4939 OK:
4940 0x2cc5ed40: lui t9, 0x336a <--- klass
4941 0x2cc5ed44: addiu t9, t9, 0x5a10
4942 0x2cc5ed48: sw t9, 0x4(s2)
4943 */
// Store Pointer Immediate; null pointers or constant oops that do not
// need card-mark barriers.

// Store NULL Pointer, mark word, or other simple pointer constant.
instruct storeImmP(memory mem, immP31 src) %{
  match(Set mem (StoreP mem src));

  ins_cost(150);
  format %{ "mov $mem, $src #@storeImmP" %}
  ins_encode(store_P_immP_enc(mem, src));
  ins_pipe( ialu_mem_imm );
%}

// Store Byte Immediate
instruct storeImmB(memory mem, immI8 src) %{
  match(Set mem (StoreB mem src));

  ins_cost(150);
  format %{ "movb $mem, $src #@storeImmB" %}
  ins_encode(store_B_immI_enc(mem, src));
  ins_pipe( ialu_mem_imm );
%}

// Store Compressed Pointer
instruct storeN(memory mem, mRegN src)
%{
  match(Set mem (StoreN mem src));

  ins_cost(125); // XXX
  format %{ "sw $mem, $src\t# compressed ptr @ storeN" %}
  ins_encode(store_N_reg_enc(mem, src));
  ins_pipe(ialu_mem_reg);
%}

// Store Compressed Klass Pointer (same encoding as storeN)
instruct storeNKlass(memory mem, mRegN src)
%{
  match(Set mem (StoreNKlass mem src));

  ins_cost(125); // XXX
  format %{ "sw $mem, $src\t# compressed klass ptr" %}
  ins_encode(store_N_reg_enc(mem, src));
  ins_pipe(ialu_mem_reg);
%}

// Store compressed NULL; only valid when both narrow bases are zero.
instruct storeImmN0(memory mem, immN0 zero)
%{
  predicate(Universe::narrow_oop_base() == NULL && Universe::narrow_klass_base() == NULL);
  match(Set mem (StoreN mem zero));

  ins_cost(125); // XXX
  format %{ "storeN0 $mem, R12\t# compressed ptr (R12_heapbase==0)" %}
  ins_encode(storeImmN0_enc(mem, zero));
  ins_pipe(ialu_mem_reg);
%}

// Store Compressed Pointer Immediate
instruct storeImmN(memory mem, immN src)
%{
  match(Set mem (StoreN mem src));

  ins_cost(150); // XXX
  format %{ "storeImmN $mem, $src\t# compressed ptr @ storeImmN" %}
  ins_encode(storeImmN_enc(mem, src));
  ins_pipe(ialu_mem_imm);
%}

// Store Compressed Klass Pointer Immediate
instruct storeImmNKlass(memory mem, immNKlass src)
%{
  match(Set mem (StoreNKlass mem src));

  ins_cost(150); // XXX
  format %{ "sw $mem, $src\t# compressed klass ptr" %}
  ins_encode(storeImmNKlass_enc(mem, src));
  ins_pipe(ialu_mem_imm);
%}

// Store Byte
instruct storeB(memory mem, mRegI src) %{
  match(Set mem (StoreB mem src));

  ins_cost(125);
  format %{ "sb $src, $mem #@storeB" %}
  ins_encode(store_B_reg_enc(mem, src));
  ins_pipe( ialu_mem_reg );
%}
// Load Byte (8bit signed)
instruct loadB(mRegI dst, memory mem) %{
  match(Set dst (LoadB mem));

  ins_cost(125);
  format %{ "lb $dst, $mem #@loadB" %}
  ins_encode(load_B_enc(dst, mem));
  ins_pipe( ialu_reg_mem );
%}

// Load Byte (8bit UNsigned)
instruct loadUB(mRegI dst, memory mem) %{
  match(Set dst (LoadUB mem));

  ins_cost(125);
  format %{ "lbu $dst, $mem #@loadUB" %}
  ins_encode(load_UB_enc(dst, mem));
  ins_pipe( ialu_reg_mem );
%}

// Load Short (16bit signed)
instruct loadS(mRegI dst, memory mem) %{
  match(Set dst (LoadS mem));

  ins_cost(125);
  format %{ "lh $dst, $mem #@loadS" %}
  ins_encode(load_S_enc(dst, mem));
  ins_pipe( ialu_reg_mem );
%}
//TODO: check whether it is necessary to implement 'prefetch' properly in the future (probably never). LEE
// Prefetch for allocation.
instruct prefetchAllocNTA( memory mem ) %{
  match(PrefetchAllocation mem);
  ins_cost(400);
  format %{ "PREFETCHNTA $mem\t# Prefetch allocation to non-temporal cache for write just sync" %}
  ins_encode %{
    // NOTE(review): no prefetch instruction is emitted; only a sync.
    // Verify a memory barrier is really the intended substitute here.
    __ sync();
  %}
  ins_pipe(pipe_slow);
%}

// Store Integer Immediate
instruct storeImmI(memory mem, immI src) %{
  match(Set mem (StoreI mem src));

  ins_cost(150);
  format %{ "mov $mem, $src #@storeImmI" %}
  ins_encode(store_I_immI_enc(mem, src));
  ins_pipe( ialu_mem_imm );
%}

// Store Integer
instruct storeI(memory mem, mRegI src) %{
  match(Set mem (StoreI mem src));

  ins_cost(125);
  format %{ "sw $mem, $src #@storeI" %}
  ins_encode(store_I_reg_enc(mem, src));
  ins_pipe( ialu_mem_reg );
%}

// Load Float
instruct loadF(regF dst, memory mem) %{
  match(Set dst (LoadF mem));

  ins_cost(150);
  format %{ "loadF $dst, $mem #@loadF" %}
  ins_encode(load_F_enc(dst, mem));
  ins_pipe( fpu_reg_mem );
%}
// Load pointer constant, emitting a relocation record when the constant
// is an oop or metadata so GC can patch the embedded value.
instruct loadConP(mRegP dst, immP src) %{
  match(Set dst src);

  format %{ "li $dst, $src #@loadConP" %}

  ins_encode %{
    Register dst = $dst$$Register;
    long* value = (long*)$src$$constant;
    bool is_need_reloc = $src->constant_reloc() != relocInfo::none;

    /* During GC, klassOop may be moved to new position in the heap.
     * It must be relocated.
     * Refer: [c1_LIRAssembler_mips.cpp] jobject2reg()
     */
    if (is_need_reloc) {
      if($src->constant_reloc() == relocInfo::metadata_type){
        int klass_index = __ oop_recorder()->find_index((Klass*)value);
        RelocationHolder rspec = metadata_Relocation::spec(klass_index);

        __ relocate(rspec);
        __ li48(dst, (long)value);
      }

      if($src->constant_reloc() == relocInfo::oop_type){
        int oop_index = __ oop_recorder()->find_index((jobject)value);
        RelocationHolder rspec = oop_Relocation::spec(oop_index);

        __ relocate(rspec);
        __ li48(dst, (long)value);
      }
      // NOTE(review): if constant_reloc() is some other non-none type,
      // no instruction is emitted at all -- confirm that only metadata
      // and oop relocations can reach this instruct.
    } else {
      // Plain address constant: no relocation needed.
      __ li48(dst, (long)value);
    }
  %}

  ins_pipe( ialu_reg_fat );
%}

// Load NULL pointer constant.
instruct loadConP0(mRegP dst, immP0 src)
%{
  match(Set dst src);

  ins_cost(50);
  format %{ "mov $dst, R0\t# ptr" %}
  ins_encode %{
    Register dst_reg = $dst$$Register;
    __ move(dst_reg, R0);
  %}
  ins_pipe(ialu_reg);
%}

// Load compressed NULL pointer constant.
// NOTE(review): MIPS has no condition-flag register; the FlagsReg KILL is
// presumably inherited from the x86 port -- verify it is still required.
instruct loadConN0(mRegN dst, immN0 src, FlagsReg cr) %{
  match(Set dst src);
  effect(KILL cr);
  format %{ "move $dst, R0\t# compressed NULL ptr" %}
  ins_encode %{
    __ move($dst$$Register, R0);
  %}
  ins_pipe(ialu_reg);
%}
// Load compressed oop constant.
instruct loadConN(mRegN dst, immN src) %{
  match(Set dst src);

  ins_cost(125);
  format %{ "li $dst, $src\t# compressed ptr @ loadConN" %}
  ins_encode %{
    address con = (address)$src$$constant;
    if (con == NULL) {
      ShouldNotReachHere();
    } else {
      assert (UseCompressedOops, "should only be used for compressed headers");
      assert (Universe::heap() != NULL, "java heap should be initialized");
      assert (__ oop_recorder() != NULL, "this assembler needs an OopRecorder");

      Register dst = $dst$$Register;
      long* value = (long*)$src$$constant;
      int oop_index = __ oop_recorder()->find_index((jobject)value);
      RelocationHolder rspec = oop_Relocation::spec(oop_index);
      // NOTE(review): both branches emit the identical li48; only the
      // relocation record differs.  Also note the oop *index* (not the
      // encoded narrow oop) is loaded here, while loadConNKlass below loads
      // the encoded value -- presumably the relocation patches the real
      // narrow oop in; confirm this asymmetry is intentional.
      if(rspec.type()!=relocInfo::none){
        __ relocate(rspec, Assembler::narrow_oop_operand);
        __ li48(dst, oop_index);
      }
      else {
        __ li48(dst, oop_index);
      }
    }
  %}
  ins_pipe(ialu_reg_fat); // XXX
%}

// Load compressed klass constant.
instruct loadConNKlass(mRegN dst, immNKlass src) %{
  match(Set dst src);

  ins_cost(125);
  format %{ "li $dst, $src\t# compressed klass ptr" %}
  ins_encode %{
    address con = (address)$src$$constant;
    if (con == NULL) {
      ShouldNotReachHere();
    } else {
      Register dst = $dst$$Register;
      long* value = (long*)$src$$constant;

      int klass_index = __ oop_recorder()->find_index((Klass*)value);
      RelocationHolder rspec = metadata_Relocation::spec(klass_index);
      long narrowp = (long)Klass::encode_klass((Klass*)value);

      // Both branches emit the same li48 of the encoded narrow klass; only
      // the relocation record differs.
      if(rspec.type()!=relocInfo::none){
        __ relocate(rspec, Assembler::narrow_oop_operand);
        __ li48(dst, narrowp);
      }
      else {
        __ li48(dst, narrowp);
      }
    }
  %}
  ins_pipe(ialu_reg_fat); // XXX
%}
5220 /*
5221 // Load Stack Slot
5222 instruct loadSSI(mRegI dst, stackSlotI src) %{
5223 match(Set dst src);
5224 ins_cost(25);
5226 format %{ "MOV $dst,$src #@loadSSI" %}
5227 ins_encode %{
5228 Register dst = $dst$$Register;
5230 int base = $src$$base;
5231 int index = $src$$index;
5232 int scale = $src$$scale;
5233 int disp = $src$$disp;
5235 fprintf(stderr, "\n?????????????????????????\n");//fujie debug
5236 if( scale != 0 ) Unimplemented();
5237 if( index != 0 ) {
5238 __ add(AT, as_Register(base), as_Register(index));
5239 __ lw(dst, AT, disp);
5240 } else {
5241 __ lw(dst, as_Register(base), disp);
5242 }
5244 %}
5246 ins_pipe( ialu_reg_mem );
5247 %}
5249 // Load Stack Slot
5250 instruct loadSSP(mRegP dst, stackSlotP src) %{
5251 match(Set dst src);
5252 ins_cost(25);
5254 format %{ "MOV $dst,$src #@loadSSP" %}
5255 ins_encode %{
5256 Register dst = $dst$$Register;
5258 int base = $src$$base;
5259 int index = $src$$index;
5260 int scale = $src$$scale;
5261 int disp = $src$$disp;
5263 fprintf(stderr, "\n?????????????????????????\n");//fujie debug
5264 if( scale != 0 ) Unimplemented();
5265 if( index != 0 ) {
5266 __ add(AT, as_Register(base), as_Register(index));
5267 __ lw(dst, AT, disp);
5268 } else {
5269 __ lw(dst, as_Register(base), disp);
5270 }
5272 %}
5274 ins_pipe( ialu_reg_mem );
5275 %}
5276 */
//FIXME
// Tail Call; Jump from runtime stub to Java code.
// Also known as an 'interprocedural jump'.
// Target of jump will eventually return to caller.
// TailJump below removes the return address.
instruct TailCalljmpInd(mRegP jump_target, mRegP method_oop) %{
  match(TailCall jump_target method_oop );
  ins_cost(300);
  format %{ "JMP $jump_target \t# @TailCalljmpInd" %}

  ins_encode %{
    Register target = $jump_target$$Register;
    Register oop = $method_oop$$Register;

    /* 2012/10/12 Jin: RA will be used in generate_forward_exception() */
    __ push(RA);

    // Method oop is passed in S3; then jump to the target.
    __ move(S3, oop);
    __ jr(target);
    __ nop();  // branch delay slot
  %}

  ins_pipe( pipe_jmp );
%}

// Create exception oop: created by stack-crawling runtime code.
// Created exception is now available to this handler, and is setup
// just prior to jumping to this handler. No code emitted.
instruct CreateException( a0_RegP ex_oop )
%{
  match(Set ex_oop (CreateEx));

  // use the following format syntax
  format %{ "# exception oop is in A0; no code emitted @CreateException" %}
  ins_encode %{
    /* Jin: X86 leaves this function empty */
    __ block_comment("CreateException is empty in X86/MIPS");
  %}
  ins_pipe( empty );
  // ins_pipe( pipe_jmp );
%}
5321 /* 2012/9/14 Jin: The mechanism of exception handling is clear now.
5323 - Common try/catch:
5324 2012/9/14 Jin: [stubGenerator_mips.cpp] generate_forward_exception()
5325 |- V0, V1 are created
5326 |- T9 <= SharedRuntime::exception_handler_for_return_address
5327 `- jr T9
5328 `- the caller's exception_handler
5329 `- jr OptoRuntime::exception_blob
5330 `- here
5331 - Rethrow(e.g. 'unwind'):
5332 * The callee:
5333 |- an exception is triggered during execution
5334 `- exits the callee method through RethrowException node
5335 |- The callee pushes exception_oop(T0) and exception_pc(RA)
5336 `- The callee jumps to OptoRuntime::rethrow_stub()
5337 * In OptoRuntime::rethrow_stub:
5338 |- The VM calls _rethrow_Java to determine the return address in the caller method
5339 `- exits the stub with tailjmpInd
5340 |- pops exception_oop(V0) and exception_pc(V1)
5341 `- jumps to the return address(usually an exception_handler)
5342 * The caller:
5343 `- continues processing the exception_blob with V0/V1
5344 */
5346 /*
5347 Disassembling OptoRuntime::rethrow_stub()
5349 ; locals
5350 0x2d3bf320: addiu sp, sp, 0xfffffff8
5351 0x2d3bf324: sw ra, 0x4(sp)
5352 0x2d3bf328: sw fp, 0x0(sp)
5353 0x2d3bf32c: addu fp, sp, zero
5354 0x2d3bf330: addiu sp, sp, 0xfffffff0
5355 0x2d3bf334: sw ra, 0x8(sp)
5356 0x2d3bf338: sw t0, 0x4(sp)
5357 0x2d3bf33c: sw sp, 0x0(sp)
5359 ; get_thread(S2)
5360 0x2d3bf340: addu s2, sp, zero
5361 0x2d3bf344: srl s2, s2, 12
5362 0x2d3bf348: sll s2, s2, 2
5363 0x2d3bf34c: lui at, 0x2c85
5364 0x2d3bf350: addu at, at, s2
5365 0x2d3bf354: lw s2, 0xffffcc80(at)
5367 0x2d3bf358: lw s0, 0x0(sp)
  0x2d3bf35c: sw s0, 0x118(s2)   // last_sp -> thread
5369 0x2d3bf360: sw s2, 0xc(sp)
5371 ; OptoRuntime::rethrow_C(oopDesc* exception, JavaThread* thread, address ret_pc)
5372 0x2d3bf364: lw a0, 0x4(sp)
5373 0x2d3bf368: lw a1, 0xc(sp)
5374 0x2d3bf36c: lw a2, 0x8(sp)
5375 ;; Java_To_Runtime
5376 0x2d3bf370: lui t9, 0x2c34
5377 0x2d3bf374: addiu t9, t9, 0xffff8a48
5378 0x2d3bf378: jalr t9
5379 0x2d3bf37c: nop
5381 0x2d3bf380: addu s3, v0, zero ; S3: SharedRuntime::raw_exception_handler_for_return_address()
5383 0x2d3bf384: lw s0, 0xc(sp)
5384 0x2d3bf388: sw zero, 0x118(s0)
5385 0x2d3bf38c: sw zero, 0x11c(s0)
5386 0x2d3bf390: lw s1, 0x144(s0) ; ex_oop: S1
5387 0x2d3bf394: addu s2, s0, zero
5388 0x2d3bf398: sw zero, 0x144(s2)
5389 0x2d3bf39c: lw s0, 0x4(s2)
5390 0x2d3bf3a0: addiu s4, zero, 0x0
5391 0x2d3bf3a4: bne s0, s4, 0x2d3bf3d4
5392 0x2d3bf3a8: nop
5393 0x2d3bf3ac: addiu sp, sp, 0x10
5394 0x2d3bf3b0: addiu sp, sp, 0x8
5395 0x2d3bf3b4: lw ra, 0xfffffffc(sp)
5396 0x2d3bf3b8: lw fp, 0xfffffff8(sp)
5397 0x2d3bf3bc: lui at, 0x2b48
5398 0x2d3bf3c0: lw at, 0x100(at)
5400 ; tailjmpInd: Restores exception_oop & exception_pc
5401 0x2d3bf3c4: addu v1, ra, zero
5402 0x2d3bf3c8: addu v0, s1, zero
5403 0x2d3bf3cc: jr s3
5404 0x2d3bf3d0: nop
5405 ; Exception:
5406 0x2d3bf3d4: lui s1, 0x2cc8 ; generate_forward_exception()
5407 0x2d3bf3d8: addiu s1, s1, 0x40
5408 0x2d3bf3dc: addiu s2, zero, 0x0
5409 0x2d3bf3e0: addiu sp, sp, 0x10
5410 0x2d3bf3e4: addiu sp, sp, 0x8
5411 0x2d3bf3e8: lw ra, 0xfffffffc(sp)
5412 0x2d3bf3ec: lw fp, 0xfffffff8(sp)
5413 0x2d3bf3f0: lui at, 0x2b48
5414 0x2d3bf3f4: lw at, 0x100(at)
5415 ; TailCalljmpInd
5416 __ push(RA); ; to be used in generate_forward_exception()
5417 0x2d3bf3f8: addu t7, s2, zero
5418 0x2d3bf3fc: jr s1
5419 0x2d3bf400: nop
5420 */
// Rethrow exception:
// The exception oop will come in the first argument position.
// Then JUMP (not call) to the rethrow stub code.
instruct RethrowException()
%{
  match(Rethrow);

  // use the following format syntax
  format %{ "JMP rethrow_stub #@RethrowException" %}
  ins_encode %{
    __ block_comment("@ RethrowException");

    // Mark the call site so the runtime-call relocation covers the li/jr.
    cbuf.set_insts_mark();
    cbuf.relocate(cbuf.insts_mark(), runtime_call_Relocation::spec());

    // call OptoRuntime::rethrow_stub to get the exception handler in parent method
    __ li(T9, OptoRuntime::rethrow_stub());
    __ jr(T9);
    __ nop();  // branch delay slot
  %}
  ins_pipe( pipe_jmp );
%}
// Branch on pointer compared against NULL (eq/ne only).
instruct branchConP_zero(cmpOpU cmp, mRegP op1, immP0 zero, label labl) %{
  match(If cmp (CmpP op1 zero));
  effect(USE labl);

  ins_cost(180);
  format %{ "b$cmp $op1, R0, $labl #@branchConP_zero" %}

  ins_encode %{
    Register op1 = $op1$$Register;
    Register op2 = R0;
    // Fixed: the old code bound '*($labl$$label)' to a reference and then
    // tested 'if (&L)'.  Dereferencing a NULL pointer into a reference is
    // undefined behaviour in C++; keep the label as a pointer and test it.
    Label* L = $labl$$label;
    int flag = $cmp$$cmpcode;

    // Only eq/ne make sense against the NULL literal; the unsigned order
    // comparisons that used to sit here commented out were meaningless.
    switch(flag)
    {
      case 0x01: //equal
        if (L != NULL)
          __ beq(op1, op2, *L);
        else
          __ beq(op1, op2, (int)0);
        break;
      case 0x02: //not_equal
        if (L != NULL)
          __ bne(op1, op2, *L);
        else
          __ bne(op1, op2, (int)0);
        break;
      default:
        Unimplemented();
    }
    __ nop();  // branch delay slot
  %}

  ins_pc_relative(1);
  ins_pipe(pipe_branchP);
%}
// Branch on unsigned comparison of two pointer registers.
instruct branchConP(cmpOpU cmp, mRegP op1, mRegP op2, label labl) %{
  match(If cmp (CmpP op1 op2));
  // predicate(can_branch_register(_kids[0]->_leaf, _kids[1]->_leaf));
  effect(USE labl);

  ins_cost(200);
  format %{ "b$cmp $op1, $op2, $labl #@branchConP" %}

  ins_encode %{
    Register op1 = $op1$$Register;
    Register op2 = $op2$$Register;
    // Fixed: the old 'Label &L = *($labl$$label); if (&L)' pattern
    // dereferences a possibly-NULL pointer into a reference (undefined
    // behaviour); use a pointer test instead.
    Label* L = $labl$$label;
    int flag = $cmp$$cmpcode;

    switch(flag)
    {
      case 0x01: //equal
        if (L != NULL)
          __ beq(op1, op2, *L);
        else
          __ beq(op1, op2, (int)0);
        break;
      case 0x02: //not_equal
        if (L != NULL)
          __ bne(op1, op2, *L);
        else
          __ bne(op1, op2, (int)0);
        break;
      case 0x03: //above
        __ sltu(AT, op2, op1);      // AT = (op2 < op1) unsigned
        if (L != NULL)
          __ bne(R0, AT, *L);
        else
          __ bne(R0, AT, (int)0);
        break;
      case 0x04: //above_equal
        __ sltu(AT, op1, op2);      // AT = (op1 < op2); branch if !AT
        if (L != NULL)
          __ beq(AT, R0, *L);
        else
          __ beq(AT, R0, (int)0);
        break;
      case 0x05: //below
        __ sltu(AT, op1, op2);
        if (L != NULL)
          __ bne(R0, AT, *L);
        else
          __ bne(R0, AT, (int)0);
        break;
      case 0x06: //below_equal
        __ sltu(AT, op2, op1);
        if (L != NULL)
          __ beq(AT, R0, *L);
        else
          __ beq(AT, R0, (int)0);
        break;
      default:
        Unimplemented();
    }
    __ nop();  // branch delay slot
  %}

  ins_pc_relative(1);
  ins_pipe(pipe_branchP);
%}
// Branch on compressed pointer compared against NULL (eq/ne only).
instruct cmpN_null_branch(cmpOp cmp, mRegN op1, immN0 null, label labl) %{
  match(If cmp (CmpN op1 null));
  effect(USE labl);

  ins_cost(180);
  format %{ "CMP $op1,0\t! compressed ptr\n\t"
            "BP$cmp $labl" %}
  ins_encode %{
    Register op1 = $op1$$Register;
    Register op2 = R0;
    // Fixed: avoid binding *($labl$$label) to a reference and testing
    // 'if (&L)' -- that dereference is undefined behaviour when the label
    // pointer is NULL.  Test the pointer itself.
    Label* L = $labl$$label;
    int flag = $cmp$$cmpcode;

    switch(flag)
    {
      case 0x01: //equal
        if (L != NULL)
          __ beq(op1, op2, *L);
        else
          __ beq(op1, op2, (int)0);
        break;
      case 0x02: //not_equal
        if (L != NULL)
          __ bne(op1, op2, *L);
        else
          __ bne(op1, op2, (int)0);
        break;
      default:
        Unimplemented();
    }
    __ nop();  // branch delay slot
  %}
  //TODO: pipe_branchP or create pipe_branchN LEE
  ins_pc_relative(1);
  ins_pipe(pipe_branchP);
%}
// Branch on comparison of two compressed pointer registers.
instruct cmpN_reg_branch(cmpOp cmp, mRegN op1, mRegN op2, label labl) %{
  match(If cmp (CmpN op1 op2));
  effect(USE labl);

  ins_cost(180);
  format %{ "CMP $op1,$op2\t! compressed ptr\n\t"
            "BP$cmp $labl" %}
  ins_encode %{
    Register op1_reg = $op1$$Register;
    Register op2_reg = $op2$$Register;
    // Fixed: replaced the UB-prone 'Label &L = *($labl$$label); if (&L)'
    // idiom with an explicit pointer test.
    Label* L = $labl$$label;
    int flag = $cmp$$cmpcode;

    switch(flag)
    {
      case 0x01: //equal
        if (L != NULL)
          __ beq(op1_reg, op2_reg, *L);
        else
          __ beq(op1_reg, op2_reg, (int)0);
        break;
      case 0x02: //not_equal
        if (L != NULL)
          __ bne(op1_reg, op2_reg, *L);
        else
          __ bne(op1_reg, op2_reg, (int)0);
        break;
      case 0x03: //above
        __ sltu(AT, op2_reg, op1_reg);
        if (L != NULL)
          __ bne(R0, AT, *L);
        else
          __ bne(R0, AT, (int)0);
        break;
      case 0x04: //above_equal
        __ sltu(AT, op1_reg, op2_reg);
        if (L != NULL)
          __ beq(AT, R0, *L);
        else
          __ beq(AT, R0, (int)0);
        break;
      case 0x05: //below
        __ sltu(AT, op1_reg, op2_reg);
        if (L != NULL)
          __ bne(R0, AT, *L);
        else
          __ bne(R0, AT, (int)0);
        break;
      case 0x06: //below_equal
        __ sltu(AT, op2_reg, op1_reg);
        if (L != NULL)
          __ beq(AT, R0, *L);
        else
          __ beq(AT, R0, (int)0);
        break;
      default:
        Unimplemented();
    }
    __ nop();  // branch delay slot
  %}
  ins_pc_relative(1);
  ins_pipe(pipe_branchP);
%}
// Branch on unsigned comparison of two integer registers.
instruct branchConIU_reg_reg(cmpOpU cmp, mRegI src1, mRegI src2, label labl) %{
  match( If cmp (CmpU src1 src2) );
  effect(USE labl);
  format %{ "BR$cmp $src1, $src2, $labl #@branchConIU_reg_reg" %}

  ins_encode %{
    Register op1 = $src1$$Register;
    Register op2 = $src2$$Register;
    // Fixed: replaced the UB-prone 'Label &L = *($labl$$label); if (&L)'
    // idiom with an explicit pointer test.
    Label* L = $labl$$label;
    int flag = $cmp$$cmpcode;

    switch(flag)
    {
      case 0x01: //equal
        if (L != NULL)
          __ beq(op1, op2, *L);
        else
          __ beq(op1, op2, (int)0);
        break;
      case 0x02: //not_equal
        if (L != NULL)
          __ bne(op1, op2, *L);
        else
          __ bne(op1, op2, (int)0);
        break;
      case 0x03: //above
        __ sltu(AT, op2, op1);
        if (L != NULL)
          __ bne(AT, R0, *L);
        else
          __ bne(AT, R0, (int)0);
        break;
      case 0x04: //above_equal
        __ sltu(AT, op1, op2);
        if (L != NULL)
          __ beq(AT, R0, *L);
        else
          __ beq(AT, R0, (int)0);
        break;
      case 0x05: //below
        __ sltu(AT, op1, op2);
        if (L != NULL)
          __ bne(AT, R0, *L);
        else
          __ bne(AT, R0, (int)0);
        break;
      case 0x06: //below_equal
        __ sltu(AT, op2, op1);
        if (L != NULL)
          __ beq(AT, R0, *L);
        else
          __ beq(AT, R0, (int)0);
        break;
      default:
        Unimplemented();
    }
    __ nop();  // branch delay slot
  %}

  ins_pc_relative(1);
  ins_pipe(pipe_branchP);
%}
// Unsigned int compare-and-branch, register vs arbitrary immediate.
// The immediate is first materialized into AT with move(); the above/
// below cases then overwrite AT with the sltu result.
// Trailing nop fills the branch delay slot.
instruct branchConIU_reg_imm(cmpOpU cmp, mRegI src1, immI src2, label labl) %{
match( If cmp (CmpU src1 src2) );
effect(USE labl);
format %{ "BR$cmp $src1, $src2, $labl #@branchConIU_reg_imm" %}
ins_encode %{
Register op1 = $src1$$Register;
int val = $src2$$constant;
Label &L = *($labl$$label);
int flag = $cmp$$cmpcode;
__ move(AT, val);  // materialize the immediate in the scratch register
switch(flag)
{
case 0x01: //equal
if (&L)
__ beq(op1, AT, L);
else
__ beq(op1, AT, (int)0);
break;
case 0x02: //not_equal
if (&L)
__ bne(op1, AT, L);
else
__ bne(op1, AT, (int)0);
break;
case 0x03: //above
__ sltu(AT, AT, op1);
if(&L)
__ bne(R0, AT, L);
else
__ bne(R0, AT, (int)0);
break;
case 0x04: //above_equal
__ sltu(AT, op1, AT);
if(&L)
__ beq(AT, R0, L);
else
__ beq(AT, R0, (int)0);
break;
case 0x05: //below
__ sltu(AT, op1, AT);
if(&L)
__ bne(R0, AT, L);
else
__ bne(R0, AT, (int)0);
break;
case 0x06: //below_equal
__ sltu(AT, AT, op1);
if(&L)
__ beq(AT, R0, L);
else
__ beq(AT, R0, (int)0);
break;
default:
Unimplemented();
}
__ nop();  // branch delay slot
%}
ins_pc_relative(1);
ins_pipe(pipe_branchP);
%}
// Signed int compare-and-branch, register vs register.
// Same cmpcode layout as the unsigned variant, but slt (signed
// set-on-less-than) builds the gt/ge/lt/le relations in AT.
instruct branchConI_reg_reg(cmpOp cmp, mRegI src1, mRegI src2, label labl) %{
match( If cmp (CmpI src1 src2) );
effect(USE labl);
format %{ "BR$cmp $src1, $src2, $labl #@branchConI_reg_reg" %}
ins_encode %{
Register op1 = $src1$$Register;
Register op2 = $src2$$Register;
Label &L = *($labl$$label);
int flag = $cmp$$cmpcode;
switch(flag)
{
case 0x01: //equal
if (&L)
__ beq(op1, op2, L);
else
__ beq(op1, op2, (int)0);
break;
case 0x02: //not_equal
if (&L)
__ bne(op1, op2, L);
else
__ bne(op1, op2, (int)0);
break;
case 0x03: //above
__ slt(AT, op2, op1);
if(&L)
__ bne(R0, AT, L);
else
__ bne(R0, AT, (int)0);
break;
case 0x04: //above_equal
__ slt(AT, op1, op2);
if(&L)
__ beq(AT, R0, L);
else
__ beq(AT, R0, (int)0);
break;
case 0x05: //below
__ slt(AT, op1, op2);
if(&L)
__ bne(R0, AT, L);
else
__ bne(R0, AT, (int)0);
break;
case 0x06: //below_equal
__ slt(AT, op2, op1);
if(&L)
__ beq(AT, R0, L);
else
__ beq(AT, R0, (int)0);
break;
default:
Unimplemented();
}
__ nop();  // branch delay slot
%}
ins_pc_relative(1);
ins_pipe(pipe_branchP);
%}
// Signed int compare-and-branch against the constant zero.
// Cheaper than branchConI_reg_imm (ins_cost 170 vs 200): MIPS has
// dedicated compare-with-zero branches (bgtz/bgez/bltz/blez), so no
// immediate needs to be materialized in AT.
instruct branchConI_reg_imm0(cmpOp cmp, mRegI src1, immI0 src2, label labl) %{
match( If cmp (CmpI src1 src2) );
effect(USE labl);
ins_cost(170);
format %{ "BR$cmp $src1, $src2, $labl #@branchConI_reg_imm0" %}
ins_encode %{
Register op1 = $src1$$Register;
// int val = $src2$$constant;
Label &L = *($labl$$label);
int flag = $cmp$$cmpcode;
//__ move(AT, val);
switch(flag)
{
case 0x01: //equal
if (&L)
__ beq(op1, R0, L);
else
__ beq(op1, R0, (int)0);
break;
case 0x02: //not_equal
if (&L)
__ bne(op1, R0, L);
else
__ bne(op1, R0, (int)0);
break;
case 0x03: //greater
if(&L)
__ bgtz(op1, L);
else
__ bgtz(op1, (int)0);
break;
case 0x04: //greater_equal
if(&L)
__ bgez(op1, L);
else
__ bgez(op1, (int)0);
break;
case 0x05: //less
if(&L)
__ bltz(op1, L);
else
__ bltz(op1, (int)0);
break;
case 0x06: //less_equal
if(&L)
__ blez(op1, L);
else
__ blez(op1, (int)0);
break;
default:
Unimplemented();
}
__ nop();  // branch delay slot
%}
ins_pc_relative(1);
ins_pipe(pipe_branchP);
%}
// Signed int compare-and-branch, register vs arbitrary immediate.
// The immediate goes into AT first; slt then rebuilds AT with the
// signed relation for the gt/ge/lt/le cases.
instruct branchConI_reg_imm(cmpOp cmp, mRegI src1, immI src2, label labl) %{
match( If cmp (CmpI src1 src2) );
effect(USE labl);
ins_cost(200);
format %{ "BR$cmp $src1, $src2, $labl #@branchConI_reg_imm" %}
ins_encode %{
Register op1 = $src1$$Register;
int val = $src2$$constant;
Label &L = *($labl$$label);
int flag = $cmp$$cmpcode;
__ move(AT, val);  // materialize the immediate in the scratch register
switch(flag)
{
case 0x01: //equal
if (&L)
__ beq(op1, AT, L);
else
__ beq(op1, AT, (int)0);
break;
case 0x02: //not_equal
if (&L)
__ bne(op1, AT, L);
else
__ bne(op1, AT, (int)0);
break;
case 0x03: //greater
__ slt(AT, AT, op1);
if(&L)
__ bne(R0, AT, L);
else
__ bne(R0, AT, (int)0);
break;
case 0x04: //greater_equal
__ slt(AT, op1, AT);
if(&L)
__ beq(AT, R0, L);
else
__ beq(AT, R0, (int)0);
break;
case 0x05: //less
__ slt(AT, op1, AT);
if(&L)
__ bne(R0, AT, L);
else
__ bne(R0, AT, (int)0);
break;
case 0x06: //less_equal
__ slt(AT, AT, op1);
if(&L)
__ beq(AT, R0, L);
else
__ beq(AT, R0, (int)0);
break;
default:
Unimplemented();
}
__ nop();  // branch delay slot
%}
ins_pc_relative(1);
ins_pipe(pipe_branchP);
%}
// Unsigned int compare-and-branch against zero. Degenerate cases are
// folded: above_equal (u >= 0) is always true and becomes an
// unconditional beq(R0,R0,...); below (u < 0) is never true, so the
// encode returns without emitting a branch at all.
instruct branchConIU_reg_imm0(cmpOpU cmp, mRegI src1, immI0 zero, label labl) %{
match( If cmp (CmpU src1 zero) );
effect(USE labl);
format %{ "BR$cmp $src1, zero, $labl #@branchConIU_reg_imm0" %}
ins_encode %{
Register op1 = $src1$$Register;
Label &L = *($labl$$label);
int flag = $cmp$$cmpcode;
switch(flag)
{
case 0x01: //equal
if (&L)
__ beq(op1, R0, L);
else
__ beq(op1, R0, (int)0);
break;
case 0x02: //not_equal
if (&L)
__ bne(op1, R0, L);
else
__ bne(op1, R0, (int)0);
break;
case 0x03: //above
if(&L)
__ bne(R0, op1, L);
else
__ bne(R0, op1, (int)0);
break;
case 0x04: //above_equal
// unsigned >= 0 is always true: unconditional branch
if(&L)
__ beq(R0, R0, L);
else
__ beq(R0, R0, (int)0);
break;
case 0x05: //below
// unsigned < 0 is never true: emit nothing
return;
break;
case 0x06: //below_equal
if(&L)
__ beq(op1, R0, L);
else
__ beq(op1, R0, (int)0);
break;
default:
Unimplemented();
}
__ nop();  // branch delay slot
%}
ins_pc_relative(1);
ins_pipe(pipe_branchP);
%}
// Unsigned int compare-and-branch against a 16-bit immediate.
// above_equal/below use sltiu directly (the immediate fits the sltiu
// encoding); the other cases still materialize the value in AT first.
instruct branchConIU_reg_immI16(cmpOpU cmp, mRegI src1, immI16 src2, label labl) %{
match( If cmp (CmpU src1 src2) );
effect(USE labl);
ins_cost(180);
format %{ "BR$cmp $src1, $src2, $labl #@branchConIU_reg_immI16" %}
ins_encode %{
Register op1 = $src1$$Register;
int val = $src2$$constant;
Label &L = *($labl$$label);
int flag = $cmp$$cmpcode;
switch(flag)
{
case 0x01: //equal
__ move(AT, val);
if (&L)
__ beq(op1, AT, L);
else
__ beq(op1, AT, (int)0);
break;
case 0x02: //not_equal
__ move(AT, val);
if (&L)
__ bne(op1, AT, L);
else
__ bne(op1, AT, (int)0);
break;
case 0x03: //above
__ move(AT, val);
__ sltu(AT, AT, op1);
if(&L)
__ bne(R0, AT, L);
else
__ bne(R0, AT, (int)0);
break;
case 0x04: //above_equal
__ sltiu(AT, op1, val);  // immediate fits sltiu: skip the move
if(&L)
__ beq(AT, R0, L);
else
__ beq(AT, R0, (int)0);
break;
case 0x05: //below
__ sltiu(AT, op1, val);  // immediate fits sltiu: skip the move
if(&L)
__ bne(R0, AT, L);
else
__ bne(R0, AT, (int)0);
break;
case 0x06: //below_equal
__ move(AT, val);
__ sltu(AT, AT, op1);
if(&L)
__ beq(AT, R0, L);
else
__ beq(AT, R0, (int)0);
break;
default:
Unimplemented();
}
__ nop();  // branch delay slot
%}
ins_pc_relative(1);
ins_pipe(pipe_branchP);
%}
// Signed long compare-and-branch, register vs register.
// Unlike the int variants this block fills each branch delay slot
// explicitly with __ delayed()->nop() inside every case instead of a
// single trailing nop.
instruct branchConL_regL_regL(cmpOp cmp, mRegL src1, mRegL src2, label labl) %{
match( If cmp (CmpL src1 src2) );
effect(USE labl);
format %{ "BR$cmp $src1, $src2, $labl #@branchConL_regL_regL" %}
ins_cost(250);
ins_encode %{
Register opr1_reg = as_Register($src1$$reg);
Register opr2_reg = as_Register($src2$$reg);
Label &target = *($labl$$label);
int flag = $cmp$$cmpcode;
switch(flag)
{
case 0x01: //equal
if (&target)
__ beq(opr1_reg, opr2_reg, target);
else
__ beq(opr1_reg, opr2_reg, (int)0);
__ delayed()->nop();
break;
case 0x02: //not_equal
if(&target)
__ bne(opr1_reg, opr2_reg, target);
else
__ bne(opr1_reg, opr2_reg, (int)0);
__ delayed()->nop();
break;
case 0x03: //greater
__ slt(AT, opr2_reg, opr1_reg);
if(&target)
__ bne(AT, R0, target);
else
__ bne(AT, R0, (int)0);
__ delayed()->nop();
break;
case 0x04: //greater_equal
__ slt(AT, opr1_reg, opr2_reg);
if(&target)
__ beq(AT, R0, target);
else
__ beq(AT, R0, (int)0);
__ delayed()->nop();
break;
case 0x05: //less
__ slt(AT, opr1_reg, opr2_reg);
if(&target)
__ bne(AT, R0, target);
else
__ bne(AT, R0, (int)0);
__ delayed()->nop();
break;
case 0x06: //less_equal
__ slt(AT, opr2_reg, opr1_reg);
if(&target)
__ beq(AT, R0, target);
else
__ beq(AT, R0, (int)0);
__ delayed()->nop();
break;
default:
Unimplemented();
}
%}
ins_pc_relative(1);
ins_pipe(pipe_branchP);
%}
// Signed int compare-and-branch against a negatable 16-bit immediate.
// Computes AT = src1 - val once (addiu32 with -val), then branches on
// the sign/zero of AT with the compare-with-zero branch forms.
// NOTE(review): relies on src1 - val not overflowing for the matched
// immI16_sub operand range -- presumably guaranteed by the operand
// definition; confirm against the immI16_sub declaration.
instruct branchConI_reg_imm16_sub(cmpOp cmp, mRegI src1, immI16_sub src2, label labl) %{
match( If cmp (CmpI src1 src2) );
effect(USE labl);
ins_cost(180);
format %{ "BR$cmp $src1, $src2, $labl #@branchConI_reg_imm16_sub" %}
ins_encode %{
Register op1 = $src1$$Register;
int val = $src2$$constant;
Label &L = *($labl$$label);
int flag = $cmp$$cmpcode;
__ addiu32(AT, op1, -1 * val);  // AT = src1 - val; compare against zero below
switch(flag)
{
case 0x01: //equal
if (&L)
__ beq(R0, AT, L);
else
__ beq(R0, AT, (int)0);
break;
case 0x02: //not_equal
if (&L)
__ bne(R0, AT, L);
else
__ bne(R0, AT, (int)0);
break;
case 0x03: //greater
if(&L)
__ bgtz(AT, L);
else
__ bgtz(AT, (int)0);
break;
case 0x04: //greater_equal
if(&L)
__ bgez(AT, L);
else
__ bgez(AT, (int)0);
break;
case 0x05: //less
if(&L)
__ bltz(AT, L);
else
__ bltz(AT, (int)0);
break;
case 0x06: //less_equal
if(&L)
__ blez(AT, L);
else
__ blez(AT, (int)0);
break;
default:
Unimplemented();
}
__ nop();  // branch delay slot
%}
ins_pc_relative(1);
ins_pipe(pipe_branchP);
%}
// Signed long compare-and-branch against the constant zero.
// Uses the MIPS compare-with-zero branches where available; the `less`
// case goes through slt(AT, op1, R0) + bne instead of bltz (behavior
// is the same). The shared delayed()->nop() after the switch fills the
// branch delay slot for every case.
instruct branchConL_regL_immL0(cmpOp cmp, mRegL src1, immL0 zero, label labl) %{
match( If cmp (CmpL src1 zero) );
effect(USE labl);
format %{ "BR$cmp $src1, zero, $labl #@branchConL_regL_immL0" %}
ins_cost(220);
ins_encode %{
Register opr1_reg = as_Register($src1$$reg);
Label &target = *($labl$$label);
int flag = $cmp$$cmpcode;
switch(flag)
{
case 0x01: //equal
if (&target)
__ beq(opr1_reg, R0, target);
else
__ beq(opr1_reg, R0, int(0));
break;
case 0x02: //not_equal
if(&target)
__ bne(opr1_reg, R0, target);
else
__ bne(opr1_reg, R0, (int)0);
break;
case 0x03: //greater
if(&target)
__ bgtz(opr1_reg, target);
else
__ bgtz(opr1_reg, (int)0);
break;
case 0x04: //greater_equal
if(&target)
__ bgez(opr1_reg, target);
else
__ bgez(opr1_reg, (int)0);
break;
case 0x05: //less
__ slt(AT, opr1_reg, R0);
if(&target)
__ bne(AT, R0, target);
else
__ bne(AT, R0, (int)0);
break;
case 0x06: //less_equal
if (&target)
__ blez(opr1_reg, target);
else
__ blez(opr1_reg, int(0));
break;
default:
Unimplemented();
}
__ delayed()->nop();  // branch delay slot
%}
ins_pc_relative(1);
ins_pipe(pipe_branchP);
%}
6331 /*
6332 // Conditional Direct Branch
6333 instruct branchCon(cmpOp cmp, flagsReg icc, label labl) %{
6334 match(If cmp icc);
6335 effect(USE labl);
6337 size(8);
6338 ins_cost(BRANCH_COST);
6339 format %{ "BP$cmp $icc,$labl" %}
6340 // Prim = bits 24-22, Secnd = bits 31-30
6341 ins_encode( enc_bp( labl, cmp, icc ) );
6342 ins_pc_relative(1);
6343 ins_pipe(br_cc);
6344 %}
6345 */
6347 //FIXME
6348 instruct branchConF_reg_reg(cmpOp cmp, regF src1, regF src2, label labl) %{
6349 match( If cmp (CmpF src1 src2) );
6350 effect(USE labl);
6351 format %{ "BR$cmp $src1, $src2, $labl #@branchConF_reg_reg" %}
6353 ins_encode %{
6354 FloatRegister reg_op1 = $src1$$FloatRegister;
6355 FloatRegister reg_op2 = $src2$$FloatRegister;
6356 Label &L = *($labl$$label);
6357 int flag = $cmp$$cmpcode;
6359 switch(flag)
6360 {
6361 case 0x01: //equal
6362 __ c_eq_s(reg_op1, reg_op2);
6363 if (&L)
6364 __ bc1t(L);
6365 else
6366 __ bc1t((int)0);
6367 break;
6368 case 0x02: //not_equal
6369 __ c_ueq_s(reg_op1, reg_op2);
6370 if (&L)
6371 __ bc1f(L);
6372 else
6373 __ bc1f((int)0);
6374 break;
6375 case 0x03: //greater
6376 __ c_ule_s(reg_op1, reg_op2);
6377 if(&L)
6378 __ bc1f(L);
6379 else
6380 __ bc1f((int)0);
6381 break;
6382 case 0x04: //greater_equal
6383 __ c_ult_s(reg_op1, reg_op2);
6384 if(&L)
6385 __ bc1f(L);
6386 else
6387 __ bc1f((int)0);
6388 break;
6389 case 0x05: //less
6390 __ c_ult_s(reg_op1, reg_op2);
6391 if(&L)
6392 __ bc1t(L);
6393 else
6394 __ bc1t((int)0);
6395 break;
6396 case 0x06: //less_equal
6397 __ c_ule_s(reg_op1, reg_op2);
6398 if(&L)
6399 __ bc1t(L);
6400 else
6401 __ bc1t((int)0);
6402 break;
6403 default:
6404 Unimplemented();
6405 }
6406 __ nop();
6407 %}
6409 ins_pc_relative(1);
6410 ins_pipe(pipe_slow);
6411 %}
// Double compare-and-branch via the FPU condition flag (c.cond.d +
// bc1t/bc1f). Same cmpcode layout as the float variant. The not_equal
// case deliberately uses c_eq_d (false for NaN) so bc1f branches when
// either operand is NaN, matching Java `!=` semantics -- see the
// original 2016/4/19 note below.
instruct branchConD_reg_reg(cmpOp cmp, regD src1, regD src2, label labl) %{
match( If cmp (CmpD src1 src2) );
effect(USE labl);
format %{ "BR$cmp $src1, $src2, $labl #@branchConD_reg_reg" %}
ins_encode %{
FloatRegister reg_op1 = $src1$$FloatRegister;
FloatRegister reg_op2 = $src2$$FloatRegister;
Label &L = *($labl$$label);
int flag = $cmp$$cmpcode;
switch(flag)
{
case 0x01: //equal
__ c_eq_d(reg_op1, reg_op2);
if (&L)
__ bc1t(L);
else
__ bc1t((int)0);
break;
case 0x02: //not_equal
//2016/4/19 aoqi: c_ueq_d cannot distinguish NaN from equal. Double.isNaN(Double) is implemented by 'f != f', so the use of c_ueq_d causes bugs.
__ c_eq_d(reg_op1, reg_op2);
if (&L)
__ bc1f(L);
else
__ bc1f((int)0);
break;
case 0x03: //greater
__ c_ule_d(reg_op1, reg_op2);
if(&L)
__ bc1f(L);
else
__ bc1f((int)0);
break;
case 0x04: //greater_equal
__ c_ult_d(reg_op1, reg_op2);
if(&L)
__ bc1f(L);
else
__ bc1f((int)0);
break;
case 0x05: //less
__ c_ult_d(reg_op1, reg_op2);
if(&L)
__ bc1t(L);
else
__ bc1t((int)0);
break;
case 0x06: //less_equal
__ c_ule_d(reg_op1, reg_op2);
if(&L)
__ bc1t(L);
else
__ bc1t((int)0);
break;
default:
Unimplemented();
}
__ nop();  // branch delay slot
%}
ins_pc_relative(1);
ins_pipe(pipe_slow);
%}
6480 // Call Runtime Instruction
// Direct call into the VM runtime; the actual call sequence lives in
// the Java_To_Runtime encoding elsewhere in this file.
instruct CallRuntimeDirect(method meth) %{
match(CallRuntime );
effect(USE meth);
ins_cost(300);
format %{ "CALL,runtime #@CallRuntimeDirect" %}
ins_encode( Java_To_Runtime( meth ) );
ins_pipe( pipe_slow );
ins_alignment(16);
%}
6494 //------------------------MemBar Instructions-------------------------------
6495 //Memory barrier flavors
// Acquire barrier (and LoadFence): implemented as a full MIPS sync,
// which is stronger than required for acquire semantics.
instruct membar_acquire() %{
match(MemBarAcquire);
match(LoadFence);
ins_cost(400);
format %{ "MEMBAR-acquire" %}
// ins_encode( enc_membar_acquire );
ins_encode %{
__ sync();
%}
ins_pipe(pipe_slow);
%}
// Acquire barrier paired with a preceding CMPXCHG in FastLock;
// a plain sync suffices here.
instruct membar_acquire_lock() %{
match(MemBarAcquireLock);
ins_cost(400);
format %{ "MEMBAR-acquire (prior CMPXCHG in FastLock just sync)" %}
ins_encode %{
__ sync();
%}
ins_pipe(pipe_slow);
%}
// Release barrier (and StoreFence): full MIPS sync.
instruct membar_release() %{
match(MemBarRelease);
match(StoreFence);
ins_cost(400);
format %{ "MEMBAR-release" %}
ins_encode %{
__ sync();
%}
ins_pipe(pipe_slow);
%}
// Release barrier before a following FastUnlock; a plain sync suffices.
instruct membar_release_lock() %{
match(MemBarReleaseLock);
ins_cost(400);
format %{ "MEMBAR-release (a FastiUnlock follows so just sync)" %}
ins_encode %{
__ sync();
%}
ins_pipe(pipe_slow);
%}
// Full volatile barrier. Skipped entirely on uniprocessor systems
// (no cross-CPU ordering needed).
instruct membar_volatile() %{
match(MemBarVolatile);
ins_cost(400);
format %{ "MEMBAR-volatile" %}
/* ins_encode( enc_membar_volatile ); */
ins_encode %{
if( !os::is_MP() ) return; // Not needed on single CPU
__ sync();
%}
ins_pipe(pipe_slow);
%}
// Volatile barrier that the matcher has proven redundant (a store-load
// barrier already follows); still emits a sync on this port.
instruct unnecessary_membar_volatile() %{
match(MemBarVolatile);
predicate(Matcher::post_store_load_barrier(n));
ins_cost(400);
format %{ "MEMBAR-volatile (unnecessary so just sync)" %}
ins_encode %{
__ sync();
%}
ins_pipe(pipe_slow);
%}
// Store-store barrier: full sync (stronger than strictly required).
instruct membar_storestore() %{
match(MemBarStoreStore);
format %{ "MEMBAR-storestore (sync)" %}
ins_encode %{
__ sync();
%}
ins_cost(400);
ins_pipe(pipe_slow);
%}
6580 //----------Move Instructions--------------------------------------------------
6581 instruct castX2P(mRegP dst, mRegL src) %{
6582 match(Set dst (CastX2P src));
6583 format %{ "castX2P $dst, $src @ castX2P" %}
6584 ins_encode %{
6585 Register src = $src$$Register;
6586 Register dst = $dst$$Register;
6588 if(src != dst)
6589 __ move(dst, src);
6590 %}
6591 ins_cost(10);
6592 ins_pipe(ialu_reg_reg);
6593 %}
6595 instruct castP2X(mRegL dst, mRegP src ) %{
6596 match(Set dst (CastP2X src));
6598 format %{ "mov $dst, $src\t #@castP2X" %}
6599 ins_encode %{
6600 Register src = $src$$Register;
6601 Register dst = $dst$$Register;
6603 if(src != dst)
6604 __ move(dst, src);
6605 %}
6606 ins_pipe( ialu_reg_reg );
6607 %}
6609 instruct MoveF2I_reg_reg(mRegI dst, regF src) %{
6610 match(Set dst (MoveF2I src));
6611 effect(DEF dst, USE src);
6612 ins_cost(85);
6613 format %{ "MoveF2I $dst, $src @ MoveF2I_reg_reg" %}
6614 ins_encode %{
6615 Register dst = as_Register($dst$$reg);
6616 FloatRegister src = as_FloatRegister($src$$reg);
6618 __ mfc1(dst, src);
6619 %}
6620 ins_pipe( pipe_slow );
6621 %}
6623 instruct MoveI2F_reg_reg(regF dst, mRegI src) %{
6624 match(Set dst (MoveI2F src));
6625 effect(DEF dst, USE src);
6626 ins_cost(85);
6627 format %{ "MoveI2F $dst, $src @ MoveI2F_reg_reg" %}
6628 ins_encode %{
6629 Register src = as_Register($src$$reg);
6630 FloatRegister dst = as_FloatRegister($dst$$reg);
6632 __ mtc1(src, dst);
6633 %}
6634 ins_pipe( pipe_slow );
6635 %}
6637 instruct MoveD2L_reg_reg(mRegL dst, regD src) %{
6638 match(Set dst (MoveD2L src));
6639 effect(DEF dst, USE src);
6640 ins_cost(85);
6641 format %{ "MoveD2L $dst, $src @ MoveD2L_reg_reg" %}
6642 ins_encode %{
6643 Register dst = as_Register($dst$$reg);
6644 FloatRegister src = as_FloatRegister($src$$reg);
6646 __ dmfc1(dst, src);
6647 %}
6648 ins_pipe( pipe_slow );
6649 %}
6651 instruct MoveL2D_reg_reg(regD dst, mRegL src) %{
6652 match(Set dst (MoveL2D src));
6653 effect(DEF dst, USE src);
6654 ins_cost(85);
6655 format %{ "MoveL2D $dst, $src @ MoveL2D_reg_reg" %}
6656 ins_encode %{
6657 FloatRegister dst = as_FloatRegister($dst$$reg);
6658 Register src = as_Register($src$$reg);
6660 __ dmtc1(src, dst);
6661 %}
6662 ins_pipe( pipe_slow );
6663 %}
6665 //----------Conditional Move---------------------------------------------------
6666 // Conditional move
// Conditional move of an int selected by a signed int compare.
// Pattern: build the condition in AT (subu32 difference or slt), then
// movz (move if AT == 0) / movn (move if AT != 0) copies src into dst
// only when the condition holds; dst otherwise keeps its old value.
instruct cmovI_cmpI_reg_reg(mRegI dst, mRegI src, mRegI tmp1, mRegI tmp2, cmpOp cop ) %{
match(Set dst (CMoveI (Binary cop (CmpI tmp1 tmp2)) (Binary dst src)));
ins_cost(80);
format %{
"CMP$cop $tmp1, $tmp2\t @cmovI_cmpI_reg_reg\n"
"\tCMOV $dst,$src \t @cmovI_cmpI_reg_reg"
%}
ins_encode %{
Register op1 = $tmp1$$Register;
Register op2 = $tmp2$$Register;
Register dst = $dst$$Register;
Register src = $src$$Register;
int flag = $cop$$cmpcode;
switch(flag)
{
case 0x01: //equal
__ subu32(AT, op1, op2);
__ movz(dst, src, AT);
break;
case 0x02: //not_equal
__ subu32(AT, op1, op2);
__ movn(dst, src, AT);
break;
case 0x03: //great
__ slt(AT, op2, op1);
__ movn(dst, src, AT);
break;
case 0x04: //great_equal
__ slt(AT, op1, op2);
__ movz(dst, src, AT);
break;
case 0x05: //less
__ slt(AT, op1, op2);
__ movn(dst, src, AT);
break;
case 0x06: //less_equal
__ slt(AT, op2, op1);
__ movz(dst, src, AT);
break;
default:
Unimplemented();
}
%}
ins_pipe( pipe_cmov_reg );
%}
// Conditional move of an int selected by an unsigned pointer compare.
// Condition built in AT (full-width subu for eq/ne, sltu for the
// ordered relations), applied with movz/movn.
instruct cmovI_cmpP_reg_reg(mRegI dst, mRegI src, mRegP tmp1, mRegP tmp2, cmpOpU cop ) %{
match(Set dst (CMoveI (Binary cop (CmpP tmp1 tmp2)) (Binary dst src)));
ins_cost(80);
format %{
"CMPU$cop $tmp1,$tmp2\t @cmovI_cmpP_reg_reg\n\t"
"CMOV $dst,$src\t @cmovI_cmpP_reg_reg"
%}
ins_encode %{
Register op1 = $tmp1$$Register;
Register op2 = $tmp2$$Register;
Register dst = $dst$$Register;
Register src = $src$$Register;
int flag = $cop$$cmpcode;
switch(flag)
{
case 0x01: //equal
__ subu(AT, op1, op2);
__ movz(dst, src, AT);
break;
case 0x02: //not_equal
__ subu(AT, op1, op2);
__ movn(dst, src, AT);
break;
case 0x03: //above
__ sltu(AT, op2, op1);
__ movn(dst, src, AT);
break;
case 0x04: //above_equal
__ sltu(AT, op1, op2);
__ movz(dst, src, AT);
break;
case 0x05: //below
__ sltu(AT, op1, op2);
__ movn(dst, src, AT);
break;
case 0x06: //below_equal
__ sltu(AT, op2, op1);
__ movz(dst, src, AT);
break;
default:
Unimplemented();
}
%}
ins_pipe( pipe_cmov_reg );
%}
// Conditional move of an int selected by an unsigned compressed-oop
// compare (32-bit subu32 for eq/ne, sltu for the ordered relations).
instruct cmovI_cmpN_reg_reg(mRegI dst, mRegI src, mRegN tmp1, mRegN tmp2, cmpOpU cop ) %{
match(Set dst (CMoveI (Binary cop (CmpN tmp1 tmp2)) (Binary dst src)));
ins_cost(80);
format %{
"CMPU$cop $tmp1,$tmp2\t @cmovI_cmpN_reg_reg\n\t"
"CMOV $dst,$src\t @cmovI_cmpN_reg_reg"
%}
ins_encode %{
Register op1 = $tmp1$$Register;
Register op2 = $tmp2$$Register;
Register dst = $dst$$Register;
Register src = $src$$Register;
int flag = $cop$$cmpcode;
switch(flag)
{
case 0x01: //equal
__ subu32(AT, op1, op2);
__ movz(dst, src, AT);
break;
case 0x02: //not_equal
__ subu32(AT, op1, op2);
__ movn(dst, src, AT);
break;
case 0x03: //above
__ sltu(AT, op2, op1);
__ movn(dst, src, AT);
break;
case 0x04: //above_equal
__ sltu(AT, op1, op2);
__ movz(dst, src, AT);
break;
case 0x05: //below
__ sltu(AT, op1, op2);
__ movn(dst, src, AT);
break;
case 0x06: //below_equal
__ sltu(AT, op2, op1);
__ movz(dst, src, AT);
break;
default:
Unimplemented();
}
%}
ins_pipe( pipe_cmov_reg );
%}
// Conditional move of a pointer selected by an unsigned compressed-oop
// compare; same AT + movz/movn pattern as the sibling cmov blocks.
instruct cmovP_cmpN_reg_reg(mRegP dst, mRegP src, mRegN tmp1, mRegN tmp2, cmpOpU cop ) %{
match(Set dst (CMoveP (Binary cop (CmpN tmp1 tmp2)) (Binary dst src)));
ins_cost(80);
format %{
"CMPU$cop $tmp1,$tmp2\t @cmovP_cmpN_reg_reg\n\t"
"CMOV $dst,$src\t @cmovP_cmpN_reg_reg"
%}
ins_encode %{
Register op1 = $tmp1$$Register;
Register op2 = $tmp2$$Register;
Register dst = $dst$$Register;
Register src = $src$$Register;
int flag = $cop$$cmpcode;
switch(flag)
{
case 0x01: //equal
__ subu32(AT, op1, op2);
__ movz(dst, src, AT);
break;
case 0x02: //not_equal
__ subu32(AT, op1, op2);
__ movn(dst, src, AT);
break;
case 0x03: //above
__ sltu(AT, op2, op1);
__ movn(dst, src, AT);
break;
case 0x04: //above_equal
__ sltu(AT, op1, op2);
__ movz(dst, src, AT);
break;
case 0x05: //below
__ sltu(AT, op1, op2);
__ movn(dst, src, AT);
break;
case 0x06: //below_equal
__ sltu(AT, op2, op1);
__ movz(dst, src, AT);
break;
default:
Unimplemented();
}
%}
ins_pipe( pipe_cmov_reg );
%}
// Conditional move of a compressed oop selected by an unsigned full
// pointer compare (64-bit subu for eq/ne, sltu for ordered relations).
instruct cmovN_cmpP_reg_reg(mRegN dst, mRegN src, mRegP tmp1, mRegP tmp2, cmpOpU cop ) %{
match(Set dst (CMoveN (Binary cop (CmpP tmp1 tmp2)) (Binary dst src)));
ins_cost(80);
format %{
"CMPU$cop $tmp1,$tmp2\t @cmovN_cmpP_reg_reg\n\t"
"CMOV $dst,$src\t @cmovN_cmpP_reg_reg"
%}
ins_encode %{
Register op1 = $tmp1$$Register;
Register op2 = $tmp2$$Register;
Register dst = $dst$$Register;
Register src = $src$$Register;
int flag = $cop$$cmpcode;
switch(flag)
{
case 0x01: //equal
__ subu(AT, op1, op2);
__ movz(dst, src, AT);
break;
case 0x02: //not_equal
__ subu(AT, op1, op2);
__ movn(dst, src, AT);
break;
case 0x03: //above
__ sltu(AT, op2, op1);
__ movn(dst, src, AT);
break;
case 0x04: //above_equal
__ sltu(AT, op1, op2);
__ movz(dst, src, AT);
break;
case 0x05: //below
__ sltu(AT, op1, op2);
__ movn(dst, src, AT);
break;
case 0x06: //below_equal
__ sltu(AT, op2, op1);
__ movz(dst, src, AT);
break;
default:
Unimplemented();
}
%}
ins_pipe( pipe_cmov_reg );
%}
// Conditional move of a pointer selected by a double compare.
// No FP conditional-move is used; instead each case emits a
// c.cond.d + bc1t/bc1f that jumps over the move when the condition
// fails (the nop fills the branch delay slot), then binds L.
// NOTE(review): NaN treatment differs across cases (c_eq/c_ole/c_olt
// are false for unordered, c_ult/c_ule are true) -- presumably matches
// what the matcher feeds this pattern; verify against the cmpOp users.
instruct cmovP_cmpD_reg_reg(mRegP dst, mRegP src, regD tmp1, regD tmp2, cmpOp cop ) %{
match(Set dst (CMoveP (Binary cop (CmpD tmp1 tmp2)) (Binary dst src)));
ins_cost(200);
format %{
"CMP$cop $tmp1, $tmp2\t @cmovP_cmpD_reg_reg\n"
"\tCMOV $dst,$src \t @cmovP_cmpD_reg_reg"
%}
ins_encode %{
FloatRegister reg_op1 = as_FloatRegister($tmp1$$reg);
FloatRegister reg_op2 = as_FloatRegister($tmp2$$reg);
Register dst = as_Register($dst$$reg);
Register src = as_Register($src$$reg);
int flag = $cop$$cmpcode;
Label L;
switch(flag)
{
case 0x01: //equal
__ c_eq_d(reg_op1, reg_op2);
__ bc1f(L);
__ nop();
__ move(dst, src);
__ bind(L);
break;
case 0x02: //not_equal
__ c_eq_d(reg_op1, reg_op2);
__ bc1t(L);
__ nop();
__ move(dst, src);
__ bind(L);
break;
case 0x03: //greater
__ c_ole_d(reg_op1, reg_op2);
__ bc1t(L);
__ nop();
__ move(dst, src);
__ bind(L);
break;
case 0x04: //greater_equal
__ c_olt_d(reg_op1, reg_op2);
__ bc1t(L);
__ nop();
__ move(dst, src);
__ bind(L);
break;
case 0x05: //less
__ c_ult_d(reg_op1, reg_op2);
__ bc1f(L);
__ nop();
__ move(dst, src);
__ bind(L);
break;
case 0x06: //less_equal
__ c_ule_d(reg_op1, reg_op2);
__ bc1f(L);
__ nop();
__ move(dst, src);
__ bind(L);
break;
default:
Unimplemented();
}
%}
ins_pipe( pipe_cmov_reg );
%}
// Conditional move of a compressed oop selected by an unsigned
// compressed-oop compare (subu32 for eq/ne, sltu for ordered cases).
instruct cmovN_cmpN_reg_reg(mRegN dst, mRegN src, mRegN tmp1, mRegN tmp2, cmpOpU cop ) %{
match(Set dst (CMoveN (Binary cop (CmpN tmp1 tmp2)) (Binary dst src)));
ins_cost(80);
format %{
"CMPU$cop $tmp1,$tmp2\t @cmovN_cmpN_reg_reg\n\t"
"CMOV $dst,$src\t @cmovN_cmpN_reg_reg"
%}
ins_encode %{
Register op1 = $tmp1$$Register;
Register op2 = $tmp2$$Register;
Register dst = $dst$$Register;
Register src = $src$$Register;
int flag = $cop$$cmpcode;
switch(flag)
{
case 0x01: //equal
__ subu32(AT, op1, op2);
__ movz(dst, src, AT);
break;
case 0x02: //not_equal
__ subu32(AT, op1, op2);
__ movn(dst, src, AT);
break;
case 0x03: //above
__ sltu(AT, op2, op1);
__ movn(dst, src, AT);
break;
case 0x04: //above_equal
__ sltu(AT, op1, op2);
__ movz(dst, src, AT);
break;
case 0x05: //below
__ sltu(AT, op1, op2);
__ movn(dst, src, AT);
break;
case 0x06: //below_equal
__ sltu(AT, op2, op1);
__ movz(dst, src, AT);
break;
default:
Unimplemented();
}
%}
ins_pipe( pipe_cmov_reg );
%}
// Conditional move of an int selected by an unsigned int compare
// (subu for eq/ne, sltu for above/below relations).
instruct cmovI_cmpU_reg_reg(mRegI dst, mRegI src, mRegI tmp1, mRegI tmp2, cmpOpU cop ) %{
match(Set dst (CMoveI (Binary cop (CmpU tmp1 tmp2)) (Binary dst src)));
ins_cost(80);
format %{
"CMPU$cop $tmp1,$tmp2\t @cmovI_cmpU_reg_reg\n\t"
"CMOV $dst,$src\t @cmovI_cmpU_reg_reg"
%}
ins_encode %{
Register op1 = $tmp1$$Register;
Register op2 = $tmp2$$Register;
Register dst = $dst$$Register;
Register src = $src$$Register;
int flag = $cop$$cmpcode;
switch(flag)
{
case 0x01: //equal
__ subu(AT, op1, op2);
__ movz(dst, src, AT);
break;
case 0x02: //not_equal
__ subu(AT, op1, op2);
__ movn(dst, src, AT);
break;
case 0x03: //above
__ sltu(AT, op2, op1);
__ movn(dst, src, AT);
break;
case 0x04: //above_equal
__ sltu(AT, op1, op2);
__ movz(dst, src, AT);
break;
case 0x05: //below
__ sltu(AT, op1, op2);
__ movn(dst, src, AT);
break;
case 0x06: //below_equal
__ sltu(AT, op2, op1);
__ movz(dst, src, AT);
break;
default:
Unimplemented();
}
%}
ins_pipe( pipe_cmov_reg );
%}
// Conditional move of an int selected by a signed long compare
// (64-bit subu for eq/ne, slt for the ordered relations).
instruct cmovI_cmpL_reg_reg(mRegI dst, mRegI src, mRegL tmp1, mRegL tmp2, cmpOp cop ) %{
match(Set dst (CMoveI (Binary cop (CmpL tmp1 tmp2)) (Binary dst src)));
ins_cost(80);
format %{
"CMP$cop $tmp1, $tmp2\t @cmovI_cmpL_reg_reg\n"
"\tCMOV $dst,$src \t @cmovI_cmpL_reg_reg"
%}
ins_encode %{
Register opr1 = as_Register($tmp1$$reg);
Register opr2 = as_Register($tmp2$$reg);
Register dst = $dst$$Register;
Register src = $src$$Register;
int flag = $cop$$cmpcode;
switch(flag)
{
case 0x01: //equal
__ subu(AT, opr1, opr2);
__ movz(dst, src, AT);
break;
case 0x02: //not_equal
__ subu(AT, opr1, opr2);
__ movn(dst, src, AT);
break;
case 0x03: //greater
__ slt(AT, opr2, opr1);
__ movn(dst, src, AT);
break;
case 0x04: //greater_equal
__ slt(AT, opr1, opr2);
__ movz(dst, src, AT);
break;
case 0x05: //less
__ slt(AT, opr1, opr2);
__ movn(dst, src, AT);
break;
case 0x06: //less_equal
__ slt(AT, opr2, opr1);
__ movz(dst, src, AT);
break;
default:
Unimplemented();
}
%}
ins_pipe( pipe_cmov_reg );
%}
// Conditional move of a pointer selected by a signed long compare;
// identical condition pattern to cmovI_cmpL_reg_reg.
instruct cmovP_cmpL_reg_reg(mRegP dst, mRegP src, mRegL tmp1, mRegL tmp2, cmpOp cop ) %{
match(Set dst (CMoveP (Binary cop (CmpL tmp1 tmp2)) (Binary dst src)));
ins_cost(80);
format %{
"CMP$cop $tmp1, $tmp2\t @cmovP_cmpL_reg_reg\n"
"\tCMOV $dst,$src \t @cmovP_cmpL_reg_reg"
%}
ins_encode %{
Register opr1 = as_Register($tmp1$$reg);
Register opr2 = as_Register($tmp2$$reg);
Register dst = $dst$$Register;
Register src = $src$$Register;
int flag = $cop$$cmpcode;
switch(flag)
{
case 0x01: //equal
__ subu(AT, opr1, opr2);
__ movz(dst, src, AT);
break;
case 0x02: //not_equal
__ subu(AT, opr1, opr2);
__ movn(dst, src, AT);
break;
case 0x03: //greater
__ slt(AT, opr2, opr1);
__ movn(dst, src, AT);
break;
case 0x04: //greater_equal
__ slt(AT, opr1, opr2);
__ movz(dst, src, AT);
break;
case 0x05: //less
__ slt(AT, opr1, opr2);
__ movn(dst, src, AT);
break;
case 0x06: //less_equal
__ slt(AT, opr2, opr1);
__ movz(dst, src, AT);
break;
default:
Unimplemented();
}
%}
ins_pipe( pipe_cmov_reg );
%}
// Conditional move of an int register guarded by a double compare.
// There is no FP conditional move here: each case sets the FPU condition
// flag with a c.cond.d predicate and branches over the move (bc1f/bc1t)
// when the cmove should NOT take $src.
// NOTE(review): greater/greater_equal use ordered predicates (c_ole/c_olt)
// while less/less_equal use unordered ones (c_ult/c_ule), so NaN operands
// take the move in the "less" cases -- verify against branchConD_reg_reg.
instruct cmovI_cmpD_reg_reg(mRegI dst, mRegI src, regD tmp1, regD tmp2, cmpOp cop ) %{
  match(Set dst (CMoveI (Binary cop (CmpD tmp1 tmp2)) (Binary dst src)));
  ins_cost(200);
  format %{
     "CMP$cop $tmp1, $tmp2\t @cmovI_cmpD_reg_reg\n"
     "\tCMOV $dst,$src \t @cmovI_cmpD_reg_reg"
  %}
  ins_encode %{
    FloatRegister reg_op1 = as_FloatRegister($tmp1$$reg);
    FloatRegister reg_op2 = as_FloatRegister($tmp2$$reg);
    Register dst = as_Register($dst$$reg);
    Register src = as_Register($src$$reg);

    int flag = $cop$$cmpcode;

    Label L;

    switch(flag)
    {
      case 0x01: //equal
        __ c_eq_d(reg_op1, reg_op2);
        __ bc1f(L);              // skip the move unless equal (NaN: skip)
        __ nop();
        __ move(dst, src);
        __ bind(L);
        break;
      case 0x02: //not_equal
        //2016/4/19 aoqi: See instruct branchConD_reg_reg. The change in branchConD_reg_reg fixed a bug. It seems similar here, so I made the same change.
        __ c_eq_d(reg_op1, reg_op2);
        __ bc1t(L);              // skip the move when equal
        __ nop();
        __ move(dst, src);
        __ bind(L);
        break;
      case 0x03: //greater
        __ c_ole_d(reg_op1, reg_op2);
        __ bc1t(L);              // skip when op1 <= op2 (ordered)
        __ nop();
        __ move(dst, src);
        __ bind(L);
        break;
      case 0x04: //greater_equal
        __ c_olt_d(reg_op1, reg_op2);
        __ bc1t(L);
        __ nop();
        __ move(dst, src);
        __ bind(L);
        break;
      case 0x05: //less
        __ c_ult_d(reg_op1, reg_op2);
        __ bc1f(L);              // move when unordered-or-less
        __ nop();
        __ move(dst, src);
        __ bind(L);
        break;
      case 0x06: //less_equal
        __ c_ule_d(reg_op1, reg_op2);
        __ bc1f(L);
        __ nop();
        __ move(dst, src);
        __ bind(L);
        break;
      default:
        Unimplemented();
    }
  %}

  ins_pipe( pipe_cmov_reg );
%}
// Conditional move of a pointer guarded by an unsigned pointer compare
// (cmpOpU): sltu for the ordered cases, movz/movn for the branchless move.
instruct cmovP_cmpP_reg_reg(mRegP dst, mRegP src, mRegP tmp1, mRegP tmp2, cmpOpU cop ) %{
  match(Set dst (CMoveP (Binary cop (CmpP tmp1 tmp2)) (Binary dst src)));
  ins_cost(80);
  format %{
     "CMPU$cop $tmp1,$tmp2\t @cmovP_cmpP_reg_reg\n\t"
     "CMOV $dst,$src\t @cmovP_cmpP_reg_reg"
  %}
  ins_encode %{
    Register op1 = $tmp1$$Register;
    Register op2 = $tmp2$$Register;
    Register dst = $dst$$Register;
    Register src = $src$$Register;
    int flag = $cop$$cmpcode;

    switch(flag)
    {
      case 0x01: //equal
        __ subu(AT, op1, op2);   // AT == 0 iff op1 == op2
        __ movz(dst, src, AT);
        break;

      case 0x02: //not_equal
        __ subu(AT, op1, op2);
        __ movn(dst, src, AT);
        break;

      case 0x03: //above
        __ sltu(AT, op2, op1);   // unsigned op1 > op2
        __ movn(dst, src, AT);
        break;

      case 0x04: //above_equal
        __ sltu(AT, op1, op2);
        __ movz(dst, src, AT);
        break;

      case 0x05: //below
        __ sltu(AT, op1, op2);
        __ movn(dst, src, AT);
        break;

      case 0x06: //below_equal
        __ sltu(AT, op2, op1);
        __ movz(dst, src, AT);
        break;

      default:
        Unimplemented();
    }
  %}

  ins_pipe( pipe_cmov_reg );
%}
// Conditional move of a pointer guarded by a signed 32-bit int compare.
// Note: the operand is cmpOp (signed) and the encodings use slt, so the
// case labels below are the signed names (the old above/below labels
// were misleading -- the code was always signed).
instruct cmovP_cmpI_reg_reg(mRegP dst, mRegP src, mRegI tmp1, mRegI tmp2, cmpOp cop ) %{
  match(Set dst (CMoveP (Binary cop (CmpI tmp1 tmp2)) (Binary dst src)));
  ins_cost(80);
  format %{
     "CMP$cop $tmp1,$tmp2\t @cmovP_cmpI_reg_reg\n\t"
     "CMOV $dst,$src\t @cmovP_cmpI_reg_reg"
  %}
  ins_encode %{
    Register op1 = $tmp1$$Register;
    Register op2 = $tmp2$$Register;
    Register dst = $dst$$Register;
    Register src = $src$$Register;
    int flag = $cop$$cmpcode;

    switch(flag)
    {
      case 0x01: //equal
        __ subu32(AT, op1, op2);   // 32-bit difference; zero iff equal
        __ movz(dst, src, AT);
        break;

      case 0x02: //not_equal
        __ subu32(AT, op1, op2);
        __ movn(dst, src, AT);
        break;

      case 0x03: //greater (signed)
        __ slt(AT, op2, op1);
        __ movn(dst, src, AT);
        break;

      case 0x04: //greater_equal (signed)
        __ slt(AT, op1, op2);
        __ movz(dst, src, AT);
        break;

      case 0x05: //less (signed)
        __ slt(AT, op1, op2);
        __ movn(dst, src, AT);
        break;

      case 0x06: //less_equal (signed)
        __ slt(AT, op2, op1);
        __ movz(dst, src, AT);
        break;

      default:
        Unimplemented();
    }
  %}

  ins_pipe( pipe_cmov_reg );
%}
// Conditional move of a narrow-oop register guarded by a signed 32-bit
// int compare.  Same pattern as cmovP_cmpI_reg_reg; the case labels are
// the signed names (the code uses slt, not sltu).
instruct cmovN_cmpI_reg_reg(mRegN dst, mRegN src, mRegI tmp1, mRegI tmp2, cmpOp cop ) %{
  match(Set dst (CMoveN (Binary cop (CmpI tmp1 tmp2)) (Binary dst src)));
  ins_cost(80);
  format %{
     "CMP$cop $tmp1,$tmp2\t @cmovN_cmpI_reg_reg\n\t"
     "CMOV $dst,$src\t @cmovN_cmpI_reg_reg"
  %}
  ins_encode %{
    Register op1 = $tmp1$$Register;
    Register op2 = $tmp2$$Register;
    Register dst = $dst$$Register;
    Register src = $src$$Register;
    int flag = $cop$$cmpcode;

    switch(flag)
    {
      case 0x01: //equal
        __ subu32(AT, op1, op2);
        __ movz(dst, src, AT);
        break;

      case 0x02: //not_equal
        __ subu32(AT, op1, op2);
        __ movn(dst, src, AT);
        break;

      case 0x03: //greater (signed)
        __ slt(AT, op2, op1);
        __ movn(dst, src, AT);
        break;

      case 0x04: //greater_equal (signed)
        __ slt(AT, op1, op2);
        __ movz(dst, src, AT);
        break;

      case 0x05: //less (signed)
        __ slt(AT, op1, op2);
        __ movn(dst, src, AT);
        break;

      case 0x06: //less_equal (signed)
        __ slt(AT, op2, op1);
        __ movz(dst, src, AT);
        break;

      default:
        Unimplemented();
    }
  %}

  ins_pipe( pipe_cmov_reg );
%}
// Conditional move of a long register guarded by a signed 32-bit compare.
// Condition in AT (subu32 difference or slt), branchless move via movz/movn.
instruct cmovL_cmpI_reg_reg(mRegL dst, mRegL src, mRegI tmp1, mRegI tmp2, cmpOp cop ) %{
  match(Set dst (CMoveL (Binary cop (CmpI tmp1 tmp2)) (Binary dst src)));
  ins_cost(80);
  format %{
     "CMP$cop $tmp1, $tmp2\t @cmovL_cmpI_reg_reg\n"
     "\tCMOV $dst,$src \t @cmovL_cmpI_reg_reg"
  %}

  ins_encode %{
    Register op1 = $tmp1$$Register;
    Register op2 = $tmp2$$Register;
    Register dst = as_Register($dst$$reg);
    Register src = as_Register($src$$reg);
    int flag = $cop$$cmpcode;

    switch(flag)
    {
      case 0x01: //equal
        __ subu32(AT, op1, op2);
        __ movz(dst, src, AT);
        break;

      case 0x02: //not_equal
        __ subu32(AT, op1, op2);
        __ movn(dst, src, AT);
        break;

      case 0x03: //greater
        __ slt(AT, op2, op1);
        __ movn(dst, src, AT);
        break;

      case 0x04: //greater_equal
        __ slt(AT, op1, op2);
        __ movz(dst, src, AT);
        break;

      case 0x05: //less
        __ slt(AT, op1, op2);
        __ movn(dst, src, AT);
        break;

      case 0x06: //less_equal
        __ slt(AT, op2, op1);
        __ movz(dst, src, AT);
        break;

      default:
        Unimplemented();
    }
  %}

  ins_pipe( pipe_cmov_reg_long );
%}
// Conditional move of a long register guarded by a signed 64-bit compare.
// Condition in AT (64-bit subu difference or slt), move via movz/movn.
instruct cmovL_cmpL_reg_reg(mRegL dst, mRegL src, mRegL tmp1, mRegL tmp2, cmpOp cop ) %{
  match(Set dst (CMoveL (Binary cop (CmpL tmp1 tmp2)) (Binary dst src)));
  ins_cost(80);
  format %{
     "CMP$cop $tmp1, $tmp2\t @cmovL_cmpL_reg_reg\n"
     "\tCMOV $dst,$src \t @cmovL_cmpL_reg_reg"
  %}
  ins_encode %{
    Register opr1 = as_Register($tmp1$$reg);
    Register opr2 = as_Register($tmp2$$reg);
    Register dst = as_Register($dst$$reg);
    Register src = as_Register($src$$reg);
    int flag = $cop$$cmpcode;

    switch(flag)
    {
      case 0x01: //equal
        __ subu(AT, opr1, opr2);   // AT == 0 iff opr1 == opr2
        __ movz(dst, src, AT);
        break;

      case 0x02: //not_equal
        __ subu(AT, opr1, opr2);
        __ movn(dst, src, AT);
        break;

      case 0x03: //greater
        __ slt(AT, opr2, opr1);
        __ movn(dst, src, AT);
        break;

      case 0x04: //greater_equal
        __ slt(AT, opr1, opr2);
        __ movz(dst, src, AT);
        break;

      case 0x05: //less
        __ slt(AT, opr1, opr2);
        __ movn(dst, src, AT);
        break;

      case 0x06: //less_equal
        __ slt(AT, opr2, opr1);
        __ movz(dst, src, AT);
        break;

      default:
        Unimplemented();
    }
  %}

  ins_pipe( pipe_cmov_reg_long );
%}
// Conditional move of a long register guarded by an unsigned narrow-oop
// compare (cmpOpU).  Narrow oops are 32-bit values, hence subu32 for the
// equality difference and sltu for the unsigned ordered cases.
instruct cmovL_cmpN_reg_reg(mRegL dst, mRegL src, mRegN tmp1, mRegN tmp2, cmpOpU cop ) %{
  match(Set dst (CMoveL (Binary cop (CmpN tmp1 tmp2)) (Binary dst src)));
  ins_cost(80);
  format %{
     "CMPU$cop $tmp1,$tmp2\t @cmovL_cmpN_reg_reg\n\t"
     "CMOV $dst,$src\t @cmovL_cmpN_reg_reg"
  %}
  ins_encode %{
    Register op1 = $tmp1$$Register;
    Register op2 = $tmp2$$Register;
    Register dst = $dst$$Register;
    Register src = $src$$Register;
    int flag = $cop$$cmpcode;

    switch(flag)
    {
      case 0x01: //equal
        __ subu32(AT, op1, op2);
        __ movz(dst, src, AT);
        break;

      case 0x02: //not_equal
        __ subu32(AT, op1, op2);
        __ movn(dst, src, AT);
        break;

      case 0x03: //above
        __ sltu(AT, op2, op1);
        __ movn(dst, src, AT);
        break;

      case 0x04: //above_equal
        __ sltu(AT, op1, op2);
        __ movz(dst, src, AT);
        break;

      case 0x05: //below
        __ sltu(AT, op1, op2);
        __ movn(dst, src, AT);
        break;

      case 0x06: //below_equal
        __ sltu(AT, op2, op1);
        __ movz(dst, src, AT);
        break;

      default:
        Unimplemented();
    }
  %}

  ins_pipe( pipe_cmov_reg );
%}
// Conditional move of a long register guarded by a double compare.
// Same branch-over-the-move pattern as cmovI_cmpD_reg_reg (bc1f/bc1t on
// the FPU flag set by a c.cond.d predicate).
// NOTE(review): as in cmovI_cmpD_reg_reg, the less/less_equal cases use
// unordered predicates (c_ult/c_ule) so NaN operands take the move there
// -- verify against branchConD_reg_reg.
instruct cmovL_cmpD_reg_reg(mRegL dst, mRegL src, regD tmp1, regD tmp2, cmpOp cop ) %{
  match(Set dst (CMoveL (Binary cop (CmpD tmp1 tmp2)) (Binary dst src)));
  ins_cost(200);
  format %{
     "CMP$cop $tmp1, $tmp2\t @cmovL_cmpD_reg_reg\n"
     "\tCMOV $dst,$src \t @cmovL_cmpD_reg_reg"
  %}
  ins_encode %{
    FloatRegister reg_op1 = as_FloatRegister($tmp1$$reg);
    FloatRegister reg_op2 = as_FloatRegister($tmp2$$reg);
    Register dst = as_Register($dst$$reg);
    Register src = as_Register($src$$reg);

    int flag = $cop$$cmpcode;

    Label L;

    switch(flag)
    {
      case 0x01: //equal
        __ c_eq_d(reg_op1, reg_op2);
        __ bc1f(L);
        __ nop();
        __ move(dst, src);
        __ bind(L);
        break;
      case 0x02: //not_equal
        __ c_eq_d(reg_op1, reg_op2);
        __ bc1t(L);
        __ nop();
        __ move(dst, src);
        __ bind(L);
        break;
      case 0x03: //greater
        __ c_ole_d(reg_op1, reg_op2);
        __ bc1t(L);
        __ nop();
        __ move(dst, src);
        __ bind(L);
        break;
      case 0x04: //greater_equal
        __ c_olt_d(reg_op1, reg_op2);
        __ bc1t(L);
        __ nop();
        __ move(dst, src);
        __ bind(L);
        break;
      case 0x05: //less
        __ c_ult_d(reg_op1, reg_op2);
        __ bc1f(L);
        __ nop();
        __ move(dst, src);
        __ bind(L);
        break;
      case 0x06: //less_equal
        __ c_ule_d(reg_op1, reg_op2);
        __ bc1f(L);
        __ nop();
        __ move(dst, src);
        __ bind(L);
        break;
      default:
        Unimplemented();
    }
  %}

  ins_pipe( pipe_cmov_reg );
%}
// Conditional move of a double register guarded by a double compare.
// Branch-over-the-move pattern on the FPU condition flag; the move is a
// register-to-register mov_d.
instruct cmovD_cmpD_reg_reg(regD dst, regD src, regD tmp1, regD tmp2, cmpOp cop ) %{
  match(Set dst (CMoveD (Binary cop (CmpD tmp1 tmp2)) (Binary dst src)));
  ins_cost(200);
  format %{
     "CMP$cop $tmp1, $tmp2\t @cmovD_cmpD_reg_reg\n"
     "\tCMOV $dst,$src \t @cmovD_cmpD_reg_reg"
  %}
  ins_encode %{
    FloatRegister reg_op1 = as_FloatRegister($tmp1$$reg);
    FloatRegister reg_op2 = as_FloatRegister($tmp2$$reg);
    FloatRegister dst = as_FloatRegister($dst$$reg);
    FloatRegister src = as_FloatRegister($src$$reg);

    int flag = $cop$$cmpcode;

    Label L;

    switch(flag)
    {
      case 0x01: //equal
        __ c_eq_d(reg_op1, reg_op2);
        __ bc1f(L);
        __ nop();
        __ mov_d(dst, src);
        __ bind(L);
        break;
      case 0x02: //not_equal
        //2016/4/19 aoqi: See instruct branchConD_reg_reg. The change in branchConD_reg_reg fixed a bug. It seems similar here, so I made the same change.
        __ c_eq_d(reg_op1, reg_op2);
        __ bc1t(L);
        __ nop();
        __ mov_d(dst, src);
        __ bind(L);
        break;
      case 0x03: //greater
        __ c_ole_d(reg_op1, reg_op2);
        __ bc1t(L);
        __ nop();
        __ mov_d(dst, src);
        __ bind(L);
        break;
      case 0x04: //greater_equal
        __ c_olt_d(reg_op1, reg_op2);
        __ bc1t(L);
        __ nop();
        __ mov_d(dst, src);
        __ bind(L);
        break;
      case 0x05: //less
        __ c_ult_d(reg_op1, reg_op2);
        __ bc1f(L);
        __ nop();
        __ mov_d(dst, src);
        __ bind(L);
        break;
      case 0x06: //less_equal
        __ c_ule_d(reg_op1, reg_op2);
        __ bc1f(L);
        __ nop();
        __ mov_d(dst, src);
        __ bind(L);
        break;
      default:
        Unimplemented();
    }
  %}

  ins_pipe( pipe_cmov_reg );
%}
// Conditional move of a float register guarded by a signed int compare.
// GPR compare (beq/bne or slt + branch) jumps over the mov_s when the
// condition is false.  ("great" in the old case labels meant "greater".)
instruct cmovF_cmpI_reg_reg(regF dst, regF src, mRegI tmp1, mRegI tmp2, cmpOp cop ) %{
  match(Set dst (CMoveF (Binary cop (CmpI tmp1 tmp2)) (Binary dst src)));
  ins_cost(200);
  format %{
     "CMP$cop $tmp1, $tmp2\t @cmovF_cmpI_reg_reg\n"
     "\tCMOV $dst, $src \t @cmovF_cmpI_reg_reg"
  %}

  ins_encode %{
    Register op1 = $tmp1$$Register;
    Register op2 = $tmp2$$Register;
    FloatRegister dst = as_FloatRegister($dst$$reg);
    FloatRegister src = as_FloatRegister($src$$reg);
    int flag = $cop$$cmpcode;
    Label L;

    switch(flag)
    {
      case 0x01: //equal
        __ bne(op1, op2, L);
        __ nop();
        __ mov_s(dst, src);
        __ bind(L);
        break;
      case 0x02: //not_equal
        __ beq(op1, op2, L);
        __ nop();
        __ mov_s(dst, src);
        __ bind(L);
        break;
      case 0x03: //greater
        __ slt(AT, op2, op1);
        __ beq(AT, R0, L);
        __ nop();
        __ mov_s(dst, src);
        __ bind(L);
        break;
      case 0x04: //greater_equal
        __ slt(AT, op1, op2);
        __ bne(AT, R0, L);
        __ nop();
        __ mov_s(dst, src);
        __ bind(L);
        break;
      case 0x05: //less
        __ slt(AT, op1, op2);
        __ beq(AT, R0, L);
        __ nop();
        __ mov_s(dst, src);
        __ bind(L);
        break;
      case 0x06: //less_equal
        __ slt(AT, op2, op1);
        __ bne(AT, R0, L);
        __ nop();
        __ mov_s(dst, src);
        __ bind(L);
        break;
      default:
        Unimplemented();
    }
  %}
  ins_pipe( pipe_slow );
%}
// Conditional move of a double register guarded by a signed int compare.
// Same branch-over-mov pattern as cmovF_cmpI_reg_reg but with mov_d.
instruct cmovD_cmpI_reg_reg(regD dst, regD src, mRegI tmp1, mRegI tmp2, cmpOp cop ) %{
  match(Set dst (CMoveD (Binary cop (CmpI tmp1 tmp2)) (Binary dst src)));
  ins_cost(200);
  format %{
     "CMP$cop $tmp1, $tmp2\t @cmovD_cmpI_reg_reg\n"
     "\tCMOV $dst, $src \t @cmovD_cmpI_reg_reg"
  %}

  ins_encode %{
    Register op1 = $tmp1$$Register;
    Register op2 = $tmp2$$Register;
    FloatRegister dst = as_FloatRegister($dst$$reg);
    FloatRegister src = as_FloatRegister($src$$reg);
    int flag = $cop$$cmpcode;
    Label L;

    switch(flag)
    {
      case 0x01: //equal
        __ bne(op1, op2, L);
        __ nop();
        __ mov_d(dst, src);
        __ bind(L);
        break;
      case 0x02: //not_equal
        __ beq(op1, op2, L);
        __ nop();
        __ mov_d(dst, src);
        __ bind(L);
        break;
      case 0x03: //greater
        __ slt(AT, op2, op1);
        __ beq(AT, R0, L);
        __ nop();
        __ mov_d(dst, src);
        __ bind(L);
        break;
      case 0x04: //greater_equal
        __ slt(AT, op1, op2);
        __ bne(AT, R0, L);
        __ nop();
        __ mov_d(dst, src);
        __ bind(L);
        break;
      case 0x05: //less
        __ slt(AT, op1, op2);
        __ beq(AT, R0, L);
        __ nop();
        __ mov_d(dst, src);
        __ bind(L);
        break;
      case 0x06: //less_equal
        __ slt(AT, op2, op1);
        __ bne(AT, R0, L);
        __ nop();
        __ mov_d(dst, src);
        __ bind(L);
        break;
      default:
        Unimplemented();
    }
  %}
  ins_pipe( pipe_slow );
%}
// Conditional move of a double register guarded by a pointer compare.
// NOTE(review): the ordered cases use signed slt with a signed cmpOp,
// but pointer ordering is normally unsigned (sltu/cmpOpU) -- presumably
// the matcher only produces eq/ne for CmpP here; confirm.
instruct cmovD_cmpP_reg_reg(regD dst, regD src, mRegP tmp1, mRegP tmp2, cmpOp cop ) %{
  match(Set dst (CMoveD (Binary cop (CmpP tmp1 tmp2)) (Binary dst src)));
  ins_cost(200);
  format %{
     "CMP$cop $tmp1, $tmp2\t @cmovD_cmpP_reg_reg\n"
     "\tCMOV $dst, $src \t @cmovD_cmpP_reg_reg"
  %}

  ins_encode %{
    Register op1 = $tmp1$$Register;
    Register op2 = $tmp2$$Register;
    FloatRegister dst = as_FloatRegister($dst$$reg);
    FloatRegister src = as_FloatRegister($src$$reg);
    int flag = $cop$$cmpcode;
    Label L;

    switch(flag)
    {
      case 0x01: //equal
        __ bne(op1, op2, L);
        __ nop();
        __ mov_d(dst, src);
        __ bind(L);
        break;
      case 0x02: //not_equal
        __ beq(op1, op2, L);
        __ nop();
        __ mov_d(dst, src);
        __ bind(L);
        break;
      case 0x03: //greater
        __ slt(AT, op2, op1);
        __ beq(AT, R0, L);
        __ nop();
        __ mov_d(dst, src);
        __ bind(L);
        break;
      case 0x04: //greater_equal
        __ slt(AT, op1, op2);
        __ bne(AT, R0, L);
        __ nop();
        __ mov_d(dst, src);
        __ bind(L);
        break;
      case 0x05: //less
        __ slt(AT, op1, op2);
        __ beq(AT, R0, L);
        __ nop();
        __ mov_d(dst, src);
        __ bind(L);
        break;
      case 0x06: //less_equal
        __ slt(AT, op2, op1);
        __ bne(AT, R0, L);
        __ nop();
        __ mov_d(dst, src);
        __ bind(L);
        break;
      default:
        Unimplemented();
    }
  %}
  ins_pipe( pipe_slow );
%}
7960 //FIXME
// Conditional move of an int register guarded by a float compare.
// Branch-over-the-move on the FPU condition flag, single precision.
// NOTE(review): the "equal" case uses c_ueq_s (unordered-or-equal) while
// the double variant cmovI_cmpD_reg_reg uses the ordered c_eq_d -- with
// c_ueq_s a NaN operand takes the move.  Confirm which is intended (this
// may be the reason for the FIXME above).
instruct cmovI_cmpF_reg_reg(mRegI dst, mRegI src, regF tmp1, regF tmp2, cmpOp cop ) %{
  match(Set dst (CMoveI (Binary cop (CmpF tmp1 tmp2)) (Binary dst src)));
  ins_cost(200);
  format %{
     "CMP$cop $tmp1, $tmp2\t @cmovI_cmpF_reg_reg\n"
     "\tCMOV $dst,$src \t @cmovI_cmpF_reg_reg"
  %}

  ins_encode %{
    FloatRegister reg_op1 = $tmp1$$FloatRegister;
    FloatRegister reg_op2 = $tmp2$$FloatRegister;
    Register dst = $dst$$Register;
    Register src = $src$$Register;
    Label L;
    int flag = $cop$$cmpcode;

    switch(flag)
    {
      case 0x01: //equal
        __ c_ueq_s(reg_op1, reg_op2);
        __ bc1f(L);
        __ nop();
        __ move(dst, src);
        __ bind(L);
        break;
      case 0x02: //not_equal
        __ c_eq_s(reg_op1, reg_op2);
        __ bc1t(L);
        __ nop();
        __ move(dst, src);
        __ bind(L);
        break;
      case 0x03: //greater
        __ c_ole_s(reg_op1, reg_op2);
        __ bc1t(L);
        __ nop();
        __ move(dst, src);
        __ bind(L);
        break;
      case 0x04: //greater_equal
        __ c_olt_s(reg_op1, reg_op2);
        __ bc1t(L);
        __ nop();
        __ move(dst, src);
        __ bind(L);
        break;
      case 0x05: //less
        __ c_ult_s(reg_op1, reg_op2);
        __ bc1f(L);
        __ nop();
        __ move(dst, src);
        __ bind(L);
        break;
      case 0x06: //less_equal
        __ c_ule_s(reg_op1, reg_op2);
        __ bc1f(L);
        __ nop();
        __ move(dst, src);
        __ bind(L);
        break;
      default:
        Unimplemented();
    }
  %}
  ins_pipe( pipe_slow );
%}
// Conditional move of a float register guarded by a float compare.
// Same predicate/branch structure as cmovI_cmpF_reg_reg with mov_s.
// NOTE(review): "equal" uses c_ueq_s (unordered-or-equal), so a NaN
// operand takes the move -- same question as in cmovI_cmpF_reg_reg.
instruct cmovF_cmpF_reg_reg(regF dst, regF src, regF tmp1, regF tmp2, cmpOp cop ) %{
  match(Set dst (CMoveF (Binary cop (CmpF tmp1 tmp2)) (Binary dst src)));
  ins_cost(200);
  format %{
     "CMP$cop $tmp1, $tmp2\t @cmovF_cmpF_reg_reg\n"
     "\tCMOV $dst,$src \t @cmovF_cmpF_reg_reg"
  %}

  ins_encode %{
    FloatRegister reg_op1 = $tmp1$$FloatRegister;
    FloatRegister reg_op2 = $tmp2$$FloatRegister;
    FloatRegister dst = $dst$$FloatRegister;
    FloatRegister src = $src$$FloatRegister;
    Label L;
    int flag = $cop$$cmpcode;

    switch(flag)
    {
      case 0x01: //equal
        __ c_ueq_s(reg_op1, reg_op2);
        __ bc1f(L);
        __ nop();
        __ mov_s(dst, src);
        __ bind(L);
        break;
      case 0x02: //not_equal
        __ c_eq_s(reg_op1, reg_op2);
        __ bc1t(L);
        __ nop();
        __ mov_s(dst, src);
        __ bind(L);
        break;
      case 0x03: //greater
        __ c_ole_s(reg_op1, reg_op2);
        __ bc1t(L);
        __ nop();
        __ mov_s(dst, src);
        __ bind(L);
        break;
      case 0x04: //greater_equal
        __ c_olt_s(reg_op1, reg_op2);
        __ bc1t(L);
        __ nop();
        __ mov_s(dst, src);
        __ bind(L);
        break;
      case 0x05: //less
        __ c_ult_s(reg_op1, reg_op2);
        __ bc1f(L);
        __ nop();
        __ mov_s(dst, src);
        __ bind(L);
        break;
      case 0x06: //less_equal
        __ c_ule_s(reg_op1, reg_op2);
        __ bc1f(L);
        __ nop();
        __ mov_s(dst, src);
        __ bind(L);
        break;
      default:
        Unimplemented();
    }
  %}
  ins_pipe( pipe_slow );
%}
8095 // Manifest a CmpL result in an integer register. Very painful.
8096 // This is the test to avoid.
8097 instruct cmpL3_reg_reg(mRegI dst, mRegL src1, mRegL src2) %{
8098 match(Set dst (CmpL3 src1 src2));
8099 ins_cost(1000);
8100 format %{ "cmpL3 $dst, $src1, $src2 @ cmpL3_reg_reg" %}
8101 ins_encode %{
8102 Register opr1 = as_Register($src1$$reg);
8103 Register opr2 = as_Register($src2$$reg);
8104 Register dst = as_Register($dst$$reg);
8106 Label p_one, done;
8108 __ subu(dst, opr1, opr2);
8110 __ beq(dst, R0, done);
8111 __ nop();
8113 __ bgtz(dst, done);
8114 __ delayed()->addiu32(dst, R0, 1);
8116 __ addiu32(dst, R0, -1);
8118 __ bind(done);
8119 %}
8120 ins_pipe( pipe_slow );
8121 %}
8123 //
8124 // less_rsult = 1
8125 // greater_result = -1
8126 // equal_result = 0
8127 // nan_result = -1
8128 //
instruct cmpF3_reg_reg(mRegI dst, regF src1, regF src2) %{
  match(Set dst (CmpF3 src1 src2));
  ins_cost(1000);
  format %{ "cmpF3 $dst, $src1, $src2 @ cmpF3_reg_reg" %}
  ins_encode %{
    FloatRegister src1 = as_FloatRegister($src1$$reg);
    FloatRegister src2 = as_FloatRegister($src2$$reg);
    Register dst = as_Register($dst$$reg);

    Label EQU, LESS, DONE;

    // Result convention (see the comment block above this instruct):
    //   src1 <  src2                       ->  1
    //   src1 == src2                       ->  0
    //   src1 >  src2 or unordered (NaN)    -> -1  (the preset default)
    // NOTE(review): this is inverted w.r.t. cmpL3_reg_reg (greater -> 1);
    // presumably the matcher swaps the operands for CmpF3 -- confirm.
    __ move(dst, -1);
    __ c_eq_s(src1, src2);
    __ bc1t(EQU);
    __ nop();
    __ c_olt_s(src1, src2);    // ordered less-than; false for NaN
    __ bc1t(LESS);
    __ nop();
    __ beq(R0, R0, DONE);      // unconditional branch: keep the -1 default
    __ nop();
    __ bind(EQU);
    __ move(dst, 0);
    __ beq(R0, R0, DONE);
    __ nop();
    __ bind(LESS);
    __ move(dst, 1);
    __ bind(DONE);
  %}
  ins_pipe( pipe_slow );
%}
instruct cmpD3_reg_reg(mRegI dst, regD src1, regD src2) %{
  match(Set dst (CmpD3 src1 src2));
  ins_cost(1000);
  format %{ "cmpD3 $dst, $src1, $src2 @ cmpD3_reg_reg" %}
  ins_encode %{
    FloatRegister src1 = as_FloatRegister($src1$$reg);
    FloatRegister src2 = as_FloatRegister($src2$$reg);
    Register dst = as_Register($dst$$reg);

    Label EQU, LESS, DONE;

    // Double-precision counterpart of cmpF3_reg_reg; same (inverted)
    // result convention: 1 for less, 0 for equal, -1 for greater or NaN.
    __ move(dst, -1);          // default: greater / unordered
    __ c_eq_d(src1, src2);
    __ bc1t(EQU);
    __ nop();
    __ c_olt_d(src1, src2);    // ordered less-than; false for NaN
    __ bc1t(LESS);
    __ nop();
    __ beq(R0, R0, DONE);      // unconditional branch
    __ nop();
    __ bind(EQU);
    __ move(dst, 0);
    __ beq(R0, R0, DONE);
    __ nop();
    __ bind(LESS);
    __ move(dst, 1);
    __ bind(DONE);
  %}
  ins_pipe( pipe_slow );
%}
instruct clear_array(mRegL cnt, mRegP base, Universe dummy) %{
  match(Set dummy (ClearArray cnt base));
  format %{ "CLEAR_ARRAY base = $base, cnt = $cnt # Clear doublewords" %}
  ins_encode %{
    // $cnt is the number of DOUBLEWORDS (8 bytes) to clear -- not bytes;
    // see the 2012/9/21 note below.  $base points at the start address.
    Register base = $base$$Register;
    Register num = $cnt$$Register;
    Label Loop, done;

    /* 2012/9/21 Jin: according to X86, $cnt is caculated by doublewords(8 bytes) */
    __ move(T9, num); /* T9 = words */
    __ beq(T9, R0, done);      // nothing to do for a zero count
    __ nop();
    __ move(AT, base);         // AT walks the memory being cleared

    // store-zero / advance / count-down loop, one doubleword per pass
    __ bind(Loop);
    __ sd(R0, Address(AT, 0));
    __ daddi(AT, AT, wordSize);
    __ daddi(T9, T9, -1);
    __ bne(T9, R0, Loop);
    __ delayed()->nop();
    __ bind(done);
  %}
  ins_pipe( pipe_slow );
%}
// Compare two char[] regions (str1/cnt1 vs str2/cnt2) and produce the
// usual StrComp result: first differing char difference, or the length
// difference if one string is a prefix of the other.
// NOTE(review): the str1 register is reused as a scratch register inside
// the loop (the base pointers live in tmp1/tmp2 by then); it is saved and
// restored around the loop only when it is not also the result register.
instruct string_compare(mRegP str1, mRegI cnt1, mRegP str2, mRegI cnt2, mRegI result) %{
  match(Set result (StrComp (Binary str1 cnt1) (Binary str2 cnt2)));

  format %{ "String Compare $str1[len: $cnt1], $str2[len: $cnt2] -> $result @ string_compare" %}
  ins_encode %{
    // Get the first character position in both strings
    // [8] char array, [12] offset, [16] count
    Register str1 = $str1$$Register;
    Register str2 = $str2$$Register;
    Register cnt1 = $cnt1$$Register;
    Register cnt2 = $cnt2$$Register;
    Register result = $result$$Register;

    Register tmp1 = T8;
    Register tmp2 = T9;
    Register tmp3 = S7;

    Label L, Loop, haveResult, LoopEnd, done;

    /* 2013/7/9 Jin: StrComp is totally redefined in OpenJDK 8 */

    // Let tmp1 point to the first character of str1 (if str1 is not empty).
    __ move(tmp1, str1);

    // Let tmp2 point to the first character of str2 (if str2 is not empty).
    __ move(tmp2, str2);

    // compute the shorter length (in tmp3) and difference of lengths (in result)
    __ move(tmp3, cnt1);
    __ slt(AT, cnt1, cnt2);
    __ bne(AT, R0, L);
    __ nop();
    __ move(tmp3, cnt2); //Now the shorter length is in tmp3.
    __ bind(L);

    // save str1 before clobbering it as a scratch register below
    if ($str1$$Register != $result$$Register) __ push(str1);
    __ subu(result, cnt1, cnt2);
    __ push(result); // result holds the difference of two lengths

    //Begin to compare str1 and str2.
    __ bind(Loop);
    __ beq(tmp3, R0, LoopEnd);   // common prefix exhausted
    __ nop();

    // compare current character (UTF-16, 2 bytes each)
    __ lhu(AT, tmp1, 0);
    __ lhu(str1, tmp2, 0);       // str1 is scratch from here on
    __ bne(AT, str1, haveResult);
    __ nop();
    __ addi(tmp1, tmp1, 2);
    __ addi(tmp2, tmp2, 2);
    __ b(Loop);
    __ delayed()->addi(tmp3, tmp3, -1);

    // equal up to the shorter length: result is the length difference
    __ bind(LoopEnd);
    __ pop(result);
    __ beq(R0, R0, done);
    __ nop();

    // found a differing character: result is its difference
    __ bind(haveResult);
    __ subu(result, AT, str1);
    __ pop(AT);                  // discard the saved length difference

    __ bind(done);
    if ($str1$$Register != $result$$Register) __ pop(str1);
  %}

  ins_pipe( pipe_slow );
%}
8288 //----------Arithmetic Instructions-------------------------------------------
8289 //----------Addition Instructions---------------------------------------------
8290 instruct addI_Reg_Reg(mRegI dst, mRegI src1, mRegI src2) %{
8291 match(Set dst (AddI src1 src2));
8293 format %{ "add $dst, $src1, $src2 #@addI_Reg_Reg" %}
8294 ins_encode %{
8295 Register dst = $dst$$Register;
8296 Register src1 = $src1$$Register;
8297 Register src2 = $src2$$Register;
8298 __ addu32(dst, src1, src2);
8299 %}
8300 ins_pipe( ialu_reg );
8301 %}
8303 instruct addI_Reg_imm(mRegI dst, mRegI src1, immI src2) %{
8304 match(Set dst (AddI src1 src2));
8306 format %{ "add $dst, $src1, $src2 #@addI_Reg_imm" %}
8307 ins_encode %{
8308 Register dst = $dst$$Register;
8309 Register src1 = $src1$$Register;
8310 int imm = $src2$$constant;
8312 if(Assembler::is_simm16(imm)) {
8313 __ addiu32(dst, src1, imm);
8314 } else {
8315 __ move(AT, imm);
8316 __ addu32(dst, src1, AT);
8317 }
8318 %}
8319 ins_pipe( ialu_reg );
8320 %}
8322 instruct addP_reg_reg(mRegP dst, mRegP src1, mRegL src2) %{
8323 match(Set dst (AddP src1 src2));
8325 format %{ "dadd $dst, $src1, $src2 #@addP_reg_reg" %}
8327 ins_encode %{
8328 Register dst = $dst$$Register;
8329 Register src1 = $src1$$Register;
8330 Register src2 = $src2$$Register;
8331 __ daddu(dst, src1, src2);
8332 %}
8334 ins_pipe( ialu_reg_reg );
8335 %}
8337 instruct addP_reg_imm(mRegP dst, mRegP src1, immL32 src2) %{
8338 match(Set dst (AddP src1 src2));
8339 // effect(KILL cr);
8341 format %{ "daddi $dst, $src1, $src2 #@addP_reg_imm" %}
8342 ins_encode %{
8343 Register src1 = $src1$$Register;
8344 long src2 = $src2$$constant;
8345 Register dst = $dst$$Register;
8347 if(Assembler::is_simm16(src2)) {
8348 __ daddiu(dst, src1, src2);
8349 } else {
8350 __ li(AT, src2);
8351 __ daddu(dst, src1, AT);
8352 }
8353 %}
8354 ins_pipe( ialu_reg_imm );
8355 %}
8357 // Add Long Register with Register
8358 instruct addL_Reg_Reg(mRegL dst, mRegL src1, mRegL src2) %{
8359 match(Set dst (AddL src1 src2));
8360 ins_cost(200);
8361 format %{ "ADD $dst, $src1, $src2 #@addL_Reg_Reg\t" %}
8363 ins_encode %{
8364 Register dst_reg = as_Register($dst$$reg);
8365 Register src1_reg = as_Register($src1$$reg);
8366 Register src2_reg = as_Register($src2$$reg);
8368 __ daddu(dst_reg, src1_reg, src2_reg);
8369 %}
8371 ins_pipe( ialu_reg_reg_long );
8372 %}
8374 //----------Subtraction Instructions-------------------------------------------
8375 // Integer Subtraction Instructions
8376 instruct subI_Reg_Reg(mRegI dst, mRegI src1, mRegI src2) %{
8377 match(Set dst (SubI src1 src2));
8379 format %{ "sub $dst, $src1, $src2 #@subI_Reg_Reg" %}
8380 ins_encode %{
8381 Register dst = $dst$$Register;
8382 Register src1 = $src1$$Register;
8383 Register src2 = $src2$$Register;
8384 __ subu32(dst, src1, src2);
8385 %}
8386 ins_pipe( ialu_reg );
8387 %}
8389 instruct subI_Reg_imm(mRegI dst, mRegI src1, immI src2) %{
8390 match(Set dst (SubI src1 src2));
8392 format %{ "sub $dst, $src1, $src2 #@subI_Reg_imm" %}
8393 ins_encode %{
8394 Register dst = $dst$$Register;
8395 Register src1 = $src1$$Register;
8396 __ move(AT, -1 * $src2$$constant);
8397 __ addu32(dst, src1, AT);
8398 %}
8399 ins_pipe( ialu_reg );
8400 %}
8402 // Subtract Long Register with Register.
8403 instruct subL_Reg_Reg(mRegL dst, mRegL src1, mRegL src2) %{
8404 match(Set dst (SubL src1 src2));
8405 ins_cost(200);
8406 format %{ "SubL $dst, $src1, $src2 @ subL_Reg_Reg" %}
8407 ins_encode %{
8408 Register dst = as_Register($dst$$reg);
8409 Register src1 = as_Register($src1$$reg);
8410 Register src2 = as_Register($src2$$reg);
8412 __ subu(dst, src1, src2);
8413 %}
8414 ins_pipe( ialu_reg_reg_long );
8415 %}
8417 // Integer MOD with Register
// Signed 32-bit remainder: div writes quotient/remainder to LO/HI and
// the remainder is read back with mfhi.
// NOTE(review): the two nops presumably cover a div-to-mfhi hazard on
// the target core, and unlike divI_Reg_Reg there is no teq-based
// zero-divisor trap here -- confirm both against the core manual and
// the division-by-zero handling elsewhere in the port.
instruct modI_Reg_Reg(mRegI dst, mRegI src1, mRegI src2) %{
  match(Set dst (ModI src1 src2));
  ins_cost(300);
  format %{ "modi $dst, $src1, $src2 @ modI_Reg_Reg" %}
  ins_encode %{
    Register dst = $dst$$Register;
    Register src1 = $src1$$Register;
    Register src2 = $src2$$Register;

    __ div(src1, src2);
    __ nop();
    __ nop();
    __ mfhi(dst);      // remainder lives in HI
  %}

  //ins_pipe( ialu_reg_reg_alu0 );
  ins_pipe( ialu_reg );
%}
// Signed 64-bit remainder: ddiv, then read the remainder from HI.
instruct modL_reg_reg(mRegL dst, mRegL src1, mRegL src2) %{
  match(Set dst (ModL src1 src2));
  format %{ "modL $dst, $src1, $src2 @modL_reg_reg" %}

  ins_encode %{
    Register dst = as_Register($dst$$reg);
    Register op1 = as_Register($src1$$reg);
    Register op2 = as_Register($src2$$reg);

    // NOTE(review): modI_Reg_Reg inserts two nops between div and mfhi
    // but none are emitted here after ddiv -- confirm the target core
    // has no ddiv-to-mfhi hazard.
    __ ddiv(op1, op2);
    __ mfhi(dst);
  %}
  ins_pipe( pipe_slow );
%}
8451 instruct mulI_Reg_Reg(mRegI dst, mRegI src1, mRegI src2) %{
8452 match(Set dst (MulI src1 src2));
8454 ins_cost(300);
8455 format %{ "mul $dst, $src1, $src2 @ mulI_Reg_Reg" %}
8456 ins_encode %{
8457 Register src1 = $src1$$Register;
8458 Register src2 = $src2$$Register;
8459 Register dst = $dst$$Register;
8461 __ mult(src1, src2);
8462 __ nop();
8463 __ nop();
8464 __ mflo(dst);
8465 __ sll(dst, dst, 0);
8466 %}
8467 ins_pipe( ialu_reg_reg_alu0 );
8468 %}
// Signed 32-bit divide; quotient is read back from LO.
instruct divI_Reg_Reg(mRegI dst, mRegI src1, mRegI src2) %{
  match(Set dst (DivI src1 src2));

  ins_cost(300);
  format %{ "div $dst, $src1, $src2 @ divI_Reg_Reg" %}
  ins_encode %{
    Register src1 = $src1$$Register;
    Register src2 = $src2$$Register;
    Register dst = $dst$$Register;

    /* 2012/4/21 Jin: In MIPS, div does not cause exception.
       We must trap an exception manually. */
    __ div(src1, src2);

    // teq traps (code 0x7) when src2 == R0, i.e. on a zero divisor,
    // standing in for the implicit divide-by-zero fault of other ISAs.
    __ teq(R0, src2, 0x7);
    __ nop();
    __ nop();

    __ mflo(dst);    // quotient is delivered in LO
  %}
  ins_pipe( ialu_reg_reg_alu0 );
%}
8492 instruct divF_Reg_Reg(regF dst, regF src1, regF src2) %{
8493 match(Set dst (DivF src1 src2));
8495 ins_cost(300);
8496 format %{ "divF $dst, $src1, $src2 @ divF_Reg_Reg" %}
8497 ins_encode %{
8498 FloatRegister src1 = $src1$$FloatRegister;
8499 FloatRegister src2 = $src2$$FloatRegister;
8500 FloatRegister dst = $dst$$FloatRegister;
8502 /* Here do we need to trap an exception manually ? */
8503 __ div_s(dst, src1, src2);
8504 %}
8505 ins_pipe( pipe_slow );
8506 %}
8508 instruct divD_Reg_Reg(regD dst, regD src1, regD src2) %{
8509 match(Set dst (DivD src1 src2));
8511 ins_cost(300);
8512 format %{ "divD $dst, $src1, $src2 @ divD_Reg_Reg" %}
8513 ins_encode %{
8514 FloatRegister src1 = $src1$$FloatRegister;
8515 FloatRegister src2 = $src2$$FloatRegister;
8516 FloatRegister dst = $dst$$FloatRegister;
8518 /* Here do we need to trap an exception manually ? */
8519 __ div_d(dst, src1, src2);
8520 %}
8521 ins_pipe( pipe_slow );
8522 %}
8524 instruct divF_Reg_immF(regF dst, regF src1, immF src2, regF tmp) %{
8525 match(Set dst (DivF src1 src2));
8526 effect(TEMP tmp);
8528 ins_cost(300);
8529 format %{ "divF $dst, $src1, $src2 [tmp = $tmp] @ divF_Reg_immF" %}
8530 ins_encode %{
8531 FloatRegister src1 = $src1$$FloatRegister;
8532 FloatRegister tmp = $tmp$$FloatRegister;
8533 FloatRegister dst = $dst$$FloatRegister;
8535 jfloat jf = $src2$$constant;
8536 address const_addr = __ float_constant(jf);
8537 assert (const_addr != NULL, "must create float constant in the constant table");
8539 __ relocate(relocInfo::internal_pc_type);
8540 __ li(AT, const_addr);
8541 __ lwc1(tmp, AT, 0);
8543 __ div_s(dst, src1, tmp);
8544 %}
8545 ins_pipe( pipe_slow );
8546 %}
// 64-bit integer multiply.
instruct mulL_reg_reg(mRegL dst, mRegL src1, mRegL src2) %{
  match(Set dst (MulL src1 src2));
  format %{ "mulL $dst, $src1, $src2 @mulL_reg_reg" %}
  ins_encode %{
    Register dst = as_Register($dst$$reg);
    Register op1 = as_Register($src1$$reg);
    Register op2 = as_Register($src2$$reg);

    // dmult produces a 128-bit product in HI:LO; Java long multiply
    // keeps only the low 64 bits, fetched from LO.
    __ dmult(op1, op2);
    __ mflo(dst);
  %}
  ins_pipe( pipe_slow );
%}
// 64-bit integer divide.
instruct divL_reg_reg(mRegL dst, mRegL src1, mRegL src2) %{
  match(Set dst (DivL src1 src2));
  format %{ "divL $dst, $src1, $src2 @divL_reg_reg" %}

  ins_encode %{
    Register dst = as_Register($dst$$reg);
    Register op1 = as_Register($src1$$reg);
    Register op2 = as_Register($src2$$reg);

    // Quotient is left in LO.
    // NOTE(review): divI_Reg_Reg emits a teq divide-by-zero trap, but the
    // long variant does not -- confirm the zero-divisor check for DivL is
    // generated elsewhere, otherwise division by zero silently yields an
    // undefined result here.
    __ ddiv(op1, op2);
    __ mflo(dst);
  %}
  ins_pipe( pipe_slow );
%}
8578 instruct addF_reg_reg(regF dst, regF src1, regF src2) %{
8579 match(Set dst (AddF src1 src2));
8580 format %{ "AddF $dst, $src1, $src2 @addF_reg_reg" %}
8581 ins_encode %{
8582 FloatRegister src1 = as_FloatRegister($src1$$reg);
8583 FloatRegister src2 = as_FloatRegister($src2$$reg);
8584 FloatRegister dst = as_FloatRegister($dst$$reg);
8586 __ add_s(dst, src1, src2);
8587 %}
8588 ins_pipe( fpu_reg_reg );
8589 %}
8591 instruct subF_reg_reg(regF dst, regF src1, regF src2) %{
8592 match(Set dst (SubF src1 src2));
8593 format %{ "SubF $dst, $src1, $src2 @subF_reg_reg" %}
8594 ins_encode %{
8595 FloatRegister src1 = as_FloatRegister($src1$$reg);
8596 FloatRegister src2 = as_FloatRegister($src2$$reg);
8597 FloatRegister dst = as_FloatRegister($dst$$reg);
8599 __ sub_s(dst, src1, src2);
8600 %}
8601 ins_pipe( fpu_reg_reg );
8602 %}
8603 instruct addD_reg_reg(regD dst, regD src1, regD src2) %{
8604 match(Set dst (AddD src1 src2));
8605 format %{ "AddD $dst, $src1, $src2 @addD_reg_reg" %}
8606 ins_encode %{
8607 FloatRegister src1 = as_FloatRegister($src1$$reg);
8608 FloatRegister src2 = as_FloatRegister($src2$$reg);
8609 FloatRegister dst = as_FloatRegister($dst$$reg);
8611 __ add_d(dst, src1, src2);
8612 %}
8613 ins_pipe( fpu_reg_reg );
8614 %}
8616 instruct subD_reg_reg(regD dst, regD src1, regD src2) %{
8617 match(Set dst (SubD src1 src2));
8618 format %{ "SubD $dst, $src1, $src2 @subD_reg_reg" %}
8619 ins_encode %{
8620 FloatRegister src1 = as_FloatRegister($src1$$reg);
8621 FloatRegister src2 = as_FloatRegister($src2$$reg);
8622 FloatRegister dst = as_FloatRegister($dst$$reg);
8624 __ sub_d(dst, src1, src2);
8625 %}
8626 ins_pipe( fpu_reg_reg );
8627 %}
8629 instruct negF_reg(regF dst, regF src) %{
8630 match(Set dst (NegF src));
8631 format %{ "negF $dst, $src @negF_reg" %}
8632 ins_encode %{
8633 FloatRegister src = as_FloatRegister($src$$reg);
8634 FloatRegister dst = as_FloatRegister($dst$$reg);
8636 __ neg_s(dst, src);
8637 %}
8638 ins_pipe( fpu_reg_reg );
8639 %}
8641 instruct negD_reg(regD dst, regD src) %{
8642 match(Set dst (NegD src));
8643 format %{ "negD $dst, $src @negD_reg" %}
8644 ins_encode %{
8645 FloatRegister src = as_FloatRegister($src$$reg);
8646 FloatRegister dst = as_FloatRegister($dst$$reg);
8648 __ neg_d(dst, src);
8649 %}
8650 ins_pipe( fpu_reg_reg );
8651 %}
8654 instruct mulF_reg_reg(regF dst, regF src1, regF src2) %{
8655 match(Set dst (MulF src1 src2));
8656 format %{ "MULF $dst, $src1, $src2 @mulF_reg_reg" %}
8657 ins_encode %{
8658 FloatRegister src1 = $src1$$FloatRegister;
8659 FloatRegister src2 = $src2$$FloatRegister;
8660 FloatRegister dst = $dst$$FloatRegister;
8662 __ mul_s(dst, src1, src2);
8663 %}
8664 ins_pipe( fpu_reg_reg );
8665 %}
// Mul two double precision floating point number
8668 instruct mulD_reg_reg(regD dst, regD src1, regD src2) %{
8669 match(Set dst (MulD src1 src2));
8670 format %{ "MULD $dst, $src1, $src2 @mulD_reg_reg" %}
8671 ins_encode %{
8672 FloatRegister src1 = $src1$$FloatRegister;
8673 FloatRegister src2 = $src2$$FloatRegister;
8674 FloatRegister dst = $dst$$FloatRegister;
8676 __ mul_d(dst, src1, src2);
8677 %}
8678 ins_pipe( fpu_reg_reg );
8679 %}
8681 instruct absF_reg(regF dst, regF src) %{
8682 match(Set dst (AbsF src));
8683 ins_cost(100);
8684 format %{ "absF $dst, $src @absF_reg" %}
8685 ins_encode %{
8686 FloatRegister src = as_FloatRegister($src$$reg);
8687 FloatRegister dst = as_FloatRegister($dst$$reg);
8689 __ abs_s(dst, src);
8690 %}
8691 ins_pipe( fpu_reg_reg );
8692 %}
8695 // intrinsics for math_native.
8696 // AbsD SqrtD CosD SinD TanD LogD Log10D
8698 instruct absD_reg(regD dst, regD src) %{
8699 match(Set dst (AbsD src));
8700 ins_cost(100);
8701 format %{ "absD $dst, $src @absD_reg" %}
8702 ins_encode %{
8703 FloatRegister src = as_FloatRegister($src$$reg);
8704 FloatRegister dst = as_FloatRegister($dst$$reg);
8706 __ abs_d(dst, src);
8707 %}
8708 ins_pipe( fpu_reg_reg );
8709 %}
8711 instruct sqrtD_reg(regD dst, regD src) %{
8712 match(Set dst (SqrtD src));
8713 ins_cost(100);
8714 format %{ "SqrtD $dst, $src @sqrtD_reg" %}
8715 ins_encode %{
8716 FloatRegister src = as_FloatRegister($src$$reg);
8717 FloatRegister dst = as_FloatRegister($dst$$reg);
8719 __ sqrt_d(dst, src);
8720 %}
8721 ins_pipe( fpu_reg_reg );
8722 %}
8724 //----------------------------------Logical Instructions----------------------
8725 //__________________________________Integer Logical Instructions-------------
//And Instructions
8728 // And Register with Immediate
8729 instruct andI_Reg_imm(mRegI dst, mRegI src1, immI src2) %{
8730 match(Set dst (AndI src1 src2));
8732 format %{ "and $dst, $src1, $src2 #@andI_Reg_imm" %}
8733 ins_encode %{
8734 Register dst = $dst$$Register;
8735 Register src = $src1$$Register;
8736 int val = $src2$$constant;
8738 __ move(AT, val);
8739 __ andr(dst, src, AT);
8740 %}
8741 ins_pipe( ialu_reg );
8742 %}
8744 instruct andI_Reg_imm_0_65535(mRegI dst, mRegI src1, immI_0_65535 src2) %{
8745 match(Set dst (AndI src1 src2));
8746 ins_cost(60);
8748 format %{ "and $dst, $src1, $src2 #@andI_Reg_imm_0_65535" %}
8749 ins_encode %{
8750 Register dst = $dst$$Register;
8751 Register src = $src1$$Register;
8752 int val = $src2$$constant;
8754 __ andi(dst, src, val);
8755 %}
8756 ins_pipe( ialu_reg );
8757 %}
8759 instruct andI_Reg_Reg(mRegI dst, mRegI src1, mRegI src2) %{
8760 match(Set dst (AndI src1 src2));
8762 format %{ "and $dst, $src1, $src2 #@andI_Reg_Reg" %}
8763 ins_encode %{
8764 Register dst = $dst$$Register;
8765 Register src1 = $src1$$Register;
8766 Register src2 = $src2$$Register;
8767 __ andr(dst, src1, src2);
8768 %}
8769 ins_pipe( ialu_reg );
8770 %}
8772 // And Long Register with Register
8773 instruct andL_Reg_Reg(mRegL dst, mRegL src1, mRegL src2) %{
8774 match(Set dst (AndL src1 src2));
8775 format %{ "AND $dst, $src1, $src2 @ andL_Reg_Reg\n\t" %}
8776 ins_encode %{
8777 Register dst_reg = as_Register($dst$$reg);
8778 Register src1_reg = as_Register($src1$$reg);
8779 Register src2_reg = as_Register($src2$$reg);
8781 __ andr(dst_reg, src1_reg, src2_reg);
8782 %}
8783 ins_pipe( ialu_reg_reg_long );
8784 %}
8786 // Or Long Register with Register
8787 instruct orL_Reg_Reg(mRegL dst, mRegL src1, mRegL src2) %{
8788 match(Set dst (OrL src1 src2));
8789 format %{ "OR $dst, $src1, $src2 @ orL_Reg_Reg\t" %}
8790 ins_encode %{
8791 Register dst_reg = $dst$$Register;
8792 Register src1_reg = $src1$$Register;
8793 Register src2_reg = $src2$$Register;
8795 __ orr(dst_reg, src1_reg, src2_reg);
8796 %}
8797 ins_pipe( ialu_reg_reg_long );
8798 %}
8800 // Xor Long Register with Register
8801 instruct xorL_Reg_Reg(mRegL dst, mRegL src1, mRegL src2) %{
8802 match(Set dst (XorL src1 src2));
8803 format %{ "XOR $dst, $src1, $src2 @ xorL_Reg_Reg\t" %}
8804 ins_encode %{
8805 Register dst_reg = as_Register($dst$$reg);
8806 Register src1_reg = as_Register($src1$$reg);
8807 Register src2_reg = as_Register($src2$$reg);
8809 __ xorr(dst_reg, src1_reg, src2_reg);
8810 %}
8811 ins_pipe( ialu_reg_reg_long );
8812 %}
8814 // Shift Left by 8-bit immediate
8815 instruct salI_Reg_imm(mRegI dst, mRegI src, immI8 shift) %{
8816 match(Set dst (LShiftI src shift));
8818 format %{ "SHL $dst, $src, $shift #@salI_Reg_imm" %}
8819 ins_encode %{
8820 Register src = $src$$Register;
8821 Register dst = $dst$$Register;
8822 int shamt = $shift$$constant;
8824 /*
8825 094 SHL S0, S0, #-7 #@salI_Reg_imm
8826 static int insn_RRSO(int rt, int rd, int sa, int op) { return (rt<<16) | (rd<<11) | (sa<<6) | op; }
8827 void sll (Register rd, Register rt , int sa) {
8828 emit_long(insn_RRSO((int)rt->encoding(), (int)rd->encoding(), sa, sll_op));
8829 }
8830 */
8832 if(0 <= shamt && shamt < 32) __ sll(dst, src, shamt);
8833 else {
8834 __ move(AT, shamt);
8835 __ sllv(dst, src, AT);
8836 }
8837 %}
8838 ins_pipe( ialu_reg );
8839 %}
8841 // Shift Left by 8-bit immediate
8842 instruct salI_Reg_Reg(mRegI dst, mRegI src, mRegI shift) %{
8843 match(Set dst (LShiftI src shift));
8845 format %{ "SHL $dst, $src, $shift #@salI_Reg_Reg" %}
8846 ins_encode %{
8847 Register src = $src$$Register;
8848 Register dst = $dst$$Register;
8849 Register shamt = $shift$$Register;
8850 __ sllv(dst, src, shamt);
8851 %}
8852 ins_pipe( ialu_reg );
8853 %}
8856 // Shift Left Long
// Shift Left Long by immediate.
instruct salL_Reg_imm(mRegL dst, mRegL src, immI8 shift) %{
  //predicate(UseNewLongLShift);
  match(Set dst (LShiftL src shift));
  ins_cost(100);
  format %{ "salL $dst, $src, $shift @ salL_Reg_imm" %}
  ins_encode %{
    Register src_reg = as_Register($src$$reg);
    Register dst_reg = as_Register($dst$$reg);
    int      shamt = $shift$$constant;

    // dsll encodes a 5-bit shift amount directly; larger (or negative)
    // amounts go through a register and dsllv, which uses the low 6 bits
    // of the count.
    // NOTE(review): is_simm(shamt, 5) is a *signed* 5-bit test (-16..15),
    // so constants in 16..31 take the slower register path even though
    // dsll could encode them -- presumably conservative, verify intent.
    if (__ is_simm(shamt, 5))
      __ dsll(dst_reg, src_reg, shamt);
    else
    {
      __ move(AT, shamt);
      __ dsllv(dst_reg, src_reg, AT);
    }
  %}
  ins_pipe( ialu_reg_long );
%}
8879 // Shift Left Long
8880 instruct salL_Reg_Reg(mRegL dst, mRegL src, mRegI shift) %{
8881 //predicate(UseNewLongLShift);
8882 match(Set dst (LShiftL src shift));
8883 ins_cost(100);
8884 format %{ "salL $dst, $src, $shift @ salL_Reg_Reg" %}
8885 ins_encode %{
8886 Register creg = T9;
8887 Register src_reg = as_Register($src$$reg);
8888 Register dst_reg = as_Register($dst$$reg);
8890 __ move(creg, $shift$$Register);
8891 __ andi(creg, creg, 0x3f);
8892 __ dsllv(dst_reg, src_reg, creg);
8893 %}
8894 ins_pipe( ialu_reg_long );
8895 %}
8897 // Shift Right Long
8898 instruct sarL_Reg_imm(mRegL dst, mRegL src, immI8 shift) %{
8899 //predicate(UseNewLongLShift);
8900 match(Set dst (RShiftL src shift));
8901 ins_cost(100);
8902 format %{ "sarL $dst, $src, $shift @ sarL_Reg_imm" %}
8903 ins_encode %{
8904 Register src_reg = as_Register($src$$reg);
8905 Register dst_reg = as_Register($dst$$reg);
8906 int shamt = ($shift$$constant & 0x3f);
8907 if (__ is_simm(shamt, 5))
8908 __ dsra(dst_reg, src_reg, shamt);
8909 else
8910 {
8911 __ move(AT, shamt);
8912 __ dsrav(dst_reg, src_reg, AT);
8913 }
8914 %}
8915 ins_pipe( ialu_reg_long );
8916 %}
8918 // Shift Right Long arithmetically
8919 instruct sarL_Reg_Reg(mRegL dst, mRegL src, mRegI shift) %{
8920 //predicate(UseNewLongLShift);
8921 match(Set dst (RShiftL src shift));
8922 ins_cost(100);
8923 format %{ "sarL $dst, $src, $shift @ sarL_Reg_Reg" %}
8924 ins_encode %{
8925 Register creg = T9;
8926 Register src_reg = as_Register($src$$reg);
8927 Register dst_reg = as_Register($dst$$reg);
8929 __ move(creg, $shift$$Register);
8930 __ andi(creg, creg, 0x3f);
8931 __ dsrav(dst_reg, src_reg, creg);
8932 %}
8933 ins_pipe( ialu_reg_long );
8934 %}
8936 // Shift Right Long logically
8937 instruct slrL_Reg_Reg(mRegL dst, mRegL src, mRegI shift) %{
8938 match(Set dst (URShiftL src shift));
8939 ins_cost(100);
8940 format %{ "slrL $dst, $src, $shift @ slrL_Reg_Reg" %}
8941 ins_encode %{
8942 Register creg = T9;
8943 Register src_reg = as_Register($src$$reg);
8944 Register dst_reg = as_Register($dst$$reg);
8945 Label normal, done, notZero;
8947 __ move(creg, $shift$$Register);
8948 __ andi(creg, creg, 0x3f);
8949 __ dsrlv(dst_reg, src_reg, creg);
8950 %}
8951 ins_pipe( ialu_reg_long );
8952 %}
8955 // Xor Instructions
8956 // Xor Register with Register
// Xor 32-bit register with register.
instruct xorI_Reg_Reg(mRegI dst, mRegI src1, mRegI src2) %{
  match(Set dst (XorI src1 src2));

  format %{ "XOR    $dst, $src1, $src2 #@xorI_Reg_Reg" %}

  ins_encode %{
    Register dst = $dst$$Register;
    Register src1 = $src1$$Register;
    Register src2 = $src2$$Register;
    __ xorr(dst, src1, src2);
    // sll with shift 0 re-sign-extends the low 32 bits on MIPS64,
    // keeping the int value canonical in a 64-bit register.
    __ sll(dst, dst, 0);   /* long -> int */
  %}

  ins_pipe( ialu_reg_reg );
%}
8973 // Or Instructions
8974 // Or Register with Register
8975 instruct orI_Reg_Reg(mRegI dst, mRegI src1, mRegI src2) %{
8976 match(Set dst (OrI src1 src2));
8978 format %{ "OR $dst, $src1, $src2 #@orI_Reg_Reg" %}
8979 ins_encode %{
8980 Register dst = $dst$$Register;
8981 Register src1 = $src1$$Register;
8982 Register src2 = $src2$$Register;
8983 __ orr(dst, src1, src2);
8984 %}
8986 ins_pipe( ialu_reg_reg );
8987 %}
8989 instruct orI_Reg_castP2X(mRegL dst, mRegL src1, mRegP src2) %{
8990 match(Set dst (OrI src1 (CastP2X src2)));
8992 format %{ "OR $dst, $src1, $src2 #@orI_Reg_castP2X" %}
8993 ins_encode %{
8994 Register dst = $dst$$Register;
8995 Register src1 = $src1$$Register;
8996 Register src2 = $src2$$Register;
8997 __ orr(dst, src1, src2);
8998 %}
9000 ins_pipe( ialu_reg_reg );
9001 %}
9003 // Logical Shift Right by 8-bit immediate
9004 instruct shr_logical_Reg_imm(mRegI dst, mRegI src, immI8 shift) %{
9005 match(Set dst (URShiftI src shift));
9006 // effect(KILL cr);
9008 format %{ "SRL $dst, $src, $shift #@shr_logical_Reg_imm" %}
9009 ins_encode %{
9010 Register src = $src$$Register;
9011 Register dst = $dst$$Register;
9012 int shift = $shift$$constant;
9013 if (shift > 0)
9014 __ srl(dst, src, shift);
9015 else
9016 {
9017 __ move(AT, shift);
9018 __ srlv(dst, src, AT);
9019 }
9020 %}
9021 ins_pipe( ialu_reg );
9022 %}
9024 // Logical Shift Right
9025 instruct shr_logical_Reg_Reg(mRegI dst, mRegI src, mRegI shift) %{
9026 match(Set dst (URShiftI src shift));
9028 format %{ "SRL $dst, $src, $shift #@shr_logical_Reg_Reg" %}
9029 ins_encode %{
9030 Register src = $src$$Register;
9031 Register dst = $dst$$Register;
9032 Register shift = $shift$$Register;
9033 __ srlv(dst, src, shift);
9034 %}
9035 ins_pipe( ialu_reg );
9036 %}
9039 instruct shr_arith_Reg_imm(mRegI dst, mRegI src, immI8 shift) %{
9040 match(Set dst (RShiftI src shift));
9041 // effect(KILL cr);
9043 format %{ "SRA $dst, $src, $shift #@shr_arith_Reg_imm" %}
9044 ins_encode %{
9045 Register src = $src$$Register;
9046 Register dst = $dst$$Register;
9047 int shift = $shift$$constant;
9048 __ sra(dst, src, shift);
9049 %}
9050 ins_pipe( ialu_reg );
9051 %}
9053 instruct shr_arith_Reg_Reg(mRegI dst, mRegI src, mRegI shift) %{
9054 match(Set dst (RShiftI src shift));
9055 // effect(KILL cr);
9057 format %{ "SRA $dst, $src, $shift #@shr_arith_Reg_Reg" %}
9058 ins_encode %{
9059 Register src = $src$$Register;
9060 Register dst = $dst$$Register;
9061 Register shift = $shift$$Register;
9062 __ srav(dst, src, shift);
9063 %}
9064 ins_pipe( ialu_reg );
9065 %}
9067 //----------Convert Int to Boolean---------------------------------------------
9069 instruct movI_nocopy(mRegI dst, mRegI src) %{
9070 effect( DEF dst, USE src );
9071 format %{ "MOV $dst, $src @ movI_nocopy" %}
9072 ins_encode %{
9073 Register dst = $dst$$Register;
9074 Register src = $src$$Register;
9075 __ move(dst, src);
9076 %}
9077 ins_pipe( ialu_reg_reg );
9078 %}
// Helper for convI2B expansion: on entry dst holds a copy of src (from
// movI_nocopy); on exit dst = (src != 0) ? 1 : 0.
instruct ci2b(mRegI dst, mRegI src) %{
  effect( USE_DEF dst, USE src );

  format %{ "NEG    $dst  @ ci2b\n\t"
            "ADC    $dst,$src @ ci2b" %}
  ins_encode %{
    Register dst = $dst$$Register;
    Register src = $src$$Register;
    Label L;
    //If ( dst != 0 ) CF = 1;
    // dst and src must be distinct registers so AT can be derived from src
    // while dst is negated.
    guarantee(dst != src, "in ci2b");
    __ move(AT, src);
    __ beq(dst, R0, L);
    __ nop();
    // Nonzero case: AT = src + 1, so -dst + AT = -src + src + 1 = 1.
    __ addiu(AT, AT, 1);
    __ bind(L);
    // Zero case falls through with AT = 0, giving -0 + 0 = 0.
    __ neg(dst);
    __ addu(dst, dst, AT);
  %}

  ins_pipe( ialu_reg_reg_long );
%}
9104 instruct convI2B(mRegI dst, mRegI src) %{
9105 match(Set dst (Conv2B src));
9107 expand %{
9108 movI_nocopy(dst,src);
9109 ci2b(dst,src);
9110 %}
9111 %}
// int -> long conversion (sign extension).
instruct convI2L_reg( mRegL dst, mRegI src) %{
  match(Set dst (ConvI2L src));

  ins_cost(50);
  format %{ "SLL    $dst, $src @ convI2L_reg\t" %}
  ins_encode %{
    Register dst = as_Register($dst$$reg);
    Register src = as_Register($src$$reg);

    // On MIPS64, sll with shift 0 sign-extends the low 32 bits into the
    // full 64-bit register.  When dst == src the value is presumably
    // already in canonical sign-extended form, so the move is skipped.
    if(dst != src) __ sll(dst, src, 0);
  %}
  ins_pipe( ialu_reg_reg_long );
%}
// long -> int conversion: truncate to the low 32 bits and sign-extend.
instruct convL2I_reg( mRegI dst, mRegL src ) %{
  match(Set dst (ConvL2I src));
  effect( DEF dst, USE src );
  format %{ "MOV    $dst, $src @ convL2I_reg" %}
  ins_encode %{
    Register dst = as_Register($dst$$reg);
    Register src = as_Register($src$$reg);

    // Shift left then arithmetic-shift right by 32: discards the high
    // word and sign-extends the low word into canonical int form.
    __ dsll32(dst, src, 0);
    __ dsra32(dst, dst, 0);
  %}

  ins_pipe( ialu_reg_reg );
%}
9143 instruct convL2D_reg( regD dst, mRegL src ) %{
9144 match(Set dst (ConvL2D src));
9145 effect( DEF dst, USE src );
9146 format %{ "convL2D $dst, $src @ convL2D_reg" %}
9147 ins_encode %{
9148 Register src = as_Register($src$$reg);
9149 FloatRegister dst = as_FloatRegister($dst$$reg);
9151 __ dmtc1(src, dst);
9152 __ cvt_d_l(dst, dst);
9153 %}
9155 ins_pipe( pipe_slow );
9156 %}
// double -> long conversion with Java semantics (NaN -> 0, saturation
// handled by the runtime fallback).
instruct convD2L_reg( mRegL dst, regD src ) %{
  match(Set dst (ConvD2L src));
  effect( DEF dst, USE src );
  format %{ "convD2L    $dst, $src @ convD2L_reg" %}
  ins_encode %{
    Register dst = as_Register($dst$$reg);
    FloatRegister src = as_FloatRegister($src$$reg);

    Label L;

    // NaN compares unordered with itself; Java defines NaN -> 0L.
    __ c_un_d(src, src);     //NaN?
    __ bc1t(L);
    __ delayed();
    __ move(dst, R0);

    __ trunc_l_d(F30, src);
    // FCSR bit 16 is the invalid-operation flag: set when the value is
    // out of long range.  If clear, the truncated result in F30 is valid.
    __ cfc1(AT, 31);
    __ li(T9, 0x10000);
    __ andr(AT, AT, T9);
    __ beq(AT, R0, L);
    __ delayed()->dmfc1(dst, F30);

    // Out-of-range: call the shared runtime for the saturating result.
    // NOTE(review): V0 is not preserved around call_VM_leaf here, unlike
    // convF2I_reg below -- confirm V0 cannot hold a live value at this
    // point (see the 2014/01/08 note in convF2I_reg).
    __ mov_d(F12, src);
    __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::d2l), 1);
    __ move(dst, V0);
    __ bind(L);
  %}

  ins_pipe( pipe_slow );
%}
9189 instruct convF2I_reg( mRegI dst, regF src ) %{
9190 match(Set dst (ConvF2I src));
9191 effect( DEF dst, USE src );
9192 format %{ "convf2i $dst, $src @ convF2I_reg" %}
9193 ins_encode %{
9194 Register dreg = $dst$$Register;
9195 FloatRegister fval = $src$$FloatRegister;
9196 Label L;
9198 __ c_un_s(fval, fval); //NaN?
9199 __ bc1t(L);
9200 __ delayed();
9201 __ move(dreg, R0);
9203 __ trunc_w_s(F30, fval);
9205 /* Call SharedRuntime:f2i() to do valid convention */
9206 __ cfc1(AT, 31);
9207 __ li(T9, 0x10000);
9208 __ andr(AT, AT, T9);
9209 __ beq(AT, R0, L);
9210 __ delayed()->mfc1(dreg, F30);
9212 __ mov_s(F12, fval);
9214 /* 2014/01/08 Fu : This bug was found when running ezDS's control-panel.
9215 * J 982 C2 javax.swing.text.BoxView.layoutMajorAxis(II[I[I)V (283 bytes) @ 0x000000555c46aa74
9216 *
9217 * An interger array index has been assigned to V0, and then changed from 1 to Integer.MAX_VALUE.
9218 * V0 is corrupted during call_VM_leaf(), and should be preserved.
9219 */
9220 if(dreg != V0) {
9221 __ push(V0);
9222 }
9223 __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::f2i), 1);
9224 if(dreg != V0) {
9225 __ move(dreg, V0);
9226 __ pop(V0);
9227 }
9228 __ bind(L);
9229 %}
9231 ins_pipe( pipe_slow );
9232 %}
9234 instruct convF2L_reg( mRegL dst, regF src ) %{
9235 match(Set dst (ConvF2L src));
9236 effect( DEF dst, USE src );
9237 format %{ "convf2l $dst, $src @ convF2L_reg" %}
9238 ins_encode %{
9239 Register dst = as_Register($dst$$reg);
9240 FloatRegister fval = $src$$FloatRegister;
9241 Label L;
9243 __ c_un_s(fval, fval); //NaN?
9244 __ bc1t(L);
9245 __ delayed();
9246 __ move(dst, R0);
9248 __ trunc_l_s(F30, fval);
9249 __ cfc1(AT, 31);
9250 __ li(T9, 0x10000);
9251 __ andr(AT, AT, T9);
9252 __ beq(AT, R0, L);
9253 __ delayed()->dmfc1(dst, F30);
9255 __ mov_s(F12, fval);
9256 __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::f2l), 1);
9257 __ move(dst, V0);
9258 __ bind(L);
9259 %}
9261 ins_pipe( pipe_slow );
9262 %}
9264 instruct convL2F_reg( regF dst, mRegL src ) %{
9265 match(Set dst (ConvL2F src));
9266 effect( DEF dst, USE src );
9267 format %{ "convl2f $dst, $src @ convL2F_reg" %}
9268 ins_encode %{
9269 FloatRegister dst = $dst$$FloatRegister;
9270 Register src = as_Register($src$$reg);
9271 Label L;
9273 __ dmtc1(src, dst);
9274 __ cvt_s_l(dst, dst);
9275 %}
9277 ins_pipe( pipe_slow );
9278 %}
9280 instruct convI2F_reg( regF dst, mRegI src ) %{
9281 match(Set dst (ConvI2F src));
9282 effect( DEF dst, USE src );
9283 format %{ "convi2f $dst, $src @ convI2F_reg" %}
9284 ins_encode %{
9285 Register src = $src$$Register;
9286 FloatRegister dst = $dst$$FloatRegister;
9288 __ mtc1(src, dst);
9289 __ cvt_s_w(dst, dst);
9290 %}
9292 ins_pipe( fpu_reg_reg );
9293 %}
9295 instruct cmpLTMask( mRegI dst, mRegI p, mRegI q ) %{
9296 match(Set dst (CmpLTMask p q));
9297 ins_cost(400);
9299 format %{ "cmpLTMask $dst, $p, $q @ cmpLTMask" %}
9300 ins_encode %{
9301 Register p = $p$$Register;
9302 Register q = $q$$Register;
9303 Register dst = $dst$$Register;
9305 __ slt(dst, p, q);
9306 __ subu(dst, R0, dst);
9307 %}
9308 ins_pipe( pipe_slow );
9309 %}
9311 instruct movP_nocopy(mRegI dst, mRegP src) %{
9312 effect( DEF dst, USE src );
9313 format %{ "MOV $dst,$src @ movP_nocopy" %}
9314 ins_encode %{
9315 Register dst = $dst$$Register;
9316 Register src = $src$$Register;
9317 __ addu(dst, src, R0);
9318 %}
9319 // ins_encode( enc_Copy( dst, src) );
9320 ins_pipe( ialu_reg_reg );
9321 %}
9323 //FIXME
9324 //instruct cp2b( mRegI dst, mRegP src, eFlagsReg cr ) %{
// Helper for convP2B expansion: on entry dst holds a copy of the pointer
// src (from movP_nocopy); on exit dst = (src != NULL) ? 1 : 0.
instruct cp2b( mRegI dst, mRegP src ) %{
  effect( USE_DEF dst, USE src );
  format %{ "NEG    $dst @cp2b\n\t"
            "ADC    $dst,$src @cp2b" %}
  ins_encode %{
    Register dst = $dst$$Register;
    Register src = $src$$Register;
    Label L;
    //If ( dst != 0 ) CF = 1;
    // Same trick as ci2b: AT = src (+1 if nonzero); dst = -dst + AT,
    // which yields 1 for nonzero and 0 for zero since dst == src value.
    // NOTE(review): ci2b guards with guarantee(dst != src); this variant
    // does not -- presumably the expand always allocates distinct
    // registers, verify.
    __ move(AT, src);
    __ beq(dst, R0, L);
    __ nop();
    __ addiu(AT, AT, 1);
    __ bind(L);
    __ neg(dst);
    __ addu(dst, dst, AT);
  %}

  ins_pipe( ialu_reg_reg_long );
%}
9346 instruct convP2B( mRegI dst, mRegP src ) %{
9347 match(Set dst (Conv2B src));
9349 expand %{
9350 movP_nocopy(dst,src);
9351 cp2b(dst,src);
9352 %}
9353 %}
9355 instruct convI2D_reg_reg(regD dst, mRegI src) %{
9356 match(Set dst (ConvI2D src));
9357 format %{ "conI2D $dst, $src @convI2D_reg" %}
9358 ins_encode %{
9359 Register src = $src$$Register;
9360 FloatRegister dst = $dst$$FloatRegister;
9361 __ mtc1(src, dst);
9362 __ cvt_d_w(dst, dst);
9363 %}
9364 ins_pipe( fpu_reg_reg );
9365 %}
9367 instruct convF2I_reg_reg(mRegI dst, regF src) %{
9368 match(Set dst (ConvF2I src));
9369 format %{ "convF2I $dst, $src\t# @convF2D_reg_reg" %}
9370 ins_encode %{
9371 FloatRegister dst = $dst$$FloatRegister;
9372 FloatRegister src = $src$$FloatRegister;
9374 __ cvt_d_s(dst, src);
9375 %}
9376 ins_pipe( fpu_reg_reg );
9377 %}
9379 instruct convF2D_reg_reg(regD dst, regF src) %{
9380 match(Set dst (ConvF2D src));
9381 format %{ "convF2D $dst, $src\t# @convF2D_reg_reg" %}
9382 ins_encode %{
9383 FloatRegister dst = $dst$$FloatRegister;
9384 FloatRegister src = $src$$FloatRegister;
9386 __ cvt_d_s(dst, src);
9387 %}
9388 ins_pipe( fpu_reg_reg );
9389 %}
9391 instruct convD2F_reg_reg(regF dst, regD src) %{
9392 match(Set dst (ConvD2F src));
9393 format %{ "convD2F $dst, $src\t# @convD2F_reg_reg" %}
9394 ins_encode %{
9395 FloatRegister dst = $dst$$FloatRegister;
9396 FloatRegister src = $src$$FloatRegister;
9398 __ cvt_s_d(dst, src);
9399 %}
9400 ins_pipe( fpu_reg_reg );
9401 %}
9403 // Convert a double to an int. If the double is a NAN, stuff a zero in instead.
9404 instruct convD2I_reg_reg( mRegI dst, regD src ) %{
9405 match(Set dst (ConvD2I src));
9406 // effect( KILL tmp, KILL cr );//after this instruction, it will release register tmp and cr
9408 format %{ "convD2I $dst, $src\t# @ convD2I_reg_reg \n\t" %}
9410 ins_encode %{
9411 FloatRegister src = $src$$FloatRegister;
9412 Register dst = $dst$$Register;
9413 Label L;
9415 __ trunc_w_d(F30, src);
9416 __ cfc1(AT, 31);
9417 __ li(T9, 0x10000);
9418 __ andr(AT, AT, T9);
9419 __ beq(AT, R0, L);
9420 __ delayed()->mfc1(dst, F30);
9422 __ mov_d(F12, src);
9423 __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::d2i), 1);
9424 __ move(dst, V0);
9425 __ bind(L);
9427 %}
9428 ins_pipe( pipe_slow );
9429 %}
9431 // Convert oop pointer into compressed form
9432 instruct encodeHeapOop(mRegN dst, mRegP src, FlagsReg cr) %{
9433 predicate(n->bottom_type()->make_ptr()->ptr() != TypePtr::NotNull);
9434 match(Set dst (EncodeP src));
9435 effect(KILL cr);
9436 format %{ "encode_heap_oop $dst,$src" %}
9437 ins_encode %{
9438 Register src = $src$$Register;
9439 Register dst = $dst$$Register;
9440 if (src != dst) {
9441 __ move(dst, src);
9442 }
9443 __ encode_heap_oop(dst);
9444 %}
9445 ins_pipe(ialu_reg_long);
9446 %}
9448 instruct encodeHeapOop_not_null(mRegN dst, mRegP src, FlagsReg cr) %{
9449 predicate(n->bottom_type()->make_ptr()->ptr() == TypePtr::NotNull);
9450 match(Set dst (EncodeP src));
9451 effect(KILL cr);
9452 format %{ "encode_heap_oop_not_null $dst,$src @ encodeHeapOop_not_null" %}
9453 ins_encode %{
9454 __ encode_heap_oop_not_null($dst$$Register, $src$$Register);
9455 %}
9456 ins_pipe(ialu_reg_long);
9457 %}
9459 instruct decodeHeapOop(mRegP dst, mRegN src) %{
9460 predicate(n->bottom_type()->is_ptr()->ptr() != TypePtr::NotNull &&
9461 n->bottom_type()->is_ptr()->ptr() != TypePtr::Constant);
9462 match(Set dst (DecodeN src));
9463 format %{ "decode_heap_oop $dst,$src @ decodeHeapOop" %}
9464 ins_encode %{
9465 Register s = $src$$Register;
9466 Register d = $dst$$Register;
9467 if (s != d) {
9468 __ move(d, s);
9469 }
9470 __ decode_heap_oop(d);
9471 %}
9472 ins_pipe(ialu_reg_long);
9473 %}
9475 instruct decodeHeapOop_not_null(mRegP dst, mRegN src) %{
9476 predicate(n->bottom_type()->is_ptr()->ptr() == TypePtr::NotNull ||
9477 n->bottom_type()->is_ptr()->ptr() == TypePtr::Constant);
9478 match(Set dst (DecodeN src));
9479 format %{ "decode_heap_oop_not_null $dst,$src @ decodeHeapOop_not_null" %}
9480 ins_encode %{
9481 Register s = $src$$Register;
9482 Register d = $dst$$Register;
9483 if (s != d) {
9484 __ decode_heap_oop_not_null(d, s);
9485 } else {
9486 __ decode_heap_oop_not_null(d);
9487 }
9488 %}
9489 ins_pipe(ialu_reg_long);
9490 %}
9492 instruct encodeKlass_not_null(mRegN dst, mRegP src, FlagsReg cr) %{
9493 match(Set dst (EncodePKlass src));
9494 effect(KILL cr);
9495 format %{ "encode_heap_oop_not_null $dst,$src @ encodeKlass_not_null" %}
9496 ins_encode %{
9497 __ encode_klass_not_null($dst$$Register, $src$$Register);
9498 %}
9499 ins_pipe(ialu_reg_long);
9500 %}
9502 instruct decodeKlass_not_null(mRegP dst, mRegN src, FlagsReg cr) %{
9503 match(Set dst (DecodeNKlass src));
9504 effect(KILL cr);
9505 format %{ "decode_heap_klass_not_null $dst,$src" %}
9506 ins_encode %{
9507 Register s = $src$$Register;
9508 Register d = $dst$$Register;
9509 if (s != d) {
9510 __ decode_klass_not_null(d, s);
9511 } else {
9512 __ decode_klass_not_null(d);
9513 }
9514 %}
9515 ins_pipe(ialu_reg_long);
9516 %}
9518 //FIXME
//FIXME
// Load the current JavaThread pointer.
instruct tlsLoadP(mRegP dst) %{
  match(Set dst (ThreadLocal));

  ins_cost(0);
  format %{ " get_thread in $dst #@tlsLoadP" %}
  ins_encode %{
    Register dst = $dst$$Register;
    // With OPT_THREAD the thread pointer is pinned in TREG, so a plain
    // register move suffices; otherwise fetch it via get_thread().
  #ifdef OPT_THREAD
    __ move(dst, TREG);
  #else
    __ get_thread(dst);
  #endif
  %}

  ins_pipe(ialu_none);
%}
9537 instruct checkCastPP( mRegP dst ) %{
9538 match(Set dst (CheckCastPP dst));
9540 format %{ "#checkcastPP of $dst (empty encoding) #@chekCastPP" %}
9541 ins_encode( /*empty encoding*/ );
9542 ins_pipe( empty );
9543 %}
9545 instruct castPP(mRegP dst)
9546 %{
9547 match(Set dst (CastPP dst));
9549 size(0);
9550 format %{ "# castPP of $dst" %}
9551 ins_encode(/* empty encoding */);
9552 ins_pipe(empty);
9553 %}
9555 instruct castII( mRegI dst ) %{
9556 match(Set dst (CastII dst));
9557 format %{ "#castII of $dst empty encoding" %}
9558 ins_encode( /*empty encoding*/ );
9559 ins_cost(0);
9560 ins_pipe( empty );
9561 %}
9563 // Return Instruction
9564 // Remove the return address & jump to it.
9565 instruct Ret() %{
9566 match(Return);
9567 format %{ "RET #@Ret" %}
9569 ins_encode %{
9570 __ jr(RA);
9571 __ nop();
9572 %}
9574 ins_pipe( pipe_jmp );
9575 %}
9578 // Jump Direct - Label defines a relative address from JMP
// Jump Direct - Label defines a relative address from JMP
instruct jmpDir(label labl) %{
  match(Goto);
  effect(USE labl);

  ins_cost(300);
  format %{ "JMP    $labl #@jmpDir" %}

  ins_encode %{
    Label &L = *($labl$$label);
    // NOTE(review): `if(&L)` tests whether the underlying Label pointer is
    // non-null by taking the address of the bound reference -- it works in
    // practice but relies on the reference having been bound from a
    // possibly-null pointer; when null, emit a branch with displacement 0
    // to be patched later.
    if(&L)
      __ b(L);
    else
      __ b(int(0));
    // Branch delay slot.
    __ nop();
  %}

  ins_pipe( pipe_jmp );
  ins_pc_relative(1);
%}
9601 // Tail Jump; remove the return address; jump to target.
9602 // TailCall above leaves the return address around.
9603 // TailJump is used in only one place, the rethrow_Java stub (fancy_jump=2).
9604 // ex_oop (Exception Oop) is needed in %o0 at the jump. As there would be a
9605 // "restore" before this instruction (in Epilogue), we need to materialize it
9606 // in %i0.
9607 //FIXME
// Tail jump used only by the rethrow_Java stub (fancy_jump=2): removes the
// return address and jumps to the target with the exception oop/pc in the
// registers the exception blob expects.
instruct tailjmpInd(mRegP jump_target,mRegP ex_oop) %{
  match( TailJump jump_target ex_oop );
  ins_cost(200);
  format %{ "Jmp     $jump_target  ; ex_oop = $ex_oop #@tailjmpInd" %}
  ins_encode %{
    Register target = $jump_target$$Register;

    /* 2012/9/14 Jin: V0, V1 are indicated in:
     *      [stubGenerator_mips.cpp] generate_forward_exception()
     *      [runtime_mips.cpp] OptoRuntime::generate_exception_blob()
     */
    Register oop  = $ex_oop$$Register;
    Register exception_oop = V0;
    Register exception_pc = V1;

    // Hand off the return address as the exception pc and the oop in the
    // conventional registers before jumping.
    __ move(exception_pc, RA);
    __ move(exception_oop, oop);

    __ jr(target);
    // Branch delay slot.
    __ nop();
  %}
  ins_pipe( pipe_jmp );
%}
9632 // ============================================================================
9633 // Procedure Call/Return Instructions
9634 // Call Java Static Instruction
9635 // Note: If this code changes, the corresponding ret_addr_offset() and
9636 // compute_padding() functions will have to be adjusted.
9637 instruct CallStaticJavaDirect(method meth) %{
9638 match(CallStaticJava);
9639 effect(USE meth);
9641 ins_cost(300);
9642 format %{ "CALL,static #@CallStaticJavaDirect " %}
9643 ins_encode( Java_Static_Call( meth ) );
9644 ins_pipe( pipe_slow );
9645 ins_pc_relative(1);
9646 ins_alignment(16);
9647 %}
9649 // Call Java Dynamic Instruction
9650 // Note: If this code changes, the corresponding ret_addr_offset() and
9651 // compute_padding() functions will have to be adjusted.
// Dynamically-bound (inline-cache) Java call: loads the IC klass sentinel,
// then calls through the Java_Dynamic_Call encoding.
// If the emitted code changes, ret_addr_offset() and compute_padding()
// must be adjusted to match (see note above).
instruct CallDynamicJavaDirect(method meth) %{
  match(CallDynamicJava);
  effect(USE meth);

  ins_cost(300);
  format %{"MOV IC_Klass, (oop)-1 @ CallDynamicJavaDirect\n\t"
           "CallDynamic @ CallDynamicJavaDirect" %}
  ins_encode( Java_Dynamic_Call( meth ) );
  ins_pipe( pipe_slow );
  ins_pc_relative(1);
  ins_alignment(16);  // keep the call site 16-byte aligned (see compute_padding())
%}
// Call into the runtime with no safepoint and no FP arguments/results.
instruct CallLeafNoFPDirect(method meth) %{
  match(CallLeafNoFP);
  effect(USE meth);

  ins_cost(300);
  format %{ "CALL_LEAF_NOFP,runtime " %}
  ins_encode(Java_To_Runtime(meth));
  ins_pipe( pipe_slow );
  ins_pc_relative(1);
  ins_alignment(16);
%}
// PrefetchWrite is implemented as a memory barrier here, not a true prefetch:
// the encoding emits only a sync. The $mem operand is matched but unused.
instruct prefetchw0( memory mem ) %{
  // predicate(UseSSE==0 && !VM_Version::supports_3dnow());
  match(PrefetchWrite mem);
  format %{ "Prefetch (sync) #@prefetchw0" %}
  ins_encode %{
    __ sync();
  %}
  ins_pipe(pipe_slow);
%}
9689 // Call runtime without safepoint
// Call runtime without safepoint (leaf call; FP arguments allowed).
instruct CallLeafDirect(method meth) %{
  match(CallLeaf);
  effect(USE meth);

  ins_cost(300);
  format %{ "CALL_LEAF,runtime #@CallLeafDirect " %}
  ins_encode(Java_To_Runtime(meth));
  ins_pipe( pipe_slow );
  ins_pc_relative(1);
  ins_alignment(16);
%}
9702 // Load Char (16bit unsigned)
// Load Char (16-bit unsigned, zero-extended) from memory.
// NOTE(review): the format text says "@ loadC" but the instruct is loadUS —
// stale debug string, harmless.
instruct loadUS(mRegI dst, memory mem) %{
  match(Set dst (LoadUS mem));

  ins_cost(125);
  format %{ "loadUS $dst,$mem @ loadC" %}
  // opcode(0xB7, 0x0F);
  // ins_encode( OpcS, OpcP, RegMem(dst,mem));
  ins_encode(load_C_enc(dst, mem));
  ins_pipe( ialu_reg_mem );
%}
9714 // Store Char (16bit unsigned)
// Store Char (low 16 bits of $src) to memory.
instruct storeC(memory mem, mRegI src) %{
  match(Set mem (StoreC mem src));

  ins_cost(125);
  format %{ "storeC $src,$mem @ storeC" %}
  ins_encode(store_C_reg_enc(mem, src));
  ins_pipe( ialu_reg_mem );
%}
// Load float constant +0.0f: move the integer zero register into the FPU
// register (bit pattern of +0.0f is all zeroes), avoiding a constant-table load.
instruct loadConF0(regF dst, immF0 zero) %{
  match(Set dst zero);
  ins_cost(100);

  format %{ "mov $dst, zero @ loadConF0\n"%}
  ins_encode %{
    FloatRegister dst = $dst$$FloatRegister;

    __ mtc1(R0, dst);
  %}
  ins_pipe( fpu_reg_con );
%}
// Load an arbitrary float constant from the constant table:
// materialize the table entry's address in AT, then lwc1 from it.
instruct loadConF(regF dst, immF src) %{
  match(Set dst src);
  ins_cost(125);

  format %{ "mov $dst, $src @ loadConF"%}
  ins_encode %{
    FloatRegister dst = $dst$$FloatRegister;
    jfloat jf = $src$$constant;
    address const_addr = __ float_constant(jf);
    assert (const_addr != NULL, "must create float constant in the constant table");

    // internal_pc relocation: the embedded address must be fixed up if the
    // code blob moves.
    __ relocate(relocInfo::internal_pc_type);
    __ li(AT, const_addr);
    __ lwc1(dst, AT, 0);
  %}
  ins_pipe( fpu_reg_con );
%}
// Load double constant +0.0: dmtc1 of the zero register gives the 64-bit
// all-zero pattern, i.e. +0.0, without touching the constant table.
instruct loadConD0(regD dst, immD0 zero) %{
  match(Set dst zero);
  ins_cost(100);

  format %{ "mov $dst, zero @ loadConD0\n"%}
  ins_encode %{
    FloatRegister dst = as_FloatRegister($dst$$reg);

    __ dmtc1(R0, dst);
  %}
  ins_pipe( fpu_reg_con );
%}
// Load an arbitrary double constant from the constant table:
// materialize the table entry's address in AT, then ldc1 from it.
instruct loadConD(regD dst, immD src) %{
  match(Set dst src);
  ins_cost(125);

  format %{ "mov $dst, $src @ loadConD\n"%}
  ins_encode %{
    FloatRegister dst_reg = as_FloatRegister($dst$$reg);

    jdouble jd = $src$$constant;
    address const_addr = __ double_constant(jd);
    assert (const_addr != NULL, "must create double constant in the constant table");

    // internal_pc relocation: the embedded address must be fixed up if the
    // code blob moves.
    __ relocate(relocInfo::internal_pc_type);
    __ li(AT, const_addr);
    __ ldc1(dst_reg, AT, 0);
  %}
  ins_pipe( fpu_reg_con );
%}
9790 // Store register Float value (it is faster than store from FPU register)
// Store register Float value (it is faster than store from FPU register).
instruct storeF_reg( memory mem, regF src) %{
  match(Set mem (StoreF mem src));

  ins_cost(50);
  format %{ "store $mem, $src\t# store float @ storeF_reg" %}
  ins_encode(store_F_reg_enc(mem, src));
  ins_pipe( fpu_mem_reg );
%}
9801 // Store immediate Float value (it is faster than store from FPU register)
9802 // The instruction usage is guarded by predicate in operand immF().
// Store an immediate float to memory: load the constant-table entry into the
// scratch FPU register F30 (via AT), then swc1 it to the effective address.
// Scratch registers: AT and T9 (GPR), F30 (FPU).
// Addressing cases handled: base+disp and base+index+disp, each with a
// simm16 or out-of-range displacement; scaled index is unimplemented.
instruct storeF_imm( memory mem, immF src) %{
  match(Set mem (StoreF mem src));

  ins_cost(50);
  format %{ "store $mem, $src\t# store float @ storeF_imm" %}
  ins_encode %{
    jfloat jf = $src$$constant;
    int base = $mem$$base;
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;
    address const_addr = __ float_constant(jf);
    assert (const_addr != NULL, "must create float constant in the constant table");

    // Load the float constant into F30 through AT.
    __ relocate(relocInfo::internal_pc_type);
    __ li(AT, const_addr);
    __ lwc1(F30, AT, 0);

    if( scale != 0 ) Unimplemented();
    if( index != 0 ) {
       if( Assembler::is_simm16(disp) ) {
          // base + index reachable, disp fits in the store's 16-bit offset.
          __ addu(AT, as_Register(base), as_Register(index));
          __ swc1(F30, AT, disp);
       } else {
          // disp too large: add it in via T9 and store at offset 0.
          __ addu(AT, as_Register(base), as_Register(index));
          __ move(T9, disp);
          __ addu(AT, AT, T9);
          __ swc1(F30, AT, 0);
       }

    } else {
       if( Assembler::is_simm16(disp) ) {
          __ swc1(F30, as_Register(base), disp);
       } else {
          __ move(T9, disp);
          __ addu(AT, as_Register(base), T9);
          __ swc1(F30, AT, 0);
       }
    }
  %}
  ins_pipe( ialu_mem_imm );
%}
// Store float +0.0f to memory: since +0.0f is the all-zero bit pattern, a
// plain integer sw of R0 suffices — no FPU register needed.
// Scratch registers: AT and T9. Handles scaled index (unlike storeF_imm).
instruct storeF_imm0( memory mem, immF0 zero) %{
  match(Set mem (StoreF mem zero));

  ins_cost(40);
  format %{ "store $mem, zero\t# store float @ storeF_imm0" %}
  ins_encode %{
    int base = $mem$$base;
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;

    if( index != 0 ) {
       // Compute base + (index << scale) into AT.
       if(scale != 0) {
          __ dsll(T9, as_Register(index), scale);
          __ addu(AT, as_Register(base), T9);
       } else {
          __ daddu(AT, as_Register(base), as_Register(index));
       }
       if( Assembler::is_simm16(disp) ) {
          __ sw(R0, AT, disp);
       } else {
          // disp too large for the 16-bit offset field: add it via T9.
          __ move(T9, disp);
          __ addu(AT, AT, T9);
          __ sw(R0, AT, 0);
       }

    } else {
       if( Assembler::is_simm16(disp) ) {
          __ sw(R0, as_Register(base), disp);
       } else {
          __ move(T9, disp);
          __ addu(AT, as_Register(base), T9);
          __ sw(R0, AT, 0);
       }
    }
  %}
  ins_pipe( ialu_mem_imm );
%}
9885 // Load Double
// Load Double from memory into an FPU register.
instruct loadD(regD dst, memory mem) %{
  match(Set dst (LoadD mem));

  ins_cost(150);
  format %{ "loadD $dst, $mem #@loadD" %}
  ins_encode(load_D_enc(dst, mem));
  ins_pipe( fpu_reg_mem );
%}
9895 // Load Double - UNaligned
// Load Double - UNaligned. Currently uses the same encoding as the aligned
// load; a dedicated ldl/ldr sequence would be more effective (see FIXME).
instruct loadD_unaligned(regD dst, memory mem ) %{
  match(Set dst (LoadD_unaligned mem));
  ins_cost(250);
  // FIXME: Jin: Need more effective ldl/ldr
  format %{ "loadD_unaligned $dst, $mem #@loadD_unaligned" %}
  ins_encode(load_D_enc(dst, mem));
  ins_pipe( fpu_reg_mem );
%}
// Store Double from an FPU register to memory.
instruct storeD_reg( memory mem, regD src) %{
  match(Set mem (StoreD mem src));

  ins_cost(50);
  format %{ "store $mem, $src\t# store float @ storeD_reg" %}
  ins_encode(store_D_reg_enc(mem, src));
  ins_pipe( fpu_mem_reg );
%}
// Store double +0.0 to memory. Builds 0.0 in scratch FPU register F30
// (mtc1 of R0 then int->double convert) and sdc1's it to the effective
// address. Scratch registers: AT, T9 (GPR) and F30 (FPU).
instruct storeD_imm0( memory mem, immD0 zero) %{
  match(Set mem (StoreD mem zero));

  ins_cost(40);
  format %{ "store $mem, zero\t# store float @ storeD_imm0" %}
  ins_encode %{
    int base = $mem$$base;
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;

    // F30 := (double)0 — produces the +0.0 bit pattern.
    __ mtc1(R0, F30);
    __ cvt_d_w(F30, F30);

    if( index != 0 ) {
       // Compute base + (index << scale) into AT.
       if(scale != 0) {
          __ dsll(T9, as_Register(index), scale);
          __ addu(AT, as_Register(base), T9);
       } else {
          __ daddu(AT, as_Register(base), as_Register(index));
       }
       if( Assembler::is_simm16(disp) ) {
          __ sdc1(F30, AT, disp);
       } else {
          // disp too large for the 16-bit offset field: add it via T9.
          __ move(T9, disp);
          __ addu(AT, AT, T9);
          __ sdc1(F30, AT, 0);
       }

    } else {
       if( Assembler::is_simm16(disp) ) {
          __ sdc1(F30, as_Register(base), disp);
       } else {
          __ move(T9, disp);
          __ addu(AT, as_Register(base), T9);
          __ sdc1(F30, AT, 0);
       }
    }
  %}
  ins_pipe( ialu_mem_imm );
%}
// Fast-path monitor enter: delegates to MacroAssembler::fast_lock, which
// sets the flag result consumed by the matched FastLock node.
instruct cmpFastLock( FlagsReg cr, mRegP object, mRegP box, mRegI tmp, mRegP scr) %{
  match( Set cr (FastLock object box) );
  effect( TEMP tmp, TEMP scr );
  ins_cost(300);
  format %{ "FASTLOCK $cr $object, $box, $tmp #@ cmpFastLock" %}
  ins_encode %{
    __ fast_lock($object$$Register, $box$$Register, $tmp$$Register, $scr$$Register);
  %}

  ins_pipe( pipe_slow );
  ins_pc_relative(1);
%}
// Fast-path monitor exit: delegates to MacroAssembler::fast_unlock.
instruct cmpFastUnlock( FlagsReg cr, mRegP object, mRegP box, mRegP tmp ) %{
  match( Set cr (FastUnlock object box) );
  effect( TEMP tmp );
  ins_cost(300);
  format %{ "FASTUNLOCK $object, $box, $tmp #@cmpFastUnlock" %}
  ins_encode %{
    __ fast_unlock($object$$Register, $box$$Register, $tmp$$Register);
  %}

  ins_pipe( pipe_slow );
  ins_pc_relative(1);
%}
9982 // Store CMS card-mark Immediate
// Store CMS card-mark Immediate: byte store of an 8-bit immediate into the
// card table entry for $mem.
instruct storeImmCM(memory mem, immI8 src) %{
  match(Set mem (StoreCM mem src));

  ins_cost(150);
  format %{ "MOV8 $mem,$src\t! CMS card-mark imm0" %}
  // opcode(0xC6);
  ins_encode(store_B_immI_enc(mem, src));
  ins_pipe( ialu_mem_reg );
%}
9993 // Die now
// Die now: matched for Halt nodes — paths that must never execute at runtime.
// Emits a stop() (debug trap + message) rather than a bare illtrap.
instruct ShouldNotReachHere( )
%{
  match(Halt);
  ins_cost(300);

  // Use the following format syntax
  format %{ "ILLTRAP ;#@ShouldNotReachHere" %}
  ins_encode %{
    // Here we should emit illtrap !
    // Fix: the stop message previously read "ShoudNotReachHere" (typo).
    __ stop("in ShouldNotReachHere");

  %}
  ins_pipe(pipe_jmp);
%}
10011 // Jump Direct Conditional - Label defines a relative address from Jcc+1
// Jump Direct Conditional - Label defines a relative address from Jcc+1.
// Counted-loop back-branch on a register/register compare.
// cmpcode values: 0x01 eq, 0x02 ne, 0x03 gt, 0x04 ge, 0x05 lt, 0x06 le
// (signed, via slt). NOTE(review): the `if (&L)` tests follow the file-wide
// idiom of checking whether $$label was NULL through the reference; the else
// arm emits a branch with a 0 placeholder offset to be patched later —
// TODO confirm against the matcher's label handling.
instruct jmpLoopEnd(cmpOp cop, mRegI src1, mRegI src2, label labl) %{
  match(CountedLoopEnd cop (CmpI src1 src2));
  effect(USE labl);

  ins_cost(300);
  format %{ "J$cop $src1, $src2, $labl\t# Loop end @ jmpLoopEnd" %}
  ins_encode %{
    Register op1 = $src1$$Register;
    Register op2 = $src2$$Register;
    Label &L = *($labl$$label);
    int flag = $cop$$cmpcode;

    switch(flag)
    {
      case 0x01: //equal
        if (&L)
          __ beq(op1, op2, L);
        else
          __ beq(op1, op2, (int)0);
        break;
      case 0x02: //not_equal
        if (&L)
          __ bne(op1, op2, L);
        else
          __ bne(op1, op2, (int)0);
        break;
      case 0x03: //above
        // src1 > src2  <=>  slt(AT, src2, src1) sets AT
        __ slt(AT, op2, op1);
        if(&L)
          __ bne(AT, R0, L);
        else
          __ bne(AT, R0, (int)0);
        break;
      case 0x04: //above_equal
        // src1 >= src2  <=>  !(src1 < src2)
        __ slt(AT, op1, op2);
        if(&L)
          __ beq(AT, R0, L);
        else
          __ beq(AT, R0, (int)0);
        break;
      case 0x05: //below
        __ slt(AT, op1, op2);
        if(&L)
          __ bne(AT, R0, L);
        else
          __ bne(AT, R0, (int)0);
        break;
      case 0x06: //below_equal
        // src1 <= src2  <=>  !(src2 < src1)
        __ slt(AT, op2, op1);
        if(&L)
          __ beq(AT, R0, L);
        else
          __ beq(AT, R0, (int)0);
        break;
      default:
        Unimplemented();
    }
    __ nop();  // branch-delay slot
  %}
  ins_pipe( pipe_jcc );
  ins_pc_relative(1);
%}
// Counted-loop back-branch against a 16-bit immediate: computes
// AT = src1 - src2 with one addiu32, then branches on AT's sign/zero,
// saving the slt used by the register/register variant.
// The immI16_sub operand guarantees -src2 fits the addiu immediate field.
instruct jmpLoopEnd_reg_imm16_sub(cmpOp cop, mRegI src1, immI16_sub src2, label labl) %{
  match(CountedLoopEnd cop (CmpI src1 src2));
  effect(USE labl);

  ins_cost(250);
  format %{ "J$cop $src1, $src2, $labl\t# Loop end @ jmpLoopEnd_reg_imm16_sub" %}
  ins_encode %{
    Register op1 = $src1$$Register;
    int op2 = $src2$$constant;
    Label &L = *($labl$$label);
    int flag = $cop$$cmpcode;

    // AT = src1 - src2; the branch below tests AT against zero.
    __ addiu32(AT, op1, -1 * op2);

    switch(flag)
    {
      case 0x01: //equal
        if (&L)
          __ beq(AT, R0, L);
        else
          __ beq(AT, R0, (int)0);
        break;
      case 0x02: //not_equal
        if (&L)
          __ bne(AT, R0, L);
        else
          __ bne(AT, R0, (int)0);
        break;
      case 0x03: //above
        if(&L)
          __ bgtz(AT, L);
        else
          __ bgtz(AT, (int)0);
        break;
      case 0x04: //above_equal
        if(&L)
          __ bgez(AT, L);
        else
          __ bgez(AT,(int)0);
        break;
      case 0x05: //below
        if(&L)
          __ bltz(AT, L);
        else
          __ bltz(AT, (int)0);
        break;
      case 0x06: //below_equal
        if(&L)
          __ blez(AT, L);
        else
          __ blez(AT, (int)0);
        break;
      default:
        Unimplemented();
    }
    __ nop();  // branch-delay slot
  %}
  ins_pipe( pipe_jcc );
  ins_pc_relative(1);
%}
10138 /*
10139 // Jump Direct Conditional - Label defines a relative address from Jcc+1
10140 instruct jmpLoopEndU(cmpOpU cop, eFlagsRegU cmp, label labl) %{
10141 match(CountedLoopEnd cop cmp);
10142 effect(USE labl);
10144 ins_cost(300);
10145 format %{ "J$cop,u $labl\t# Loop end" %}
10146 size(6);
10147 opcode(0x0F, 0x80);
10148 ins_encode( Jcc( cop, labl) );
10149 ins_pipe( pipe_jcc );
10150 ins_pc_relative(1);
10151 %}
10153 instruct jmpLoopEndUCF(cmpOpUCF cop, eFlagsRegUCF cmp, label labl) %{
10154 match(CountedLoopEnd cop cmp);
10155 effect(USE labl);
10157 ins_cost(200);
10158 format %{ "J$cop,u $labl\t# Loop end" %}
10159 opcode(0x0F, 0x80);
10160 ins_encode( Jcc( cop, labl) );
10161 ins_pipe( pipe_jcc );
10162 ins_pc_relative(1);
10163 %}
10164 */
10166 // This match pattern is created for StoreIConditional since I cannot match IfNode without a RegFlags! fujie 2012/07/17
// This match pattern is created for StoreIConditional since I cannot match
// IfNode without a RegFlags! fujie 2012/07/17
// MIPS has no eflags; AT carries the condition result. Note the branch
// senses are inverted relative to the cmpcode: the flag producers (e.g.
// storeIConditional below) leave AT != 0 on success, so "equal" here tests
// AT != 0 and "not equal" tests AT == 0.
instruct jmpCon_flags(cmpOp cop, FlagsReg cr, label labl) %{
  match(If cop cr);
  effect(USE labl);

  ins_cost(300);
  format %{ "J$cop $labl #mips uses AT as eflag @jmpCon_flags" %}

  ins_encode %{
    Label &L = *($labl$$label);
    switch($cop$$cmpcode)
    {
      case 0x01: //equal
        if (&L)
          __ bne(AT, R0, L);
        else
          __ bne(AT, R0, (int)0);
        break;
      case 0x02: //not equal
        if (&L)
          __ beq(AT, R0, L);
        else
          __ beq(AT, R0, (int)0);
        break;
      default:
        Unimplemented();
    }
    __ nop();  // branch-delay slot
  %}

  ins_pipe( pipe_jcc );
  ins_pc_relative(1);
%}
10201 // ============================================================================
10202 // The 2nd slow-half of a subtype check. Scan the subklass's 2ndary superklass
10203 // array for an instance of the superklass. Set a hidden internal cache on a
10204 // hit (cache is checked with exposed code in gen_subtype_check()). Return
10205 // NZ for a miss or zero for a hit. The encoding ALSO sets flags.
// Slow-half subtype check: scan the subklass's secondary-superclass array for
// the superklass (see the block comment above). Result: zero on hit,
// non-zero on miss; encoding lives in enc_PartialSubtypeCheck.
instruct partialSubtypeCheck( mRegP result, mRegP sub, mRegP super ) %{
  match(Set result (PartialSubtypeCheck sub super));
  ins_cost(1100);  // slightly larger than the next version
  format %{ "partialSubtypeCheck result=$result, sub=$sub, super=$super " %}

  ins_encode( enc_PartialSubtypeCheck(result, sub, super) );
  ins_pipe( pipe_slow );
%}
10216 // Conditional-store of an int value.
10217 // ZF flag is set on success, reset otherwise. Implemented with a CMPXCHG on Intel.
// Conditional store of an int via an ll/sc retry loop:
//   - ll loads the current value; if it differs from $oldval, fail
//     (delay slot zeroes AT, the flag consumed by jmpCon_flags).
//   - otherwise sc attempts the store; on sc failure retry from the ll.
//   - on success the delay slot sets AT = 0xFF (non-zero => success).
// sync() before/after gives the required memory ordering.
// Only the base+disp (simm16) addressing form is supported.
instruct storeIConditional( memory mem, mRegI oldval, mRegI newval, FlagsReg cr ) %{
  match(Set cr (StoreIConditional mem (Binary oldval newval)));
  // effect(KILL oldval);
  format %{ "CMPXCHG $newval, $mem, $oldval \t# @storeIConditional" %}

  ins_encode %{
    Register oldval = $oldval$$Register;
    Register newval = $newval$$Register;
    Address addr(as_Register($mem$$base), $mem$$disp);
    Label again, failure;

    // int base = $mem$$base;
    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;

    guarantee(Assembler::is_simm16(disp), "");

    if( scale != 0 ) Unimplemented();
    if( index != 0 ) {
      __ stop("in storeIConditional: index != 0");
    } else {
      __ bind(again);
      __ sync();
      __ ll(AT, addr);
      __ bne(AT, oldval, failure);
      __ delayed()->addu(AT, R0, R0);  // delay slot: AT = 0 (failure flag)

      __ addu(AT, newval, R0);
      __ sc(AT, addr);
      __ beq(AT, R0, again);            // sc failed: retry
      __ delayed()->addiu(AT, R0, 0xFF); // delay slot: AT != 0 (success flag)
      __ bind(failure);
      __ sync();
    }
  %}

  ins_pipe( pipe_cmpxchg );
%}
10258 // Conditional-store of a long value.
10259 // ZF flag is set on success, reset otherwise. Implemented with a CMPXCHG.
// Conditional store of a long value, delegating to
// MacroAssembler::cmpxchg (which performs the lld/scd loop and leaves the
// success flag in AT for jmpCon_flags). $oldval is clobbered (KILL).
// Only the base+disp (simm16) addressing form is supported.
instruct storeLConditional(memory mem, t2RegL oldval, mRegL newval, FlagsReg cr )
%{
  match(Set cr (StoreLConditional mem (Binary oldval newval)));
  effect(KILL oldval);

  format %{ "cmpxchg $mem, $newval\t# If $oldval == $mem then store $newval into $mem" %}
  ins_encode%{
    Register oldval = $oldval$$Register;
    Register newval = $newval$$Register;
    // Use as_Register for the base, consistent with storeIConditional above
    // (was a C-style (Register) cast).
    Address addr(as_Register($mem$$base), $mem$$disp);

    int index = $mem$$index;
    int scale = $mem$$scale;
    int disp = $mem$$disp;

    guarantee(Assembler::is_simm16(disp), "");

    if( scale != 0 ) Unimplemented();
    if( index != 0 ) {
      // Fix: the message previously said "storeIConditional" (copy-paste).
      __ stop("in storeLConditional: index != 0");
    } else {
      __ cmpxchg(newval, addr, oldval);
    }
  %}
  ins_pipe(pipe_cmpxchg);
%}
// CAS of a 32-bit int: cmpxchg32 runs the ll/sc loop and leaves the
// success flag in AT, which is copied into $res (1 = success, 0 = failure
// -- TODO confirm against MacroAssembler::cmpxchg32). $oldval is clobbered.
// NOTE(review): the multi-line format text describes an older branch-based
// encoding; the actual code is just cmpxchg32 + move.
instruct compareAndSwapI( mRegI res, mRegP mem_ptr, mS2RegI oldval, mRegI newval) %{
  match(Set res (CompareAndSwapI mem_ptr (Binary oldval newval)));
  effect(KILL oldval);
  // match(CompareAndSwapI mem_ptr (Binary oldval newval));
  format %{ "CMPXCHG $newval, [$mem_ptr], $oldval @ compareAndSwapI\n\t"
            "MOV $res, 1 @ compareAndSwapI\n\t"
            "BNE AT, R0 @ compareAndSwapI\n\t"
            "MOV $res, 0 @ compareAndSwapI\n"
            "L:" %}
  ins_encode %{
    Register newval = $newval$$Register;
    Register oldval = $oldval$$Register;
    Register res = $res$$Register;
    Address addr($mem_ptr$$Register, 0);
    Label L;

    __ cmpxchg32(newval, addr, oldval);
    __ move(res, AT);  // AT holds the success flag
  %}
  ins_pipe( pipe_cmpxchg );
%}
10310 //FIXME:
//FIXME:
// CAS of a pointer-width value: 64-bit cmpxchg, success flag returned in
// $res via AT. $oldval is clobbered (KILL).
instruct compareAndSwapP( mRegI res, mRegP mem_ptr, s2_RegP oldval, mRegP newval) %{
  match(Set res (CompareAndSwapP mem_ptr (Binary oldval newval)));
  effect(KILL oldval);
  format %{ "CMPXCHG $newval, [$mem_ptr], $oldval @ compareAndSwapP\n\t"
            "MOV $res, AT @ compareAndSwapP\n\t"
            "L:" %}
  ins_encode %{
    Register newval = $newval$$Register;
    Register oldval = $oldval$$Register;
    Register res = $res$$Register;
    Address addr($mem_ptr$$Register, 0);
    Label L;

    __ cmpxchg(newval, addr, oldval);
    __ move(res, AT);  // AT holds the success flag
  %}
  ins_pipe( pipe_cmpxchg );
%}
// CAS of a compressed (narrow) oop: 32-bit cmpxchg32, success flag in $res.
// $oldval is clobbered (KILL) — first by the sign-extension below, then by
// the cmpxchg loop.
instruct compareAndSwapN( mRegI res, mRegP mem_ptr, t2_RegN oldval, mRegN newval) %{
  match(Set res (CompareAndSwapN mem_ptr (Binary oldval newval)));
  effect(KILL oldval);
  format %{ "CMPXCHG $newval, [$mem_ptr], $oldval @ compareAndSwapN\n\t"
            "MOV $res, AT @ compareAndSwapN\n\t"
            "L:" %}
  ins_encode %{
    Register newval = $newval$$Register;
    Register oldval = $oldval$$Register;
    Register res = $res$$Register;
    Address addr($mem_ptr$$Register, 0);
    Label L;

    /* 2013/7/19 Jin: cmpxchg32 is implemented with ll/sc, which will do sign extension.
     *      Thus, we should extend oldval's sign for correct comparision.
     */
    // sll with shift 0 sign-extends the low 32 bits on MIPS64.
    __ sll(oldval, oldval, 0);

    __ cmpxchg32(newval, addr, oldval);
    __ move(res, AT);  // AT holds the success flag
  %}
  ins_pipe( pipe_cmpxchg );
%}
10354 //----------Max and Min--------------------------------------------------------
10355 // Min Instructions
10356 ////
10357 // *** Min and Max using the conditional move are slower than the
10358 // *** branch version on a Pentium III.
10359 // // Conditional move for min
10360 //instruct cmovI_reg_lt( eRegI op2, eRegI op1, eFlagsReg cr ) %{
10361 // effect( USE_DEF op2, USE op1, USE cr );
10362 // format %{ "CMOVlt $op2,$op1\t! min" %}
10363 // opcode(0x4C,0x0F);
10364 // ins_encode( OpcS, OpcP, RegReg( op2, op1 ) );
10365 // ins_pipe( pipe_cmov_reg );
10366 //%}
10367 //
10368 //// Min Register with Register (P6 version)
10369 //instruct minI_eReg_p6( eRegI op1, eRegI op2 ) %{
10370 // predicate(VM_Version::supports_cmov() );
10371 // match(Set op2 (MinI op1 op2));
10372 // ins_cost(200);
10373 // expand %{
10374 // eFlagsReg cr;
10375 // compI_eReg(cr,op1,op2);
10376 // cmovI_reg_lt(op2,op1,cr);
10377 // %}
10378 //%}
10380 // Min Register with Register (generic version)
// Min Register with Register (generic version).
// dst = min(src1, src2), signed. When dst aliases an input, a branch-over
// sequence keeps/overwrites dst in place; otherwise a branch with a
// delay-slot move selects into T9 first so neither input is clobbered
// before both are read.
instruct minI_Reg_Reg(mRegI dst, mRegI src1, mRegI src2) %{
  match(Set dst (MinI src1 src2));
  //effect(KILL flags);
  ins_cost(300);

  format %{ "MIN    $dst, ($src1,$src2) @minI_Reg_Reg" %}
  ins_encode %{
    Register dst   = $dst$$Register;
    Register src1  = $src1$$Register;
    Register src2  = $src2$$Register;
    Label L;

    if(dst == src1) {
       // dst already holds src1; replace it only if src2 < dst.
       __ slt(AT, src2, dst);
       __ beq(AT, R0, L);
       __ nop();
       __ addu(dst, src2, R0);
       __ bind(L);
    } else if(dst == src2) {
       // dst already holds src2; replace it only if src1 < dst.
       __ slt(AT, src1, dst);
       __ beq(AT, R0, L);
       __ nop();
       __ addu(dst, src1, R0);
       __ bind(L);
    } else {
       // Select into T9: the delay slot sets T9 = src1 on both paths; the
       // fall-through (src1 >= src2) then overwrites it with src2.
       __ slt(AT, src1, src2);
       __ bne(AT, R0, L);
       __ delayed()->addu(T9, src1, R0);
       __ addu(T9, src2, R0);
       __ bind(L);
       __ addu(dst, T9, R0);
    }
  %}

  ins_pipe( pipe_slow );
%}
10418 // Max Register with Register
10419 // *** Min and Max using the conditional move are slower than the
10420 // *** branch version on a Pentium III.
10421 // // Conditional move for max
10422 //instruct cmovI_reg_gt( eRegI op2, eRegI op1, eFlagsReg cr ) %{
10423 // effect( USE_DEF op2, USE op1, USE cr );
10424 // format %{ "CMOVgt $op2,$op1\t! max" %}
10425 // opcode(0x4F,0x0F);
10426 // ins_encode( OpcS, OpcP, RegReg( op2, op1 ) );
10427 // ins_pipe( pipe_cmov_reg );
10428 //%}
10429 //
10430 // // Max Register with Register (P6 version)
10431 //instruct maxI_eReg_p6( eRegI op1, eRegI op2 ) %{
10432 // predicate(VM_Version::supports_cmov() );
10433 // match(Set op2 (MaxI op1 op2));
10434 // ins_cost(200);
10435 // expand %{
10436 // eFlagsReg cr;
10437 // compI_eReg(cr,op1,op2);
10438 // cmovI_reg_gt(op2,op1,cr);
10439 // %}
10440 //%}
10442 // Max Register with Register (generic version)
// Max Register with Register (generic version).
// dst = max(src1, src2), signed; mirror image of minI_Reg_Reg above.
instruct maxI_Reg_Reg(mRegI dst, mRegI src1, mRegI src2) %{
  match(Set dst (MaxI src1 src2));
  ins_cost(300);

  format %{ "MAX    $dst, ($src1,$src2) @maxI_Reg_Reg" %}

  ins_encode %{
    Register dst   = $dst$$Register;
    Register src1  = $src1$$Register;
    Register src2  = $src2$$Register;
    Label L;

    if(dst == src1) {
       // dst already holds src1; replace it only if dst < src2.
       __ slt(AT, dst, src2);
       __ beq(AT, R0, L);
       __ nop();
       __ addu(dst, src2, R0);
       __ bind(L);
    } else if (dst == src2) {
       // dst already holds src2; replace it only if dst < src1.
       __ slt(AT, dst, src1);
       __ beq(AT, R0, L);
       __ nop();
       __ addu(dst, src1, R0);
       __ bind(L);
    } else {
       // Select into T9: the delay slot sets T9 = src1 on both paths; the
       // fall-through (src1 < src2) then overwrites it with src2.
       __ slt(AT, src1, src2);
       __ beq(AT, R0, L);
       __ delayed()->addu(T9, src1, R0);
       __ addu(T9, src2, R0);
       __ bind(L);
       __ addu(dst, T9, R0);
    }
  %}

  ins_pipe( pipe_slow );
%}
10481 // ============================================================================
10482 // Safepoint Instruction
// Safepoint poll: load from the VM polling page. When a safepoint is
// requested the page is protected and this load faults, trapping the thread
// into the safepoint handler. S7 holds the page address, AT the loaded value
// (discarded); the poll_type relocation marks the load for the signal handler.
instruct safePoint_poll() %{
  match(SafePoint);

  ins_cost(125);
  format %{ "lui     at, HI(polling_page)]\t! Safepoint: poll for GC @ safePoint_poll \n\t"
            "lw      at, LO(polling_page)]\n " %}

  ins_encode %{
    __ block_comment("Safepoint:");
#ifndef OPT_SAFEPOINT
    // Full 48-bit address materialization.
    __ li48(S7, (long)os::get_polling_page());
    __ relocate(relocInfo::poll_type);
    __ lw(AT, S7, 0);
#else
    // Split hi/lo form: lui + lw with the low half as the load offset.
    __ lui(S7, Assembler::split_high((intptr_t)os::get_polling_page()));
    __ relocate(relocInfo::poll_type);
    __ lw(AT, S7, Assembler::split_low((intptr_t)os::get_polling_page()));
#endif
  %}

  ins_pipe( ialu_mem_imm );
%}
10506 //----------PEEPHOLE RULES-----------------------------------------------------
10507 // These must follow all instruction definitions as they use the names
10508 // defined in the instructions definitions.
10509 //
10510 // peepmatch ( root_instr_name [preceeding_instruction]* );
10511 //
10512 // peepconstraint %{
10513 // (instruction_number.operand_name relational_op instruction_number.operand_name
10514 // [, ...] );
10515 // // instruction numbers are zero-based using left to right order in peepmatch
10516 //
10517 // peepreplace ( instr_name ( [instruction_number.operand_name]* ) );
10518 // // provide an instruction_number.operand_name for each operand that appears
10519 // // in the replacement instruction's match rule
10520 //
10521 // ---------VM FLAGS---------------------------------------------------------
10522 //
10523 // All peephole optimizations can be turned off using -XX:-OptoPeephole
10524 //
10525 // Each peephole rule is given an identifying number starting with zero and
10526 // increasing by one in the order seen by the parser. An individual peephole
10527 // can be enabled, and all others disabled, by using -XX:OptoPeepholeAt=#
10528 // on the command-line.
10529 //
10530 // ---------CURRENT LIMITATIONS----------------------------------------------
10531 //
10532 // Only match adjacent instructions in same basic block
10533 // Only equality constraints
10534 // Only constraints between operands, not (0.dest_reg == EAX_enc)
10535 // Only one replacement instruction
10536 //
10537 // ---------EXAMPLE----------------------------------------------------------
10538 //
10539 // // pertinent parts of existing instructions in architecture description
10540 // instruct movI(eRegI dst, eRegI src) %{
10541 // match(Set dst (CopyI src));
10542 // %}
10543 //
10544 // instruct incI_eReg(eRegI dst, immI1 src, eFlagsReg cr) %{
10545 // match(Set dst (AddI dst src));
10546 // effect(KILL cr);
10547 // %}
10548 //
10549 // // Change (inc mov) to lea
10550 // peephole %{
10551 // // increment preceeded by register-register move
10552 // peepmatch ( incI_eReg movI );
10553 // // require that the destination register of the increment
10554 // // match the destination register of the move
10555 // peepconstraint ( 0.dst == 1.dst );
10556 // // construct a replacement instruction that sets
10557 // // the destination to ( move's source register + one )
10558 // peepreplace ( leaI_eReg_immI( 0.dst 1.src 0.src ) );
10559 // %}
10560 //
10561 // Implementation no longer uses movX instructions since
10562 // machine-independent system no longer uses CopyX nodes.
10563 //
10564 // peephole %{
10565 // peepmatch ( incI_eReg movI );
10566 // peepconstraint ( 0.dst == 1.dst );
10567 // peepreplace ( leaI_eReg_immI( 0.dst 1.src 0.src ) );
10568 // %}
10569 //
10570 // peephole %{
10571 // peepmatch ( decI_eReg movI );
10572 // peepconstraint ( 0.dst == 1.dst );
10573 // peepreplace ( leaI_eReg_immI( 0.dst 1.src 0.src ) );
10574 // %}
10575 //
10576 // peephole %{
10577 // peepmatch ( addI_eReg_imm movI );
10578 // peepconstraint ( 0.dst == 1.dst );
10579 // peepreplace ( leaI_eReg_immI( 0.dst 1.src 0.src ) );
10580 // %}
10581 //
10582 // peephole %{
10583 // peepmatch ( addP_eReg_imm movP );
10584 // peepconstraint ( 0.dst == 1.dst );
10585 // peepreplace ( leaP_eReg_immI( 0.dst 1.src 0.src ) );
10586 // %}
10588 // // Change load of spilled value to only a spill
10589 // instruct storeI(memory mem, eRegI src) %{
10590 // match(Set mem (StoreI mem src));
10591 // %}
10592 //
10593 // instruct loadI(eRegI dst, memory mem) %{
10594 // match(Set dst (LoadI mem));
10595 // %}
10596 //
10597 //peephole %{
10598 // peepmatch ( loadI storeI );
10599 // peepconstraint ( 1.src == 0.dst, 1.mem == 0.mem );
10600 // peepreplace ( storeI( 1.mem 1.mem 1.src ) );
10601 //%}
10603 //----------SMARTSPILL RULES---------------------------------------------------
10604 // These must follow all instruction definitions as they use the names
10605 // defined in the instructions definitions.