Tue, 09 Oct 2012 10:11:38 +0200
7054512: Compress class pointers after perm gen removal
Summary: support of compressed class pointers in the compilers.
Reviewed-by: kvn, twisti
1 //
2 // Copyright (c) 1998, 2012, Oracle and/or its affiliates. All rights reserved.
3 // DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 //
5 // This code is free software; you can redistribute it and/or modify it
6 // under the terms of the GNU General Public License version 2 only, as
7 // published by the Free Software Foundation.
8 //
9 // This code is distributed in the hope that it will be useful, but WITHOUT
10 // ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 // FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 // version 2 for more details (a copy is included in the LICENSE file that
13 // accompanied this code).
14 //
15 // You should have received a copy of the GNU General Public License version
16 // 2 along with this work; if not, write to the Free Software Foundation,
17 // Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 //
19 // Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 // or visit www.oracle.com if you need additional information or have any
21 // questions.
22 //
23 //
25 // SPARC Architecture Description File
27 //----------REGISTER DEFINITION BLOCK------------------------------------------
28 // This information is used by the matcher and the register allocator to
29 // describe individual registers and classes of registers within the target
// architecture.
register %{
//----------Architecture Description Register Definitions----------------------
// General Registers
// "reg_def"  name ( register save type, C convention save type,
//                   ideal register type, encoding, vm name );
// Register Save Types:
//
// NS  = No-Save:       The register allocator assumes that these registers
//                      can be used without saving upon entry to the method, &
//                      that they do not need to be saved at call sites.
//
// SOC = Save-On-Call:  The register allocator assumes that these registers
//                      can be used without saving upon entry to the method,
//                      but that they must be saved at call sites.
//
// SOE = Save-On-Entry: The register allocator assumes that these registers
//                      must be saved before using them upon entry to the
//                      method, but they do not need to be saved at call
//                      sites.
//
// AS  = Always-Save:   The register allocator assumes that these registers
//                      must be saved before using them upon entry to the
//                      method, & that they must be saved at call sites.
//
// Ideal Register Type is used to determine how to save & restore a
// register.  Op_RegI will get spilled with LoadI/StoreI, Op_RegP will get
// spilled with LoadP/StoreP.  If the register supports both, use Op_RegI.
//
// The encoding number is the actual bit-pattern placed into the opcodes.

// ----------------------------
// Integer/Long Registers
// ----------------------------

// Need to expose the hi/lo aspect of 64-bit registers
// This register set is used for both the 64-bit build and
// the 32-bit build with 1-register longs.

// Global Registers 0-7
reg_def R_G0H( NS, NS, Op_RegI,128, G0->as_VMReg()->next());
reg_def R_G0 ( NS, NS, Op_RegI,  0, G0->as_VMReg());
reg_def R_G1H(SOC, SOC, Op_RegI,129, G1->as_VMReg()->next());
reg_def R_G1 (SOC, SOC, Op_RegI,  1, G1->as_VMReg());
reg_def R_G2H( NS, NS, Op_RegI,130, G2->as_VMReg()->next());
reg_def R_G2 ( NS, NS, Op_RegI,  2, G2->as_VMReg());
reg_def R_G3H(SOC, SOC, Op_RegI,131, G3->as_VMReg()->next());
reg_def R_G3 (SOC, SOC, Op_RegI,  3, G3->as_VMReg());
reg_def R_G4H(SOC, SOC, Op_RegI,132, G4->as_VMReg()->next());
reg_def R_G4 (SOC, SOC, Op_RegI,  4, G4->as_VMReg());
reg_def R_G5H(SOC, SOC, Op_RegI,133, G5->as_VMReg()->next());
reg_def R_G5 (SOC, SOC, Op_RegI,  5, G5->as_VMReg());
reg_def R_G6H( NS, NS, Op_RegI,134, G6->as_VMReg()->next());
reg_def R_G6 ( NS, NS, Op_RegI,  6, G6->as_VMReg());
reg_def R_G7H( NS, NS, Op_RegI,135, G7->as_VMReg()->next());
reg_def R_G7 ( NS, NS, Op_RegI,  7, G7->as_VMReg());

// Output Registers 0-7
reg_def R_O0H(SOC, SOC, Op_RegI,136, O0->as_VMReg()->next());
reg_def R_O0 (SOC, SOC, Op_RegI,  8, O0->as_VMReg());
reg_def R_O1H(SOC, SOC, Op_RegI,137, O1->as_VMReg()->next());
reg_def R_O1 (SOC, SOC, Op_RegI,  9, O1->as_VMReg());
reg_def R_O2H(SOC, SOC, Op_RegI,138, O2->as_VMReg()->next());
reg_def R_O2 (SOC, SOC, Op_RegI, 10, O2->as_VMReg());
reg_def R_O3H(SOC, SOC, Op_RegI,139, O3->as_VMReg()->next());
reg_def R_O3 (SOC, SOC, Op_RegI, 11, O3->as_VMReg());
reg_def R_O4H(SOC, SOC, Op_RegI,140, O4->as_VMReg()->next());
reg_def R_O4 (SOC, SOC, Op_RegI, 12, O4->as_VMReg());
reg_def R_O5H(SOC, SOC, Op_RegI,141, O5->as_VMReg()->next());
reg_def R_O5 (SOC, SOC, Op_RegI, 13, O5->as_VMReg());
reg_def R_SPH( NS, NS, Op_RegI,142, SP->as_VMReg()->next());
reg_def R_SP ( NS, NS, Op_RegI, 14, SP->as_VMReg());
reg_def R_O7H(SOC, SOC, Op_RegI,143, O7->as_VMReg()->next());
reg_def R_O7 (SOC, SOC, Op_RegI, 15, O7->as_VMReg());

// Local Registers 0-7
reg_def R_L0H( NS, NS, Op_RegI,144, L0->as_VMReg()->next());
reg_def R_L0 ( NS, NS, Op_RegI, 16, L0->as_VMReg());
reg_def R_L1H( NS, NS, Op_RegI,145, L1->as_VMReg()->next());
reg_def R_L1 ( NS, NS, Op_RegI, 17, L1->as_VMReg());
reg_def R_L2H( NS, NS, Op_RegI,146, L2->as_VMReg()->next());
reg_def R_L2 ( NS, NS, Op_RegI, 18, L2->as_VMReg());
reg_def R_L3H( NS, NS, Op_RegI,147, L3->as_VMReg()->next());
reg_def R_L3 ( NS, NS, Op_RegI, 19, L3->as_VMReg());
reg_def R_L4H( NS, NS, Op_RegI,148, L4->as_VMReg()->next());
reg_def R_L4 ( NS, NS, Op_RegI, 20, L4->as_VMReg());
reg_def R_L5H( NS, NS, Op_RegI,149, L5->as_VMReg()->next());
reg_def R_L5 ( NS, NS, Op_RegI, 21, L5->as_VMReg());
reg_def R_L6H( NS, NS, Op_RegI,150, L6->as_VMReg()->next());
reg_def R_L6 ( NS, NS, Op_RegI, 22, L6->as_VMReg());
reg_def R_L7H( NS, NS, Op_RegI,151, L7->as_VMReg()->next());
reg_def R_L7 ( NS, NS, Op_RegI, 23, L7->as_VMReg());

// Input Registers 0-7
reg_def R_I0H( NS, NS, Op_RegI,152, I0->as_VMReg()->next());
reg_def R_I0 ( NS, NS, Op_RegI, 24, I0->as_VMReg());
reg_def R_I1H( NS, NS, Op_RegI,153, I1->as_VMReg()->next());
reg_def R_I1 ( NS, NS, Op_RegI, 25, I1->as_VMReg());
reg_def R_I2H( NS, NS, Op_RegI,154, I2->as_VMReg()->next());
reg_def R_I2 ( NS, NS, Op_RegI, 26, I2->as_VMReg());
reg_def R_I3H( NS, NS, Op_RegI,155, I3->as_VMReg()->next());
reg_def R_I3 ( NS, NS, Op_RegI, 27, I3->as_VMReg());
reg_def R_I4H( NS, NS, Op_RegI,156, I4->as_VMReg()->next());
reg_def R_I4 ( NS, NS, Op_RegI, 28, I4->as_VMReg());
reg_def R_I5H( NS, NS, Op_RegI,157, I5->as_VMReg()->next());
reg_def R_I5 ( NS, NS, Op_RegI, 29, I5->as_VMReg());
reg_def R_FPH( NS, NS, Op_RegI,158, FP->as_VMReg()->next());
reg_def R_FP ( NS, NS, Op_RegI, 30, FP->as_VMReg());
reg_def R_I7H( NS, NS, Op_RegI,159, I7->as_VMReg()->next());
reg_def R_I7 ( NS, NS, Op_RegI, 31, I7->as_VMReg());

// ----------------------------
// Float/Double Registers
// ----------------------------

// Float Registers
reg_def R_F0 ( SOC, SOC, Op_RegF,  0, F0->as_VMReg());
reg_def R_F1 ( SOC, SOC, Op_RegF,  1, F1->as_VMReg());
reg_def R_F2 ( SOC, SOC, Op_RegF,  2, F2->as_VMReg());
reg_def R_F3 ( SOC, SOC, Op_RegF,  3, F3->as_VMReg());
reg_def R_F4 ( SOC, SOC, Op_RegF,  4, F4->as_VMReg());
reg_def R_F5 ( SOC, SOC, Op_RegF,  5, F5->as_VMReg());
reg_def R_F6 ( SOC, SOC, Op_RegF,  6, F6->as_VMReg());
reg_def R_F7 ( SOC, SOC, Op_RegF,  7, F7->as_VMReg());
reg_def R_F8 ( SOC, SOC, Op_RegF,  8, F8->as_VMReg());
reg_def R_F9 ( SOC, SOC, Op_RegF,  9, F9->as_VMReg());
reg_def R_F10( SOC, SOC, Op_RegF, 10, F10->as_VMReg());
reg_def R_F11( SOC, SOC, Op_RegF, 11, F11->as_VMReg());
reg_def R_F12( SOC, SOC, Op_RegF, 12, F12->as_VMReg());
reg_def R_F13( SOC, SOC, Op_RegF, 13, F13->as_VMReg());
reg_def R_F14( SOC, SOC, Op_RegF, 14, F14->as_VMReg());
reg_def R_F15( SOC, SOC, Op_RegF, 15, F15->as_VMReg());
reg_def R_F16( SOC, SOC, Op_RegF, 16, F16->as_VMReg());
reg_def R_F17( SOC, SOC, Op_RegF, 17, F17->as_VMReg());
reg_def R_F18( SOC, SOC, Op_RegF, 18, F18->as_VMReg());
reg_def R_F19( SOC, SOC, Op_RegF, 19, F19->as_VMReg());
reg_def R_F20( SOC, SOC, Op_RegF, 20, F20->as_VMReg());
reg_def R_F21( SOC, SOC, Op_RegF, 21, F21->as_VMReg());
reg_def R_F22( SOC, SOC, Op_RegF, 22, F22->as_VMReg());
reg_def R_F23( SOC, SOC, Op_RegF, 23, F23->as_VMReg());
reg_def R_F24( SOC, SOC, Op_RegF, 24, F24->as_VMReg());
reg_def R_F25( SOC, SOC, Op_RegF, 25, F25->as_VMReg());
reg_def R_F26( SOC, SOC, Op_RegF, 26, F26->as_VMReg());
reg_def R_F27( SOC, SOC, Op_RegF, 27, F27->as_VMReg());
reg_def R_F28( SOC, SOC, Op_RegF, 28, F28->as_VMReg());
reg_def R_F29( SOC, SOC, Op_RegF, 29, F29->as_VMReg());
reg_def R_F30( SOC, SOC, Op_RegF, 30, F30->as_VMReg());
reg_def R_F31( SOC, SOC, Op_RegF, 31, F31->as_VMReg());

// Double Registers
// The rules of ADL require that double registers be defined in pairs.
// Each pair must be two 32-bit values, but not necessarily a pair of
// single float registers.  In each pair, ADLC-assigned register numbers
// must be adjacent, with the lower number even.  Finally, when the
// CPU stores such a register pair to memory, the word associated with
// the lower ADLC-assigned number must be stored to the lower address.

// These definitions specify the actual bit encodings of the sparc
// double fp register numbers.  FloatRegisterImpl in register_sparc.hpp
// wants 0-63, so we have to convert every time we want to use fp regs
// with the macroassembler, using reg_to_DoubleFloatRegister_object().
// 255 is a flag meaning "don't go here".
// I believe we can't handle callee-save doubles D32 and up until
// the place in the sparc stack crawler that asserts on the 255 is
// fixed up.
reg_def R_D32 (SOC, SOC, Op_RegD,  1, F32->as_VMReg());
reg_def R_D32x(SOC, SOC, Op_RegD,255, F32->as_VMReg()->next());
reg_def R_D34 (SOC, SOC, Op_RegD,  3, F34->as_VMReg());
reg_def R_D34x(SOC, SOC, Op_RegD,255, F34->as_VMReg()->next());
reg_def R_D36 (SOC, SOC, Op_RegD,  5, F36->as_VMReg());
reg_def R_D36x(SOC, SOC, Op_RegD,255, F36->as_VMReg()->next());
reg_def R_D38 (SOC, SOC, Op_RegD,  7, F38->as_VMReg());
reg_def R_D38x(SOC, SOC, Op_RegD,255, F38->as_VMReg()->next());
reg_def R_D40 (SOC, SOC, Op_RegD,  9, F40->as_VMReg());
reg_def R_D40x(SOC, SOC, Op_RegD,255, F40->as_VMReg()->next());
reg_def R_D42 (SOC, SOC, Op_RegD, 11, F42->as_VMReg());
reg_def R_D42x(SOC, SOC, Op_RegD,255, F42->as_VMReg()->next());
reg_def R_D44 (SOC, SOC, Op_RegD, 13, F44->as_VMReg());
reg_def R_D44x(SOC, SOC, Op_RegD,255, F44->as_VMReg()->next());
reg_def R_D46 (SOC, SOC, Op_RegD, 15, F46->as_VMReg());
reg_def R_D46x(SOC, SOC, Op_RegD,255, F46->as_VMReg()->next());
reg_def R_D48 (SOC, SOC, Op_RegD, 17, F48->as_VMReg());
reg_def R_D48x(SOC, SOC, Op_RegD,255, F48->as_VMReg()->next());
reg_def R_D50 (SOC, SOC, Op_RegD, 19, F50->as_VMReg());
reg_def R_D50x(SOC, SOC, Op_RegD,255, F50->as_VMReg()->next());
reg_def R_D52 (SOC, SOC, Op_RegD, 21, F52->as_VMReg());
reg_def R_D52x(SOC, SOC, Op_RegD,255, F52->as_VMReg()->next());
reg_def R_D54 (SOC, SOC, Op_RegD, 23, F54->as_VMReg());
reg_def R_D54x(SOC, SOC, Op_RegD,255, F54->as_VMReg()->next());
reg_def R_D56 (SOC, SOC, Op_RegD, 25, F56->as_VMReg());
reg_def R_D56x(SOC, SOC, Op_RegD,255, F56->as_VMReg()->next());
reg_def R_D58 (SOC, SOC, Op_RegD, 27, F58->as_VMReg());
reg_def R_D58x(SOC, SOC, Op_RegD,255, F58->as_VMReg()->next());
reg_def R_D60 (SOC, SOC, Op_RegD, 29, F60->as_VMReg());
reg_def R_D60x(SOC, SOC, Op_RegD,255, F60->as_VMReg()->next());
reg_def R_D62 (SOC, SOC, Op_RegD, 31, F62->as_VMReg());
reg_def R_D62x(SOC, SOC, Op_RegD,255, F62->as_VMReg()->next());

// ----------------------------
// Special Registers
// Condition Codes Flag Registers
// I tried to break out ICC and XCC but it's not very pretty.
// Every Sparc instruction which defs/kills one also kills the other.
// Hence every compare instruction which defs one kind of flags ends
// up needing a kill of the other.
reg_def CCR (SOC, SOC, Op_RegFlags, 0, VMRegImpl::Bad());

reg_def FCC0(SOC, SOC, Op_RegFlags, 0, VMRegImpl::Bad());
reg_def FCC1(SOC, SOC, Op_RegFlags, 1, VMRegImpl::Bad());
reg_def FCC2(SOC, SOC, Op_RegFlags, 2, VMRegImpl::Bad());
reg_def FCC3(SOC, SOC, Op_RegFlags, 3, VMRegImpl::Bad());

// ----------------------------
// Specify the enum values for the registers.  These enums are only used by the
// OptoReg "class". We can convert these enum values at will to VMReg when needed
// for visibility to the rest of the vm. The order of this enum influences the
// register allocator so having the freedom to set this order and not be stuck
// with the order that is natural for the rest of the vm is worth it.
alloc_class chunk0(
  R_L0,R_L0H, R_L1,R_L1H, R_L2,R_L2H, R_L3,R_L3H, R_L4,R_L4H, R_L5,R_L5H, R_L6,R_L6H, R_L7,R_L7H,
  R_G0,R_G0H, R_G1,R_G1H, R_G2,R_G2H, R_G3,R_G3H, R_G4,R_G4H, R_G5,R_G5H, R_G6,R_G6H, R_G7,R_G7H,
  R_O7,R_O7H, R_SP,R_SPH, R_O0,R_O0H, R_O1,R_O1H, R_O2,R_O2H, R_O3,R_O3H, R_O4,R_O4H, R_O5,R_O5H,
  R_I0,R_I0H, R_I1,R_I1H, R_I2,R_I2H, R_I3,R_I3H, R_I4,R_I4H, R_I5,R_I5H, R_FP,R_FPH, R_I7,R_I7H);

// Note that a register is not allocatable unless it is also mentioned
// in a widely-used reg_class below.  Thus, R_G7 and R_G0 are outside i_reg.

alloc_class chunk1(
  // The first registers listed here are those most likely to be used
  // as temporaries.  We move F0..F7 away from the front of the list,
  // to reduce the likelihood of interferences with parameters and
  // return values.  Likewise, we avoid using F0/F1 for parameters,
  // since they are used for return values.
  // This FPU fine-tuning is worth about 1% on the SPEC geomean.
  R_F8 ,R_F9 ,R_F10,R_F11,R_F12,R_F13,R_F14,R_F15,
  R_F16,R_F17,R_F18,R_F19,R_F20,R_F21,R_F22,R_F23,
  R_F24,R_F25,R_F26,R_F27,R_F28,R_F29,R_F30,R_F31,
  R_F0 ,R_F1 ,R_F2 ,R_F3 ,R_F4 ,R_F5 ,R_F6 ,R_F7 , // used for arguments and return values
  R_D32,R_D32x,R_D34,R_D34x,R_D36,R_D36x,R_D38,R_D38x,
  R_D40,R_D40x,R_D42,R_D42x,R_D44,R_D44x,R_D46,R_D46x,
  R_D48,R_D48x,R_D50,R_D50x,R_D52,R_D52x,R_D54,R_D54x,
  R_D56,R_D56x,R_D58,R_D58x,R_D60,R_D60x,R_D62,R_D62x);

alloc_class chunk2(CCR, FCC0, FCC1, FCC2, FCC3);

//----------Architecture Description Register Classes--------------------------
// Several register classes are automatically defined based upon information in
// this architecture description.
// 1) reg_class inline_cache_reg           ( as defined in frame section )
// 2) reg_class interpreter_method_oop_reg ( as defined in frame section )
// 3) reg_class stack_slots( /* one chunk of stack-based "registers" */ )
//

// G0 is not included in integer class since it has special meaning.
reg_class g0_reg(R_G0);

// ----------------------------
// Integer Register Classes
// ----------------------------
// Exclusions from i_reg:
// R_G0: hardwired zero
// R_G2: reserved by HotSpot to the TLS register (invariant within Java)
// R_G6: reserved by Solaris ABI to tools
// R_G7: reserved by Solaris ABI to libthread
// R_O7: Used as a temp in many encodings
reg_class int_reg(R_G1,R_G3,R_G4,R_G5,R_O0,R_O1,R_O2,R_O3,R_O4,R_O5,R_L0,R_L1,R_L2,R_L3,R_L4,R_L5,R_L6,R_L7,R_I0,R_I1,R_I2,R_I3,R_I4,R_I5);

// Class for all integer registers, except the G registers.  This is used for
// encodings which use G registers as temps.  The regular inputs to such
// instructions use a "notemp_" prefix, as a hack to ensure that the allocator
// will not put an input into a temp register.
reg_class notemp_int_reg(R_O0,R_O1,R_O2,R_O3,R_O4,R_O5,R_L0,R_L1,R_L2,R_L3,R_L4,R_L5,R_L6,R_L7,R_I0,R_I1,R_I2,R_I3,R_I4,R_I5);

reg_class g1_regI(R_G1);
reg_class g3_regI(R_G3);
reg_class g4_regI(R_G4);
reg_class o0_regI(R_O0);
reg_class o7_regI(R_O7);

// ----------------------------
// Pointer Register Classes
// ----------------------------
#ifdef _LP64
// 64-bit build means 64-bit pointers means hi/lo pairs
reg_class ptr_reg(            R_G1H,R_G1,             R_G3H,R_G3, R_G4H,R_G4, R_G5H,R_G5,
                  R_O0H,R_O0, R_O1H,R_O1, R_O2H,R_O2, R_O3H,R_O3, R_O4H,R_O4, R_O5H,R_O5,
                  R_L0H,R_L0, R_L1H,R_L1, R_L2H,R_L2, R_L3H,R_L3, R_L4H,R_L4, R_L5H,R_L5, R_L6H,R_L6, R_L7H,R_L7,
                  R_I0H,R_I0, R_I1H,R_I1, R_I2H,R_I2, R_I3H,R_I3, R_I4H,R_I4, R_I5H,R_I5 );
// Lock encodings use G3 and G4 internally
reg_class lock_ptr_reg(       R_G1H,R_G1,                         R_G5H,R_G5,
                  R_O0H,R_O0, R_O1H,R_O1, R_O2H,R_O2, R_O3H,R_O3, R_O4H,R_O4, R_O5H,R_O5,
                  R_L0H,R_L0, R_L1H,R_L1, R_L2H,R_L2, R_L3H,R_L3, R_L4H,R_L4, R_L5H,R_L5, R_L6H,R_L6, R_L7H,R_L7,
                  R_I0H,R_I0, R_I1H,R_I1, R_I2H,R_I2, R_I3H,R_I3, R_I4H,R_I4, R_I5H,R_I5 );
// Special class for storeP instructions, which can store SP or RPC to TLS.
// It is also used for memory addressing, allowing direct TLS addressing.
reg_class sp_ptr_reg(         R_G1H,R_G1, R_G2H,R_G2, R_G3H,R_G3, R_G4H,R_G4, R_G5H,R_G5,
                  R_O0H,R_O0, R_O1H,R_O1, R_O2H,R_O2, R_O3H,R_O3, R_O4H,R_O4, R_O5H,R_O5, R_SPH,R_SP,
                  R_L0H,R_L0, R_L1H,R_L1, R_L2H,R_L2, R_L3H,R_L3, R_L4H,R_L4, R_L5H,R_L5, R_L6H,R_L6, R_L7H,R_L7,
                  R_I0H,R_I0, R_I1H,R_I1, R_I2H,R_I2, R_I3H,R_I3, R_I4H,R_I4, R_I5H,R_I5, R_FPH,R_FP );
// R_L7 is the lowest-priority callee-save (i.e., NS) register
// We use it to save R_G2 across calls out of Java.
reg_class l7_regP(R_L7H,R_L7);

// Other special pointer regs
reg_class g1_regP(R_G1H,R_G1);
reg_class g2_regP(R_G2H,R_G2);
reg_class g3_regP(R_G3H,R_G3);
reg_class g4_regP(R_G4H,R_G4);
reg_class g5_regP(R_G5H,R_G5);
reg_class i0_regP(R_I0H,R_I0);
reg_class o0_regP(R_O0H,R_O0);
reg_class o1_regP(R_O1H,R_O1);
reg_class o2_regP(R_O2H,R_O2);
reg_class o7_regP(R_O7H,R_O7);

#else // _LP64
// 32-bit build means 32-bit pointers means 1 register.
reg_class ptr_reg(     R_G1,      R_G3,R_G4,R_G5,
                  R_O0,R_O1,R_O2,R_O3,R_O4,R_O5,
                  R_L0,R_L1,R_L2,R_L3,R_L4,R_L5,R_L6,R_L7,
                  R_I0,R_I1,R_I2,R_I3,R_I4,R_I5);
// Lock encodings use G3 and G4 internally
reg_class lock_ptr_reg(R_G1,                R_G5,
                  R_O0,R_O1,R_O2,R_O3,R_O4,R_O5,
                  R_L0,R_L1,R_L2,R_L3,R_L4,R_L5,R_L6,R_L7,
                  R_I0,R_I1,R_I2,R_I3,R_I4,R_I5);
// Special class for storeP instructions, which can store SP or RPC to TLS.
// It is also used for memory addressing, allowing direct TLS addressing.
reg_class sp_ptr_reg( R_G1,R_G2,R_G3,R_G4,R_G5,
                  R_O0,R_O1,R_O2,R_O3,R_O4,R_O5,R_SP,
                  R_L0,R_L1,R_L2,R_L3,R_L4,R_L5,R_L6,R_L7,
                  R_I0,R_I1,R_I2,R_I3,R_I4,R_I5,R_FP);
// R_L7 is the lowest-priority callee-save (i.e., NS) register
// We use it to save R_G2 across calls out of Java.
reg_class l7_regP(R_L7);

// Other special pointer regs
reg_class g1_regP(R_G1);
reg_class g2_regP(R_G2);
reg_class g3_regP(R_G3);
reg_class g4_regP(R_G4);
reg_class g5_regP(R_G5);
reg_class i0_regP(R_I0);
reg_class o0_regP(R_O0);
reg_class o1_regP(R_O1);
reg_class o2_regP(R_O2);
reg_class o7_regP(R_O7);
#endif // _LP64

// ----------------------------
// Long Register Classes
// ----------------------------
// Longs in 1 register.  Aligned adjacent hi/lo pairs.
// Note:  O7 is never in this class; it is sometimes used as an encoding temp.
reg_class long_reg(             R_G1H,R_G1,             R_G3H,R_G3, R_G4H,R_G4, R_G5H,R_G5
                   ,R_O0H,R_O0, R_O1H,R_O1, R_O2H,R_O2, R_O3H,R_O3, R_O4H,R_O4, R_O5H,R_O5
#ifdef _LP64
// 64-bit, longs in 1 register: use all 64-bit integer registers
// 32-bit, longs in 1 register: cannot use I's and L's.  Restrict to O's and G's.
                   ,R_L0H,R_L0, R_L1H,R_L1, R_L2H,R_L2, R_L3H,R_L3, R_L4H,R_L4, R_L5H,R_L5, R_L6H,R_L6, R_L7H,R_L7
                   ,R_I0H,R_I0, R_I1H,R_I1, R_I2H,R_I2, R_I3H,R_I3, R_I4H,R_I4, R_I5H,R_I5
#endif // _LP64
                  );

reg_class g1_regL(R_G1H,R_G1);
reg_class g3_regL(R_G3H,R_G3);
reg_class o2_regL(R_O2H,R_O2);
reg_class o7_regL(R_O7H,R_O7);

// ----------------------------
// Special Class for Condition Code Flags Register
reg_class int_flags(CCR);
reg_class float_flags(FCC0,FCC1,FCC2,FCC3);
reg_class float_flag0(FCC0);

// ----------------------------
// Floating Point Register Classes
// ----------------------------
// Skip F30/F31, they are reserved for mem-mem copies
reg_class sflt_reg(R_F0,R_F1,R_F2,R_F3,R_F4,R_F5,R_F6,R_F7,R_F8,R_F9,R_F10,R_F11,R_F12,R_F13,R_F14,R_F15,R_F16,R_F17,R_F18,R_F19,R_F20,R_F21,R_F22,R_F23,R_F24,R_F25,R_F26,R_F27,R_F28,R_F29);

// Paired floating point registers--they show up in the same order as the floats,
// but they are used with the "Op_RegD" type, and always occur in even/odd pairs.
reg_class dflt_reg(R_F0, R_F1, R_F2, R_F3, R_F4, R_F5, R_F6, R_F7, R_F8, R_F9, R_F10,R_F11,R_F12,R_F13,R_F14,R_F15,
                   R_F16,R_F17,R_F18,R_F19,R_F20,R_F21,R_F22,R_F23,R_F24,R_F25,R_F26,R_F27,R_F28,R_F29,
                   /* Use extra V9 double registers; this AD file does not support V8 */
                   R_D32,R_D32x,R_D34,R_D34x,R_D36,R_D36x,R_D38,R_D38x,R_D40,R_D40x,R_D42,R_D42x,R_D44,R_D44x,R_D46,R_D46x,
                   R_D48,R_D48x,R_D50,R_D50x,R_D52,R_D52x,R_D54,R_D54x,R_D56,R_D56x,R_D58,R_D58x,R_D60,R_D60x,R_D62,R_D62x
                   );

// Paired floating point registers--they show up in the same order as the floats,
// but they are used with the "Op_RegD" type, and always occur in even/odd pairs.
// This class is usable for mis-aligned loads as happen in I2C adapters.
reg_class dflt_low_reg(R_F0, R_F1, R_F2, R_F3, R_F4, R_F5, R_F6, R_F7, R_F8, R_F9, R_F10,R_F11,R_F12,R_F13,R_F14,R_F15,
                       R_F16,R_F17,R_F18,R_F19,R_F20,R_F21,R_F22,R_F23,R_F24,R_F25,R_F26,R_F27,R_F28,R_F29);
%}
431 //----------DEFINITION BLOCK---------------------------------------------------
432 // Define name --> value mappings to inform the ADLC of an integer valued name
433 // Current support includes integer values in the range [0, 0x7FFFFFFF]
434 // Format:
435 // int_def <name> ( <int_value>, <expression>);
436 // Generated Code in ad_<arch>.hpp
437 // #define <name> (<expression>)
438 // // value == <int_value>
439 // Generated code in ad_<arch>.cpp adlc_verification()
440 // assert( <name> == <int_value>, "Expect (<expression>) to equal <int_value>");
441 //
definitions %{
// The default cost (of an ALU instruction).
  int_def DEFAULT_COST      (    100,     100);
  int_def HUGE_COST         (1000000, 1000000);

// Memory refs are twice as expensive as run-of-the-mill.
  int_def MEMORY_REF_COST   (    200, DEFAULT_COST * 2);

// Branches are even more expensive.
  int_def BRANCH_COST       (    300, DEFAULT_COST * 3);
  int_def CALL_COST         (    300, DEFAULT_COST * 3);
%}
456 //----------SOURCE BLOCK-------------------------------------------------------
457 // This is a block of C++ code which provides values, functions, and
458 // definitions necessary in the rest of the architecture description
source_hpp %{
// Must be visible to the DFA in dfa_sparc.cpp
extern bool can_branch_register( Node *bol, Node *cmp );

extern bool use_block_zeroing(Node* count);

// Macros to extract hi & lo halves from a long pair.
// G0 is not part of any long pair, so assert on that.
// Prevents accidentally using G1 instead of G0.
// NOTE(review): the macros below are identity mappings and contain no
// assert -- the comment above appears stale; confirm against history.
#define LONG_HI_REG(x) (x)
#define LONG_LO_REG(x) (x)

%}
source %{
#define __ _masm.

// tertiary op of a LoadP or StoreP encoding
#define REGP_OP true

// Forward declarations of helpers defined later in this file; they map
// ADLC register encodings back to assembler register objects.
static FloatRegister reg_to_SingleFloatRegister_object(int register_encoding);
static FloatRegister reg_to_DoubleFloatRegister_object(int register_encoding);
static Register reg_to_register_object(int register_encoding);
483 // Used by the DFA in dfa_sparc.cpp.
484 // Check for being able to use a V9 branch-on-register. Requires a
485 // compare-vs-zero, equal/not-equal, of a value which was zero- or sign-
486 // extended. Doesn't work following an integer ADD, for example, because of
487 // overflow (-1 incremented yields 0 plus a carry in the high-order word). On
488 // 32-bit V9 systems, interrupts currently blow away the high-order 32 bits and
489 // replace them with zero, which could become sign-extension in a different OS
490 // release. There's no obvious reason why an interrupt will ever fill these
491 // bits with non-zero junk (the registers are reloaded with standard LD
492 // instructions which either zero-fill or sign-fill).
// Decide whether the compare 'cmp' feeding boolean 'bol' may be implemented
// with a V9 branch-on-register instruction instead of compare + branch on
// condition codes (see the long explanation in the comment block above).
bool can_branch_register( Node *bol, Node *cmp ) {
  if( !BranchOnRegister ) return false;
#ifdef _LP64
  if( cmp->Opcode() == Op_CmpP )
    return true;  // No problems with pointer compares
#endif
  if( cmp->Opcode() == Op_CmpL )
    return true;  // No problems with long compares

  if( !SparcV9RegsHiBitsZero ) return false;
  // Only eq/ne tests can use branch-on-register; other relations need
  // the condition codes.
  if( bol->as_Bool()->_test._test != BoolTest::ne &&
      bol->as_Bool()->_test._test != BoolTest::eq )
     return false;

  // Check for comparing against a 'safe' value.  Any operation which
  // clears out the high word is safe.  Thus, loads and certain shifts
  // are safe, as are non-negative constants.  Any operation which
  // preserves zero bits in the high word is safe as long as each of its
  // inputs are safe.  Thus, phis and bitwise booleans are safe if their
  // inputs are safe.  At present, the only important case to recognize
  // seems to be loads.  Constants should fold away, and shifts &
  // logicals can use the 'cc' forms.
  Node *x = cmp->in(1);
  if( x->is_Load() ) return true;
  if( x->is_Phi() ) {
    // A Phi is safe only if every one of its inputs is a load.
    for( uint i = 1; i < x->req(); i++ )
      if( !x->in(i)->is_Load() )
        return false;
    return true;
  }
  return false;
}
// Decide whether BIS block-zeroing should be used for this zeroing's
// element count node.
bool use_block_zeroing(Node* count) {
  // Use BIS for zeroing if count is not constant
  // or it is >= BlockZeroingLowLimit.
  // NOTE(review): presumably find_intptr_t_con returns its argument when
  // 'count' is not a constant, so non-constant counts always qualify --
  // confirm against Node::find_intptr_t_con.
  return UseBlockZeroing && (count->find_intptr_t_con(BlockZeroingLowLimit) >= BlockZeroingLowLimit);
}
532 // ****************************************************************************
534 // REQUIRED FUNCTIONALITY
536 // !!!!! Special hack to get all type of calls to specify the byte offset
537 // from the start of the call to the point where the return address
538 // will point.
539 // The "return address" is the address of the call instruction, plus 8.
// Byte offset from the start of the static-call sequence to the point the
// return address refers to.  A plain static call is just the call plus its
// delay slot; a method-handle invoke adds one instruction to restore SP.
int MachCallStaticJavaNode::ret_addr_offset() {
  int offset = NativeCall::instruction_size;  // call; delay slot
  if (_method_handle_invoke)
    offset += 4;  // restore SP
  return offset;
}
// Byte offset from the start of the dynamic-call sequence to the point the
// return address refers to.  The length depends on whether dispatch goes
// through an inline cache or a vtable, and -- for vtable dispatch -- on how
// many instructions are needed to load (and possibly decode) the receiver's
// klass pointer and to form the vtable-entry offset.
int MachCallDynamicJavaNode::ret_addr_offset() {
  int vtable_index = this->_vtable_index;
  if (vtable_index < 0) {
    // must be invalid_vtable_index, not nonvirtual_vtable_index
    assert(vtable_index == Method::invalid_vtable_index, "correct sentinel value");
    // Inline-cache dispatch: materialize the cached metadata, then call.
    return (NativeMovConstReg::instruction_size +
           NativeCall::instruction_size);  // sethi; setlo; call; delay slot
  } else {
    assert(!UseInlineCaches, "expect vtable calls only if not using ICs");
    int entry_offset = InstanceKlass::vtable_start_offset() + vtable_index*vtableEntry::size();
    int v_off = entry_offset*wordSize + vtableEntry::method_offset_in_bytes();
    int klass_load_size;
    if (UseCompressedKlassPointers) {
      assert(Universe::heap() != NULL, "java heap should be initialized");
      // Decoding a narrow klass costs one extra instruction when a non-NULL
      // narrow_klass_base must be added back in.
      if (Universe::narrow_klass_base() == NULL)
        klass_load_size = 2*BytesPerInstWord; // see MacroAssembler::load_klass()
      else
        klass_load_size = 3*BytesPerInstWord;
    } else {
      klass_load_size = 1*BytesPerInstWord;
    }
    if (Assembler::is_simm13(v_off)) {
      // The vtable offset fits in a 13-bit immediate.
      return klass_load_size +
             (2*BytesPerInstWord +           // ld_ptr, ld_ptr
             NativeCall::instruction_size);  // call; delay slot
    } else {
      // A large vtable offset must first be materialized with set_hi/set.
      return klass_load_size +
             (4*BytesPerInstWord +           // set_hi, set, ld_ptr, ld_ptr
             NativeCall::instruction_size);  // call; delay slot
    }
  }
}
// Byte offset to the return address of a runtime call: on 64-bit a far-call
// sequence is emitted when the entry point is out of plain-call reach,
// otherwise (and always on 32-bit) it is a simple call plus delay slot.
int MachCallRuntimeNode::ret_addr_offset() {
#ifdef _LP64
  if (MacroAssembler::is_far_target(entry_point())) {
    return NativeFarCall::instruction_size;
  } else {
    return NativeCall::instruction_size;
  }
#else
  return NativeCall::instruction_size;  // call; delay slot
#endif
}
593 // Indicate if the safepoint node needs the polling page as an input.
594 // Since Sparc does not have absolute addressing, it does.
// Sparc has no absolute addressing, so the polling page address must be
// supplied as an explicit input to the safepoint node (see comment above).
bool SafePointNode::needs_polling_address_input() {
  return true;
}
// emit an interrupt that is caught by the debugger (for debugging compiler)
void emit_break(CodeBuffer &cbuf) {
  MacroAssembler _masm(&cbuf);
  __ breakpoint_trap();
}
#ifndef PRODUCT
// Disassembly listing for the breakpoint node ("TA" = trap-always).
void MachBreakpointNode::format( PhaseRegAlloc *, outputStream *st ) const {
  st->print("TA");
}
#endif
// Emit the breakpoint trap for this node.
void MachBreakpointNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
  emit_break(cbuf);
}
// Code size of the breakpoint node; defer to the generic MachNode sizing.
uint MachBreakpointNode::size(PhaseRegAlloc *ra_) const {
  return MachNode::size(ra_);
}
// Traceable jump
// Jump through the register selected by ADLC encoding 'jump_target',
// with a nop in the delay slot.
void emit_jmpl(CodeBuffer &cbuf, int jump_target) {
  MacroAssembler _masm(&cbuf);
  Register rdest = reg_to_register_object(jump_target);
  __ JMP(rdest, 0);
  __ delayed()->nop();
}
// Traceable jump and set exception pc
// Same as emit_jmpl, but the delay slot computes the issuing pc from O7.
void emit_jmpl_set_exception_pc(CodeBuffer &cbuf, int jump_target) {
  MacroAssembler _masm(&cbuf);
  Register rdest = reg_to_register_object(jump_target);
  __ JMP(rdest, 0);
  __ delayed()->add(O7, frame::pc_return_offset, Oissuing_pc );
}
// Emit a single nop instruction.
void emit_nop(CodeBuffer &cbuf) {
  MacroAssembler _masm(&cbuf);
  __ nop();
}
// Emit an illegal-instruction trap.
void emit_illtrap(CodeBuffer &cbuf) {
  MacroAssembler _masm(&cbuf);
  __ illtrap(0);
}
// Compute the address offset of a memory MachNode from its base, via the
// node's base/displacement decomposition.  Assert-heavy helper; per the
// assert text it supports VerifyOops checking of sparc operands.
intptr_t get_offset_from_base(const MachNode* n, const TypePtr* atype, int disp32) {
  assert(n->rule() != loadUB_rule, "");

  intptr_t offset = 0;
  const TypePtr *adr_type = TYPE_PTR_SENTINAL;  // Check for base==RegI, disp==immP
  const Node* addr = n->get_base_and_disp(offset, adr_type);
  assert(adr_type == (const TypePtr*)-1, "VerifyOops: no support for sparc operands with base==RegI, disp==immP");
  assert(addr != NULL && addr != (Node*)-1, "invalid addr");
  assert(addr->bottom_type()->isa_oopptr() == atype, "");
  atype = atype->add_offset(offset);
  assert(disp32 == offset, "wrong disp32");
  return atype->_offset;
}
// Alternate offset computation that walks the node's address input directly:
// if the address is an AddP, fold its constant offset into the type, then
// add the instruction displacement.  Returns Type::OffsetBot when the offset
// is not a compile-time constant.
intptr_t get_offset_from_base_2(const MachNode* n, const TypePtr* atype, int disp32) {
  assert(n->rule() != loadUB_rule, "");

  intptr_t offset = 0;
  Node* addr = n->in(2);
  assert(addr->bottom_type()->isa_oopptr() == atype, "");
  if (addr->is_Mach() && addr->as_Mach()->ideal_Opcode() == Op_AddP) {
    Node* a = addr->in(2/*AddPNode::Address*/);
    Node* o = addr->in(3/*AddPNode::Offset*/);
    offset = o->is_Con() ? o->bottom_type()->is_intptr_t()->get_con() : Type::OffsetBot;
    atype = a->bottom_type()->is_ptr()->add_offset(offset);
    assert(atype->isa_oop_ptr(), "still an oop");
  }
  offset = atype->is_ptr()->_offset;
  // OffsetBot is a sentinel for "unknown"; do not add to it.
  if (offset != Type::OffsetBot)  offset += disp32;
  return offset;
}
679 static inline jdouble replicate_immI(int con, int count, int width) {
680 // Load a constant replicated "count" times with width "width"
681 assert(count*width == 8 && width <= 4, "sanity");
682 int bit_width = width * 8;
683 jlong val = con;
684 val &= (((jlong) 1) << bit_width) - 1; // mask off sign bits
685 for (int i = 0; i < count - 1; i++) {
686 val |= (val << bit_width);
687 }
688 jdouble dval = *((jdouble*) &val); // coerce to double type
689 return dval;
690 }
692 static inline jdouble replicate_immF(float con) {
693 // Replicate float con 2 times and pack into vector.
694 int val = *((int*)&con);
695 jlong lval = val;
696 lval = (lval << 32) | (lval & 0xFFFFFFFFl);
697 jdouble dval = *((jdouble*) &lval); // coerce to double type
698 return dval;
699 }
701 // Standard Sparc opcode form2 field breakdown
702 static inline void emit2_19(CodeBuffer &cbuf, int f30, int f29, int f25, int f22, int f20, int f19, int f0 ) {
703 f0 &= (1<<19)-1; // Mask displacement to 19 bits
704 int op = (f30 << 30) |
705 (f29 << 29) |
706 (f25 << 25) |
707 (f22 << 22) |
708 (f20 << 20) |
709 (f19 << 19) |
710 (f0 << 0);
711 cbuf.insts()->emit_int32(op);
712 }
714 // Standard Sparc opcode form2 field breakdown
715 static inline void emit2_22(CodeBuffer &cbuf, int f30, int f25, int f22, int f0 ) {
716 f0 >>= 10; // Drop 10 bits
717 f0 &= (1<<22)-1; // Mask displacement to 22 bits
718 int op = (f30 << 30) |
719 (f25 << 25) |
720 (f22 << 22) |
721 (f0 << 0);
722 cbuf.insts()->emit_int32(op);
723 }
725 // Standard Sparc opcode form3 field breakdown
726 static inline void emit3(CodeBuffer &cbuf, int f30, int f25, int f19, int f14, int f5, int f0 ) {
727 int op = (f30 << 30) |
728 (f25 << 25) |
729 (f19 << 19) |
730 (f14 << 14) |
731 (f5 << 5) |
732 (f0 << 0);
733 cbuf.insts()->emit_int32(op);
734 }
736 // Standard Sparc opcode form3 field breakdown
737 static inline void emit3_simm13(CodeBuffer &cbuf, int f30, int f25, int f19, int f14, int simm13 ) {
738 simm13 &= (1<<13)-1; // Mask to 13 bits
739 int op = (f30 << 30) |
740 (f25 << 25) |
741 (f19 << 19) |
742 (f14 << 14) |
743 (1 << 13) | // bit to indicate immediate-mode
744 (simm13<<0);
745 cbuf.insts()->emit_int32(op);
746 }
748 static inline void emit3_simm10(CodeBuffer &cbuf, int f30, int f25, int f19, int f14, int simm10 ) {
749 simm10 &= (1<<10)-1; // Mask to 10 bits
750 emit3_simm13(cbuf,f30,f25,f19,f14,simm10);
751 }
#ifdef ASSERT
// Helper function for VerifyOops in emit_form3_mem_reg.
// Dumps the offending node (2 levels deep) and the mismatching ideal vs.
// memory opcode classification; debug builds only.
void verify_oops_warning(const MachNode *n, int ideal_op, int mem_op) {
  warning("VerifyOops encountered unexpected instruction:");
  n->dump(2);
  warning("Instruction has ideal_Opcode==Op_%s and op_ld==Op_%s \n", NodeClassNames[ideal_op], NodeClassNames[mem_op]);
}
#endif
// Emit a SPARC form3 memory instruction (a load or store of 'dst_enc'
// against base register 'src1_enc' plus either index register 'src2_enc'
// or the immediate displacement 'disp32').  'primary' supplies the op3
// opcode field; 'tertiary' is REGP_OP when an untyped ld/st pattern is
// actually moving a pointer.  The leading ASSERT section implements the
// +VerifyOops checking of oops as they are loaded or stored.
void emit_form3_mem_reg(CodeBuffer &cbuf, const MachNode* n, int primary, int tertiary,
                        int src1_enc, int disp32, int src2_enc, int dst_enc) {

#ifdef ASSERT
  // The following code implements the +VerifyOops feature.
  // It verifies oop values which are loaded into or stored out of
  // the current method activation.  +VerifyOops complements techniques
  // like ScavengeALot, because it eagerly inspects oops in transit,
  // as they enter or leave the stack, as opposed to ScavengeALot,
  // which inspects oops "at rest", in the stack or heap, at safepoints.
  // For this reason, +VerifyOops can sometimes detect bugs very close
  // to their point of creation.  It can also serve as a cross-check
  // on the validity of oop maps, when used toegether with ScavengeALot.

  // It would be good to verify oops at other points, especially
  // when an oop is used as a base pointer for a load or store.
  // This is presently difficult, because it is hard to know when
  // a base address is biased or not.  (If we had such information,
  // it would be easy and useful to make a two-argument version of
  // verify_oop which unbiases the base, and performs verification.)

  assert((uint)tertiary == 0xFFFFFFFF || tertiary == REGP_OP, "valid tertiary");
  bool is_verified_oop_base  = false;
  bool is_verified_oop_load  = false;
  bool is_verified_oop_store = false;
  int tmp_enc = -1;
  if (VerifyOops && src1_enc != R_SP_enc) {
    // classify the op, mainly for an assert check
    int st_op = 0, ld_op = 0;
    switch (primary) {
    case Assembler::stb_op3:  st_op = Op_StoreB; break;
    case Assembler::sth_op3:  st_op = Op_StoreC; break;
    case Assembler::stx_op3:  // may become StoreP or stay StoreI or StoreD0
    case Assembler::stw_op3:  st_op = Op_StoreI; break;
    case Assembler::std_op3:  st_op = Op_StoreL; break;
    case Assembler::stf_op3:  st_op = Op_StoreF; break;
    case Assembler::stdf_op3: st_op = Op_StoreD; break;

    case Assembler::ldsb_op3: ld_op = Op_LoadB; break;
    case Assembler::ldub_op3: ld_op = Op_LoadUB; break;
    case Assembler::lduh_op3: ld_op = Op_LoadUS; break;
    case Assembler::ldsh_op3: ld_op = Op_LoadS; break;
    case Assembler::ldx_op3:  // may become LoadP or stay LoadI
    case Assembler::ldsw_op3: // may become LoadP or stay LoadI
    case Assembler::lduw_op3: ld_op = Op_LoadI; break;
    case Assembler::ldd_op3:  ld_op = Op_LoadL; break;
    case Assembler::ldf_op3:  ld_op = Op_LoadF; break;
    case Assembler::lddf_op3: ld_op = Op_LoadD; break;
    case Assembler::prefetch_op3: ld_op = Op_LoadI; break;

    default: ShouldNotReachHere();
    }
    if (tertiary == REGP_OP) {
      // An untyped ld/st was matched against a pointer move.
      if (st_op == Op_StoreI)     st_op = Op_StoreP;
      else if (ld_op == Op_LoadI) ld_op = Op_LoadP;
      else                        ShouldNotReachHere();
      if (st_op) {
        // a store
        // inputs are (0:control, 1:memory, 2:address, 3:value)
        Node* n2 = n->in(3);
        if (n2 != NULL) {
          const Type* t = n2->bottom_type();
          // Only a whole-oop store (offset 0) is eligible for verification.
          is_verified_oop_store = t->isa_oop_ptr() ? (t->is_ptr()->_offset==0) : false;
        }
      } else {
        // a load
        const Type* t = n->bottom_type();
        is_verified_oop_load = t->isa_oop_ptr() ? (t->is_ptr()->_offset==0) : false;
      }
    }

    if (ld_op) {
      // a Load
      // inputs are (0:control, 1:memory, 2:address)
      // The listed pairs below are deliberate ideal/memory-op mismatches
      // (e.g. a long load implemented with an int-sized access); anything
      // else gets reported through verify_oops_warning.
      if (!(n->ideal_Opcode()==ld_op)       && // Following are special cases
          !(n->ideal_Opcode()==Op_LoadPLocked && ld_op==Op_LoadP) &&
          !(n->ideal_Opcode()==Op_LoadI     && ld_op==Op_LoadF) &&
          !(n->ideal_Opcode()==Op_LoadF     && ld_op==Op_LoadI) &&
          !(n->ideal_Opcode()==Op_LoadRange && ld_op==Op_LoadI) &&
          !(n->ideal_Opcode()==Op_LoadKlass && ld_op==Op_LoadP) &&
          !(n->ideal_Opcode()==Op_LoadL     && ld_op==Op_LoadI) &&
          !(n->ideal_Opcode()==Op_LoadL_unaligned && ld_op==Op_LoadI) &&
          !(n->ideal_Opcode()==Op_LoadD_unaligned && ld_op==Op_LoadF) &&
          !(n->ideal_Opcode()==Op_ConvI2F   && ld_op==Op_LoadF) &&
          !(n->ideal_Opcode()==Op_ConvI2D   && ld_op==Op_LoadF) &&
          !(n->ideal_Opcode()==Op_PrefetchRead  && ld_op==Op_LoadI) &&
          !(n->ideal_Opcode()==Op_PrefetchWrite && ld_op==Op_LoadI) &&
          !(n->ideal_Opcode()==Op_PrefetchAllocation && ld_op==Op_LoadI) &&
          !(n->ideal_Opcode()==Op_LoadVector && ld_op==Op_LoadD) &&
          !(n->rule() == loadUB_rule)) {
        verify_oops_warning(n, n->ideal_Opcode(), ld_op);
      }
    } else if (st_op) {
      // a Store
      // inputs are (0:control, 1:memory, 2:address, 3:value)
      if (!(n->ideal_Opcode()==st_op)    && // Following are special cases
          !(n->ideal_Opcode()==Op_StoreCM && st_op==Op_StoreB) &&
          !(n->ideal_Opcode()==Op_StoreI && st_op==Op_StoreF) &&
          !(n->ideal_Opcode()==Op_StoreF && st_op==Op_StoreI) &&
          !(n->ideal_Opcode()==Op_StoreL && st_op==Op_StoreI) &&
          !(n->ideal_Opcode()==Op_StoreVector && st_op==Op_StoreD) &&
          !(n->ideal_Opcode()==Op_StoreD && st_op==Op_StoreI && n->rule() == storeD0_rule)) {
        verify_oops_warning(n, n->ideal_Opcode(), st_op);
      }
    }

    if (src2_enc == R_G0_enc && n->rule() != loadUB_rule && n->ideal_Opcode() != Op_StoreCM ) {
      Node* addr = n->in(2);
      if (!(addr->is_Mach() && addr->as_Mach()->ideal_Opcode() == Op_AddP)) {
        const TypeOopPtr* atype = addr->bottom_type()->isa_instptr();  // %%% oopptr?
        if (atype != NULL) {
          intptr_t offset = get_offset_from_base(n, atype, disp32);
          intptr_t offset_2 = get_offset_from_base_2(n, atype, disp32);
          if (offset != offset_2) {
            // Presumably re-run so a debugger can step into the mismatch;
            // the assert below then fires.  TODO(review): confirm intent.
            get_offset_from_base(n, atype, disp32);
            get_offset_from_base_2(n, atype, disp32);
          }
          assert(offset == offset_2, "different offsets");
          if (offset == disp32) {
            // we now know that src1 is a true oop pointer
            is_verified_oop_base = true;
            if (ld_op && src1_enc == dst_enc && ld_op != Op_LoadF && ld_op != Op_LoadD) {
              if( primary == Assembler::ldd_op3 ) {
                is_verified_oop_base = false; // Cannot 'ldd' into O7
              } else {
                tmp_enc = dst_enc;
                dst_enc = R_O7_enc; // Load into O7; preserve source oop
                assert(src1_enc != dst_enc, "");
              }
            }
          }
          if (st_op && (( offset == oopDesc::klass_offset_in_bytes())
                       || offset == oopDesc::mark_offset_in_bytes())) {
            // loading the mark should not be allowed either, but
            // we don't check this since it conflicts with InlineObjectHash
            // usage of LoadINode to get the mark.  We could keep the
            // check if we create a new LoadMarkNode
            // but do not verify the object before its header is initialized
            ShouldNotReachHere();
          }
        }
      }
    }
  }
#endif

  // Assemble the form3 instruction word: op=11 (ldst), rd, op3, rs1.
  uint instr;
  instr = (Assembler::ldst_op << 30)
        | (dst_enc  << 25)
        | (primary  << 19)
        | (src1_enc << 14);

  uint index = src2_enc;
  int disp = disp32;

  // Stack accesses go through the biased SP/FP on SPARC V9.
  if (src1_enc == R_SP_enc || src1_enc == R_FP_enc)
    disp += STACK_BIAS;

  // We should have a compiler bailout here rather than a guarantee.
  // Better yet would be some mechanism to handle variable-size matches correctly.
  guarantee(Assembler::is_simm13(disp), "Do not match large constant offsets" );

  if( disp == 0 ) {
    // use reg-reg form
    // bit 13 is already zero
    instr |= index;
  } else {
    // use reg-imm form
    instr |= 0x00002000; // set bit 13 to one
    instr |= disp & 0x1FFF;
  }

  cbuf.insts()->emit_int32(instr);

#ifdef ASSERT
  // Emit the verification code decided on above, after the instruction.
  {
    MacroAssembler _masm(&cbuf);
    if (is_verified_oop_base) {
      __ verify_oop(reg_to_register_object(src1_enc));
    }
    if (is_verified_oop_store) {
      __ verify_oop(reg_to_register_object(dst_enc));
    }
    if (tmp_enc != -1) {
      // Move the value loaded into O7 back to its intended destination.
      __ mov(O7, reg_to_register_object(tmp_enc));
    }
    if (is_verified_oop_load) {
      __ verify_oop(reg_to_register_object(dst_enc));
    }
  }
#endif
}
// Emit a relocated call to 'entry_point'.  If preserve_g2 is set, G2 is
// saved to L7 in the delay slot and restored after the call; in debug
// builds the argument dump slots are then trashed to catch stale reads.
void emit_call_reloc(CodeBuffer &cbuf, intptr_t entry_point, relocInfo::relocType rtype, bool preserve_g2 = false) {
  // The method which records debug information at every safepoint
  // expects the call to be the first instruction in the snippet as
  // it creates a PcDesc structure which tracks the offset of a call
  // from the start of the codeBlob. This offset is computed as
  // code_end() - code_begin() of the code which has been emitted
  // so far.
  // In this particular case we have skirted around the problem by
  // putting the "mov" instruction in the delay slot but the problem
  // may bite us again at some other point and a cleaner/generic
  // solution using relocations would be needed.
  MacroAssembler _masm(&cbuf);
  __ set_inst_mark();

  // We flush the current window just so that there is a valid stack copy
  // the fact that the current window becomes active again instantly is
  // not a problem there is nothing live in it.

#ifdef ASSERT
  int startpos = __ offset();
#endif /* ASSERT */

  __ call((address)entry_point, rtype);

  // Fill the delay slot: either save G2 or a plain nop.
  if (preserve_g2)   __ delayed()->mov(G2, L7);
  else __ delayed()->nop();

  if (preserve_g2)   __ mov(L7, G2);

#ifdef ASSERT
  if (preserve_g2 && (VerifyCompiledCode || VerifyOops)) {
#ifdef _LP64
    // Trash argument dump slots.
    __ set(0xb0b8ac0db0b8ac0d, G1);
    __ mov(G1, G5);
    __ stx(G1, SP, STACK_BIAS + 0x80);
    __ stx(G1, SP, STACK_BIAS + 0x88);
    __ stx(G1, SP, STACK_BIAS + 0x90);
    __ stx(G1, SP, STACK_BIAS + 0x98);
    __ stx(G1, SP, STACK_BIAS + 0xA0);
    __ stx(G1, SP, STACK_BIAS + 0xA8);
#else // _LP64
    // this is also a native call, so smash the first 7 stack locations,
    // and the various registers

    // Note:  [SP+0x40] is sp[callee_aggregate_return_pointer_sp_offset],
    // while [SP+0x44..0x58] are the argument dump slots.
    __ set((intptr_t)0xbaadf00d, G1);
    __ mov(G1, G5);
    __ sllx(G1, 32, G1);
    __ or3(G1, G5, G1);
    __ mov(G1, G5);
    __ stx(G1, SP, 0x40);
    __ stx(G1, SP, 0x48);
    __ stx(G1, SP, 0x50);
    __ stw(G1, SP, 0x58); // Do not trash [SP+0x5C] which is a usable spill slot
#endif // _LP64
  }
#endif /*ASSERT*/
}
1017 //=============================================================================
1018 // REQUIRED FUNCTIONALITY for encoding
// Required by the ADLC encoding framework; SPARC does not use these hooks,
// so both are intentionally empty.
void emit_lo(CodeBuffer &cbuf, int val) {  }
void emit_hi(CodeBuffer &cbuf, int val) {  }
1023 //=============================================================================
1024 const RegMask& MachConstantBaseNode::_out_RegMask = PTR_REG_mask();
1026 int Compile::ConstantTable::calculate_table_base_offset() const {
1027 if (UseRDPCForConstantTableBase) {
1028 // The table base offset might be less but then it fits into
1029 // simm13 anyway and we are good (cf. MachConstantBaseNode::emit).
1030 return Assembler::min_simm13();
1031 } else {
1032 int offset = -(size() / 2);
1033 if (!Assembler::is_simm13(offset)) {
1034 offset = Assembler::min_simm13();
1035 }
1036 return offset;
1037 }
1038 }
// Materialize the constant table base into the allocated register, either
// PC-relative via RDPC (when enabled) or as an absolute, relocated address.
void MachConstantBaseNode::emit(CodeBuffer& cbuf, PhaseRegAlloc* ra_) const {
  Compile* C = ra_->C;
  Compile::ConstantTable& constant_table = C->constant_table();
  MacroAssembler _masm(&cbuf);

  Register r = as_Register(ra_->get_encode(this));
  CodeSection* consts_section = __ code()->consts();
  int consts_size = consts_section->align_at_start(consts_section->size());
  assert(constant_table.size() == consts_size, err_msg("must be: %d == %d", constant_table.size(), consts_size));

  if (UseRDPCForConstantTableBase) {
    // For the following RDPC logic to work correctly the consts
    // section must be allocated right before the insts section.  This
    // assert checks for that.  The layout and the SECT_* constants
    // are defined in src/share/vm/asm/codeBuffer.hpp.
    assert(CodeBuffer::SECT_CONSTS + 1 == CodeBuffer::SECT_INSTS, "must be");
    int insts_offset = __ offset();

    // Layout:
    //
    // |----------- consts section ------------|----------- insts section -----------...
    // |------ constant table -----|- padding -|------------------x----
    //                                                            \ current PC (RDPC instruction)
    // |<------------- consts_size ----------->|<- insts_offset ->|
    //                                         \ table base
    // The table base offset is later added to the load displacement
    // so it has to be negative.
    int table_base_offset = -(consts_size + insts_offset);
    int disp;

    // If the displacement from the current PC to the constant table
    // base fits into simm13 we set the constant table base to the
    // current PC.
    if (Assembler::is_simm13(table_base_offset)) {
      constant_table.set_table_base_offset(table_base_offset);
      disp = 0;
    } else {
      // Otherwise we set the constant table base offset to the
      // maximum negative displacement of load instructions to keep
      // the disp as small as possible:
      //
      // |<------------- consts_size ----------->|<- insts_offset ->|
      // |<--------- min_simm13 --------->|<-------- disp --------->|
      //                                  \ table base
      table_base_offset = Assembler::min_simm13();
      constant_table.set_table_base_offset(table_base_offset);
      disp = (consts_size + insts_offset) + table_base_offset;
    }

    __ rdpc(r);

    if (disp != 0) {
      // Back r up from the current PC to the chosen table base.
      assert(r != O7, "need temporary");
      __ sub(r, __ ensure_simm13_or_reg(disp, O7), r);
    }
  }
  else {
    // Materialize the constant table base.
    address baseaddr = consts_section->start() + -(constant_table.table_base_offset());
    RelocationHolder rspec = internal_word_Relocation::spec(baseaddr);
    AddressLiteral base(baseaddr, rspec);
    __ set(base, r);
  }
}
1105 uint MachConstantBaseNode::size(PhaseRegAlloc*) const {
1106 if (UseRDPCForConstantTableBase) {
1107 // This is really the worst case but generally it's only 1 instruction.
1108 return (1 /*rdpc*/ + 1 /*sub*/ + MacroAssembler::worst_case_insts_for_set()) * BytesPerInstWord;
1109 } else {
1110 return MacroAssembler::worst_case_insts_for_set() * BytesPerInstWord;
1111 }
1112 }
1114 #ifndef PRODUCT
1115 void MachConstantBaseNode::format(PhaseRegAlloc* ra_, outputStream* st) const {
1116 char reg[128];
1117 ra_->dump_register(this, reg);
1118 if (UseRDPCForConstantTableBase) {
1119 st->print("RDPC %s\t! constant table base", reg);
1120 } else {
1121 st->print("SET &constanttable,%s\t! constant table base", reg);
1122 }
1123 }
1124 #endif
1127 //=============================================================================
#ifndef PRODUCT
// Pretty-print the method prologue (nops, thread check, stack bang, SAVE)
// mirroring the code produced by MachPrologNode::emit below.
void MachPrologNode::format( PhaseRegAlloc *ra_, outputStream *st ) const {
  Compile* C = ra_->C;

  for (int i = 0; i < OptoPrologueNops; i++) {
    st->print_cr("NOP"); st->print("\t");
  }

  if( VerifyThread ) {
    st->print_cr("Verify_Thread"); st->print("\t");
  }

  size_t framesize = C->frame_slots() << LogBytesPerInt;

  // Calls to C2R adapters often do not accept exceptional returns.
  // We require that their callers must bang for them.  But be careful, because
  // some VM calls (such as call site linkage) can use several kilobytes of
  // stack.  But the stack safety zone should account for that.
  // See bugs 4446381, 4468289, 4497237.
  if (C->need_stack_bang(framesize)) {
    st->print_cr("! stack bang"); st->print("\t");
  }

  if (Assembler::is_simm13(-framesize)) {
    // Frame size fits in one SAVE immediate.
    st->print   ("SAVE R_SP,-%d,R_SP",framesize);
  } else {
    // Frame size needs a SETHI/ADD pair to build -framesize in G3.
    st->print_cr("SETHI R_SP,hi%%(-%d),R_G3",framesize); st->print("\t");
    st->print_cr("ADD R_G3,lo%%(-%d),R_G3",framesize); st->print("\t");
    st->print   ("SAVE R_SP,R_G3,R_SP");
  }

}
#endif
// Emit the method prologue: optional nops, thread verification, stack-bang
// overflow check, and the SAVE that opens the register window / frame.
void MachPrologNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
  Compile* C = ra_->C;
  MacroAssembler _masm(&cbuf);

  for (int i = 0; i < OptoPrologueNops; i++) {
    __ nop();
  }

  __ verify_thread();

  size_t framesize = C->frame_slots() << LogBytesPerInt;
  assert(framesize >= 16*wordSize, "must have room for reg. save area");
  assert(framesize%(2*wordSize) == 0, "must preserve 2*wordSize alignment");

  // Calls to C2R adapters often do not accept exceptional returns.
  // We require that their callers must bang for them.  But be careful, because
  // some VM calls (such as call site linkage) can use several kilobytes of
  // stack.  But the stack safety zone should account for that.
  // See bugs 4446381, 4468289, 4497237.
  if (C->need_stack_bang(framesize)) {
    __ generate_stack_overflow_check(framesize);
  }

  if (Assembler::is_simm13(-framesize)) {
    __ save(SP, -framesize, SP);
  } else {
    // -framesize does not fit a simm13: split it into high (sethi) and
    // low 10 bits (add) to build the value in G3.
    __ sethi(-framesize & ~0x3ff, G3);
    __ add(G3, -framesize & 0x3ff, G3);
    __ save(SP, G3, SP);
  }
  C->set_frame_complete( __ offset() );

  if (!UseRDPCForConstantTableBase && C->has_mach_constant_base_node()) {
    // NOTE: We set the table base offset here because users might be
    // emitted before MachConstantBaseNode.
    Compile::ConstantTable& constant_table = C->constant_table();
    constant_table.set_table_base_offset(constant_table.calculate_table_base_offset());
  }
}
// The prologue is variable-length (cf. emit above), so defer to the
// generic MachNode::size computation.
uint MachPrologNode::size(PhaseRegAlloc *ra_) const {
  return MachNode::size(ra_);
}
// Conservative upper bound on the number of relocations the prologue emits.
int MachPrologNode::reloc() const {
  return 10; // a large enough number
}
1211 //=============================================================================
#ifndef PRODUCT
// Pretty-print the method epilogue (safepoint poll, RET, RESTORE),
// mirroring the code produced by MachEpilogNode::emit below.
void MachEpilogNode::format( PhaseRegAlloc *ra_, outputStream *st ) const {
  Compile* C = ra_->C;

  if( do_polling() && ra_->C->is_method_compilation() ) {
    st->print("SETHI #PollAddr,L0\t! Load Polling address\n\t");
#ifdef _LP64
    st->print("LDX [L0],G0\t!Poll for Safepointing\n\t");
#else
    st->print("LDUW [L0],G0\t!Poll for Safepointing\n\t");
#endif
  }

  if( do_polling() )
    st->print("RET\n\t");

  st->print("RESTORE");
}
#endif
// Emit the method epilogue: optional safepoint poll, then RET with the
// RESTORE folded into the delay slot (or a bare RESTORE when not returning).
void MachEpilogNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
  MacroAssembler _masm(&cbuf);
  Compile* C = ra_->C;

  __ verify_thread();

  // If this does safepoint polling, then do it here
  if( do_polling() && ra_->C->is_method_compilation() ) {
    AddressLiteral polling_page(os::get_polling_page());
    __ sethi(polling_page, L0);
    // The poll is a load from the polling page discarded into G0; the
    // relocation marks it as a poll_return for the safepoint machinery.
    __ relocate(relocInfo::poll_return_type);
    __ ld_ptr( L0, 0, G0 );
  }

  // If this is a return, then stuff the restore in the delay slot
  if( do_polling() ) {
    __ ret();
    __ delayed()->restore();
  } else {
    __ restore();
  }
}
// The epilogue is variable-length (cf. emit above), so defer to the
// generic MachNode::size computation.
uint MachEpilogNode::size(PhaseRegAlloc *ra_) const {
  return MachNode::size(ra_);
}
// Conservative upper bound on the number of relocations the epilogue emits.
int MachEpilogNode::reloc() const {
  return 16; // a large enough number
}
// Use the default pipeline description for the epilogue instructions.
const Pipeline * MachEpilogNode::pipeline() const {
  return MachNode::pipeline_class();
}
// Byte offset of the safepoint poll instruction within the epilogue:
// the poll load follows the sethi of the polling page address (cf. emit).
int MachEpilogNode::safepoint_offset() const {
  assert( do_polling(), "no return for this epilog node");
  return MacroAssembler::insts_for_sethi(os::get_polling_page()) * BytesPerInstWord;
}
1272 //=============================================================================
1274 // Figure out which register class each belongs in: rc_int, rc_float, rc_stack
1275 enum RC { rc_bad, rc_int, rc_float, rc_stack };
1276 static enum RC rc_class( OptoReg::Name reg ) {
1277 if( !OptoReg::is_valid(reg) ) return rc_bad;
1278 if (OptoReg::is_stack(reg)) return rc_stack;
1279 VMReg r = OptoReg::as_VMReg(reg);
1280 if (r->is_Register()) return rc_int;
1281 assert(r->is_FloatRegister(), "must be");
1282 return rc_float;
1283 }
// Spill-code helper: emit (or, in the no-cbuf/format path, print) a single
// load or store of 'reg' at stack 'offset'.  Returns the accumulated code
// size; each emitted instruction is 4 bytes.
static int impl_helper( const MachNode *mach, CodeBuffer *cbuf, PhaseRegAlloc *ra_, bool do_size, bool is_load, int offset, int reg, int opcode, const char *op_str, int size, outputStream* st ) {
  if( cbuf ) {
    // Better yet would be some mechanism to handle variable-size matches correctly
    if (!Assembler::is_simm13(offset + STACK_BIAS)) {
      // Bail out of the compile rather than emit an unencodable offset.
      ra_->C->record_method_not_compilable("unable to handle large constant offsets");
    } else {
      emit_form3_mem_reg(*cbuf, mach, opcode, -1, R_SP_enc, offset, 0, Matcher::_regEncode[reg]);
    }
  }
#ifndef PRODUCT
  else if( !do_size ) {
    if( size != 0 ) st->print("\n\t");
    if( is_load ) st->print("%s [R_SP + #%d],R_%s\t! spill",op_str,offset,OptoReg::regname(reg));
    else st->print("%s R_%s,[R_SP + #%d]\t! spill",op_str,OptoReg::regname(reg),offset);
  }
#endif
  return size+4;
}
// Spill-code helper: emit (or print) a single reg-reg move using the given
// form3 opcode pair.  Returns the accumulated code size (+4 per instruction).
static int impl_mov_helper( CodeBuffer *cbuf, bool do_size, int src, int dst, int op1, int op2, const char *op_str, int size, outputStream* st ) {
  if( cbuf ) emit3( *cbuf, Assembler::arith_op, Matcher::_regEncode[dst], op1, 0, op2, Matcher::_regEncode[src] );
#ifndef PRODUCT
  else if( !do_size ) {
    if( size != 0 ) st->print("\n\t");
    st->print("%s R_%s,R_%s\t! spill",op_str,OptoReg::regname(src),OptoReg::regname(dst));
  }
#endif
  return size+4;
}
1315 uint MachSpillCopyNode::implementation( CodeBuffer *cbuf,
1316 PhaseRegAlloc *ra_,
1317 bool do_size,
1318 outputStream* st ) const {
1319 // Get registers to move
1320 OptoReg::Name src_second = ra_->get_reg_second(in(1));
1321 OptoReg::Name src_first = ra_->get_reg_first(in(1));
1322 OptoReg::Name dst_second = ra_->get_reg_second(this );
1323 OptoReg::Name dst_first = ra_->get_reg_first(this );
1325 enum RC src_second_rc = rc_class(src_second);
1326 enum RC src_first_rc = rc_class(src_first);
1327 enum RC dst_second_rc = rc_class(dst_second);
1328 enum RC dst_first_rc = rc_class(dst_first);
1330 assert( OptoReg::is_valid(src_first) && OptoReg::is_valid(dst_first), "must move at least 1 register" );
1332 // Generate spill code!
1333 int size = 0;
1335 if( src_first == dst_first && src_second == dst_second )
1336 return size; // Self copy, no move
1338 // --------------------------------------
1339 // Check for mem-mem move. Load into unused float registers and fall into
1340 // the float-store case.
1341 if( src_first_rc == rc_stack && dst_first_rc == rc_stack ) {
1342 int offset = ra_->reg2offset(src_first);
1343 // Further check for aligned-adjacent pair, so we can use a double load
1344 if( (src_first&1)==0 && src_first+1 == src_second ) {
1345 src_second = OptoReg::Name(R_F31_num);
1346 src_second_rc = rc_float;
1347 size = impl_helper(this,cbuf,ra_,do_size,true,offset,R_F30_num,Assembler::lddf_op3,"LDDF",size, st);
1348 } else {
1349 size = impl_helper(this,cbuf,ra_,do_size,true,offset,R_F30_num,Assembler::ldf_op3 ,"LDF ",size, st);
1350 }
1351 src_first = OptoReg::Name(R_F30_num);
1352 src_first_rc = rc_float;
1353 }
1355 if( src_second_rc == rc_stack && dst_second_rc == rc_stack ) {
1356 int offset = ra_->reg2offset(src_second);
1357 size = impl_helper(this,cbuf,ra_,do_size,true,offset,R_F31_num,Assembler::ldf_op3,"LDF ",size, st);
1358 src_second = OptoReg::Name(R_F31_num);
1359 src_second_rc = rc_float;
1360 }
1362 // --------------------------------------
1363 // Check for float->int copy; requires a trip through memory
1364 if (src_first_rc == rc_float && dst_first_rc == rc_int && UseVIS < 3) {
1365 int offset = frame::register_save_words*wordSize;
1366 if (cbuf) {
1367 emit3_simm13( *cbuf, Assembler::arith_op, R_SP_enc, Assembler::sub_op3, R_SP_enc, 16 );
1368 impl_helper(this,cbuf,ra_,do_size,false,offset,src_first,Assembler::stf_op3 ,"STF ",size, st);
1369 impl_helper(this,cbuf,ra_,do_size,true ,offset,dst_first,Assembler::lduw_op3,"LDUW",size, st);
1370 emit3_simm13( *cbuf, Assembler::arith_op, R_SP_enc, Assembler::add_op3, R_SP_enc, 16 );
1371 }
1372 #ifndef PRODUCT
1373 else if (!do_size) {
1374 if (size != 0) st->print("\n\t");
1375 st->print( "SUB R_SP,16,R_SP\n");
1376 impl_helper(this,cbuf,ra_,do_size,false,offset,src_first,Assembler::stf_op3 ,"STF ",size, st);
1377 impl_helper(this,cbuf,ra_,do_size,true ,offset,dst_first,Assembler::lduw_op3,"LDUW",size, st);
1378 st->print("\tADD R_SP,16,R_SP\n");
1379 }
1380 #endif
1381 size += 16;
1382 }
1384 // Check for float->int copy on T4
1385 if (src_first_rc == rc_float && dst_first_rc == rc_int && UseVIS >= 3) {
1386 // Further check for aligned-adjacent pair, so we can use a double move
1387 if ((src_first&1)==0 && src_first+1 == src_second && (dst_first&1)==0 && dst_first+1 == dst_second)
1388 return impl_mov_helper(cbuf,do_size,src_first,dst_first,Assembler::mftoi_op3,Assembler::mdtox_opf,"MOVDTOX",size, st);
1389 size = impl_mov_helper(cbuf,do_size,src_first,dst_first,Assembler::mftoi_op3,Assembler::mstouw_opf,"MOVSTOUW",size, st);
1390 }
1391 // Check for int->float copy on T4
1392 if (src_first_rc == rc_int && dst_first_rc == rc_float && UseVIS >= 3) {
1393 // Further check for aligned-adjacent pair, so we can use a double move
1394 if ((src_first&1)==0 && src_first+1 == src_second && (dst_first&1)==0 && dst_first+1 == dst_second)
1395 return impl_mov_helper(cbuf,do_size,src_first,dst_first,Assembler::mftoi_op3,Assembler::mxtod_opf,"MOVXTOD",size, st);
1396 size = impl_mov_helper(cbuf,do_size,src_first,dst_first,Assembler::mftoi_op3,Assembler::mwtos_opf,"MOVWTOS",size, st);
1397 }
1399 // --------------------------------------
1400 // In the 32-bit 1-reg-longs build ONLY, I see mis-aligned long destinations.
1401 // In such cases, I have to do the big-endian swap. For aligned targets, the
1402 // hardware does the flop for me. Doubles are always aligned, so no problem
1403 // there. Misaligned sources only come from native-long-returns (handled
1404 // special below).
1405 #ifndef _LP64
1406 if( src_first_rc == rc_int && // source is already big-endian
1407 src_second_rc != rc_bad && // 64-bit move
1408 ((dst_first&1)!=0 || dst_second != dst_first+1) ) { // misaligned dst
1409 assert( (src_first&1)==0 && src_second == src_first+1, "source must be aligned" );
1410 // Do the big-endian flop.
1411 OptoReg::Name tmp = dst_first ; dst_first = dst_second ; dst_second = tmp ;
1412 enum RC tmp_rc = dst_first_rc; dst_first_rc = dst_second_rc; dst_second_rc = tmp_rc;
1413 }
1414 #endif
1416 // --------------------------------------
1417 // Check for integer reg-reg copy
1418 if( src_first_rc == rc_int && dst_first_rc == rc_int ) {
1419 #ifndef _LP64
1420 if( src_first == R_O0_num && src_second == R_O1_num ) { // Check for the evil O0/O1 native long-return case
1421 // Note: The _first and _second suffixes refer to the addresses of the the 2 halves of the 64-bit value
1422 // as stored in memory. On a big-endian machine like SPARC, this means that the _second
1423 // operand contains the least significant word of the 64-bit value and vice versa.
1424 OptoReg::Name tmp = OptoReg::Name(R_O7_num);
1425 assert( (dst_first&1)==0 && dst_second == dst_first+1, "return a native O0/O1 long to an aligned-adjacent 64-bit reg" );
1426 // Shift O0 left in-place, zero-extend O1, then OR them into the dst
1427 if( cbuf ) {
1428 emit3_simm13( *cbuf, Assembler::arith_op, Matcher::_regEncode[tmp], Assembler::sllx_op3, Matcher::_regEncode[src_first], 0x1020 );
1429 emit3_simm13( *cbuf, Assembler::arith_op, Matcher::_regEncode[src_second], Assembler::srl_op3, Matcher::_regEncode[src_second], 0x0000 );
1430 emit3 ( *cbuf, Assembler::arith_op, Matcher::_regEncode[dst_first], Assembler:: or_op3, Matcher::_regEncode[tmp], 0, Matcher::_regEncode[src_second] );
1431 #ifndef PRODUCT
1432 } else if( !do_size ) {
1433 if( size != 0 ) st->print("\n\t");
1434 st->print("SLLX R_%s,32,R_%s\t! Move O0-first to O7-high\n\t", OptoReg::regname(src_first), OptoReg::regname(tmp));
1435 st->print("SRL R_%s, 0,R_%s\t! Zero-extend O1\n\t", OptoReg::regname(src_second), OptoReg::regname(src_second));
1436 st->print("OR R_%s,R_%s,R_%s\t! spill",OptoReg::regname(tmp), OptoReg::regname(src_second), OptoReg::regname(dst_first));
1437 #endif
1438 }
1439 return size+12;
1440 }
1441 else if( dst_first == R_I0_num && dst_second == R_I1_num ) {
1442 // returning a long value in I0/I1
1443 // a SpillCopy must be able to target a return instruction's reg_class
1444 // Note: The _first and _second suffixes refer to the addresses of the the 2 halves of the 64-bit value
1445 // as stored in memory. On a big-endian machine like SPARC, this means that the _second
1446 // operand contains the least significant word of the 64-bit value and vice versa.
1447 OptoReg::Name tdest = dst_first;
1449 if (src_first == dst_first) {
1450 tdest = OptoReg::Name(R_O7_num);
1451 size += 4;
1452 }
1454 if( cbuf ) {
1455 assert( (src_first&1) == 0 && (src_first+1) == src_second, "return value was in an aligned-adjacent 64-bit reg");
1456 // Shift value in upper 32-bits of src to lower 32-bits of I0; move lower 32-bits to I1
1457 // ShrL_reg_imm6
1458 emit3_simm13( *cbuf, Assembler::arith_op, Matcher::_regEncode[tdest], Assembler::srlx_op3, Matcher::_regEncode[src_second], 32 | 0x1000 );
1459 // ShrR_reg_imm6 src, 0, dst
1460 emit3_simm13( *cbuf, Assembler::arith_op, Matcher::_regEncode[dst_second], Assembler::srl_op3, Matcher::_regEncode[src_first], 0x0000 );
1461 if (tdest != dst_first) {
1462 emit3 ( *cbuf, Assembler::arith_op, Matcher::_regEncode[dst_first], Assembler::or_op3, 0/*G0*/, 0/*op2*/, Matcher::_regEncode[tdest] );
1463 }
1464 }
1465 #ifndef PRODUCT
1466 else if( !do_size ) {
1467 if( size != 0 ) st->print("\n\t"); // %%%%% !!!!!
1468 st->print("SRLX R_%s,32,R_%s\t! Extract MSW\n\t",OptoReg::regname(src_second),OptoReg::regname(tdest));
1469 st->print("SRL R_%s, 0,R_%s\t! Extract LSW\n\t",OptoReg::regname(src_first),OptoReg::regname(dst_second));
1470 if (tdest != dst_first) {
1471 st->print("MOV R_%s,R_%s\t! spill\n\t", OptoReg::regname(tdest), OptoReg::regname(dst_first));
1472 }
1473 }
1474 #endif // PRODUCT
1475 return size+8;
1476 }
1477 #endif // !_LP64
1478 // Else normal reg-reg copy
1479 assert( src_second != dst_first, "smashed second before evacuating it" );
1480 size = impl_mov_helper(cbuf,do_size,src_first,dst_first,Assembler::or_op3,0,"MOV ",size, st);
1481 assert( (src_first&1) == 0 && (dst_first&1) == 0, "never move second-halves of int registers" );
1482 // This moves an aligned adjacent pair.
1483 // See if we are done.
1484 if( src_first+1 == src_second && dst_first+1 == dst_second )
1485 return size;
1486 }
1488 // Check for integer store
1489 if( src_first_rc == rc_int && dst_first_rc == rc_stack ) {
1490 int offset = ra_->reg2offset(dst_first);
1491 // Further check for aligned-adjacent pair, so we can use a double store
1492 if( (src_first&1)==0 && src_first+1 == src_second && (dst_first&1)==0 && dst_first+1 == dst_second )
1493 return impl_helper(this,cbuf,ra_,do_size,false,offset,src_first,Assembler::stx_op3,"STX ",size, st);
1494 size = impl_helper(this,cbuf,ra_,do_size,false,offset,src_first,Assembler::stw_op3,"STW ",size, st);
1495 }
1497 // Check for integer load
1498 if( dst_first_rc == rc_int && src_first_rc == rc_stack ) {
1499 int offset = ra_->reg2offset(src_first);
1500 // Further check for aligned-adjacent pair, so we can use a double load
1501 if( (src_first&1)==0 && src_first+1 == src_second && (dst_first&1)==0 && dst_first+1 == dst_second )
1502 return impl_helper(this,cbuf,ra_,do_size,true,offset,dst_first,Assembler::ldx_op3 ,"LDX ",size, st);
1503 size = impl_helper(this,cbuf,ra_,do_size,true,offset,dst_first,Assembler::lduw_op3,"LDUW",size, st);
1504 }
1506 // Check for float reg-reg copy
1507 if( src_first_rc == rc_float && dst_first_rc == rc_float ) {
1508 // Further check for aligned-adjacent pair, so we can use a double move
1509 if( (src_first&1)==0 && src_first+1 == src_second && (dst_first&1)==0 && dst_first+1 == dst_second )
1510 return impl_mov_helper(cbuf,do_size,src_first,dst_first,Assembler::fpop1_op3,Assembler::fmovd_opf,"FMOVD",size, st);
1511 size = impl_mov_helper(cbuf,do_size,src_first,dst_first,Assembler::fpop1_op3,Assembler::fmovs_opf,"FMOVS",size, st);
1512 }
1514 // Check for float store
1515 if( src_first_rc == rc_float && dst_first_rc == rc_stack ) {
1516 int offset = ra_->reg2offset(dst_first);
1517 // Further check for aligned-adjacent pair, so we can use a double store
1518 if( (src_first&1)==0 && src_first+1 == src_second && (dst_first&1)==0 && dst_first+1 == dst_second )
1519 return impl_helper(this,cbuf,ra_,do_size,false,offset,src_first,Assembler::stdf_op3,"STDF",size, st);
1520 size = impl_helper(this,cbuf,ra_,do_size,false,offset,src_first,Assembler::stf_op3 ,"STF ",size, st);
1521 }
1523 // Check for float load
1524 if( dst_first_rc == rc_float && src_first_rc == rc_stack ) {
1525 int offset = ra_->reg2offset(src_first);
1526 // Further check for aligned-adjacent pair, so we can use a double load
1527 if( (src_first&1)==0 && src_first+1 == src_second && (dst_first&1)==0 && dst_first+1 == dst_second )
1528 return impl_helper(this,cbuf,ra_,do_size,true,offset,dst_first,Assembler::lddf_op3,"LDDF",size, st);
1529 size = impl_helper(this,cbuf,ra_,do_size,true,offset,dst_first,Assembler::ldf_op3 ,"LDF ",size, st);
1530 }
1532 // --------------------------------------------------------------------
1533 // Check for hi bits still needing moving. Only happens for misaligned
1534 // arguments to native calls.
1535 if( src_second == dst_second )
1536 return size; // Self copy; no move
1537 assert( src_second_rc != rc_bad && dst_second_rc != rc_bad, "src_second & dst_second cannot be Bad" );
1539 #ifndef _LP64
1540 // In the LP64 build, all registers can be moved as aligned/adjacent
1541 // pairs, so there's never any need to move the high bits separately.
1542 // The 32-bit builds have to deal with the 32-bit ABI which can force
1543 // all sorts of silly alignment problems.
1545 // Check for integer reg-reg copy. Hi bits are stuck up in the top
1546 // 32-bits of a 64-bit register, but are needed in low bits of another
1547 // register (else it's a hi-bits-to-hi-bits copy which should have
1548 // happened already as part of a 64-bit move)
1549 if( src_second_rc == rc_int && dst_second_rc == rc_int ) {
1550 assert( (src_second&1)==1, "its the evil O0/O1 native return case" );
1551 assert( (dst_second&1)==0, "should have moved with 1 64-bit move" );
1552 // Shift src_second down to dst_second's low bits.
1553 if( cbuf ) {
1554 emit3_simm13( *cbuf, Assembler::arith_op, Matcher::_regEncode[dst_second], Assembler::srlx_op3, Matcher::_regEncode[src_second-1], 0x1020 );
1555 #ifndef PRODUCT
1556 } else if( !do_size ) {
1557 if( size != 0 ) st->print("\n\t");
1558 st->print("SRLX R_%s,32,R_%s\t! spill: Move high bits down low",OptoReg::regname(src_second-1),OptoReg::regname(dst_second));
1559 #endif
1560 }
1561 return size+4;
1562 }
1564 // Check for high word integer store. Must down-shift the hi bits
1565 // into a temp register, then fall into the case of storing int bits.
1566 if( src_second_rc == rc_int && dst_second_rc == rc_stack && (src_second&1)==1 ) {
1567 // Shift src_second down to dst_second's low bits.
1568 if( cbuf ) {
1569 emit3_simm13( *cbuf, Assembler::arith_op, Matcher::_regEncode[R_O7_num], Assembler::srlx_op3, Matcher::_regEncode[src_second-1], 0x1020 );
1570 #ifndef PRODUCT
1571 } else if( !do_size ) {
1572 if( size != 0 ) st->print("\n\t");
1573 st->print("SRLX R_%s,32,R_%s\t! spill: Move high bits down low",OptoReg::regname(src_second-1),OptoReg::regname(R_O7_num));
1574 #endif
1575 }
1576 size+=4;
1577 src_second = OptoReg::Name(R_O7_num); // Not R_O7H_num!
1578 }
1580 // Check for high word integer load
1581 if( dst_second_rc == rc_int && src_second_rc == rc_stack )
1582 return impl_helper(this,cbuf,ra_,do_size,true ,ra_->reg2offset(src_second),dst_second,Assembler::lduw_op3,"LDUW",size, st);
1584 // Check for high word integer store
1585 if( src_second_rc == rc_int && dst_second_rc == rc_stack )
1586 return impl_helper(this,cbuf,ra_,do_size,false,ra_->reg2offset(dst_second),src_second,Assembler::stw_op3 ,"STW ",size, st);
1588 // Check for high word float store
1589 if( src_second_rc == rc_float && dst_second_rc == rc_stack )
1590 return impl_helper(this,cbuf,ra_,do_size,false,ra_->reg2offset(dst_second),src_second,Assembler::stf_op3 ,"STF ",size, st);
1592 #endif // !_LP64
1594 Unimplemented();
1595 }
1597 #ifndef PRODUCT
1598 void MachSpillCopyNode::format( PhaseRegAlloc *ra_, outputStream *st ) const {
1599 implementation( NULL, ra_, false, st );
1600 }
1601 #endif
1603 void MachSpillCopyNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
1604 implementation( &cbuf, ra_, false, NULL );
1605 }
1607 uint MachSpillCopyNode::size(PhaseRegAlloc *ra_) const {
1608 return implementation( NULL, ra_, true, NULL );
1609 }
1611 //=============================================================================
#ifndef PRODUCT
// Pretty-print a nop pad; _count is the number of 4-byte nop instructions.
void MachNopNode::format( PhaseRegAlloc *, outputStream *st ) const {
  st->print("NOP \t# %d bytes pad for loops and calls", 4 * _count);
}
#endif
1618 void MachNopNode::emit(CodeBuffer &cbuf, PhaseRegAlloc * ) const {
1619 MacroAssembler _masm(&cbuf);
1620 for(int i = 0; i < _count; i += 1) {
1621 __ nop();
1622 }
1623 }
1625 uint MachNopNode::size(PhaseRegAlloc *ra_) const {
1626 return 4 * _count;
1627 }
1630 //=============================================================================
1631 #ifndef PRODUCT
1632 void BoxLockNode::format( PhaseRegAlloc *ra_, outputStream *st ) const {
1633 int offset = ra_->reg2offset(in_RegMask(0).find_first_elem());
1634 int reg = ra_->get_reg_first(this);
1635 st->print("LEA [R_SP+#%d+BIAS],%s",offset,Matcher::regName[reg]);
1636 }
1637 #endif
1639 void BoxLockNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
1640 MacroAssembler _masm(&cbuf);
1641 int offset = ra_->reg2offset(in_RegMask(0).find_first_elem()) + STACK_BIAS;
1642 int reg = ra_->get_encode(this);
1644 if (Assembler::is_simm13(offset)) {
1645 __ add(SP, offset, reg_to_register_object(reg));
1646 } else {
1647 __ set(offset, O7);
1648 __ add(SP, O7, reg_to_register_object(reg));
1649 }
1650 }
// Byte size of the emitted address computation (4 or 12 bytes depending on
// whether the offset fits a simm13 — see BoxLockNode::emit above).
uint BoxLockNode::size(PhaseRegAlloc *ra_) const {
  // BoxLockNode is not a MachNode, so we can't just call MachNode::size(ra_)
  assert(ra_ == ra_->C->regalloc(), "sanity");
  // Measure by emitting into the compile's scratch buffer.
  return ra_->C->scratch_emit_size(this);
}
1658 //=============================================================================
// emit call stub, compiled java to interpreter
void emit_java_to_interp(CodeBuffer &cbuf ) {

  // Stub is fixed up when the corresponding call is converted from calling
  // compiled code to calling interpreted code.
  // Emitted sequence:
  // set (empty), G5
  // jmp -1

  address mark = cbuf.insts_mark();  // get mark within main instrs section

  MacroAssembler _masm(&cbuf);

  address base =
  __ start_a_stub(Compile::MAX_stubs_size);
  if (base == NULL)  return;  // CodeBuffer::expand failed

  // static stub relocation stores the instruction address of the call
  __ relocate(static_stub_Relocation::spec(mark));

  // Placeholder metadata (NULL); patched with the real Method* when the
  // call site is resolved to the interpreter entry.
  __ set_metadata(NULL, reg_to_register_object(Matcher::inline_cache_reg_encode()));

  __ set_inst_mark();
  // Placeholder destination (-1); patched later as well.
  AddressLiteral addrlit(-1);
  __ JUMP(addrlit, G3, 0);

  __ delayed()->nop();

  // Update current stubs pointer and restore code_end.
  __ end_a_stub();
}
1691 // size of call stub, compiled java to interpretor
1692 uint size_java_to_interp() {
1693 // This doesn't need to be accurate but it must be larger or equal to
1694 // the real size of the stub.
1695 return (NativeMovConstReg::instruction_size + // sethi/setlo;
1696 NativeJump::instruction_size + // sethi; jmp; nop
1697 (TraceJumps ? 20 * BytesPerInstWord : 0) );
1698 }
// relocation entries for call stub, compiled java to interpreter
// Upper bound on the number of relocation records the stub needs.
uint reloc_java_to_interp() {
  return 10; // 4 in emit_java_to_interp + 1 in Java_Static_Call
}
1705 //=============================================================================
#ifndef PRODUCT
// Pretty-print the Unverified Entry Point (inline cache check).
// The text here must stay in sync with the code emitted by
// MachUEPNode::emit below.
void MachUEPNode::format( PhaseRegAlloc *ra_, outputStream *st ) const {
  st->print_cr("\nUEP:");
#ifdef _LP64
  if (UseCompressedKlassPointers) {
    assert(Universe::heap() != NULL, "java heap should be initialized");
    // Compressed klass: narrow load, decode by shift (and add base if any).
    st->print_cr("\tLDUW   [R_O0 + oopDesc::klass_offset_in_bytes],R_G5\t! Inline cache check - compressed klass");
    st->print_cr("\tSLL    R_G5,3,R_G5");
    if (Universe::narrow_klass_base() != NULL)
      st->print_cr("\tADD    R_G5,R_G6_heap_base,R_G5");
  } else {
    st->print_cr("\tLDX    [R_O0 + oopDesc::klass_offset_in_bytes],R_G5\t! Inline cache check");
  }

  st->print_cr("\tCMP    R_G5,R_G3" );
  st->print   ("\tTne    xcc,R_G0+ST_RESERVED_FOR_USER_0+2");
#else  // _LP64
  st->print_cr("\tLDUW   [R_O0 + oopDesc::klass_offset_in_bytes],R_G5\t! Inline cache check");
  st->print_cr("\tCMP    R_G5,R_G3" );
  st->print   ("\tTne    icc,R_G0+ST_RESERVED_FOR_USER_0+2");
#endif  // _LP64
}
#endif
// Emit the Unverified Entry Point: verify that the receiver's klass matches
// the inline cache's expected klass, trapping to the miss handler otherwise.
void MachUEPNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
  MacroAssembler _masm(&cbuf);
  Register G5_ic_reg  = reg_to_register_object(Matcher::inline_cache_reg_encode());
  Register temp_reg   = G3;
  assert( G5_ic_reg != temp_reg, "conflicting registers" );

  // Load klass from receiver
  // (load_klass also handles the compressed-klass decode — see format above)
  __ load_klass(O0, temp_reg);
  // Compare against expected klass
  __ cmp(temp_reg, G5_ic_reg);
  // Branch to miss code, checks xcc or icc depending
  __ trap(Assembler::notEqual, Assembler::ptr_cc, G0, ST_RESERVED_FOR_USER_0+2);
}
// Byte size of the UEP sequence; measured via the generic scratch-emit path.
uint MachUEPNode::size(PhaseRegAlloc *ra_) const {
  return MachNode::size(ra_);
}
1748 //=============================================================================
1750 uint size_exception_handler() {
1751 if (TraceJumps) {
1752 return (400); // just a guess
1753 }
1754 return ( NativeJump::instruction_size ); // sethi;jmp;nop
1755 }
1757 uint size_deopt_handler() {
1758 if (TraceJumps) {
1759 return (400); // just a guess
1760 }
1761 return ( 4+ NativeJump::instruction_size ); // save;sethi;jmp;restore
1762 }
// Emit exception handler code.
// Emits a stub that jumps to the shared exception blob; returns the offset
// of the handler within the code buffer, or 0 if the buffer could not grow.
int emit_exception_handler(CodeBuffer& cbuf) {
  Register temp_reg = G3;
  AddressLiteral exception_blob(OptoRuntime::exception_blob()->entry_point());
  MacroAssembler _masm(&cbuf);

  address base =
  __ start_a_stub(size_exception_handler());
  if (base == NULL)  return 0;  // CodeBuffer::expand failed

  int offset = __ offset();

  __ JUMP(exception_blob, temp_reg, 0); // sethi;jmp
  __ delayed()->nop();

  assert(__ offset() - offset <= (int) size_exception_handler(), "overflow");

  __ end_a_stub();

  return offset;
}
// Emit deopt handler code.
// Emits a stub that saves a frame and jumps to the deopt blob's unpack
// entry; returns the handler's offset, or 0 if the buffer could not grow.
int emit_deopt_handler(CodeBuffer& cbuf) {
  // Can't use any of the current frame's registers as we may have deopted
  // at a poll and everything (including G3) can be live.
  Register temp_reg = L0;
  AddressLiteral deopt_blob(SharedRuntime::deopt_blob()->unpack());
  MacroAssembler _masm(&cbuf);

  address base =
  __ start_a_stub(size_deopt_handler());
  if (base == NULL)  return 0;  // CodeBuffer::expand failed

  int offset = __ offset();
  __ save_frame(0);
  __ JUMP(deopt_blob, temp_reg, 0); // sethi;jmp
  // Restore the caller's window in the delay slot of the jump.
  __ delayed()->restore();

  assert(__ offset() - offset <= (int) size_deopt_handler(), "overflow");

  __ end_a_stub();
  return offset;

}
// Given a register encoding, produce a Integer Register object
// (the assert spot-checks that VM encodings agree with AD-file encodings).
static Register reg_to_register_object(int register_encoding) {
  assert(L5->encoding() == R_L5_enc && G1->encoding() == R_G1_enc, "right coding");
  return as_Register(register_encoding);
}
// Given a register encoding, produce a single-precision Float Register object
// (spot-checks that single-precision VM encodings match the AD-file ones).
static FloatRegister reg_to_SingleFloatRegister_object(int register_encoding) {
  assert(F5->encoding(FloatRegisterImpl::S) == R_F5_enc && F12->encoding(FloatRegisterImpl::S) == R_F12_enc, "right coding");
  return as_SingleFloatRegister(register_encoding);
}
// Given a register encoding, produce a double-precision Float Register object
// (spot-checks both the low-half and high-half double-register encodings).
static FloatRegister reg_to_DoubleFloatRegister_object(int register_encoding) {
  assert(F4->encoding(FloatRegisterImpl::D) == R_F4_enc, "right coding");
  assert(F32->encoding(FloatRegisterImpl::D) == R_D32_enc, "right coding");
  return as_DoubleFloatRegister(register_encoding);
}
1828 const bool Matcher::match_rule_supported(int opcode) {
1829 if (!has_match_rule(opcode))
1830 return false;
1832 switch (opcode) {
1833 case Op_CountLeadingZerosI:
1834 case Op_CountLeadingZerosL:
1835 case Op_CountTrailingZerosI:
1836 case Op_CountTrailingZerosL:
1837 case Op_PopCountI:
1838 case Op_PopCountL:
1839 if (!UsePopCountInstruction)
1840 return false;
1841 case Op_CompareAndSwapL:
1842 #ifdef _LP64
1843 case Op_CompareAndSwapP:
1844 #endif
1845 if (!VM_Version::supports_cx8())
1846 return false;
1847 break;
1848 }
1850 return true; // Per default match rules are supported.
1851 }
// Map an OptoReg number to its offset within the FPU register bank.
int Matcher::regnum_to_fpu_offset(int regnum) {
  return regnum - 32;        // The FP registers are in the second chunk
}
#ifdef ASSERT
// Debug-only: address of the most recently emitted Rethrow sequence.
address last_rethrow = NULL;  // debugging aid for Rethrow encoding
#endif
// Vector width in bytes
// SPARC only supports 8-byte vectors (held in a double FP register).
const int Matcher::vector_width_in_bytes(BasicType bt) {
  assert(MaxVectorSize == 8, "");
  return 8;
}

// Vector ideal reg
// All vectors live in double FP registers.
const int Matcher::vector_ideal_reg(int size) {
  assert(MaxVectorSize == 8, "");
  return Op_RegD;
}

// Vector shift instructions are not implemented on SPARC.
const int Matcher::vector_shift_count_ideal_reg(int size) {
  fatal("vector shift is not supported");
  return Node::NotAMachineReg;
}
// Limits on vector size (number of elements) loaded into vector.
const int Matcher::max_vector_size(const BasicType bt) {
  assert(is_java_primitive(bt), "only primitive type vectors");
  // 8 bytes divided by the element size of bt.
  return vector_width_in_bytes(bt)/type2aelembytes(bt);
}

// Only one vector size is supported, so min == max.
const int Matcher::min_vector_size(const BasicType bt) {
  return max_vector_size(bt); // Same as max.
}

// SPARC doesn't support misaligned vectors store/load.
const bool Matcher::misaligned_vectors_ok() {
  return false;
}
// USII supports fxtof through the whole range of number, USIII doesn't
// (so long-to-float conversion is only matched where fxtof is fast).
const bool Matcher::convL2FSupported(void) {
  return VM_Version::has_fast_fxtof();
}
1898 // Is this branch offset short enough that a short branch can be used?
1899 //
1900 // NOTE: If the platform does not provide any short branch variants, then
1901 // this method should return false for offset 0.
1902 bool Matcher::is_short_branch_offset(int rule, int br_size, int offset) {
1903 // The passed offset is relative to address of the branch.
1904 // Don't need to adjust the offset.
1905 return UseCBCond && Assembler::is_simm12(offset);
1906 }
1908 const bool Matcher::isSimpleConstant64(jlong value) {
1909 // Will one (StoreL ConL) be cheaper than two (StoreI ConI)?.
1910 // Depends on optimizations in MacroAssembler::setx.
1911 int hi = (int)(value >> 32);
1912 int lo = (int)(value & ~0);
1913 return (hi == 0) || (hi == -1) || (lo == 0);
1914 }
// No scaling for the parameter the ClearArray node.
const bool Matcher::init_array_count_is_in_bytes = true;

// Threshold size for cleararray.
const int Matcher::init_array_short_size = 8 * BytesPerLong;

// No additional cost for CMOVL.
const int Matcher::long_cmove_cost() { return 0; }

// CMOVF/CMOVD are expensive on T4 and on SPARC64,
// so effectively disable them there by charging ConditionalMoveLimit.
const int Matcher::float_cmove_cost() {
  return (VM_Version::is_T4() || VM_Version::is_sparc64()) ? ConditionalMoveLimit : 0;
}

// Should the Matcher clone shifts on addressing modes, expecting them to
// be subsumed into complex addressing expressions or compute them into
// registers?  True for Intel but false for most RISCs
const bool Matcher::clone_shift_expressions = false;

// Do we need to mask the count passed to shift instructions or does
// the cpu only look at the lower 5/6 bits anyway?
const bool Matcher::need_masked_shift_count = false;
// May narrow-oop decode be folded into an addressing expression?
// Not on SPARC (32-bit builds must not call this at all).
bool Matcher::narrow_oop_use_complex_address() {
  NOT_LP64(ShouldNotCallThis());
  assert(UseCompressedOops, "only for compressed oops code");
  return false;
}

// Same question for narrow klass pointers; also no on SPARC.
bool Matcher::narrow_klass_use_complex_address() {
  NOT_LP64(ShouldNotCallThis());
  assert(UseCompressedKlassPointers, "only for compressed klass code");
  return false;
}
// Is it better to copy float constants, or load them directly from memory?
// Intel can load a float constant from a direct address, requiring no
// extra registers.  Most RISCs will have to materialize an address into a
// register first, so they would do better to copy the constant from stack.
const bool Matcher::rematerialize_float_constants = false;

// If CPU can load and store mis-aligned doubles directly then no fixup is
// needed.  Else we split the double into 2 integer pieces and move it
// piece-by-piece.  Only happens when passing doubles into C code as the
// Java calling convention forces doubles to be aligned.
#ifdef _LP64
const bool Matcher::misaligned_doubles_ok = true;
#else
const bool Matcher::misaligned_doubles_ok = false;
#endif
// No-op on SPARC.
// (Implicit-null-check machinery needs no per-node fixup here.)
void Matcher::pd_implicit_null_fixup(MachNode *node, uint idx) {
}

// Advertise here if the CPU requires explicit rounding operations
// to implement the UseStrictFP mode.
const bool Matcher::strict_fp_requires_explicit_rounding = false;

// Are floats converted to double when stored to stack during deoptimization?
// Sparc does not handle callee-save floats.
bool Matcher::float_in_double() { return false; }
// Do ints take an entire long register or just half?
// Note that we if-def off of _LP64.
// The relevant question is how the int is callee-saved.  In _LP64
// the whole long is written but de-opt'ing will have to extract
// the relevant 32 bits, in not-_LP64 only the low 32 bits is written.
#ifdef _LP64
const bool Matcher::int_in_long = true;
#else
const bool Matcher::int_in_long = false;
#endif
// Return whether or not this register is ever used as an argument.  This
// function is used on startup to build the trampoline stubs in generateOptoStub.
// Registers not mentioned will be killed by the VM call in the trampoline, and
// arguments in those registers will not be available to the callee.
bool Matcher::can_be_java_arg( int reg ) {
  // Standard sparc 6 args in registers
  if( reg == R_I0_num ||
      reg == R_I1_num ||
      reg == R_I2_num ||
      reg == R_I3_num ||
      reg == R_I4_num ||
      reg == R_I5_num ) return true;
#ifdef _LP64
  // 64-bit builds can pass 64-bit pointers and longs in
  // the high I registers
  if( reg == R_I0H_num ||
      reg == R_I1H_num ||
      reg == R_I2H_num ||
      reg == R_I3H_num ||
      reg == R_I4H_num ||
      reg == R_I5H_num ) return true;

  // NOTE(review): G6 is reported as an arg register only under compressed
  // oops — presumably so the trampolines preserve the heap-base register;
  // confirm against generateOptoStub.
  if ((UseCompressedOops) && (reg == R_G6_num || reg == R_G6H_num)) {
    return true;
  }

#else
  // 32-bit builds with longs-in-one-entry pass longs in G1 & G4.
  // Longs cannot be passed in O regs, because O regs become I regs
  // after a 'save' and I regs get their high bits chopped off on
  // interrupt.
  if( reg == R_G1H_num || reg == R_G1_num ) return true;
  if( reg == R_G4H_num || reg == R_G4_num ) return true;
#endif
  // A few float args in registers
  if( reg >= R_F0_num && reg <= R_F7_num ) return true;

  return false;
}
// A register is spillable as an argument exactly when it can carry one.
bool Matcher::is_spillable_arg( int reg ) {
  return can_be_java_arg(reg);
}
// Use hardware SDIVX instruction when it is
// faster than a code which use multiply.
// Note: the decision depends only on the CPU capability; the particular
// divisor value is not consulted.
bool Matcher::use_asm_for_ldiv_by_con( jlong divisor ) {
  return VM_Version::has_fast_idiv();
}
// The combined DivModI/DivModL nodes are never matched on SPARC
// (presumably because there is no fused div/mod instruction — confirm
// against the match rules), so none of these projections may be queried.

// Register for DIVI projection of divmodI
RegMask Matcher::divI_proj_mask() {
  ShouldNotReachHere();
  return RegMask();
}

// Register for MODI projection of divmodI
RegMask Matcher::modI_proj_mask() {
  ShouldNotReachHere();
  return RegMask();
}

// Register for DIVL projection of divmodL
RegMask Matcher::divL_proj_mask() {
  ShouldNotReachHere();
  return RegMask();
}

// Register for MODL projection of divmodL
RegMask Matcher::modL_proj_mask() {
  ShouldNotReachHere();
  return RegMask();
}
// Register mask for the location where SP is saved across a
// method-handle invoke: L7 on SPARC.
const RegMask Matcher::method_handle_invoke_SP_save_mask() {
  return L7_REGP_mask();
}
2068 %}
// The intptr_t operand types, defined by textual substitution.
// (Cf. opto/type.hpp.  This lets us avoid many, many other ifdefs.)
// The X forms name the pointer-width flavor of each operand: the L (long)
// operands on 64-bit builds and the I (int) operands on 32-bit builds.
#ifdef _LP64
#define immX      immL
#define immX13    immL13
#define immX13m7  immL13m7
#define iRegX     iRegL
#define g1RegX    g1RegL
#else
#define immX      immI
#define immX13    immI13
#define immX13m7  immI13m7
#define iRegX     iRegI
#define g1RegX    g1RegI
#endif
2087 //----------ENCODING BLOCK-----------------------------------------------------
2088 // This block specifies the encoding classes used by the compiler to output
2089 // byte streams. Encoding classes are parameterized macros used by
2090 // Machine Instruction Nodes in order to generate the bit encoding of the
2091 // instruction. Operands specify their base encoding interface with the
// interface keyword.  Four interfaces are currently supported:
2093 // REG_INTER, CONST_INTER, MEMORY_INTER, & COND_INTER. REG_INTER causes an
2094 // operand to generate a function which returns its register number when
2095 // queried. CONST_INTER causes an operand to generate a function which
2096 // returns the value of the constant when queried. MEMORY_INTER causes an
2097 // operand to generate four functions which return the Base Register, the
2098 // Index Register, the Scale Value, and the Offset Value of the operand when
2099 // queried. COND_INTER causes an operand to generate six functions which
2100 // return the encoding code (ie - encoding bits for the instruction)
2101 // associated with each basic boolean condition for a conditional instruction.
2102 //
2103 // Instructions specify two basic values for encoding. Again, a function
2104 // is available to check if the constant displacement is an oop. They use the
2105 // ins_encode keyword to specify their encoding classes (which must be
2106 // a sequence of enc_class names, and their parameters, specified in
2107 // the encoding block), and they use the
2108 // opcode keyword to specify, in order, their primary, secondary, and
2109 // tertiary opcode. Only the opcode sections which a particular instruction
2110 // needs for encoding need to be specified.
2111 encode %{
  // Debug aid: flags an encoding that has never been exercised
  // (no code emitted in product builds).
  enc_class enc_untested %{
#ifdef ASSERT
    MacroAssembler _masm(&cbuf);
    __ untested("encoding");
#endif
  %}
  // Format-3 memory access: primary opcode selects the load/store op,
  // tertiary (when used) supplies an alternate opcode variant.
  enc_class form3_mem_reg( memory mem, iRegI dst ) %{
    emit_form3_mem_reg(cbuf, this, $primary, $tertiary,
                       $mem$$base, $mem$$disp, $mem$$index, $dst$$reg);
  %}

  // Same, but with no tertiary opcode.
  enc_class simple_form3_mem_reg( memory mem, iRegI dst ) %{
    emit_form3_mem_reg(cbuf, this, $primary, -1,
                       $mem$$base, $mem$$disp, $mem$$index, $dst$$reg);
  %}

  // PREFETCH with function code 0 (many-reads) in the rd field.
  enc_class form3_mem_prefetch_read( memory mem ) %{
    emit_form3_mem_reg(cbuf, this, $primary, -1,
                       $mem$$base, $mem$$disp, $mem$$index, 0/*prefetch function many-reads*/);
  %}

  // PREFETCH with function code 2 (many-writes) in the rd field.
  enc_class form3_mem_prefetch_write( memory mem ) %{
    emit_form3_mem_reg(cbuf, this, $primary, -1,
                       $mem$$base, $mem$$disp, $mem$$index, 2/*prefetch function many-writes*/);
  %}
  // Load an unaligned 64-bit long as two 32-bit words and marshal them into
  // one register: low word -> O7, high word -> reg, reg <<= 32, reg |= O7.
  // (0x1020 is shift count 32 with the 64-bit shift flag 0x1000 set.)
  enc_class form3_mem_reg_long_unaligned_marshal( memory mem, iRegL reg ) %{
    assert(Assembler::is_simm13($mem$$disp ), "need disp and disp+4");
    assert(Assembler::is_simm13($mem$$disp+4), "need disp and disp+4");
    guarantee($mem$$index == R_G0_enc, "double index?");
    emit_form3_mem_reg(cbuf, this, $primary, -1, $mem$$base, $mem$$disp+4, R_G0_enc, R_O7_enc );
    emit_form3_mem_reg(cbuf, this, $primary, -1, $mem$$base, $mem$$disp,   R_G0_enc, $reg$$reg );
    emit3_simm13( cbuf, Assembler::arith_op, $reg$$reg, Assembler::sllx_op3, $reg$$reg, 0x1020 );
    emit3( cbuf, Assembler::arith_op, $reg$$reg, Assembler::or_op3, $reg$$reg, 0, R_O7_enc );
  %}

  // Load an unaligned double as two 32-bit word accesses into the two
  // halves of an even/odd float-register pair.
  enc_class form3_mem_reg_double_unaligned( memory mem, RegD_low reg ) %{
    assert(Assembler::is_simm13($mem$$disp ), "need disp and disp+4");
    assert(Assembler::is_simm13($mem$$disp+4), "need disp and disp+4");
    guarantee($mem$$index == R_G0_enc, "double index?");
    // Load long with 2 instructions
    emit_form3_mem_reg(cbuf, this, $primary, -1, $mem$$base, $mem$$disp,   R_G0_enc, $reg$$reg+0 );
    emit_form3_mem_reg(cbuf, this, $primary, -1, $mem$$base, $mem$$disp+4, R_G0_enc, $reg$$reg+1 );
  %}
  //%%% form3_mem_plus_4_reg is a hack--get rid of it
  // Memory access at disp+4; only valid when a displacement is present.
  enc_class form3_mem_plus_4_reg( memory mem, iRegI dst ) %{
    guarantee($mem$$disp, "cannot offset a reg-reg operand by 4");
    emit_form3_mem_reg(cbuf, this, $primary, -1, $mem$$base, $mem$$disp + 4, $mem$$index, $dst$$reg);
  %}
  // Reg-reg move via OR rs2 with %g0; all of the *_move encodings below
  // emit nothing when source and destination are the same register.
  enc_class form3_g0_rs2_rd_move( iRegI rs2, iRegI rd ) %{
    // Encode a reg-reg copy.  If it is useless, then empty encoding.
    if( $rs2$$reg != $rd$$reg )
      emit3( cbuf, Assembler::arith_op, $rd$$reg, Assembler::or_op3, 0, 0, $rs2$$reg );
  %}

  // Target lo half of long
  enc_class form3_g0_rs2_rd_move_lo( iRegI rs2, iRegL rd ) %{
    // Encode a reg-reg copy.  If it is useless, then empty encoding.
    if( $rs2$$reg != LONG_LO_REG($rd$$reg) )
      emit3( cbuf, Assembler::arith_op, LONG_LO_REG($rd$$reg), Assembler::or_op3, 0, 0, $rs2$$reg );
  %}

  // Source lo half of long
  enc_class form3_g0_rs2_rd_move_lo2( iRegL rs2, iRegI rd ) %{
    // Encode a reg-reg copy.  If it is useless, then empty encoding.
    if( LONG_LO_REG($rs2$$reg) != $rd$$reg )
      emit3( cbuf, Assembler::arith_op, $rd$$reg, Assembler::or_op3, 0, 0, LONG_LO_REG($rs2$$reg) );
  %}

  // Target hi half of long: fill it with the sign of rs1 (SRA rs1,31,rd).
  enc_class form3_rs1_rd_copysign_hi( iRegI rs1, iRegL rd ) %{
    emit3_simm13( cbuf, Assembler::arith_op, $rd$$reg, Assembler::sra_op3, $rs1$$reg, 31 );
  %}

  // Source lo half of long, and leave it sign extended.
  enc_class form3_rs1_rd_signextend_lo1( iRegL rs1, iRegI rd ) %{
    // Sign extend low half (SRA rs1,0,rd)
    emit3( cbuf, Assembler::arith_op, $rd$$reg, Assembler::sra_op3, $rs1$$reg, 0, 0 );
  %}

  // Source hi half of long, and leave it sign extended.
  enc_class form3_rs1_rd_copy_hi1( iRegL rs1, iRegI rd ) %{
    // Shift high half to low half (SRLX rs1,32,rd)
    emit3_simm13( cbuf, Assembler::arith_op, $rd$$reg, Assembler::srlx_op3, $rs1$$reg, 32 );
  %}

  // Source hi half of long
  enc_class form3_g0_rs2_rd_move_hi2( iRegL rs2, iRegI rd ) %{
    // Encode a reg-reg copy.  If it is useless, then empty encoding.
    if( LONG_HI_REG($rs2$$reg) != $rd$$reg )
      emit3( cbuf, Assembler::arith_op, $rd$$reg, Assembler::or_op3, 0, 0, LONG_HI_REG($rs2$$reg) );
  %}
  // Generic three-register arithmetic: op3 from $primary, op from $secondary.
  enc_class form3_rs1_rs2_rd( iRegI rs1, iRegI rs2, iRegI rd ) %{
    emit3( cbuf, $secondary, $rd$$reg, $primary, $rs1$$reg, 0, $rs2$$reg );
  %}

  // dst = (src != 0) ? 1 : 0.
  // SUBcc %g0,src,%g0 sets the carry flag iff src != 0;
  // ADDC %g0,0,dst then materializes that carry as 0/1.
  enc_class enc_to_bool( iRegI src, iRegI dst ) %{
    emit3       ( cbuf, Assembler::arith_op, 0, Assembler::subcc_op3, 0, 0, $src$$reg );
    emit3_simm13( cbuf, Assembler::arith_op, $dst$$reg, Assembler::addc_op3 , 0, 0 );
  %}

  // dst = (p < q) ? -1 : 0 — compare, preset dst to 0, and conditionally
  // overwrite with -1 via an annulled branch with a delay-slot move.
  enc_class enc_ltmask( iRegI p, iRegI q, iRegI dst ) %{
    emit3       ( cbuf, Assembler::arith_op, 0, Assembler::subcc_op3, $p$$reg, 0, $q$$reg );
    // clear if nothing else is happening
    emit3_simm13( cbuf, Assembler::arith_op, $dst$$reg, Assembler::or_op3, 0, 0 );
    // blt,a,pn done
    emit2_19    ( cbuf, Assembler::branch_op, 1/*annul*/, Assembler::less, Assembler::bp_op2, Assembler::icc, 0/*predict not taken*/, 2 );
    // mov dst,-1 in delay slot
    emit3_simm13( cbuf, Assembler::arith_op, $dst$$reg, Assembler::or_op3, 0, -1 );
  %}

  // 32-bit shift by a 5-bit immediate count.
  enc_class form3_rs1_imm5_rd( iRegI rs1, immU5 imm5, iRegI rd ) %{
    emit3_simm13( cbuf, $secondary, $rd$$reg, $primary, $rs1$$reg, $imm5$$constant & 0x1F );
  %}

  // 64-bit shift by a 6-bit immediate count (0x1000 is the wide-shift flag).
  enc_class form3_sd_rs1_imm6_rd( iRegL rs1, immU6 imm6, iRegL rd ) %{
    emit3_simm13( cbuf, $secondary, $rd$$reg, $primary, $rs1$$reg, ($imm6$$constant & 0x3F) | 0x1000 );
  %}

  // 64-bit shift by a register count (0x80 selects the wide form).
  enc_class form3_sd_rs1_rs2_rd( iRegL rs1, iRegI rs2, iRegL rd ) %{
    emit3( cbuf, $secondary, $rd$$reg, $primary, $rs1$$reg, 0x80, $rs2$$reg );
  %}

  // Generic reg + simm13 arithmetic.
  enc_class form3_rs1_simm13_rd( iRegI rs1, immI13 simm13, iRegI rd ) %{
    emit3_simm13( cbuf, $secondary, $rd$$reg, $primary, $rs1$$reg, $simm13$$constant );
  %}
  // O1 = O7 + pc_return_offset: compute the return PC from the link register.
  enc_class move_return_pc_to_o1() %{
    emit3_simm13( cbuf, Assembler::arith_op, R_O1_enc, Assembler::add_op3, R_O7_enc, frame::pc_return_offset );
  %}
#ifdef _LP64
  /* %%% merge with enc_to_bool */
  // dst = (src != NULL) ? 1 : 0, via a conditional move on register value.
  enc_class enc_convP2B( iRegI dst, iRegP src ) %{
    MacroAssembler _masm(&cbuf);

    Register   src_reg = reg_to_register_object($src$$reg);
    Register   dst_reg = reg_to_register_object($dst$$reg);
    __ movr(Assembler::rc_nz, src_reg, 1, dst_reg);
  %}
#endif
  // Fused compare-LT-mask + conditional add, as noted in the ideal form below.
  enc_class enc_cadd_cmpLTMask( iRegI p, iRegI q, iRegI y, iRegI tmp ) %{
    // (Set p (AddI (AndI (CmpLTMask p q) y) (SubI p q)))
    MacroAssembler _masm(&cbuf);

    Register   p_reg = reg_to_register_object($p$$reg);
    Register   q_reg = reg_to_register_object($q$$reg);
    Register   y_reg = reg_to_register_object($y$$reg);
    Register tmp_reg = reg_to_register_object($tmp$$reg);

    __ subcc( p_reg, q_reg,   p_reg );   // p = p - q, setting icc
    __ add  ( p_reg, y_reg, tmp_reg );   // tmp = p + y
    __ movcc( Assembler::less, false, Assembler::icc, tmp_reg, p_reg );  // p = tmp if (p-q) < 0
  %}
  // double -> int conversion with Java NaN semantics: if src is NaN the
  // result is forced to 0 via the not-taken path below.
  enc_class form_d2i_helper(regD src, regF dst) %{
    // fcmp %fcc0,$src,$src
    emit3( cbuf, Assembler::arith_op , Assembler::fcc0, Assembler::fpop2_op3, $src$$reg, Assembler::fcmpd_opf, $src$$reg );
    // branch %fcc0 not-nan, predict taken
    emit2_19( cbuf, Assembler::branch_op, 0/*annul*/, Assembler::f_ordered, Assembler::fbp_op2, Assembler::fcc0, 1/*predict taken*/, 4 );
    // fdtoi $src,$dst
    emit3( cbuf, Assembler::arith_op , $dst$$reg, Assembler::fpop1_op3, 0, Assembler::fdtoi_opf, $src$$reg );
    // fitos $dst,$dst (if nan)
    emit3( cbuf, Assembler::arith_op , $dst$$reg, Assembler::fpop1_op3, 0, Assembler::fitos_opf, $dst$$reg );
    // clear $dst (if nan)
    emit3( cbuf, Assembler::arith_op , $dst$$reg, Assembler::fpop1_op3, $dst$$reg, Assembler::fsubs_opf, $dst$$reg );
    // carry on here...
  %}
2286 enc_class form_d2l_helper(regD src, regD dst) %{
2287 // fcmp %fcc0,$src,$src check for NAN
2288 emit3( cbuf, Assembler::arith_op , Assembler::fcc0, Assembler::fpop2_op3, $src$$reg, Assembler::fcmpd_opf, $src$$reg );
2289 // branch %fcc0 not-nan, predict taken
2290 emit2_19( cbuf, Assembler::branch_op, 0/*annul*/, Assembler::f_ordered, Assembler::fbp_op2, Assembler::fcc0, 1/*predict taken*/, 4 );
2291 // fdtox $src,$dst convert in delay slot
2292 emit3( cbuf, Assembler::arith_op , $dst$$reg, Assembler::fpop1_op3, 0, Assembler::fdtox_opf, $src$$reg );
2293 // fxtod $dst,$dst (if nan)
2294 emit3( cbuf, Assembler::arith_op , $dst$$reg, Assembler::fpop1_op3, 0, Assembler::fxtod_opf, $dst$$reg );
2295 // clear $dst (if nan)
2296 emit3( cbuf, Assembler::arith_op , $dst$$reg, Assembler::fpop1_op3, $dst$$reg, Assembler::fsubd_opf, $dst$$reg );
2297 // carry on here...
2298 %}
2300 enc_class form_f2i_helper(regF src, regF dst) %{
2301 // fcmps %fcc0,$src,$src
2302 emit3( cbuf, Assembler::arith_op , Assembler::fcc0, Assembler::fpop2_op3, $src$$reg, Assembler::fcmps_opf, $src$$reg );
2303 // branch %fcc0 not-nan, predict taken
2304 emit2_19( cbuf, Assembler::branch_op, 0/*annul*/, Assembler::f_ordered, Assembler::fbp_op2, Assembler::fcc0, 1/*predict taken*/, 4 );
2305 // fstoi $src,$dst
2306 emit3( cbuf, Assembler::arith_op , $dst$$reg, Assembler::fpop1_op3, 0, Assembler::fstoi_opf, $src$$reg );
2307 // fitos $dst,$dst (if nan)
2308 emit3( cbuf, Assembler::arith_op , $dst$$reg, Assembler::fpop1_op3, 0, Assembler::fitos_opf, $dst$$reg );
2309 // clear $dst (if nan)
2310 emit3( cbuf, Assembler::arith_op , $dst$$reg, Assembler::fpop1_op3, $dst$$reg, Assembler::fsubs_opf, $dst$$reg );
2311 // carry on here...
2312 %}
2314 enc_class form_f2l_helper(regF src, regD dst) %{
2315 // fcmps %fcc0,$src,$src
2316 emit3( cbuf, Assembler::arith_op , Assembler::fcc0, Assembler::fpop2_op3, $src$$reg, Assembler::fcmps_opf, $src$$reg );
2317 // branch %fcc0 not-nan, predict taken
2318 emit2_19( cbuf, Assembler::branch_op, 0/*annul*/, Assembler::f_ordered, Assembler::fbp_op2, Assembler::fcc0, 1/*predict taken*/, 4 );
2319 // fstox $src,$dst
2320 emit3( cbuf, Assembler::arith_op , $dst$$reg, Assembler::fpop1_op3, 0, Assembler::fstox_opf, $src$$reg );
2321 // fxtod $dst,$dst (if nan)
2322 emit3( cbuf, Assembler::arith_op , $dst$$reg, Assembler::fpop1_op3, 0, Assembler::fxtod_opf, $dst$$reg );
2323 // clear $dst (if nan)
2324 emit3( cbuf, Assembler::arith_op , $dst$$reg, Assembler::fpop1_op3, $dst$$reg, Assembler::fsubd_opf, $dst$$reg );
2325 // carry on here...
2326 %}
// Single-operand FPop encodings (op3/opf from $primary/$secondary/$tertiary).
// The F/D suffixes name the operand widths; +1 selects the odd half of a
// double-register pair (V8-style register pairing).
2328   enc_class form3_opf_rs2F_rdF(regF rs2, regF rd) %{ emit3(cbuf,$secondary,$rd$$reg,$primary,0,$tertiary,$rs2$$reg); %}
2329   enc_class form3_opf_rs2F_rdD(regF rs2, regD rd) %{ emit3(cbuf,$secondary,$rd$$reg,$primary,0,$tertiary,$rs2$$reg); %}
2330   enc_class form3_opf_rs2D_rdF(regD rs2, regF rd) %{ emit3(cbuf,$secondary,$rd$$reg,$primary,0,$tertiary,$rs2$$reg); %}
2331   enc_class form3_opf_rs2D_rdD(regD rs2, regD rd) %{ emit3(cbuf,$secondary,$rd$$reg,$primary,0,$tertiary,$rs2$$reg); %}

2333   enc_class form3_opf_rs2D_lo_rdF(regD rs2, regF rd) %{ emit3(cbuf,$secondary,$rd$$reg,$primary,0,$tertiary,$rs2$$reg+1); %}

2335   enc_class form3_opf_rs2D_hi_rdD_hi(regD rs2, regD rd) %{ emit3(cbuf,$secondary,$rd$$reg,$primary,0,$tertiary,$rs2$$reg); %}
2336   enc_class form3_opf_rs2D_lo_rdD_lo(regD rs2, regD rd) %{ emit3(cbuf,$secondary,$rd$$reg+1,$primary,0,$tertiary,$rs2$$reg+1); %}

// Two-operand FPop encodings (e.g. fadd/fsub/fmul/fdiv families).
2338   enc_class form3_opf_rs1F_rs2F_rdF( regF rs1, regF rs2, regF rd ) %{
2339     emit3( cbuf, $secondary, $rd$$reg, $primary, $rs1$$reg, $tertiary, $rs2$$reg );
2340   %}

2342   enc_class form3_opf_rs1D_rs2D_rdD( regD rs1, regD rs2, regD rd ) %{
2343     emit3( cbuf, $secondary, $rd$$reg, $primary, $rs1$$reg, $tertiary, $rs2$$reg );
2344   %}

// FP compare encodings: the destination field carries the fcc number.
2346   enc_class form3_opf_rs1F_rs2F_fcc( regF rs1, regF rs2, flagsRegF fcc ) %{
2347     emit3( cbuf, $secondary, $fcc$$reg, $primary, $rs1$$reg, $tertiary, $rs2$$reg );
2348   %}

2350   enc_class form3_opf_rs1D_rs2D_fcc( regD rs1, regD rs2, flagsRegF fcc ) %{
2351     emit3( cbuf, $secondary, $fcc$$reg, $primary, $rs1$$reg, $tertiary, $rs2$$reg );
2352   %}

// int -> float conversion: FPop1 with the opf selected by $secondary.
2354   enc_class form3_convI2F(regF rs2, regF rd) %{
2355     emit3(cbuf,Assembler::arith_op,$rd$$reg,Assembler::fpop1_op3,0,$secondary,$rs2$$reg);
2356   %}
2358   // Encoding class for traceable jumps
2359   enc_class form_jmpl(g3RegP dest) %{
2360     emit_jmpl(cbuf, $dest$$reg);
2361   %}

// Same as form_jmpl but also records the exception PC (see emit helper).
2363   enc_class form_jmpl_set_exception_pc(g1RegP dest) %{
2364     emit_jmpl_set_exception_pc(cbuf, $dest$$reg);
2365   %}

// Plain NOP filler.
2367   enc_class form2_nop() %{
2368     emit_nop(cbuf);
2369   %}

// ILLTRAP — used to force a trap on paths that must never execute.
2371   enc_class form2_illtrap() %{
2372     emit_illtrap(cbuf);
2373   %}
2376   // Compare longs and convert into -1, 0, 1.
// Branchy sequence: annulled bp branches skip the CLR / the "mov 1" so the
// delay-slot mov only takes effect when its branch is taken. Offsets 5 and 3
// are instruction-relative displacements to the end of the sequence.
2377   enc_class cmpl_flag( iRegL src1, iRegL src2, iRegI dst ) %{
2378     // CMP  $src1,$src2
2379     emit3( cbuf, Assembler::arith_op, 0, Assembler::subcc_op3, $src1$$reg, 0, $src2$$reg );
2380     // blt,a,pn done
2381     emit2_19( cbuf, Assembler::branch_op, 1/*annul*/, Assembler::less   , Assembler::bp_op2, Assembler::xcc, 0/*predict not taken*/, 5 );
2382     // mov  dst,-1 in delay slot
2383     emit3_simm13( cbuf, Assembler::arith_op, $dst$$reg, Assembler::or_op3, 0, -1 );
2384     // bgt,a,pn done
2385     emit2_19( cbuf, Assembler::branch_op, 1/*annul*/, Assembler::greater, Assembler::bp_op2, Assembler::xcc, 0/*predict not taken*/, 3 );
2386     // mov  dst,1 in delay slot
2387     emit3_simm13( cbuf, Assembler::arith_op, $dst$$reg, Assembler::or_op3, 0, 1 );
2388     // CLR  $dst
2389     emit3( cbuf, Assembler::arith_op, $dst$$reg, Assembler::or_op3 , 0, 0, 0 );
2390   %}
// Call the shared partial-subtype-check stub; result conventions are
// defined by StubRoutines::Sparc::partial_subtype_check().
2392   enc_class enc_PartialSubtypeCheck() %{
2393     MacroAssembler _masm(&cbuf);
2394     __ call(StubRoutines::Sparc::partial_subtype_check(), relocInfo::runtime_call_type);
2395     __ delayed()->nop();
2396   %}

// Conditional branch on icc; backward branches are predicted taken
// (loops), forward branches not taken.
2398   enc_class enc_bp( label labl, cmpOp cmp, flagsReg cc ) %{
2399     MacroAssembler _masm(&cbuf);
2400     Label* L = $labl$$label;
2401     Assembler::Predict predict_taken =
2402       cbuf.is_backward_branch(*L) ? Assembler::pt : Assembler::pn;
2404     __ bp( (Assembler::Condition)($cmp$$cmpcode), false, Assembler::icc, predict_taken, *L);
2405     __ delayed()->nop();
2406   %}

// Branch on register condition (bpr) — compares a register against zero
// directly, no condition codes needed.
2408   enc_class enc_bpr( label labl, cmpOp_reg cmp, iRegI op1 ) %{
2409     MacroAssembler _masm(&cbuf);
2410     Label* L = $labl$$label;
2411     Assembler::Predict predict_taken =
2412       cbuf.is_backward_branch(*L) ? Assembler::pt : Assembler::pn;
2414     __ bpr( (Assembler::RCondition)($cmp$$cmpcode), false, predict_taken, as_Register($op1$$reg), *L);
2415     __ delayed()->nop();
2416   %}
// MOVcc encodings built by hand (bit positions follow the SPARC V9 MOVcc
// format: rd<<25, op3<<19, cc2 at bit 18, cond<<14, i at bit 13,
// cc1:cc0 at bits 12:11).

// MOVcc rd, rs on integer condition codes; $pcc selects icc vs xcc.
2418   enc_class enc_cmov_reg( cmpOp cmp, iRegI dst, iRegI src, immI pcc) %{
2419     int op = (Assembler::arith_op << 30) |
2420              ($dst$$reg << 25) |
2421              (Assembler::movcc_op3 << 19) |
2422              (1 << 18) |                    // cc2 bit for 'icc'
2423              ($cmp$$cmpcode << 14) |
2424              (0 << 13) |                    // select register move
2425              ($pcc$$constant << 11) |       // cc1, cc0 bits for 'icc' or 'xcc'
2426              ($src$$reg << 0);
2427     cbuf.insts()->emit_int32(op);
2428   %}

// MOVcc rd, simm11 on integer condition codes.
2430   enc_class enc_cmov_imm( cmpOp cmp, iRegI dst, immI11 src, immI pcc ) %{
2431     int simm11 = $src$$constant & ((1<<11)-1); // Mask to 11 bits
2432     int op = (Assembler::arith_op << 30) |
2433              ($dst$$reg << 25) |
2434              (Assembler::movcc_op3 << 19) |
2435              (1 << 18) |                    // cc2 bit for 'icc'
2436              ($cmp$$cmpcode << 14) |
2437              (1 << 13) |                    // select immediate move
2438              ($pcc$$constant << 11) |       // cc1, cc0 bits for 'icc'
2439              (simm11 << 0);
2440     cbuf.insts()->emit_int32(op);
2441   %}

// MOVcc rd, rs on a floating condition code register (fcc0-fcc3).
2443   enc_class enc_cmov_reg_f( cmpOpF cmp, iRegI dst, iRegI src, flagsRegF fcc ) %{
2444     int op = (Assembler::arith_op << 30) |
2445              ($dst$$reg << 25) |
2446              (Assembler::movcc_op3 << 19) |
2447              (0 << 18) |                    // cc2 bit for 'fccX'
2448              ($cmp$$cmpcode << 14) |
2449              (0 << 13) |                    // select register move
2450              ($fcc$$reg << 11) |            // cc1, cc0 bits for fcc0-fcc3
2451              ($src$$reg << 0);
2452     cbuf.insts()->emit_int32(op);
2453   %}

// MOVcc rd, simm11 on a floating condition code register.
2455   enc_class enc_cmov_imm_f( cmpOp cmp, iRegI dst, immI11 src, flagsRegF fcc ) %{
2456     int simm11 = $src$$constant & ((1<<11)-1); // Mask to 11 bits
2457     int op = (Assembler::arith_op << 30) |
2458              ($dst$$reg << 25) |
2459              (Assembler::movcc_op3 << 19) |
2460              (0 << 18) |                    // cc2 bit for 'fccX'
2461              ($cmp$$cmpcode << 14) |
2462              (1 << 13) |                    // select immediate move
2463              ($fcc$$reg << 11) |            // cc1, cc0 bits for fcc0-fcc3
2464              (simm11 << 0);
2465     cbuf.insts()->emit_int32(op);
2466   %}

// FMOVcc (FPop2) conditional FP register move on integer condition codes;
// $primary selects the single/double/quad opf low bits.
2468   enc_class enc_cmovf_reg( cmpOp cmp, regD dst, regD src, immI pcc ) %{
2469     int op = (Assembler::arith_op << 30) |
2470              ($dst$$reg << 25) |
2471              (Assembler::fpop2_op3 << 19) |
2472              (0 << 18) |
2473              ($cmp$$cmpcode << 14) |
2474              (1 << 13) |                    // select register move
2475              ($pcc$$constant << 11) |       // cc1-cc0 bits for 'icc' or 'xcc'
2476              ($primary << 5) |              // select single, double or quad
2477              ($src$$reg << 0);
2478     cbuf.insts()->emit_int32(op);
2479   %}

// FMOVcc conditional FP register move on a floating condition code.
2481   enc_class enc_cmovff_reg( cmpOpF cmp, flagsRegF fcc, regD dst, regD src ) %{
2482     int op = (Assembler::arith_op << 30) |
2483              ($dst$$reg << 25) |
2484              (Assembler::fpop2_op3 << 19) |
2485              (0 << 18) |
2486              ($cmp$$cmpcode << 14) |
2487              ($fcc$$reg << 11) |            // cc2-cc0 bits for 'fccX'
2488              ($primary << 5) |              // select single, double or quad
2489              ($src$$reg << 0);
2490     cbuf.insts()->emit_int32(op);
2491   %}
2493   // Used by the MIN/MAX encodings.  Same as a CMOV, but
2494   // the condition comes from opcode-field instead of an argument.
// Relies on a preceding compare having set icc.
2495   enc_class enc_cmov_reg_minmax( iRegI dst, iRegI src ) %{
2496     int op = (Assembler::arith_op << 30) |
2497              ($dst$$reg << 25) |
2498              (Assembler::movcc_op3 << 19) |
2499              (1 << 18) |                    // cc2 bit for 'icc'
2500              ($primary << 14) |
2501              (0 << 13) |                    // select register move
2502              (0 << 11) |                    // cc1, cc0 bits for 'icc'
2503              ($src$$reg << 0);
2504     cbuf.insts()->emit_int32(op);
2505   %}

// Long MIN/MAX variant: conditions evaluated on xcc instead of icc.
2507   enc_class enc_cmov_reg_minmax_long( iRegL dst, iRegL src ) %{
2508     int op = (Assembler::arith_op << 30) |
2509              ($dst$$reg << 25) |
2510              (Assembler::movcc_op3 << 19) |
2511              (6 << 16) |                    // cc2 bit for 'xcc'
2512              ($primary << 14) |
2513              (0 << 13) |                    // select register move
2514              (0 << 11) |                    // cc1, cc0 bits for 'icc'
2515              ($src$$reg << 0);
2516     cbuf.insts()->emit_int32(op);
2517   %}
// Load a signed 13-bit constant: OR rd, %g0, simm13.
2519   enc_class Set13( immI13 src, iRegI rd ) %{
2520     emit3_simm13( cbuf, Assembler::arith_op, $rd$$reg, Assembler::or_op3, 0, $src$$constant );
2521   %}

// SETHI rd, hi22(src) — loads the upper 22 bits, zeroing the low 10.
2523   enc_class SetHi22( immI src, iRegI rd ) %{
2524     emit2_22( cbuf, Assembler::branch_op, $rd$$reg, Assembler::sethi_op2, $src$$constant );
2525   %}

// Full 32-bit constant load via the macro-assembler's set() (sethi+or as needed).
2527   enc_class Set32( immI src, iRegI rd ) %{
2528     MacroAssembler _masm(&cbuf);
2529     __ set($src$$constant, reg_to_register_object($rd$$reg));
2530   %}

// Debug-only sanity check after calls: SP + framesize must equal FP,
// otherwise trap. Only emitted when VerifyStackAtCalls is on.
2532   enc_class call_epilog %{
2533     if( VerifyStackAtCalls ) {
2534       MacroAssembler _masm(&cbuf);
2535       int framesize = ra_->C->frame_slots() << LogBytesPerInt;
2536       Register temp_reg = G3;
2537       __ add(SP, framesize, temp_reg);
2538       __ cmp(temp_reg, FP);
2539       __ breakpoint_trap(Assembler::notEqual, Assembler::ptr_cc);
2540     }
2541   %}
2543   // Long values come back from native calls in O0:O1 in the 32-bit VM, copy the value
2544   // to G1 so the register allocator will not have to deal with the misaligned register
2545   // pair.
// 32-bit only: G1 = (O0 << 32) | zero-extend(O1). The 0x1020 immediate is
// the shift count 32 with the X bit (0x1000) set for sllx.
2546   enc_class adjust_long_from_native_call %{
2547 #ifndef _LP64
2548     if (returns_long()) {
2549       //    sllx  O0,32,O0
2550       emit3_simm13( cbuf, Assembler::arith_op, R_O0_enc, Assembler::sllx_op3, R_O0_enc, 0x1020 );
2551       //    srl   O1,0,O1
2552       emit3_simm13( cbuf, Assembler::arith_op, R_O1_enc, Assembler::srl_op3, R_O1_enc, 0x0000 );
2553       //    or    O0,O1,G1
2554       emit3       ( cbuf, Assembler::arith_op, R_G1_enc, Assembler:: or_op3, R_O0_enc, 0, R_O1_enc );
2555     }
2556 #endif
2557   %}

2559   enc_class Java_To_Runtime (method meth) %{    // CALL Java_To_Runtime
2560     // CALL directly to the runtime
2561     // The user of this is responsible for ensuring that R_L7 is empty (killed).
2562     emit_call_reloc(cbuf, $meth$$method, relocInfo::runtime_call_type,
2563                     /*preserve_g2=*/true);
2564   %}

// Save SP in L7 around method-handle invokes so it can be restored after
// a possibly SP-adjusting MH call.
2566   enc_class preserve_SP %{
2567     MacroAssembler _masm(&cbuf);
2568     __ mov(SP, L7_mh_SP_save);
2569   %}

2571   enc_class restore_SP %{
2572     MacroAssembler _masm(&cbuf);
2573     __ mov(L7_mh_SP_save, SP);
2574   %}
// Static/opt-virtual Java call. The relocation type tells the runtime how
// to patch the call site; a resolved method (_method != NULL) also gets a
// Java-to-interpreter stub for the not-yet-compiled case.
2576   enc_class Java_Static_Call (method meth) %{    // JAVA STATIC CALL
2577     // CALL to fixup routine.  Fixup routine uses ScopeDesc info to determine
2578     // who we intended to call.
2579     if ( !_method ) {
2580       emit_call_reloc(cbuf, $meth$$method, relocInfo::runtime_call_type);
2581     } else if (_optimized_virtual) {
2582       emit_call_reloc(cbuf, $meth$$method, relocInfo::opt_virtual_call_type);
2583     } else {
2584       emit_call_reloc(cbuf, $meth$$method, relocInfo::static_call_type);
2585     }
2586     if( _method ) {  // Emit stub for static call
2587       emit_java_to_interp(cbuf);
2588     }
2589   %}
// Virtual Java call: either an inline-cache call (vtable_index < 0) or an
// inline vtable dispatch. The vtable path must account for the size of the
// load_klass sequence, which varies with UseCompressedKlassPointers (2 or 3
// instructions when compressed, 1 when not) so that
// MachCallDynamicJavaNode::ret_addr_offset stays in sync.
2591   enc_class Java_Dynamic_Call (method meth) %{    // JAVA DYNAMIC CALL
2592     MacroAssembler _masm(&cbuf);
2593     __ set_inst_mark();
2594     int vtable_index = this->_vtable_index;
2595     // MachCallDynamicJavaNode::ret_addr_offset uses this same test
2596     if (vtable_index < 0) {
2597       // must be invalid_vtable_index, not nonvirtual_vtable_index
2598       assert(vtable_index == Method::invalid_vtable_index, "correct sentinel value");
2599       Register G5_ic_reg = reg_to_register_object(Matcher::inline_cache_reg_encode());
2600       assert(G5_ic_reg == G5_inline_cache_reg, "G5_inline_cache_reg used in assemble_ic_buffer_code()");
2601       assert(G5_ic_reg == G5_megamorphic_method, "G5_megamorphic_method used in megamorphic call stub");
2602       __ ic_call((address)$meth$$method);
2603     } else {
2604       assert(!UseInlineCaches, "expect vtable calls only if not using ICs");
2605       // Just go thru the vtable
2606       // get receiver klass (receiver already checked for non-null)
2607       // If we end up going thru a c2i adapter interpreter expects method in G5
2608       int off = __ offset();
2609       __ load_klass(O0, G3_scratch);
2610       int klass_load_size;
2611       if (UseCompressedKlassPointers) {
2612         assert(Universe::heap() != NULL, "java heap should be initialized");
// With a NULL narrow-klass base the decode is one instruction shorter.
2613         if (Universe::narrow_klass_base() == NULL)
2614           klass_load_size = 2*BytesPerInstWord;
2615         else
2616           klass_load_size = 3*BytesPerInstWord;
2617       } else {
2618         klass_load_size = 1*BytesPerInstWord;
2619       }
2620       int entry_offset = InstanceKlass::vtable_start_offset() + vtable_index*vtableEntry::size();
2621       int v_off = entry_offset*wordSize + vtableEntry::method_offset_in_bytes();
// A simm13-reachable vtable slot loads directly; otherwise build the
// offset in G5 with sethi/or3 first (fixed 2-instruction sequence so the
// total size assert below holds).
2622       if (Assembler::is_simm13(v_off)) {
2623         __ ld_ptr(G3, v_off, G5_method);
2624       } else {
2625         // Generate 2 instructions
2626         __ Assembler::sethi(v_off & ~0x3ff, G5_method);
2627         __ or3(G5_method, v_off & 0x3ff, G5_method);
2628         // ld_ptr, set_hi, set
2629         assert(__ offset() - off == klass_load_size + 2*BytesPerInstWord,
2630                "Unexpected instruction size(s)");
2631         __ ld_ptr(G3, G5_method, G5_method);
2632       }
2633       // NOTE: for vtable dispatches, the vtable entry will never be null.
2634       // However it may very well end up in handle_wrong_method if the
2635       // method is abstract for the particular class.
2636       __ ld_ptr(G5_method, in_bytes(Method::from_compiled_offset()), G3_scratch);
2637       // jump to target (either compiled code or c2iadapter)
2638       __ jmpl(G3_scratch, G0, O7);
2639       __ delayed()->nop();
2640     }
2641   %}
// Indirect call to already-compiled Java code: load the entry point from the
// Method* in G5 and call through G3.
2643   enc_class Java_Compiled_Call (method meth) %{    // JAVA COMPILED CALL
2644     MacroAssembler _masm(&cbuf);
2646     Register G5_ic_reg = reg_to_register_object(Matcher::inline_cache_reg_encode());
2647     Register temp_reg = G3;   // caller must kill G3!  We cannot reuse G5_ic_reg here because
2648                               // we might be calling a C2I adapter which needs it.
2650     assert(temp_reg != G5_ic_reg, "conflicting registers");
2651     // Load nmethod
2652     __ ld_ptr(G5_ic_reg, in_bytes(Method::from_compiled_offset()), temp_reg);
2654     // CALL to compiled java, indirect the contents of G3
2655     __ set_inst_mark();
2656     __ callr(temp_reg, G0);
2657     __ delayed()->nop();
2658   %}
// 32-bit signed divide via 64-bit sdivx: sign-extend both operands with
// sra(reg, 0) first. NOTE: the sra writes back into the source registers,
// which is why the instructs use the iRegIsafe class.
2660   enc_class idiv_reg(iRegIsafe src1, iRegIsafe src2, iRegIsafe dst) %{
2661     MacroAssembler _masm(&cbuf);
2662     Register Rdividend = reg_to_register_object($src1$$reg);
2663     Register Rdivisor = reg_to_register_object($src2$$reg);
2664     Register Rresult = reg_to_register_object($dst$$reg);
2666     __ sra(Rdivisor, 0, Rdivisor);
2667     __ sra(Rdividend, 0, Rdividend);
2668     __ sdivx(Rdividend, Rdivisor, Rresult);
2669   %}

// 32-bit signed divide by a simm13 constant.
2671   enc_class idiv_imm(iRegIsafe src1, immI13 imm, iRegIsafe dst) %{
2672     MacroAssembler _masm(&cbuf);
2674     Register Rdividend = reg_to_register_object($src1$$reg);
2675     int divisor = $imm$$constant;
2676     Register Rresult = reg_to_register_object($dst$$reg);
2678     __ sra(Rdividend, 0, Rdividend);
2679     __ sdivx(Rdividend, divisor, Rresult);
2680   %}

// High 32 bits of a 32x32 signed multiply: widen, mulx, then shift down.
2682   enc_class enc_mul_hi(iRegIsafe dst, iRegIsafe src1, iRegIsafe src2) %{
2683     MacroAssembler _masm(&cbuf);
2684     Register Rsrc1 = reg_to_register_object($src1$$reg);
2685     Register Rsrc2 = reg_to_register_object($src2$$reg);
2686     Register Rdst = reg_to_register_object($dst$$reg);
2688     __ sra( Rsrc1, 0, Rsrc1 );
2689     __ sra( Rsrc2, 0, Rsrc2 );
2690     __ mulx( Rsrc1, Rsrc2, Rdst );
2691     __ srlx( Rdst, 32, Rdst );
2692   %}

// 32-bit signed remainder: r = dividend - (dividend / divisor) * divisor,
// computed with sdivx/mulx/sub using O7 as scratch.
2694   enc_class irem_reg(iRegIsafe src1, iRegIsafe src2, iRegIsafe dst, o7RegL scratch) %{
2695     MacroAssembler _masm(&cbuf);
2696     Register Rdividend = reg_to_register_object($src1$$reg);
2697     Register Rdivisor = reg_to_register_object($src2$$reg);
2698     Register Rresult = reg_to_register_object($dst$$reg);
2699     Register Rscratch = reg_to_register_object($scratch$$reg);
2701     assert(Rdividend != Rscratch, "");
2702     assert(Rdivisor  != Rscratch, "");
2704     __ sra(Rdividend, 0, Rdividend);
2705     __ sra(Rdivisor, 0, Rdivisor);
2706     __ sdivx(Rdividend, Rdivisor, Rscratch);
2707     __ mulx(Rscratch, Rdivisor, Rscratch);
2708     __ sub(Rdividend, Rscratch, Rresult);
2709   %}

// Remainder by a simm13 constant; same divide/multiply-back/subtract scheme.
2711   enc_class irem_imm(iRegIsafe src1, immI13 imm, iRegIsafe dst, o7RegL scratch) %{
2712     MacroAssembler _masm(&cbuf);
2714     Register Rdividend = reg_to_register_object($src1$$reg);
2715     int divisor = $imm$$constant;
2716     Register Rresult = reg_to_register_object($dst$$reg);
2717     Register Rscratch = reg_to_register_object($scratch$$reg);
2719     assert(Rdividend != Rscratch, "");
2721     __ sra(Rdividend, 0, Rdividend);
2722     __ sdivx(Rdividend, divisor, Rscratch);
2723     __ mulx(Rscratch, divisor, Rscratch);
2724     __ sub(Rdividend, Rscratch, Rresult);
2725   %}
// Unary FP operations emitted through the macro-assembler; each pairs the
// FloatRegisterImpl width (S/D) with the matching register-object lookup.

2727   enc_class fabss (sflt_reg dst, sflt_reg src) %{
2728     MacroAssembler _masm(&cbuf);
2730     FloatRegister Fdst = reg_to_SingleFloatRegister_object($dst$$reg);
2731     FloatRegister Fsrc = reg_to_SingleFloatRegister_object($src$$reg);
2733     __ fabs(FloatRegisterImpl::S, Fsrc, Fdst);
2734   %}

2736   enc_class fabsd (dflt_reg dst, dflt_reg src) %{
2737     MacroAssembler _masm(&cbuf);
2739     FloatRegister Fdst = reg_to_DoubleFloatRegister_object($dst$$reg);
2740     FloatRegister Fsrc = reg_to_DoubleFloatRegister_object($src$$reg);
2742     __ fabs(FloatRegisterImpl::D, Fsrc, Fdst);
2743   %}

2745   enc_class fnegd (dflt_reg dst, dflt_reg src) %{
2746     MacroAssembler _masm(&cbuf);
2748     FloatRegister Fdst = reg_to_DoubleFloatRegister_object($dst$$reg);
2749     FloatRegister Fsrc = reg_to_DoubleFloatRegister_object($src$$reg);
2751     __ fneg(FloatRegisterImpl::D, Fsrc, Fdst);
2752   %}

2754   enc_class fsqrts (sflt_reg dst, sflt_reg src) %{
2755     MacroAssembler _masm(&cbuf);
2757     FloatRegister Fdst = reg_to_SingleFloatRegister_object($dst$$reg);
2758     FloatRegister Fsrc = reg_to_SingleFloatRegister_object($src$$reg);
2760     __ fsqrt(FloatRegisterImpl::S, Fsrc, Fdst);
2761   %}

2763   enc_class fsqrtd (dflt_reg dst, dflt_reg src) %{
2764     MacroAssembler _masm(&cbuf);
2766     FloatRegister Fdst = reg_to_DoubleFloatRegister_object($dst$$reg);
2767     FloatRegister Fsrc = reg_to_DoubleFloatRegister_object($src$$reg);
2769     __ fsqrt(FloatRegisterImpl::D, Fsrc, Fdst);
2770   %}

// NOTE(review): declared with dflt_reg operands but emits a single-precision
// fmov using single-register lookups — presumably intentional (moving one
// half), but worth confirming against the instruct that uses it.
2772   enc_class fmovs (dflt_reg dst, dflt_reg src) %{
2773     MacroAssembler _masm(&cbuf);
2775     FloatRegister Fdst = reg_to_SingleFloatRegister_object($dst$$reg);
2776     FloatRegister Fsrc = reg_to_SingleFloatRegister_object($src$$reg);
2778     __ fmov(FloatRegisterImpl::S, Fsrc, Fdst);
2779   %}

2781   enc_class fmovd (dflt_reg dst, dflt_reg src) %{
2782     MacroAssembler _masm(&cbuf);
2784     FloatRegister Fdst = reg_to_DoubleFloatRegister_object($dst$$reg);
2785     FloatRegister Fsrc = reg_to_DoubleFloatRegister_object($src$$reg);
2787     __ fmov(FloatRegisterImpl::D, Fsrc, Fdst);
2788   %}
// Fast-path monitor enter; heavy lifting (biased locking, CAS on the mark
// word, slow-path branch) lives in MacroAssembler::compiler_lock_object.
2790   enc_class Fast_Lock(iRegP oop, iRegP box, o7RegP scratch, iRegP scratch2) %{
2791     MacroAssembler _masm(&cbuf);
2793     Register Roop  = reg_to_register_object($oop$$reg);
2794     Register Rbox  = reg_to_register_object($box$$reg);
2795     Register Rscratch = reg_to_register_object($scratch$$reg);
2796     Register Rmark =  reg_to_register_object($scratch2$$reg);
2798     assert(Roop  != Rscratch, "");
2799     assert(Roop  != Rmark, "");
2800     assert(Rbox  != Rscratch, "");
2801     assert(Rbox  != Rmark, "");
2803     __ compiler_lock_object(Roop, Rmark, Rbox, Rscratch, _counters, UseBiasedLocking && !UseOptoBiasInlining);
2804   %}

// Fast-path monitor exit, mirroring Fast_Lock.
2806   enc_class Fast_Unlock(iRegP oop, iRegP box, o7RegP scratch, iRegP scratch2) %{
2807     MacroAssembler _masm(&cbuf);
2809     Register Roop  = reg_to_register_object($oop$$reg);
2810     Register Rbox  = reg_to_register_object($box$$reg);
2811     Register Rscratch = reg_to_register_object($scratch$$reg);
2812     Register Rmark =  reg_to_register_object($scratch2$$reg);
2814     assert(Roop  != Rscratch, "");
2815     assert(Roop  != Rmark, "");
2816     assert(Rbox  != Rscratch, "");
2817     assert(Rbox  != Rmark, "");
2819     __ compiler_unlock_object(Roop, Rmark, Rbox, Rscratch, UseBiasedLocking && !UseOptoBiasInlining);
2820   %}
// Pointer compare-and-swap; the trailing cmp leaves condition codes set
// for the consuming branch/cmov.
2822   enc_class enc_cas( iRegP mem, iRegP old, iRegP new ) %{
2823     MacroAssembler _masm(&cbuf);
2824     Register Rmem = reg_to_register_object($mem$$reg);
2825     Register Rold = reg_to_register_object($old$$reg);
2826     Register Rnew = reg_to_register_object($new$$reg);
2828     // casn picks the pointer-sized encoding:
2829     // For 32-bit pointers you get a 32-bit CAS
2830     // For 64-bit pointers you get a 64-bit CASX
2831     __ casn(Rmem, Rold, Rnew); // Swap(*Rmem,Rnew) if *Rmem == Rold
2832     __ cmp( Rold, Rnew );
2833   %}

// 64-bit CAS: O7 holds the swap value so Rnew survives for retry loops.
2835   enc_class enc_casx( iRegP mem, iRegL old, iRegL new) %{
2836     Register Rmem = reg_to_register_object($mem$$reg);
2837     Register Rold = reg_to_register_object($old$$reg);
2838     Register Rnew = reg_to_register_object($new$$reg);
2840     MacroAssembler _masm(&cbuf);
2841     __ mov(Rnew, O7);
2842     __ casx(Rmem, Rold, O7);
2843     __ cmp( Rold, O7 );
2844   %}

2846   // raw int cas, used for compareAndSwap
2847   enc_class enc_casi( iRegP mem, iRegL old, iRegL new) %{
2848     Register Rmem = reg_to_register_object($mem$$reg);
2849     Register Rold = reg_to_register_object($old$$reg);
2850     Register Rnew = reg_to_register_object($new$$reg);
2852     MacroAssembler _masm(&cbuf);
2853     __ mov(Rnew, O7);
2854     __ cas(Rmem, Rold, O7);
2855     __ cmp( Rold, O7 );
2856   %}

// Convert "xcc not-equal" into a 0/1 boolean: start at 1, zero on ne.
2858   enc_class enc_lflags_ne_to_boolean( iRegI res ) %{
2859     Register Rres = reg_to_register_object($res$$reg);
2861     MacroAssembler _masm(&cbuf);
2862     __ mov(1, Rres);
2863     __ movcc( Assembler::notEqual, false, Assembler::xcc, G0, Rres );
2864   %}

// Same as above but on the 32-bit icc condition codes.
2866   enc_class enc_iflags_ne_to_boolean( iRegI res ) %{
2867     Register Rres = reg_to_register_object($res$$reg);
2869     MacroAssembler _masm(&cbuf);
2870     __ mov(1, Rres);
2871     __ movcc( Assembler::notEqual, false, Assembler::icc, G0, Rres );
2872   %}
// Three-way FP compare producing -1/0/1 in an integer register; $primary
// selects single (true) vs double precision, and unordered maps to -1.
2874   enc_class floating_cmp ( iRegP dst, regF src1, regF src2 ) %{
2875     MacroAssembler _masm(&cbuf);
2876     Register Rdst = reg_to_register_object($dst$$reg);
2877     FloatRegister Fsrc1 = $primary ? reg_to_SingleFloatRegister_object($src1$$reg)
2878                                      : reg_to_DoubleFloatRegister_object($src1$$reg);
2879     FloatRegister Fsrc2 = $primary ? reg_to_SingleFloatRegister_object($src2$$reg)
2880                                      : reg_to_DoubleFloatRegister_object($src2$$reg);
2882     // Convert condition code fcc0 into -1,0,1; unordered reports less-than (-1)
2883     __ float_cmp( $primary, -1, Fsrc1, Fsrc2, Rdst);
2884   %}
// String.compareTo intrinsic. Strategy: compute the minimum length (diff
// stashed in O7 as tie-breaker), compare the first characters, short-circuit
// on identical arrays, then loop over the remaining chars with negative-index
// addressing from the array ends. Result: char difference at first mismatch,
// else the length difference.
2887   enc_class enc_String_Compare(o0RegP str1, o1RegP str2, g3RegI cnt1, g4RegI cnt2, notemp_iRegI result) %{
2888     Label Ldone, Lloop;
2889     MacroAssembler _masm(&cbuf);
2891     Register   str1_reg = reg_to_register_object($str1$$reg);
2892     Register   str2_reg = reg_to_register_object($str2$$reg);
2893     Register   cnt1_reg = reg_to_register_object($cnt1$$reg);
2894     Register   cnt2_reg = reg_to_register_object($cnt2$$reg);
2895     Register result_reg = reg_to_register_object($result$$reg);
2897     assert(result_reg != str1_reg &&
2898            result_reg != str2_reg &&
2899            result_reg != cnt1_reg &&
2900            result_reg != cnt2_reg ,
2901            "need different registers");
2903     // Compute the minimum of the string lengths(str1_reg) and the
2904     // difference of the string lengths (stack)
2906     // See if the lengths are different, and calculate min in str1_reg.
2907     // Stash diff in O7 in case we need it for a tie-breaker.
2908     Label Lskip;
2909     __ subcc(cnt1_reg, cnt2_reg, O7);
2910     __ sll(cnt1_reg, exact_log2(sizeof(jchar)), cnt1_reg); // scale the limit
2911     __ br(Assembler::greater, true, Assembler::pt, Lskip);
2912     // cnt2 is shorter, so use its count:
2913     __ delayed()->sll(cnt2_reg, exact_log2(sizeof(jchar)), cnt1_reg); // scale the limit
2914     __ bind(Lskip);
2916     // reallocate cnt1_reg, cnt2_reg, result_reg
2917     // Note:  limit_reg holds the string length pre-scaled by 2
2918     Register limit_reg =   cnt1_reg;
2919     Register  chr2_reg =   cnt2_reg;
2920     Register  chr1_reg = result_reg;
2921     // str{12} are the base pointers
2923     // Is the minimum length zero?
2924     __ cmp(limit_reg, (int)(0 * sizeof(jchar))); // use cast to resolve overloading ambiguity
2925     __ br(Assembler::equal, true, Assembler::pn, Ldone);
2926     __ delayed()->mov(O7, result_reg);  // result is difference in lengths
2928     // Load first characters
2929     __ lduh(str1_reg, 0, chr1_reg);
2930     __ lduh(str2_reg, 0, chr2_reg);
2932     // Compare first characters
2933     __ subcc(chr1_reg, chr2_reg, chr1_reg);
2934     __ br(Assembler::notZero, false, Assembler::pt,  Ldone);
2935     assert(chr1_reg == result_reg, "result must be pre-placed");
2936     __ delayed()->nop();
2938     {
2939       // Check after comparing first character to see if strings are equivalent
2940       Label LSkip2;
2941       // Check if the strings start at same location
2942       __ cmp(str1_reg, str2_reg);
2943       __ brx(Assembler::notEqual, true, Assembler::pt, LSkip2);
2944       __ delayed()->nop();
2946       // Check if the length difference is zero (in O7)
2947       __ cmp(G0, O7);
2948       __ br(Assembler::equal, true, Assembler::pn, Ldone);
2949       __ delayed()->mov(G0, result_reg);  // result is zero
2951       // Strings might not be equal
2952       __ bind(LSkip2);
2953     }
// First char already compared, so start the loop one char in.
2955     __ subcc(limit_reg, 1 * sizeof(jchar), chr1_reg);
2956     __ br(Assembler::equal, true, Assembler::pn, Ldone);
2957     __ delayed()->mov(O7, result_reg);  // result is difference in lengths
2959     // Shift str1_reg and str2_reg to the end of the arrays, negate limit
2960     __ add(str1_reg, limit_reg, str1_reg);
2961     __ add(str2_reg, limit_reg, str2_reg);
2962     __ neg(chr1_reg, limit_reg);  // limit = -(limit-2)
2964     // Compare the rest of the characters
2965     __ lduh(str1_reg, limit_reg, chr1_reg);
2966     __ bind(Lloop);
2967     // __ lduh(str1_reg, limit_reg, chr1_reg); // hoisted
2968     __ lduh(str2_reg, limit_reg, chr2_reg);
2969     __ subcc(chr1_reg, chr2_reg, chr1_reg);
2970     __ br(Assembler::notZero, false, Assembler::pt, Ldone);
2971     assert(chr1_reg == result_reg, "result must be pre-placed");
2972     __ delayed()->inccc(limit_reg, sizeof(jchar));
2973     // annul LDUH if branch is not taken to prevent access past end of string
2974     __ br(Assembler::notZero, true, Assembler::pt, Lloop);
2975     __ delayed()->lduh(str1_reg, limit_reg, chr1_reg); // hoisted
2977     // If strings are equal up to min length, return the length difference.
2978     __ mov(O7, result_reg);
2980     // Otherwise, return the difference between the first mismatched chars.
2981     __ bind(Ldone);
2982   %}
// String.equals intrinsic (lengths already known equal by the caller's
// check — TODO(review) confirm against the instruct rule). Short-circuits
// identical char[] and zero count; uses the 4-byte-aligned word comparison
// in char_arrays_equals when both arrays allow it, else a char-by-char loop
// with negative-index addressing. Result: 1 equal / 0 not equal.
2984   enc_class enc_String_Equals(o0RegP str1, o1RegP str2, g3RegI cnt, notemp_iRegI result) %{
2985     Label Lword_loop, Lpost_word, Lchar, Lchar_loop, Ldone;
2986     MacroAssembler _masm(&cbuf);
2988     Register   str1_reg = reg_to_register_object($str1$$reg);
2989     Register   str2_reg = reg_to_register_object($str2$$reg);
2990     Register    cnt_reg = reg_to_register_object($cnt$$reg);
2991     Register   tmp1_reg = O7;
2992     Register result_reg = reg_to_register_object($result$$reg);
2994     assert(result_reg != str1_reg &&
2995            result_reg != str2_reg &&
2996            result_reg !=  cnt_reg &&
2997            result_reg != tmp1_reg ,
2998            "need different registers");
3000     __ cmp(str1_reg, str2_reg); //same char[] ?
3001     __ brx(Assembler::equal, true, Assembler::pn, Ldone);
3002     __ delayed()->add(G0, 1, result_reg);
3004     __ cmp_zero_and_br(Assembler::zero, cnt_reg, Ldone, true, Assembler::pn);
3005     __ delayed()->add(G0, 1, result_reg); // count == 0
3007     //rename registers
3008     Register limit_reg =    cnt_reg;
3009     Register  chr1_reg = result_reg;
3010     Register  chr2_reg =   tmp1_reg;
3012     //check for alignment and position the pointers to the ends
3013     __ or3(str1_reg, str2_reg, chr1_reg);
3014     __ andcc(chr1_reg, 0x3, chr1_reg);
3015     // notZero means at least one not 4-byte aligned.
3016     // We could optimize the case when both arrays are not aligned
3017     // but it is not frequent case and it requires additional checks.
3018     __ br(Assembler::notZero, false, Assembler::pn, Lchar); // char by char compare
3019     __ delayed()->sll(limit_reg, exact_log2(sizeof(jchar)), limit_reg); // set byte count
3021     // Compare char[] arrays aligned to 4 bytes.
3022     __ char_arrays_equals(str1_reg, str2_reg, limit_reg, result_reg,
3023                           chr1_reg, chr2_reg, Ldone);
3024     __ ba(Ldone);
3025     __ delayed()->add(G0, 1, result_reg);
3027     // char by char compare
3028     __ bind(Lchar);
3029     __ add(str1_reg, limit_reg, str1_reg);
3030     __ add(str2_reg, limit_reg, str2_reg);
3031     __ neg(limit_reg); //negate count
3033     __ lduh(str1_reg, limit_reg, chr1_reg);
3034     // Lchar_loop
3035     __ bind(Lchar_loop);
3036     __ lduh(str2_reg, limit_reg, chr2_reg);
3037     __ cmp(chr1_reg, chr2_reg);
3038     __ br(Assembler::notEqual, true, Assembler::pt, Ldone);
3039     __ delayed()->mov(G0, result_reg); //not equal
3040     __ inccc(limit_reg, sizeof(jchar));
3041     // annul LDUH if branch is not taken to prevent access past end of string
3042     __ br(Assembler::notZero, true, Assembler::pt, Lchar_loop);
3043     __ delayed()->lduh(str1_reg, limit_reg, chr1_reg); // hoisted
3045     __ add(G0, 1, result_reg); //equal
3047     __ bind(Ldone);
3048   %}
// Arrays.equals(char[], char[]) intrinsic. Short-circuits identical arrays,
// null arrays, and mismatched lengths, then delegates the aligned word
// comparison to char_arrays_equals. Result: 1 equal / 0 not equal.
3050   enc_class enc_Array_Equals(o0RegP ary1, o1RegP ary2, g3RegP tmp1, notemp_iRegI result) %{
3051     Label Lvector, Ldone, Lloop;
3052     MacroAssembler _masm(&cbuf);
3054     Register   ary1_reg = reg_to_register_object($ary1$$reg);
3055     Register   ary2_reg = reg_to_register_object($ary2$$reg);
3056     Register   tmp1_reg = reg_to_register_object($tmp1$$reg);
3057     Register   tmp2_reg = O7;
3058     Register result_reg = reg_to_register_object($result$$reg);
3060     int length_offset  = arrayOopDesc::length_offset_in_bytes();
3061     int base_offset    = arrayOopDesc::base_offset_in_bytes(T_CHAR);
3063     // return true if the same array
3064     __ cmp(ary1_reg, ary2_reg);
3065     __ brx(Assembler::equal, true, Assembler::pn, Ldone);
3066     __ delayed()->add(G0, 1, result_reg); // equal
3068     __ br_null(ary1_reg, true, Assembler::pn, Ldone);
3069     __ delayed()->mov(G0, result_reg);    // not equal
3071     __ br_null(ary2_reg, true, Assembler::pn, Ldone);
3072     __ delayed()->mov(G0, result_reg);    // not equal
3074     //load the lengths of arrays
3075     __ ld(Address(ary1_reg, length_offset), tmp1_reg);
3076     __ ld(Address(ary2_reg, length_offset), tmp2_reg);
3078     // return false if the two arrays are not equal length
3079     __ cmp(tmp1_reg, tmp2_reg);
3080     __ br(Assembler::notEqual, true, Assembler::pn, Ldone);
3081     __ delayed()->mov(G0, result_reg);     // not equal
3083     __ cmp_zero_and_br(Assembler::zero, tmp1_reg, Ldone, true, Assembler::pn);
3084     __ delayed()->add(G0, 1, result_reg); // zero-length arrays are equal
3086     // load array addresses
3087     __ add(ary1_reg, base_offset, ary1_reg);
3088     __ add(ary2_reg, base_offset, ary2_reg);
3090     // renaming registers
3091     Register chr1_reg  = result_reg; // for characters in ary1
3092     Register chr2_reg  =   tmp2_reg; // for characters in ary2
3093     Register limit_reg =   tmp1_reg; // length
3095     // set byte count
3096     __ sll(limit_reg, exact_log2(sizeof(jchar)), limit_reg);
3098     // Compare char[] arrays aligned to 4 bytes.
3099     __ char_arrays_equals(ary1_reg, ary2_reg, limit_reg, result_reg,
3100                           chr1_reg, chr2_reg, Ldone);
3101     __ add(G0, 1, result_reg); // equals
3103     __ bind(Ldone);
3104   %}
  // Tail-jump to the shared rethrow stub (OptoRuntime::rethrow_stub()).
  // Uses G3 as scratch for the stub address; G3 must not alias I0, which
  // carries the exception oop into the stub.  In ASSERT builds the PC of
  // the jump is first recorded in the global 'last_rethrow' for debugging.
  enc_class enc_rethrow() %{
    cbuf.set_insts_mark();
    Register temp_reg = G3;
    AddressLiteral rethrow_stub(OptoRuntime::rethrow_stub());
    assert(temp_reg != reg_to_register_object(R_I0_num), "temp must not break oop_reg");
    MacroAssembler _masm(&cbuf);
#ifdef ASSERT
    __ save_frame(0);
    AddressLiteral last_rethrow_addrlit(&last_rethrow);
    __ sethi(last_rethrow_addrlit, L1);
    Address addr(L1, last_rethrow_addrlit.low10());
    __ get_pc(L2);
    __ inc(L2, 3 * BytesPerInstWord);  // skip this & 2 more insns to point at jump_to
    __ st_ptr(L2, addr);
    __ restore();
#endif
    __ JUMP(rethrow_stub, temp_reg, 0); // sethi;jmp
    __ delayed()->nop();
  %}
  // The three encodings below emit fixed 32-bit instruction words that act
  // as unit-specific nops.  They are used by the scheduler to fill issue
  // slots for a particular functional unit without perturbing others.

  enc_class emit_mem_nop() %{
    // Generates the instruction LDUXA [o6,g0],#0x82,g0 (memory-unit nop)
    cbuf.insts()->emit_int32((unsigned int) 0xc0839040);
  %}

  enc_class emit_fadd_nop() %{
    // Generates the instruction FMOVS f31,f31 (FP-unit nop)
    cbuf.insts()->emit_int32((unsigned int) 0xbfa0003f);
  %}

  enc_class emit_br_nop() %{
    // Generates the instruction BPN,PN . (branch-unit nop: never-taken branch)
    cbuf.insts()->emit_int32((unsigned int) 0x00400000);
  %}
  // Memory barrier encodings.  The mask bits passed to membar() select
  // which ordering constraints are enforced; on TSO hardware some of
  // these may assemble to nothing (see MacroAssembler::membar).

  // acquire: no later memory op may move above a preceding load
  enc_class enc_membar_acquire %{
    MacroAssembler _masm(&cbuf);
    __ membar( Assembler::Membar_mask_bits(Assembler::LoadStore | Assembler::LoadLoad) );
  %}

  // release: no earlier memory op may move below a following store
  enc_class enc_membar_release %{
    MacroAssembler _masm(&cbuf);
    __ membar( Assembler::Membar_mask_bits(Assembler::LoadStore | Assembler::StoreStore) );
  %}

  // volatile: full StoreLoad fence (the only barrier TSO actually needs)
  enc_class enc_membar_volatile %{
    MacroAssembler _masm(&cbuf);
    __ membar( Assembler::Membar_mask_bits(Assembler::StoreLoad) );
  %}
3156 %}
3158 //----------FRAME--------------------------------------------------------------
3159 // Definition of frame structure and management information.
3160 //
3161 // S T A C K L A Y O U T Allocators stack-slot number
3162 // | (to get allocators register number
3163 // G Owned by | | v add VMRegImpl::stack0)
3164 // r CALLER | |
3165 // o | +--------+ pad to even-align allocators stack-slot
3166 // w V | pad0 | numbers; owned by CALLER
3167 // t -----------+--------+----> Matcher::_in_arg_limit, unaligned
3168 // h ^ | in | 5
3169 // | | args | 4 Holes in incoming args owned by SELF
3170 // | | | | 3
3171 // | | +--------+
3172 // V | | old out| Empty on Intel, window on Sparc
3173 // | old |preserve| Must be even aligned.
3174 // | SP-+--------+----> Matcher::_old_SP, 8 (or 16 in LP64)-byte aligned
3175 // | | in | 3 area for Intel ret address
3176 // Owned by |preserve| Empty on Sparc.
3177 // SELF +--------+
3178 // | | pad2 | 2 pad to align old SP
3179 // | +--------+ 1
3180 // | | locks | 0
3181 // | +--------+----> VMRegImpl::stack0, 8 (or 16 in LP64)-byte aligned
3182 // | | pad1 | 11 pad to align new SP
3183 // | +--------+
3184 // | | | 10
3185 // | | spills | 9 spills
3186 // V | | 8 (pad0 slot for callee)
3187 // -----------+--------+----> Matcher::_out_arg_limit, unaligned
3188 // ^ | out | 7
3189 // | | args | 6 Holes in outgoing args owned by CALLEE
3190 // Owned by +--------+
3191 // CALLEE | new out| 6 Empty on Intel, window on Sparc
3192 // | new |preserve| Must be even-aligned.
3193 // | SP-+--------+----> Matcher::_new_SP, even aligned
3194 // | | |
3195 //
3196 // Note 1: Only region 8-11 is determined by the allocator. Region 0-5 is
3197 // known from SELF's arguments and the Java calling convention.
3198 // Region 6-7 is determined per call site.
3199 // Note 2: If the calling convention leaves holes in the incoming argument
3200 // area, those holes are owned by SELF. Holes in the outgoing area
//             are owned by the CALLEE.  Holes should not be necessary in the
3202 // incoming area, as the Java calling convention is completely under
3203 // the control of the AD file. Doubles can be sorted and packed to
//             avoid holes.  Holes in the outgoing arguments may be necessary for
3205 // varargs C calling conventions.
3206 // Note 3: Region 0-3 is even aligned, with pad2 as needed. Region 3-5 is
3207 // even aligned with pad0 as needed.
3208 // Region 6 is even aligned. Region 6-7 is NOT even aligned;
3209 // region 6-11 is even aligned; it may be padded out more so that
3210 // the region from SP to FP meets the minimum stack alignment.
// Frame definition: stack direction, calling-convention registers, frame
// pointer, alignment, preserve areas, and the register assignment of
// Java/C return values.  See the stack-layout diagram above.
frame %{
  // What direction does stack grow in (assumed to be same for native & Java)
  stack_direction(TOWARDS_LOW);

  // These two registers define part of the calling convention
  // between compiled code and the interpreter.
  inline_cache_reg(R_G5);                // Inline Cache Register or Method* for I2C
  interpreter_method_oop_reg(R_G5);      // Method Oop Register when calling interpreter

  // Optional: name the operand used by cisc-spilling to access [stack_pointer + offset]
  cisc_spilling_operand_name(indOffset);

  // Number of stack slots consumed by a Monitor enter
#ifdef _LP64
  sync_stack_slots(2);
#else
  sync_stack_slots(1);
#endif

  // Compiled code's Frame Pointer
  frame_pointer(R_SP);

  // Stack alignment requirement
  stack_alignment(StackAlignmentInBytes);
  //  LP64: Alignment size in bytes (128-bit -> 16 bytes)
  // !LP64: Alignment size in bytes ( 64-bit ->  8 bytes)

  // Number of stack slots between incoming argument block and the start of
  // a new frame.  The PROLOG must add this many slots to the stack.  The
  // EPILOG must remove this many slots.
  in_preserve_stack_slots(0);

  // Number of outgoing stack slots killed above the out_preserve_stack_slots
  // for calls to C.  Supports the var-args backing area for register parms.
  // ADLC doesn't support parsing expressions, so I folded the math by hand.
#ifdef _LP64
  // (callee_register_argument_save_area_words (6) + callee_aggregate_return_pointer_words (0)) * 2-stack-slots-per-word
  varargs_C_out_slots_killed(12);
#else
  // (callee_register_argument_save_area_words (6) + callee_aggregate_return_pointer_words (1)) * 1-stack-slots-per-word
  varargs_C_out_slots_killed( 7);
#endif

  // The after-PROLOG location of the return address.  Location of
  // return address specifies a type (REG or STACK) and a number
  // representing the register number (i.e. - use a register name) or
  // stack slot.
  return_addr(REG R_I7);          // Ret Addr is in register I7

  // Body of function which returns an OptoRegs array locating
  // arguments either in registers or in stack slots for calling
  // java
  calling_convention %{
    (void) SharedRuntime::java_calling_convention(sig_bt, regs, length, is_outgoing);

  %}

  // Body of function which returns an OptoRegs array locating
  // arguments either in registers or in stack slots for calling
  // C.
  c_calling_convention %{
    // This is obviously always outgoing
    (void) SharedRuntime::c_calling_convention(sig_bt, regs, length);
  %}

  // Location of native (C/C++) and interpreter return values.  This is specified to
  // be the same as Java.  In the 32-bit VM, long values are actually returned from
  // native calls in O0:O1 and returned to the interpreter in I0:I1.  The copying
  // to and from the register pairs is done by the appropriate call and epilog
  // opcodes.  This simplifies the register allocator.
  // Tables are indexed by ideal register number (Op_RegI..Op_RegL);
  // separate lo/hi halves cover register pairs on 32-bit.
  c_return_value %{
    assert( ideal_reg >= Op_RegI && ideal_reg <= Op_RegL, "only return normal values" );
#ifdef     _LP64
    static int lo_out[Op_RegL+1] = { OptoReg::Bad, OptoReg::Bad, R_O0_num,     R_O0_num,     R_O0_num,     R_F0_num,     R_F0_num, R_O0_num };
    static int hi_out[Op_RegL+1] = { OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, R_O0H_num,    OptoReg::Bad, R_F1_num, R_O0H_num};
    static int lo_in [Op_RegL+1] = { OptoReg::Bad, OptoReg::Bad, R_I0_num,     R_I0_num,     R_I0_num,     R_F0_num,     R_F0_num, R_I0_num };
    static int hi_in [Op_RegL+1] = { OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, R_I0H_num,    OptoReg::Bad, R_F1_num, R_I0H_num};
#else  // !_LP64
    static int lo_out[Op_RegL+1] = { OptoReg::Bad, OptoReg::Bad, R_O0_num,     R_O0_num,     R_O0_num,     R_F0_num,     R_F0_num, R_G1_num };
    static int hi_out[Op_RegL+1] = { OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, R_F1_num, R_G1H_num };
    static int lo_in [Op_RegL+1] = { OptoReg::Bad, OptoReg::Bad, R_I0_num,     R_I0_num,     R_I0_num,     R_F0_num,     R_F0_num, R_G1_num };
    static int hi_in [Op_RegL+1] = { OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, R_F1_num, R_G1H_num };
#endif
    return OptoRegPair( (is_outgoing?hi_out:hi_in)[ideal_reg],
                        (is_outgoing?lo_out:lo_in)[ideal_reg] );
  %}

  // Location of compiled Java return values.  Same as C
  return_value %{
    assert( ideal_reg >= Op_RegI && ideal_reg <= Op_RegL, "only return normal values" );
#ifdef     _LP64
    static int lo_out[Op_RegL+1] = { OptoReg::Bad, OptoReg::Bad, R_O0_num,     R_O0_num,     R_O0_num,     R_F0_num,     R_F0_num, R_O0_num };
    static int hi_out[Op_RegL+1] = { OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, R_O0H_num,    OptoReg::Bad, R_F1_num, R_O0H_num};
    static int lo_in [Op_RegL+1] = { OptoReg::Bad, OptoReg::Bad, R_I0_num,     R_I0_num,     R_I0_num,     R_F0_num,     R_F0_num, R_I0_num };
    static int hi_in [Op_RegL+1] = { OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, R_I0H_num,    OptoReg::Bad, R_F1_num, R_I0H_num};
#else  // !_LP64
    static int lo_out[Op_RegL+1] = { OptoReg::Bad, OptoReg::Bad, R_O0_num,     R_O0_num,     R_O0_num,     R_F0_num,     R_F0_num, R_G1_num };
    static int hi_out[Op_RegL+1] = { OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, R_F1_num, R_G1H_num};
    static int lo_in [Op_RegL+1] = { OptoReg::Bad, OptoReg::Bad, R_I0_num,     R_I0_num,     R_I0_num,     R_F0_num,     R_F0_num, R_G1_num };
    static int hi_in [Op_RegL+1] = { OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, OptoReg::Bad, R_F1_num, R_G1H_num};
#endif
    return OptoRegPair( (is_outgoing?hi_out:hi_in)[ideal_reg],
                        (is_outgoing?lo_out:lo_in)[ideal_reg] );
  %}

%}
//----------ATTRIBUTES---------------------------------------------------------
//----------Operand Attributes-------------------------------------------------
op_attrib op_cost(1);          // Required cost attribute

//----------Instruction Attributes---------------------------------------------
ins_attrib ins_cost(DEFAULT_COST); // Required cost attribute
ins_attrib ins_size(32);           // Required size attribute (in bits)
ins_attrib ins_avoid_back_to_back(0); // instruction should not be generated back to back
ins_attrib ins_short_branch(0);    // Required flag: is this instruction a
                                   // non-matching short branch variant of some
                                   // long branch?
3332 //----------OPERANDS-----------------------------------------------------------
3333 // Operand definitions must precede instruction definitions for correct parsing
3334 // in the ADLC because operands constitute user defined types which are used in
3335 // instruction definitions.
3337 //----------Simple Operands----------------------------------------------------
3338 // Immediate Operands
3339 // Integer Immediate: 32-bit
// Integer Immediate: 32-bit
operand immI() %{
  match(ConI);

  op_cost(0);
  // formats are generated automatically for constants and base registers
  format %{ %}
  interface(CONST_INTER);
%}

// Integer Immediate: 8-bit
operand immI8() %{
  predicate(Assembler::is_simm8(n->get_int()));
  match(ConI);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Integer Immediate: 13-bit (fits the simm13 field of most SPARC insns)
operand immI13() %{
  predicate(Assembler::is_simm13(n->get_int()));
  match(ConI);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}

// Integer Immediate: 13-bit minus 7 (value+7 must still fit in simm13)
operand immI13m7() %{
  predicate((-4096 < n->get_int()) && ((n->get_int() + 7) <= 4095));
  match(ConI);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}

// Integer Immediate: 16-bit
operand immI16() %{
  predicate(Assembler::is_simm16(n->get_int()));
  match(ConI);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Unsigned (positive) Integer Immediate: 13-bit
operand immU13() %{
  predicate((0 <= n->get_int()) && Assembler::is_simm13(n->get_int()));
  match(ConI);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}

// Integer Immediate: 6-bit (unsigned, 0..63; e.g. 64-bit shift counts)
operand immU6() %{
  predicate(n->get_int() >= 0 && n->get_int() <= 63);
  match(ConI);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Integer Immediate: 11-bit
operand immI11() %{
  predicate(Assembler::is_simm11(n->get_int()));
  match(ConI);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Integer Immediate: 5-bit
operand immI5() %{
  predicate(Assembler::is_simm5(n->get_int()));
  match(ConI);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Integer Immediate: 0-bit (the constant zero; encodes as %g0)
operand immI0() %{
  predicate(n->get_int() == 0);
  match(ConI);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}

// Integer Immediate: the value 10
operand immI10() %{
  predicate(n->get_int() == 10);
  match(ConI);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}

// Integer Immediate: the values 0-31 (e.g. 32-bit shift counts)
operand immU5() %{
  predicate(n->get_int() >= 0 && n->get_int() <= 31);
  match(ConI);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}

// Integer Immediate: the values 1-31
operand immI_1_31() %{
  predicate(n->get_int() >= 1 && n->get_int() <= 31);
  match(ConI);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}

// Integer Immediate: the values 32-63
operand immI_32_63() %{
  predicate(n->get_int() >= 32 && n->get_int() <= 63);
  match(ConI);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}

// Immediates for special shifts (sign extend)

// Integer Immediate: the value 16
operand immI_16() %{
  predicate(n->get_int() == 16);
  match(ConI);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}

// Integer Immediate: the value 24
operand immI_24() %{
  predicate(n->get_int() == 24);
  match(ConI);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}

// Integer Immediate: the value 255 (byte mask)
operand immI_255() %{
  predicate( n->get_int() == 255 );
  match(ConI);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}

// Integer Immediate: the value 65535 (char/short mask)
operand immI_65535() %{
  predicate(n->get_int() == 65535);
  match(ConI);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}

// Long Immediate: the value FF (byte mask)
operand immL_FF() %{
  predicate( n->get_long() == 0xFFL );
  match(ConL);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}

// Long Immediate: the value FFFF (char/short mask)
operand immL_FFFF() %{
  predicate( n->get_long() == 0xFFFFL );
  match(ConL);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}
// Pointer Immediate: 32 or 64-bit
operand immP() %{
  match(ConP);

  op_cost(5);
  // formats are generated automatically for constants and base registers
  format %{ %}
  interface(CONST_INTER);
%}

#ifdef _LP64
// Pointer Immediate: 64-bit
// Pre-Niagara2: materialize with a 'set' instruction sequence.
operand immP_set() %{
  predicate(!VM_Version::is_niagara_plus());
  match(ConP);

  op_cost(5);
  // formats are generated automatically for constants and base registers
  format %{ %}
  interface(CONST_INTER);
%}

// Pointer Immediate: 64-bit
// From Niagara2 processors on a load should be better than materializing.
operand immP_load() %{
  predicate(VM_Version::is_niagara_plus() && (n->bottom_type()->isa_oop_ptr() || (MacroAssembler::insts_for_set(n->get_ptr()) > 3)));
  match(ConP);

  op_cost(5);
  // formats are generated automatically for constants and base registers
  format %{ %}
  interface(CONST_INTER);
%}

// Pointer Immediate: 64-bit
// Non-oop pointer cheap enough (<= 3 insns) to materialize inline.
operand immP_no_oop_cheap() %{
  predicate(VM_Version::is_niagara_plus() && !n->bottom_type()->isa_oop_ptr() && (MacroAssembler::insts_for_set(n->get_ptr()) <= 3));
  match(ConP);

  op_cost(5);
  // formats are generated automatically for constants and base registers
  format %{ %}
  interface(CONST_INTER);
%}
#endif

// Pointer Immediate: 13-bit (fits a simm13 field)
operand immP13() %{
  predicate((-4096 < n->get_ptr()) && (n->get_ptr() <= 4095));
  match(ConP);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}

// NULL Pointer Immediate
operand immP0() %{
  predicate(n->get_ptr() == 0);
  match(ConP);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}

// Pointer Immediate: the safepoint polling page address
operand immP_poll() %{
  predicate(n->get_ptr() != 0 && n->get_ptr() == (intptr_t)os::get_polling_page());
  match(ConP);

  // formats are generated automatically for constants and base registers
  format %{ %}
  interface(CONST_INTER);
%}

// Pointer Immediate (narrow oop)
operand immN()
%{
  match(ConN);

  op_cost(10);
  format %{ %}
  interface(CONST_INTER);
%}

// Narrow klass pointer Immediate (compressed class pointer)
operand immNKlass()
%{
  match(ConNKlass);

  op_cost(10);
  format %{ %}
  interface(CONST_INTER);
%}

// NULL Pointer Immediate (narrow oop zero)
operand immN0()
%{
  predicate(n->get_narrowcon() == 0);
  match(ConN);

  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}
// Long Immediate: any 64-bit value
operand immL() %{
  match(ConL);
  op_cost(40);
  // formats are generated automatically for constants and base registers
  format %{ %}
  interface(CONST_INTER);
%}

// Long Immediate: zero (encodes as %g0)
operand immL0() %{
  predicate(n->get_long() == 0L);
  match(ConL);
  op_cost(0);
  // formats are generated automatically for constants and base registers
  format %{ %}
  interface(CONST_INTER);
%}

// Long Immediate: 5-bit (value must also fit in an int)
operand immL5() %{
  predicate(n->get_long() == (int)n->get_long() && Assembler::is_simm5((int)n->get_long()));
  match(ConL);
  op_cost(0);
  format %{ %}
  interface(CONST_INTER);
%}

// Long Immediate: 13-bit
operand immL13() %{
  predicate((-4096L < n->get_long()) && (n->get_long() <= 4095L));
  match(ConL);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}

// Long Immediate: 13-bit minus 7 (value+7 must still fit in simm13)
operand immL13m7() %{
  predicate((-4096L < n->get_long()) && ((n->get_long() + 7L) <= 4095L));
  match(ConL);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}

// Long Immediate: low 32-bit mask
operand immL_32bits() %{
  predicate(n->get_long() == 0xFFFFFFFFL);
  match(ConL);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}

// Long Immediate: cheap (materialize in <= 3 instructions)
operand immL_cheap() %{
  predicate(!VM_Version::is_niagara_plus() || MacroAssembler::insts_for_set64(n->get_long()) <= 3);
  match(ConL);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}

// Long Immediate: expensive (materialize in > 3 instructions)
operand immL_expensive() %{
  predicate(VM_Version::is_niagara_plus() && MacroAssembler::insts_for_set64(n->get_long()) > 3);
  match(ConL);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}
3715 // Double Immediate
3716 operand immD() %{
3717 match(ConD);
3719 op_cost(40);
3720 format %{ %}
3721 interface(CONST_INTER);
3722 %}
3724 operand immD0() %{
3725 #ifdef _LP64
3726 // on 64-bit architectures this comparision is faster
3727 predicate(jlong_cast(n->getd()) == 0);
3728 #else
3729 predicate((n->getd() == 0) && (fpclass(n->getd()) == FP_PZERO));
3730 #endif
3731 match(ConD);
3733 op_cost(0);
3734 format %{ %}
3735 interface(CONST_INTER);
3736 %}
3738 // Float Immediate
3739 operand immF() %{
3740 match(ConF);
3742 op_cost(20);
3743 format %{ %}
3744 interface(CONST_INTER);
3745 %}
3747 // Float Immediate: 0
3748 operand immF0() %{
3749 predicate((n->getf() == 0) && (fpclass(n->getf()) == FP_PZERO));
3750 match(ConF);
3752 op_cost(0);
3753 format %{ %}
3754 interface(CONST_INTER);
3755 %}
// Integer Register Operands
// Integer Register
operand iRegI() %{
  constraint(ALLOC_IN_RC(int_reg));
  match(RegI);

  match(notemp_iRegI);
  match(g1RegI);
  match(o0RegI);
  match(iRegIsafe);

  format %{ %}
  interface(REG_INTER);
%}

// Integer register excluding the assembler temporaries
operand notemp_iRegI() %{
  constraint(ALLOC_IN_RC(notemp_int_reg));
  match(RegI);

  match(o0RegI);

  format %{ %}
  interface(REG_INTER);
%}

// Integer register pinned to O0
operand o0RegI() %{
  constraint(ALLOC_IN_RC(o0_regI));
  match(iRegI);

  format %{ %}
  interface(REG_INTER);
%}

// Pointer Register
operand iRegP() %{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(RegP);

  match(lock_ptr_RegP);
  match(g1RegP);
  match(g2RegP);
  match(g3RegP);
  match(g4RegP);
  match(i0RegP);
  match(o0RegP);
  match(o1RegP);
  match(l7RegP);

  format %{ %}
  interface(REG_INTER);
%}

// Pointer register class that may also include the stack pointer
operand sp_ptr_RegP() %{
  constraint(ALLOC_IN_RC(sp_ptr_reg));
  match(RegP);
  match(iRegP);

  format %{ %}
  interface(REG_INTER);
%}

// Pointer register class usable for lock addresses
operand lock_ptr_RegP() %{
  constraint(ALLOC_IN_RC(lock_ptr_reg));
  match(RegP);
  match(i0RegP);
  match(o0RegP);
  match(o1RegP);
  match(l7RegP);

  format %{ %}
  interface(REG_INTER);
%}

// The single-register operands below pin an operand to one specific
// machine register (named in the register class).

operand g1RegP() %{
  constraint(ALLOC_IN_RC(g1_regP));
  match(iRegP);

  format %{ %}
  interface(REG_INTER);
%}

operand g2RegP() %{
  constraint(ALLOC_IN_RC(g2_regP));
  match(iRegP);

  format %{ %}
  interface(REG_INTER);
%}

operand g3RegP() %{
  constraint(ALLOC_IN_RC(g3_regP));
  match(iRegP);

  format %{ %}
  interface(REG_INTER);
%}

operand g1RegI() %{
  constraint(ALLOC_IN_RC(g1_regI));
  match(iRegI);

  format %{ %}
  interface(REG_INTER);
%}

operand g3RegI() %{
  constraint(ALLOC_IN_RC(g3_regI));
  match(iRegI);

  format %{ %}
  interface(REG_INTER);
%}

operand g4RegI() %{
  constraint(ALLOC_IN_RC(g4_regI));
  match(iRegI);

  format %{ %}
  interface(REG_INTER);
%}

operand g4RegP() %{
  constraint(ALLOC_IN_RC(g4_regP));
  match(iRegP);

  format %{ %}
  interface(REG_INTER);
%}

operand i0RegP() %{
  constraint(ALLOC_IN_RC(i0_regP));
  match(iRegP);

  format %{ %}
  interface(REG_INTER);
%}

operand o0RegP() %{
  constraint(ALLOC_IN_RC(o0_regP));
  match(iRegP);

  format %{ %}
  interface(REG_INTER);
%}

operand o1RegP() %{
  constraint(ALLOC_IN_RC(o1_regP));
  match(iRegP);

  format %{ %}
  interface(REG_INTER);
%}

operand o2RegP() %{
  constraint(ALLOC_IN_RC(o2_regP));
  match(iRegP);

  format %{ %}
  interface(REG_INTER);
%}

operand o7RegP() %{
  constraint(ALLOC_IN_RC(o7_regP));
  match(iRegP);

  format %{ %}
  interface(REG_INTER);
%}

operand l7RegP() %{
  constraint(ALLOC_IN_RC(l7_regP));
  match(iRegP);

  format %{ %}
  interface(REG_INTER);
%}

operand o7RegI() %{
  constraint(ALLOC_IN_RC(o7_regI));
  match(iRegI);

  format %{ %}
  interface(REG_INTER);
%}

// Narrow (compressed) oop register
operand iRegN() %{
  constraint(ALLOC_IN_RC(int_reg));
  match(RegN);

  format %{ %}
  interface(REG_INTER);
%}

// Long Register
operand iRegL() %{
  constraint(ALLOC_IN_RC(long_reg));
  match(RegL);

  format %{ %}
  interface(REG_INTER);
%}

operand o2RegL() %{
  constraint(ALLOC_IN_RC(o2_regL));
  match(iRegL);

  format %{ %}
  interface(REG_INTER);
%}

operand o7RegL() %{
  constraint(ALLOC_IN_RC(o7_regL));
  match(iRegL);

  format %{ %}
  interface(REG_INTER);
%}

operand g1RegL() %{
  constraint(ALLOC_IN_RC(g1_regL));
  match(iRegL);

  format %{ %}
  interface(REG_INTER);
%}

operand g3RegL() %{
  constraint(ALLOC_IN_RC(g3_regL));
  match(iRegL);

  format %{ %}
  interface(REG_INTER);
%}

// Int Register safe
// This is 64bit safe (allocated from the long register class)
operand iRegIsafe() %{
  constraint(ALLOC_IN_RC(long_reg));

  match(iRegI);

  format %{ %}
  interface(REG_INTER);
%}
// Condition Code Flag Register
operand flagsReg() %{
  constraint(ALLOC_IN_RC(int_flags));
  match(RegFlags);

  format %{ "ccr" %}  // both ICC and XCC
  interface(REG_INTER);
%}

// Condition Code Register, unsigned comparisons.
operand flagsRegU() %{
  constraint(ALLOC_IN_RC(int_flags));
  match(RegFlags);

  format %{ "icc_U" %}
  interface(REG_INTER);
%}

// Condition Code Register, pointer comparisons.
operand flagsRegP() %{
  constraint(ALLOC_IN_RC(int_flags));
  match(RegFlags);

#ifdef _LP64
  format %{ "xcc_P" %}
#else
  format %{ "icc_P" %}
#endif
  interface(REG_INTER);
%}

// Condition Code Register, long comparisons.
operand flagsRegL() %{
  constraint(ALLOC_IN_RC(int_flags));
  match(RegFlags);

  format %{ "xcc_L" %}
  interface(REG_INTER);
%}

// Condition Code Register, floating comparisons, unordered same as "less".
operand flagsRegF() %{
  constraint(ALLOC_IN_RC(float_flags));
  match(RegFlags);
  match(flagsRegF0);

  format %{ %}
  interface(REG_INTER);
%}

// Floating condition code register %fcc0 specifically
operand flagsRegF0() %{
  constraint(ALLOC_IN_RC(float_flag0));
  match(RegFlags);

  format %{ %}
  interface(REG_INTER);
%}


// Condition Code Flag Register used by long compare
operand flagsReg_long_LTGE() %{
  constraint(ALLOC_IN_RC(int_flags));
  match(RegFlags);
  format %{ "icc_LTGE" %}
  interface(REG_INTER);
%}
operand flagsReg_long_EQNE() %{
  constraint(ALLOC_IN_RC(int_flags));
  match(RegFlags);
  format %{ "icc_EQNE" %}
  interface(REG_INTER);
%}
operand flagsReg_long_LEGT() %{
  constraint(ALLOC_IN_RC(int_flags));
  match(RegFlags);
  format %{ "icc_LEGT" %}
  interface(REG_INTER);
%}
// Double-precision floating point register
operand regD() %{
  constraint(ALLOC_IN_RC(dflt_reg));
  match(RegD);

  match(regD_low);

  format %{ %}
  interface(REG_INTER);
%}

// Single-precision floating point register
operand regF() %{
  constraint(ALLOC_IN_RC(sflt_reg));
  match(RegF);

  format %{ %}
  interface(REG_INTER);
%}

// Double register restricted to the low half of the FP register file
operand regD_low() %{
  constraint(ALLOC_IN_RC(dflt_low_reg));
  match(regD);

  format %{ %}
  interface(REG_INTER);
%}

// Special Registers

// Method Register
operand inline_cache_regP(iRegP reg) %{
  constraint(ALLOC_IN_RC(g5_regP)); // G5=inline_cache_reg but uses 2 bits instead of 1
  match(reg);
  format %{ %}
  interface(REG_INTER);
%}

operand interpreter_method_oop_regP(iRegP reg) %{
  constraint(ALLOC_IN_RC(g5_regP)); // G5=interpreter_method_oop_reg but uses 2 bits instead of 1
  match(reg);
  format %{ %}
  interface(REG_INTER);
%}
//----------Complex Operands---------------------------------------------------
// Indirect Memory Reference
operand indirect(sp_ptr_RegP reg) %{
  constraint(ALLOC_IN_RC(sp_ptr_reg));
  match(reg);

  op_cost(100);
  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0x0);
    scale(0x0);
    disp(0x0);
  %}
%}

// Indirect with simm13 Offset
operand indOffset13(sp_ptr_RegP reg, immX13 offset) %{
  constraint(ALLOC_IN_RC(sp_ptr_reg));
  match(AddP reg offset);

  op_cost(100);
  format %{ "[$reg + $offset]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0x0);
    scale(0x0);
    disp($offset);
  %}
%}

// Indirect with simm13 Offset minus 7
operand indOffset13m7(sp_ptr_RegP reg, immX13m7 offset) %{
  constraint(ALLOC_IN_RC(sp_ptr_reg));
  match(AddP reg offset);

  op_cost(100);
  format %{ "[$reg + $offset]" %}
  interface(MEMORY_INTER) %{
    base($reg);
    index(0x0);
    scale(0x0);
    disp($offset);
  %}
%}

// Note:  Intel has a swapped version also, like this:
//operand indOffsetX(iRegI reg, immP offset) %{
//  constraint(ALLOC_IN_RC(int_reg));
//  match(AddP offset reg);
//
//  op_cost(100);
//  format %{ "[$reg + $offset]" %}
//  interface(MEMORY_INTER) %{
//    base($reg);
//    index(0x0);
//    scale(0x0);
//    disp($offset);
//  %}
//%}
//// However, it doesn't make sense for SPARC, since
// we have no particularly good way to embed oops in
// single instructions.

// Indirect with Register Index
operand indIndex(iRegP addr, iRegX index) %{
  constraint(ALLOC_IN_RC(ptr_reg));
  match(AddP addr index);

  op_cost(100);
  format %{ "[$addr + $index]" %}
  interface(MEMORY_INTER) %{
    base($addr);
    index($index);
    scale(0x0);
    disp(0x0);
  %}
%}
//----------Special Memory Operands--------------------------------------------
// Stack Slot Operand - This operand is used for loading and storing temporary
//                      values on the stack where a match requires a value to
//                      flow through memory.
// All of these use base 0xE (R_SP) with the slot number as displacement.
operand stackSlotI(sRegI reg) %{
  constraint(ALLOC_IN_RC(stack_slots));
  op_cost(100);
  //match(RegI);
  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base(0xE);   // R_SP
    index(0x0);
    scale(0x0);
    disp($reg);  // Stack Offset
  %}
%}

operand stackSlotP(sRegP reg) %{
  constraint(ALLOC_IN_RC(stack_slots));
  op_cost(100);
  //match(RegP);
  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base(0xE);   // R_SP
    index(0x0);
    scale(0x0);
    disp($reg);  // Stack Offset
  %}
%}

operand stackSlotF(sRegF reg) %{
  constraint(ALLOC_IN_RC(stack_slots));
  op_cost(100);
  //match(RegF);
  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base(0xE);   // R_SP
    index(0x0);
    scale(0x0);
    disp($reg);  // Stack Offset
  %}
%}
operand stackSlotD(sRegD reg) %{
  constraint(ALLOC_IN_RC(stack_slots));
  op_cost(100);
  //match(RegD);
  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base(0xE);   // R_SP
    index(0x0);
    scale(0x0);
    disp($reg);  // Stack Offset
  %}
%}
operand stackSlotL(sRegL reg) %{
  constraint(ALLOC_IN_RC(stack_slots));
  op_cost(100);
  //match(RegL);
  format %{ "[$reg]" %}
  interface(MEMORY_INTER) %{
    base(0xE);   // R_SP
    index(0x0);
    scale(0x0);
    disp($reg);  // Stack Offset
  %}
%}
4272 // Operands for expressing Control Flow
4273 // NOTE: Label is a predefined operand which should not be redefined in
4274 // the AD file. It is generically handled within the ADLC.
4276 //----------Conditional Branch Operands----------------------------------------
4277 // Comparison Op - This is the operation of the comparison, and is limited to
4278 // the following set of codes:
4279 // L (<), LE (<=), G (>), GE (>=), E (==), NE (!=)
4280 //
4281 // Other attributes of the comparison, such as unsignedness, are specified
4282 // by the comparison instruction that sets a condition code flags register.
4283 // That result is represented by a flags operand whose subtype is appropriate
4284 // to the unsignedness (etc.) of the comparison.
4285 //
4286 // Later, the instruction which matches both the Comparison Op (a Bool) and
4287 // the flags (produced by the Cmp) specifies the coding of the comparison op
4288 // by matching a specific subtype of Bool operand below, such as cmpOpU.
// Signed integer comparison op. The hex values are SPARC Bicc cond-field
// encodings (e.g. 0x1 = be/e "equal", 0x9 = bne) — see the V9 manual.
4290 operand cmpOp() %{
4291 match(Bool);
4293 format %{ "" %}
4294 interface(COND_INTER) %{
4295 equal(0x1);
4296 not_equal(0x9);
4297 less(0x3);
4298 greater_equal(0xB);
4299 less_equal(0x2);
4300 greater(0xA);
4301 %}
4302 %}
4304 // Comparison Op, unsigned
// Unsigned integer comparison op: Bicc cond encodings for the unsigned
// conditions (0x5 = cs/lu "less", 0xD = cc/geu, 0x4 = leu, 0xC = gu).
4305 operand cmpOpU() %{
4306 match(Bool);
4308 format %{ "u" %}
4309 interface(COND_INTER) %{
4310 equal(0x1);
4311 not_equal(0x9);
4312 less(0x5);
4313 greater_equal(0xD);
4314 less_equal(0x4);
4315 greater(0xC);
4316 %}
4317 %}
4319 // Comparison Op, pointer (same as unsigned)
// Pointer comparison op: identical encodings to cmpOpU, since pointers
// compare as unsigned values.
4320 operand cmpOpP() %{
4321 match(Bool);
4323 format %{ "p" %}
4324 interface(COND_INTER) %{
4325 equal(0x1);
4326 not_equal(0x9);
4327 less(0x5);
4328 greater_equal(0xD);
4329 less_equal(0x4);
4330 greater(0xC);
4331 %}
4332 %}
4334 // Comparison Op, branch-register encoding
// Comparison op for branch-on-register (BPr) instructions: values are the
// rcond field encodings (0x1 = rz, 0x5 = rnz, 0x3 = rlz, 0x7 = rgez, ...).
4335 operand cmpOp_reg() %{
4336 match(Bool);
4338 format %{ "" %}
4339 interface(COND_INTER) %{
4340 equal (0x1);
4341 not_equal (0x5);
4342 less (0x3);
4343 greater_equal(0x7);
4344 less_equal (0x2);
4345 greater (0x6);
4346 %}
4347 %}
4349 // Comparison Code, floating, unordered same as less
// Floating-point comparison op: FBfcc cond-field encodings; "unordered"
// results are folded into the less-than direction (0x3 = ul, 0xE = ule).
4350 operand cmpOpF() %{
4351 match(Bool);
4353 format %{ "fl" %}
4354 interface(COND_INTER) %{
4355 equal(0x9);
4356 not_equal(0x1);
4357 less(0x3);
4358 greater_equal(0xB);
4359 less_equal(0xE);
4360 greater(0x6);
4361 %}
4362 %}
4364 // Used by long compare
// Comparison op with operands swapped (used by long compare): each encoding
// is the mirror of cmpOp's (less<->greater, less_equal<->greater_equal).
4365 operand cmpOp_commute() %{
4366 match(Bool);
4368 format %{ "" %}
4369 interface(COND_INTER) %{
4370 equal(0x1);
4371 not_equal(0x9);
4372 less(0xA);
4373 greater_equal(0x2);
4374 less_equal(0xB);
4375 greater(0x3);
4376 %}
4377 %}
4379 //----------OPERAND CLASSES----------------------------------------------------
4380 // Operand Classes are groups of operands that are used to simplify
4381 // instruction definitions by not requiring the AD writer to specify separate
4382 // instructions for every form of operand when the instruction accepts
4383 // multiple operand types with the same basic encoding and format. The classic
4384 // case of this is memory operands.
// All register-addressable memory forms: [reg], [reg+simm13], [reg+reg].
4385 opclass memory( indirect, indOffset13, indIndex );
// Restricted class for instructions that only accept the [reg+reg] form.
4386 opclass indIndexMemory( indIndex );
4388 //----------PIPELINE-----------------------------------------------------------
// Machine pipeline model used by the instruction scheduler. Fixes in this
// revision: terminated the resource-usage line in cadd_cmpltmask with ';'
// for consistency with every other pipe_class, and corrected two comment
// typos ("operaion", and the header above fcvtD2I which read "D->F").
4389 pipeline %{
4391 //----------ATTRIBUTES---------------------------------------------------------
4392 attributes %{
4393 fixed_size_instructions; // Fixed size instructions
4394 branch_has_delay_slot; // Branch has delay slot following
4395 max_instructions_per_bundle = 4; // Up to 4 instructions per bundle
4396 instruction_unit_size = 4; // An instruction is 4 bytes long
4397 instruction_fetch_unit_size = 16; // The processor fetches one line
4398 instruction_fetch_units = 1; // of 16 bytes
4400 // List of nop instructions
4401 nops( Nop_A0, Nop_A1, Nop_MS, Nop_FA, Nop_BR );
4402 %}
4404 //----------RESOURCES----------------------------------------------------------
4405 // Resources are the functional units available to the machine
4406 resources(A0, A1, MS, BR, FA, FM, IDIV, FDIV, IALU = A0 | A1);
4408 //----------PIPELINE DESCRIPTION-----------------------------------------------
4409 // Pipeline Description specifies the stages in the machine's pipeline
4411 pipe_desc(A, P, F, B, I, J, S, R, E, C, M, W, X, T, D);
4413 //----------PIPELINE CLASSES---------------------------------------------------
4414 // Pipeline Classes describe the stages in which input and output are
4415 // referenced by the hardware pipeline.
4417 // Integer ALU reg-reg operation
4418 pipe_class ialu_reg_reg(iRegI dst, iRegI src1, iRegI src2) %{
4419 single_instruction;
4420 dst : E(write);
4421 src1 : R(read);
4422 src2 : R(read);
4423 IALU : R;
4424 %}
4426 // Integer ALU reg-reg long operation
4427 pipe_class ialu_reg_reg_2(iRegL dst, iRegL src1, iRegL src2) %{
4428 instruction_count(2);
4429 dst : E(write);
4430 src1 : R(read);
4431 src2 : R(read);
4432 IALU : R;
4433 IALU : R;
4434 %}
4436 // Integer ALU reg-reg long dependent operation
4437 pipe_class ialu_reg_reg_2_dep(iRegL dst, iRegL src1, iRegL src2, flagsReg cr) %{
4438 instruction_count(1); multiple_bundles;
4439 dst : E(write);
4440 src1 : R(read);
4441 src2 : R(read);
4442 cr : E(write);
4443 IALU : R(2);
4444 %}
4446 // Integer ALU reg-imm operation
4447 pipe_class ialu_reg_imm(iRegI dst, iRegI src1, immI13 src2) %{
4448 single_instruction;
4449 dst : E(write);
4450 src1 : R(read);
4451 IALU : R;
4452 %}
4454 // Integer ALU reg-reg operation with condition code
4455 pipe_class ialu_cc_reg_reg(iRegI dst, iRegI src1, iRegI src2, flagsReg cr) %{
4456 single_instruction;
4457 dst : E(write);
4458 cr : E(write);
4459 src1 : R(read);
4460 src2 : R(read);
4461 IALU : R;
4462 %}
4464 // Integer ALU reg-imm operation with condition code
4465 pipe_class ialu_cc_reg_imm(iRegI dst, iRegI src1, immI13 src2, flagsReg cr) %{
4466 single_instruction;
4467 dst : E(write);
4468 cr : E(write);
4469 src1 : R(read);
4470 IALU : R;
4471 %}
4473 // Integer ALU zero-reg operation
4474 pipe_class ialu_zero_reg(iRegI dst, immI0 zero, iRegI src2) %{
4475 single_instruction;
4476 dst : E(write);
4477 src2 : R(read);
4478 IALU : R;
4479 %}
4481 // Integer ALU zero-reg operation with condition code only
4482 pipe_class ialu_cconly_zero_reg(flagsReg cr, iRegI src) %{
4483 single_instruction;
4484 cr : E(write);
4485 src : R(read);
4486 IALU : R;
4487 %}
4489 // Integer ALU reg-reg operation with condition code only
4490 pipe_class ialu_cconly_reg_reg(flagsReg cr, iRegI src1, iRegI src2) %{
4491 single_instruction;
4492 cr : E(write);
4493 src1 : R(read);
4494 src2 : R(read);
4495 IALU : R;
4496 %}
4498 // Integer ALU reg-imm operation with condition code only
4499 pipe_class ialu_cconly_reg_imm(flagsReg cr, iRegI src1, immI13 src2) %{
4500 single_instruction;
4501 cr : E(write);
4502 src1 : R(read);
4503 IALU : R;
4504 %}
4506 // Integer ALU reg-reg-zero operation with condition code only
4507 pipe_class ialu_cconly_reg_reg_zero(flagsReg cr, iRegI src1, iRegI src2, immI0 zero) %{
4508 single_instruction;
4509 cr : E(write);
4510 src1 : R(read);
4511 src2 : R(read);
4512 IALU : R;
4513 %}
4515 // Integer ALU reg-imm-zero operation with condition code only
4516 pipe_class ialu_cconly_reg_imm_zero(flagsReg cr, iRegI src1, immI13 src2, immI0 zero) %{
4517 single_instruction;
4518 cr : E(write);
4519 src1 : R(read);
4520 IALU : R;
4521 %}
4523 // Integer ALU reg-reg operation with condition code, src1 modified
4524 pipe_class ialu_cc_rwreg_reg(flagsReg cr, iRegI src1, iRegI src2) %{
4525 single_instruction;
4526 cr : E(write);
4527 src1 : E(write);
4528 src1 : R(read);
4529 src2 : R(read);
4530 IALU : R;
4531 %}
4533 // Integer ALU reg-imm operation with condition code, src1 modified
4534 pipe_class ialu_cc_rwreg_imm(flagsReg cr, iRegI src1, immI13 src2) %{
4535 single_instruction;
4536 cr : E(write);
4537 src1 : E(write);
4538 src1 : R(read);
4539 IALU : R;
4540 %}
4542 pipe_class cmpL_reg(iRegI dst, iRegL src1, iRegL src2, flagsReg cr ) %{
4543 multiple_bundles;
4544 dst : E(write)+4;
4545 cr : E(write);
4546 src1 : R(read);
4547 src2 : R(read);
4548 IALU : R(3);
4549 BR : R(2);
4550 %}
4552 // Integer ALU operation
4553 pipe_class ialu_none(iRegI dst) %{
4554 single_instruction;
4555 dst : E(write);
4556 IALU : R;
4557 %}
4559 // Integer ALU reg operation
4560 pipe_class ialu_reg(iRegI dst, iRegI src) %{
4561 single_instruction; may_have_no_code;
4562 dst : E(write);
4563 src : R(read);
4564 IALU : R;
4565 %}
4567 // Integer ALU reg conditional operation
4568 // This instruction has a 1 cycle stall, and cannot execute
4569 // in the same cycle as the instruction setting the condition
4570 // code. We kludge this by pretending to read the condition code
4571 // 1 cycle earlier, and by marking the functional units as busy
4572 // for 2 cycles with the result available 1 cycle later than
4573 // is really the case.
4574 pipe_class ialu_reg_flags( iRegI op2_out, iRegI op2_in, iRegI op1, flagsReg cr ) %{
4575 single_instruction;
4576 op2_out : C(write);
4577 op1 : R(read);
4578 cr : R(read); // This is really E, with a 1 cycle stall
4579 BR : R(2);
4580 MS : R(2);
4581 %}
4583 #ifdef _LP64
4584 pipe_class ialu_clr_and_mover( iRegI dst, iRegP src ) %{
4585 instruction_count(1); multiple_bundles;
4586 dst : C(write)+1;
4587 src : R(read)+1;
4588 IALU : R(1);
4589 BR : E(2);
4590 MS : E(2);
4591 %}
4592 #endif
4594 // Integer ALU reg operation
4595 pipe_class ialu_move_reg_L_to_I(iRegI dst, iRegL src) %{
4596 single_instruction; may_have_no_code;
4597 dst : E(write);
4598 src : R(read);
4599 IALU : R;
4600 %}
4601 pipe_class ialu_move_reg_I_to_L(iRegL dst, iRegI src) %{
4602 single_instruction; may_have_no_code;
4603 dst : E(write);
4604 src : R(read);
4605 IALU : R;
4606 %}
4608 // Two integer ALU reg operations
4609 pipe_class ialu_reg_2(iRegL dst, iRegL src) %{
4610 instruction_count(2);
4611 dst : E(write);
4612 src : R(read);
4613 A0 : R;
4614 A1 : R;
4615 %}
4617 // Two integer ALU reg operations
4618 pipe_class ialu_move_reg_L_to_L(iRegL dst, iRegL src) %{
4619 instruction_count(2); may_have_no_code;
4620 dst : E(write);
4621 src : R(read);
4622 A0 : R;
4623 A1 : R;
4624 %}
4626 // Integer ALU imm operation
4627 pipe_class ialu_imm(iRegI dst, immI13 src) %{
4628 single_instruction;
4629 dst : E(write);
4630 IALU : R;
4631 %}
4633 // Integer ALU reg-reg with carry operation
4634 pipe_class ialu_reg_reg_cy(iRegI dst, iRegI src1, iRegI src2, iRegI cy) %{
4635 single_instruction;
4636 dst : E(write);
4637 src1 : R(read);
4638 src2 : R(read);
4639 IALU : R;
4640 %}
4642 // Integer ALU cc operation
4643 pipe_class ialu_cc(iRegI dst, flagsReg cc) %{
4644 single_instruction;
4645 dst : E(write);
4646 cc : R(read);
4647 IALU : R;
4648 %}
4650 // Integer ALU cc / second IALU operation
4651 pipe_class ialu_reg_ialu( iRegI dst, iRegI src ) %{
4652 instruction_count(1); multiple_bundles;
4653 dst : E(write)+1;
4654 src : R(read);
4655 IALU : R;
4656 %}
4658 // Integer ALU cc / second IALU operation
4659 pipe_class ialu_reg_reg_ialu( iRegI dst, iRegI p, iRegI q ) %{
4660 instruction_count(1); multiple_bundles;
4661 dst : E(write)+1;
4662 p : R(read);
4663 q : R(read);
4664 IALU : R;
4665 %}
4667 // Integer ALU hi-lo-reg operation
4668 pipe_class ialu_hi_lo_reg(iRegI dst, immI src) %{
4669 instruction_count(1); multiple_bundles;
4670 dst : E(write)+1;
4671 IALU : R(2);
4672 %}
4674 // Float ALU hi-lo-reg operation (with temp)
4675 pipe_class ialu_hi_lo_reg_temp(regF dst, immF src, g3RegP tmp) %{
4676 instruction_count(1); multiple_bundles;
4677 dst : E(write)+1;
4678 IALU : R(2);
4679 %}
4681 // Long Constant
4682 pipe_class loadConL( iRegL dst, immL src ) %{
4683 instruction_count(2); multiple_bundles;
4684 dst : E(write)+1;
4685 IALU : R(2);
4686 IALU : R(2);
4687 %}
4689 // Pointer Constant
4690 pipe_class loadConP( iRegP dst, immP src ) %{
4691 instruction_count(0); multiple_bundles;
4692 fixed_latency(6);
4693 %}
4695 // Polling Address
4696 pipe_class loadConP_poll( iRegP dst, immP_poll src ) %{
4697 #ifdef _LP64
4698 instruction_count(0); multiple_bundles;
4699 fixed_latency(6);
4700 #else
4701 dst : E(write);
4702 IALU : R;
4703 #endif
4704 %}
4706 // Long Constant small
4707 pipe_class loadConLlo( iRegL dst, immL src ) %{
4708 instruction_count(2);
4709 dst : E(write);
4710 IALU : R;
4711 IALU : R;
4712 %}
4714 // [PHH] This is wrong for 64-bit. See LdImmF/D.
4715 pipe_class loadConFD(regF dst, immF src, g3RegP tmp) %{
4716 instruction_count(1); multiple_bundles;
4717 src : R(read);
4718 dst : M(write)+1;
4719 IALU : R;
4720 MS : E;
4721 %}
4723 // Integer ALU nop operation
4724 pipe_class ialu_nop() %{
4725 single_instruction;
4726 IALU : R;
4727 %}
4729 // Integer ALU nop operation
4730 pipe_class ialu_nop_A0() %{
4731 single_instruction;
4732 A0 : R;
4733 %}
4735 // Integer ALU nop operation
4736 pipe_class ialu_nop_A1() %{
4737 single_instruction;
4738 A1 : R;
4739 %}
4741 // Integer Multiply reg-reg operation
4742 pipe_class imul_reg_reg(iRegI dst, iRegI src1, iRegI src2) %{
4743 single_instruction;
4744 dst : E(write);
4745 src1 : R(read);
4746 src2 : R(read);
4747 MS : R(5);
4748 %}
4750 // Integer Multiply reg-imm operation
4751 pipe_class imul_reg_imm(iRegI dst, iRegI src1, immI13 src2) %{
4752 single_instruction;
4753 dst : E(write);
4754 src1 : R(read);
4755 MS : R(5);
4756 %}
4758 pipe_class mulL_reg_reg(iRegL dst, iRegL src1, iRegL src2) %{
4759 single_instruction;
4760 dst : E(write)+4;
4761 src1 : R(read);
4762 src2 : R(read);
4763 MS : R(6);
4764 %}
4766 pipe_class mulL_reg_imm(iRegL dst, iRegL src1, immL13 src2) %{
4767 single_instruction;
4768 dst : E(write)+4;
4769 src1 : R(read);
4770 MS : R(6);
4771 %}
4773 // Integer Divide reg-reg
4774 pipe_class sdiv_reg_reg(iRegI dst, iRegI src1, iRegI src2, iRegI temp, flagsReg cr) %{
4775 instruction_count(1); multiple_bundles;
4776 dst : E(write);
4777 temp : E(write);
4778 src1 : R(read);
4779 src2 : R(read);
4780 temp : R(read);
4781 MS : R(38);
4782 %}
4784 // Integer Divide reg-imm
4785 pipe_class sdiv_reg_imm(iRegI dst, iRegI src1, immI13 src2, iRegI temp, flagsReg cr) %{
4786 instruction_count(1); multiple_bundles;
4787 dst : E(write);
4788 temp : E(write);
4789 src1 : R(read);
4790 temp : R(read);
4791 MS : R(38);
4792 %}
4794 // Long Divide
4795 pipe_class divL_reg_reg(iRegL dst, iRegL src1, iRegL src2) %{
4796 dst : E(write)+71;
4797 src1 : R(read);
4798 src2 : R(read)+1;
4799 MS : R(70);
4800 %}
4802 pipe_class divL_reg_imm(iRegL dst, iRegL src1, immL13 src2) %{
4803 dst : E(write)+71;
4804 src1 : R(read);
4805 MS : R(70);
4806 %}
4808 // Floating Point Add Float
4809 pipe_class faddF_reg_reg(regF dst, regF src1, regF src2) %{
4810 single_instruction;
4811 dst : X(write);
4812 src1 : E(read);
4813 src2 : E(read);
4814 FA : R;
4815 %}
4817 // Floating Point Add Double
4818 pipe_class faddD_reg_reg(regD dst, regD src1, regD src2) %{
4819 single_instruction;
4820 dst : X(write);
4821 src1 : E(read);
4822 src2 : E(read);
4823 FA : R;
4824 %}
4826 // Floating Point Conditional Move based on integer flags
4827 pipe_class int_conditional_float_move (cmpOp cmp, flagsReg cr, regF dst, regF src) %{
4828 single_instruction;
4829 dst : X(write);
4830 src : E(read);
4831 cr : R(read);
4832 FA : R(2);
4833 BR : R(2);
4834 %}
4836 // Floating Point Conditional Move based on integer flags
4837 pipe_class int_conditional_double_move (cmpOp cmp, flagsReg cr, regD dst, regD src) %{
4838 single_instruction;
4839 dst : X(write);
4840 src : E(read);
4841 cr : R(read);
4842 FA : R(2);
4843 BR : R(2);
4844 %}
4846 // Floating Point Multiply Float
4847 pipe_class fmulF_reg_reg(regF dst, regF src1, regF src2) %{
4848 single_instruction;
4849 dst : X(write);
4850 src1 : E(read);
4851 src2 : E(read);
4852 FM : R;
4853 %}
4855 // Floating Point Multiply Double
4856 pipe_class fmulD_reg_reg(regD dst, regD src1, regD src2) %{
4857 single_instruction;
4858 dst : X(write);
4859 src1 : E(read);
4860 src2 : E(read);
4861 FM : R;
4862 %}
4864 // Floating Point Divide Float
4865 pipe_class fdivF_reg_reg(regF dst, regF src1, regF src2) %{
4866 single_instruction;
4867 dst : X(write);
4868 src1 : E(read);
4869 src2 : E(read);
4870 FM : R;
4871 FDIV : C(14);
4872 %}
4874 // Floating Point Divide Double
4875 pipe_class fdivD_reg_reg(regD dst, regD src1, regD src2) %{
4876 single_instruction;
4877 dst : X(write);
4878 src1 : E(read);
4879 src2 : E(read);
4880 FM : R;
4881 FDIV : C(17);
4882 %}
4884 // Floating Point Move/Negate/Abs Float
4885 pipe_class faddF_reg(regF dst, regF src) %{
4886 single_instruction;
4887 dst : W(write);
4888 src : E(read);
4889 FA : R(1);
4890 %}
4892 // Floating Point Move/Negate/Abs Double
4893 pipe_class faddD_reg(regD dst, regD src) %{
4894 single_instruction;
4895 dst : W(write);
4896 src : E(read);
4897 FA : R;
4898 %}
4900 // Floating Point Convert F->D
4901 pipe_class fcvtF2D(regD dst, regF src) %{
4902 single_instruction;
4903 dst : X(write);
4904 src : E(read);
4905 FA : R;
4906 %}
4908 // Floating Point Convert I->D
4909 pipe_class fcvtI2D(regD dst, regF src) %{
4910 single_instruction;
4911 dst : X(write);
4912 src : E(read);
4913 FA : R;
4914 %}
4916 // Floating Point Convert LHi->D
4917 pipe_class fcvtLHi2D(regD dst, regD src) %{
4918 single_instruction;
4919 dst : X(write);
4920 src : E(read);
4921 FA : R;
4922 %}
4924 // Floating Point Convert L->D
4925 pipe_class fcvtL2D(regD dst, regF src) %{
4926 single_instruction;
4927 dst : X(write);
4928 src : E(read);
4929 FA : R;
4930 %}
4932 // Floating Point Convert L->F
4933 pipe_class fcvtL2F(regD dst, regF src) %{
4934 single_instruction;
4935 dst : X(write);
4936 src : E(read);
4937 FA : R;
4938 %}
4940 // Floating Point Convert D->F
4941 pipe_class fcvtD2F(regD dst, regF src) %{
4942 single_instruction;
4943 dst : X(write);
4944 src : E(read);
4945 FA : R;
4946 %}
4948 // Floating Point Convert I->L
4949 pipe_class fcvtI2L(regD dst, regF src) %{
4950 single_instruction;
4951 dst : X(write);
4952 src : E(read);
4953 FA : R;
4954 %}
4956 // Floating Point Convert D->I
4957 pipe_class fcvtD2I(regF dst, regD src, flagsReg cr) %{
4958 instruction_count(1); multiple_bundles;
4959 dst : X(write)+6;
4960 src : E(read);
4961 FA : R;
4962 %}
4964 // Floating Point Convert D->L
4965 pipe_class fcvtD2L(regD dst, regD src, flagsReg cr) %{
4966 instruction_count(1); multiple_bundles;
4967 dst : X(write)+6;
4968 src : E(read);
4969 FA : R;
4970 %}
4972 // Floating Point Convert F->I
4973 pipe_class fcvtF2I(regF dst, regF src, flagsReg cr) %{
4974 instruction_count(1); multiple_bundles;
4975 dst : X(write)+6;
4976 src : E(read);
4977 FA : R;
4978 %}
4980 // Floating Point Convert F->L
4981 pipe_class fcvtF2L(regD dst, regF src, flagsReg cr) %{
4982 instruction_count(1); multiple_bundles;
4983 dst : X(write)+6;
4984 src : E(read);
4985 FA : R;
4986 %}
4988 // Floating Point Convert I->F
4989 pipe_class fcvtI2F(regF dst, regF src) %{
4990 single_instruction;
4991 dst : X(write);
4992 src : E(read);
4993 FA : R;
4994 %}
4996 // Floating Point Compare
4997 pipe_class faddF_fcc_reg_reg_zero(flagsRegF cr, regF src1, regF src2, immI0 zero) %{
4998 single_instruction;
4999 cr : X(write);
5000 src1 : E(read);
5001 src2 : E(read);
5002 FA : R;
5003 %}
5005 // Floating Point Compare
5006 pipe_class faddD_fcc_reg_reg_zero(flagsRegF cr, regD src1, regD src2, immI0 zero) %{
5007 single_instruction;
5008 cr : X(write);
5009 src1 : E(read);
5010 src2 : E(read);
5011 FA : R;
5012 %}
5014 // Floating Add Nop
5015 pipe_class fadd_nop() %{
5016 single_instruction;
5017 FA : R;
5018 %}
5020 // Integer Store to Memory
5021 pipe_class istore_mem_reg(memory mem, iRegI src) %{
5022 single_instruction;
5023 mem : R(read);
5024 src : C(read);
5025 MS : R;
5026 %}
5028 // Integer Store to Memory
5029 pipe_class istore_mem_spORreg(memory mem, sp_ptr_RegP src) %{
5030 single_instruction;
5031 mem : R(read);
5032 src : C(read);
5033 MS : R;
5034 %}
5036 // Integer Store Zero to Memory
5037 pipe_class istore_mem_zero(memory mem, immI0 src) %{
5038 single_instruction;
5039 mem : R(read);
5040 MS : R;
5041 %}
5043 // Special Stack Slot Store
5044 pipe_class istore_stk_reg(stackSlotI stkSlot, iRegI src) %{
5045 single_instruction;
5046 stkSlot : R(read);
5047 src : C(read);
5048 MS : R;
5049 %}
5051 // Special Stack Slot Store
5052 pipe_class lstoreI_stk_reg(stackSlotL stkSlot, iRegI src) %{
5053 instruction_count(2); multiple_bundles;
5054 stkSlot : R(read);
5055 src : C(read);
5056 MS : R(2);
5057 %}
5059 // Float Store
// NOTE(review): "RegF"/"RegD" below differ in capitalization from the "regF"/
// "regD" used elsewhere; ADLC accepts this historically — confirm before renaming.
5060 pipe_class fstoreF_mem_reg(memory mem, RegF src) %{
5061 single_instruction;
5062 mem : R(read);
5063 src : C(read);
5064 MS : R;
5065 %}
5067 // Float Store
5068 pipe_class fstoreF_mem_zero(memory mem, immF0 src) %{
5069 single_instruction;
5070 mem : R(read);
5071 MS : R;
5072 %}
5074 // Double Store
5075 pipe_class fstoreD_mem_reg(memory mem, RegD src) %{
5076 instruction_count(1);
5077 mem : R(read);
5078 src : C(read);
5079 MS : R;
5080 %}
5082 // Double Store
5083 pipe_class fstoreD_mem_zero(memory mem, immD0 src) %{
5084 single_instruction;
5085 mem : R(read);
5086 MS : R;
5087 %}
5089 // Special Stack Slot Float Store
5090 pipe_class fstoreF_stk_reg(stackSlotI stkSlot, RegF src) %{
5091 single_instruction;
5092 stkSlot : R(read);
5093 src : C(read);
5094 MS : R;
5095 %}
5097 // Special Stack Slot Double Store
5098 pipe_class fstoreD_stk_reg(stackSlotI stkSlot, RegD src) %{
5099 single_instruction;
5100 stkSlot : R(read);
5101 src : C(read);
5102 MS : R;
5103 %}
5105 // Integer Load (when sign bit propagation not needed)
5106 pipe_class iload_mem(iRegI dst, memory mem) %{
5107 single_instruction;
5108 mem : R(read);
5109 dst : C(write);
5110 MS : R;
5111 %}
5113 // Integer Load from stack operand
5114 pipe_class iload_stkD(iRegI dst, stackSlotD mem ) %{
5115 single_instruction;
5116 mem : R(read);
5117 dst : C(write);
5118 MS : R;
5119 %}
5121 // Integer Load (when sign bit propagation or masking is needed)
5122 pipe_class iload_mask_mem(iRegI dst, memory mem) %{
5123 single_instruction;
5124 mem : R(read);
5125 dst : M(write);
5126 MS : R;
5127 %}
5129 // Float Load
5130 pipe_class floadF_mem(regF dst, memory mem) %{
5131 single_instruction;
5132 mem : R(read);
5133 dst : M(write);
5134 MS : R;
5135 %}
5137 // Float Load
5138 pipe_class floadD_mem(regD dst, memory mem) %{
5139 instruction_count(1); multiple_bundles; // Again, unaligned argument is only multiple case
5140 mem : R(read);
5141 dst : M(write);
5142 MS : R;
5143 %}
5145 // Float Load
5146 pipe_class floadF_stk(regF dst, stackSlotI stkSlot) %{
5147 single_instruction;
5148 stkSlot : R(read);
5149 dst : M(write);
5150 MS : R;
5151 %}
5153 // Float Load
5154 pipe_class floadD_stk(regD dst, stackSlotI stkSlot) %{
5155 single_instruction;
5156 stkSlot : R(read);
5157 dst : M(write);
5158 MS : R;
5159 %}
5161 // Memory Nop
5162 pipe_class mem_nop() %{
5163 single_instruction;
5164 MS : R;
5165 %}
5167 pipe_class sethi(iRegP dst, immI src) %{
5168 single_instruction;
5169 dst : E(write);
5170 IALU : R;
5171 %}
5173 pipe_class loadPollP(iRegP poll) %{
5174 single_instruction;
5175 poll : R(read);
5176 MS : R;
5177 %}
5179 pipe_class br(Universe br, label labl) %{
5180 single_instruction_with_delay_slot;
5181 BR : R;
5182 %}
5184 pipe_class br_cc(Universe br, cmpOp cmp, flagsReg cr, label labl) %{
5185 single_instruction_with_delay_slot;
5186 cr : E(read);
5187 BR : R;
5188 %}
5190 pipe_class br_reg(Universe br, cmpOp cmp, iRegI op1, label labl) %{
5191 single_instruction_with_delay_slot;
5192 op1 : E(read);
5193 BR : R;
5194 MS : R;
5195 %}
5197 // Compare and branch
5198 pipe_class cmp_br_reg_reg(Universe br, cmpOp cmp, iRegI src1, iRegI src2, label labl, flagsReg cr) %{
5199 instruction_count(2); has_delay_slot;
5200 cr : E(write);
5201 src1 : R(read);
5202 src2 : R(read);
5203 IALU : R;
5204 BR : R;
5205 %}
5207 // Compare and branch
5208 pipe_class cmp_br_reg_imm(Universe br, cmpOp cmp, iRegI src1, immI13 src2, label labl, flagsReg cr) %{
5209 instruction_count(2); has_delay_slot;
5210 cr : E(write);
5211 src1 : R(read);
5212 IALU : R;
5213 BR : R;
5214 %}
5216 // Compare and branch using cbcond
5217 pipe_class cbcond_reg_reg(Universe br, cmpOp cmp, iRegI src1, iRegI src2, label labl) %{
5218 single_instruction;
5219 src1 : E(read);
5220 src2 : E(read);
5221 IALU : R;
5222 BR : R;
5223 %}
5225 // Compare and branch using cbcond
5226 pipe_class cbcond_reg_imm(Universe br, cmpOp cmp, iRegI src1, immI5 src2, label labl) %{
5227 single_instruction;
5228 src1 : E(read);
5229 IALU : R;
5230 BR : R;
5231 %}
5233 pipe_class br_fcc(Universe br, cmpOpF cc, flagsReg cr, label labl) %{
5234 single_instruction_with_delay_slot;
5235 cr : E(read);
5236 BR : R;
5237 %}
5239 pipe_class br_nop() %{
5240 single_instruction;
5241 BR : R;
5242 %}
5244 pipe_class simple_call(method meth) %{
5245 instruction_count(2); multiple_bundles; force_serialization;
5246 fixed_latency(100);
5247 BR : R(1);
5248 MS : R(1);
5249 A0 : R(1);
5250 %}
5252 pipe_class compiled_call(method meth) %{
5253 instruction_count(1); multiple_bundles; force_serialization;
5254 fixed_latency(100);
5255 MS : R(1);
5256 %}
5258 pipe_class call(method meth) %{
5259 instruction_count(0); multiple_bundles; force_serialization;
5260 fixed_latency(100);
5261 %}
5263 pipe_class tail_call(Universe ignore, label labl) %{
5264 single_instruction; has_delay_slot;
5265 fixed_latency(100);
5266 BR : R(1);
5267 MS : R(1);
5268 %}
5270 pipe_class ret(Universe ignore) %{
5271 single_instruction; has_delay_slot;
5272 BR : R(1);
5273 MS : R(1);
5274 %}
5276 pipe_class ret_poll(g3RegP poll) %{
5277 instruction_count(3); has_delay_slot;
5278 poll : E(read);
5279 MS : R;
5280 %}
5282 // The real do-nothing guy
5283 pipe_class empty( ) %{
5284 instruction_count(0);
5285 %}
5287 pipe_class long_memory_op() %{
5288 instruction_count(0); multiple_bundles; force_serialization;
5289 fixed_latency(25);
5290 MS : R(1);
5291 %}
5293 // Check-cast
5294 pipe_class partial_subtype_check_pipe(Universe ignore, iRegP array, iRegP match ) %{
5295 array : R(read);
5296 match : R(read);
5297 IALU : R(2);
5298 BR : R(2);
5299 MS : R;
5300 %}
5302 // Convert FPU flags into +1,0,-1
5303 pipe_class floating_cmp( iRegI dst, regF src1, regF src2 ) %{
5304 src1 : E(read);
5305 src2 : E(read);
5306 dst : E(write);
5307 FA : R;
5308 MS : R(2);
5309 BR : R(2);
5310 %}
5312 // Compare for p < q, and conditionally add y
5313 pipe_class cadd_cmpltmask( iRegI p, iRegI q, iRegI y ) %{
5314 p : E(read);
5315 q : E(read);
5316 y : E(read);
5317 IALU : R(3);
5318 %}
5320 // Perform a compare, then move conditionally in a branch delay slot.
5321 pipe_class min_max( iRegI src2, iRegI srcdst ) %{
5322 src2 : E(read);
5323 srcdst : E(read);
5324 IALU : R;
5325 BR : R;
5326 %}
5328 // Define the class for the Nop node
5329 define %{
5330 MachNop = ialu_nop;
5331 %}
5333 %}
5335 //----------INSTRUCTIONS-------------------------------------------------------
5337 //------------Special Stack Slot instructions - no match rules-----------------
// Move raw 32-bit stack-slot contents into a float register via LDF.
5338 instruct stkI_to_regF(regF dst, stackSlotI src) %{
5339 // No match rule to avoid chain rule match.
5340 effect(DEF dst, USE src);
5341 ins_cost(MEMORY_REF_COST);
5342 size(4);
5343 format %{ "LDF $src,$dst\t! stkI to regF" %}
5344 opcode(Assembler::ldf_op3);
5345 ins_encode(simple_form3_mem_reg(src, dst));
5346 ins_pipe(floadF_stk);
5347 %}
// Move raw 64-bit stack-slot contents into a double register via LDDF.
5349 instruct stkL_to_regD(regD dst, stackSlotL src) %{
5350 // No match rule to avoid chain rule match.
5351 effect(DEF dst, USE src);
5352 ins_cost(MEMORY_REF_COST);
5353 size(4);
5354 format %{ "LDDF $src,$dst\t! stkL to regD" %}
5355 opcode(Assembler::lddf_op3);
5356 ins_encode(simple_form3_mem_reg(src, dst));
5357 ins_pipe(floadD_stk);
5358 %}
// Spill a float register to an int-sized stack slot via STF.
5360 instruct regF_to_stkI(stackSlotI dst, regF src) %{
5361 // No match rule to avoid chain rule match.
5362 effect(DEF dst, USE src);
5363 ins_cost(MEMORY_REF_COST);
5364 size(4);
5365 format %{ "STF $src,$dst\t! regF to stkI" %}
5366 opcode(Assembler::stf_op3);
5367 ins_encode(simple_form3_mem_reg(dst, src));
5368 ins_pipe(fstoreF_stk_reg);
5369 %}
// Spill a double register to a long-sized stack slot via STDF.
5371 instruct regD_to_stkL(stackSlotL dst, regD src) %{
5372 // No match rule to avoid chain rule match.
5373 effect(DEF dst, USE src);
5374 ins_cost(MEMORY_REF_COST);
5375 size(4);
5376 format %{ "STDF $src,$dst\t! regD to stkL" %}
5377 opcode(Assembler::stdf_op3);
5378 ins_encode(simple_form3_mem_reg(dst, src));
5379 ins_pipe(fstoreD_stk_reg);
5380 %}
// Store an int into the high word of a long stack slot and zero the low
// word (two STW instructions, hence double cost/size).
5382 instruct regI_to_stkLHi(stackSlotL dst, iRegI src) %{
5383 effect(DEF dst, USE src);
5384 ins_cost(MEMORY_REF_COST*2);
5385 size(8);
5386 format %{ "STW $src,$dst.hi\t! long\n\t"
5387 "STW R_G0,$dst.lo" %}
5388 opcode(Assembler::stw_op3);
5389 ins_encode(simple_form3_mem_reg(dst, src), form3_mem_plus_4_reg(dst, R_G0));
5390 ins_pipe(lstoreI_stk_reg);
5391 %}
// Store a long register into a double-sized stack slot via STX.
5393 instruct regL_to_stkD(stackSlotD dst, iRegL src) %{
5394 // No match rule to avoid chain rule match.
5395 effect(DEF dst, USE src);
5396 ins_cost(MEMORY_REF_COST);
5397 size(4);
5398 format %{ "STX $src,$dst\t! regL to stkD" %}
5399 opcode(Assembler::stx_op3);
5400 ins_encode(simple_form3_mem_reg( dst, src ) );
5401 ins_pipe(istore_stk_reg);
5402 %}
5404 //---------- Chain stack slots between similar types --------
5406 // Load integer from stack slot
// Chain rule: reload a spilled int from its stack slot (LDUW).
5407 instruct stkI_to_regI( iRegI dst, stackSlotI src ) %{
5408 match(Set dst src);
5409 ins_cost(MEMORY_REF_COST);
5411 size(4);
5412 format %{ "LDUW $src,$dst\t!stk" %}
5413 opcode(Assembler::lduw_op3);
5414 ins_encode(simple_form3_mem_reg( src, dst ) );
5415 ins_pipe(iload_mem);
5416 %}
5418 // Store integer to stack slot
// Chain rule: spill an int register to its stack slot (STW).
5419 instruct regI_to_stkI( stackSlotI dst, iRegI src ) %{
5420 match(Set dst src);
5421 ins_cost(MEMORY_REF_COST);
5423 size(4);
5424 format %{ "STW $src,$dst\t!stk" %}
5425 opcode(Assembler::stw_op3);
5426 ins_encode(simple_form3_mem_reg( dst, src ) );
5427 ins_pipe(istore_mem_reg);
5428 %}
5430 // Load long from stack slot
// Chain rule: reload a spilled long from its stack slot (LDX).
5431 instruct stkL_to_regL( iRegL dst, stackSlotL src ) %{
5432 match(Set dst src);
5434 ins_cost(MEMORY_REF_COST);
5435 size(4);
5436 format %{ "LDX $src,$dst\t! long" %}
5437 opcode(Assembler::ldx_op3);
5438 ins_encode(simple_form3_mem_reg( src, dst ) );
5439 ins_pipe(iload_mem);
5440 %}
5442 // Store long to stack slot
// Chain rule: spill a long register to its stack slot (STX).
5443 instruct regL_to_stkL(stackSlotL dst, iRegL src) %{
5444 match(Set dst src);
5446 ins_cost(MEMORY_REF_COST);
5447 size(4);
5448 format %{ "STX $src,$dst\t! long" %}
5449 opcode(Assembler::stx_op3);
5450 ins_encode(simple_form3_mem_reg( dst, src ) );
5451 ins_pipe(istore_mem_reg);
5452 %}
// Pointer spill/reload chain rules: 64-bit builds use LDX/STX (8-byte
// pointers); 32-bit builds use LDUW/STW (4-byte pointers).
5454 #ifdef _LP64
5455 // Load pointer from stack slot, 64-bit encoding
5456 instruct stkP_to_regP( iRegP dst, stackSlotP src ) %{
5457 match(Set dst src);
5458 ins_cost(MEMORY_REF_COST);
5459 size(4);
5460 format %{ "LDX $src,$dst\t!ptr" %}
5461 opcode(Assembler::ldx_op3);
5462 ins_encode(simple_form3_mem_reg( src, dst ) );
5463 ins_pipe(iload_mem);
5464 %}
5466 // Store pointer to stack slot
5467 instruct regP_to_stkP(stackSlotP dst, iRegP src) %{
5468 match(Set dst src);
5469 ins_cost(MEMORY_REF_COST);
5470 size(4);
5471 format %{ "STX $src,$dst\t!ptr" %}
5472 opcode(Assembler::stx_op3);
5473 ins_encode(simple_form3_mem_reg( dst, src ) );
5474 ins_pipe(istore_mem_reg);
5475 %}
5476 #else // _LP64
5477 // Load pointer from stack slot, 32-bit encoding
5478 instruct stkP_to_regP( iRegP dst, stackSlotP src ) %{
5479 match(Set dst src);
5480 ins_cost(MEMORY_REF_COST);
5481 format %{ "LDUW $src,$dst\t!ptr" %}
5482 opcode(Assembler::lduw_op3, Assembler::ldst_op);
5483 ins_encode(simple_form3_mem_reg( src, dst ) );
5484 ins_pipe(iload_mem);
5485 %}
5487 // Store pointer to stack slot
5488 instruct regP_to_stkP(stackSlotP dst, iRegP src) %{
5489 match(Set dst src);
5490 ins_cost(MEMORY_REF_COST);
5491 format %{ "STW $src,$dst\t!ptr" %}
5492 opcode(Assembler::stw_op3, Assembler::ldst_op);
5493 ins_encode(simple_form3_mem_reg( dst, src ) );
5494 ins_pipe(istore_mem_reg);
5495 %}
5496 #endif // _LP64
5498 //------------Special Nop instructions for bundling - no match rules-----------
5499 // Nop using the A0 functional unit
// Bundle-filler nop occupying the A0 integer ALU (encoded as OR, cost 0).
5500 instruct Nop_A0() %{
5501 ins_cost(0);
5503 format %{ "NOP ! Alu Pipeline" %}
5504 opcode(Assembler::or_op3, Assembler::arith_op);
5505 ins_encode( form2_nop() );
5506 ins_pipe(ialu_nop_A0);
5507 %}
5509 // Nop using the A1 functional unit
5510 instruct Nop_A1( ) %{
5511 ins_cost(0);
5513 format %{ "NOP ! Alu Pipeline" %}
5514 opcode(Assembler::or_op3, Assembler::arith_op);
5515 ins_encode( form2_nop() );
5516 ins_pipe(ialu_nop_A1);
5517 %}
5519 // Nop using the memory functional unit
5520 instruct Nop_MS( ) %{
5521 ins_cost(0);
5523 format %{ "NOP ! Memory Pipeline" %}
5524 ins_encode( emit_mem_nop );
5525 ins_pipe(mem_nop);
5526 %}
5528 // Nop using the floating add functional unit
5529 instruct Nop_FA( ) %{
5530 ins_cost(0);
5532 format %{ "NOP ! Floating Add Pipeline" %}
5533 ins_encode( emit_fadd_nop );
5534 ins_pipe(fadd_nop);
5535 %}
5537 // Nop using the branch functional unit
5538 instruct Nop_BR( ) %{
5539 ins_cost(0);
5541 format %{ "NOP ! Branch Pipeline" %}
5542 ins_encode( emit_br_nop );
5543 ins_pipe(br_nop);
5544 %}
5546 //----------Load/Store/Move Instructions---------------------------------------
5547 //----------Load Instructions--------------------------------------------------
// Load Byte (8bit signed)
instruct loadB(iRegI dst, memory mem) %{
  match(Set dst (LoadB mem));
  ins_cost(MEMORY_REF_COST);
  size(4);
  format %{ "LDSB $mem,$dst\t! byte" %}
  ins_encode %{
    __ ldsb($mem$$Address, $dst$$Register);
  %}
  ins_pipe(iload_mask_mem);
%}

// Load Byte (8bit signed) into a Long Register
// LDSB sign-extends to 64 bits, so the ConvI2L is free.
instruct loadB2L(iRegL dst, memory mem) %{
  match(Set dst (ConvI2L (LoadB mem)));
  ins_cost(MEMORY_REF_COST);
  size(4);
  format %{ "LDSB $mem,$dst\t! byte -> long" %}
  ins_encode %{
    __ ldsb($mem$$Address, $dst$$Register);
  %}
  ins_pipe(iload_mask_mem);
%}

// Load Unsigned Byte (8bit UNsigned) into an int reg
instruct loadUB(iRegI dst, memory mem) %{
  match(Set dst (LoadUB mem));
  ins_cost(MEMORY_REF_COST);
  size(4);
  format %{ "LDUB $mem,$dst\t! ubyte" %}
  ins_encode %{
    __ ldub($mem$$Address, $dst$$Register);
  %}
  ins_pipe(iload_mem);
%}

// Load Unsigned Byte (8bit UNsigned) into a Long Register
// LDUB zero-extends to 64 bits, so the ConvI2L is free.
instruct loadUB2L(iRegL dst, memory mem) %{
  match(Set dst (ConvI2L (LoadUB mem)));
  ins_cost(MEMORY_REF_COST);
  size(4);
  format %{ "LDUB $mem,$dst\t! ubyte -> long" %}
  ins_encode %{
    __ ldub($mem$$Address, $dst$$Register);
  %}
  ins_pipe(iload_mem);
%}

// Load Unsigned Byte (8 bit UNsigned) with 8-bit mask into Long Register
// The mask fits a 13-bit immediate, so one AND after the load suffices.
instruct loadUB2L_immI8(iRegL dst, memory mem, immI8 mask) %{
  match(Set dst (ConvI2L (AndI (LoadUB mem) mask)));
  ins_cost(MEMORY_REF_COST + DEFAULT_COST);
  size(2*4);
  format %{ "LDUB $mem,$dst\t# ubyte & 8-bit mask -> long\n\t"
            "AND $dst,$mask,$dst" %}
  ins_encode %{
    __ ldub($mem$$Address, $dst$$Register);
    __ and3($dst$$Register, $mask$$constant, $dst$$Register);
  %}
  ins_pipe(iload_mem);
%}
// Load Short (16bit signed)
instruct loadS(iRegI dst, memory mem) %{
  match(Set dst (LoadS mem));
  ins_cost(MEMORY_REF_COST);
  size(4);
  format %{ "LDSH $mem,$dst\t! short" %}
  ins_encode %{
    __ ldsh($mem$$Address, $dst$$Register);
  %}
  ins_pipe(iload_mask_mem);
%}

// Load Short (16 bit signed) to Byte (8 bit signed)
// Matches (x << 24) >> 24 applied to a short load and folds it into a
// single LDSB of the low-order byte (offset +1 because SPARC is big-endian).
instruct loadS2B(iRegI dst, indOffset13m7 mem, immI_24 twentyfour) %{
  match(Set dst (RShiftI (LShiftI (LoadS mem) twentyfour) twentyfour));
  ins_cost(MEMORY_REF_COST);
  size(4);
  format %{ "LDSB $mem+1,$dst\t! short -> byte" %}
  ins_encode %{
    __ ldsb($mem$$Address, $dst$$Register, 1);
  %}
  ins_pipe(iload_mask_mem);
%}

// Load Short (16bit signed) into a Long Register
// LDSH sign-extends to 64 bits, so the ConvI2L is free.
instruct loadS2L(iRegL dst, memory mem) %{
  match(Set dst (ConvI2L (LoadS mem)));
  ins_cost(MEMORY_REF_COST);
  size(4);
  format %{ "LDSH $mem,$dst\t! short -> long" %}
  ins_encode %{
    __ ldsh($mem$$Address, $dst$$Register);
  %}
  ins_pipe(iload_mask_mem);
%}
// Load Unsigned Short/Char (16bit UNsigned)
instruct loadUS(iRegI dst, memory mem) %{
  match(Set dst (LoadUS mem));
  ins_cost(MEMORY_REF_COST);
  size(4);
  format %{ "LDUH $mem,$dst\t! ushort/char" %}
  ins_encode %{
    __ lduh($mem$$Address, $dst$$Register);
  %}
  ins_pipe(iload_mem);
%}

// Load Unsigned Short/Char (16 bit UNsigned) to Byte (8 bit signed)
// (x << 24) >> 24 of a char load is just a signed byte load of the low
// byte; +1 offset selects the LSB on big-endian SPARC.
instruct loadUS2B(iRegI dst, indOffset13m7 mem, immI_24 twentyfour) %{
  match(Set dst (RShiftI (LShiftI (LoadUS mem) twentyfour) twentyfour));
  ins_cost(MEMORY_REF_COST);
  size(4);
  format %{ "LDSB $mem+1,$dst\t! ushort -> byte" %}
  ins_encode %{
    __ ldsb($mem$$Address, $dst$$Register, 1);
  %}
  ins_pipe(iload_mask_mem);
%}

// Load Unsigned Short/Char (16bit UNsigned) into a Long Register
// LDUH zero-extends to 64 bits, so the ConvI2L is free.
instruct loadUS2L(iRegL dst, memory mem) %{
  match(Set dst (ConvI2L (LoadUS mem)));
  ins_cost(MEMORY_REF_COST);
  size(4);
  format %{ "LDUH $mem,$dst\t! ushort/char -> long" %}
  ins_encode %{
    __ lduh($mem$$Address, $dst$$Register);
  %}
  ins_pipe(iload_mem);
%}

// Load Unsigned Short/Char (16bit UNsigned) with mask 0xFF into a Long Register
// Masking with 0xFF reduces the char load to a single LDUB of its low byte.
instruct loadUS2L_immI_255(iRegL dst, indOffset13m7 mem, immI_255 mask) %{
  match(Set dst (ConvI2L (AndI (LoadUS mem) mask)));
  ins_cost(MEMORY_REF_COST);
  size(4);
  format %{ "LDUB $mem+1,$dst\t! ushort/char & 0xFF -> long" %}
  ins_encode %{
    __ ldub($mem$$Address, $dst$$Register, 1); // LSB is index+1 on BE
  %}
  ins_pipe(iload_mem);
%}

// Load Unsigned Short/Char (16bit UNsigned) with a 13-bit mask into a Long Register
// Mask fits the SPARC 13-bit immediate field: load then AND-with-immediate.
instruct loadUS2L_immI13(iRegL dst, memory mem, immI13 mask) %{
  match(Set dst (ConvI2L (AndI (LoadUS mem) mask)));
  ins_cost(MEMORY_REF_COST + DEFAULT_COST);
  size(2*4);
  format %{ "LDUH $mem,$dst\t! ushort/char & 13-bit mask -> long\n\t"
            "AND $dst,$mask,$dst" %}
  ins_encode %{
    Register Rdst = $dst$$Register;
    __ lduh($mem$$Address, Rdst);
    __ and3(Rdst, $mask$$constant, Rdst);
  %}
  ins_pipe(iload_mem);
%}

// Load Unsigned Short/Char (16bit UNsigned) with a 16-bit mask into a Long Register
// Mask does not fit an immediate: materialize it in tmp with SET, then AND.
instruct loadUS2L_immI16(iRegL dst, memory mem, immI16 mask, iRegL tmp) %{
  match(Set dst (ConvI2L (AndI (LoadUS mem) mask)));
  effect(TEMP dst, TEMP tmp);
  ins_cost(MEMORY_REF_COST + 2*DEFAULT_COST);
  size((3+1)*4); // set may use two instructions.
  format %{ "LDUH $mem,$dst\t! ushort/char & 16-bit mask -> long\n\t"
            "SET $mask,$tmp\n\t"
            "AND $dst,$tmp,$dst" %}
  ins_encode %{
    Register Rdst = $dst$$Register;
    Register Rtmp = $tmp$$Register;
    __ lduh($mem$$Address, Rdst);
    __ set($mask$$constant, Rtmp);
    __ and3(Rdst, Rtmp, Rdst);
  %}
  ins_pipe(iload_mem);
%}
// Load Integer
instruct loadI(iRegI dst, memory mem) %{
  match(Set dst (LoadI mem));
  ins_cost(MEMORY_REF_COST);
  size(4);
  format %{ "LDUW $mem,$dst\t! int" %}
  ins_encode %{
    __ lduw($mem$$Address, $dst$$Register);
  %}
  ins_pipe(iload_mem);
%}

// Load Integer to Byte (8 bit signed)
// Folds (x << 24) >> 24 of an int load into LDSB of the lowest byte
// (+3 offset because SPARC is big-endian).
instruct loadI2B(iRegI dst, indOffset13m7 mem, immI_24 twentyfour) %{
  match(Set dst (RShiftI (LShiftI (LoadI mem) twentyfour) twentyfour));
  ins_cost(MEMORY_REF_COST);
  size(4);
  format %{ "LDSB $mem+3,$dst\t! int -> byte" %}
  ins_encode %{
    __ ldsb($mem$$Address, $dst$$Register, 3);
  %}
  ins_pipe(iload_mask_mem);
%}

// Load Integer to Unsigned Byte (8 bit UNsigned)
// Folds (LoadI & 0xFF) into a single LDUB of the lowest byte.
instruct loadI2UB(iRegI dst, indOffset13m7 mem, immI_255 mask) %{
  match(Set dst (AndI (LoadI mem) mask));
  ins_cost(MEMORY_REF_COST);
  size(4);
  format %{ "LDUB $mem+3,$dst\t! int -> ubyte" %}
  ins_encode %{
    __ ldub($mem$$Address, $dst$$Register, 3);
  %}
  ins_pipe(iload_mask_mem);
%}

// Load Integer to Short (16 bit signed)
// Folds (x << 16) >> 16 into LDSH of the low halfword (+2 on big-endian).
instruct loadI2S(iRegI dst, indOffset13m7 mem, immI_16 sixteen) %{
  match(Set dst (RShiftI (LShiftI (LoadI mem) sixteen) sixteen));
  ins_cost(MEMORY_REF_COST);
  size(4);
  format %{ "LDSH $mem+2,$dst\t! int -> short" %}
  ins_encode %{
    __ ldsh($mem$$Address, $dst$$Register, 2);
  %}
  ins_pipe(iload_mask_mem);
%}

// Load Integer to Unsigned Short (16 bit UNsigned)
// Folds (LoadI & 0xFFFF) into LDUH of the low halfword.
instruct loadI2US(iRegI dst, indOffset13m7 mem, immI_65535 mask) %{
  match(Set dst (AndI (LoadI mem) mask));
  ins_cost(MEMORY_REF_COST);
  size(4);
  format %{ "LDUH $mem+2,$dst\t! int -> ushort/char" %}
  ins_encode %{
    __ lduh($mem$$Address, $dst$$Register, 2);
  %}
  ins_pipe(iload_mask_mem);
%}
// Load Integer into a Long Register
// LDSW sign-extends the 32-bit word to 64 bits, making ConvI2L free.
instruct loadI2L(iRegL dst, memory mem) %{
  match(Set dst (ConvI2L (LoadI mem)));
  ins_cost(MEMORY_REF_COST);
  size(4);
  format %{ "LDSW $mem,$dst\t! int -> long" %}
  ins_encode %{
    __ ldsw($mem$$Address, $dst$$Register);
  %}
  ins_pipe(iload_mask_mem);
%}

// Load Integer with mask 0xFF into a Long Register
instruct loadI2L_immI_255(iRegL dst, indOffset13m7 mem, immI_255 mask) %{
  match(Set dst (ConvI2L (AndI (LoadI mem) mask)));
  ins_cost(MEMORY_REF_COST);
  size(4);
  format %{ "LDUB $mem+3,$dst\t! int & 0xFF -> long" %}
  ins_encode %{
    __ ldub($mem$$Address, $dst$$Register, 3); // LSB is index+3 on BE
  %}
  ins_pipe(iload_mem);
%}

// Load Integer with mask 0xFFFF into a Long Register
instruct loadI2L_immI_65535(iRegL dst, indOffset13m7 mem, immI_65535 mask) %{
  match(Set dst (ConvI2L (AndI (LoadI mem) mask)));
  ins_cost(MEMORY_REF_COST);
  size(4);
  format %{ "LDUH $mem+2,$dst\t! int & 0xFFFF -> long" %}
  ins_encode %{
    __ lduh($mem$$Address, $dst$$Register, 2); // LSW is index+2 on BE
  %}
  ins_pipe(iload_mem);
%}

// Load Integer with a 13-bit mask into a Long Register
// LDUW zero-extends; the mask fits a 13-bit immediate, so one AND suffices.
instruct loadI2L_immI13(iRegL dst, memory mem, immI13 mask) %{
  match(Set dst (ConvI2L (AndI (LoadI mem) mask)));
  ins_cost(MEMORY_REF_COST + DEFAULT_COST);
  size(2*4);
  format %{ "LDUW $mem,$dst\t! int & 13-bit mask -> long\n\t"
            "AND $dst,$mask,$dst" %}
  ins_encode %{
    Register Rdst = $dst$$Register;
    __ lduw($mem$$Address, Rdst);
    __ and3(Rdst, $mask$$constant, Rdst);
  %}
  ins_pipe(iload_mem);
%}

// Load Integer with a 32-bit mask into a Long Register
// General mask: materialize it in tmp with SET (up to 2 insns), then AND.
instruct loadI2L_immI(iRegL dst, memory mem, immI mask, iRegL tmp) %{
  match(Set dst (ConvI2L (AndI (LoadI mem) mask)));
  effect(TEMP dst, TEMP tmp);
  ins_cost(MEMORY_REF_COST + 2*DEFAULT_COST);
  size((3+1)*4); // set may use two instructions.
  format %{ "LDUW $mem,$dst\t! int & 32-bit mask -> long\n\t"
            "SET $mask,$tmp\n\t"
            "AND $dst,$tmp,$dst" %}
  ins_encode %{
    Register Rdst = $dst$$Register;
    Register Rtmp = $tmp$$Register;
    __ lduw($mem$$Address, Rdst);
    __ set($mask$$constant, Rtmp);
    __ and3(Rdst, Rtmp, Rdst);
  %}
  ins_pipe(iload_mem);
%}
// Load Unsigned Integer into a Long Register
// LDUW zero-extends, implementing the unsigned int -> long conversion.
instruct loadUI2L(iRegL dst, memory mem) %{
  match(Set dst (LoadUI2L mem));
  ins_cost(MEMORY_REF_COST);
  size(4);
  format %{ "LDUW $mem,$dst\t! uint -> long" %}
  ins_encode %{
    __ lduw($mem$$Address, $dst$$Register);
  %}
  ins_pipe(iload_mem);
%}

// Load Long - aligned
instruct loadL(iRegL dst, memory mem ) %{
  match(Set dst (LoadL mem));
  ins_cost(MEMORY_REF_COST);
  size(4);
  format %{ "LDX $mem,$dst\t! long" %}
  ins_encode %{
    __ ldx($mem$$Address, $dst$$Register);
  %}
  ins_pipe(iload_mem);
%}
// Load Long - UNaligned
// LDX requires 8-byte alignment, so an unaligned long is assembled from
// two 32-bit loads: low half into O7 (killed), high half shifted, then ORed.
instruct loadL_unaligned(iRegL dst, memory mem, o7RegI tmp) %{
  match(Set dst (LoadL_unaligned mem));
  effect(KILL tmp);
  ins_cost(MEMORY_REF_COST*2+DEFAULT_COST);
  size(16);
  format %{ "LDUW $mem+4,R_O7\t! misaligned long\n"
            "\tLDUW $mem ,$dst\n"
            "\tSLLX #32, $dst, $dst\n"
            "\tOR $dst, R_O7, $dst" %}
  opcode(Assembler::lduw_op3);
  ins_encode(form3_mem_reg_long_unaligned_marshal( mem, dst ));
  ins_pipe(iload_mem);
%}

// Load Range
// Loads an array-length/range word (always a 32-bit unsigned value).
instruct loadRange(iRegI dst, memory mem) %{
  match(Set dst (LoadRange mem));
  ins_cost(MEMORY_REF_COST);
  size(4);
  format %{ "LDUW $mem,$dst\t! range" %}
  opcode(Assembler::lduw_op3);
  ins_encode(simple_form3_mem_reg( mem, dst ) );
  ins_pipe(iload_mem);
%}
// Load Integer into %f register (for fitos/fitod)
// An int destined for int->float conversion is loaded directly into an
// FP register so the FITOS/FITOD can consume it without a register move.
instruct loadI_freg(regF dst, memory mem) %{
  match(Set dst (LoadI mem));
  ins_cost(MEMORY_REF_COST);
  size(4);
  format %{ "LDF $mem,$dst\t! for fitos/fitod" %}
  opcode(Assembler::ldf_op3);
  ins_encode(simple_form3_mem_reg( mem, dst ) );
  ins_pipe(floadF_mem);
%}
// Load Pointer
// Pointer width differs by data model: 32-bit LDUW on ILP32, 64-bit LDX on LP64.
instruct loadP(iRegP dst, memory mem) %{
  match(Set dst (LoadP mem));
  ins_cost(MEMORY_REF_COST);
  size(4);
#ifndef _LP64
  format %{ "LDUW $mem,$dst\t! ptr" %}
  ins_encode %{
    __ lduw($mem$$Address, $dst$$Register);
  %}
#else
  format %{ "LDX $mem,$dst\t! ptr" %}
  ins_encode %{
    __ ldx($mem$$Address, $dst$$Register);
  %}
#endif
  ins_pipe(iload_mem);
%}

// Load Compressed Pointer
// Narrow oops are stored as 32-bit values; decode happens in a separate node.
instruct loadN(iRegN dst, memory mem) %{
  match(Set dst (LoadN mem));
  ins_cost(MEMORY_REF_COST);
  size(4);
  format %{ "LDUW $mem,$dst\t! compressed ptr" %}
  ins_encode %{
    __ lduw($mem$$Address, $dst$$Register);
  %}
  ins_pipe(iload_mem);
%}

// Load Klass Pointer
// Full-width klass pointer (used when klass pointers are not compressed).
instruct loadKlass(iRegP dst, memory mem) %{
  match(Set dst (LoadKlass mem));
  ins_cost(MEMORY_REF_COST);
  size(4);
#ifndef _LP64
  format %{ "LDUW $mem,$dst\t! klass ptr" %}
  ins_encode %{
    __ lduw($mem$$Address, $dst$$Register);
  %}
#else
  format %{ "LDX $mem,$dst\t! klass ptr" %}
  ins_encode %{
    __ ldx($mem$$Address, $dst$$Register);
  %}
#endif
  ins_pipe(iload_mem);
%}

// Load narrow Klass Pointer
// 32-bit compressed klass pointer; decoding is a separate DecodeNKlass node.
instruct loadNKlass(iRegN dst, memory mem) %{
  match(Set dst (LoadNKlass mem));
  ins_cost(MEMORY_REF_COST);
  size(4);
  format %{ "LDUW $mem,$dst\t! compressed klass ptr" %}
  ins_encode %{
    __ lduw($mem$$Address, $dst$$Register);
  %}
  ins_pipe(iload_mem);
%}
// Load Double
instruct loadD(regD dst, memory mem) %{
  match(Set dst (LoadD mem));
  ins_cost(MEMORY_REF_COST);
  size(4);
  format %{ "LDDF $mem,$dst" %}
  opcode(Assembler::lddf_op3);
  ins_encode(simple_form3_mem_reg( mem, dst ) );
  ins_pipe(floadD_mem);
%}

// Load Double - UNaligned
// LDDF needs 8-byte alignment; an unaligned double is loaded as two LDF
// single loads into the high and low halves of a regD_low pair.
instruct loadD_unaligned(regD_low dst, memory mem ) %{
  match(Set dst (LoadD_unaligned mem));
  ins_cost(MEMORY_REF_COST*2+DEFAULT_COST);
  size(8);
  format %{ "LDF $mem ,$dst.hi\t! misaligned double\n"
            "\tLDF $mem+4,$dst.lo\t!" %}
  opcode(Assembler::ldf_op3);
  ins_encode( form3_mem_reg_double_unaligned( mem, dst ));
  ins_pipe(iload_mem);
%}

// Load Float
instruct loadF(regF dst, memory mem) %{
  match(Set dst (LoadF mem));
  ins_cost(MEMORY_REF_COST);
  size(4);
  format %{ "LDF $mem,$dst" %}
  opcode(Assembler::ldf_op3);
  ins_encode(simple_form3_mem_reg( mem, dst ) );
  ins_pipe(floadF_mem);
%}
// Load Constant
// General 32-bit constant: SET may expand to SETHI+OR (hence cost 3/2).
instruct loadConI( iRegI dst, immI src ) %{
  match(Set dst src);
  ins_cost(DEFAULT_COST * 3/2);
  format %{ "SET $src,$dst" %}
  ins_encode( Set32(src, dst) );
  ins_pipe(ialu_hi_lo_reg);
%}

// Constant that fits a 13-bit immediate: single MOV (OR with %g0).
instruct loadConI13( iRegI dst, immI13 src ) %{
  match(Set dst src);
  size(4);
  format %{ "MOV $src,$dst" %}
  ins_encode( Set13( src, dst ) );
  ins_pipe(ialu_imm);
%}
#ifndef _LP64
// Load pointer constant (32-bit VM). The relocation type of the operand
// decides how the constant is materialized: oop and metadata constants
// need relocation records; raw pointers are just SET.
instruct loadConP(iRegP dst, immP con) %{
  match(Set dst con);
  ins_cost(DEFAULT_COST * 3/2);
  format %{ "SET $con,$dst\t!ptr" %}
  ins_encode %{
    relocInfo::relocType constant_reloc = _opnds[1]->constant_reloc();
    intptr_t val = $con$$constant;
    if (constant_reloc == relocInfo::oop_type) {
      __ set_oop_constant((jobject) val, $dst$$Register);
    } else if (constant_reloc == relocInfo::metadata_type) {
      __ set_metadata_constant((Metadata*)val, $dst$$Register);
    } else { // non-oop pointers, e.g. card mark base, heap top
      assert(constant_reloc == relocInfo::none, "unexpected reloc type");
      __ set(val, $dst$$Register);
    }
  %}
  ins_pipe(loadConP);
%}
#else
// 64-bit VM: pointer constants are either cheap enough to SET inline
// (immP_set) or loaded from the constant table (immP_load).
instruct loadConP_set(iRegP dst, immP_set con) %{
  match(Set dst con);
  ins_cost(DEFAULT_COST * 3/2);
  format %{ "SET $con,$dst\t! ptr" %}
  ins_encode %{
    relocInfo::relocType constant_reloc = _opnds[1]->constant_reloc();
    intptr_t val = $con$$constant;
    if (constant_reloc == relocInfo::oop_type) {
      __ set_oop_constant((jobject) val, $dst$$Register);
    } else if (constant_reloc == relocInfo::metadata_type) {
      __ set_metadata_constant((Metadata*)val, $dst$$Register);
    } else { // non-oop pointers, e.g. card mark base, heap top
      assert(constant_reloc == relocInfo::none, "unexpected reloc type");
      __ set(val, $dst$$Register);
    }
  %}
  ins_pipe(loadConP);
%}

// Pointer constant fetched from the per-nmethod constant table.
instruct loadConP_load(iRegP dst, immP_load con) %{
  match(Set dst con);
  ins_cost(MEMORY_REF_COST);
  format %{ "LD [$constanttablebase + $constantoffset],$dst\t! load from constant table: ptr=$con" %}
  ins_encode %{
    RegisterOrConstant con_offset = __ ensure_simm13_or_reg($constantoffset($con), $dst$$Register);
    __ ld_ptr($constanttablebase, con_offset, $dst$$Register);
  %}
  ins_pipe(loadConP);
%}

// Non-oop pointer constant cheap enough to materialize with SET.
instruct loadConP_no_oop_cheap(iRegP dst, immP_no_oop_cheap con) %{
  match(Set dst con);
  ins_cost(DEFAULT_COST * 3/2);
  format %{ "SET $con,$dst\t! non-oop ptr" %}
  ins_encode %{
    __ set($con$$constant, $dst$$Register);
  %}
  ins_pipe(loadConP);
%}
#endif // _LP64
// Null pointer constant: a single CLR (clear register).
instruct loadConP0(iRegP dst, immP0 src) %{
  match(Set dst src);
  size(4);
  format %{ "CLR $dst\t!ptr" %}
  ins_encode %{
    __ clr($dst$$Register);
  %}
  ins_pipe(ialu_imm);
%}

// Polling-page address constant. The polling page is page-aligned, so a
// single SETHI of its high bits is sufficient.
instruct loadConP_poll(iRegP dst, immP_poll src) %{
  match(Set dst src);
  ins_cost(DEFAULT_COST);
  format %{ "SET $src,$dst\t!ptr" %}
  ins_encode %{
    AddressLiteral polling_page(os::get_polling_page());
    __ sethi(polling_page, reg_to_register_object($dst$$reg));
  %}
  ins_pipe(loadConP_poll);
%}

// Compressed null pointer: CLR.
instruct loadConN0(iRegN dst, immN0 src) %{
  match(Set dst src);
  size(4);
  format %{ "CLR $dst\t! compressed NULL ptr" %}
  ins_encode %{
    __ clr($dst$$Register);
  %}
  ins_pipe(ialu_imm);
%}

// Narrow-oop constant; set_narrow_oop emits the value with an oop relocation.
instruct loadConN(iRegN dst, immN src) %{
  match(Set dst src);
  ins_cost(DEFAULT_COST * 3/2);
  format %{ "SET $src,$dst\t! compressed ptr" %}
  ins_encode %{
    Register dst = $dst$$Register;
    __ set_narrow_oop((jobject)$src$$constant, dst);
  %}
  ins_pipe(ialu_hi_lo_reg);
%}

// Narrow-klass constant (compressed class pointers); uses a metadata reloc.
instruct loadConNKlass(iRegN dst, immNKlass src) %{
  match(Set dst src);
  ins_cost(DEFAULT_COST * 3/2);
  format %{ "SET $src,$dst\t! compressed klass ptr" %}
  ins_encode %{
    Register dst = $dst$$Register;
    __ set_narrow_klass((Klass*)$src$$constant, dst);
  %}
  ins_pipe(ialu_hi_lo_reg);
%}
// Materialize long value (predicated by immL_cheap).
// "Cheap" longs are built inline with set64, scratching O7.
instruct loadConL_set64(iRegL dst, immL_cheap con, o7RegL tmp) %{
  match(Set dst con);
  effect(KILL tmp);
  ins_cost(DEFAULT_COST * 3);
  format %{ "SET64 $con,$dst KILL $tmp\t! cheap long" %}
  ins_encode %{
    __ set64($con$$constant, $dst$$Register, $tmp$$Register);
  %}
  ins_pipe(loadConL);
%}

// Load long value from constant table (predicated by immL_expensive).
instruct loadConL_ldx(iRegL dst, immL_expensive con) %{
  match(Set dst con);
  ins_cost(MEMORY_REF_COST);
  format %{ "LDX [$constanttablebase + $constantoffset],$dst\t! load from constant table: long=$con" %}
  ins_encode %{
    RegisterOrConstant con_offset = __ ensure_simm13_or_reg($constantoffset($con), $dst$$Register);
    __ ldx($constanttablebase, con_offset, $dst$$Register);
  %}
  ins_pipe(loadConL);
%}

// Long zero: single-instruction clear.
instruct loadConL0( iRegL dst, immL0 src ) %{
  match(Set dst src);
  ins_cost(DEFAULT_COST);
  size(4);
  format %{ "CLR $dst\t! long" %}
  ins_encode( Set13( src, dst ) );
  ins_pipe(ialu_imm);
%}

// Long constant fitting a 13-bit immediate: single MOV.
instruct loadConL13( iRegL dst, immL13 src ) %{
  match(Set dst src);
  ins_cost(DEFAULT_COST * 2);
  size(4);
  format %{ "MOV $src,$dst\t! long" %}
  ins_encode( Set13( src, dst ) );
  ins_pipe(ialu_imm);
%}
// Float constant, loaded from the constant table; tmp holds the table
// offset when it does not fit a 13-bit immediate.
instruct loadConF(regF dst, immF con, o7RegI tmp) %{
  match(Set dst con);
  effect(KILL tmp);
  format %{ "LDF [$constanttablebase + $constantoffset],$dst\t! load from constant table: float=$con" %}
  ins_encode %{
    RegisterOrConstant con_offset = __ ensure_simm13_or_reg($constantoffset($con), $tmp$$Register);
    __ ldf(FloatRegisterImpl::S, $constanttablebase, con_offset, $dst$$FloatRegister);
  %}
  ins_pipe(loadConFD);
%}

// Double constant, loaded from the constant table (same scheme as loadConF).
instruct loadConD(regD dst, immD con, o7RegI tmp) %{
  match(Set dst con);
  effect(KILL tmp);
  format %{ "LDDF [$constanttablebase + $constantoffset],$dst\t! load from constant table: double=$con" %}
  ins_encode %{
    // XXX This is a quick fix for 6833573.
    //__ ldf(FloatRegisterImpl::D, $constanttablebase, $constantoffset($con), $dst$$FloatRegister);
    RegisterOrConstant con_offset = __ ensure_simm13_or_reg($constantoffset($con), $tmp$$Register);
    __ ldf(FloatRegisterImpl::D, $constanttablebase, con_offset, as_DoubleFloatRegister($dst$$reg));
  %}
  ins_pipe(loadConFD);
%}
// Prefetch instructions.
// Must be safe to execute with invalid address (cannot fault).

// Prefetch for read, fcn 0 = "prefetch for several reads".
instruct prefetchr( memory mem ) %{
  match( PrefetchRead mem );
  ins_cost(MEMORY_REF_COST);
  size(4);
  format %{ "PREFETCH $mem,0\t! Prefetch read-many" %}
  opcode(Assembler::prefetch_op3);
  ins_encode( form3_mem_prefetch_read( mem ) );
  ins_pipe(iload_mem);
%}

// Prefetch for write, fcn 2 = "prefetch for several writes (and reads)".
instruct prefetchw( memory mem ) %{
  match( PrefetchWrite mem );
  ins_cost(MEMORY_REF_COST);
  size(4);
  format %{ "PREFETCH $mem,2\t! Prefetch write-many (and read)" %}
  opcode(Assembler::prefetch_op3);
  ins_encode( form3_mem_prefetch_write( mem ) );
  ins_pipe(iload_mem);
%}

// Prefetch instructions for allocation.

// Default allocation prefetch (AllocatePrefetchInstr == 0): ordinary
// PREFETCH-for-write, cannot fault.
instruct prefetchAlloc( memory mem ) %{
  predicate(AllocatePrefetchInstr == 0);
  match( PrefetchAllocation mem );
  ins_cost(MEMORY_REF_COST);
  size(4);
  format %{ "PREFETCH $mem,2\t! Prefetch allocation" %}
  opcode(Assembler::prefetch_op3);
  ins_encode( form3_mem_prefetch_write( mem ) );
  ins_pipe(iload_mem);
%}

// Use BIS instruction to prefetch for allocation.
// Could fault, need space at the end of TLAB.
instruct prefetchAlloc_bis( iRegP dst ) %{
  predicate(AllocatePrefetchInstr == 1);
  match( PrefetchAllocation dst );
  ins_cost(MEMORY_REF_COST);
  size(4);
  format %{ "STXA [$dst]\t! // Prefetch allocation using BIS" %}
  ins_encode %{
    // Block-init store of %g0: allocates the cache line without reading memory.
    __ stxa(G0, $dst$$Register, G0, Assembler::ASI_ST_BLKINIT_PRIMARY);
  %}
  ins_pipe(istore_mem_reg);
%}
// Next code is used for finding next cache line address to prefetch.
// Masks a pointer (via CastP2X / CastX2P) down to a cache-line boundary;
// the And node is 32-bit on ILP32 and 64-bit on LP64.
#ifndef _LP64
instruct cacheLineAdr( iRegP dst, iRegP src, immI13 mask ) %{
  match(Set dst (CastX2P (AndI (CastP2X src) mask)));
  ins_cost(DEFAULT_COST);
  size(4);
  format %{ "AND $src,$mask,$dst\t! next cache line address" %}
  ins_encode %{
    __ and3($src$$Register, $mask$$constant, $dst$$Register);
  %}
  ins_pipe(ialu_reg_imm);
%}
#else
instruct cacheLineAdr( iRegP dst, iRegP src, immL13 mask ) %{
  match(Set dst (CastX2P (AndL (CastP2X src) mask)));
  ins_cost(DEFAULT_COST);
  size(4);
  format %{ "AND $src,$mask,$dst\t! next cache line address" %}
  ins_encode %{
    __ and3($src$$Register, $mask$$constant, $dst$$Register);
  %}
  ins_pipe(ialu_reg_imm);
%}
#endif
//----------Store Instructions-------------------------------------------------
// Store Byte
instruct storeB(memory mem, iRegI src) %{
  match(Set mem (StoreB mem src));
  ins_cost(MEMORY_REF_COST);
  size(4);
  format %{ "STB $src,$mem\t! byte" %}
  opcode(Assembler::stb_op3);
  ins_encode(simple_form3_mem_reg( mem, src ) );
  ins_pipe(istore_mem_reg);
%}

// Store zero byte: use %g0 (hardwired zero) as the source register.
instruct storeB0(memory mem, immI0 src) %{
  match(Set mem (StoreB mem src));
  ins_cost(MEMORY_REF_COST);
  size(4);
  format %{ "STB $src,$mem\t! byte" %}
  opcode(Assembler::stb_op3);
  ins_encode(simple_form3_mem_reg( mem, R_G0 ) );
  ins_pipe(istore_mem_zero);
%}

// Card-mark store: write a zero byte into the card table entry.
instruct storeCM0(memory mem, immI0 src) %{
  match(Set mem (StoreCM mem src));
  ins_cost(MEMORY_REF_COST);
  size(4);
  format %{ "STB $src,$mem\t! CMS card-mark byte 0" %}
  opcode(Assembler::stb_op3);
  ins_encode(simple_form3_mem_reg( mem, R_G0 ) );
  ins_pipe(istore_mem_zero);
%}
// Store Char/Short
instruct storeC(memory mem, iRegI src) %{
  match(Set mem (StoreC mem src));
  ins_cost(MEMORY_REF_COST);
  size(4);
  format %{ "STH $src,$mem\t! short" %}
  opcode(Assembler::sth_op3);
  ins_encode(simple_form3_mem_reg( mem, src ) );
  ins_pipe(istore_mem_reg);
%}

// Store zero short: source is %g0.
instruct storeC0(memory mem, immI0 src) %{
  match(Set mem (StoreC mem src));
  ins_cost(MEMORY_REF_COST);
  size(4);
  format %{ "STH $src,$mem\t! short" %}
  opcode(Assembler::sth_op3);
  ins_encode(simple_form3_mem_reg( mem, R_G0 ) );
  ins_pipe(istore_mem_zero);
%}
// Store Integer
instruct storeI(memory mem, iRegI src) %{
  match(Set mem (StoreI mem src));
  ins_cost(MEMORY_REF_COST);
  size(4);
  format %{ "STW $src,$mem" %}
  opcode(Assembler::stw_op3);
  ins_encode(simple_form3_mem_reg( mem, src ) );
  ins_pipe(istore_mem_reg);
%}

// Store Long
instruct storeL(memory mem, iRegL src) %{
  match(Set mem (StoreL mem src));
  ins_cost(MEMORY_REF_COST);
  size(4);
  format %{ "STX $src,$mem\t! long" %}
  opcode(Assembler::stx_op3);
  ins_encode(simple_form3_mem_reg( mem, src ) );
  ins_pipe(istore_mem_reg);
%}

// Store zero int: source is %g0.
instruct storeI0(memory mem, immI0 src) %{
  match(Set mem (StoreI mem src));
  ins_cost(MEMORY_REF_COST);
  size(4);
  format %{ "STW $src,$mem" %}
  opcode(Assembler::stw_op3);
  ins_encode(simple_form3_mem_reg( mem, R_G0 ) );
  ins_pipe(istore_mem_zero);
%}

// Store zero long: source is %g0.
instruct storeL0(memory mem, immL0 src) %{
  match(Set mem (StoreL mem src));
  ins_cost(MEMORY_REF_COST);
  size(4);
  format %{ "STX $src,$mem" %}
  opcode(Assembler::stx_op3);
  ins_encode(simple_form3_mem_reg( mem, R_G0 ) );
  ins_pipe(istore_mem_zero);
%}
// Store Integer from float register (used after fstoi)
// The FSTOI/FDTOI result lives in an FP register; store it with STF
// directly instead of moving it to an integer register first.
instruct storeI_Freg(memory mem, regF src) %{
  match(Set mem (StoreI mem src));
  ins_cost(MEMORY_REF_COST);
  size(4);
  format %{ "STF $src,$mem\t! after fstoi/fdtoi" %}
  opcode(Assembler::stf_op3);
  ins_encode(simple_form3_mem_reg( mem, src ) );
  ins_pipe(fstoreF_mem_reg);
%}
// Store Pointer
// Pointer-width store: STW on ILP32, STX on LP64.
instruct storeP(memory dst, sp_ptr_RegP src) %{
  match(Set dst (StoreP dst src));
  ins_cost(MEMORY_REF_COST);
  size(4);
#ifndef _LP64
  format %{ "STW $src,$dst\t! ptr" %}
  opcode(Assembler::stw_op3, 0, REGP_OP);
#else
  format %{ "STX $src,$dst\t! ptr" %}
  opcode(Assembler::stx_op3, 0, REGP_OP);
#endif
  ins_encode( form3_mem_reg( dst, src ) );
  ins_pipe(istore_mem_spORreg);
%}

// Store null pointer: source is %g0.
instruct storeP0(memory dst, immP0 src) %{
  match(Set dst (StoreP dst src));
  ins_cost(MEMORY_REF_COST);
  size(4);
#ifndef _LP64
  format %{ "STW $src,$dst\t! ptr" %}
  opcode(Assembler::stw_op3, 0, REGP_OP);
#else
  format %{ "STX $src,$dst\t! ptr" %}
  opcode(Assembler::stx_op3, 0, REGP_OP);
#endif
  ins_encode( form3_mem_reg( dst, R_G0 ) );
  ins_pipe(istore_mem_zero);
%}
// Store Compressed Pointer
// Narrow oops are 32-bit; pick the register-indexed or displacement form
// of STW depending on whether the address has an index register.
instruct storeN(memory dst, iRegN src) %{
  match(Set dst (StoreN dst src));
  ins_cost(MEMORY_REF_COST);
  size(4);
  format %{ "STW $src,$dst\t! compressed ptr" %}
  ins_encode %{
    Register base = as_Register($dst$$base);
    Register index = as_Register($dst$$index);
    Register src = $src$$Register;
    if (index != G0) {
      __ stw(src, base, index);
    } else {
      __ stw(src, base, $dst$$disp);
    }
  %}
  ins_pipe(istore_mem_spORreg);
%}

// Store compressed klass pointer (same addressing scheme as storeN).
instruct storeNKlass(memory dst, iRegN src) %{
  match(Set dst (StoreNKlass dst src));
  ins_cost(MEMORY_REF_COST);
  size(4);
  format %{ "STW $src,$dst\t! compressed klass ptr" %}
  ins_encode %{
    Register base = as_Register($dst$$base);
    Register index = as_Register($dst$$index);
    Register src = $src$$Register;
    if (index != G0) {
      __ stw(src, base, index);
    } else {
      __ stw(src, base, $dst$$disp);
    }
  %}
  ins_pipe(istore_mem_spORreg);
%}

// Store compressed null pointer: write a 32-bit zero.
instruct storeN0(memory dst, immN0 src) %{
  match(Set dst (StoreN dst src));
  ins_cost(MEMORY_REF_COST);
  size(4);
  format %{ "STW $src,$dst\t! compressed ptr" %}
  ins_encode %{
    Register base = as_Register($dst$$base);
    Register index = as_Register($dst$$index);
    if (index != G0) {
      __ stw(0, base, index);
    } else {
      __ stw(0, base, $dst$$disp);
    }
  %}
  ins_pipe(istore_mem_zero);
%}
// Store Double
instruct storeD( memory mem, regD src) %{
  match(Set mem (StoreD mem src));
  ins_cost(MEMORY_REF_COST);
  size(4);
  format %{ "STDF $src,$mem" %}
  opcode(Assembler::stdf_op3);
  ins_encode(simple_form3_mem_reg( mem, src ) );
  ins_pipe(fstoreD_mem_reg);
%}

// Store double zero: an 8-byte integer store of %g0, avoiding an FP register.
instruct storeD0( memory mem, immD0 src) %{
  match(Set mem (StoreD mem src));
  ins_cost(MEMORY_REF_COST);
  size(4);
  format %{ "STX $src,$mem" %}
  opcode(Assembler::stx_op3);
  ins_encode(simple_form3_mem_reg( mem, R_G0 ) );
  ins_pipe(fstoreD_mem_zero);
%}

// Store Float
instruct storeF( memory mem, regF src) %{
  match(Set mem (StoreF mem src));
  ins_cost(MEMORY_REF_COST);
  size(4);
  format %{ "STF $src,$mem" %}
  opcode(Assembler::stf_op3);
  ins_encode(simple_form3_mem_reg( mem, src ) );
  ins_pipe(fstoreF_mem_reg);
%}

// Store float zero: a 4-byte integer store of %g0.
instruct storeF0( memory mem, immF0 src) %{
  match(Set mem (StoreF mem src));
  ins_cost(MEMORY_REF_COST);
  size(4);
  format %{ "STW $src,$mem\t! storeF0" %}
  opcode(Assembler::stw_op3);
  ins_encode(simple_form3_mem_reg( mem, R_G0 ) );
  ins_pipe(fstoreF_mem_zero);
%}
// Convert oop pointer into compressed form
// The _not_null variants are selected by the node's type: when the oop is
// statically known non-null the cheaper encode/decode sequence is used.
instruct encodeHeapOop(iRegN dst, iRegP src) %{
  predicate(n->bottom_type()->make_ptr()->ptr() != TypePtr::NotNull);
  match(Set dst (EncodeP src));
  format %{ "encode_heap_oop $src, $dst" %}
  ins_encode %{
    __ encode_heap_oop($src$$Register, $dst$$Register);
  %}
  ins_pipe(ialu_reg);
%}

instruct encodeHeapOop_not_null(iRegN dst, iRegP src) %{
  predicate(n->bottom_type()->make_ptr()->ptr() == TypePtr::NotNull);
  match(Set dst (EncodeP src));
  format %{ "encode_heap_oop_not_null $src, $dst" %}
  ins_encode %{
    __ encode_heap_oop_not_null($src$$Register, $dst$$Register);
  %}
  ins_pipe(ialu_reg);
%}

instruct decodeHeapOop(iRegP dst, iRegN src) %{
  predicate(n->bottom_type()->is_oopptr()->ptr() != TypePtr::NotNull &&
            n->bottom_type()->is_oopptr()->ptr() != TypePtr::Constant);
  match(Set dst (DecodeN src));
  format %{ "decode_heap_oop $src, $dst" %}
  ins_encode %{
    __ decode_heap_oop($src$$Register, $dst$$Register);
  %}
  ins_pipe(ialu_reg);
%}

instruct decodeHeapOop_not_null(iRegP dst, iRegN src) %{
  predicate(n->bottom_type()->is_oopptr()->ptr() == TypePtr::NotNull ||
            n->bottom_type()->is_oopptr()->ptr() == TypePtr::Constant);
  match(Set dst (DecodeN src));
  format %{ "decode_heap_oop_not_null $src, $dst" %}
  ins_encode %{
    __ decode_heap_oop_not_null($src$$Register, $dst$$Register);
  %}
  ins_pipe(ialu_reg);
%}

// Compressed class pointers: klass pointers are never null, so only the
// not-null encode/decode forms exist.
instruct encodeKlass_not_null(iRegN dst, iRegP src) %{
  match(Set dst (EncodePKlass src));
  format %{ "encode_klass_not_null $src, $dst" %}
  ins_encode %{
    __ encode_klass_not_null($src$$Register, $dst$$Register);
  %}
  ins_pipe(ialu_reg);
%}

instruct decodeKlass_not_null(iRegP dst, iRegN src) %{
  match(Set dst (DecodeNKlass src));
  format %{ "decode_klass_not_null $src, $dst" %}
  ins_encode %{
    __ decode_klass_not_null($src$$Register, $dst$$Register);
  %}
  ins_pipe(ialu_reg);
%}
6648 //----------MemBar Instructions-----------------------------------------------
6649 // Memory barrier flavors
6651 instruct membar_acquire() %{
6652 match(MemBarAcquire);
6653 ins_cost(4*MEMORY_REF_COST);
6655 size(0);
6656 format %{ "MEMBAR-acquire" %}
6657 ins_encode( enc_membar_acquire );
6658 ins_pipe(long_memory_op);
6659 %}
6661 instruct membar_acquire_lock() %{
6662 match(MemBarAcquireLock);
6663 ins_cost(0);
6665 size(0);
6666 format %{ "!MEMBAR-acquire (CAS in prior FastLock so empty encoding)" %}
6667 ins_encode( );
6668 ins_pipe(empty);
6669 %}
6671 instruct membar_release() %{
6672 match(MemBarRelease);
6673 ins_cost(4*MEMORY_REF_COST);
6675 size(0);
6676 format %{ "MEMBAR-release" %}
6677 ins_encode( enc_membar_release );
6678 ins_pipe(long_memory_op);
6679 %}
6681 instruct membar_release_lock() %{
6682 match(MemBarReleaseLock);
6683 ins_cost(0);
6685 size(0);
6686 format %{ "!MEMBAR-release (CAS in succeeding FastUnlock so empty encoding)" %}
6687 ins_encode( );
6688 ins_pipe(empty);
6689 %}
6691 instruct membar_volatile() %{
6692 match(MemBarVolatile);
6693 ins_cost(4*MEMORY_REF_COST);
6695 size(4);
6696 format %{ "MEMBAR-volatile" %}
6697 ins_encode( enc_membar_volatile );
6698 ins_pipe(long_memory_op);
6699 %}
6701 instruct unnecessary_membar_volatile() %{
6702 match(MemBarVolatile);
6703 predicate(Matcher::post_store_load_barrier(n));
6704 ins_cost(0);
6706 size(0);
6707 format %{ "!MEMBAR-volatile (unnecessary so empty encoding)" %}
6708 ins_encode( );
6709 ins_pipe(empty);
6710 %}
6712 instruct membar_storestore() %{
6713 match(MemBarStoreStore);
6714 ins_cost(0);
6716 size(0);
6717 format %{ "!MEMBAR-storestore (empty encoding)" %}
6718 ins_encode( );
6719 ins_pipe(empty);
6720 %}
6722 //----------Register Move Instructions-----------------------------------------
6723 instruct roundDouble_nop(regD dst) %{
6724 match(Set dst (RoundDouble dst));
6725 ins_cost(0);
6726 // SPARC results are already "rounded" (i.e., normal-format IEEE)
6727 ins_encode( );
6728 ins_pipe(empty);
6729 %}
6732 instruct roundFloat_nop(regF dst) %{
6733 match(Set dst (RoundFloat dst));
6734 ins_cost(0);
6735 // SPARC results are already "rounded" (i.e., normal-format IEEE)
6736 ins_encode( );
6737 ins_pipe(empty);
6738 %}
6741 // Cast Index to Pointer for unsafe natives
6742 instruct castX2P(iRegX src, iRegP dst) %{
6743 match(Set dst (CastX2P src));
6745 format %{ "MOV $src,$dst\t! IntX->Ptr" %}
6746 ins_encode( form3_g0_rs2_rd_move( src, dst ) );
6747 ins_pipe(ialu_reg);
6748 %}
6750 // Cast Pointer to Index for unsafe natives
6751 instruct castP2X(iRegP src, iRegX dst) %{
6752 match(Set dst (CastP2X src));
6754 format %{ "MOV $src,$dst\t! Ptr->IntX" %}
6755 ins_encode( form3_g0_rs2_rd_move( src, dst ) );
6756 ins_pipe(ialu_reg);
6757 %}
6759 instruct stfSSD(stackSlotD stkSlot, regD src) %{
6760 // %%%% TO DO: Tell the coalescer that this kind of node is a copy!
6761 match(Set stkSlot src); // chain rule
6762 ins_cost(MEMORY_REF_COST);
6763 format %{ "STDF $src,$stkSlot\t!stk" %}
6764 opcode(Assembler::stdf_op3);
6765 ins_encode(simple_form3_mem_reg(stkSlot, src));
6766 ins_pipe(fstoreD_stk_reg);
6767 %}
6769 instruct ldfSSD(regD dst, stackSlotD stkSlot) %{
6770 // %%%% TO DO: Tell the coalescer that this kind of node is a copy!
6771 match(Set dst stkSlot); // chain rule
6772 ins_cost(MEMORY_REF_COST);
6773 format %{ "LDDF $stkSlot,$dst\t!stk" %}
6774 opcode(Assembler::lddf_op3);
6775 ins_encode(simple_form3_mem_reg(stkSlot, dst));
6776 ins_pipe(floadD_stk);
6777 %}
6779 instruct stfSSF(stackSlotF stkSlot, regF src) %{
6780 // %%%% TO DO: Tell the coalescer that this kind of node is a copy!
6781 match(Set stkSlot src); // chain rule
6782 ins_cost(MEMORY_REF_COST);
6783 format %{ "STF $src,$stkSlot\t!stk" %}
6784 opcode(Assembler::stf_op3);
6785 ins_encode(simple_form3_mem_reg(stkSlot, src));
6786 ins_pipe(fstoreF_stk_reg);
6787 %}
6789 //----------Conditional Move---------------------------------------------------
6790 // Conditional move
6791 instruct cmovIP_reg(cmpOpP cmp, flagsRegP pcc, iRegI dst, iRegI src) %{
6792 match(Set dst (CMoveI (Binary cmp pcc) (Binary dst src)));
6793 ins_cost(150);
6794 format %{ "MOV$cmp $pcc,$src,$dst" %}
6795 ins_encode( enc_cmov_reg(cmp,dst,src, (Assembler::ptr_cc)) );
6796 ins_pipe(ialu_reg);
6797 %}
6799 instruct cmovIP_imm(cmpOpP cmp, flagsRegP pcc, iRegI dst, immI11 src) %{
6800 match(Set dst (CMoveI (Binary cmp pcc) (Binary dst src)));
6801 ins_cost(140);
6802 format %{ "MOV$cmp $pcc,$src,$dst" %}
6803 ins_encode( enc_cmov_imm(cmp,dst,src, (Assembler::ptr_cc)) );
6804 ins_pipe(ialu_imm);
6805 %}
6807 instruct cmovII_reg(cmpOp cmp, flagsReg icc, iRegI dst, iRegI src) %{
6808 match(Set dst (CMoveI (Binary cmp icc) (Binary dst src)));
6809 ins_cost(150);
6810 size(4);
6811 format %{ "MOV$cmp $icc,$src,$dst" %}
6812 ins_encode( enc_cmov_reg(cmp,dst,src, (Assembler::icc)) );
6813 ins_pipe(ialu_reg);
6814 %}
6816 instruct cmovII_imm(cmpOp cmp, flagsReg icc, iRegI dst, immI11 src) %{
6817 match(Set dst (CMoveI (Binary cmp icc) (Binary dst src)));
6818 ins_cost(140);
6819 size(4);
6820 format %{ "MOV$cmp $icc,$src,$dst" %}
6821 ins_encode( enc_cmov_imm(cmp,dst,src, (Assembler::icc)) );
6822 ins_pipe(ialu_imm);
6823 %}
6825 instruct cmovIIu_reg(cmpOpU cmp, flagsRegU icc, iRegI dst, iRegI src) %{
6826 match(Set dst (CMoveI (Binary cmp icc) (Binary dst src)));
6827 ins_cost(150);
6828 size(4);
6829 format %{ "MOV$cmp $icc,$src,$dst" %}
6830 ins_encode( enc_cmov_reg(cmp,dst,src, (Assembler::icc)) );
6831 ins_pipe(ialu_reg);
6832 %}
6834 instruct cmovIIu_imm(cmpOpU cmp, flagsRegU icc, iRegI dst, immI11 src) %{
6835 match(Set dst (CMoveI (Binary cmp icc) (Binary dst src)));
6836 ins_cost(140);
6837 size(4);
6838 format %{ "MOV$cmp $icc,$src,$dst" %}
6839 ins_encode( enc_cmov_imm(cmp,dst,src, (Assembler::icc)) );
6840 ins_pipe(ialu_imm);
6841 %}
6843 instruct cmovIF_reg(cmpOpF cmp, flagsRegF fcc, iRegI dst, iRegI src) %{
6844 match(Set dst (CMoveI (Binary cmp fcc) (Binary dst src)));
6845 ins_cost(150);
6846 size(4);
6847 format %{ "MOV$cmp $fcc,$src,$dst" %}
6848 ins_encode( enc_cmov_reg_f(cmp,dst,src, fcc) );
6849 ins_pipe(ialu_reg);
6850 %}
6852 instruct cmovIF_imm(cmpOpF cmp, flagsRegF fcc, iRegI dst, immI11 src) %{
6853 match(Set dst (CMoveI (Binary cmp fcc) (Binary dst src)));
6854 ins_cost(140);
6855 size(4);
6856 format %{ "MOV$cmp $fcc,$src,$dst" %}
6857 ins_encode( enc_cmov_imm_f(cmp,dst,src, fcc) );
6858 ins_pipe(ialu_imm);
6859 %}
6861 // Conditional move for RegN. Only cmov(reg,reg).
6862 instruct cmovNP_reg(cmpOpP cmp, flagsRegP pcc, iRegN dst, iRegN src) %{
6863 match(Set dst (CMoveN (Binary cmp pcc) (Binary dst src)));
6864 ins_cost(150);
6865 format %{ "MOV$cmp $pcc,$src,$dst" %}
6866 ins_encode( enc_cmov_reg(cmp,dst,src, (Assembler::ptr_cc)) );
6867 ins_pipe(ialu_reg);
6868 %}
6870 // This instruction also works with CmpN so we don't need cmovNN_reg.
6871 instruct cmovNI_reg(cmpOp cmp, flagsReg icc, iRegN dst, iRegN src) %{
6872 match(Set dst (CMoveN (Binary cmp icc) (Binary dst src)));
6873 ins_cost(150);
6874 size(4);
6875 format %{ "MOV$cmp $icc,$src,$dst" %}
6876 ins_encode( enc_cmov_reg(cmp,dst,src, (Assembler::icc)) );
6877 ins_pipe(ialu_reg);
6878 %}
6880 // This instruction also works with CmpN so we don't need cmovNN_reg.
6881 instruct cmovNIu_reg(cmpOpU cmp, flagsRegU icc, iRegN dst, iRegN src) %{
6882 match(Set dst (CMoveN (Binary cmp icc) (Binary dst src)));
6883 ins_cost(150);
6884 size(4);
6885 format %{ "MOV$cmp $icc,$src,$dst" %}
6886 ins_encode( enc_cmov_reg(cmp,dst,src, (Assembler::icc)) );
6887 ins_pipe(ialu_reg);
6888 %}
6890 instruct cmovNF_reg(cmpOpF cmp, flagsRegF fcc, iRegN dst, iRegN src) %{
6891 match(Set dst (CMoveN (Binary cmp fcc) (Binary dst src)));
6892 ins_cost(150);
6893 size(4);
6894 format %{ "MOV$cmp $fcc,$src,$dst" %}
6895 ins_encode( enc_cmov_reg_f(cmp,dst,src, fcc) );
6896 ins_pipe(ialu_reg);
6897 %}
6899 // Conditional move
6900 instruct cmovPP_reg(cmpOpP cmp, flagsRegP pcc, iRegP dst, iRegP src) %{
6901 match(Set dst (CMoveP (Binary cmp pcc) (Binary dst src)));
6902 ins_cost(150);
6903 format %{ "MOV$cmp $pcc,$src,$dst\t! ptr" %}
6904 ins_encode( enc_cmov_reg(cmp,dst,src, (Assembler::ptr_cc)) );
6905 ins_pipe(ialu_reg);
6906 %}
6908 instruct cmovPP_imm(cmpOpP cmp, flagsRegP pcc, iRegP dst, immP0 src) %{
6909 match(Set dst (CMoveP (Binary cmp pcc) (Binary dst src)));
6910 ins_cost(140);
6911 format %{ "MOV$cmp $pcc,$src,$dst\t! ptr" %}
6912 ins_encode( enc_cmov_imm(cmp,dst,src, (Assembler::ptr_cc)) );
6913 ins_pipe(ialu_imm);
6914 %}
6916 // This instruction also works with CmpN so we don't need cmovPN_reg.
6917 instruct cmovPI_reg(cmpOp cmp, flagsReg icc, iRegP dst, iRegP src) %{
6918 match(Set dst (CMoveP (Binary cmp icc) (Binary dst src)));
6919 ins_cost(150);
6921 size(4);
6922 format %{ "MOV$cmp $icc,$src,$dst\t! ptr" %}
6923 ins_encode( enc_cmov_reg(cmp,dst,src, (Assembler::icc)) );
6924 ins_pipe(ialu_reg);
6925 %}
6927 instruct cmovPIu_reg(cmpOpU cmp, flagsRegU icc, iRegP dst, iRegP src) %{
6928 match(Set dst (CMoveP (Binary cmp icc) (Binary dst src)));
6929 ins_cost(150);
6931 size(4);
6932 format %{ "MOV$cmp $icc,$src,$dst\t! ptr" %}
6933 ins_encode( enc_cmov_reg(cmp,dst,src, (Assembler::icc)) );
6934 ins_pipe(ialu_reg);
6935 %}
6937 instruct cmovPI_imm(cmpOp cmp, flagsReg icc, iRegP dst, immP0 src) %{
6938 match(Set dst (CMoveP (Binary cmp icc) (Binary dst src)));
6939 ins_cost(140);
6941 size(4);
6942 format %{ "MOV$cmp $icc,$src,$dst\t! ptr" %}
6943 ins_encode( enc_cmov_imm(cmp,dst,src, (Assembler::icc)) );
6944 ins_pipe(ialu_imm);
6945 %}
6947 instruct cmovPIu_imm(cmpOpU cmp, flagsRegU icc, iRegP dst, immP0 src) %{
6948 match(Set dst (CMoveP (Binary cmp icc) (Binary dst src)));
6949 ins_cost(140);
6951 size(4);
6952 format %{ "MOV$cmp $icc,$src,$dst\t! ptr" %}
6953 ins_encode( enc_cmov_imm(cmp,dst,src, (Assembler::icc)) );
6954 ins_pipe(ialu_imm);
6955 %}
// Conditional move of a pointer selected on the float condition codes (fcc).
// Register-register form; see cmovPF_imm below for the null-immediate form.
6957 instruct cmovPF_reg(cmpOpF cmp, flagsRegF fcc, iRegP dst, iRegP src) %{
6958 match(Set dst (CMoveP (Binary cmp fcc) (Binary dst src)));
6959 ins_cost(150);
6960 size(4);
6961 format %{ "MOV$cmp $fcc,$src,$dst" %}
6962 ins_encode( enc_cmov_reg_f(cmp,dst,src, fcc) );
// Fixed: was ialu_imm, but this is a register-register move; every sibling
// reg-reg cmov (cmovIF_reg, cmovII_reg, cmovPI_reg, ...) uses ialu_reg, and
// the wrong class misinforms the pipeline scheduler.
6963 ins_pipe(ialu_reg);
6964 %}
6966 instruct cmovPF_imm(cmpOpF cmp, flagsRegF fcc, iRegP dst, immP0 src) %{
6967 match(Set dst (CMoveP (Binary cmp fcc) (Binary dst src)));
6968 ins_cost(140);
6969 size(4);
6970 format %{ "MOV$cmp $fcc,$src,$dst" %}
6971 ins_encode( enc_cmov_imm_f(cmp,dst,src, fcc) );
6972 ins_pipe(ialu_imm);
6973 %}
6975 // Conditional move
6976 instruct cmovFP_reg(cmpOpP cmp, flagsRegP pcc, regF dst, regF src) %{
6977 match(Set dst (CMoveF (Binary cmp pcc) (Binary dst src)));
6978 ins_cost(150);
6979 opcode(0x101);
6980 format %{ "FMOVD$cmp $pcc,$src,$dst" %}
6981 ins_encode( enc_cmovf_reg(cmp,dst,src, (Assembler::ptr_cc)) );
6982 ins_pipe(int_conditional_float_move);
6983 %}
6985 instruct cmovFI_reg(cmpOp cmp, flagsReg icc, regF dst, regF src) %{
6986 match(Set dst (CMoveF (Binary cmp icc) (Binary dst src)));
6987 ins_cost(150);
6989 size(4);
6990 format %{ "FMOVS$cmp $icc,$src,$dst" %}
6991 opcode(0x101);
6992 ins_encode( enc_cmovf_reg(cmp,dst,src, (Assembler::icc)) );
6993 ins_pipe(int_conditional_float_move);
6994 %}
6996 instruct cmovFIu_reg(cmpOpU cmp, flagsRegU icc, regF dst, regF src) %{
6997 match(Set dst (CMoveF (Binary cmp icc) (Binary dst src)));
6998 ins_cost(150);
7000 size(4);
7001 format %{ "FMOVS$cmp $icc,$src,$dst" %}
7002 opcode(0x101);
7003 ins_encode( enc_cmovf_reg(cmp,dst,src, (Assembler::icc)) );
7004 ins_pipe(int_conditional_float_move);
7005 %}
7007 // Conditional move,
7008 instruct cmovFF_reg(cmpOpF cmp, flagsRegF fcc, regF dst, regF src) %{
7009 match(Set dst (CMoveF (Binary cmp fcc) (Binary dst src)));
7010 ins_cost(150);
7011 size(4);
7012 format %{ "FMOVF$cmp $fcc,$src,$dst" %}
7013 opcode(0x1);
7014 ins_encode( enc_cmovff_reg(cmp,fcc,dst,src) );
7015 ins_pipe(int_conditional_double_move);
7016 %}
7018 // Conditional move
7019 instruct cmovDP_reg(cmpOpP cmp, flagsRegP pcc, regD dst, regD src) %{
7020 match(Set dst (CMoveD (Binary cmp pcc) (Binary dst src)));
7021 ins_cost(150);
7022 size(4);
7023 opcode(0x102);
7024 format %{ "FMOVD$cmp $pcc,$src,$dst" %}
7025 ins_encode( enc_cmovf_reg(cmp,dst,src, (Assembler::ptr_cc)) );
7026 ins_pipe(int_conditional_double_move);
7027 %}
7029 instruct cmovDI_reg(cmpOp cmp, flagsReg icc, regD dst, regD src) %{
7030 match(Set dst (CMoveD (Binary cmp icc) (Binary dst src)));
7031 ins_cost(150);
7033 size(4);
7034 format %{ "FMOVD$cmp $icc,$src,$dst" %}
7035 opcode(0x102);
7036 ins_encode( enc_cmovf_reg(cmp,dst,src, (Assembler::icc)) );
7037 ins_pipe(int_conditional_double_move);
7038 %}
7040 instruct cmovDIu_reg(cmpOpU cmp, flagsRegU icc, regD dst, regD src) %{
7041 match(Set dst (CMoveD (Binary cmp icc) (Binary dst src)));
7042 ins_cost(150);
7044 size(4);
7045 format %{ "FMOVD$cmp $icc,$src,$dst" %}
7046 opcode(0x102);
7047 ins_encode( enc_cmovf_reg(cmp,dst,src, (Assembler::icc)) );
7048 ins_pipe(int_conditional_double_move);
7049 %}
7051 // Conditional move,
7052 instruct cmovDF_reg(cmpOpF cmp, flagsRegF fcc, regD dst, regD src) %{
7053 match(Set dst (CMoveD (Binary cmp fcc) (Binary dst src)));
7054 ins_cost(150);
7055 size(4);
7056 format %{ "FMOVD$cmp $fcc,$src,$dst" %}
7057 opcode(0x2);
7058 ins_encode( enc_cmovff_reg(cmp,fcc,dst,src) );
7059 ins_pipe(int_conditional_double_move);
7060 %}
7062 // Conditional move
7063 instruct cmovLP_reg(cmpOpP cmp, flagsRegP pcc, iRegL dst, iRegL src) %{
7064 match(Set dst (CMoveL (Binary cmp pcc) (Binary dst src)));
7065 ins_cost(150);
7066 format %{ "MOV$cmp $pcc,$src,$dst\t! long" %}
7067 ins_encode( enc_cmov_reg(cmp,dst,src, (Assembler::ptr_cc)) );
7068 ins_pipe(ialu_reg);
7069 %}
7071 instruct cmovLP_imm(cmpOpP cmp, flagsRegP pcc, iRegL dst, immI11 src) %{
7072 match(Set dst (CMoveL (Binary cmp pcc) (Binary dst src)));
7073 ins_cost(140);
7074 format %{ "MOV$cmp $pcc,$src,$dst\t! long" %}
7075 ins_encode( enc_cmov_imm(cmp,dst,src, (Assembler::ptr_cc)) );
7076 ins_pipe(ialu_imm);
7077 %}
7079 instruct cmovLI_reg(cmpOp cmp, flagsReg icc, iRegL dst, iRegL src) %{
7080 match(Set dst (CMoveL (Binary cmp icc) (Binary dst src)));
7081 ins_cost(150);
7083 size(4);
7084 format %{ "MOV$cmp $icc,$src,$dst\t! long" %}
7085 ins_encode( enc_cmov_reg(cmp,dst,src, (Assembler::icc)) );
7086 ins_pipe(ialu_reg);
7087 %}
7090 instruct cmovLIu_reg(cmpOpU cmp, flagsRegU icc, iRegL dst, iRegL src) %{
7091 match(Set dst (CMoveL (Binary cmp icc) (Binary dst src)));
7092 ins_cost(150);
7094 size(4);
7095 format %{ "MOV$cmp $icc,$src,$dst\t! long" %}
7096 ins_encode( enc_cmov_reg(cmp,dst,src, (Assembler::icc)) );
7097 ins_pipe(ialu_reg);
7098 %}
7101 instruct cmovLF_reg(cmpOpF cmp, flagsRegF fcc, iRegL dst, iRegL src) %{
7102 match(Set dst (CMoveL (Binary cmp fcc) (Binary dst src)));
7103 ins_cost(150);
7105 size(4);
7106 format %{ "MOV$cmp $fcc,$src,$dst\t! long" %}
7107 ins_encode( enc_cmov_reg_f(cmp,dst,src, fcc) );
7108 ins_pipe(ialu_reg);
7109 %}
7113 //----------OS and Locking Instructions----------------------------------------
7115 // This name is KNOWN by the ADLC and cannot be changed.
7116 // The ADLC forces a 'TypeRawPtr::BOTTOM' output type
7117 // for this guy.
7118 instruct tlsLoadP(g2RegP dst) %{
7119 match(Set dst (ThreadLocal));
7121 size(0);
7122 ins_cost(0);
7123 format %{ "# TLS is in G2" %}
7124 ins_encode( /*empty encoding*/ );
7125 ins_pipe(ialu_none);
7126 %}
7128 instruct checkCastPP( iRegP dst ) %{
7129 match(Set dst (CheckCastPP dst));
7131 size(0);
7132 format %{ "# checkcastPP of $dst" %}
7133 ins_encode( /*empty encoding*/ );
7134 ins_pipe(empty);
7135 %}
7138 instruct castPP( iRegP dst ) %{
7139 match(Set dst (CastPP dst));
7140 format %{ "# castPP of $dst" %}
7141 ins_encode( /*empty encoding*/ );
7142 ins_pipe(empty);
7143 %}
7145 instruct castII( iRegI dst ) %{
7146 match(Set dst (CastII dst));
7147 format %{ "# castII of $dst" %}
7148 ins_encode( /*empty encoding*/ );
7149 ins_cost(0);
7150 ins_pipe(empty);
7151 %}
7153 //----------Arithmetic Instructions--------------------------------------------
7154 // Addition Instructions
7155 // Register Addition
7156 instruct addI_reg_reg(iRegI dst, iRegI src1, iRegI src2) %{
7157 match(Set dst (AddI src1 src2));
7159 size(4);
7160 format %{ "ADD $src1,$src2,$dst" %}
7161 ins_encode %{
7162 __ add($src1$$Register, $src2$$Register, $dst$$Register);
7163 %}
7164 ins_pipe(ialu_reg_reg);
7165 %}
7167 // Immediate Addition
7168 instruct addI_reg_imm13(iRegI dst, iRegI src1, immI13 src2) %{
7169 match(Set dst (AddI src1 src2));
7171 size(4);
7172 format %{ "ADD $src1,$src2,$dst" %}
7173 opcode(Assembler::add_op3, Assembler::arith_op);
7174 ins_encode( form3_rs1_simm13_rd( src1, src2, dst ) );
7175 ins_pipe(ialu_reg_imm);
7176 %}
7178 // Pointer Register Addition
7179 instruct addP_reg_reg(iRegP dst, iRegP src1, iRegX src2) %{
7180 match(Set dst (AddP src1 src2));
7182 size(4);
7183 format %{ "ADD $src1,$src2,$dst" %}
7184 opcode(Assembler::add_op3, Assembler::arith_op);
7185 ins_encode( form3_rs1_rs2_rd( src1, src2, dst ) );
7186 ins_pipe(ialu_reg_reg);
7187 %}
7189 // Pointer Immediate Addition
7190 instruct addP_reg_imm13(iRegP dst, iRegP src1, immX13 src2) %{
7191 match(Set dst (AddP src1 src2));
7193 size(4);
7194 format %{ "ADD $src1,$src2,$dst" %}
7195 opcode(Assembler::add_op3, Assembler::arith_op);
7196 ins_encode( form3_rs1_simm13_rd( src1, src2, dst ) );
7197 ins_pipe(ialu_reg_imm);
7198 %}
7200 // Long Addition
7201 instruct addL_reg_reg(iRegL dst, iRegL src1, iRegL src2) %{
7202 match(Set dst (AddL src1 src2));
7204 size(4);
7205 format %{ "ADD $src1,$src2,$dst\t! long" %}
7206 opcode(Assembler::add_op3, Assembler::arith_op);
7207 ins_encode( form3_rs1_rs2_rd( src1, src2, dst ) );
7208 ins_pipe(ialu_reg_reg);
7209 %}
7211 instruct addL_reg_imm13(iRegL dst, iRegL src1, immL13 con) %{
7212 match(Set dst (AddL src1 con));
7214 size(4);
7215 format %{ "ADD $src1,$con,$dst" %}
7216 opcode(Assembler::add_op3, Assembler::arith_op);
7217 ins_encode( form3_rs1_simm13_rd( src1, con, dst ) );
7218 ins_pipe(ialu_reg_imm);
7219 %}
7221 //----------Conditional_store--------------------------------------------------
7222 // Conditional-store of the updated heap-top.
7223 // Used during allocation of the shared heap.
7224 // Sets flags (EQ) on success. Implemented with a CASA on Sparc.
7226 // LoadP-locked. Same as a regular pointer load when used with a compare-swap
7227 instruct loadPLocked(iRegP dst, memory mem) %{
7228 match(Set dst (LoadPLocked mem));
7229 ins_cost(MEMORY_REF_COST);
7231 #ifndef _LP64
7232 size(4);
7233 format %{ "LDUW $mem,$dst\t! ptr" %}
7234 opcode(Assembler::lduw_op3, 0, REGP_OP);
7235 #else
7236 format %{ "LDX $mem,$dst\t! ptr" %}
7237 opcode(Assembler::ldx_op3, 0, REGP_OP);
7238 #endif
7239 ins_encode( form3_mem_reg( mem, dst ) );
7240 ins_pipe(iload_mem);
7241 %}
7243 instruct storePConditional( iRegP heap_top_ptr, iRegP oldval, g3RegP newval, flagsRegP pcc ) %{
7244 match(Set pcc (StorePConditional heap_top_ptr (Binary oldval newval)));
7245 effect( KILL newval );
7246 format %{ "CASA [$heap_top_ptr],$oldval,R_G3\t! If $oldval==[$heap_top_ptr] Then store R_G3 into [$heap_top_ptr], set R_G3=[$heap_top_ptr] in any case\n\t"
7247 "CMP R_G3,$oldval\t\t! See if we made progress" %}
7248 ins_encode( enc_cas(heap_top_ptr,oldval,newval) );
7249 ins_pipe( long_memory_op );
7250 %}
7252 // Conditional-store of an int value.
7253 instruct storeIConditional( iRegP mem_ptr, iRegI oldval, g3RegI newval, flagsReg icc ) %{
7254 match(Set icc (StoreIConditional mem_ptr (Binary oldval newval)));
7255 effect( KILL newval );
7256 format %{ "CASA [$mem_ptr],$oldval,$newval\t! If $oldval==[$mem_ptr] Then store $newval into [$mem_ptr], set $newval=[$mem_ptr] in any case\n\t"
7257 "CMP $oldval,$newval\t\t! See if we made progress" %}
7258 ins_encode( enc_cas(mem_ptr,oldval,newval) );
7259 ins_pipe( long_memory_op );
7260 %}
7262 // Conditional-store of a long value.
7263 instruct storeLConditional( iRegP mem_ptr, iRegL oldval, g3RegL newval, flagsRegL xcc ) %{
7264 match(Set xcc (StoreLConditional mem_ptr (Binary oldval newval)));
7265 effect( KILL newval );
7266 format %{ "CASXA [$mem_ptr],$oldval,$newval\t! If $oldval==[$mem_ptr] Then store $newval into [$mem_ptr], set $newval=[$mem_ptr] in any case\n\t"
7267 "CMP $oldval,$newval\t\t! See if we made progress" %}
7268 ins_encode( enc_cas(mem_ptr,oldval,newval) );
7269 ins_pipe( long_memory_op );
7270 %}
7272 // No flag versions for CompareAndSwap{P,I,L} because matcher can't match them
7274 instruct compareAndSwapL_bool(iRegP mem_ptr, iRegL oldval, iRegL newval, iRegI res, o7RegI tmp1, flagsReg ccr ) %{
7275 predicate(VM_Version::supports_cx8());
7276 match(Set res (CompareAndSwapL mem_ptr (Binary oldval newval)));
7277 effect( USE mem_ptr, KILL ccr, KILL tmp1);
7278 format %{
7279 "MOV $newval,O7\n\t"
7280 "CASXA [$mem_ptr],$oldval,O7\t! If $oldval==[$mem_ptr] Then store O7 into [$mem_ptr], set O7=[$mem_ptr] in any case\n\t"
7281 "CMP $oldval,O7\t\t! See if we made progress\n\t"
7282 "MOV 1,$res\n\t"
7283 "MOVne xcc,R_G0,$res"
7284 %}
7285 ins_encode( enc_casx(mem_ptr, oldval, newval),
7286 enc_lflags_ne_to_boolean(res) );
7287 ins_pipe( long_memory_op );
7288 %}
7291 instruct compareAndSwapI_bool(iRegP mem_ptr, iRegI oldval, iRegI newval, iRegI res, o7RegI tmp1, flagsReg ccr ) %{
7292 match(Set res (CompareAndSwapI mem_ptr (Binary oldval newval)));
7293 effect( USE mem_ptr, KILL ccr, KILL tmp1);
7294 format %{
7295 "MOV $newval,O7\n\t"
7296 "CASA [$mem_ptr],$oldval,O7\t! If $oldval==[$mem_ptr] Then store O7 into [$mem_ptr], set O7=[$mem_ptr] in any case\n\t"
7297 "CMP $oldval,O7\t\t! See if we made progress\n\t"
7298 "MOV 1,$res\n\t"
7299 "MOVne icc,R_G0,$res"
7300 %}
7301 ins_encode( enc_casi(mem_ptr, oldval, newval),
7302 enc_iflags_ne_to_boolean(res) );
7303 ins_pipe( long_memory_op );
7304 %}
7306 instruct compareAndSwapP_bool(iRegP mem_ptr, iRegP oldval, iRegP newval, iRegI res, o7RegI tmp1, flagsReg ccr ) %{
7307 #ifdef _LP64
7308 predicate(VM_Version::supports_cx8());
7309 #endif
7310 match(Set res (CompareAndSwapP mem_ptr (Binary oldval newval)));
7311 effect( USE mem_ptr, KILL ccr, KILL tmp1);
7312 format %{
7313 "MOV $newval,O7\n\t"
7314 "CASA_PTR [$mem_ptr],$oldval,O7\t! If $oldval==[$mem_ptr] Then store O7 into [$mem_ptr], set O7=[$mem_ptr] in any case\n\t"
7315 "CMP $oldval,O7\t\t! See if we made progress\n\t"
7316 "MOV 1,$res\n\t"
7317 "MOVne xcc,R_G0,$res"
7318 %}
7319 #ifdef _LP64
7320 ins_encode( enc_casx(mem_ptr, oldval, newval),
7321 enc_lflags_ne_to_boolean(res) );
7322 #else
7323 ins_encode( enc_casi(mem_ptr, oldval, newval),
7324 enc_iflags_ne_to_boolean(res) );
7325 #endif
7326 ins_pipe( long_memory_op );
7327 %}
7329 instruct compareAndSwapN_bool(iRegP mem_ptr, iRegN oldval, iRegN newval, iRegI res, o7RegI tmp1, flagsReg ccr ) %{
7330 match(Set res (CompareAndSwapN mem_ptr (Binary oldval newval)));
7331 effect( USE mem_ptr, KILL ccr, KILL tmp1);
7332 format %{
7333 "MOV $newval,O7\n\t"
7334 "CASA [$mem_ptr],$oldval,O7\t! If $oldval==[$mem_ptr] Then store O7 into [$mem_ptr], set O7=[$mem_ptr] in any case\n\t"
7335 "CMP $oldval,O7\t\t! See if we made progress\n\t"
7336 "MOV 1,$res\n\t"
7337 "MOVne icc,R_G0,$res"
7338 %}
7339 ins_encode( enc_casi(mem_ptr, oldval, newval),
7340 enc_iflags_ne_to_boolean(res) );
7341 ins_pipe( long_memory_op );
7342 %}
7344 instruct xchgI( memory mem, iRegI newval) %{
7345 match(Set newval (GetAndSetI mem newval));
7346 format %{ "SWAP [$mem],$newval" %}
7347 size(4);
7348 ins_encode %{
7349 __ swap($mem$$Address, $newval$$Register);
7350 %}
7351 ins_pipe( long_memory_op );
7352 %}
7354 #ifndef _LP64
7355 instruct xchgP( memory mem, iRegP newval) %{
7356 match(Set newval (GetAndSetP mem newval));
7357 format %{ "SWAP [$mem],$newval" %}
7358 size(4);
7359 ins_encode %{
7360 __ swap($mem$$Address, $newval$$Register);
7361 %}
7362 ins_pipe( long_memory_op );
7363 %}
7364 #endif
7366 instruct xchgN( memory mem, iRegN newval) %{
7367 match(Set newval (GetAndSetN mem newval));
7368 format %{ "SWAP [$mem],$newval" %}
7369 size(4);
7370 ins_encode %{
7371 __ swap($mem$$Address, $newval$$Register);
7372 %}
7373 ins_pipe( long_memory_op );
7374 %}
7376 //---------------------
7377 // Subtraction Instructions
7378 // Register Subtraction
7379 instruct subI_reg_reg(iRegI dst, iRegI src1, iRegI src2) %{
7380 match(Set dst (SubI src1 src2));
7382 size(4);
7383 format %{ "SUB $src1,$src2,$dst" %}
7384 opcode(Assembler::sub_op3, Assembler::arith_op);
7385 ins_encode( form3_rs1_rs2_rd( src1, src2, dst ) );
7386 ins_pipe(ialu_reg_reg);
7387 %}
7389 // Immediate Subtraction
7390 instruct subI_reg_imm13(iRegI dst, iRegI src1, immI13 src2) %{
7391 match(Set dst (SubI src1 src2));
7393 size(4);
7394 format %{ "SUB $src1,$src2,$dst" %}
7395 opcode(Assembler::sub_op3, Assembler::arith_op);
7396 ins_encode( form3_rs1_simm13_rd( src1, src2, dst ) );
7397 ins_pipe(ialu_reg_imm);
7398 %}
7400 instruct subI_zero_reg(iRegI dst, immI0 zero, iRegI src2) %{
7401 match(Set dst (SubI zero src2));
7403 size(4);
7404 format %{ "NEG $src2,$dst" %}
7405 opcode(Assembler::sub_op3, Assembler::arith_op);
7406 ins_encode( form3_rs1_rs2_rd( R_G0, src2, dst ) );
7407 ins_pipe(ialu_zero_reg);
7408 %}
7410 // Long subtraction
7411 instruct subL_reg_reg(iRegL dst, iRegL src1, iRegL src2) %{
7412 match(Set dst (SubL src1 src2));
7414 size(4);
7415 format %{ "SUB $src1,$src2,$dst\t! long" %}
7416 opcode(Assembler::sub_op3, Assembler::arith_op);
7417 ins_encode( form3_rs1_rs2_rd( src1, src2, dst ) );
7418 ins_pipe(ialu_reg_reg);
7419 %}
7421 // Immediate Subtraction
7422 instruct subL_reg_imm13(iRegL dst, iRegL src1, immL13 con) %{
7423 match(Set dst (SubL src1 con));
7425 size(4);
7426 format %{ "SUB $src1,$con,$dst\t! long" %}
7427 opcode(Assembler::sub_op3, Assembler::arith_op);
7428 ins_encode( form3_rs1_simm13_rd( src1, con, dst ) );
7429 ins_pipe(ialu_reg_imm);
7430 %}
7432 // Long negation
7433 instruct negL_reg_reg(iRegL dst, immL0 zero, iRegL src2) %{
7434 match(Set dst (SubL zero src2));
7436 size(4);
7437 format %{ "NEG $src2,$dst\t! long" %}
7438 opcode(Assembler::sub_op3, Assembler::arith_op);
7439 ins_encode( form3_rs1_rs2_rd( R_G0, src2, dst ) );
7440 ins_pipe(ialu_zero_reg);
7441 %}
7443 // Multiplication Instructions
7444 // Integer Multiplication
7445 // Register Multiplication
7446 instruct mulI_reg_reg(iRegI dst, iRegI src1, iRegI src2) %{
7447 match(Set dst (MulI src1 src2));
7449 size(4);
7450 format %{ "MULX $src1,$src2,$dst" %}
7451 opcode(Assembler::mulx_op3, Assembler::arith_op);
7452 ins_encode( form3_rs1_rs2_rd( src1, src2, dst ) );
7453 ins_pipe(imul_reg_reg);
7454 %}
7456 // Immediate Multiplication
7457 instruct mulI_reg_imm13(iRegI dst, iRegI src1, immI13 src2) %{
7458 match(Set dst (MulI src1 src2));
7460 size(4);
7461 format %{ "MULX $src1,$src2,$dst" %}
7462 opcode(Assembler::mulx_op3, Assembler::arith_op);
7463 ins_encode( form3_rs1_simm13_rd( src1, src2, dst ) );
7464 ins_pipe(imul_reg_imm);
7465 %}
7467 instruct mulL_reg_reg(iRegL dst, iRegL src1, iRegL src2) %{
7468 match(Set dst (MulL src1 src2));
7469 ins_cost(DEFAULT_COST * 5);
7470 size(4);
7471 format %{ "MULX $src1,$src2,$dst\t! long" %}
7472 opcode(Assembler::mulx_op3, Assembler::arith_op);
7473 ins_encode( form3_rs1_rs2_rd( src1, src2, dst ) );
7474 ins_pipe(mulL_reg_reg);
7475 %}
7477 // Immediate Multiplication
7478 instruct mulL_reg_imm13(iRegL dst, iRegL src1, immL13 src2) %{
7479 match(Set dst (MulL src1 src2));
7480 ins_cost(DEFAULT_COST * 5);
7481 size(4);
7482 format %{ "MULX $src1,$src2,$dst" %}
7483 opcode(Assembler::mulx_op3, Assembler::arith_op);
7484 ins_encode( form3_rs1_simm13_rd( src1, src2, dst ) );
7485 ins_pipe(mulL_reg_imm);
7486 %}
7488 // Integer Division
7489 // Register Division
7490 instruct divI_reg_reg(iRegI dst, iRegIsafe src1, iRegIsafe src2) %{
7491 match(Set dst (DivI src1 src2));
7492 ins_cost((2+71)*DEFAULT_COST);
7494 format %{ "SRA $src2,0,$src2\n\t"
7495 "SRA $src1,0,$src1\n\t"
7496 "SDIVX $src1,$src2,$dst" %}
7497 ins_encode( idiv_reg( src1, src2, dst ) );
7498 ins_pipe(sdiv_reg_reg);
7499 %}
7501 // Immediate Division
7502 instruct divI_reg_imm13(iRegI dst, iRegIsafe src1, immI13 src2) %{
7503 match(Set dst (DivI src1 src2));
7504 ins_cost((2+71)*DEFAULT_COST);
7506 format %{ "SRA $src1,0,$src1\n\t"
7507 "SDIVX $src1,$src2,$dst" %}
7508 ins_encode( idiv_imm( src1, src2, dst ) );
7509 ins_pipe(sdiv_reg_imm);
7510 %}
7512 //----------Div-By-10-Expansion------------------------------------------------
7513 // Extract hi bits of a 32x32->64 bit multiply.
7514 // Expand rule only, not matched
7515 instruct mul_hi(iRegIsafe dst, iRegIsafe src1, iRegIsafe src2 ) %{
7516 effect( DEF dst, USE src1, USE src2 );
7517 format %{ "MULX $src1,$src2,$dst\t! Used in div-by-10\n\t"
7518 "SRLX $dst,#32,$dst\t\t! Extract only hi word of result" %}
7519 ins_encode( enc_mul_hi(dst,src1,src2));
7520 ins_pipe(sdiv_reg_reg);
7521 %}
7523 // Magic constant, reciprocal of 10
7524 instruct loadConI_x66666667(iRegIsafe dst) %{
7525 effect( DEF dst );
7527 size(8);
7528 format %{ "SET 0x66666667,$dst\t! Used in div-by-10" %}
7529 ins_encode( Set32(0x66666667, dst) );
7530 ins_pipe(ialu_hi_lo_reg);
7531 %}
7533 // Register Shift Right Arithmetic Long by 32-63
7534 instruct sra_31( iRegI dst, iRegI src ) %{
7535 effect( DEF dst, USE src );
7536 format %{ "SRA $src,31,$dst\t! Used in div-by-10" %}
7537 ins_encode( form3_rs1_rd_copysign_hi(src,dst) );
7538 ins_pipe(ialu_reg_reg);
7539 %}
7541 // Arithmetic Shift Right by 8-bit immediate
7542 instruct sra_reg_2( iRegI dst, iRegI src ) %{
7543 effect( DEF dst, USE src );
7544 format %{ "SRA $src,2,$dst\t! Used in div-by-10" %}
7545 opcode(Assembler::sra_op3, Assembler::arith_op);
7546 ins_encode( form3_rs1_simm13_rd( src, 0x2, dst ) );
7547 ins_pipe(ialu_reg_imm);
7548 %}
7550 // Integer DIV with 10
// Signed divide by 10 without a hardware divide, via reciprocal
// multiplication:  q = hi32(src * 0x66666667) >> 2, then subtract the
// sign word (src >> 31) so negative dividends round toward zero.
// 0x66666667 is the magic reciprocal of 10 (see "Hacker's Delight",
// ch. 10, division by constants).
7551 instruct divI_10( iRegI dst, iRegIsafe src, immI10 div ) %{
7552 match(Set dst (DivI src div));
7553 ins_cost((6+6)*DEFAULT_COST);
7554 expand %{
7555 iRegIsafe tmp1; // Killed temps;
7556 iRegIsafe tmp2; // Killed temps;
7557 iRegI tmp3; // Killed temps;
7558 iRegI tmp4; // Killed temps;
7559 loadConI_x66666667( tmp1 ); // SET 0x66666667 -> tmp1
7560 mul_hi( tmp2, src, tmp1 ); // MUL hibits(src * tmp1) -> tmp2
7561 sra_31( tmp3, src ); // SRA src,31 -> tmp3
7562 sra_reg_2( tmp4, tmp2 ); // SRA tmp2,2 -> tmp4
7563 subI_reg_reg( dst,tmp4,tmp3); // SUB tmp4 - tmp3 -> dst
7564 %}
7565 %}
7567 // Register Long Division
7568 instruct divL_reg_reg(iRegL dst, iRegL src1, iRegL src2) %{
7569 match(Set dst (DivL src1 src2));
7570 ins_cost(DEFAULT_COST*71);
7571 size(4);
7572 format %{ "SDIVX $src1,$src2,$dst\t! long" %}
7573 opcode(Assembler::sdivx_op3, Assembler::arith_op);
7574 ins_encode( form3_rs1_rs2_rd( src1, src2, dst ) );
7575 ins_pipe(divL_reg_reg);
7576 %}
7578 // Register Long Division
7579 instruct divL_reg_imm13(iRegL dst, iRegL src1, immL13 src2) %{
7580 match(Set dst (DivL src1 src2));
7581 ins_cost(DEFAULT_COST*71);
7582 size(4);
7583 format %{ "SDIVX $src1,$src2,$dst\t! long" %}
7584 opcode(Assembler::sdivx_op3, Assembler::arith_op);
7585 ins_encode( form3_rs1_simm13_rd( src1, src2, dst ) );
7586 ins_pipe(divL_reg_imm);
7587 %}
7589 // Integer Remainder
7590 // Register Remainder
7591 instruct modI_reg_reg(iRegI dst, iRegIsafe src1, iRegIsafe src2, o7RegP temp, flagsReg ccr ) %{
7592 match(Set dst (ModI src1 src2));
7593 effect( KILL ccr, KILL temp);
7595 format %{ "SREM $src1,$src2,$dst" %}
7596 ins_encode( irem_reg(src1, src2, dst, temp) );
7597 ins_pipe(sdiv_reg_reg);
7598 %}
7600 // Immediate Remainder
7601 instruct modI_reg_imm13(iRegI dst, iRegIsafe src1, immI13 src2, o7RegP temp, flagsReg ccr ) %{
7602 match(Set dst (ModI src1 src2));
7603 effect( KILL ccr, KILL temp);
7605 format %{ "SREM $src1,$src2,$dst" %}
7606 ins_encode( irem_imm(src1, src2, dst, temp) );
7607 ins_pipe(sdiv_reg_imm);
7608 %}
7610 // Register Long Remainder
7611 instruct divL_reg_reg_1(iRegL dst, iRegL src1, iRegL src2) %{
7612 effect(DEF dst, USE src1, USE src2);
7613 size(4);
7614 format %{ "SDIVX $src1,$src2,$dst\t! long" %}
7615 opcode(Assembler::sdivx_op3, Assembler::arith_op);
7616 ins_encode( form3_rs1_rs2_rd( src1, src2, dst ) );
7617 ins_pipe(divL_reg_reg);
7618 %}
7620 // Register Long Division
7621 instruct divL_reg_imm13_1(iRegL dst, iRegL src1, immL13 src2) %{
7622 effect(DEF dst, USE src1, USE src2);
7623 size(4);
7624 format %{ "SDIVX $src1,$src2,$dst\t! long" %}
7625 opcode(Assembler::sdivx_op3, Assembler::arith_op);
7626 ins_encode( form3_rs1_simm13_rd( src1, src2, dst ) );
7627 ins_pipe(divL_reg_imm);
7628 %}
7630 instruct mulL_reg_reg_1(iRegL dst, iRegL src1, iRegL src2) %{
7631 effect(DEF dst, USE src1, USE src2);
7632 size(4);
7633 format %{ "MULX $src1,$src2,$dst\t! long" %}
7634 opcode(Assembler::mulx_op3, Assembler::arith_op);
7635 ins_encode( form3_rs1_rs2_rd( src1, src2, dst ) );
7636 ins_pipe(mulL_reg_reg);
7637 %}
7639 // Immediate Multiplication
7640 instruct mulL_reg_imm13_1(iRegL dst, iRegL src1, immL13 src2) %{
7641 effect(DEF dst, USE src1, USE src2);
7642 size(4);
7643 format %{ "MULX $src1,$src2,$dst" %}
7644 opcode(Assembler::mulx_op3, Assembler::arith_op);
7645 ins_encode( form3_rs1_simm13_rd( src1, src2, dst ) );
7646 ins_pipe(mulL_reg_imm);
7647 %}
7649 instruct subL_reg_reg_1(iRegL dst, iRegL src1, iRegL src2) %{
7650 effect(DEF dst, USE src1, USE src2);
7651 size(4);
7652 format %{ "SUB $src1,$src2,$dst\t! long" %}
7653 opcode(Assembler::sub_op3, Assembler::arith_op);
7654 ins_encode( form3_rs1_rs2_rd( src1, src2, dst ) );
7655 ins_pipe(ialu_reg_reg);
7656 %}
7658 instruct subL_reg_reg_2(iRegL dst, iRegL src1, iRegL src2) %{
7659 effect(DEF dst, USE src1, USE src2);
7660 size(4);
7661 format %{ "SUB $src1,$src2,$dst\t! long" %}
7662 opcode(Assembler::sub_op3, Assembler::arith_op);
7663 ins_encode( form3_rs1_rs2_rd( src1, src2, dst ) );
7664 ins_pipe(ialu_reg_reg);
7665 %}
7667 // Register Long Remainder
// SPARC has no long-remainder instruction; expand to the identity
//   rem = src1 - (src1 / src2) * src2
// using the match-less helper instructs above.
7668 instruct modL_reg_reg(iRegL dst, iRegL src1, iRegL src2) %{
7669 match(Set dst (ModL src1 src2));
7670 ins_cost(DEFAULT_COST*(71 + 6 + 1));
7671 expand %{
7672 iRegL tmp1; // quotient:  src1 / src2
7673 iRegL tmp2; // product:   (src1 / src2) * src2
7674 divL_reg_reg_1(tmp1, src1, src2);
7675 mulL_reg_reg_1(tmp2, tmp1, src2);
7676 subL_reg_reg_1(dst, src1, tmp2);
7677 %}
7678 %}
7680 // Register Long Remainder
// Immediate-divisor variant of modL_reg_reg: same expansion
//   rem = src1 - (src1 / con) * con
// with the 13-bit signed immediate folded into the div and mul helpers.
7681 instruct modL_reg_imm13(iRegL dst, iRegL src1, immL13 src2) %{
7682 match(Set dst (ModL src1 src2));
7683 ins_cost(DEFAULT_COST*(71 + 6 + 1));
7684 expand %{
7685 iRegL tmp1; // quotient:  src1 / src2
7686 iRegL tmp2; // product:   (src1 / src2) * src2
7687 divL_reg_imm13_1(tmp1, src1, src2);
7688 mulL_reg_imm13_1(tmp2, tmp1, src2);
7689 subL_reg_reg_2 (dst, src1, tmp2);
7690 %}
7691 %}
7693 // Integer Shift Instructions
7694 // Register Shift Left
7695 instruct shlI_reg_reg(iRegI dst, iRegI src1, iRegI src2) %{
7696 match(Set dst (LShiftI src1 src2));
7698 size(4);
7699 format %{ "SLL $src1,$src2,$dst" %}
7700 opcode(Assembler::sll_op3, Assembler::arith_op);
7701 ins_encode( form3_rs1_rs2_rd( src1, src2, dst ) );
7702 ins_pipe(ialu_reg_reg);
7703 %}
7705 // Register Shift Left Immediate
7706 instruct shlI_reg_imm5(iRegI dst, iRegI src1, immU5 src2) %{
7707 match(Set dst (LShiftI src1 src2));
7709 size(4);
7710 format %{ "SLL $src1,$src2,$dst" %}
7711 opcode(Assembler::sll_op3, Assembler::arith_op);
7712 ins_encode( form3_rs1_imm5_rd( src1, src2, dst ) );
7713 ins_pipe(ialu_reg_imm);
7714 %}
7716 // Register Shift Left
7717 instruct shlL_reg_reg(iRegL dst, iRegL src1, iRegI src2) %{
7718 match(Set dst (LShiftL src1 src2));
7720 size(4);
7721 format %{ "SLLX $src1,$src2,$dst" %}
7722 opcode(Assembler::sllx_op3, Assembler::arith_op);
7723 ins_encode( form3_sd_rs1_rs2_rd( src1, src2, dst ) );
7724 ins_pipe(ialu_reg_reg);
7725 %}
7727 // Register Shift Left Immediate
7728 instruct shlL_reg_imm6(iRegL dst, iRegL src1, immU6 src2) %{
7729 match(Set dst (LShiftL src1 src2));
7731 size(4);
7732 format %{ "SLLX $src1,$src2,$dst" %}
7733 opcode(Assembler::sllx_op3, Assembler::arith_op);
7734 ins_encode( form3_sd_rs1_imm6_rd( src1, src2, dst ) );
7735 ins_pipe(ialu_reg_imm);
7736 %}
7738 // Register Arithmetic Shift Right
7739 instruct sarI_reg_reg(iRegI dst, iRegI src1, iRegI src2) %{
7740 match(Set dst (RShiftI src1 src2));
7741 size(4);
7742 format %{ "SRA $src1,$src2,$dst" %}
7743 opcode(Assembler::sra_op3, Assembler::arith_op);
7744 ins_encode( form3_rs1_rs2_rd( src1, src2, dst ) );
7745 ins_pipe(ialu_reg_reg);
7746 %}
7748 // Register Arithmetic Shift Right Immediate
7749 instruct sarI_reg_imm5(iRegI dst, iRegI src1, immU5 src2) %{
7750 match(Set dst (RShiftI src1 src2));
7752 size(4);
7753 format %{ "SRA $src1,$src2,$dst" %}
7754 opcode(Assembler::sra_op3, Assembler::arith_op);
7755 ins_encode( form3_rs1_imm5_rd( src1, src2, dst ) );
7756 ins_pipe(ialu_reg_imm);
7757 %}
7759 // Register Shift Right Arithmetic Long
7760 instruct sarL_reg_reg(iRegL dst, iRegL src1, iRegI src2) %{
7761 match(Set dst (RShiftL src1 src2));
7763 size(4);
7764 format %{ "SRAX $src1,$src2,$dst" %}
7765 opcode(Assembler::srax_op3, Assembler::arith_op);
7766 ins_encode( form3_sd_rs1_rs2_rd( src1, src2, dst ) );
7767 ins_pipe(ialu_reg_reg);
7768 %}
7770 // Register Shift Right Arithmetic Long Immediate
7771 instruct sarL_reg_imm6(iRegL dst, iRegL src1, immU6 src2) %{
7772 match(Set dst (RShiftL src1 src2));
7774 size(4);
7775 format %{ "SRAX $src1,$src2,$dst" %}
7776 opcode(Assembler::srax_op3, Assembler::arith_op);
7777 ins_encode( form3_sd_rs1_imm6_rd( src1, src2, dst ) );
7778 ins_pipe(ialu_reg_imm);
7779 %}
7781 // Register Shift Right
7782 instruct shrI_reg_reg(iRegI dst, iRegI src1, iRegI src2) %{
7783 match(Set dst (URShiftI src1 src2));
7785 size(4);
7786 format %{ "SRL $src1,$src2,$dst" %}
7787 opcode(Assembler::srl_op3, Assembler::arith_op);
7788 ins_encode( form3_rs1_rs2_rd( src1, src2, dst ) );
7789 ins_pipe(ialu_reg_reg);
7790 %}
7792 // Register Shift Right Immediate
7793 instruct shrI_reg_imm5(iRegI dst, iRegI src1, immU5 src2) %{
7794 match(Set dst (URShiftI src1 src2));
7796 size(4);
7797 format %{ "SRL $src1,$src2,$dst" %}
7798 opcode(Assembler::srl_op3, Assembler::arith_op);
7799 ins_encode( form3_rs1_imm5_rd( src1, src2, dst ) );
7800 ins_pipe(ialu_reg_imm);
7801 %}
7803 // Register Shift Right
7804 instruct shrL_reg_reg(iRegL dst, iRegL src1, iRegI src2) %{
7805 match(Set dst (URShiftL src1 src2));
7807 size(4);
7808 format %{ "SRLX $src1,$src2,$dst" %}
7809 opcode(Assembler::srlx_op3, Assembler::arith_op);
7810 ins_encode( form3_sd_rs1_rs2_rd( src1, src2, dst ) );
7811 ins_pipe(ialu_reg_reg);
7812 %}
7814 // Register Shift Right Immediate
7815 instruct shrL_reg_imm6(iRegL dst, iRegL src1, immU6 src2) %{
7816 match(Set dst (URShiftL src1 src2));
7818 size(4);
7819 format %{ "SRLX $src1,$src2,$dst" %}
7820 opcode(Assembler::srlx_op3, Assembler::arith_op);
7821 ins_encode( form3_sd_rs1_imm6_rd( src1, src2, dst ) );
7822 ins_pipe(ialu_reg_imm);
7823 %}
7825 // Register Shift Right Immediate with a CastP2X
7826 #ifdef _LP64
7827 instruct shrP_reg_imm6(iRegL dst, iRegP src1, immU6 src2) %{
7828 match(Set dst (URShiftL (CastP2X src1) src2));
7829 size(4);
7830 format %{ "SRLX $src1,$src2,$dst\t! Cast ptr $src1 to long and shift" %}
7831 opcode(Assembler::srlx_op3, Assembler::arith_op);
7832 ins_encode( form3_sd_rs1_imm6_rd( src1, src2, dst ) );
7833 ins_pipe(ialu_reg_imm);
7834 %}
7835 #else
7836 instruct shrP_reg_imm5(iRegI dst, iRegP src1, immU5 src2) %{
7837 match(Set dst (URShiftI (CastP2X src1) src2));
7838 size(4);
7839 format %{ "SRL $src1,$src2,$dst\t! Cast ptr $src1 to int and shift" %}
7840 opcode(Assembler::srl_op3, Assembler::arith_op);
7841 ins_encode( form3_rs1_imm5_rd( src1, src2, dst ) );
7842 ins_pipe(ialu_reg_imm);
7843 %}
7844 #endif
7847 //----------Floating Point Arithmetic Instructions-----------------------------
7849 // Add float single precision
7850 instruct addF_reg_reg(regF dst, regF src1, regF src2) %{
7851 match(Set dst (AddF src1 src2));
7853 size(4);
7854 format %{ "FADDS $src1,$src2,$dst" %}
7855 opcode(Assembler::fpop1_op3, Assembler::arith_op, Assembler::fadds_opf);
7856 ins_encode(form3_opf_rs1F_rs2F_rdF(src1, src2, dst));
7857 ins_pipe(faddF_reg_reg);
7858 %}
7860 // Add float double precision
7861 instruct addD_reg_reg(regD dst, regD src1, regD src2) %{
7862 match(Set dst (AddD src1 src2));
7864 size(4);
7865 format %{ "FADDD $src1,$src2,$dst" %}
7866 opcode(Assembler::fpop1_op3, Assembler::arith_op, Assembler::faddd_opf);
7867 ins_encode(form3_opf_rs1D_rs2D_rdD(src1, src2, dst));
7868 ins_pipe(faddD_reg_reg);
7869 %}
7871 // Sub float single precision
7872 instruct subF_reg_reg(regF dst, regF src1, regF src2) %{
7873 match(Set dst (SubF src1 src2));
7875 size(4);
7876 format %{ "FSUBS $src1,$src2,$dst" %}
7877 opcode(Assembler::fpop1_op3, Assembler::arith_op, Assembler::fsubs_opf);
7878 ins_encode(form3_opf_rs1F_rs2F_rdF(src1, src2, dst));
7879 ins_pipe(faddF_reg_reg);
7880 %}
7882 // Sub float double precision
7883 instruct subD_reg_reg(regD dst, regD src1, regD src2) %{
7884 match(Set dst (SubD src1 src2));
7886 size(4);
7887 format %{ "FSUBD $src1,$src2,$dst" %}
7888 opcode(Assembler::fpop1_op3, Assembler::arith_op, Assembler::fsubd_opf);
7889 ins_encode(form3_opf_rs1D_rs2D_rdD(src1, src2, dst));
7890 ins_pipe(faddD_reg_reg);
7891 %}
7893 // Mul float single precision
7894 instruct mulF_reg_reg(regF dst, regF src1, regF src2) %{
7895 match(Set dst (MulF src1 src2));
7897 size(4);
7898 format %{ "FMULS $src1,$src2,$dst" %}
7899 opcode(Assembler::fpop1_op3, Assembler::arith_op, Assembler::fmuls_opf);
7900 ins_encode(form3_opf_rs1F_rs2F_rdF(src1, src2, dst));
7901 ins_pipe(fmulF_reg_reg);
7902 %}
7904 // Mul float double precision
7905 instruct mulD_reg_reg(regD dst, regD src1, regD src2) %{
7906 match(Set dst (MulD src1 src2));
7908 size(4);
7909 format %{ "FMULD $src1,$src2,$dst" %}
7910 opcode(Assembler::fpop1_op3, Assembler::arith_op, Assembler::fmuld_opf);
7911 ins_encode(form3_opf_rs1D_rs2D_rdD(src1, src2, dst));
7912 ins_pipe(fmulD_reg_reg);
7913 %}
7915 // Div float single precision
7916 instruct divF_reg_reg(regF dst, regF src1, regF src2) %{
7917 match(Set dst (DivF src1 src2));
7919 size(4);
7920 format %{ "FDIVS $src1,$src2,$dst" %}
7921 opcode(Assembler::fpop1_op3, Assembler::arith_op, Assembler::fdivs_opf);
7922 ins_encode(form3_opf_rs1F_rs2F_rdF(src1, src2, dst));
7923 ins_pipe(fdivF_reg_reg);
7924 %}
7926 // Div float double precision
7927 instruct divD_reg_reg(regD dst, regD src1, regD src2) %{
7928 match(Set dst (DivD src1 src2));
7930 size(4);
7931 format %{ "FDIVD $src1,$src2,$dst" %}
7932 opcode(Assembler::fpop1_op3, Assembler::arith_op, Assembler::fdivd_opf);
7933 ins_encode(form3_opf_rs1D_rs2D_rdD(src1, src2, dst));
7934 ins_pipe(fdivD_reg_reg);
7935 %}
7937 // Absolute float double precision
7938 instruct absD_reg(regD dst, regD src) %{
7939 match(Set dst (AbsD src));
7941 format %{ "FABSd $src,$dst" %}
7942 ins_encode(fabsd(dst, src));
7943 ins_pipe(faddD_reg);
7944 %}
7946 // Absolute float single precision
7947 instruct absF_reg(regF dst, regF src) %{
7948 match(Set dst (AbsF src));
7950 format %{ "FABSs $src,$dst" %}
7951 ins_encode(fabss(dst, src));
7952 ins_pipe(faddF_reg);
7953 %}
7955 instruct negF_reg(regF dst, regF src) %{
7956 match(Set dst (NegF src));
7958 size(4);
7959 format %{ "FNEGs $src,$dst" %}
7960 opcode(Assembler::fpop1_op3, Assembler::arith_op, Assembler::fnegs_opf);
7961 ins_encode(form3_opf_rs2F_rdF(src, dst));
7962 ins_pipe(faddF_reg);
7963 %}
7965 instruct negD_reg(regD dst, regD src) %{
7966 match(Set dst (NegD src));
7968 format %{ "FNEGd $src,$dst" %}
7969 ins_encode(fnegd(dst, src));
7970 ins_pipe(faddD_reg);
7971 %}
7973 // Sqrt float single precision
7974 instruct sqrtF_reg_reg(regF dst, regF src) %{
7975 match(Set dst (ConvD2F (SqrtD (ConvF2D src))));
7977 size(4);
7978 format %{ "FSQRTS $src,$dst" %}
7979 ins_encode(fsqrts(dst, src));
7980 ins_pipe(fdivF_reg_reg);
7981 %}
7983 // Sqrt float double precision
7984 instruct sqrtD_reg_reg(regD dst, regD src) %{
7985 match(Set dst (SqrtD src));
7987 size(4);
7988 format %{ "FSQRTD $src,$dst" %}
7989 ins_encode(fsqrtd(dst, src));
7990 ins_pipe(fdivD_reg_reg);
7991 %}
7993 //----------Logical Instructions-----------------------------------------------
7994 // And Instructions
7995 // Register And
7996 instruct andI_reg_reg(iRegI dst, iRegI src1, iRegI src2) %{
7997 match(Set dst (AndI src1 src2));
7999 size(4);
8000 format %{ "AND $src1,$src2,$dst" %}
8001 opcode(Assembler::and_op3, Assembler::arith_op);
8002 ins_encode( form3_rs1_rs2_rd( src1, src2, dst ) );
8003 ins_pipe(ialu_reg_reg);
8004 %}
8006 // Immediate And
8007 instruct andI_reg_imm13(iRegI dst, iRegI src1, immI13 src2) %{
8008 match(Set dst (AndI src1 src2));
8010 size(4);
8011 format %{ "AND $src1,$src2,$dst" %}
8012 opcode(Assembler::and_op3, Assembler::arith_op);
8013 ins_encode( form3_rs1_simm13_rd( src1, src2, dst ) );
8014 ins_pipe(ialu_reg_imm);
8015 %}
8017 // Register And Long
8018 instruct andL_reg_reg(iRegL dst, iRegL src1, iRegL src2) %{
8019 match(Set dst (AndL src1 src2));
8021 ins_cost(DEFAULT_COST);
8022 size(4);
8023 format %{ "AND $src1,$src2,$dst\t! long" %}
8024 opcode(Assembler::and_op3, Assembler::arith_op);
8025 ins_encode( form3_rs1_rs2_rd( src1, src2, dst ) );
8026 ins_pipe(ialu_reg_reg);
8027 %}
8029 instruct andL_reg_imm13(iRegL dst, iRegL src1, immL13 con) %{
8030 match(Set dst (AndL src1 con));
8032 ins_cost(DEFAULT_COST);
8033 size(4);
8034 format %{ "AND $src1,$con,$dst\t! long" %}
8035 opcode(Assembler::and_op3, Assembler::arith_op);
8036 ins_encode( form3_rs1_simm13_rd( src1, con, dst ) );
8037 ins_pipe(ialu_reg_imm);
8038 %}
8040 // Or Instructions
8041 // Register Or
8042 instruct orI_reg_reg(iRegI dst, iRegI src1, iRegI src2) %{
8043 match(Set dst (OrI src1 src2));
8045 size(4);
8046 format %{ "OR $src1,$src2,$dst" %}
8047 opcode(Assembler::or_op3, Assembler::arith_op);
8048 ins_encode( form3_rs1_rs2_rd( src1, src2, dst ) );
8049 ins_pipe(ialu_reg_reg);
8050 %}
8052 // Immediate Or
8053 instruct orI_reg_imm13(iRegI dst, iRegI src1, immI13 src2) %{
8054 match(Set dst (OrI src1 src2));
8056 size(4);
8057 format %{ "OR $src1,$src2,$dst" %}
8058 opcode(Assembler::or_op3, Assembler::arith_op);
8059 ins_encode( form3_rs1_simm13_rd( src1, src2, dst ) );
8060 ins_pipe(ialu_reg_imm);
8061 %}
8063 // Register Or Long
8064 instruct orL_reg_reg(iRegL dst, iRegL src1, iRegL src2) %{
8065 match(Set dst (OrL src1 src2));
8067 ins_cost(DEFAULT_COST);
8068 size(4);
8069 format %{ "OR $src1,$src2,$dst\t! long" %}
8070 opcode(Assembler::or_op3, Assembler::arith_op);
8071 ins_encode( form3_rs1_rs2_rd( src1, src2, dst ) );
8072 ins_pipe(ialu_reg_reg);
8073 %}
// Immediate Or Long.
// Fix: removed a stale duplicate ins_cost(DEFAULT_COST*2) that preceded
// the real ins_cost(DEFAULT_COST); only one cost applies per instruct,
// and the single-instruction OR costs DEFAULT_COST, consistent with
// andL_reg_imm13 and xorL_reg_imm13.
8075 instruct orL_reg_imm13(iRegL dst, iRegL src1, immL13 con) %{
8076 match(Set dst (OrL src1 con));
8079 ins_cost(DEFAULT_COST);
8080 size(4);
8081 format %{ "OR $src1,$con,$dst\t! long" %}
8082 opcode(Assembler::or_op3, Assembler::arith_op);
8083 ins_encode( form3_rs1_simm13_rd( src1, con, dst ) );
8084 ins_pipe(ialu_reg_imm);
8085 %}
8087 #ifndef _LP64
8089 // Use sp_ptr_RegP to match G2 (TLS register) without spilling.
8090 instruct orI_reg_castP2X(iRegI dst, iRegI src1, sp_ptr_RegP src2) %{
8091 match(Set dst (OrI src1 (CastP2X src2)));
8093 size(4);
8094 format %{ "OR $src1,$src2,$dst" %}
8095 opcode(Assembler::or_op3, Assembler::arith_op);
8096 ins_encode( form3_rs1_rs2_rd( src1, src2, dst ) );
8097 ins_pipe(ialu_reg_reg);
8098 %}
8100 #else
8102 instruct orL_reg_castP2X(iRegL dst, iRegL src1, sp_ptr_RegP src2) %{
8103 match(Set dst (OrL src1 (CastP2X src2)));
8105 ins_cost(DEFAULT_COST);
8106 size(4);
8107 format %{ "OR $src1,$src2,$dst\t! long" %}
8108 opcode(Assembler::or_op3, Assembler::arith_op);
8109 ins_encode( form3_rs1_rs2_rd( src1, src2, dst ) );
8110 ins_pipe(ialu_reg_reg);
8111 %}
8113 #endif
8115 // Xor Instructions
8116 // Register Xor
8117 instruct xorI_reg_reg(iRegI dst, iRegI src1, iRegI src2) %{
8118 match(Set dst (XorI src1 src2));
8120 size(4);
8121 format %{ "XOR $src1,$src2,$dst" %}
8122 opcode(Assembler::xor_op3, Assembler::arith_op);
8123 ins_encode( form3_rs1_rs2_rd( src1, src2, dst ) );
8124 ins_pipe(ialu_reg_reg);
8125 %}
8127 // Immediate Xor
8128 instruct xorI_reg_imm13(iRegI dst, iRegI src1, immI13 src2) %{
8129 match(Set dst (XorI src1 src2));
8131 size(4);
8132 format %{ "XOR $src1,$src2,$dst" %}
8133 opcode(Assembler::xor_op3, Assembler::arith_op);
8134 ins_encode( form3_rs1_simm13_rd( src1, src2, dst ) );
8135 ins_pipe(ialu_reg_imm);
8136 %}
8138 // Register Xor Long
8139 instruct xorL_reg_reg(iRegL dst, iRegL src1, iRegL src2) %{
8140 match(Set dst (XorL src1 src2));
8142 ins_cost(DEFAULT_COST);
8143 size(4);
8144 format %{ "XOR $src1,$src2,$dst\t! long" %}
8145 opcode(Assembler::xor_op3, Assembler::arith_op);
8146 ins_encode( form3_rs1_rs2_rd( src1, src2, dst ) );
8147 ins_pipe(ialu_reg_reg);
8148 %}
8150 instruct xorL_reg_imm13(iRegL dst, iRegL src1, immL13 con) %{
8151 match(Set dst (XorL src1 con));
8153 ins_cost(DEFAULT_COST);
8154 size(4);
8155 format %{ "XOR $src1,$con,$dst\t! long" %}
8156 opcode(Assembler::xor_op3, Assembler::arith_op);
8157 ins_encode( form3_rs1_simm13_rd( src1, con, dst ) );
8158 ins_pipe(ialu_reg_imm);
8159 %}
8161 //----------Convert to Boolean-------------------------------------------------
8162 // Nice hack for 32-bit tests but doesn't work for
8163 // 64-bit pointers.
8164 instruct convI2B( iRegI dst, iRegI src, flagsReg ccr ) %{
8165 match(Set dst (Conv2B src));
8166 effect( KILL ccr );
8167 ins_cost(DEFAULT_COST*2);
8168 format %{ "CMP R_G0,$src\n\t"
8169 "ADDX R_G0,0,$dst" %}
8170 ins_encode( enc_to_bool( src, dst ) );
8171 ins_pipe(ialu_reg_ialu);
8172 %}
8174 #ifndef _LP64
8175 instruct convP2B( iRegI dst, iRegP src, flagsReg ccr ) %{
8176 match(Set dst (Conv2B src));
8177 effect( KILL ccr );
8178 ins_cost(DEFAULT_COST*2);
8179 format %{ "CMP R_G0,$src\n\t"
8180 "ADDX R_G0,0,$dst" %}
8181 ins_encode( enc_to_bool( src, dst ) );
8182 ins_pipe(ialu_reg_ialu);
8183 %}
8184 #else
8185 instruct convP2B( iRegI dst, iRegP src ) %{
8186 match(Set dst (Conv2B src));
8187 ins_cost(DEFAULT_COST*2);
8188 format %{ "MOV $src,$dst\n\t"
8189 "MOVRNZ $src,1,$dst" %}
8190 ins_encode( form3_g0_rs2_rd_move( src, dst ), enc_convP2B( dst, src ) );
8191 ins_pipe(ialu_clr_and_mover);
8192 %}
8193 #endif
// CmpLTMask against zero: dst = (src < 0) ? -1 : 0.
// A single arithmetic shift by 31 smears the sign bit across the word,
// so no compare is needed. ccr is listed as killed conservatively —
// NOTE(review): SRA itself does not set condition codes; confirm the
// KILL is intentional slack in the effect description.
8195 instruct cmpLTMask0( iRegI dst, iRegI src, immI0 zero, flagsReg ccr ) %{
8196 match(Set dst (CmpLTMask src zero));
8197 effect(KILL ccr);
8198 size(4);
8199 format %{ "SRA $src,#31,$dst\t# cmpLTMask0" %}
8200 ins_encode %{
8201 __ sra($src$$Register, 31, $dst$$Register);
8202 %}
8203 ins_pipe(ialu_reg_imm);
8204 %}
8206 instruct cmpLTMask_reg_reg( iRegI dst, iRegI p, iRegI q, flagsReg ccr ) %{
8207 match(Set dst (CmpLTMask p q));
8208 effect( KILL ccr );
8209 ins_cost(DEFAULT_COST*4);
8210 format %{ "CMP $p,$q\n\t"
8211 "MOV #0,$dst\n\t"
8212 "BLT,a .+8\n\t"
8213 "MOV #-1,$dst" %}
8214 ins_encode( enc_ltmask(p,q,dst) );
8215 ins_pipe(ialu_reg_reg_ialu);
8216 %}
// Fused conditional-add idiom: matches
//   p = ((p < q) ? -1 : 0) & y) + (p - q)
// i.e. p' = p - q, plus y only when the subtraction went negative.
// Implemented branchlessly with SUBcc + conditional move (MOVlt).
8218 instruct cadd_cmpLTMask( iRegI p, iRegI q, iRegI y, iRegI tmp, flagsReg ccr ) %{
8219 match(Set p (AddI (AndI (CmpLTMask p q) y) (SubI p q)));
8220 effect(KILL ccr, TEMP tmp);
8221 ins_cost(DEFAULT_COST*3);
8223 format %{ "SUBcc $p,$q,$p\t! p' = p-q\n\t"
8224 "ADD $p,$y,$tmp\t! g3=p-q+y\n\t"
8225 "MOVlt $tmp,$p\t! p' < 0 ? p'+y : p'" %}
8226 ins_encode( enc_cadd_cmpLTMask(p, q, y, tmp) );
8227 ins_pipe( cadd_cmpltmask );
8228 %}
8231 //-----------------------------------------------------------------
8232 // Direct raw moves between float and general registers using VIS3.
8234 // ins_pipe(faddF_reg);
8235 instruct MoveF2I_reg_reg(iRegI dst, regF src) %{
8236 predicate(UseVIS >= 3);
8237 match(Set dst (MoveF2I src));
8239 format %{ "MOVSTOUW $src,$dst\t! MoveF2I" %}
8240 ins_encode %{
8241 __ movstouw($src$$FloatRegister, $dst$$Register);
8242 %}
8243 ins_pipe(ialu_reg_reg);
8244 %}
8246 instruct MoveI2F_reg_reg(regF dst, iRegI src) %{
8247 predicate(UseVIS >= 3);
8248 match(Set dst (MoveI2F src));
8250 format %{ "MOVWTOS $src,$dst\t! MoveI2F" %}
8251 ins_encode %{
8252 __ movwtos($src$$Register, $dst$$FloatRegister);
8253 %}
8254 ins_pipe(ialu_reg_reg);
8255 %}
8257 instruct MoveD2L_reg_reg(iRegL dst, regD src) %{
8258 predicate(UseVIS >= 3);
8259 match(Set dst (MoveD2L src));
8261 format %{ "MOVDTOX $src,$dst\t! MoveD2L" %}
8262 ins_encode %{
8263 __ movdtox(as_DoubleFloatRegister($src$$reg), $dst$$Register);
8264 %}
8265 ins_pipe(ialu_reg_reg);
8266 %}
8268 instruct MoveL2D_reg_reg(regD dst, iRegL src) %{
8269 predicate(UseVIS >= 3);
8270 match(Set dst (MoveL2D src));
8272 format %{ "MOVXTOD $src,$dst\t! MoveL2D" %}
8273 ins_encode %{
8274 __ movxtod($src$$Register, as_DoubleFloatRegister($dst$$reg));
8275 %}
8276 ins_pipe(ialu_reg_reg);
8277 %}
8280 // Raw moves between float and general registers using stack.
8282 instruct MoveF2I_stack_reg(iRegI dst, stackSlotF src) %{
8283 match(Set dst (MoveF2I src));
8284 effect(DEF dst, USE src);
8285 ins_cost(MEMORY_REF_COST);
8287 size(4);
8288 format %{ "LDUW $src,$dst\t! MoveF2I" %}
8289 opcode(Assembler::lduw_op3);
8290 ins_encode(simple_form3_mem_reg( src, dst ) );
8291 ins_pipe(iload_mem);
8292 %}
8294 instruct MoveI2F_stack_reg(regF dst, stackSlotI src) %{
8295 match(Set dst (MoveI2F src));
8296 effect(DEF dst, USE src);
8297 ins_cost(MEMORY_REF_COST);
8299 size(4);
8300 format %{ "LDF $src,$dst\t! MoveI2F" %}
8301 opcode(Assembler::ldf_op3);
8302 ins_encode(simple_form3_mem_reg(src, dst));
8303 ins_pipe(floadF_stk);
8304 %}
8306 instruct MoveD2L_stack_reg(iRegL dst, stackSlotD src) %{
8307 match(Set dst (MoveD2L src));
8308 effect(DEF dst, USE src);
8309 ins_cost(MEMORY_REF_COST);
8311 size(4);
8312 format %{ "LDX $src,$dst\t! MoveD2L" %}
8313 opcode(Assembler::ldx_op3);
8314 ins_encode(simple_form3_mem_reg( src, dst ) );
8315 ins_pipe(iload_mem);
8316 %}
8318 instruct MoveL2D_stack_reg(regD dst, stackSlotL src) %{
8319 match(Set dst (MoveL2D src));
8320 effect(DEF dst, USE src);
8321 ins_cost(MEMORY_REF_COST);
8323 size(4);
8324 format %{ "LDDF $src,$dst\t! MoveL2D" %}
8325 opcode(Assembler::lddf_op3);
8326 ins_encode(simple_form3_mem_reg(src, dst));
8327 ins_pipe(floadD_stk);
8328 %}
8330 instruct MoveF2I_reg_stack(stackSlotI dst, regF src) %{
8331 match(Set dst (MoveF2I src));
8332 effect(DEF dst, USE src);
8333 ins_cost(MEMORY_REF_COST);
8335 size(4);
8336 format %{ "STF $src,$dst\t! MoveF2I" %}
8337 opcode(Assembler::stf_op3);
8338 ins_encode(simple_form3_mem_reg(dst, src));
8339 ins_pipe(fstoreF_stk_reg);
8340 %}
8342 instruct MoveI2F_reg_stack(stackSlotF dst, iRegI src) %{
8343 match(Set dst (MoveI2F src));
8344 effect(DEF dst, USE src);
8345 ins_cost(MEMORY_REF_COST);
8347 size(4);
8348 format %{ "STW $src,$dst\t! MoveI2F" %}
8349 opcode(Assembler::stw_op3);
8350 ins_encode(simple_form3_mem_reg( dst, src ) );
8351 ins_pipe(istore_mem_reg);
8352 %}
8354 instruct MoveD2L_reg_stack(stackSlotL dst, regD src) %{
8355 match(Set dst (MoveD2L src));
8356 effect(DEF dst, USE src);
8357 ins_cost(MEMORY_REF_COST);
8359 size(4);
8360 format %{ "STDF $src,$dst\t! MoveD2L" %}
8361 opcode(Assembler::stdf_op3);
8362 ins_encode(simple_form3_mem_reg(dst, src));
8363 ins_pipe(fstoreD_stk_reg);
8364 %}
8366 instruct MoveL2D_reg_stack(stackSlotD dst, iRegL src) %{
8367 match(Set dst (MoveL2D src));
8368 effect(DEF dst, USE src);
8369 ins_cost(MEMORY_REF_COST);
8371 size(4);
8372 format %{ "STX $src,$dst\t! MoveL2D" %}
8373 opcode(Assembler::stx_op3);
8374 ins_encode(simple_form3_mem_reg( dst, src ) );
8375 ins_pipe(istore_mem_reg);
8376 %}
8379 //----------Arithmetic Conversion Instructions---------------------------------
8380 // The conversions operations are all Alpha sorted. Please keep it that way!
8382 instruct convD2F_reg(regF dst, regD src) %{
8383 match(Set dst (ConvD2F src));
8384 size(4);
8385 format %{ "FDTOS $src,$dst" %}
8386 opcode(Assembler::fpop1_op3, Assembler::arith_op, Assembler::fdtos_opf);
8387 ins_encode(form3_opf_rs2D_rdF(src, dst));
8388 ins_pipe(fcvtD2F);
8389 %}
8392 // Convert a double to an int in a float register.
8393 // If the double is a NAN, stuff a zero in instead.
8394 instruct convD2I_helper(regF dst, regD src, flagsRegF0 fcc0) %{
8395 effect(DEF dst, USE src, KILL fcc0);
8396 format %{ "FCMPd fcc0,$src,$src\t! check for NAN\n\t"
8397 "FBO,pt fcc0,skip\t! branch on ordered, predict taken\n\t"
8398 "FDTOI $src,$dst\t! convert in delay slot\n\t"
8399 "FITOS $dst,$dst\t! change NaN/max-int to valid float\n\t"
8400 "FSUBs $dst,$dst,$dst\t! cleared only if nan\n"
8401 "skip:" %}
8402 ins_encode(form_d2i_helper(src,dst));
8403 ins_pipe(fcvtD2I);
8404 %}
8406 instruct convD2I_stk(stackSlotI dst, regD src) %{
8407 match(Set dst (ConvD2I src));
8408 ins_cost(DEFAULT_COST*2 + MEMORY_REF_COST*2 + BRANCH_COST);
8409 expand %{
8410 regF tmp;
8411 convD2I_helper(tmp, src);
8412 regF_to_stkI(dst, tmp);
8413 %}
8414 %}
8416 instruct convD2I_reg(iRegI dst, regD src) %{
8417 predicate(UseVIS >= 3);
8418 match(Set dst (ConvD2I src));
8419 ins_cost(DEFAULT_COST*2 + BRANCH_COST);
8420 expand %{
8421 regF tmp;
8422 convD2I_helper(tmp, src);
8423 MoveF2I_reg_reg(dst, tmp);
8424 %}
8425 %}
8428 // Convert a double to a long in a double register.
8429 // If the double is a NAN, stuff a zero in instead.
8430 instruct convD2L_helper(regD dst, regD src, flagsRegF0 fcc0) %{
8431 effect(DEF dst, USE src, KILL fcc0);
8432 format %{ "FCMPd fcc0,$src,$src\t! check for NAN\n\t"
8433 "FBO,pt fcc0,skip\t! branch on ordered, predict taken\n\t"
8434 "FDTOX $src,$dst\t! convert in delay slot\n\t"
8435 "FXTOD $dst,$dst\t! change NaN/max-long to valid double\n\t"
8436 "FSUBd $dst,$dst,$dst\t! cleared only if nan\n"
8437 "skip:" %}
8438 ins_encode(form_d2l_helper(src,dst));
8439 ins_pipe(fcvtD2L);
8440 %}
8442 instruct convD2L_stk(stackSlotL dst, regD src) %{
8443 match(Set dst (ConvD2L src));
8444 ins_cost(DEFAULT_COST*2 + MEMORY_REF_COST*2 + BRANCH_COST);
8445 expand %{
8446 regD tmp;
8447 convD2L_helper(tmp, src);
8448 regD_to_stkL(dst, tmp);
8449 %}
8450 %}
8452 instruct convD2L_reg(iRegL dst, regD src) %{
8453 predicate(UseVIS >= 3);
8454 match(Set dst (ConvD2L src));
8455 ins_cost(DEFAULT_COST*2 + BRANCH_COST);
8456 expand %{
8457 regD tmp;
8458 convD2L_helper(tmp, src);
8459 MoveD2L_reg_reg(dst, tmp);
8460 %}
8461 %}
8464 instruct convF2D_reg(regD dst, regF src) %{
8465 match(Set dst (ConvF2D src));
8466 format %{ "FSTOD $src,$dst" %}
8467 opcode(Assembler::fpop1_op3, Assembler::arith_op, Assembler::fstod_opf);
8468 ins_encode(form3_opf_rs2F_rdD(src, dst));
8469 ins_pipe(fcvtF2D);
8470 %}
8473 // Convert a float to an int in a float register.
8474 // If the float is a NAN, stuff a zero in instead.
8475 instruct convF2I_helper(regF dst, regF src, flagsRegF0 fcc0) %{
8476 effect(DEF dst, USE src, KILL fcc0);
8477 format %{ "FCMPs fcc0,$src,$src\t! check for NAN\n\t"
8478 "FBO,pt fcc0,skip\t! branch on ordered, predict taken\n\t"
8479 "FSTOI $src,$dst\t! convert in delay slot\n\t"
8480 "FITOS $dst,$dst\t! change NaN/max-int to valid float\n\t"
8481 "FSUBs $dst,$dst,$dst\t! cleared only if nan\n"
8482 "skip:" %}
8483 ins_encode(form_f2i_helper(src,dst));
8484 ins_pipe(fcvtF2I);
8485 %}
8487 instruct convF2I_stk(stackSlotI dst, regF src) %{
8488 match(Set dst (ConvF2I src));
8489 ins_cost(DEFAULT_COST*2 + MEMORY_REF_COST*2 + BRANCH_COST);
8490 expand %{
8491 regF tmp;
8492 convF2I_helper(tmp, src);
8493 regF_to_stkI(dst, tmp);
8494 %}
8495 %}
8497 instruct convF2I_reg(iRegI dst, regF src) %{
8498 predicate(UseVIS >= 3);
8499 match(Set dst (ConvF2I src));
8500 ins_cost(DEFAULT_COST*2 + BRANCH_COST);
8501 expand %{
8502 regF tmp;
8503 convF2I_helper(tmp, src);
8504 MoveF2I_reg_reg(dst, tmp);
8505 %}
8506 %}
8509 // Convert a float to a long in a float register.
8510 // If the float is a NAN, stuff a zero in instead.
8511 instruct convF2L_helper(regD dst, regF src, flagsRegF0 fcc0) %{
8512 effect(DEF dst, USE src, KILL fcc0);
8513 format %{ "FCMPs fcc0,$src,$src\t! check for NAN\n\t"
8514 "FBO,pt fcc0,skip\t! branch on ordered, predict taken\n\t"
8515 "FSTOX $src,$dst\t! convert in delay slot\n\t"
8516 "FXTOD $dst,$dst\t! change NaN/max-long to valid double\n\t"
8517 "FSUBd $dst,$dst,$dst\t! cleared only if nan\n"
8518 "skip:" %}
8519 ins_encode(form_f2l_helper(src,dst));
8520 ins_pipe(fcvtF2L);
8521 %}
8523 instruct convF2L_stk(stackSlotL dst, regF src) %{
8524 match(Set dst (ConvF2L src));
8525 ins_cost(DEFAULT_COST*2 + MEMORY_REF_COST*2 + BRANCH_COST);
8526 expand %{
8527 regD tmp;
8528 convF2L_helper(tmp, src);
8529 regD_to_stkL(dst, tmp);
8530 %}
8531 %}
8533 instruct convF2L_reg(iRegL dst, regF src) %{
8534 predicate(UseVIS >= 3);
8535 match(Set dst (ConvF2L src));
8536 ins_cost(DEFAULT_COST*2 + BRANCH_COST);
8537 expand %{
8538 regD tmp;
8539 convF2L_helper(tmp, src);
8540 MoveD2L_reg_reg(dst, tmp);
8541 %}
8542 %}
8545 instruct convI2D_helper(regD dst, regF tmp) %{
8546 effect(USE tmp, DEF dst);
8547 format %{ "FITOD $tmp,$dst" %}
8548 opcode(Assembler::fpop1_op3, Assembler::arith_op, Assembler::fitod_opf);
8549 ins_encode(form3_opf_rs2F_rdD(tmp, dst));
8550 ins_pipe(fcvtI2D);
8551 %}
8553 instruct convI2D_stk(stackSlotI src, regD dst) %{
8554 match(Set dst (ConvI2D src));
8555 ins_cost(DEFAULT_COST + MEMORY_REF_COST);
8556 expand %{
8557 regF tmp;
8558 stkI_to_regF(tmp, src);
8559 convI2D_helper(dst, tmp);
8560 %}
8561 %}
8563 instruct convI2D_reg(regD_low dst, iRegI src) %{
8564 predicate(UseVIS >= 3);
8565 match(Set dst (ConvI2D src));
8566 expand %{
8567 regF tmp;
8568 MoveI2F_reg_reg(tmp, src);
8569 convI2D_helper(dst, tmp);
8570 %}
8571 %}
8573 instruct convI2D_mem(regD_low dst, memory mem) %{
8574 match(Set dst (ConvI2D (LoadI mem)));
8575 ins_cost(DEFAULT_COST + MEMORY_REF_COST);
8576 size(8);
8577 format %{ "LDF $mem,$dst\n\t"
8578 "FITOD $dst,$dst" %}
8579 opcode(Assembler::ldf_op3, Assembler::fitod_opf);
8580 ins_encode(simple_form3_mem_reg( mem, dst ), form3_convI2F(dst, dst));
8581 ins_pipe(floadF_mem);
8582 %}
// FITOS: convert an int held in an F register to single-precision float.
// Expand-only helper for the convI2F variants below.
8585 instruct convI2F_helper(regF dst, regF tmp) %{
8586 effect(DEF dst, USE tmp);
8587 format %{ "FITOS $tmp,$dst" %}
8588 opcode(Assembler::fpop1_op3, Assembler::arith_op, Assembler::fitos_opf);
8589 ins_encode(form3_opf_rs2F_rdF(tmp, dst));
8590 ins_pipe(fcvtI2F);
8591 %}
// int (stack slot) -> float: load into an F register, then FITOS.
8593 instruct convI2F_stk(regF dst, stackSlotI src) %{
8594 match(Set dst (ConvI2F src));
8595 ins_cost(DEFAULT_COST + MEMORY_REF_COST);
8596 expand %{
8597 regF tmp;
8598 stkI_to_regF(tmp,src);
8599 convI2F_helper(dst, tmp);
8600 %}
8601 %}
// VIS3 variant: direct int-reg -> F-reg move, no memory round-trip.
8603 instruct convI2F_reg(regF dst, iRegI src) %{
8604 predicate(UseVIS >= 3);
8605 match(Set dst (ConvI2F src));
8606 ins_cost(DEFAULT_COST);
8607 expand %{
8608 regF tmp;
8609 MoveI2F_reg_reg(tmp, src);
8610 convI2F_helper(dst, tmp);
8611 %}
8612 %}
// Memory form: fold the LoadI into the conversion (LDF + FITOS, 8 bytes).
8614 instruct convI2F_mem( regF dst, memory mem ) %{
8615 match(Set dst (ConvI2F (LoadI mem)));
8616 ins_cost(DEFAULT_COST + MEMORY_REF_COST);
8617 size(8);
8618 format %{ "LDF $mem,$dst\n\t"
8619 "FITOS $dst,$dst" %}
8620 opcode(Assembler::ldf_op3, Assembler::fitos_opf);
8621 ins_encode(simple_form3_mem_reg( mem, dst ), form3_convI2F(dst, dst));
8622 ins_pipe(floadF_mem);
8623 %}
// Sign-extend int to long: SRA by 0 replicates bit 31 through the upper word.
8626 instruct convI2L_reg(iRegL dst, iRegI src) %{
8627 match(Set dst (ConvI2L src));
8628 size(4);
8629 format %{ "SRA $src,0,$dst\t! int->long" %}
8630 opcode(Assembler::sra_op3, Assembler::arith_op);
8631 ins_encode( form3_rs1_rs2_rd( src, R_G0, dst ) );
8632 ins_pipe(ialu_reg_reg);
8633 %}
8635 // Zero-extend convert int to long
// Matches (AndL (ConvI2L src) 0xFFFFFFFF): SRL by 0 clears the upper word.
8636 instruct convI2L_reg_zex(iRegL dst, iRegI src, immL_32bits mask ) %{
8637 match(Set dst (AndL (ConvI2L src) mask) );
8638 size(4);
8639 format %{ "SRL $src,0,$dst\t! zero-extend int to long" %}
8640 opcode(Assembler::srl_op3, Assembler::arith_op);
8641 ins_encode( form3_rs1_rs2_rd( src, R_G0, dst ) );
8642 ins_pipe(ialu_reg_reg);
8643 %}
8645 // Zero-extend long
// (AndL src 0xFFFFFFFF) on a long value: same single SRL-by-0 encoding.
8646 instruct zerox_long(iRegL dst, iRegL src, immL_32bits mask ) %{
8647 match(Set dst (AndL src mask) );
8648 size(4);
8649 format %{ "SRL $src,0,$dst\t! zero-extend long" %}
8650 opcode(Assembler::srl_op3, Assembler::arith_op);
8651 ins_encode( form3_rs1_rs2_rd( src, R_G0, dst ) );
8652 ins_pipe(ialu_reg_reg);
8653 %}
8656 //-----------
8657 // Long to Double conversion using V8 opcodes.
8658 // Still useful because cheetah traps and becomes
8659 // amazingly slow for some common numbers.
8661 // Magic constant, 0x43300000
// High word of the double 2^52; expand-only helper (no match rule).
8662 instruct loadConI_x43300000(iRegI dst) %{
8663 effect(DEF dst);
8664 size(4);
8665 format %{ "SETHI HI(0x43300000),$dst\t! 2^52" %}
8666 ins_encode(SetHi22(0x43300000, dst));
8667 ins_pipe(ialu_none);
8668 %}
8670 // Magic constant, 0x41f00000
// High word of the double 2^32; expand-only helper.
8671 instruct loadConI_x41f00000(iRegI dst) %{
8672 effect(DEF dst);
8673 size(4);
8674 format %{ "SETHI HI(0x41f00000),$dst\t! 2^32" %}
8675 ins_encode(SetHi22(0x41f00000, dst));
8676 ins_pipe(ialu_none);
8677 %}
8679 // Construct a double from two float halves
// dst.hi := src1.hi, dst.lo := src2.lo — two FMOVS, 8 bytes.
8680 instruct regDHi_regDLo_to_regD(regD_low dst, regD_low src1, regD_low src2) %{
8681 effect(DEF dst, USE src1, USE src2);
8682 size(8);
8683 format %{ "FMOVS $src1.hi,$dst.hi\n\t"
8684 "FMOVS $src2.lo,$dst.lo" %}
8685 opcode(Assembler::fpop1_op3, Assembler::arith_op, Assembler::fmovs_opf);
8686 ins_encode(form3_opf_rs2D_hi_rdD_hi(src1, dst), form3_opf_rs2D_lo_rdD_lo(src2, dst));
8687 ins_pipe(faddD_reg_reg);
8688 %}
8690 // Convert integer in high half of a double register (in the lower half of
8691 // the double register file) to double
8692 instruct convI2D_regDHi_regD(regD dst, regD_low src) %{
8693 effect(DEF dst, USE src);
8694 size(4);
8695 format %{ "FITOD $src,$dst" %}
8696 opcode(Assembler::fpop1_op3, Assembler::arith_op, Assembler::fitod_opf);
8697 ins_encode(form3_opf_rs2D_rdD(src, dst));
8698 ins_pipe(fcvtLHi2D);
8699 %}
8701 // Add float double precision
// Expand-only FADDD helper used by the slow L2D expansion below.
8702 instruct addD_regD_regD(regD dst, regD src1, regD src2) %{
8703 effect(DEF dst, USE src1, USE src2);
8704 size(4);
8705 format %{ "FADDD $src1,$src2,$dst" %}
8706 opcode(Assembler::fpop1_op3, Assembler::arith_op, Assembler::faddd_opf);
8707 ins_encode(form3_opf_rs1D_rs2D_rdD(src1, src2, dst));
8708 ins_pipe(faddD_reg_reg);
8709 %}
8711 // Sub float double precision
// Expand-only FSUBD helper.
8712 instruct subD_regD_regD(regD dst, regD src1, regD src2) %{
8713 effect(DEF dst, USE src1, USE src2);
8714 size(4);
8715 format %{ "FSUBD $src1,$src2,$dst" %}
8716 opcode(Assembler::fpop1_op3, Assembler::arith_op, Assembler::fsubd_opf);
8717 ins_encode(form3_opf_rs1D_rs2D_rdD(src1, src2, dst));
8718 ins_pipe(faddD_reg_reg);
8719 %}
8721 // Mul float double precision
// Expand-only FMULD helper.
8722 instruct mulD_regD_regD(regD dst, regD src1, regD src2) %{
8723 effect(DEF dst, USE src1, USE src2);
8724 size(4);
8725 format %{ "FMULD $src1,$src2,$dst" %}
8726 opcode(Assembler::fpop1_op3, Assembler::arith_op, Assembler::fmuld_opf);
8727 ins_encode(form3_opf_rs1D_rs2D_rdD(src1, src2, dst));
8728 ins_pipe(fmulD_reg_reg);
8729 %}
// Slow long->double without FXTOD: split the long into halves and combine
//   hi(src) as int -> double, scaled by 2^32  (mulD tmp1 * dx41f00000)
//   lo(src) biased by 2^52 then unbiased      (tmp2 - dx43300000)
// and add the two partial results. Used when fast fxtof is not selected.
8731 instruct convL2D_reg_slow_fxtof(regD dst, stackSlotL src) %{
8732 match(Set dst (ConvL2D src));
8733 ins_cost(DEFAULT_COST*8 + MEMORY_REF_COST*6);
8735 expand %{
8736 regD_low tmpsrc;
8737 iRegI ix43300000;
8738 iRegI ix41f00000;
8739 stackSlotL lx43300000;
8740 stackSlotL lx41f00000;
8741 regD_low dx43300000;
8742 regD dx41f00000;
8743 regD tmp1;
8744 regD_low tmp2;
8745 regD tmp3;
8746 regD tmp4;
8748 stkL_to_regD(tmpsrc, src);
8750 loadConI_x43300000(ix43300000);
8751 loadConI_x41f00000(ix41f00000);
8752 regI_to_stkLHi(lx43300000, ix43300000);
8753 regI_to_stkLHi(lx41f00000, ix41f00000);
8754 stkL_to_regD(dx43300000, lx43300000);
8755 stkL_to_regD(dx41f00000, lx41f00000);
8757 convI2D_regDHi_regD(tmp1, tmpsrc);
8758 regDHi_regDLo_to_regD(tmp2, dx43300000, tmpsrc);
8759 subD_regD_regD(tmp3, tmp2, dx43300000);
8760 mulD_regD_regD(tmp4, tmp1, dx41f00000);
8761 addD_regD_regD(dst, tmp3, tmp4);
8762 %}
8763 %}
8765 // Long to Double conversion using fast fxtof
// FXTOD: single-instruction long->double once the long is in a D register.
8766 instruct convL2D_helper(regD dst, regD tmp) %{
8767 effect(DEF dst, USE tmp);
8768 size(4);
8769 format %{ "FXTOD $tmp,$dst" %}
8770 opcode(Assembler::fpop1_op3, Assembler::arith_op, Assembler::fxtod_opf);
8771 ins_encode(form3_opf_rs2D_rdD(tmp, dst));
8772 ins_pipe(fcvtL2D);
8773 %}
// Stack-slot source: load long into a D register, then FXTOD.
// Only when the CPU has fast fxtof (see slow variant above otherwise).
8775 instruct convL2D_stk_fast_fxtof(regD dst, stackSlotL src) %{
8776 predicate(VM_Version::has_fast_fxtof());
8777 match(Set dst (ConvL2D src));
8778 ins_cost(DEFAULT_COST + 3 * MEMORY_REF_COST);
8779 expand %{
8780 regD tmp;
8781 stkL_to_regD(tmp, src);
8782 convL2D_helper(dst, tmp);
8783 %}
8784 %}
// VIS3 variant: direct long-reg -> D-reg move, then FXTOD.
8786 instruct convL2D_reg(regD dst, iRegL src) %{
8787 predicate(UseVIS >= 3);
8788 match(Set dst (ConvL2D src));
8789 expand %{
8790 regD tmp;
8791 MoveL2D_reg_reg(tmp, src);
8792 convL2D_helper(dst, tmp);
8793 %}
8794 %}
8796 // Long to Float conversion using fast fxtof
// FXTOS: single-instruction long->float once the long is in a D register.
8797 instruct convL2F_helper(regF dst, regD tmp) %{
8798 effect(DEF dst, USE tmp);
8799 size(4);
8800 format %{ "FXTOS $tmp,$dst" %}
8801 opcode(Assembler::fpop1_op3, Assembler::arith_op, Assembler::fxtos_opf);
8802 ins_encode(form3_opf_rs2D_rdF(tmp, dst));
8803 ins_pipe(fcvtL2F);
8804 %}
// Stack-slot source: load long into a D register, then FXTOS.
8806 instruct convL2F_stk_fast_fxtof(regF dst, stackSlotL src) %{
8807 match(Set dst (ConvL2F src));
8808 ins_cost(DEFAULT_COST + MEMORY_REF_COST);
8809 expand %{
8810 regD tmp;
8811 stkL_to_regD(tmp, src);
8812 convL2F_helper(dst, tmp);
8813 %}
8814 %}
// VIS3 variant: direct long-reg -> D-reg move, then FXTOS.
8816 instruct convL2F_reg(regF dst, iRegL src) %{
8817 predicate(UseVIS >= 3);
8818 match(Set dst (ConvL2F src));
8819 ins_cost(DEFAULT_COST);
8820 expand %{
8821 regD tmp;
8822 MoveL2D_reg_reg(tmp, src);
8823 convL2F_helper(dst, tmp);
8824 %}
8825 %}
8827 //-----------
// Truncate long to int. 32-bit: move the low word; 64-bit: sign-extend
// the low 32 bits in place (SRA) so the upper half is canonical.
8829 instruct convL2I_reg(iRegI dst, iRegL src) %{
8830 match(Set dst (ConvL2I src));
8831 #ifndef _LP64
8832 format %{ "MOV $src.lo,$dst\t! long->int" %}
8833 ins_encode( form3_g0_rs2_rd_move_lo2( src, dst ) );
8834 ins_pipe(ialu_move_reg_I_to_L);
8835 #else
8836 size(4);
8837 format %{ "SRA $src,R_G0,$dst\t! long->int" %}
8838 ins_encode( form3_rs1_rd_signextend_lo1( src, dst ) );
8839 ins_pipe(ialu_reg);
8840 #endif
8841 %}
8843 // Register Shift Right Immediate
// Fold (ConvL2I (RShiftL src cnt)) with cnt in [32,63] into a single SRAX.
8844 instruct shrL_reg_imm6_L2I(iRegI dst, iRegL src, immI_32_63 cnt) %{
8845 match(Set dst (ConvL2I (RShiftL src cnt)));
8847 size(4);
8848 format %{ "SRAX $src,$cnt,$dst" %}
8849 opcode(Assembler::srax_op3, Assembler::arith_op);
8850 ins_encode( form3_sd_rs1_imm6_rd( src, cnt, dst ) );
8851 ins_pipe(ialu_reg_imm);
8852 %}
8854 //----------Control Flow Instructions------------------------------------------
8855 // Compare Instructions
8856 // Compare Integers
// All compares below encode SUBcc/ANDcc with rd = %g0: set the condition
// codes only, discard the arithmetic result.
8857 instruct compI_iReg(flagsReg icc, iRegI op1, iRegI op2) %{
8858 match(Set icc (CmpI op1 op2));
8859 effect( DEF icc, USE op1, USE op2 );
8861 size(4);
8862 format %{ "CMP $op1,$op2" %}
8863 opcode(Assembler::subcc_op3, Assembler::arith_op);
8864 ins_encode( form3_rs1_rs2_rd( op1, op2, R_G0 ) );
8865 ins_pipe(ialu_cconly_reg_reg);
8866 %}
// Unsigned compare: same SUBcc encoding, but sets flagsRegU so only the
// unsigned branch conditions are used on it.
8868 instruct compU_iReg(flagsRegU icc, iRegI op1, iRegI op2) %{
8869 match(Set icc (CmpU op1 op2));
8871 size(4);
8872 format %{ "CMP $op1,$op2\t! unsigned" %}
8873 opcode(Assembler::subcc_op3, Assembler::arith_op);
8874 ins_encode( form3_rs1_rs2_rd( op1, op2, R_G0 ) );
8875 ins_pipe(ialu_cconly_reg_reg);
8876 %}
// Compare against a 13-bit signed immediate (fits the simm13 field).
8878 instruct compI_iReg_imm13(flagsReg icc, iRegI op1, immI13 op2) %{
8879 match(Set icc (CmpI op1 op2));
8880 effect( DEF icc, USE op1 );
8882 size(4);
8883 format %{ "CMP $op1,$op2" %}
8884 opcode(Assembler::subcc_op3, Assembler::arith_op);
8885 ins_encode( form3_rs1_simm13_rd( op1, op2, R_G0 ) );
8886 ins_pipe(ialu_cconly_reg_imm);
8887 %}
// Test: (op1 & op2) compared against zero via ANDcc.
8889 instruct testI_reg_reg( flagsReg icc, iRegI op1, iRegI op2, immI0 zero ) %{
8890 match(Set icc (CmpI (AndI op1 op2) zero));
8892 size(4);
8893 format %{ "BTST $op2,$op1" %}
8894 opcode(Assembler::andcc_op3, Assembler::arith_op);
8895 ins_encode( form3_rs1_rs2_rd( op1, op2, R_G0 ) );
8896 ins_pipe(ialu_cconly_reg_reg_zero);
8897 %}
// Test against a 13-bit immediate mask.
8899 instruct testI_reg_imm( flagsReg icc, iRegI op1, immI13 op2, immI0 zero ) %{
8900 match(Set icc (CmpI (AndI op1 op2) zero));
8902 size(4);
8903 format %{ "BTST $op2,$op1" %}
8904 opcode(Assembler::andcc_op3, Assembler::arith_op);
8905 ins_encode( form3_rs1_simm13_rd( op1, op2, R_G0 ) );
8906 ins_pipe(ialu_cconly_reg_imm_zero);
8907 %}
// Long compares: identical SUBcc/ANDcc encodings but define flagsRegL (xcc).
8909 instruct compL_reg_reg(flagsRegL xcc, iRegL op1, iRegL op2 ) %{
8910 match(Set xcc (CmpL op1 op2));
8911 effect( DEF xcc, USE op1, USE op2 );
8913 size(4);
8914 format %{ "CMP $op1,$op2\t\t! long" %}
8915 opcode(Assembler::subcc_op3, Assembler::arith_op);
8916 ins_encode( form3_rs1_rs2_rd( op1, op2, R_G0 ) );
8917 ins_pipe(ialu_cconly_reg_reg);
8918 %}
// Long compare against a 13-bit signed immediate.
8920 instruct compL_reg_con(flagsRegL xcc, iRegL op1, immL13 con) %{
8921 match(Set xcc (CmpL op1 con));
8922 effect( DEF xcc, USE op1, USE con );
8924 size(4);
8925 format %{ "CMP $op1,$con\t\t! long" %}
8926 opcode(Assembler::subcc_op3, Assembler::arith_op);
8927 ins_encode( form3_rs1_simm13_rd( op1, con, R_G0 ) );
8928 ins_pipe(ialu_cconly_reg_reg);
8929 %}
// Long bit-test via ANDcc.
8931 instruct testL_reg_reg(flagsRegL xcc, iRegL op1, iRegL op2, immL0 zero) %{
8932 match(Set xcc (CmpL (AndL op1 op2) zero));
8933 effect( DEF xcc, USE op1, USE op2 );
8935 size(4);
8936 format %{ "BTST $op1,$op2\t\t! long" %}
8937 opcode(Assembler::andcc_op3, Assembler::arith_op);
8938 ins_encode( form3_rs1_rs2_rd( op1, op2, R_G0 ) );
8939 ins_pipe(ialu_cconly_reg_reg);
8940 %}
8942 // useful for checking the alignment of a pointer:
8943 instruct testL_reg_con(flagsRegL xcc, iRegL op1, immL13 con, immL0 zero) %{
8944 match(Set xcc (CmpL (AndL op1 con) zero));
8945 effect( DEF xcc, USE op1, USE con );
8947 size(4);
8948 format %{ "BTST $op1,$con\t\t! long" %}
8949 opcode(Assembler::andcc_op3, Assembler::arith_op);
8950 ins_encode( form3_rs1_simm13_rd( op1, con, R_G0 ) );
8951 ins_pipe(ialu_cconly_reg_reg);
8952 %}
// Unsigned compare against a 13-bit immediate.
8954 instruct compU_iReg_imm13(flagsRegU icc, iRegI op1, immU13 op2 ) %{
8955 match(Set icc (CmpU op1 op2));
8957 size(4);
8958 format %{ "CMP $op1,$op2\t! unsigned" %}
8959 opcode(Assembler::subcc_op3, Assembler::arith_op);
8960 ins_encode( form3_rs1_simm13_rd( op1, op2, R_G0 ) );
8961 ins_pipe(ialu_cconly_reg_imm);
8962 %}
8964 // Compare Pointers
8965 instruct compP_iRegP(flagsRegP pcc, iRegP op1, iRegP op2 ) %{
8966 match(Set pcc (CmpP op1 op2));
8968 size(4);
8969 format %{ "CMP $op1,$op2\t! ptr" %}
8970 opcode(Assembler::subcc_op3, Assembler::arith_op);
8971 ins_encode( form3_rs1_rs2_rd( op1, op2, R_G0 ) );
8972 ins_pipe(ialu_cconly_reg_reg);
8973 %}
// Pointer compare against a 13-bit immediate (e.g. null or small constants).
8975 instruct compP_iRegP_imm13(flagsRegP pcc, iRegP op1, immP13 op2 ) %{
8976 match(Set pcc (CmpP op1 op2));
8978 size(4);
8979 format %{ "CMP $op1,$op2\t! ptr" %}
8980 opcode(Assembler::subcc_op3, Assembler::arith_op);
8981 ins_encode( form3_rs1_simm13_rd( op1, op2, R_G0 ) );
8982 ins_pipe(ialu_cconly_reg_imm);
8983 %}
8985 // Compare Narrow oops
// Compressed pointers are 32-bit values, so a plain icc compare suffices.
8986 instruct compN_iRegN(flagsReg icc, iRegN op1, iRegN op2 ) %{
8987 match(Set icc (CmpN op1 op2));
8989 size(4);
8990 format %{ "CMP $op1,$op2\t! compressed ptr" %}
8991 opcode(Assembler::subcc_op3, Assembler::arith_op);
8992 ins_encode( form3_rs1_rs2_rd( op1, op2, R_G0 ) );
8993 ins_pipe(ialu_cconly_reg_reg);
8994 %}
// Narrow-oop compare against the compressed null constant.
8996 instruct compN_iRegN_immN0(flagsReg icc, iRegN op1, immN0 op2 ) %{
8997 match(Set icc (CmpN op1 op2));
8999 size(4);
9000 format %{ "CMP $op1,$op2\t! compressed ptr" %}
9001 opcode(Assembler::subcc_op3, Assembler::arith_op);
9002 ins_encode( form3_rs1_simm13_rd( op1, op2, R_G0 ) );
9003 ins_pipe(ialu_cconly_reg_imm);
9004 %}
9006 //----------Max and Min--------------------------------------------------------
9007 // Min Instructions
9008 // Conditional move for min
// MOVlt: op2 := op1 when icc says "less" (op2 is read-modify-write).
9009 instruct cmovI_reg_lt( iRegI op2, iRegI op1, flagsReg icc ) %{
9010 effect( USE_DEF op2, USE op1, USE icc );
9012 size(4);
9013 format %{ "MOVlt icc,$op1,$op2\t! min" %}
9014 opcode(Assembler::less);
9015 ins_encode( enc_cmov_reg_minmax(op2,op1) );
9016 ins_pipe(ialu_reg_flags);
9017 %}
9019 // Min Register with Register.
// MinI expands to compare + conditional move into op2.
9020 instruct minI_eReg(iRegI op1, iRegI op2) %{
9021 match(Set op2 (MinI op1 op2));
9022 ins_cost(DEFAULT_COST*2);
9023 expand %{
9024 flagsReg icc;
9025 compI_iReg(icc,op1,op2);
9026 cmovI_reg_lt(op2,op1,icc);
9027 %}
9028 %}
9030 // Max Instructions
9031 // Conditional move for max
// MOVgt: op2 := op1 when icc says "greater".
// NOTE(review): unlike cmovI_reg_lt there is no size(4) here — presumably
// the ADL-computed size is used; confirm this is intentional.
9032 instruct cmovI_reg_gt( iRegI op2, iRegI op1, flagsReg icc ) %{
9033 effect( USE_DEF op2, USE op1, USE icc );
9034 format %{ "MOVgt icc,$op1,$op2\t! max" %}
9035 opcode(Assembler::greater);
9036 ins_encode( enc_cmov_reg_minmax(op2,op1) );
9037 ins_pipe(ialu_reg_flags);
9038 %}
9040 // Max Register with Register
// MaxI expands to compare + conditional move into op2.
9041 instruct maxI_eReg(iRegI op1, iRegI op2) %{
9042 match(Set op2 (MaxI op1 op2));
9043 ins_cost(DEFAULT_COST*2);
9044 expand %{
9045 flagsReg icc;
9046 compI_iReg(icc,op1,op2);
9047 cmovI_reg_gt(op2,op1,icc);
9048 %}
9049 %}
9052 //----------Float Compares----------------------------------------------------
9053 // Compare floating, generate condition code
// FCMPs: single-precision compare setting an FP condition-code register.
9054 instruct cmpF_cc(flagsRegF fcc, regF src1, regF src2) %{
9055 match(Set fcc (CmpF src1 src2));
9057 size(4);
9058 format %{ "FCMPs $fcc,$src1,$src2" %}
9059 opcode(Assembler::fpop2_op3, Assembler::arith_op, Assembler::fcmps_opf);
9060 ins_encode( form3_opf_rs1F_rs2F_fcc( src1, src2, fcc ) );
9061 ins_pipe(faddF_fcc_reg_reg_zero);
9062 %}
// FCMPd: double-precision variant.
9064 instruct cmpD_cc(flagsRegF fcc, regD src1, regD src2) %{
9065 match(Set fcc (CmpD src1 src2));
9067 size(4);
9068 format %{ "FCMPd $fcc,$src1,$src2" %}
9069 opcode(Assembler::fpop2_op3, Assembler::arith_op, Assembler::fcmpd_opf);
9070 ins_encode( form3_opf_rs1D_rs2D_fcc( src1, src2, fcc ) );
9071 ins_pipe(faddD_fcc_reg_reg_zero);
9072 %}
9075 // Compare floating, generate -1,0,1
// Three-way compare (CmpF3) into an int register; clobbers fcc0.
// The 'opcode(true)' primary flag selects the float flavor in floating_cmp.
9076 instruct cmpF_reg(iRegI dst, regF src1, regF src2, flagsRegF0 fcc0) %{
9077 match(Set dst (CmpF3 src1 src2));
9078 effect(KILL fcc0);
9079 ins_cost(DEFAULT_COST*3+BRANCH_COST*3);
9080 format %{ "fcmpl $dst,$src1,$src2" %}
9081 // Primary = float
9082 opcode( true );
9083 ins_encode( floating_cmp( dst, src1, src2 ) );
9084 ins_pipe( floating_cmp );
9085 %}
// Double-precision three-way compare (CmpD3).
9087 instruct cmpD_reg(iRegI dst, regD src1, regD src2, flagsRegF0 fcc0) %{
9088 match(Set dst (CmpD3 src1 src2));
9089 effect(KILL fcc0);
9090 ins_cost(DEFAULT_COST*3+BRANCH_COST*3);
9091 format %{ "dcmpl $dst,$src1,$src2" %}
9092 // Primary = double (not float)
9093 opcode( false );
9094 ins_encode( floating_cmp( dst, src1, src2 ) );
9095 ins_pipe( floating_cmp );
9096 %}
9098 //----------Branches---------------------------------------------------------
9099 // Jump
9100 // (compare 'operand indIndex' and 'instruct addP_reg_reg' above)
// Switch-table jump: compute the jump-table base from the constant table,
// load the target through [base + switch_val], then indirect jump via O7.
9101 instruct jumpXtnd(iRegX switch_val, o7RegI table) %{
9102 match(Jump switch_val);
9103 effect(TEMP table);
9105 ins_cost(350);
9107 format %{ "ADD $constanttablebase, $constantoffset, O7\n\t"
9108 "LD [O7 + $switch_val], O7\n\t"
9109 "JUMP O7" %}
9110 ins_encode %{
9111 // Calculate table address into a register.
9112 Register table_reg;
9113 Register label_reg = O7;
9114 // If we are calculating the size of this instruction don't trust
9115 // zero offsets because they might change when
9116 // MachConstantBaseNode decides to optimize the constant table
9117 // base.
9118 if ((constant_offset() == 0) && !Compile::current()->in_scratch_emit_size()) {
9119 table_reg = $constanttablebase;
9120 } else {
9121 table_reg = O7;
9122 RegisterOrConstant con_offset = __ ensure_simm13_or_reg($constantoffset, O7);
9123 __ add($constanttablebase, con_offset, table_reg);
9124 }
9126 // Jump to base address + switch value
9127 __ ld_ptr(table_reg, $switch_val$$Register, label_reg);
9128 __ jmp(label_reg, G0);
9129 __ delayed()->nop();
9130 %}
9131 ins_pipe(ialu_reg_reg);
9132 %}
9134 // Direct Branch. Use V8 version with longer range.
// Unconditional goto: BA plus a nop in the delay slot (8 bytes).
9135 instruct branch(label labl) %{
9136 match(Goto);
9137 effect(USE labl);
9139 size(8);
9140 ins_cost(BRANCH_COST);
9141 format %{ "BA $labl" %}
9142 ins_encode %{
9143 Label* L = $labl$$label;
9144 __ ba(*L);
9145 __ delayed()->nop();
9146 %}
9147 ins_pipe(br);
9148 %}
9150 // Direct Branch, short with no delay slot
// CBCond-capable CPUs: 4-byte ba_short; marked short-branch and
// avoid-back-to-back since two adjacent cbcond-class branches are illegal.
9151 instruct branch_short(label labl) %{
9152 match(Goto);
9153 predicate(UseCBCond);
9154 effect(USE labl);
9156 size(4);
9157 ins_cost(BRANCH_COST);
9158 format %{ "BA $labl\t! short branch" %}
9159 ins_encode %{
9160 Label* L = $labl$$label;
9161 assert(__ use_cbcond(*L), "back to back cbcond");
9162 __ ba_short(*L);
9163 %}
9164 ins_short_branch(1);
9165 ins_avoid_back_to_back(1);
9166 ins_pipe(cbcond_reg_imm);
9167 %}
9169 // Conditional Direct Branch
// BP on integer condition codes; 8 bytes = branch + delay-slot nop.
9170 instruct branchCon(cmpOp cmp, flagsReg icc, label labl) %{
9171 match(If cmp icc);
9172 effect(USE labl);
9174 size(8);
9175 ins_cost(BRANCH_COST);
9176 format %{ "BP$cmp $icc,$labl" %}
9177 // Prim = bits 24-22, Secnd = bits 31-30
9178 ins_encode( enc_bp( labl, cmp, icc ) );
9179 ins_pipe(br_cc);
9180 %}
// Unsigned-condition variant.
// NOTE(review): no size(8) here, unlike branchCon — presumably the ADL
// computes the size; confirm the omission is intentional.
9182 instruct branchConU(cmpOpU cmp, flagsRegU icc, label labl) %{
9183 match(If cmp icc);
9184 effect(USE labl);
9186 ins_cost(BRANCH_COST);
9187 format %{ "BP$cmp $icc,$labl" %}
9188 // Prim = bits 24-22, Secnd = bits 31-30
9189 ins_encode( enc_bp( labl, cmp, icc ) );
9190 ins_pipe(br_cc);
9191 %}
// Pointer-flags branch; uses ptr_cc and static prediction based on
// branch direction (backward => predict taken).
9193 instruct branchConP(cmpOpP cmp, flagsRegP pcc, label labl) %{
9194 match(If cmp pcc);
9195 effect(USE labl);
9197 size(8);
9198 ins_cost(BRANCH_COST);
9199 format %{ "BP$cmp $pcc,$labl" %}
9200 ins_encode %{
9201 Label* L = $labl$$label;
9202 Assembler::Predict predict_taken =
9203 cbuf.is_backward_branch(*L) ? Assembler::pt : Assembler::pn;
9205 __ bp( (Assembler::Condition)($cmp$$cmpcode), false, Assembler::ptr_cc, predict_taken, *L);
9206 __ delayed()->nop();
9207 %}
9208 ins_pipe(br_cc);
9209 %}
// Floating-point flags branch (FBP) with the same direction-based prediction.
9211 instruct branchConF(cmpOpF cmp, flagsRegF fcc, label labl) %{
9212 match(If cmp fcc);
9213 effect(USE labl);
9215 size(8);
9216 ins_cost(BRANCH_COST);
9217 format %{ "FBP$cmp $fcc,$labl" %}
9218 ins_encode %{
9219 Label* L = $labl$$label;
9220 Assembler::Predict predict_taken =
9221 cbuf.is_backward_branch(*L) ? Assembler::pt : Assembler::pn;
9223 __ fbp( (Assembler::Condition)($cmp$$cmpcode), false, (Assembler::CC)($fcc$$reg), predict_taken, *L);
9224 __ delayed()->nop();
9225 %}
9226 ins_pipe(br_fcc);
9227 %}
// CountedLoopEnd variants: same encoding as branchCon/branchConU, matched
// separately so the loop back-edge can be scheduled distinctly.
9229 instruct branchLoopEnd(cmpOp cmp, flagsReg icc, label labl) %{
9230 match(CountedLoopEnd cmp icc);
9231 effect(USE labl);
9233 size(8);
9234 ins_cost(BRANCH_COST);
9235 format %{ "BP$cmp $icc,$labl\t! Loop end" %}
9236 // Prim = bits 24-22, Secnd = bits 31-30
9237 ins_encode( enc_bp( labl, cmp, icc ) );
9238 ins_pipe(br_cc);
9239 %}
9241 instruct branchLoopEndU(cmpOpU cmp, flagsRegU icc, label labl) %{
9242 match(CountedLoopEnd cmp icc);
9243 effect(USE labl);
9245 size(8);
9246 ins_cost(BRANCH_COST);
9247 format %{ "BP$cmp $icc,$labl\t! Loop end" %}
9248 // Prim = bits 24-22, Secnd = bits 31-30
9249 ins_encode( enc_bp( labl, cmp, icc ) );
9250 ins_pipe(br_cc);
9251 %}
9253 // Compare and branch instructions
// Fused compare+branch (12 bytes: cmp, bp, delay nop). These KILL the flag
// register because the cmp overwrites it as a side effect.
9254 instruct cmpI_reg_branch(cmpOp cmp, iRegI op1, iRegI op2, label labl, flagsReg icc) %{
9255 match(If cmp (CmpI op1 op2));
9256 effect(USE labl, KILL icc);
9258 size(12);
9259 ins_cost(BRANCH_COST);
9260 format %{ "CMP $op1,$op2\t! int\n\t"
9261 "BP$cmp $labl" %}
9262 ins_encode %{
9263 Label* L = $labl$$label;
9264 Assembler::Predict predict_taken =
9265 cbuf.is_backward_branch(*L) ? Assembler::pt : Assembler::pn;
9266 __ cmp($op1$$Register, $op2$$Register);
9267 __ bp((Assembler::Condition)($cmp$$cmpcode), false, Assembler::icc, predict_taken, *L);
9268 __ delayed()->nop();
9269 %}
9270 ins_pipe(cmp_br_reg_reg);
9271 %}
// Immediate form: op2 is a 5-bit immediate (immI5, the cbcond-compatible range).
9273 instruct cmpI_imm_branch(cmpOp cmp, iRegI op1, immI5 op2, label labl, flagsReg icc) %{
9274 match(If cmp (CmpI op1 op2));
9275 effect(USE labl, KILL icc);
9277 size(12);
9278 ins_cost(BRANCH_COST);
9279 format %{ "CMP $op1,$op2\t! int\n\t"
9280 "BP$cmp $labl" %}
9281 ins_encode %{
9282 Label* L = $labl$$label;
9283 Assembler::Predict predict_taken =
9284 cbuf.is_backward_branch(*L) ? Assembler::pt : Assembler::pn;
9285 __ cmp($op1$$Register, $op2$$constant);
9286 __ bp((Assembler::Condition)($cmp$$cmpcode), false, Assembler::icc, predict_taken, *L);
9287 __ delayed()->nop();
9288 %}
9289 ins_pipe(cmp_br_reg_imm);
9290 %}
// Unsigned compare+branch, register form.
9292 instruct cmpU_reg_branch(cmpOpU cmp, iRegI op1, iRegI op2, label labl, flagsRegU icc) %{
9293 match(If cmp (CmpU op1 op2));
9294 effect(USE labl, KILL icc);
9296 size(12);
9297 ins_cost(BRANCH_COST);
9298 format %{ "CMP $op1,$op2\t! unsigned\n\t"
9299 "BP$cmp $labl" %}
9300 ins_encode %{
9301 Label* L = $labl$$label;
9302 Assembler::Predict predict_taken =
9303 cbuf.is_backward_branch(*L) ? Assembler::pt : Assembler::pn;
9304 __ cmp($op1$$Register, $op2$$Register);
9305 __ bp((Assembler::Condition)($cmp$$cmpcode), false, Assembler::icc, predict_taken, *L);
9306 __ delayed()->nop();
9307 %}
9308 ins_pipe(cmp_br_reg_reg);
9309 %}
// Unsigned compare+branch, immediate form.
9311 instruct cmpU_imm_branch(cmpOpU cmp, iRegI op1, immI5 op2, label labl, flagsRegU icc) %{
9312 match(If cmp (CmpU op1 op2));
9313 effect(USE labl, KILL icc);
9315 size(12);
9316 ins_cost(BRANCH_COST);
9317 format %{ "CMP $op1,$op2\t! unsigned\n\t"
9318 "BP$cmp $labl" %}
9319 ins_encode %{
9320 Label* L = $labl$$label;
9321 Assembler::Predict predict_taken =
9322 cbuf.is_backward_branch(*L) ? Assembler::pt : Assembler::pn;
9323 __ cmp($op1$$Register, $op2$$constant);
9324 __ bp((Assembler::Condition)($cmp$$cmpcode), false, Assembler::icc, predict_taken, *L);
9325 __ delayed()->nop();
9326 %}
9327 ins_pipe(cmp_br_reg_imm);
9328 %}
// Long compare+branch (branches on xcc), register form.
9330 instruct cmpL_reg_branch(cmpOp cmp, iRegL op1, iRegL op2, label labl, flagsRegL xcc) %{
9331 match(If cmp (CmpL op1 op2));
9332 effect(USE labl, KILL xcc);
9334 size(12);
9335 ins_cost(BRANCH_COST);
9336 format %{ "CMP $op1,$op2\t! long\n\t"
9337 "BP$cmp $labl" %}
9338 ins_encode %{
9339 Label* L = $labl$$label;
9340 Assembler::Predict predict_taken =
9341 cbuf.is_backward_branch(*L) ? Assembler::pt : Assembler::pn;
9342 __ cmp($op1$$Register, $op2$$Register);
9343 __ bp((Assembler::Condition)($cmp$$cmpcode), false, Assembler::xcc, predict_taken, *L);
9344 __ delayed()->nop();
9345 %}
9346 ins_pipe(cmp_br_reg_reg);
9347 %}
// Long compare+branch, 5-bit-immediate form.
9349 instruct cmpL_imm_branch(cmpOp cmp, iRegL op1, immL5 op2, label labl, flagsRegL xcc) %{
9350 match(If cmp (CmpL op1 op2));
9351 effect(USE labl, KILL xcc);
9353 size(12);
9354 ins_cost(BRANCH_COST);
9355 format %{ "CMP $op1,$op2\t! long\n\t"
9356 "BP$cmp $labl" %}
9357 ins_encode %{
9358 Label* L = $labl$$label;
9359 Assembler::Predict predict_taken =
9360 cbuf.is_backward_branch(*L) ? Assembler::pt : Assembler::pn;
9361 __ cmp($op1$$Register, $op2$$constant);
9362 __ bp((Assembler::Condition)($cmp$$cmpcode), false, Assembler::xcc, predict_taken, *L);
9363 __ delayed()->nop();
9364 %}
9365 ins_pipe(cmp_br_reg_imm);
9366 %}
9368 // Compare Pointers and branch
// Pointer compare+branch on ptr_cc (12 bytes: cmp, bp, delay nop).
// NOTE(review): the format says "B$cmp" while the encoder emits bp — the
// sibling int/long variants print "BP$cmp"; cosmetic disassembly mismatch.
9369 instruct cmpP_reg_branch(cmpOpP cmp, iRegP op1, iRegP op2, label labl, flagsRegP pcc) %{
9370 match(If cmp (CmpP op1 op2));
9371 effect(USE labl, KILL pcc);
9373 size(12);
9374 ins_cost(BRANCH_COST);
9375 format %{ "CMP $op1,$op2\t! ptr\n\t"
9376 "B$cmp $labl" %}
9377 ins_encode %{
9378 Label* L = $labl$$label;
9379 Assembler::Predict predict_taken =
9380 cbuf.is_backward_branch(*L) ? Assembler::pt : Assembler::pn;
9381 __ cmp($op1$$Register, $op2$$Register);
9382 __ bp((Assembler::Condition)($cmp$$cmpcode), false, Assembler::ptr_cc, predict_taken, *L);
9383 __ delayed()->nop();
9384 %}
9385 ins_pipe(cmp_br_reg_reg);
9386 %}
// Null-check flavor: compare against %g0 instead of materializing null.
9388 instruct cmpP_null_branch(cmpOpP cmp, iRegP op1, immP0 null, label labl, flagsRegP pcc) %{
9389 match(If cmp (CmpP op1 null));
9390 effect(USE labl, KILL pcc);
9392 size(12);
9393 ins_cost(BRANCH_COST);
9394 format %{ "CMP $op1,0\t! ptr\n\t"
9395 "B$cmp $labl" %}
9396 ins_encode %{
9397 Label* L = $labl$$label;
9398 Assembler::Predict predict_taken =
9399 cbuf.is_backward_branch(*L) ? Assembler::pt : Assembler::pn;
9400 __ cmp($op1$$Register, G0);
9401 // bpr() is not used here since it has shorter distance.
9402 __ bp((Assembler::Condition)($cmp$$cmpcode), false, Assembler::ptr_cc, predict_taken, *L);
9403 __ delayed()->nop();
9404 %}
9405 ins_pipe(cmp_br_reg_reg);
9406 %}
// Narrow-oop compare+branch: 32-bit values, branch on icc.
9408 instruct cmpN_reg_branch(cmpOp cmp, iRegN op1, iRegN op2, label labl, flagsReg icc) %{
9409 match(If cmp (CmpN op1 op2));
9410 effect(USE labl, KILL icc);
9412 size(12);
9413 ins_cost(BRANCH_COST);
9414 format %{ "CMP $op1,$op2\t! compressed ptr\n\t"
9415 "BP$cmp $labl" %}
9416 ins_encode %{
9417 Label* L = $labl$$label;
9418 Assembler::Predict predict_taken =
9419 cbuf.is_backward_branch(*L) ? Assembler::pt : Assembler::pn;
9420 __ cmp($op1$$Register, $op2$$Register);
9421 __ bp((Assembler::Condition)($cmp$$cmpcode), false, Assembler::icc, predict_taken, *L);
9422 __ delayed()->nop();
9423 %}
9424 ins_pipe(cmp_br_reg_reg);
9425 %}
// Narrow-oop null-check flavor: compressed null is 0, so compare with %g0.
9427 instruct cmpN_null_branch(cmpOp cmp, iRegN op1, immN0 null, label labl, flagsReg icc) %{
9428 match(If cmp (CmpN op1 null));
9429 effect(USE labl, KILL icc);
9431 size(12);
9432 ins_cost(BRANCH_COST);
9433 format %{ "CMP $op1,0\t! compressed ptr\n\t"
9434 "BP$cmp $labl" %}
9435 ins_encode %{
9436 Label* L = $labl$$label;
9437 Assembler::Predict predict_taken =
9438 cbuf.is_backward_branch(*L) ? Assembler::pt : Assembler::pn;
9439 __ cmp($op1$$Register, G0);
9440 __ bp((Assembler::Condition)($cmp$$cmpcode), false, Assembler::icc, predict_taken, *L);
9441 __ delayed()->nop();
9442 %}
9443 ins_pipe(cmp_br_reg_reg);
9444 %}
9446 // Loop back branch
// Compare+branch fused forms matched on CountedLoopEnd (loop back-edge).
9447 instruct cmpI_reg_branchLoopEnd(cmpOp cmp, iRegI op1, iRegI op2, label labl, flagsReg icc) %{
9448 match(CountedLoopEnd cmp (CmpI op1 op2));
9449 effect(USE labl, KILL icc);
9451 size(12);
9452 ins_cost(BRANCH_COST);
9453 format %{ "CMP $op1,$op2\t! int\n\t"
9454 "BP$cmp $labl\t! Loop end" %}
9455 ins_encode %{
9456 Label* L = $labl$$label;
9457 Assembler::Predict predict_taken =
9458 cbuf.is_backward_branch(*L) ? Assembler::pt : Assembler::pn;
9459 __ cmp($op1$$Register, $op2$$Register);
9460 __ bp((Assembler::Condition)($cmp$$cmpcode), false, Assembler::icc, predict_taken, *L);
9461 __ delayed()->nop();
9462 %}
9463 ins_pipe(cmp_br_reg_reg);
9464 %}
// 5-bit immediate form of the loop back-edge compare+branch.
9466 instruct cmpI_imm_branchLoopEnd(cmpOp cmp, iRegI op1, immI5 op2, label labl, flagsReg icc) %{
9467 match(CountedLoopEnd cmp (CmpI op1 op2));
9468 effect(USE labl, KILL icc);
9470 size(12);
9471 ins_cost(BRANCH_COST);
9472 format %{ "CMP $op1,$op2\t! int\n\t"
9473 "BP$cmp $labl\t! Loop end" %}
9474 ins_encode %{
9475 Label* L = $labl$$label;
9476 Assembler::Predict predict_taken =
9477 cbuf.is_backward_branch(*L) ? Assembler::pt : Assembler::pn;
9478 __ cmp($op1$$Register, $op2$$constant);
9479 __ bp((Assembler::Condition)($cmp$$cmpcode), false, Assembler::icc, predict_taken, *L);
9480 __ delayed()->nop();
9481 %}
9482 ins_pipe(cmp_br_reg_imm);
9483 %}
9485 // Short compare and branch instructions
// Single 4-byte CBCOND (compare-and-branch, no delay slot). All variants
// require UseCBCond, are marked short-branch, and avoid-back-to-back
// because adjacent cbcond instructions are not allowed.
9486 instruct cmpI_reg_branch_short(cmpOp cmp, iRegI op1, iRegI op2, label labl, flagsReg icc) %{
9487 match(If cmp (CmpI op1 op2));
9488 predicate(UseCBCond);
9489 effect(USE labl, KILL icc);
9491 size(4);
9492 ins_cost(BRANCH_COST);
9493 format %{ "CWB$cmp $op1,$op2,$labl\t! int" %}
9494 ins_encode %{
9495 Label* L = $labl$$label;
9496 assert(__ use_cbcond(*L), "back to back cbcond");
9497 __ cbcond((Assembler::Condition)($cmp$$cmpcode), Assembler::icc, $op1$$Register, $op2$$Register, *L);
9498 %}
9499 ins_short_branch(1);
9500 ins_avoid_back_to_back(1);
9501 ins_pipe(cbcond_reg_reg);
9502 %}
// 5-bit immediate form (cbcond's immediate field).
9504 instruct cmpI_imm_branch_short(cmpOp cmp, iRegI op1, immI5 op2, label labl, flagsReg icc) %{
9505 match(If cmp (CmpI op1 op2));
9506 predicate(UseCBCond);
9507 effect(USE labl, KILL icc);
9509 size(4);
9510 ins_cost(BRANCH_COST);
9511 format %{ "CWB$cmp $op1,$op2,$labl\t! int" %}
9512 ins_encode %{
9513 Label* L = $labl$$label;
9514 assert(__ use_cbcond(*L), "back to back cbcond");
9515 __ cbcond((Assembler::Condition)($cmp$$cmpcode), Assembler::icc, $op1$$Register, $op2$$constant, *L);
9516 %}
9517 ins_short_branch(1);
9518 ins_avoid_back_to_back(1);
9519 ins_pipe(cbcond_reg_imm);
9520 %}
// Unsigned variant, register form.
9522 instruct cmpU_reg_branch_short(cmpOpU cmp, iRegI op1, iRegI op2, label labl, flagsRegU icc) %{
9523 match(If cmp (CmpU op1 op2));
9524 predicate(UseCBCond);
9525 effect(USE labl, KILL icc);
9527 size(4);
9528 ins_cost(BRANCH_COST);
9529 format %{ "CWB$cmp $op1,$op2,$labl\t! unsigned" %}
9530 ins_encode %{
9531 Label* L = $labl$$label;
9532 assert(__ use_cbcond(*L), "back to back cbcond");
9533 __ cbcond((Assembler::Condition)($cmp$$cmpcode), Assembler::icc, $op1$$Register, $op2$$Register, *L);
9534 %}
9535 ins_short_branch(1);
9536 ins_avoid_back_to_back(1);
9537 ins_pipe(cbcond_reg_reg);
9538 %}
// Unsigned variant, immediate form.
9540 instruct cmpU_imm_branch_short(cmpOpU cmp, iRegI op1, immI5 op2, label labl, flagsRegU icc) %{
9541 match(If cmp (CmpU op1 op2));
9542 predicate(UseCBCond);
9543 effect(USE labl, KILL icc);
9545 size(4);
9546 ins_cost(BRANCH_COST);
9547 format %{ "CWB$cmp $op1,$op2,$labl\t! unsigned" %}
9548 ins_encode %{
9549 Label* L = $labl$$label;
9550 assert(__ use_cbcond(*L), "back to back cbcond");
9551 __ cbcond((Assembler::Condition)($cmp$$cmpcode), Assembler::icc, $op1$$Register, $op2$$constant, *L);
9552 %}
9553 ins_short_branch(1);
9554 ins_avoid_back_to_back(1);
9555 ins_pipe(cbcond_reg_imm);
9556 %}
// Long variant (CXB = 64-bit cbcond on xcc), register form.
9558 instruct cmpL_reg_branch_short(cmpOp cmp, iRegL op1, iRegL op2, label labl, flagsRegL xcc) %{
9559 match(If cmp (CmpL op1 op2));
9560 predicate(UseCBCond);
9561 effect(USE labl, KILL xcc);
9563 size(4);
9564 ins_cost(BRANCH_COST);
9565 format %{ "CXB$cmp $op1,$op2,$labl\t! long" %}
9566 ins_encode %{
9567 Label* L = $labl$$label;
9568 assert(__ use_cbcond(*L), "back to back cbcond");
9569 __ cbcond((Assembler::Condition)($cmp$$cmpcode), Assembler::xcc, $op1$$Register, $op2$$Register, *L);
9570 %}
9571 ins_short_branch(1);
9572 ins_avoid_back_to_back(1);
9573 ins_pipe(cbcond_reg_reg);
9574 %}
// Long variant, 5-bit immediate form.
9576 instruct cmpL_imm_branch_short(cmpOp cmp, iRegL op1, immL5 op2, label labl, flagsRegL xcc) %{
9577 match(If cmp (CmpL op1 op2));
9578 predicate(UseCBCond);
9579 effect(USE labl, KILL xcc);
9581 size(4);
9582 ins_cost(BRANCH_COST);
9583 format %{ "CXB$cmp $op1,$op2,$labl\t! long" %}
9584 ins_encode %{
9585 Label* L = $labl$$label;
9586 assert(__ use_cbcond(*L), "back to back cbcond");
9587 __ cbcond((Assembler::Condition)($cmp$$cmpcode), Assembler::xcc, $op1$$Register, $op2$$constant, *L);
9588 %}
9589 ins_short_branch(1);
9590 ins_avoid_back_to_back(1);
9591 ins_pipe(cbcond_reg_imm);
9592 %}
9594 // Compare Pointers and branch
// Compare two pointers and branch in one short-range CBcond instruction.
// Uses ptr_cc, which maps to xcc on 64-bit and icc on 32-bit builds.
9595 instruct cmpP_reg_branch_short(cmpOpP cmp, iRegP op1, iRegP op2, label labl, flagsRegP pcc) %{
9596 match(If cmp (CmpP op1 op2));
9597 predicate(UseCBCond);
9598 effect(USE labl, KILL pcc);
9600 size(4);
9601 ins_cost(BRANCH_COST);
9602 #ifdef _LP64
9603 format %{ "CXB$cmp $op1,$op2,$labl\t! ptr" %}
9604 #else
9605 format %{ "CWB$cmp $op1,$op2,$labl\t! ptr" %}
9606 #endif
9607 ins_encode %{
9608 Label* L = $labl$$label;
9609 assert(__ use_cbcond(*L), "back to back cbcond");
9610 __ cbcond((Assembler::Condition)($cmp$$cmpcode), Assembler::ptr_cc, $op1$$Register, $op2$$Register, *L);
9611 %}
9612 ins_short_branch(1);
9613 ins_avoid_back_to_back(1);
9614 ins_pipe(cbcond_reg_reg);
9615 %}
// Compare a pointer against NULL and branch in one short-range CBcond
// instruction; the null is encoded as register G0 (hardwired zero).
9617 instruct cmpP_null_branch_short(cmpOpP cmp, iRegP op1, immP0 null, label labl, flagsRegP pcc) %{
9618 match(If cmp (CmpP op1 null));
9619 predicate(UseCBCond);
9620 effect(USE labl, KILL pcc);
9622 size(4);
9623 ins_cost(BRANCH_COST);
9624 #ifdef _LP64
9625 format %{ "CXB$cmp $op1,0,$labl\t! ptr" %}
9626 #else
9627 format %{ "CWB$cmp $op1,0,$labl\t! ptr" %}
9628 #endif
9629 ins_encode %{
9630 Label* L = $labl$$label;
9631 assert(__ use_cbcond(*L), "back to back cbcond");
9632 __ cbcond((Assembler::Condition)($cmp$$cmpcode), Assembler::ptr_cc, $op1$$Register, G0, *L);
9633 %}
9634 ins_short_branch(1);
9635 ins_avoid_back_to_back(1);
9636 ins_pipe(cbcond_reg_reg);
9637 %}
// Compare two compressed (narrow) oops and branch in one short-range
// CBcond instruction.  Narrow oops are 32-bit, so icc is used.
9639 instruct cmpN_reg_branch_short(cmpOp cmp, iRegN op1, iRegN op2, label labl, flagsReg icc) %{
9640 match(If cmp (CmpN op1 op2));
9641 predicate(UseCBCond);
9642 effect(USE labl, KILL icc);
9644 size(4);
9645 ins_cost(BRANCH_COST);
// Fix: the second operand was written as the literal text "op2" — the '$'
// substitution prefix was missing, so disassembly output printed "op2"
// instead of the actual register.  Now matches every sibling instruct.
9646 format %{ "CWB$cmp $op1,$op2,$labl\t! compressed ptr" %}
9647 ins_encode %{
9648 Label* L = $labl$$label;
9649 assert(__ use_cbcond(*L), "back to back cbcond");
9650 __ cbcond((Assembler::Condition)($cmp$$cmpcode), Assembler::icc, $op1$$Register, $op2$$Register, *L);
9651 %}
9652 ins_short_branch(1);
9653 ins_avoid_back_to_back(1);
9654 ins_pipe(cbcond_reg_reg);
9655 %}
// Compare a compressed (narrow) oop against the narrow null and branch in
// one short-range CBcond instruction; null is encoded as G0.
9657 instruct cmpN_null_branch_short(cmpOp cmp, iRegN op1, immN0 null, label labl, flagsReg icc) %{
9658 match(If cmp (CmpN op1 null));
9659 predicate(UseCBCond);
9660 effect(USE labl, KILL icc);
9662 size(4);
9663 ins_cost(BRANCH_COST);
9664 format %{ "CWB$cmp $op1,0,$labl\t! compressed ptr" %}
9665 ins_encode %{
9666 Label* L = $labl$$label;
9667 assert(__ use_cbcond(*L), "back to back cbcond");
9668 __ cbcond((Assembler::Condition)($cmp$$cmpcode), Assembler::icc, $op1$$Register, G0, *L);
9669 %}
9670 ins_short_branch(1);
9671 ins_avoid_back_to_back(1);
9672 ins_pipe(cbcond_reg_reg);
9673 %}
9675 // Loop back branch
// Counted-loop back-branch: compare two ints and branch to the loop head
// with a single short-range CBcond instruction.
9676 instruct cmpI_reg_branchLoopEnd_short(cmpOp cmp, iRegI op1, iRegI op2, label labl, flagsReg icc) %{
9677 match(CountedLoopEnd cmp (CmpI op1 op2));
9678 predicate(UseCBCond);
9679 effect(USE labl, KILL icc);
9681 size(4);
9682 ins_cost(BRANCH_COST);
9683 format %{ "CWB$cmp $op1,$op2,$labl\t! Loop end" %}
9684 ins_encode %{
9685 Label* L = $labl$$label;
9686 assert(__ use_cbcond(*L), "back to back cbcond");
9687 __ cbcond((Assembler::Condition)($cmp$$cmpcode), Assembler::icc, $op1$$Register, $op2$$Register, *L);
9688 %}
9689 ins_short_branch(1);
9690 ins_avoid_back_to_back(1);
9691 ins_pipe(cbcond_reg_reg);
9692 %}
// Same as above but with a 5-bit immediate second operand.
9694 instruct cmpI_imm_branchLoopEnd_short(cmpOp cmp, iRegI op1, immI5 op2, label labl, flagsReg icc) %{
9695 match(CountedLoopEnd cmp (CmpI op1 op2));
9696 predicate(UseCBCond);
9697 effect(USE labl, KILL icc);
9699 size(4);
9700 ins_cost(BRANCH_COST);
9701 format %{ "CWB$cmp $op1,$op2,$labl\t! Loop end" %}
9702 ins_encode %{
9703 Label* L = $labl$$label;
9704 assert(__ use_cbcond(*L), "back to back cbcond");
9705 __ cbcond((Assembler::Condition)($cmp$$cmpcode), Assembler::icc, $op1$$Register, $op2$$constant, *L);
9706 %}
9707 ins_short_branch(1);
9708 ins_avoid_back_to_back(1);
9709 ins_pipe(cbcond_reg_imm);
9710 %}
9712 // Branch-on-register tests all 64 bits. We assume that values
9713 // in 64-bit registers always remain zero- or sign-extended
9714 // unless our code munges the high bits. Interrupts can chop
9715 // the high order bits to zero or sign at any time.
// Branch-on-register-value (BPr): test a register directly against zero
// without setting condition codes.  Size 8 = branch + delay-slot nop.
9716 instruct branchCon_regI(cmpOp_reg cmp, iRegI op1, immI0 zero, label labl) %{
9717 match(If cmp (CmpI op1 zero));
// Only legal when the matcher proves the value/comparison pair suits BPr.
9718 predicate(can_branch_register(_kids[0]->_leaf, _kids[1]->_leaf));
9719 effect(USE labl);
9721 size(8);
9722 ins_cost(BRANCH_COST);
9723 format %{ "BR$cmp $op1,$labl" %}
9724 ins_encode( enc_bpr( labl, cmp, op1 ) );
9725 ins_pipe(br_reg);
9726 %}
// Pointer variant: branch on register compared against NULL.
9728 instruct branchCon_regP(cmpOp_reg cmp, iRegP op1, immP0 null, label labl) %{
9729 match(If cmp (CmpP op1 null));
9730 predicate(can_branch_register(_kids[0]->_leaf, _kids[1]->_leaf));
9731 effect(USE labl);
9733 size(8);
9734 ins_cost(BRANCH_COST);
9735 format %{ "BR$cmp $op1,$labl" %}
9736 ins_encode( enc_bpr( labl, cmp, op1 ) );
9737 ins_pipe(br_reg);
9738 %}
// Long variant: branch on 64-bit register compared against zero.
9740 instruct branchCon_regL(cmpOp_reg cmp, iRegL op1, immL0 zero, label labl) %{
9741 match(If cmp (CmpL op1 zero));
9742 predicate(can_branch_register(_kids[0]->_leaf, _kids[1]->_leaf));
9743 effect(USE labl);
9745 size(8);
9746 ins_cost(BRANCH_COST);
9747 format %{ "BR$cmp $op1,$labl" %}
9748 ins_encode( enc_bpr( labl, cmp, op1 ) );
9749 ins_pipe(br_reg);
9750 %}
9753 // ============================================================================
9754 // Long Compare
9755 //
9756 // Currently we hold longs in 2 registers. Comparing such values efficiently
9757 // is tricky. The flavor of compare used depends on whether we are testing
9758 // for LT, LE, or EQ. For a simple LT test we can check just the sign bit.
9759 // The GE test is the negated LT test. The LE test can be had by commuting
9760 // the operands (yielding a GE test) and then negating; negate again for the
9761 // GT test. The EQ test is done by ORcc'ing the high and low halves, and the
9762 // NE test is negated from that.
9764 // Due to a shortcoming in the ADLC, it mixes up expressions like:
9765 // (foo (CmpI (CmpL X Y) 0)) and (bar (CmpI (CmpL X 0L) 0)). Note the
9766 // difference between 'Y' and '0L'. The tree-matches for the CmpI sections
9767 // are collapsed internally in the ADLC's dfa-gen code. The match for
9768 // (CmpI (CmpL X Y) 0) is silently replaced with (CmpI (CmpL X 0L) 0) and the
9769 // foo match ends up with the wrong leaf. One fix is to not match both
9770 // reg-reg and reg-zero forms of long-compare. This is unfortunate because
9771 // both forms beat the trinary form of long-compare and both are very useful
9772 // on Intel which has so few registers.
// Branch on a previously-set xcc (64-bit) condition register.
// Size 8 = BPcc + delay-slot nop.
9774 instruct branchCon_long(cmpOp cmp, flagsRegL xcc, label labl) %{
9775 match(If cmp xcc);
9776 effect(USE labl);
9778 size(8);
9779 ins_cost(BRANCH_COST);
9780 format %{ "BP$cmp $xcc,$labl" %}
9781 ins_encode %{
9782 Label* L = $labl$$label;
// Predict backward branches taken (loops), forward branches not taken.
9783 Assembler::Predict predict_taken =
9784 cbuf.is_backward_branch(*L) ? Assembler::pt : Assembler::pn;
9786 __ bp( (Assembler::Condition)($cmp$$cmpcode), false, Assembler::xcc, predict_taken, *L);
9787 __ delayed()->nop();
9788 %}
9789 ins_pipe(br_cc);
9790 %}
9792 // Manifest a CmpL3 result in an integer register. Very painful.
9793 // This is the test to avoid.
// Materialize the three-way long-compare result (-1/0/1) in an integer
// register.  Expensive (6 instructions); avoid when a branch suffices.
9794 instruct cmpL3_reg_reg(iRegI dst, iRegL src1, iRegL src2, flagsReg ccr ) %{
9795 match(Set dst (CmpL3 src1 src2) );
9796 effect( KILL ccr );
9797 ins_cost(6*DEFAULT_COST);
9798 size(24);
9799 format %{ "CMP $src1,$src2\t\t! long\n"
9800 "\tBLT,a,pn done\n"
9801 "\tMOV -1,$dst\t! delay slot\n"
9802 "\tBGT,a,pn done\n"
9803 "\tMOV 1,$dst\t! delay slot\n"
9804 "\tCLR $dst\n"
9805 "done:" %}
9806 ins_encode( cmpl_flag(src1,src2,dst) );
9807 ins_pipe(cmpL_reg);
9808 %}
9810 // Conditional move
// Conditional moves predicated on the 64-bit (xcc) condition codes.
// Each instruct replaces $dst with $src only when $cmp holds on $xcc.
9811 instruct cmovLL_reg(cmpOp cmp, flagsRegL xcc, iRegL dst, iRegL src) %{
9812 match(Set dst (CMoveL (Binary cmp xcc) (Binary dst src)));
9813 ins_cost(150);
9814 format %{ "MOV$cmp $xcc,$src,$dst\t! long" %}
9815 ins_encode( enc_cmov_reg(cmp,dst,src, (Assembler::xcc)) );
9816 ins_pipe(ialu_reg);
9817 %}
// Long destination, zero immediate source.
9819 instruct cmovLL_imm(cmpOp cmp, flagsRegL xcc, iRegL dst, immL0 src) %{
9820 match(Set dst (CMoveL (Binary cmp xcc) (Binary dst src)));
9821 ins_cost(140);
9822 format %{ "MOV$cmp $xcc,$src,$dst\t! long" %}
9823 ins_encode( enc_cmov_imm(cmp,dst,src, (Assembler::xcc)) );
9824 ins_pipe(ialu_imm);
9825 %}
// Int destination selected on long-compare flags.
9827 instruct cmovIL_reg(cmpOp cmp, flagsRegL xcc, iRegI dst, iRegI src) %{
9828 match(Set dst (CMoveI (Binary cmp xcc) (Binary dst src)));
9829 ins_cost(150);
9830 format %{ "MOV$cmp $xcc,$src,$dst" %}
9831 ins_encode( enc_cmov_reg(cmp,dst,src, (Assembler::xcc)) );
9832 ins_pipe(ialu_reg);
9833 %}
// Int destination, 11-bit immediate source.
9835 instruct cmovIL_imm(cmpOp cmp, flagsRegL xcc, iRegI dst, immI11 src) %{
9836 match(Set dst (CMoveI (Binary cmp xcc) (Binary dst src)));
9837 ins_cost(140);
9838 format %{ "MOV$cmp $xcc,$src,$dst" %}
9839 ins_encode( enc_cmov_imm(cmp,dst,src, (Assembler::xcc)) );
9840 ins_pipe(ialu_imm);
9841 %}
// Compressed-oop destination.
9843 instruct cmovNL_reg(cmpOp cmp, flagsRegL xcc, iRegN dst, iRegN src) %{
9844 match(Set dst (CMoveN (Binary cmp xcc) (Binary dst src)));
9845 ins_cost(150);
9846 format %{ "MOV$cmp $xcc,$src,$dst" %}
9847 ins_encode( enc_cmov_reg(cmp,dst,src, (Assembler::xcc)) );
9848 ins_pipe(ialu_reg);
9849 %}
// Pointer destination.
9851 instruct cmovPL_reg(cmpOp cmp, flagsRegL xcc, iRegP dst, iRegP src) %{
9852 match(Set dst (CMoveP (Binary cmp xcc) (Binary dst src)));
9853 ins_cost(150);
9854 format %{ "MOV$cmp $xcc,$src,$dst" %}
9855 ins_encode( enc_cmov_reg(cmp,dst,src, (Assembler::xcc)) );
9856 ins_pipe(ialu_reg);
9857 %}
// Pointer destination, null-immediate source.
9859 instruct cmovPL_imm(cmpOp cmp, flagsRegL xcc, iRegP dst, immP0 src) %{
9860 match(Set dst (CMoveP (Binary cmp xcc) (Binary dst src)));
9861 ins_cost(140);
9862 format %{ "MOV$cmp $xcc,$src,$dst" %}
9863 ins_encode( enc_cmov_imm(cmp,dst,src, (Assembler::xcc)) );
9864 ins_pipe(ialu_imm);
9865 %}
// Float destination (FMOVS form).
9867 instruct cmovFL_reg(cmpOp cmp, flagsRegL xcc, regF dst, regF src) %{
9868 match(Set dst (CMoveF (Binary cmp xcc) (Binary dst src)));
9869 ins_cost(150);
9870 opcode(0x101);
9871 format %{ "FMOVS$cmp $xcc,$src,$dst" %}
9872 ins_encode( enc_cmovf_reg(cmp,dst,src, (Assembler::xcc)) );
9873 ins_pipe(int_conditional_float_move);
9874 %}
// Double destination (FMOVD form).
9876 instruct cmovDL_reg(cmpOp cmp, flagsRegL xcc, regD dst, regD src) %{
9877 match(Set dst (CMoveD (Binary cmp xcc) (Binary dst src)));
9878 ins_cost(150);
9879 opcode(0x102);
9880 format %{ "FMOVD$cmp $xcc,$src,$dst" %}
9881 ins_encode( enc_cmovf_reg(cmp,dst,src, (Assembler::xcc)) );
9882 ins_pipe(int_conditional_float_move);
9883 %}
9885 // ============================================================================
9886 // Safepoint Instruction
// Safepoint poll: a discarded load from the polling page; the VM traps
// the access when a safepoint is pending.  Result goes to G0 (discard).
9887 instruct safePoint_poll(iRegP poll) %{
9888 match(SafePoint poll);
9889 effect(USE poll);
9891 size(4);
9892 #ifdef _LP64
9893 format %{ "LDX [$poll],R_G0\t! Safepoint: poll for GC" %}
9894 #else
9895 format %{ "LDUW [$poll],R_G0\t! Safepoint: poll for GC" %}
9896 #endif
9897 ins_encode %{
// Relocation lets the VM identify this load as a safepoint poll.
9898 __ relocate(relocInfo::poll_type);
9899 __ ld_ptr($poll$$Register, 0, G0);
9900 %}
9901 ins_pipe(loadPollP);
9902 %}
9904 // ============================================================================
9905 // Call Instructions
9906 // Call Java Static Instruction
// Direct static Java call (non-method-handle form).
9907 instruct CallStaticJavaDirect( method meth ) %{
9908 match(CallStaticJava);
9909 predicate(! ((CallStaticJavaNode*)n)->is_method_handle_invoke());
9910 effect(USE meth);
9912 size(8);
9913 ins_cost(CALL_COST);
9914 format %{ "CALL,static ; NOP ==> " %}
9915 ins_encode( Java_Static_Call( meth ), call_epilog );
9916 ins_pipe(simple_call);
9917 %}
// Static Java call, method-handle invoke: the caller's SP is preserved
// in L7 across the call (hence KILL l7_mh_SP_save).
9920 instruct CallStaticJavaHandle(method meth, l7RegP l7_mh_SP_save) %{
9921 match(CallStaticJava);
9922 predicate(((CallStaticJavaNode*)n)->is_method_handle_invoke());
9923 effect(USE meth, KILL l7_mh_SP_save);
9925 size(16);
9926 ins_cost(CALL_COST);
9927 format %{ "CALL,static/MethodHandle" %}
9928 ins_encode(preserve_SP, Java_Static_Call(meth), restore_SP, call_epilog);
9929 ins_pipe(simple_call);
9930 %}
// Dynamic (virtual/interface) Java call; inline-cache value goes in G5.
9933 instruct CallDynamicJavaDirect( method meth ) %{
9934 match(CallDynamicJava);
9935 effect(USE meth);
9937 ins_cost(CALL_COST);
9938 format %{ "SET (empty),R_G5\n\t"
9939 "CALL,dynamic ; NOP ==> " %}
9940 ins_encode( Java_Dynamic_Call( meth ), call_epilog );
9941 ins_pipe(call);
9942 %}
// Call into the VM runtime (with safepoint support); clobbers L7.
9945 instruct CallRuntimeDirect(method meth, l7RegP l7) %{
9946 match(CallRuntime);
9947 effect(USE meth, KILL l7);
9948 ins_cost(CALL_COST);
9949 format %{ "CALL,runtime" %}
9950 ins_encode( Java_To_Runtime( meth ),
9951 call_epilog, adjust_long_from_native_call );
9952 ins_pipe(simple_call);
9953 %}
// Leaf runtime call (no safepoint) - same encoding as CallRuntime.
9956 instruct CallLeafDirect(method meth, l7RegP l7) %{
9957 match(CallLeaf);
9958 effect(USE meth, KILL l7);
9959 ins_cost(CALL_COST);
9960 format %{ "CALL,runtime leaf" %}
9961 ins_encode( Java_To_Runtime( meth ),
9962 call_epilog,
9963 adjust_long_from_native_call );
9964 ins_pipe(simple_call);
9965 %}
// Leaf runtime call that uses no FP registers - same as CallLeaf.
9968 instruct CallLeafNoFPDirect(method meth, l7RegP l7) %{
9969 match(CallLeafNoFP);
9970 effect(USE meth, KILL l7);
9971 ins_cost(CALL_COST);
9972 format %{ "CALL,runtime leaf nofp" %}
9973 ins_encode( Java_To_Runtime( meth ),
9974 call_epilog,
9975 adjust_long_from_native_call );
9976 ins_pipe(simple_call);
9977 %}
9979 // Tail Call; Jump from runtime stub to Java code.
9980 // Also known as an 'interprocedural jump'.
9981 // Target of jump will eventually return to caller.
9982 // TailJump below removes the return address.
// Interprocedural jump from a runtime stub into Java code; the method
// oop rides along in the inline-cache register for the callee.
9983 instruct TailCalljmpInd(g3RegP jump_target, inline_cache_regP method_oop) %{
9984 match(TailCall jump_target method_oop );
9986 ins_cost(CALL_COST);
9987 format %{ "Jmp $jump_target ; NOP \t! $method_oop holds method oop" %}
9988 ins_encode(form_jmpl(jump_target));
9989 ins_pipe(tail_call);
9990 %}
// Method return: zero-size because the epilogue already emitted the ret.
9994 instruct Ret() %{
9995 match(Return);
9997 // The epilogue node did the ret already.
9998 size(0);
9999 format %{ "! return" %}
10000 ins_encode();
10001 ins_pipe(empty);
10002 %}
10005 // Tail Jump; remove the return address; jump to target.
10006 // TailCall above leaves the return address around.
10007 // TailJump is used in only one place, the rethrow_Java stub (fancy_jump=2).
10008 // ex_oop (Exception Oop) is needed in %o0 at the jump. As there would be a
10009 // "restore" before this instruction (in Epilogue), we need to materialize it
10010 // in %i0.
// Tail jump: discard the return address and jump; used only by the
// rethrow_Java stub (fancy_jump=2).  Exception oop arrives in %i0 so it
// survives the epilogue's register-window "restore" into %o0.
10011 instruct tailjmpInd(g1RegP jump_target, i0RegP ex_oop) %{
10012 match( TailJump jump_target ex_oop );
10013 ins_cost(CALL_COST);
10014 format %{ "! discard R_O7\n\t"
10015 "Jmp $jump_target ; ADD O7,8,O1 \t! $ex_oop holds exc. oop" %}
10016 ins_encode(form_jmpl_set_exception_pc(jump_target));
10017 // opcode(Assembler::jmpl_op3, Assembler::arith_op);
10018 // The hack duplicates the exception oop into G3, so that CreateEx can use it there.
10019 // ins_encode( form3_rs1_simm13_rd( jump_target, 0x00, R_G0 ), move_return_pc_to_o1() );
10020 ins_pipe(tail_call);
10021 %}
// Create exception oop: created by stack-crawling runtime code.
// Created exception is now available to this handler, and is setup
// just prior to jumping to this handler. No code emitted.
10026 instruct CreateException( o0RegP ex_oop )
10027 %{
10028 match(Set ex_oop (CreateEx));
10029 ins_cost(0);
10031 size(0);
10032 // use the following format syntax
10033 format %{ "! exception oop is in R_O0; no code emitted" %}
10034 ins_encode();
10035 ins_pipe(empty);
10036 %}
10039 // Rethrow exception:
10040 // The exception oop will come in the first argument position.
10041 // Then JUMP (not call) to the rethrow stub code.
// Rethrow exception: the exception oop comes in the first argument
// position; JUMP (not call) to the rethrow stub.
10042 instruct RethrowException()
10043 %{
10044 match(Rethrow);
10045 ins_cost(CALL_COST);
10047 // use the following format syntax
10048 format %{ "Jmp rethrow_stub" %}
10049 ins_encode(enc_rethrow);
10050 ins_pipe(tail_call);
10051 %}
// Halt: emit an illegal-trap instruction for unreachable code paths.
10055 instruct ShouldNotReachHere( )
10056 %{
10057 match(Halt);
10058 ins_cost(CALL_COST);
10060 size(4);
10061 // Use the following format syntax
10062 format %{ "ILLTRAP ; ShouldNotReachHere" %}
10063 ins_encode( form2_illtrap() );
10064 ins_pipe(tail_call);
10065 %}
10067 // ============================================================================
10068 // The 2nd slow-half of a subtype check. Scan the subklass's 2ndary superklass
10069 // array for an instance of the superklass. Set a hidden internal cache on a
10070 // hit (cache is checked with exposed code in gen_subtype_check()). Return
10071 // not zero for a miss or zero for a hit. The encoding ALSO sets flags.
// Slow-path subtype check: call the PartialSubtypeCheck stub, which scans
// the secondary-supers array.  Returns non-zero for miss, zero for hit,
// and also sets the condition codes.
10072 instruct partialSubtypeCheck( o0RegP index, o1RegP sub, o2RegP super, flagsRegP pcc, o7RegP o7 ) %{
10073 match(Set index (PartialSubtypeCheck sub super));
10074 effect( KILL pcc, KILL o7 );
10075 ins_cost(DEFAULT_COST*10);
10076 format %{ "CALL PartialSubtypeCheck\n\tNOP" %}
10077 ins_encode( enc_PartialSubtypeCheck() );
10078 ins_pipe(partial_subtype_check_pipe);
10079 %}
// Variant that matches the compare-against-zero form directly, producing
// only condition codes (the index result register is clobbered).
10081 instruct partialSubtypeCheck_vs_zero( flagsRegP pcc, o1RegP sub, o2RegP super, immP0 zero, o0RegP idx, o7RegP o7 ) %{
10082 match(Set pcc (CmpP (PartialSubtypeCheck sub super) zero));
10083 effect( KILL idx, KILL o7 );
10084 ins_cost(DEFAULT_COST*10);
10085 format %{ "CALL PartialSubtypeCheck\n\tNOP\t# (sets condition codes)" %}
10086 ins_encode( enc_PartialSubtypeCheck() );
10087 ins_pipe(partial_subtype_check_pipe);
10088 %}
10091 // ============================================================================
10092 // inlined locking and unlocking
// Inlined fast-path monitor enter; result reported via pcc.
// Kills the box register and both scratch registers.
10094 instruct cmpFastLock(flagsRegP pcc, iRegP object, o1RegP box, iRegP scratch2, o7RegP scratch ) %{
10095 match(Set pcc (FastLock object box));
10097 effect(TEMP scratch2, USE_KILL box, KILL scratch);
10098 ins_cost(100);
10100 format %{ "FASTLOCK $object,$box\t! kills $box,$scratch,$scratch2" %}
10101 ins_encode( Fast_Lock(object, box, scratch, scratch2) );
10102 ins_pipe(long_memory_op);
10103 %}
// Inlined fast-path monitor exit; mirror of cmpFastLock.
10106 instruct cmpFastUnlock(flagsRegP pcc, iRegP object, o1RegP box, iRegP scratch2, o7RegP scratch ) %{
10107 match(Set pcc (FastUnlock object box));
10108 effect(TEMP scratch2, USE_KILL box, KILL scratch);
10109 ins_cost(100);
10111 format %{ "FASTUNLOCK $object,$box\t! kills $box,$scratch,$scratch2" %}
10112 ins_encode( Fast_Unlock(object, box, scratch, scratch2) );
10113 ins_pipe(long_memory_op);
10114 %}
10116 // The encodings are generic.
// Zero an array body with a backwards STX loop (non-BIS fallback).
10117 instruct clear_array(iRegX cnt, iRegP base, iRegX temp, Universe dummy, flagsReg ccr) %{
10118 predicate(!use_block_zeroing(n->in(2)) );
10119 match(Set dummy (ClearArray cnt base));
10120 effect(TEMP temp, KILL ccr);
10121 ins_cost(300);
10122 format %{ "MOV $cnt,$temp\n"
10123 "loop: SUBcc $temp,8,$temp\t! Count down a dword of bytes\n"
10124 " BRge loop\t\t! Clearing loop\n"
10125 " STX G0,[$base+$temp]\t! delay slot" %}
10127 ins_encode %{
10128 // Compiler ensures base is doubleword aligned and cnt is count of doublewords
10129 Register nof_bytes_arg = $cnt$$Register;
10130 Register nof_bytes_tmp = $temp$$Register;
10131 Register base_pointer_arg = $base$$Register;
10133 Label loop;
10134 __ mov(nof_bytes_arg, nof_bytes_tmp);
10136 // Loop and clear, walking backwards through the array.
10137 // nof_bytes_tmp (if >0) is always the number of bytes to zero
10138 __ bind(loop);
10139 __ deccc(nof_bytes_tmp, 8);
10140 __ br(Assembler::greaterEqual, true, Assembler::pt, loop);
10141 __ delayed()-> stx(G0, base_pointer_arg, nof_bytes_tmp);
10142 // %%%% this mini-loop must not cross a cache boundary!
10143 %}
10144 ins_pipe(long_memory_op);
10145 %}
// Zero an array body using Block-Init-Store (BIS); no temp needed.
10147 instruct clear_array_bis(g1RegX cnt, o0RegP base, Universe dummy, flagsReg ccr) %{
10148 predicate(use_block_zeroing(n->in(2)));
10149 match(Set dummy (ClearArray cnt base));
10150 effect(USE_KILL cnt, USE_KILL base, KILL ccr);
10151 ins_cost(300);
10152 format %{ "CLEAR [$base, $cnt]\t! ClearArray" %}
10154 ins_encode %{
10156 assert(MinObjAlignmentInBytes >= BytesPerLong, "need alternate implementation");
10157 Register to = $base$$Register;
10158 Register count = $cnt$$Register;
10160 Label Ldone;
10161 __ nop(); // Separate short branches
10162 // Use BIS for zeroing (temp is not used).
10163 __ bis_zeroing(to, count, G0, Ldone);
10164 __ bind(Ldone);
10166 %}
10167 ins_pipe(long_memory_op);
10168 %}
// BIS zeroing variant for when BlockZeroingLowLimit does not fit in a
// simm13 immediate, so a temp register is required.
10170 instruct clear_array_bis_2(g1RegX cnt, o0RegP base, iRegX tmp, Universe dummy, flagsReg ccr) %{
10171 predicate(use_block_zeroing(n->in(2)) && !Assembler::is_simm13((int)BlockZeroingLowLimit));
10172 match(Set dummy (ClearArray cnt base));
10173 effect(TEMP tmp, USE_KILL cnt, USE_KILL base, KILL ccr);
10174 ins_cost(300);
10175 format %{ "CLEAR [$base, $cnt]\t! ClearArray" %}
10177 ins_encode %{
10179 assert(MinObjAlignmentInBytes >= BytesPerLong, "need alternate implementation");
10180 Register to = $base$$Register;
10181 Register count = $cnt$$Register;
10182 Register temp = $tmp$$Register;
10184 Label Ldone;
10185 __ nop(); // Separate short branches
10186 // Use BIS for zeroing
10187 __ bis_zeroing(to, count, temp, Ldone);
10188 __ bind(Ldone);
10190 %}
10191 ins_pipe(long_memory_op);
10192 %}
// String.compareTo intrinsic; operands pinned to fixed registers for the
// enc_String_Compare stub.
10194 instruct string_compare(o0RegP str1, o1RegP str2, g3RegI cnt1, g4RegI cnt2, notemp_iRegI result,
10195 o7RegI tmp, flagsReg ccr) %{
10196 match(Set result (StrComp (Binary str1 cnt1) (Binary str2 cnt2)));
10197 effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2, KILL ccr, KILL tmp);
10198 ins_cost(300);
10199 format %{ "String Compare $str1,$cnt1,$str2,$cnt2 -> $result // KILL $tmp" %}
10200 ins_encode( enc_String_Compare(str1, str2, cnt1, cnt2, result) );
10201 ins_pipe(long_memory_op);
10202 %}
// String.equals intrinsic.
10204 instruct string_equals(o0RegP str1, o1RegP str2, g3RegI cnt, notemp_iRegI result,
10205 o7RegI tmp, flagsReg ccr) %{
10206 match(Set result (StrEquals (Binary str1 str2) cnt));
10207 effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt, KILL tmp, KILL ccr);
10208 ins_cost(300);
10209 format %{ "String Equals $str1,$str2,$cnt -> $result // KILL $tmp" %}
10210 ins_encode( enc_String_Equals(str1, str2, cnt, result) );
10211 ins_pipe(long_memory_op);
10212 %}
// Arrays.equals intrinsic.
10214 instruct array_equals(o0RegP ary1, o1RegP ary2, g3RegI tmp1, notemp_iRegI result,
10215 o7RegI tmp2, flagsReg ccr) %{
10216 match(Set result (AryEq ary1 ary2));
10217 effect(USE_KILL ary1, USE_KILL ary2, KILL tmp1, KILL tmp2, KILL ccr);
10218 ins_cost(300);
10219 format %{ "Array Equals $ary1,$ary2 -> $result // KILL $tmp1,$tmp2" %}
10220 ins_encode( enc_Array_Equals(ary1, ary2, tmp1, result));
10221 ins_pipe(long_memory_op);
10222 %}
10225 //---------- Zeros Count Instructions ------------------------------------------
// Integer.numberOfLeadingZeros: smear the highest set bit rightward with
// shift-or steps, then return 32 - popc(x).  Requires the POPC instruction.
10227 instruct countLeadingZerosI(iRegI dst, iRegI src, iRegI tmp, flagsReg cr) %{
10228 predicate(UsePopCountInstruction); // See Matcher::match_rule_supported
10229 match(Set dst (CountLeadingZerosI src));
10230 effect(TEMP dst, TEMP tmp, KILL cr);
10232 // x |= (x >> 1);
10233 // x |= (x >> 2);
10234 // x |= (x >> 4);
10235 // x |= (x >> 8);
10236 // x |= (x >> 16);
10237 // return (WORDBITS - popc(x));
10238 format %{ "SRL $src,1,$tmp\t! count leading zeros (int)\n\t"
10239 "SRL $src,0,$dst\t! 32-bit zero extend\n\t"
10240 "OR $dst,$tmp,$dst\n\t"
10241 "SRL $dst,2,$tmp\n\t"
10242 "OR $dst,$tmp,$dst\n\t"
10243 "SRL $dst,4,$tmp\n\t"
10244 "OR $dst,$tmp,$dst\n\t"
10245 "SRL $dst,8,$tmp\n\t"
10246 "OR $dst,$tmp,$dst\n\t"
10247 "SRL $dst,16,$tmp\n\t"
10248 "OR $dst,$tmp,$dst\n\t"
10249 "POPC $dst,$dst\n\t"
10250 "MOV 32,$tmp\n\t"
10251 "SUB $tmp,$dst,$dst" %}
10252 ins_encode %{
10253 Register Rdst = $dst$$Register;
10254 Register Rsrc = $src$$Register;
10255 Register Rtmp = $tmp$$Register;
10256 __ srl(Rsrc, 1, Rtmp);
// srl by 0 zero-extends the 32-bit value into the 64-bit register.
10257 __ srl(Rsrc, 0, Rdst);
10258 __ or3(Rdst, Rtmp, Rdst);
10259 __ srl(Rdst, 2, Rtmp);
10260 __ or3(Rdst, Rtmp, Rdst);
10261 __ srl(Rdst, 4, Rtmp);
10262 __ or3(Rdst, Rtmp, Rdst);
10263 __ srl(Rdst, 8, Rtmp);
10264 __ or3(Rdst, Rtmp, Rdst);
10265 __ srl(Rdst, 16, Rtmp);
10266 __ or3(Rdst, Rtmp, Rdst);
10267 __ popc(Rdst, Rdst);
10268 __ mov(BitsPerInt, Rtmp);
10269 __ sub(Rtmp, Rdst, Rdst);
10270 %}
10271 ins_pipe(ialu_reg);
10272 %}
// Long.numberOfLeadingZeros: 64-bit smear with SRLX steps, then
// 64 - popc(x).  Requires the POPC instruction.
10274 instruct countLeadingZerosL(iRegIsafe dst, iRegL src, iRegL tmp, flagsReg cr) %{
10275 predicate(UsePopCountInstruction); // See Matcher::match_rule_supported
10276 match(Set dst (CountLeadingZerosL src));
10277 effect(TEMP dst, TEMP tmp, KILL cr);
10279 // x |= (x >> 1);
10280 // x |= (x >> 2);
10281 // x |= (x >> 4);
10282 // x |= (x >> 8);
10283 // x |= (x >> 16);
10284 // x |= (x >> 32);
10285 // return (WORDBITS - popc(x));
10286 format %{ "SRLX $src,1,$tmp\t! count leading zeros (long)\n\t"
10287 "OR $src,$tmp,$dst\n\t"
10288 "SRLX $dst,2,$tmp\n\t"
10289 "OR $dst,$tmp,$dst\n\t"
10290 "SRLX $dst,4,$tmp\n\t"
10291 "OR $dst,$tmp,$dst\n\t"
10292 "SRLX $dst,8,$tmp\n\t"
10293 "OR $dst,$tmp,$dst\n\t"
10294 "SRLX $dst,16,$tmp\n\t"
10295 "OR $dst,$tmp,$dst\n\t"
10296 "SRLX $dst,32,$tmp\n\t"
10297 "OR $dst,$tmp,$dst\n\t"
10298 "POPC $dst,$dst\n\t"
10299 "MOV 64,$tmp\n\t"
10300 "SUB $tmp,$dst,$dst" %}
10301 ins_encode %{
10302 Register Rdst = $dst$$Register;
10303 Register Rsrc = $src$$Register;
10304 Register Rtmp = $tmp$$Register;
10305 __ srlx(Rsrc, 1, Rtmp);
10306 __ or3( Rsrc, Rtmp, Rdst);
10307 __ srlx(Rdst, 2, Rtmp);
10308 __ or3( Rdst, Rtmp, Rdst);
10309 __ srlx(Rdst, 4, Rtmp);
10310 __ or3( Rdst, Rtmp, Rdst);
10311 __ srlx(Rdst, 8, Rtmp);
10312 __ or3( Rdst, Rtmp, Rdst);
10313 __ srlx(Rdst, 16, Rtmp);
10314 __ or3( Rdst, Rtmp, Rdst);
10315 __ srlx(Rdst, 32, Rtmp);
10316 __ or3( Rdst, Rtmp, Rdst);
10317 __ popc(Rdst, Rdst);
10318 __ mov(BitsPerLong, Rtmp);
10319 __ sub(Rtmp, Rdst, Rdst);
10320 %}
10321 ins_pipe(ialu_reg);
10322 %}
// Integer.numberOfTrailingZeros: popc(~x & (x - 1)).
// SUB/ANDN build the mask of bits below the lowest set bit.
10324 instruct countTrailingZerosI(iRegI dst, iRegI src, flagsReg cr) %{
10325 predicate(UsePopCountInstruction); // See Matcher::match_rule_supported
10326 match(Set dst (CountTrailingZerosI src));
10327 effect(TEMP dst, KILL cr);
10329 // return popc(~x & (x - 1));
10330 format %{ "SUB $src,1,$dst\t! count trailing zeros (int)\n\t"
10331 "ANDN $dst,$src,$dst\n\t"
10332 "SRL $dst,R_G0,$dst\n\t"
10333 "POPC $dst,$dst" %}
10334 ins_encode %{
10335 Register Rdst = $dst$$Register;
10336 Register Rsrc = $src$$Register;
10337 __ sub(Rsrc, 1, Rdst);
10338 __ andn(Rdst, Rsrc, Rdst);
// srl by G0 (=0) zero-extends to 32 bits before the 64-bit popcount.
10339 __ srl(Rdst, G0, Rdst);
10340 __ popc(Rdst, Rdst);
10341 %}
10342 ins_pipe(ialu_reg);
10343 %}
// Long.numberOfTrailingZeros: same identity, no zero-extend needed.
10345 instruct countTrailingZerosL(iRegIsafe dst, iRegL src, flagsReg cr) %{
10346 predicate(UsePopCountInstruction); // See Matcher::match_rule_supported
10347 match(Set dst (CountTrailingZerosL src));
10348 effect(TEMP dst, KILL cr);
10350 // return popc(~x & (x - 1));
10351 format %{ "SUB $src,1,$dst\t! count trailing zeros (long)\n\t"
10352 "ANDN $dst,$src,$dst\n\t"
10353 "POPC $dst,$dst" %}
10354 ins_encode %{
10355 Register Rdst = $dst$$Register;
10356 Register Rsrc = $src$$Register;
10357 __ sub(Rsrc, 1, Rdst);
10358 __ andn(Rdst, Rsrc, Rdst);
10359 __ popc(Rdst, Rdst);
10360 %}
10361 ins_pipe(ialu_reg);
10362 %}
10365 //---------- Population Count Instructions -------------------------------------
// Integer.bitCount via the hardware POPC instruction.
10367 instruct popCountI(iRegI dst, iRegI src) %{
10368 predicate(UsePopCountInstruction);
10369 match(Set dst (PopCountI src));
10371 format %{ "POPC $src, $dst" %}
10372 ins_encode %{
10373 __ popc($src$$Register, $dst$$Register);
10374 %}
10375 ins_pipe(ialu_reg);
10376 %}
// Note: Long.bitCount(long) returns an int.
10379 instruct popCountL(iRegI dst, iRegL src) %{
10380 predicate(UsePopCountInstruction);
10381 match(Set dst (PopCountL src));
10383 format %{ "POPC $src, $dst" %}
10384 ins_encode %{
10385 __ popc($src$$Register, $dst$$Register);
10386 %}
10387 ins_pipe(ialu_reg);
10388 %}
10391 // ============================================================================
10392 //------------Bytes reverse--------------------------------------------------
// Byte-reverse an int by spilling it to the stack and reloading through a
// little-endian ASI (address-space identifier) load.
10394 instruct bytes_reverse_int(iRegI dst, stackSlotI src) %{
10395 match(Set dst (ReverseBytesI src));
10397 // Op cost is artificially doubled to make sure that load or store
10398 // instructions are preferred over this one which requires a spill
10399 // onto a stack slot.
10400 ins_cost(2*DEFAULT_COST + MEMORY_REF_COST);
10401 format %{ "LDUWA $src, $dst\t!asi=primary_little" %}
10403 ins_encode %{
10404 __ set($src$$disp + STACK_BIAS, O7);
10405 __ lduwa($src$$base$$Register, O7, Assembler::ASI_PRIMARY_LITTLE, $dst$$Register);
10406 %}
10407 ins_pipe( iload_mem );
10408 %}
// Byte-reverse a long via a little-endian LDXA reload.
10410 instruct bytes_reverse_long(iRegL dst, stackSlotL src) %{
10411 match(Set dst (ReverseBytesL src));
10413 // Op cost is artificially doubled to make sure that load or store
10414 // instructions are preferred over this one which requires a spill
10415 // onto a stack slot.
10416 ins_cost(2*DEFAULT_COST + MEMORY_REF_COST);
10417 format %{ "LDXA $src, $dst\t!asi=primary_little" %}
10419 ins_encode %{
10420 __ set($src$$disp + STACK_BIAS, O7);
10421 __ ldxa($src$$base$$Register, O7, Assembler::ASI_PRIMARY_LITTLE, $dst$$Register);
10422 %}
10423 ins_pipe( iload_mem );
10424 %}
// Byte-reverse an unsigned short/char via a little-endian LDUHA reload.
10426 instruct bytes_reverse_unsigned_short(iRegI dst, stackSlotI src) %{
10427 match(Set dst (ReverseBytesUS src));
10429 // Op cost is artificially doubled to make sure that load or store
10430 // instructions are preferred over this one which requires a spill
10431 // onto a stack slot.
10432 ins_cost(2*DEFAULT_COST + MEMORY_REF_COST);
10433 format %{ "LDUHA $src, $dst\t!asi=primary_little\n\t" %}
10435 ins_encode %{
10436 // the value was spilled as an int so bias the load
10437 __ set($src$$disp + STACK_BIAS + 2, O7);
10438 __ lduha($src$$base$$Register, O7, Assembler::ASI_PRIMARY_LITTLE, $dst$$Register);
10439 %}
10440 ins_pipe( iload_mem );
10441 %}
// Byte-reverse a signed short via a little-endian LDSHA reload.
10443 instruct bytes_reverse_short(iRegI dst, stackSlotI src) %{
10444 match(Set dst (ReverseBytesS src));
10446 // Op cost is artificially doubled to make sure that load or store
10447 // instructions are preferred over this one which requires a spill
10448 // onto a stack slot.
10449 ins_cost(2*DEFAULT_COST + MEMORY_REF_COST);
10450 format %{ "LDSHA $src, $dst\t!asi=primary_little\n\t" %}
10452 ins_encode %{
10453 // the value was spilled as an int so bias the load
10454 __ set($src$$disp + STACK_BIAS + 2, O7);
10455 __ ldsha($src$$base$$Register, O7, Assembler::ASI_PRIMARY_LITTLE, $dst$$Register);
10456 %}
10457 ins_pipe( iload_mem );
10458 %}
10460 // Load Integer reversed byte order
// Fuse ReverseBytes with a memory load by loading through the
// little-endian ASI directly - no spill required.
10461 instruct loadI_reversed(iRegI dst, indIndexMemory src) %{
10462 match(Set dst (ReverseBytesI (LoadI src)));
10464 ins_cost(DEFAULT_COST + MEMORY_REF_COST);
10465 size(4);
10466 format %{ "LDUWA $src, $dst\t!asi=primary_little" %}
10468 ins_encode %{
10469 __ lduwa($src$$base$$Register, $src$$index$$Register, Assembler::ASI_PRIMARY_LITTLE, $dst$$Register);
10470 %}
10471 ins_pipe(iload_mem);
10472 %}
// Load Long - aligned and reversed.
10475 instruct loadL_reversed(iRegL dst, indIndexMemory src) %{
10476 match(Set dst (ReverseBytesL (LoadL src)));
10478 ins_cost(MEMORY_REF_COST);
10479 size(4);
10480 format %{ "LDXA $src, $dst\t!asi=primary_little" %}
10482 ins_encode %{
10483 __ ldxa($src$$base$$Register, $src$$index$$Register, Assembler::ASI_PRIMARY_LITTLE, $dst$$Register);
10484 %}
10485 ins_pipe(iload_mem);
10486 %}
// Load unsigned short / char with reversed byte order.
10489 instruct loadUS_reversed(iRegI dst, indIndexMemory src) %{
10490 match(Set dst (ReverseBytesUS (LoadUS src)));
10492 ins_cost(MEMORY_REF_COST);
10493 size(4);
10494 format %{ "LDUHA $src, $dst\t!asi=primary_little" %}
10496 ins_encode %{
10497 __ lduha($src$$base$$Register, $src$$index$$Register, Assembler::ASI_PRIMARY_LITTLE, $dst$$Register);
10498 %}
10499 ins_pipe(iload_mem);
10500 %}
// Load signed short with reversed byte order.
10503 instruct loadS_reversed(iRegI dst, indIndexMemory src) %{
10504 match(Set dst (ReverseBytesS (LoadS src)));
10506 ins_cost(MEMORY_REF_COST);
10507 size(4);
10508 format %{ "LDSHA $src, $dst\t!asi=primary_little" %}
10510 ins_encode %{
10511 __ ldsha($src$$base$$Register, $src$$index$$Register, Assembler::ASI_PRIMARY_LITTLE, $dst$$Register);
10512 %}
10513 ins_pipe(iload_mem);
10514 %}
10516 // Store Integer reversed byte order
// Folds ReverseBytesI into the store: STWA through the little-endian ASI
// writes the word with its bytes swapped, avoiding a separate swap.
10517 instruct storeI_reversed(indIndexMemory dst, iRegI src) %{
10518 match(Set dst (StoreI dst (ReverseBytesI src)));
10520 ins_cost(MEMORY_REF_COST);
10521 size(4);
10522 format %{ "STWA $src, $dst\t!asi=primary_little" %}
10524 ins_encode %{
10525 __ stwa($src$$Register, $dst$$base$$Register, $dst$$index$$Register, Assembler::ASI_PRIMARY_LITTLE);
10526 %}
10527 ins_pipe(istore_mem_reg);
10528 %}
10530 // Store Long reversed byte order
// STXA via the little-endian ASI performs the 8-byte swap on the way out.
10531 instruct storeL_reversed(indIndexMemory dst, iRegL src) %{
10532 match(Set dst (StoreL dst (ReverseBytesL src)));
10534 ins_cost(MEMORY_REF_COST);
10535 size(4);
10536 format %{ "STXA $src, $dst\t!asi=primary_little" %}
10538 ins_encode %{
10539 __ stxa($src$$Register, $dst$$base$$Register, $dst$$index$$Register, Assembler::ASI_PRIMARY_LITTLE);
10540 %}
10541 ins_pipe(istore_mem_reg);
10542 %}
10544 // Store unsigned short/char reversed byte order
// STHA via the little-endian ASI swaps the two bytes during the store.
10545 instruct storeUS_reversed(indIndexMemory dst, iRegI src) %{
10546 match(Set dst (StoreC dst (ReverseBytesUS src)));
10548 ins_cost(MEMORY_REF_COST);
10549 size(4);
10550 format %{ "STHA $src, $dst\t!asi=primary_little" %}
10552 ins_encode %{
10553 __ stha($src$$Register, $dst$$base$$Register, $dst$$index$$Register, Assembler::ASI_PRIMARY_LITTLE);
10554 %}
10555 ins_pipe(istore_mem_reg);
10556 %}
10558 // Store short reversed byte order
// Same encoding as storeUS_reversed (both match StoreC); only the
// matched ReverseBytes node differs.
10559 instruct storeS_reversed(indIndexMemory dst, iRegI src) %{
10560 match(Set dst (StoreC dst (ReverseBytesS src)));
10562 ins_cost(MEMORY_REF_COST);
10563 size(4);
10564 format %{ "STHA $src, $dst\t!asi=primary_little" %}
10566 ins_encode %{
10567 __ stha($src$$Register, $dst$$base$$Register, $dst$$index$$Register, Assembler::ASI_PRIMARY_LITTLE);
10568 %}
10569 ins_pipe(istore_mem_reg);
10570 %}
10572 // ====================VECTOR INSTRUCTIONS=====================================
10574 // Load Aligned Packed values into a Double Register
// 8-byte vector load: predicate restricts this rule to LoadVector nodes
// of exactly 8 bytes, implemented as a single 64-bit FP load (LDDF).
10575 instruct loadV8(regD dst, memory mem) %{
10576 predicate(n->as_LoadVector()->memory_size() == 8);
10577 match(Set dst (LoadVector mem));
10578 ins_cost(MEMORY_REF_COST);
10579 size(4);
10580 format %{ "LDDF $mem,$dst\t! load vector (8 bytes)" %}
10581 ins_encode %{
10582 __ ldf(FloatRegisterImpl::D, $mem$$Address, as_DoubleFloatRegister($dst$$reg));
10583 %}
10584 ins_pipe(floadD_mem);
10585 %}
10587 // Store Vector in Double register to memory
// 8-byte vector store: counterpart of loadV8, a single 64-bit FP store.
10588 instruct storeV8(memory mem, regD src) %{
10589 predicate(n->as_StoreVector()->memory_size() == 8);
10590 match(Set mem (StoreVector mem src));
10591 ins_cost(MEMORY_REF_COST);
10592 size(4);
10593 format %{ "STDF $src,$mem\t! store vector (8 bytes)" %}
10594 ins_encode %{
10595 __ stf(FloatRegisterImpl::D, as_DoubleFloatRegister($src$$reg), $mem$$Address);
10596 %}
10597 ins_pipe(fstoreD_mem_reg);
10598 %}
10600 // Store Zero into vector in memory
// Storing a replicated zero byte vector: one 64-bit store of G0 (the
// hardwired zero register) covers all 8 bytes.
10601 instruct storeV8B_zero(memory mem, immI0 zero) %{
10602 predicate(n->as_StoreVector()->memory_size() == 8);
10603 match(Set mem (StoreVector mem (ReplicateB zero)));
10604 ins_cost(MEMORY_REF_COST);
10605 size(4);
10606 format %{ "STX $zero,$mem\t! store zero vector (8 bytes)" %}
10607 ins_encode %{
10608 __ stx(G0, $mem$$Address);
10609 %}
10610 ins_pipe(fstoreD_mem_zero);
10611 %}
// Store a replicated-zero short vector (4 x 16 bit): same single STX of
// G0 as storeV8B_zero, only the matched Replicate node differs.
10613 instruct storeV4S_zero(memory mem, immI0 zero) %{
10614 predicate(n->as_StoreVector()->memory_size() == 8);
10615 match(Set mem (StoreVector mem (ReplicateS zero)));
10616 ins_cost(MEMORY_REF_COST);
10617 size(4);
10618 format %{ "STX $zero,$mem\t! store zero vector (4 shorts)" %}
10619 ins_encode %{
10620 __ stx(G0, $mem$$Address);
10621 %}
10622 ins_pipe(fstoreD_mem_zero);
10623 %}
// Store a replicated-zero int vector (2 x 32 bit) as one STX of G0.
10625 instruct storeV2I_zero(memory mem, immI0 zero) %{
10626 predicate(n->as_StoreVector()->memory_size() == 8);
10627 match(Set mem (StoreVector mem (ReplicateI zero)));
10628 ins_cost(MEMORY_REF_COST);
10629 size(4);
10630 format %{ "STX $zero,$mem\t! store zero vector (2 ints)" %}
10631 ins_encode %{
10632 __ stx(G0, $mem$$Address);
10633 %}
10634 ins_pipe(fstoreD_mem_zero);
10635 %}
// Store a replicated-zero float vector (2 x 32 bit). Uses an integer STX
// of G0: the bit pattern of 0.0f is all zeros, so no FP register needed.
10637 instruct storeV2F_zero(memory mem, immF0 zero) %{
10638 predicate(n->as_StoreVector()->memory_size() == 8);
10639 match(Set mem (StoreVector mem (ReplicateF zero)));
10640 ins_cost(MEMORY_REF_COST);
10641 size(4);
10642 format %{ "STX $zero,$mem\t! store zero vector (2 floats)" %}
10643 ins_encode %{
10644 __ stx(G0, $mem$$Address);
10645 %}
10646 ins_pipe(fstoreD_mem_zero);
10647 %}
10649 // Replicate scalar to packed byte values into Double register
// Builds the 8-byte pattern in an integer register by doubling: after
// SLLX the low byte sits in bits 63..56; each srlx/or3 pair doubles the
// number of byte copies (1 -> 2 -> 4 -> 8). The result then moves to the
// FP register with MOVXTOD, which the predicate gates on UseVIS >= 3.
10650 instruct Repl8B_reg(regD dst, iRegI src, iRegL tmp, o7RegL tmp2) %{
10651 predicate(n->as_Vector()->length() == 8 && UseVIS >= 3);
10652 match(Set dst (ReplicateB src));
10653 effect(DEF dst, USE src, TEMP tmp, KILL tmp2);
10654 format %{ "SLLX $src,56,$tmp\n\t"
10655 "SRLX $tmp, 8,$tmp2\n\t"
10656 "OR $tmp,$tmp2,$tmp\n\t"
10657 "SRLX $tmp,16,$tmp2\n\t"
10658 "OR $tmp,$tmp2,$tmp\n\t"
10659 "SRLX $tmp,32,$tmp2\n\t"
10660 "OR $tmp,$tmp2,$tmp\t! replicate8B\n\t"
10661 "MOVXTOD $tmp,$dst\t! MoveL2D" %}
10662 ins_encode %{
10663 Register Rsrc = $src$$Register;
10664 Register Rtmp = $tmp$$Register;
10665 Register Rtmp2 = $tmp2$$Register;
10666 __ sllx(Rsrc, 56, Rtmp);
10667 __ srlx(Rtmp, 8, Rtmp2);
10668 __ or3 (Rtmp, Rtmp2, Rtmp);
10669 __ srlx(Rtmp, 16, Rtmp2);
10670 __ or3 (Rtmp, Rtmp2, Rtmp);
10671 __ srlx(Rtmp, 32, Rtmp2);
10672 __ or3 (Rtmp, Rtmp2, Rtmp);
10673 __ movxtod(Rtmp, as_DoubleFloatRegister($dst$$reg));
10674 %}
10675 ins_pipe(ialu_reg);
10676 %}
10678 // Replicate scalar to packed byte values into Double stack
// Pre-VIS3 variant of Repl8B_reg (predicate: UseVIS < 3): the same
// shift/or doubling cascade builds the pattern, but without MOVXTOD the
// 64-bit result is stored to a double stack slot instead; STACK_BIAS is
// added to the slot displacement for the biased stack addressing.
10679 instruct Repl8B_stk(stackSlotD dst, iRegI src, iRegL tmp, o7RegL tmp2) %{
10680 predicate(n->as_Vector()->length() == 8 && UseVIS < 3);
10681 match(Set dst (ReplicateB src));
10682 effect(DEF dst, USE src, TEMP tmp, KILL tmp2);
10683 format %{ "SLLX $src,56,$tmp\n\t"
10684 "SRLX $tmp, 8,$tmp2\n\t"
10685 "OR $tmp,$tmp2,$tmp\n\t"
10686 "SRLX $tmp,16,$tmp2\n\t"
10687 "OR $tmp,$tmp2,$tmp\n\t"
10688 "SRLX $tmp,32,$tmp2\n\t"
10689 "OR $tmp,$tmp2,$tmp\t! replicate8B\n\t"
10690 "STX $tmp,$dst\t! regL to stkD" %}
10691 ins_encode %{
10692 Register Rsrc = $src$$Register;
10693 Register Rtmp = $tmp$$Register;
10694 Register Rtmp2 = $tmp2$$Register;
10695 __ sllx(Rsrc, 56, Rtmp);
10696 __ srlx(Rtmp, 8, Rtmp2);
10697 __ or3 (Rtmp, Rtmp2, Rtmp);
10698 __ srlx(Rtmp, 16, Rtmp2);
10699 __ or3 (Rtmp, Rtmp2, Rtmp);
10700 __ srlx(Rtmp, 32, Rtmp2);
10701 __ or3 (Rtmp, Rtmp2, Rtmp);
10702 __ set ($dst$$disp + STACK_BIAS, Rtmp2);
10703 __ stx (Rtmp, Rtmp2, $dst$$base$$Register);
10704 %}
10705 ins_pipe(ialu_reg);
10706 %}
10708 // Replicate scalar constant to packed byte values in Double register
// The replicated 8x1-byte pattern is materialized at compile time into
// the constant table and loaded with a single LDDF; tmp (O7) is only
// needed when the table offset does not fit in a simm13.
10709 instruct Repl8B_immI(regD dst, immI13 con, o7RegI tmp) %{
10710 predicate(n->as_Vector()->length() == 8);
10711 match(Set dst (ReplicateB con));
10712 effect(KILL tmp);
10713 format %{ "LDDF [$constanttablebase + $constantoffset],$dst\t! load from constant table: Repl8B($con)" %}
10714 ins_encode %{
10715 // XXX This is a quick fix for 6833573.
10716 //__ ldf(FloatRegisterImpl::D, $constanttablebase, $constantoffset(replicate_immI($con$$constant, 8, 1)), $dst$$FloatRegister);
10717 RegisterOrConstant con_offset = __ ensure_simm13_or_reg($constantoffset(replicate_immI($con$$constant, 8, 1)), $tmp$$Register);
10718 __ ldf(FloatRegisterImpl::D, $constanttablebase, con_offset, as_DoubleFloatRegister($dst$$reg));
10719 %}
10720 ins_pipe(loadConFD);
10721 %}
10723 // Replicate scalar to packed char/short values into Double register
// Same doubling scheme as Repl8B_reg but starting from a 16-bit element,
// so only two srlx/or3 steps are needed (1 -> 2 -> 4 halfword copies)
// before MOVXTOD (VIS3) moves the result to the FP register.
10724 instruct Repl4S_reg(regD dst, iRegI src, iRegL tmp, o7RegL tmp2) %{
10725 predicate(n->as_Vector()->length() == 4 && UseVIS >= 3);
10726 match(Set dst (ReplicateS src));
10727 effect(DEF dst, USE src, TEMP tmp, KILL tmp2);
10728 format %{ "SLLX $src,48,$tmp\n\t"
10729 "SRLX $tmp,16,$tmp2\n\t"
10730 "OR $tmp,$tmp2,$tmp\n\t"
10731 "SRLX $tmp,32,$tmp2\n\t"
10732 "OR $tmp,$tmp2,$tmp\t! replicate4S\n\t"
10733 "MOVXTOD $tmp,$dst\t! MoveL2D" %}
10734 ins_encode %{
10735 Register Rsrc = $src$$Register;
10736 Register Rtmp = $tmp$$Register;
10737 Register Rtmp2 = $tmp2$$Register;
10738 __ sllx(Rsrc, 48, Rtmp);
10739 __ srlx(Rtmp, 16, Rtmp2);
10740 __ or3 (Rtmp, Rtmp2, Rtmp);
10741 __ srlx(Rtmp, 32, Rtmp2);
10742 __ or3 (Rtmp, Rtmp2, Rtmp);
10743 __ movxtod(Rtmp, as_DoubleFloatRegister($dst$$reg));
10744 %}
10745 ins_pipe(ialu_reg);
10746 %}
10748 // Replicate scalar to packed char/short values into Double stack
// Pre-VIS3 variant of Repl4S_reg: the replicated 64-bit value is stored
// to a double stack slot (STACK_BIAS added to the displacement) instead
// of being moved directly to the FP register.
10749 instruct Repl4S_stk(stackSlotD dst, iRegI src, iRegL tmp, o7RegL tmp2) %{
10750 predicate(n->as_Vector()->length() == 4 && UseVIS < 3);
10751 match(Set dst (ReplicateS src));
10752 effect(DEF dst, USE src, TEMP tmp, KILL tmp2);
10753 format %{ "SLLX $src,48,$tmp\n\t"
10754 "SRLX $tmp,16,$tmp2\n\t"
10755 "OR $tmp,$tmp2,$tmp\n\t"
10756 "SRLX $tmp,32,$tmp2\n\t"
10757 "OR $tmp,$tmp2,$tmp\t! replicate4S\n\t"
10758 "STX $tmp,$dst\t! regL to stkD" %}
10759 ins_encode %{
10760 Register Rsrc = $src$$Register;
10761 Register Rtmp = $tmp$$Register;
10762 Register Rtmp2 = $tmp2$$Register;
10763 __ sllx(Rsrc, 48, Rtmp);
10764 __ srlx(Rtmp, 16, Rtmp2);
10765 __ or3 (Rtmp, Rtmp2, Rtmp);
10766 __ srlx(Rtmp, 32, Rtmp2);
10767 __ or3 (Rtmp, Rtmp2, Rtmp);
10768 __ set ($dst$$disp + STACK_BIAS, Rtmp2);
10769 __ stx (Rtmp, Rtmp2, $dst$$base$$Register);
10770 %}
10771 ins_pipe(ialu_reg);
10772 %}
10774 // Replicate scalar constant to packed char/short values in Double register
// The 4x2-byte replicated pattern comes from the constant table via one
// LDDF; tmp (O7) holds the offset if it exceeds the simm13 range.
10775 instruct Repl4S_immI(regD dst, immI con, o7RegI tmp) %{
10776 predicate(n->as_Vector()->length() == 4);
10777 match(Set dst (ReplicateS con));
10778 effect(KILL tmp);
10779 format %{ "LDDF [$constanttablebase + $constantoffset],$dst\t! load from constant table: Repl4S($con)" %}
10780 ins_encode %{
10781 // XXX This is a quick fix for 6833573.
10782 //__ ldf(FloatRegisterImpl::D, $constanttablebase, $constantoffset(replicate_immI($con$$constant, 4, 2)), $dst$$FloatRegister);
10783 RegisterOrConstant con_offset = __ ensure_simm13_or_reg($constantoffset(replicate_immI($con$$constant, 4, 2)), $tmp$$Register);
10784 __ ldf(FloatRegisterImpl::D, $constanttablebase, con_offset, as_DoubleFloatRegister($dst$$reg));
10785 %}
10786 ins_pipe(loadConFD);
10787 %}
10789 // Replicate scalar to packed int values into Double register
// 32-bit element: a single sllx/srlx/or3 round duplicates the low word
// into both halves, then MOVXTOD (VIS3) moves it to the FP register.
10790 instruct Repl2I_reg(regD dst, iRegI src, iRegL tmp, o7RegL tmp2) %{
10791 predicate(n->as_Vector()->length() == 2 && UseVIS >= 3);
10792 match(Set dst (ReplicateI src));
10793 effect(DEF dst, USE src, TEMP tmp, KILL tmp2);
10794 format %{ "SLLX $src,32,$tmp\n\t"
10795 "SRLX $tmp,32,$tmp2\n\t"
10796 "OR $tmp,$tmp2,$tmp\t! replicate2I\n\t"
10797 "MOVXTOD $tmp,$dst\t! MoveL2D" %}
10798 ins_encode %{
10799 Register Rsrc = $src$$Register;
10800 Register Rtmp = $tmp$$Register;
10801 Register Rtmp2 = $tmp2$$Register;
10802 __ sllx(Rsrc, 32, Rtmp);
10803 __ srlx(Rtmp, 32, Rtmp2);
10804 __ or3 (Rtmp, Rtmp2, Rtmp);
10805 __ movxtod(Rtmp, as_DoubleFloatRegister($dst$$reg));
10806 %}
10807 ins_pipe(ialu_reg);
10808 %}
10810 // Replicate scalar to packed int values into Double stack
// Pre-VIS3 variant of Repl2I_reg: result is spilled to a double stack
// slot (STACK_BIAS added to the displacement) rather than moved with
// MOVXTOD.
10811 instruct Repl2I_stk(stackSlotD dst, iRegI src, iRegL tmp, o7RegL tmp2) %{
10812 predicate(n->as_Vector()->length() == 2 && UseVIS < 3);
10813 match(Set dst (ReplicateI src));
10814 effect(DEF dst, USE src, TEMP tmp, KILL tmp2);
10815 format %{ "SLLX $src,32,$tmp\n\t"
10816 "SRLX $tmp,32,$tmp2\n\t"
10817 "OR $tmp,$tmp2,$tmp\t! replicate2I\n\t"
10818 "STX $tmp,$dst\t! regL to stkD" %}
10819 ins_encode %{
10820 Register Rsrc = $src$$Register;
10821 Register Rtmp = $tmp$$Register;
10822 Register Rtmp2 = $tmp2$$Register;
10823 __ sllx(Rsrc, 32, Rtmp);
10824 __ srlx(Rtmp, 32, Rtmp2);
10825 __ or3 (Rtmp, Rtmp2, Rtmp);
10826 __ set ($dst$$disp + STACK_BIAS, Rtmp2);
10827 __ stx (Rtmp, Rtmp2, $dst$$base$$Register);
10828 %}
10829 ins_pipe(ialu_reg);
10830 %}
10832 // Replicate scalar constant to packed int values in Double register
// (The operand is any immI, not just zero; the old comment saying "zero
// constant" was inaccurate.) The replicated pattern is loaded from the
// constant table with one LDDF.
10833 instruct Repl2I_immI(regD dst, immI con, o7RegI tmp) %{
10834 predicate(n->as_Vector()->length() == 2);
10835 match(Set dst (ReplicateI con));
10836 effect(KILL tmp);
10837 format %{ "LDDF [$constanttablebase + $constantoffset],$dst\t! load from constant table: Repl2I($con)" %}
10838 ins_encode %{
10839 // XXX This is a quick fix for 6833573.
10840 //__ ldf(FloatRegisterImpl::D, $constanttablebase, $constantoffset(replicate_immI($con$$constant, 2, 4)), $dst$$FloatRegister);
10841 RegisterOrConstant con_offset = __ ensure_simm13_or_reg($constantoffset(replicate_immI($con$$constant, 2, 4)), $tmp$$Register);
10842 __ ldf(FloatRegisterImpl::D, $constanttablebase, con_offset, as_DoubleFloatRegister($dst$$reg));
10843 %}
10844 ins_pipe(loadConFD);
10845 %}
10847 // Replicate scalar to packed float values into Double stack
// Two single-precision stores of the same register fill both halves of
// the double stack slot (high word, then the slot+4 low word).
10848 instruct Repl2F_stk(stackSlotD dst, regF src) %{
10849 predicate(n->as_Vector()->length() == 2);
10850 match(Set dst (ReplicateF src));
10851 ins_cost(MEMORY_REF_COST*2);
10852 format %{ "STF $src,$dst.hi\t! packed2F\n\t"
10853 "STF $src,$dst.lo" %}
10854 opcode(Assembler::stf_op3);
10855 ins_encode(simple_form3_mem_reg(dst, src), form3_mem_plus_4_reg(dst, src));
10856 ins_pipe(fstoreF_stk_reg);
10857 %}
10859 // Replicate scalar constant to packed float values in Double register
// (The operand is any immF, not just zero; the old comment saying "zero
// constant" was inaccurate.) The replicated pattern is loaded from the
// constant table with one LDDF.
10860 instruct Repl2F_immF(regD dst, immF con, o7RegI tmp) %{
10861 predicate(n->as_Vector()->length() == 2);
10862 match(Set dst (ReplicateF con));
10863 effect(KILL tmp);
10864 format %{ "LDDF [$constanttablebase + $constantoffset],$dst\t! load from constant table: Repl2F($con)" %}
10865 ins_encode %{
10866 // XXX This is a quick fix for 6833573.
10867 //__ ldf(FloatRegisterImpl::D, $constanttablebase, $constantoffset(replicate_immF($con$$constant)), $dst$$FloatRegister);
10868 RegisterOrConstant con_offset = __ ensure_simm13_or_reg($constantoffset(replicate_immF($con$$constant)), $tmp$$Register);
10869 __ ldf(FloatRegisterImpl::D, $constanttablebase, con_offset, as_DoubleFloatRegister($dst$$reg));
10870 %}
10871 ins_pipe(loadConFD);
10872 %}
10874 //----------PEEPHOLE RULES-----------------------------------------------------
10875 // These must follow all instruction definitions as they use the names
10876 // defined in the instructions definitions.
10877 //
10878 // peepmatch ( root_instr_name [preceding_instruction]* );
10879 //
10880 // peepconstraint %{
10881 // (instruction_number.operand_name relational_op instruction_number.operand_name
10882 // [, ...] );
10883 // // instruction numbers are zero-based using left to right order in peepmatch
10884 //
10885 // peepreplace ( instr_name ( [instruction_number.operand_name]* ) );
10886 // // provide an instruction_number.operand_name for each operand that appears
10887 // // in the replacement instruction's match rule
10888 //
10889 // ---------VM FLAGS---------------------------------------------------------
10890 //
10891 // All peephole optimizations can be turned off using -XX:-OptoPeephole
10892 //
10893 // Each peephole rule is given an identifying number starting with zero and
10894 // increasing by one in the order seen by the parser. An individual peephole
10895 // can be enabled, and all others disabled, by using -XX:OptoPeepholeAt=#
10896 // on the command-line.
10897 //
10898 // ---------CURRENT LIMITATIONS----------------------------------------------
10899 //
10900 // Only match adjacent instructions in same basic block
10901 // Only equality constraints
10902 // Only constraints between operands, not (0.dest_reg == EAX_enc)
10903 // Only one replacement instruction
10904 //
10905 // ---------EXAMPLE----------------------------------------------------------
10906 //
10907 // // pertinent parts of existing instructions in architecture description
10908 // instruct movI(eRegI dst, eRegI src) %{
10909 // match(Set dst (CopyI src));
10910 // %}
10911 //
10912 // instruct incI_eReg(eRegI dst, immI1 src, eFlagsReg cr) %{
10913 // match(Set dst (AddI dst src));
10914 // effect(KILL cr);
10915 // %}
10916 //
10917 // // Change (inc mov) to lea
10918 // peephole %{
10919 // // increment preceded by register-register move
10920 // peepmatch ( incI_eReg movI );
10921 // // require that the destination register of the increment
10922 // // match the destination register of the move
10923 // peepconstraint ( 0.dst == 1.dst );
10924 // // construct a replacement instruction that sets
10925 // // the destination to ( move's source register + one )
10926 // peepreplace ( incI_eReg_immI1( 0.dst 1.src 0.src ) );
10927 // %}
10928 //
10930 // // Change load of spilled value to only a spill
10931 // instruct storeI(memory mem, eRegI src) %{
10932 // match(Set mem (StoreI mem src));
10933 // %}
10934 //
10935 // instruct loadI(eRegI dst, memory mem) %{
10936 // match(Set dst (LoadI mem));
10937 // %}
10938 //
10939 // peephole %{
10940 // peepmatch ( loadI storeI );
10941 // peepconstraint ( 1.src == 0.dst, 1.mem == 0.mem );
10942 // peepreplace ( storeI( 1.mem 1.mem 1.src ) );
10943 // %}
10945 //----------SMARTSPILL RULES---------------------------------------------------
10946 // These must follow all instruction definitions as they use the names
10947 // defined in the instructions definitions.
10948 //
10949 // SPARC will probably not have any of these rules due to RISC instruction set.
10951 //----------PIPELINE-----------------------------------------------------------
10952 // Rules which define the behavior of the target architectures pipeline.